Update V8 to r4730 as required by WebKit r60469
diff --git a/ChangeLog b/ChangeLog
index 30cc6b9..3c7003a 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,32 @@
+2010-05-26: Version 2.2.12
+
+        Allowed accessors to be defined on objects rather than just object
+        templates.
+
+        Changed the ScriptData API.
+
+
+2010-05-21: Version 2.2.11
+
+        Fix crash bug in liveedit on 64 bit.
+
+        Use 'full compiler' when debugging is active.  This should increase
+        the density of possible break points, making single stepping more
+        fine-grained.  This will only take effect for functions compiled after
+        debugging has been started, so recompilation of all functions is
+        required to get the full effect.  IA32 and x64 only for now.
+
+        Misc. fixes to the Solaris build.
+
+        Add new flags --print-cumulative-gc-stat and --trace-gc-nvp.
+
+        Add filtering of CPU profiles by security context.
+
+        Fix crash bug on ARM when running without VFP2 or VFP3.
+
+        Incremental performance improvements in all backends.
+
+
 2010-05-17: Version 2.2.10
 
         Performance improvements in the x64 and ARM backends.
@@ -11,7 +40,7 @@
         non date string (issue 696).
 
         Allow unaligned memory accesses on ARM targets that support it (by
-	Subrato K De of CodeAurora <subratokde@codeaurora.org>).
+        Subrato K De of CodeAurora <subratokde@codeaurora.org>).
 
         C++ API for retrieving JavaScript stack trace information.
 
diff --git a/SConstruct b/SConstruct
index 1056de7..cf6b57d 100644
--- a/SConstruct
+++ b/SConstruct
@@ -210,12 +210,6 @@
       'CCFLAGS':      ['-m32', '-DCAN_USE_UNALIGNED_ACCESSES=1'],
       'LINKFLAGS':    ['-m32']
     },
-    'armvariant:thumb2': {
-      'CPPDEFINES':   ['V8_ARM_VARIANT_THUMB']
-    },
-    'armvariant:arm': {
-      'CPPDEFINES':   ['V8_ARM_VARIANT_ARM']
-    },
     'arch:mips': {
       'CPPDEFINES':   ['V8_TARGET_ARCH_MIPS'],
       'simulator:none': {
@@ -765,11 +759,6 @@
     'default': 'hidden',
     'help': 'shared library symbol visibility'
   },
-  'armvariant': {
-    'values': ['arm', 'thumb2', 'none'],
-    'default': 'none',
-    'help': 'generate thumb2 instructions instead of arm instructions (default)'
-  },
   'pgo': {
     'values': ['off', 'instrument', 'optimize'],
     'default': 'off',
@@ -963,10 +952,6 @@
     if 'msvcltcg' in ARGUMENTS:
       print "Warning: forcing msvcltcg on as it is required for pgo (%s)" % options['pgo']
     options['msvcltcg'] = 'on'
-  if (options['armvariant'] == 'none' and options['arch'] == 'arm'):
-    options['armvariant'] = 'arm'
-  if (options['armvariant'] != 'none' and options['arch'] != 'arm'):
-    options['armvariant'] = 'none'
   if options['arch'] == 'mips':
     if ('regexp' in ARGUMENTS) and options['regexp'] == 'native':
       # Print a warning if native regexp is specified for mips
diff --git a/V8_MERGE_REVISION b/V8_MERGE_REVISION
index 8a78f9f..cb51e93 100644
--- a/V8_MERGE_REVISION
+++ b/V8_MERGE_REVISION
@@ -1,4 +1,4 @@
 We use a V8 revision that has been used for a Chromium release.
 
-http://src.chromium.org/svn/releases/6.0.415.0/DEPS
-http://v8.googlecode.com/svn/trunk@4660
+http://src.chromium.org/svn/releases/6.0.423.0/DEPS
+http://v8.googlecode.com/svn/trunk@4730
diff --git a/include/v8-debug.h b/include/v8-debug.h
index f7b4fa1..c53b634 100644
--- a/include/v8-debug.h
+++ b/include/v8-debug.h
@@ -144,6 +144,39 @@
   
 
   /**
+   * An event details object passed to the debug event listener.
+   */
+  class EventDetails {
+   public:
+    /**
+     * Event type.
+     */
+    virtual DebugEvent GetEvent() const = 0;
+
+    /**
+     * Access to execution state and event data of the debug event. Don't
+     * store these across callbacks, as their content becomes invalid.
+     */
+    virtual Handle<Object> GetExecutionState() const = 0;
+    virtual Handle<Object> GetEventData() const = 0;
+
+    /**
+     * Get the context active when the debug event happened. Note that this
+     * is not the currently active context, as the JavaScript part of the
+     * debugger runs in its own context, which is entered at this point.
+     */
+    virtual Handle<Context> GetEventContext() const = 0;
+
+    /**
+     * Client data passed with the corresponding callback when it was
+     * registered.
+     */
+    virtual Handle<Value> GetCallbackData() const = 0;
+
+    virtual ~EventDetails() {}
+  };
+
+
+  /**
    * Debug event callback function.
    *
    * \param event the type of the debug event that triggered the callback
@@ -157,6 +190,15 @@
                                 Handle<Object> event_data,
                                 Handle<Value> data);
 
+  /**
+   * Debug event callback function.
+   *
+   * \param event_details object providing information about the debug event
+   *
+   * An EventCallback2 does not take possession of the event data,
+   * and must not rely on the data persisting after the handler returns.
+   */
+  typedef void (*EventCallback2)(const EventDetails& event_details);
 
   /**
    * Debug message callback function.
@@ -165,7 +207,7 @@
    * \param length length of the message
    * \param client_data the data value passed when registering the message handler
 
-   * A MessageHandler does not take posession of the message string,
+   * A MessageHandler does not take possession of the message string,
    * and must not rely on the data persisting after the handler returns.
    *
    * This message handler is deprecated. Use MessageHandler2 instead.
@@ -178,7 +220,7 @@
    *
    * \param message the debug message handler message object
 
-   * A MessageHandler does not take posession of the message data,
+   * A MessageHandler does not take possession of the message data,
    * and must not rely on the data persisting after the handler returns.
    */
   typedef void (*MessageHandler2)(const Message& message);
@@ -196,6 +238,8 @@
   // Set a C debug event listener.
   static bool SetDebugEventListener(EventCallback that,
                                     Handle<Value> data = Handle<Value>());
+  static bool SetDebugEventListener2(EventCallback2 that,
+                                     Handle<Value> data = Handle<Value>());
 
   // Set a JavaScript debug event listener.
   static bool SetDebugEventListener(v8::Handle<v8::Object> that,
diff --git a/include/v8-profiler.h b/include/v8-profiler.h
index f1b8ffb..bb41072 100644
--- a/include/v8-profiler.h
+++ b/include/v8-profiler.h
@@ -140,22 +140,37 @@
 class V8EXPORT CpuProfiler {
  public:
   /**
+   * A note on security token usage. As scripts from different
+   * origins can run inside a single V8 instance, it is possible to
+   * have functions from different security contexts intermixed in a
+   * single CPU profile. To avoid exposing function names belonging to
+   * other contexts, filtering by security token is performed while
+   * obtaining profiling results.
+   */
+
+  /**
    * Returns the number of profiles collected (doesn't include
    * profiles that are being collected at the moment of call.)
    */
   static int GetProfilesCount();
 
   /** Returns a profile by index. */
-  static const CpuProfile* GetProfile(int index);
+  static const CpuProfile* GetProfile(
+      int index,
+      Handle<Value> security_token = Handle<Value>());
 
   /** Returns a profile by uid. */
-  static const CpuProfile* FindProfile(unsigned uid);
+  static const CpuProfile* FindProfile(
+      unsigned uid,
+      Handle<Value> security_token = Handle<Value>());
 
   /**
    * Starts collecting CPU profile. Title may be an empty string. It
    * is allowed to have several profiles being collected at
    * once. Attempts to start collecting several profiles with the same
-   * title are silently ignored.
+   * title are silently ignored. While collecting a profile, functions
+   * from all security contexts are included in it. The token-based
+   * filtering is only performed when querying for a profile.
    */
   static void StartProfiling(Handle<String> title);
 
@@ -163,7 +178,9 @@
    * Stops collecting CPU profile with a given title and returns it.
    * If the title given is empty, finishes the last profile started.
    */
-  static const CpuProfile* StopProfiling(Handle<String> title);
+  static const CpuProfile* StopProfiling(
+      Handle<String> title,
+      Handle<Value> security_token = Handle<Value>());
 };
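
For illustration, a sketch of the intended flow (the title and querying
context are illustrative): collection always records every security context,
and the token is applied only when the profile is fetched.

    // Collection is unfiltered; functions from all contexts are recorded.
    v8::CpuProfiler::StartProfiling(v8::String::New("startup"));
    // ... run scripts from several origins ...

    // Query with the calling context's token so that function names from
    // foreign security contexts are filtered out of the result.
    v8::Handle<v8::Value> token =
        v8::Context::GetCurrent()->GetSecurityToken();
    const v8::CpuProfile* profile =
        v8::CpuProfiler::StopProfiling(v8::String::New("startup"), token);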
 
 
diff --git a/include/v8.h b/include/v8.h
index eb12de8..5b5dabe 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -126,6 +126,7 @@
 class FunctionTemplate;
 class ObjectTemplate;
 class Data;
+class AccessorInfo;
 class StackTrace;
 class StackFrame;
 
@@ -512,11 +513,37 @@
 class V8EXPORT ScriptData {  // NOLINT
  public:
   virtual ~ScriptData() { }
+  /**
+   * Pre-compiles the specified script (context-independent).
+   *
+   * \param input Pointer to UTF-8 script source code.
+   * \param length Length of UTF-8 script source code.
+   */
   static ScriptData* PreCompile(const char* input, int length);
-  static ScriptData* New(unsigned* data, int length);
 
+  /**
+   * Load previous pre-compilation data.
+   *
+   * \param data Pointer to data returned by a call to Data() of a previous
+   *   ScriptData. Ownership is not transferred.
+   * \param length Length of data.
+   */
+  static ScriptData* New(const char* data, int length);
+
+  /**
+   * Returns the length of Data().
+   */
   virtual int Length() = 0;
-  virtual unsigned* Data() = 0;
+
+  /**
+   * Returns a serialized representation of this ScriptData that can later be
+   * passed to New(). NOTE: Serialized data is platform-dependent.
+   */
+  virtual const char* Data() = 0;
+
+  /**
+   * Returns true if the source code could not be parsed.
+   */
   virtual bool HasError() = 0;
 };
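
For illustration, a sketch of the round trip the reworked API supports (the
source string is illustrative and persistence of the bytes is elided):

    #include <cstring>  // strlen, for this sketch only

    const char* source = "function add(a, b) { return a + b; }";
    v8::ScriptData* pre =
        v8::ScriptData::PreCompile(source, static_cast<int>(strlen(source)));

    // Data() is platform-dependent; persist Length() bytes of it.
    const char* bytes = pre->Data();
    int length = pre->Length();

    // Later, on the same platform: rebuild from the saved bytes. New()
    // copies them, so 'pre' may be deleted afterwards.
    v8::ScriptData* restored = v8::ScriptData::New(bytes, length);
    delete pre;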
 
@@ -1306,6 +1333,41 @@
 };
 
 /**
+ * Accessor[Getter|Setter] are used as callback functions when
+ * setting|getting a particular property. See Object and ObjectTemplate's
+ * method SetAccessor.
+ */
+typedef Handle<Value> (*AccessorGetter)(Local<String> property,
+                                        const AccessorInfo& info);
+
+
+typedef void (*AccessorSetter)(Local<String> property,
+                               Local<Value> value,
+                               const AccessorInfo& info);
+
+
+/**
+ * Access control specifications.
+ *
+ * Some accessors should be accessible across contexts.  These
+ * accessors have an explicit access control parameter which specifies
+ * the kind of cross-context access that should be allowed.
+ *
+ * Additionally, for security, accessors can prohibit overwriting by
+ * accessors defined in JavaScript.  For objects that have such
+ * accessors either locally or in their prototype chain it is not
+ * possible to overwrite the accessor by using __defineGetter__ or
+ * __defineSetter__ from JavaScript code.
+ */
+enum AccessControl {
+  DEFAULT               = 0,
+  ALL_CAN_READ          = 1,
+  ALL_CAN_WRITE         = 1 << 1,
+  PROHIBITS_OVERWRITING = 1 << 2
+};
+
+
+/**
  * A JavaScript object (ECMA-262, 4.3.3)
  */
 class V8EXPORT Object : public Value {
@@ -1347,6 +1409,13 @@
 
   bool Delete(uint32_t index);
 
+  bool SetAccessor(Handle<String> name,
+                   AccessorGetter getter,
+                   AccessorSetter setter = 0,
+                   Handle<Value> data = Handle<Value>(),
+                   AccessControl settings = DEFAULT,
+                   PropertyAttribute attribute = None);
+
   /**
    * Returns an array containing the names of the enumerable properties
    * of this object, including properties from prototype objects.  The
@@ -1642,19 +1711,6 @@
 typedef int (*LookupCallback)(Local<Object> self, Local<String> name);
 
 /**
- * Accessor[Getter|Setter] are used as callback functions when
- * setting|getting a particular property. See objectTemplate::SetAccessor.
- */
-typedef Handle<Value> (*AccessorGetter)(Local<String> property,
-                                        const AccessorInfo& info);
-
-
-typedef void (*AccessorSetter)(Local<String> property,
-                               Local<Value> value,
-                               const AccessorInfo& info);
-
-
-/**
  * NamedProperty[Getter|Setter] are used as interceptors on object.
  * See ObjectTemplate::SetNamedPropertyHandler.
  */
@@ -1734,27 +1790,6 @@
 
 
 /**
- * Access control specifications.
- *
- * Some accessors should be accessible across contexts.  These
- * accessors have an explicit access control parameter which specifies
- * the kind of cross-context access that should be allowed.
- *
- * Additionally, for security, accessors can prohibit overwriting by
- * accessors defined in JavaScript.  For objects that have such
- * accessors either locally or in their prototype chain it is not
- * possible to overwrite the accessor by using __defineGetter__ or
- * __defineSetter__ from JavaScript code.
- */
-enum AccessControl {
-  DEFAULT               = 0,
-  ALL_CAN_READ          = 1,
-  ALL_CAN_WRITE         = 1 << 1,
-  PROHIBITS_OVERWRITING = 1 << 2
-};
-
-
-/**
  * Access type specification.
  */
 enum AccessType {
@@ -2866,7 +2901,12 @@
    */
   void ReattachGlobal(Handle<Object> global_object);
 
-  /** Creates a new context. */
+  /** Creates a new context.
+   *
+   * Returns a persistent handle to the newly allocated context. This
+   * persistent handle must be disposed of when the context is no longer
+   * used, so that the context can be garbage collected.
+   */
   static Persistent<Context> New(
       ExtensionConfiguration* extensions = NULL,
       Handle<ObjectTemplate> global_template = Handle<ObjectTemplate>(),
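
For illustration, a sketch combining the two additions above: an accessor
defined directly on an object instance, and disposal of the persistent
context handle (all names are hypothetical):

    // Hypothetical getter matching the AccessorGetter typedef.
    static v8::Handle<v8::Value> XGetter(v8::Local<v8::String> property,
                                         const v8::AccessorInfo& info) {
      return v8::Integer::New(42);
    }

    void Demo() {
      v8::Persistent<v8::Context> context = v8::Context::New();
      context->Enter();
      {
        v8::HandleScope scope;
        v8::Local<v8::Object> obj = v8::Object::New();
        // Previously accessors could only be set on an ObjectTemplate;
        // now one can be attached to an existing object.
        obj->SetAccessor(v8::String::New("x"), XGetter);
      }
      context->Exit();
      // Dispose the persistent handle so the context can be collected.
      context.Dispose();
    }
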
diff --git a/src/SConscript b/src/SConscript
index b68f6d1..8466a0c 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -136,13 +136,8 @@
     arm/register-allocator-arm.cc
     arm/stub-cache-arm.cc
     arm/virtual-frame-arm.cc
-    """),
-  'armvariant:arm': Split("""
     arm/assembler-arm.cc
     """),
-  'armvariant:thumb2': Split("""
-    arm/assembler-thumb2.cc
-    """),
   'arch:mips': Split("""
     fast-codegen.cc
     mips/assembler-mips.cc
diff --git a/src/api.cc b/src/api.cc
index a4c38b7..a7948ae 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -48,7 +48,7 @@
 
 #define LOG_API(expr) LOG(ApiEntryCall(expr))
 
-#ifdef ENABLE_HEAP_PROTECTION
+#ifdef ENABLE_VMSTATE_TRACKING
 #define ENTER_V8 i::VMState __state__(i::OTHER)
 #define LEAVE_V8 i::VMState __state__(i::EXTERNAL)
 #else
@@ -58,11 +58,10 @@
 
 namespace v8 {
 
-
-#define ON_BAILOUT(location, code)              \
-  if (IsDeadCheck(location)) {                  \
-    code;                                       \
-    UNREACHABLE();                              \
+#define ON_BAILOUT(location, code)                                 \
+  if (IsDeadCheck(location) || v8::V8::IsExecutionTerminating()) { \
+    code;                                                          \
+    UNREACHABLE();                                                 \
   }
 
 
@@ -776,6 +775,28 @@
 }
 
 
+static i::Handle<i::AccessorInfo> MakeAccessorInfo(
+      v8::Handle<String> name,
+      AccessorGetter getter,
+      AccessorSetter setter,
+      v8::Handle<Value> data,
+      v8::AccessControl settings,
+      v8::PropertyAttribute attributes) {
+  i::Handle<i::AccessorInfo> obj = i::Factory::NewAccessorInfo();
+  ASSERT(getter != NULL);
+  obj->set_getter(*FromCData(getter));
+  obj->set_setter(*FromCData(setter));
+  if (data.IsEmpty()) data = v8::Undefined();
+  obj->set_data(*Utils::OpenHandle(*data));
+  obj->set_name(*Utils::OpenHandle(*name));
+  if (settings & ALL_CAN_READ) obj->set_all_can_read(true);
+  if (settings & ALL_CAN_WRITE) obj->set_all_can_write(true);
+  if (settings & PROHIBITS_OVERWRITING) obj->set_prohibits_overwriting(true);
+  obj->set_property_attributes(static_cast<PropertyAttributes>(attributes));
+  return obj;
+}
+
+
 void FunctionTemplate::AddInstancePropertyAccessor(
       v8::Handle<String> name,
       AccessorGetter getter,
@@ -788,18 +809,10 @@
   }
   ENTER_V8;
   HandleScope scope;
-  i::Handle<i::AccessorInfo> obj = i::Factory::NewAccessorInfo();
-  ASSERT(getter != NULL);
-  obj->set_getter(*FromCData(getter));
-  obj->set_setter(*FromCData(setter));
-  if (data.IsEmpty()) data = v8::Undefined();
-  obj->set_data(*Utils::OpenHandle(*data));
-  obj->set_name(*Utils::OpenHandle(*name));
-  if (settings & ALL_CAN_READ) obj->set_all_can_read(true);
-  if (settings & ALL_CAN_WRITE) obj->set_all_can_write(true);
-  if (settings & PROHIBITS_OVERWRITING) obj->set_prohibits_overwriting(true);
-  obj->set_property_attributes(static_cast<PropertyAttributes>(attributes));
 
+  i::Handle<i::AccessorInfo> obj = MakeAccessorInfo(name,
+                                                    getter, setter, data,
+                                                    settings, attributes);
   i::Handle<i::Object> list(Utils::OpenHandle(this)->property_accessors());
   if (list->IsUndefined()) {
     list = NeanderArray().value();
@@ -1106,8 +1119,19 @@
 }
 
 
-ScriptData* ScriptData::New(unsigned* data, int length) {
-  return new i::ScriptDataImpl(i::Vector<unsigned>(data, length));
+ScriptData* ScriptData::New(const char* data, int length) {
+  // Return an empty ScriptData if the length is obviously invalid.
+  if (length % sizeof(unsigned) != 0) {
+    return new i::ScriptDataImpl(i::Vector<unsigned>());
+  }
+
+  // Copy the data to ensure it is properly aligned.
+  int deserialized_data_length = length / sizeof(unsigned);
+  unsigned* deserialized_data = i::NewArray<unsigned>(deserialized_data_length);
+  memcpy(deserialized_data, data, length);
+
+  return new i::ScriptDataImpl(
+      i::Vector<unsigned>(deserialized_data, deserialized_data_length));
 }
 
 
@@ -2354,6 +2378,23 @@
 }
 
 
+bool Object::SetAccessor(Handle<String> name,
+                         AccessorGetter getter,
+                         AccessorSetter setter,
+                         v8::Handle<Value> data,
+                         AccessControl settings,
+                         PropertyAttribute attributes) {
+  ON_BAILOUT("v8::Object::SetAccessor()", return false);
+  ENTER_V8;
+  HandleScope scope;
+  i::Handle<i::AccessorInfo> info = MakeAccessorInfo(name,
+                                                     getter, setter, data,
+                                                     settings, attributes);
+  i::Handle<i::Object> result = i::SetAccessor(Utils::OpenHandle(this), info);
+  return !result.is_null() && !result->IsUndefined();
+}
+
+
 bool v8::Object::HasRealNamedProperty(Handle<String> key) {
   ON_BAILOUT("v8::Object::HasRealNamedProperty()", return false);
   return Utils::OpenHandle(this)->HasRealNamedProperty(
@@ -3992,10 +4033,40 @@
 // --- D e b u g   S u p p o r t ---
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
+
+static v8::Debug::EventCallback event_callback = NULL;
+
+static void EventCallbackWrapper(const v8::Debug::EventDetails& event_details) {
+  if (event_callback) {
+    event_callback(event_details.GetEvent(),
+                   event_details.GetExecutionState(),
+                   event_details.GetEventData(),
+                   event_details.GetCallbackData());
+  }
+}
+
+
 bool Debug::SetDebugEventListener(EventCallback that, Handle<Value> data) {
   EnsureInitialized("v8::Debug::SetDebugEventListener()");
   ON_BAILOUT("v8::Debug::SetDebugEventListener()", return false);
   ENTER_V8;
+
+  event_callback = that;
+
+  HandleScope scope;
+  i::Handle<i::Object> proxy = i::Factory::undefined_value();
+  if (that != NULL) {
+    proxy = i::Factory::NewProxy(FUNCTION_ADDR(EventCallbackWrapper));
+  }
+  i::Debugger::SetEventListener(proxy, Utils::OpenHandle(*data));
+  return true;
+}
+
+
+bool Debug::SetDebugEventListener2(EventCallback2 that, Handle<Value> data) {
+  EnsureInitialized("v8::Debug::SetDebugEventListener2()");
+  ON_BAILOUT("v8::Debug::SetDebugEventListener2()", return false);
+  ENTER_V8;
   HandleScope scope;
   i::Handle<i::Object> proxy = i::Factory::undefined_value();
   if (that != NULL) {
@@ -4250,15 +4321,23 @@
 }
 
 
-const CpuProfile* CpuProfiler::GetProfile(int index) {
+const CpuProfile* CpuProfiler::GetProfile(int index,
+                                          Handle<Value> security_token) {
   IsDeadCheck("v8::CpuProfiler::GetProfile");
-  return reinterpret_cast<const CpuProfile*>(i::CpuProfiler::GetProfile(index));
+  return reinterpret_cast<const CpuProfile*>(
+      i::CpuProfiler::GetProfile(
+          security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
+          index));
 }
 
 
-const CpuProfile* CpuProfiler::FindProfile(unsigned uid) {
+const CpuProfile* CpuProfiler::FindProfile(unsigned uid,
+                                           Handle<Value> security_token) {
   IsDeadCheck("v8::CpuProfiler::FindProfile");
-  return reinterpret_cast<const CpuProfile*>(i::CpuProfiler::FindProfile(uid));
+  return reinterpret_cast<const CpuProfile*>(
+      i::CpuProfiler::FindProfile(
+          security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
+          uid));
 }
 
 
@@ -4268,10 +4347,13 @@
 }
 
 
-const CpuProfile* CpuProfiler::StopProfiling(Handle<String> title) {
+const CpuProfile* CpuProfiler::StopProfiling(Handle<String> title,
+                                             Handle<Value> security_token) {
   IsDeadCheck("v8::CpuProfiler::StopProfiling");
   return reinterpret_cast<const CpuProfile*>(
-      i::CpuProfiler::StopProfiling(*Utils::OpenHandle(*title)));
+      i::CpuProfiler::StopProfiling(
+          security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
+          *Utils::OpenHandle(*title)));
 }
 
 #endif  // ENABLE_LOGGING_AND_PROFILING
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index 3f0854e..e292cef 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -39,6 +39,7 @@
 
 #include "arm/assembler-arm.h"
 #include "cpu.h"
+#include "debug.h"
 
 
 namespace v8 {
@@ -73,6 +74,11 @@
 }
 
 
+int RelocInfo::target_address_size() {
+  return Assembler::kExternalTargetSize;
+}
+
+
 void RelocInfo::set_target_address(Address target) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
   Assembler::set_target_address_at(pc_, target);
@@ -162,6 +168,26 @@
 }
 
 
+void RelocInfo::Visit(ObjectVisitor* visitor) {
+  RelocInfo::Mode mode = rmode();
+  if (mode == RelocInfo::EMBEDDED_OBJECT) {
+    visitor->VisitPointer(target_object_address());
+  } else if (RelocInfo::IsCodeTarget(mode)) {
+    visitor->VisitCodeTarget(this);
+  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+    visitor->VisitExternalReference(target_reference_address());
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  } else if (Debug::has_break_points() &&
+             RelocInfo::IsJSReturn(mode) &&
+             IsPatchedReturnSequence()) {
+    visitor->VisitDebugTarget(this);
+#endif
+  } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+    visitor->VisitRuntimeEntry(this);
+  }
+}
+
+
 Operand::Operand(int32_t immediate, RelocInfo::Mode rmode)  {
   rm_ = no_reg;
   imm32_ = immediate;
@@ -169,13 +195,6 @@
 }
 
 
-Operand::Operand(const char* s) {
-  rm_ = no_reg;
-  imm32_ = reinterpret_cast<int32_t>(s);
-  rmode_ = RelocInfo::EMBEDDED_STRING;
-}
-
-
 Operand::Operand(const ExternalReference& f)  {
   rm_ = no_reg;
   imm32_ = reinterpret_cast<int32_t>(f.address());
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index f1f59ce..050e15b 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -36,6 +36,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "arm/assembler-arm-inl.h"
 #include "serialize.h"
 
@@ -106,6 +108,15 @@
 const int RelocInfo::kApplyMask = 0;
 
 
+bool RelocInfo::IsCodedSpecially() {
+  // The deserializer needs to know whether a pointer is specially coded.  Being
+  // specially coded on ARM means that it is a movw/movt instruction.  We don't
+  // generate those yet.
+  return false;
+}
+
+
 void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
   // Patch the code at the current address with the supplied instructions.
   Instr* pc = reinterpret_cast<Instr*>(pc_);
@@ -268,6 +279,20 @@
     15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
 const Instr kBlxRegPattern =
     B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | 3 * B4;
+// A mask for the Rd register in push, pop, ldr and str instructions.
+const Instr kRdMask = 0x0000f000;
+static const int kRdShift = 12;
+static const Instr kLdrRegFpOffsetPattern =
+    al | B26 | L | Offset | fp.code() * B16;
+static const Instr kStrRegFpOffsetPattern =
+    al | B26 | Offset | fp.code() * B16;
+static const Instr kLdrRegFpNegOffsetPattern =
+    al | B26 | L | NegOffset | fp.code() * B16;
+static const Instr kStrRegFpNegOffsetPattern =
+    al | B26 | NegOffset | fp.code() * B16;
+static const Instr kLdrStrInstrTypeMask = 0xffff0000;
+static const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
+static const Instr kLdrStrOffsetMask = 0x00000fff;
 
 // Spare buffer.
 static const int kMinimalBufferSize = 4*KB;
@@ -395,6 +420,43 @@
 }
 
 
+Register Assembler::GetRd(Instr instr) {
+  Register reg;
+  reg.code_ = ((instr & kRdMask) >> kRdShift);
+  return reg;
+}
+
+
+bool Assembler::IsPush(Instr instr) {
+  return ((instr & ~kRdMask) == kPushRegPattern);
+}
+
+
+bool Assembler::IsPop(Instr instr) {
+  return ((instr & ~kRdMask) == kPopRegPattern);
+}
+
+
+bool Assembler::IsStrRegFpOffset(Instr instr) {
+  return ((instr & kLdrStrInstrTypeMask) == kStrRegFpOffsetPattern);
+}
+
+
+bool Assembler::IsLdrRegFpOffset(Instr instr) {
+  return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpOffsetPattern);
+}
+
+
+bool Assembler::IsStrRegFpNegOffset(Instr instr) {
+  return ((instr & kLdrStrInstrTypeMask) == kStrRegFpNegOffsetPattern);
+}
+
+
+bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
+  return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern);
+}
+
+
 // Labels refer to positions in the (to be) generated code.
 // There are bound, linked, and unused labels.
 //
@@ -887,15 +949,12 @@
   //   str(src, MemOperand(sp, 4, NegPreIndex), al);
   //   add(sp, sp, Operand(kPointerSize));
   // Both instructions can be eliminated.
-  int pattern_size = 2 * kInstrSize;
-  if (FLAG_push_pop_elimination &&
-      last_bound_pos_ <= (pc_offset() - pattern_size) &&
-      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
+  if (can_peephole_optimize(2) &&
       // Pattern.
       instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
       (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
     pc_ -= 2 * kInstrSize;
-    if (FLAG_print_push_pop_elimination) {
+    if (FLAG_print_peephole_optimization) {
       PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
     }
   }
@@ -1086,20 +1145,170 @@
   }
   addrmod2(cond | B26 | L, dst, src);
 
-  // Eliminate pattern: push(r), pop(r)
-  //   str(r, MemOperand(sp, 4, NegPreIndex), al)
-  //   ldr(r, MemOperand(sp, 4, PostIndex), al)
-  // Both instructions can be eliminated.
-  int pattern_size = 2 * kInstrSize;
-  if (FLAG_push_pop_elimination &&
-      last_bound_pos_ <= (pc_offset() - pattern_size) &&
-      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
-      // Pattern.
-      instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) &&
-      instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) {
-    pc_ -= 2 * kInstrSize;
-    if (FLAG_print_push_pop_elimination) {
-      PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
+  // Eliminate pattern: push(ry), pop(rx)
+  //   str(ry, MemOperand(sp, 4, NegPreIndex), al)
+  //   ldr(rx, MemOperand(sp, 4, PostIndex), al)
+  // Both instructions can be eliminated if ry = rx.
+  // If ry != rx, a register copy from ry to rx is inserted
+  // after eliminating the push and the pop instructions.
+  Instr push_instr = instr_at(pc_ - 2 * kInstrSize);
+  Instr pop_instr = instr_at(pc_ - 1 * kInstrSize);
+
+  if (can_peephole_optimize(2) &&
+      IsPush(push_instr) &&
+      IsPop(pop_instr)) {
+    if ((pop_instr & kRdMask) != (push_instr & kRdMask)) {
+      // For consecutive push and pop on different registers,
+      // we delete both the push & pop and insert a register move.
+      // push ry, pop rx --> mov rx, ry
+      Register reg_pushed, reg_popped;
+      reg_pushed = GetRd(push_instr);
+      reg_popped = GetRd(pop_instr);
+      pc_ -= 2 * kInstrSize;
+      // Insert a mov instruction, which is better than a pair of push & pop
+      mov(reg_popped, reg_pushed);
+      if (FLAG_print_peephole_optimization) {
+        PrintF("%x push/pop (diff reg) replaced by a reg move\n", pc_offset());
+      }
+    } else {
+      // For consecutive push and pop on the same register,
+      // both the push and the pop can be deleted.
+      pc_ -= 2 * kInstrSize;
+      if (FLAG_print_peephole_optimization) {
+        PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
+      }
+    }
+  }
+
+  if (can_peephole_optimize(2)) {
+    Instr str_instr = instr_at(pc_ - 2 * kInstrSize);
+    Instr ldr_instr = instr_at(pc_ - 1 * kInstrSize);
+
+    if ((IsStrRegFpOffset(str_instr) &&
+         IsLdrRegFpOffset(ldr_instr)) ||
+        (IsStrRegFpNegOffset(str_instr) &&
+         IsLdrRegFpNegOffset(ldr_instr))) {
+      if ((ldr_instr & kLdrStrInstrArgumentMask) ==
+            (str_instr & kLdrStrInstrArgumentMask)) {
+        // Pattern: Ldr/str same fp+offset, same register.
+        //
+        // The following:
+        // str rx, [fp, #-12]
+        // ldr rx, [fp, #-12]
+        //
+        // Becomes:
+        // str rx, [fp, #-12]
+
+        pc_ -= 1 * kInstrSize;
+        if (FLAG_print_peephole_optimization) {
+          PrintF("%x str/ldr (fp + same offset), same reg\n", pc_offset());
+        }
+      } else if ((ldr_instr & kLdrStrOffsetMask) ==
+                 (str_instr & kLdrStrOffsetMask)) {
+        // Pattern: Ldr/str same fp+offset, different register.
+        //
+        // The following:
+        // str rx, [fp, #-12]
+        // ldr ry, [fp, #-12]
+        //
+        // Becomes:
+        // str rx, [fp, #-12]
+        // mov ry, rx
+
+        Register reg_stored, reg_loaded;
+        reg_stored = GetRd(str_instr);
+        reg_loaded = GetRd(ldr_instr);
+        pc_ -= 1 * kInstrSize;
+        // Insert a mov instruction, which is better than ldr.
+        mov(reg_loaded, reg_stored);
+        if (FLAG_print_peephole_optimization) {
+          PrintF("%x str/ldr (fp + same offset), diff reg \n", pc_offset());
+        }
+      }
+    }
+  }
+
+  if (can_peephole_optimize(3)) {
+    Instr mem_write_instr = instr_at(pc_ - 3 * kInstrSize);
+    Instr ldr_instr = instr_at(pc_ - 2 * kInstrSize);
+    Instr mem_read_instr = instr_at(pc_ - 1 * kInstrSize);
+    if (IsPush(mem_write_instr) &&
+        IsPop(mem_read_instr)) {
+      if ((IsLdrRegFpOffset(ldr_instr) ||
+           IsLdrRegFpNegOffset(ldr_instr))) {
+        if ((mem_write_instr & kRdMask) ==
+              (mem_read_instr & kRdMask)) {
+          // Pattern: push & pop from/to same register,
+          // with a fp+offset ldr in between
+          //
+          // The following:
+          // str rx, [sp, #-4]!
+          // ldr rz, [fp, #-24]
+          // ldr rx, [sp], #+4
+          //
+          // Becomes:
+          // if(rx == rz)
+          //   delete all
+          // else
+          //   ldr rz, [fp, #-24]
+
+          if ((mem_write_instr & kRdMask) == (ldr_instr & kRdMask)) {
+            pc_ -= 3 * kInstrSize;
+          } else {
+            pc_ -= 3 * kInstrSize;
+            // Reinsert back the ldr rz.
+            emit(ldr_instr);
+          }
+          if (FLAG_print_peephole_optimization) {
+            PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset());
+          }
+        } else {
+          // Pattern: push & pop from/to different registers
+          // with a fp+offset ldr in between
+          //
+          // The following:
+          // str rx, [sp, #-4]!
+          // ldr rz, [fp, #-24]
+          // ldr ry, [sp], #+4
+          //
+          // Becomes:
+          // if(ry == rz)
+          //   mov ry, rx;
+          // else if(rx != rz)
+          //   ldr rz, [fp, #-24]
+          //   mov ry, rx
+          // else if ((ry != rz) || (rx == rz)):
+          //   mov ry, rx
+          //   ldr rz, [fp, #-24]
+
+          Register reg_pushed, reg_popped;
+          if ((mem_read_instr & kRdMask) == (ldr_instr & kRdMask)) {
+            reg_pushed = GetRd(mem_write_instr);
+            reg_popped = GetRd(mem_read_instr);
+            pc_ -= 3 * kInstrSize;
+            mov(reg_popped, reg_pushed);
+          } else if ((mem_write_instr & kRdMask) != (ldr_instr & kRdMask)) {
+            reg_pushed = GetRd(mem_write_instr);
+            reg_popped = GetRd(mem_read_instr);
+            pc_ -= 3 * kInstrSize;
+            emit(ldr_instr);
+            mov(reg_popped, reg_pushed);
+          } else if (((mem_read_instr & kRdMask) != (ldr_instr & kRdMask)) ||
+                     ((mem_write_instr & kRdMask) == (ldr_instr & kRdMask))) {
+            reg_pushed = GetRd(mem_write_instr);
+            reg_popped = GetRd(mem_read_instr);
+            pc_ -= 3 * kInstrSize;
+            mov(reg_popped, reg_pushed);
+            emit(ldr_instr);
+          }
+          if (FLAG_print_peephole_optimization) {
+            PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset());
+          }
+        }
+      }
     }
   }
 }
@@ -1111,16 +1320,13 @@
   // Eliminate pattern: pop(), push(r)
   //     add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
   // ->  str r, [sp, 0], al
-  int pattern_size = 2 * kInstrSize;
-  if (FLAG_push_pop_elimination &&
-     last_bound_pos_ <= (pc_offset() - pattern_size) &&
-     reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
+  if (can_peephole_optimize(2) &&
      // Pattern.
      instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
      instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
     pc_ -= 2 * kInstrSize;
     emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
-    if (FLAG_print_push_pop_elimination) {
+    if (FLAG_print_peephole_optimization) {
       PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
     }
   }
@@ -1157,33 +1363,25 @@
 }
 
 
-void Assembler::ldrd(Register dst, const MemOperand& src, Condition cond) {
+void Assembler::ldrd(Register dst1, Register dst2,
+                     const MemOperand& src, Condition cond) {
+  ASSERT(CpuFeatures::IsEnabled(ARMv7));
   ASSERT(src.rm().is(no_reg));
-#ifdef CAN_USE_ARMV7_INSTRUCTIONS
-  addrmod3(cond | B7 | B6 | B4, dst, src);
-#else
-  ldr(dst, src, cond);
-  MemOperand src1(src);
-  src1.set_offset(src1.offset() + 4);
-  Register dst1(dst);
-  dst1.code_ = dst1.code_ + 1;
-  ldr(dst1, src1, cond);
-#endif
+  ASSERT(!dst1.is(lr));  // r14.
+  ASSERT_EQ(0, dst1.code() % 2);
+  ASSERT_EQ(dst1.code() + 1, dst2.code());
+  addrmod3(cond | B7 | B6 | B4, dst1, src);
 }
 
 
-void Assembler::strd(Register src, const MemOperand& dst, Condition cond) {
+void Assembler::strd(Register src1, Register src2,
+                     const MemOperand& dst, Condition cond) {
   ASSERT(dst.rm().is(no_reg));
-#ifdef CAN_USE_ARMV7_INSTRUCTIONS
-  addrmod3(cond | B7 | B6 | B5 | B4, src, dst);
-#else
-  str(src, dst, cond);
-  MemOperand dst1(dst);
-  dst1.set_offset(dst1.offset() + 4);
-  Register src1(src);
-  src1.code_ = src1.code_ + 1;
-  str(src1, dst1, cond);
-#endif
+  ASSERT(!src1.is(lr));  // r14.
+  ASSERT_EQ(0, src1.code() % 2);
+  ASSERT_EQ(src1.code() + 1, src2.code());
+  ASSERT(CpuFeatures::IsEnabled(ARMv7));
+  addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
 }
 
 // Load/Store multiple instructions.
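
For illustration, a sketch of the new two-register ldrd/strd forms (the
registers and offset are illustrative; the pair must be even/odd and ARMv7
must have been probed as available):

    // Assuming 'masm' points at a live Assembler.
    CpuFeatures::Scope scope(ARMv7);
    masm->strd(r4, r5, MemOperand(fp, -8));  // store the r4:r5 doubleword
    masm->ldrd(r4, r5, MemOperand(fp, -8));  // load it back into r4:r5
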
@@ -1216,26 +1414,6 @@
 }
 
 
-// Semaphore instructions.
-void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
-  ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
-  ASSERT(!dst.is(base) && !src.is(base));
-  emit(cond | P | base.code()*B16 | dst.code()*B12 |
-       B7 | B4 | src.code());
-}
-
-
-void Assembler::swpb(Register dst,
-                     Register src,
-                     Register base,
-                     Condition cond) {
-  ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
-  ASSERT(!dst.is(base) && !src.is(base));
-  emit(cond | P | B | base.code()*B16 | dst.code()*B12 |
-       B7 | B4 | src.code());
-}
-
-
 // Exception-generating instructions and debugging support.
 void Assembler::stop(const char* msg) {
 #ifndef __arm__
@@ -1779,34 +1957,6 @@
 }
 
 
-void Assembler::lea(Register dst,
-                    const MemOperand& x,
-                    SBit s,
-                    Condition cond) {
-  int am = x.am_;
-  if (!x.rm_.is_valid()) {
-    // Immediate offset.
-    if ((am & P) == 0)  // post indexing
-      mov(dst, Operand(x.rn_), s, cond);
-    else if ((am & U) == 0)  // negative indexing
-      sub(dst, x.rn_, Operand(x.offset_), s, cond);
-    else
-      add(dst, x.rn_, Operand(x.offset_), s, cond);
-  } else {
-    // Register offset (shift_imm_ and shift_op_ are 0) or scaled
-    // register offset the constructors make sure than both shift_imm_
-    // and shift_op_ are initialized.
-    ASSERT(!x.rm_.is(pc));
-    if ((am & P) == 0)  // post indexing
-      mov(dst, Operand(x.rn_), s, cond);
-    else if ((am & U) == 0)  // negative indexing
-      sub(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
-    else
-      add(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
-  }
-}
-
-
 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
   uint32_t dummy1;
   uint32_t dummy2;
@@ -2062,3 +2212,5 @@
 
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 61b84d4..a1b98f6 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -80,6 +80,11 @@
     return 1 << code_;
   }
 
+  void set_code(int code) {
+    code_ = code;
+    ASSERT(is_valid());
+  }
+
   // Unfortunately we can't make this private in a struct.
   int code_;
 };
@@ -458,7 +463,8 @@
       return offset_;
   }
 
-  Register rm() const {return rm_;}
+  Register rn() const { return rn_; }
+  Register rm() const { return rm_; }
 
  private:
   Register rn_;  // base
@@ -767,17 +773,17 @@
   void strh(Register src, const MemOperand& dst, Condition cond = al);
   void ldrsb(Register dst, const MemOperand& src, Condition cond = al);
   void ldrsh(Register dst, const MemOperand& src, Condition cond = al);
-  void ldrd(Register dst, const MemOperand& src, Condition cond = al);
-  void strd(Register src, const MemOperand& dst, Condition cond = al);
+  void ldrd(Register dst1,
+            Register dst2,
+            const MemOperand& src, Condition cond = al);
+  void strd(Register src1,
+            Register src2,
+            const MemOperand& dst, Condition cond = al);
 
   // Load/Store multiple instructions
   void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);
   void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
 
-  // Semaphore instructions
-  void swp(Register dst, Register src, Register base, Condition cond = al);
-  void swpb(Register dst, Register src, Register base, Condition cond = al);
-
   // Exception-generating instructions and debugging support
   void stop(const char* msg);
 
@@ -924,10 +930,6 @@
     add(sp, sp, Operand(kPointerSize));
   }
 
-  // Load effective address of memory operand x into register dst
-  void lea(Register dst, const MemOperand& x,
-           SBit s = LeaveCC, Condition cond = al);
-
   // Jump unconditionally to given label.
   void jmp(Label* L) { b(L, al); }
 
@@ -976,6 +978,12 @@
   int current_position() const { return current_position_; }
   int current_statement_position() const { return current_statement_position_; }
 
+  bool can_peephole_optimize(int instructions) {
+    if (!FLAG_peephole_optimization) return false;
+    if (last_bound_pos_ > pc_offset() - instructions * kInstrSize) return false;
+    return reloc_info_writer.last_pc() <= pc_ - instructions * kInstrSize;
+  }
+
   // Read/patch instructions
   static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
   static void instr_at_put(byte* pc, Instr instr) {
@@ -987,6 +995,13 @@
   static bool IsLdrRegisterImmediate(Instr instr);
   static int GetLdrRegisterImmediateOffset(Instr instr);
   static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
+  static Register GetRd(Instr instr);
+  static bool IsPush(Instr instr);
+  static bool IsPop(Instr instr);
+  static bool IsStrRegFpOffset(Instr instr);
+  static bool IsLdrRegFpOffset(Instr instr);
+  static bool IsStrRegFpNegOffset(Instr instr);
+  static bool IsLdrRegFpNegOffset(Instr instr);
 
 
  protected:
diff --git a/src/arm/assembler-thumb2-inl.h b/src/arm/assembler-thumb2-inl.h
deleted file mode 100644
index 9e0fc2f..0000000
--- a/src/arm/assembler-thumb2-inl.h
+++ /dev/null
@@ -1,263 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the
-// distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-// OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been modified
-// significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-
-#ifndef V8_ARM_ASSEMBLER_THUMB2_INL_H_
-#define V8_ARM_ASSEMBLER_THUMB2_INL_H_
-
-#include "arm/assembler-thumb2.h"
-#include "cpu.h"
-
-
-namespace v8 {
-namespace internal {
-
-Condition NegateCondition(Condition cc) {
-  ASSERT(cc != al);
-  return static_cast<Condition>(cc ^ ne);
-}
-
-
-void RelocInfo::apply(intptr_t delta) {
-  if (RelocInfo::IsInternalReference(rmode_)) {
-    // absolute code pointer inside code object moves with the code object.
-    int32_t* p = reinterpret_cast<int32_t*>(pc_);
-    *p += delta;  // relocate entry
-  }
-  // We do not use pc relative addressing on ARM, so there is
-  // nothing else to do.
-}
-
-
-Address RelocInfo::target_address() {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
-  return Assembler::target_address_at(pc_);
-}
-
-
-Address RelocInfo::target_address_address() {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
-  return reinterpret_cast<Address>(Assembler::target_address_address_at(pc_));
-}
-
-
-void RelocInfo::set_target_address(Address target) {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
-  Assembler::set_target_address_at(pc_, target);
-}
-
-
-Object* RelocInfo::target_object() {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  return Memory::Object_at(Assembler::target_address_address_at(pc_));
-}
-
-
-Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_));
-}
-
-
-Object** RelocInfo::target_object_address() {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  return reinterpret_cast<Object**>(Assembler::target_address_address_at(pc_));
-}
-
-
-void RelocInfo::set_target_object(Object* target) {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
-}
-
-
-Address* RelocInfo::target_reference_address() {
-  ASSERT(rmode_ == EXTERNAL_REFERENCE);
-  return reinterpret_cast<Address*>(Assembler::target_address_address_at(pc_));
-}
-
-
-Address RelocInfo::call_address() {
-  ASSERT(IsPatchedReturnSequence());
-  // The 2 instructions offset assumes patched return sequence.
-  ASSERT(IsJSReturn(rmode()));
-  return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
-}
-
-
-void RelocInfo::set_call_address(Address target) {
-  ASSERT(IsPatchedReturnSequence());
-  // The 2 instructions offset assumes patched return sequence.
-  ASSERT(IsJSReturn(rmode()));
-  Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
-}
-
-
-Object* RelocInfo::call_object() {
-  return *call_object_address();
-}
-
-
-Object** RelocInfo::call_object_address() {
-  ASSERT(IsPatchedReturnSequence());
-  // The 2 instructions offset assumes patched return sequence.
-  ASSERT(IsJSReturn(rmode()));
-  return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
-}
-
-
-void RelocInfo::set_call_object(Object* target) {
-  *call_object_address() = target;
-}
-
-
-bool RelocInfo::IsPatchedReturnSequence() {
-  // On ARM a "call instruction" is actually two instructions.
-  //   mov lr, pc
-  //   ldr pc, [pc, #XXX]
-  return (Assembler::instr_at(pc_) == kMovLrPc)
-          && ((Assembler::instr_at(pc_ + Assembler::kInstrSize) & kLdrPCPattern)
-              == kLdrPCPattern);
-}
-
-
-Operand::Operand(int32_t immediate, RelocInfo::Mode rmode)  {
-  rm_ = no_reg;
-  imm32_ = immediate;
-  rmode_ = rmode;
-}
-
-
-Operand::Operand(const char* s) {
-  rm_ = no_reg;
-  imm32_ = reinterpret_cast<int32_t>(s);
-  rmode_ = RelocInfo::EMBEDDED_STRING;
-}
-
-
-Operand::Operand(const ExternalReference& f)  {
-  rm_ = no_reg;
-  imm32_ = reinterpret_cast<int32_t>(f.address());
-  rmode_ = RelocInfo::EXTERNAL_REFERENCE;
-}
-
-
-Operand::Operand(Smi* value) {
-  rm_ = no_reg;
-  imm32_ =  reinterpret_cast<intptr_t>(value);
-  rmode_ = RelocInfo::NONE;
-}
-
-
-Operand::Operand(Register rm) {
-  rm_ = rm;
-  rs_ = no_reg;
-  shift_op_ = LSL;
-  shift_imm_ = 0;
-}
-
-
-bool Operand::is_reg() const {
-  return rm_.is_valid() &&
-         rs_.is(no_reg) &&
-         shift_op_ == LSL &&
-         shift_imm_ == 0;
-}
-
-
-void Assembler::CheckBuffer() {
-  if (buffer_space() <= kGap) {
-    GrowBuffer();
-  }
-  if (pc_offset() >= next_buffer_check_) {
-    CheckConstPool(false, true);
-  }
-}
-
-
-void Assembler::emit(Instr x) {
-  CheckBuffer();
-  *reinterpret_cast<Instr*>(pc_) = x;
-  pc_ += kInstrSize;
-}
-
-
-Address Assembler::target_address_address_at(Address pc) {
-  Address target_pc = pc;
-  Instr instr = Memory::int32_at(target_pc);
-  // If we have a bx instruction, the instruction before the bx is
-  // what we need to patch.
-  static const int32_t kBxInstMask = 0x0ffffff0;
-  static const int32_t kBxInstPattern = 0x012fff10;
-  if ((instr & kBxInstMask) == kBxInstPattern) {
-    target_pc -= kInstrSize;
-    instr = Memory::int32_at(target_pc);
-  }
-  // Verify that the instruction to patch is a
-  // ldr<cond> <Rd>, [pc +/- offset_12].
-  ASSERT((instr & 0x0f7f0000) == 0x051f0000);
-  int offset = instr & 0xfff;  // offset_12 is unsigned
-  if ((instr & (1 << 23)) == 0) offset = -offset;  // U bit defines offset sign
-  // Verify that the constant pool comes after the instruction referencing it.
-  ASSERT(offset >= -4);
-  return target_pc + offset + 8;
-}
-
-
-Address Assembler::target_address_at(Address pc) {
-  return Memory::Address_at(target_address_address_at(pc));
-}
-
-
-void Assembler::set_target_at(Address constant_pool_entry,
-                              Address target) {
-  Memory::Address_at(constant_pool_entry) = target;
-}
-
-
-void Assembler::set_target_address_at(Address pc, Address target) {
-  Memory::Address_at(target_address_address_at(pc)) = target;
-  // Intuitively, we would think it is necessary to flush the instruction cache
-  // after patching a target address in the code as follows:
-  //   CPU::FlushICache(pc, sizeof(target));
-  // However, on ARM, no instruction was actually patched by the assignment
-  // above; the target address is not part of an instruction, it is patched in
-  // the constant pool and is read via a data access; the instruction accessing
-  // this address in the constant pool remains unchanged.
-}
-
-} }  // namespace v8::internal
-
-#endif  // V8_ARM_ASSEMBLER_THUMB2_INL_H_
diff --git a/src/arm/assembler-thumb2.cc b/src/arm/assembler-thumb2.cc
deleted file mode 100644
index e31c429..0000000
--- a/src/arm/assembler-thumb2.cc
+++ /dev/null
@@ -1,1878 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the
-// distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-// OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
-
-#include "v8.h"
-
-#include "arm/assembler-thumb2-inl.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-// Safe default is no features.
-unsigned CpuFeatures::supported_ = 0;
-unsigned CpuFeatures::enabled_ = 0;
-unsigned CpuFeatures::found_by_runtime_probing_ = 0;
-
-void CpuFeatures::Probe() {
-  // If the compiler is allowed to use vfp then we can use vfp too in our
-  // code generation.
-#if !defined(__arm__)
-  // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
-  if (FLAG_enable_vfp3) {
-      supported_ |= 1u << VFP3;
-  }
-  // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
-  if (FLAG_enable_armv7) {
-      supported_ |= 1u << ARMv7;
-  }
-#else
-  if (Serializer::enabled()) {
-    supported_ |= OS::CpuFeaturesImpliedByPlatform();
-    return;  // No features if we might serialize.
-  }
-
-  if (OS::ArmCpuHasFeature(VFP3)) {
-    // This implementation also sets the VFP flags if
-    // runtime detection of VFP returns true.
-    supported_ |= 1u << VFP3;
-    found_by_runtime_probing_ |= 1u << VFP3;
-  }
-
-  if (OS::ArmCpuHasFeature(ARMv7)) {
-    supported_ |= 1u << ARMv7;
-    found_by_runtime_probing_ |= 1u << ARMv7;
-  }
-#endif
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Register and CRegister
-
-Register no_reg = { -1 };
-
-Register r0  = {  0 };
-Register r1  = {  1 };
-Register r2  = {  2 };
-Register r3  = {  3 };
-Register r4  = {  4 };
-Register r5  = {  5 };
-Register r6  = {  6 };
-Register r7  = {  7 };
-Register r8  = {  8 };  // Used as context register.
-Register r9  = {  9 };
-Register r10 = { 10 };  // Used as roots register.
-Register fp  = { 11 };
-Register ip  = { 12 };
-Register sp  = { 13 };
-Register lr  = { 14 };
-Register pc  = { 15 };
-
-
-CRegister no_creg = { -1 };
-
-CRegister cr0  = {  0 };
-CRegister cr1  = {  1 };
-CRegister cr2  = {  2 };
-CRegister cr3  = {  3 };
-CRegister cr4  = {  4 };
-CRegister cr5  = {  5 };
-CRegister cr6  = {  6 };
-CRegister cr7  = {  7 };
-CRegister cr8  = {  8 };
-CRegister cr9  = {  9 };
-CRegister cr10 = { 10 };
-CRegister cr11 = { 11 };
-CRegister cr12 = { 12 };
-CRegister cr13 = { 13 };
-CRegister cr14 = { 14 };
-CRegister cr15 = { 15 };
-
-// Support for the VFP registers s0 to s31 (d0 to d15).
-// Note that "sN:sM" is the same as "dN/2".
-SwVfpRegister s0  = {  0 };
-SwVfpRegister s1  = {  1 };
-SwVfpRegister s2  = {  2 };
-SwVfpRegister s3  = {  3 };
-SwVfpRegister s4  = {  4 };
-SwVfpRegister s5  = {  5 };
-SwVfpRegister s6  = {  6 };
-SwVfpRegister s7  = {  7 };
-SwVfpRegister s8  = {  8 };
-SwVfpRegister s9  = {  9 };
-SwVfpRegister s10 = { 10 };
-SwVfpRegister s11 = { 11 };
-SwVfpRegister s12 = { 12 };
-SwVfpRegister s13 = { 13 };
-SwVfpRegister s14 = { 14 };
-SwVfpRegister s15 = { 15 };
-SwVfpRegister s16 = { 16 };
-SwVfpRegister s17 = { 17 };
-SwVfpRegister s18 = { 18 };
-SwVfpRegister s19 = { 19 };
-SwVfpRegister s20 = { 20 };
-SwVfpRegister s21 = { 21 };
-SwVfpRegister s22 = { 22 };
-SwVfpRegister s23 = { 23 };
-SwVfpRegister s24 = { 24 };
-SwVfpRegister s25 = { 25 };
-SwVfpRegister s26 = { 26 };
-SwVfpRegister s27 = { 27 };
-SwVfpRegister s28 = { 28 };
-SwVfpRegister s29 = { 29 };
-SwVfpRegister s30 = { 30 };
-SwVfpRegister s31 = { 31 };
-
-DwVfpRegister d0  = {  0 };
-DwVfpRegister d1  = {  1 };
-DwVfpRegister d2  = {  2 };
-DwVfpRegister d3  = {  3 };
-DwVfpRegister d4  = {  4 };
-DwVfpRegister d5  = {  5 };
-DwVfpRegister d6  = {  6 };
-DwVfpRegister d7  = {  7 };
-DwVfpRegister d8  = {  8 };
-DwVfpRegister d9  = {  9 };
-DwVfpRegister d10 = { 10 };
-DwVfpRegister d11 = { 11 };
-DwVfpRegister d12 = { 12 };
-DwVfpRegister d13 = { 13 };
-DwVfpRegister d14 = { 14 };
-DwVfpRegister d15 = { 15 };
-
-// -----------------------------------------------------------------------------
-// Implementation of RelocInfo
-
-const int RelocInfo::kApplyMask = 0;
-
-
-void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
-  // Patch the code at the current address with the supplied instructions.
-  Instr* pc = reinterpret_cast<Instr*>(pc_);
-  Instr* instr = reinterpret_cast<Instr*>(instructions);
-  for (int i = 0; i < instruction_count; i++) {
-    *(pc + i) = *(instr + i);
-  }
-
-  // Indicate that code has changed.
-  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
-}
-
-
-// Patch the code at the current PC with a call to the target address.
-// Additional guard instructions can be added if required.
-void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
-  // Patch the code at the current address with a call to the target.
-  UNIMPLEMENTED();
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Operand and MemOperand
-// See assembler-thumb2-inl.h for inlined constructors
-
-Operand::Operand(Handle<Object> handle) {
-  rm_ = no_reg;
-  // Verify all Objects referred by code are NOT in new space.
-  Object* obj = *handle;
-  ASSERT(!Heap::InNewSpace(obj));
-  if (obj->IsHeapObject()) {
-    imm32_ = reinterpret_cast<intptr_t>(handle.location());
-    rmode_ = RelocInfo::EMBEDDED_OBJECT;
-  } else {
-    // no relocation needed
-    imm32_ =  reinterpret_cast<intptr_t>(obj);
-    rmode_ = RelocInfo::NONE;
-  }
-}
-
-
-Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
-  ASSERT(is_uint5(shift_imm));
-  ASSERT(shift_op != ROR || shift_imm != 0);  // use RRX if you mean it
-  rm_ = rm;
-  rs_ = no_reg;
-  shift_op_ = shift_op;
-  shift_imm_ = shift_imm & 31;
-  if (shift_op == RRX) {
-    // encoded as ROR with shift_imm == 0
-    ASSERT(shift_imm == 0);
-    shift_op_ = ROR;
-    shift_imm_ = 0;
-  }
-}
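-
-// Illustrative note (not part of the original file): RRX (rotate right
-// through carry by one bit) is encoded as ROR with shift_imm == 0, which is
-// why a plain ROR by zero is rejected above; write Operand(rm, RRX, 0) when
-// that encoding is intended.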
-
-
-Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
-  ASSERT(shift_op != RRX);
-  rm_ = rm;
-  rs_ = no_reg;
-  shift_op_ = shift_op;
-  rs_ = rs;
-}
-
-
-MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
-  rn_ = rn;
-  rm_ = no_reg;
-  offset_ = offset;
-  am_ = am;
-}
-
-MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
-  rn_ = rn;
-  rm_ = rm;
-  shift_op_ = LSL;
-  shift_imm_ = 0;
-  am_ = am;
-}
-
-
-MemOperand::MemOperand(Register rn, Register rm,
-                       ShiftOp shift_op, int shift_imm, AddrMode am) {
-  ASSERT(is_uint5(shift_imm));
-  rn_ = rn;
-  rm_ = rm;
-  shift_op_ = shift_op;
-  shift_imm_ = shift_imm & 31;
-  am_ = am;
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of Assembler.
-
-// Instruction encoding bits.
-enum {
-  H   = 1 << 5,   // halfword (or byte)
-  S6  = 1 << 6,   // signed (or unsigned)
-  L   = 1 << 20,  // load (or store)
-  S   = 1 << 20,  // set condition code (or leave unchanged)
-  W   = 1 << 21,  // writeback base register (or leave unchanged)
-  A   = 1 << 21,  // accumulate in multiply instruction (or not)
-  B   = 1 << 22,  // unsigned byte (or word)
-  N   = 1 << 22,  // long (or short)
-  U   = 1 << 23,  // positive (or negative) offset/index
-  P   = 1 << 24,  // offset/pre-indexed addressing (or post-indexed addressing)
-  I   = 1 << 25,  // immediate shifter operand (or not)
-
-  B4  = 1 << 4,
-  B5  = 1 << 5,
-  B6  = 1 << 6,
-  B7  = 1 << 7,
-  B8  = 1 << 8,
-  B9  = 1 << 9,
-  B12 = 1 << 12,
-  B16 = 1 << 16,
-  B18 = 1 << 18,
-  B19 = 1 << 19,
-  B20 = 1 << 20,
-  B21 = 1 << 21,
-  B22 = 1 << 22,
-  B23 = 1 << 23,
-  B24 = 1 << 24,
-  B25 = 1 << 25,
-  B26 = 1 << 26,
-  B27 = 1 << 27,
-
-  // Instruction bit masks.
-  RdMask     = 15 << 12,  // in str instruction
-  CondMask   = 15 << 28,
-  CoprocessorMask = 15 << 8,
-  OpCodeMask = 15 << 21,  // in data-processing instructions
-  Imm24Mask  = (1 << 24) - 1,
-  Off12Mask  = (1 << 12) - 1,
-  // Reserved condition.
-  nv = 15 << 28
-};
-
-
-// add(sp, sp, 4) instruction (aka Pop())
-static const Instr kPopInstruction =
-    al | 4 * B21 | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
-// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
-// register r is not encoded.
-static const Instr kPushRegPattern =
-    al | B26 | 4 | NegPreIndex | sp.code() * B16;
-// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
-// register r is not encoded.
-static const Instr kPopRegPattern =
-    al | B26 | L | 4 | PostIndex | sp.code() * B16;
-// mov lr, pc
-const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
-// ldr pc, [pc, #XXX]
-const Instr kLdrPCPattern = al | B26 | L | pc.code() * B16;
-
-// Spare buffer.
-static const int kMinimalBufferSize = 4*KB;
-static byte* spare_buffer_ = NULL;
-
-Assembler::Assembler(void* buffer, int buffer_size) {
-  if (buffer == NULL) {
-    // Do our own buffer management.
-    if (buffer_size <= kMinimalBufferSize) {
-      buffer_size = kMinimalBufferSize;
-
-      if (spare_buffer_ != NULL) {
-        buffer = spare_buffer_;
-        spare_buffer_ = NULL;
-      }
-    }
-    if (buffer == NULL) {
-      buffer_ = NewArray<byte>(buffer_size);
-    } else {
-      buffer_ = static_cast<byte*>(buffer);
-    }
-    buffer_size_ = buffer_size;
-    own_buffer_ = true;
-
-  } else {
-    // Use externally provided buffer instead.
-    ASSERT(buffer_size > 0);
-    buffer_ = static_cast<byte*>(buffer);
-    buffer_size_ = buffer_size;
-    own_buffer_ = false;
-  }
-
-  // Set up buffer pointers.
-  ASSERT(buffer_ != NULL);
-  pc_ = buffer_;
-  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
-  num_prinfo_ = 0;
-  next_buffer_check_ = 0;
-  no_const_pool_before_ = 0;
-  last_const_pool_end_ = 0;
-  last_bound_pos_ = 0;
-  current_statement_position_ = RelocInfo::kNoPosition;
-  current_position_ = RelocInfo::kNoPosition;
-  written_statement_position_ = current_statement_position_;
-  written_position_ = current_position_;
-}
-
-
-Assembler::~Assembler() {
-  if (own_buffer_) {
-    if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
-      spare_buffer_ = buffer_;
-    } else {
-      DeleteArray(buffer_);
-    }
-  }
-}
-
-
-void Assembler::GetCode(CodeDesc* desc) {
-  // Emit constant pool if necessary.
-  CheckConstPool(true, false);
-  ASSERT(num_prinfo_ == 0);
-
-  // Set up code descriptor.
-  desc->buffer = buffer_;
-  desc->buffer_size = buffer_size_;
-  desc->instr_size = pc_offset();
-  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
-}
-
-
-void Assembler::Align(int m) {
-  ASSERT(m >= 4 && IsPowerOf2(m));
-  while ((pc_offset() & (m - 1)) != 0) {
-    nop();
-  }
-}
-
-
-// Labels refer to positions in the (to be) generated code.
-// There are bound, linked, and unused labels.
-//
-// Bound labels refer to known positions in the already
-// generated code. pos() is the position the label refers to.
-//
-// Linked labels refer to unknown positions in the code
-// to be generated; pos() is the position of the last
-// instruction using the label.
-
-
-// The link chain is terminated by a negative code position (must be aligned)
-const int kEndOfChain = -4;
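-
-// Illustrative example (not part of the original file): a label used by
-// branches at positions 4 and 12 before being bound records position 12;
-// the branch at 12 encodes a link back to position 4, and target_at(4)
-// returns kEndOfChain. bind_to() walks this chain and patches each branch
-// to the real target.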
-
-
-int Assembler::target_at(int pos)  {
-  Instr instr = instr_at(pos);
-  if ((instr & ~Imm24Mask) == 0) {
-    // Emitted label constant, not part of a branch.
-    return instr - (Code::kHeaderSize - kHeapObjectTag);
-  }
-  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
-  int imm26 = ((instr & Imm24Mask) << 8) >> 6;
-  if ((instr & CondMask) == nv && (instr & B24) != 0)
-    // blx uses bit 24 to encode bit 2 of imm26
-    imm26 += 2;
-
-  return pos + kPcLoadDelta + imm26;
-}
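-
-// Worked example (illustrative): for a b/bl instruction at position pos,
-// imm26 is the sign-extended imm24 field times 4, and the target is
-// pos + 8 + imm26 (kPcLoadDelta accounts for ARM pc prefetch). A branch at
-// position 0 with imm24 == 23 therefore targets position 0 + 8 + 92 = 100.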
-
-
-void Assembler::target_at_put(int pos, int target_pos) {
-  Instr instr = instr_at(pos);
-  if ((instr & ~Imm24Mask) == 0) {
-    ASSERT(target_pos == kEndOfChain || target_pos >= 0);
-    // Emitted label constant, not part of a branch.
-    // Make label relative to Code* of generated Code object.
-    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
-    return;
-  }
-  int imm26 = target_pos - (pos + kPcLoadDelta);
-  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
-  if ((instr & CondMask) == nv) {
-    // blx uses bit 24 to encode bit 2 of imm26
-    ASSERT((imm26 & 1) == 0);
-    instr = (instr & ~(B24 | Imm24Mask)) | ((imm26 & 2) >> 1)*B24;
-  } else {
-    ASSERT((imm26 & 3) == 0);
-    instr &= ~Imm24Mask;
-  }
-  int imm24 = imm26 >> 2;
-  ASSERT(is_int24(imm24));
-  instr_at_put(pos, instr | (imm24 & Imm24Mask));
-}
-
-
-void Assembler::print(Label* L) {
-  if (L->is_unused()) {
-    PrintF("unused label\n");
-  } else if (L->is_bound()) {
-    PrintF("bound label to %d\n", L->pos());
-  } else if (L->is_linked()) {
-    Label l = *L;
-    PrintF("unbound label");
-    while (l.is_linked()) {
-      PrintF("@ %d ", l.pos());
-      Instr instr = instr_at(l.pos());
-      if ((instr & ~Imm24Mask) == 0) {
-        PrintF("value\n");
-      } else {
-        ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx
-        int cond = instr & CondMask;
-        const char* b;
-        const char* c;
-        if (cond == nv) {
-          b = "blx";
-          c = "";
-        } else {
-          if ((instr & B24) != 0)
-            b = "bl";
-          else
-            b = "b";
-
-          switch (cond) {
-            case eq: c = "eq"; break;
-            case ne: c = "ne"; break;
-            case hs: c = "hs"; break;
-            case lo: c = "lo"; break;
-            case mi: c = "mi"; break;
-            case pl: c = "pl"; break;
-            case vs: c = "vs"; break;
-            case vc: c = "vc"; break;
-            case hi: c = "hi"; break;
-            case ls: c = "ls"; break;
-            case ge: c = "ge"; break;
-            case lt: c = "lt"; break;
-            case gt: c = "gt"; break;
-            case le: c = "le"; break;
-            case al: c = ""; break;
-            default:
-              c = "";
-              UNREACHABLE();
-          }
-        }
-        PrintF("%s%s\n", b, c);
-      }
-      next(&l);
-    }
-  } else {
-    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
-  }
-}
-
-
-void Assembler::bind_to(Label* L, int pos) {
-  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
-  while (L->is_linked()) {
-    int fixup_pos = L->pos();
-    next(L);  // call next before overwriting link with target at fixup_pos
-    target_at_put(fixup_pos, pos);
-  }
-  L->bind_to(pos);
-
-  // Keep track of the last bound label so we don't eliminate any instructions
-  // before a bound label.
-  if (pos > last_bound_pos_)
-    last_bound_pos_ = pos;
-}
-
-
-void Assembler::link_to(Label* L, Label* appendix) {
-  if (appendix->is_linked()) {
-    if (L->is_linked()) {
-      // Append appendix to L's list.
-      int fixup_pos;
-      int link = L->pos();
-      do {
-        fixup_pos = link;
-        link = target_at(fixup_pos);
-      } while (link > 0);
-      ASSERT(link == kEndOfChain);
-      target_at_put(fixup_pos, appendix->pos());
-    } else {
-      // L is empty, simply use appendix.
-      *L = *appendix;
-    }
-  }
-  appendix->Unuse();  // appendix should not be used anymore
-}
-
-
-void Assembler::bind(Label* L) {
-  ASSERT(!L->is_bound());  // label can only be bound once
-  bind_to(L, pc_offset());
-}
-
-
-void Assembler::next(Label* L) {
-  ASSERT(L->is_linked());
-  int link = target_at(L->pos());
-  if (link > 0) {
-    L->link_to(link);
-  } else {
-    ASSERT(link == kEndOfChain);
-    L->Unuse();
-  }
-}
-
-
-// Low-level code emission routines depending on the addressing mode.
-static bool fits_shifter(uint32_t imm32,
-                         uint32_t* rotate_imm,
-                         uint32_t* immed_8,
-                         Instr* instr) {
-  // imm32 must be unsigned.
-  for (int rot = 0; rot < 16; rot++) {
-    // Rotate left by 2*rot; rot == 0 is handled separately because shifting
-    // a 32-bit value by 32 is undefined behavior in C++.
-    uint32_t imm8 =
-        rot == 0 ? imm32 : ((imm32 << 2*rot) | (imm32 >> (32 - 2*rot)));
-    if (imm8 <= 0xff) {
-      *rotate_imm = rot;
-      *immed_8 = imm8;
-      return true;
-    }
-  }
-  // If the opcode is mov or mvn and if ~imm32 fits, change the opcode.
-  if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
-    if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
-      *instr ^= 0x2*B21;
-      return true;
-    }
-  }
-  return false;
-}
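-
-// Worked example (illustrative): an operand2 immediate is an 8-bit value
-// rotated right by 2*rotate_imm. 0x3FC does not fit in 8 bits, but rotated
-// left by 30 (rot == 15) it becomes 0xFF, so it encodes as immed_8 == 0xFF
-// with rotate_imm == 15. 0xFFFFFFFF fits no rotation, but ~0xFFFFFFFF == 0
-// does, so 'mov rd, #0xFFFFFFFF' is rewritten above as 'mvn rd, #0'.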
-
-
-// We have to use the temporary register for things that can be relocated even
-// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
-// space.  There is no guarantee that the relocated location can be similarly
-// encoded.
-static bool MustUseIp(RelocInfo::Mode rmode) {
-  if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
-    if (!Serializer::enabled()) {
-      Serializer::TooLateToEnableNow();
-    }
-#endif
-    return Serializer::enabled();
-  } else if (rmode == RelocInfo::NONE) {
-    return false;
-  }
-  return true;
-}
-
-
-void Assembler::addrmod1(Instr instr,
-                         Register rn,
-                         Register rd,
-                         const Operand& x) {
-  CheckBuffer();
-  ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
-  if (!x.rm_.is_valid()) {
-    // Immediate.
-    uint32_t rotate_imm;
-    uint32_t immed_8;
-    if (MustUseIp(x.rmode_) ||
-        !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
-      // The immediate operand cannot be encoded as a shifter operand, so load
-      // it first to register ip and change the original instruction to use ip.
-      // However, if the original instruction is a 'mov rd, x' (not setting the
-      // condition code), then replace it with a 'ldr rd, [pc]'.
-      RecordRelocInfo(x.rmode_, x.imm32_);
-      CHECK(!rn.is(ip));  // rn should never be ip, or it will be trashed
-      Condition cond = static_cast<Condition>(instr & CondMask);
-      if ((instr & ~CondMask) == 13*B21) {  // mov, S not set
-        ldr(rd, MemOperand(pc, 0), cond);
-      } else {
-        ldr(ip, MemOperand(pc, 0), cond);
-        addrmod1(instr, rn, rd, Operand(ip));
-      }
-      return;
-    }
-    instr |= I | rotate_imm*B8 | immed_8;
-  } else if (!x.rs_.is_valid()) {
-    // Immediate shift.
-    instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
-  } else {
-    // Register shift.
-    ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
-    instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
-  }
-  emit(instr | rn.code()*B16 | rd.code()*B12);
-  if (rn.is(pc) || x.rm_.is(pc))
-    // Block constant pool emission for one instruction after reading pc.
-    BlockConstPoolBefore(pc_offset() + kInstrSize);
-}
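-
-// Illustrative example (not from the original source): 'add r0, r1,
-// #0x12345678' has no shifter-operand encoding, so it is emitted as
-// 'ldr ip, [pc, #0]' (patched to the constant pool offset later) followed
-// by 'add r0, r1, ip'.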
-
-
-void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
-  ASSERT((instr & ~(CondMask | B | L)) == B26);
-  int am = x.am_;
-  if (!x.rm_.is_valid()) {
-    // Immediate offset.
-    int offset_12 = x.offset_;
-    if (offset_12 < 0) {
-      offset_12 = -offset_12;
-      am ^= U;
-    }
-    if (!is_uint12(offset_12)) {
-      // The immediate offset cannot be encoded, so load it into register ip
-      // first. rn (and rd in a load) should never be ip, or it will be
-      // trashed.
-      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
-      mov(ip, Operand(x.offset_), LeaveCC,
-          static_cast<Condition>(instr & CondMask));
-      addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
-      return;
-    }
-    ASSERT(offset_12 >= 0);  // no masking needed
-    instr |= offset_12;
-  } else {
-    // Register offset (shift_imm_ and shift_op_ are 0) or scaled register
-    // offset; the constructors make sure that both shift_imm_ and shift_op_
-    // are initialized.
-    ASSERT(!x.rm_.is(pc));
-    instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
-  }
-  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
-  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
-}
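-
-// Illustrative example: negative immediate offsets are encoded as a positive
-// magnitude with the U bit cleared, e.g. 'ldr r0, [r1, #-8]' uses
-// offset_12 == 8 and U == 0; offsets that do not fit in 12 bits fall back to
-// loading the offset into ip first, as above.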
-
-
-void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
-  ASSERT((instr & ~(CondMask | L | S6 | H)) == (B4 | B7));
-  ASSERT(x.rn_.is_valid());
-  int am = x.am_;
-  if (!x.rm_.is_valid()) {
-    // Immediate offset.
-    int offset_8 = x.offset_;
-    if (offset_8 < 0) {
-      offset_8 = -offset_8;
-      am ^= U;
-    }
-    if (!is_uint8(offset_8)) {
-      // The immediate offset cannot be encoded, so load it into register ip
-      // first. rn (and rd in a load) should never be ip, or it will be
-      // trashed.
-      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
-      mov(ip, Operand(x.offset_), LeaveCC,
-          static_cast<Condition>(instr & CondMask));
-      addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
-      return;
-    }
-    ASSERT(offset_8 >= 0);  // no masking needed
-    instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
-  } else if (x.shift_imm_ != 0) {
-    // A scaled register offset is not supported, so load the index first.
-    // rn (and rd in a load) should never be ip, or it will be trashed.
-    ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
-    mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
-        static_cast<Condition>(instr & CondMask));
-    addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
-    return;
-  } else {
-    // Register offset.
-    ASSERT((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
-    instr |= x.rm_.code();
-  }
-  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
-  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
-}
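-
-// Illustrative example: addrmod3 immediates are 8 bits split into two
-// nibbles, so offset 0x34 is encoded as immedH == 3 (bits 8-11) and
-// immedL == 4 (bits 0-3), matching '(offset_8 >> 4)*B8 | (offset_8 & 0xf)'.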
-
-
-void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
-  ASSERT((instr & ~(CondMask | P | U | W | L)) == B27);
-  ASSERT(rl != 0);
-  ASSERT(!rn.is(pc));
-  emit(instr | rn.code()*B16 | rl);
-}
-
-
-void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
-  // Unindexed addressing is not encoded by this function.
-  ASSERT_EQ((B27 | B26),
-            (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
-  ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
-  int am = x.am_;
-  int offset_8 = x.offset_;
-  ASSERT((offset_8 & 3) == 0);  // offset must be an aligned word offset
-  offset_8 >>= 2;
-  if (offset_8 < 0) {
-    offset_8 = -offset_8;
-    am ^= U;
-  }
-  ASSERT(is_uint8(offset_8));  // unsigned word offset must fit in a byte
-  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
-
-  // Post-indexed addressing requires W == 1; this differs from addrmod2/3.
-  if ((am & P) == 0)
-    am |= W;
-
-  ASSERT(offset_8 >= 0);  // no masking needed
-  emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
-}
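-
-// Illustrative example: addrmod5 offsets are word-scaled, so a byte offset
-// of 40 is encoded as offset_8 == 10; with 8 bits available, the reachable
-// range is 0 to 1020 bytes (255 words) either side of the base, selected
-// via the U bit.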
-
-
-int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
-  int target_pos;
-  if (L->is_bound()) {
-    target_pos = L->pos();
-  } else {
-    if (L->is_linked()) {
-      target_pos = L->pos();  // L's link
-    } else {
-      target_pos = kEndOfChain;
-    }
-    L->link_to(pc_offset());
-  }
-
-  // Block the emission of the constant pool, since the branch instruction must
-  // be emitted at the pc offset recorded by the label.
-  BlockConstPoolBefore(pc_offset() + kInstrSize);
-  return target_pos - (pc_offset() + kPcLoadDelta);
-}
-
-
-void Assembler::label_at_put(Label* L, int at_offset) {
-  int target_pos;
-  if (L->is_bound()) {
-    target_pos = L->pos();
-  } else {
-    if (L->is_linked()) {
-      target_pos = L->pos();  // L's link
-    } else {
-      target_pos = kEndOfChain;
-    }
-    L->link_to(at_offset);
-    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
-  }
-}
-
-
-// Branch instructions.
-void Assembler::b(int branch_offset, Condition cond) {
-  ASSERT((branch_offset & 3) == 0);
-  int imm24 = branch_offset >> 2;
-  ASSERT(is_int24(imm24));
-  emit(cond | B27 | B25 | (imm24 & Imm24Mask));
-
-  if (cond == al)
-    // Dead code is a good location to emit the constant pool.
-    CheckConstPool(false, false);
-}
-
-
-void Assembler::bl(int branch_offset, Condition cond) {
-  ASSERT((branch_offset & 3) == 0);
-  int imm24 = branch_offset >> 2;
-  ASSERT(is_int24(imm24));
-  emit(cond | B27 | B25 | B24 | (imm24 & Imm24Mask));
-}
-
-
-void Assembler::blx(int branch_offset) {  // v5 and above
-  WriteRecordedPositions();
-  ASSERT((branch_offset & 1) == 0);
-  int h = ((branch_offset & 2) >> 1)*B24;
-  int imm24 = branch_offset >> 2;
-  ASSERT(is_int24(imm24));
-  emit(15 << 28 | B27 | B25 | h | (imm24 & Imm24Mask));
-}
-
-
-void Assembler::blx(Register target, Condition cond) {  // v5 and above
-  WriteRecordedPositions();
-  ASSERT(!target.is(pc));
-  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code());
-}
-
-
-void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
-  WriteRecordedPositions();
-  ASSERT(!target.is(pc));  // use of pc is actually allowed, but discouraged
-  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code());
-}
-
-
-// Data-processing instructions.
-
-// UBFX <Rd>,<Rn>,#<lsb>,#<width - 1>
-// Instruction details available in ARM DDI 0406A, A8-464.
-// cond(31-28) | 01111(27-23)| 1(22) | 1(21) | widthm1(20-16) |
-//  Rd(15-12) | lsb(11-7) | 101(6-4) | Rn(3-0)
-void Assembler::ubfx(Register dst, Register src1, const Operand& src2,
-                     const Operand& src3, Condition cond) {
-  ASSERT(!src2.rm_.is_valid() && !src3.rm_.is_valid());
-  ASSERT(static_cast<uint32_t>(src2.imm32_) <= 0x1f);
-  ASSERT(static_cast<uint32_t>(src3.imm32_) <= 0x1f);
-  emit(cond | 0x3F*B21 | src3.imm32_*B16 |
-       dst.code()*B12 | src2.imm32_*B7 | 0x5*B4 | src1.code());
-}
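-
-// Illustrative usage: src2 holds lsb and src3 holds width - 1, so
-// ubfx(r0, r1, Operand(3), Operand(4), al) extracts the 5-bit field at
-// bits 3..7 of r1 into r0.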
-
-
-void Assembler::and_(Register dst, Register src1, const Operand& src2,
-                     SBit s, Condition cond) {
-  addrmod1(cond | 0*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::eor(Register dst, Register src1, const Operand& src2,
-                    SBit s, Condition cond) {
-  addrmod1(cond | 1*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::sub(Register dst, Register src1, const Operand& src2,
-                    SBit s, Condition cond) {
-  addrmod1(cond | 2*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::rsb(Register dst, Register src1, const Operand& src2,
-                    SBit s, Condition cond) {
-  addrmod1(cond | 3*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::add(Register dst, Register src1, const Operand& src2,
-                    SBit s, Condition cond) {
-  addrmod1(cond | 4*B21 | s, src1, dst, src2);
-
-  // Eliminate pattern: push(r), pop()
-  //   str(src, MemOperand(sp, 4, NegPreIndex), al);
-  //   add(sp, sp, Operand(kPointerSize));
-  // Both instructions can be eliminated.
-  int pattern_size = 2 * kInstrSize;
-  if (FLAG_push_pop_elimination &&
-      last_bound_pos_ <= (pc_offset() - pattern_size) &&
-      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
-      // Pattern.
-      instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
-      (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
-    pc_ -= 2 * kInstrSize;
-    if (FLAG_print_push_pop_elimination) {
-      PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
-    }
-  }
-}
-
-
-void Assembler::adc(Register dst, Register src1, const Operand& src2,
-                    SBit s, Condition cond) {
-  addrmod1(cond | 5*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::sbc(Register dst, Register src1, const Operand& src2,
-                    SBit s, Condition cond) {
-  addrmod1(cond | 6*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::rsc(Register dst, Register src1, const Operand& src2,
-                    SBit s, Condition cond) {
-  addrmod1(cond | 7*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
-  addrmod1(cond | 8*B21 | S, src1, r0, src2);
-}
-
-
-void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
-  addrmod1(cond | 9*B21 | S, src1, r0, src2);
-}
-
-
-void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
-  addrmod1(cond | 10*B21 | S, src1, r0, src2);
-}
-
-
-void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
-  addrmod1(cond | 11*B21 | S, src1, r0, src2);
-}
-
-
-void Assembler::orr(Register dst, Register src1, const Operand& src2,
-                    SBit s, Condition cond) {
-  addrmod1(cond | 12*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
-  if (dst.is(pc)) {
-    WriteRecordedPositions();
-  }
-  addrmod1(cond | 13*B21 | s, r0, dst, src);
-}
-
-
-void Assembler::bic(Register dst, Register src1, const Operand& src2,
-                    SBit s, Condition cond) {
-  addrmod1(cond | 14*B21 | s, src1, dst, src2);
-}
-
-
-void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
-  addrmod1(cond | 15*B21 | s, r0, dst, src);
-}
-
-
-// Multiply instructions.
-void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
-                    SBit s, Condition cond) {
-  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
-  emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
-       src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::mul(Register dst, Register src1, Register src2,
-                    SBit s, Condition cond) {
-  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
-  // dst goes in bits 16-19 for this instruction!
-  emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::smlal(Register dstL,
-                      Register dstH,
-                      Register src1,
-                      Register src2,
-                      SBit s,
-                      Condition cond) {
-  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
-  ASSERT(!dstL.is(dstH));
-  emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
-       src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::smull(Register dstL,
-                      Register dstH,
-                      Register src1,
-                      Register src2,
-                      SBit s,
-                      Condition cond) {
-  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
-  ASSERT(!dstL.is(dstH));
-  emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
-       src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::umlal(Register dstL,
-                      Register dstH,
-                      Register src1,
-                      Register src2,
-                      SBit s,
-                      Condition cond) {
-  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
-  ASSERT(!dstL.is(dstH));
-  emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
-       src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-void Assembler::umull(Register dstL,
-                      Register dstH,
-                      Register src1,
-                      Register src2,
-                      SBit s,
-                      Condition cond) {
-  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
-  ASSERT(!dstL.is(dstH));
-  emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
-       src2.code()*B8 | B7 | B4 | src1.code());
-}
-
-
-// Miscellaneous arithmetic instructions.
-void Assembler::clz(Register dst, Register src, Condition cond) {
-  // v5 and above.
-  ASSERT(!dst.is(pc) && !src.is(pc));
-  emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
-       15*B8 | B4 | src.code());
-}
-
-
-// Status register access instructions.
-void Assembler::mrs(Register dst, SRegister s, Condition cond) {
-  ASSERT(!dst.is(pc));
-  emit(cond | B24 | s | 15*B16 | dst.code()*B12);
-}
-
-
-void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
-                    Condition cond) {
-  ASSERT(fields >= B16 && fields < B20);  // at least one field set
-  Instr instr;
-  if (!src.rm_.is_valid()) {
-    // Immediate.
-    uint32_t rotate_imm;
-    uint32_t immed_8;
-    if (MustUseIp(src.rmode_) ||
-        !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
-      // Immediate operand cannot be encoded, load it first to register ip.
-      RecordRelocInfo(src.rmode_, src.imm32_);
-      ldr(ip, MemOperand(pc, 0), cond);
-      msr(fields, Operand(ip), cond);
-      return;
-    }
-    instr = I | rotate_imm*B8 | immed_8;
-  } else {
-    ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0);  // only rm allowed
-    instr = src.rm_.code();
-  }
-  emit(cond | instr | B24 | B21 | fields | 15*B12);
-}
-
-
-// Load/Store instructions.
-void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
-  if (dst.is(pc)) {
-    WriteRecordedPositions();
-  }
-  addrmod2(cond | B26 | L, dst, src);
-
-  // Eliminate pattern: push(r), pop(r)
-  //   str(r, MemOperand(sp, 4, NegPreIndex), al)
-  //   ldr(r, MemOperand(sp, 4, PostIndex), al)
-  // Both instructions can be eliminated.
-  int pattern_size = 2 * kInstrSize;
-  if (FLAG_push_pop_elimination &&
-      last_bound_pos_ <= (pc_offset() - pattern_size) &&
-      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
-      // Pattern.
-      instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) &&
-      instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) {
-    pc_ -= 2 * kInstrSize;
-    if (FLAG_print_push_pop_elimination) {
-      PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
-    }
-  }
-}
-
-
-void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
-  addrmod2(cond | B26, src, dst);
-
-  // Eliminate pattern: pop(), push(r)
-  //     add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
-  // ->  str r, [sp, 0], al
-  int pattern_size = 2 * kInstrSize;
-  if (FLAG_push_pop_elimination &&
-     last_bound_pos_ <= (pc_offset() - pattern_size) &&
-     reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
-     // Pattern.
-     instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
-     instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
-    pc_ -= 2 * kInstrSize;
-    emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
-    if (FLAG_print_push_pop_elimination) {
-      PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
-    }
-  }
-}
-
-
-void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
-  addrmod2(cond | B26 | B | L, dst, src);
-}
-
-
-void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
-  addrmod2(cond | B26 | B, src, dst);
-}
-
-
-void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
-  addrmod3(cond | L | B7 | H | B4, dst, src);
-}
-
-
-void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
-  addrmod3(cond | B7 | H | B4, src, dst);
-}
-
-
-void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
-  addrmod3(cond | L | B7 | S6 | B4, dst, src);
-}
-
-
-void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
-  addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
-}
-
-
-// Load/Store multiple instructions.
-void Assembler::ldm(BlockAddrMode am,
-                    Register base,
-                    RegList dst,
-                    Condition cond) {
-  // ABI stack constraint: ldmxx base, {..sp..} with base != sp is not
-  // restartable.
-  ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
-
-  addrmod4(cond | B27 | am | L, base, dst);
-
-  // Emit the constant pool after a function return implemented by ldm ..{..pc}.
-  if (cond == al && (dst & pc.bit()) != 0) {
-    // There is a slight chance that the ldm instruction was actually a call,
-    // in which case it would be wrong to return into the constant pool; we
-    // recognize this case by checking if the emission of the pool was blocked
-    // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
-    // the case, we emit a jump over the pool.
-    CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
-  }
-}
-
-
-void Assembler::stm(BlockAddrMode am,
-                    Register base,
-                    RegList src,
-                    Condition cond) {
-  addrmod4(cond | B27 | am, base, src);
-}
-
-
-// Semaphore instructions.
-void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
-  ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
-  ASSERT(!dst.is(base) && !src.is(base));
-  emit(cond | P | base.code()*B16 | dst.code()*B12 |
-       B7 | B4 | src.code());
-}
-
-
-void Assembler::swpb(Register dst,
-                     Register src,
-                     Register base,
-                     Condition cond) {
-  ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
-  ASSERT(!dst.is(base) && !src.is(base));
-  emit(cond | P | B | base.code()*B16 | dst.code()*B12 |
-       B7 | B4 | src.code());
-}
-
-
-// Exception-generating instructions and debugging support.
-void Assembler::stop(const char* msg) {
-#if !defined(__arm__)
-  // The simulator handles these special instructions and stops execution.
-  emit(15 << 28 | ((intptr_t) msg));
-#else
-  // Just issue a simple break instruction for now. Alternatively we could use
-  // the swi(0x9f0001) instruction on Linux.
-  bkpt(0);
-#endif
-}
-
-
-void Assembler::bkpt(uint32_t imm16) {  // v5 and above
-  ASSERT(is_uint16(imm16));
-  emit(al | B24 | B21 | (imm16 >> 4)*B8 | 7*B4 | (imm16 & 0xf));
-}
-
-
-void Assembler::swi(uint32_t imm24, Condition cond) {
-  ASSERT(is_uint24(imm24));
-  emit(cond | 15*B24 | imm24);
-}
-
-
-// Coprocessor instructions.
-void Assembler::cdp(Coprocessor coproc,
-                    int opcode_1,
-                    CRegister crd,
-                    CRegister crn,
-                    CRegister crm,
-                    int opcode_2,
-                    Condition cond) {
-  ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
-  emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
-       crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
-}
-
-
-void Assembler::cdp2(Coprocessor coproc,
-                     int opcode_1,
-                     CRegister crd,
-                     CRegister crn,
-                     CRegister crm,
-                     int opcode_2) {  // v5 and above
-  cdp(coproc, opcode_1, crd, crn, crm, opcode_2, static_cast<Condition>(nv));
-}
-
-
-void Assembler::mcr(Coprocessor coproc,
-                    int opcode_1,
-                    Register rd,
-                    CRegister crn,
-                    CRegister crm,
-                    int opcode_2,
-                    Condition cond) {
-  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
-  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
-       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
-}
-
-
-void Assembler::mcr2(Coprocessor coproc,
-                     int opcode_1,
-                     Register rd,
-                     CRegister crn,
-                     CRegister crm,
-                     int opcode_2) {  // v5 and above
-  mcr(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
-}
-
-
-void Assembler::mrc(Coprocessor coproc,
-                    int opcode_1,
-                    Register rd,
-                    CRegister crn,
-                    CRegister crm,
-                    int opcode_2,
-                    Condition cond) {
-  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
-  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
-       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
-}
-
-
-void Assembler::mrc2(Coprocessor coproc,
-                     int opcode_1,
-                     Register rd,
-                     CRegister crn,
-                     CRegister crm,
-                     int opcode_2) {  // v5 and above
-  mrc(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
-}
-
-
-void Assembler::ldc(Coprocessor coproc,
-                    CRegister crd,
-                    const MemOperand& src,
-                    LFlag l,
-                    Condition cond) {
-  addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
-}
-
-
-void Assembler::ldc(Coprocessor coproc,
-                    CRegister crd,
-                    Register rn,
-                    int option,
-                    LFlag l,
-                    Condition cond) {
-  // Unindexed addressing.
-  ASSERT(is_uint8(option));
-  emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
-       coproc*B8 | (option & 255));
-}
-
-
-void Assembler::ldc2(Coprocessor coproc,
-                     CRegister crd,
-                     const MemOperand& src,
-                     LFlag l) {  // v5 and above
-  ldc(coproc, crd, src, l, static_cast<Condition>(nv));
-}
-
-
-void Assembler::ldc2(Coprocessor coproc,
-                     CRegister crd,
-                     Register rn,
-                     int option,
-                     LFlag l) {  // v5 and above
-  ldc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
-}
-
-
-void Assembler::stc(Coprocessor coproc,
-                    CRegister crd,
-                    const MemOperand& dst,
-                    LFlag l,
-                    Condition cond) {
-  addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst);
-}
-
-
-void Assembler::stc(Coprocessor coproc,
-                    CRegister crd,
-                    Register rn,
-                    int option,
-                    LFlag l,
-                    Condition cond) {
-  // Unindexed addressing.
-  ASSERT(is_uint8(option));
-  emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
-       coproc*B8 | (option & 255));
-}
-
-
-void Assembler::stc2(Coprocessor coproc,
-                     CRegister crd,
-                     const MemOperand& dst,
-                     LFlag l) {  // v5 and above
-  stc(coproc, crd, dst, l, static_cast<Condition>(nv));
-}
-
-
-void Assembler::stc2(Coprocessor coproc,
-                     CRegister crd,
-                     Register rn,
-                     int option,
-                     LFlag l) {  // v5 and above
-  stc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
-}
-
-
-// Support for VFP.
-void Assembler::vldr(const DwVfpRegister dst,
-                     const Register base,
-                     int offset,
-                     const Condition cond) {
-  // Ddst = MEM(Rbase + offset).
-  // Instruction details available in ARM DDI 0406A, A8-628.
-  // cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) |
-  // Vdst(15-12) | 1011(11-8) | offset
-  ASSERT(CpuFeatures::IsEnabled(VFP3));
-  ASSERT(offset % 4 == 0);
-  emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
-       0xB*B8 | ((offset / 4) & 255));
-}
-
-
-void Assembler::vstr(const DwVfpRegister src,
-                     const Register base,
-                     int offset,
-                     const Condition cond) {
-  // MEM(Rbase + offset) = Dsrc.
-  // Instruction details available in ARM DDI 0406A, A8-786.
-  // cond(31-28) | 1101(27-24)| 1000(23-20) | Rbase(19-16) |
-  // Vsrc(15-12) | 1011(11-8) | (offset/4)
-  ASSERT(CpuFeatures::IsEnabled(VFP3));
-  ASSERT(offset % 4 == 0);
-  emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 |
-       0xB*B8 | ((offset / 4) & 255));
-}
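-
-// Illustrative note: vldr/vstr encode offset/4 in an 8-bit field with U
-// fixed to 1 in the opcodes above, so offsets must be non-negative
-// multiples of 4 up to 1020 bytes.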
-
-
-void Assembler::vmov(const DwVfpRegister dst,
-                     const Register src1,
-                     const Register src2,
-                     const Condition cond) {
-  // Dm = <Rt,Rt2>.
-  // Instruction details available in ARM DDI 0406A, A8-646.
-  // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
-  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
-  ASSERT(CpuFeatures::IsEnabled(VFP3));
-  ASSERT(!src1.is(pc) && !src2.is(pc));
-  emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
-       src1.code()*B12 | 0xB*B8 | B4 | dst.code());
-}
-
-
-void Assembler::vmov(const Register dst1,
-                     const Register dst2,
-                     const DwVfpRegister src,
-                     const Condition cond) {
-  // <Rt,Rt2> = Dm.
-  // Instruction details available in ARM DDI 0406A, A8-646.
-  // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
-  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
-  ASSERT(CpuFeatures::IsEnabled(VFP3));
-  ASSERT(!dst1.is(pc) && !dst2.is(pc));
-  emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
-       dst1.code()*B12 | 0xB*B8 | B4 | src.code());
-}
-
-
-void Assembler::vmov(const SwVfpRegister dst,
-                     const Register src,
-                     const Condition cond) {
-  // Sn = Rt.
-  // Instruction details available in ARM DDI 0406A, A8-642.
-  // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
-  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP3));
-  ASSERT(!src.is(pc));
-  emit(cond | 0xE*B24 | (dst.code() >> 1)*B16 |
-       src.code()*B12 | 0xA*B8 | (0x1 & dst.code())*B7 | B4);
-}
-
-
-void Assembler::vmov(const Register dst,
-                     const SwVfpRegister src,
-                     const Condition cond) {
-  // Rt = Sn.
-  // Instruction details available in ARM DDI 0406A, A8-642.
-  // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
-  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP3));
-  ASSERT(!dst.is(pc));
-  emit(cond | 0xE*B24 | B20 | (src.code() >> 1)*B16 |
-       dst.code()*B12 | 0xA*B8 | (0x1 & src.code())*B7 | B4);
-}
-
-
-void Assembler::vcvt(const DwVfpRegister dst,
-                     const SwVfpRegister src,
-                     const Condition cond) {
-  // Dd = Sm (integer in Sm converted to an IEEE 64-bit double in Dd).
-  // Instruction details available in ARM DDI 0406A, A8-576.
-  // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=000(18-16) |
-  // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=1 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP3));
-  emit(cond | 0xE*B24 | B23 | 0x3*B20 | B19 |
-       dst.code()*B12 | 0x5*B9 | B8 | B7 | B6 |
-       (0x1 & src.code())*B5 | (src.code() >> 1));
-}
-
-
-void Assembler::vcvt(const SwVfpRegister dst,
-                     const DwVfpRegister src,
-                     const Condition cond) {
-  // Sd = Dm (IEEE 64-bit double in Dm converted to a 32-bit integer in Sd).
-  // Instruction details available in ARM DDI 0406A, A8-576.
-  // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=101(18-16)|
-  // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=? | 1(6) | M=?(5) | 0(4) | Vm(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP3));
-  emit(cond | 0xE*B24 | B23 | (0x1 & dst.code())*B22 |
-       0x3*B20 | B19 | 0x5*B16 | (dst.code() >> 1)*B12 |
-       0x5*B9 | B8 | B7 | B6 | src.code());
-}
-
-
-void Assembler::vadd(const DwVfpRegister dst,
-                     const DwVfpRegister src1,
-                     const DwVfpRegister src2,
-                     const Condition cond) {
-  // Dd = vadd(Dn, Dm) double precision floating point addition.
-  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
-  // Instruction details available in ARM DDI 0406A, A8-536.
-  // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
-  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP3));
-  emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
-       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
-}
-
-
-void Assembler::vsub(const DwVfpRegister dst,
-                     const DwVfpRegister src1,
-                     const DwVfpRegister src2,
-                     const Condition cond) {
-  // Dd = vsub(Dn, Dm) double precision floating point subtraction.
-  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
-  // Instruction details available in ARM DDI 0406A, A8-784.
-  // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
-  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP3));
-  emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
-       dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
-}
-
-
-void Assembler::vmul(const DwVfpRegister dst,
-                     const DwVfpRegister src1,
-                     const DwVfpRegister src2,
-                     const Condition cond) {
-  // Dd = vmul(Dn, Dm) double precision floating point multiplication.
-  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
-  // Instruction details available in ARM DDI 0406A, A8-784.
-  // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
-  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP3));
-  emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
-       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
-}
-
-
-void Assembler::vdiv(const DwVfpRegister dst,
-                     const DwVfpRegister src1,
-                     const DwVfpRegister src2,
-                     const Condition cond) {
-  // Dd = vdiv(Dn, Dm) double precision floating point division.
-  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
-  // Instruction details available in ARM DDI 0406A, A8-584.
-  // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
-  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP3));
-  emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
-       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
-}
-
-
-void Assembler::vcmp(const DwVfpRegister src1,
-                     const DwVfpRegister src2,
-                     const SBit s,
-                     const Condition cond) {
-  // vcmp(Dd, Dm) double precision floating point comparison.
-  // Instruction details available in ARM DDI 0406A, A8-570.
-  // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
-  // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=? | 1(6) | M(5)=? | 0(4) | Vm(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP3));
-  emit(cond | 0xE*B24 | B23 | 0x3*B20 | B18 |
-       src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
-}
-
-
-void Assembler::vmrs(Register dst, Condition cond) {
-  // Instruction details available in ARM DDI 0406A, A8-652.
-  // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
-  // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
-  ASSERT(CpuFeatures::IsEnabled(VFP3));
-  emit(cond | 0xE*B24 | 0xF*B20 |  B16 |
-       dst.code()*B12 | 0xA*B8 | B4);
-}
-
-
-// Pseudo instructions.
-void Assembler::lea(Register dst,
-                    const MemOperand& x,
-                    SBit s,
-                    Condition cond) {
-  int am = x.am_;
-  if (!x.rm_.is_valid()) {
-    // Immediate offset.
-    if ((am & P) == 0)  // post indexing
-      mov(dst, Operand(x.rn_), s, cond);
-    else if ((am & U) == 0)  // negative indexing
-      sub(dst, x.rn_, Operand(x.offset_), s, cond);
-    else
-      add(dst, x.rn_, Operand(x.offset_), s, cond);
-  } else {
-    // Register offset (shift_imm_ and shift_op_ are 0) or scaled register
-    // offset; the constructors make sure that both shift_imm_ and shift_op_
-    // are initialized.
-    ASSERT(!x.rm_.is(pc));
-    if ((am & P) == 0)  // post indexing
-      mov(dst, Operand(x.rn_), s, cond);
-    else if ((am & U) == 0)  // negative indexing
-      sub(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
-    else
-      add(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
-  }
-}
-
-
-bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
-  uint32_t dummy1;
-  uint32_t dummy2;
-  return fits_shifter(imm32, &dummy1, &dummy2, NULL);
-}
-
-
-void Assembler::BlockConstPoolFor(int instructions) {
-  BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
-}
-
-
-// Debugging.
-void Assembler::RecordJSReturn() {
-  WriteRecordedPositions();
-  CheckBuffer();
-  RecordRelocInfo(RelocInfo::JS_RETURN);
-}
-
-
-void Assembler::RecordComment(const char* msg) {
-  if (FLAG_debug_code) {
-    CheckBuffer();
-    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
-  }
-}
-
-
-void Assembler::RecordPosition(int pos) {
-  if (pos == RelocInfo::kNoPosition) return;
-  ASSERT(pos >= 0);
-  current_position_ = pos;
-}
-
-
-void Assembler::RecordStatementPosition(int pos) {
-  if (pos == RelocInfo::kNoPosition) return;
-  ASSERT(pos >= 0);
-  current_statement_position_ = pos;
-}
-
-
-void Assembler::WriteRecordedPositions() {
-  // Write the statement position if it is different from what was written last
-  // time.
-  if (current_statement_position_ != written_statement_position_) {
-    CheckBuffer();
-    RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
-    written_statement_position_ = current_statement_position_;
-  }
-
-  // Write the position if it is different from what was written last time and
-  // also different from the written statement position.
-  if (current_position_ != written_position_ &&
-      current_position_ != written_statement_position_) {
-    CheckBuffer();
-    RecordRelocInfo(RelocInfo::POSITION, current_position_);
-    written_position_ = current_position_;
-  }
-}
-
-
-void Assembler::GrowBuffer() {
-  if (!own_buffer_) FATAL("external code buffer is too small");
-
-  // Compute new buffer size.
-  CodeDesc desc;  // the new buffer
-  if (buffer_size_ < 4*KB) {
-    desc.buffer_size = 4*KB;
-  } else if (buffer_size_ < 1*MB) {
-    desc.buffer_size = 2*buffer_size_;
-  } else {
-    desc.buffer_size = buffer_size_ + 1*MB;
-  }
-  CHECK_GT(desc.buffer_size, 0);  // no overflow
-
-  // Set up new buffer.
-  desc.buffer = NewArray<byte>(desc.buffer_size);
-
-  desc.instr_size = pc_offset();
-  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
-
-  // Copy the data.
-  int pc_delta = desc.buffer - buffer_;
-  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
-  memmove(desc.buffer, buffer_, desc.instr_size);
-  memmove(reloc_info_writer.pos() + rc_delta,
-          reloc_info_writer.pos(), desc.reloc_size);
-
-  // Switch buffers.
-  DeleteArray(buffer_);
-  buffer_ = desc.buffer;
-  buffer_size_ = desc.buffer_size;
-  pc_ += pc_delta;
-  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
-                               reloc_info_writer.last_pc() + pc_delta);
-
-  // None of our relocation types are pc-relative (pointing outside the code
-  // buffer) or pc-absolute (pointing inside the code buffer), so there is no
-  // need to relocate any emitted relocation entries.
-
-  // Relocate pending relocation entries.
-  for (int i = 0; i < num_prinfo_; i++) {
-    RelocInfo& rinfo = prinfo_[i];
-    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
-           rinfo.rmode() != RelocInfo::POSITION);
-    if (rinfo.rmode() != RelocInfo::JS_RETURN) {
-      rinfo.set_pc(rinfo.pc() + pc_delta);
-    }
-  }
-}
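-
-// Illustrative example: the growth policy above doubles small buffers
-// (4KB -> 8KB -> ... -> 1MB) and then grows linearly by 1MB per step, so
-// very large buffers are not doubled.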
-
-
-void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
-  RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
-  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
-    // Adjust code for new modes.
-    ASSERT(RelocInfo::IsJSReturn(rmode)
-           || RelocInfo::IsComment(rmode)
-           || RelocInfo::IsPosition(rmode));
-    // These modes do not need an entry in the constant pool.
-  } else {
-    ASSERT(num_prinfo_ < kMaxNumPRInfo);
-    prinfo_[num_prinfo_++] = rinfo;
-    // Make sure the constant pool is not emitted in place of the next
-    // instruction for which we just recorded relocation info.
-    BlockConstPoolBefore(pc_offset() + kInstrSize);
-  }
-  if (rinfo.rmode() != RelocInfo::NONE) {
-    // Don't record external references unless the heap will be serialized.
-    if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
-#ifdef DEBUG
-      if (!Serializer::enabled()) {
-        Serializer::TooLateToEnableNow();
-      }
-#endif
-      if (!Serializer::enabled() && !FLAG_debug_code) {
-        return;
-      }
-    }
-    ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
-    reloc_info_writer.Write(&rinfo);
-  }
-}
-
-
-void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
-  // Calculate the offset of the next check. It will be overwritten
-  // when a const pool is generated or when const pools are being
-  // blocked for a specific range.
-  next_buffer_check_ = pc_offset() + kCheckConstInterval;
-
-  // There is nothing to do if there are no pending relocation info entries.
-  if (num_prinfo_ == 0) return;
-
-  // We emit a constant pool at regular intervals of about kDistBetweenPools
-  // or when requested by parameter force_emit (e.g. after each function).
-  // We prefer not to emit a jump unless the max distance is reached or if we
-  // are running low on slots, which can happen if a lot of constants are being
-  // emitted (e.g. --debug-code and many static references).
-  int dist = pc_offset() - last_const_pool_end_;
-  if (!force_emit && dist < kMaxDistBetweenPools &&
-      (require_jump || dist < kDistBetweenPools) &&
-      // TODO(1236125): Cleanup the "magic" number below. We know that
-      // the code generation will test every kCheckConstIntervalInst.
-      // Thus we are safe as long as we generate less than 7 constant
-      // entries per instruction.
-      (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) {
-    return;
-  }
-
-  // If we did not return by now, we need to emit the constant pool soon.
-
-  // However, some small sequences of instructions must not be broken up by the
-  // insertion of a constant pool; such sequences are protected by setting
-  // no_const_pool_before_, which is checked here. Also, recursive calls to
-  // CheckConstPool are blocked by no_const_pool_before_.
-  if (pc_offset() < no_const_pool_before_) {
-    // Emission is currently blocked; make sure we try again as soon as
-    // possible.
-    next_buffer_check_ = no_const_pool_before_;
-
-    // Something is wrong if emission is forced and blocked at the same time.
-    ASSERT(!force_emit);
-    return;
-  }
-
-  int jump_instr = require_jump ? kInstrSize : 0;
-
-  // Check that the code buffer is large enough before emitting the constant
-  // pool and relocation information (include the jump over the pool and the
-  // constant pool marker).
-  int max_needed_space =
-      jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
-  while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();
-
-  // Block recursive calls to CheckConstPool.
-  BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
-                       num_prinfo_*kInstrSize);
-  // Don't bother to check for the emit calls below.
-  next_buffer_check_ = no_const_pool_before_;
-
-  // Emit jump over constant pool if necessary.
-  Label after_pool;
-  if (require_jump) b(&after_pool);
-
-  RecordComment("[ Constant Pool");
-
-  // Put down a constant pool marker, an "undefined instruction" as specified
-  // by A3.1 Instruction set encoding.
-  emit(0x03000000 | num_prinfo_);
-
-  // Emit constant pool entries.
-  for (int i = 0; i < num_prinfo_; i++) {
-    RelocInfo& rinfo = prinfo_[i];
-    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
-           rinfo.rmode() != RelocInfo::POSITION &&
-           rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
-    Instr instr = instr_at(rinfo.pc());
-
-    // Instruction to patch must be a ldr/str [pc, #offset].
-    // P and U set, B and W clear, Rn == pc, offset12 still 0.
-    ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
-           (2*B25 | P | U | pc.code()*B16));
-    int delta = pc_ - rinfo.pc() - 8;
-    ASSERT(delta >= -4);  // instr could be ldr pc, [pc, #-4] followed by targ32
-    if (delta < 0) {
-      instr &= ~U;
-      delta = -delta;
-    }
-    ASSERT(is_uint12(delta));
-    instr_at_put(rinfo.pc(), instr + delta);
-    emit(rinfo.data());
-  }
-  num_prinfo_ = 0;
-  last_const_pool_end_ = pc_offset();
-
-  RecordComment("]");
-
-  if (after_pool.is_linked()) {
-    bind(&after_pool);
-  }
-
-  // Since a constant pool was just emitted, move the check offset forward by
-  // the standard interval.
-  next_buffer_check_ = pc_offset() + kCheckConstInterval;
-}
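-
-// Illustrative example: a 'ldr ip, [pc, #0]' recorded at position p is
-// patched, when the pool is emitted, to 'ldr ip, [pc, #(q - p - 8)]' where
-// q is the position of its pool entry; the 8 accounts for ARM pc prefetch.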
-
-
-} }  // namespace v8::internal
diff --git a/src/arm/assembler-thumb2.h b/src/arm/assembler-thumb2.h
deleted file mode 100644
index 2da1138..0000000
--- a/src/arm/assembler-thumb2.h
+++ /dev/null
@@ -1,1036 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions
-// are met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the
-// distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-// OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license has been
-// modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
-
-// A light-weight ARM Assembler
-// Generates user mode instructions for the ARM architecture up to version 5
-
-#ifndef V8_ARM_ASSEMBLER_THUMB2_H_
-#define V8_ARM_ASSEMBLER_THUMB2_H_
-#include <stdio.h>
-#include "assembler.h"
-#include "serialize.h"
-
-namespace v8 {
-namespace internal {
-
-// CPU Registers.
-//
-// 1) We would prefer to use an enum, but enum values are assignment-
-// compatible with int, which has caused code-generation bugs.
-//
-// 2) We would prefer to use a class instead of a struct but we don't like
-// the register initialization to depend on the particular initialization
-// order (which appears to be different on OS X, Linux, and Windows for the
-// installed versions of C++ we tried). Using a struct permits C-style
-// "initialization". Also, the Register objects cannot be const as this
-// forces initialization stubs in MSVC, making us dependent on initialization
-// order.
-//
-// 3) By not using an enum, we are possibly preventing the compiler from
-// doing certain constant folds, which may significantly reduce the
-// code generated for some assembly instructions (because they boil down
-// to a few constants). If this is a problem, we could change the code
-// such that we use an enum in optimized mode, and the struct in debug
-// mode. This way we get the compile-time error checking in debug mode
-// and best performance in optimized code.
-//
-// Core register
-struct Register {
-  bool is_valid() const  { return 0 <= code_ && code_ < 16; }
-  bool is(Register reg) const  { return code_ == reg.code_; }
-  int code() const  {
-    ASSERT(is_valid());
-    return code_;
-  }
-  int bit() const  {
-    ASSERT(is_valid());
-    return 1 << code_;
-  }
-
-  // Unfortunately we can't make this private in a struct.
-  int code_;
-};
-
-
-extern Register no_reg;
-extern Register r0;
-extern Register r1;
-extern Register r2;
-extern Register r3;
-extern Register r4;
-extern Register r5;
-extern Register r6;
-extern Register r7;
-extern Register r8;
-extern Register r9;
-extern Register r10;
-extern Register fp;
-extern Register ip;
-extern Register sp;
-extern Register lr;
-extern Register pc;
-
-
-// Single word VFP register.
-struct SwVfpRegister {
-  bool is_valid() const  { return 0 <= code_ && code_ < 32; }
-  bool is(SwVfpRegister reg) const  { return code_ == reg.code_; }
-  int code() const  {
-    ASSERT(is_valid());
-    return code_;
-  }
-  int bit() const  {
-    ASSERT(is_valid());
-    return 1 << code_;
-  }
-
-  int code_;
-};
-
-
-// Double word VFP register.
-struct DwVfpRegister {
-  // Supporting d0 to d15, can be later extended to d31.
-  bool is_valid() const  { return 0 <= code_ && code_ < 16; }
-  bool is(DwVfpRegister reg) const  { return code_ == reg.code_; }
-  int code() const  {
-    ASSERT(is_valid());
-    return code_;
-  }
-  int bit() const  {
-    ASSERT(is_valid());
-    return 1 << code_;
-  }
-
-  int code_;
-};
-
-
-// Support for VFP registers s0 to s31 (d0 to d15).
-// Note that "s(N):s(N+1)" is the same as "d(N/2)".
-extern SwVfpRegister s0;
-extern SwVfpRegister s1;
-extern SwVfpRegister s2;
-extern SwVfpRegister s3;
-extern SwVfpRegister s4;
-extern SwVfpRegister s5;
-extern SwVfpRegister s6;
-extern SwVfpRegister s7;
-extern SwVfpRegister s8;
-extern SwVfpRegister s9;
-extern SwVfpRegister s10;
-extern SwVfpRegister s11;
-extern SwVfpRegister s12;
-extern SwVfpRegister s13;
-extern SwVfpRegister s14;
-extern SwVfpRegister s15;
-extern SwVfpRegister s16;
-extern SwVfpRegister s17;
-extern SwVfpRegister s18;
-extern SwVfpRegister s19;
-extern SwVfpRegister s20;
-extern SwVfpRegister s21;
-extern SwVfpRegister s22;
-extern SwVfpRegister s23;
-extern SwVfpRegister s24;
-extern SwVfpRegister s25;
-extern SwVfpRegister s26;
-extern SwVfpRegister s27;
-extern SwVfpRegister s28;
-extern SwVfpRegister s29;
-extern SwVfpRegister s30;
-extern SwVfpRegister s31;
-
-extern DwVfpRegister d0;
-extern DwVfpRegister d1;
-extern DwVfpRegister d2;
-extern DwVfpRegister d3;
-extern DwVfpRegister d4;
-extern DwVfpRegister d5;
-extern DwVfpRegister d6;
-extern DwVfpRegister d7;
-extern DwVfpRegister d8;
-extern DwVfpRegister d9;
-extern DwVfpRegister d10;
-extern DwVfpRegister d11;
-extern DwVfpRegister d12;
-extern DwVfpRegister d13;
-extern DwVfpRegister d14;
-extern DwVfpRegister d15;
-
-
-// Coprocessor register
-struct CRegister {
-  bool is_valid() const  { return 0 <= code_ && code_ < 16; }
-  bool is(CRegister creg) const  { return code_ == creg.code_; }
-  int code() const  {
-    ASSERT(is_valid());
-    return code_;
-  }
-  int bit() const  {
-    ASSERT(is_valid());
-    return 1 << code_;
-  }
-
-  // Unfortunately we can't make this private in a struct.
-  int code_;
-};
-
-
-extern CRegister no_creg;
-extern CRegister cr0;
-extern CRegister cr1;
-extern CRegister cr2;
-extern CRegister cr3;
-extern CRegister cr4;
-extern CRegister cr5;
-extern CRegister cr6;
-extern CRegister cr7;
-extern CRegister cr8;
-extern CRegister cr9;
-extern CRegister cr10;
-extern CRegister cr11;
-extern CRegister cr12;
-extern CRegister cr13;
-extern CRegister cr14;
-extern CRegister cr15;
-
-
-// Coprocessor number
-enum Coprocessor {
-  p0  = 0,
-  p1  = 1,
-  p2  = 2,
-  p3  = 3,
-  p4  = 4,
-  p5  = 5,
-  p6  = 6,
-  p7  = 7,
-  p8  = 8,
-  p9  = 9,
-  p10 = 10,
-  p11 = 11,
-  p12 = 12,
-  p13 = 13,
-  p14 = 14,
-  p15 = 15
-};
-
-
-// Condition field in instructions.
-enum Condition {
-  eq =  0 << 28,  // Z set            equal.
-  ne =  1 << 28,  // Z clear          not equal.
-  nz =  1 << 28,  // Z clear          not zero.
-  cs =  2 << 28,  // C set            carry set.
-  hs =  2 << 28,  // C set            unsigned higher or same.
-  cc =  3 << 28,  // C clear          carry clear.
-  lo =  3 << 28,  // C clear          unsigned lower.
-  mi =  4 << 28,  // N set            negative.
-  pl =  5 << 28,  // N clear          positive or zero.
-  vs =  6 << 28,  // V set            overflow.
-  vc =  7 << 28,  // V clear          no overflow.
-  hi =  8 << 28,  // C set, Z clear   unsigned higher.
-  ls =  9 << 28,  // C clear or Z set unsigned lower or same.
-  ge = 10 << 28,  // N == V           greater or equal.
-  lt = 11 << 28,  // N != V           less than.
-  gt = 12 << 28,  // Z clear, N == V  greater than.
-  le = 13 << 28,  // Z set or N != V  less than or equal.
-  al = 14 << 28   //                  always.
-};
-
-
-// Returns the equivalent of !cc.
-INLINE(Condition NegateCondition(Condition cc));
-
-
-// Corresponds to transposing the operands of a comparison.
-inline Condition ReverseCondition(Condition cc) {
-  switch (cc) {
-    case lo:
-      return hi;
-    case hi:
-      return lo;
-    case hs:
-      return ls;
-    case ls:
-      return hs;
-    case lt:
-      return gt;
-    case gt:
-      return lt;
-    case ge:
-      return le;
-    case le:
-      return ge;
-    default:
-      return cc;
-  };
-}
-
-
-// Branch hints are not used on the ARM.  They are defined so that they can
-// appear in shared function signatures, but will be ignored in ARM
-// implementations.
-enum Hint { no_hint };
-
-// Hints are not used on the ARM.  Negating is trivial.
-inline Hint NegateHint(Hint ignored) { return no_hint; }
-
-
-// -----------------------------------------------------------------------------
-// Addressing modes and instruction variants
-
-// Shifter operand shift operation
-enum ShiftOp {
-  LSL = 0 << 5,
-  LSR = 1 << 5,
-  ASR = 2 << 5,
-  ROR = 3 << 5,
-  RRX = -1
-};
-
-
-// Condition code updating mode
-enum SBit {
-  SetCC   = 1 << 20,  // set condition code
-  LeaveCC = 0 << 20   // leave condition code unchanged
-};
-
-
-// Status register selection
-enum SRegister {
-  CPSR = 0 << 22,
-  SPSR = 1 << 22
-};
-
-
-// Status register fields
-enum SRegisterField {
-  CPSR_c = CPSR | 1 << 16,
-  CPSR_x = CPSR | 1 << 17,
-  CPSR_s = CPSR | 1 << 18,
-  CPSR_f = CPSR | 1 << 19,
-  SPSR_c = SPSR | 1 << 16,
-  SPSR_x = SPSR | 1 << 17,
-  SPSR_s = SPSR | 1 << 18,
-  SPSR_f = SPSR | 1 << 19
-};
-
-// Status register field mask (or'ed SRegisterField enum values)
-typedef uint32_t SRegisterFieldMask;
-
-
-// Memory operand addressing mode
-enum AddrMode {
-  // bit encoding P U W
-  Offset       = (8|4|0) << 21,  // offset (without writeback to base)
-  PreIndex     = (8|4|1) << 21,  // pre-indexed addressing with writeback
-  PostIndex    = (0|4|0) << 21,  // post-indexed addressing with writeback
-  NegOffset    = (8|0|0) << 21,  // negative offset (without writeback to base)
-  NegPreIndex  = (8|0|1) << 21,  // negative pre-indexed with writeback
-  NegPostIndex = (0|0|0) << 21   // negative post-indexed with writeback
-};
-
-
-// Load/store multiple addressing mode
-enum BlockAddrMode {
-  // bit encoding P U W
-  da           = (0|0|0) << 21,  // decrement after
-  ia           = (0|4|0) << 21,  // increment after
-  db           = (8|0|0) << 21,  // decrement before
-  ib           = (8|4|0) << 21,  // increment before
-  da_w         = (0|0|1) << 21,  // decrement after with writeback to base
-  ia_w         = (0|4|1) << 21,  // increment after with writeback to base
-  db_w         = (8|0|1) << 21,  // decrement before with writeback to base
-  ib_w         = (8|4|1) << 21   // increment before with writeback to base
-};
-
-
-// Coprocessor load/store operand size
-enum LFlag {
-  Long  = 1 << 22,  // long load/store coprocessor
-  Short = 0 << 22   // short load/store coprocessor
-};
-
-
-// -----------------------------------------------------------------------------
-// Machine instruction Operands
-
-// Class Operand represents a shifter operand in data processing instructions
-class Operand BASE_EMBEDDED {
- public:
-  // immediate
-  INLINE(explicit Operand(int32_t immediate,
-         RelocInfo::Mode rmode = RelocInfo::NONE));
-  INLINE(explicit Operand(const ExternalReference& f));
-  INLINE(explicit Operand(const char* s));
-  explicit Operand(Handle<Object> handle);
-  INLINE(explicit Operand(Smi* value));
-
-  // rm
-  INLINE(explicit Operand(Register rm));
-
-  // rm <shift_op> shift_imm
-  explicit Operand(Register rm, ShiftOp shift_op, int shift_imm);
-
-  // rm <shift_op> rs
-  explicit Operand(Register rm, ShiftOp shift_op, Register rs);
-
-  // Return true if this is a register operand.
-  INLINE(bool is_reg() const);
-
-  Register rm() const { return rm_; }
-
- private:
-  Register rm_;
-  Register rs_;
-  ShiftOp shift_op_;
-  int shift_imm_;  // valid if rm_ != no_reg && rs_ == no_reg
-  int32_t imm32_;  // valid if rm_ == no_reg
-  RelocInfo::Mode rmode_;
-
-  friend class Assembler;
-};
-
-
-// Class MemOperand represents a memory operand in load and store instructions
-class MemOperand BASE_EMBEDDED {
- public:
-  // [rn +/- offset]      Offset/NegOffset
-  // [rn +/- offset]!     PreIndex/NegPreIndex
-  // [rn], +/- offset     PostIndex/NegPostIndex
-  // offset is any signed 32-bit value; offset is first loaded to register ip if
-  // it does not fit the addressing mode (12-bit unsigned and sign bit)
-  explicit MemOperand(Register rn, int32_t offset = 0, AddrMode am = Offset);
-
-  // [rn +/- rm]          Offset/NegOffset
-  // [rn +/- rm]!         PreIndex/NegPreIndex
-  // [rn], +/- rm         PostIndex/NegPostIndex
-  explicit MemOperand(Register rn, Register rm, AddrMode am = Offset);
-
-  // [rn +/- rm <shift_op> shift_imm]      Offset/NegOffset
-  // [rn +/- rm <shift_op> shift_imm]!     PreIndex/NegPreIndex
-  // [rn], +/- rm <shift_op> shift_imm     PostIndex/NegPostIndex
-  explicit MemOperand(Register rn, Register rm,
-                      ShiftOp shift_op, int shift_imm, AddrMode am = Offset);
-
- private:
-  Register rn_;  // base
-  Register rm_;  // register offset
-  int32_t offset_;  // valid if rm_ == no_reg
-  ShiftOp shift_op_;
-  int shift_imm_;  // valid if rm_ != no_reg && rs_ == no_reg
-  AddrMode am_;  // bits P, U, and W
-
-  friend class Assembler;
-};
-
-// CpuFeatures keeps track of which features are supported by the target CPU.
-// Supported features must be enabled by a Scope before use.
-class CpuFeatures : public AllStatic {
- public:
-  // Detect features of the target CPU. Set safe defaults if the serializer
-  // is enabled (snapshots must be portable).
-  static void Probe();
-
-  // Check whether a feature is supported by the target CPU.
-  static bool IsSupported(CpuFeature f) {
-    if (f == VFP3 && !FLAG_enable_vfp3) return false;
-    return (supported_ & (1u << f)) != 0;
-  }
-
-  // Check whether a feature is currently enabled.
-  static bool IsEnabled(CpuFeature f) {
-    return (enabled_ & (1u << f)) != 0;
-  }
-
-  // Enable a specified feature within a scope.
-  class Scope BASE_EMBEDDED {
-#ifdef DEBUG
-   public:
-    explicit Scope(CpuFeature f) {
-      ASSERT(CpuFeatures::IsSupported(f));
-      ASSERT(!Serializer::enabled() ||
-             (found_by_runtime_probing_ & (1u << f)) == 0);
-      old_enabled_ = CpuFeatures::enabled_;
-      CpuFeatures::enabled_ |= 1u << f;
-    }
-    ~Scope() { CpuFeatures::enabled_ = old_enabled_; }
-   private:
-    unsigned old_enabled_;
-#else
-   public:
-    explicit Scope(CpuFeature f) {}
-#endif
-  };
-
- private:
-  static unsigned supported_;
-  static unsigned enabled_;
-  static unsigned found_by_runtime_probing_;
-};
-
-
-typedef int32_t Instr;
-
-
-extern const Instr kMovLrPc;
-extern const Instr kLdrPCPattern;
-
-
-class Assembler : public Malloced {
- public:
-  // Create an assembler. Instructions and relocation information are emitted
-  // into a buffer, with the instructions starting from the beginning and the
-  // relocation information starting from the end of the buffer. See CodeDesc
-  // for a detailed comment on the layout (globals.h).
-  //
-  // If the provided buffer is NULL, the assembler allocates and grows its own
-  // buffer, and buffer_size determines the initial buffer size. The buffer is
-  // owned by the assembler and deallocated upon destruction of the assembler.
-  //
-  // If the provided buffer is not NULL, the assembler uses the provided buffer
-  // for code generation and assumes its size to be buffer_size. If the buffer
-  // is too small, a fatal error occurs. No deallocation of the buffer is done
-  // upon destruction of the assembler.
-  Assembler(void* buffer, int buffer_size);
-  ~Assembler();
-
-  // GetCode emits any pending (non-emitted) code and fills the descriptor
-  // desc. GetCode() is idempotent; it returns the same result if no other
-  // Assembler functions are invoked in between GetCode() calls.
-  void GetCode(CodeDesc* desc);
-
-  // Label operations & relative jumps (PPUM Appendix D)
-  //
-  // Takes a branch opcode (cc) and a label (L) and generates
-  // either a backward branch or a forward branch and links it
-  // to the label fixup chain. Usage:
-  //
-  // Label L;    // unbound label
-  // j(cc, &L);  // forward branch to unbound label
-  // bind(&L);   // bind label to the current pc
-  // j(cc, &L);  // backward branch to bound label
-  // bind(&L);   // illegal: a label may be bound only once
-  //
-  // Note: The same Label can be used for forward and backward branches
-  // but it may be bound only once.
-
-  void bind(Label* L);  // binds an unbound label L to the current code position
-
-  // Returns the branch offset to the given label from the current code position.
-  // Links the label to the current position if it is still unbound.
-  // Manages the jump elimination optimization if the second parameter is true.
-  int branch_offset(Label* L, bool jump_elimination_allowed);
-
-  // Puts a label's target address at the given position.
-  // The high 8 bits are set to zero.
-  void label_at_put(Label* L, int at_offset);
-
-  // Return the address in the constant pool of the code target address used by
-  // the branch/call instruction at pc.
-  INLINE(static Address target_address_address_at(Address pc));
-
-  // Read/Modify the code target address in the branch/call instruction at pc.
-  INLINE(static Address target_address_at(Address pc));
-  INLINE(static void set_target_address_at(Address pc, Address target));
-
-  // This sets the branch destination (which is in the constant pool on ARM).
-  // This is for calls and branches within generated code.
-  inline static void set_target_at(Address constant_pool_entry, Address target);
-
-  // This sets the branch destination (which is in the constant pool on ARM).
-  // This is for calls and branches to runtime code.
-  inline static void set_external_target_at(Address constant_pool_entry,
-                                            Address target) {
-    set_target_at(constant_pool_entry, target);
-  }
-
-  // Here we are patching the address in the constant pool, not the actual call
-  // instruction.  The address in the constant pool is the same size as a
-  // pointer.
-  static const int kCallTargetSize = kPointerSize;
-  static const int kExternalTargetSize = kPointerSize;
-
-  // Size of an instruction.
-  static const int kInstrSize = sizeof(Instr);
-
-  // Distance between the instruction referring to the address of the call
-  // target (ldr pc, [target addr in const pool]) and the return address
-  static const int kCallTargetAddressOffset = kInstrSize;
-
-  // Distance between start of patched return sequence and the emitted address
-  // to jump to.
-  static const int kPatchReturnSequenceAddressOffset = kInstrSize;
-
-  // Difference between address of current opcode and value read from pc
-  // register.
-  static const int kPcLoadDelta = 8;
-
-  static const int kJSReturnSequenceLength = 4;
-
-  // ---------------------------------------------------------------------------
-  // Code generation
-
-  // Insert the smallest number of nop instructions
-  // possible to align the pc offset to a multiple
-  // of m. m must be a power of 2 (>= 4).
-  void Align(int m);
-
-  // Branch instructions
-  void b(int branch_offset, Condition cond = al);
-  void bl(int branch_offset, Condition cond = al);
-  void blx(int branch_offset);  // v5 and above
-  void blx(Register target, Condition cond = al);  // v5 and above
-  void bx(Register target, Condition cond = al);  // v5 and above, plus v4t
-
-  // Convenience branch instructions using labels
-  void b(Label* L, Condition cond = al)  {
-    b(branch_offset(L, cond == al), cond);
-  }
-  void b(Condition cond, Label* L)  { b(branch_offset(L, cond == al), cond); }
-  void bl(Label* L, Condition cond = al)  { bl(branch_offset(L, false), cond); }
-  void bl(Condition cond, Label* L)  { bl(branch_offset(L, false), cond); }
-  void blx(Label* L)  { blx(branch_offset(L, false)); }  // v5 and above
-
-  // Data-processing instructions
-  void ubfx(Register dst, Register src1, const Operand& src2,
-            const Operand& src3, Condition cond = al);
-
-  void and_(Register dst, Register src1, const Operand& src2,
-            SBit s = LeaveCC, Condition cond = al);
-
-  void eor(Register dst, Register src1, const Operand& src2,
-           SBit s = LeaveCC, Condition cond = al);
-
-  void sub(Register dst, Register src1, const Operand& src2,
-           SBit s = LeaveCC, Condition cond = al);
-  void sub(Register dst, Register src1, Register src2,
-           SBit s = LeaveCC, Condition cond = al) {
-    sub(dst, src1, Operand(src2), s, cond);
-  }
-
-  void rsb(Register dst, Register src1, const Operand& src2,
-           SBit s = LeaveCC, Condition cond = al);
-
-  void add(Register dst, Register src1, const Operand& src2,
-           SBit s = LeaveCC, Condition cond = al);
-
-  void adc(Register dst, Register src1, const Operand& src2,
-           SBit s = LeaveCC, Condition cond = al);
-
-  void sbc(Register dst, Register src1, const Operand& src2,
-           SBit s = LeaveCC, Condition cond = al);
-
-  void rsc(Register dst, Register src1, const Operand& src2,
-           SBit s = LeaveCC, Condition cond = al);
-
-  void tst(Register src1, const Operand& src2, Condition cond = al);
-  void tst(Register src1, Register src2, Condition cond = al) {
-    tst(src1, Operand(src2), cond);
-  }
-
-  void teq(Register src1, const Operand& src2, Condition cond = al);
-
-  void cmp(Register src1, const Operand& src2, Condition cond = al);
-  void cmp(Register src1, Register src2, Condition cond = al) {
-    cmp(src1, Operand(src2), cond);
-  }
-
-  void cmn(Register src1, const Operand& src2, Condition cond = al);
-
-  void orr(Register dst, Register src1, const Operand& src2,
-           SBit s = LeaveCC, Condition cond = al);
-  void orr(Register dst, Register src1, Register src2,
-           SBit s = LeaveCC, Condition cond = al) {
-    orr(dst, src1, Operand(src2), s, cond);
-  }
-
-  void mov(Register dst, const Operand& src,
-           SBit s = LeaveCC, Condition cond = al);
-  void mov(Register dst, Register src, SBit s = LeaveCC, Condition cond = al) {
-    mov(dst, Operand(src), s, cond);
-  }
-
-  void bic(Register dst, Register src1, const Operand& src2,
-           SBit s = LeaveCC, Condition cond = al);
-
-  void mvn(Register dst, const Operand& src,
-           SBit s = LeaveCC, Condition cond = al);
-
-  // Multiply instructions
-
-  void mla(Register dst, Register src1, Register src2, Register srcA,
-           SBit s = LeaveCC, Condition cond = al);
-
-  void mul(Register dst, Register src1, Register src2,
-           SBit s = LeaveCC, Condition cond = al);
-
-  void smlal(Register dstL, Register dstH, Register src1, Register src2,
-             SBit s = LeaveCC, Condition cond = al);
-
-  void smull(Register dstL, Register dstH, Register src1, Register src2,
-             SBit s = LeaveCC, Condition cond = al);
-
-  void umlal(Register dstL, Register dstH, Register src1, Register src2,
-             SBit s = LeaveCC, Condition cond = al);
-
-  void umull(Register dstL, Register dstH, Register src1, Register src2,
-             SBit s = LeaveCC, Condition cond = al);
-
-  // Miscellaneous arithmetic instructions
-
-  void clz(Register dst, Register src, Condition cond = al);  // v5 and above
-
-  // Status register access instructions
-
-  void mrs(Register dst, SRegister s, Condition cond = al);
-  void msr(SRegisterFieldMask fields, const Operand& src, Condition cond = al);
-
-  // Load/Store instructions
-  void ldr(Register dst, const MemOperand& src, Condition cond = al);
-  void str(Register src, const MemOperand& dst, Condition cond = al);
-  void ldrb(Register dst, const MemOperand& src, Condition cond = al);
-  void strb(Register src, const MemOperand& dst, Condition cond = al);
-  void ldrh(Register dst, const MemOperand& src, Condition cond = al);
-  void strh(Register src, const MemOperand& dst, Condition cond = al);
-  void ldrsb(Register dst, const MemOperand& src, Condition cond = al);
-  void ldrsh(Register dst, const MemOperand& src, Condition cond = al);
-
-  // Load/Store multiple instructions
-  void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);
-  void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
-
-  // Semaphore instructions
-  void swp(Register dst, Register src, Register base, Condition cond = al);
-  void swpb(Register dst, Register src, Register base, Condition cond = al);
-
-  // Exception-generating instructions and debugging support
-  void stop(const char* msg);
-
-  void bkpt(uint32_t imm16);  // v5 and above
-  void swi(uint32_t imm24, Condition cond = al);
-
-  // Coprocessor instructions
-
-  void cdp(Coprocessor coproc, int opcode_1,
-           CRegister crd, CRegister crn, CRegister crm,
-           int opcode_2, Condition cond = al);
-
-  void cdp2(Coprocessor coproc, int opcode_1,
-            CRegister crd, CRegister crn, CRegister crm,
-            int opcode_2);  // v5 and above
-
-  void mcr(Coprocessor coproc, int opcode_1,
-           Register rd, CRegister crn, CRegister crm,
-           int opcode_2 = 0, Condition cond = al);
-
-  void mcr2(Coprocessor coproc, int opcode_1,
-            Register rd, CRegister crn, CRegister crm,
-            int opcode_2 = 0);  // v5 and above
-
-  void mrc(Coprocessor coproc, int opcode_1,
-           Register rd, CRegister crn, CRegister crm,
-           int opcode_2 = 0, Condition cond = al);
-
-  void mrc2(Coprocessor coproc, int opcode_1,
-            Register rd, CRegister crn, CRegister crm,
-            int opcode_2 = 0);  // v5 and above
-
-  void ldc(Coprocessor coproc, CRegister crd, const MemOperand& src,
-           LFlag l = Short, Condition cond = al);
-  void ldc(Coprocessor coproc, CRegister crd, Register base, int option,
-           LFlag l = Short, Condition cond = al);
-
-  void ldc2(Coprocessor coproc, CRegister crd, const MemOperand& src,
-            LFlag l = Short);  // v5 and above
-  void ldc2(Coprocessor coproc, CRegister crd, Register base, int option,
-            LFlag l = Short);  // v5 and above
-
-  void stc(Coprocessor coproc, CRegister crd, const MemOperand& dst,
-           LFlag l = Short, Condition cond = al);
-  void stc(Coprocessor coproc, CRegister crd, Register base, int option,
-           LFlag l = Short, Condition cond = al);
-
-  void stc2(Coprocessor coproc, CRegister crd, const MemOperand& dst,
-            LFlag l = Short);  // v5 and above
-  void stc2(Coprocessor coproc, CRegister crd, Register base, int option,
-            LFlag l = Short);  // v5 and above
-
-  // Support for VFP.
-  // All these APIs support S0 to S31 and D0 to D15.
-  // Currently these APIs do not support extended D registers, i.e., D16 to D31.
-  // However, some simple modifications can allow
-  // these APIs to support D16 to D31.
-
-  void vldr(const DwVfpRegister dst,
-            const Register base,
-            int offset,  // Offset must be a multiple of 4.
-            const Condition cond = al);
-  void vstr(const DwVfpRegister src,
-            const Register base,
-            int offset,  // Offset must be a multiple of 4.
-            const Condition cond = al);
-  void vmov(const DwVfpRegister dst,
-            const Register src1,
-            const Register src2,
-            const Condition cond = al);
-  void vmov(const Register dst1,
-            const Register dst2,
-            const DwVfpRegister src,
-            const Condition cond = al);
-  void vmov(const SwVfpRegister dst,
-            const Register src,
-            const Condition cond = al);
-  void vmov(const Register dst,
-            const SwVfpRegister src,
-            const Condition cond = al);
-  void vcvt(const DwVfpRegister dst,
-            const SwVfpRegister src,
-            const Condition cond = al);
-  void vcvt(const SwVfpRegister dst,
-            const DwVfpRegister src,
-            const Condition cond = al);
-
-  void vadd(const DwVfpRegister dst,
-            const DwVfpRegister src1,
-            const DwVfpRegister src2,
-            const Condition cond = al);
-  void vsub(const DwVfpRegister dst,
-            const DwVfpRegister src1,
-            const DwVfpRegister src2,
-            const Condition cond = al);
-  void vmul(const DwVfpRegister dst,
-            const DwVfpRegister src1,
-            const DwVfpRegister src2,
-            const Condition cond = al);
-  void vdiv(const DwVfpRegister dst,
-            const DwVfpRegister src1,
-            const DwVfpRegister src2,
-            const Condition cond = al);
-  void vcmp(const DwVfpRegister src1,
-            const DwVfpRegister src2,
-            const SBit s = LeaveCC,
-            const Condition cond = al);
-  void vmrs(const Register dst,
-            const Condition cond = al);
-
-  // Pseudo instructions
-  void nop()  { mov(r0, Operand(r0)); }
-
-  void push(Register src, Condition cond = al) {
-    str(src, MemOperand(sp, 4, NegPreIndex), cond);
-  }
-
-  void pop(Register dst, Condition cond = al) {
-    ldr(dst, MemOperand(sp, 4, PostIndex), cond);
-  }
-
-  void pop() {
-    add(sp, sp, Operand(kPointerSize));
-  }
-
-  // Load effective address of memory operand x into register dst
-  void lea(Register dst, const MemOperand& x,
-           SBit s = LeaveCC, Condition cond = al);
-
-  // Jump unconditionally to given label.
-  void jmp(Label* L) { b(L, al); }
-
-  // Check the code size generated from label to here.
-  int InstructionsGeneratedSince(Label* l) {
-    return (pc_offset() - l->pos()) / kInstrSize;
-  }
-
-  // Check whether an immediate fits an addressing mode 1 instruction.
-  bool ImmediateFitsAddrMode1Instruction(int32_t imm32);
-
-  // Postpone the generation of the constant pool for the specified number of
-  // instructions.
-  void BlockConstPoolFor(int instructions);
-
-  // Debugging
-
-  // Mark address of the ExitJSFrame code.
-  void RecordJSReturn();
-
-  // Record a comment relocation entry that can be used by a disassembler.
-  // Use --debug_code to enable.
-  void RecordComment(const char* msg);
-
-  void RecordPosition(int pos);
-  void RecordStatementPosition(int pos);
-  void WriteRecordedPositions();
-
-  int pc_offset() const { return pc_ - buffer_; }
-  int current_position() const { return current_position_; }
-  int current_statement_position() const { return current_statement_position_; }
-
- protected:
-  int buffer_space() const { return reloc_info_writer.pos() - pc_; }
-
-  // Read/patch instructions
-  static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
-  void instr_at_put(byte* pc, Instr instr) {
-    *reinterpret_cast<Instr*>(pc) = instr;
-  }
-  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
-  void instr_at_put(int pos, Instr instr) {
-    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
-  }
-
-  // Decode branch instruction at pos and return branch target pos
-  int target_at(int pos);
-
-  // Patch branch instruction at pos to branch to given branch target pos
-  void target_at_put(int pos, int target_pos);
-
-  // Check whether it is time to emit a constant pool for pending reloc info entries.
-  void CheckConstPool(bool force_emit, bool require_jump);
-
-  // Block the emission of the constant pool before pc_offset
-  void BlockConstPoolBefore(int pc_offset) {
-    if (no_const_pool_before_ < pc_offset) no_const_pool_before_ = pc_offset;
-  }
-
- private:
-  // Code buffer:
-  // The buffer into which code and relocation info are generated.
-  byte* buffer_;
-  int buffer_size_;
-  // True if the assembler owns the buffer, false if buffer is external.
-  bool own_buffer_;
-
-  // Buffer size and constant pool distance are checked together at regular
-  // intervals of kBufferCheckInterval emitted bytes
-  static const int kBufferCheckInterval = 1*KB/2;
-  int next_buffer_check_;  // pc offset of next buffer check
-
-  // Code generation
-  // The relocation writer's position is at least kGap bytes below the end of
-  // the generated instructions. This is so that multi-instruction sequences do
-  // not have to check for overflow. The same is true for writes of large
-  // relocation info entries.
-  static const int kGap = 32;
-  byte* pc_;  // the program counter; moves forward
-
-  // Constant pool generation
-  // Pools are emitted in the instruction stream, preferably after unconditional
-  // jumps or after returns from functions (in dead code locations).
-  // If a long code sequence does not contain unconditional jumps, it is
-  // necessary to emit the constant pool before the pool gets too far from the
-  // location it is accessed from. In this case, we emit a jump over the emitted
-  // constant pool.
-  // Constants in the pool may be addresses of functions that get relocated;
-  // if so, a relocation info entry is associated to the constant pool entry.
-
-  // Repeated checking whether the constant pool should be emitted is rather
-  // expensive. By default we only check again once a number of instructions
-  // has been generated. That also means that the sizing of the buffers is not
-  // an exact science, and that we rely on some slop to not overrun buffers.
-  static const int kCheckConstIntervalInst = 32;
-  static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
-
-
-  // Pools are emitted after function return and in dead code at (more or less)
-  // regular intervals of kDistBetweenPools bytes
-  static const int kDistBetweenPools = 1*KB;
-
-  // Constants in pools are accessed via pc relative addressing, which can
-  // reach +/-4KB thereby defining a maximum distance between the instruction
-  // and the accessed constant. We satisfy this constraint by limiting the
-  // distance between pools.
-  static const int kMaxDistBetweenPools = 4*KB - 2*kBufferCheckInterval;
-
-  // Emission of the constant pool may be blocked in some code sequences
-  int no_const_pool_before_;  // block emission before this pc offset
-
-  // Keep track of the last emitted pool to guarantee a maximal distance
-  int last_const_pool_end_;  // pc offset following the last constant pool
-
-  // Relocation info generation
-  // Each relocation is encoded as a variable size value
-  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
-  RelocInfoWriter reloc_info_writer;
-  // Relocation info records are also used during code generation as temporary
-  // containers for constants and code target addresses until they are emitted
-  // to the constant pool. These pending relocation info records are temporarily
-  // stored in a separate buffer until a constant pool is emitted.
-  // If every instruction in a long sequence is accessing the pool, we need one
-  // pending relocation entry per instruction.
-  static const int kMaxNumPRInfo = kMaxDistBetweenPools/kInstrSize;
-  RelocInfo prinfo_[kMaxNumPRInfo];  // the buffer of pending relocation info
-  int num_prinfo_;  // number of pending reloc info entries in the buffer
-
-  // The bound position, before this we cannot do instruction elimination.
-  int last_bound_pos_;
-
-  // source position information
-  int current_position_;
-  int current_statement_position_;
-  int written_position_;
-  int written_statement_position_;
-
-  // Code emission
-  inline void CheckBuffer();
-  void GrowBuffer();
-  inline void emit(Instr x);
-
-  // Instruction generation
-  void addrmod1(Instr instr, Register rn, Register rd, const Operand& x);
-  void addrmod2(Instr instr, Register rd, const MemOperand& x);
-  void addrmod3(Instr instr, Register rd, const MemOperand& x);
-  void addrmod4(Instr instr, Register rn, RegList rl);
-  void addrmod5(Instr instr, CRegister crd, const MemOperand& x);
-
-  // Labels
-  void print(Label* L);
-  void bind_to(Label* L, int pos);
-  void link_to(Label* L, Label* appendix);
-  void next(Label* L);
-
-  // Record reloc info for current pc_
-  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
-
-  friend class RegExpMacroAssemblerARM;
-  friend class RelocInfo;
-  friend class CodePatcher;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_ARM_ASSEMBLER_THUMB2_H_
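With both thumb2 assembler files gone, the retained src/arm/assembler-arm.h carries the identical interface, including the CpuFeatures machinery declared above. A hedged sketch of how the Scope guard is used (the function name and register choices are illustrative, not from the patch):

    // Emit a VFP3 instruction only when the target CPU supports it.  Debug
    // builds assert inside the Assembler that the feature is enabled, so the
    // Scope must be open around the emission.
    void EmitVfpAdd(Assembler* assm) {
      if (CpuFeatures::IsSupported(VFP3)) {
        CpuFeatures::Scope scope(VFP3);  // Enables VFP3 until end of scope.
        assm->vadd(d0, d1, d2);          // d0 = d1 + d2.
      }
    }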
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 5718cb3..1f77656 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "codegen-inl.h"
 #include "debug.h"
 #include "runtime.h"
@@ -130,7 +132,7 @@
   // of the JSArray.
   // result: JSObject
   // scratch2: start of next object
-  __ lea(scratch1, MemOperand(result, JSArray::kSize));
+  __ add(scratch1, result, Operand(JSArray::kSize));
   __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
 
   // Clear the heap tag on the elements array.
@@ -1311,3 +1313,5 @@
 #undef __
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
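The `lea` removed in the hunk above was only needed by the thumb2 backend; for an immediate offset, loading an effective address is plain base-plus-immediate arithmetic. A sketch of the equivalence, assuming `__` expands to the enclosing MacroAssembler as elsewhere in these files:

    // Before: load the effective address of [result + JSArray::kSize].
    __ lea(scratch1, MemOperand(result, JSArray::kSize));
    // After: the same address computed with an ordinary add.
    __ add(scratch1, result, Operand(JSArray::kSize));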
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 68ae026..64ed425 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "bootstrapper.h"
 #include "codegen-inl.h"
 #include "compiler.h"
@@ -1368,6 +1370,7 @@
   // give us a megamorphic load site. Not super, but it works.
   LoadAndSpill(applicand);
   Handle<String> name = Factory::LookupAsciiSymbol("apply");
+  frame_->Dup();
   frame_->CallLoadIC(name, RelocInfo::CODE_TARGET);
   frame_->EmitPush(r0);
 
@@ -1511,7 +1514,7 @@
   // Then process it as a normal function call.
   __ ldr(r0, MemOperand(sp, 3 * kPointerSize));
   __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
-  __ strd(r0, MemOperand(sp, 2 * kPointerSize));
+  __ Strd(r0, r1, MemOperand(sp, 2 * kPointerSize));
 
   CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
   frame_->CallStub(&call_function, 3);
@@ -2304,12 +2307,10 @@
   node->continue_target()->SetExpectedHeight();
 
   // Load the current count to r0, load the length to r1.
-  __ ldrd(r0, frame_->ElementAt(0));
+  __ Ldrd(r0, r1, frame_->ElementAt(0));
   __ cmp(r0, r1);  // compare to the array length
   node->break_target()->Branch(hs);
 
-  __ ldr(r0, frame_->ElementAt(0));
-
   // Get the i'th entry of the array.
   __ ldr(r2, frame_->ElementAt(2));
   __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -2727,7 +2728,6 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ FunctionLiteral");
 
   // Build the function info and instantiate it.
@@ -2748,7 +2748,6 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
   InstantiateFunction(node->shared_function_info());
   ASSERT_EQ(original_height + 1, frame_->height());
@@ -3007,8 +3006,6 @@
                      typeof_state == INSIDE_TYPEOF
                          ? RelocInfo::CODE_TARGET
                          : RelocInfo::CODE_TARGET_CONTEXT);
-  // Drop the global object. The result is in r0.
-  frame_->Drop();
 }
 
 
@@ -3422,7 +3419,6 @@
       frame_->Dup();
     }
     EmitNamedLoad(name, var != NULL);
-    frame_->Drop();  // Receiver is left on the stack.
     frame_->EmitPush(r0);
 
     // Perform the binary operation.
@@ -3561,9 +3557,7 @@
   // Perform the assignment.  It is safe to ignore constants here.
   ASSERT(node->op() != Token::INIT_CONST);
   CodeForSourcePosition(node->position());
-  frame_->PopToR0();
   EmitKeyedStore(prop->key()->type());
-  frame_->Drop(2);  // Key and receiver are left on the stack.
   frame_->EmitPush(r0);
 
   // Stack layout:
@@ -4047,37 +4041,35 @@
 
 
 void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 1);
-  LoadAndSpill(args->at(0));
-  frame_->EmitPop(r0);
-  __ tst(r0, Operand(kSmiTagMask));
+  Load(args->at(0));
+  Register reg = frame_->PopToRegister();
+  __ tst(reg, Operand(kSmiTagMask));
   cc_reg_ = eq;
 }
 
 
 void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   // See comment in CodeGenerator::GenerateLog in codegen-ia32.cc.
   ASSERT_EQ(args->length(), 3);
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (ShouldGenerateLog(args->at(0))) {
-    LoadAndSpill(args->at(1));
-    LoadAndSpill(args->at(2));
+    Load(args->at(1));
+    Load(args->at(2));
+    frame_->SpillAll();
+    VirtualFrame::SpilledScope spilled_scope(frame_);
     __ CallRuntime(Runtime::kLog, 2);
   }
 #endif
-  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
-  frame_->EmitPush(r0);
+  frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
 }
 
 
 void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 1);
-  LoadAndSpill(args->at(0));
-  frame_->EmitPop(r0);
-  __ tst(r0, Operand(kSmiTagMask | 0x80000000u));
+  Load(args->at(0));
+  Register reg = frame_->PopToRegister();
+  __ tst(reg, Operand(kSmiTagMask | 0x80000000u));
   cc_reg_ = eq;
 }
 
@@ -4108,22 +4100,23 @@
 // flatten the string, which will ensure that the answer is in the left hand
 // side the next time around.
 void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 2);
   Comment(masm_, "[ GenerateFastCharCodeAt");
 
-  LoadAndSpill(args->at(0));
-  LoadAndSpill(args->at(1));
-  frame_->EmitPop(r1);  // Index.
-  frame_->EmitPop(r2);  // String.
+  Load(args->at(0));
+  Load(args->at(1));
+  Register index = frame_->PopToRegister();         // Index.
+  Register string = frame_->PopToRegister(index);   // String.
+  Register result = VirtualFrame::scratch0();
+  Register scratch = VirtualFrame::scratch1();
 
   Label slow_case;
   Label exit;
   StringHelper::GenerateFastCharCodeAt(masm_,
-                                       r2,
-                                       r1,
-                                       r3,
-                                       r0,
+                                       string,
+                                       index,
+                                       scratch,
+                                       result,
                                        &slow_case,
                                        &slow_case,
                                        &slow_case,
@@ -4133,10 +4126,10 @@
   __ bind(&slow_case);
   // Move the undefined value into the result register, which will
   // trigger the slow case.
-  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
 
   __ bind(&exit);
-  frame_->EmitPush(r0);
+  frame_->EmitPush(result);
 }
 
 
@@ -4216,9 +4209,8 @@
   __ ldr(map_reg, FieldMemOperand(r1, HeapObject::kMapOffset));
   // Undetectable objects behave like undefined when tested with typeof.
   __ ldrb(r1, FieldMemOperand(map_reg, Map::kBitFieldOffset));
-  __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
-  __ cmp(r1, Operand(1 << Map::kIsUndetectable));
-  false_target()->Branch(eq);
+  __ tst(r1, Operand(1 << Map::kIsUndetectable));
+  false_target()->Branch(ne);
 
   __ ldrb(r1, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
   __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
@@ -4258,48 +4250,52 @@
 
 
 void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 0);
 
+  Register scratch0 = VirtualFrame::scratch0();
+  Register scratch1 = VirtualFrame::scratch1();
   // Get the frame pointer for the calling frame.
-  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ldr(scratch0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
 
   // Skip the arguments adaptor frame if it exists.
-  Label check_frame_marker;
-  __ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
-  __ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ b(ne, &check_frame_marker);
-  __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
+  __ ldr(scratch1,
+         MemOperand(scratch0, StandardFrameConstants::kContextOffset));
+  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ ldr(scratch0,
+         MemOperand(scratch0, StandardFrameConstants::kCallerFPOffset), eq);
 
   // Check the marker in the calling frame.
-  __ bind(&check_frame_marker);
-  __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
-  __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+  __ ldr(scratch1,
+         MemOperand(scratch0, StandardFrameConstants::kMarkerOffset));
+  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
   cc_reg_ = eq;
 }
 
 
 void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 0);
 
-  Label exit;
-
-  // Get the number of formal parameters.
-  __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
+  Register tos = frame_->GetTOSRegister();
+  Register scratch0 = VirtualFrame::scratch0();
+  Register scratch1 = VirtualFrame::scratch1();
 
   // Check if the calling frame is an arguments adaptor frame.
-  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
-  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ b(ne, &exit);
+  __ ldr(scratch0,
+         MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ldr(scratch1,
+         MemOperand(scratch0, StandardFrameConstants::kContextOffset));
+  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Get the number of formal parameters.
+  __ mov(tos, Operand(Smi::FromInt(scope()->num_parameters())), LeaveCC, ne);
 
   // Arguments adaptor case: Read the arguments length from the
   // adaptor frame.
-  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ ldr(tos,
+         MemOperand(scratch0, ArgumentsAdaptorFrameConstants::kLengthOffset),
+         eq);
 
-  __ bind(&exit);
-  frame_->EmitPush(r0);
+  frame_->EmitPush(tos);
 }
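The two hunks above trade compare-and-branch sequences for ARM predicated execution: `cmp` sets the flags once, each following instruction carries a condition, and exactly one of the two assignments to `tos` takes effect, so no label is needed. Restated as a sketch with the same registers as the hunk:

    __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
    // Ordinary frame: use the formal parameter count (executes only if ne).
    __ mov(tos, Operand(Smi::FromInt(scope()->num_parameters())), LeaveCC, ne);
    // Adaptor frame: read the actual argument count (executes only if eq).
    __ ldr(tos,
           MemOperand(scratch0, ArgumentsAdaptorFrameConstants::kLengthOffset),
           eq);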
 
 
@@ -4737,15 +4733,14 @@
 
 
 void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 2);
 
   // Load the two objects into registers and perform the comparison.
-  LoadAndSpill(args->at(0));
-  LoadAndSpill(args->at(1));
-  frame_->EmitPop(r0);
-  frame_->EmitPop(r1);
-  __ cmp(r0, r1);
+  Load(args->at(0));
+  Load(args->at(1));
+  Register lhs = frame_->PopToRegister();
+  Register rhs = frame_->PopToRegister(lhs);
+  __ cmp(lhs, rhs);
   cc_reg_ = eq;
 }
 
@@ -5044,6 +5039,7 @@
   // after evaluating the left hand side (due to the shortcut
   // semantics), but the compiler must (statically) know if the result
   // of compiling the binary operation is materialized or not.
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   if (node->op() == Token::AND) {
     JumpTarget is_true;
     LoadConditionAndSpill(node->left(),
@@ -5055,8 +5051,7 @@
       JumpTarget pop_and_continue;
       JumpTarget exit;
 
-      __ ldr(r0, frame_->Top());  // Duplicate the stack top.
-      frame_->EmitPush(r0);
+      frame_->Dup();
       // Avoid popping the result if it converts to 'false' using the
       // standard ToBoolean() conversion as described in ECMA-262,
       // section 9.2, page 30.
@@ -5065,7 +5060,7 @@
 
       // Pop the result of evaluating the first part.
       pop_and_continue.Bind();
-      frame_->EmitPop(r0);
+      frame_->Pop();
 
       // Evaluate right side expression.
       is_true.Bind();
@@ -5102,8 +5097,7 @@
       JumpTarget pop_and_continue;
       JumpTarget exit;
 
-      __ ldr(r0, frame_->Top());
-      frame_->EmitPush(r0);
+      frame_->Dup();
       // Avoid popping the result if it converts to 'true' using the
       // standard ToBoolean() conversion as described in ECMA-262,
       // section 9.2, page 30.
@@ -5112,7 +5106,7 @@
 
       // Pop the result of evaluating the first part.
       pop_and_continue.Bind();
-      frame_->EmitPop(r0);
+      frame_->Pop();
 
       // Evaluate right side expression.
       is_false.Bind();
@@ -5147,7 +5141,6 @@
   Comment cmnt(masm_, "[ BinaryOperation");
 
   if (node->op() == Token::AND || node->op() == Token::OR) {
-    VirtualFrame::SpilledScope spilled_scope(frame_);
     GenerateLogicalBooleanOperation(node);
   } else {
     // Optimize for the case where (at least) one of the expressions
@@ -5200,9 +5193,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(frame_);
-  __ ldr(r0, frame_->Function());
-  frame_->EmitPush(r0);
+  frame_->EmitPush(MemOperand(frame_->Function()));
   ASSERT_EQ(original_height + 1, frame_->height());
 }
 
@@ -5430,26 +5421,30 @@
 
 class DeferredReferenceGetNamedValue: public DeferredCode {
  public:
-  explicit DeferredReferenceGetNamedValue(Handle<String> name) : name_(name) {
+  explicit DeferredReferenceGetNamedValue(Register receiver,
+                                          Handle<String> name)
+      : receiver_(receiver), name_(name) {
     set_comment("[ DeferredReferenceGetNamedValue");
   }
 
   virtual void Generate();
 
  private:
+  Register receiver_;
   Handle<String> name_;
 };
 
 
 void DeferredReferenceGetNamedValue::Generate() {
+  ASSERT(receiver_.is(r0) || receiver_.is(r1));
+
   Register scratch1 = VirtualFrame::scratch0();
   Register scratch2 = VirtualFrame::scratch1();
   __ DecrementCounter(&Counters::named_load_inline, 1, scratch1, scratch2);
   __ IncrementCounter(&Counters::named_load_inline_miss, 1, scratch1, scratch2);
 
-  // Setup the registers and call load IC.
-  // On entry to this deferred code, r0 is assumed to already contain the
-  // receiver from the top of the stack.
+  // Ensure receiver in r0 and name in r2 to match load ic calling convention.
+  __ Move(r0, receiver_);
   __ mov(r2, Operand(name_));
 
   // The rest of the instructions in the deferred code must be together.
@@ -5517,11 +5512,19 @@
 
 class DeferredReferenceSetKeyedValue: public DeferredCode {
  public:
-  DeferredReferenceSetKeyedValue() {
+  DeferredReferenceSetKeyedValue(Register value,
+                                 Register key,
+                                 Register receiver)
+      : value_(value), key_(key), receiver_(receiver) {
     set_comment("[ DeferredReferenceSetKeyedValue");
   }
 
   virtual void Generate();
+
+ private:
+  Register value_;
+  Register key_;
+  Register receiver_;
 };
 
 
@@ -5532,10 +5535,17 @@
   __ IncrementCounter(
       &Counters::keyed_store_inline_miss, 1, scratch1, scratch2);
 
+  // Ensure value in r0, key in r1 and receiver in r2 to match keyed store ic
+  // calling convention.
+  if (value_.is(r1)) {
+    __ Swap(r0, r1, ip);
+  }
+  ASSERT(receiver_.is(r2));
+
   // The rest of the instructions in the deferred code must be together.
   { Assembler::BlockConstPoolScope block_const_pool(masm_);
-    // Call keyed load IC. It has receiver amd key on the stack and the value to
-    // store in r0.
+    // Call keyed store IC. It has the arguments value, key and receiver in r0,
+    // r1 and r2.
     Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
     __ Call(ic, RelocInfo::CODE_TARGET);
     // The call must be followed by a nop instruction to indicate that the
@@ -5573,10 +5583,11 @@
     // this code
 
     // Load the receiver from the stack.
-    frame_->SpillAllButCopyTOSToR0();
+    Register receiver = frame_->PopToRegister();
+    VirtualFrame::SpilledScope spilled(frame_);
 
     DeferredReferenceGetNamedValue* deferred =
-        new DeferredReferenceGetNamedValue(name);
+        new DeferredReferenceGetNamedValue(receiver, name);
 
 #ifdef DEBUG
     int kInlinedNamedLoadInstructions = 7;
@@ -5586,19 +5597,19 @@
 
     { Assembler::BlockConstPoolScope block_const_pool(masm_);
       // Check that the receiver is a heap object.
-      __ tst(r0, Operand(kSmiTagMask));
+      __ tst(receiver, Operand(kSmiTagMask));
       deferred->Branch(eq);
 
       // Check the map. The null map used below is patched by the inline cache
       // code.
-      __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+      __ ldr(r2, FieldMemOperand(receiver, HeapObject::kMapOffset));
       __ mov(r3, Operand(Factory::null_value()));
       __ cmp(r2, r3);
       deferred->Branch(ne);
 
       // Initially use an invalid index. The index will be patched by the
       // inline cache code.
-      __ ldr(r0, MemOperand(r0, 0));
+      __ ldr(r0, MemOperand(receiver, 0));
 
       // Make sure that the expected number of instructions are generated.
       ASSERT_EQ(kInlinedNamedLoadInstructions,
@@ -5695,7 +5706,7 @@
 
       __ mov(r0, scratch1);
       // Make sure that the expected number of instructions are generated.
-      ASSERT_EQ(kInlinedKeyedLoadInstructionsAfterPatchSize,
+      ASSERT_EQ(kInlinedKeyedLoadInstructionsAfterPatch,
                 masm_->InstructionsGeneratedSince(&check_inlined_codesize));
     }
 
@@ -5705,78 +5716,86 @@
 
 
 void CodeGenerator::EmitKeyedStore(StaticType* key_type) {
-  VirtualFrame::SpilledScope scope(frame_);
   // Generate inlined version of the keyed store if the code is in a loop
   // and the key is likely to be a smi.
   if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
     // Inline the keyed store.
     Comment cmnt(masm_, "[ Inlined store to keyed property");
 
-    DeferredReferenceSetKeyedValue* deferred =
-        new DeferredReferenceSetKeyedValue();
+    Register scratch1 = VirtualFrame::scratch0();
+    Register scratch2 = VirtualFrame::scratch1();
+    Register scratch3 = r3;
 
     // Counter will be decremented in the deferred code. Placed here to avoid
     // having it in the instruction stream below where patching will occur.
     __ IncrementCounter(&Counters::keyed_store_inline, 1,
-                        frame_->scratch0(), frame_->scratch1());
+                        scratch1, scratch2);
+
+    // Load the value, key and receiver from the stack.
+    Register value = frame_->PopToRegister();
+    Register key = frame_->PopToRegister(value);
+    Register receiver = r2;
+    frame_->EmitPop(receiver);
+    VirtualFrame::SpilledScope spilled(frame_);
+
+    // The deferred code expects value, key and receiver in registers.
+    DeferredReferenceSetKeyedValue* deferred =
+        new DeferredReferenceSetKeyedValue(value, key, receiver);
 
     // Check that the value is a smi. As this inlined code does not set the
     // write barrier it is only possible to store smi values.
-    __ tst(r0, Operand(kSmiTagMask));
+    __ tst(value, Operand(kSmiTagMask));
     deferred->Branch(ne);
 
-    // Load the key and receiver from the stack.
-    __ ldr(r1, MemOperand(sp, 0));
-    __ ldr(r2, MemOperand(sp, kPointerSize));
-
     // Check that the key is a smi.
-    __ tst(r1, Operand(kSmiTagMask));
+    __ tst(key, Operand(kSmiTagMask));
     deferred->Branch(ne);
 
     // Check that the receiver is a heap object.
-    __ tst(r2, Operand(kSmiTagMask));
+    __ tst(receiver, Operand(kSmiTagMask));
     deferred->Branch(eq);
 
     // Check that the receiver is a JSArray.
-    __ CompareObjectType(r2, r3, r3, JS_ARRAY_TYPE);
+    __ CompareObjectType(receiver, scratch1, scratch1, JS_ARRAY_TYPE);
     deferred->Branch(ne);
 
     // Check that the key is within bounds. Both the key and the length of
     // the JSArray are smis. Use unsigned comparison to handle negative keys.
-    __ ldr(r3, FieldMemOperand(r2, JSArray::kLengthOffset));
-    __ cmp(r3, r1);
+    __ ldr(scratch1, FieldMemOperand(receiver, JSArray::kLengthOffset));
+    __ cmp(scratch1, key);
     deferred->Branch(ls);  // Unsigned less equal.
 
     // The following instructions are the part of the inlined store keyed
     // property code which can be patched. Therefore the exact number of
     // instructions generated need to be fixed, so the constant pool is blocked
     // while generating this code.
-#ifdef DEBUG
-    int kInlinedKeyedStoreInstructions = 7;
-    Label check_inlined_codesize;
-    masm_->bind(&check_inlined_codesize);
-#endif
     { Assembler::BlockConstPoolScope block_const_pool(masm_);
       // Get the elements array from the receiver and check that it
       // is not a dictionary.
-      __ ldr(r3, FieldMemOperand(r2, JSObject::kElementsOffset));
-      __ ldr(r4, FieldMemOperand(r3, JSObject::kMapOffset));
+      __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
+      __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
       // Read the fixed array map from the constant pool (not from the root
       // array) so that the value can be patched.  When debugging, we patch this
       // comparison to always fail so that we will hit the IC call in the
       // deferred code which will allow the debugger to break for fast case
       // stores.
-      __ mov(r5, Operand(Factory::fixed_array_map()));
-      __ cmp(r4, r5);
+#ifdef DEBUG
+    Label check_inlined_codesize;
+    masm_->bind(&check_inlined_codesize);
+#endif
+      __ mov(scratch3, Operand(Factory::fixed_array_map()));
+      __ cmp(scratch2, scratch3);
       deferred->Branch(ne);
 
       // Store the value.
-      __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-      __ str(r0, MemOperand(r3, r1, LSL,
-                            kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
+      __ add(scratch1, scratch1,
+             Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+      __ str(value,
+             MemOperand(scratch1, key, LSL,
+                        kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
 
       // Make sure that the expected number of instructions are generated.
-      ASSERT_EQ(kInlinedKeyedStoreInstructions,
+      ASSERT_EQ(kInlinedKeyedStoreInstructionsAfterPatch,
                 masm_->InstructionsGeneratedSince(&check_inlined_codesize));
     }
 
@@ -5839,19 +5858,20 @@
       Variable* var = expression_->AsVariableProxy()->AsVariable();
       bool is_global = var != NULL;
       ASSERT(!is_global || var->is_global());
+      if (persist_after_get_) {
+        cgen_->frame()->Dup();
+      }
       cgen_->EmitNamedLoad(GetName(), is_global);
       cgen_->frame()->EmitPush(r0);
-      if (!persist_after_get_) {
-        cgen_->UnloadReference(this);
-      }
+      if (!persist_after_get_) set_unloaded();
       break;
     }
 
     case KEYED: {
+      ASSERT(property != NULL);
       if (persist_after_get_) {
         cgen_->frame()->Dup2();
       }
-      ASSERT(property != NULL);
       cgen_->EmitKeyedLoad();
       cgen_->frame()->EmitPush(r0);
       if (!persist_after_get_) set_unloaded();
@@ -5892,16 +5912,13 @@
     }
 
     case KEYED: {
-      VirtualFrame::SpilledScope scope(frame);
       Comment cmnt(masm, "[ Store to keyed Property");
       Property* property = expression_->AsProperty();
       ASSERT(property != NULL);
       cgen_->CodeForSourcePosition(property->position());
-
-      frame->EmitPop(r0);  // Value.
       cgen_->EmitKeyedStore(property->key()->type());
       frame->EmitPush(r0);
-      cgen_->UnloadReference(this);
+      set_unloaded();
       break;
     }
 
@@ -6362,7 +6379,7 @@
     ConvertToDoubleStub stub1(r3, r2, r7, r6);
     __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
     // Load rhs to a double in r0, r1.
-    __ ldrd(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+    __ Ldrd(r0, r1, FieldMemOperand(r0, HeapNumber::kValueOffset));
     __ pop(lr);
   }
 
@@ -6397,7 +6414,7 @@
   } else {
     __ push(lr);
     // Load lhs to a double in r2, r3.
-    __ ldrd(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
+    __ Ldrd(r2, r3, FieldMemOperand(r1, HeapNumber::kValueOffset));
     // Convert rhs to a double in r0, r1.
     __ mov(r7, Operand(r0));
     ConvertToDoubleStub stub2(r1, r0, r7, r6);
@@ -6561,8 +6578,8 @@
     __ sub(r7, r1, Operand(kHeapObjectTag));
     __ vldr(d7, r7, HeapNumber::kValueOffset);
   } else {
-    __ ldrd(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
-    __ ldrd(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+    __ Ldrd(r2, r3, FieldMemOperand(r1, HeapNumber::kValueOffset));
+    __ Ldrd(r0, r1, FieldMemOperand(r0, HeapNumber::kValueOffset));
   }
   __ jmp(both_loaded_as_doubles);
 }
@@ -6939,7 +6956,7 @@
       __ vldr(d7, r7, HeapNumber::kValueOffset);
     } else {
       // Calling convention says that second double is in r2 and r3.
-      __ ldrd(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
+      __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
     }
     __ jmp(&finished_loading_r0);
     __ bind(&r0_is_smi);
@@ -6991,7 +7008,7 @@
       __ vldr(d6, r7, HeapNumber::kValueOffset);
     } else {
       // Calling convention says that first double is in r0 and r1.
-      __ ldrd(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
+      __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset));
     }
     __ jmp(&finished_loading_r1);
     __ bind(&r1_is_smi);
@@ -7062,7 +7079,7 @@
       __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset));
   #else
       // Double returned in registers 0 and 1.
-      __ strd(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
+      __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset));
   #endif
       __ mov(r0, Operand(r5));
       // And we are done.
@@ -10020,3 +10037,5 @@
 #undef __
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
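
The #if guard added here recurs in every ARM file touched by this change: when V8
is built for another target the file compiles to an (almost) empty translation
unit, so the build can list all architecture sources unconditionally. The shape,
as a sketch:

    #include "v8.h"

    #if defined(V8_TARGET_ARCH_ARM)
    // ... entire ARM-specific body of the file ...
    #endif  // V8_TARGET_ARCH_ARM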
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 33a85c4..361ea13 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -220,7 +220,8 @@
   static int InlineRuntimeCallArgumentsCount(Handle<String> name);
 
   // Constants related to patching of inlined load/store.
-  static const int kInlinedKeyedLoadInstructionsAfterPatchSize = 19;
+  static const int kInlinedKeyedLoadInstructionsAfterPatch = 19;
+  static const int kInlinedKeyedStoreInstructionsAfterPatch = 5;
 
  private:
   // Construction/Destruction
diff --git a/src/arm/constants-arm.cc b/src/arm/constants-arm.cc
index 2e37120..4e186d1 100644
--- a/src/arm/constants-arm.cc
+++ b/src/arm/constants-arm.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "constants-arm.h"
 
 
@@ -128,3 +130,5 @@
 
 
 } }  // namespace assembler::arm
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/cpu-arm.cc b/src/arm/cpu-arm.cc
index d50c203..3d3e6ae 100644
--- a/src/arm/cpu-arm.cc
+++ b/src/arm/cpu-arm.cc
@@ -32,6 +32,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "cpu.h"
 #include "macro-assembler.h"
 
@@ -136,3 +138,5 @@
 }
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
index d02ba76..69fc504 100644
--- a/src/arm/debug-arm.cc
+++ b/src/arm/debug-arm.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "codegen-inl.h"
 #include "debug.h"
 
@@ -170,10 +172,11 @@
 
 void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
   // ---------- S t a t e --------------
+  //  -- r0     : value
+  //  -- r1     : key
+  //  -- r2     : receiver
   //  -- lr     : return address
-  //  -- sp[0]  : key
-  //  -- sp[4]  : receiver
-  Generate_DebugBreakCallHelper(masm, 0);
+  Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit());
 }
 
 
@@ -237,3 +240,5 @@
 #endif  // ENABLE_DEBUGGER_SUPPORT
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
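
The new debug-break state reflects the changed KeyedStoreIC calling convention:
value, key and receiver now arrive in r0, r1 and r2 instead of on the stack, so
exactly those registers must survive the break. A standalone sketch of the
register-mask idiom, assuming reg.bit() is 1 << code with r0..r15 numbered 0..15:

    #include <cstdint>

    constexpr uint32_t Bit(int code) { return 1u << code; }
    // Live registers at a keyed-store break: value (r0), key (r1), receiver (r2).
    constexpr uint32_t kKeyedStoreLiveRegs = Bit(0) | Bit(1) | Bit(2);
    static_assert(kKeyedStoreLiveRegs == 0x7, "r0 | r1 | r2");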
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 4051096..0ac7d19 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -56,6 +56,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "constants-arm.h"
 #include "disasm.h"
 #include "macro-assembler.h"
@@ -1356,3 +1358,5 @@
 
 
 }  // namespace disasm
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/fast-codegen-arm.cc b/src/arm/fast-codegen-arm.cc
index 5dedc29..48eaf46 100644
--- a/src/arm/fast-codegen-arm.cc
+++ b/src/arm/fast-codegen-arm.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "codegen-inl.h"
 #include "fast-codegen.h"
 #include "scopes.h"
@@ -236,3 +238,5 @@
 
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/frames-arm.cc b/src/arm/frames-arm.cc
index 0cb7f12..271e4a6 100644
--- a/src/arm/frames-arm.cc
+++ b/src/arm/frames-arm.cc
@@ -27,12 +27,10 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "frames-inl.h"
-#ifdef V8_ARM_VARIANT_THUMB
-#include "arm/assembler-thumb2-inl.h"
-#else
 #include "arm/assembler-arm-inl.h"
-#endif
 
 
 namespace v8 {
@@ -121,3 +119,5 @@
 
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 6680af9..fecc213 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "codegen-inl.h"
 #include "compiler.h"
 #include "debug.h"
@@ -62,7 +64,7 @@
   if (mode == PRIMARY) {
     int locals_count = scope()->num_stack_slots();
 
-    __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+    __ Push(lr, fp, cp, r1);
     if (locals_count > 0) {
       // Load undefined value here, so the value is ready for the loop
       // below.
@@ -80,11 +82,17 @@
     bool function_in_register = true;
 
     // Possibly allocate a local context.
-    if (scope()->num_heap_slots() > 0) {
+    int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+    if (heap_slots > 0) {
       Comment cmnt(masm_, "[ Allocate local context");
       // Argument to NewContext is the function, which is in r1.
       __ push(r1);
-      __ CallRuntime(Runtime::kNewContext, 1);
+      if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+        FastNewContextStub stub(heap_slots);
+        __ CallStub(&stub);
+      } else {
+        __ CallRuntime(Runtime::kNewContext, 1);
+      }
       function_in_register = false;
       // Context is returned in both r0 and cp.  It replaces the context
       // passed to us.  It's saved in the stack and kept live in cp.
@@ -142,6 +150,21 @@
     }
   }
 
+  { Comment cmnt(masm_, "[ Declarations");
+    // For named function expressions, declare the function name as a
+    // constant.
+    if (scope()->is_function_scope() && scope()->function() != NULL) {
+      EmitDeclaration(scope()->function(), Variable::CONST, NULL);
+    }
+    // Visit all the explicit declarations unless there is an illegal
+    // redeclaration.
+    if (scope()->HasIllegalRedeclaration()) {
+      scope()->VisitIllegalRedeclaration(this);
+    } else {
+      VisitDeclarations(scope()->declarations());
+    }
+  }
+
   // Check the stack for overflow or break request.
   // Put the lr setup instruction in the delay slot.  The kInstrSize is
   // added to the implicit 8 byte offset that always applies to operations
@@ -158,10 +181,6 @@
            lo);
   }
 
-  { Comment cmnt(masm_, "[ Declarations");
-    VisitDeclarations(scope()->declarations());
-  }
-
   if (FLAG_trace) {
     __ CallRuntime(Runtime::kTraceEnter, 0);
   }
@@ -382,6 +401,38 @@
   }
 }
 
+void FullCodeGenerator::PrepareTest(Label* materialize_true,
+                                    Label* materialize_false,
+                                    Label** if_true,
+                                    Label** if_false) {
+  switch (context_) {
+    case Expression::kUninitialized:
+      UNREACHABLE();
+      break;
+    case Expression::kEffect:
+      // In an effect context, the true and the false case branch to the
+      // same label.
+      *if_true = *if_false = materialize_true;
+      break;
+    case Expression::kValue:
+      *if_true = materialize_true;
+      *if_false = materialize_false;
+      break;
+    case Expression::kTest:
+      *if_true = true_label_;
+      *if_false = false_label_;
+      break;
+    case Expression::kValueTest:
+      *if_true = materialize_true;
+      *if_false = false_label_;
+      break;
+    case Expression::kTestValue:
+      *if_true = true_label_;
+      *if_false = materialize_false;
+      break;
+  }
+}
+
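
PrepareTest centralizes how an expression that produces a boolean picks its branch
targets from the surrounding expression context. A standalone analog, reduced to
three contexts for brevity (the Label type and function name here are invented for
illustration):

    struct Label {};
    enum Context { kEffect, kValue, kTest };

    void PickTargets(Context ctx,
                     Label* materialize_true, Label* materialize_false,
                     Label* true_label, Label* false_label,
                     Label** if_true, Label** if_false) {
      switch (ctx) {
        case kEffect:  // Result unused: both outcomes fall through together.
          *if_true = *if_false = materialize_true;
          break;
        case kValue:   // Result needed: branch to the materialization code.
          *if_true = materialize_true;
          *if_false = materialize_false;
          break;
        case kTest:    // Result branched on: jump straight to the consumers.
          *if_true = true_label;
          *if_false = false_label;
          break;
      }
    }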
 
 void FullCodeGenerator::Apply(Expression::Context context,
                               Label* materialize_true,
@@ -396,19 +447,25 @@
 
     case Expression::kValue: {
       Label done;
-      __ bind(materialize_true);
-      __ mov(result_register(), Operand(Factory::true_value()));
-      __ jmp(&done);
-      __ bind(materialize_false);
-      __ mov(result_register(), Operand(Factory::false_value()));
-      __ bind(&done);
       switch (location_) {
         case kAccumulator:
+          __ bind(materialize_true);
+          __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
+          __ jmp(&done);
+          __ bind(materialize_false);
+          __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
           break;
         case kStack:
-          __ push(result_register());
+          __ bind(materialize_true);
+          __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+          __ push(ip);
+          __ jmp(&done);
+          __ bind(materialize_false);
+          __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+          __ push(ip);
           break;
       }
+      __ bind(&done);
       break;
     }
 
@@ -417,12 +474,13 @@
 
     case Expression::kValueTest:
       __ bind(materialize_true);
-      __ mov(result_register(), Operand(Factory::true_value()));
       switch (location_) {
         case kAccumulator:
+          __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
           break;
         case kStack:
-          __ push(result_register());
+          __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+          __ push(ip);
           break;
       }
       __ jmp(true_label_);
@@ -430,12 +488,13 @@
 
     case Expression::kTestValue:
       __ bind(materialize_false);
-      __ mov(result_register(), Operand(Factory::false_value()));
       switch (location_) {
         case kAccumulator:
+          __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
           break;
         case kStack:
-          __ push(result_register());
+          __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+          __ push(ip);
           break;
       }
       __ jmp(false_label_);
@@ -444,6 +503,68 @@
 }
 
 
+// Convert constant control flow (true or false) to the result expected for
+// a given expression context.
+void FullCodeGenerator::Apply(Expression::Context context, bool flag) {
+  switch (context) {
+    case Expression::kUninitialized:
+      UNREACHABLE();
+      break;
+    case Expression::kEffect:
+      break;
+    case Expression::kValue: {
+      Heap::RootListIndex value_root_index =
+          flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+      switch (location_) {
+        case kAccumulator:
+          __ LoadRoot(result_register(), value_root_index);
+          break;
+        case kStack:
+          __ LoadRoot(ip, value_root_index);
+          __ push(ip);
+          break;
+      }
+      break;
+    }
+    case Expression::kTest:
+      __ b(flag ? true_label_ : false_label_);
+      break;
+    case Expression::kTestValue:
+      switch (location_) {
+        case kAccumulator:
+          // If value is false it's needed.
+          if (!flag) __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
+          break;
+        case kStack:
+          // If value is false it's needed.
+          if (!flag) {
+            __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+            __ push(ip);
+          }
+          break;
+      }
+      __ b(flag ? true_label_ : false_label_);
+      break;
+    case Expression::kValueTest:
+      switch (location_) {
+        case kAccumulator:
+          // If value is true it's needed.
+          if (flag) __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
+          break;
+        case kStack:
+          // If value is true it's needed.
+          if (flag) {
+            __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+            __ push(ip);
+          }
+          break;
+      }
+      __ b(flag ? true_label_ : false_label_);
+      break;
+  }
+}
+
+
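
Apply(context, bool) is the constant-folded counterpart: when the tested value is
known at compile time no comparison is emitted, only the one materialization that
can still be observed plus a single unconditional branch. A sketch of the folding
(illustrative, not V8 API):

    template <bool kFlag>
    void ApplyConstant(void (*true_target)(), void (*false_target)()) {
      if (kFlag) true_target(); else false_target();  // Folds to one call.
    }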
 void FullCodeGenerator::DoTest(Expression::Context context) {
   // The value to test is pushed on the stack, and duplicated on the stack
   // if necessary (for value/test and test/value contexts).
@@ -549,22 +670,23 @@
 }
 
 
-void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
+void FullCodeGenerator::EmitDeclaration(Variable* variable,
+                                        Variable::Mode mode,
+                                        FunctionLiteral* function) {
   Comment cmnt(masm_, "[ Declaration");
-  Variable* var = decl->proxy()->var();
-  ASSERT(var != NULL);  // Must have been resolved.
-  Slot* slot = var->slot();
-  Property* prop = var->AsProperty();
+  ASSERT(variable != NULL);  // Must have been resolved.
+  Slot* slot = variable->slot();
+  Property* prop = variable->AsProperty();
 
   if (slot != NULL) {
     switch (slot->type()) {
       case Slot::PARAMETER:
       case Slot::LOCAL:
-        if (decl->mode() == Variable::CONST) {
+        if (mode == Variable::CONST) {
           __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
           __ str(ip, MemOperand(fp, SlotOffset(slot)));
-        } else if (decl->fun() != NULL) {
-          VisitForValue(decl->fun(), kAccumulator);
+        } else if (function != NULL) {
+          VisitForValue(function, kAccumulator);
           __ str(result_register(), MemOperand(fp, SlotOffset(slot)));
         }
         break;
@@ -574,7 +696,7 @@
         // this specific context.
 
         // The variable in the decl always resides in the current context.
-        ASSERT_EQ(0, scope()->ContextChainLength(var->scope()));
+        ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
         if (FLAG_debug_code) {
           // Check if we have the correct context pointer.
           __ ldr(r1,
@@ -582,12 +704,12 @@
           __ cmp(r1, cp);
           __ Check(eq, "Unexpected declaration in current context.");
         }
-        if (decl->mode() == Variable::CONST) {
+        if (mode == Variable::CONST) {
           __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
           __ str(ip, CodeGenerator::ContextOperand(cp, slot->index()));
           // No write barrier since the_hole_value is in old space.
-        } else if (decl->fun() != NULL) {
-          VisitForValue(decl->fun(), kAccumulator);
+        } else if (function != NULL) {
+          VisitForValue(function, kAccumulator);
           __ str(result_register(),
                  CodeGenerator::ContextOperand(cp, slot->index()));
           int offset = Context::SlotOffset(slot->index());
@@ -599,27 +721,27 @@
         break;
 
       case Slot::LOOKUP: {
-        __ mov(r2, Operand(var->name()));
+        __ mov(r2, Operand(variable->name()));
         // Declaration nodes are always introduced in one of two modes.
-        ASSERT(decl->mode() == Variable::VAR ||
-               decl->mode() == Variable::CONST);
+        ASSERT(mode == Variable::VAR ||
+               mode == Variable::CONST);
         PropertyAttributes attr =
-            (decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
+            (mode == Variable::VAR) ? NONE : READ_ONLY;
         __ mov(r1, Operand(Smi::FromInt(attr)));
         // Push initial value, if any.
         // Note: For variables we must not push an initial value (such as
         // 'undefined') because we may have a (legal) redeclaration and we
         // must not destroy the current value.
-        if (decl->mode() == Variable::CONST) {
+        if (mode == Variable::CONST) {
           __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
-          __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
-        } else if (decl->fun() != NULL) {
-          __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit());
+          __ Push(cp, r2, r1, r0);
+        } else if (function != NULL) {
+          __ Push(cp, r2, r1);
           // Push initial value for function declaration.
-          VisitForValue(decl->fun(), kStack);
+          VisitForValue(function, kStack);
         } else {
           __ mov(r0, Operand(Smi::FromInt(0)));  // No initial value!
-          __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
+          __ Push(cp, r2, r1, r0);
         }
         __ CallRuntime(Runtime::kDeclareContextSlot, 4);
         break;
@@ -627,53 +749,275 @@
     }
 
   } else if (prop != NULL) {
-    if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
+    if (function != NULL || mode == Variable::CONST) {
       // We are declaring a function or constant that rewrites to a
       // property.  Use (keyed) IC to set the initial value.
       VisitForValue(prop->obj(), kStack);
-      VisitForValue(prop->key(), kStack);
-
-      if (decl->fun() != NULL) {
-        VisitForValue(decl->fun(), kAccumulator);
+      if (function != NULL) {
+        VisitForValue(prop->key(), kStack);
+        VisitForValue(function, kAccumulator);
+        __ pop(r1);  // Key.
       } else {
+        VisitForValue(prop->key(), kAccumulator);
+        __ mov(r1, result_register());  // Key.
         __ LoadRoot(result_register(), Heap::kTheHoleValueRootIndex);
       }
+      __ pop(r2);  // Receiver.
 
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
       __ Call(ic, RelocInfo::CODE_TARGET);
-
-      // Value in r0 is ignored (declarations are statements).  Receiver
-      // and key on stack are discarded.
-      __ Drop(2);
+      // Value in r0 is ignored (declarations are statements).
     }
   }
 }
 
 
+void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
+  EmitDeclaration(decl->proxy()->var(), decl->mode(), decl->fun());
+}
+
+
 void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.
   // The context is the first argument.
   __ mov(r1, Operand(pairs));
   __ mov(r0, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
-  __ stm(db_w, sp, cp.bit() | r1.bit() | r0.bit());
+  __ Push(cp, r1, r0);
   __ CallRuntime(Runtime::kDeclareGlobals, 3);
   // Return value is ignored.
 }
 
 
-void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
-  Comment cmnt(masm_, "[ FunctionLiteral");
+void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+  Comment cmnt(masm_, "[ SwitchStatement");
+  Breakable nested_statement(this, stmt);
+  SetStatementPosition(stmt);
+  // Keep the switch value on the stack until a case matches.
+  VisitForValue(stmt->tag(), kStack);
 
-  // Build the shared function info and instantiate the function based
-  // on it.
-  Handle<SharedFunctionInfo> function_info =
-      Compiler::BuildFunctionInfo(expr, script(), this);
-  if (HasStackOverflow()) return;
+  ZoneList<CaseClause*>* clauses = stmt->cases();
+  CaseClause* default_clause = NULL;  // Can occur anywhere in the list.
 
-  // Create a new closure.
-  __ mov(r0, Operand(function_info));
-  __ stm(db_w, sp, cp.bit() | r0.bit());
-  __ CallRuntime(Runtime::kNewClosure, 2);
+  Label next_test;  // Recycled for each test.
+  // Compile all the tests with branches to their bodies.
+  for (int i = 0; i < clauses->length(); i++) {
+    CaseClause* clause = clauses->at(i);
+    // The default is not a test, but remember it as the final fall-through.
+    if (clause->is_default()) {
+      default_clause = clause;
+      continue;
+    }
+
+    Comment cmnt(masm_, "[ Case comparison");
+    __ bind(&next_test);
+    next_test.Unuse();
+
+    // Compile the label expression.
+    VisitForValue(clause->label(), kAccumulator);
+
+    // Perform the comparison as if via '==='.  The comparison stub expects
+    // the smi vs. smi case to be handled before it is called.
+    Label slow_case;
+    __ ldr(r1, MemOperand(sp, 0));  // Switch value.
+    __ mov(r2, r1);
+    __ orr(r2, r2, r0);
+    __ tst(r2, Operand(kSmiTagMask));
+    __ b(ne, &slow_case);
+    __ cmp(r1, r0);
+    __ b(ne, &next_test);
+    __ Drop(1);  // Switch value is no longer needed.
+    __ b(clause->body_target()->entry_label());
+
+    __ bind(&slow_case);
+    CompareStub stub(eq, true);
+    __ CallStub(&stub);
+    __ tst(r0, r0);
+    __ b(ne, &next_test);
+    __ Drop(1);  // Switch value is no longer needed.
+    __ b(clause->body_target()->entry_label());
+  }
+
+  // Discard the test value and jump to the default if present, otherwise to
+  // the end of the statement.
+  __ bind(&next_test);
+  __ Drop(1);  // Switch value is no longer needed.
+  if (default_clause == NULL) {
+    __ b(nested_statement.break_target());
+  } else {
+    __ b(default_clause->body_target()->entry_label());
+  }
+
+  // Compile all the case bodies.
+  for (int i = 0; i < clauses->length(); i++) {
+    Comment cmnt(masm_, "[ Case body");
+    CaseClause* clause = clauses->at(i);
+    __ bind(clause->body_target()->entry_label());
+    VisitStatements(clause->statements());
+  }
+
+  __ bind(nested_statement.break_target());
+}
+
+
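
The switch statement compiles to a linear chain of '===' tests against the tag
kept on the stack, each with a smi-only fast path before the generic CompareStub,
followed by all bodies in source order so that fall-through works. The resulting
control-flow shape, as a plain C++ sketch (illustrative only):

    int SwitchShape(int tag, int c0, int c1) {
      if (tag == c0) goto body0;  // Test 0: smi fast path, then stub call.
      if (tag == c1) goto body1;  // Test 1.
      goto done;                  // No default clause: jump to break target.
     body0:                       // Bodies fall through, as in JavaScript.
      /* case c0 statements */
     body1:
      /* case c1 statements */
     done:
      return tag;
    }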
+void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+  Comment cmnt(masm_, "[ ForInStatement");
+  SetStatementPosition(stmt);
+
+  Label loop, exit;
+  ForIn loop_statement(this, stmt);
+  increment_loop_depth();
+
+  // Get the object to enumerate over. Both SpiderMonkey and JSC
+  // ignore null and undefined in contrast to the specification; see
+  // ECMA-262 section 12.6.4.
+  VisitForValue(stmt->enumerable(), kAccumulator);
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(r0, ip);
+  __ b(eq, &exit);
+  __ LoadRoot(ip, Heap::kNullValueRootIndex);
+  __ cmp(r0, ip);
+  __ b(eq, &exit);
+
+  // Convert the object to a JS object.
+  Label convert, done_convert;
+  __ BranchOnSmi(r0, &convert);
+  __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
+  __ b(hs, &done_convert);
+  __ bind(&convert);
+  __ push(r0);
+  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
+  __ bind(&done_convert);
+  __ push(r0);
+
+  // TODO(kasperl): Check cache validity in generated code. This is a
+  // fast case for the JSObject::IsSimpleEnum cache validity
+  // checks. If we cannot guarantee cache validity, call the runtime
+  // system to check cache validity or get the property names in a
+  // fixed array.
+
+  // Get the set of properties to enumerate.
+  __ push(r0);  // Duplicate the enumerable object on the stack.
+  __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+
+  // If we got a map from the runtime call, we can do a fast
+  // modification check. Otherwise, we got a fixed array, and we have
+  // to do a slow check.
+  Label fixed_array;
+  __ mov(r2, r0);
+  __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
+  __ cmp(r1, ip);
+  __ b(ne, &fixed_array);
+
+  // We got a map in register r0. Get the enumeration cache from it.
+  __ ldr(r1, FieldMemOperand(r0, Map::kInstanceDescriptorsOffset));
+  __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
+  __ ldr(r2, FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+  // Set up the four remaining stack slots.
+  __ push(r0);  // Map.
+  __ ldr(r1, FieldMemOperand(r2, FixedArray::kLengthOffset));
+  __ mov(r1, Operand(r1, LSL, kSmiTagSize));
+  __ mov(r0, Operand(Smi::FromInt(0)));
+  // Push enumeration cache, enumeration cache length (as smi) and zero.
+  __ Push(r2, r1, r0);
+  __ jmp(&loop);
+
+  // We got a fixed array in register r0. Iterate through that.
+  __ bind(&fixed_array);
+  __ mov(r1, Operand(Smi::FromInt(0)));  // Map (0) - force slow check.
+  __ Push(r1, r0);
+  __ ldr(r1, FieldMemOperand(r0, FixedArray::kLengthOffset));
+  __ mov(r1, Operand(r1, LSL, kSmiTagSize));
+  __ mov(r0, Operand(Smi::FromInt(0)));
+  __ Push(r1, r0);  // Fixed array length (as smi) and initial index.
+
+  // Generate code for doing the condition check.
+  __ bind(&loop);
+  // Load the current count to r0, load the length to r1.
+  __ Ldrd(r0, r1, MemOperand(sp, 0 * kPointerSize));
+  __ cmp(r0, r1);  // Compare to the array length.
+  __ b(hs, loop_statement.break_target());
+
+  // Get the current entry of the array into register r3.
+  __ ldr(r2, MemOperand(sp, 2 * kPointerSize));
+  __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+
+  // Get the expected map from the stack or a zero map in the
+  // permanent slow case into register r2.
+  __ ldr(r2, MemOperand(sp, 3 * kPointerSize));
+
+  // Check if the expected map still matches that of the enumerable.
+  // If not, we have to filter the key.
+  Label update_each;
+  __ ldr(r1, MemOperand(sp, 4 * kPointerSize));
+  __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
+  __ cmp(r4, Operand(r2));
+  __ b(eq, &update_each);
+
+  // Convert the entry to a string or null if it isn't a property
+  // anymore. If the property has been removed while iterating, we
+  // just skip it.
+  __ push(r1);  // Enumerable.
+  __ push(r3);  // Current entry.
+  __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS);
+  __ mov(r3, Operand(r0));
+  __ LoadRoot(ip, Heap::kNullValueRootIndex);
+  __ cmp(r3, ip);
+  __ b(eq, loop_statement.continue_target());
+
+  // Update the 'each' property or variable from the possibly filtered
+  // entry in register r3.
+  __ bind(&update_each);
+  __ mov(result_register(), r3);
+  // Perform the assignment as if via '='.
+  EmitAssignment(stmt->each());
+
+  // Generate code for the body of the loop.
+  Label stack_limit_hit, stack_check_done;
+  Visit(stmt->body());
+
+  __ StackLimitCheck(&stack_limit_hit);
+  __ bind(&stack_check_done);
+
+  // Generate code for going to the next element by incrementing
+  // the index (smi) stored on top of the stack.
+  __ bind(loop_statement.continue_target());
+  __ pop(r0);
+  __ add(r0, r0, Operand(Smi::FromInt(1)));
+  __ push(r0);
+  __ b(&loop);
+
+  // Slow case for the stack limit check.
+  StackCheckStub stack_check_stub;
+  __ bind(&stack_limit_hit);
+  __ CallStub(&stack_check_stub);
+  __ b(&stack_check_done);
+
+  // Remove the pointers stored on the stack.
+  __ bind(loop_statement.break_target());
+  __ Drop(5);
+
+  // Exit and decrement the loop depth.
+  __ bind(&exit);
+  decrement_loop_depth();
+}
+
+
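
The for-in loop keeps five words live on the stack, and the fixed offsets used
above (MemOperand(sp, 0) through 4 * kPointerSize) decode as follows; the slot
names are invented for illustration:

    enum ForInStackSlot {       // Offset in words from sp:
      kIndexSlot      = 0,      // Current index (smi), bumped each iteration.
      kLengthSlot     = 1,      // Cache/array length (smi), the loop bound.
      kCacheSlot      = 2,      // Enum cache or fixed array of property names.
      kMapSlot        = 3,      // Expected map, or smi 0 to force the slow path.
      kEnumerableSlot = 4       // The object being enumerated.
    };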
+void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info) {
+  // Use the fast case closure allocation code that allocates in new
+  // space for nested functions that don't need literals cloning.
+  if (scope()->is_function_scope() && info->num_literals() == 0) {
+    FastNewClosureStub stub;
+    __ mov(r0, Operand(info));
+    __ push(r0);
+    __ CallStub(&stub);
+  } else {
+    __ mov(r0, Operand(info));
+    __ Push(cp, r0);
+    __ CallRuntime(Runtime::kNewClosure, 2);
+  }
   Apply(context_, r0);
 }
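
Closure allocation gains a fast path: a closure in a plain function scope with no
literals to clone can be allocated in new space by FastNewClosureStub rather than
by a runtime call. The decision predicate, as a standalone sketch (simplified):

    bool UseFastClosureAllocation(bool is_function_scope, int num_literals) {
      // Nothing to clone and a plain function scope: the stub suffices;
      // everything else still goes through Runtime::kNewClosure.
      return is_function_scope && num_literals == 0;
    }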
 
@@ -695,18 +1039,17 @@
   if (var->is_global() && !var->is_this()) {
     Comment cmnt(masm_, "Global variable");
     // Use inline caching. Variable name is passed in r2 and the global
-    // object on the stack.
+    // object (receiver) in r0.
     __ ldr(r0, CodeGenerator::GlobalObject());
-    __ push(r0);
     __ mov(r2, Operand(var->name()));
     Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
     __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
-    DropAndApply(1, context, r0);
+    Apply(context, r0);
 
   } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
     Comment cmnt(masm_, "Lookup slot");
     __ mov(r1, Operand(var->name()));
-    __ stm(db_w, sp, cp.bit() | r1.bit());  // Context and name.
+    __ Push(cp, r1);  // Context and name.
     __ CallRuntime(Runtime::kLoadContextSlot, 2);
     Apply(context, r0);
 
@@ -714,8 +1057,21 @@
     Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
                             ? "Context slot"
                             : "Stack slot");
-    Apply(context, slot);
-
+    if (var->mode() == Variable::CONST) {
+      // Constants may be the hole value if they have not been initialized.
+      // Unhole them.
+      Label done;
+      MemOperand slot_operand = EmitSlotSearch(slot, r0);
+      __ ldr(r0, slot_operand);
+      __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+      __ cmp(r0, ip);
+      __ b(ne, &done);
+      __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+      __ bind(&done);
+      Apply(context, r0);
+    } else {
+      Apply(context, slot);
+    }
   } else {
     Comment cmnt(masm_, "Rewritten parameter");
     ASSERT_NOT_NULL(property);
@@ -851,6 +1207,10 @@
 
 void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   Comment cmnt(masm_, "[ ArrayLiteral");
+
+  ZoneList<Expression*>* subexprs = expr->values();
+  int length = subexprs->length();
+
   __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
   __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
@@ -858,16 +1218,18 @@
   __ Push(r3, r2, r1);
   if (expr->depth() > 1) {
     __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
-  } else {
+  } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
     __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+  } else {
+    FastCloneShallowArrayStub stub(length);
+    __ CallStub(&stub);
   }
 
   bool result_saved = false;  // Is the result saved to the stack?
 
   // Emit code to evaluate all the non-constant subexpressions and to store
   // them into the newly cloned array.
-  ZoneList<Expression*>* subexprs = expr->values();
-  for (int i = 0, len = subexprs->length(); i < len; i++) {
+  for (int i = 0; i < length; i++) {
     Expression* subexpr = subexprs->at(i);
     // If the subexpression is a literal or a simple materialized literal it
     // is already set in the cloned array.
@@ -904,7 +1266,13 @@
 
 void FullCodeGenerator::VisitAssignment(Assignment* expr) {
   Comment cmnt(masm_, "[ Assignment");
-  ASSERT(expr->op() != Token::INIT_CONST);
+  // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
+  // on the left-hand side.
+  if (!expr->target()->IsValidLeftHandSide()) {
+    VisitForEffect(expr->target());
+    return;
+  }
+
   // Left-hand side can only be a property, a global or a (parameter or local)
   // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
   enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
@@ -984,6 +1352,7 @@
   switch (assign_type) {
     case VARIABLE:
       EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+                             expr->op(),
                              context_);
       break;
     case NAMED_PROPERTY:
@@ -1000,7 +1369,7 @@
   SetSourcePosition(prop->position());
   Literal* key = prop->key()->AsLiteral();
   __ mov(r2, Operand(key->handle()));
-  __ ldr(r0, MemOperand(sp, 0));
+  // Call the load IC, which expects the receiver in r0 and the property
+  // name in r2.
   Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
   __ Call(ic, RelocInfo::CODE_TARGET);
 }
@@ -1023,15 +1392,64 @@
 }
 
 
+void FullCodeGenerator::EmitAssignment(Expression* expr) {
+  // Invalid left-hand sides are rewritten to have a 'throw
+  // ReferenceError' on the left-hand side.
+  if (!expr->IsValidLeftHandSide()) {
+    VisitForEffect(expr);
+    return;
+  }
+
+  // Left-hand side can only be a property, a global or a (parameter or local)
+  // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+  enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+  LhsKind assign_type = VARIABLE;
+  Property* prop = expr->AsProperty();
+  if (prop != NULL) {
+    assign_type = (prop->key()->IsPropertyName())
+        ? NAMED_PROPERTY
+        : KEYED_PROPERTY;
+  }
+
+  switch (assign_type) {
+    case VARIABLE: {
+      Variable* var = expr->AsVariableProxy()->var();
+      EmitVariableAssignment(var, Token::ASSIGN, Expression::kEffect);
+      break;
+    }
+    case NAMED_PROPERTY: {
+      __ push(r0);  // Preserve value.
+      VisitForValue(prop->obj(), kAccumulator);
+      __ mov(r1, r0);
+      __ pop(r0);  // Restore value.
+      __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
+      Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+      __ Call(ic, RelocInfo::CODE_TARGET);
+      break;
+    }
+    case KEYED_PROPERTY: {
+      __ push(r0);  // Preserve value.
+      VisitForValue(prop->obj(), kStack);
+      VisitForValue(prop->key(), kAccumulator);
+      __ mov(r1, r0);
+      __ pop(r2);
+      __ pop(r0);  // Restore value.
+      Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+      __ Call(ic, RelocInfo::CODE_TARGET);
+      break;
+    }
+  }
+}
+
+
 void FullCodeGenerator::EmitVariableAssignment(Variable* var,
+                                               Token::Value op,
                                                Expression::Context context) {
-  // Three main cases: global variables, lookup slots, and all other
-  // types of slots.  Left-hand-side parameters that rewrite to
-  // explicit property accesses do not reach here.
+  // Left-hand sides that rewrite to explicit property accesses do not reach
+  // here.
   ASSERT(var != NULL);
   ASSERT(var->is_global() || var->slot() != NULL);
 
-  Slot* slot = var->slot();
   if (var->is_global()) {
     ASSERT(!var->is_this());
     // Assignment to a global variable.  Use inline caching for the
@@ -1042,43 +1460,61 @@
     Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
     __ Call(ic, RelocInfo::CODE_TARGET);
 
-  } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
-    __ push(result_register());  // Value.
-    __ mov(r1, Operand(var->name()));
-    __ stm(db_w, sp, cp.bit() | r1.bit());  // Context and name.
-    __ CallRuntime(Runtime::kStoreContextSlot, 3);
-
-  } else if (var->slot() != NULL) {
+  } else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
+    // Perform the assignment for non-const variables and for initialization
+    // of const variables.  Const assignments are simply skipped.
+    Label done;
     Slot* slot = var->slot();
     switch (slot->type()) {
-      case Slot::LOCAL:
       case Slot::PARAMETER:
+      case Slot::LOCAL:
+        if (op == Token::INIT_CONST) {
+          // Detect const reinitialization by checking for the hole value.
+          __ ldr(r1, MemOperand(fp, SlotOffset(slot)));
+          __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+          __ cmp(r1, ip);
+          __ b(ne, &done);
+        }
+        // Perform the assignment.
         __ str(result_register(), MemOperand(fp, SlotOffset(slot)));
         break;
 
       case Slot::CONTEXT: {
         MemOperand target = EmitSlotSearch(slot, r1);
+        if (op == Token::INIT_CONST) {
+          // Detect const reinitialization by checking for the hole value.
+          __ ldr(r2, target);
+          __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+          __ cmp(r2, ip);
+          __ b(ne, &done);
+        }
+        // Perform the assignment and issue the write barrier.
         __ str(result_register(), target);
-
         // RecordWrite may destroy all its register arguments.
         __ mov(r3, result_register());
         int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
-
         __ mov(r2, Operand(offset));
         __ RecordWrite(r1, r2, r3);
         break;
       }
 
       case Slot::LOOKUP:
-        UNREACHABLE();
+        // Call the runtime for the assignment.  The runtime will ignore
+        // const reinitialization.
+        __ push(r0);  // Value.
+        __ mov(r0, Operand(slot->var()->name()));
+        __ Push(cp, r0);  // Context and name.
+        if (op == Token::INIT_CONST) {
+          // The runtime will ignore const redeclaration.
+          __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+        } else {
+          __ CallRuntime(Runtime::kStoreContextSlot, 3);
+        }
         break;
     }
-
-  } else {
-    // Variables rewritten as properties are not treated as variables in
-    // assignments.
-    UNREACHABLE();
+    __ bind(&done);
   }
+
   Apply(context, result_register());
 }
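
Together with the unholing on the load side earlier in this file, const is now
handled slot-by-slot: a const slot starts out holding the hole, the first
INIT_CONST store fills it, and later stores are skipped. A standalone analog of
both sides (Value and the constants are simplified stand-ins for tagged values):

    struct Value { bool is_hole; int payload; };
    const Value kTheHole   = { true, 0 };
    const Value kUndefined = { false, 0 };  // Stand-in for undefined.

    // Load side: a const slot read before initialization yields undefined.
    Value LoadConst(const Value& slot) {
      return slot.is_hole ? kUndefined : slot;
    }

    // Store side: only the initializing store to a still-holey slot sticks.
    void StoreConst(Value* slot, const Value& v, bool is_init_const) {
      if (is_init_const && !slot->is_hole) return;  // Reinitialization: skip.
      *slot = v;
    }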
 
@@ -1103,6 +1539,8 @@
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
   __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
+  // Load the receiver into r1. Leave a copy on the stack if it is needed
+  // later for reverting the receiver to fast case.
   if (expr->ends_initialization_block()) {
     __ ldr(r1, MemOperand(sp));
   } else {
@@ -1115,7 +1553,8 @@
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
     __ push(r0);  // Result of assignment, saved even if not needed.
-    __ ldr(ip, MemOperand(sp, kPointerSize));  // Receiver is under value.
+    // Receiver is under the result value.
+    __ ldr(ip, MemOperand(sp, kPointerSize));
     __ push(ip);
     __ CallRuntime(Runtime::kToFastProperties, 1);
     __ pop(r0);
@@ -1143,21 +1582,30 @@
 
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
+  __ pop(r1);  // Key.
+  // Load the receiver into r2. Leave a copy on the stack if it is needed
+  // later for reverting the receiver to fast case.
+  if (expr->ends_initialization_block()) {
+    __ ldr(r2, MemOperand(sp));
+  } else {
+    __ pop(r2);
+  }
+
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
   __ Call(ic, RelocInfo::CODE_TARGET);
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
     __ push(r0);  // Result of assignment, saved even if not needed.
-    // Receiver is under the key and value.
-    __ ldr(ip, MemOperand(sp, 2 * kPointerSize));
+    // Receiver is under the result value.
+    __ ldr(ip, MemOperand(sp, kPointerSize));
     __ push(ip);
     __ CallRuntime(Runtime::kToFastProperties, 1);
     __ pop(r0);
+    DropAndApply(1, context_, r0);
+  } else {
+    Apply(context_, r0);
   }
-
-  // Receiver and key are still on stack.
-  DropAndApply(2, context_, r0);
 }
 
 
@@ -1165,14 +1613,12 @@
   Comment cmnt(masm_, "[ Property");
   Expression* key = expr->key();
 
-  // Evaluate receiver.
-  VisitForValue(expr->obj(), kStack);
-
   if (key->IsPropertyName()) {
+    VisitForValue(expr->obj(), kAccumulator);
     EmitNamedPropertyLoad(expr);
-    // Drop receiver left on the stack by IC.
-    DropAndApply(1, context_, r0);
+    Apply(context_, r0);
   } else {
+    VisitForValue(expr->obj(), kStack);
     VisitForValue(expr->key(), kAccumulator);
     __ pop(r1);
     EmitKeyedPropertyLoad(expr);
@@ -1211,7 +1657,8 @@
   }
   // Record source position for debugger.
   SetSourcePosition(expr->position());
-  CallFunctionStub stub(arg_count, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
+  InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+  CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
   __ CallStub(&stub);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -1225,8 +1672,51 @@
   Variable* var = fun->AsVariableProxy()->AsVariable();
 
   if (var != NULL && var->is_possibly_eval()) {
-    // Call to the identifier 'eval'.
-    UNREACHABLE();
+    // In a call to eval, we first call %ResolvePossiblyDirectEval to
+    // resolve the function we need to call and the receiver of the
+    // call.  Then we call the resolved function using the given
+    // arguments.
+    VisitForValue(fun, kStack);
+    __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+    __ push(r2);  // Reserved receiver slot.
+
+    // Push the arguments.
+    ZoneList<Expression*>* args = expr->arguments();
+    int arg_count = args->length();
+    for (int i = 0; i < arg_count; i++) {
+      VisitForValue(args->at(i), kStack);
+    }
+
+    // Push copy of the function - found below the arguments.
+    __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+    __ push(r1);
+
+    // Push copy of the first argument or undefined if it doesn't exist.
+    if (arg_count > 0) {
+      __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
+      __ push(r1);
+    } else {
+      __ push(r2);
+    }
+
+    // Push the receiver of the enclosing function and do runtime call.
+    __ ldr(r1, MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize));
+    __ push(r1);
+    __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+
+    // The runtime call returns a pair of values in r0 (function) and
+    // r1 (receiver). Touch up the stack with the right values.
+    __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
+    __ str(r1, MemOperand(sp, arg_count * kPointerSize));
+
+    // Record source position for debugger.
+    SetSourcePosition(expr->position());
+    InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+    CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
+    __ CallStub(&stub);
+    // Restore context register.
+    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    DropAndApply(1, context_, r0);
   } else if (var != NULL && !var->is_this() && var->is_global()) {
     // Push global object as receiver for the call IC.
     __ ldr(r0, CodeGenerator::GlobalObject());
@@ -1234,8 +1724,16 @@
     EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
   } else if (var != NULL && var->slot() != NULL &&
              var->slot()->type() == Slot::LOOKUP) {
-    // Call to a lookup slot.
-    UNREACHABLE();
+    // Call to a lookup slot (dynamically introduced variable).  Call the
+    // runtime to find the function to call (returned in r0) and the object
+    // holding it (returned in r1).
+    __ push(context_register());
+    __ mov(r2, Operand(var->name()));
+    __ push(r2);
+    __ CallRuntime(Runtime::kLoadContextSlot, 2);
+    __ push(r0);  // Function.
+    __ push(r1);  // Receiver.
+    EmitCallWithStub(expr);
   } else if (fun->AsProperty() != NULL) {
     // Call to an object property.
     Property* prop = fun->AsProperty();
@@ -1331,7 +1829,720 @@
 }
 
 
+void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
+  Handle<String> name = expr->name();
+  if (strcmp("_IsSmi", *name->ToCString()) == 0) {
+    EmitIsSmi(expr->arguments());
+  } else if (strcmp("_IsNonNegativeSmi", *name->ToCString()) == 0) {
+    EmitIsNonNegativeSmi(expr->arguments());
+  } else if (strcmp("_IsObject", *name->ToCString()) == 0) {
+    EmitIsObject(expr->arguments());
+  } else if (strcmp("_IsUndetectableObject", *name->ToCString()) == 0) {
+    EmitIsUndetectableObject(expr->arguments());
+  } else if (strcmp("_IsFunction", *name->ToCString()) == 0) {
+    EmitIsFunction(expr->arguments());
+  } else if (strcmp("_IsArray", *name->ToCString()) == 0) {
+    EmitIsArray(expr->arguments());
+  } else if (strcmp("_IsRegExp", *name->ToCString()) == 0) {
+    EmitIsRegExp(expr->arguments());
+  } else if (strcmp("_IsConstructCall", *name->ToCString()) == 0) {
+    EmitIsConstructCall(expr->arguments());
+  } else if (strcmp("_ObjectEquals", *name->ToCString()) == 0) {
+    EmitObjectEquals(expr->arguments());
+  } else if (strcmp("_Arguments", *name->ToCString()) == 0) {
+    EmitArguments(expr->arguments());
+  } else if (strcmp("_ArgumentsLength", *name->ToCString()) == 0) {
+    EmitArgumentsLength(expr->arguments());
+  } else if (strcmp("_ClassOf", *name->ToCString()) == 0) {
+    EmitClassOf(expr->arguments());
+  } else if (strcmp("_Log", *name->ToCString()) == 0) {
+    EmitLog(expr->arguments());
+  } else if (strcmp("_RandomHeapNumber", *name->ToCString()) == 0) {
+    EmitRandomHeapNumber(expr->arguments());
+  } else if (strcmp("_SubString", *name->ToCString()) == 0) {
+    EmitSubString(expr->arguments());
+  } else if (strcmp("_RegExpExec", *name->ToCString()) == 0) {
+    EmitRegExpExec(expr->arguments());
+  } else if (strcmp("_ValueOf", *name->ToCString()) == 0) {
+    EmitValueOf(expr->arguments());
+  } else if (strcmp("_SetValueOf", *name->ToCString()) == 0) {
+    EmitSetValueOf(expr->arguments());
+  } else if (strcmp("_NumberToString", *name->ToCString()) == 0) {
+    EmitNumberToString(expr->arguments());
+  } else if (strcmp("_CharFromCode", *name->ToCString()) == 0) {
+    EmitCharFromCode(expr->arguments());
+  } else if (strcmp("_FastCharCodeAt", *name->ToCString()) == 0) {
+    EmitFastCharCodeAt(expr->arguments());
+  } else if (strcmp("_StringAdd", *name->ToCString()) == 0) {
+    EmitStringAdd(expr->arguments());
+  } else if (strcmp("_StringCompare", *name->ToCString()) == 0) {
+    EmitStringCompare(expr->arguments());
+  } else if (strcmp("_MathPow", *name->ToCString()) == 0) {
+    EmitMathPow(expr->arguments());
+  } else if (strcmp("_MathSin", *name->ToCString()) == 0) {
+    EmitMathSin(expr->arguments());
+  } else if (strcmp("_MathCos", *name->ToCString()) == 0) {
+    EmitMathCos(expr->arguments());
+  } else if (strcmp("_MathSqrt", *name->ToCString()) == 0) {
+    EmitMathSqrt(expr->arguments());
+  } else if (strcmp("_CallFunction", *name->ToCString()) == 0) {
+    EmitCallFunction(expr->arguments());
+  } else if (strcmp("_RegExpConstructResult", *name->ToCString()) == 0) {
+    EmitRegExpConstructResult(expr->arguments());
+  } else if (strcmp("_SwapElements", *name->ToCString()) == 0) {
+    EmitSwapElements(expr->arguments());
+  } else if (strcmp("_GetFromCache", *name->ToCString()) == 0) {
+    EmitGetFromCache(expr->arguments());
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
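
EmitInlineRuntimeCall picks the inline emitter by comparing the %_Name of the call
against each known name in turn. A table-driven equivalent shows the shape more
compactly (an illustrative alternative, not the V8 code):

    #include <cstring>

    struct InlineRuntimeEntry {
      const char* name;       // E.g. "_IsSmi".
      void (*emit)();         // The matching Emit* routine.
    };

    void Dispatch(const InlineRuntimeEntry* table, int count, const char* name) {
      for (int i = 0; i < count; i++) {
        if (std::strcmp(table[i].name, name) == 0) {
          table[i].emit();
          return;
        }
      }
      // UNREACHABLE() in the real code: only known names reach this point.
    }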
+void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ BranchOnSmi(r0, if_true);
+  __ b(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ tst(r0, Operand(kSmiTagMask | 0x80000000));
+  __ b(eq, if_true);
+  __ b(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+  __ BranchOnSmi(r0, if_false);
+  __ LoadRoot(ip, Heap::kNullValueRootIndex);
+  __ cmp(r0, ip);
+  __ b(eq, if_true);
+  __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+  // Undetectable objects behave like undefined when tested with typeof.
+  __ ldrb(r1, FieldMemOperand(r2, Map::kBitFieldOffset));
+  __ tst(r1, Operand(1 << Map::kIsUndetectable));
+  __ b(ne, if_false);
+  __ ldrb(r1, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+  __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
+  __ b(lt, if_false);
+  __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
+  __ b(le, if_true);
+  __ b(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
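
The %_IsObject test mirrors typeof: null counts as an object, undetectable objects
behave like undefined, and the instance type must fall in the JS object range. A
standalone analog (simplified, flag-based stand-ins for the map checks):

    bool IsObjectForTypeof(bool is_smi, bool is_null, bool is_undetectable,
                           int instance_type, int first_js_object_type,
                           int last_js_object_type) {
      if (is_smi) return false;
      if (is_null) return true;           // typeof null is "object".
      if (is_undetectable) return false;  // Behaves like undefined.
      return instance_type >= first_js_object_type &&
             instance_type <= last_js_object_type;
    }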
+
+void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ BranchOnSmi(r0, if_false);
+  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+  __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
+  __ tst(r1, Operand(1 << Map::kIsUndetectable));
+  __ b(ne, if_true);
+  __ b(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ BranchOnSmi(r0, if_false);
+  __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
+  __ b(eq, if_true);
+  __ b(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ BranchOnSmi(r0, if_false);
+  __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
+  __ b(eq, if_true);
+  __ b(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ BranchOnSmi(r0, if_false);
+  __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
+  __ b(eq, if_true);
+  __ b(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  // Get the frame pointer for the calling frame.
+  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+  // Skip the arguments adaptor frame if it exists.
+  Label check_frame_marker;
+  __ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
+  __ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ b(ne, &check_frame_marker);
+  __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
+
+  // Check the marker in the calling frame.
+  __ bind(&check_frame_marker);
+  __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
+  __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+  __ b(eq, if_true);
+  __ b(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 2);
+
+  // Load the two objects into registers and perform the comparison.
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ pop(r1);
+  __ cmp(r0, r1);
+  __ b(eq, if_true);
+  __ b(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  // ArgumentsAccessStub expects the key in r1 and the formal
+  // parameter count in r0.
+  VisitForValue(args->at(0), kAccumulator);
+  __ mov(r1, r0);
+  __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
+  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+  __ CallStub(&stub);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+
+  Label exit;
+  // Get the number of formal parameters.
+  __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
+
+  // Check if the calling frame is an arguments adaptor frame.
+  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
+  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ b(ne, &exit);
+
+  // Arguments adaptor case: Read the arguments length from the
+  // adaptor frame.
+  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+  __ bind(&exit);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+  Label done, null, function, non_function_constructor;
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  // If the object is a smi, we return null.
+  __ BranchOnSmi(r0, &null);
+
+  // Check that the object is a JS object but take special care of JS
+  // functions to make sure they have 'Function' as their class.
+  __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE);  // Map is now in r0.
+  __ b(lt, &null);
+
+  // As long as JS_FUNCTION_TYPE is the last instance type and it is
+  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+  // LAST_JS_OBJECT_TYPE.
+  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+  __ cmp(r1, Operand(JS_FUNCTION_TYPE));
+  __ b(eq, &function);
+
+  // Check if the constructor in the map is a function.
+  __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
+  __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
+  __ b(ne, &non_function_constructor);
+
+  // r0 now contains the constructor function. Grab the
+  // instance class name from there.
+  __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kInstanceClassNameOffset));
+  __ b(&done);
+
+  // Functions have class 'Function'.
+  __ bind(&function);
+  __ LoadRoot(r0, Heap::kfunction_class_symbolRootIndex);
+  __ jmp(&done);
+
+  // Objects with a non-function constructor have class 'Object'.
+  __ bind(&non_function_constructor);
+  __ LoadRoot(r0, Heap::kObject_symbolRootIndex);
+  __ jmp(&done);
+
+  // Non-JS objects have class null.
+  __ bind(&null);
+  __ LoadRoot(r0, Heap::kNullValueRootIndex);
+
+  // All done.
+  __ bind(&done);
+
+  Apply(context_, r0);
+}
+
+
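
%_ClassOf distinguishes four cases: non-objects, functions, objects with a
non-function constructor, and ordinary objects whose class name comes from the
constructor's SharedFunctionInfo. A standalone analog (simplified stand-in types):

    #include <string>

    enum Kind { kSmi, kNonObject, kFunction, kJSObject };

    std::string ClassOf(Kind kind, bool constructor_is_function,
                        const std::string& instance_class_name) {
      if (kind == kSmi || kind == kNonObject) return "null";
      if (kind == kFunction) return "Function";
      if (!constructor_is_function) return "Object";
      return instance_class_name;  // From the constructor's shared info.
    }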
+void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+  // Conditionally generate a log call.
+  // Args:
+  //   0 (literal string): The type of logging (corresponds to the flags).
+  //     This is used to determine whether or not to generate the log call.
+  //   1 (string): Format string.  Access the string at argument index 2
+  //     with '%2s' (see Logger::LogRuntime for all the formats).
+  //   2 (array): Arguments to the format string.
+  ASSERT_EQ(args->length(), 3);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
+    VisitForValue(args->at(1), kStack);
+    VisitForValue(args->at(2), kStack);
+    __ CallRuntime(Runtime::kLog, 2);
+  }
+#endif
+  // Finally, we're expected to leave a value on the top of the stack.
+  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+
+  Label slow_allocate_heapnumber;
+  Label heapnumber_allocated;
+
+  __ AllocateHeapNumber(r4, r1, r2, &slow_allocate_heapnumber);
+  __ jmp(&heapnumber_allocated);
+
+  __ bind(&slow_allocate_heapnumber);
+  // To allocate a heap number, and ensure that it is not a smi, we
+  // call the runtime function Runtime::kNumberUnaryMinus on 0, returning
+  // the double -0.0. A new, distinct heap number is returned each time.
+  __ mov(r0, Operand(Smi::FromInt(0)));
+  __ push(r0);
+  __ CallRuntime(Runtime::kNumberUnaryMinus, 1);
+  __ mov(r4, Operand(r0));
+
+  __ bind(&heapnumber_allocated);
+
+  // Convert 32 random bits in r0 to 0.(32 random bits) in a double
+  // by computing:
+  // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
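+  // In C terms the trick is roughly (an illustrative sketch):
+  //   uint64_t bits = 0x4130000000000000ULL | random32;  // 2^20 + random32/2^32
+  //   double result = bits_as_double - 1048576.0;        // random32/2^32, in [0,1)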
+  if (CpuFeatures::IsSupported(VFP3)) {
+    __ PrepareCallCFunction(0, r1);
+    __ CallCFunction(ExternalReference::random_uint32_function(), 0);
+
+    CpuFeatures::Scope scope(VFP3);
+    // 0x41300000 is the top half of 1.0 x 2^20 as a double.
+    // Create this constant using mov/orr to avoid PC relative load.
+    __ mov(r1, Operand(0x41000000));
+    __ orr(r1, r1, Operand(0x300000));
+    // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
+    __ vmov(d7, r0, r1);
+    // Move 0x4130000000000000 to VFP.
+    __ mov(r0, Operand(0));
+    __ vmov(d8, r0, r1);
+    // Subtract and store the result in the heap number.
+    __ vsub(d7, d7, d8);
+    __ sub(r0, r4, Operand(kHeapObjectTag));
+    __ vstr(d7, r0, HeapNumber::kValueOffset);
+    __ mov(r0, r4);
+  } else {
+    __ mov(r0, Operand(r4));
+    __ PrepareCallCFunction(1, r1);
+    __ CallCFunction(
+        ExternalReference::fill_heap_number_with_random_function(), 1);
+  }
+
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+  // Load the arguments on the stack and call the stub.
+  SubStringStub stub;
+  ASSERT(args->length() == 3);
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+  VisitForValue(args->at(2), kStack);
+  __ CallStub(&stub);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+  // Load the arguments on the stack and call the stub.
+  RegExpExecStub stub;
+  ASSERT(args->length() == 4);
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+  VisitForValue(args->at(2), kStack);
+  VisitForValue(args->at(3), kStack);
+  __ CallStub(&stub);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);  // Load the object.
+
+  Label done;
+  // If the object is a smi return the object.
+  __ BranchOnSmi(r0, &done);
+  // If the object is not a value type, return the object.
+  __ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
+  __ b(ne, &done);
+  __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
+
+  __ bind(&done);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
+  // Load the arguments on the stack and call the runtime function.
+  ASSERT(args->length() == 2);
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+  __ CallRuntime(Runtime::kMath_pow, 2);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 2);
+
+  VisitForValue(args->at(0), kStack);  // Load the object.
+  VisitForValue(args->at(1), kAccumulator);  // Load the value.
+  __ pop(r1);  // r0 = value. r1 = object.
+
+  Label done;
+  // If the object is a smi, return the value.
+  __ BranchOnSmi(r1, &done);
+
+  // If the object is not a value type, return the value.
+  __ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
+  __ b(ne, &done);
+
+  // Store the value.
+  __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
+  // Update the write barrier.  Save the value as it will be
+  // overwritten by the write barrier code and is needed afterward.
+  __ mov(r2, Operand(JSValue::kValueOffset - kHeapObjectTag));
+  __ RecordWrite(r1, r2, r3);
+
+  __ bind(&done);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+  ASSERT_EQ(args->length(), 1);
+
+  // Load the argument on the stack and call the stub.
+  VisitForValue(args->at(0), kStack);
+
+  NumberToStringStub stub;
+  __ CallStub(&stub);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitCharFromCode(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label slow_case, done;
+  // Fast case of Heap::LookupSingleCharacterStringFromCode.
+  ASSERT(kSmiTag == 0);
+  ASSERT(kSmiShiftSize == 0);
+  ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
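+  // The combined mask tests both conditions at once: the tag bit must be
+  // clear (a smi) and, once untagged, the code must fit in the ASCII range,
+  // i.e. roughly (bits & 1) == 0 && (bits >> 1) <= String::kMaxAsciiCharCode.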
+  __ tst(r0, Operand(kSmiTagMask |
+                     ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
+  __ b(nz, &slow_case);
+  __ mov(r1, Operand(Factory::single_character_string_cache()));
+  ASSERT(kSmiTag == 0);
+  ASSERT(kSmiTagSize == 1);
+  ASSERT(kSmiShiftSize == 0);
+  // At this point the code register contains the smi-tagged ASCII char code.
+  __ add(r1, r1, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ ldr(r1, MemOperand(r1, FixedArray::kHeaderSize - kHeapObjectTag));
+  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+  __ cmp(r1, r2);
+  __ b(eq, &slow_case);
+  __ mov(r0, r1);
+  __ b(&done);
+
+  __ bind(&slow_case);
+  __ push(r0);
+  __ CallRuntime(Runtime::kCharFromCode, 1);
+
+  __ bind(&done);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitFastCharCodeAt(ZoneList<Expression*>* args) {
+  // TODO(fsc): Port the complete implementation from the classic back-end.
+  // Move the undefined value into the result register, which will
+  // trigger the slow case.
+  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+  ASSERT_EQ(2, args->length());
+
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+
+  StringAddStub stub(NO_STRING_ADD_FLAGS);
+  __ CallStub(&stub);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+  ASSERT_EQ(2, args->length());
+
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+
+  StringCompareStub stub;
+  __ CallStub(&stub);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+  // Load the argument on the stack and call the runtime.
+  ASSERT(args->length() == 1);
+  VisitForValue(args->at(0), kStack);
+  __ CallRuntime(Runtime::kMath_sin, 1);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+  // Load the argument on the stack and call the runtime.
+  ASSERT(args->length() == 1);
+  VisitForValue(args->at(0), kStack);
+  __ CallRuntime(Runtime::kMath_cos, 1);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+  // Load the argument on the stack and call the runtime function.
+  ASSERT(args->length() == 1);
+  VisitForValue(args->at(0), kStack);
+  __ CallRuntime(Runtime::kMath_sqrt, 1);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+  ASSERT(args->length() >= 2);
+
+  int arg_count = args->length() - 2;  // For receiver and function.
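+  // Layout of 'args' is (receiver, arg1, ..., argN, function): everything
+  // except the function is pushed on the stack, and the function itself is
+  // loaded into the accumulator for InvokeFunction below.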
+  VisitForValue(args->at(0), kStack);  // Receiver.
+  for (int i = 0; i < arg_count; i++) {
+    VisitForValue(args->at(i + 1), kStack);
+  }
+  VisitForValue(args->at(arg_count + 1), kAccumulator);  // Function.
+
+  // InvokeFunction requires function in r1. Move it in there.
+  if (!result_register().is(r1)) __ mov(r1, result_register());
+  ParameterCount count(arg_count);
+  __ InvokeFunction(r1, count, CALL_FUNCTION);
+  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 3);
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+  VisitForValue(args->at(2), kStack);
+  __ CallRuntime(Runtime::kRegExpConstructResult, 3);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 3);
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+  VisitForValue(args->at(2), kStack);
+  __ CallRuntime(Runtime::kSwapElements, 3);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
+  ASSERT_EQ(2, args->length());
+
+  ASSERT_NE(NULL, args->at(0)->AsLiteral());
+  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
+
+  Handle<FixedArray> jsfunction_result_caches(
+      Top::global_context()->jsfunction_result_caches());
+  if (jsfunction_result_caches->length() <= cache_id) {
+    __ Abort("Attempt to use undefined cache.");
+    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+    Apply(context_, r0);
+    return;
+  }
+
+  VisitForValue(args->at(1), kAccumulator);
+
+  Register key = r0;
+  Register cache = r1;
+  __ ldr(cache, CodeGenerator::ContextOperand(cp, Context::GLOBAL_INDEX));
+  __ ldr(cache, FieldMemOperand(cache, GlobalObject::kGlobalContextOffset));
+  __ ldr(cache,
+         CodeGenerator::ContextOperand(
+             cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
+  __ ldr(cache,
+         FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
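+  // The cache is a FixedArray of (key, value) pairs plus a "finger" slot
+  // that remembers the index of the most recently used key.  A sketch of
+  // the fast path below:
+  //   if (cache[finger] == key) return cache[finger + 1];
+  //   else fall back to the runtime lookup.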
+
+  Label done, not_found;
+  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  __ ldr(r2, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset));
+  // r2 now holds finger offset as a smi.
+  __ add(r3, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  // r3 now points to the start of fixed array elements.
+  __ ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
+  // Note side effect of PreIndex: r3 now points to the key of the pair.
+  __ cmp(key, r2);
+  __ b(ne, &not_found);
+
+  __ ldr(r0, MemOperand(r3, kPointerSize));
+  __ b(&done);
+
+  __ bind(&not_found);
+  // Call runtime to perform the lookup.
+  __ Push(cache, key);
+  __ CallRuntime(Runtime::kGetFromCache, 2);
+
+  __ bind(&done);
+  Apply(context_, r0);
+}
+
+
 void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+  Handle<String> name = expr->name();
+  if (name->length() > 0 && name->Get(0) == '_') {
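+    // Runtime calls whose name starts with '_' (written as %_Foo(...) in
+    // the JS natives) are expanded inline instead of going through the
+    // runtime system.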
+    Comment cmnt(masm_, "[ InlineRuntimeCall");
+    EmitInlineRuntimeCall(expr);
+    return;
+  }
+
   Comment cmnt(masm_, "[ CallRuntime");
   ZoneList<Expression*>* args = expr->arguments();
 
@@ -1366,6 +2577,49 @@
 
 void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
   switch (expr->op()) {
+    case Token::DELETE: {
+      Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
+      Property* prop = expr->expression()->AsProperty();
+      Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
+      if (prop == NULL && var == NULL) {
+        // Result of deleting non-property, non-variable reference is true.
+        // The subexpression may have side effects.
+        VisitForEffect(expr->expression());
+        Apply(context_, true);
+      } else if (var != NULL &&
+                 !var->is_global() &&
+                 var->slot() != NULL &&
+                 var->slot()->type() != Slot::LOOKUP) {
+        // Result of deleting non-global, non-dynamic variables is false.
+        // The subexpression does not have side effects.
+        Apply(context_, false);
+      } else {
+        // Property or variable reference.  Call the delete builtin with
+        // object and property name as arguments.
+        if (prop != NULL) {
+          VisitForValue(prop->obj(), kStack);
+          VisitForValue(prop->key(), kStack);
+        } else if (var->is_global()) {
+          __ ldr(r1, CodeGenerator::GlobalObject());
+          __ mov(r0, Operand(var->name()));
+          __ Push(r1, r0);
+        } else {
+          // Non-global variable.  Call the runtime to look up the context
+          // where the variable was introduced.
+          __ push(context_register());
+          __ mov(r2, Operand(var->name()));
+          __ push(r2);
+          __ CallRuntime(Runtime::kLookupContext, 2);
+          __ push(r0);
+          __ mov(r2, Operand(var->name()));
+          __ push(r2);
+        }
+        __ InvokeBuiltin(Builtins::DELETE, CALL_JS);
+        Apply(context_, r0);
+      }
+      break;
+    }
+
     case Token::VOID: {
       Comment cmnt(masm_, "[ UnaryOperation (VOID)");
       VisitForEffect(expr->expression());
@@ -1406,33 +2660,15 @@
 
     case Token::NOT: {
       Comment cmnt(masm_, "[ UnaryOperation (NOT)");
-      Label materialize_true, materialize_false, done;
-      // Initially assume a pure test context.  Notice that the labels are
-      // swapped.
-      Label* if_true = false_label_;
-      Label* if_false = true_label_;
-      switch (context_) {
-        case Expression::kUninitialized:
-          UNREACHABLE();
-          break;
-        case Expression::kEffect:
-          if_true = &done;
-          if_false = &done;
-          break;
-        case Expression::kValue:
-          if_true = &materialize_false;
-          if_false = &materialize_true;
-          break;
-        case Expression::kTest:
-          break;
-        case Expression::kValueTest:
-          if_false = &materialize_true;
-          break;
-        case Expression::kTestValue:
-          if_true = &materialize_false;
-          break;
-      }
+      Label materialize_true, materialize_false;
+      Label* if_true = NULL;
+      Label* if_false = NULL;
+
+      // Notice that the labels are swapped.
+      PrepareTest(&materialize_true, &materialize_false, &if_false, &if_true);
+
       VisitForControl(expr->expression(), if_true, if_false);
+
       Apply(context_, if_false, if_true);  // Labels swapped.
       break;
     }
@@ -1445,18 +2681,17 @@
           proxy->var()->is_global()) {
         Comment cmnt(masm_, "Global variable");
         __ ldr(r0, CodeGenerator::GlobalObject());
-        __ push(r0);
         __ mov(r2, Operand(proxy->name()));
         Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
         // Use a regular load, not a contextual load, to avoid a reference
         // error.
         __ Call(ic, RelocInfo::CODE_TARGET);
-        __ str(r0, MemOperand(sp));
+        __ push(r0);
       } else if (proxy != NULL &&
                  proxy->var()->slot() != NULL &&
                  proxy->var()->slot()->type() == Slot::LOOKUP) {
         __ mov(r0, Operand(proxy->name()));
-        __ stm(db_w, sp, cp.bit() | r0.bit());
+        __ Push(cp, r0);
         __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
         __ push(r0);
       } else {
@@ -1507,8 +2742,7 @@
       VisitForValue(expr->expression(), kAccumulator);
       // Avoid calling the stub for Smis.
       Label smi, done;
-      __ tst(result_register(), Operand(kSmiTagMask));
-      __ b(eq, &smi);
+      __ BranchOnSmi(result_register(), &smi);
       // Non-smi: call stub leaving result in accumulator register.
       __ CallStub(&stub);
       __ b(&done);
@@ -1530,6 +2764,12 @@
 
 void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
   Comment cmnt(masm_, "[ CountOperation");
+  // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
+  // as the left-hand side.
+  if (!expr->expression()->IsValidLeftHandSide()) {
+    VisitForEffect(expr->expression());
+    return;
+  }
 
   // Expression can only be a property, a global or a (parameter or local)
   // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
@@ -1557,10 +2797,13 @@
       __ mov(ip, Operand(Smi::FromInt(0)));
       __ push(ip);
     }
-    VisitForValue(prop->obj(), kStack);
     if (assign_type == NAMED_PROPERTY) {
+      // Put the object both on the stack and in the accumulator.
+      VisitForValue(prop->obj(), kAccumulator);
+      __ push(r0);
       EmitNamedPropertyLoad(prop);
     } else {
+      VisitForValue(prop->obj(), kStack);
       VisitForValue(prop->key(), kAccumulator);
       __ ldr(r1, MemOperand(sp, 0));
       __ push(r0);
@@ -1570,8 +2813,7 @@
 
   // Call ToNumber only if operand is not a smi.
   Label no_conversion;
-  __ tst(r0, Operand(kSmiTagMask));
-  __ b(eq, &no_conversion);
+  __ BranchOnSmi(r0, &no_conversion);
   __ push(r0);
   __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS);
   __ bind(&no_conversion);
@@ -1615,8 +2857,7 @@
     __ b(vs, &stub_call);
     // We could eliminate this smi check if we split the code at
     // the first smi check before calling ToNumber.
-    __ tst(r0, Operand(kSmiTagMask));
-    __ b(eq, &done);
+    __ BranchOnSmi(r0, &done);
     __ bind(&stub_call);
     // Call stub. Undo operation first.
     __ sub(r0, r0, Operand(Smi::FromInt(count_value)));
@@ -1631,6 +2872,7 @@
     case VARIABLE:
       if (expr->is_postfix()) {
         EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+                               Token::ASSIGN,
                                Expression::kEffect);
         // For all contexts except kEffect: We have the result on
         // top of the stack.
@@ -1639,6 +2881,7 @@
         }
       } else {
         EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+                               Token::ASSIGN,
                                context_);
       }
       break;
@@ -1657,15 +2900,16 @@
       break;
     }
     case KEYED_PROPERTY: {
+      __ pop(r1);  // Key.
+      __ pop(r2);  // Receiver.
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
       __ Call(ic, RelocInfo::CODE_TARGET);
       if (expr->is_postfix()) {
-        __ Drop(2);  // Result is on the stack under the key and the receiver.
         if (context_ != Expression::kEffect) {
           ApplyTOS(context_);
         }
       } else {
-        DropAndApply(2, context_, r0);
+        Apply(context_, r0);
       }
       break;
     }
@@ -1708,36 +2952,41 @@
 }
 
 
+void FullCodeGenerator::EmitNullCompare(bool strict,
+                                        Register obj,
+                                        Register null_const,
+                                        Label* if_true,
+                                        Label* if_false,
+                                        Register scratch) {
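+  // For strict equality only null itself matches.  For sloppy equality
+  // (obj == null) undefined and "undetectable" objects compare equal to
+  // null as well, which is what the extra checks below implement.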
+  __ cmp(obj, null_const);
+  if (strict) {
+    __ b(eq, if_true);
+  } else {
+    __ b(eq, if_true);
+    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+    __ cmp(obj, ip);
+    __ b(eq, if_true);
+    __ BranchOnSmi(obj, if_false);
+    // It can be an undetectable object.
+    __ ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+    __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
+    __ b(ne, if_true);
+  }
+  __ jmp(if_false);
+}
+
+
 void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
   Comment cmnt(masm_, "[ CompareOperation");
 
   // Always perform the comparison for its control flow.  Pack the result
   // into the expression's context after the comparison is performed.
-  Label materialize_true, materialize_false, done;
-  // Initially assume we are in a test context.
-  Label* if_true = true_label_;
-  Label* if_false = false_label_;
-  switch (context_) {
-    case Expression::kUninitialized:
-      UNREACHABLE();
-      break;
-    case Expression::kEffect:
-      if_true = &done;
-      if_false = &done;
-      break;
-    case Expression::kValue:
-      if_true = &materialize_true;
-      if_false = &materialize_false;
-      break;
-    case Expression::kTest:
-      break;
-    case Expression::kValueTest:
-      if_true = &materialize_true;
-      break;
-    case Expression::kTestValue:
-      if_false = &materialize_false;
-      break;
-  }
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
 
   VisitForValue(expr->left(), kStack);
   switch (expr->op()) {
@@ -1768,10 +3017,24 @@
         case Token::EQ_STRICT:
           strict = true;
           // Fall through
-        case Token::EQ:
+        case Token::EQ: {
           cc = eq;
           __ pop(r1);
+          // If either operand is the constant null we do a fast compare
+          // against null.
+          Literal* right_literal = expr->right()->AsLiteral();
+          Literal* left_literal = expr->left()->AsLiteral();
+          if (right_literal != NULL && right_literal->handle()->IsNull()) {
+            EmitNullCompare(strict, r1, r0, if_true, if_false, r2);
+            Apply(context_, if_true, if_false);
+            return;
+          } else if (left_literal != NULL && left_literal->handle()->IsNull()) {
+            EmitNullCompare(strict, r0, r1, if_true, if_false, r2);
+            Apply(context_, if_true, if_false);
+            return;
+          }
           break;
+        }
         case Token::LT:
           cc = lt;
           __ pop(r1);
@@ -1802,8 +3065,7 @@
       // before it is called.
       Label slow_case;
       __ orr(r2, r0, Operand(r1));
-      __ tst(r2, Operand(kSmiTagMask));
-      __ b(ne, &slow_case);
+      __ BranchOnNotSmi(r2, &slow_case);
       __ cmp(r1, r0);
       __ b(cc, if_true);
       __ jmp(if_false);
@@ -1877,3 +3139,5 @@
 #undef __
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index c308d69..ba318fd 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "assembler-arm.h"
 #include "codegen.h"
 #include "codegen-inl.h"
@@ -641,8 +643,8 @@
   // Patch the map check.
   Address ldr_map_instr_address =
       inline_end_address -
-      CodeGenerator::kInlinedKeyedLoadInstructionsAfterPatchSize *
-      Assembler::kInstrSize;
+      (CodeGenerator::kInlinedKeyedLoadInstructionsAfterPatch *
+      Assembler::kInstrSize);
   Assembler::set_target_address_at(ldr_map_instr_address,
                                    reinterpret_cast<Address>(map));
   return true;
@@ -672,7 +674,9 @@
 
   // Patch the map check.
   Address ldr_map_instr_address =
-      inline_end_address - 5 * Assembler::kInstrSize;
+      inline_end_address -
+      (CodeGenerator::kInlinedKeyedStoreInstructionsAfterPatch *
+      Assembler::kInstrSize);
   Assembler::set_target_address_at(ldr_map_instr_address,
                                    reinterpret_cast<Address>(map));
   return true;
@@ -1207,13 +1211,13 @@
 void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
   // ---------- S t a t e --------------
   //  -- r0     : value
+  //  -- r1     : key
+  //  -- r2     : receiver
   //  -- lr     : return address
-  //  -- sp[0]  : key
-  //  -- sp[1]  : receiver
   // -----------------------------------
 
-  __ ldm(ia, sp, r2.bit() | r3.bit());
-  __ Push(r3, r2, r0);
+  // Push receiver, key and value for runtime call.
+  __ Push(r2, r1, r0);
 
   ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss));
   __ TailCallExternalReference(ref, 3, 1);
@@ -1223,12 +1227,13 @@
 void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
   // ---------- S t a t e --------------
   //  -- r0     : value
+  //  -- r1     : key
+  //  -- r2     : receiver
   //  -- lr     : return address
-  //  -- sp[0]  : key
-  //  -- sp[1]  : receiver
   // -----------------------------------
-  __ ldm(ia, sp, r1.bit() | r3.bit());  // r0 == value, r1 == key, r3 == object
-  __ Push(r3, r1, r0);
+
+  // Push receiver, key and value for runtime call.
+  __ Push(r2, r1, r0);
 
   __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
 }
@@ -1237,147 +1242,135 @@
 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   // ---------- S t a t e --------------
   //  -- r0     : value
+  //  -- r1     : key
+  //  -- r2     : receiver
   //  -- lr     : return address
-  //  -- sp[0]  : key
-  //  -- sp[1]  : receiver
   // -----------------------------------
-  Label slow, fast, array, extra, exit, check_pixel_array;
+  Label slow, fast, array, extra, check_pixel_array;
 
-  // Get the key and the object from the stack.
-  __ ldm(ia, sp, r1.bit() | r3.bit());  // r1 = key, r3 = receiver
+  // Register usage.
+  Register value = r0;
+  Register key = r1;
+  Register receiver = r2;
+  Register elements = r3;  // Elements array of the receiver.
+  // r4 and r5 are used as general scratch registers.
+
   // Check that the key is a smi.
-  __ tst(r1, Operand(kSmiTagMask));
+  __ tst(key, Operand(kSmiTagMask));
   __ b(ne, &slow);
   // Check that the object isn't a smi.
-  __ tst(r3, Operand(kSmiTagMask));
+  __ tst(receiver, Operand(kSmiTagMask));
   __ b(eq, &slow);
   // Get the map of the object.
-  __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ ldr(r4, FieldMemOperand(receiver, HeapObject::kMapOffset));
   // Check that the receiver does not require access checks.  We need
   // to do this because this generic stub does not perform map checks.
-  __ ldrb(ip, FieldMemOperand(r2, Map::kBitFieldOffset));
+  __ ldrb(ip, FieldMemOperand(r4, Map::kBitFieldOffset));
   __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
   __ b(ne, &slow);
   // Check if the object is a JS array or not.
-  __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
-  __ cmp(r2, Operand(JS_ARRAY_TYPE));
-  // r1 == key.
+  __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+  __ cmp(r4, Operand(JS_ARRAY_TYPE));
   __ b(eq, &array);
   // Check that the object is some kind of JS object.
-  __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
+  __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
   __ b(lt, &slow);
 
-
   // Object case: Check key against length in the elements array.
-  __ ldr(r3, FieldMemOperand(r3, JSObject::kElementsOffset));
+  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   // Check that the object is in fast mode (not dictionary).
-  __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
   __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
-  __ cmp(r2, ip);
+  __ cmp(r4, ip);
   __ b(ne, &check_pixel_array);
   // Untag the key (for checking against untagged length in the fixed array).
-  __ mov(r1, Operand(r1, ASR, kSmiTagSize));
+  __ mov(r4, Operand(key, ASR, kSmiTagSize));
   // Compute address to store into and check array bounds.
-  __ add(r2, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2));
-  __ ldr(ip, FieldMemOperand(r3, FixedArray::kLengthOffset));
-  __ cmp(r1, Operand(ip));
+  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ cmp(r4, Operand(ip));
   __ b(lo, &fast);
 
-
-  // Slow case:
+  // Slow case: jump to the runtime.
   __ bind(&slow);
+  // Entry registers are intact.
+  // r0: value.
+  // r1: key.
+  // r2: receiver.
   GenerateRuntimeSetProperty(masm);
 
   // Check whether the elements is a pixel array.
-  // r0: value
-  // r1: index (as a smi), zero-extended.
-  // r3: elements array
+  // r4: elements map.
   __ bind(&check_pixel_array);
   __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
-  __ cmp(r2, ip);
+  __ cmp(r4, ip);
   __ b(ne, &slow);
   // Check that the value is a smi. If a conversion is needed call into the
   // runtime to convert and clamp.
-  __ BranchOnNotSmi(r0, &slow);
-  __ mov(r1, Operand(r1, ASR, kSmiTagSize));  // Untag the key.
-  __ ldr(ip, FieldMemOperand(r3, PixelArray::kLengthOffset));
-  __ cmp(r1, Operand(ip));
+  __ BranchOnNotSmi(value, &slow);
+  __ mov(r4, Operand(key, ASR, kSmiTagSize));  // Untag the key.
+  __ ldr(ip, FieldMemOperand(elements, PixelArray::kLengthOffset));
+  __ cmp(r4, Operand(ip));
   __ b(hs, &slow);
-  __ mov(r4, r0);  // Save the value.
-  __ mov(r0, Operand(r0, ASR, kSmiTagSize));  // Untag the value.
+  __ mov(r5, Operand(value, ASR, kSmiTagSize));  // Untag the value.
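+  // Clamp as in: clamped = value < 0 ? 0 : (value > 255 ? 255 : value).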
   {  // Clamp the value to [0..255].
     Label done;
-    __ tst(r0, Operand(0xFFFFFF00));
+    __ tst(r5, Operand(0xFFFFFF00));
     __ b(eq, &done);
-    __ mov(r0, Operand(0), LeaveCC, mi);  // 0 if negative.
-    __ mov(r0, Operand(255), LeaveCC, pl);  // 255 if positive.
+    __ mov(r5, Operand(0), LeaveCC, mi);  // 0 if negative.
+    __ mov(r5, Operand(255), LeaveCC, pl);  // 255 if positive.
     __ bind(&done);
   }
-  __ ldr(r2, FieldMemOperand(r3, PixelArray::kExternalPointerOffset));
-  __ strb(r0, MemOperand(r2, r1));
-  __ mov(r0, Operand(r4));  // Return the original value.
+  // Get the pointer to the external array. This clobbers elements.
+  __ ldr(elements,
+         FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
+  __ strb(r5, MemOperand(elements, r4));  // elements holds the external array.
   __ Ret();
 
-
   // Extra capacity case: Check if there is extra capacity to
   // perform the store and update the length. Used for adding one
   // element to the array by writing to array[array.length].
-  // r0 == value, r1 == key, r2 == elements, r3 == object
   __ bind(&extra);
-  __ b(ne, &slow);  // do not leave holes in the array
-  __ mov(r1, Operand(r1, ASR, kSmiTagSize));  // untag
-  __ ldr(ip, FieldMemOperand(r2, Array::kLengthOffset));
-  __ cmp(r1, Operand(ip));
+  // Condition code from comparing key and array length is still available.
+  __ b(ne, &slow);  // Only support writing to array[array.length].
+  // Check for room in the elements backing store.
+  __ mov(r4, Operand(key, ASR, kSmiTagSize));  // Untag key.
+  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ cmp(r4, Operand(ip));
   __ b(hs, &slow);
-  __ mov(r1, Operand(r1, LSL, kSmiTagSize));  // restore tag
-  __ add(r1, r1, Operand(1 << kSmiTagSize));  // and increment
-  __ str(r1, FieldMemOperand(r3, JSArray::kLengthOffset));
-  __ mov(r3, Operand(r2));
-  // NOTE: Computing the address to store into must take the fact
-  // that the key has been incremented into account.
-  int displacement = FixedArray::kHeaderSize - kHeapObjectTag -
-      ((1 << kSmiTagSize) * 2);
-  __ add(r2, r2, Operand(displacement));
-  __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
+  // Calculate key + 1 as smi.
+  ASSERT_EQ(0, kSmiTag);
+  __ add(r4, key, Operand(Smi::FromInt(1)));
+  __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
   __ b(&fast);
 
-
   // Array case: Get the length and the elements array from the JS
   // array. Check that the array is in fast mode; if it is the
   // length is always a smi.
-  // r0 == value, r3 == object
   __ bind(&array);
-  __ ldr(r2, FieldMemOperand(r3, JSObject::kElementsOffset));
-  __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
+  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ ldr(r4, FieldMemOperand(elements, HeapObject::kMapOffset));
   __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
-  __ cmp(r1, ip);
+  __ cmp(r4, ip);
   __ b(ne, &slow);
 
-  // Check the key against the length in the array, compute the
-  // address to store into and fall through to fast case.
-  __ ldr(r1, MemOperand(sp));  // restore key
-  // r0 == value, r1 == key, r2 == elements, r3 == object.
-  __ ldr(ip, FieldMemOperand(r3, JSArray::kLengthOffset));
-  __ cmp(r1, Operand(ip));
+  // Check the key against the length in the array.
+  __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  __ cmp(key, Operand(ip));
   __ b(hs, &extra);
-  __ mov(r3, Operand(r2));
-  __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
+  // Fall through to fast case.
 
-
-  // Fast case: Do the store.
-  // r0 == value, r2 == address to store into, r3 == elements
   __ bind(&fast);
-  __ str(r0, MemOperand(r2));
+  // Fast case, store the value to the elements backing store.
+  __ add(r5, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ add(r5, r5, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ str(value, MemOperand(r5));
   // Skip write barrier if the written value is a smi.
-  __ tst(r0, Operand(kSmiTagMask));
-  __ b(eq, &exit);
+  __ tst(value, Operand(kSmiTagMask));
+  __ Ret(eq);
   // Update write barrier for the elements array address.
-  __ sub(r1, r2, Operand(r3));
-  __ RecordWrite(r3, r1, r2);
+  __ sub(r4, r5, Operand(elements));
+  __ RecordWrite(elements, r4, r5);
 
-  __ bind(&exit);
   __ Ret();
 }
 
@@ -1471,20 +1464,23 @@
                                          ExternalArrayType array_type) {
   // ---------- S t a t e --------------
   //  -- r0     : value
+  //  -- r1     : key
+  //  -- r2     : receiver
   //  -- lr     : return address
-  //  -- sp[0]  : key
-  //  -- sp[1]  : receiver
   // -----------------------------------
   Label slow, check_heap_number;
 
-  // Get the key and the object from the stack.
-  __ ldm(ia, sp, r1.bit() | r2.bit());  // r1 = key, r2 = receiver
+  // Register usage.
+  Register value = r0;
+  Register key = r1;
+  Register receiver = r2;
+  // r3 mostly holds the elements array or the destination external array.
 
   // Check that the object isn't a smi.
-  __ BranchOnSmi(r2, &slow);
+  __ BranchOnSmi(receiver, &slow);
 
-  // Check that the object is a JS object. Load map into r3
-  __ CompareObjectType(r2, r3, r4, FIRST_JS_OBJECT_TYPE);
+  // Check that the object is a JS object. Load map into r3.
+  __ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE);
   __ b(le, &slow);
 
   // Check that the receiver does not require access checks.  We need
@@ -1494,73 +1490,70 @@
   __ b(ne, &slow);
 
   // Check that the key is a smi.
-  __ BranchOnNotSmi(r1, &slow);
+  __ BranchOnNotSmi(key, &slow);
 
-  // Check that the elements array is the appropriate type of
-  // ExternalArray.
-  // r0: value
-  // r1: index (smi)
-  // r2: object
-  __ ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
-  __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+  // Check that the elements array is the appropriate type of ExternalArray.
+  __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
   __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
-  __ cmp(r3, ip);
+  __ cmp(r4, ip);
   __ b(ne, &slow);
 
   // Check that the index is in range.
-  __ mov(r1, Operand(r1, ASR, kSmiTagSize));  // Untag the index.
-  __ ldr(ip, FieldMemOperand(r2, ExternalArray::kLengthOffset));
-  __ cmp(r1, ip);
+  __ mov(r4, Operand(key, ASR, kSmiTagSize));  // Untag the index.
+  __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
+  __ cmp(r4, ip);
   // Unsigned comparison catches both negative and too-large values.
   __ b(hs, &slow);
 
   // Handle both smis and HeapNumbers in the fast path. Go to the
   // runtime for all other kinds of values.
-  // r0: value
-  // r1: index (integer)
-  // r2: array
-  __ BranchOnNotSmi(r0, &check_heap_number);
-  __ mov(r3, Operand(r0, ASR, kSmiTagSize));  // Untag the value.
-  __ ldr(r2, FieldMemOperand(r2, ExternalArray::kExternalPointerOffset));
+  // r3: external array.
+  // r4: key (integer).
+  __ BranchOnNotSmi(value, &check_heap_number);
+  __ mov(r5, Operand(value, ASR, kSmiTagSize));  // Untag the value.
+  __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
 
-  // r1: index (integer)
-  // r2: base pointer of external storage
-  // r3: value (integer)
+  // r3: base pointer of external storage.
+  // r4: key (integer).
+  // r5: value (integer).
   switch (array_type) {
     case kExternalByteArray:
     case kExternalUnsignedByteArray:
-      __ strb(r3, MemOperand(r2, r1, LSL, 0));
+      __ strb(r5, MemOperand(r3, r4, LSL, 0));
       break;
     case kExternalShortArray:
     case kExternalUnsignedShortArray:
-      __ strh(r3, MemOperand(r2, r1, LSL, 1));
+      __ strh(r5, MemOperand(r3, r4, LSL, 1));
       break;
     case kExternalIntArray:
     case kExternalUnsignedIntArray:
-      __ str(r3, MemOperand(r2, r1, LSL, 2));
+      __ str(r5, MemOperand(r3, r4, LSL, 2));
       break;
     case kExternalFloatArray:
       // Need to perform int-to-float conversion.
-      ConvertIntToFloat(masm, r3, r4, r5, r6);
-      __ str(r4, MemOperand(r2, r1, LSL, 2));
+      ConvertIntToFloat(masm, r5, r6, r7, r9);
+      __ str(r6, MemOperand(r3, r4, LSL, 2));
       break;
     default:
       UNREACHABLE();
       break;
   }
 
-  // r0: value
+  // Entry registers are intact, r0 holds the value which is the return value.
   __ Ret();
 
 
-  // r0: value
-  // r1: index (integer)
-  // r2: external array object
+  // r3: external array.
+  // r4: index (integer).
   __ bind(&check_heap_number);
-  __ CompareObjectType(r0, r3, r4, HEAP_NUMBER_TYPE);
+  __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
   __ b(ne, &slow);
 
-  __ ldr(r2, FieldMemOperand(r2, ExternalArray::kExternalPointerOffset));
+  __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
+
+  // r3: base pointer of external storage.
+  // r4: key (integer).
 
   // The WebGL specification leaves the behavior of storing NaN and
   // +/-Infinity into integer arrays basically undefined. For more
@@ -1570,13 +1563,13 @@
 
     // vldr requires offset to be a multiple of 4 so we can not
     // include -kHeapObjectTag into it.
-    __ sub(r3, r0, Operand(kHeapObjectTag));
-    __ vldr(d0, r3, HeapNumber::kValueOffset);
+    __ sub(r5, r0, Operand(kHeapObjectTag));
+    __ vldr(d0, r5, HeapNumber::kValueOffset);
 
     if (array_type == kExternalFloatArray) {
       __ vcvt_f32_f64(s0, d0);
-      __ vmov(r3, s0);
-      __ str(r3, MemOperand(r2, r1, LSL, 2));
+      __ vmov(r5, s0);
+      __ str(r5, MemOperand(r3, r4, LSL, 2));
     } else {
       Label done;
 
@@ -1585,38 +1578,38 @@
       __ vcmp(d0, d0);
       // Move vector status bits to normal status bits.
       __ vmrs(v8::internal::pc);
-      __ mov(r3, Operand(0), LeaveCC, vs);  // NaN converts to 0
+      __ mov(r5, Operand(0), LeaveCC, vs);  // NaN converts to 0.
       __ b(vs, &done);
 
-      // Test whether exponent equal to 0x7FF (infinity or NaN)
-      __ vmov(r4, r3, d0);
+      // Test whether exponent equal to 0x7FF (infinity or NaN).
+      __ vmov(r6, r7, d0);
       __ mov(r5, Operand(0x7FF00000));
-      __ and_(r3, r3, Operand(r5));
-      __ teq(r3, Operand(r5));
-      __ mov(r3, Operand(0), LeaveCC, eq);
+      __ and_(r6, r6, Operand(r5));
+      __ teq(r6, Operand(r5));
+      __ mov(r6, Operand(0), LeaveCC, eq);
 
-      // Not infinity or NaN simply convert to int
+      // Not infinity or NaN, so simply convert to int.
       if (IsElementTypeSigned(array_type)) {
         __ vcvt_s32_f64(s0, d0, ne);
       } else {
         __ vcvt_u32_f64(s0, d0, ne);
       }
 
-      __ vmov(r3, s0, ne);
+      __ vmov(r5, s0, ne);
 
       __ bind(&done);
       switch (array_type) {
         case kExternalByteArray:
         case kExternalUnsignedByteArray:
-          __ strb(r3, MemOperand(r2, r1, LSL, 0));
+          __ strb(r5, MemOperand(r3, r4, LSL, 0));
           break;
         case kExternalShortArray:
         case kExternalUnsignedShortArray:
-          __ strh(r3, MemOperand(r2, r1, LSL, 1));
+          __ strh(r5, MemOperand(r3, r4, LSL, 1));
           break;
         case kExternalIntArray:
         case kExternalUnsignedIntArray:
-          __ str(r3, MemOperand(r2, r1, LSL, 2));
+          __ str(r5, MemOperand(r3, r4, LSL, 2));
           break;
         default:
           UNREACHABLE();
@@ -1624,12 +1617,12 @@
       }
     }
 
-    // r0: original value
+    // Entry registers are intact, r0 holds the value which is the return value.
     __ Ret();
   } else {
-    // VFP3 is not available do manual conversions
-    __ ldr(r3, FieldMemOperand(r0, HeapNumber::kExponentOffset));
-    __ ldr(r4, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+    // VFP3 is not available, so do manual conversions.
+    __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
+    __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));
 
     if (array_type == kExternalFloatArray) {
       Label done, nan_or_infinity_or_zero;
@@ -1641,106 +1634,108 @@
 
       // Test for all special exponent values: zeros, subnormal numbers, NaNs
       // and infinities. All these should be converted to 0.
-      __ mov(r5, Operand(HeapNumber::kExponentMask));
-      __ and_(r6, r3, Operand(r5), SetCC);
+      __ mov(r7, Operand(HeapNumber::kExponentMask));
+      __ and_(r9, r5, Operand(r7), SetCC);
       __ b(eq, &nan_or_infinity_or_zero);
 
-      __ teq(r6, Operand(r5));
-      __ mov(r6, Operand(kBinary32ExponentMask), LeaveCC, eq);
+      __ teq(r9, Operand(r7));
+      __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
       __ b(eq, &nan_or_infinity_or_zero);
 
       // Rebias exponent.
-      __ mov(r6, Operand(r6, LSR, HeapNumber::kExponentShift));
-      __ add(r6,
-             r6,
+      __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
+      __ add(r9,
+             r9,
              Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
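+      // IEEE 754: the double exponent bias is 1023 and the single-precision
+      // bias is 127, so this computes (biased_exponent - 1023) + 127.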
 
-      __ cmp(r6, Operand(kBinary32MaxExponent));
-      __ and_(r3, r3, Operand(HeapNumber::kSignMask), LeaveCC, gt);
-      __ orr(r3, r3, Operand(kBinary32ExponentMask), LeaveCC, gt);
+      __ cmp(r9, Operand(kBinary32MaxExponent));
+      __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
+      __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
       __ b(gt, &done);
 
-      __ cmp(r6, Operand(kBinary32MinExponent));
-      __ and_(r3, r3, Operand(HeapNumber::kSignMask), LeaveCC, lt);
+      __ cmp(r9, Operand(kBinary32MinExponent));
+      __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
       __ b(lt, &done);
 
-      __ and_(r7, r3, Operand(HeapNumber::kSignMask));
-      __ and_(r3, r3, Operand(HeapNumber::kMantissaMask));
-      __ orr(r7, r7, Operand(r3, LSL, kMantissaInHiWordShift));
-      __ orr(r7, r7, Operand(r4, LSR, kMantissaInLoWordShift));
-      __ orr(r3, r7, Operand(r6, LSL, kBinary32ExponentShift));
+      __ and_(r7, r5, Operand(HeapNumber::kSignMask));
+      __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
+      __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
+      __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
+      __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));
 
       __ bind(&done);
-      __ str(r3, MemOperand(r2, r1, LSL, 2));
+      __ str(r5, MemOperand(r3, r4, LSL, 2));
+      // Entry registers are intact, r0 holds the value which is the return
+      // value.
       __ Ret();
 
       __ bind(&nan_or_infinity_or_zero);
-      __ and_(r7, r3, Operand(HeapNumber::kSignMask));
-      __ and_(r3, r3, Operand(HeapNumber::kMantissaMask));
-      __ orr(r6, r6, r7);
-      __ orr(r6, r6, Operand(r3, LSL, kMantissaInHiWordShift));
-      __ orr(r3, r6, Operand(r4, LSR, kMantissaInLoWordShift));
+      __ and_(r7, r5, Operand(HeapNumber::kSignMask));
+      __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
+      __ orr(r9, r9, r7);
+      __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
+      __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
       __ b(&done);
     } else {
-      bool is_signed_type  = IsElementTypeSigned(array_type);
+      bool is_signed_type = IsElementTypeSigned(array_type);
       int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
-      int32_t min_value    = is_signed_type ? 0x80000000 : 0x00000000;
+      int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
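+      // Values whose exponent is too large to represent saturate to
+      // min_value: INT32_MIN for signed element types, 0 for unsigned.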
 
       Label done, sign;
 
       // Test for all special exponent values: zeros, subnormal numbers, NaNs
       // and infinities. All these should be converted to 0.
-      __ mov(r5, Operand(HeapNumber::kExponentMask));
-      __ and_(r6, r3, Operand(r5), SetCC);
-      __ mov(r3, Operand(0), LeaveCC, eq);
+      __ mov(r7, Operand(HeapNumber::kExponentMask));
+      __ and_(r9, r5, Operand(r7), SetCC);
+      __ mov(r5, Operand(0), LeaveCC, eq);
       __ b(eq, &done);
 
-      __ teq(r6, Operand(r5));
-      __ mov(r3, Operand(0), LeaveCC, eq);
+      __ teq(r9, Operand(r7));
+      __ mov(r5, Operand(0), LeaveCC, eq);
       __ b(eq, &done);
 
       // Unbias exponent.
-      __ mov(r6, Operand(r6, LSR, HeapNumber::kExponentShift));
-      __ sub(r6, r6, Operand(HeapNumber::kExponentBias), SetCC);
+      __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
+      __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
       // If the exponent is negative then the result is 0.
-      __ mov(r3, Operand(0), LeaveCC, mi);
+      __ mov(r5, Operand(0), LeaveCC, mi);
       __ b(mi, &done);
 
-      // If exponent is too big than result is minimal value
-      __ cmp(r6, Operand(meaningfull_bits - 1));
-      __ mov(r3, Operand(min_value), LeaveCC, ge);
+      // If the exponent is too big then the result is the minimal value.
+      __ cmp(r9, Operand(meaningfull_bits - 1));
+      __ mov(r5, Operand(min_value), LeaveCC, ge);
       __ b(ge, &done);
 
-      __ and_(r5, r3, Operand(HeapNumber::kSignMask), SetCC);
-      __ and_(r3, r3, Operand(HeapNumber::kMantissaMask));
-      __ orr(r3, r3, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
+      __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
+      __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
+      __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
 
-      __ rsb(r6, r6, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
-      __ mov(r3, Operand(r3, LSR, r6), LeaveCC, pl);
+      __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
+      __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
       __ b(pl, &sign);
 
-      __ rsb(r6, r6, Operand(0));
-      __ mov(r3, Operand(r3, LSL, r6));
-      __ rsb(r6, r6, Operand(meaningfull_bits));
-      __ orr(r3, r3, Operand(r4, LSR, r6));
+      __ rsb(r9, r9, Operand(0));
+      __ mov(r5, Operand(r5, LSL, r9));
+      __ rsb(r9, r9, Operand(meaningfull_bits));
+      __ orr(r5, r5, Operand(r6, LSR, r9));
 
       __ bind(&sign);
-      __ teq(r5, Operand(0));
-      __ rsb(r3, r3, Operand(0), LeaveCC, ne);
+      __ teq(r7, Operand(0));
+      __ rsb(r5, r5, Operand(0), LeaveCC, ne);
 
       __ bind(&done);
       switch (array_type) {
         case kExternalByteArray:
         case kExternalUnsignedByteArray:
-          __ strb(r3, MemOperand(r2, r1, LSL, 0));
+          __ strb(r5, MemOperand(r3, r4, LSL, 0));
           break;
         case kExternalShortArray:
         case kExternalUnsignedShortArray:
-          __ strh(r3, MemOperand(r2, r1, LSL, 1));
+          __ strh(r5, MemOperand(r3, r4, LSL, 1));
           break;
         case kExternalIntArray:
         case kExternalUnsignedIntArray:
-          __ str(r3, MemOperand(r2, r1, LSL, 2));
+          __ str(r5, MemOperand(r3, r4, LSL, 2));
           break;
         default:
           UNREACHABLE();
@@ -1751,6 +1746,11 @@
 
   // Slow case: call runtime.
   __ bind(&slow);
+
+  // Entry registers are intact.
+  // r0: value
+  // r1: key
+  // r2: receiver
   GenerateRuntimeSetProperty(masm);
 }
 
@@ -1841,3 +1841,5 @@
 
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/jump-target-arm.cc b/src/arm/jump-target-arm.cc
index 8d182be..3c43d16 100644
--- a/src/arm/jump-target-arm.cc
+++ b/src/arm/jump-target-arm.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "codegen-inl.h"
 #include "jump-target-inl.h"
 #include "register-allocator-inl.h"
@@ -136,3 +138,5 @@
 
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index c4b153f..29c48a4 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "bootstrapper.h"
 #include "codegen-inl.h"
 #include "debug.h"
@@ -352,6 +354,51 @@
 }
 
 
+void MacroAssembler::Ldrd(Register dst1, Register dst2,
+                          const MemOperand& src, Condition cond) {
+  ASSERT(src.rm().is(no_reg));
+  ASSERT(!dst1.is(lr));  // r14.
+  ASSERT_EQ(0, dst1.code() % 2);
+  ASSERT_EQ(dst1.code() + 1, dst2.code());
+
+  // Generate two ldr instructions if ldrd is not available.
+  if (CpuFeatures::IsSupported(ARMv7)) {
+    CpuFeatures::Scope scope(ARMv7);
+    ldrd(dst1, dst2, src, cond);
+  } else {
+    MemOperand src2(src);
+    src2.set_offset(src2.offset() + 4);
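+    // If the base register doubles as the first destination, load the
+    // second word first so the base address is not clobbered.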
+    if (dst1.is(src.rn())) {
+      ldr(dst2, src2, cond);
+      ldr(dst1, src, cond);
+    } else {
+      ldr(dst1, src, cond);
+      ldr(dst2, src2, cond);
+    }
+  }
+}
+
+
+void MacroAssembler::Strd(Register src1, Register src2,
+                          const MemOperand& dst, Condition cond) {
+  ASSERT(dst.rm().is(no_reg));
+  ASSERT(!src1.is(lr));  // r14.
+  ASSERT_EQ(0, src1.code() % 2);
+  ASSERT_EQ(src1.code() + 1, src2.code());
+
+  // Generate two str instructions if strd is not available.
+  if (CpuFeatures::IsSupported(ARMv7)) {
+    CpuFeatures::Scope scope(ARMv7);
+    strd(src1, src2, dst, cond);
+  } else {
+    MemOperand dst2(dst);
+    dst2.set_offset(dst2.offset() + 4);
+    str(src1, dst, cond);
+    str(src2, dst2, cond);
+  }
+}
+
+
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   // r0-r3: preserved
   stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
@@ -1725,3 +1772,5 @@
 
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 9cf93da..494f2b6 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -185,6 +185,18 @@
     }
   }
 
+  // Load two consecutive registers with two consecutive memory locations.
+  void Ldrd(Register dst1,
+            Register dst2,
+            const MemOperand& src,
+            Condition cond = al);
+
+  // Store two consecutive registers to two consecutive memory locations.
+  void Strd(Register src1,
+            Register src2,
+            const MemOperand& dst,
+            Condition cond = al);
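+  // Usage sketch (dst1/dst2 must be an even/odd consecutive pair), e.g.:
+  //   Ldrd(r0, r1, FieldMemOperand(r2, HeapNumber::kValueOffset));
+  // loads the 64-bit payload of a heap number into r0/r1.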
+
   // ---------------------------------------------------------------------------
   // Stack limit support
 
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index 64fe5d6..e8910f4 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -26,6 +26,9 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "v8.h"
+
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "unicode.h"
 #include "log.h"
 #include "ast.h"
@@ -1255,3 +1258,5 @@
 #endif  // V8_INTERPRETED_REGEXP
 
 }}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/register-allocator-arm.cc b/src/arm/register-allocator-arm.cc
index ad0c7f9..3b35574 100644
--- a/src/arm/register-allocator-arm.cc
+++ b/src/arm/register-allocator-arm.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "codegen-inl.h"
 #include "register-allocator-inl.h"
 
@@ -57,3 +59,5 @@
 
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index e4601f3..e72a879 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -29,6 +29,8 @@
 #include <cstdarg>
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "disasm.h"
 #include "assembler.h"
 #include "arm/constants-arm.h"
@@ -2731,3 +2733,5 @@
 } }  // namespace assembler::arm
 
 #endif  // __arm__
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 877354c..d82ef21 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "ic-inl.h"
 #include "codegen-inl.h"
 #include "stub-cache.h"
@@ -434,7 +436,7 @@
                         Register holder,
                         Register scratch1,
                         Register scratch2,
-                        JSObject* holder_obj,
+                        JSObject* interceptor_holder,
                         LookupResult* lookup,
                         String* name,
                         Label* miss_label) {
@@ -454,7 +456,8 @@
     }
 
     if (!optimize) {
-      CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
+      CompileRegular(masm, receiver, holder, scratch2, interceptor_holder,
+                     miss_label);
       return;
     }
 
@@ -464,14 +467,18 @@
     __ push(receiver);
     __ Push(holder, name_);
 
+    // Invoke the interceptor.  Note: the map checks from the receiver
+    // to the interceptor's holder have been compiled before (see the
+    // caller of this method).
     CompileCallLoadPropertyWithInterceptor(masm,
                                            receiver,
                                            holder,
                                            name_,
-                                           holder_obj);
+                                           interceptor_holder);
 
+    // Check if the interceptor provided a value for the property.  If
+    // so, return immediately.
     Label interceptor_failed;
-    // Compare with no_interceptor_result_sentinel.
     __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
     __ cmp(r0, scratch1);
     __ b(eq, &interceptor_failed);
@@ -486,13 +493,17 @@
     __ LeaveInternalFrame();
 
     if (lookup->type() == FIELD) {
-      holder = stub_compiler->CheckPrototypes(holder_obj,
+      // We found a FIELD property in the prototype chain of the
+      // interceptor's holder.  Check that the maps from the interceptor's
+      // holder to the field's holder haven't changed...
+      holder = stub_compiler->CheckPrototypes(interceptor_holder,
                                               holder,
                                               lookup->holder(),
                                               scratch1,
                                               scratch2,
                                               name,
                                               miss_label);
+      // ... and retrieve the field from the field's holder.
       stub_compiler->GenerateFastPropertyLoad(masm,
                                               r0,
                                               holder,
@@ -500,35 +511,40 @@
                                               lookup->GetFieldIndex());
       __ Ret();
     } else {
+      // We found a CALLBACKS property in the prototype chain of the
+      // interceptor's holder.
       ASSERT(lookup->type() == CALLBACKS);
       ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
       ASSERT(callback != NULL);
       ASSERT(callback->getter() != NULL);
 
+      // Prepare for tail call: push receiver to stack.
       Label cleanup;
-      __ pop(scratch2);
-      __ Push(receiver, scratch2);
+      __ push(receiver);
 
-      holder = stub_compiler->CheckPrototypes(holder_obj, holder,
+      // Check that the maps from the interceptor's holder to the
+      // callback's holder haven't changed.
+      holder = stub_compiler->CheckPrototypes(interceptor_holder, holder,
                                               lookup->holder(), scratch1,
                                               scratch2,
                                               name,
                                               &cleanup);
 
+      // Continue tail call preparation: push remaining parameters.
       __ push(holder);
       __ Move(holder, Handle<AccessorInfo>(callback));
       __ push(holder);
       __ ldr(scratch1, FieldMemOperand(holder, AccessorInfo::kDataOffset));
       __ Push(scratch1, name_);
 
+      // Tail call to runtime.
       ExternalReference ref =
           ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
       __ TailCallExternalReference(ref, 5, 1);
 
+      // Cleanup code: we pushed the receiver and need to remove it.
       __ bind(&cleanup);
-      __ pop(scratch1);
       __ pop(scratch2);
-      __ push(scratch1);
     }
   }
 
@@ -537,9 +553,9 @@
                       Register receiver,
                       Register holder,
                       Register scratch,
-                      JSObject* holder_obj,
+                      JSObject* interceptor_holder,
                       Label* miss_label) {
-    PushInterceptorArguments(masm, receiver, holder, name_, holder_obj);
+    PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
 
     ExternalReference ref = ExternalReference(
         IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
@@ -715,7 +731,7 @@
                        Register receiver,
                        Register scratch1,
                        Register scratch2,
-                       JSObject* holder_obj,
+                       JSObject* interceptor_holder,
                        LookupResult* lookup,
                        String* name,
                        const CallOptimization& optimization,
@@ -728,10 +744,13 @@
     bool can_do_fast_api_call = false;
     if (optimization.is_simple_api_call() &&
        !lookup->holder()->IsGlobalObject()) {
-     depth1 = optimization.GetPrototypeDepthOfExpectedType(object, holder_obj);
+     depth1 =
+         optimization.GetPrototypeDepthOfExpectedType(object,
+                                                      interceptor_holder);
      if (depth1 == kInvalidProtoDepth) {
-       depth2 = optimization.GetPrototypeDepthOfExpectedType(holder_obj,
-                                                             lookup->holder());
+       depth2 =
+           optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
+                                                        lookup->holder());
      }
      can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
                             (depth2 != kInvalidProtoDepth);
@@ -746,23 +765,31 @@
       ReserveSpaceForFastApiCall(masm, scratch1);
     }
 
+    // Check that the maps from the receiver to the interceptor's holder
+    // haven't changed and thus we can invoke the interceptor.
     Label miss_cleanup;
     Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
     Register holder =
-        stub_compiler_->CheckPrototypes(object, receiver, holder_obj, scratch1,
-                                        scratch2, name, depth1, miss);
+        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
+                                        scratch1, scratch2, name,
+                                        depth1, miss);
 
+    // Invoke the interceptor; if it provides a value,
+    // branch to |regular_invoke|.
     Label regular_invoke;
-    LoadWithInterceptor(masm, receiver, holder, holder_obj, scratch2,
+    LoadWithInterceptor(masm, receiver, holder, interceptor_holder, scratch2,
                         &regular_invoke);
 
-    // Generate code for the failed interceptor case.
+    // The interceptor returned nothing for this property.  Try to use the
+    // cached constant function.
 
-    // Check the lookup is still valid.
-    stub_compiler_->CheckPrototypes(holder_obj, receiver,
+    // Check that the maps from the interceptor's holder to the constant
+    // function's holder haven't changed and thus the cached constant
+    // function can be used.
+    stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
                                     lookup->holder(), scratch1,
                                     scratch2, name, depth2, miss);
 
+    // Invoke the function.
     if (can_do_fast_api_call) {
       GenerateFastApiCall(masm, optimization, arguments_.immediate());
     } else {
@@ -770,12 +797,14 @@
                         JUMP_FUNCTION);
     }
 
+    // Deferred code for the fast API call case: free the preallocated space.
     if (can_do_fast_api_call) {
       __ bind(&miss_cleanup);
       FreeSpaceForFastApiCall(masm);
       __ b(miss_label);
     }
 
+    // Invoke a regular function.
     __ bind(&regular_invoke);
     if (can_do_fast_api_call) {
       FreeSpaceForFastApiCall(masm);
@@ -788,10 +817,10 @@
                       Register scratch1,
                       Register scratch2,
                       String* name,
-                      JSObject* holder_obj,
+                      JSObject* interceptor_holder,
                       Label* miss_label) {
     Register holder =
-        stub_compiler_->CheckPrototypes(object, receiver, holder_obj,
+        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
                                         scratch1, scratch2, name,
                                         miss_label);
 
@@ -804,7 +833,7 @@
                              receiver,
                              holder,
                              name_,
-                             holder_obj);
+                             interceptor_holder);
 
     __ CallExternalReference(
           ExternalReference(
@@ -1618,15 +1647,11 @@
                                                  JSObject* object,
                                                  JSObject* last) {
   // ----------- S t a t e -------------
-  //  -- r2    : name
+  //  -- r0    : receiver
   //  -- lr    : return address
-  //  -- [sp]  : receiver
   // -----------------------------------
   Label miss;
 
-  // Load receiver.
-  __ ldr(r0, MemOperand(sp, 0));
-
   // Check that receiver is not a smi.
   __ tst(r0, Operand(kSmiTagMask));
   __ b(eq, &miss);
@@ -1663,14 +1688,12 @@
                                            int index,
                                            String* name) {
   // ----------- S t a t e -------------
+  //  -- r0    : receiver
   //  -- r2    : name
   //  -- lr    : return address
-  //  -- [sp]  : receiver
   // -----------------------------------
   Label miss;
 
-  __ ldr(r0, MemOperand(sp, 0));
-
   GenerateLoadField(object, holder, r0, r3, r1, index, name, &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1685,13 +1708,12 @@
                                               JSObject* holder,
                                               AccessorInfo* callback) {
   // ----------- S t a t e -------------
+  //  -- r0    : receiver
   //  -- r2    : name
   //  -- lr    : return address
-  //  -- [sp]  : receiver
   // -----------------------------------
   Label miss;
 
-  __ ldr(r0, MemOperand(sp, 0));
   Failure* failure = Failure::InternalError();
   bool success = GenerateLoadCallback(object, holder, r0, r2, r3, r1,
                                       callback, name, &miss, &failure);
@@ -1710,14 +1732,12 @@
                                               Object* value,
                                               String* name) {
   // ----------- S t a t e -------------
+  //  -- r0    : receiver
   //  -- r2    : name
   //  -- lr    : return address
-  //  -- [sp] : receiver
   // -----------------------------------
   Label miss;
 
-  __ ldr(r0, MemOperand(sp, 0));
-
   GenerateLoadConstant(object, holder, r0, r3, r1, value, name, &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1731,14 +1751,12 @@
                                                  JSObject* holder,
                                                  String* name) {
   // ----------- S t a t e -------------
+  //  -- r0    : receiver
   //  -- r2    : name
   //  -- lr    : return address
-  //  -- [sp]  : receiver
   // -----------------------------------
   Label miss;
 
-  __ ldr(r0, MemOperand(sp, 0));
-
   LookupResult lookup;
   LookupPostInterceptor(holder, name, &lookup);
   GenerateLoadInterceptor(object,
@@ -1764,10 +1782,9 @@
                                             String* name,
                                             bool is_dont_delete) {
   // ----------- S t a t e -------------
+  //  -- r0    : receiver
   //  -- r2    : name
   //  -- lr    : return address
-  //  -- r0    : receiver
-  //  -- sp[0] : receiver
   // -----------------------------------
   Label miss;
 
@@ -1974,32 +1991,31 @@
                                                   String* name) {
   // ----------- S t a t e -------------
   //  -- r0    : value
-  //  -- r2    : name
+  //  -- r1    : key
+  //  -- r2    : receiver
   //  -- lr    : return address
-  //  -- [sp]  : receiver
   // -----------------------------------
   Label miss;
 
-  __ IncrementCounter(&Counters::keyed_store_field, 1, r1, r3);
+  __ IncrementCounter(&Counters::keyed_store_field, 1, r3, r4);
 
   // Check that the name has not changed.
-  __ cmp(r2, Operand(Handle<String>(name)));
+  __ cmp(r1, Operand(Handle<String>(name)));
   __ b(ne, &miss);
 
-  // Load receiver from the stack.
-  __ ldr(r3, MemOperand(sp));
-  // r1 is used as scratch register, r3 and r2 might be clobbered.
+  // r3 is used as a scratch register. r1 and r2 keep their values if a
+  // jump to the miss label is generated.
   GenerateStoreField(masm(),
                      object,
                      index,
                      transition,
-                     r3, r2, r1,
+                     r2, r1, r3,
                      &miss);
   __ bind(&miss);
 
-  __ DecrementCounter(&Counters::keyed_store_field, 1, r1, r3);
-  __ mov(r2, Operand(Handle<String>(name)));  // restore name register.
+  __ DecrementCounter(&Counters::keyed_store_field, 1, r3, r4);
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
@@ -2153,3 +2169,5 @@
 #undef __
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
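
The comments added above describe a two-stage map check around interceptor
loads. As a quick orientation aid, here is a hedged, standalone C++ model of
that control flow (plain C++, not the generated ARM code; every name below is
illustrative and not part of V8):

    #include <cstdio>

    struct InterceptorResult { bool has_value; int value; };

    // Stand-in for invoking the interceptor callback.
    InterceptorResult CallInterceptor(bool intercepts) {
      InterceptorResult r = { intercepts, intercepts ? 42 : 0 };
      return r;
    }

    // Stand-in for CheckPrototypes: verify that no map on that segment of
    // the prototype chain has changed since the stub was compiled.
    bool MapsUnchanged(const char* /*from*/, const char* /*to*/) {
      return true;
    }

    // Mirrors CompileCacheable: stage one guards the interceptor call, stage
    // two guards the use of the property found behind the interceptor's
    // holder (field, callback, or cached constant function).
    int LoadWithInterceptor(bool interceptor_answers) {
      if (!MapsUnchanged("receiver", "interceptor_holder")) return -1;  // miss
      InterceptorResult r = CallInterceptor(interceptor_answers);
      if (r.has_value) return r.value;  // The interceptor provided the value.
      if (!MapsUnchanged("interceptor_holder", "lookup_holder")) return -1;
      return 7;  // Use the cached property found behind the holder.
    }

    int main() {
      std::printf("%d %d\n", LoadWithInterceptor(true),
                  LoadWithInterceptor(false));
      return 0;
    }
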
diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc
index f7b337d..3acd2df 100644
--- a/src/arm/virtual-frame-arm.cc
+++ b/src/arm/virtual-frame-arm.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_ARM)
+
 #include "codegen-inl.h"
 #include "register-allocator-inl.h"
 #include "scopes.h"
@@ -307,7 +309,8 @@
 
 void VirtualFrame::CallLoadIC(Handle<String> name, RelocInfo::Mode mode) {
   Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-  SpillAllButCopyTOSToR0();
+  PopToR0();
+  SpillAll();
   __ mov(r2, Operand(name));
   CallCodeObject(ic, mode, 0);
 }
@@ -337,8 +340,10 @@
 
 
 void VirtualFrame::CallKeyedStoreIC() {
-  ASSERT(SpilledScope::is_spilled());
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+  PopToR1R0();
+  SpillAll();
+  EmitPop(r2);
   CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
 }
 
@@ -505,36 +510,40 @@
 
 
 void VirtualFrame::Dup() {
-  AssertIsNotSpilled();
-  switch (top_of_stack_state_) {
-    case NO_TOS_REGISTERS:
-      __ ldr(r0, MemOperand(sp, 0));
-      top_of_stack_state_ = R0_TOS;
-      break;
-    case R0_TOS:
-      __ mov(r1, r0);
-      // r0 and r1 contains the same value. Prefer a state with r0 holding TOS.
-      top_of_stack_state_ = R0_R1_TOS;
-      break;
-    case R1_TOS:
-      __ mov(r0, r1);
-      // r0 and r1 contains the same value. Prefer a state with r0 holding TOS.
-      top_of_stack_state_ = R0_R1_TOS;
-      break;
-    case R0_R1_TOS:
-      __ push(r1);
-      __ mov(r1, r0);
-      // r0 and r1 contains the same value. Prefer a state with r0 holding TOS.
-      top_of_stack_state_ = R0_R1_TOS;
-      break;
-    case R1_R0_TOS:
-      __ push(r0);
-      __ mov(r0, r1);
-      // r0 and r1 contains the same value. Prefer a state with r0 holding TOS.
-      top_of_stack_state_ = R0_R1_TOS;
-      break;
-    default:
-      UNREACHABLE();
+  if (SpilledScope::is_spilled()) {
+    __ ldr(ip, MemOperand(sp, 0));
+    __ push(ip);
+  } else {
+    switch (top_of_stack_state_) {
+      case NO_TOS_REGISTERS:
+        __ ldr(r0, MemOperand(sp, 0));
+        top_of_stack_state_ = R0_TOS;
+        break;
+      case R0_TOS:
+        __ mov(r1, r0);
+        // r0 and r1 contain the same value. Prefer state with r0 holding TOS.
+        top_of_stack_state_ = R0_R1_TOS;
+        break;
+      case R1_TOS:
+        __ mov(r0, r1);
+        // r0 and r1 contain the same value. Prefer state with r0 holding TOS.
+        top_of_stack_state_ = R0_R1_TOS;
+        break;
+      case R0_R1_TOS:
+        __ push(r1);
+        __ mov(r1, r0);
+        // r0 and r1 contain the same value. Prefer state with r0 holding TOS.
+        top_of_stack_state_ = R0_R1_TOS;
+        break;
+      case R1_R0_TOS:
+        __ push(r0);
+        __ mov(r0, r1);
+        // r0 and r1 contain the same value. Prefer state with r0 holding TOS.
+        top_of_stack_state_ = R0_R1_TOS;
+        break;
+      default:
+        UNREACHABLE();
+    }
   }
   element_count_++;
 }
@@ -749,3 +758,5 @@
 #undef __
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/virtual-frame-arm.h b/src/arm/virtual-frame-arm.h
index 655194d..9471d61 100644
--- a/src/arm/virtual-frame-arm.h
+++ b/src/arm/virtual-frame-arm.h
@@ -278,7 +278,8 @@
                      InvokeJSFlags flag,
                      int arg_count);
 
-  // Call load IC. Receiver is on the stack. Result is returned in r0.
+  // Call load IC. Receiver is on the stack and is consumed. Result is returned
+  // in r0.
   void CallLoadIC(Handle<String> name, RelocInfo::Mode mode);
 
   // Call store IC. If the load is contextual, value is found on top of the
@@ -290,8 +291,8 @@
   // Result is returned in r0.
   void CallKeyedLoadIC();
 
-  // Call keyed store IC. Key and receiver are on the stack and the value is in
-  // r0. Result is returned in r0.
+  // Call keyed store IC. Value, key and receiver are on the stack. All three
+  // are consumed. Result is returned in r0.
   void CallKeyedStoreIC();
 
   // Call into an IC stub given the number of arguments it removes
diff --git a/src/assembler.cc b/src/assembler.cc
index 87f363b..871ca86 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -424,8 +424,6 @@
       return "no reloc";
     case RelocInfo::EMBEDDED_OBJECT:
       return "embedded object";
-    case RelocInfo::EMBEDDED_STRING:
-      return "embedded string";
     case RelocInfo::CONSTRUCT_CALL:
       return "code target (js construct call)";
     case RelocInfo::CODE_TARGET_CONTEXT:
@@ -508,7 +506,6 @@
       ASSERT(code->address() == HeapObject::cast(found)->address());
       break;
     }
-    case RelocInfo::EMBEDDED_STRING:
     case RUNTIME_ENTRY:
     case JS_RETURN:
     case COMMENT:
diff --git a/src/assembler.h b/src/assembler.h
index 5d03c1f..74613b3 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -38,6 +38,7 @@
 #include "runtime.h"
 #include "top.h"
 #include "token.h"
+#include "objects.h"
 
 namespace v8 {
 namespace internal {
@@ -121,7 +122,6 @@
     DEBUG_BREAK,
     CODE_TARGET,         // code target which is not any of the above.
     EMBEDDED_OBJECT,
-    EMBEDDED_STRING,
 
     // Everything after runtime_entry (inclusive) is not GC'ed.
     RUNTIME_ENTRY,
@@ -137,7 +137,7 @@
     NUMBER_OF_MODES,  // must be no greater than 14 - see RelocInfoWriter
     NONE,  // never recorded
     LAST_CODE_ENUM = CODE_TARGET,
-    LAST_GCED_ENUM = EMBEDDED_STRING
+    LAST_GCED_ENUM = EMBEDDED_OBJECT
   };
 
 
@@ -185,6 +185,11 @@
   // Apply a relocation by delta bytes
   INLINE(void apply(intptr_t delta));
 
+  // Is the pointer this relocation info refers to coded like a plain pointer
+  // or is it strange in some way (e.g. relative or patched into a series of
+  // instructions)?
+  bool IsCodedSpecially();
+
   // Read/modify the code target in the branch/call instruction
   // this relocation applies to;
   // can only be called if IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
@@ -195,9 +200,23 @@
   INLINE(Object** target_object_address());
   INLINE(void set_target_object(Object* target));
 
-  // Read the address of the word containing the target_address. Can only
-  // be called if IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY.
+  // Read the address of the word containing the target_address in an
+  // instruction stream.  What this means exactly is architecture-dependent.
+  // The only architecture-independent user of this function is the serializer.
+  // The serializer uses it to find out how many raw bytes of instruction to
+  // output before the next target.  Architecture-independent code shouldn't
+  // dereference the pointer it gets back from this.
   INLINE(Address target_address_address());
+  // This indicates how much space a target takes up when deserializing a code
+  // stream.  For most architectures this is just the size of a pointer.  For
+  // an instruction like movw/movt where the target bits are mixed into the
+  // instruction bits the size of the target will be zero, indicating that the
+  // serializer should not step forwards in memory after a target is resolved
+  // and written.  In this case the target_address_address function above
+  // should return the end of the instructions to be patched, allowing the
+  // deserializer to deserialize the instructions as raw bytes and put them in
+  // place, ready to be patched with the target.
+  INLINE(int target_address_size());
 
   // Read/modify the reference in the instruction this relocation
   // applies to; can only be called if rmode_ is external_reference
@@ -212,6 +231,8 @@
   INLINE(Object** call_object_address());
   INLINE(void set_call_object(Object* target));
 
+  inline void Visit(ObjectVisitor* v);
+
   // Patch the code with some other code.
   void PatchCode(byte* instructions, int instruction_count);
 
diff --git a/src/circular-queue-inl.h b/src/circular-queue-inl.h
index 90ab0f5..349f222 100644
--- a/src/circular-queue-inl.h
+++ b/src/circular-queue-inl.h
@@ -34,54 +34,6 @@
 namespace internal {
 
 
-template<typename Record>
-CircularQueue<Record>::CircularQueue(int desired_buffer_size_in_bytes)
-    : buffer_(NewArray<Record>(desired_buffer_size_in_bytes / sizeof(Record))),
-      buffer_end_(buffer_ + desired_buffer_size_in_bytes / sizeof(Record)),
-      enqueue_semaphore_(
-          OS::CreateSemaphore(static_cast<int>(buffer_end_ - buffer_) - 1)),
-      enqueue_pos_(buffer_),
-      dequeue_pos_(buffer_) {
-  // To be able to distinguish between a full and an empty queue
-  // state, the queue must be capable of containing at least 2
-  // records.
-  ASSERT((buffer_end_ - buffer_) >= 2);
-}
-
-
-template<typename Record>
-CircularQueue<Record>::~CircularQueue() {
-  DeleteArray(buffer_);
-  delete enqueue_semaphore_;
-}
-
-
-template<typename Record>
-void CircularQueue<Record>::Dequeue(Record* rec) {
-  ASSERT(!IsEmpty());
-  *rec = *dequeue_pos_;
-  dequeue_pos_ = Next(dequeue_pos_);
-  // Tell we have a spare record.
-  enqueue_semaphore_->Signal();
-}
-
-
-template<typename Record>
-void CircularQueue<Record>::Enqueue(const Record& rec) {
-  // Wait until we have at least one spare record.
-  enqueue_semaphore_->Wait();
-  ASSERT(Next(enqueue_pos_) != dequeue_pos_);
-  *enqueue_pos_ = rec;
-  enqueue_pos_ = Next(enqueue_pos_);
-}
-
-
-template<typename Record>
-Record* CircularQueue<Record>::Next(Record* curr) {
-  return ++curr != buffer_end_ ? curr : buffer_;
-}
-
-
 void* SamplingCircularQueue::Enqueue() {
   WrapPositionIfNeeded(&producer_pos_->enqueue_pos);
   void* result = producer_pos_->enqueue_pos;
diff --git a/src/circular-queue.h b/src/circular-queue.h
index 486f107..73afc68 100644
--- a/src/circular-queue.h
+++ b/src/circular-queue.h
@@ -32,32 +32,6 @@
 namespace internal {
 
 
-// Lock-based blocking circular queue for small records.  Intended for
-// transfer of small records between a single producer and a single
-// consumer. Blocks on enqueue operation if the queue is full.
-template<typename Record>
-class CircularQueue {
- public:
-  inline explicit CircularQueue(int desired_buffer_size_in_bytes);
-  inline ~CircularQueue();
-
-  INLINE(void Dequeue(Record* rec));
-  INLINE(void Enqueue(const Record& rec));
-  INLINE(bool IsEmpty()) { return enqueue_pos_ == dequeue_pos_; }
-
- private:
-  INLINE(Record* Next(Record* curr));
-
-  Record* buffer_;
-  Record* const buffer_end_;
-  Semaphore* enqueue_semaphore_;
-  Record* enqueue_pos_;
-  Record* dequeue_pos_;
-
-  DISALLOW_COPY_AND_ASSIGN(CircularQueue);
-};
-
-
 // Lock-free cache-friendly sampling circular queue for large
 // records. Intended for fast transfer of large records between a
 // single producer and a single consumer. If the queue is full,
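
The blocking CircularQueue removed above is replaced in the profiler by
UnboundQueue (see src/cpu-profiler.h below), whose producer never blocks. As
a hedged illustration of the idea, here is a minimal standalone
single-producer/single-consumer unbounded queue in the classic
first/divider/last style (modern C++ for brevity; this models the concept and
is not V8's implementation):

    #include <atomic>
    #include <cstdio>

    template <typename Record>
    class SpscUnboundQueue {
      struct Node {
        explicit Node(const Record& r) : value(r), next(NULL) {}
        Record value;
        Node* next;
      };

     public:
      SpscUnboundQueue() : first_(new Node(Record())) {
        divider_.store(first_);
        last_.store(first_);
      }
      ~SpscUnboundQueue() {
        while (first_ != NULL) {
          Node* tmp = first_;
          first_ = tmp->next;
          delete tmp;
        }
      }
      // Producer side: never blocks; allocates a node per record and reclaims
      // nodes the consumer has already passed.
      void Enqueue(const Record& rec) {
        Node* node = new Node(rec);
        last_.load()->next = node;
        last_.store(node);
        while (first_ != divider_.load()) {
          Node* tmp = first_;
          first_ = tmp->next;
          delete tmp;
        }
      }
      // Consumer side: returns false if the queue is empty.
      bool Dequeue(Record* rec) {
        Node* div = divider_.load();
        if (div == last_.load()) return false;
        *rec = div->next->value;
        divider_.store(div->next);
        return true;
      }

     private:
      Node* first_;                 // Owned by the producer.
      std::atomic<Node*> divider_;  // Nodes after divider_ are unconsumed.
      std::atomic<Node*> last_;
    };

    int main() {
      SpscUnboundQueue<int> q;
      q.Enqueue(1);
      q.Enqueue(2);
      int v;
      while (q.Dequeue(&v)) std::printf("%d\n", v);
      return 0;
    }
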
diff --git a/src/codegen.h b/src/codegen.h
index 667d100..358c6fc 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -114,7 +114,7 @@
   F(CharFromCode, 1, 1)                                                      \
   F(ObjectEquals, 2, 1)                                                      \
   F(Log, 3, 1)                                                               \
-  F(RandomHeapNumber, 0, 1)                                          \
+  F(RandomHeapNumber, 0, 1)                                                  \
   F(IsObject, 1, 1)                                                          \
   F(IsFunction, 1, 1)                                                        \
   F(IsUndetectableObject, 1, 1)                                              \
diff --git a/src/compiler.cc b/src/compiler.cc
index 901f218..ca92ed9 100755
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -44,6 +44,18 @@
 namespace v8 {
 namespace internal {
 
+// For normal operation the syntax checker is used to determine whether to
+// use the full compiler for top-level code or not.  However, if the flag
+// --always-full-compiler is specified or debugging is active, the full
+// compiler will be used for all code.
+static bool AlwaysFullCompiler() {
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  return FLAG_always_full_compiler || Debugger::IsDebuggerActive();
+#else
+  return FLAG_always_full_compiler;
+#endif
+}
+
 
 static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
   FunctionLiteral* function = info->function();
@@ -120,7 +132,9 @@
       ? info->scope()->is_global_scope()
       : (shared->is_toplevel() || shared->try_full_codegen());
 
-  if (FLAG_always_full_compiler || (FLAG_full_compiler && is_run_once)) {
+  if (AlwaysFullCompiler()) {
+    return FullCodeGenerator::MakeCode(info);
+  } else if (FLAG_full_compiler && is_run_once) {
     FullCodeGenSyntaxChecker checker;
     checker.Check(function);
     if (checker.has_supported_syntax()) {
@@ -507,7 +521,11 @@
     CHECK(!FLAG_always_full_compiler || !FLAG_always_fast_compiler);
     bool is_run_once = literal->try_full_codegen();
     bool is_compiled = false;
-    if (FLAG_always_full_compiler || (FLAG_full_compiler && is_run_once)) {
+
+    if (AlwaysFullCompiler()) {
+      code = FullCodeGenerator::MakeCode(&info);
+      is_compiled = true;
+    } else if (FLAG_full_compiler && is_run_once) {
       FullCodeGenSyntaxChecker checker;
       checker.Check(literal);
       if (checker.has_supported_syntax()) {
diff --git a/src/cpu-profiler-inl.h b/src/cpu-profiler-inl.h
index e454a9a..cb7fdd8 100644
--- a/src/cpu-profiler-inl.h
+++ b/src/cpu-profiler-inl.h
@@ -34,6 +34,7 @@
 
 #include "circular-queue-inl.h"
 #include "profile-generator-inl.h"
+#include "unbound-queue-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -54,7 +55,7 @@
 
 
 void CodeAliasEventRecord::UpdateCodeMap(CodeMap* code_map) {
-  code_map->AddAlias(alias, start);
+  code_map->AddAlias(start, entry, code_start);
 }
 
 
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index ed3f692..31c4658 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -46,7 +46,6 @@
 ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
     : generator_(generator),
       running_(false),
-      events_buffer_(kEventsBufferSize),
       ticks_buffer_(sizeof(TickSampleEventRecord),
                     kTickSamplesBufferChunkSize,
                     kTickSamplesBufferChunksCount),
@@ -141,13 +140,15 @@
 
 
 void ProfilerEventsProcessor::FunctionCreateEvent(Address alias,
-                                                  Address start) {
+                                                  Address start,
+                                                  int security_token_id) {
   CodeEventsContainer evt_rec;
   CodeAliasEventRecord* rec = &evt_rec.CodeAliasEventRecord_;
   rec->type = CodeEventRecord::CODE_ALIAS;
   rec->order = ++enqueue_order_;
-  rec->alias = alias;
-  rec->start = start;
+  rec->start = alias;
+  rec->entry = generator_->NewCodeEntry(security_token_id);
+  rec->code_start = start;
   events_buffer_.Enqueue(evt_rec);
 }
 
@@ -257,26 +258,30 @@
 }
 
 
-CpuProfile* CpuProfiler::StopProfiling(String* title) {
-  return is_profiling() ? singleton_->StopCollectingProfile(title) : NULL;
+CpuProfile* CpuProfiler::StopProfiling(Object* security_token, String* title) {
+  return is_profiling() ?
+      singleton_->StopCollectingProfile(security_token, title) : NULL;
 }
 
 
 int CpuProfiler::GetProfilesCount() {
   ASSERT(singleton_ != NULL);
-  return singleton_->profiles_->profiles()->length();
+  // The count of profiles doesn't depend on a security token.
+  return singleton_->profiles_->Profiles(CodeEntry::kNoSecurityToken)->length();
 }
 
 
-CpuProfile* CpuProfiler::GetProfile(int index) {
+CpuProfile* CpuProfiler::GetProfile(Object* security_token, int index) {
   ASSERT(singleton_ != NULL);
-  return singleton_->profiles_->profiles()->at(index);
+  const int token = singleton_->token_enumerator_->GetTokenId(security_token);
+  return singleton_->profiles_->Profiles(token)->at(index);
 }
 
 
-CpuProfile* CpuProfiler::FindProfile(unsigned uid) {
+CpuProfile* CpuProfiler::FindProfile(Object* security_token, unsigned uid) {
   ASSERT(singleton_ != NULL);
-  return singleton_->profiles_->GetProfile(uid);
+  const int token = singleton_->token_enumerator_->GetTokenId(security_token);
+  return singleton_->profiles_->GetProfile(token, uid);
 }
 
 
@@ -348,8 +353,15 @@
 
 
 void CpuProfiler::FunctionCreateEvent(JSFunction* function) {
+  int security_token_id = CodeEntry::kNoSecurityToken;
+  if (function->unchecked_context()->IsContext()) {
+    security_token_id = singleton_->token_enumerator_->GetTokenId(
+        function->context()->global_context()->security_token());
+  }
   singleton_->processor_->FunctionCreateEvent(
-      function->address(), function->code()->address());
+      function->address(),
+      function->code()->address(),
+      security_token_id);
 }
 
 
@@ -388,12 +400,14 @@
 CpuProfiler::CpuProfiler()
     : profiles_(new CpuProfilesCollection()),
       next_profile_uid_(1),
+      token_enumerator_(new TokenEnumerator()),
       generator_(NULL),
       processor_(NULL) {
 }
 
 
 CpuProfiler::~CpuProfiler() {
+  delete token_enumerator_;
   delete profiles_;
 }
 
@@ -438,7 +452,9 @@
 CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
   const double actual_sampling_rate = generator_->actual_sampling_rate();
   StopProcessorIfLastProfile();
-  CpuProfile* result = profiles_->StopProfiling(title, actual_sampling_rate);
+  CpuProfile* result = profiles_->StopProfiling(CodeEntry::kNoSecurityToken,
+                                                title,
+                                                actual_sampling_rate);
   if (result != NULL) {
     result->Print();
   }
@@ -446,10 +462,12 @@
 }
 
 
-CpuProfile* CpuProfiler::StopCollectingProfile(String* title) {
+CpuProfile* CpuProfiler::StopCollectingProfile(Object* security_token,
+                                               String* title) {
   const double actual_sampling_rate = generator_->actual_sampling_rate();
   StopProcessorIfLastProfile();
-  return profiles_->StopProfiling(title, actual_sampling_rate);
+  int token = token_enumerator_->GetTokenId(security_token);
+  return profiles_->StopProfiling(token, title, actual_sampling_rate);
 }
 
 
diff --git a/src/cpu-profiler.h b/src/cpu-profiler.h
index 35d8d5e..81f9ae3 100644
--- a/src/cpu-profiler.h
+++ b/src/cpu-profiler.h
@@ -31,6 +31,7 @@
 #ifdef ENABLE_LOGGING_AND_PROFILING
 
 #include "circular-queue.h"
+#include "unbound-queue.h"
 
 namespace v8 {
 namespace internal {
@@ -41,7 +42,7 @@
 class CpuProfile;
 class CpuProfilesCollection;
 class ProfileGenerator;
-
+class TokenEnumerator;
 
 #define CODE_EVENTS_TYPE_LIST(V)                \
   V(CODE_CREATION, CodeCreateEventRecord)       \
@@ -94,8 +95,9 @@
 
 class CodeAliasEventRecord : public CodeEventRecord {
  public:
-  Address alias;
   Address start;
+  CodeEntry* entry;
+  Address code_start;
 
   INLINE(void UpdateCodeMap(CodeMap* code_map));
 };
@@ -151,7 +153,7 @@
                        Address start, unsigned size);
   void CodeMoveEvent(Address from, Address to);
   void CodeDeleteEvent(Address from);
-  void FunctionCreateEvent(Address alias, Address start);
+  void FunctionCreateEvent(Address alias, Address start, int security_token_id);
   void FunctionMoveEvent(Address from, Address to);
   void FunctionDeleteEvent(Address from);
   void RegExpCodeCreateEvent(Logger::LogEventsAndTags tag,
@@ -180,7 +182,7 @@
 
   ProfileGenerator* generator_;
   bool running_;
-  CircularQueue<CodeEventsContainer> events_buffer_;
+  UnboundQueue<CodeEventsContainer> events_buffer_;
   SamplingCircularQueue ticks_buffer_;
   unsigned enqueue_order_;
 };
@@ -212,10 +214,10 @@
   static void StartProfiling(const char* title);
   static void StartProfiling(String* title);
   static CpuProfile* StopProfiling(const char* title);
-  static CpuProfile* StopProfiling(String* title);
+  static CpuProfile* StopProfiling(Object* security_token, String* title);
   static int GetProfilesCount();
-  static CpuProfile* GetProfile(int index);
-  static CpuProfile* FindProfile(unsigned uid);
+  static CpuProfile* GetProfile(Object* security_token, int index);
+  static CpuProfile* FindProfile(Object* security_token, unsigned uid);
 
   // Invoked from stack sampler (thread or signal handler.)
   static TickSample* TickSampleEvent();
@@ -252,11 +254,12 @@
   void StartCollectingProfile(String* title);
   void StartProcessorIfNotStarted();
   CpuProfile* StopCollectingProfile(const char* title);
-  CpuProfile* StopCollectingProfile(String* title);
+  CpuProfile* StopCollectingProfile(Object* security_token, String* title);
   void StopProcessorIfLastProfile();
 
   CpuProfilesCollection* profiles_;
   unsigned next_profile_uid_;
+  TokenEnumerator* token_enumerator_;
   ProfileGenerator* generator_;
   ProfilerEventsProcessor* processor_;
   int saved_logging_nesting_;
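
Taken together, the profiler changes make profile reads token-aware while
keeping the profile count global. A hedged sketch of the intended calling
pattern (the real caller is the public profiler API layer; GetProfileForCaller
is a hypothetical helper, and only the CpuProfiler signatures introduced above
are assumed):

    namespace i = v8::internal;

    i::CpuProfile* GetProfileForCaller(i::Object* security_token, int index) {
      // The number of recorded profiles does not depend on the token...
      if (index >= i::CpuProfiler::GetProfilesCount()) return NULL;
      // ...but the profile handed back is filtered through the caller's
      // security token, hiding entries from incompatible contexts.
      return i::CpuProfiler::GetProfile(security_token, index);
    }
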
diff --git a/src/debug.cc b/src/debug.cc
index bf1f893..8cb95ef 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -72,6 +72,17 @@
 }
 
 
+static v8::Handle<v8::Context> GetDebugEventContext() {
+  Handle<Context> context = Debug::debugger_entry()->GetContext();
+  // Top::context() may have been NULL when "script collected" event occurred.
+  if (*context == NULL) {
+    return v8::Local<v8::Context>();
+  }
+  Handle<Context> global_context(context->global_context());
+  return v8::Utils::ToLocal(global_context);
+}
+
+
 BreakLocationIterator::BreakLocationIterator(Handle<DebugInfo> debug_info,
                                              BreakLocatorType type) {
   debug_info_ = debug_info;
@@ -2112,12 +2123,14 @@
     if (event_listener_->IsProxy()) {
       // C debug event listener.
       Handle<Proxy> callback_obj(Handle<Proxy>::cast(event_listener_));
-      v8::Debug::EventCallback callback =
-            FUNCTION_CAST<v8::Debug::EventCallback>(callback_obj->proxy());
-      callback(event,
-               v8::Utils::ToLocal(Handle<JSObject>::cast(exec_state)),
-               v8::Utils::ToLocal(event_data),
-               v8::Utils::ToLocal(Handle<Object>::cast(event_listener_data_)));
+      v8::Debug::EventCallback2 callback =
+            FUNCTION_CAST<v8::Debug::EventCallback2>(callback_obj->proxy());
+      EventDetailsImpl event_details(
+          event,
+          Handle<JSObject>::cast(exec_state),
+          event_data,
+          event_listener_data_);
+      callback(event_details);
     } else {
       // JavaScript debug event listener.
       ASSERT(event_listener_->IsJSFunction());
@@ -2643,14 +2656,10 @@
 
 
 v8::Handle<v8::Context> MessageImpl::GetEventContext() const {
-  Handle<Context> context = Debug::debugger_entry()->GetContext();
-  // Top::context() may have been NULL when "script collected" event occured.
-  if (*context == NULL) {
-    ASSERT(event_ == v8::ScriptCollected);
-    return v8::Local<v8::Context>();
-  }
-  Handle<Context> global_context(context->global_context());
-  return v8::Utils::ToLocal(global_context);
+  v8::Handle<v8::Context> context = GetDebugEventContext();
+  // Top::context() may be NULL when "script collected" event occurs.
+  ASSERT(!context.IsEmpty() || event_ == v8::ScriptCollected);
+  return GetDebugEventContext();
 }
 
 
@@ -2659,6 +2668,41 @@
 }
 
 
+EventDetailsImpl::EventDetailsImpl(DebugEvent event,
+                                   Handle<JSObject> exec_state,
+                                   Handle<JSObject> event_data,
+                                   Handle<Object> callback_data)
+    : event_(event),
+      exec_state_(exec_state),
+      event_data_(event_data),
+      callback_data_(callback_data) {}
+
+
+DebugEvent EventDetailsImpl::GetEvent() const {
+  return event_;
+}
+
+
+v8::Handle<v8::Object> EventDetailsImpl::GetExecutionState() const {
+  return v8::Utils::ToLocal(exec_state_);
+}
+
+
+v8::Handle<v8::Object> EventDetailsImpl::GetEventData() const {
+  return v8::Utils::ToLocal(event_data_);
+}
+
+
+v8::Handle<v8::Context> EventDetailsImpl::GetEventContext() const {
+  return GetDebugEventContext();
+}
+
+
+v8::Handle<v8::Value> EventDetailsImpl::GetCallbackData() const {
+  return v8::Utils::ToLocal(callback_data_);
+}
+
+
 CommandMessage::CommandMessage() : text_(Vector<uint16_t>::empty()),
                                    client_data_(NULL) {
 }
diff --git a/src/debug.h b/src/debug.h
index e7ac94e..e2eecb8 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -524,6 +524,27 @@
 };
 
 
+// Details of the debug event delivered to the debug event listener.
+class EventDetailsImpl : public v8::Debug::EventDetails {
+ public:
+  EventDetailsImpl(DebugEvent event,
+                   Handle<JSObject> exec_state,
+                   Handle<JSObject> event_data,
+                   Handle<Object> callback_data);
+  virtual DebugEvent GetEvent() const;
+  virtual v8::Handle<v8::Object> GetExecutionState() const;
+  virtual v8::Handle<v8::Object> GetEventData() const;
+  virtual v8::Handle<v8::Context> GetEventContext() const;
+  virtual v8::Handle<v8::Value> GetCallbackData() const;
+ private:
+  DebugEvent event_;  // Debug event causing the break.
+  Handle<JSObject> exec_state_;  // Current execution state.
+  Handle<JSObject> event_data_;  // Data associated with the event.
+  Handle<Object> callback_data_;  // User data passed with the callback when
+                                  // it was registered.
+};
+
+
 // Message sent by user to v8 debugger or debugger output message.
 // In addition to command text it may contain a pointer to some user data
 // which are expected to be passed along with the command response to message
@@ -693,8 +714,9 @@
   static void set_loading_debugger(bool v) { is_loading_debugger_ = v; }
   static bool is_loading_debugger() { return Debugger::is_loading_debugger_; }
 
- private:
   static bool IsDebuggerActive();
+
+ private:
   static void ListenersChanged();
 
   static Mutex* debugger_access_;  // Mutex guarding debugger variables.
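
For orientation, a hedged sketch of an embedder-side listener written against
the EventDetails interface that EventDetailsImpl implements (the accessors
used below are confirmed by this patch; how the listener is registered,
presumably via the EventCallback2 variant of the listener-registration API in
include/v8-debug.h, is assumed):

    #include <v8-debug.h>

    static void OnDebugEvent(const v8::Debug::EventDetails& details) {
      if (details.GetEvent() == v8::Break) {
        v8::Handle<v8::Object> exec_state = details.GetExecutionState();
        v8::Handle<v8::Context> context = details.GetEventContext();
        // Inspect exec_state and details.GetEventData() within |context|.
      }
    }
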
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 490a2c5..c086df4 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -100,10 +100,10 @@
 DEFINE_bool(debug_code, false,
             "generate extra code (comments, assertions) for debugging")
 DEFINE_bool(emit_branch_hints, false, "emit branch hints")
-DEFINE_bool(push_pop_elimination, true,
-            "eliminate redundant push/pops in assembly code")
-DEFINE_bool(print_push_pop_elimination, false,
-            "print elimination of redundant push/pops in assembly code")
+DEFINE_bool(peephole_optimization, true,
+            "perform peephole optimizations in assembly code")
+DEFINE_bool(print_peephole_optimization, false,
+            "print peephole optimizations in assembly code")
 DEFINE_bool(enable_sse2, true,
             "enable use of SSE2 instructions if available")
 DEFINE_bool(enable_sse3, true,
@@ -182,6 +182,11 @@
 DEFINE_int(gc_interval, -1, "garbage collect after <n> allocations")
 DEFINE_bool(trace_gc, false,
             "print one trace line following each garbage collection")
+DEFINE_bool(trace_gc_nvp, false,
+            "print one detailed trace line in name=value format "
+            "after each garbage collection")
+DEFINE_bool(print_cumulative_gc_stat, false,
+            "print cumulative GC statistics in name=value format on exit")
 DEFINE_bool(trace_gc_verbose, false,
             "print more details following each garbage collection")
 DEFINE_bool(collect_maps, true,
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 699a1e9..2ccbca8 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -760,11 +760,6 @@
 }
 
 
-void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
-  UNREACHABLE();
-}
-
-
 void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
   Comment cmnt(masm_, "[ DoWhileStatement");
   SetStatementPosition(stmt);
@@ -810,6 +805,7 @@
   Visit(stmt->body());
 
   __ bind(loop_statement.continue_target());
+
   // Check stack before looping.
   __ StackLimitCheck(&stack_limit_hit);
   __ bind(&stack_check_success);
@@ -872,11 +868,6 @@
 }
 
 
-void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
-  UNREACHABLE();
-}
-
-
 void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
   Comment cmnt(masm_, "[ TryCatchStatement");
   SetStatementPosition(stmt);
@@ -995,12 +986,6 @@
 }
 
 
-void FullCodeGenerator::VisitSharedFunctionInfoLiteral(
-    SharedFunctionInfoLiteral* expr) {
-  UNREACHABLE();
-}
-
-
 void FullCodeGenerator::VisitConditional(Conditional* expr) {
   Comment cmnt(masm_, "[ Conditional");
   Label true_case, false_case, done;
@@ -1034,6 +1019,24 @@
 }
 
 
+void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+  Comment cmnt(masm_, "[ FunctionLiteral");
+
+  // Build the function boilerplate and instantiate it.
+  Handle<SharedFunctionInfo> function_info =
+      Compiler::BuildFunctionInfo(expr, script(), this);
+  if (HasStackOverflow()) return;
+  EmitNewClosure(function_info);
+}
+
+
+void FullCodeGenerator::VisitSharedFunctionInfoLiteral(
+    SharedFunctionInfoLiteral* expr) {
+  Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
+  EmitNewClosure(expr->shared_function_info());
+}
+
+
 void FullCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
   // Call runtime routine to allocate the catch extension object and
   // assign the exception value to the catch variable.
diff --git a/src/full-codegen.h b/src/full-codegen.h
index 96d0f3e..c7d0093 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,6 +31,7 @@
 #include "v8.h"
 
 #include "ast.h"
+#include "compiler.h"
 
 namespace v8 {
 namespace internal {
@@ -229,8 +230,6 @@
       return stack_depth + kForInStackElementCount;
     }
    private:
-    // TODO(lrn): Check that this value is correct when implementing
-    // for-in.
     static const int kForInStackElementCount = 5;
     DISALLOW_COPY_AND_ASSIGN(ForIn);
   };
@@ -258,12 +257,22 @@
   // context.
   void DropAndApply(int count, Expression::Context context, Register reg);
 
+  // Set up branch labels for a test expression.
+  void PrepareTest(Label* materialize_true,
+                   Label* materialize_false,
+                   Label** if_true,
+                   Label** if_false);
+
   // Emit code to convert pure control flow to a pair of labels into the
   // result expected according to an expression context.
   void Apply(Expression::Context context,
              Label* materialize_true,
              Label* materialize_false);
 
+  // Emit code to convert constant control flow (true or false) into
+  // the result expected according to an expression context.
+  void Apply(Expression::Context context, bool flag);
+
   // Helper function to convert a pure value into a test context.  The value
   // is expected on the stack or the accumulator, depending on the platform.
   // See the platform-specific implementation for details.
@@ -348,6 +357,12 @@
   void VisitDeclarations(ZoneList<Declaration*>* declarations);
   void DeclareGlobals(Handle<FixedArray> pairs);
 
+  // Platform-specific code for a variable, constant, or function
+  // declaration.  Functions have an initial value.
+  void EmitDeclaration(Variable* variable,
+                       Variable::Mode mode,
+                       FunctionLiteral* function);
+
   // Platform-specific return sequence
   void EmitReturnSequence(int position);
 
@@ -355,9 +370,48 @@
   void EmitCallWithStub(Call* expr);
   void EmitCallWithIC(Call* expr, Handle<Object> name, RelocInfo::Mode mode);
 
+
+  // Platform-specific code for inline runtime calls.
+  void EmitInlineRuntimeCall(CallRuntime* expr);
+  void EmitIsSmi(ZoneList<Expression*>* arguments);
+  void EmitIsNonNegativeSmi(ZoneList<Expression*>* arguments);
+  void EmitIsObject(ZoneList<Expression*>* arguments);
+  void EmitIsUndetectableObject(ZoneList<Expression*>* arguments);
+  void EmitIsFunction(ZoneList<Expression*>* arguments);
+  void EmitIsArray(ZoneList<Expression*>* arguments);
+  void EmitIsRegExp(ZoneList<Expression*>* arguments);
+  void EmitIsConstructCall(ZoneList<Expression*>* arguments);
+  void EmitObjectEquals(ZoneList<Expression*>* arguments);
+  void EmitArguments(ZoneList<Expression*>* arguments);
+  void EmitArgumentsLength(ZoneList<Expression*>* arguments);
+  void EmitClassOf(ZoneList<Expression*>* arguments);
+  void EmitValueOf(ZoneList<Expression*>* arguments);
+  void EmitSetValueOf(ZoneList<Expression*>* arguments);
+  void EmitNumberToString(ZoneList<Expression*>* arguments);
+  void EmitCharFromCode(ZoneList<Expression*>* arguments);
+  void EmitFastCharCodeAt(ZoneList<Expression*>* arguments);
+  void EmitStringCompare(ZoneList<Expression*>* arguments);
+  void EmitStringAdd(ZoneList<Expression*>* arguments);
+  void EmitLog(ZoneList<Expression*>* arguments);
+  void EmitRandomHeapNumber(ZoneList<Expression*>* arguments);
+  void EmitSubString(ZoneList<Expression*>* arguments);
+  void EmitRegExpExec(ZoneList<Expression*>* arguments);
+  void EmitMathPow(ZoneList<Expression*>* arguments);
+  void EmitMathSin(ZoneList<Expression*>* arguments);
+  void EmitMathCos(ZoneList<Expression*>* arguments);
+  void EmitMathSqrt(ZoneList<Expression*>* arguments);
+  void EmitCallFunction(ZoneList<Expression*>* arguments);
+  void EmitRegExpConstructResult(ZoneList<Expression*>* arguments);
+  void EmitSwapElements(ZoneList<Expression*>* arguments);
+  void EmitGetFromCache(ZoneList<Expression*>* arguments);
+
   // Platform-specific code for loading variables.
   void EmitVariableLoad(Variable* expr, Expression::Context context);
 
+  // Platform-specific support for allocating a new closure based on
+  // the given function info.
+  void EmitNewClosure(Handle<SharedFunctionInfo> info);
+
   // Platform-specific support for compiling assignments.
 
   // Load a value from a named property.
@@ -372,9 +426,15 @@
   // of the stack and the right one in the accumulator.
   void EmitBinaryOp(Token::Value op, Expression::Context context);
 
+  // Assign to the given expression as if via '='. The right-hand-side value
+  // is expected in the accumulator.
+  void EmitAssignment(Expression* expr);
+
   // Complete a variable assignment.  The right-hand-side value is expected
   // in the accumulator.
-  void EmitVariableAssignment(Variable* var, Expression::Context context);
+  void EmitVariableAssignment(Variable* var,
+                              Token::Value op,
+                              Expression::Context context);
 
   // Complete a named property assignment.  The receiver is expected on top
   // of the stack and the right-hand-side value in the accumulator.
@@ -385,6 +445,14 @@
   // accumulator.
   void EmitKeyedPropertyAssignment(Assignment* expr);
 
+  // Helper for compare operations. Expects the null value in a register.
+  void EmitNullCompare(bool strict,
+                       Register obj,
+                       Register null_const,
+                       Label* if_true,
+                       Label* if_false,
+                       Register scratch);
+
   void SetFunctionPosition(FunctionLiteral* fun);
   void SetReturnPosition(FunctionLiteral* fun);
   void SetStatementPosition(Statement* stmt);
diff --git a/src/globals.h b/src/globals.h
index 981ea16..292d8d8 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -59,6 +59,24 @@
 #error Host architecture was not detected as supported by v8
 #endif
 
+// Target architecture detection. The target architecture may be set
+// externally. If it is not, detect it in the same way as the host
+// architecture, that is, target the native environment as presented by the
+// compiler.
+#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_IA32) && \
+    !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS)
+#if defined(_M_X64) || defined(__x86_64__)
+#define V8_TARGET_ARCH_X64 1
+#elif defined(_M_IX86) || defined(__i386__)
+#define V8_TARGET_ARCH_IA32 1
+#elif defined(__ARMEL__)
+#define V8_TARGET_ARCH_ARM 1
+#elif defined(_MIPS_ARCH_MIPS32R2)
+#define V8_TARGET_ARCH_MIPS 1
+#else
+#error Target architecture was not detected as supported by v8
+#endif
+#endif
+
 // Check for supported combinations of host and target architectures.
 #if defined(V8_TARGET_ARCH_IA32) && !defined(V8_HOST_ARCH_IA32)
 #error Target architecture ia32 is only supported on ia32 host
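
A hedged standalone probe showing how the fallback detection above resolves
when no V8_TARGET_ARCH_* macro is passed in (illustrative; when
cross-compiling, e.g. for the ARM simulator, the build system is expected to
define the macro explicitly, as in -DV8_TARGET_ARCH_ARM):

    #include <cstdio>

    // Same shape as globals.h: honor an external V8_TARGET_ARCH_*, otherwise
    // fall back to the compiler's host macros.
    #if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_IA32) && \
        !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS)
    #if defined(_M_X64) || defined(__x86_64__)
    #define V8_TARGET_ARCH_X64 1
    #elif defined(_M_IX86) || defined(__i386__)
    #define V8_TARGET_ARCH_IA32 1
    #elif defined(__ARMEL__)
    #define V8_TARGET_ARCH_ARM 1
    #elif defined(_MIPS_ARCH_MIPS32R2)
    #define V8_TARGET_ARCH_MIPS 1
    #endif
    #endif

    int main() {
    #if defined(V8_TARGET_ARCH_X64)
      std::printf("target: x64\n");
    #elif defined(V8_TARGET_ARCH_IA32)
      std::printf("target: ia32\n");
    #elif defined(V8_TARGET_ARCH_ARM)
      std::printf("target: arm\n");
    #elif defined(V8_TARGET_ARCH_MIPS)
      std::printf("target: mips\n");
    #else
      std::printf("target: unknown\n");
    #endif
      return 0;
    }
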
diff --git a/src/handles.cc b/src/handles.cc
index 1d4465f..c90365c 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -399,6 +399,11 @@
 }
 
 
+Handle<Object> SetAccessor(Handle<JSObject> obj, Handle<AccessorInfo> info) {
+  CALL_HEAP_FUNCTION(obj->DefineAccessor(*info), Object);
+}
+
+
 // Wrappers for scripts are kept alive and cached in weak global
 // handles referred from proxy objects held by the scripts as long as
 // they are used. When they are not used anymore, the garbage
diff --git a/src/handles.h b/src/handles.h
index ea13def..96b17a6 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -262,6 +262,8 @@
 
 Handle<JSObject> Copy(Handle<JSObject> obj);
 
+Handle<Object> SetAccessor(Handle<JSObject> obj, Handle<AccessorInfo> info);
+
 Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray>,
                                       Handle<JSArray> array);
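
The SetAccessor wrapper above backs the new ability to define accessors on
concrete objects, not just object templates. A hedged embedder-side usage
sketch (assuming the public entry point mirrors ObjectTemplate::SetAccessor;
consult include/v8.h in this revision for the exact signature):

    static v8::Handle<v8::Value> XGetter(v8::Local<v8::String> name,
                                         const v8::AccessorInfo& info) {
      return v8::Integer::New(42);  // Computed value of obj.x.
    }

    void AddAccessorToExistingObject(v8::Handle<v8::Object> obj) {
      obj->SetAccessor(v8::String::New("x"), XGetter);
    }
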
 
diff --git a/src/heap.cc b/src/heap.cc
index 0a276ca..d554a3b 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -115,8 +115,11 @@
 Heap::HeapState Heap::gc_state_ = NOT_IN_GC;
 
 int Heap::mc_count_ = 0;
+int Heap::ms_count_ = 0;
 int Heap::gc_count_ = 0;
 
+GCTracer* Heap::tracer_ = NULL;
+
 int Heap::unflattened_strings_length_ = 0;
 
 int Heap::always_allocate_scope_depth_ = 0;
@@ -130,6 +133,11 @@
 bool Heap::disallow_allocation_failure_ = false;
 #endif  // DEBUG
 
+int GCTracer::alive_after_last_gc_ = 0;
+double GCTracer::last_gc_end_timestamp_ = 0.0;
+int GCTracer::max_gc_pause_ = 0;
+int GCTracer::max_alive_after_gc_ = 0;
+int GCTracer::min_in_mutator_ = kMaxInt;
 
 int Heap::Capacity() {
   if (!HasBeenSetup()) return 0;
@@ -570,7 +578,7 @@
   VerifySymbolTable();
   if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
     ASSERT(!allocation_allowed_);
-    GCTracer::ExternalScope scope(tracer);
+    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
     global_gc_prologue_callback_();
   }
 
@@ -596,14 +604,16 @@
         old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
     old_gen_exhausted_ = false;
   } else {
+    tracer_ = tracer;
     Scavenge();
+    tracer_ = NULL;
   }
 
   Counters::objs_since_last_young.Set(0);
 
   if (collector == MARK_COMPACTOR) {
     DisableAssertNoAllocation allow_allocation;
-    GCTracer::ExternalScope scope(tracer);
+    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
     GlobalHandles::PostGarbageCollectionProcessing();
   }
 
@@ -627,7 +637,7 @@
 
   if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
     ASSERT(!allocation_allowed_);
-    GCTracer::ExternalScope scope(tracer);
+    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
     global_gc_epilogue_callback_();
   }
   VerifySymbolTable();
@@ -636,7 +646,11 @@
 
 void Heap::MarkCompact(GCTracer* tracer) {
   gc_state_ = MARK_COMPACT;
-  mc_count_++;
+  if (MarkCompactCollector::IsCompacting()) {
+    mc_count_++;
+  } else {
+    ms_count_++;
+  }
   tracer->set_full_gc_count(mc_count_);
   LOG(ResourceEvent("markcompact", "begin"));
 
@@ -1179,6 +1193,7 @@
         node->set_size(object_size);
 
         *p = target;
+        tracer()->increment_promoted_objects_size(object_size);
         return;
       }
     } else {
@@ -1214,6 +1229,7 @@
           (*p)->Iterate(&v);
 #endif
         }
+        tracer()->increment_promoted_objects_size(object_size);
         return;
       }
     }
@@ -2064,7 +2080,7 @@
   }
 
   // Make an attempt to flatten the buffer to reduce access time.
-  buffer->TryFlatten();
+  buffer = buffer->TryFlattenGetString();
 
   Object* result = buffer->IsAsciiRepresentation()
       ? AllocateRawAsciiString(length, pretenure )
@@ -3760,6 +3776,17 @@
 
 
 void Heap::TearDown() {
+  if (FLAG_print_cumulative_gc_stat) {
+    PrintF("\n\n");
+    PrintF("gc_count=%d ", gc_count_);
+    PrintF("mark_sweep_count=%d ", ms_count_);
+    PrintF("mark_compact_count=%d ", mc_count_);
+    PrintF("max_gc_pause=%d ", GCTracer::get_max_gc_pause());
+    PrintF("min_in_mutator=%d ", GCTracer::get_min_in_mutator());
+    PrintF("max_alive_after_gc=%d ", GCTracer::get_max_alive_after_gc());
+    PrintF("\n\n");
+  }
+
   GlobalHandles::TearDown();
 
   ExternalStringTable::TearDown();
@@ -4235,33 +4262,114 @@
 #endif
 
 
+static int CountTotalHolesSize() {
+  int holes_size = 0;
+  OldSpaces spaces;
+  for (OldSpace* space = spaces.next();
+       space != NULL;
+       space = spaces.next()) {
+    holes_size += space->Waste() + space->AvailableFree();
+  }
+  return holes_size;
+}
+
+
 GCTracer::GCTracer()
     : start_time_(0.0),
-      start_size_(0.0),
-      external_time_(0.0),
+      start_size_(0),
       gc_count_(0),
       full_gc_count_(0),
       is_compacting_(false),
-      marked_count_(0) {
+      marked_count_(0),
+      allocated_since_last_gc_(0),
+      spent_in_mutator_(0),
+      promoted_objects_size_(0) {
   // These two fields reflect the state of the previous full collection.
   // Set them before they are changed by the collector.
   previous_has_compacted_ = MarkCompactCollector::HasCompacted();
   previous_marked_count_ = MarkCompactCollector::previous_marked_count();
-  if (!FLAG_trace_gc) return;
+  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
   start_time_ = OS::TimeCurrentMillis();
-  start_size_ = SizeOfHeapObjects();
+  start_size_ = Heap::SizeOfObjects();
+
+  for (int i = 0; i < Scope::kNumberOfScopes; i++) {
+    scopes_[i] = 0;
+  }
+
+  in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();
+
+  allocated_since_last_gc_ = Heap::SizeOfObjects() - alive_after_last_gc_;
+
+  if (last_gc_end_timestamp_ > 0) {
+    spent_in_mutator_ = Max(start_time_ - last_gc_end_timestamp_, 0.0);
+  }
 }
 
 
 GCTracer::~GCTracer() {
-  if (!FLAG_trace_gc) return;
   // Printf ONE line iff flag is set.
-  int time = static_cast<int>(OS::TimeCurrentMillis() - start_time_);
-  int external_time = static_cast<int>(external_time_);
-  PrintF("%s %.1f -> %.1f MB, ",
-         CollectorString(), start_size_, SizeOfHeapObjects());
-  if (external_time > 0) PrintF("%d / ", external_time);
-  PrintF("%d ms.\n", time);
+  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
+
+  bool first_gc = (last_gc_end_timestamp_ == 0);
+
+  alive_after_last_gc_ = Heap::SizeOfObjects();
+  last_gc_end_timestamp_ = OS::TimeCurrentMillis();
+
+  int time = static_cast<int>(last_gc_end_timestamp_ - start_time_);
+
+  // Update cumulative GC statistics if required.
+  if (FLAG_print_cumulative_gc_stat) {
+    max_gc_pause_ = Max(max_gc_pause_, time);
+    max_alive_after_gc_ = Max(max_alive_after_gc_, alive_after_last_gc_);
+    if (!first_gc) {
+      min_in_mutator_ = Min(min_in_mutator_,
+                            static_cast<int>(spent_in_mutator_));
+    }
+  }
+
+  if (!FLAG_trace_gc_nvp) {
+    int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
+
+    PrintF("%s %.1f -> %.1f MB, ",
+           CollectorString(),
+           static_cast<double>(start_size_) / MB,
+           SizeOfHeapObjects());
+
+    if (external_time > 0) PrintF("%d / ", external_time);
+    PrintF("%d ms.\n", time);
+  } else {
+    PrintF("pause=%d ", time);
+    PrintF("mutator=%d ",
+           static_cast<int>(spent_in_mutator_));
+
+    PrintF("gc=");
+    switch (collector_) {
+      case SCAVENGER:
+        PrintF("s");
+        break;
+      case MARK_COMPACTOR:
+        PrintF(MarkCompactCollector::HasCompacted() ? "mc" : "ms");
+        break;
+      default:
+        UNREACHABLE();
+    }
+    PrintF(" ");
+
+    PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
+    PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
+    PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
+    PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));
+
+    PrintF("total_size_before=%d ", start_size_);
+    PrintF("total_size_after=%d ", Heap::SizeOfObjects());
+    PrintF("holes_size_before=%d ", in_free_list_or_wasted_before_gc_);
+    PrintF("holes_size_after=%d ", CountTotalHolesSize());
+
+    PrintF("allocated=%d ", allocated_since_last_gc_);
+    PrintF("promoted=%d ", promoted_objects_size_);
+
+    PrintF("\n");
+  }
 
 #if defined(ENABLE_LOGGING_AND_PROFILING)
   Heap::PrintShortHeapStatistics();
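
With --trace-gc-nvp the destructor above now emits one machine-parsable
name=value line per collection, assembled by the PrintF calls shown; an
illustrative line (values invented) looks like:

    pause=14 mutator=352 gc=ms external=0 mark=9 sweep=4 compact=0 total_size_before=4063232 total_size_after=2097152 holes_size_before=131072 holes_size_after=65536 allocated=1966080 promoted=524288

--print-cumulative-gc-stat similarly makes Heap::TearDown print the aggregate
counters (gc_count, max_gc_pause, min_in_mutator, max_alive_after_gc) on exit.
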
diff --git a/src/heap.h b/src/heap.h
index b4af6d9..74e5a31 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -981,6 +981,8 @@
 
   static void ClearJSFunctionResultCaches();
 
+  static GCTracer* tracer() { return tracer_; }
+
  private:
   static int reserved_semispace_size_;
   static int max_semispace_size_;
@@ -1020,6 +1022,7 @@
   static int PromotedExternalMemorySize();
 
   static int mc_count_;  // how many mark-compact collections happened
+  static int ms_count_;  // how many mark-sweep collections happened
   static int gc_count_;  // how many gc happened
 
   // Total length of the strings we failed to flatten since the last GC.
@@ -1223,6 +1226,8 @@
                                            SharedFunctionInfo* shared,
                                            Object* prototype);
 
+  static GCTracer* tracer_;
+
 
   // Initializes the number to string cache based on the max semispace size.
   static Object* InitializeNumberStringCache();
@@ -1629,19 +1634,30 @@
 
 class GCTracer BASE_EMBEDDED {
  public:
-  // Time spent while in the external scope counts towards the
-  // external time in the tracer and will be reported separately.
-  class ExternalScope BASE_EMBEDDED {
+  class Scope BASE_EMBEDDED {
    public:
-    explicit ExternalScope(GCTracer* tracer) : tracer_(tracer) {
+    enum ScopeId {
+      EXTERNAL,
+      MC_MARK,
+      MC_SWEEP,
+      MC_COMPACT,
+      kNumberOfScopes
+    };
+
+    Scope(GCTracer* tracer, ScopeId scope)
+        : tracer_(tracer),
+          scope_(scope) {
       start_time_ = OS::TimeCurrentMillis();
     }
-    ~ExternalScope() {
-      tracer_->external_time_ += OS::TimeCurrentMillis() - start_time_;
+
+    ~Scope() {
+      ASSERT((0 <= scope_) && (scope_ < kNumberOfScopes));
+      tracer_->scopes_[scope_] += OS::TimeCurrentMillis() - start_time_;
     }
 
    private:
     GCTracer* tracer_;
+    ScopeId scope_;
     double start_time_;
   };
 
@@ -1667,6 +1683,19 @@
 
   int marked_count() { return marked_count_; }
 
+  void increment_promoted_objects_size(int object_size) {
+    promoted_objects_size_ += object_size;
+  }
+
+  // Returns the maximum GC pause.
+  static int get_max_gc_pause() { return max_gc_pause_; }
+
+  // Returns the maximum size of objects alive after GC.
+  static int get_max_alive_after_gc() { return max_alive_after_gc_; }
+
+  // Returns the minimum interval between two consecutive collections.
+  static int get_min_in_mutator() { return min_in_mutator_; }
+
  private:
   // Returns a string matching the collector.
   const char* CollectorString();
@@ -1677,12 +1706,9 @@
   }
 
   double start_time_;  // Timestamp set in the constructor.
-  double start_size_;  // Size of objects in heap set in constructor.
+  int start_size_;  // Size of objects in heap set in constructor.
   GarbageCollector collector_;  // Type of collector.
 
-  // Keep track of the amount of time spent in external callbacks.
-  double external_time_;
-
   // A count (including this one, e.g., the first collection is 1) of the
   // number of garbage collections.
   int gc_count_;
@@ -1706,6 +1732,38 @@
   // The count from the end of the previous full GC.  Will be zero if there
   // was no previous full GC.
   int previous_marked_count_;
+
+  // Amounts of time spent in different scopes during GC.
+  double scopes_[Scope::kNumberOfScopes];
+
+  // Total amount of space either wasted or contained in one of free lists
+  // before the current GC.
+  int in_free_list_or_wasted_before_gc_;
+
+  // Difference between space used in the heap at the beginning of the current
+  // collection and the end of the previous collection.
+  int allocated_since_last_gc_;
+
+  // Amount of time spent in the mutator: the time elapsed between the end
+  // of the previous collection and the beginning of the current one.
+  double spent_in_mutator_;
+
+  // Size of objects promoted during the current collection.
+  int promoted_objects_size_;
+
+  // Maximum GC pause.
+  static int max_gc_pause_;
+
+  // Maximum size of objects alive after GC.
+  static int max_alive_after_gc_;
+
+  // Minimal interval between two subsequent collections.
+  static int min_in_mutator_;
+
+  // Size of objects alive after last GC.
+  static int alive_after_last_gc_;
+
+  static double last_gc_end_timestamp_;
 };
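
GCTracer::Scope is a plain RAII timer: the constructor records a start time
and the destructor adds the elapsed milliseconds to the tracer's per-scope
accumulator, which the --trace-gc-nvp output reads back as the external=,
mark=, sweep= and compact= fields. A minimal usage sketch (hypothetical call
site; the real collector wraps its phases this way):

    void MarkPhase(GCTracer* tracer) {
      GCTracer::Scope scope(tracer, GCTracer::Scope::MC_MARK);
      // ... marking work; on scope exit the elapsed time is added to
      // scopes_[Scope::MC_MARK].
    }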
 
 
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index 6dc584e..9c96e19 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -38,6 +38,7 @@
 #define V8_IA32_ASSEMBLER_IA32_INL_H_
 
 #include "cpu.h"
+#include "debug.h"
 
 namespace v8 {
 namespace internal {
@@ -77,6 +78,11 @@
 }
 
 
+int RelocInfo::target_address_size() {
+  return Assembler::kExternalTargetSize;
+}
+
+
 void RelocInfo::set_target_address(Address target) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
   Assembler::set_target_address_at(pc_, target);
@@ -148,6 +154,26 @@
 }
 
 
+void RelocInfo::Visit(ObjectVisitor* visitor) {
+  RelocInfo::Mode mode = rmode();
+  if (mode == RelocInfo::EMBEDDED_OBJECT) {
+    visitor->VisitPointer(target_object_address());
+  } else if (RelocInfo::IsCodeTarget(mode)) {
+    visitor->VisitCodeTarget(this);
+  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+    visitor->VisitExternalReference(target_reference_address());
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  } else if (Debug::has_break_points() &&
+             RelocInfo::IsJSReturn(mode) &&
+             IsPatchedReturnSequence()) {
+    visitor->VisitDebugTarget(this);
+#endif
+  } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+    visitor->VisitRuntimeEntry(this);
+  }
+}
+
+
 Immediate::Immediate(int x)  {
   x_ = x;
   rmode_ = RelocInfo::NONE;
@@ -159,11 +185,6 @@
   rmode_ = RelocInfo::EXTERNAL_REFERENCE;
 }
 
-Immediate::Immediate(const char* s) {
-  x_ = reinterpret_cast<int32_t>(s);
-  rmode_ = RelocInfo::EMBEDDED_STRING;
-}
-
 
 Immediate::Immediate(Label* internal_offset) {
   x_ = reinterpret_cast<int32_t>(internal_offset);
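
RelocInfo::Visit centralizes the mode dispatch that visitors previously
open-coded at each call site. With the existing RelocIterator API, walking a
code object's relocation entries reduces to a loop like this (a sketch, not
part of this patch):

    void VisitRelocEntries(Code* code, ObjectVisitor* visitor) {
      for (RelocIterator it(code); !it.done(); it.next()) {
        it.rinfo()->Visit(visitor);  // dispatch on the entry's reloc mode
      }
    }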
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 26e40b1..4690c67 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -36,6 +36,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_IA32)
+
 #include "disassembler.h"
 #include "macro-assembler.h"
 #include "serialize.h"
@@ -160,6 +162,15 @@
     1 << RelocInfo::JS_RETURN | 1 << RelocInfo::INTERNAL_REFERENCE;
 
 
+bool RelocInfo::IsCodedSpecially() {
+  // The deserializer needs to know whether a pointer is specially coded.  Being
+  // specially coded on IA32 means that it is a relative address, as used by
+  // branch instructions.  These are also the ones that need changing when a
+  // code object moves.
+  return (1 << rmode_) & kApplyMask;
+}
+
+
 void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
   // Patch the code at the current address with the supplied instructions.
   for (int i = 0; i < instruction_count; i++) {
@@ -433,7 +444,7 @@
 
 void Assembler::pop(Register dst) {
   ASSERT(reloc_info_writer.last_pc() != NULL);
-  if (FLAG_push_pop_elimination && (reloc_info_writer.last_pc() <= last_pc_)) {
+  if (FLAG_peephole_optimization && (reloc_info_writer.last_pc() <= last_pc_)) {
     // (last_pc_ != NULL) is rolled into the above check.
     // If a last_pc_ is set, we need to make sure that there has not been any
     // relocation information generated between the last instruction and this
@@ -443,7 +454,7 @@
       int push_reg_code = instr & 0x7;
       if (push_reg_code == dst.code()) {
         pc_ = last_pc_;
-        if (FLAG_print_push_pop_elimination) {
+        if (FLAG_print_peephole_optimization) {
           PrintF("%d push/pop (same reg) eliminated\n", pc_offset());
         }
       } else {
@@ -452,7 +463,7 @@
         Register src = { push_reg_code };
         EnsureSpace ensure_space(this);
         emit_operand(dst, Operand(src));
-        if (FLAG_print_push_pop_elimination) {
+        if (FLAG_print_peephole_optimization) {
           PrintF("%d push/pop (reg->reg) eliminated\n", pc_offset());
         }
       }
@@ -466,7 +477,7 @@
         last_pc_[0] = 0x8b;
         last_pc_[1] = op1;
         last_pc_ = NULL;
-        if (FLAG_print_push_pop_elimination) {
+        if (FLAG_print_peephole_optimization) {
           PrintF("%d push/pop (op->reg) eliminated\n", pc_offset());
         }
         return;
@@ -483,7 +494,7 @@
         last_pc_[1] = 0xc4;
         last_pc_[2] = 0x04;
         last_pc_ = NULL;
-        if (FLAG_print_push_pop_elimination) {
+        if (FLAG_print_peephole_optimization) {
           PrintF("%d push/pop (mov-pop) eliminated\n", pc_offset());
         }
         return;
@@ -498,7 +509,7 @@
         // change to
         // 31c0         xor eax,eax
         last_pc_ = NULL;
-        if (FLAG_print_push_pop_elimination) {
+        if (FLAG_print_peephole_optimization) {
           PrintF("%d push/pop (imm->reg) eliminated\n", pc_offset());
         }
         return;
@@ -521,7 +532,7 @@
           // b8XX000000   mov eax,0x000000XX
         }
         last_pc_ = NULL;
-        if (FLAG_print_push_pop_elimination) {
+        if (FLAG_print_peephole_optimization) {
           PrintF("%d push/pop (imm->reg) eliminated\n", pc_offset());
         }
         return;
@@ -533,7 +544,7 @@
       last_pc_ = NULL;
       // change to
       // b8XXXXXXXX   mov eax,0xXXXXXXXX
-      if (FLAG_print_push_pop_elimination) {
+      if (FLAG_print_peephole_optimization) {
         PrintF("%d push/pop (imm->reg) eliminated\n", pc_offset());
       }
       return;
@@ -776,6 +787,13 @@
 }
 
 
+void Assembler::stos() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xAB);
+}
+
+
 void Assembler::xchg(Register dst, Register src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -813,7 +831,7 @@
 
 void Assembler::add(const Operand& dst, const Immediate& x) {
   ASSERT(reloc_info_writer.last_pc() != NULL);
-  if (FLAG_push_pop_elimination && (reloc_info_writer.last_pc() <= last_pc_)) {
+  if (FLAG_peephole_optimization && (reloc_info_writer.last_pc() <= last_pc_)) {
     byte instr = last_pc_[0];
     if ((instr & 0xf8) == 0x50) {
       // Last instruction was a push. Check whether this is a pop without a
@@ -822,7 +840,7 @@
           (x.x_ == kPointerSize) && (x.rmode_ == RelocInfo::NONE)) {
         pc_ = last_pc_;
         last_pc_ = NULL;
-        if (FLAG_print_push_pop_elimination) {
+        if (FLAG_print_peephole_optimization) {
           PrintF("%d push/pop(noreg) eliminated\n", pc_offset());
         }
         return;
@@ -2528,3 +2546,5 @@
 #endif
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_IA32
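
The flag rename (push_pop_elimination -> peephole_optimization) reflects that
the pass is a general peephole over the most recently emitted instruction,
not just push/pop pairing. The simplest pattern it removes, from
Assembler::pop() above (illustrative):

    // 50    push eax    <- last_pc_ points here
    // 58    pop  eax    <- instruction being assembled
    //
    // If no relocation info was written since the push, pc_ is rewound to
    // last_pc_ and neither instruction reaches the final code.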
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 6a7effd..9ece744 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -194,7 +194,6 @@
 class Immediate BASE_EMBEDDED {
  public:
   inline explicit Immediate(int x);
-  inline explicit Immediate(const char* s);
   inline explicit Immediate(const ExternalReference& ext);
   inline explicit Immediate(Handle<Object> handle);
   inline explicit Immediate(Smi* value);
@@ -551,6 +550,7 @@
   // Repetitive string instructions.
   void rep_movs();
   void rep_stos();
+  void stos();
 
   // Exchange two registers
   void xchg(Register dst, Register src);
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 2db21d5..6086258 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_IA32)
+
 #include "codegen-inl.h"
 
 namespace v8 {
@@ -806,6 +808,7 @@
                             Label* gc_required) {
   ASSERT(scratch.is(edi));  // rep stos destination
   ASSERT(!fill_with_hole || array_size.is(ecx));  // rep stos count
+  ASSERT(!fill_with_hole || !result.is(eax));  // result is never eax
 
   // Load the initial map from the array function.
   __ mov(elements_array,
@@ -863,15 +866,22 @@
   if (fill_with_hole) {
     __ lea(edi, Operand(elements_array,
                         FixedArray::kHeaderSize - kHeapObjectTag));
-
-    __ push(eax);
     __ mov(eax, Factory::the_hole_value());
-
     __ cld();
+    // Do not use rep stos when filling less than kRepStosThreshold
+    // words.
+    const int kRepStosThreshold = 16;
+    Label loop, entry, done;
+    __ cmp(ecx, kRepStosThreshold);
+    __ j(below, &loop);  // Note: ecx > 0.
     __ rep_stos();
-
-    // Restore saved registers.
-    __ pop(eax);
+    __ jmp(&done);
+    __ bind(&loop);
+    __ stos();
+    __ bind(&entry);
+    __ cmp(edi, Operand(elements_array_end));
+    __ j(below, &loop);
+    __ bind(&done);
   }
 }
 
@@ -970,13 +980,14 @@
   AllocateJSArray(masm,
                   edi,
                   ecx,
-                  eax,
                   ebx,
+                  eax,
                   edx,
                   edi,
                   true,
                   &prepare_generic_code_call);
   __ IncrementCounter(&Counters::array_function_native, 1);
+  __ mov(eax, ebx);
   __ pop(ebx);
   if (construct_call) {
     __ pop(edi);
@@ -1247,3 +1258,5 @@
 #undef __
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_IA32
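
The kRepStosThreshold split in AllocateJSArray exists because rep stos has a
fixed startup cost on IA32 that only pays off for larger fills; short arrays
fill faster with a plain stos loop. The emitted control flow is equivalent to
this sketch (illustrative; register roles as in the code above):

    // edi = destination, eax = the_hole, ecx = word count (> 0)
    if (count >= 16) {            // kRepStosThreshold
      rep_stos();                 // bulk fill in one instruction
    } else {
      do {
        *dst++ = the_hole;        // one stos per iteration
      } while (dst < elements_array_end);
    }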
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 4393e44..c55ec7b 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_IA32)
+
 #include "bootstrapper.h"
 #include "codegen-inl.h"
 #include "compiler.h"
@@ -693,9 +695,7 @@
   } else if (variable != NULL && variable->slot() != NULL) {
     // For a variable that rewrites to a slot, we signal it is the immediate
     // subexpression of a typeof.
-    Result result =
-        LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
-    frame()->Push(&result);
+    LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
   } else {
     // Anything else can be handled normally.
     Load(expr);
@@ -744,7 +744,8 @@
     // We have to skip storing into the arguments slot if it has already
     // been written to. This can happen if a function has a local
     // variable named 'arguments'.
-    Result probe = LoadFromSlot(arguments->slot(), NOT_INSIDE_TYPEOF);
+    LoadFromSlot(arguments->slot(), NOT_INSIDE_TYPEOF);
+    Result probe = frame_->Pop();
     if (probe.is_constant()) {
       // We have to skip updating the arguments object if it has
       // been assigned a proper value.
@@ -2979,6 +2980,7 @@
   int arg_count = args->length();
   for (int i = 0; i < arg_count; i++) {
     Load(args->at(i));
+    frame_->SpillTop();
   }
 
   // Record the position for debugging purposes.
@@ -3023,9 +3025,7 @@
   // Load the receiver and the existing arguments object onto the
   // expression stack. Avoid allocating the arguments object here.
   Load(receiver);
-  Result existing_args =
-      LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
-  frame()->Push(&existing_args);
+  LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
 
   // Emit the source position information after having loaded the
   // receiver and the arguments.
@@ -4716,19 +4716,19 @@
 }
 
 
-Result CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
-  Result result;
+void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
   if (slot->type() == Slot::LOOKUP) {
     ASSERT(slot->var()->is_dynamic());
     JumpTarget slow;
     JumpTarget done;
+    Result value;
 
     // Generate fast case for loading from slots that correspond to
     // local/global variables or arguments unless they are shadowed by
     // eval-introduced bindings.
     EmitDynamicLoadFromSlotFastCase(slot,
                                     typeof_state,
-                                    &result,
+                                    &value,
                                     &slow,
                                     &done);
 
@@ -4740,14 +4740,14 @@
     frame()->EmitPush(esi);
     frame()->EmitPush(Immediate(slot->var()->name()));
     if (typeof_state == INSIDE_TYPEOF) {
-      result =
+      value =
           frame()->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
     } else {
-      result = frame()->CallRuntime(Runtime::kLoadContextSlot, 2);
+      value = frame()->CallRuntime(Runtime::kLoadContextSlot, 2);
     }
 
-    done.Bind(&result);
-    return result;
+    done.Bind(&value);
+    frame_->Push(&value);
 
   } else if (slot->var()->mode() == Variable::CONST) {
     // Const slots may contain 'the hole' value (the constant hasn't been
@@ -4764,15 +4764,13 @@
     __ j(not_equal, &exit);
     __ mov(ecx, Factory::undefined_value());
     __ bind(&exit);
-    return Result(ecx);
+    frame()->EmitPush(ecx);
 
   } else if (slot->type() == Slot::PARAMETER) {
     frame()->PushParameterAt(slot->index());
-    return frame()->Pop();
 
   } else if (slot->type() == Slot::LOCAL) {
     frame()->PushLocalAt(slot->index());
-    return frame()->Pop();
 
   } else {
     // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
@@ -4781,46 +4779,48 @@
     // The use of SlotOperand below is safe for an unspilled frame
     // because it will always be a context slot.
     ASSERT(slot->type() == Slot::CONTEXT);
-    result = allocator()->Allocate();
-    ASSERT(result.is_valid());
-    __ mov(result.reg(), SlotOperand(slot, result.reg()));
-    return result;
+    Result temp = allocator()->Allocate();
+    ASSERT(temp.is_valid());
+    __ mov(temp.reg(), SlotOperand(slot, temp.reg()));
+    frame()->Push(&temp);
   }
 }
 
 
-Result CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
+void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
-                                                    TypeofState state) {
+                                                  TypeofState state) {
-  Result result = LoadFromSlot(slot, state);
+  LoadFromSlot(slot, state);
 
   // Bail out quickly if we're not using lazy arguments allocation.
-  if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return result;
+  if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
 
   // ... or if the slot isn't a non-parameter arguments slot.
-  if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return result;
+  if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
 
   // If the loaded value is a constant, we know if the arguments
   // object has been lazily loaded yet.
+  Result result = frame()->Pop();
   if (result.is_constant()) {
     if (result.handle()->IsTheHole()) {
-      result.Unuse();
-      return StoreArgumentsObject(false);
-    } else {
-      return result;
+      result = StoreArgumentsObject(false);
     }
+    frame()->Push(&result);
+    return;
   }
-
+  ASSERT(result.is_register());
   // The loaded value is in a register. If it is the sentinel that
   // indicates that we haven't loaded the arguments object yet, we
   // need to do it now.
   JumpTarget exit;
   __ cmp(Operand(result.reg()), Immediate(Factory::the_hole_value()));
-  exit.Branch(not_equal, &result);
+  frame()->Push(&result);
+  exit.Branch(not_equal);
 
-  result.Unuse();
   result = StoreArgumentsObject(false);
-  exit.Bind(&result);
-  return result;
+  frame()->SetElementAt(0, &result);
+  result.Unuse();
+  exit.Bind();
+  return;
 }
 
 
@@ -5070,8 +5070,7 @@
       UNREACHABLE();
     }
   } else {
-    Result result = LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
-    frame()->Push(&result);
+    LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
   }
 }
 
@@ -5392,8 +5391,7 @@
   if (node->is_compound()) {
     // For a compound assignment the right-hand side is a binary operation
     // between the current property value and the actual right-hand side.
-    Result result = LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
-    frame()->Push(&result);
+    LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
     Load(node->value());
 
     // Perform the binary operation.
@@ -5730,6 +5728,7 @@
     int arg_count = args->length();
     for (int i = 0; i < arg_count; i++) {
       Load(args->at(i));
+      frame_->SpillTop();
     }
 
     // Prepare the stack for the call to ResolvePossiblyDirectEval.
@@ -5779,6 +5778,7 @@
     int arg_count = args->length();
     for (int i = 0; i < arg_count; i++) {
       Load(args->at(i));
+      frame_->SpillTop();
     }
 
     // Push the name of the function onto the frame.
@@ -5884,6 +5884,7 @@
         int arg_count = args->length();
         for (int i = 0; i < arg_count; i++) {
           Load(args->at(i));
+          frame_->SpillTop();
         }
 
         // Push the name of the function onto the frame.
@@ -6159,11 +6160,11 @@
   __ mov(map.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
   __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
   __ cmp(map.reg(), FIRST_JS_OBJECT_TYPE);
-  destination()->false_target()->Branch(less);
+  destination()->false_target()->Branch(below);
   __ cmp(map.reg(), LAST_JS_OBJECT_TYPE);
   obj.Unuse();
   map.Unuse();
-  destination()->Split(less_equal);
+  destination()->Split(below_equal);
 }
 
 
@@ -6276,7 +6277,7 @@
     __ mov(obj.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset));
     __ movzx_b(tmp.reg(), FieldOperand(obj.reg(), Map::kInstanceTypeOffset));
     __ cmp(tmp.reg(), FIRST_JS_OBJECT_TYPE);
-    null.Branch(less);
+    null.Branch(below);
 
     // As long as JS_FUNCTION_TYPE is the last instance type and it is
     // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
@@ -6866,7 +6867,7 @@
   // Check that object doesn't require security checks and
   // has no indexed interceptor.
   __ CmpObjectType(object.reg(), FIRST_JS_OBJECT_TYPE, tmp1.reg());
-  deferred->Branch(less);
+  deferred->Branch(below);
   __ movzx_b(tmp1.reg(), FieldOperand(tmp1.reg(), Map::kBitFieldOffset));
   __ test(tmp1.reg(), Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
   deferred->Branch(not_zero);
@@ -8186,11 +8187,11 @@
       __ mov(map.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
       __ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kInstanceTypeOffset));
       __ cmp(map.reg(), FIRST_JS_OBJECT_TYPE);
-      destination()->false_target()->Branch(less);
+      destination()->false_target()->Branch(below);
       __ cmp(map.reg(), LAST_JS_OBJECT_TYPE);
       answer.Unuse();
       map.Unuse();
-      destination()->Split(less_equal);
+      destination()->Split(below_equal);
     } else {
       // Uncommon case: typeof testing against a string literal that is
       // never returned from the typeof operator.
@@ -8597,16 +8598,16 @@
   if (loop_nesting() > 0) {
     Comment cmnt(masm_, "[ Inlined load from keyed Property");
 
-    Result key = frame_->Pop();
-    Result receiver = frame_->Pop();
-    key.ToRegister();
-    receiver.ToRegister();
-
     // Use a fresh temporary to load the elements without destroying
     // the receiver which is needed for the deferred slow case.
     Result elements = allocator()->Allocate();
     ASSERT(elements.is_valid());
 
+    Result key = frame_->Pop();
+    Result receiver = frame_->Pop();
+    key.ToRegister();
+    receiver.ToRegister();
+
     // Use a fresh temporary for the index and later the loaded
     // value.
     result = allocator()->Allocate();
@@ -8620,6 +8621,7 @@
     __ test(receiver.reg(), Immediate(kSmiTagMask));
     deferred->Branch(zero);
 
+    // Check that the receiver has the expected map.
     // Initially, use an invalid map. The map is patched in the IC
     // initialization code.
     __ bind(deferred->patch_site());
@@ -8653,7 +8655,6 @@
            FieldOperand(elements.reg(), FixedArray::kLengthOffset));
     deferred->Branch(above_equal);
 
-    // Load and check that the result is not the hole.
     __ mov(result.reg(), Operand(elements.reg(),
                                  result.reg(),
                                  times_4,
@@ -8851,10 +8852,8 @@
       Comment cmnt(masm, "[ Load from Slot");
       Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
       ASSERT(slot != NULL);
-      Result result =
-          cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
+      cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
       if (!persist_after_get_) set_unloaded();
-      cgen_->frame()->Push(&result);
       break;
     }
 
@@ -11596,7 +11595,7 @@
       ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
       Label first_non_object;
       __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
-      __ j(less, &first_non_object);
+      __ j(below, &first_non_object);
 
       // Return non-zero (eax is not zero)
       Label return_not_equal;
@@ -11613,7 +11612,7 @@
       __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
 
       __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
-      __ j(greater_equal, &return_not_equal);
+      __ j(above_equal, &return_not_equal);
 
       // Check for oddballs: true, false, null, undefined.
       __ cmp(ecx, ODDBALL_TYPE);
@@ -12261,9 +12260,9 @@
   __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));  // eax - object map
   __ movzx_b(ecx, FieldOperand(eax, Map::kInstanceTypeOffset));  // ecx - type
   __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
-  __ j(less, &slow, not_taken);
+  __ j(below, &slow, not_taken);
   __ cmp(ecx, LAST_JS_OBJECT_TYPE);
-  __ j(greater, &slow, not_taken);
+  __ j(above, &slow, not_taken);
 
   // Get the prototype of the function.
   __ mov(edx, Operand(esp, 1 * kPointerSize));  // 1 ~ return address
@@ -12291,9 +12290,9 @@
   __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
   __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
   __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
-  __ j(less, &slow, not_taken);
+  __ j(below, &slow, not_taken);
   __ cmp(ecx, LAST_JS_OBJECT_TYPE);
-  __ j(greater, &slow, not_taken);
+  __ j(above, &slow, not_taken);
 
   // Register mapping:
   //   eax is object map.
@@ -13291,3 +13290,5 @@
 #undef __
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_IA32
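
Several hunks above swap signed condition codes (less, greater, less_equal)
for their unsigned counterparts (below, above, below_equal). Instance types
are unsigned bytes loaded with movzx_b, so a signed compare would misclassify
any instance type above 0x7f; after a zero-extending load the unsigned forms
are the correct ones. Schematically:

    __ movzx_b(reg, FieldOperand(map, Map::kInstanceTypeOffset));
    __ cmp(reg, FIRST_JS_OBJECT_TYPE);
    __ j(below, &not_a_js_object);   // unsigned; was j(less, ...)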
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index e00bec7..a098dc3 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -459,8 +459,8 @@
   void LoadWithSafeInt32ModeDisabled(Expression* expr);
 
   // Read a value from a slot and leave it on top of the expression stack.
-  Result LoadFromSlot(Slot* slot, TypeofState typeof_state);
-  Result LoadFromSlotCheckForArguments(Slot* slot, TypeofState typeof_state);
+  void LoadFromSlot(Slot* slot, TypeofState typeof_state);
+  void LoadFromSlotCheckForArguments(Slot* slot, TypeofState typeof_state);
   Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
                                            TypeofState typeof_state,
                                            JumpTarget* slow);
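
The signature change turns both loaders from value-returning helpers into
operations that leave their result on top of the expression stack, collapsing
the old two-step call sites. Before/after, as seen throughout
codegen-ia32.cc above:

    // Before: result handed back; every caller pushed it.
    Result result = LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
    frame()->Push(&result);

    // After: the helper pushes onto the frame itself.
    LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);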
diff --git a/src/ia32/cpu-ia32.cc b/src/ia32/cpu-ia32.cc
index 2107ad9..b15140f 100644
--- a/src/ia32/cpu-ia32.cc
+++ b/src/ia32/cpu-ia32.cc
@@ -33,6 +33,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_IA32)
+
 #include "cpu.h"
 #include "macro-assembler.h"
 
@@ -77,3 +79,5 @@
 }
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_IA32
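
This guard is applied uniformly to the ia32 sources in this patch: each file
compiles to an essentially empty translation unit unless the ia32 target is
selected, so the build can pass every architecture's files to the compiler
unconditionally. The shape, as added above:

    #include "v8.h"

    #if defined(V8_TARGET_ARCH_IA32)

    // ... entire ia32-specific implementation ...

    #endif  // V8_TARGET_ARCH_IA32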
diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc
index d142b11..9780f3b 100644
--- a/src/ia32/debug-ia32.cc
+++ b/src/ia32/debug-ia32.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_IA32)
+
 #include "codegen-inl.h"
 #include "debug.h"
 
@@ -261,3 +263,5 @@
 #endif  // ENABLE_DEBUGGER_SUPPORT
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index 8d342e0..58c22af 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -30,6 +30,9 @@
 #include <stdarg.h>
 
 #include "v8.h"
+
+#if defined(V8_TARGET_ARCH_IA32)
+
 #include "disasm.h"
 
 namespace disasm {
@@ -90,6 +93,7 @@
   {0x99, "cdq", UNSET_OP_ORDER},
   {0x9B, "fwait", UNSET_OP_ORDER},
   {0xFC, "cld", UNSET_OP_ORDER},
+  {0xAB, "stos", UNSET_OP_ORDER},
   {-1, "", UNSET_OP_ORDER}
 };
 
@@ -1438,3 +1442,5 @@
 
 
 }  // namespace disasm
+
+#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/fast-codegen-ia32.cc b/src/ia32/fast-codegen-ia32.cc
index 61e2b5e..b749e59 100644
--- a/src/ia32/fast-codegen-ia32.cc
+++ b/src/ia32/fast-codegen-ia32.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_IA32)
+
 #include "codegen-inl.h"
 #include "fast-codegen.h"
 #include "data-flow.h"
@@ -948,3 +950,5 @@
 
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/frames-ia32.cc b/src/ia32/frames-ia32.cc
index 5c900be..212cfde 100644
--- a/src/ia32/frames-ia32.cc
+++ b/src/ia32/frames-ia32.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_IA32)
+
 #include "frames-inl.h"
 
 namespace v8 {
@@ -109,3 +111,5 @@
 
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index e9838ad..1b78772 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_IA32)
+
 #include "codegen-inl.h"
 #include "compiler.h"
 #include "debug.h"
@@ -79,11 +81,17 @@
     bool function_in_register = true;
 
     // Possibly allocate a local context.
-    if (scope()->num_heap_slots() > 0) {
+    int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+    if (heap_slots > 0) {
       Comment cmnt(masm_, "[ Allocate local context");
       // Argument to NewContext is the function, which is still in edi.
       __ push(edi);
-      __ CallRuntime(Runtime::kNewContext, 1);
+      if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+        FastNewContextStub stub(heap_slots);
+        __ CallStub(&stub);
+      } else {
+        __ CallRuntime(Runtime::kNewContext, 1);
+      }
       function_in_register = false;
       // Context is returned in both eax and esi.  It replaces the context
       // passed to us.  It's saved in the stack and kept live in esi.
@@ -140,7 +148,18 @@
   }
 
   { Comment cmnt(masm_, "[ Declarations");
-    VisitDeclarations(scope()->declarations());
+    // For named function expressions, declare the function name as a
+    // constant.
+    if (scope()->is_function_scope() && scope()->function() != NULL) {
+      EmitDeclaration(scope()->function(), Variable::CONST, NULL);
+    }
+    // Visit all the explicit declarations unless there is an illegal
+    // redeclaration.
+    if (scope()->HasIllegalRedeclaration()) {
+      scope()->VisitIllegalRedeclaration(this);
+    } else {
+      VisitDeclarations(scope()->declarations());
+    }
   }
 
   { Comment cmnt(masm_, "[ Stack check");
@@ -425,6 +444,39 @@
 }
 
 
+void FullCodeGenerator::PrepareTest(Label* materialize_true,
+                                    Label* materialize_false,
+                                    Label** if_true,
+                                    Label** if_false) {
+  switch (context_) {
+    case Expression::kUninitialized:
+      UNREACHABLE();
+      break;
+    case Expression::kEffect:
+      // In an effect context, the true and the false case branch to the
+      // same label.
+      *if_true = *if_false = materialize_true;
+      break;
+    case Expression::kValue:
+      *if_true = materialize_true;
+      *if_false = materialize_false;
+      break;
+    case Expression::kTest:
+      *if_true = true_label_;
+      *if_false = false_label_;
+      break;
+    case Expression::kValueTest:
+      *if_true = materialize_true;
+      *if_false = false_label_;
+      break;
+    case Expression::kTestValue:
+      *if_true = true_label_;
+      *if_false = materialize_false;
+      break;
+  }
+}
+
+
 void FullCodeGenerator::Apply(Expression::Context context,
                               Label* materialize_true,
                               Label* materialize_false) {
@@ -490,6 +542,61 @@
 }
 
 
+// Convert constant control flow (true or false) to the result expected for
+// a given expression context.
+void FullCodeGenerator::Apply(Expression::Context context, bool flag) {
+  switch (context) {
+    case Expression::kUninitialized:
+      UNREACHABLE();
+      break;
+    case Expression::kEffect:
+      break;
+    case Expression::kValue: {
+      Handle<Object> value =
+          flag ? Factory::true_value() : Factory::false_value();
+      switch (location_) {
+        case kAccumulator:
+          __ mov(result_register(), value);
+          break;
+        case kStack:
+          __ push(Immediate(value));
+          break;
+      }
+      break;
+    }
+    case Expression::kTest:
+      __ jmp(flag ? true_label_ : false_label_);
+      break;
+    case Expression::kTestValue:
+      switch (location_) {
+        case kAccumulator:
+          // If value is false it's needed.
+          if (!flag) __ mov(result_register(), Factory::false_value());
+          break;
+        case kStack:
+          // If value is false it's needed.
+          if (!flag) __ push(Immediate(Factory::false_value()));
+          break;
+      }
+      __ jmp(flag ? true_label_ : false_label_);
+      break;
+    case Expression::kValueTest:
+      switch (location_) {
+        case kAccumulator:
+          // If value is true it's needed.
+          if (flag) __ mov(result_register(), Factory::true_value());
+          break;
+        case kStack:
+          // If value is true it's needed.
+          if (flag) __ push(Immediate(Factory::true_value()));
+          break;
+      }
+      __ jmp(flag ? true_label_ : false_label_);
+      break;
+  }
+}
+
+
 void FullCodeGenerator::DoTest(Expression::Context context) {
   // The value to test is in the accumulator.  If the value might be needed
   // on the stack (value/test and test/value contexts with a stack location
@@ -665,22 +772,22 @@
 }
 
 
-void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
+void FullCodeGenerator::EmitDeclaration(Variable* variable,
+                                        Variable::Mode mode,
+                                        FunctionLiteral* function) {
   Comment cmnt(masm_, "[ Declaration");
-  Variable* var = decl->proxy()->var();
-  ASSERT(var != NULL);  // Must have been resolved.
-  Slot* slot = var->slot();
-  Property* prop = var->AsProperty();
-
+  ASSERT(variable != NULL);  // Must have been resolved.
+  Slot* slot = variable->slot();
+  Property* prop = variable->AsProperty();
   if (slot != NULL) {
     switch (slot->type()) {
       case Slot::PARAMETER:
       case Slot::LOCAL:
-        if (decl->mode() == Variable::CONST) {
+        if (mode == Variable::CONST) {
           __ mov(Operand(ebp, SlotOffset(slot)),
                  Immediate(Factory::the_hole_value()));
-        } else if (decl->fun() != NULL) {
-          VisitForValue(decl->fun(), kAccumulator);
+        } else if (function != NULL) {
+          VisitForValue(function, kAccumulator);
           __ mov(Operand(ebp, SlotOffset(slot)), result_register());
         }
         break;
@@ -690,7 +797,7 @@
         // this specific context.
 
         // The variable in the decl always resides in the current context.
-        ASSERT_EQ(0, scope()->ContextChainLength(var->scope()));
+        ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
         if (FLAG_debug_code) {
           // Check if we have the correct context pointer.
           __ mov(ebx,
@@ -698,12 +805,12 @@
           __ cmp(ebx, Operand(esi));
           __ Check(equal, "Unexpected declaration in current context.");
         }
-        if (decl->mode() == Variable::CONST) {
-          __ mov(eax, Immediate(Factory::the_hole_value()));
-          __ mov(CodeGenerator::ContextOperand(esi, slot->index()), eax);
+        if (mode == Variable::CONST) {
+          __ mov(CodeGenerator::ContextOperand(esi, slot->index()),
+                 Immediate(Factory::the_hole_value()));
           // No write barrier since the hole value is in old space.
-        } else if (decl->fun() != NULL) {
-          VisitForValue(decl->fun(), kAccumulator);
+        } else if (function != NULL) {
+          VisitForValue(function, kAccumulator);
           __ mov(CodeGenerator::ContextOperand(esi, slot->index()),
                  result_register());
           int offset = Context::SlotOffset(slot->index());
@@ -714,21 +821,19 @@
 
       case Slot::LOOKUP: {
         __ push(esi);
-        __ push(Immediate(var->name()));
+        __ push(Immediate(variable->name()));
         // Declaration nodes are always introduced in one of two modes.
-        ASSERT(decl->mode() == Variable::VAR ||
-               decl->mode() == Variable::CONST);
-        PropertyAttributes attr =
-            (decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
+        ASSERT(mode == Variable::VAR || mode == Variable::CONST);
+        PropertyAttributes attr = (mode == Variable::VAR) ? NONE : READ_ONLY;
         __ push(Immediate(Smi::FromInt(attr)));
         // Push initial value, if any.
         // Note: For variables we must not push an initial value (such as
         // 'undefined') because we may have a (legal) redeclaration and we
         // must not destroy the current value.
-        if (decl->mode() == Variable::CONST) {
+        if (mode == Variable::CONST) {
           __ push(Immediate(Factory::the_hole_value()));
-        } else if (decl->fun() != NULL) {
-          VisitForValue(decl->fun(), kStack);
+        } else if (function != NULL) {
+          VisitForValue(function, kStack);
         } else {
           __ push(Immediate(Smi::FromInt(0)));  // No initial value!
         }
@@ -738,13 +843,13 @@
     }
 
   } else if (prop != NULL) {
-    if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
+    if (function != NULL || mode == Variable::CONST) {
       // We are declaring a function or constant that rewrites to a
       // property.  Use (keyed) IC to set the initial value.
       VisitForValue(prop->obj(), kStack);
-      if (decl->fun() != NULL) {
+      if (function != NULL) {
         VisitForValue(prop->key(), kStack);
-        VisitForValue(decl->fun(), kAccumulator);
+        VisitForValue(function, kAccumulator);
         __ pop(ecx);
       } else {
         VisitForValue(prop->key(), kAccumulator);
@@ -763,6 +868,11 @@
 }
 
 
+void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
+  EmitDeclaration(decl->proxy()->var(), decl->mode(), decl->fun());
+}
+
+
 void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.
   __ push(esi);  // The context is the first argument.
@@ -773,19 +883,225 @@
 }
 
 
-void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
-  Comment cmnt(masm_, "[ FunctionLiteral");
+void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+  Comment cmnt(masm_, "[ SwitchStatement");
+  Breakable nested_statement(this, stmt);
+  SetStatementPosition(stmt);
+  // Keep the switch value on the stack until a case matches.
+  VisitForValue(stmt->tag(), kStack);
 
-  // Build the shared function info and instantiate the function based
-  // on it.
-  Handle<SharedFunctionInfo> function_info =
-      Compiler::BuildFunctionInfo(expr, script(), this);
-  if (HasStackOverflow()) return;
+  ZoneList<CaseClause*>* clauses = stmt->cases();
+  CaseClause* default_clause = NULL;  // Can occur anywhere in the list.
 
-  // Create a new closure.
-  __ push(esi);
-  __ push(Immediate(function_info));
-  __ CallRuntime(Runtime::kNewClosure, 2);
+  Label next_test;  // Recycled for each test.
+  // Compile all the tests with branches to their bodies.
+  for (int i = 0; i < clauses->length(); i++) {
+    CaseClause* clause = clauses->at(i);
+    // The default is not a test, but remember it as final fall through.
+    if (clause->is_default()) {
+      default_clause = clause;
+      continue;
+    }
+
+    Comment cmnt(masm_, "[ Case comparison");
+    __ bind(&next_test);
+    next_test.Unuse();
+
+    // Compile the label expression.
+    VisitForValue(clause->label(), kAccumulator);
+
+    // Perform the comparison as if via '==='.  The comparison stub expects
+    // the smi vs. smi case to be handled before it is called.
+    Label slow_case;
+    __ mov(edx, Operand(esp, 0));  // Switch value.
+    __ mov(ecx, edx);
+    __ or_(ecx, Operand(eax));
+    __ test(ecx, Immediate(kSmiTagMask));
+    __ j(not_zero, &slow_case, not_taken);
+    __ cmp(edx, Operand(eax));
+    __ j(not_equal, &next_test);
+    __ Drop(1);  // Switch value is no longer needed.
+    __ jmp(clause->body_target()->entry_label());
+
+    __ bind(&slow_case);
+    CompareStub stub(equal, true);
+    __ CallStub(&stub);
+    __ test(eax, Operand(eax));
+    __ j(not_equal, &next_test);
+    __ Drop(1);  // Switch value is no longer needed.
+    __ jmp(clause->body_target()->entry_label());
+  }
+
+  // Discard the test value and jump to the default if present, otherwise to
+  // the end of the statement.
+  __ bind(&next_test);
+  __ Drop(1);  // Switch value is no longer needed.
+  if (default_clause == NULL) {
+    __ jmp(nested_statement.break_target());
+  } else {
+    __ jmp(default_clause->body_target()->entry_label());
+  }
+
+  // Compile all the case bodies.
+  for (int i = 0; i < clauses->length(); i++) {
+    Comment cmnt(masm_, "[ Case body");
+    CaseClause* clause = clauses->at(i);
+    __ bind(clause->body_target()->entry_label());
+    VisitStatements(clause->statements());
+  }
+
+  __ bind(nested_statement.break_target());
+}
+
+
+void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+  Comment cmnt(masm_, "[ ForInStatement");
+  SetStatementPosition(stmt);
+
+  Label loop, exit;
+  ForIn loop_statement(this, stmt);
+  increment_loop_depth();
+
+  // Get the object to enumerate over. Both SpiderMonkey and JSC
+  // ignore null and undefined in contrast to the specification; see
+  // ECMA-262 section 12.6.4.
+  VisitForValue(stmt->enumerable(), kAccumulator);
+  __ cmp(eax, Factory::undefined_value());
+  __ j(equal, &exit);
+  __ cmp(eax, Factory::null_value());
+  __ j(equal, &exit);
+
+  // Convert the object to a JS object.
+  Label convert, done_convert;
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, &convert);
+  __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+  __ j(above_equal, &done_convert);
+  __ bind(&convert);
+  __ push(eax);
+  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+  __ bind(&done_convert);
+  __ push(eax);
+
+  // TODO(kasperl): Check cache validity in generated code. This is a
+  // fast case for the JSObject::IsSimpleEnum cache validity
+  // checks. If we cannot guarantee cache validity, call the runtime
+  // system to check cache validity or get the property names in a
+  // fixed array.
+
+  // Get the set of properties to enumerate.
+  __ push(eax);  // Duplicate the enumerable object on the stack.
+  __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+
+  // If we got a map from the runtime call, we can do a fast
+  // modification check. Otherwise, we got a fixed array, and we have
+  // to do a slow check.
+  Label fixed_array;
+  __ cmp(FieldOperand(eax, HeapObject::kMapOffset), Factory::meta_map());
+  __ j(not_equal, &fixed_array);
+
+  // We got a map in register eax. Get the enumeration cache from it.
+  __ mov(ecx, FieldOperand(eax, Map::kInstanceDescriptorsOffset));
+  __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset));
+  __ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+  // Set up the four remaining stack slots.
+  __ push(eax);  // Map.
+  __ push(edx);  // Enumeration cache.
+  __ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
+  __ SmiTag(eax);
+  __ push(eax);  // Enumeration cache length (as smi).
+  __ push(Immediate(Smi::FromInt(0)));  // Initial index.
+  __ jmp(&loop);
+
+  // We got a fixed array in register eax. Iterate through that.
+  __ bind(&fixed_array);
+  __ push(Immediate(Smi::FromInt(0)));  // Map (0) - force slow check.
+  __ push(eax);
+  __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
+  __ SmiTag(eax);
+  __ push(eax);  // Fixed array length (as smi).
+  __ push(Immediate(Smi::FromInt(0)));  // Initial index.
+
+  // Generate code for doing the condition check.
+  __ bind(&loop);
+  __ mov(eax, Operand(esp, 0 * kPointerSize));  // Get the current index.
+  __ cmp(eax, Operand(esp, 1 * kPointerSize));  // Compare to the array length.
+  __ j(above_equal, loop_statement.break_target());
+
+  // Get the current entry of the array into register ebx.
+  __ mov(ebx, Operand(esp, 2 * kPointerSize));
+  __ mov(ebx, FieldOperand(ebx, eax, times_2, FixedArray::kHeaderSize));
+
+  // Get the expected map from the stack or a zero map in the
+  // permanent slow case into register edx.
+  __ mov(edx, Operand(esp, 3 * kPointerSize));
+
+  // Check if the expected map still matches that of the enumerable.
+  // If not, we have to filter the key.
+  Label update_each;
+  __ mov(ecx, Operand(esp, 4 * kPointerSize));
+  __ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
+  __ j(equal, &update_each);
+
+  // Convert the entry to a string or null if it isn't a property
+  // anymore. If the property has been removed while iterating, we
+  // just skip it.
+  __ push(ecx);  // Enumerable.
+  __ push(ebx);  // Current entry.
+  __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
+  __ cmp(eax, Factory::null_value());
+  __ j(equal, loop_statement.continue_target());
+  __ mov(ebx, Operand(eax));
+
+  // Update the 'each' property or variable from the possibly filtered
+  // entry in register ebx.
+  __ bind(&update_each);
+  __ mov(result_register(), ebx);
+  // Perform the assignment as if via '='.
+  EmitAssignment(stmt->each());
+
+  // Generate code for the body of the loop.
+  Label stack_limit_hit, stack_check_done;
+  Visit(stmt->body());
+
+  __ StackLimitCheck(&stack_limit_hit);
+  __ bind(&stack_check_done);
+
+  // Generate code for going to the next element by incrementing the
+  // index (smi) stored on top of the stack.
+  __ bind(loop_statement.continue_target());
+  __ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1)));
+  __ jmp(&loop);
+
+  // Slow case for the stack limit check.
+  StackCheckStub stack_check_stub;
+  __ bind(&stack_limit_hit);
+  __ CallStub(&stack_check_stub);
+  __ jmp(&stack_check_done);
+
+  // Remove the pointers stored on the stack.
+  __ bind(loop_statement.break_target());
+  __ add(Operand(esp), Immediate(5 * kPointerSize));
+
+  // Exit and decrement the loop depth.
+  __ bind(&exit);
+  decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info) {
+  // Use the fast case closure allocation code that allocates in new
+  // space for nested functions that don't need literals cloning.
+  if (scope()->is_function_scope() && info->num_literals() == 0) {
+    FastNewClosureStub stub;
+    __ push(Immediate(info));
+    __ CallStub(&stub);
+  } else {
+    __ push(esi);
+    __ push(Immediate(info));
+    __ CallRuntime(Runtime::kNewClosure, 2);
+  }
   Apply(context_, eax);
 }
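
VisitSwitchStatement compiles the clause list in two passes, tests first and
bodies second, so the generated code groups all comparisons ahead of the
bodies. The resulting layout is roughly (illustrative):

    //   push <tag>
    // test_i (one per non-default clause):
    //   compare tag with label_i   (smi fast path, else CompareStub)
    //   equal      -> drop tag, jmp body_i
    //   not equal  -> fall through to test_{i+1}
    // after last test: drop tag, jmp default body (or break target)
    // body_0 .. body_n: clause statements, falling through as usual
    // break target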
 
@@ -830,7 +1146,20 @@
     Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
                             ? "Context slot"
                             : "Stack slot");
-    Apply(context, slot);
+    if (var->mode() == Variable::CONST) {
+      // Constants may be the hole value if they have not been initialized.
+      // Unhole them.
+      Label done;
+      MemOperand slot_operand = EmitSlotSearch(slot, eax);
+      __ mov(eax, slot_operand);
+      __ cmp(eax, Factory::the_hole_value());
+      __ j(not_equal, &done);
+      __ mov(eax, Factory::undefined_value());
+      __ bind(&done);
+      Apply(context, eax);
+    } else {
+      Apply(context, slot);
+    }
 
   } else {
     Comment cmnt(masm_, "Rewritten parameter");
@@ -966,22 +1295,28 @@
 
 void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   Comment cmnt(masm_, "[ ArrayLiteral");
+
+  ZoneList<Expression*>* subexprs = expr->values();
+  int length = subexprs->length();
+
   __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
   __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
   __ push(Immediate(Smi::FromInt(expr->literal_index())));
   __ push(Immediate(expr->constant_elements()));
   if (expr->depth() > 1) {
     __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
-  } else {
+  } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
     __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+  } else {
+    FastCloneShallowArrayStub stub(length);
+    __ CallStub(&stub);
   }
 
   bool result_saved = false;  // Is the result saved to the stack?
 
   // Emit code to evaluate all the non-constant subexpressions and to store
   // them into the newly cloned array.
-  ZoneList<Expression*>* subexprs = expr->values();
-  for (int i = 0, len = subexprs->length(); i < len; i++) {
+  for (int i = 0; i < length; i++) {
     Expression* subexpr = subexprs->at(i);
     // If the subexpression is a literal or a simple materialized literal it
     // is already set in the cloned array.
@@ -1016,7 +1351,13 @@
 
 void FullCodeGenerator::VisitAssignment(Assignment* expr) {
   Comment cmnt(masm_, "[ Assignment");
-  ASSERT(expr->op() != Token::INIT_CONST);
+  // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
+  // on the left-hand side.
+  if (!expr->target()->IsValidLeftHandSide()) {
+    VisitForEffect(expr->target());
+    return;
+  }
+
   // Left-hand side can only be a property, a global or a (parameter or local)
   // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
   enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
@@ -1095,6 +1436,7 @@
   switch (assign_type) {
     case VARIABLE:
       EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+                             expr->op(),
                              context_);
       break;
     case NAMED_PROPERTY:
@@ -1137,15 +1479,66 @@
 }
 
 
+void FullCodeGenerator::EmitAssignment(Expression* expr) {
+  // Invalid left-hand sides are rewritten to have a 'throw
+  // ReferenceError' on the left-hand side.
+  if (!expr->IsValidLeftHandSide()) {
+    VisitForEffect(expr);
+    return;
+  }
+
+  // Left-hand side can only be a property, a global or a (parameter or local)
+  // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+  enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+  LhsKind assign_type = VARIABLE;
+  Property* prop = expr->AsProperty();
+  if (prop != NULL) {
+    assign_type = (prop->key()->IsPropertyName())
+        ? NAMED_PROPERTY
+        : KEYED_PROPERTY;
+  }
+
+  switch (assign_type) {
+    case VARIABLE: {
+      Variable* var = expr->AsVariableProxy()->var();
+      EmitVariableAssignment(var, Token::ASSIGN, Expression::kEffect);
+      break;
+    }
+    case NAMED_PROPERTY: {
+      __ push(eax);  // Preserve value.
+      VisitForValue(prop->obj(), kAccumulator);
+      __ mov(edx, eax);
+      __ pop(eax);  // Restore value.
+      __ mov(ecx, prop->key()->AsLiteral()->handle());
+      Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+      __ call(ic, RelocInfo::CODE_TARGET);
+      __ nop();  // Signal no inlined code.
+      break;
+    }
+    case KEYED_PROPERTY: {
+      __ push(eax);  // Preserve value.
+      VisitForValue(prop->obj(), kStack);
+      VisitForValue(prop->key(), kAccumulator);
+      __ mov(ecx, eax);
+      __ pop(edx);
+      __ pop(eax);  // Restore value.
+      Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+      __ call(ic, RelocInfo::CODE_TARGET);
+      __ nop();  // Signal no inlined code.
+      break;
+    }
+  }
+}
+
+
 void FullCodeGenerator::EmitVariableAssignment(Variable* var,
+                                               Token::Value op,
                                                Expression::Context context) {
-  // Three main cases: global variables, lookup slots, and all other
-  // types of slots.  Left-hand-side parameters that rewrite to
-  // explicit property accesses do not reach here.
+  // Left-hand sides that rewrite to explicit property accesses do not reach
+  // here.
   ASSERT(var != NULL);
   ASSERT(var->is_global() || var->slot() != NULL);
 
-  Slot* slot = var->slot();
   if (var->is_global()) {
     ASSERT(!var->is_this());
     // Assignment to a global variable.  Use inline caching for the
@@ -1156,44 +1549,61 @@
     Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
     __ call(ic, RelocInfo::CODE_TARGET);
     __ nop();
-    Apply(context, eax);
 
-  } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
-    __ push(result_register());  // Value.
-    __ push(esi);  // Context.
-    __ push(Immediate(var->name()));
-    __ CallRuntime(Runtime::kStoreContextSlot, 3);
-    Apply(context, eax);
-
-  } else if (slot != NULL) {
+  } else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
+    // Perform the assignment for non-const variables and for initialization
+    // of const variables.  Const assignments are simply skipped.
+    Label done;
+    Slot* slot = var->slot();
     switch (slot->type()) {
-      case Slot::LOCAL:
       case Slot::PARAMETER:
-        __ mov(Operand(ebp, SlotOffset(slot)), result_register());
+      case Slot::LOCAL:
+        if (op == Token::INIT_CONST) {
+          // Detect const reinitialization by checking for the hole value.
+          __ mov(edx, Operand(ebp, SlotOffset(slot)));
+          __ cmp(edx, Factory::the_hole_value());
+          __ j(not_equal, &done);
+        }
+        // Perform the assignment.
+        __ mov(Operand(ebp, SlotOffset(slot)), eax);
         break;
 
       case Slot::CONTEXT: {
         MemOperand target = EmitSlotSearch(slot, ecx);
-        __ mov(target, result_register());
-
-        // RecordWrite may destroy all its register arguments.
-        __ mov(edx, result_register());
+        if (op == Token::INIT_CONST) {
+          // Detect const reinitialization by checking for the hole value.
+          __ mov(edx, target);
+          __ cmp(edx, Factory::the_hole_value());
+          __ j(not_equal, &done);
+        }
+        // Perform the assignment and issue the write barrier.
+        __ mov(target, eax);
+        // The value of the assignment is in eax.  RecordWrite clobbers its
+        // register arguments.
+        __ mov(edx, eax);
         int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
         __ RecordWrite(ecx, offset, edx, ebx);
         break;
       }
 
       case Slot::LOOKUP:
-        UNREACHABLE();
+        // Call the runtime for the assignment.  The runtime will ignore
+        // const reinitialization.
+        __ push(eax);  // Value.
+        __ push(esi);  // Context.
+        __ push(Immediate(var->name()));
+        if (op == Token::INIT_CONST) {
+          // The runtime will ignore const redeclaration.
+          __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+        } else {
+          __ CallRuntime(Runtime::kStoreContextSlot, 3);
+        }
         break;
     }
-    Apply(context, result_register());
-
-  } else {
-    // Variables rewritten as properties are not treated as variables in
-    // assignments.
-    UNREACHABLE();
+    __ bind(&done);
   }
+
+  Apply(context, eax);
 }
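
Const handling relies on the-hole as a sentinel: EmitDeclaration stores the
hole into every const slot, and an INIT_CONST assignment only writes while
the hole is still present; a plain assignment to a (non-global) const is
skipped outright by the var->mode() != Variable::CONST test above. For the
stack and context slot cases the behavior is (a sketch):

    // const x = 1;  compiles to:
    //   declaration:  slot <- the_hole
    //   INIT_CONST:   if (slot == the_hole) slot <- 1
    // A later 'x = 2' never stores: op != INIT_CONST and the variable is
    // const, so the whole assignment branch is bypassed.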
 
 
@@ -1327,7 +1737,8 @@
   }
   // Record source position for debugger.
   SetSourcePosition(expr->position());
-  CallFunctionStub stub(arg_count, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
+  InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+  CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
   __ CallStub(&stub);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -1341,16 +1752,62 @@
   Variable* var = fun->AsVariableProxy()->AsVariable();
 
   if (var != NULL && var->is_possibly_eval()) {
-    // Call to the identifier 'eval'.
-    UNREACHABLE();
+    // In a call to eval, we first call %ResolvePossiblyDirectEval to
+    // resolve the function we need to call and the receiver of the
+    // call.  Then we call the resolved function using the given
+    // arguments.
+    VisitForValue(fun, kStack);
+    __ push(Immediate(Factory::undefined_value()));  // Reserved receiver slot.
+
+    // Push the arguments.
+    ZoneList<Expression*>* args = expr->arguments();
+    int arg_count = args->length();
+    for (int i = 0; i < arg_count; i++) {
+      VisitForValue(args->at(i), kStack);
+    }
+
+    // Push copy of the function - found below the arguments.
+    __ push(Operand(esp, (arg_count + 1) * kPointerSize));
+
+    // Push copy of the first argument or undefined if it doesn't exist.
+    if (arg_count > 0) {
+      __ push(Operand(esp, arg_count * kPointerSize));
+    } else {
+      __ push(Immediate(Factory::undefined_value()));
+    }
+
+    // Push the receiver of the enclosing function and do runtime call.
+    __ push(Operand(ebp, (2 + scope()->num_parameters()) * kPointerSize));
+    __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+
+    // The runtime call returns a pair of values in eax (function) and
+    // edx (receiver). Touch up the stack with the right values.
+    __ mov(Operand(esp, (arg_count + 0) * kPointerSize), edx);
+    __ mov(Operand(esp, (arg_count + 1) * kPointerSize), eax);
+
+    // Record source position for debugger.
+    SetSourcePosition(expr->position());
+    InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+    CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
+    __ CallStub(&stub);
+    // Restore context register.
+    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+    DropAndApply(1, context_, eax);
   } else if (var != NULL && !var->is_this() && var->is_global()) {
     // Push global object as receiver for the call IC.
     __ push(CodeGenerator::GlobalObject());
     EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
   } else if (var != NULL && var->slot() != NULL &&
              var->slot()->type() == Slot::LOOKUP) {
-    // Call to a lookup slot.
-    UNREACHABLE();
+    // Call to a lookup slot (dynamically introduced variable).  Call the
+    // runtime to find the function to call (returned in eax) and the object
+    // holding it (returned in edx).
+    __ push(context_register());
+    __ push(Immediate(var->name()));
+    __ CallRuntime(Runtime::kLoadContextSlot, 2);
+    __ push(eax);  // Function.
+    __ push(edx);  // Receiver.
+    EmitCallWithStub(expr);
   } else if (fun->AsProperty() != NULL) {
     // Call to an object property.
     Property* prop = fun->AsProperty();
@@ -1447,7 +1904,728 @@
 }
 
 
+void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
+  Handle<String> name = expr->name();
+  if (strcmp("_IsSmi", *name->ToCString()) == 0) {
+    EmitIsSmi(expr->arguments());
+  } else if (strcmp("_IsNonNegativeSmi", *name->ToCString()) == 0) {
+    EmitIsNonNegativeSmi(expr->arguments());
+  } else if (strcmp("_IsObject", *name->ToCString()) == 0) {
+    EmitIsObject(expr->arguments());
+  } else if (strcmp("_IsUndetectableObject", *name->ToCString()) == 0) {
+    EmitIsUndetectableObject(expr->arguments());
+  } else if (strcmp("_IsFunction", *name->ToCString()) == 0) {
+    EmitIsFunction(expr->arguments());
+  } else if (strcmp("_IsArray", *name->ToCString()) == 0) {
+    EmitIsArray(expr->arguments());
+  } else if (strcmp("_IsRegExp", *name->ToCString()) == 0) {
+    EmitIsRegExp(expr->arguments());
+  } else if (strcmp("_IsConstructCall", *name->ToCString()) == 0) {
+    EmitIsConstructCall(expr->arguments());
+  } else if (strcmp("_ObjectEquals", *name->ToCString()) == 0) {
+    EmitObjectEquals(expr->arguments());
+  } else if (strcmp("_Arguments", *name->ToCString()) == 0) {
+    EmitArguments(expr->arguments());
+  } else if (strcmp("_ArgumentsLength", *name->ToCString()) == 0) {
+    EmitArgumentsLength(expr->arguments());
+  } else if (strcmp("_ClassOf", *name->ToCString()) == 0) {
+    EmitClassOf(expr->arguments());
+  } else if (strcmp("_Log", *name->ToCString()) == 0) {
+    EmitLog(expr->arguments());
+  } else if (strcmp("_RandomHeapNumber", *name->ToCString()) == 0) {
+    EmitRandomHeapNumber(expr->arguments());
+  } else if (strcmp("_SubString", *name->ToCString()) == 0) {
+    EmitSubString(expr->arguments());
+  } else if (strcmp("_RegExpExec", *name->ToCString()) == 0) {
+    EmitRegExpExec(expr->arguments());
+  } else if (strcmp("_ValueOf", *name->ToCString()) == 0) {
+    EmitValueOf(expr->arguments());
+  } else if (strcmp("_SetValueOf", *name->ToCString()) == 0) {
+    EmitSetValueOf(expr->arguments());
+  } else if (strcmp("_NumberToString", *name->ToCString()) == 0) {
+    EmitNumberToString(expr->arguments());
+  } else if (strcmp("_CharFromCode", *name->ToCString()) == 0) {
+    EmitCharFromCode(expr->arguments());
+  } else if (strcmp("_FastCharCodeAt", *name->ToCString()) == 0) {
+    EmitFastCharCodeAt(expr->arguments());
+  } else if (strcmp("_StringAdd", *name->ToCString()) == 0) {
+    EmitStringAdd(expr->arguments());
+  } else if (strcmp("_StringCompare", *name->ToCString()) == 0) {
+    EmitStringCompare(expr->arguments());
+  } else if (strcmp("_MathPow", *name->ToCString()) == 0) {
+    EmitMathPow(expr->arguments());
+  } else if (strcmp("_MathSin", *name->ToCString()) == 0) {
+    EmitMathSin(expr->arguments());
+  } else if (strcmp("_MathCos", *name->ToCString()) == 0) {
+    EmitMathCos(expr->arguments());
+  } else if (strcmp("_MathSqrt", *name->ToCString()) == 0) {
+    EmitMathSqrt(expr->arguments());
+  } else if (strcmp("_CallFunction", *name->ToCString()) == 0) {
+    EmitCallFunction(expr->arguments());
+  } else if (strcmp("_RegExpConstructResult", *name->ToCString()) == 0) {
+    EmitRegExpConstructResult(expr->arguments());
+  } else if (strcmp("_SwapElements", *name->ToCString()) == 0) {
+    EmitSwapElements(expr->arguments());
+  } else if (strcmp("_GetFromCache", *name->ToCString()) == 0) {
+    EmitGetFromCache(expr->arguments());
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, if_true);
+  __ jmp(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ test(eax, Immediate(kSmiTagMask | 0x80000000));
+  __ j(zero, if_true);
+  __ jmp(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
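The two masks above depend on the 32-bit smi encoding used throughout this port: a smi stores its payload shifted left by one, so bit 0 (kSmiTagMask) is clear for smis and set for heap-object pointers, and bit 31 carries the payload's sign. A minimal C++ sketch of both predicates, assuming that encoding rather than V8's real types:

    #include <cstdint>

    // Sketch only: mirrors the smi tagging scheme (kSmiTag == 0,
    // kSmiTagSize == 1) that the masks above assume, not V8's real types.
    inline uint32_t ToSmi(int32_t value) {
      return static_cast<uint32_t>(value) << 1;
    }

    inline bool IsSmi(uint32_t tagged) {
      return (tagged & 1u) == 0;  // test(eax, kSmiTagMask); j(zero, ...)
    }

    inline bool IsNonNegativeSmi(uint32_t tagged) {
      // Combined test: the tag bit and the sign bit must both be clear,
      // matching test(eax, Immediate(kSmiTagMask | 0x80000000)) above.
      return (tagged & (1u | 0x80000000u)) == 0;
    }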
+
+void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, if_false);
+  __ cmp(eax, Factory::null_value());
+  __ j(equal, if_true);
+  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+  // Undetectable objects behave like undefined when tested with typeof.
+  __ movzx_b(ecx, FieldOperand(ebx, Map::kBitFieldOffset));
+  __ test(ecx, Immediate(1 << Map::kIsUndetectable));
+  __ j(not_zero, if_false);
+  __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+  __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+  __ j(below, if_false);
+  __ cmp(ecx, LAST_JS_OBJECT_TYPE);
+  __ j(below_equal, if_true);
+  __ jmp(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
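Spelled out, the branch sequence above implements the following predicate; the struct fields are hypothetical stand-ins for the raw map loads, and the instance-type bounds are placeholders for values from V8's InstanceType enum:

    // Placeholder bounds; the real values come from V8's InstanceType enum.
    constexpr int kFirstJSObjectType = 1;
    constexpr int kLastJSObjectType = 2;

    struct Map { bool is_undetectable; int instance_type; };
    struct Value { bool is_smi; bool is_null; Map map; };

    bool IsObjectPredicate(const Value& v) {
      if (v.is_smi) return false;
      if (v.is_null) return true;               // typeof null is "object"
      if (v.map.is_undetectable) return false;  // behaves like undefined
      return kFirstJSObjectType <= v.map.instance_type &&
             v.map.instance_type <= kLastJSObjectType;
    }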
+
+void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, if_false);
+  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+  __ movzx_b(ebx, FieldOperand(ebx, Map::kBitFieldOffset));
+  __ test(ebx, Immediate(1 << Map::kIsUndetectable));
+  __ j(not_zero, if_true);
+  __ jmp(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, if_false);
+  __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
+  __ j(equal, if_true);
+  __ jmp(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, if_false);
+  __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
+  __ j(equal, if_true);
+  __ jmp(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, if_false);
+  __ CmpObjectType(eax, JS_REGEXP_TYPE, ebx);
+  __ j(equal, if_true);
+  __ jmp(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  // Get the frame pointer for the calling frame.
+  __ mov(eax, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+
+  // Skip the arguments adaptor frame if it exists.
+  Label check_frame_marker;
+  __ cmp(Operand(eax, StandardFrameConstants::kContextOffset),
+         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(not_equal, &check_frame_marker);
+  __ mov(eax, Operand(eax, StandardFrameConstants::kCallerFPOffset));
+
+  // Check the marker in the calling frame.
+  __ bind(&check_frame_marker);
+  __ cmp(Operand(eax, StandardFrameConstants::kMarkerOffset),
+         Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
+  __ j(equal, if_true);
+  __ jmp(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
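The frame walk above reads two fixed slots off each frame pointer; in outline, with an illustrative struct in place of raw ebp offsets, it does the following:

    #include <cstdint>

    // Illustrative layout; the generated code reads fixed ebp offsets.
    struct Frame {
      Frame* caller_fp;       // StandardFrameConstants::kCallerFPOffset
      std::intptr_t context;  // kContextOffset (holds a frame-type marker
                              // for arguments adaptor frames)
      std::intptr_t marker;   // kMarkerOffset
    };

    bool IsConstructCall(const Frame* fp,
                         std::intptr_t adaptor_marker,
                         std::intptr_t construct_marker) {
      const Frame* caller = fp->caller_fp;
      // Skip at most one arguments adaptor frame, as the assembly does.
      if (caller->context == adaptor_marker) caller = caller->caller_fp;
      return caller->marker == construct_marker;
    }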
+
+void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 2);
+
+  // Load the two objects into registers and perform the comparison.
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ pop(ebx);
+  __ cmp(eax, Operand(ebx));
+  __ j(equal, if_true);
+  __ jmp(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  // ArgumentsAccessStub expects the key in edx and the formal
+  // parameter count in eax.
+  VisitForValue(args->at(0), kAccumulator);
+  __ mov(edx, eax);
+  __ mov(eax, Immediate(Smi::FromInt(scope()->num_parameters())));
+  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+  __ CallStub(&stub);
+  Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+
+  Label exit;
+  // Get the number of formal parameters.
+  __ Set(eax, Immediate(Smi::FromInt(scope()->num_parameters())));
+
+  // Check if the calling frame is an arguments adaptor frame.
+  __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ cmp(Operand(ebx, StandardFrameConstants::kContextOffset),
+         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(not_equal, &exit);
+
+  // Arguments adaptor case: Read the arguments length from the
+  // adaptor frame.
+  __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+  __ bind(&exit);
+  if (FLAG_debug_code) __ AbortIfNotSmi(eax);
+  Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+  Label done, null, function, non_function_constructor;
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  // If the object is a smi, we return null.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, &null);
+
+  // Check that the object is a JS object but take special care of JS
+  // functions to make sure they have 'Function' as their class.
+  __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, eax);  // Map is now in eax.
+  __ j(below, &null);
+
+  // As long as JS_FUNCTION_TYPE is the last instance type and it is
+  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+  // LAST_JS_OBJECT_TYPE.
+  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+  __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
+  __ j(equal, &function);
+
+  // Check if the constructor in the map is a function.
+  __ mov(eax, FieldOperand(eax, Map::kConstructorOffset));
+  __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
+  __ j(not_equal, &non_function_constructor);
+
+  // eax now contains the constructor function. Grab the
+  // instance class name from there.
+  __ mov(eax, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kInstanceClassNameOffset));
+  __ jmp(&done);
+
+  // Functions have class 'Function'.
+  __ bind(&function);
+  __ mov(eax, Factory::function_class_symbol());
+  __ jmp(&done);
+
+  // Objects with a non-function constructor have class 'Object'.
+  __ bind(&non_function_constructor);
+  __ mov(eax, Factory::Object_symbol());
+  __ jmp(&done);
+
+  // Non-JS objects have class null.
+  __ bind(&null);
+  __ mov(eax, Factory::null_value());
+
+  // All done.
+  __ bind(&done);
+
+  Apply(context_, eax);
+}
+
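In outline: smis and non-JS-objects yield the null class, JS functions yield 'Function', and everything else takes the instance class name stored on its constructor's shared function info, falling back to 'Object' when the constructor is not a function. A compact sketch with hypothetical fields:

    // Hypothetical flattening of the pointer chain the code chases:
    // object -> map -> constructor -> shared info -> instance class name.
    struct Obj {
      bool is_smi, is_js_object, is_js_function;
      Obj* constructor;                 // from the object's map
      const char* instance_class_name;  // from the constructor's shared info
    };

    const char* ClassOf(const Obj& o) {
      if (o.is_smi || !o.is_js_object) return nullptr;  // class is null
      if (o.is_js_function) return "Function";
      if (o.constructor == nullptr || !o.constructor->is_js_function) {
        return "Object";
      }
      return o.constructor->instance_class_name;
    }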
+
+void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+  // Conditionally generate a log call.
+  // Args:
+  //   0 (literal string): The type of logging (corresponds to the flags).
+  //     This is used to determine whether or not to generate the log call.
+  //   1 (string): Format string.  Access the string at argument index 2
+  //     with '%2s' (see Logger::LogRuntime for all the formats).
+  //   2 (array): Arguments to the format string.
+  ASSERT_EQ(args->length(), 3);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
+    VisitForValue(args->at(1), kStack);
+    VisitForValue(args->at(2), kStack);
+    __ CallRuntime(Runtime::kLog, 2);
+  }
+#endif
+  // Finally, we're expected to leave a value on the top of the stack.
+  __ mov(eax, Factory::undefined_value());
+  Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+
+  Label slow_allocate_heapnumber;
+  Label heapnumber_allocated;
+
+  __ AllocateHeapNumber(edi, ebx, ecx, &slow_allocate_heapnumber);
+  __ jmp(&heapnumber_allocated);
+
+  __ bind(&slow_allocate_heapnumber);
+  // To allocate a heap number and ensure that it is not a smi, we call
+  // the runtime function Runtime::kNumberUnaryMinus on 0, returning the
+  // double -0.0.  A new, distinct heap number is returned each time.
+  __ push(Immediate(Smi::FromInt(0)));
+  __ CallRuntime(Runtime::kNumberUnaryMinus, 1);
+  __ mov(edi, eax);
+
+  __ bind(&heapnumber_allocated);
+
+  __ PrepareCallCFunction(0, ebx);
+  __ CallCFunction(ExternalReference::random_uint32_function(), 0);
+
+  // Convert 32 random bits in eax to 0.(32 random bits) in a double
+  // by computing:
+  // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
+  // This is implemented on both SSE2 and FPU.
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope fscope(SSE2);
+    __ mov(ebx, Immediate(0x49800000));  // 1.0 x 2^20 as single.
+    __ movd(xmm1, Operand(ebx));
+    __ movd(xmm0, Operand(eax));
+    __ cvtss2sd(xmm1, xmm1);
+    __ pxor(xmm0, xmm1);
+    __ subsd(xmm0, xmm1);
+    __ movdbl(FieldOperand(edi, HeapNumber::kValueOffset), xmm0);
+  } else {
+    // 0x4130000000000000 is 1.0 x 2^20 as a double.
+    __ mov(FieldOperand(edi, HeapNumber::kExponentOffset),
+           Immediate(0x41300000));
+    __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), eax);
+    __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset));
+    __ mov(FieldOperand(edi, HeapNumber::kMantissaOffset), Immediate(0));
+    __ fld_d(FieldOperand(edi, HeapNumber::kValueOffset));
+    __ fsubp(1);
+    __ fstp_d(FieldOperand(edi, HeapNumber::kValueOffset));
+  }
+  __ mov(eax, edi);
+  Apply(context_, eax);
+}
+
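The constant deserves a second look: 0x41300000 in the high word is 1.0 x 2^20 as a double, so ORing 32 random bits into the low mantissa word yields 2^20 + bits x 2^-32, and the subtraction leaves a value in [0, 1). A standalone sketch of the same trick, assuming IEEE-754 doubles:

    #include <cstdint>
    #include <cstring>

    // Build 1.(20 zeros)(32 random bits) x 2^20 and subtract 1.0 x 2^20,
    // leaving bits * 2^-32, i.e. a double in [0, 1).
    double RandomBitsToDouble(uint32_t bits) {
      uint64_t repr = (uint64_t{0x41300000} << 32) | bits;
      double d;
      std::memcpy(&d, &repr, sizeof d);
      return d - 1048576.0;  // 1048576.0 == 1.0 x 2^20
    }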
+
+void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+  // Load the arguments on the stack and call the stub.
+  SubStringStub stub;
+  ASSERT(args->length() == 3);
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+  VisitForValue(args->at(2), kStack);
+  __ CallStub(&stub);
+  Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+  // Load the arguments on the stack and call the stub.
+  RegExpExecStub stub;
+  ASSERT(args->length() == 4);
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+  VisitForValue(args->at(2), kStack);
+  VisitForValue(args->at(3), kStack);
+  __ CallStub(&stub);
+  Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);  // Load the object.
+
+  Label done;
+  // If the object is a smi return the object.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, &done);
+  // If the object is not a value type, return the object.
+  __ CmpObjectType(eax, JS_VALUE_TYPE, ebx);
+  __ j(not_equal, &done);
+  __ mov(eax, FieldOperand(eax, JSValue::kValueOffset));
+
+  __ bind(&done);
+  Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
+  // Load the arguments on the stack and call the runtime function.
+  ASSERT(args->length() == 2);
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+  __ CallRuntime(Runtime::kMath_pow, 2);
+  Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 2);
+
+  VisitForValue(args->at(0), kStack);  // Load the object.
+  VisitForValue(args->at(1), kAccumulator);  // Load the value.
+  __ pop(ebx);  // eax = value. ebx = object.
+
+  Label done;
+  // If the object is a smi, return the value.
+  __ test(ebx, Immediate(kSmiTagMask));
+  __ j(zero, &done);
+
+  // If the object is not a value type, return the value.
+  __ CmpObjectType(ebx, JS_VALUE_TYPE, ecx);
+  __ j(not_equal, &done);
+
+  // Store the value.
+  __ mov(FieldOperand(ebx, JSValue::kValueOffset), eax);
+  // Update the write barrier.  Save the value as it will be
+  // overwritten by the write barrier code and is needed afterward.
+  __ mov(edx, eax);
+  __ RecordWrite(ebx, JSValue::kValueOffset, edx, ecx);
+
+  __ bind(&done);
+  Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+  ASSERT_EQ(args->length(), 1);
+
+  // Load the argument on the stack and call the stub.
+  VisitForValue(args->at(0), kStack);
+
+  NumberToStringStub stub;
+  __ CallStub(&stub);
+  Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitCharFromCode(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label slow_case, done;
+  // Fast case of Heap::LookupSingleCharacterStringFromCode.
+  ASSERT(kSmiTag == 0);
+  ASSERT(kSmiShiftSize == 0);
+  ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
+  __ test(eax,
+          Immediate(kSmiTagMask |
+                    ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
+  __ j(not_zero, &slow_case);
+  __ Set(ebx, Immediate(Factory::single_character_string_cache()));
+  ASSERT(kSmiTag == 0);
+  ASSERT(kSmiTagSize == 1);
+  ASSERT(kSmiShiftSize == 0);
+  // At this point eax contains the smi-tagged ASCII char code.
+  __ mov(ebx, FieldOperand(ebx,
+                           eax, times_half_pointer_size,
+                           FixedArray::kHeaderSize));
+  __ cmp(ebx, Factory::undefined_value());
+  __ j(equal, &slow_case);
+  __ mov(eax, ebx);
+  __ jmp(&done);
+
+  __ bind(&slow_case);
+  __ push(eax);
+  __ CallRuntime(Runtime::kCharFromCode, 1);
+
+  __ bind(&done);
+  Apply(context_, eax);
+}
+
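The single test instruction above folds two checks into one mask: bit 0 catches non-smis, and the bits of ~String::kMaxAsciiCharCode shifted past the tag catch any char code above 127. Equivalently, as a sketch under the same smi encoding:

    #include <cstdint>

    // One-mask version of "is a smi AND char code <= 127".  Assumes the
    // 32-bit smi encoding (payload shifted left by one, tag bit clear).
    bool IsSmiAsciiCharCode(uint32_t tagged) {
      const uint32_t kMask = 1u |           // kSmiTagMask: non-smis fail
                             (~127u << 1);  // any code above 127 fails
      return (tagged & kMask) == 0;
    }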
+
+void FullCodeGenerator::EmitFastCharCodeAt(ZoneList<Expression*>* args) {
+  // TODO(fsc): Port the complete implementation from the classic back-end.
+  // Move the undefined value into the result register, which will
+  // trigger the slow case.
+  __ Set(eax, Immediate(Factory::undefined_value()));
+  Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+  ASSERT_EQ(2, args->length());
+
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+
+  StringAddStub stub(NO_STRING_ADD_FLAGS);
+  __ CallStub(&stub);
+  Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+  ASSERT_EQ(2, args->length());
+
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+
+  StringCompareStub stub;
+  __ CallStub(&stub);
+  Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+  // Load the argument on the stack and call the stub.
+  TranscendentalCacheStub stub(TranscendentalCache::SIN);
+  ASSERT(args->length() == 1);
+  VisitForValue(args->at(0), kStack);
+  __ CallStub(&stub);
+  Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+  // Load the argument on the stack and call the stub.
+  TranscendentalCacheStub stub(TranscendentalCache::COS);
+  ASSERT(args->length() == 1);
+  VisitForValue(args->at(0), kStack);
+  __ CallStub(&stub);
+  Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+  // Load the argument on the stack and call the runtime function.
+  ASSERT(args->length() == 1);
+  VisitForValue(args->at(0), kStack);
+  __ CallRuntime(Runtime::kMath_sqrt, 1);
+  Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+  ASSERT(args->length() >= 2);
+
+  int arg_count = args->length() - 2;  // For receiver and function.
+  VisitForValue(args->at(0), kStack);  // Receiver.
+  for (int i = 0; i < arg_count; i++) {
+    VisitForValue(args->at(i + 1), kStack);
+  }
+  VisitForValue(args->at(arg_count + 1), kAccumulator);  // Function.
+
+  // InvokeFunction requires function in edi. Move it in there.
+  if (!result_register().is(edi)) __ mov(edi, result_register());
+  ParameterCount count(arg_count);
+  __ InvokeFunction(edi, count, CALL_FUNCTION);
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 3);
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+  VisitForValue(args->at(2), kStack);
+  __ CallRuntime(Runtime::kRegExpConstructResult, 3);
+  Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 3);
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+  VisitForValue(args->at(2), kStack);
+  __ CallRuntime(Runtime::kSwapElements, 3);
+  Apply(context_, eax);
+}
+
+
+void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
+  ASSERT_EQ(2, args->length());
+
+  ASSERT_NE(NULL, args->at(0)->AsLiteral());
+  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
+
+  Handle<FixedArray> jsfunction_result_caches(
+      Top::global_context()->jsfunction_result_caches());
+  if (jsfunction_result_caches->length() <= cache_id) {
+    __ Abort("Attempt to use undefined cache.");
+    __ mov(eax, Factory::undefined_value());
+    Apply(context_, eax);
+    return;
+  }
+
+  VisitForValue(args->at(1), kAccumulator);
+
+  Register key = eax;
+  Register cache = ebx;
+  Register tmp = ecx;
+  __ mov(cache, CodeGenerator::ContextOperand(esi, Context::GLOBAL_INDEX));
+  __ mov(cache,
+         FieldOperand(cache, GlobalObject::kGlobalContextOffset));
+  __ mov(cache,
+         CodeGenerator::ContextOperand(
+             cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
+  __ mov(cache,
+         FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
+
+  Label done, not_found;
+  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  __ mov(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
+  // tmp now holds the finger offset as a smi.
+  __ cmp(key, CodeGenerator::FixedArrayElementOperand(cache, tmp));
+  __ j(not_equal, &not_found);
+
+  __ mov(eax, CodeGenerator::FixedArrayElementOperand(cache, tmp, 1));
+  __ jmp(&done);
+
+  __ bind(&not_found);
+  // Call runtime to perform the lookup.
+  __ push(cache);
+  __ push(key);
+  __ CallRuntime(Runtime::kGetFromCache, 2);
+
+  __ bind(&done);
+  Apply(context_, eax);
+}
+
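The fast path relies on the JSFunctionResultCache layout: a FixedArray of alternating key/value entries plus a 'finger' indexing the most recently used key, so a repeat lookup costs a single compare. A sketch with those layout assumptions made explicit:

    #include <vector>

    // Assumed layout: entries = [key0, value0, key1, value1, ...] with
    // finger indexing the most recently used key.
    struct ResultCache {
      std::vector<const void*> entries;
      int finger;
    };

    const void* CacheLookupFastPath(const ResultCache& cache,
                                    const void* key) {
      if (cache.entries[cache.finger] == key) {
        return cache.entries[cache.finger + 1];  // value follows its key
      }
      // Miss: the generated code falls back to Runtime::kGetFromCache.
      return nullptr;
    }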
+
 void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+  Handle<String> name = expr->name();
+  if (name->length() > 0 && name->Get(0) == '_') {
+    Comment cmnt(masm_, "[ InlineRuntimeCall");
+    EmitInlineRuntimeCall(expr);
+    return;
+  }
+
   Comment cmnt(masm_, "[ CallRuntime");
   ZoneList<Expression*>* args = expr->arguments();
 
@@ -1481,6 +2659,46 @@
 
 void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
   switch (expr->op()) {
+    case Token::DELETE: {
+      Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
+      Property* prop = expr->expression()->AsProperty();
+      Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
+      if (prop == NULL && var == NULL) {
+        // Result of deleting non-property, non-variable reference is true.
+        // The subexpression may have side effects.
+        VisitForEffect(expr->expression());
+        Apply(context_, true);
+      } else if (var != NULL &&
+                 !var->is_global() &&
+                 var->slot() != NULL &&
+                 var->slot()->type() != Slot::LOOKUP) {
+        // Result of deleting non-global, non-dynamic variables is false.
+        // The subexpression does not have side effects.
+        Apply(context_, false);
+      } else {
+        // Property or variable reference.  Call the delete builtin with
+        // object and property name as arguments.
+        if (prop != NULL) {
+          VisitForValue(prop->obj(), kStack);
+          VisitForValue(prop->key(), kStack);
+        } else if (var->is_global()) {
+          __ push(CodeGenerator::GlobalObject());
+          __ push(Immediate(var->name()));
+        } else {
+          // Non-global variable.  Call the runtime to look up the context
+          // where the variable was introduced.
+          __ push(context_register());
+          __ push(Immediate(var->name()));
+          __ CallRuntime(Runtime::kLookupContext, 2);
+          __ push(eax);
+          __ push(Immediate(var->name()));
+        }
+        __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+        Apply(context_, eax);
+      }
+      break;
+    }
+
     case Token::VOID: {
       Comment cmnt(masm_, "[ UnaryOperation (VOID)");
       VisitForEffect(expr->expression());
@@ -1521,33 +2739,15 @@
 
     case Token::NOT: {
       Comment cmnt(masm_, "[ UnaryOperation (NOT)");
-      Label materialize_true, materialize_false, done;
-      // Initially assume a pure test context.  Notice that the labels are
-      // swapped.
-      Label* if_true = false_label_;
-      Label* if_false = true_label_;
-      switch (context_) {
-        case Expression::kUninitialized:
-          UNREACHABLE();
-          break;
-        case Expression::kEffect:
-          if_true = &done;
-          if_false = &done;
-          break;
-        case Expression::kValue:
-          if_true = &materialize_false;
-          if_false = &materialize_true;
-          break;
-        case Expression::kTest:
-          break;
-        case Expression::kValueTest:
-          if_false = &materialize_true;
-          break;
-        case Expression::kTestValue:
-          if_true = &materialize_false;
-          break;
-      }
+      Label materialize_true, materialize_false;
+      Label* if_true = NULL;
+      Label* if_false = NULL;
+
+      // Notice that the labels are swapped.
+      PrepareTest(&materialize_true, &materialize_false, &if_false, &if_true);
+
       VisitForControl(expr->expression(), if_true, if_false);
+
       Apply(context_, if_false, if_true);  // Labels swapped.
       break;
     }
@@ -1643,6 +2843,12 @@
 
 void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
   Comment cmnt(masm_, "[ CountOperation");
+  // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
+  // as the left-hand side.
+  if (!expr->expression()->IsValidLeftHandSide()) {
+    VisitForEffect(expr->expression());
+    return;
+  }
 
   // Expression can only be a property, a global or a (parameter or local)
   // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
@@ -1664,7 +2870,7 @@
     EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
                      Expression::kValue);
     location_ = saved_location;
-  } else  {
+  } else {
     // Reserve space for result of postfix operation.
     if (expr->is_postfix() && context_ != Expression::kEffect) {
       __ push(Immediate(Smi::FromInt(0)));
@@ -1754,7 +2960,9 @@
   switch (assign_type) {
     case VARIABLE:
       if (expr->is_postfix()) {
+        // Perform the assignment as if via '='.
         EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+                               Token::ASSIGN,
                                Expression::kEffect);
         // For all contexts except kEffect: We have the result on
         // top of the stack.
@@ -1762,7 +2970,9 @@
           ApplyTOS(context_);
         }
       } else {
+        // Perform the assignment as if via '='.
         EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+                               Token::ASSIGN,
                                context_);
       }
       break;
@@ -1840,36 +3050,41 @@
 }
 
 
+void FullCodeGenerator::EmitNullCompare(bool strict,
+                                        Register obj,
+                                        Register null_const,
+                                        Label* if_true,
+                                        Label* if_false,
+                                        Register scratch) {
+  __ cmp(obj, Operand(null_const));
+  if (strict) {
+    __ j(equal, if_true);
+  } else {
+    __ j(equal, if_true);
+    __ cmp(obj, Factory::undefined_value());
+    __ j(equal, if_true);
+    __ test(obj, Immediate(kSmiTagMask));
+    __ j(zero, if_false);
+    // It can be an undetectable object.
+    __ mov(scratch, FieldOperand(obj, HeapObject::kMapOffset));
+    __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
+    __ test(scratch, Immediate(1 << Map::kIsUndetectable));
+    __ j(not_zero, if_true);
+  }
+  __ jmp(if_false);
+}
+
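The helper encodes JavaScript's two equality flavors against null: '===' matches only null itself, while '==' also matches undefined and undetectable objects (the document.all quirk). In predicate form, with hypothetical flags in place of the map checks:

    // Each branch corresponds to one jump emitted by EmitNullCompare.
    struct JSValue {
      bool is_null, is_undefined, is_smi, is_undetectable;
    };

    bool CompareWithNull(bool strict, const JSValue& obj) {
      if (obj.is_null) return true;
      if (strict) return false;           // '=== null' accepts nothing else
      if (obj.is_undefined) return true;  // '== null' also accepts undefined
      if (obj.is_smi) return false;
      return obj.is_undetectable;         // undetectables compare equal too
    }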
+
 void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
   Comment cmnt(masm_, "[ CompareOperation");
 
   // Always perform the comparison for its control flow.  Pack the result
   // into the expression's context after the comparison is performed.
-  Label materialize_true, materialize_false, done;
-  // Initially assume we are in a test context.
-  Label* if_true = true_label_;
-  Label* if_false = false_label_;
-  switch (context_) {
-    case Expression::kUninitialized:
-      UNREACHABLE();
-      break;
-    case Expression::kEffect:
-      if_true = &done;
-      if_false = &done;
-      break;
-    case Expression::kValue:
-      if_true = &materialize_true;
-      if_false = &materialize_false;
-      break;
-    case Expression::kTest:
-      break;
-    case Expression::kValueTest:
-      if_true = &materialize_true;
-      break;
-    case Expression::kTestValue:
-      if_false = &materialize_false;
-      break;
-  }
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
 
   VisitForValue(expr->left(), kStack);
   switch (expr->op()) {
@@ -1899,10 +3114,24 @@
         case Token::EQ_STRICT:
           strict = true;
           // Fall through
-        case Token::EQ:
+        case Token::EQ: {
           cc = equal;
           __ pop(edx);
+          // If either operand is the constant null, do a fast compare
+          // against null.
+          Literal* right_literal = expr->right()->AsLiteral();
+          Literal* left_literal = expr->left()->AsLiteral();
+          if (right_literal != NULL && right_literal->handle()->IsNull()) {
+            EmitNullCompare(strict, edx, eax, if_true, if_false, ecx);
+            Apply(context_, if_true, if_false);
+            return;
+          } else if (left_literal != NULL && left_literal->handle()->IsNull()) {
+            EmitNullCompare(strict, eax, edx, if_true, if_false, ecx);
+            Apply(context_, if_true, if_false);
+            return;
+          }
           break;
+        }
         case Token::LT:
           cc = less;
           __ pop(edx);
@@ -2012,3 +3241,5 @@
 #undef __
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 4929c8a..644d200 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_IA32)
+
 #include "codegen-inl.h"
 #include "ic-inl.h"
 #include "runtime.h"
@@ -1643,3 +1645,5 @@
 
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/jump-target-ia32.cc b/src/ia32/jump-target-ia32.cc
index cba6508..76c0d02 100644
--- a/src/ia32/jump-target-ia32.cc
+++ b/src/ia32/jump-target-ia32.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_IA32)
+
 #include "codegen-inl.h"
 #include "jump-target-inl.h"
 #include "register-allocator-inl.h"
@@ -431,3 +433,5 @@
 
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index a7d2834..ba2fe2d 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_IA32)
+
 #include "bootstrapper.h"
 #include "codegen-inl.h"
 #include "debug.h"
@@ -1706,3 +1708,5 @@
 
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index d9dddd6..b0de827 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -26,6 +26,9 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "v8.h"
+
+#if defined(V8_TARGET_ARCH_IA32)
+
 #include "unicode.h"
 #include "log.h"
 #include "ast.h"
@@ -1241,3 +1244,5 @@
 #endif  // V8_INTERPRETED_REGEXP
 
 }}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/register-allocator-ia32.cc b/src/ia32/register-allocator-ia32.cc
index 73fefb3..d840c0c 100644
--- a/src/ia32/register-allocator-ia32.cc
+++ b/src/ia32/register-allocator-ia32.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_IA32)
+
 #include "codegen-inl.h"
 #include "register-allocator-inl.h"
 #include "virtual-frame-inl.h"
@@ -151,3 +153,5 @@
 
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 189c0e4..eb555d7 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_IA32)
+
 #include "ic-inl.h"
 #include "codegen-inl.h"
 #include "stub-cache.h"
@@ -354,7 +356,7 @@
                         Register holder,
                         Register scratch1,
                         Register scratch2,
-                        JSObject* holder_obj,
+                        JSObject* interceptor_holder,
                         LookupResult* lookup,
                         String* name,
                         Label* miss_label) {
@@ -374,7 +376,8 @@
     }
 
     if (!optimize) {
-      CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
+      CompileRegular(masm, receiver, holder, scratch2, interceptor_holder,
+                     miss_label);
       return;
     }
 
@@ -387,12 +390,17 @@
     __ push(holder);
     __ push(name_);
 
+    // Invoke the interceptor.  Note: the map checks from the receiver to
+    // the interceptor's holder have already been compiled (see the callers
+    // of this method).
     CompileCallLoadPropertyWithInterceptor(masm,
                                            receiver,
                                            holder,
                                            name_,
-                                           holder_obj);
+                                           interceptor_holder);
 
+    // Check whether the interceptor provided a value for the property.
+    // If it did, return immediately.
     Label interceptor_failed;
     __ cmp(eax, Factory::no_interceptor_result_sentinel());
     __ j(equal, &interceptor_failed);
@@ -409,47 +417,61 @@
     __ LeaveInternalFrame();
 
     if (lookup->type() == FIELD) {
-      holder = stub_compiler->CheckPrototypes(holder_obj, holder,
+      // We found a FIELD property in the prototype chain of the
+      // interceptor's holder.  Check that the maps from the interceptor's
+      // holder to the field's holder haven't changed...
+      holder = stub_compiler->CheckPrototypes(interceptor_holder, holder,
                                               lookup->holder(), scratch1,
                                               scratch2,
                                               name,
                                               miss_label);
+      // ... and retrieve the field from the field's holder.
       stub_compiler->GenerateFastPropertyLoad(masm, eax,
                                               holder, lookup->holder(),
                                               lookup->GetFieldIndex());
       __ ret(0);
     } else {
+      // We found a CALLBACKS property in the prototype chain of the
+      // interceptor's holder.
       ASSERT(lookup->type() == CALLBACKS);
       ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
       ASSERT(callback != NULL);
       ASSERT(callback->getter() != NULL);
 
+      // Prepare for tail call: push receiver to stack after return address.
       Label cleanup;
-      __ pop(scratch2);
+      __ pop(scratch2);  // return address
       __ push(receiver);
       __ push(scratch2);
 
-      holder = stub_compiler->CheckPrototypes(holder_obj, holder,
+      // Check that the maps from interceptor's holder to callback's holder
+      // haven't changed.
+      holder = stub_compiler->CheckPrototypes(interceptor_holder, holder,
                                               lookup->holder(), scratch1,
                                               scratch2,
                                               name,
                                               &cleanup);
 
-      __ pop(scratch2);  // save old return address
+      // Continue tail call preparation: push remaining parameters after
+      // return address.
+      __ pop(scratch2);  // return address
       __ push(holder);
       __ mov(holder, Immediate(Handle<AccessorInfo>(callback)));
       __ push(holder);
       __ push(FieldOperand(holder, AccessorInfo::kDataOffset));
       __ push(name_);
-      __ push(scratch2);  // restore old return address
+      __ push(scratch2);  // restore return address
 
+      // Tail call to runtime.
       ExternalReference ref =
           ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
       __ TailCallExternalReference(ref, 5, 1);
 
+      // Cleanup code: the receiver was pushed after the return address
+      // and needs to be removed from there.
       __ bind(&cleanup);
-      __ pop(scratch1);
-      __ pop(scratch2);
+      __ pop(scratch1);  // return address.
+      __ pop(scratch2);  // receiver.
       __ push(scratch1);
     }
   }
@@ -459,10 +481,10 @@
                       Register receiver,
                       Register holder,
                       Register scratch,
-                      JSObject* holder_obj,
+                      JSObject* interceptor_holder,
                       Label* miss_label) {
     __ pop(scratch);  // save old return address
-    PushInterceptorArguments(masm, receiver, holder, name_, holder_obj);
+    PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
     __ push(scratch);  // restore old return address
 
     ExternalReference ref = ExternalReference(
@@ -624,7 +646,7 @@
                         Register receiver,
                         Register scratch1,
                         Register scratch2,
-                        JSObject* holder_obj,
+                        JSObject* interceptor_holder,
                         LookupResult* lookup,
                         String* name,
                         const CallOptimization& optimization,
@@ -637,10 +659,13 @@
     bool can_do_fast_api_call = false;
     if (optimization.is_simple_api_call() &&
         !lookup->holder()->IsGlobalObject()) {
-      depth1 = optimization.GetPrototypeDepthOfExpectedType(object, holder_obj);
+      depth1 =
+          optimization.GetPrototypeDepthOfExpectedType(object,
+                                                       interceptor_holder);
       if (depth1 == kInvalidProtoDepth) {
-        depth2 = optimization.GetPrototypeDepthOfExpectedType(holder_obj,
-                                                              lookup->holder());
+        depth2 =
+            optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
+                                                         lookup->holder());
       }
       can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
                              (depth2 != kInvalidProtoDepth);
@@ -653,24 +678,32 @@
       ReserveSpaceForFastApiCall(masm, scratch1);
     }
 
+    // Check that the maps from the receiver to the interceptor's holder
+    // haven't changed; only then may the interceptor be invoked.
     Label miss_cleanup;
     Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
     Register holder =
-        stub_compiler_->CheckPrototypes(object, receiver, holder_obj,
+        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
                                         scratch1, scratch2, name,
                                         depth1, miss);
 
+    // Invoke the interceptor; if it provides a value, branch to
+    // |regular_invoke|.
     Label regular_invoke;
-    LoadWithInterceptor(masm, receiver, holder, holder_obj, &regular_invoke);
+    LoadWithInterceptor(masm, receiver, holder, interceptor_holder,
+                        &regular_invoke);
 
-    // Generate code for the failed interceptor case.
+    // The interceptor returned nothing for this property.  Try to use the
+    // cached constant function.
 
-    // Check the lookup is still valid.
-    stub_compiler_->CheckPrototypes(holder_obj, receiver,
+    // Check that the maps from the interceptor's holder to the constant
+    // function's holder haven't changed before using the cached function.
+    stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
                                     lookup->holder(),
                                     scratch1, scratch2, name,
                                     depth2, miss);
 
+    // Invoke function.
     if (can_do_fast_api_call) {
       GenerateFastApiCall(masm, optimization, arguments_.immediate());
     } else {
@@ -678,12 +711,14 @@
                         JUMP_FUNCTION);
     }
 
+    // Deferred code for the fast API call case: free the preallocated space.
     if (can_do_fast_api_call) {
       __ bind(&miss_cleanup);
       FreeSpaceForFastApiCall(masm, scratch1);
       __ jmp(miss_label);
     }
 
+    // Invoke a regular function.
     __ bind(&regular_invoke);
     if (can_do_fast_api_call) {
       FreeSpaceForFastApiCall(masm, scratch1);
@@ -696,10 +731,10 @@
                       Register scratch1,
                       Register scratch2,
                       String* name,
-                      JSObject* holder_obj,
+                      JSObject* interceptor_holder,
                       Label* miss_label) {
     Register holder =
-        stub_compiler_->CheckPrototypes(object, receiver, holder_obj,
+        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
                                         scratch1, scratch2, name,
                                         miss_label);
 
@@ -711,7 +746,7 @@
                              receiver,
                              holder,
                              name_,
-                             holder_obj);
+                             interceptor_holder);
 
     __ CallExternalReference(
           ExternalReference(
@@ -2387,3 +2422,5 @@
 #undef __
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/virtual-frame-ia32.cc b/src/ia32/virtual-frame-ia32.cc
index 10aaa52..e22df6e 100644
--- a/src/ia32/virtual-frame-ia32.cc
+++ b/src/ia32/virtual-frame-ia32.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_IA32)
+
 #include "codegen-inl.h"
 #include "register-allocator-inl.h"
 #include "scopes.h"
@@ -1310,3 +1312,5 @@
 #undef __
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/virtual-frame-ia32.h b/src/ia32/virtual-frame-ia32.h
index 51a802c..a8f23b0 100644
--- a/src/ia32/virtual-frame-ia32.h
+++ b/src/ia32/virtual-frame-ia32.h
@@ -144,6 +144,9 @@
   // (ie, they all have frame-external references).
   Register SpillAnyRegister();
 
+  // Spill the top element of the frame.
+  void SpillTop() { SpillElementAt(element_count() - 1); }
+
   // Sync the range of elements in [begin, end] with memory.
   void SyncRange(int begin, int end);
 
@@ -349,7 +352,7 @@
   Result CallStoreIC(Handle<String> name, bool is_contextual);
 
   // Call keyed store IC.  Value, key, and receiver are found on top
-  // of the frame.  Key and receiver are not dropped.
+  // of the frame.  All three are dropped.
   Result CallKeyedStoreIC();
 
   // Call call IC.  Function name, arguments, and receiver are found on top
diff --git a/src/jump-target-light.h b/src/jump-target-light.h
index 656ec75..084bd58 100644
--- a/src/jump-target-light.h
+++ b/src/jump-target-light.h
@@ -74,6 +74,8 @@
 
   inline CodeGenerator* cgen();
 
+  Label* entry_label() { return &entry_label_; }
+
   const VirtualFrame* entry_frame() const {
     return entry_frame_set_ ? &entry_frame_ : NULL;
   }
diff --git a/src/liveedit.cc b/src/liveedit.cc
index 592ef49..b14d3d8 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -988,7 +988,7 @@
   byte* buffer_;
   int buffer_size_;
 
-  static const int kBufferGap = 8;
+  static const int kBufferGap = RelocInfoWriter::kMaxSize;
   static const int kMaximalBufferSize = 512*MB;
 };
 
diff --git a/src/log.cc b/src/log.cc
index 891b0e2..f48b358 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -170,7 +170,7 @@
   SafeStackTraceFrameIterator it(sample->fp, sample->sp,
                                  sample->sp, js_entry_sp);
   while (!it.done() && i < TickSample::kMaxFramesCount) {
-    sample->stack[i++] = it.frame()->pc();
+    sample->stack[i++] = reinterpret_cast<Address>(it.frame()->function());
     it.Advance();
   }
   sample->frames_count = i;
diff --git a/src/macro-assembler.h b/src/macro-assembler.h
index a21e960..686a61c 100644
--- a/src/macro-assembler.h
+++ b/src/macro-assembler.h
@@ -68,13 +68,8 @@
 #elif V8_TARGET_ARCH_ARM
 #include "arm/constants-arm.h"
 #include "assembler.h"
-#ifdef V8_ARM_VARIANT_THUMB
-#include "arm/assembler-thumb2.h"
-#include "arm/assembler-thumb2-inl.h"
-#else
 #include "arm/assembler-arm.h"
 #include "arm/assembler-arm-inl.h"
-#endif
 #include "code.h"  // must be after assembler_*.h
 #include "arm/macro-assembler-arm.h"
 #elif V8_TARGET_ARCH_MIPS
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index e3cc6ab..554b579 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -78,6 +78,7 @@
   SweepLargeObjectSpace();
 
   if (IsCompacting()) {
+    GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
     EncodeForwardingAddresses();
 
     UpdatePointers();
@@ -678,6 +679,7 @@
 
 
 void MarkCompactCollector::MarkLiveObjects() {
+  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
 #ifdef DEBUG
   ASSERT(state_ == PREPARE_GC);
   state_ = MARK_LIVE_OBJECTS;
@@ -1163,6 +1165,8 @@
       HeapObject* target = HeapObject::cast(result);
       MigrateObject(target->address(), object->address(), object_size);
       Heap::UpdateRSet(target);
+      MarkCompactCollector::tracer()->
+          increment_promoted_objects_size(object_size);
       return true;
     }
   } else {
@@ -1177,6 +1181,8 @@
       if (target_space == Heap::old_pointer_space()) {
         Heap::UpdateRSet(target);
       }
+      MarkCompactCollector::tracer()->
+          increment_promoted_objects_size(object_size);
       return true;
     }
   }
@@ -1735,6 +1741,8 @@
 
 
 void MarkCompactCollector::SweepSpaces() {
+  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
+
   ASSERT(state_ == SWEEP_SPACES);
   ASSERT(!IsCompacting());
   // Noncompacting collections simply sweep the spaces to clear the mark
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
index 4a91624..d9617dc 100644
--- a/src/mips/assembler-mips.cc
+++ b/src/mips/assembler-mips.cc
@@ -34,6 +34,9 @@
 
 
 #include "v8.h"
+
+#if defined(V8_TARGET_ARCH_MIPS)
+
 #include "mips/assembler-mips-inl.h"
 #include "serialize.h"
 
@@ -1206,3 +1209,4 @@
 
 } }  // namespace v8::internal
 
+#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index 04bcfeb..26fea25 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -29,6 +29,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_MIPS)
+
 #include "codegen-inl.h"
 #include "debug.h"
 #include "runtime.h"
@@ -200,3 +202,4 @@
 
 } }  // namespace v8::internal
 
+#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index ca1edd4..f8b88d7 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -28,6 +28,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_MIPS)
+
 #include "bootstrapper.h"
 #include "codegen-inl.h"
 #include "compiler.h"
@@ -1426,3 +1428,5 @@
 #undef __
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/constants-mips.cc b/src/mips/constants-mips.cc
index a5ef9f8..49502bd 100644
--- a/src/mips/constants-mips.cc
+++ b/src/mips/constants-mips.cc
@@ -26,6 +26,9 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "v8.h"
+
+#if defined(V8_TARGET_ARCH_MIPS)
+
 #include "constants-mips.h"
 
 namespace assembler {
@@ -321,3 +324,5 @@
 }
 
 } }   // namespace assembler::mips
+
+#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/cpu-mips.cc b/src/mips/cpu-mips.cc
index f592257..659fc01 100644
--- a/src/mips/cpu-mips.cc
+++ b/src/mips/cpu-mips.cc
@@ -35,6 +35,9 @@
 #endif  // #ifdef __mips
 
 #include "v8.h"
+
+#if defined(V8_TARGET_ARCH_MIPS)
+
 #include "cpu.h"
 
 namespace v8 {
@@ -67,3 +70,4 @@
 
 } }  // namespace v8::internal
 
+#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/debug-mips.cc b/src/mips/debug-mips.cc
index cdb35ae..8c40930 100644
--- a/src/mips/debug-mips.cc
+++ b/src/mips/debug-mips.cc
@@ -29,6 +29,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_MIPS)
+
 #include "codegen-inl.h"
 #include "debug.h"
 
@@ -126,3 +128,4 @@
 
 } }  // namespace v8::internal
 
+#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/disasm-mips.cc b/src/mips/disasm-mips.cc
index cab72d1..959a4a2 100644
--- a/src/mips/disasm-mips.cc
+++ b/src/mips/disasm-mips.cc
@@ -57,6 +57,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_MIPS)
+
 #include "constants-mips.h"
 #include "disasm.h"
 #include "macro-assembler.h"
@@ -782,3 +784,4 @@
 
 }  // namespace disasm
 
+#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/fast-codegen-mips.cc b/src/mips/fast-codegen-mips.cc
index 48a0ce6..186f9fa 100644
--- a/src/mips/fast-codegen-mips.cc
+++ b/src/mips/fast-codegen-mips.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_MIPS)
+
 #include "codegen-inl.h"
 #include "fast-codegen.h"
 
@@ -72,3 +74,4 @@
 
 } }  // namespace v8::internal
 
+#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/frames-mips.cc b/src/mips/frames-mips.cc
index cdc880d..0fce3cd 100644
--- a/src/mips/frames-mips.cc
+++ b/src/mips/frames-mips.cc
@@ -28,6 +28,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_MIPS)
+
 #include "frames-inl.h"
 #include "mips/assembler-mips-inl.h"
 
@@ -97,3 +99,4 @@
 
 } }  // namespace v8::internal
 
+#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 3c29e99..afda2cb 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_MIPS)
+
 #include "codegen-inl.h"
 #include "compiler.h"
 #include "debug.h"
@@ -271,3 +273,5 @@
 #undef __
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
index 8c90921..519fe62 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips/ic-mips.cc
@@ -29,6 +29,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_MIPS)
+
 #include "codegen-inl.h"
 #include "ic-inl.h"
 #include "runtime.h"
@@ -215,3 +217,4 @@
 
 } }  // namespace v8::internal
 
+#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/jump-target-mips.cc b/src/mips/jump-target-mips.cc
index 4bd9102..408f75e 100644
--- a/src/mips/jump-target-mips.cc
+++ b/src/mips/jump-target-mips.cc
@@ -28,6 +28,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_MIPS)
+
 #include "codegen-inl.h"
 #include "jump-target-inl.h"
 #include "register-allocator-inl.h"
@@ -170,3 +172,4 @@
 
 } }  // namespace v8::internal
 
+#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index c276af5..e096028 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -29,6 +29,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_MIPS)
+
 #include "bootstrapper.h"
 #include "codegen-inl.h"
 #include "debug.h"
@@ -1321,3 +1323,4 @@
 
 } }  // namespace v8::internal
 
+#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/register-allocator-mips.cc b/src/mips/register-allocator-mips.cc
index f48d3a6..2c5d61b 100644
--- a/src/mips/register-allocator-mips.cc
+++ b/src/mips/register-allocator-mips.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_MIPS)
+
 #include "codegen-inl.h"
 #include "register-allocator-inl.h"
 
@@ -58,3 +60,4 @@
 
 } }  // namespace v8::internal
 
+#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/simulator-mips.cc b/src/mips/simulator-mips.cc
index bdb3b7f..886b9e4 100644
--- a/src/mips/simulator-mips.cc
+++ b/src/mips/simulator-mips.cc
@@ -29,6 +29,8 @@
 #include <cstdarg>
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_MIPS)
+
 #include "disasm.h"
 #include "assembler.h"
 #include "globals.h"    // Need the BitCast
@@ -1646,3 +1648,4 @@
 
 #endif  // __mips
 
+#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 0b2d2c3..faaacbc 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_MIPS)
+
 #include "ic-inl.h"
 #include "codegen-inl.h"
 #include "stub-cache.h"
@@ -398,3 +400,4 @@
 
 } }  // namespace v8::internal
 
+#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/virtual-frame-mips.cc b/src/mips/virtual-frame-mips.cc
index c2116de..b61ce75 100644
--- a/src/mips/virtual-frame-mips.cc
+++ b/src/mips/virtual-frame-mips.cc
@@ -29,6 +29,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_MIPS)
+
 #include "codegen-inl.h"
 #include "register-allocator-inl.h"
 #include "scopes.h"
@@ -314,3 +316,4 @@
 
 } }  // namespace v8::internal
 
+#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/objects-inl.h b/src/objects-inl.h
index ad15104..d82d73e 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -1691,13 +1691,19 @@
 
 
 Object* String::TryFlatten(PretenureFlag pretenure) {
-  // We don't need to flatten strings that are already flat.  Since this code
-  // is inlined, it can be helpful in the flat case to not call out to Flatten.
-  if (IsFlat()) return this;
+  if (!StringShape(this).IsCons()) return this;
+  ConsString* cons = ConsString::cast(this);
+  if (cons->second()->length() == 0) return cons->first();
   return SlowTryFlatten(pretenure);
 }
 
 
+String* String::TryFlattenGetString(PretenureFlag pretenure) {
+  Object* flat = TryFlatten(pretenure);
+  return flat->IsFailure() ? this : String::cast(flat);
+}
+
+
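The inlined fast path handles the two cheap cases directly: a non-cons string is already flat, and a cons whose second half is empty unwraps to its first half with no copying; only a genuine two-part cons pays for SlowTryFlatten. A simplified node-level sketch:

    // Simplified node model; the real code works on tagged heap objects.
    struct Str {
      bool is_cons;
      Str* first;   // valid when is_cons
      Str* second;  // valid when is_cons
      int length;
    };

    Str* SlowFlatten(Str* s);  // copies both halves into one flat string

    Str* TryFlatten(Str* s) {
      if (!s->is_cons) return s;                    // already flat
      if (s->second->length == 0) return s->first;  // degenerate cons: unwrap
      return SlowFlatten(s);
    }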
 uint16_t String::Get(int index) {
   ASSERT(index >= 0 && index < length());
   switch (StringShape(this).full_representation_tag()) {
diff --git a/src/objects.cc b/src/objects.cc
index c8acb47..ab678cb 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -189,7 +189,7 @@
   }
 
   UNREACHABLE();
-  return 0;
+  return NULL;
 }
 
 
@@ -631,7 +631,7 @@
     case kConsStringTag: {
       ConsString* cs = ConsString::cast(this);
       if (cs->second()->length() == 0) {
-        return this;
+        return cs->first();
       }
       // There's little point in putting the flat string in new space if the
       // cons string is in old space.  It can never get GCed until there is
@@ -669,7 +669,7 @@
       }
       cs->set_first(result);
       cs->set_second(Heap::empty_string());
-      return this;
+      return result;
     }
     default:
       return this;
@@ -1613,7 +1613,7 @@
   }
 
   UNREACHABLE();
-  return 0;
+  return NULL;
 }
 
 
@@ -1657,7 +1657,8 @@
 }
 
 
-Object* JSObject::LookupCallbackSetterInPrototypes(uint32_t index) {
+bool JSObject::SetElementWithCallbackSetterInPrototypes(uint32_t index,
+                                                        Object* value) {
   for (Object* pt = GetPrototype();
        pt != Heap::null_value();
        pt = pt->GetPrototype()) {
@@ -1670,12 +1671,12 @@
       Object* element = dictionary->ValueAt(entry);
       PropertyDetails details = dictionary->DetailsAt(entry);
       if (details.type() == CALLBACKS) {
-        // Only accessors allowed as elements.
-        return FixedArray::cast(element)->get(kSetterIndex);
+        SetElementWithCallback(element, index, value, JSObject::cast(pt));
+        return true;
       }
     }
   }
-  return Heap::undefined_value();
+  return false;
 }
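The helper above changes contract: instead of returning a setter for the caller to invoke, it now performs the store through SetElementWithCallback itself and returns true when an element accessor on the prototype chain handled it. The caller-side pattern after this change (sketch, names as in this patch):

    // Sketch of the new caller-side pattern:
    if (SetElementWithCallbackSetterInPrototypes(index, value)) {
      return value;  // a prototype's CALLBACKS entry consumed the store
    }
    // ...otherwise fall through to a normal element store.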
 
 
@@ -2692,30 +2693,11 @@
   // interceptor calls.
   AssertNoContextChange ncc;
 
-  // Check access rights if needed.
-  if (IsAccessCheckNeeded() &&
-      !Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
-    Top::ReportFailedAccessCheck(this, v8::ACCESS_SET);
-    return Heap::undefined_value();
-  }
-
   // Try to flatten before operating on the string.
   name->TryFlatten();
 
-  // Check if there is an API defined callback object which prohibits
-  // callback overwriting in this object or it's prototype chain.
-  // This mechanism is needed for instance in a browser setting, where
-  // certain accessors such as window.location should not be allowed
-  // to be overwritten because allowing overwriting could potentially
-  // cause security problems.
-  LookupResult callback_result;
-  LookupCallback(name, &callback_result);
-  if (callback_result.IsFound()) {
-    Object* obj = callback_result.GetCallbackObject();
-    if (obj->IsAccessorInfo() &&
-        AccessorInfo::cast(obj)->prohibits_overwriting()) {
-      return Heap::undefined_value();
-    }
+  if (!CanSetCallback(name)) {
+    return Heap::undefined_value();
   }
 
   uint32_t index;
@@ -2746,9 +2728,10 @@
           PropertyDetails details = dictionary->DetailsAt(entry);
           if (details.IsReadOnly()) return Heap::undefined_value();
           if (details.type() == CALLBACKS) {
-            // Only accessors allowed as elements.
-            ASSERT(result->IsFixedArray());
-            return result;
+            if (result->IsFixedArray()) {
+              return result;
+            }
+            // Otherwise allow overriding it.
           }
         }
         break;
@@ -2765,15 +2748,10 @@
       if (result.IsReadOnly()) return Heap::undefined_value();
       if (result.type() == CALLBACKS) {
         Object* obj = result.GetCallbackObject();
+        // Need to preserve old getters/setters.
         if (obj->IsFixedArray()) {
-          // The object might be in fast mode even though it has
-          // a getter/setter.
-          Object* ok = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
-          if (ok->IsFailure()) return ok;
-
-          PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
-          SetNormalizedProperty(name, obj, details);
-          return obj;
+          // Use set to update attributes.
+          return SetPropertyCallback(name, obj, attributes);
         }
       }
     }
@@ -2782,50 +2760,100 @@
   // Allocate the fixed array to hold getter and setter.
   Object* structure = Heap::AllocateFixedArray(2, TENURED);
   if (structure->IsFailure()) return structure;
-  PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
 
   if (is_element) {
-    // Normalize object to make this operation simple.
-    Object* ok = NormalizeElements();
-    if (ok->IsFailure()) return ok;
-
-    // Update the dictionary with the new CALLBACKS property.
-    Object* dict =
-        element_dictionary()->Set(index, structure, details);
-    if (dict->IsFailure()) return dict;
-
-    // If name is an index we need to stay in slow case.
-    NumberDictionary* elements = NumberDictionary::cast(dict);
-    elements->set_requires_slow_elements();
-    // Set the potential new dictionary on the object.
-    set_elements(NumberDictionary::cast(dict));
+    return SetElementCallback(index, structure, attributes);
   } else {
-    // Normalize object to make this operation simple.
-    Object* ok = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
-    if (ok->IsFailure()) return ok;
-
-    // For the global object allocate a new map to invalidate the global inline
-    // caches which have a global property cell reference directly in the code.
-    if (IsGlobalObject()) {
-      Object* new_map = map()->CopyDropDescriptors();
-      if (new_map->IsFailure()) return new_map;
-      set_map(Map::cast(new_map));
-    }
-
-    // Update the dictionary with the new CALLBACKS property.
-    return SetNormalizedProperty(name, structure, details);
+    return SetPropertyCallback(name, structure, attributes);
   }
+}
+
+
+bool JSObject::CanSetCallback(String* name) {
+  ASSERT(!IsAccessCheckNeeded()
+         || Top::MayNamedAccess(this, name, v8::ACCESS_SET));
+
+  // Check if there is an API defined callback object which prohibits
+  // callback overwriting in this object or its prototype chain.
+  // This mechanism is needed for instance in a browser setting, where
+  // certain accessors such as window.location should not be allowed
+  // to be overwritten because allowing overwriting could potentially
+  // cause security problems.
+  LookupResult callback_result;
+  LookupCallback(name, &callback_result);
+  if (callback_result.IsProperty()) {
+    Object* obj = callback_result.GetCallbackObject();
+    if (obj->IsAccessorInfo() &&
+        AccessorInfo::cast(obj)->prohibits_overwriting()) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+
+Object* JSObject::SetElementCallback(uint32_t index,
+                                     Object* structure,
+                                     PropertyAttributes attributes) {
+  PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
+
+  // Normalize elements to make this operation simple.
+  Object* ok = NormalizeElements();
+  if (ok->IsFailure()) return ok;
+
+  // Update the dictionary with the new CALLBACKS property.
+  Object* dict =
+      element_dictionary()->Set(index, structure, details);
+  if (dict->IsFailure()) return dict;
+
+  NumberDictionary* elements = NumberDictionary::cast(dict);
+  elements->set_requires_slow_elements();
+  // Set the potential new dictionary on the object.
+  set_elements(elements);
 
   return structure;
 }
 
 
+Object* JSObject::SetPropertyCallback(String* name,
+                                      Object* structure,
+                                      PropertyAttributes attributes) {
+  PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
+
+  bool convert_back_to_fast = HasFastProperties() &&
+      (map()->instance_descriptors()->number_of_descriptors()
+          < DescriptorArray::kMaxNumberOfDescriptors);
+
+  // Normalize object to make this operation simple.
+  Object* ok = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
+  if (ok->IsFailure()) return ok;
+
+  // For the global object allocate a new map to invalidate the global inline
+  // caches which have a global property cell reference directly in the code.
+  if (IsGlobalObject()) {
+    Object* new_map = map()->CopyDropDescriptors();
+    if (new_map->IsFailure()) return new_map;
+    set_map(Map::cast(new_map));
+  }
+
+  // Update the dictionary with the new CALLBACKS property.
+  Object* result = SetNormalizedProperty(name, structure, details);
+  if (result->IsFailure()) return result;
+
+  if (convert_back_to_fast) {
+    ok = TransformToFastProperties(0);
+    if (ok->IsFailure()) return ok;
+  }
+  return result;
+}
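SetPropertyCallback above deliberately round-trips through dictionary mode: it records whether the object was fast and still had descriptor slots, normalizes, installs the CALLBACKS entry, then converts back to fast properties. A condensed view of that control flow (illustrative only):

    // Condensed control flow of SetPropertyCallback (illustrative):
    bool back_to_fast = HasFastProperties() /* && descriptor slots left */;
    NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);  // go to slow mode
    SetNormalizedProperty(name, structure, details);    // install callback
    if (back_to_fast) TransformToFastProperties(0);     // back to fast mode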
+
 Object* JSObject::DefineAccessor(String* name, bool is_getter, JSFunction* fun,
                                  PropertyAttributes attributes) {
   // Check access rights if needed.
   if (IsAccessCheckNeeded() &&
-      !Top::MayNamedAccess(this, name, v8::ACCESS_HAS)) {
-    Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+      !Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
+    Top::ReportFailedAccessCheck(this, v8::ACCESS_SET);
     return Heap::undefined_value();
   }
 
@@ -2844,6 +2872,78 @@
 }
 
 
+Object* JSObject::DefineAccessor(AccessorInfo* info) {
+  String* name = String::cast(info->name());
+  // Check access rights if needed.
+  if (IsAccessCheckNeeded() &&
+      !Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
+    Top::ReportFailedAccessCheck(this, v8::ACCESS_SET);
+    return Heap::undefined_value();
+  }
+
+  if (IsJSGlobalProxy()) {
+    Object* proto = GetPrototype();
+    if (proto->IsNull()) return this;
+    ASSERT(proto->IsJSGlobalObject());
+    return JSObject::cast(proto)->DefineAccessor(info);
+  }
+
+  // Make sure that the top context does not change when doing callbacks or
+  // interceptor calls.
+  AssertNoContextChange ncc;
+
+  // Try to flatten before operating on the string.
+  name->TryFlatten();
+
+  if (!CanSetCallback(name)) {
+    return Heap::undefined_value();
+  }
+
+  uint32_t index = 0;
+  bool is_element = name->AsArrayIndex(&index);
+
+  if (is_element) {
+    if (IsJSArray()) return Heap::undefined_value();
+
+    // Accessors overwrite previous callbacks (cf. getters/setters).
+    switch (GetElementsKind()) {
+      case FAST_ELEMENTS:
+        break;
+      case PIXEL_ELEMENTS:
+      case EXTERNAL_BYTE_ELEMENTS:
+      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+      case EXTERNAL_SHORT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+      case EXTERNAL_INT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+      case EXTERNAL_FLOAT_ELEMENTS:
+        // Ignore getters and setters on pixel and external array
+        // elements.
+        return Heap::undefined_value();
+      case DICTIONARY_ELEMENTS:
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+
+    SetElementCallback(index, info, info->property_attributes());
+  } else {
+    // Lookup the name.
+    LookupResult result;
+    LocalLookup(name, &result);
+    // ES5 forbids turning a property into an accessor if it's not
+    // configurable (that is IsDontDelete in ES3 and v8), see 8.6.1 (Table 5).
+    if (result.IsProperty() && (result.IsReadOnly() || result.IsDontDelete())) {
+      return Heap::undefined_value();
+    }
+    SetPropertyCallback(name, info, info->property_attributes());
+  }
+
+  return this;
+}
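This overload is the internal half of the 2.2.12 change that lets accessors be defined on concrete objects rather than only on object templates. A hedged sketch of how an embedder reaches it, assuming the v8::Object::SetAccessor entry point added in the same release:

    // Embedder-side sketch (assumes v8::Object::SetAccessor from this
    // release; getter name and value are illustrative):
    static v8::Handle<v8::Value> XGetter(v8::Local<v8::String> name,
                                         const v8::AccessorInfo& info) {
      return v8::Integer::New(42);
    }

    void AttachAccessor(v8::Handle<v8::Object> obj) {
      // Builds an AccessorInfo internally and lands in
      // JSObject::DefineAccessor(AccessorInfo*) above.
      obj->SetAccessor(v8::String::New("x"), XGetter);
    }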
+
+
 Object* JSObject::LookupAccessor(String* name, bool is_getter) {
   // Make sure that the top context does not change when doing callbacks or
   // interceptor calls.
@@ -2871,8 +2971,9 @@
           Object* element = dictionary->ValueAt(entry);
           PropertyDetails details = dictionary->DetailsAt(entry);
           if (details.type() == CALLBACKS) {
-            // Only accessors allowed as elements.
-            return FixedArray::cast(element)->get(accessor_index);
+            if (element->IsFixedArray()) {
+              return FixedArray::cast(element)->get(accessor_index);
+            }
           }
         }
       }
@@ -4580,51 +4681,58 @@
     if (Hash() != other->Hash()) return false;
   }
 
-  if (StringShape(this).IsSequentialAscii() &&
-      StringShape(other).IsSequentialAscii()) {
-    const char* str1 = SeqAsciiString::cast(this)->GetChars();
-    const char* str2 = SeqAsciiString::cast(other)->GetChars();
+  // We know the strings are both non-empty. Compare the first chars
+  // before we try to flatten the strings.
+  if (this->Get(0) != other->Get(0)) return false;
+
+  String* lhs = this->TryFlattenGetString();
+  String* rhs = other->TryFlattenGetString();
+
+  if (StringShape(lhs).IsSequentialAscii() &&
+      StringShape(rhs).IsSequentialAscii()) {
+    const char* str1 = SeqAsciiString::cast(lhs)->GetChars();
+    const char* str2 = SeqAsciiString::cast(rhs)->GetChars();
     return CompareRawStringContents(Vector<const char>(str1, len),
                                     Vector<const char>(str2, len));
   }
 
-  if (this->IsFlat()) {
+  if (lhs->IsFlat()) {
     if (IsAsciiRepresentation()) {
-      Vector<const char> vec1 = this->ToAsciiVector();
-      if (other->IsFlat()) {
-        if (other->IsAsciiRepresentation()) {
-          Vector<const char> vec2 = other->ToAsciiVector();
+      Vector<const char> vec1 = lhs->ToAsciiVector();
+      if (rhs->IsFlat()) {
+        if (rhs->IsAsciiRepresentation()) {
+          Vector<const char> vec2 = rhs->ToAsciiVector();
           return CompareRawStringContents(vec1, vec2);
         } else {
           VectorIterator<char> buf1(vec1);
-          VectorIterator<uc16> ib(other->ToUC16Vector());
+          VectorIterator<uc16> ib(rhs->ToUC16Vector());
           return CompareStringContents(&buf1, &ib);
         }
       } else {
         VectorIterator<char> buf1(vec1);
-        string_compare_buffer_b.Reset(0, other);
+        string_compare_buffer_b.Reset(0, rhs);
         return CompareStringContents(&buf1, &string_compare_buffer_b);
       }
     } else {
-      Vector<const uc16> vec1 = this->ToUC16Vector();
-      if (other->IsFlat()) {
-        if (other->IsAsciiRepresentation()) {
+      Vector<const uc16> vec1 = lhs->ToUC16Vector();
+      if (rhs->IsFlat()) {
+        if (rhs->IsAsciiRepresentation()) {
           VectorIterator<uc16> buf1(vec1);
-          VectorIterator<char> ib(other->ToAsciiVector());
+          VectorIterator<char> ib(rhs->ToAsciiVector());
           return CompareStringContents(&buf1, &ib);
         } else {
-          Vector<const uc16> vec2(other->ToUC16Vector());
+          Vector<const uc16> vec2(rhs->ToUC16Vector());
           return CompareRawStringContents(vec1, vec2);
         }
       } else {
         VectorIterator<uc16> buf1(vec1);
-        string_compare_buffer_b.Reset(0, other);
+        string_compare_buffer_b.Reset(0, rhs);
         return CompareStringContents(&buf1, &string_compare_buffer_b);
       }
     }
   } else {
-    string_compare_buffer_a.Reset(0, this);
-    return CompareStringContentsPartial(&string_compare_buffer_a, other);
+    string_compare_buffer_a.Reset(0, lhs);
+    return CompareStringContentsPartial(&string_compare_buffer_a, rhs);
   }
 }
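The rewritten SlowEquals checks the first characters before flattening either side, since unequal strings usually differ early while flattening may allocate; only then are both sides flattened so the raw-comparison fast paths apply more often. The generic shape of that early-out as a standalone sketch (std::string stands in for V8 strings):

    // Generic early-out pattern (illustrative, not V8 code):
    #include <string>
    bool EqualsEarlyOut(const std::string& a, const std::string& b) {
      if (a.size() != b.size()) return false;        // length first
      if (!a.empty() && a[0] != b[0]) return false;  // cheap first-char check
      return a == b;                                 // full comparison last
    }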
 
@@ -5164,22 +5272,7 @@
                   RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
 
   for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
-    RelocInfo::Mode rmode = it.rinfo()->rmode();
-    if (rmode == RelocInfo::EMBEDDED_OBJECT) {
-      v->VisitPointer(it.rinfo()->target_object_address());
-    } else if (RelocInfo::IsCodeTarget(rmode)) {
-      v->VisitCodeTarget(it.rinfo());
-    } else if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
-      v->VisitExternalReference(it.rinfo()->target_reference_address());
-#ifdef ENABLE_DEBUGGER_SUPPORT
-    } else if (Debug::has_break_points() &&
-               RelocInfo::IsJSReturn(rmode) &&
-               it.rinfo()->IsPatchedReturnSequence()) {
-      v->VisitDebugTarget(it.rinfo());
-#endif
-    } else if (rmode == RelocInfo::RUNTIME_ENTRY) {
-      v->VisitRuntimeEntry(it.rinfo());
-    }
+    it.rinfo()->Visit(v);
   }
 
   ScopeInfo<>::IterateScopeInfo(this, v);
@@ -5847,6 +5940,108 @@
 }
 
 
+Object* JSObject::GetElementWithCallback(Object* receiver,
+                                         Object* structure,
+                                         uint32_t index,
+                                         Object* holder) {
+  ASSERT(!structure->IsProxy());
+
+  // API-style callbacks.
+  if (structure->IsAccessorInfo()) {
+    AccessorInfo* data = AccessorInfo::cast(structure);
+    Object* fun_obj = data->getter();
+    v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
+    HandleScope scope;
+    Handle<JSObject> self(JSObject::cast(receiver));
+    Handle<JSObject> holder_handle(JSObject::cast(holder));
+    Handle<Object> number = Factory::NewNumberFromUint(index);
+    Handle<String> key(Factory::NumberToString(number));
+    LOG(ApiNamedPropertyAccess("load", *self, *key));
+    CustomArguments args(data->data(), *self, *holder_handle);
+    v8::AccessorInfo info(args.end());
+    v8::Handle<v8::Value> result;
+    {
+      // Leaving JavaScript.
+      VMState state(EXTERNAL);
+      result = call_fun(v8::Utils::ToLocal(key), info);
+    }
+    RETURN_IF_SCHEDULED_EXCEPTION();
+    if (result.IsEmpty()) return Heap::undefined_value();
+    return *v8::Utils::OpenHandle(*result);
+  }
+
+  // __defineGetter__ callback
+  if (structure->IsFixedArray()) {
+    Object* getter = FixedArray::cast(structure)->get(kGetterIndex);
+    if (getter->IsJSFunction()) {
+      return Object::GetPropertyWithDefinedGetter(receiver,
+                                                  JSFunction::cast(getter));
+    }
+    // Getter is not a function.
+    return Heap::undefined_value();
+  }
+
+  UNREACHABLE();
+  return NULL;
+}
+
+
+Object* JSObject::SetElementWithCallback(Object* structure,
+                                         uint32_t index,
+                                         Object* value,
+                                         JSObject* holder) {
+  HandleScope scope;
+
+  // We should never get here to initialize a const with the hole
+  // value since a const declaration would conflict with the setter.
+  ASSERT(!value->IsTheHole());
+  Handle<Object> value_handle(value);
+
+  // To accommodate both the old and the new api we switch on the
+  // data structure used to store the callbacks.  Eventually proxy
+  // callbacks should be phased out.
+  ASSERT(!structure->IsProxy());
+
+  if (structure->IsAccessorInfo()) {
+    // API-style callbacks.
+    AccessorInfo* data = AccessorInfo::cast(structure);
+    Object* call_obj = data->setter();
+    v8::AccessorSetter call_fun = v8::ToCData<v8::AccessorSetter>(call_obj);
+    if (call_fun == NULL) return value;
+    Handle<Object> number = Factory::NewNumberFromUint(index);
+    Handle<String> key(Factory::NumberToString(number));
+    LOG(ApiNamedPropertyAccess("store", this, *key));
+    CustomArguments args(data->data(), this, JSObject::cast(holder));
+    v8::AccessorInfo info(args.end());
+    {
+      // Leaving JavaScript.
+      VMState state(EXTERNAL);
+      call_fun(v8::Utils::ToLocal(key),
+               v8::Utils::ToLocal(value_handle),
+               info);
+    }
+    RETURN_IF_SCHEDULED_EXCEPTION();
+    return *value_handle;
+  }
+
+  if (structure->IsFixedArray()) {
+    Object* setter = FixedArray::cast(structure)->get(kSetterIndex);
+    if (setter->IsJSFunction()) {
+      return SetPropertyWithDefinedSetter(JSFunction::cast(setter), value);
+    } else {
+      Handle<Object> holder_handle(holder);
+      Handle<Object> key(Factory::NewNumberFromUint(index));
+      Handle<Object> args[2] = { key, holder_handle };
+      return Top::Throw(*Factory::NewTypeError("no_setter_in_callback",
+                                               HandleVector(args, 2)));
+    }
+  }
+
+  UNREACHABLE();
+  return NULL;
+}
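Get/SetElementWithCallback dispatch on the two shapes a CALLBACKS element can take; the summary below is annotation, not patch content:

    // The two callback 'structure' shapes dispatched on above:
    //   AccessorInfo -- C++ getter/setter registered through the API
    //                   (see DefineAccessor(AccessorInfo*) above);
    //   FixedArray   -- pair of JS functions from __defineGetter__ /
    //                   __defineSetter__, at kGetterIndex / kSetterIndex.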
+
+
 // Adding n elements in fast case is O(n*n).
 // Note: revisit design to have dual undefined values to capture absent
 // elements.
@@ -5857,9 +6052,8 @@
   uint32_t elms_length = static_cast<uint32_t>(elms->length());
 
   if (!IsJSArray() && (index >= elms_length || elms->get(index)->IsTheHole())) {
-    Object* setter = LookupCallbackSetterInPrototypes(index);
-    if (setter->IsJSFunction()) {
-      return SetPropertyWithDefinedSetter(JSFunction::cast(setter), value);
+    if (SetElementWithCallbackSetterInPrototypes(index, value)) {
+      return value;
     }
   }
 
@@ -5977,18 +6171,7 @@
         Object* element = dictionary->ValueAt(entry);
         PropertyDetails details = dictionary->DetailsAt(entry);
         if (details.type() == CALLBACKS) {
-          // Only accessors allowed as elements.
-          FixedArray* structure = FixedArray::cast(element);
-          if (structure->get(kSetterIndex)->IsJSFunction()) {
-            JSFunction* setter = JSFunction::cast(structure->get(kSetterIndex));
-            return SetPropertyWithDefinedSetter(setter, value);
-          } else {
-            Handle<Object> self(this);
-            Handle<Object> key(Factory::NewNumberFromUint(index));
-            Handle<Object> args[2] = { key, self };
-            return Top::Throw(*Factory::NewTypeError("no_setter_in_callback",
-                                                     HandleVector(args, 2)));
-          }
+          return SetElementWithCallback(element, index, value, this);
         } else {
           dictionary->UpdateMaxNumberKey(index);
           dictionary->ValueAtPut(entry, value);
@@ -5996,10 +6179,8 @@
       } else {
         // Index not already used. Look for an accessor in the prototype chain.
         if (!IsJSArray()) {
-          Object* setter = LookupCallbackSetterInPrototypes(index);
-          if (setter->IsJSFunction()) {
-            return SetPropertyWithDefinedSetter(JSFunction::cast(setter),
-                                                value);
+          if (SetElementWithCallbackSetterInPrototypes(index, value)) {
+            return value;
           }
         }
         Object* result = dictionary->AtNumberPut(index, value);
@@ -6102,16 +6283,10 @@
         Object* element = dictionary->ValueAt(entry);
         PropertyDetails details = dictionary->DetailsAt(entry);
         if (details.type() == CALLBACKS) {
-          // Only accessors allowed as elements.
-          FixedArray* structure = FixedArray::cast(element);
-          Object* getter = structure->get(kGetterIndex);
-          if (getter->IsJSFunction()) {
-            return GetPropertyWithDefinedGetter(receiver,
-                                                JSFunction::cast(getter));
-          } else {
-            // Getter is not a function.
-            return Heap::undefined_value();
-          }
+          return GetElementWithCallback(receiver,
+                                        element,
+                                        index,
+                                        this);
         }
         return element;
       }
@@ -6259,16 +6434,10 @@
         Object* element = dictionary->ValueAt(entry);
         PropertyDetails details = dictionary->DetailsAt(entry);
         if (details.type() == CALLBACKS) {
-          // Only accessors allowed as elements.
-          FixedArray* structure = FixedArray::cast(element);
-          Object* getter = structure->get(kGetterIndex);
-          if (getter->IsJSFunction()) {
-            return GetPropertyWithDefinedGetter(receiver,
-                                                JSFunction::cast(getter));
-          } else {
-            // Getter is not a function.
-            return Heap::undefined_value();
-          }
+          return GetElementWithCallback(receiver,
+                                        element,
+                                        index,
+                                        this);
         }
         return element;
       }
@@ -7038,15 +7207,9 @@
   }
 
   Object* AsObject() {
-    // If the string is a cons string, attempt to flatten it so that
-    // symbols will most often be flat strings.
-    if (StringShape(string_).IsCons()) {
-      ConsString* cons_string = ConsString::cast(string_);
-      cons_string->TryFlatten();
-      if (cons_string->second()->length() == 0) {
-        string_ = cons_string->first();
-      }
-    }
+    // Attempt to flatten the string, so that symbols will most often
+    // be flat strings.
+    string_ = string_->TryFlattenGetString();
     // Transform string to symbol if possible.
     Map* map = Heap::SymbolMapForString(string_);
     if (map != NULL) {
diff --git a/src/objects.h b/src/objects.h
index 8b114a6..8e89e8f 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1248,6 +1248,8 @@
                          PropertyAttributes attributes);
   Object* LookupAccessor(String* name, bool is_getter);
 
+  Object* DefineAccessor(AccessorInfo* info);
+
   // Used from Object::GetProperty().
   Object* GetPropertyWithFailedAccessCheck(Object* receiver,
                                            LookupResult* result,
@@ -1370,7 +1372,7 @@
   void LookupRealNamedProperty(String* name, LookupResult* result);
   void LookupRealNamedPropertyInPrototypes(String* name, LookupResult* result);
   void LookupCallbackSetterInPrototypes(String* name, LookupResult* result);
-  Object* LookupCallbackSetterInPrototypes(uint32_t index);
+  bool SetElementWithCallbackSetterInPrototypes(uint32_t index, Object* value);
   void LookupCallback(String* name, LookupResult* result);
 
   // Returns the number of properties on this object filtering out properties
@@ -1539,6 +1541,14 @@
   Object* GetElementWithInterceptor(JSObject* receiver, uint32_t index);
 
  private:
+  Object* GetElementWithCallback(Object* receiver,
+                                 Object* structure,
+                                 uint32_t index,
+                                 Object* holder);
+  Object* SetElementWithCallback(Object* structure,
+                                 uint32_t index,
+                                 Object* value,
+                                 JSObject* holder);
   Object* SetElementWithInterceptor(uint32_t index, Object* value);
   Object* SetElementWithoutInterceptor(uint32_t index, Object* value);
 
@@ -1569,6 +1579,13 @@
   // Returns true if most of the elements backing storage is used.
   bool HasDenseElements();
 
+  bool CanSetCallback(String* name);
+  Object* SetElementCallback(uint32_t index,
+                             Object* structure,
+                             PropertyAttributes attributes);
+  Object* SetPropertyCallback(String* name,
+                              Object* structure,
+                              PropertyAttributes attributes);
   Object* DefineGetterSetter(String* name, PropertyAttributes attributes);
 
   void LookupInDescriptor(String* name, LookupResult* result);
@@ -4001,17 +4018,28 @@
   // to this method are not efficient unless the string is flat.
   inline uint16_t Get(int index);
 
-  // Try to flatten the top level ConsString that is hiding behind this
-  // string.  This is a no-op unless the string is a ConsString.  Flatten
-  // mutates the ConsString and might return a failure.
-  Object* SlowTryFlatten(PretenureFlag pretenure);
-
-  // Try to flatten the string.  Checks first inline to see if it is necessary.
-  // Do not handle allocation failures.  After calling TryFlatten, the
-  // string could still be a ConsString, in which case a failure is returned.
-  // Use FlattenString from Handles.cc to be sure to flatten.
+  // Try to flatten the string.  Checks first inline to see if it is
+  // necessary.  Does nothing if the string is not a cons string.
+  // Flattening allocates a sequential string with the same data as
+  // the given string and mutates the cons string to a degenerate
+  // form, where the first component is the new sequential string and
+  // the second component is the empty string.  If allocation fails,
+  // this function returns a failure.  If flattening succeeds, this
+  // function returns the sequential string that is now the first
+  // component of the cons string.
+  //
+  // Degenerate cons strings are handled specially by the garbage
+  // collector (see IsShortcutCandidate).
+  //
+  // Use FlattenString from Handles.cc to flatten even in case an
+  // allocation failure happens.
   inline Object* TryFlatten(PretenureFlag pretenure = NOT_TENURED);
 
+  // Convenience function.  Has exactly the same behavior as
+  // TryFlatten(), except in the case of failure returns the original
+  // string.
+  inline String* TryFlattenGetString(PretenureFlag pretenure = NOT_TENURED);
+
   Vector<const char> ToAsciiVector();
   Vector<const uc16> ToUC16Vector();
 
@@ -4197,6 +4225,11 @@
                                   unsigned max_chars);
 
  private:
+  // Try to flatten the top level ConsString that is hiding behind this
+  // string.  This is a no-op unless the string is a ConsString.  Flatten
+  // mutates the ConsString and might return a failure.
+  Object* SlowTryFlatten(PretenureFlag pretenure);
+
   // Slow case of String::Equals.  This implementation works on any strings
   // but it is most efficient on strings that are almost flat.
   bool SlowEquals(String* other);
diff --git a/src/parser.cc b/src/parser.cc
index c482fdf..bbf71bc 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -5073,12 +5073,12 @@
 
 
 int ScriptDataImpl::Length() {
-  return store_.length();
+  return store_.length() * sizeof(unsigned);
 }
 
 
-unsigned* ScriptDataImpl::Data() {
-  return store_.start();
+const char* ScriptDataImpl::Data() {
+  return reinterpret_cast<const char*>(store_.start());
 }
 
 
diff --git a/src/parser.h b/src/parser.h
index 2e5daf9..89966a6 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -90,7 +90,7 @@
         last_entry_(0) { }
   virtual ~ScriptDataImpl();
   virtual int Length();
-  virtual unsigned* Data();
+  virtual const char* Data();
   virtual bool HasError();
   FunctionEntry GetFunctionEnd(int start);
   bool SanityCheck();
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index b1075cf..72fe088 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -290,7 +290,7 @@
 
   int frames_count = backtrace(addresses.start(), frames_size);
 
-  char** symbols = backtrace_symbols(addresses, frames_count);
+  char** symbols = backtrace_symbols(addresses.start(), frames_count);
   if (symbols == NULL) {
     return kStackWalkError;
   }
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index fca218f..ff1ecb1 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -165,6 +165,28 @@
 }
 
 
+#ifdef V8_TARGET_ARCH_ARM
+// 0xffff0fa0 is the hard-coded address of a function provided by
+// the kernel which implements a memory barrier. On older
+// ARM architecture revisions (pre-v6) this may be implemented using
+// a syscall. This address is stable, and in active use (hard-coded)
+// by at least glibc-2.7 and the Android C library.
+typedef void (*LinuxKernelMemoryBarrierFunc)(void);
+LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
+    (LinuxKernelMemoryBarrierFunc) 0xffff0fa0;
+#endif
+
+void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
+#if defined(V8_TARGET_ARCH_ARM) && defined(__arm__)  // don't use on a simulator
+  pLinuxKernelMemoryBarrier();
+#else
+  __asm__ __volatile__("" : : : "memory");
+  // An x86 store acts as a release barrier.
+#endif
+  *ptr = value;
+}
+
+
 const char* OS::LocalTimezone(double time) {
   if (isnan(time)) return "";
   time_t tv = static_cast<time_t>(floor(time/msPerSecond));
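OS::ReleaseStore gives each port a store with release semantics: all earlier writes become visible before the flagged store. On ARM/Linux it calls the kernel helper at 0xffff0fa0; on x86 a compiler barrier suffices because x86 stores already have release ordering. A hedged sketch of the publish pattern it supports (names are illustrative, not from this patch):

    // Illustrative publish pattern:
    static int payload = 0;
    static AtomicWord ready = 0;

    void Publish() {
      payload = 42;                 // 1. write the data
      OS::ReleaseStore(&ready, 1);  // 2. barrier, then store: a reader that
                                    //    sees ready == 1 also sees payload
                                    //    == 42, given a matching acquire read
    }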
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index 23747c3..47193de 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -39,6 +39,7 @@
 #include <pthread.h>
 #include <semaphore.h>
 #include <signal.h>
+#include <libkern/OSAtomic.h>
 #include <mach/mach.h>
 #include <mach/semaphore.h>
 #include <mach/task.h>
@@ -259,6 +260,12 @@
 }
 
 
+void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
+  OSMemoryBarrier();
+  *ptr = value;
+}
+
+
 const char* OS::LocalTimezone(double time) {
   if (isnan(time)) return "";
   time_t tv = static_cast<time_t>(floor(time/msPerSecond));
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index 0d9547b..0ae1ecf 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -35,7 +35,8 @@
 #include <sys/stack.h>  // for stack alignment
 #include <unistd.h>  // getpagesize(), usleep()
 #include <sys/mman.h>  // mmap()
-#include <execinfo.h>  // backtrace(), backtrace_symbols()
+#include <ucontext.h>  // walkstack(), getcontext()
+#include <dlfcn.h>     // dladdr
 #include <pthread.h>
 #include <sched.h>  // for sched_yield
 #include <semaphore.h>
@@ -53,6 +54,24 @@
 #include "platform.h"
 
 
+// It seems there is a bug in some Solaris distributions (experienced in
+// SunOS 5.10 Generic_141445-09) which makes it difficult or impossible to
+// access signbit() despite the availability of other C99 math functions.
+#ifndef signbit
+// Test sign - usually defined in math.h
+int signbit(double x) {
+  // We need to take care of the special case of both positive and negative
+  // versions of zero.
+  if (x == 0) {
+    return fpclass(x) & FP_NZERO;
+  } else {
+    // This won't detect negative NaN but that should be okay since we don't
+    // assume that behavior.
+    return x < 0;
+  }
+}
+#endif  // signbit
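The fallback must distinguish -0.0 from +0.0, for which x < 0 is useless; Solaris's fpclass() reports FP_NZERO for negative zero. Sanity checks the fallback is meant to satisfy (illustrative annotation):

    // Illustrative sanity checks for the fallback:
    //   signbit(-0.0) != 0   (fpclass(-0.0) & FP_NZERO is non-zero)
    //   signbit(+0.0) == 0
    //   signbit(-1.5) != 0   (ordinary negatives use x < 0)
    //   signbit(+1.5) == 0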
+
 namespace v8 {
 namespace internal {
 
@@ -231,31 +250,55 @@
 }
 
 
+struct StackWalker {
+  Vector<OS::StackFrame>& frames;
+  int index;
+};
+
+
+static int StackWalkCallback(uintptr_t pc, int signo, void* data) {
+  struct StackWalker* walker = static_cast<struct StackWalker*>(data);
+  Dl_info info;
+
+  int i = walker->index;
+
+  walker->frames[i].address = reinterpret_cast<void*>(pc);
+
+  // Make sure line termination is in place.
+  walker->frames[i].text[OS::kStackWalkMaxTextLen - 1] = '\0';
+
+  Vector<char> text = MutableCStrVector(walker->frames[i].text,
+                                        OS::kStackWalkMaxTextLen);
+
+  if (dladdr(reinterpret_cast<void*>(pc), &info) == 0) {
+    OS::SNPrintF(text, "[0x%p]", pc);
+  } else if ((info.dli_fname != NULL && info.dli_sname != NULL)) {
+    // We have symbol info.
+    OS::SNPrintF(text, "%s'%s+0x%x", info.dli_fname, info.dli_sname, pc);
+  } else {
+    // No local symbol info.
+    OS::SNPrintF(text,
+                 "%s'0x%p [0x%p]",
+                 info.dli_fname,
+                 pc - reinterpret_cast<uintptr_t>(info.dli_fbase),
+                 pc);
+  }
+  walker->index++;
+  return 0;
+}
+
+
 int OS::StackWalk(Vector<OS::StackFrame> frames) {
-  int frames_size = frames.length();
-  ScopedVector<void*> addresses(frames_size);
+  ucontext_t ctx;
+  struct StackWalker walker = { frames, 0 };
 
-  int frames_count = backtrace(addresses.start(), frames_size);
+  if (getcontext(&ctx) < 0) return kStackWalkError;
 
-  char** symbols = backtrace_symbols(addresses.start(), frames_count);
-  if (symbols == NULL) {
+  if (walkcontext(&ctx, StackWalkCallback, &walker) < 0) {
     return kStackWalkError;
   }
 
-  for (int i = 0; i < frames_count; i++) {
-    frames[i].address = addresses[i];
-    // Format a text representation of the frame based on the information
-    // available.
-    SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
-             "%s",
-             symbols[i]);
-    // Make sure line termination is in place.
-    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
-  }
-
-  free(symbols);
-
-  return frames_count;
+  return walker.index;
 }
 
 
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index bee5173..e2d123c 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -1340,6 +1340,12 @@
 }
 
 
+void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+
 bool VirtualMemory::IsReserved() {
   return address_ != NULL;
 }
diff --git a/src/platform.h b/src/platform.h
index 7156441..d63ca5e 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -83,6 +83,14 @@
 
 #endif  // WIN32
 
+
+#ifdef __sun
+# ifndef signbit
+int signbit(double x);
+# endif
+#endif
+
+
 // GCC specific stuff
 #ifdef __GNUC__
 
@@ -269,6 +277,8 @@
   // the platform doesn't care. Guaranteed to be a power of two.
   static int ActivationFrameAlignment();
 
+  static void ReleaseStore(volatile AtomicWord* ptr, AtomicWord value);
+
  private:
   static const int msPerSecond = 1000;
 
diff --git a/src/profile-generator-inl.h b/src/profile-generator-inl.h
index 628fa44..fecb70b 100644
--- a/src/profile-generator-inl.h
+++ b/src/profile-generator-inl.h
@@ -35,17 +35,30 @@
 namespace v8 {
 namespace internal {
 
+CodeEntry::CodeEntry(int security_token_id)
+    : call_uid_(0),
+      tag_(Logger::FUNCTION_TAG),
+      name_prefix_(kEmptyNamePrefix),
+      name_(""),
+      resource_name_(""),
+      line_number_(0),
+      security_token_id_(security_token_id) {
+}
+
+
 CodeEntry::CodeEntry(Logger::LogEventsAndTags tag,
                      const char* name_prefix,
                      const char* name,
                      const char* resource_name,
-                     int line_number)
+                     int line_number,
+                     int security_token_id)
     : call_uid_(next_call_uid_++),
       tag_(tag),
       name_prefix_(name_prefix),
       name_(name),
       resource_name_(resource_name),
-      line_number_(line_number) {
+      line_number_(line_number),
+      security_token_id_(security_token_id) {
 }
 
 
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 4c2a330..ad8867c 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -28,6 +28,7 @@
 #ifdef ENABLE_LOGGING_AND_PROFILING
 
 #include "v8.h"
+#include "global-handles.h"
 
 #include "profile-generator-inl.h"
 
@@ -37,10 +38,68 @@
 namespace internal {
 
 
+TokenEnumerator::TokenEnumerator()
+    : token_locations_(4),
+      token_removed_(4) {
+}
+
+
+TokenEnumerator::~TokenEnumerator() {
+  for (int i = 0; i < token_locations_.length(); ++i) {
+    if (!token_removed_[i]) {
+      GlobalHandles::ClearWeakness(token_locations_[i]);
+      GlobalHandles::Destroy(token_locations_[i]);
+    }
+  }
+}
+
+
+int TokenEnumerator::GetTokenId(Object* token) {
+  if (token == NULL) return CodeEntry::kNoSecurityToken;
+  for (int i = 0; i < token_locations_.length(); ++i) {
+    if (*token_locations_[i] == token && !token_removed_[i]) return i;
+  }
+  Handle<Object> handle = GlobalHandles::Create(token);
+  // handle.location() points to a memory cell holding a pointer
+  // to a token object in the V8 heap.
+  GlobalHandles::MakeWeak(handle.location(), this, TokenRemovedCallback);
+  token_locations_.Add(handle.location());
+  token_removed_.Add(false);
+  return token_locations_.length() - 1;
+}
+
+
+void TokenEnumerator::TokenRemovedCallback(v8::Persistent<v8::Value> handle,
+                                           void* parameter) {
+  reinterpret_cast<TokenEnumerator*>(parameter)->TokenRemoved(
+      Utils::OpenHandle(*handle).location());
+}
+
+
+void TokenEnumerator::TokenRemoved(Object** token_location) {
+  for (int i = 0; i < token_locations_.length(); ++i) {
+    if (token_locations_[i] == token_location && !token_removed_[i]) {
+      token_removed_[i] = true;
+      return;
+    }
+  }
+}
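TokenEnumerator interns security-token heap objects as small integer ids, holding each through a weak global handle so that a collected token is marked removed instead of having its id reused. Illustrative use (token_a stands for some heap object; sketch only):

    // Illustrative use of the id mapping:
    int DemoTokenIds(TokenEnumerator* tokens, Object* token_a) {
      int id1 = tokens->GetTokenId(token_a);  // first sighting: new id
      int id2 = tokens->GetTokenId(token_a);  // same live object: same id
      ASSERT(id1 == id2);
      ASSERT(tokens->GetTokenId(NULL) == CodeEntry::kNoSecurityToken);
      return id1;
    }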
+
+
 const char* CodeEntry::kEmptyNamePrefix = "";
 unsigned CodeEntry::next_call_uid_ = 1;
 
 
+void CodeEntry::CopyData(const CodeEntry& source) {
+  call_uid_ = source.call_uid_;
+  tag_ = source.tag_;
+  name_prefix_ = source.name_prefix_;
+  name_ = source.name_;
+  resource_name_ = source.resource_name_;
+  line_number_ = source.line_number_;
+}
+
+
 ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
   HashMap::Entry* map_entry =
       children_.Lookup(entry, CodeEntryHash(entry), false);
@@ -73,11 +132,12 @@
 
 
 void ProfileNode::Print(int indent) {
-  OS::Print("%5u %5u %*c %s%s",
+  OS::Print("%5u %5u %*c %s%s [%d]",
             total_ticks_, self_ticks_,
             indent, ' ',
             entry_->name_prefix(),
-            entry_->name());
+            entry_->name(),
+            entry_->security_token_id());
   if (entry_->resource_name()[0] != '\0')
     OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
   OS::Print("\n");
@@ -93,6 +153,8 @@
 
 class DeleteNodesCallback {
  public:
+  void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }
+
   void AfterAllChildrenTraversed(ProfileNode* node) {
     delete node;
   }
@@ -104,14 +166,19 @@
 
 
 ProfileTree::ProfileTree()
-    : root_entry_(Logger::FUNCTION_TAG, "", "(root)", "", 0),
+    : root_entry_(Logger::FUNCTION_TAG,
+                  "",
+                  "(root)",
+                  "",
+                  0,
+                  CodeEntry::kNoSecurityToken),
       root_(new ProfileNode(this, &root_entry_)) {
 }
 
 
 ProfileTree::~ProfileTree() {
   DeleteNodesCallback cb;
-  TraverseDepthFirstPostOrder(&cb);
+  TraverseDepthFirst(&cb);
 }
 
 
@@ -141,6 +208,70 @@
 }
 
 
+namespace {
+
+struct NodesPair {
+  NodesPair(ProfileNode* src, ProfileNode* dst)
+      : src(src), dst(dst) { }
+  ProfileNode* src;
+  ProfileNode* dst;
+};
+
+
+class FilteredCloneCallback {
+ public:
+  explicit FilteredCloneCallback(ProfileNode* dst_root, int security_token_id)
+      : stack_(10),
+        security_token_id_(security_token_id) {
+    stack_.Add(NodesPair(NULL, dst_root));
+  }
+
+  void BeforeTraversingChild(ProfileNode* parent, ProfileNode* child) {
+    if (IsTokenAcceptable(child->entry()->security_token_id(),
+                          parent->entry()->security_token_id())) {
+      ProfileNode* clone = stack_.last().dst->FindOrAddChild(child->entry());
+      clone->IncreaseSelfTicks(child->self_ticks());
+      stack_.Add(NodesPair(child, clone));
+    } else {
+      // Attribute ticks to parent node.
+      stack_.last().dst->IncreaseSelfTicks(child->self_ticks());
+    }
+  }
+
+  void AfterAllChildrenTraversed(ProfileNode* parent) { }
+
+  void AfterChildTraversed(ProfileNode*, ProfileNode* child) {
+    if (stack_.last().src == child) {
+      stack_.RemoveLast();
+    }
+  }
+
+ private:
+  bool IsTokenAcceptable(int token, int parent_token) {
+    if (token == CodeEntry::kNoSecurityToken
+        || token == security_token_id_) return true;
+    if (token == CodeEntry::kInheritsSecurityToken) {
+      ASSERT(parent_token != CodeEntry::kInheritsSecurityToken);
+      return parent_token == CodeEntry::kNoSecurityToken
+          || parent_token == security_token_id_;
+    }
+    return false;
+  }
+
+  List<NodesPair> stack_;
+  int security_token_id_;
+};
+
+}  // namespace
+
+void ProfileTree::FilteredClone(ProfileTree* src, int security_token_id) {
+  ms_to_ticks_scale_ = src->ms_to_ticks_scale_;
+  FilteredCloneCallback cb(root_, security_token_id);
+  src->TraverseDepthFirst(&cb);
+  CalculateTotalTicks();
+}
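FilteredClone walks the source tree once and applies the token rule below; nodes that fail it are dropped, with their self ticks folded into the nearest kept ancestor so totals still add up:

    // Token-filtering rule applied during the clone (annotation):
    //   kNoSecurityToken        -> keep (system/shared entries)
    //   == requested token      -> keep
    //   kInheritsSecurityToken  -> keep iff the parent was kept
    //   any other token         -> drop node; fold its self ticks
    //                              into the kept parent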
+
+
 void ProfileTree::SetTickRatePerMs(double ticks_per_ms) {
   ms_to_ticks_scale_ = ticks_per_ms > 0 ? 1.0 / ticks_per_ms : 1.0;
 }
@@ -170,12 +301,13 @@
 
 // Non-recursive implementation of a depth-first post-order tree traversal.
 template <typename Callback>
-void ProfileTree::TraverseDepthFirstPostOrder(Callback* callback) {
+void ProfileTree::TraverseDepthFirst(Callback* callback) {
   List<Position> stack(10);
   stack.Add(Position(root_));
-  do {
+  while (stack.length() > 0) {
     Position& current = stack.last();
     if (current.has_current_child()) {
+      callback->BeforeTraversingChild(current.node, current.current_child());
       stack.Add(Position(current.current_child()));
     } else {
       callback->AfterAllChildrenTraversed(current.node);
@@ -183,11 +315,11 @@
         Position& parent = stack[stack.length() - 2];
         callback->AfterChildTraversed(parent.node, current.node);
         parent.next_child();
-        // Remove child from the stack.
-        stack.RemoveLast();
       }
+      // Remove child from the stack.
+      stack.RemoveLast();
     }
-  } while (stack.length() > 1 || stack.last().has_current_child());
+  }
 }
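Renamed from TraverseDepthFirstPostOrder because the walk now also fires a pre-order hook; callbacks must provide all three methods. A minimal conforming callback (sketch):

    // Minimal callback shape accepted by TraverseDepthFirst:
    class CountNodesCallback {
     public:
      CountNodesCallback() : count_(0) { }
      void BeforeTraversingChild(ProfileNode*, ProfileNode*) { ++count_; }
      void AfterAllChildrenTraversed(ProfileNode*) { }
      void AfterChildTraversed(ProfileNode*, ProfileNode*) { }
      int count() const { return count_; }  // nodes visited below the root
     private:
      int count_;
    };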
 
 
@@ -195,6 +327,8 @@
 
 class CalculateTotalTicksCallback {
  public:
+  void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }
+
   void AfterAllChildrenTraversed(ProfileNode* node) {
     node->IncreaseTotalTicks(node->self_ticks());
   }
@@ -209,7 +343,7 @@
 
 void ProfileTree::CalculateTotalTicks() {
   CalculateTotalTicksCallback cb;
-  TraverseDepthFirstPostOrder(&cb);
+  TraverseDepthFirst(&cb);
 }
 
 
@@ -238,6 +372,15 @@
 }
 
 
+CpuProfile* CpuProfile::FilteredClone(int security_token_id) {
+  ASSERT(security_token_id != CodeEntry::kNoSecurityToken);
+  CpuProfile* clone = new CpuProfile(title_, uid_);
+  clone->top_down_.FilteredClone(&top_down_, security_token_id);
+  clone->bottom_up_.FilteredClone(&bottom_up_, security_token_id);
+  return clone;
+}
+
+
 void CpuProfile::ShortPrint() {
   OS::Print("top down ");
   top_down_.ShortPrint();
@@ -259,12 +402,13 @@
     CodeMap::CodeEntryInfo(NULL, 0);
 
 
-void CodeMap::AddAlias(Address alias, Address addr) {
+void CodeMap::AddAlias(Address start, CodeEntry* entry, Address code_start) {
   CodeTree::Locator locator;
-  if (tree_.Find(addr, &locator)) {
-    const CodeEntryInfo& entry_info = locator.value();
-    tree_.Insert(alias, &locator);
-    locator.set_value(entry_info);
+  if (tree_.Find(code_start, &locator)) {
+    const CodeEntryInfo& code_info = locator.value();
+    entry->CopyData(*code_info.entry);
+    tree_.Insert(start, &locator);
+    locator.set_value(CodeEntryInfo(entry, code_info.size));
   }
 }
 
@@ -295,8 +439,10 @@
 
 CpuProfilesCollection::CpuProfilesCollection()
     : function_and_resource_names_(StringsMatch),
-      profiles_uids_(CpuProfilesMatch),
+      profiles_uids_(UidsMatch),
       current_profiles_semaphore_(OS::CreateSemaphore(1)) {
+  // Create list of unabridged profiles.
+  profiles_by_token_.Add(new List<CpuProfile*>());
 }
 
 
@@ -313,11 +459,15 @@
   delete *profile_ptr;
 }
 
+static void DeleteProfilesList(List<CpuProfile*>** list_ptr) {
+  (*list_ptr)->Iterate(DeleteCpuProfile);
+  delete *list_ptr;
+}
 
 CpuProfilesCollection::~CpuProfilesCollection() {
   delete current_profiles_semaphore_;
   current_profiles_.Iterate(DeleteCpuProfile);
-  profiles_.Iterate(DeleteCpuProfile);
+  profiles_by_token_.Iterate(DeleteProfilesList);
   code_entries_.Iterate(DeleteCodeEntry);
   args_count_names_.Iterate(DeleteArgsCountName);
   for (HashMap::Entry* p = function_and_resource_names_.Start();
@@ -349,7 +499,8 @@
 }
 
 
-CpuProfile* CpuProfilesCollection::StopProfiling(const char* title,
+CpuProfile* CpuProfilesCollection::StopProfiling(int security_token_id,
+                                                 const char* title,
                                                  double actual_sampling_rate) {
   const int title_len = StrLength(title);
   CpuProfile* profile = NULL;
@@ -365,29 +516,89 @@
   if (profile != NULL) {
     profile->CalculateTotalTicks();
     profile->SetActualSamplingRate(actual_sampling_rate);
-    profiles_.Add(profile);
+    List<CpuProfile*>* unabridged_list =
+        profiles_by_token_[TokenToIndex(CodeEntry::kNoSecurityToken)];
+    unabridged_list->Add(profile);
     HashMap::Entry* entry =
         profiles_uids_.Lookup(reinterpret_cast<void*>(profile->uid()),
                               static_cast<uint32_t>(profile->uid()),
                               true);
     ASSERT(entry->value == NULL);
-    entry->value = profile;
+    entry->value = reinterpret_cast<void*>(unabridged_list->length() - 1);
+    return GetProfile(security_token_id, profile->uid());
   }
-  return profile;
+  return NULL;
 }
 
 
-CpuProfile* CpuProfilesCollection::StopProfiling(String* title,
+CpuProfile* CpuProfilesCollection::StopProfiling(int security_token_id,
+                                                 String* title,
                                                  double actual_sampling_rate) {
-  return StopProfiling(GetName(title), actual_sampling_rate);
+  return StopProfiling(security_token_id, GetName(title), actual_sampling_rate);
 }
 
 
-CpuProfile* CpuProfilesCollection::GetProfile(unsigned uid) {
+CpuProfile* CpuProfilesCollection::GetProfile(int security_token_id,
+                                              unsigned uid) {
   HashMap::Entry* entry = profiles_uids_.Lookup(reinterpret_cast<void*>(uid),
                                                 static_cast<uint32_t>(uid),
                                                 false);
-  return entry != NULL ? reinterpret_cast<CpuProfile*>(entry->value) : NULL;
+  int index;
+  if (entry != NULL) {
+    index = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
+  } else {
+    return NULL;
+  }
+  List<CpuProfile*>* unabridged_list =
+      profiles_by_token_[TokenToIndex(CodeEntry::kNoSecurityToken)];
+  if (security_token_id == CodeEntry::kNoSecurityToken) {
+    return unabridged_list->at(index);
+  }
+  List<CpuProfile*>* list = GetProfilesList(security_token_id);
+  if (list->at(index) == NULL) {
+    list->at(index) =
+        unabridged_list->at(index)->FilteredClone(security_token_id);
+  }
+  return list->at(index);
+}
+
+
+int CpuProfilesCollection::TokenToIndex(int security_token_id) {
+  ASSERT(CodeEntry::kNoSecurityToken == -1);
+  return security_token_id + 1;  // kNoSecurityToken -> 0, 0 -> 1, ...
+}
+
+
+List<CpuProfile*>* CpuProfilesCollection::GetProfilesList(
+    int security_token_id) {
+  const int index = TokenToIndex(security_token_id);
+  const int lists_to_add = index - profiles_by_token_.length() + 1;
+  if (lists_to_add > 0) profiles_by_token_.AddBlock(NULL, lists_to_add);
+  List<CpuProfile*>* unabridged_list =
+      profiles_by_token_[TokenToIndex(CodeEntry::kNoSecurityToken)];
+  const int current_count = unabridged_list->length();
+  if (profiles_by_token_[index] == NULL) {
+    profiles_by_token_[index] = new List<CpuProfile*>(current_count);
+  }
+  List<CpuProfile*>* list = profiles_by_token_[index];
+  list->AddBlock(NULL, current_count - list->length());
+  return list;
+}
+
+
+List<CpuProfile*>* CpuProfilesCollection::Profiles(int security_token_id) {
+  List<CpuProfile*>* unabridged_list =
+      profiles_by_token_[TokenToIndex(CodeEntry::kNoSecurityToken)];
+  if (security_token_id == CodeEntry::kNoSecurityToken) {
+    return unabridged_list;
+  }
+  List<CpuProfile*>* list = GetProfilesList(security_token_id);
+  const int current_count = unabridged_list->length();
+  for (int i = 0; i < current_count; ++i) {
+    if (list->at(i) == NULL) {
+      list->at(i) = unabridged_list->at(i)->FilteredClone(security_token_id);
+    }
+  }
+  return list;
 }
 
 
@@ -399,7 +610,8 @@
                                    CodeEntry::kEmptyNamePrefix,
                                    GetFunctionName(name),
                                    GetName(resource_name),
-                                   line_number);
+                                   line_number,
+                                   CodeEntry::kNoSecurityToken);
   code_entries_.Add(entry);
   return entry;
 }
@@ -411,7 +623,8 @@
                                    CodeEntry::kEmptyNamePrefix,
                                    GetFunctionName(name),
                                    "",
-                                   v8::CpuProfileNode::kNoLineNumberInfo);
+                                   v8::CpuProfileNode::kNoLineNumberInfo,
+                                   CodeEntry::kNoSecurityToken);
   code_entries_.Add(entry);
   return entry;
 }
@@ -424,7 +637,8 @@
                                    name_prefix,
                                    GetName(name),
                                    "",
-                                   v8::CpuProfileNode::kNoLineNumberInfo);
+                                   v8::CpuProfileNode::kNoLineNumberInfo,
+                                   CodeEntry::kInheritsSecurityToken);
   code_entries_.Add(entry);
   return entry;
 }
@@ -436,7 +650,15 @@
                                    "args_count: ",
                                    GetName(args_count),
                                    "",
-                                   v8::CpuProfileNode::kNoLineNumberInfo);
+                                   v8::CpuProfileNode::kNoLineNumberInfo,
+                                   CodeEntry::kInheritsSecurityToken);
+  code_entries_.Add(entry);
+  return entry;
+}
+
+
+CodeEntry* CpuProfilesCollection::NewCodeEntry(int security_token_id) {
+  CodeEntry* entry = new CodeEntry(security_token_id);
   code_entries_.Add(entry);
   return entry;
 }
@@ -547,8 +769,13 @@
         *entry = NULL;
       } else {
         CodeEntry* pc_entry = *entries.start();
-        if (pc_entry == NULL || pc_entry->is_js_function())
+        if (pc_entry == NULL) {
           *entry = NULL;
+        } else if (pc_entry->is_js_function()) {
+          // Use the function entry in favor of the pc entry, as the
+          // function entry has a security token.
+          *entries.start() = NULL;
+        }
       }
       entry++;
     }
diff --git a/src/profile-generator.h b/src/profile-generator.h
index bd5b0cd..7830787 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -35,14 +35,34 @@
 namespace v8 {
 namespace internal {
 
+class TokenEnumerator {
+ public:
+  TokenEnumerator();
+  ~TokenEnumerator();
+  int GetTokenId(Object* token);
+
+ private:
+  static void TokenRemovedCallback(v8::Persistent<v8::Value> handle,
+                                   void* parameter);
+  void TokenRemoved(Object** token_location);
+
+  List<Object**> token_locations_;
+  List<bool> token_removed_;
+
+  friend class TokenEnumeratorTester;
+};
+
+
 class CodeEntry {
  public:
+  explicit INLINE(CodeEntry(int security_token_id));
   // CodeEntry doesn't own name strings, just references them.
   INLINE(CodeEntry(Logger::LogEventsAndTags tag,
                    const char* name_prefix,
                    const char* name,
                    const char* resource_name,
-                   int line_number));
+                   int line_number,
+                   int security_token_id));
 
   INLINE(bool is_js_function() const) { return is_js_function_tag(tag_); }
   INLINE(const char* name_prefix() const) { return name_prefix_; }
@@ -51,18 +71,24 @@
   INLINE(const char* resource_name() const) { return resource_name_; }
   INLINE(int line_number() const) { return line_number_; }
   INLINE(unsigned call_uid() const) { return call_uid_; }
+  INLINE(int security_token_id() const) { return security_token_id_; }
 
   INLINE(static bool is_js_function_tag(Logger::LogEventsAndTags tag));
 
+  void CopyData(const CodeEntry& source);
+
   static const char* kEmptyNamePrefix;
+  static const int kNoSecurityToken = -1;
+  static const int kInheritsSecurityToken = -2;
 
  private:
-  const unsigned call_uid_;
+  unsigned call_uid_;
   Logger::LogEventsAndTags tag_;
   const char* name_prefix_;
   const char* name_;
   const char* resource_name_;
   int line_number_;
+  int security_token_id_;
 
   static unsigned next_call_uid_;
 
@@ -79,6 +105,7 @@
   ProfileNode* FindChild(CodeEntry* entry);
   ProfileNode* FindOrAddChild(CodeEntry* entry);
   INLINE(void IncrementSelfTicks()) { ++self_ticks_; }
+  INLINE(void IncreaseSelfTicks(unsigned amount)) { self_ticks_ += amount; }
   INLINE(void IncreaseTotalTicks(unsigned amount)) { total_ticks_ += amount; }
 
   INLINE(CodeEntry* entry() const) { return entry_; }
@@ -119,6 +146,7 @@
   void AddPathFromEnd(const Vector<CodeEntry*>& path);
   void AddPathFromStart(const Vector<CodeEntry*>& path);
   void CalculateTotalTicks();
+  void FilteredClone(ProfileTree* src, int security_token_id);
 
   double TicksToMillis(unsigned ticks) const {
     return ticks * ms_to_ticks_scale_;
@@ -133,7 +161,7 @@
 
  private:
   template <typename Callback>
-  void TraverseDepthFirstPostOrder(Callback* callback);
+  void TraverseDepthFirst(Callback* callback);
 
   CodeEntry root_entry_;
   ProfileNode* root_;
@@ -152,6 +180,7 @@
   void AddPath(const Vector<CodeEntry*>& path);
   void CalculateTotalTicks();
   void SetActualSamplingRate(double actual_sampling_rate);
+  CpuProfile* FilteredClone(int security_token_id);
 
   INLINE(const char* title() const) { return title_; }
   INLINE(unsigned uid() const) { return uid_; }
@@ -179,7 +208,7 @@
   INLINE(void AddCode(Address addr, CodeEntry* entry, unsigned size));
   INLINE(void MoveCode(Address from, Address to));
   INLINE(void DeleteCode(Address addr));
-  void AddAlias(Address alias, Address addr);
+  void AddAlias(Address start, CodeEntry* entry, Address code_start);
   CodeEntry* FindEntry(Address addr);
 
   void Print();
@@ -221,10 +250,14 @@
 
   bool StartProfiling(const char* title, unsigned uid);
   bool StartProfiling(String* title, unsigned uid);
-  CpuProfile* StopProfiling(const char* title, double actual_sampling_rate);
-  CpuProfile* StopProfiling(String* title, double actual_sampling_rate);
-  INLINE(List<CpuProfile*>* profiles()) { return &profiles_; }
-  CpuProfile* GetProfile(unsigned uid);
+  CpuProfile* StopProfiling(int security_token_id,
+                            const char* title,
+                            double actual_sampling_rate);
+  CpuProfile* StopProfiling(int security_token_id,
+                            String* title,
+                            double actual_sampling_rate);
+  List<CpuProfile*>* Profiles(int security_token_id);
+  CpuProfile* GetProfile(int security_token_id, unsigned uid);
   inline bool is_last_profile();
 
   CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
@@ -233,6 +266,7 @@
   CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
                           const char* name_prefix, String* name);
   CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, int args_count);
+  CodeEntry* NewCodeEntry(int security_token_id);
 
   // Called from profile generator thread.
   void AddPathToCurrentProfiles(const Vector<CodeEntry*>& path);
@@ -242,13 +276,15 @@
   INLINE(const char* GetFunctionName(const char* name));
   const char* GetName(String* name);
   const char* GetName(int args_count);
+  List<CpuProfile*>* GetProfilesList(int security_token_id);
+  int TokenToIndex(int security_token_id);
 
   INLINE(static bool StringsMatch(void* key1, void* key2)) {
     return strcmp(reinterpret_cast<char*>(key1),
                   reinterpret_cast<char*>(key2)) == 0;
   }
 
-  INLINE(static bool CpuProfilesMatch(void* key1, void* key2)) {
+  INLINE(static bool UidsMatch(void* key1, void* key2)) {
     return key1 == key2;
   }
 
@@ -257,8 +293,8 @@
   // args_count -> char*
   List<char*> args_count_names_;
   List<CodeEntry*> code_entries_;
-  List<CpuProfile*> profiles_;
-  // uid -> CpuProfile*
+  List<List<CpuProfile*>* > profiles_by_token_;
+  // uid -> index
   HashMap profiles_uids_;
 
   // Accessed by VM thread and profile generator thread.
@@ -332,6 +368,10 @@
     return profiles_->NewCodeEntry(tag, args_count);
   }
 
+  INLINE(CodeEntry* NewCodeEntry(int security_token_id)) {
+    return profiles_->NewCodeEntry(security_token_id);
+  }
+
   void RecordTickSample(const TickSample& sample);
 
   INLINE(CodeMap* code_map()) { return &code_map_; }
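
The security_token_id plumbing above is what lets CpuProfilesCollection hand
out per-context views of one recorded profile via FilteredClone.  As a rough
sketch of the idea only (this is not V8's implementation; the Node type and
the tick-folding rule are illustrative), a token filter over a call tree can
drop foreign nodes while folding their ticks into the nearest kept ancestor:

    #include <cstddef>
    #include <vector>

    struct Node {
      int token;                    // security token id of the node's entry
      unsigned self_ticks;          // samples attributed directly to the node
      std::vector<Node*> children;
      Node(int t, unsigned s) : token(t), self_ticks(s) {}
    };

    static const int kNoSecurityToken = -1;

    // Clones the subtree at 'src', keeping nodes whose token matches 'token'
    // or that carry no token; ticks of pruned nodes are folded into the
    // nearest kept ancestor so totals stay consistent.  The root is assumed
    // to be kept.
    Node* FilteredCloneSketch(const Node* src, int token, Node* parent) {
      bool keep = (src->token == token) || (src->token == kNoSecurityToken);
      Node* dst = parent;
      if (keep) {
        dst = new Node(src->token, src->self_ticks);
        if (parent != NULL) parent->children.push_back(dst);
      } else if (parent != NULL) {
        parent->self_ticks += src->self_ticks;  // attribute time to caller
      }
      for (size_t i = 0; i < src->children.size(); ++i) {
        FilteredCloneSketch(src->children[i], token, dst);
      }
      return dst;
    }
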
diff --git a/src/runtime.js b/src/runtime.js
index 8e3883f..3e4d473 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -559,20 +559,15 @@
 // ES5, section 9.12
 function SameValue(x, y) {
   if (typeof x != typeof y) return false;
-  if (IS_NULL_OR_UNDEFINED(x)) return true;
   if (IS_NUMBER(x)) {
     if (NUMBER_IS_NAN(x) && NUMBER_IS_NAN(y)) return true;
-    // x is +0 and y is -0 or vice versa
-    if (x === 0 && y === 0 && !%_IsSmi(x) && !%_IsSmi(y) &&
-        ((1 / x < 0 && 1 / y > 0) || (1 / x > 0 && 1 / y < 0))) {
+    // x is +0 and y is -0 or vice versa.
+    if (x === 0 && y === 0 && (1 / x) != (1 / y)) {
       return false;
     }
-    return x == y;
+    return x === y;
   }
-  if (IS_STRING(x)) return %StringEquals(x, y);
-  if (IS_BOOLEAN(x))return %NumberEquals(%ToNumber(x),%ToNumber(y));
-
-  return %_ObjectEquals(x, y);
+  return x === y;
 }
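
The simplified SameValue above leans on two IEEE 754 facts: NaN is the only
value that is not strictly equal to itself, and +0 and -0 compare equal while
1 / x produces infinities of opposite sign.  The same trick restated as a
standalone C++ check (illustration only, not part of the patch):

    #include <cassert>
    #include <limits>

    // Mirrors the ES5 SameValue logic for doubles: NaN matches NaN, and
    // +0 and -0 are told apart by the sign of 1 / x.
    static bool SameValueDouble(double x, double y) {
      if (x != y) return (x != x) && (y != y);      // both NaN
      if (x == 0.0) return (1.0 / x) == (1.0 / y);  // +0 vs -0
      return true;
    }

    int main() {
      double nan = std::numeric_limits<double>::quiet_NaN();
      assert(SameValueDouble(nan, nan));    // NaN matches NaN
      assert(!SameValueDouble(0.0, -0.0));  // +0 differs from -0
      assert(SameValueDouble(1.5, 1.5));
      return 0;
    }
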
 
 
diff --git a/src/serialize.cc b/src/serialize.cc
index dcaa101..06c6df7 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -660,27 +660,164 @@
 }
 
 
-#define ONE_CASE_PER_SPACE(base_tag)   \
-  case (base_tag) + NEW_SPACE:         /* NOLINT */ \
-  case (base_tag) + OLD_POINTER_SPACE: /* NOLINT */ \
-  case (base_tag) + OLD_DATA_SPACE:    /* NOLINT */ \
-  case (base_tag) + CODE_SPACE:        /* NOLINT */ \
-  case (base_tag) + MAP_SPACE:         /* NOLINT */ \
-  case (base_tag) + CELL_SPACE:        /* NOLINT */ \
-  case (base_tag) + kLargeData:        /* NOLINT */ \
-  case (base_tag) + kLargeCode:        /* NOLINT */ \
-  case (base_tag) + kLargeFixedArray:  /* NOLINT */
+// This macro is always used with a constant argument so it should all fold
+// away to almost nothing in the generated code.  It might be nicer to do this
+// with the ternary operator but there are type issues with that.
+#define ASSIGN_DEST_SPACE(space_number)                                        \
+  Space* dest_space;                                                           \
+  if (space_number == NEW_SPACE) {                                             \
+    dest_space = Heap::new_space();                                            \
+  } else if (space_number == OLD_POINTER_SPACE) {                              \
+    dest_space = Heap::old_pointer_space();                                    \
+  } else if (space_number == OLD_DATA_SPACE) {                                 \
+    dest_space = Heap::old_data_space();                                       \
+  } else if (space_number == CODE_SPACE) {                                     \
+    dest_space = Heap::code_space();                                           \
+  } else if (space_number == MAP_SPACE) {                                      \
+    dest_space = Heap::map_space();                                            \
+  } else if (space_number == CELL_SPACE) {                                     \
+    dest_space = Heap::cell_space();                                           \
+  } else {                                                                     \
+    ASSERT(space_number >= LO_SPACE);                                          \
+    dest_space = Heap::lo_space();                                             \
+  }
+
+
+static const int kUnknownOffsetFromStart = -1;
 
 
 void Deserializer::ReadChunk(Object** current,
                              Object** limit,
-                             int space,
+                             int source_space,
                              Address address) {
   while (current < limit) {
     int data = source_->Get();
     switch (data) {
+#define CASE_STATEMENT(where, how, within, space_number)                       \
+      case where + how + within + space_number:                                \
+      ASSERT((where & ~kPointedToMask) == 0);                                  \
+      ASSERT((how & ~kHowToCodeMask) == 0);                                    \
+      ASSERT((within & ~kWhereToPointMask) == 0);                              \
+      ASSERT((space_number & ~kSpaceMask) == 0);
+
+#define CASE_BODY(where, how, within, space_number_if_any, offset_from_start)  \
+      {                                                                        \
+        bool emit_write_barrier = false;                                       \
+        bool current_was_incremented = false;                                  \
+        int space_number = space_number_if_any == kAnyOldSpace ?               \
+                            (data & kSpaceMask) : space_number_if_any;         \
+        if (where == kNewObject && how == kPlain && within == kStartOfObject) {\
+          ASSIGN_DEST_SPACE(space_number)                                      \
+          ReadObject(space_number, dest_space, current);                       \
+          emit_write_barrier =                                                 \
+            (space_number == NEW_SPACE && source_space != NEW_SPACE);          \
+        } else {                                                               \
+          Object* new_object = NULL;  /* May not be a real Object pointer. */  \
+          if (where == kNewObject) {                                           \
+            ASSIGN_DEST_SPACE(space_number)                                    \
+            ReadObject(space_number, dest_space, &new_object);                 \
+          } else if (where == kRootArray) {                                    \
+            int root_id = source_->GetInt();                                   \
+            new_object = Heap::roots_address()[root_id];                       \
+          } else if (where == kPartialSnapshotCache) {                         \
+            int cache_index = source_->GetInt();                               \
+            new_object = partial_snapshot_cache_[cache_index];                 \
+          } else if (where == kExternalReference) {                            \
+            int reference_id = source_->GetInt();                              \
+            Address address =                                                  \
+                external_reference_decoder_->Decode(reference_id);             \
+            new_object = reinterpret_cast<Object*>(address);                   \
+          } else if (where == kBackref) {                                      \
+            emit_write_barrier =                                               \
+              (space_number == NEW_SPACE && source_space != NEW_SPACE);        \
+            new_object = GetAddressFromEnd(data & kSpaceMask);                 \
+          } else {                                                             \
+            ASSERT(where == kFromStart);                                       \
+            if (offset_from_start == kUnknownOffsetFromStart) {                \
+              emit_write_barrier =                                             \
+                (space_number == NEW_SPACE && source_space != NEW_SPACE);      \
+              new_object = GetAddressFromStart(data & kSpaceMask);             \
+            } else {                                                           \
+              Address object_address = pages_[space_number][0] +               \
+                  (offset_from_start << kObjectAlignmentBits);                 \
+              new_object = HeapObject::FromAddress(object_address);            \
+            }                                                                  \
+          }                                                                    \
+          if (within == kFirstInstruction) {                                   \
+            Code* new_code_object = reinterpret_cast<Code*>(new_object);       \
+            new_object = reinterpret_cast<Object*>(                            \
+                new_code_object->instruction_start());                         \
+          }                                                                    \
+          if (how == kFromCode) {                                              \
+            Address location_of_branch_data =                                  \
+                reinterpret_cast<Address>(current);                            \
+            Assembler::set_target_at(location_of_branch_data,                  \
+                                     reinterpret_cast<Address>(new_object));   \
+            if (within == kFirstInstruction) {                                 \
+              location_of_branch_data += Assembler::kCallTargetSize;           \
+              current = reinterpret_cast<Object**>(location_of_branch_data);   \
+              current_was_incremented = true;                                  \
+            }                                                                  \
+          } else {                                                             \
+            *current = new_object;                                             \
+          }                                                                    \
+        }                                                                      \
+        if (emit_write_barrier) {                                              \
+          Heap::RecordWrite(address, static_cast<int>(                         \
+              reinterpret_cast<Address>(current) - address));                  \
+        }                                                                      \
+        if (!current_was_incremented) {                                        \
+          current++;   /* Increment current if it wasn't done above. */        \
+        }                                                                      \
+        break;                                                                 \
+      }                                                                        \
+
+// This generates a case and a body for each space.  The large object spaces are
+// very rare in snapshots so they are grouped in one body.
+#define ONE_PER_SPACE(where, how, within)                                      \
+  CASE_STATEMENT(where, how, within, NEW_SPACE)                                \
+  CASE_BODY(where, how, within, NEW_SPACE, kUnknownOffsetFromStart)            \
+  CASE_STATEMENT(where, how, within, OLD_DATA_SPACE)                           \
+  CASE_BODY(where, how, within, OLD_DATA_SPACE, kUnknownOffsetFromStart)       \
+  CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE)                        \
+  CASE_BODY(where, how, within, OLD_POINTER_SPACE, kUnknownOffsetFromStart)    \
+  CASE_STATEMENT(where, how, within, CODE_SPACE)                               \
+  CASE_BODY(where, how, within, CODE_SPACE, kUnknownOffsetFromStart)           \
+  CASE_STATEMENT(where, how, within, CELL_SPACE)                               \
+  CASE_BODY(where, how, within, CELL_SPACE, kUnknownOffsetFromStart)           \
+  CASE_STATEMENT(where, how, within, MAP_SPACE)                                \
+  CASE_BODY(where, how, within, MAP_SPACE, kUnknownOffsetFromStart)            \
+  CASE_STATEMENT(where, how, within, kLargeData)                               \
+  CASE_STATEMENT(where, how, within, kLargeCode)                               \
+  CASE_STATEMENT(where, how, within, kLargeFixedArray)                         \
+  CASE_BODY(where, how, within, kAnyOldSpace, kUnknownOffsetFromStart)
+
+// This generates a case and a body for the new space (which has to do extra
+// write barrier handling) and handles the other spaces with 8 fall-through
+// cases and one body.
+#define ALL_SPACES(where, how, within)                                         \
+  CASE_STATEMENT(where, how, within, NEW_SPACE)                                \
+  CASE_BODY(where, how, within, NEW_SPACE, kUnknownOffsetFromStart)            \
+  CASE_STATEMENT(where, how, within, OLD_DATA_SPACE)                           \
+  CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE)                        \
+  CASE_STATEMENT(where, how, within, CODE_SPACE)                               \
+  CASE_STATEMENT(where, how, within, CELL_SPACE)                               \
+  CASE_STATEMENT(where, how, within, MAP_SPACE)                                \
+  CASE_STATEMENT(where, how, within, kLargeData)                               \
+  CASE_STATEMENT(where, how, within, kLargeCode)                               \
+  CASE_STATEMENT(where, how, within, kLargeFixedArray)                         \
+  CASE_BODY(where, how, within, kAnyOldSpace, kUnknownOffsetFromStart)
+
+#define EMIT_COMMON_REFERENCE_PATTERNS(pseudo_space_number,                    \
+                                       space_number,                           \
+                                       offset_from_start)                      \
+  CASE_STATEMENT(kFromStart, kPlain, kStartOfObject, pseudo_space_number)      \
+  CASE_BODY(kFromStart, kPlain, kStartOfObject, space_number, offset_from_start)
+
+      // We generate 15 cases and bodies that process special tags that combine
+      // the raw data tag and the length into one byte.
 #define RAW_CASE(index, size)                                      \
-      case RAW_DATA_SERIALIZATION + index: {                       \
+      case kRawData + index: {                                     \
         byte* raw_data_out = reinterpret_cast<byte*>(current);     \
         source_->CopyRaw(raw_data_out, size);                      \
         current = reinterpret_cast<Object**>(raw_data_out + size); \
@@ -688,144 +825,77 @@
       }
       COMMON_RAW_LENGTHS(RAW_CASE)
 #undef RAW_CASE
-      case RAW_DATA_SERIALIZATION: {
+
+      // Deserialize a chunk of raw data that doesn't have one of the popular
+      // lengths.
+      case kRawData: {
         int size = source_->GetInt();
         byte* raw_data_out = reinterpret_cast<byte*>(current);
         source_->CopyRaw(raw_data_out, size);
         current = reinterpret_cast<Object**>(raw_data_out + size);
         break;
       }
-      case OBJECT_SERIALIZATION + NEW_SPACE: {
-        ReadObject(NEW_SPACE, Heap::new_space(), current);
-        if (space != NEW_SPACE) {
-          Heap::RecordWrite(address, static_cast<int>(
-              reinterpret_cast<Address>(current) - address));
-        }
-        current++;
-        break;
-      }
-      case OBJECT_SERIALIZATION + OLD_DATA_SPACE:
-        ReadObject(OLD_DATA_SPACE, Heap::old_data_space(), current++);
-        break;
-      case OBJECT_SERIALIZATION + OLD_POINTER_SPACE:
-        ReadObject(OLD_POINTER_SPACE, Heap::old_pointer_space(), current++);
-        break;
-      case OBJECT_SERIALIZATION + MAP_SPACE:
-        ReadObject(MAP_SPACE, Heap::map_space(), current++);
-        break;
-      case OBJECT_SERIALIZATION + CODE_SPACE:
-        ReadObject(CODE_SPACE, Heap::code_space(), current++);
-        break;
-      case OBJECT_SERIALIZATION + CELL_SPACE:
-        ReadObject(CELL_SPACE, Heap::cell_space(), current++);
-        break;
-      case OBJECT_SERIALIZATION + kLargeData:
-        ReadObject(kLargeData, Heap::lo_space(), current++);
-        break;
-      case OBJECT_SERIALIZATION + kLargeCode:
-        ReadObject(kLargeCode, Heap::lo_space(), current++);
-        break;
-      case OBJECT_SERIALIZATION + kLargeFixedArray:
-        ReadObject(kLargeFixedArray, Heap::lo_space(), current++);
-        break;
-      case CODE_OBJECT_SERIALIZATION + kLargeCode: {
-        Object* new_code_object = NULL;
-        ReadObject(kLargeCode, Heap::lo_space(), &new_code_object);
-        Code* code_object = reinterpret_cast<Code*>(new_code_object);
-        // Setting a branch/call to another code object from code.
-        Address location_of_branch_data = reinterpret_cast<Address>(current);
-        Assembler::set_target_at(location_of_branch_data,
-                                 code_object->instruction_start());
-        location_of_branch_data += Assembler::kCallTargetSize;
-        current = reinterpret_cast<Object**>(location_of_branch_data);
-        break;
-      }
-      case CODE_OBJECT_SERIALIZATION + CODE_SPACE: {
-        Object* new_code_object = NULL;
-        ReadObject(CODE_SPACE, Heap::code_space(), &new_code_object);
-        Code* code_object = reinterpret_cast<Code*>(new_code_object);
-        // Setting a branch/call to another code object from code.
-        Address location_of_branch_data = reinterpret_cast<Address>(current);
-        Assembler::set_target_at(location_of_branch_data,
-                                 code_object->instruction_start());
-        location_of_branch_data += Assembler::kCallTargetSize;
-        current = reinterpret_cast<Object**>(location_of_branch_data);
-        break;
-      }
-      ONE_CASE_PER_SPACE(BACKREF_SERIALIZATION) {
-        // Write a backreference to an object we unpacked earlier.
-        int backref_space = (data & kSpaceMask);
-        if (backref_space == NEW_SPACE && space != NEW_SPACE) {
-          Heap::RecordWrite(address, static_cast<int>(
-              reinterpret_cast<Address>(current) - address));
-        }
-        *current++ = GetAddressFromEnd(backref_space);
-        break;
-      }
-      ONE_CASE_PER_SPACE(REFERENCE_SERIALIZATION) {
-        // Write a reference to an object we unpacked earlier.
-        int reference_space = (data & kSpaceMask);
-        if (reference_space == NEW_SPACE && space != NEW_SPACE) {
-          Heap::RecordWrite(address, static_cast<int>(
-              reinterpret_cast<Address>(current) - address));
-        }
-        *current++ = GetAddressFromStart(reference_space);
-        break;
-      }
-#define COMMON_REFS_CASE(index, reference_space, address)                      \
-      case REFERENCE_SERIALIZATION + index: {                                  \
-        ASSERT(SpaceIsPaged(reference_space));                                 \
-        Address object_address =                                               \
-            pages_[reference_space][0] + (address << kObjectAlignmentBits);    \
-        *current++ = HeapObject::FromAddress(object_address);                  \
-        break;                                                                 \
-      }
-      COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE)
-#undef COMMON_REFS_CASE
-      ONE_CASE_PER_SPACE(CODE_BACKREF_SERIALIZATION) {
-        int backref_space = (data & kSpaceMask);
-        // Can't use Code::cast because heap is not set up yet and assertions
-        // will fail.
-        Code* code_object =
-            reinterpret_cast<Code*>(GetAddressFromEnd(backref_space));
-        // Setting a branch/call to previously decoded code object from code.
-        Address location_of_branch_data = reinterpret_cast<Address>(current);
-        Assembler::set_target_at(location_of_branch_data,
-                                 code_object->instruction_start());
-        location_of_branch_data += Assembler::kCallTargetSize;
-        current = reinterpret_cast<Object**>(location_of_branch_data);
-        break;
-      }
-      ONE_CASE_PER_SPACE(CODE_REFERENCE_SERIALIZATION) {
-        int backref_space = (data & kSpaceMask);
-        // Can't use Code::cast because heap is not set up yet and assertions
-        // will fail.
-        Code* code_object =
-            reinterpret_cast<Code*>(GetAddressFromStart(backref_space));
-        // Setting a branch/call to previously decoded code object from code.
-        Address location_of_branch_data = reinterpret_cast<Address>(current);
-        Assembler::set_target_at(location_of_branch_data,
-                                 code_object->instruction_start());
-        location_of_branch_data += Assembler::kCallTargetSize;
-        current = reinterpret_cast<Object**>(location_of_branch_data);
-        break;
-      }
-      case EXTERNAL_REFERENCE_SERIALIZATION: {
-        int reference_id = source_->GetInt();
-        Address address = external_reference_decoder_->Decode(reference_id);
-        *current++ = reinterpret_cast<Object*>(address);
-        break;
-      }
-      case EXTERNAL_BRANCH_TARGET_SERIALIZATION: {
-        int reference_id = source_->GetInt();
-        Address address = external_reference_decoder_->Decode(reference_id);
-        Address location_of_branch_data = reinterpret_cast<Address>(current);
-        Assembler::set_external_target_at(location_of_branch_data, address);
-        location_of_branch_data += Assembler::kExternalTargetSize;
-        current = reinterpret_cast<Object**>(location_of_branch_data);
-        break;
-      }
-      case START_NEW_PAGE_SERIALIZATION: {
+
+      // Deserialize a new object and write a pointer to it to the current
+      // object.
+      ONE_PER_SPACE(kNewObject, kPlain, kStartOfObject)
+      // Deserialize a new code object and write a pointer to its first
+      // instruction to the current code object.
+      ONE_PER_SPACE(kNewObject, kFromCode, kFirstInstruction)
+      // Find a recently deserialized object using its offset from the current
+      // allocation point and write a pointer to it to the current object.
+      ALL_SPACES(kBackref, kPlain, kStartOfObject)
+      // Find a recently deserialized code object using its offset from the
+      // current allocation point and write a pointer to its first instruction
+      // to the current code object.
+      ALL_SPACES(kBackref, kFromCode, kFirstInstruction)
+      // Find an already deserialized object using its offset from the start
+      // and write a pointer to it to the current object.
+      ALL_SPACES(kFromStart, kPlain, kStartOfObject)
+      // Find an already deserialized code object using its offset from the
+      // start and write a pointer to its first instruction to the current code
+      // object.
+      ALL_SPACES(kFromStart, kFromCode, kFirstInstruction)
+      // Find an already deserialized object at one of the predetermined popular
+      // offsets from the start and write a pointer to it in the current object.
+      COMMON_REFERENCE_PATTERNS(EMIT_COMMON_REFERENCE_PATTERNS)
+      // Find an object in the roots array and write a pointer to it to the
+      // current object.
+      CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0)
+      CASE_BODY(kRootArray, kPlain, kStartOfObject, 0, kUnknownOffsetFromStart)
+      // Find an object in the partial snapshots cache and write a pointer to it
+      // to the current object.
+      CASE_STATEMENT(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
+      CASE_BODY(kPartialSnapshotCache,
+                kPlain,
+                kStartOfObject,
+                0,
+                kUnknownOffsetFromStart)
+      // Find an external reference and write a pointer to it to the current
+      // object.
+      CASE_STATEMENT(kExternalReference, kPlain, kStartOfObject, 0)
+      CASE_BODY(kExternalReference,
+                kPlain,
+                kStartOfObject,
+                0,
+                kUnknownOffsetFromStart)
+      // Find an external reference and write a pointer to it in the current
+      // code object.
+      CASE_STATEMENT(kExternalReference, kFromCode, kStartOfObject, 0)
+      CASE_BODY(kExternalReference,
+                kFromCode,
+                kStartOfObject,
+                0,
+                kUnknownOffsetFromStart)
+
+#undef CASE_STATEMENT
+#undef CASE_BODY
+#undef ONE_PER_SPACE
+#undef ALL_SPACES
+#undef EMIT_COMMON_REFERENCE_PATTERNS
+#undef ASSIGN_DEST_SPACE
+
+      case kNewPage: {
         int space = source_->Get();
         pages_[space].Add(last_object_address_);
         if (space == CODE_SPACE) {
@@ -833,7 +903,8 @@
         }
         break;
       }
-      case NATIVES_STRING_RESOURCE: {
+
+      case kNativesStringResource: {
         int index = source_->Get();
         Vector<const char> source_vector = Natives::GetScriptSource(index);
         NativesExternalStringResource* resource =
@@ -841,21 +912,13 @@
         *current++ = reinterpret_cast<Object*>(resource);
         break;
       }
-      case ROOT_SERIALIZATION: {
-        int root_id = source_->GetInt();
-        *current++ = Heap::roots_address()[root_id];
-        break;
-      }
-      case PARTIAL_SNAPSHOT_CACHE_ENTRY: {
-        int cache_index = source_->GetInt();
-        *current++ = partial_snapshot_cache_[cache_index];
-        break;
-      }
-      case SYNCHRONIZE: {
+
+      case kSynchronize: {
         // If we get here then that indicates that you have a mismatch between
         // the number of GC roots when serializing and deserializing.
         UNREACHABLE();
       }
+
       default:
         UNREACHABLE();
     }
@@ -880,7 +943,7 @@
   int data = source_->Get();
   // If this assert fails then that indicates that you have a mismatch between
   // the number of GC roots when serializing and deserializing.
-  ASSERT_EQ(SYNCHRONIZE, data);
+  ASSERT_EQ(kSynchronize, data);
   do {
     int character = source_->Get();
     if (character == 0) break;
@@ -895,7 +958,7 @@
 
 
 void Serializer::Synchronize(const char* tag) {
-  sink_->Put(SYNCHRONIZE, tag);
+  sink_->Put(kSynchronize, tag);
   int character;
   do {
     character = *tag++;
@@ -957,13 +1020,13 @@
 void Serializer::VisitPointers(Object** start, Object** end) {
   for (Object** current = start; current < end; current++) {
     if ((*current)->IsSmi()) {
-      sink_->Put(RAW_DATA_SERIALIZATION, "RawData");
+      sink_->Put(kRawData, "RawData");
       sink_->PutInt(kPointerSize, "length");
       for (int i = 0; i < kPointerSize; i++) {
         sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte");
       }
     } else {
-      SerializeObject(*current, TAGGED_REPRESENTATION);
+      SerializeObject(*current, kPlain, kStartOfObject);
     }
   }
 }
@@ -1033,7 +1096,8 @@
 void Serializer::SerializeReferenceToPreviousObject(
     int space,
     int address,
-    ReferenceRepresentation reference_representation) {
+    HowToCode how_to_code,
+    WhereToPoint where_to_point) {
   int offset = CurrentAllocationAddress(space) - address;
   bool from_start = true;
   if (SpaceIsPaged(space)) {
@@ -1054,43 +1118,30 @@
   // If we are actually dealing with real offsets (and not a numbering of
   // all objects) then we should shift out the bits that are always 0.
   if (!SpaceIsLarge(space)) address >>= kObjectAlignmentBits;
-  // On some architectures references between code objects are encoded
-  // specially (as relative offsets).  Such references have their own
-  // special tags to simplify the deserializer.
-  if (reference_representation == CODE_TARGET_REPRESENTATION) {
-    if (from_start) {
-      sink_->Put(CODE_REFERENCE_SERIALIZATION + space, "RefCodeSer");
-      sink_->PutInt(address, "address");
-    } else {
-      sink_->Put(CODE_BACKREF_SERIALIZATION + space, "BackRefCodeSer");
+  if (from_start) {
+#define COMMON_REFS_CASE(pseudo_space, actual_space, offset)                   \
+    if (space == actual_space && address == offset &&                          \
+        how_to_code == kPlain && where_to_point == kStartOfObject) {           \
+      sink_->Put(kFromStart + how_to_code + where_to_point +                   \
+                 pseudo_space, "RefSer");                                      \
+    } else  /* NOLINT */
+    COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE)
+#undef COMMON_REFS_CASE
+    {  /* NOLINT */
+      sink_->Put(kFromStart + how_to_code + where_to_point + space, "RefSer");
       sink_->PutInt(address, "address");
     }
   } else {
-    // Regular absolute references.
-    CHECK_EQ(TAGGED_REPRESENTATION, reference_representation);
-    if (from_start) {
-      // There are some common offsets that have their own specialized encoding.
-#define COMMON_REFS_CASE(tag, common_space, common_offset)               \
-      if (space == common_space && address == common_offset) {           \
-        sink_->PutSection(tag + REFERENCE_SERIALIZATION, "RefSer");      \
-      } else  /* NOLINT */
-      COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE)
-#undef COMMON_REFS_CASE
-      {  /* NOLINT */
-        sink_->Put(REFERENCE_SERIALIZATION + space, "RefSer");
-        sink_->PutInt(address, "address");
-      }
-    } else {
-      sink_->Put(BACKREF_SERIALIZATION + space, "BackRefSer");
-      sink_->PutInt(address, "address");
-    }
+    sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRefSer");
+    sink_->PutInt(address, "address");
   }
 }
 
 
 void StartupSerializer::SerializeObject(
     Object* o,
-    ReferenceRepresentation reference_representation) {
+    HowToCode how_to_code,
+    WhereToPoint where_to_point) {
   CHECK(o->IsHeapObject());
   HeapObject* heap_object = HeapObject::cast(o);
 
@@ -1099,13 +1150,15 @@
     int address = address_mapper_.MappedTo(heap_object);
     SerializeReferenceToPreviousObject(space,
                                        address,
-                                       reference_representation);
+                                       how_to_code,
+                                       where_to_point);
   } else {
     // Object has not yet been serialized.  Serialize it here.
     ObjectSerializer object_serializer(this,
                                        heap_object,
                                        sink_,
-                                       reference_representation);
+                                       how_to_code,
+                                       where_to_point);
     object_serializer.Serialize();
   }
 }
@@ -1115,7 +1168,7 @@
   for (int i = partial_snapshot_cache_length_;
        i < kPartialSnapshotCacheCapacity;
        i++) {
-    sink_->Put(ROOT_SERIALIZATION, "RootSerialization");
+    sink_->Put(kRootArray + kPlain + kStartOfObject, "RootSerialization");
     sink_->PutInt(Heap::kUndefinedValueRootIndex, "root_index");
   }
   Heap::IterateWeakRoots(this, VISIT_ALL);
@@ -1124,20 +1177,22 @@
 
 void PartialSerializer::SerializeObject(
     Object* o,
-    ReferenceRepresentation reference_representation) {
+    HowToCode how_to_code,
+    WhereToPoint where_to_point) {
   CHECK(o->IsHeapObject());
   HeapObject* heap_object = HeapObject::cast(o);
 
   int root_index;
   if ((root_index = RootIndex(heap_object)) != kInvalidRootIndex) {
-    sink_->Put(ROOT_SERIALIZATION, "RootSerialization");
+    sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
     sink_->PutInt(root_index, "root_index");
     return;
   }
 
   if (ShouldBeInThePartialSnapshotCache(heap_object)) {
     int cache_index = PartialSnapshotCacheIndex(heap_object);
-    sink_->Put(PARTIAL_SNAPSHOT_CACHE_ENTRY, "PartialSnapshotCache");
+    sink_->Put(kPartialSnapshotCache + how_to_code + where_to_point,
+               "PartialSnapshotCache");
     sink_->PutInt(cache_index, "partial_snapshot_cache_index");
     return;
   }
@@ -1155,13 +1210,15 @@
     int address = address_mapper_.MappedTo(heap_object);
     SerializeReferenceToPreviousObject(space,
                                        address,
-                                       reference_representation);
+                                       how_to_code,
+                                       where_to_point);
   } else {
     // Object has not yet been serialized.  Serialize it here.
     ObjectSerializer serializer(this,
                                 heap_object,
                                 sink_,
-                                reference_representation);
+                                how_to_code,
+                                where_to_point);
     serializer.Serialize();
   }
 }
@@ -1171,12 +1228,8 @@
   int space = Serializer::SpaceOfObject(object_);
   int size = object_->Size();
 
-  if (reference_representation_ == TAGGED_REPRESENTATION) {
-    sink_->Put(OBJECT_SERIALIZATION + space, "ObjectSerialization");
-  } else {
-    CHECK_EQ(CODE_TARGET_REPRESENTATION, reference_representation_);
-    sink_->Put(CODE_OBJECT_SERIALIZATION + space, "ObjectSerialization");
-  }
+  sink_->Put(kNewObject + reference_representation_ + space,
+             "ObjectSerialization");
   sink_->PutInt(size >> kObjectAlignmentBits, "Size in words");
 
   LOG(SnapshotPositionEvent(object_->address(), sink_->Position()));
@@ -1186,12 +1239,12 @@
   int offset = serializer_->Allocate(space, size, &start_new_page);
   serializer_->address_mapper()->AddMapping(object_, offset);
   if (start_new_page) {
-    sink_->Put(START_NEW_PAGE_SERIALIZATION, "NewPage");
+    sink_->Put(kNewPage, "NewPage");
     sink_->PutSection(space, "NewPageSpace");
   }
 
   // Serialize the map (first word of the object).
-  serializer_->SerializeObject(object_->map(), TAGGED_REPRESENTATION);
+  serializer_->SerializeObject(object_->map(), kPlain, kStartOfObject);
 
   // Serialize the rest of the object.
   CHECK_EQ(0, bytes_processed_so_far_);
@@ -1209,7 +1262,7 @@
     if (current < end) OutputRawData(reinterpret_cast<Address>(current));
 
     while (current < end && !(*current)->IsSmi()) {
-      serializer_->SerializeObject(*current, TAGGED_REPRESENTATION);
+      serializer_->SerializeObject(*current, kPlain, kStartOfObject);
       bytes_processed_so_far_ += kPointerSize;
       current++;
     }
@@ -1223,7 +1276,7 @@
   OutputRawData(references_start);
 
   for (Address* current = start; current < end; current++) {
-    sink_->Put(EXTERNAL_REFERENCE_SERIALIZATION, "ExternalReference");
+    sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
     int reference_id = serializer_->EncodeExternalReference(*current);
     sink_->PutInt(reference_id, "reference id");
   }
@@ -1237,9 +1290,16 @@
   Address target = rinfo->target_address();
   uint32_t encoding = serializer_->EncodeExternalReference(target);
   CHECK(target == NULL ? encoding == 0 : encoding != 0);
-  sink_->Put(EXTERNAL_BRANCH_TARGET_SERIALIZATION, "ExternalReference");
+  int representation;
+  // Can't use a ternary operator because of gcc.
+  if (rinfo->IsCodedSpecially()) {
+    representation = kStartOfObject + kFromCode;
+  } else {
+    representation = kStartOfObject + kPlain;
+  }
+  sink_->Put(kExternalReference + representation, "ExternalReference");
   sink_->PutInt(encoding, "reference id");
-  bytes_processed_so_far_ += Assembler::kExternalTargetSize;
+  bytes_processed_so_far_ += rinfo->target_address_size();
 }
 
 
@@ -1248,8 +1308,8 @@
   Address target_start = rinfo->target_address_address();
   OutputRawData(target_start);
   Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
-  serializer_->SerializeObject(target, CODE_TARGET_REPRESENTATION);
-  bytes_processed_so_far_ += Assembler::kCallTargetSize;
+  serializer_->SerializeObject(target, kFromCode, kFirstInstruction);
+  bytes_processed_so_far_ += rinfo->target_address_size();
 }
 
 
@@ -1264,7 +1324,7 @@
       typedef v8::String::ExternalAsciiStringResource Resource;
       Resource* resource = string->resource();
       if (resource == *resource_pointer) {
-        sink_->Put(NATIVES_STRING_RESOURCE, "NativesStringResource");
+        sink_->Put(kNativesStringResource, "NativesStringResource");
         sink_->PutSection(i, "NativesStringResourceEnd");
         bytes_processed_so_far_ += sizeof(resource);
         return;
@@ -1288,12 +1348,12 @@
     Address base = object_start + bytes_processed_so_far_;
 #define RAW_CASE(index, length)                                                \
     if (skipped == length) {                                                   \
-      sink_->PutSection(RAW_DATA_SERIALIZATION + index, "RawDataFixed");       \
+      sink_->PutSection(kRawData + index, "RawDataFixed");                     \
     } else  /* NOLINT */
     COMMON_RAW_LENGTHS(RAW_CASE)
 #undef RAW_CASE
     {  /* NOLINT */
-      sink_->Put(RAW_DATA_SERIALIZATION, "RawData");
+      sink_->Put(kRawData, "RawData");
       sink_->PutInt(skipped, "length");
     }
     for (int i = 0; i < skipped; i++) {
diff --git a/src/serialize.h b/src/serialize.h
index 279bc58..6a318f1 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -137,17 +137,23 @@
 };
 
 
-// It is very common to have a reference to the object at word 10 in space 2,
-// the object at word 5 in space 2 and the object at word 28 in space 4.  This
-// only works for objects in the first page of a space.
-#define COMMON_REFERENCE_PATTERNS(f)                              \
-  f(kNumberOfSpaces, 2, 10)                                       \
-  f(kNumberOfSpaces + 1, 2, 5)                                    \
-  f(kNumberOfSpaces + 2, 4, 28)                                   \
-  f(kNumberOfSpaces + 3, 2, 21)                                   \
-  f(kNumberOfSpaces + 4, 2, 98)                                   \
-  f(kNumberOfSpaces + 5, 2, 67)                                   \
-  f(kNumberOfSpaces + 6, 4, 132)
+// It is very common to have a reference to objects at certain offsets in the
+// heap.  These offsets have been determined experimentally.  We code
+// references to such objects in a single byte that encodes the way the pointer
+// is written (only plain pointers allowed), the space number and the offset.
+// This only works for objects in the first page of a space.  Don't use this for
+// things in new space, since it bypasses the write barrier.
+
+static const int k64 = (sizeof(uintptr_t) - 4) / 4;
+
+#define COMMON_REFERENCE_PATTERNS(f)                               \
+  f(kNumberOfSpaces, 2, (11 - k64))                                \
+  f((kNumberOfSpaces + 1), 2, 0)                                   \
+  f((kNumberOfSpaces + 2), 2, (142 - 16 * k64))                    \
+  f((kNumberOfSpaces + 3), 2, (74 - 15 * k64))                     \
+  f((kNumberOfSpaces + 4), 2, 5)                                   \
+  f((kNumberOfSpaces + 5), 1, 135)                                 \
+  f((kNumberOfSpaces + 6), 2, (228 - 39 * k64))
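
Since (sizeof(uintptr_t) - 4) / 4 evaluates to 1 on 64-bit targets and 0 on
32-bit ones, each pattern folds both first-page offsets into one expression
instead of needing separate 32-bit and 64-bit tables.  A minimal standalone
check of the arithmetic, for illustration:

    #include <stdint.h>
    #include <stdio.h>

    // 1 when pointers are 8 bytes, 0 when they are 4.
    static const int k64 = (sizeof(uintptr_t) - 4) / 4;

    int main() {
      printf("k64 = %d\n", k64);                // 1 on LP64, 0 on ILP32
      printf("offset = %d\n", 142 - 16 * k64);  // 126 words vs 142 words
      return 0;
    }
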
 
 #define COMMON_RAW_LENGTHS(f)        \
   f(1, 1)  \
@@ -175,37 +181,63 @@
   static void SetSnapshotCacheSize(int size);
 
  protected:
-  enum DataType {
-    RAW_DATA_SERIALIZATION = 0,
-    // And 15 common raw lengths.
-    OBJECT_SERIALIZATION = 16,
-    // One variant per space.
-    CODE_OBJECT_SERIALIZATION = 25,
-    // One per space (only code spaces in use).
-    EXTERNAL_REFERENCE_SERIALIZATION = 34,
-    EXTERNAL_BRANCH_TARGET_SERIALIZATION = 35,
-    SYNCHRONIZE = 36,
-    START_NEW_PAGE_SERIALIZATION = 37,
-    NATIVES_STRING_RESOURCE = 38,
-    ROOT_SERIALIZATION = 39,
-    PARTIAL_SNAPSHOT_CACHE_ENTRY = 40,
-    // Free: 41-47.
-    BACKREF_SERIALIZATION = 48,
-    // One per space, must be kSpaceMask aligned.
-    // Free: 57-63.
-    REFERENCE_SERIALIZATION = 64,
-    // One per space and common references.  Must be kSpaceMask aligned.
-    CODE_BACKREF_SERIALIZATION = 80,
-    // One per space, must be kSpaceMask aligned.
-    // Free: 89-95.
-    CODE_REFERENCE_SERIALIZATION = 96
-    // One per space, must be kSpaceMask aligned.
-    // Free: 105-255.
+  // Where the pointed-to object can be found:
+  enum Where {
+    kNewObject = 0,                 // Object is next in snapshot.
+    // 1-8                             One per space.
+    kRootArray = 0x9,               // Object is found in root array.
+    kPartialSnapshotCache = 0xa,    // Object is in the cache.
+    kExternalReference = 0xb,       // Pointer to an external reference.
+    // 0xc-0xf                         Free.
+    kBackref = 0x10,                // Object is described relative to end.
+    // 0x11-0x18                       One per space.
+    // 0x19-0x1f                       Common backref offsets.
+    kFromStart = 0x20,              // Object is described relative to start.
+    // 0x21-0x28                       One per space.
+    // 0x29-0x2f                       Free.
+    // 0x30-0x3f                       Used by misc tags below.
+    kPointedToMask = 0x3f
   };
+
+  // How to code the pointer to the object.
+  enum HowToCode {
+    kPlain = 0,                          // Straight pointer.
+    // What this means depends on the architecture:
+    kFromCode = 0x40,                    // A pointer inlined in code.
+    kHowToCodeMask = 0x40
+  };
+
+  // Where to point within the object.
+  enum WhereToPoint {
+    kStartOfObject = 0,
+    kFirstInstruction = 0x80,
+    kWhereToPointMask = 0x80
+  };
+
+  // Misc.
+  // Raw data to be copied from the snapshot.
+  static const int kRawData = 0x30;
+  // Some common raw lengths: 0x31-0x3f
+  // A tag emitted at strategic points in the snapshot to delineate sections.
+  // If the deserializer does not find these at the expected moments then it
+  // is an indication that the snapshot and the VM do not fit together.
+  // Examine the build process for architecture, version or configuration
+  // mismatches.
+  static const int kSynchronize = 0x70;
+  // Used for the source code of the natives, which is in the executable, but
+  // is referred to from external strings in the snapshot.
+  static const int kNativesStringResource = 0x71;
+  static const int kNewPage = 0x72;
+  // 0x73-0x7f                            Free.
+  // 0xb0-0xbf                            Free.
+  // 0xf0-0xff                            Free.
+
+
   static const int kLargeData = LAST_SPACE;
   static const int kLargeCode = kLargeData + 1;
   static const int kLargeFixedArray = kLargeCode + 1;
   static const int kNumberOfSpaces = kLargeFixedArray + 1;
+  static const int kAnyOldSpace = -1;
 
   // A bitmask for getting the space out of an instruction.
   static const int kSpaceMask = 15;
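
Together these constants pack one deserializer instruction into a single
byte: the low six bits locate the object (a Where family tag plus, for the
per-space families, the space number in the low four bits), bit 6 says
whether the pointer is inlined in code, and bit 7 says whether it points at
the start of the object or at its first instruction.  A sketch of composing
and decoding such a tag (the CODE_SPACE value of 3 is assumed here purely
for illustration):

    #include <cassert>

    // Values copied from the enums in serialize.h above.
    enum Where { kNewObject = 0, kBackref = 0x10, kFromStart = 0x20,
                 kPointedToMask = 0x3f };
    enum HowToCode { kPlain = 0, kFromCode = 0x40, kHowToCodeMask = 0x40 };
    enum WhereToPoint { kStartOfObject = 0, kFirstInstruction = 0x80,
                        kWhereToPointMask = 0x80 };
    static const int kSpaceMask = 15;
    static const int CODE_SPACE = 3;  // assumed space number

    int main() {
      // A back reference to a code object, patched into a call site.
      int tag = kBackref + kFromCode + kFirstInstruction + CODE_SPACE;
      assert((tag & kSpaceMask) == CODE_SPACE);
      assert((tag & kHowToCodeMask) == kFromCode);
      assert((tag & kWhereToPointMask) == kFirstInstruction);
      assert((tag & kPointedToMask & ~kSpaceMask) == kBackref);
      return 0;
    }
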
@@ -396,10 +428,6 @@
 #endif
 
  protected:
-  enum ReferenceRepresentation {
-    TAGGED_REPRESENTATION,      // A tagged object reference.
-    CODE_TARGET_REPRESENTATION  // A reference to first instruction in target.
-  };
   static const int kInvalidRootIndex = -1;
   virtual int RootIndex(HeapObject* heap_object) = 0;
   virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) = 0;
@@ -409,11 +437,12 @@
     ObjectSerializer(Serializer* serializer,
                      Object* o,
                      SnapshotByteSink* sink,
-                     ReferenceRepresentation representation)
+                     HowToCode how_to_code,
+                     WhereToPoint where_to_point)
       : serializer_(serializer),
         object_(HeapObject::cast(o)),
         sink_(sink),
-        reference_representation_(representation),
+        reference_representation_(how_to_code + where_to_point),
         bytes_processed_so_far_(0) { }
     void Serialize();
     void VisitPointers(Object** start, Object** end);
@@ -435,16 +464,18 @@
     Serializer* serializer_;
     HeapObject* object_;
     SnapshotByteSink* sink_;
-    ReferenceRepresentation reference_representation_;
+    int reference_representation_;
     int bytes_processed_so_far_;
   };
 
   virtual void SerializeObject(Object* o,
-                               ReferenceRepresentation representation) = 0;
+                               HowToCode how_to_code,
+                               WhereToPoint where_to_point) = 0;
   void SerializeReferenceToPreviousObject(
       int space,
       int address,
-      ReferenceRepresentation reference_representation);
+      HowToCode how_to_code,
+      WhereToPoint where_to_point);
   void InitializeAllocators();
   // This will return the space for an object.  If the object is in large
   // object space it may return kLargeCode or kLargeFixedArray in order
@@ -492,7 +523,8 @@
   // Serialize the objects reachable from a single object pointer.
   virtual void Serialize(Object** o);
   virtual void SerializeObject(Object* o,
-                               ReferenceRepresentation representation);
+                               HowToCode how_to_code,
+                               WhereToPoint where_to_point);
 
  protected:
   virtual int RootIndex(HeapObject* o);
@@ -528,7 +560,8 @@
   // 3) Weak references (eg the symbol table).
   virtual void SerializeStrongReferences();
   virtual void SerializeObject(Object* o,
-                               ReferenceRepresentation representation);
+                               HowToCode how_to_code,
+                               WhereToPoint where_to_point);
   void SerializeWeakReferences();
   void Serialize() {
     SerializeStrongReferences();
diff --git a/src/third_party/dtoa/dtoa.c b/src/third_party/dtoa/dtoa.c
index 8917d9d..178b3d1 100644
--- a/src/third_party/dtoa/dtoa.c
+++ b/src/third_party/dtoa/dtoa.c
@@ -164,8 +164,12 @@
  */
 
 #ifndef Long
+#if __LP64__
+#define Long int
+#else
 #define Long long
 #endif
+#endif
 #ifndef ULong
 typedef unsigned Long ULong;
 #endif
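
dtoa.c accesses doubles as pairs of 32-bit halves, so Long (and the ULong
built from it) must be exactly 32 bits wide; on LP64 platforms plain long
widens to 64 bits, hence the new mapping to int.  The assumption restated as
a compile-time check (hypothetical snippet, C++03-era idiom):

    #include <cstdio>

    #if defined(__LP64__)
    typedef int Long;   // long is 64 bits on LP64; use int instead
    #else
    typedef long Long;  // long is 32 bits on ILP32
    #endif

    // Fails to compile (negative array size) if Long is not 32 bits.
    typedef char AssertLongIs32Bits[sizeof(Long) == 4 ? 1 : -1];

    int main() {
      std::printf("sizeof(Long) = %u\n",
                  static_cast<unsigned>(sizeof(Long)));  // always 4
      return 0;
    }
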
diff --git a/src/top.cc b/src/top.cc
index 87dc1f6..516ec67 100644
--- a/src/top.cc
+++ b/src/top.cc
@@ -370,8 +370,7 @@
   v8::HandleScope scope;
   // Ensure no negative values.
   int limit = Max(frame_limit, 0);
-  Handle<JSArray> stackTrace = Factory::NewJSArray(frame_limit);
-  FixedArray* frames = FixedArray::cast(stackTrace->elements());
+  Handle<JSArray> stack_trace = Factory::NewJSArray(frame_limit);
 
   Handle<String> column_key =  Factory::LookupAsciiSymbol("column");
   Handle<String> line_key =  Factory::LookupAsciiSymbol("lineNumber");
@@ -438,13 +437,13 @@
       SetProperty(stackFrame, constructor_key, is_constructor, NONE);
     }
 
-    frames->set(frames_seen, *stackFrame);
+    FixedArray::cast(stack_trace->elements())->set(frames_seen, *stackFrame);
     frames_seen++;
     it.Advance();
   }
 
-  stackTrace->set_length(Smi::FromInt(frames_seen));
-  return scope.Close(Utils::StackTraceToLocal(stackTrace));
+  stack_trace->set_length(Smi::FromInt(frames_seen));
+  return scope.Close(Utils::StackTraceToLocal(stack_trace));
 }
 
 
diff --git a/src/unbound-queue-inl.h b/src/unbound-queue-inl.h
new file mode 100644
index 0000000..ff5d833
--- /dev/null
+++ b/src/unbound-queue-inl.h
@@ -0,0 +1,87 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_UNBOUND_QUEUE_INL_H_
+#define V8_UNBOUND_QUEUE_INL_H_
+
+#include "unbound-queue.h"
+
+namespace v8 {
+namespace internal {
+
+template<typename Record>
+struct UnboundQueue<Record>::Node: public Malloced {
+  explicit Node(const Record& value)
+      : value(value), next(NULL) {
+  }
+
+  Record value;
+  Node* next;
+};
+
+
+template<typename Record>
+UnboundQueue<Record>::UnboundQueue() {
+  first_ = new Node(Record());
+  divider_ = last_ = reinterpret_cast<AtomicWord>(first_);
+}
+
+
+template<typename Record>
+UnboundQueue<Record>::~UnboundQueue() {
+  while (first_ != NULL) DeleteFirst();
+}
+
+
+template<typename Record>
+void UnboundQueue<Record>::DeleteFirst() {
+  Node* tmp = first_;
+  first_ = tmp->next;
+  delete tmp;
+}
+
+
+template<typename Record>
+void UnboundQueue<Record>::Dequeue(Record* rec) {
+  ASSERT(divider_ != last_);
+  Node* next = reinterpret_cast<Node*>(divider_)->next;
+  *rec = next->value;
+  OS::ReleaseStore(&divider_, reinterpret_cast<AtomicWord>(next));
+}
+
+
+template<typename Record>
+void UnboundQueue<Record>::Enqueue(const Record& rec) {
+  Node*& next = reinterpret_cast<Node*>(last_)->next;
+  next = new Node(rec);
+  OS::ReleaseStore(&last_, reinterpret_cast<AtomicWord>(next));
+  while (first_ != reinterpret_cast<Node*>(divider_)) DeleteFirst();
+}
+
+} }  // namespace v8::internal
+
+#endif  // V8_UNBOUND_QUEUE_INL_H_
diff --git a/src/unbound-queue.h b/src/unbound-queue.h
new file mode 100644
index 0000000..7bc314b
--- /dev/null
+++ b/src/unbound-queue.h
@@ -0,0 +1,66 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_UNBOUND_QUEUE_
+#define V8_UNBOUND_QUEUE_
+
+namespace v8 {
+namespace internal {
+
+
+// Lock-free unbound queue for small records.  Intended for
+// transferring small records between a single producer and a single
+// consumer.  Does not restrict the number of queued elements, so the
+// producer never blocks.  Implemented after Herb Sutter's article:
+// http://www.ddj.com/high-performance-computing/210604448
+template<typename Record>
+class UnboundQueue BASE_EMBEDDED {
+ public:
+  inline UnboundQueue();
+  inline ~UnboundQueue();
+
+  INLINE(void Dequeue(Record* rec));
+  INLINE(void Enqueue(const Record& rec));
+  INLINE(bool IsEmpty()) { return divider_ == last_; }
+
+ private:
+  INLINE(void DeleteFirst());
+
+  struct Node;
+
+  Node* first_;
+  AtomicWord divider_;  // Node*
+  AtomicWord last_;     // Node*
+
+  DISALLOW_COPY_AND_ASSIGN(UnboundQueue);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_UNBOUND_QUEUE_
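
The split of ownership is what makes this safe without locks: the consumer
only ever advances divider_, the producer owns first_ and last_, nodes
between first_ and divider_ are already consumed and get reclaimed lazily on
the next Enqueue, and OS::ReleaseStore publishes each node only after it is
fully constructed.  A usage sketch, assuming it is compiled inside the V8
tree (the record type is made up for illustration):

    #include "v8.h"
    #include "unbound-queue-inl.h"

    struct SampleRecord { int value; };  // illustrative record type

    void QueueExample() {
      v8::internal::UnboundQueue<SampleRecord> queue;
      SampleRecord in = { 42 };
      queue.Enqueue(in);            // producer side: never blocks
      while (!queue.IsEmpty()) {    // consumer side: drain published nodes
        SampleRecord out;
        queue.Dequeue(&out);
      }
    }
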
diff --git a/src/v8natives.js b/src/v8natives.js
index 531bd0e..ed392e2 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -434,6 +434,11 @@
 }
 
 
+PropertyDescriptor.prototype.hasWritable = function() {
+  return this.hasWritable_;
+}
+
+
 PropertyDescriptor.prototype.setConfigurable = function(configurable) {
   this.configurable_ = configurable;
   this.hasConfigurable_ = true;
@@ -537,6 +542,22 @@
     throw MakeTypeError("define_disallowed", ["defineProperty"]);
 
   if (!IS_UNDEFINED(current) && !current.isConfigurable()) {
+    // Steps 5 and 6: skip further validation if every field in desc also
+    // occurs in current with the same value.
+    if ((!desc.hasEnumerable() ||
+         SameValue(desc.isEnumerable(), current.isEnumerable())) &&
+        (!desc.hasConfigurable() ||
+         SameValue(desc.isConfigurable(), current.isConfigurable())) &&
+        (!desc.hasWritable() ||
+         SameValue(desc.isWritable(), current.isWritable())) &&
+        (!desc.hasValue() ||
+         SameValue(desc.getValue(), current.getValue())) &&
+        (!desc.hasGetter() ||
+         SameValue(desc.getGet(), current.getGet())) &&
+        (!desc.hasSetter() ||
+         SameValue(desc.getSet(), current.getSet()))) {
+      return true;
+    }
+
     // Step 7
     if (desc.isConfigurable() ||  desc.isEnumerable() != current.isEnumerable())
       throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
@@ -583,7 +604,13 @@
     flag |= DONT_DELETE;
 
   if (IsDataDescriptor(desc) || IsGenericDescriptor(desc)) {
-    flag |= desc.isWritable() ? 0 : READ_ONLY;
+    if (desc.hasWritable()) {
+      flag |= desc.isWritable() ? 0 : READ_ONLY;
+    } else if (!IS_UNDEFINED(current)) {
+      flag |= current.isWritable() ? 0 : READ_ONLY;
+    } else {
+      flag |= READ_ONLY;
+    }
     %DefineOrRedefineDataProperty(obj, p, desc.getValue(), flag);
   } else {
     if (desc.hasGetter() && IS_FUNCTION(desc.getGet())) {
@@ -673,8 +700,9 @@
 // ES5 section 15.2.3.6.
 function ObjectDefineProperty(obj, p, attributes) {
   if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
-      !IS_UNDETECTABLE(obj))
+      !IS_UNDETECTABLE(obj)) {
     throw MakeTypeError("obj_ctor_property_non_object", ["defineProperty"]);
+  }
   var name = ToString(p);
   var desc = ToPropertyDescriptor(attributes);
   DefineOwnProperty(obj, name, desc, true);
diff --git a/src/version.cc b/src/version.cc
index 1bf543d..adeee59 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     2
 #define MINOR_VERSION     2
-#define BUILD_NUMBER      10
+#define BUILD_NUMBER      12
 #define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false
 
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index be7cfe0..4c69510 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -29,6 +29,7 @@
 #define V8_X64_ASSEMBLER_X64_INL_H_
 
 #include "cpu.h"
+#include "debug.h"
 #include "memory.h"
 
 namespace v8 {
@@ -229,6 +230,15 @@
 }
 
 
+int RelocInfo::target_address_size() {
+  if (IsCodedSpecially()) {
+    return Assembler::kCallTargetSize;
+  } else {
+    return Assembler::kExternalTargetSize;
+  }
+}
+
+
 void RelocInfo::set_target_address(Address target) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
   if (IsCodeTarget(rmode_)) {
@@ -320,6 +330,27 @@
       pc_ + Assembler::kPatchReturnSequenceAddressOffset);
 }
 
+
+void RelocInfo::Visit(ObjectVisitor* visitor) {
+  RelocInfo::Mode mode = rmode();
+  if (mode == RelocInfo::EMBEDDED_OBJECT) {
+    visitor->VisitPointer(target_object_address());
+  } else if (RelocInfo::IsCodeTarget(mode)) {
+    visitor->VisitCodeTarget(this);
+  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+    visitor->VisitExternalReference(target_reference_address());
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  } else if (Debug::has_break_points() &&
+             RelocInfo::IsJSReturn(mode) &&
+             IsPatchedReturnSequence()) {
+    visitor->VisitDebugTarget(this);
+#endif
+  } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+    visitor->VisitRuntimeEntry(this);
+  }
+}
+
+
 // -----------------------------------------------------------------------------
 // Implementation of Operand
 
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index fcfa8d0..9f26496 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_X64)
+
 #include "macro-assembler.h"
 #include "serialize.h"
 
@@ -237,6 +239,52 @@
 }
 
 
+Operand::Operand(const Operand& operand, int32_t offset) {
+  ASSERT(operand.len_ >= 1);
+  // Operand encodes REX ModR/M [SIB] [Disp].
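+  // ModR/M layout: mode in bits 7-6, reg in bits 5-3, r/m in bits 2-0.
+  // In the memory-operand modes (0, 1 and 2), r/m == 0b100 means a SIB
+  // byte follows.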
+  byte modrm = operand.buf_[0];
+  ASSERT(modrm < 0xC0);  // Disallow mode 3 (register target).
+  bool has_sib = ((modrm & 0x07) == 0x04);
+  byte mode = modrm & 0xC0;
+  int disp_offset = has_sib ? 2 : 1;
+  int base_reg = (has_sib ? operand.buf_[1] : modrm) & 0x07;
+  // Mode 0 with rbp/r13 as ModR/M or SIB base register always has a 32-bit
+  // displacement.
+  bool is_baseless = (mode == 0) && (base_reg == 0x05);  // No base or RIP base.
+  int32_t disp_value = 0;
+  if (mode == 0x80 || is_baseless) {
+    // Mode 2 or mode 0 with rbp/r13 as base: 32-bit displacement.
+    disp_value = *reinterpret_cast<const int32_t*>(&operand.buf_[disp_offset]);
+  } else if (mode == 0x40) {
+    // Mode 1: Byte displacement.
+    disp_value = static_cast<signed char>(operand.buf_[disp_offset]);
+  }
+
+  // Write new operand with same registers, but with modified displacement.
+  ASSERT(offset >= 0 ? disp_value + offset >= disp_value
+                     : disp_value + offset < disp_value);  // No overflow.
+  disp_value += offset;
+  rex_ = operand.rex_;
+  if (!is_int8(disp_value) || is_baseless) {
+    // Need 32 bits of displacement: mode 2, or mode 0 with rbp/r13 as base.
+    buf_[0] = (modrm & 0x3f) | (is_baseless ? 0x00 : 0x80);
+    len_ = disp_offset + 4;
+    Memory::int32_at(&buf_[disp_offset]) = disp_value;
+  } else if (disp_value != 0 || (base_reg == 0x05)) {
+    // Need 8 bits of displacement.
+    buf_[0] = (modrm & 0x3f) | 0x40;  // Mode 1.
+    len_ = disp_offset + 1;
+    buf_[disp_offset] = static_cast<byte>(disp_value);
+  } else {
+    // Need no displacement.
+    buf_[0] = (modrm & 0x3f);  // Mode 0.
+    len_ = disp_offset;
+  }
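+  // A SIB byte (scale/index/base), if present, is unaffected by the
+  // displacement change, so copy it through unchanged.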
+  if (has_sib) {
+    buf_[1] = operand.buf_[1];
+  }
+}
+
 // -----------------------------------------------------------------------------
 // Implementation of Assembler.
 
@@ -458,19 +506,36 @@
 void Assembler::arithmetic_op(byte opcode, Register reg, Register rm_reg) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  emit_rex_64(reg, rm_reg);
-  emit(opcode);
-  emit_modrm(reg, rm_reg);
+  ASSERT((opcode & 0xC6) == 2);
+  if (rm_reg.low_bits() == 4) {  // Forces SIB byte.
+    // Swap reg and rm_reg and change opcode operand order.
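+    // The paired arithmetic opcodes differ only in bit 1, the direction
+    // bit (e.g. 0x03 is ADD r64,r/m64 and 0x01 is ADD r/m64,r64), so
+    // XORing the opcode with 0x02 compensates for the swapped operands.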
+    emit_rex_64(rm_reg, reg);
+    emit(opcode ^ 0x02);
+    emit_modrm(rm_reg, reg);
+  } else {
+    emit_rex_64(reg, rm_reg);
+    emit(opcode);
+    emit_modrm(reg, rm_reg);
+  }
 }
 
 
 void Assembler::arithmetic_op_16(byte opcode, Register reg, Register rm_reg) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  emit(0x66);
-  emit_optional_rex_32(reg, rm_reg);
-  emit(opcode);
-  emit_modrm(reg, rm_reg);
+  ASSERT((opcode & 0xC6) == 2);
+  if (rm_reg.low_bits() == 4) {  // Forces SIB byte.
+    // Swap reg and rm_reg and change opcode operand order.
+    emit(0x66);
+    emit_optional_rex_32(rm_reg, reg);
+    emit(opcode ^ 0x02);
+    emit_modrm(rm_reg, reg);
+  } else {
+    emit(0x66);
+    emit_optional_rex_32(reg, rm_reg);
+    emit(opcode);
+    emit_modrm(reg, rm_reg);
+  }
 }
 
 
@@ -489,9 +554,17 @@
 void Assembler::arithmetic_op_32(byte opcode, Register reg, Register rm_reg) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  emit_optional_rex_32(reg, rm_reg);
-  emit(opcode);
-  emit_modrm(reg, rm_reg);
+  ASSERT((opcode & 0xC6) == 2);
+  if (rm_reg.low_bits() == 4) {  // Forces SIB byte.
+    // Swap reg and rm_reg and change opcode operand order.
+    emit_optional_rex_32(rm_reg, reg);
+    emit(opcode ^ 0x02);  // E.g. 0x03 -> 0x01 for ADD.
+    emit_modrm(rm_reg, reg);
+  } else {
+    emit_optional_rex_32(reg, rm_reg);
+    emit(opcode);
+    emit_modrm(reg, rm_reg);
+  }
 }
 
 
@@ -1290,9 +1363,15 @@
 void Assembler::movl(Register dst, Register src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  emit_optional_rex_32(dst, src);
-  emit(0x8B);
-  emit_modrm(dst, src);
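+  // 0x89 (MOV r/m32,r32) and 0x8B (MOV r32,r/m32) encode the same
+  // register-to-register move, so either operand order can be emitted.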
+  if (src.low_bits() == 4) {
+    emit_optional_rex_32(src, dst);
+    emit(0x89);
+    emit_modrm(src, dst);
+  } else {
+    emit_optional_rex_32(dst, src);
+    emit(0x8B);
+    emit_modrm(dst, src);
+  }
 }
 
 
@@ -1337,9 +1416,15 @@
 void Assembler::movq(Register dst, Register src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  emit_rex_64(dst, src);
-  emit(0x8B);
-  emit_modrm(dst, src);
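+  // As in movl above: 0x89 and 0x8B encode the same register-to-register
+  // move with the operand roles swapped.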
+  if (src.low_bits() == 4) {
+    emit_rex_64(src, dst);
+    emit(0x89);
+    emit_modrm(src, dst);
+  } else {
+    emit_rex_64(dst, src);
+    emit(0x8B);
+    emit_modrm(dst, src);
+  }
 }
 
 
@@ -1860,6 +1945,10 @@
     Register other = src.is(rax) ? dst : src;
     emit_rex_64(other);
     emit(0x90 | other.low_bits());
+  } else if (dst.low_bits() == 4) {
+    emit_rex_64(dst, src);
+    emit(0x87);
+    emit_modrm(dst, src);
   } else {
     emit_rex_64(src, dst);
     emit(0x87);
@@ -1885,12 +1974,18 @@
 void Assembler::testb(Register dst, Register src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  if (dst.code() > 3 || src.code() > 3) {
-    // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
-    emit_rex_32(dst, src);
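+  // TEST is symmetric in its register operands, so swapping them does not
+  // change the result.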
+  if (src.low_bits() == 4) {
+    emit_rex_32(src, dst);
+    emit(0x84);
+    emit_modrm(src, dst);
+  } else {
+    if (dst.code() > 3 || src.code() > 3) {
+      // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
+      emit_rex_32(dst, src);
+    }
+    emit(0x84);
+    emit_modrm(dst, src);
   }
-  emit(0x84);
-  emit_modrm(dst, src);
 }
 
 
@@ -1941,9 +2036,15 @@
 void Assembler::testl(Register dst, Register src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  emit_optional_rex_32(dst, src);
-  emit(0x85);
-  emit_modrm(dst, src);
+  if (src.low_bits() == 4) {
+    emit_optional_rex_32(src, dst);
+    emit(0x85);
+    emit_modrm(src, dst);
+  } else {
+    emit_optional_rex_32(dst, src);
+    emit(0x85);
+    emit_modrm(dst, src);
+  }
 }
 
 
@@ -1994,9 +2095,15 @@
 void Assembler::testq(Register dst, Register src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  emit_rex_64(dst, src);
-  emit(0x85);
-  emit_modrm(dst, src);
+  if (src.low_bits() == 4) {
+    emit_rex_64(src, dst);
+    emit(0x85);
+    emit_modrm(src, dst);
+  } else {
+    emit_rex_64(dst, src);
+    emit(0x85);
+    emit_modrm(dst, src);
+  }
 }
 
 
@@ -2739,4 +2846,16 @@
                                   1 << RelocInfo::INTERNAL_REFERENCE |
                                   1 << RelocInfo::JS_RETURN;
 
+
+bool RelocInfo::IsCodedSpecially() {
+  // The deserializer needs to know whether a pointer is specially coded.  Being
+  // specially coded on x64 means that it is a relative 32 bit address, as used
+  // by branch instructions.
+  return (1 << rmode_) & kApplyMask;
+}
+
+
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 0f06c3c..3db4d08 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -300,12 +300,16 @@
           ScaleFactor scale,
           int32_t disp);
 
+  // Offset from existing memory operand.
+  // The offset is added to the existing displacement as a 32-bit signed
+  // value; the sum must not overflow.
+  Operand(const Operand& base, int32_t offset);
+
  private:
   byte rex_;
   byte buf_[10];
   // The number of bytes in buf_.
   unsigned int len_;
-  RelocInfo::Mode rmode_;
 
   // Set the ModR/M byte without an encoded 'reg' register. The
   // register is encoded later as part of the emit_operand operation.
@@ -567,11 +571,7 @@
 
   // Arithmetics
   void addl(Register dst, Register src) {
-    if (dst.low_bits() == 4) {  // Forces SIB byte.
-      arithmetic_op_32(0x01, src, dst);
-    } else {
-      arithmetic_op_32(0x03, dst, src);
-    }
+    arithmetic_op_32(0x03, dst, src);
   }
 
   void addl(Register dst, Immediate src) {
@@ -607,11 +607,7 @@
   }
 
   void sbbl(Register dst, Register src) {
-    if (dst.low_bits() == 4) {  // Forces SIB byte if dst is base register.
-      arithmetic_op_32(0x19, src, dst);
-    } else {
-      arithmetic_op_32(0x1b, dst, src);
-    }
+    arithmetic_op_32(0x1b, dst, src);
   }
 
   void cmpb(Register dst, Immediate src) {
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index c55a4ea..8099feb 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -26,6 +26,9 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "v8.h"
+
+#if defined(V8_TARGET_ARCH_X64)
+
 #include "codegen-inl.h"
 #include "macro-assembler.h"
 
@@ -1296,3 +1299,5 @@
 }
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index d9586cc..767c33f 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_X64)
+
 #include "bootstrapper.h"
 #include "codegen-inl.h"
 #include "compiler.h"
@@ -601,9 +603,8 @@
  public:
   explicit DeferredReferenceGetKeyedValue(Register dst,
                                           Register receiver,
-                                          Register key,
-                                          bool is_global)
-      : dst_(dst), receiver_(receiver), key_(key), is_global_(is_global) {
+                                          Register key)
+      : dst_(dst), receiver_(receiver), key_(key) {
     set_comment("[ DeferredReferenceGetKeyedValue");
   }
 
@@ -616,7 +617,6 @@
   Register dst_;
   Register receiver_;
   Register key_;
-  bool is_global_;
 };
 
 
@@ -631,10 +631,7 @@
   // This means that we cannot allow test instructions after calls to
   // KeyedLoadIC stubs in other places.
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-  RelocInfo::Mode mode = is_global_
-                         ? RelocInfo::CODE_TARGET_CONTEXT
-                         : RelocInfo::CODE_TARGET;
-  __ Call(ic, mode);
+  __ Call(ic, RelocInfo::CODE_TARGET);
   // The delta from the start of the map-compare instruction to the
   // test instruction.  We use masm_-> directly here instead of the __
   // macro because the macro sometimes uses macro expansion to turn
@@ -678,11 +675,51 @@
 
 void DeferredReferenceSetKeyedValue::Generate() {
   __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
-  // Push receiver and key arguments on the stack.
-  __ push(receiver_);
-  __ push(key_);
-  // Move value argument to eax as expected by the IC stub.
-  if (!value_.is(rax)) __ movq(rax, value_);
+  // Move value, receiver, and key to registers rax, rdx, and rcx, as
+  // the IC stub expects.
+  // Move value to rax, using xchg if the receiver or key is in rax.
+  if (!value_.is(rax)) {
+    if (!receiver_.is(rax) && !key_.is(rax)) {
+      __ movq(rax, value_);
+    } else {
+      __ xchg(rax, value_);
+      // Update receiver_ and key_ if they are affected by the swap.
+      if (receiver_.is(rax)) {
+        receiver_ = value_;
+      } else if (receiver_.is(value_)) {
+        receiver_ = rax;
+      }
+      if (key_.is(rax)) {
+        key_ = value_;
+      } else if (key_.is(value_)) {
+        key_ = rax;
+      }
+    }
+  }
+  // Value is now in rax. Its original location is remembered in value_,
+  // and the value is restored to value_ before returning.
+  // The variables receiver_ and key_ are not preserved.
+  // Move receiver and key to rdx and rcx, swapping if necessary.
+  if (receiver_.is(rdx)) {
+    if (!key_.is(rcx)) {
+      __ movq(rcx, key_);
+    }  // Else everything is already in the right place.
+  } else if (receiver_.is(rcx)) {
+    if (key_.is(rdx)) {
+      __ xchg(rcx, rdx);
+    } else if (key_.is(rcx)) {
+      __ movq(rdx, receiver_);
+    } else {
+      __ movq(rdx, receiver_);
+      __ movq(rcx, key_);
+    }
+  } else if (key_.is(rcx)) {
+    __ movq(rdx, receiver_);
+  } else {
+    __ movq(rcx, key_);
+    __ movq(rdx, receiver_);
+  }
+
   // Call the IC stub.
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
   __ Call(ic, RelocInfo::CODE_TARGET);
@@ -695,11 +732,8 @@
   // Here we use masm_-> instead of the __ macro because this is the
   // instruction that gets patched and coverage code gets in the way.
   masm_->testl(rax, Immediate(-delta_to_patch_site));
-  // Restore value (returned from store IC), key and receiver
-  // registers.
+  // Restore value (returned from store IC).
   if (!value_.is(rax)) __ movq(value_, rax);
-  __ pop(key_);
-  __ pop(receiver_);
 }
 
 
@@ -1546,7 +1580,7 @@
     }
     Result var = frame_->Pop();
     var.ToRegister();
-    __ AbortIfNotSmi(var.reg(), "Non-smi value in smi-typed stack slot.");
+    __ AbortIfNotSmi(var.reg());
   }
 }
 
@@ -2799,6 +2833,7 @@
     int arg_count = args->length();
     for (int i = 0; i < arg_count; i++) {
       Load(args->at(i));
+      frame_->SpillTop();
     }
 
     // Prepare the stack for the call to ResolvePossiblyDirectEval.
@@ -2848,6 +2883,7 @@
     int arg_count = args->length();
     for (int i = 0; i < arg_count; i++) {
       Load(args->at(i));
+      frame_->SpillTop();
     }
 
     // Push the name of the function on the frame.
@@ -2953,6 +2989,7 @@
         int arg_count = args->length();
         for (int i = 0; i < arg_count; i++) {
           Load(args->at(i));
+          frame_->SpillTop();
         }
 
         // Push the name of the function onto the frame.
@@ -3399,7 +3436,11 @@
                                                   new_value.type_info());
     }
 
-    __ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
+    if (new_value.is_smi()) {
+      if (FLAG_debug_code) { __ AbortIfNotSmi(new_value.reg()); }
+    } else {
+      __ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
+    }
     if (is_increment) {
       __ SmiAddConstant(kScratchRegister,
                         new_value.reg(),
@@ -3833,11 +3874,13 @@
   __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
           Immediate(1 << Map::kIsUndetectable));
   destination()->false_target()->Branch(not_zero);
-  __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
-  destination()->false_target()->Branch(less);
-  __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
+  __ movzxbq(kScratchRegister,
+             FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
+  __ cmpq(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE));
+  destination()->false_target()->Branch(below);
+  __ cmpq(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE));
   obj.Unuse();
-  destination()->Split(less_equal);
+  destination()->Split(below_equal);
 }
 
 
@@ -3921,7 +3964,7 @@
   __ bind(&exit);
   result.set_type_info(TypeInfo::Smi());
   if (FLAG_debug_code) {
-    __ AbortIfNotSmi(result.reg(), "Computed arguments.length is not a smi.");
+    __ AbortIfNotSmi(result.reg());
   }
   frame_->Push(&result);
 }
@@ -4329,7 +4372,7 @@
   __ PrepareCallCFunction(0);
   __ CallCFunction(ExternalReference::random_uint32_function(), 0);
 
-  // Convert 32 random bits in eax to 0.(32 random bits) in a double
+  // Convert 32 random bits in rax to 0.(32 random bits) in a double
   // by computing:
   // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
   __ movl(rcx, Immediate(0x49800000));  // 1.0 x 2^20 as single.
@@ -5100,10 +5143,9 @@
   value.ToRegister();
 
   if (value.is_number()) {
-    Comment cmnt(masm_, "ONLY_NUMBER");
     // Fast case if TypeInfo indicates only numbers.
     if (FLAG_debug_code) {
-      __ AbortIfNotNumber(value.reg(), "ToBoolean operand is not a number.");
+      __ AbortIfNotNumber(value.reg());
     }
     // Smi => false iff zero.
     __ SmiCompare(value.reg(), Smi::FromInt(0));
@@ -5646,7 +5688,7 @@
                                                     slow));
           frame_->Push(&arguments);
           frame_->Push(key_literal->handle());
-          *result = EmitKeyedLoad(false);
+          *result = EmitKeyedLoad();
           frame_->Drop(2);  // Drop key and receiver.
           done->Jump(result);
         }
@@ -5876,7 +5918,7 @@
 
       if (left_side.is_smi()) {
         if (FLAG_debug_code) {
-          __ AbortIfNotSmi(left_side.reg(), "Non-smi value inferred as smi.");
+          __ AbortIfNotSmi(left_side.reg());
         }
       } else {
         Condition left_is_smi = masm_->CheckSmi(left_side.reg());
@@ -6748,8 +6790,7 @@
           Condition is_smi = masm_->CheckSmi(operand->reg());
           deferred->Branch(NegateCondition(is_smi));
         } else if (FLAG_debug_code) {
-          __ AbortIfNotSmi(operand->reg(),
-              "Static type info claims non-smi is smi in (const SHL smi).");
+          __ AbortIfNotSmi(operand->reg());
         }
 
         __ Move(answer.reg(), smi_value);
@@ -7011,7 +7052,43 @@
                                           left->reg(),
                                           rcx,
                                           overwrite_mode);
-    __ JumpIfNotBothSmi(left->reg(), rcx, deferred->entry_label());
+
+    Label do_op;
+    if (right_type_info.IsSmi()) {
+      if (FLAG_debug_code) {
+        __ AbortIfNotSmi(right->reg());
+      }
+      __ movq(answer.reg(), left->reg());
+      // If left is not known to be a smi, check if it is.
+      // If left is not known to be a number, and it isn't a smi, check if
+      // it is a HeapNumber.
+      if (!left_type_info.IsSmi()) {
+        __ JumpIfSmi(answer.reg(), &do_op);
+        if (!left_type_info.IsNumber()) {
+          // Branch if not a heapnumber.
+          __ Cmp(FieldOperand(answer.reg(), HeapObject::kMapOffset),
+                 Factory::heap_number_map());
+          deferred->Branch(not_equal);
+        }
+        // Load integer value into answer register using truncation.
+        __ cvttsd2si(answer.reg(),
+                     FieldOperand(answer.reg(), HeapNumber::kValueOffset));
+        // Branch if we might have overflowed.
+        // (False negative for Smi::kMinValue)
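+        // (cvttsd2si yields the x86 "integer indefinite" value 0x80000000
+        // when the conversion overflows, which the comparison detects.)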
+        __ cmpq(answer.reg(), Immediate(0x80000000));
+        deferred->Branch(equal);
+        // TODO(lrn): Inline shifts on int32 here instead of first smi-tagging.
+        __ Integer32ToSmi(answer.reg(), answer.reg());
+      } else {
+        // Fast case - both are actually smis.
+        if (FLAG_debug_code) {
+          __ AbortIfNotSmi(left->reg());
+        }
+      }
+    } else {
+      __ JumpIfNotBothSmi(left->reg(), rcx, deferred->entry_label());
+    }
+    __ bind(&do_op);
 
     // Perform the operation.
     switch (op) {
@@ -7106,8 +7183,89 @@
 }
 
 
-Result CodeGenerator::EmitKeyedLoad(bool is_global) {
-  Comment cmnt(masm_, "[ Load from keyed Property");
+Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
+#ifdef DEBUG
+  int original_height = frame()->height();
+#endif
+  Result result;
+  // Do not inline the inobject property case for loads from the global
+  // object.  Also do not inline for unoptimized code.  This saves time
+  // in the code generator.  Unoptimized code is toplevel code or code
+  // that is not in a loop.
+  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
+    Comment cmnt(masm(), "[ Load from named Property");
+    frame()->Push(name);
+
+    RelocInfo::Mode mode = is_contextual
+        ? RelocInfo::CODE_TARGET_CONTEXT
+        : RelocInfo::CODE_TARGET;
+    result = frame()->CallLoadIC(mode);
+    // A test rax instruction following the call signals that the
+    // inobject property case was inlined.  Ensure that there is not
+    // a test rax instruction here.
+    __ nop();
+  } else {
+    // Inline the inobject property case.
+    Comment cmnt(masm(), "[ Inlined named property load");
+    Result receiver = frame()->Pop();
+    receiver.ToRegister();
+    result = allocator()->Allocate();
+    ASSERT(result.is_valid());
+
+    // Cannot use r12 for receiver, because that changes
+    // the distance between a call and a fixup location,
+    // due to a special encoding of r12 as r/m in a ModR/M byte.
+    if (receiver.reg().is(r12)) {
+      frame()->Spill(receiver.reg());  // It will be overwritten with result.
+      // Swap receiver and value.
+      __ movq(result.reg(), receiver.reg());
+      Result temp = receiver;
+      receiver = result;
+      result = temp;
+    }
+
+    DeferredReferenceGetNamedValue* deferred =
+        new DeferredReferenceGetNamedValue(result.reg(), receiver.reg(), name);
+
+    // Check that the receiver is a heap object.
+    __ JumpIfSmi(receiver.reg(), deferred->entry_label());
+
+    __ bind(deferred->patch_site());
+    // This is the map check instruction that will be patched (so we can't
+    // use the double underscore macro that may insert instructions).
+    // Initially use an invalid map to force a failure.
+    masm()->Move(kScratchRegister, Factory::null_value());
+    masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+                 kScratchRegister);
+    // This branch is always a forwards branch so it's always a fixed
+    // size which allows the assert below to succeed and patching to work.
+    // Don't use deferred->Branch(...), since that might add coverage code.
+    masm()->j(not_equal, deferred->entry_label());
+
+    // The delta from the patch label to the load offset must be
+    // statically known.
+    ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
+           LoadIC::kOffsetToLoadInstruction);
+    // The initial (invalid) offset has to be large enough to force
+    // a 32-bit instruction encoding to allow patching with an
+    // arbitrary offset.  Use kMaxInt (minus kHeapObjectTag).
+    int offset = kMaxInt;
+    masm()->movq(result.reg(), FieldOperand(receiver.reg(), offset));
+
+    __ IncrementCounter(&Counters::named_load_inline, 1);
+    deferred->BindExit();
+    frame()->Push(&receiver);
+  }
+  ASSERT(frame()->height() == original_height);
+  return result;
+}
+
+
+Result CodeGenerator::EmitKeyedLoad() {
+#ifdef DEBUG
+  int original_height = frame()->height();
+#endif
+  Result result;
   // Inline array load code if inside of a loop.  We do not know
   // the receiver map yet, so we initially generate the code with
   // a check against an invalid map.  In the inline cache code, we
@@ -7115,34 +7273,30 @@
   if (loop_nesting() > 0) {
     Comment cmnt(masm_, "[ Inlined load from keyed Property");
 
+    // Use a fresh temporary to load the elements without destroying
+    // the receiver which is needed for the deferred slow case.
+    // Allocate the temporary early so that we use rax if it is free.
+    Result elements = allocator()->Allocate();
+    ASSERT(elements.is_valid());
+
     Result key = frame_->Pop();
     Result receiver = frame_->Pop();
     key.ToRegister();
     receiver.ToRegister();
 
-    // Use a fresh temporary to load the elements without destroying
-    // the receiver which is needed for the deferred slow case.
-    Result elements = allocator()->Allocate();
-    ASSERT(elements.is_valid());
-
-    // Use a fresh temporary for the index and later the loaded
-    // value.
+    // Use a fresh temporary for the index.
     Result index = allocator()->Allocate();
     ASSERT(index.is_valid());
 
     DeferredReferenceGetKeyedValue* deferred =
-        new DeferredReferenceGetKeyedValue(index.reg(),
+        new DeferredReferenceGetKeyedValue(elements.reg(),
                                            receiver.reg(),
-                                           key.reg(),
-                                           is_global);
+                                           key.reg());
 
-    // Check that the receiver is not a smi (only needed if this
-    // is not a load from the global context) and that it has the
-    // expected map.
-    if (!is_global) {
-      __ JumpIfSmi(receiver.reg(), deferred->entry_label());
-    }
+    __ JumpIfSmi(receiver.reg(), deferred->entry_label());
 
+    // Check that the receiver has the expected map.
     // Initially, use an invalid map. The map is patched in the IC
     // initialization code.
     __ bind(deferred->patch_site());
@@ -7173,7 +7327,6 @@
     __ cmpl(index.reg(),
             FieldOperand(elements.reg(), FixedArray::kLengthOffset));
     deferred->Branch(above_equal);
-
     // The index register holds the un-smi-tagged key. It has been
     // zero-extended to 64-bits, so it can be used directly as index in the
     // operand below.
@@ -7184,39 +7337,33 @@
     // heuristic about which register to reuse.  For example, if
     // one is rax, then we can reuse that one because the value
     // coming from the deferred code will be in rax.
-    Result value = index;
-    __ movq(value.reg(),
+    __ movq(elements.reg(),
             Operand(elements.reg(),
                     index.reg(),
                     times_pointer_size,
                     FixedArray::kHeaderSize - kHeapObjectTag));
+    result = elements;
     elements.Unuse();
     index.Unuse();
-    __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
+    __ CompareRoot(result.reg(), Heap::kTheHoleValueRootIndex);
     deferred->Branch(equal);
     __ IncrementCounter(&Counters::keyed_load_inline, 1);
 
     deferred->BindExit();
-    // Restore the receiver and key to the frame and push the
-    // result on top of it.
     frame_->Push(&receiver);
     frame_->Push(&key);
-    return value;
-
   } else {
     Comment cmnt(masm_, "[ Load from keyed Property");
-    RelocInfo::Mode mode = is_global
-        ? RelocInfo::CODE_TARGET_CONTEXT
-        : RelocInfo::CODE_TARGET;
-    Result answer = frame_->CallKeyedLoadIC(mode);
+    result = frame_->CallKeyedLoadIC(RelocInfo::CODE_TARGET);
     // Make sure that we do not have a test instruction after the
     // call.  A test instruction after the call is used to
     // indicate that we have generated an inline version of the
     // keyed load.  The explicit nop instruction is here because
     // the push that follows might be peep-hole optimized away.
     __ nop();
-    return answer;
   }
+  ASSERT(frame()->height() == original_height);
+  return result;
 }
 
 
@@ -7259,6 +7406,7 @@
       Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
       ASSERT(slot != NULL);
       cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
+      if (!persist_after_get_) set_unloaded();
       break;
     }
 
@@ -7266,101 +7414,29 @@
       Variable* var = expression_->AsVariableProxy()->AsVariable();
       bool is_global = var != NULL;
       ASSERT(!is_global || var->is_global());
-
-      // Do not inline the inobject property case for loads from the global
-      // object.  Also do not inline for unoptimized code.  This saves time
-      // in the code generator.  Unoptimized code is toplevel code or code
-      // that is not in a loop.
-      if (is_global ||
-          cgen_->scope()->is_global_scope() ||
-          cgen_->loop_nesting() == 0) {
-        Comment cmnt(masm, "[ Load from named Property");
-        cgen_->frame()->Push(GetName());
-
-        RelocInfo::Mode mode = is_global
-                               ? RelocInfo::CODE_TARGET_CONTEXT
-                               : RelocInfo::CODE_TARGET;
-        Result answer = cgen_->frame()->CallLoadIC(mode);
-        // A test rax instruction following the call signals that the
-        // inobject property case was inlined.  Ensure that there is not
-        // a test rax instruction here.
-        __ nop();
-        cgen_->frame()->Push(&answer);
-      } else {
-        // Inline the inobject property case.
-        Comment cmnt(masm, "[ Inlined named property load");
-        Result receiver = cgen_->frame()->Pop();
-        receiver.ToRegister();
-        Result value = cgen_->allocator()->Allocate();
-        ASSERT(value.is_valid());
-        // Cannot use r12 for receiver, because that changes
-        // the distance between a call and a fixup location,
-        // due to a special encoding of r12 as r/m in a ModR/M byte.
-        if (receiver.reg().is(r12)) {
-          // Swap receiver and value.
-          __ movq(value.reg(), receiver.reg());
-          Result temp = receiver;
-          receiver = value;
-          value = temp;
-          cgen_->frame()->Spill(value.reg());  // r12 may have been shared.
-        }
-
-        DeferredReferenceGetNamedValue* deferred =
-            new DeferredReferenceGetNamedValue(value.reg(),
-                                               receiver.reg(),
-                                               GetName());
-
-        // Check that the receiver is a heap object.
-        __ JumpIfSmi(receiver.reg(), deferred->entry_label());
-
-        __ bind(deferred->patch_site());
-        // This is the map check instruction that will be patched (so we can't
-        // use the double underscore macro that may insert instructions).
-        // Initially use an invalid map to force a failure.
-        masm->Move(kScratchRegister, Factory::null_value());
-        masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
-                   kScratchRegister);
-        // This branch is always a forwards branch so it's always a fixed
-        // size which allows the assert below to succeed and patching to work.
-        // Don't use deferred->Branch(...), since that might add coverage code.
-        masm->j(not_equal, deferred->entry_label());
-
-        // The delta from the patch label to the load offset must be
-        // statically known.
-        ASSERT(masm->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
-               LoadIC::kOffsetToLoadInstruction);
-        // The initial (invalid) offset has to be large enough to force
-        // a 32-bit instruction encoding to allow patching with an
-        // arbitrary offset.  Use kMaxInt (minus kHeapObjectTag).
-        int offset = kMaxInt;
-        masm->movq(value.reg(), FieldOperand(receiver.reg(), offset));
-
-        __ IncrementCounter(&Counters::named_load_inline, 1);
-        deferred->BindExit();
-        cgen_->frame()->Push(&receiver);
-        cgen_->frame()->Push(&value);
+      Result result = cgen_->EmitNamedLoad(GetName(), is_global);
+      cgen_->frame()->Push(&result);
+      if (!persist_after_get_) {
+        cgen_->UnloadReference(this);
       }
       break;
     }
 
     case KEYED: {
-      Comment cmnt(masm, "[ Load from keyed Property");
-      Variable* var = expression_->AsVariableProxy()->AsVariable();
-      bool is_global = var != NULL;
-      ASSERT(!is_global || var->is_global());
+      // A load of a bare identifier (load from global) cannot be keyed.
+      ASSERT(expression_->AsVariableProxy()->AsVariable() == NULL);
 
-      Result value = cgen_->EmitKeyedLoad(is_global);
+      Result value = cgen_->EmitKeyedLoad();
       cgen_->frame()->Push(&value);
+      if (!persist_after_get_) {
+        cgen_->UnloadReference(this);
+      }
       break;
     }
 
     default:
       UNREACHABLE();
   }
-
-  if (!persist_after_get_) {
-    cgen_->UnloadReference(this);
-  }
 }
 
 
@@ -7469,7 +7545,7 @@
         if (!key.is_smi()) {
           __ JumpIfNotSmi(key.reg(), deferred->entry_label());
         } else if (FLAG_debug_code) {
-          __ AbortIfNotSmi(key.reg(), "Non-smi value in smi-typed value.");
+          __ AbortIfNotSmi(key.reg());
         }
 
         // Check that the receiver is a JSArray.
@@ -7524,8 +7600,6 @@
 
         deferred->BindExit();
 
-        cgen_->frame()->Push(&receiver);
-        cgen_->frame()->Push(&key);
         cgen_->frame()->Push(&value);
       } else {
         Result answer = cgen_->frame()->CallKeyedStoreIC();
@@ -7536,7 +7610,7 @@
         masm->nop();
         cgen_->frame()->Push(&answer);
       }
-      cgen_->UnloadReference(this);
+      set_unloaded();
       break;
     }
 
@@ -8898,6 +8972,7 @@
   int arg_count = args->length();
   for (int i = 0; i < arg_count; i++) {
     Load(args->at(i));
+    frame_->SpillTop();
   }
 
   // Record the position for debugging purposes.
@@ -10014,8 +10089,8 @@
   if (static_operands_type_.IsSmi()) {
     // Skip smi check if we know that both arguments are smis.
     if (FLAG_debug_code) {
-      __ AbortIfNotSmi(left, "Static type check claimed non-smi is smi.");
-      __ AbortIfNotSmi(right, "Static type check claimed non-smi is smi.");
+      __ AbortIfNotSmi(left);
+      __ AbortIfNotSmi(right);
     }
     if (op_ == Token::BIT_OR) {
       // Handle OR here, since we do extra smi-checking in the or code below.
@@ -10198,8 +10273,8 @@
         // rdx: x
       if (static_operands_type_.IsNumber() && FLAG_debug_code) {
         // Assert at runtime that inputs are only numbers.
-        __ AbortIfNotNumber(rdx, "GenericBinaryOpStub operand not a number.");
-        __ AbortIfNotNumber(rax, "GenericBinaryOpStub operand not a number.");
+        __ AbortIfNotNumber(rdx);
+        __ AbortIfNotNumber(rax);
       } else {
         FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
       }
@@ -11589,3 +11664,5 @@
 #undef __
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 01bbd20..9d46583 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -449,10 +449,13 @@
   // value in place.
   void StoreToSlot(Slot* slot, InitState init_state);
 
+  // Receiver is passed on the frame and not consumed.
+  Result EmitNamedLoad(Handle<String> name, bool is_contextual);
+
   // Load a property of an object, returning it in a Result.
   // The object and the property name are passed on the stack, and
   // not changed.
-  Result EmitKeyedLoad(bool is_global);
+  Result EmitKeyedLoad();
 
   // Special code for typeof expressions: Unfortunately, we must
   // be careful when loading the expression in 'typeof'
diff --git a/src/x64/cpu-x64.cc b/src/x64/cpu-x64.cc
index cc20c58..a43a02b 100644
--- a/src/x64/cpu-x64.cc
+++ b/src/x64/cpu-x64.cc
@@ -33,6 +33,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_X64)
+
 #include "cpu.h"
 #include "macro-assembler.h"
 
@@ -77,3 +79,5 @@
 }
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc
index 5470912..89b98f1 100644
--- a/src/x64/debug-x64.cc
+++ b/src/x64/debug-x64.cc
@@ -28,6 +28,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_X64)
+
 #include "codegen-inl.h"
 #include "debug.h"
 
@@ -132,10 +134,10 @@
   // Register state for keyed IC load call (from ic-x64.cc).
   // ----------- S t a t e -------------
   //  -- rax    : value
+  //  -- rcx    : key
+  //  -- rdx    : receiver
   // -----------------------------------
-  // Register rax contains an object that needs to be pushed on the
-  // expression stack of the fake JS frame.
-  Generate_DebugBreakCallHelper(masm, rax.bit(), false);
+  Generate_DebugBreakCallHelper(masm, rax.bit() | rcx.bit() | rdx.bit(), false);
 }
 
 
@@ -216,3 +218,5 @@
 #endif  // ENABLE_DEBUGGER_SUPPORT
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index bd912cd..44ffe5f 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -30,6 +30,9 @@
 #include <stdarg.h>
 
 #include "v8.h"
+
+#if defined(V8_TARGET_ARCH_X64)
+
 #include "disasm.h"
 
 namespace disasm {
@@ -1671,3 +1674,5 @@
 }
 
 }  // namespace disasm
+
+#endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/fast-codegen-x64.cc b/src/x64/fast-codegen-x64.cc
index 5e76901..13eef03 100644
--- a/src/x64/fast-codegen-x64.cc
+++ b/src/x64/fast-codegen-x64.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_X64)
+
 #include "codegen-inl.h"
 #include "fast-codegen.h"
 #include "scopes.h"
@@ -244,3 +246,5 @@
 
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/frames-x64.cc b/src/x64/frames-x64.cc
index 6a0527c..85ebc95 100644
--- a/src/x64/frames-x64.cc
+++ b/src/x64/frames-x64.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_X64)
+
 #include "frames-inl.h"
 
 namespace v8 {
@@ -107,3 +109,5 @@
 
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index a34a94e..5bd09c2 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_X64)
+
 #include "codegen-inl.h"
 #include "compiler.h"
 #include "debug.h"
@@ -79,11 +81,17 @@
     bool function_in_register = true;
 
     // Possibly allocate a local context.
-    if (scope()->num_heap_slots() > 0) {
+    int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+    if (heap_slots > 0) {
       Comment cmnt(masm_, "[ Allocate local context");
       // Argument to NewContext is the function, which is still in rdi.
       __ push(rdi);
-      __ CallRuntime(Runtime::kNewContext, 1);
+      if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+        FastNewContextStub stub(heap_slots);
+        __ CallStub(&stub);
+      } else {
+        __ CallRuntime(Runtime::kNewContext, 1);
+      }
       function_in_register = false;
       // Context is returned in both rax and rsi.  It replaces the context
       // passed to us.  It's saved in the stack and kept live in rsi.
@@ -143,7 +151,18 @@
   }
 
   { Comment cmnt(masm_, "[ Declarations");
-    VisitDeclarations(scope()->declarations());
+    // For named function expressions, declare the function name as a
+    // constant.
+    if (scope()->is_function_scope() && scope()->function() != NULL) {
+      EmitDeclaration(scope()->function(), Variable::CONST, NULL);
+    }
+    // Visit all the explicit declarations unless there is an illegal
+    // redeclaration.
+    if (scope()->HasIllegalRedeclaration()) {
+      scope()->VisitIllegalRedeclaration(this);
+    } else {
+      VisitDeclarations(scope()->declarations());
+    }
   }
 
   { Comment cmnt(masm_, "[ Stack check");
@@ -427,6 +446,39 @@
 }
 
 
+void FullCodeGenerator::PrepareTest(Label* materialize_true,
+                                    Label* materialize_false,
+                                    Label** if_true,
+                                    Label** if_false) {
+  switch (context_) {
+    case Expression::kUninitialized:
+      UNREACHABLE();
+      break;
+    case Expression::kEffect:
+      // In an effect context, the true and the false case branch to the
+      // same label.
+      *if_true = *if_false = materialize_true;
+      break;
+    case Expression::kValue:
+      *if_true = materialize_true;
+      *if_false = materialize_false;
+      break;
+    case Expression::kTest:
+      *if_true = true_label_;
+      *if_false = false_label_;
+      break;
+    case Expression::kValueTest:
+      *if_true = materialize_true;
+      *if_false = false_label_;
+      break;
+    case Expression::kTestValue:
+      *if_true = true_label_;
+      *if_false = materialize_false;
+      break;
+  }
+}
+
+
 void FullCodeGenerator::Apply(Expression::Context context,
                               Label* materialize_true,
                               Label* materialize_false) {
@@ -492,6 +544,61 @@
 }
 
 
+// Convert constant control flow (true or false) to the result expected for
+// a given expression context.
+void FullCodeGenerator::Apply(Expression::Context context, bool flag) {
+  switch (context) {
+    case Expression::kUninitialized:
+      UNREACHABLE();
+      break;
+    case Expression::kEffect:
+      break;
+    case Expression::kValue: {
+      Heap::RootListIndex value_root_index =
+          flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+      switch (location_) {
+        case kAccumulator:
+          __ LoadRoot(result_register(), value_root_index);
+          break;
+        case kStack:
+          __ PushRoot(value_root_index);
+          break;
+      }
+      break;
+    }
+    case Expression::kTest:
+      __ jmp(flag ? true_label_ : false_label_);
+      break;
+    case Expression::kTestValue:
+      switch (location_) {
+        case kAccumulator:
+          // If value is false it's needed.
+          if (!flag) __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
+          break;
+        case kStack:
+          // If value is false it's needed.
+          if (!flag) __ PushRoot(Heap::kFalseValueRootIndex);
+          break;
+      }
+      __ jmp(flag ? true_label_ : false_label_);
+      break;
+    case Expression::kValueTest:
+      switch (location_) {
+        case kAccumulator:
+          // If value is true it's needed.
+          if (flag) __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
+          break;
+        case kStack:
+          // If value is true it's needed.
+          if (flag) __ PushRoot(Heap::kTrueValueRootIndex);
+          break;
+      }
+      __ jmp(flag ? true_label_ : false_label_);
+      break;
+  }
+}
+
+
 void FullCodeGenerator::DoTest(Expression::Context context) {
   // The value to test is in the accumulator.  If the value might be needed
   // on the stack (value/test and test/value contexts with a stack location
@@ -667,22 +774,23 @@
 }
 
 
-void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
+void FullCodeGenerator::EmitDeclaration(Variable* variable,
+                                        Variable::Mode mode,
+                                        FunctionLiteral* function) {
   Comment cmnt(masm_, "[ Declaration");
-  Variable* var = decl->proxy()->var();
-  ASSERT(var != NULL);  // Must have been resolved.
-  Slot* slot = var->slot();
-  Property* prop = var->AsProperty();
+  ASSERT(variable != NULL);  // Must have been resolved.
+  Slot* slot = variable->slot();
+  Property* prop = variable->AsProperty();
 
   if (slot != NULL) {
     switch (slot->type()) {
       case Slot::PARAMETER:
       case Slot::LOCAL:
-        if (decl->mode() == Variable::CONST) {
+        if (mode == Variable::CONST) {
           __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
           __ movq(Operand(rbp, SlotOffset(slot)), kScratchRegister);
-        } else if (decl->fun() != NULL) {
-          VisitForValue(decl->fun(), kAccumulator);
+        } else if (function != NULL) {
+          VisitForValue(function, kAccumulator);
           __ movq(Operand(rbp, SlotOffset(slot)), result_register());
         }
         break;
@@ -692,7 +800,7 @@
         // this specific context.
 
         // The variable in the decl always resides in the current context.
-        ASSERT_EQ(0, scope()->ContextChainLength(var->scope()));
+        ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
         if (FLAG_debug_code) {
           // Check if we have the correct context pointer.
           __ movq(rbx,
@@ -700,13 +808,13 @@
           __ cmpq(rbx, rsi);
           __ Check(equal, "Unexpected declaration in current context.");
         }
-        if (decl->mode() == Variable::CONST) {
+        if (mode == Variable::CONST) {
           __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
           __ movq(CodeGenerator::ContextOperand(rsi, slot->index()),
                   kScratchRegister);
           // No write barrier since the hole value is in old space.
-        } else if (decl->fun() != NULL) {
-          VisitForValue(decl->fun(), kAccumulator);
+        } else if (function != NULL) {
+          VisitForValue(function, kAccumulator);
           __ movq(CodeGenerator::ContextOperand(rsi, slot->index()),
                   result_register());
           int offset = Context::SlotOffset(slot->index());
@@ -717,21 +825,19 @@
 
       case Slot::LOOKUP: {
         __ push(rsi);
-        __ Push(var->name());
+        __ Push(variable->name());
         // Declaration nodes are always introduced in one of two modes.
-        ASSERT(decl->mode() == Variable::VAR ||
-               decl->mode() == Variable::CONST);
-        PropertyAttributes attr =
-            (decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
+        ASSERT(mode == Variable::VAR || mode == Variable::CONST);
+        PropertyAttributes attr = (mode == Variable::VAR) ? NONE : READ_ONLY;
         __ Push(Smi::FromInt(attr));
         // Push initial value, if any.
         // Note: For variables we must not push an initial value (such as
         // 'undefined') because we may have a (legal) redeclaration and we
         // must not destroy the current value.
-        if (decl->mode() == Variable::CONST) {
+        if (mode == Variable::CONST) {
           __ PushRoot(Heap::kTheHoleValueRootIndex);
-        } else if (decl->fun() != NULL) {
-          VisitForValue(decl->fun(), kStack);
+        } else if (function != NULL) {
+          VisitForValue(function, kStack);
         } else {
           __ Push(Smi::FromInt(0));  // no initial value!
         }
@@ -741,32 +847,36 @@
     }
 
   } else if (prop != NULL) {
-    if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
+    if (function != NULL || mode == Variable::CONST) {
       // We are declaring a function or constant that rewrites to a
       // property.  Use (keyed) IC to set the initial value.
       VisitForValue(prop->obj(), kStack);
-      VisitForValue(prop->key(), kStack);
-
-      if (decl->fun() != NULL) {
-        VisitForValue(decl->fun(), kAccumulator);
+      if (function != NULL) {
+        VisitForValue(prop->key(), kStack);
+        VisitForValue(function, kAccumulator);
+        __ pop(rcx);
       } else {
+        VisitForValue(prop->key(), kAccumulator);
+        __ movq(rcx, result_register());
         __ LoadRoot(result_register(), Heap::kTheHoleValueRootIndex);
       }
+      __ pop(rdx);
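+      // The keyed store IC expects the value in rax, the key in rcx, and
+      // the receiver in rdx.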
 
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
       __ call(ic, RelocInfo::CODE_TARGET);
       // Absence of a test rax instruction following the call
       // indicates that none of the load was inlined.
       __ nop();
-
-      // Value in rax is ignored (declarations are statements).  Receiver
-      // and key on stack are discarded.
-      __ Drop(2);
     }
   }
 }
 
 
+void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
+  EmitDeclaration(decl->proxy()->var(), decl->mode(), decl->fun());
+}
+
+
 void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.
   __ push(rsi);  // The context is the first argument.
@@ -777,19 +887,226 @@
 }
 
 
-void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
-  Comment cmnt(masm_, "[ FunctionLiteral");
+void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+  Comment cmnt(masm_, "[ SwitchStatement");
+  Breakable nested_statement(this, stmt);
+  SetStatementPosition(stmt);
+  // Keep the switch value on the stack until a case matches.
+  VisitForValue(stmt->tag(), kStack);
 
-  // Build the shared function info and instantiate the function based
-  // on it.
-  Handle<SharedFunctionInfo> function_info =
-      Compiler::BuildFunctionInfo(expr, script(), this);
-  if (HasStackOverflow()) return;
+  ZoneList<CaseClause*>* clauses = stmt->cases();
+  CaseClause* default_clause = NULL;  // Can occur anywhere in the list.
 
-  // Create a new closure.
-  __ push(rsi);
-  __ Push(function_info);
-  __ CallRuntime(Runtime::kNewClosure, 2);
+  Label next_test;  // Recycled for each test.
+  // Compile all the tests with branches to their bodies.
+  for (int i = 0; i < clauses->length(); i++) {
+    CaseClause* clause = clauses->at(i);
+    // The default is not a test, but remember it as final fall through.
+    if (clause->is_default()) {
+      default_clause = clause;
+      continue;
+    }
+
+    Comment cmnt(masm_, "[ Case comparison");
+    __ bind(&next_test);
+    next_test.Unuse();
+
+    // Compile the label expression.
+    VisitForValue(clause->label(), kAccumulator);
+
+    // Perform the comparison as if via '==='.  The comparison stub expects
+    // the smi vs. smi case to be handled before it is called.
+    Label slow_case;
+    __ movq(rdx, Operand(rsp, 0));  // Switch value.
+    __ JumpIfNotBothSmi(rdx, rax, &slow_case);
+    __ SmiCompare(rdx, rax);
+    __ j(not_equal, &next_test);
+    __ Drop(1);  // Switch value is no longer needed.
+    __ jmp(clause->body_target()->entry_label());
+
+    __ bind(&slow_case);
+    CompareStub stub(equal, true);
+    __ CallStub(&stub);
+    __ testq(rax, rax);
+    __ j(not_equal, &next_test);
+    __ Drop(1);  // Switch value is no longer needed.
+    __ jmp(clause->body_target()->entry_label());
+  }
+
+  // Discard the test value and jump to the default if present, otherwise to
+  // the end of the statement.
+  __ bind(&next_test);
+  __ Drop(1);  // Switch value is no longer needed.
+  if (default_clause == NULL) {
+    __ jmp(nested_statement.break_target());
+  } else {
+    __ jmp(default_clause->body_target()->entry_label());
+  }
+
+  // Compile all the case bodies.
+  for (int i = 0; i < clauses->length(); i++) {
+    Comment cmnt(masm_, "[ Case body");
+    CaseClause* clause = clauses->at(i);
+    __ bind(clause->body_target()->entry_label());
+    VisitStatements(clause->statements());
+  }
+
+  __ bind(nested_statement.break_target());
+}
+
+
+void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+  Comment cmnt(masm_, "[ ForInStatement");
+  SetStatementPosition(stmt);
+
+  Label loop, exit;
+  ForIn loop_statement(this, stmt);
+  increment_loop_depth();
+
+  // Get the object to enumerate over. Both SpiderMonkey and JSC
+  // ignore null and undefined in contrast to the specification; see
+  // ECMA-262 section 12.6.4.
+  VisitForValue(stmt->enumerable(), kAccumulator);
+  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+  __ j(equal, &exit);
+  __ CompareRoot(rax, Heap::kNullValueRootIndex);
+  __ j(equal, &exit);
+
+  // Convert the object to a JS object.
+  Label convert, done_convert;
+  __ JumpIfSmi(rax, &convert);
+  __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+  __ j(above_equal, &done_convert);
+  __ bind(&convert);
+  __ push(rax);
+  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+  __ bind(&done_convert);
+  __ push(rax);
+
+  // TODO(kasperl): Check cache validity in generated code. This is a
+  // fast case for the JSObject::IsSimpleEnum cache validity
+  // checks. If we cannot guarantee cache validity, call the runtime
+  // system to check cache validity or get the property names in a
+  // fixed array.
+
+  // Get the set of properties to enumerate.
+  __ push(rax);  // Duplicate the enumerable object on the stack.
+  __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+
+  // If we got a map from the runtime call, we can do a fast
+  // modification check. Otherwise, we got a fixed array, and we have
+  // to do a slow check.
+  Label fixed_array;
+  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+                 Heap::kMetaMapRootIndex);
+  __ j(not_equal, &fixed_array);
+
+  // We got a map in register rax. Get the enumeration cache from it.
+  __ movq(rcx, FieldOperand(rax, Map::kInstanceDescriptorsOffset));
+  __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
+  __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+  // Set up the four remaining stack slots.
+  __ push(rax);  // Map.
+  __ push(rdx);  // Enumeration cache.
+  __ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
+  __ Integer32ToSmi(rax, rax);
+  __ push(rax);  // Enumeration cache length (as smi).
+  __ Push(Smi::FromInt(0));  // Initial index.
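+  // Stack layout, from top of stack down: index, enumeration cache length,
+  // enumeration cache, map, enumerable object.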
+  __ jmp(&loop);
+
+  // We got a fixed array in register rax. Iterate through that.
+  __ bind(&fixed_array);
+  __ Push(Smi::FromInt(0));  // Map (0) - force slow check.
+  __ push(rax);
+  __ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
+  __ Integer32ToSmi(rax, rax);
+  __ push(rax);  // Fixed array length (as smi).
+  __ Push(Smi::FromInt(0));  // Initial index.
+
+  // Generate code for doing the condition check.
+  __ bind(&loop);
+  __ movq(rax, Operand(rsp, 0 * kPointerSize));  // Get the current index.
+  __ cmpq(rax, Operand(rsp, 1 * kPointerSize));  // Compare to the array length.
+  __ j(above_equal, loop_statement.break_target());
+
+  // Get the current entry of the array into register rbx.
+  __ movq(rbx, Operand(rsp, 2 * kPointerSize));
+  SmiIndex index = __ SmiToIndex(rax, rax, kPointerSizeLog2);
+  __ movq(rbx, FieldOperand(rbx,
+                            index.reg,
+                            index.scale,
+                            FixedArray::kHeaderSize));
+
+  // Get the expected map from the stack or a zero map in the
+  // permanent slow case into register rdx.
+  __ movq(rdx, Operand(rsp, 3 * kPointerSize));
+
+  // Check if the expected map still matches that of the enumerable.
+  // If not, we have to filter the key.
+  Label update_each;
+  __ movq(rcx, Operand(rsp, 4 * kPointerSize));
+  __ cmpq(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
+  __ j(equal, &update_each);
+
+  // Convert the entry to a string or null if it isn't a property
+  // anymore. If the property has been removed while iterating, we
+  // just skip it.
+  __ push(rcx);  // Enumerable.
+  __ push(rbx);  // Current entry.
+  __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
+  __ CompareRoot(rax, Heap::kNullValueRootIndex);
+  __ j(equal, loop_statement.continue_target());
+  __ movq(rbx, rax);
+
+  // Update the 'each' property or variable from the possibly filtered
+  // entry in register rbx.
+  __ bind(&update_each);
+  __ movq(result_register(), rbx);
+  // Perform the assignment as if via '='.
+  EmitAssignment(stmt->each());
+
+  // Generate code for the body of the loop.
+  Label stack_limit_hit, stack_check_done;
+  Visit(stmt->body());
+
+  __ StackLimitCheck(&stack_limit_hit);
+  __ bind(&stack_check_done);
+
+  // Generate code for going to the next element by incrementing the
+  // index (smi) stored on top of the stack.
+  __ bind(loop_statement.continue_target());
+  __ SmiAddConstant(Operand(rsp, 0 * kPointerSize), Smi::FromInt(1));
+  __ jmp(&loop);
+
+  // Slow case for the stack limit check.
+  StackCheckStub stack_check_stub;
+  __ bind(&stack_limit_hit);
+  __ CallStub(&stack_check_stub);
+  __ jmp(&stack_check_done);
+
+  // Remove the pointers stored on the stack.
+  __ bind(loop_statement.break_target());
+  __ addq(rsp, Immediate(5 * kPointerSize));
+
+  // Exit and decrement the loop depth.
+  __ bind(&exit);
+  decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info) {
+  // Use the fast case closure allocation code that allocates in new
+  // space for nested functions that don't need literals cloning.
+  if (scope()->is_function_scope() && info->num_literals() == 0) {
+    FastNewClosureStub stub;
+    __ Push(info);
+    __ CallStub(&stub);
+  } else {
+    __ push(rsi);
+    __ Push(info);
+    __ CallRuntime(Runtime::kNewClosure, 2);
+  }
   Apply(context_, rax);
 }
 
@@ -833,7 +1150,20 @@
     Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
                             ? "Context slot"
                             : "Stack slot");
-    Apply(context, slot);
+    if (var->mode() == Variable::CONST) {
+      // Constants may be the hole value if they have not been initialized.
+      // Unhole them.
+      Label done;
+      MemOperand slot_operand = EmitSlotSearch(slot, rax);
+      __ movq(rax, slot_operand);
+      __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+      __ j(not_equal, &done);
+      __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+      __ bind(&done);
+      Apply(context, rax);
+    } else {
+      Apply(context, slot);
+    }
 
   } else {
     Comment cmnt(masm_, "Rewritten parameter");
@@ -969,22 +1299,28 @@
 
 void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   Comment cmnt(masm_, "[ ArrayLiteral");
+
+  ZoneList<Expression*>* subexprs = expr->values();
+  int length = subexprs->length();
+
   __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
   __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
   __ Push(Smi::FromInt(expr->literal_index()));
   __ Push(expr->constant_elements());
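+  // Choose a clone strategy: deep literals need the full runtime call,
+  // shallow literals longer than the stub limit use the shallow runtime
+  // call, and short shallow literals use the specialized stub.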
   if (expr->depth() > 1) {
     __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
-  } else {
+  } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
     __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+  } else {
+    FastCloneShallowArrayStub stub(length);
+    __ CallStub(&stub);
   }
 
   bool result_saved = false;  // Is the result saved to the stack?
 
   // Emit code to evaluate all the non-constant subexpressions and to store
   // them into the newly cloned array.
-  ZoneList<Expression*>* subexprs = expr->values();
-  for (int i = 0, len = subexprs->length(); i < len; i++) {
+  for (int i = 0; i < length; i++) {
     Expression* subexpr = subexprs->at(i);
     // If the subexpression is a literal or a simple materialized literal it
     // is already set in the cloned array.
@@ -1019,7 +1355,13 @@
 
 void FullCodeGenerator::VisitAssignment(Assignment* expr) {
   Comment cmnt(masm_, "[ Assignment");
-  ASSERT(expr->op() != Token::INIT_CONST);
+  // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
+  // on the left-hand side.
+  if (!expr->target()->IsValidLeftHandSide()) {
+    VisitForEffect(expr->target());
+    return;
+  }
+
   // Left-hand side can only be a property, a global or a (parameter or local)
   // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
   enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
@@ -1045,8 +1387,15 @@
       }
       break;
     case KEYED_PROPERTY:
-      VisitForValue(prop->obj(), kStack);
-      VisitForValue(prop->key(), kStack);
+      if (expr->is_compound()) {
+        VisitForValue(prop->obj(), kStack);
+        VisitForValue(prop->key(), kAccumulator);
+        __ movq(rdx, Operand(rsp, 0));
+        __ push(rax);
+      } else {
+        VisitForValue(prop->obj(), kStack);
+        VisitForValue(prop->key(), kStack);
+      }
       break;
   }
 
@@ -1091,6 +1440,7 @@
   switch (assign_type) {
     case VARIABLE:
       EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+                             expr->op(),
                              context_);
       break;
     case NAMED_PROPERTY:
@@ -1132,61 +1482,130 @@
 }
 
 
+void FullCodeGenerator::EmitAssignment(Expression* expr) {
+  // Invalid left-hand sides are rewritten to have a 'throw
+  // ReferenceError' on the left-hand side.
+  if (!expr->IsValidLeftHandSide()) {
+    VisitForEffect(expr);
+    return;
+  }
+
+  // Left-hand side can only be a property, a global or a (parameter or local)
+  // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+  enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+  LhsKind assign_type = VARIABLE;
+  Property* prop = expr->AsProperty();
+  if (prop != NULL) {
+    assign_type = (prop->key()->IsPropertyName())
+        ? NAMED_PROPERTY
+        : KEYED_PROPERTY;
+  }
+
+  switch (assign_type) {
+    case VARIABLE: {
+      Variable* var = expr->AsVariableProxy()->var();
+      EmitVariableAssignment(var, Token::ASSIGN, Expression::kEffect);
+      break;
+    }
+    case NAMED_PROPERTY: {
+      __ push(rax);  // Preserve value.
+      VisitForValue(prop->obj(), kAccumulator);
+      __ movq(rdx, rax);
+      __ pop(rax);  // Restore value.
+      __ Move(rcx, prop->key()->AsLiteral()->handle());
+      Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+      __ call(ic, RelocInfo::CODE_TARGET);
+      __ nop();  // Signal no inlined code.
+      break;
+    }
+    case KEYED_PROPERTY: {
+      __ push(rax);  // Preserve value.
+      VisitForValue(prop->obj(), kStack);
+      VisitForValue(prop->key(), kStack);
+      __ movq(rax, Operand(rsp, 2 * kPointerSize));
+      Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+      __ call(ic, RelocInfo::CODE_TARGET);
+      __ nop();  // Signal no inlined code.
+      __ Drop(3);  // Receiver, key, and extra copy of value.
+      break;
+    }
+  }
+}
+
+
 void FullCodeGenerator::EmitVariableAssignment(Variable* var,
+                                               Token::Value op,
                                                Expression::Context context) {
-  // Three main cases: non-this global variables, lookup slots, and
-  // all other types of slots.  Left-hand-side parameters that rewrite
-  // to explicit property accesses do not reach here.
+  // Left-hand sides that rewrite to explicit property accesses do not reach
+  // here.
   ASSERT(var != NULL);
   ASSERT(var->is_global() || var->slot() != NULL);
-  Slot* slot = var->slot();
+
   if (var->is_global()) {
     ASSERT(!var->is_this());
     // Assignment to a global variable.  Use inline caching for the
     // assignment.  Right-hand-side value is passed in rax, variable name in
-    // rcx, and the global object in rdx.
+    // rcx, and the global object on the stack.
     __ Move(rcx, var->name());
     __ movq(rdx, CodeGenerator::GlobalObject());
     Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
     __ Call(ic, RelocInfo::CODE_TARGET);
-    Apply(context, rax);
+    __ nop();
 
-  } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
-    __ push(result_register());  // Value.
-    __ push(rsi);  // Context.
-    __ Push(var->name());
-    __ CallRuntime(Runtime::kStoreContextSlot, 3);
-    Apply(context, rax);
-
-  } else if (var->slot() != NULL) {
+  } else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
+    // Perform the assignment for non-const variables and for initialization
+    // of const variables.  Const assignments are simply skipped.
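+    // For example, in 'const x = 1; x = 2;' the second store is
+    // skipped here, and the INIT_CONST store of 1 is only performed
+    // while the slot still holds the hole.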
+    Label done;
+    Slot* slot = var->slot();
     switch (slot->type()) {
-      case Slot::LOCAL:
       case Slot::PARAMETER:
-        __ movq(Operand(rbp, SlotOffset(slot)), result_register());
+      case Slot::LOCAL:
+        if (op == Token::INIT_CONST) {
+          // Detect const reinitialization by checking for the hole value.
+          __ movq(rdx, Operand(rbp, SlotOffset(slot)));
+          __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
+          __ j(not_equal, &done);
+        }
+        // Perform the assignment.
+        __ movq(Operand(rbp, SlotOffset(slot)), rax);
         break;
 
       case Slot::CONTEXT: {
         MemOperand target = EmitSlotSearch(slot, rcx);
-        __ movq(target, result_register());
-
-        // RecordWrite may destroy all its register arguments.
-        __ movq(rdx, result_register());
+        if (op == Token::INIT_CONST) {
+          // Detect const reinitialization by checking for the hole value.
+          __ movq(rdx, target);
+          __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
+          __ j(not_equal, &done);
+        }
+        // Perform the assignment and issue the write barrier.
+        __ movq(target, rax);
+        // The value of the assignment is in rax.  RecordWrite clobbers its
+        // register arguments.
+        __ movq(rdx, rax);
         int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
         __ RecordWrite(rcx, offset, rdx, rbx);
         break;
       }
 
       case Slot::LOOKUP:
-        UNREACHABLE();
+        // Call the runtime for the assignment.  The runtime will ignore
+        // const reinitialization.
+        __ push(rax);  // Value.
+        __ push(rsi);  // Context.
+        __ Push(var->name());
+        if (op == Token::INIT_CONST) {
+          // The runtime will ignore const redeclaration.
+          __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+        } else {
+          __ CallRuntime(Runtime::kStoreContextSlot, 3);
+        }
         break;
     }
-    Apply(context, result_register());
-
-  } else {
-    // Variables rewritten as properties are not treated as variables in
-    // assignments.
-    UNREACHABLE();
+    __ bind(&done);
   }
+
+  Apply(context, rax);
 }
 
 
@@ -1245,6 +1664,12 @@
     __ pop(result_register());
   }
 
+  __ pop(rcx);
+  if (expr->ends_initialization_block()) {
+    __ movq(rdx, Operand(rsp, 0));  // Leave receiver on the stack for later.
+  } else {
+    __ pop(rdx);
+  }
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
@@ -1255,15 +1680,14 @@
 
   // If the assignment ends an initialization block, revert to fast case.
   if (expr->ends_initialization_block()) {
+    __ pop(rdx);
     __ push(rax);  // Result of assignment, saved even if not needed.
-    // Receiver is under the key and value.
-    __ push(Operand(rsp, 2 * kPointerSize));
+    __ push(rdx);
     __ CallRuntime(Runtime::kToFastProperties, 1);
     __ pop(rax);
   }
 
-  // Receiver and key are still on stack.
-  DropAndApply(2, context_, rax);
+  Apply(context_, rax);
 }
 
 
@@ -1319,7 +1743,8 @@
   }
   // Record source position for debugger.
   SetSourcePosition(expr->position());
-  CallFunctionStub stub(arg_count, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
+  InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+  CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
   __ CallStub(&stub);
   // Restore context register.
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -1334,8 +1759,47 @@
   Variable* var = fun->AsVariableProxy()->AsVariable();
 
   if (var != NULL && var->is_possibly_eval()) {
-    // Call to the identifier 'eval'.
-    UNREACHABLE();
+    // In a call to eval, we first call %ResolvePossiblyDirectEval to
+    // resolve the function we need to call and the receiver of the
+    // call.  Then we call the resolved function using the given
+    // arguments.
+    VisitForValue(fun, kStack);
+    __ PushRoot(Heap::kUndefinedValueRootIndex);  // Reserved receiver slot.
+
+    // Push the arguments.
+    ZoneList<Expression*>* args = expr->arguments();
+    int arg_count = args->length();
+    for (int i = 0; i < arg_count; i++) {
+      VisitForValue(args->at(i), kStack);
+    }
+
+    // Push a copy of the function, found below the arguments.
+    __ push(Operand(rsp, (arg_count + 1) * kPointerSize));
+
+    // Push copy of the first argument or undefined if it doesn't exist.
+    if (arg_count > 0) {
+      __ push(Operand(rsp, arg_count * kPointerSize));
+    } else {
+      __ PushRoot(Heap::kUndefinedValueRootIndex);
+    }
+
+    // Push the receiver of the enclosing function and do runtime call.
+    __ push(Operand(rbp, (2 + scope()->num_parameters()) * kPointerSize));
+    __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+
+    // The runtime call returns a pair of values in rax (function) and
+    // rdx (receiver). Touch up the stack with the right values.
+    __ movq(Operand(rsp, (arg_count + 0) * kPointerSize), rdx);
+    __ movq(Operand(rsp, (arg_count + 1) * kPointerSize), rax);
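+    // The stack now holds, from the top: the arguments, then the
+    // resolved receiver and the resolved function, which is exactly
+    // the layout CallFunctionStub expects.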
+
+    // Record source position for debugger.
+    SetSourcePosition(expr->position());
+    InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+    CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
+    __ CallStub(&stub);
+    // Restore context register.
+    __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+    DropAndApply(1, context_, rax);
   } else if (var != NULL && !var->is_this() && var->is_global()) {
     // Call to a global variable.
     // Push global object as receiver for the call IC lookup.
@@ -1343,8 +1807,15 @@
     EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
   } else if (var != NULL && var->slot() != NULL &&
              var->slot()->type() == Slot::LOOKUP) {
-    // Call to a lookup slot.
-    UNREACHABLE();
+    // Call to a lookup slot (dynamically introduced variable).  Call
+    // the runtime to find the function to call (returned in rax) and
+    // the object holding it (returned in rdx).
+    __ push(context_register());
+    __ Push(var->name());
+    __ CallRuntime(Runtime::kLoadContextSlot, 2);
+    __ push(rax);  // Function.
+    __ push(rdx);  // Receiver.
+    EmitCallWithStub(expr);
   } else if (fun->AsProperty() != NULL) {
     // Call to an object property.
     Property* prop = fun->AsProperty();
@@ -1435,7 +1906,711 @@
 }
 
 
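+// Calls to runtime functions whose names start with an underscore,
+// written %_Foo(...) in the JS natives, are handled by the matching
+// Emit* fast-path helper below rather than by a generic runtime call.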
+void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
+  Handle<String> name = expr->name();
+  if (strcmp("_IsSmi", *name->ToCString()) == 0) {
+    EmitIsSmi(expr->arguments());
+  } else if (strcmp("_IsNonNegativeSmi", *name->ToCString()) == 0) {
+    EmitIsNonNegativeSmi(expr->arguments());
+  } else if (strcmp("_IsObject", *name->ToCString()) == 0) {
+    EmitIsObject(expr->arguments());
+  } else if (strcmp("_IsUndetectableObject", *name->ToCString()) == 0) {
+    EmitIsUndetectableObject(expr->arguments());
+  } else if (strcmp("_IsFunction", *name->ToCString()) == 0) {
+    EmitIsFunction(expr->arguments());
+  } else if (strcmp("_IsArray", *name->ToCString()) == 0) {
+    EmitIsArray(expr->arguments());
+  } else if (strcmp("_IsRegExp", *name->ToCString()) == 0) {
+    EmitIsRegExp(expr->arguments());
+  } else if (strcmp("_IsConstructCall", *name->ToCString()) == 0) {
+    EmitIsConstructCall(expr->arguments());
+  } else if (strcmp("_ObjectEquals", *name->ToCString()) == 0) {
+    EmitObjectEquals(expr->arguments());
+  } else if (strcmp("_Arguments", *name->ToCString()) == 0) {
+    EmitArguments(expr->arguments());
+  } else if (strcmp("_ArgumentsLength", *name->ToCString()) == 0) {
+    EmitArgumentsLength(expr->arguments());
+  } else if (strcmp("_ClassOf", *name->ToCString()) == 0) {
+    EmitClassOf(expr->arguments());
+  } else if (strcmp("_Log", *name->ToCString()) == 0) {
+    EmitLog(expr->arguments());
+  } else if (strcmp("_RandomHeapNumber", *name->ToCString()) == 0) {
+    EmitRandomHeapNumber(expr->arguments());
+  } else if (strcmp("_SubString", *name->ToCString()) == 0) {
+    EmitSubString(expr->arguments());
+  } else if (strcmp("_RegExpExec", *name->ToCString()) == 0) {
+    EmitRegExpExec(expr->arguments());
+  } else if (strcmp("_ValueOf", *name->ToCString()) == 0) {
+    EmitValueOf(expr->arguments());
+  } else if (strcmp("_SetValueOf", *name->ToCString()) == 0) {
+    EmitSetValueOf(expr->arguments());
+  } else if (strcmp("_NumberToString", *name->ToCString()) == 0) {
+    EmitNumberToString(expr->arguments());
+  } else if (strcmp("_CharFromCode", *name->ToCString()) == 0) {
+    EmitCharFromCode(expr->arguments());
+  } else if (strcmp("_FastCharCodeAt", *name->ToCString()) == 0) {
+    EmitFastCharCodeAt(expr->arguments());
+  } else if (strcmp("_StringAdd", *name->ToCString()) == 0) {
+    EmitStringAdd(expr->arguments());
+  } else if (strcmp("_StringCompare", *name->ToCString()) == 0) {
+    EmitStringCompare(expr->arguments());
+  } else if (strcmp("_MathPow", *name->ToCString()) == 0) {
+    EmitMathPow(expr->arguments());
+  } else if (strcmp("_MathSin", *name->ToCString()) == 0) {
+    EmitMathSin(expr->arguments());
+  } else if (strcmp("_MathCos", *name->ToCString()) == 0) {
+    EmitMathCos(expr->arguments());
+  } else if (strcmp("_MathSqrt", *name->ToCString()) == 0) {
+    EmitMathSqrt(expr->arguments());
+  } else if (strcmp("_CallFunction", *name->ToCString()) == 0) {
+    EmitCallFunction(expr->arguments());
+  } else if (strcmp("_RegExpConstructResult", *name->ToCString()) == 0) {
+    EmitRegExpConstructResult(expr->arguments());
+  } else if (strcmp("_SwapElements", *name->ToCString()) == 0) {
+    EmitSwapElements(expr->arguments());
+  } else if (strcmp("_GetFromCache", *name->ToCString()) == 0) {
+    EmitGetFromCache(expr->arguments());
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ JumpIfSmi(rax, if_true);
+  __ jmp(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  Condition positive_smi = __ CheckPositiveSmi(rax);
+  __ j(positive_smi, if_true);
+  __ jmp(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ JumpIfSmi(rax, if_false);
+  __ CompareRoot(rax, Heap::kNullValueRootIndex);
+  __ j(equal, if_true);
+  __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+  // Undetectable objects behave like undefined when tested with typeof.
+  __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
+           Immediate(1 << Map::kIsUndetectable));
+  __ j(not_zero, if_false);
+  __ movzxbq(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+  __ cmpq(rbx, Immediate(FIRST_JS_OBJECT_TYPE));
+  __ j(below, if_false);
+  __ cmpq(rbx, Immediate(LAST_JS_OBJECT_TYPE));
+  __ j(below_equal, if_true);
+  __ jmp(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ JumpIfSmi(rax, if_false);
+  __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+  __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
+           Immediate(1 << Map::kIsUndetectable));
+  __ j(not_zero, if_true);
+  __ jmp(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ JumpIfSmi(rax, if_false);
+  __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
+  __ j(equal, if_true);
+  __ jmp(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ JumpIfSmi(rax, if_false);
+  __ CmpObjectType(rax, JS_ARRAY_TYPE, rbx);
+  __ j(equal, if_true);
+  __ jmp(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ JumpIfSmi(rax, if_false);
+  __ CmpObjectType(rax, JS_REGEXP_TYPE, rbx);
+  __ j(equal, if_true);
+  __ jmp(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  // Get the frame pointer for the calling frame.
+  __ movq(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+
+  // Skip the arguments adaptor frame if it exists.
+  Label check_frame_marker;
+  __ SmiCompare(Operand(rax, StandardFrameConstants::kContextOffset),
+                Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ j(not_equal, &check_frame_marker);
+  __ movq(rax, Operand(rax, StandardFrameConstants::kCallerFPOffset));
+
+  // Check the marker in the calling frame.
+  __ bind(&check_frame_marker);
+  __ SmiCompare(Operand(rax, StandardFrameConstants::kMarkerOffset),
+                Smi::FromInt(StackFrame::CONSTRUCT));
+  __ j(equal, if_true);
+  __ jmp(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 2);
+
+  // Load the two objects into registers and perform the comparison.
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ pop(rbx);
+  __ cmpq(rax, rbx);
+  __ j(equal, if_true);
+  __ jmp(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  // ArgumentsAccessStub expects the key in rdx and the formal
+  // parameter count in rax.
+  VisitForValue(args->at(0), kAccumulator);
+  __ movq(rdx, rax);
+  __ Move(rax, Smi::FromInt(scope()->num_parameters()));
+  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+  __ CallStub(&stub);
+  Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+
+  Label exit;
+  // Get the number of formal parameters.
+  __ Move(rax, Smi::FromInt(scope()->num_parameters()));
+
+  // Check if the calling frame is an arguments adaptor frame.
+  __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+  __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset),
+                Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ j(not_equal, &exit);
+
+  // Arguments adaptor case: Read the arguments length from the
+  // adaptor frame.
+  __ movq(rax, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+  __ bind(&exit);
+  if (FLAG_debug_code) __ AbortIfNotSmi(rax);
+  Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+  Label done, null, function, non_function_constructor;
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  // If the object is a smi, we return null.
+  __ JumpIfSmi(rax, &null);
+
+  // Check that the object is a JS object but take special care of JS
+  // functions to make sure they have 'Function' as their class.
+  __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);  // Map is now in rax.
+  __ j(below, &null);
+
+  // As long as JS_FUNCTION_TYPE is the last instance type and it is
+  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+  // LAST_JS_OBJECT_TYPE.
+  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+  __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
+  __ j(equal, &function);
+
+  // Check if the constructor in the map is a function.
+  __ movq(rax, FieldOperand(rax, Map::kConstructorOffset));
+  __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
+  __ j(not_equal, &non_function_constructor);
+
+  // rax now contains the constructor function. Grab the
+  // instance class name from there.
+  __ movq(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
+  __ movq(rax, FieldOperand(rax, SharedFunctionInfo::kInstanceClassNameOffset));
+  __ jmp(&done);
+
+  // Functions have class 'Function'.
+  __ bind(&function);
+  __ Move(rax, Factory::function_class_symbol());
+  __ jmp(&done);
+
+  // Objects with a non-function constructor have class 'Object'.
+  __ bind(&non_function_constructor);
+  __ Move(rax, Factory::Object_symbol());
+  __ jmp(&done);
+
+  // Non-JS objects have class null.
+  __ bind(&null);
+  __ LoadRoot(rax, Heap::kNullValueRootIndex);
+
+  // All done.
+  __ bind(&done);
+
+  Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+  // Conditionally generate a log call.
+  // Args:
+  //   0 (literal string): The type of logging (corresponds to the flags).
+  //     This is used to determine whether or not to generate the log call.
+  //   1 (string): Format string.  Access the string at argument index 2
+  //     with '%2s' (see Logger::LogRuntime for all the formats).
+  //   2 (array): Arguments to the format string.
+  ASSERT_EQ(args->length(), 3);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
+    VisitForValue(args->at(1), kStack);
+    VisitForValue(args->at(2), kStack);
+    __ CallRuntime(Runtime::kLog, 2);
+  }
+#endif
+  // Finally, we're expected to leave a value on the top of the stack.
+  __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+  Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+
+  Label slow_allocate_heapnumber;
+  Label heapnumber_allocated;
+
+  __ AllocateHeapNumber(rbx, rcx, &slow_allocate_heapnumber);
+  __ jmp(&heapnumber_allocated);
+
+  __ bind(&slow_allocate_heapnumber);
+  // To allocate a heap number and ensure that it is not a smi, we
+  // call the runtime function %NumberUnaryMinus on 0, returning the
+  // double -0.0.  A new, distinct heap number is returned each time.
+  __ Push(Smi::FromInt(0));
+  __ CallRuntime(Runtime::kNumberUnaryMinus, 1);
+  __ movq(rbx, rax);
+
+  __ bind(&heapnumber_allocated);
+
+  // Return a random uint32 number in rax.
+  // The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs.
+  __ PrepareCallCFunction(0);
+  __ CallCFunction(ExternalReference::random_uint32_function(), 0);
+
+  // Convert 32 random bits in rax to 0.(32 random bits) in a double
+  // by computing:
+  // (1.(20 0s)(32 random bits) x 2^20) - (1.0 x 2^20).
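+  // E.g. xoring the random bits r into the low word of 2^20 (bit
+  // pattern 0x4130000000000000) gives the double 1.(20 0s)(r) x 2^20;
+  // subtracting 2^20 again leaves r x 2^-32, a value in [0, 1).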
+  __ movl(rcx, Immediate(0x49800000));  // 1.0 x 2^20 as single.
+  __ movd(xmm1, rcx);
+  __ movd(xmm0, rax);
+  __ cvtss2sd(xmm1, xmm1);
+  __ xorpd(xmm0, xmm1);
+  __ subsd(xmm0, xmm1);
+  __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
+
+  __ movq(rax, rbx);
+  Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+  // Load the arguments on the stack and call the stub.
+  SubStringStub stub;
+  ASSERT(args->length() == 3);
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+  VisitForValue(args->at(2), kStack);
+  __ CallStub(&stub);
+  Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+  // Load the arguments on the stack and call the stub.
+  RegExpExecStub stub;
+  ASSERT(args->length() == 4);
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+  VisitForValue(args->at(2), kStack);
+  VisitForValue(args->at(3), kStack);
+  __ CallStub(&stub);
+  Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);  // Load the object.
+
+  Label done;
+  // If the object is a smi return the object.
+  __ JumpIfSmi(rax, &done);
+  // If the object is not a value type, return the object.
+  __ CmpObjectType(rax, JS_VALUE_TYPE, rbx);
+  __ j(not_equal, &done);
+  __ movq(rax, FieldOperand(rax, JSValue::kValueOffset));
+
+  __ bind(&done);
+  Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
+  // Load the arguments on the stack and call the runtime function.
+  ASSERT(args->length() == 2);
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+  __ CallRuntime(Runtime::kMath_pow, 2);
+  Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 2);
+
+  VisitForValue(args->at(0), kStack);  // Load the object.
+  VisitForValue(args->at(1), kAccumulator);  // Load the value.
+  __ pop(rbx);  // rax = value.  rbx = object.
+
+  Label done;
+  // If the object is a smi, return the value.
+  __ JumpIfSmi(rbx, &done);
+
+  // If the object is not a value type, return the value.
+  __ CmpObjectType(rbx, JS_VALUE_TYPE, rcx);
+  __ j(not_equal, &done);
+
+  // Store the value.
+  __ movq(FieldOperand(rbx, JSValue::kValueOffset), rax);
+  // Update the write barrier.  Save the value as it will be
+  // overwritten by the write barrier code and is needed afterward.
+  __ movq(rdx, rax);
+  __ RecordWrite(rbx, JSValue::kValueOffset, rdx, rcx);
+
+  __ bind(&done);
+  Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+  ASSERT_EQ(args->length(), 1);
+
+  // Load the argument on the stack and call the stub.
+  VisitForValue(args->at(0), kStack);
+
+  NumberToStringStub stub;
+  __ CallStub(&stub);
+  Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitCharFromCode(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label slow_case, done;
+  // Fast case of Heap::LookupSingleCharacterStringFromCode.
+  __ JumpIfNotSmi(rax, &slow_case);
+  __ SmiToInteger32(rcx, rax);
+  __ cmpl(rcx, Immediate(String::kMaxAsciiCharCode));
+  __ j(above, &slow_case);
+
+  __ Move(rbx, Factory::single_character_string_cache());
+  __ movq(rbx, FieldOperand(rbx,
+                            rcx,
+                            times_pointer_size,
+                            FixedArray::kHeaderSize));
+
+  __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
+  __ j(equal, &slow_case);
+  __ movq(rax, rbx);
+  __ jmp(&done);
+
+  __ bind(&slow_case);
+  __ push(rax);
+  __ CallRuntime(Runtime::kCharFromCode, 1);
+
+  __ bind(&done);
+  Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitFastCharCodeAt(ZoneList<Expression*>* args) {
+  // TODO(fsc): Port the complete implementation from the classic back-end.
+  // Move the undefined value into the result register, which will
+  // trigger the slow case.
+  __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+  Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+  ASSERT_EQ(2, args->length());
+
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+
+  StringAddStub stub(NO_STRING_ADD_FLAGS);
+  __ CallStub(&stub);
+  Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+  ASSERT_EQ(2, args->length());
+
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+
+  StringCompareStub stub;
+  __ CallStub(&stub);
+  Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+  // Load the argument on the stack and call the stub.
+  TranscendentalCacheStub stub(TranscendentalCache::SIN);
+  ASSERT(args->length() == 1);
+  VisitForValue(args->at(0), kStack);
+  __ CallStub(&stub);
+  Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+  // Load the argument on the stack and call the stub.
+  TranscendentalCacheStub stub(TranscendentalCache::COS);
+  ASSERT(args->length() == 1);
+  VisitForValue(args->at(0), kStack);
+  __ CallStub(&stub);
+  Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+  // Load the argument on the stack and call the runtime function.
+  ASSERT(args->length() == 1);
+  VisitForValue(args->at(0), kStack);
+  __ CallRuntime(Runtime::kMath_sqrt, 1);
+  Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+  ASSERT(args->length() >= 2);
+
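+  // The call is %_CallFunction(receiver, arg1, ..., argN, function):
+  // the last argument is the function to invoke, and everything before
+  // it becomes the receiver and arguments of the invocation.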
+  int arg_count = args->length() - 2;  // For receiver and function.
+  VisitForValue(args->at(0), kStack);  // Receiver.
+  for (int i = 0; i < arg_count; i++) {
+    VisitForValue(args->at(i + 1), kStack);
+  }
+  VisitForValue(args->at(arg_count + 1), kAccumulator);  // Function.
+
+  // InvokeFunction requires the function in rdi.  Move it there.
+  if (!result_register().is(rdi)) __ movq(rdi, result_register());
+  ParameterCount count(arg_count);
+  __ InvokeFunction(rdi, count, CALL_FUNCTION);
+  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 3);
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+  VisitForValue(args->at(2), kStack);
+  __ CallRuntime(Runtime::kRegExpConstructResult, 3);
+  Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 3);
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+  VisitForValue(args->at(2), kStack);
+  __ CallRuntime(Runtime::kSwapElements, 3);
+  Apply(context_, rax);
+}
+
+
+void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
+  ASSERT_EQ(2, args->length());
+
+  ASSERT_NE(NULL, args->at(0)->AsLiteral());
+  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
+
+  Handle<FixedArray> jsfunction_result_caches(
+      Top::global_context()->jsfunction_result_caches());
+  if (jsfunction_result_caches->length() <= cache_id) {
+    __ Abort("Attempt to use undefined cache.");
+    __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+    Apply(context_, rax);
+    return;
+  }
+
+  VisitForValue(args->at(1), kAccumulator);
+
+  Register key = rax;
+  Register cache = rbx;
+  Register tmp = rcx;
+  __ movq(cache, CodeGenerator::ContextOperand(rsi, Context::GLOBAL_INDEX));
+  __ movq(cache,
+          FieldOperand(cache, GlobalObject::kGlobalContextOffset));
+  __ movq(cache,
+          CodeGenerator::ContextOperand(
+              cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
+  __ movq(cache,
+          FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
+
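+  // A function result cache is a FixedArray holding interleaved
+  // key/value pairs together with a finger field that remembers the
+  // last hit; the fast path below only probes the entry at the finger.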
+  Label done, not_found;
+  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  // Load the cache's finger offset (a smi) into tmp.
+  __ movq(tmp, FieldOperand(cache, JSFunctionResultCache::kFingerOffset));
+  SmiIndex index =
+      __ SmiToIndex(kScratchRegister, tmp, kPointerSizeLog2);
+  __ cmpq(key, FieldOperand(cache,
+                            index.reg,
+                            index.scale,
+                            FixedArray::kHeaderSize));
+  __ j(not_equal, &not_found);
+  __ movq(rax, FieldOperand(cache,
+                            index.reg,
+                            index.scale,
+                            FixedArray::kHeaderSize + kPointerSize));
+  __ jmp(&done);
+
+  __ bind(&not_found);
+  // Call runtime to perform the lookup.
+  __ push(cache);
+  __ push(key);
+  __ CallRuntime(Runtime::kGetFromCache, 2);
+
+  __ bind(&done);
+  Apply(context_, rax);
+}
+
+
 void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+  Handle<String> name = expr->name();
+  if (name->length() > 0 && name->Get(0) == '_') {
+    Comment cmnt(masm_, "[ InlineRuntimeCall");
+    EmitInlineRuntimeCall(expr);
+    return;
+  }
+
   Comment cmnt(masm_, "[ CallRuntime");
   ZoneList<Expression*>* args = expr->arguments();
 
@@ -1468,6 +2643,46 @@
 
 void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
   switch (expr->op()) {
+    case Token::DELETE: {
+      Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
+      Property* prop = expr->expression()->AsProperty();
+      Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
+      if (prop == NULL && var == NULL) {
+        // Result of deleting non-property, non-variable reference is true.
+        // The subexpression may have side effects.
+        VisitForEffect(expr->expression());
+        Apply(context_, true);
+      } else if (var != NULL &&
+                 !var->is_global() &&
+                 var->slot() != NULL &&
+                 var->slot()->type() != Slot::LOOKUP) {
+        // Result of deleting non-global, non-dynamic variables is false.
+        // The subexpression does not have side effects.
+        Apply(context_, false);
+      } else {
+        // Property or variable reference.  Call the delete builtin with
+        // object and property name as arguments.
+        if (prop != NULL) {
+          VisitForValue(prop->obj(), kStack);
+          VisitForValue(prop->key(), kStack);
+        } else if (var->is_global()) {
+          __ push(CodeGenerator::GlobalObject());
+          __ Push(var->name());
+        } else {
+          // Non-global variable.  Call the runtime to look up the context
+          // where the variable was introduced.
+          __ push(context_register());
+          __ Push(var->name());
+          __ CallRuntime(Runtime::kLookupContext, 2);
+          __ push(rax);
+          __ Push(var->name());
+        }
+        __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+        Apply(context_, rax);
+      }
+      break;
+    }
+
     case Token::VOID: {
       Comment cmnt(masm_, "[ UnaryOperation (VOID)");
       VisitForEffect(expr->expression());
@@ -1508,33 +2723,15 @@
 
     case Token::NOT: {
       Comment cmnt(masm_, "[ UnaryOperation (NOT)");
-      Label materialize_true, materialize_false, done;
-      // Initially assume a pure test context.  Notice that the labels are
-      // swapped.
-      Label* if_true = false_label_;
-      Label* if_false = true_label_;
-      switch (context_) {
-        case Expression::kUninitialized:
-          UNREACHABLE();
-          break;
-        case Expression::kEffect:
-          if_true = &done;
-          if_false = &done;
-          break;
-        case Expression::kValue:
-          if_true = &materialize_false;
-          if_false = &materialize_true;
-          break;
-        case Expression::kTest:
-          break;
-        case Expression::kValueTest:
-          if_false = &materialize_true;
-          break;
-        case Expression::kTestValue:
-          if_true = &materialize_false;
-          break;
-      }
+      Label materialize_true, materialize_false;
+      Label* if_true = NULL;
+      Label* if_false = NULL;
+
+      // Notice that the labels are swapped.
+      PrepareTest(&materialize_true, &materialize_false, &if_false, &if_true);
+
       VisitForControl(expr->expression(), if_true, if_false);
+
       Apply(context_, if_false, if_true);  // Labels swapped.
       break;
     }
@@ -1630,6 +2827,13 @@
 void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
   Comment cmnt(masm_, "[ CountOperation");
 
+  // Invalid left-hand-sides are rewritten to have a 'throw
+  // ReferenceError' as the left-hand side.
+  if (!expr->expression()->IsValidLeftHandSide()) {
+    VisitForEffect(expr->expression());
+    return;
+  }
+
   // Expression can only be a property, a global or a (parameter or local)
   // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
   enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
@@ -1650,7 +2854,7 @@
     EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
                      Expression::kValue);
     location_ = saved_location;
-  } else  {
+  } else {
     // Reserve space for result of postfix operation.
     if (expr->is_postfix() && context_ != Expression::kEffect) {
       __ Push(Smi::FromInt(0));
@@ -1735,7 +2939,9 @@
   switch (assign_type) {
     case VARIABLE:
       if (expr->is_postfix()) {
+        // Perform the assignment as if via '='.
         EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+                               Token::ASSIGN,
                                Expression::kEffect);
         // For all contexts except kEffect: We have the result on
         // top of the stack.
@@ -1743,7 +2949,9 @@
           ApplyTOS(context_);
         }
       } else {
+        // Perform the assignment as if via '='.
         EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+                               Token::ASSIGN,
                                context_);
       }
       break;
@@ -1765,18 +2973,19 @@
       break;
     }
     case KEYED_PROPERTY: {
+      __ pop(rcx);
+      __ pop(rdx);
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
       __ call(ic, RelocInfo::CODE_TARGET);
       // This nop signals to the IC that there is no inlined code at the call
       // site for it to patch.
       __ nop();
       if (expr->is_postfix()) {
-        __ Drop(2);  // Result is on the stack under the key and the receiver.
         if (context_ != Expression::kEffect) {
           ApplyTOS(context_);
         }
       } else {
-        DropAndApply(2, context_, rax);
+        Apply(context_, rax);
       }
       break;
     }
@@ -1818,36 +3027,39 @@
 }
 
 
+void FullCodeGenerator::EmitNullCompare(bool strict,
+                                        Register obj,
+                                        Register null_const,
+                                        Label* if_true,
+                                        Label* if_false,
+                                        Register scratch) {
+  __ cmpq(obj, null_const);
+  if (strict) {
+    __ j(equal, if_true);
+  } else {
+    __ j(equal, if_true);
+    __ CompareRoot(obj, Heap::kUndefinedValueRootIndex);
+    __ j(equal, if_true);
+    __ JumpIfSmi(obj, if_false);
+    // It can be an undetectable object.
+    __ movq(scratch, FieldOperand(obj, HeapObject::kMapOffset));
+    __ testb(FieldOperand(scratch, Map::kBitFieldOffset),
+             Immediate(1 << Map::kIsUndetectable));
+    __ j(not_zero, if_true);
+  }
+  __ jmp(if_false);
+}
+
+
 void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
   Comment cmnt(masm_, "[ CompareOperation");
 
   // Always perform the comparison for its control flow.  Pack the result
   // into the expression's context after the comparison is performed.
-  Label materialize_true, materialize_false, done;
-  // Initially assume we are in a test context.
-  Label* if_true = true_label_;
-  Label* if_false = false_label_;
-  switch (context_) {
-    case Expression::kUninitialized:
-      UNREACHABLE();
-      break;
-    case Expression::kEffect:
-      if_true = &done;
-      if_false = &done;
-      break;
-    case Expression::kValue:
-      if_true = &materialize_true;
-      if_false = &materialize_false;
-      break;
-    case Expression::kTest:
-      break;
-    case Expression::kValueTest:
-      if_true = &materialize_true;
-      break;
-    case Expression::kTestValue:
-      if_false = &materialize_false;
-      break;
-  }
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
 
   VisitForValue(expr->left(), kStack);
   switch (expr->op()) {
@@ -1877,10 +3089,24 @@
         case Token::EQ_STRICT:
           strict = true;
           // Fall through.
-        case Token::EQ:
+        case Token::EQ: {
           cc = equal;
           __ pop(rdx);
+          // If either operand is constant null, we do a fast compare
+          // against null.
+          Literal* right_literal = expr->right()->AsLiteral();
+          Literal* left_literal = expr->left()->AsLiteral();
+          if (right_literal != NULL && right_literal->handle()->IsNull()) {
+            EmitNullCompare(strict, rdx, rax, if_true, if_false, rcx);
+            Apply(context_, if_true, if_false);
+            return;
+          } else if (left_literal != NULL && left_literal->handle()->IsNull()) {
+            EmitNullCompare(strict, rax, rdx, if_true, if_false, rcx);
+            Apply(context_, if_true, if_false);
+            return;
+          }
           break;
+        }
         case Token::LT:
           cc = less;
           __ pop(rdx);
@@ -1991,3 +3217,5 @@
 
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 88fcfd1..8766ebb 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_X64)
+
 #include "codegen-inl.h"
 #include "ic-inl.h"
 #include "runtime.h"
@@ -778,16 +780,16 @@
 void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax     : value
+  //  -- rcx     : key
+  //  -- rdx     : receiver
   //  -- rsp[0]  : return address
-  //  -- rsp[8]  : key
-  //  -- rsp[16] : receiver
   // -----------------------------------
 
-  __ pop(rcx);
-  __ push(Operand(rsp, 1 * kPointerSize));  // receiver
-  __ push(Operand(rsp, 1 * kPointerSize));  // key
+  __ pop(rbx);
+  __ push(rdx);  // receiver
+  __ push(rcx);  // key
   __ push(rax);  // value
-  __ push(rcx);  // return address
+  __ push(rbx);  // return address
 
   // Do tail-call to runtime routine.
   ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss));
@@ -798,16 +800,16 @@
 void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax     : value
+  //  -- rcx     : key
+  //  -- rdx     : receiver
   //  -- rsp[0]  : return address
-  //  -- rsp[8]  : key
-  //  -- rsp[16] : receiver
   // -----------------------------------
 
-  __ pop(rcx);
-  __ push(Operand(rsp, 1 * kPointerSize));  // receiver
-  __ push(Operand(rsp, 1 * kPointerSize));  // key
+  __ pop(rbx);
+  __ push(rdx);  // receiver
+  __ push(rcx);  // key
   __ push(rax);  // value
-  __ push(rcx);  // return address
+  __ push(rbx);  // return address
 
   // Do tail-call to runtime routine.
   __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
@@ -816,50 +818,46 @@
 
 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- rax    : value
-  //  -- rsp[0] : return address
-  //  -- rsp[8] : key
-  //  -- rsp[16] : receiver
+  //  -- rax     : value
+  //  -- rcx     : key
+  //  -- rdx     : receiver
+  //  -- rsp[0]  : return address
   // -----------------------------------
   Label slow, fast, array, extra, check_pixel_array;
 
-  // Get the receiver from the stack.
-  __ movq(rdx, Operand(rsp, 2 * kPointerSize));  // 2 ~ return address, key
   // Check that the object isn't a smi.
   __ JumpIfSmi(rdx, &slow);
   // Get the map from the receiver.
-  __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+  __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
   // Check that the receiver does not require access checks.  We need
   // to do this because this generic stub does not perform map checks.
-  __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+  __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
            Immediate(1 << Map::kIsAccessCheckNeeded));
   __ j(not_zero, &slow);
-  // Get the key from the stack.
-  __ movq(rbx, Operand(rsp, 1 * kPointerSize));  // 1 ~ return address
   // Check that the key is a smi.
-  __ JumpIfNotSmi(rbx, &slow);
+  __ JumpIfNotSmi(rcx, &slow);
 
-  __ CmpInstanceType(rcx, JS_ARRAY_TYPE);
+  __ CmpInstanceType(rbx, JS_ARRAY_TYPE);
   __ j(equal, &array);
   // Check that the object is some kind of JS object.
-  __ CmpInstanceType(rcx, FIRST_JS_OBJECT_TYPE);
+  __ CmpInstanceType(rbx, FIRST_JS_OBJECT_TYPE);
   __ j(below, &slow);
 
   // Object case: Check key against length in the elements array.
   // rax: value
   // rdx: JSObject
-  // rbx: index (as a smi)
-  __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+  // rcx: index (as a smi)
+  __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
   // Check that the object is in fast mode (not dictionary).
-  __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+  __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
                  Heap::kFixedArrayMapRootIndex);
   __ j(not_equal, &check_pixel_array);
   // Untag the key (for checking against untagged length in the fixed array).
-  __ SmiToInteger32(rdx, rbx);
-  __ cmpl(rdx, FieldOperand(rcx, Array::kLengthOffset));
+  __ SmiToInteger32(rdi, rcx);
+  __ cmpl(rdi, FieldOperand(rbx, Array::kLengthOffset));
   // rax: value
-  // rcx: FixedArray
-  // rbx: index (as a smi)
+  // rbx: FixedArray
+  // rcx: index (as a smi)
   __ j(below, &fast);
 
   // Slow case: call runtime.
@@ -868,31 +866,31 @@
 
   // Check whether the elements is a pixel array.
   // rax: value
-  // rcx: elements array
-  // rbx: index (as a smi), zero-extended.
+  // rdx: receiver
+  // rbx: receiver's elements array
+  // rcx: index (as a smi), zero-extended.
   __ bind(&check_pixel_array);
-  __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+  __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
                  Heap::kPixelArrayMapRootIndex);
   __ j(not_equal, &slow);
   // Check that the value is a smi. If a conversion is needed call into the
   // runtime to convert and clamp.
   __ JumpIfNotSmi(rax, &slow);
-  __ SmiToInteger32(rbx, rbx);
-  __ cmpl(rbx, FieldOperand(rcx, PixelArray::kLengthOffset));
+  __ SmiToInteger32(rdi, rcx);
+  __ cmpl(rdi, FieldOperand(rbx, PixelArray::kLengthOffset));
   __ j(above_equal, &slow);
-  __ movq(rdx, rax);  // Save the value.
-  __ SmiToInteger32(rax, rax);
+  // No more bailouts to slow case on this path, so key not needed.
+  __ SmiToInteger32(rcx, rax);
   {  // Clamp the value to [0..255].
     Label done;
-    __ testl(rax, Immediate(0xFFFFFF00));
+    __ testl(rcx, Immediate(0xFFFFFF00));
     __ j(zero, &done);
-    __ setcc(negative, rax);  // 1 if negative, 0 if positive.
-    __ decb(rax);  // 0 if negative, 255 if positive.
+    __ setcc(negative, rcx);  // 1 if negative, 0 if positive.
+    __ decb(rcx);  // 0 if negative, 255 if positive.
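+    // E.g. 300 has high bits set but a clear sign bit, so setcc yields
+    // 0 and decb wraps it to 255; -5 sets the sign bit, so setcc
+    // yields 1 and decb produces 0.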
     __ bind(&done);
   }
-  __ movq(rcx, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
-  __ movb(Operand(rcx, rbx, times_1, 0), rax);
-  __ movq(rax, rdx);  // Return the original value.
+  __ movq(rbx, FieldOperand(rbx, PixelArray::kExternalPointerOffset));
+  __ movb(Operand(rbx, rdi, times_1, 0), rcx);
   __ ret(0);
 
   // Extra capacity case: Check if there is extra capacity to
@@ -900,18 +898,17 @@
   // element to the array by writing to array[array.length].
   __ bind(&extra);
   // rax: value
-  // rdx: JSArray
-  // rcx: FixedArray
-  // rbx: index (as a smi)
+  // rdx: receiver (a JSArray)
+  // rbx: receiver's elements array (a FixedArray)
+  // rcx: index (as a smi)
   // flags: smicompare (rdx.length(), rcx)
   __ j(not_equal, &slow);  // do not leave holes in the array
-  __ SmiToInteger64(rbx, rbx);
-  __ cmpl(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+  __ SmiToInteger64(rdi, rcx);
+  __ cmpl(rdi, FieldOperand(rbx, FixedArray::kLengthOffset));
   __ j(above_equal, &slow);
   // Increment and restore smi-tag.
-  __ Integer64PlusConstantToSmi(rbx, rbx, 1);
-  __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rbx);
-  __ SmiSubConstant(rbx, rbx, Smi::FromInt(1));
+  __ Integer64PlusConstantToSmi(rdi, rdi, 1);
+  __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
   __ jmp(&fast);
 
   // Array case: Get the length and the elements array from the JS
@@ -919,39 +916,39 @@
   // length is always a smi.
   __ bind(&array);
   // rax: value
-  // rdx: JSArray
-  // rbx: index (as a smi)
-  __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
-  __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+  // rdx: receiver (a JSArray)
+  // rcx: index (as a smi)
+  __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
                  Heap::kFixedArrayMapRootIndex);
   __ j(not_equal, &slow);
 
   // Check the key against the length in the array, compute the
   // address to store into and fall through to fast case.
-  __ SmiCompare(FieldOperand(rdx, JSArray::kLengthOffset), rbx);
+  __ SmiCompare(FieldOperand(rdx, JSArray::kLengthOffset), rcx);
   __ j(below_equal, &extra);
 
   // Fast case: Do the store.
   __ bind(&fast);
   // rax: value
-  // rcx: FixedArray
-  // rbx: index (as a smi)
+  // rbx: receiver's elements array (a FixedArray)
+  // rcx: index (as a smi)
   Label non_smi_value;
   __ JumpIfNotSmi(rax, &non_smi_value);
-  SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
-  __ movq(Operand(rcx, index.reg, index.scale,
+  SmiIndex index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
+  __ movq(Operand(rbx, index.reg, index.scale,
                   FixedArray::kHeaderSize - kHeapObjectTag),
           rax);
   __ ret(0);
   __ bind(&non_smi_value);
-  // Slow case that needs to retain rbx for use by RecordWrite.
+  // Slow case that needs to retain rcx for use by RecordWrite.
   // Update write barrier for the elements array address.
-  SmiIndex index2 = masm->SmiToIndex(kScratchRegister, rbx, kPointerSizeLog2);
-  __ movq(Operand(rcx, index2.reg, index2.scale,
+  SmiIndex index2 = masm->SmiToIndex(kScratchRegister, rcx, kPointerSizeLog2);
+  __ movq(Operand(rbx, index2.reg, index2.scale,
                   FixedArray::kHeaderSize - kHeapObjectTag),
           rax);
   __ movq(rdx, rax);
-  __ RecordWriteNonSmi(rcx, 0, rdx, rbx);
+  __ RecordWriteNonSmi(rbx, 0, rdx, rcx);
   __ ret(0);
 }
 
@@ -959,102 +956,103 @@
 void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
                                          ExternalArrayType array_type) {
   // ----------- S t a t e -------------
-  //  -- rax    : value
-  //  -- rsp[0] : return address
-  //  -- rsp[8] : key
-  //  -- rsp[16] : receiver
+  //  -- rax     : value
+  //  -- rcx     : key
+  //  -- rdx     : receiver
+  //  -- rsp[0]  : return address
   // -----------------------------------
   Label slow, check_heap_number;
 
-  // Get the receiver from the stack.
-  __ movq(rdx, Operand(rsp, 2 * kPointerSize));
   // Check that the object isn't a smi.
   __ JumpIfSmi(rdx, &slow);
   // Get the map from the receiver.
-  __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+  __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
   // Check that the receiver does not require access checks.  We need
   // to do this because this generic stub does not perform map checks.
-  __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+  __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
            Immediate(1 << Map::kIsAccessCheckNeeded));
   __ j(not_zero, &slow);
-  // Get the key from the stack.
-  __ movq(rbx, Operand(rsp, 1 * kPointerSize));  // 1 ~ return address
   // Check that the key is a smi.
-  __ JumpIfNotSmi(rbx, &slow);
+  __ JumpIfNotSmi(rcx, &slow);
 
   // Check that the object is a JS object.
-  __ CmpInstanceType(rcx, JS_OBJECT_TYPE);
+  __ CmpInstanceType(rbx, JS_OBJECT_TYPE);
   __ j(not_equal, &slow);
 
   // Check that the elements array is the appropriate type of
   // ExternalArray.
   // rax: value
-  // rdx: JSObject
-  // rbx: index (as a smi)
-  __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
-  __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+  // rcx: key (a smi)
+  // rdx: receiver (a JSObject)
+  __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
                  Heap::RootIndexForExternalArrayType(array_type));
   __ j(not_equal, &slow);
 
   // Check that the index is in range.
-  __ SmiToInteger32(rbx, rbx);  // Untag the index.
-  __ cmpl(rbx, FieldOperand(rcx, ExternalArray::kLengthOffset));
+  __ SmiToInteger32(rdi, rcx);  // Untag the index.
+  __ cmpl(rdi, FieldOperand(rbx, ExternalArray::kLengthOffset));
   // Unsigned comparison catches both negative and too-large values.
   __ j(above_equal, &slow);
 
   // Handle both smis and HeapNumbers in the fast path. Go to the
   // runtime for all other kinds of values.
   // rax: value
-  // rcx: elements array
-  // rbx: untagged index
+  // rcx: key (a smi)
+  // rdx: receiver (a JSObject)
+  // rbx: elements array
+  // rdi: untagged key
   __ JumpIfNotSmi(rax, &check_heap_number);
-  __ movq(rdx, rax);  // Save the value.
-  __ SmiToInteger32(rax, rax);
-  __ movq(rcx, FieldOperand(rcx, ExternalArray::kExternalPointerOffset));
-  // rcx: base pointer of external storage
+  // No more branches to the slow case on this path.  The key and
+  // receiver are no longer needed.
+  __ SmiToInteger32(rdx, rax);
+  __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
+  // rbx: base pointer of external storage
   switch (array_type) {
     case kExternalByteArray:
     case kExternalUnsignedByteArray:
-      __ movb(Operand(rcx, rbx, times_1, 0), rax);
+      __ movb(Operand(rbx, rdi, times_1, 0), rdx);
       break;
     case kExternalShortArray:
     case kExternalUnsignedShortArray:
-      __ movw(Operand(rcx, rbx, times_2, 0), rax);
+      __ movw(Operand(rbx, rdi, times_2, 0), rdx);
       break;
     case kExternalIntArray:
     case kExternalUnsignedIntArray:
-      __ movl(Operand(rcx, rbx, times_4, 0), rax);
+      __ movl(Operand(rbx, rdi, times_4, 0), rdx);
       break;
     case kExternalFloatArray:
       // Need to perform int-to-float conversion.
-      __ push(rax);
+      __ push(rdx);
       __ fild_s(Operand(rsp, 0));
-      __ pop(rax);
-      __ fstp_s(Operand(rcx, rbx, times_4, 0));
+      __ pop(rdx);
+      __ fstp_s(Operand(rbx, rdi, times_4, 0));
       break;
     default:
       UNREACHABLE();
       break;
   }
-  __ movq(rax, rdx);  // Return the original value.
   __ ret(0);
 
   __ bind(&check_heap_number);
-  __ CmpObjectType(rax, HEAP_NUMBER_TYPE, rdx);
+  // rax: value
+  // rcx: key (a smi)
+  // rdx: receiver (a JSObject)
+  // rbx: elements array
+  // rdi: untagged key
+  __ CmpObjectType(rax, HEAP_NUMBER_TYPE, kScratchRegister);
   __ j(not_equal, &slow);
+  // No more branches to the slow case on this path.
 
   // The WebGL specification leaves the behavior of storing NaN and
   // +/-Infinity into integer arrays basically undefined. For more
   // reproducible behavior, convert these to zero.
   __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
-  __ movq(rdx, rax);  // Save the value.
-  __ movq(rcx, FieldOperand(rcx, ExternalArray::kExternalPointerOffset));
-  // rbx: untagged index
-  // rcx: base pointer of external storage
+  __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
+  // rdi: untagged index
+  // rbx: base pointer of external storage
   // top of FPU stack: value
   if (array_type == kExternalFloatArray) {
-    __ fstp_s(Operand(rcx, rbx, times_4, 0));
-    __ movq(rax, rdx);  // Return the original value.
+    __ fstp_s(Operand(rbx, rdi, times_4, 0));
     __ ret(0);
   } else {
     // Need to perform float-to-int conversion.
@@ -1063,66 +1061,70 @@
     __ fucomi(0);
     __ j(parity_even, &is_nan);
 
-    __ push(rax);  // Make room on stack
+    __ push(rdx);  // Make room on the stack.  Receiver is no longer needed.
     __ fistp_d(Operand(rsp, 0));
-    __ pop(rax);
-    // rax: untagged integer value
+    __ pop(rdx);
+    // rdx: value (converted to an untagged integer)
+    // rdi: untagged index
+    // rbx: base pointer of external storage
     switch (array_type) {
       case kExternalByteArray:
       case kExternalUnsignedByteArray:
-        __ movb(Operand(rcx, rbx, times_1, 0), rax);
+        __ movb(Operand(rbx, rdi, times_1, 0), rdx);
         break;
       case kExternalShortArray:
       case kExternalUnsignedShortArray:
-        __ movw(Operand(rcx, rbx, times_2, 0), rax);
+        __ movw(Operand(rbx, rdi, times_2, 0), rdx);
         break;
       case kExternalIntArray:
       case kExternalUnsignedIntArray: {
         // We also need to explicitly check for +/-Infinity. These are
         // converted to MIN_INT, but we need to be careful not to
-        // confuse with legal uses of MIN_INT.
+        // confuse it with legal uses of MIN_INT.  Since MIN_INT truncated
+        // to 8 or 16 bits is zero, we only perform this test when storing
+        // 32-bit ints.
         Label not_infinity;
         // This test would apparently detect both NaN and Infinity,
         // but we've already checked for NaN using the FPU hardware
         // above.
-        __ movzxwq(rdi, FieldOperand(rdx, HeapNumber::kValueOffset + 6));
-        __ and_(rdi, Immediate(0x7FF0));
-        __ cmpw(rdi, Immediate(0x7FF0));
+        __ movzxwq(rcx, FieldOperand(rax, HeapNumber::kValueOffset + 6));
+        __ and_(rcx, Immediate(0x7FF0));
+        __ cmpw(rcx, Immediate(0x7FF0));
         __ j(not_equal, &not_infinity);
-        __ movq(rax, Immediate(0));
+        __ movq(rdx, Immediate(0));
         __ bind(&not_infinity);
-        __ movl(Operand(rcx, rbx, times_4, 0), rax);
+        __ movl(Operand(rbx, rdi, times_4, 0), rdx);
         break;
       }
       default:
         UNREACHABLE();
         break;
     }
-    __ movq(rax, rdx);  // Return the original value.
     __ ret(0);
 
     __ bind(&is_nan);
+    // rdi: untagged index
+    // rbx: base pointer of external storage
     __ ffree();
     __ fincstp();
-    __ movq(rax, Immediate(0));
+    __ movq(rdx, Immediate(0));
     switch (array_type) {
       case kExternalByteArray:
       case kExternalUnsignedByteArray:
-        __ movb(Operand(rcx, rbx, times_1, 0), rax);
+        __ movb(Operand(rbx, rdi, times_1, 0), rdx);
         break;
       case kExternalShortArray:
       case kExternalUnsignedShortArray:
-        __ movw(Operand(rcx, rbx, times_2, 0), rax);
+        __ movw(Operand(rbx, rdi, times_2, 0), rdx);
         break;
       case kExternalIntArray:
       case kExternalUnsignedIntArray:
-        __ movl(Operand(rcx, rbx, times_4, 0), rax);
+        __ movl(Operand(rbx, rdi, times_4, 0), rdx);
         break;
       default:
         UNREACHABLE();
         break;
     }
-    __ movq(rax, rdx);  // Return the original value.
     __ ret(0);
   }
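
A hedged C++ model of the value actually stored by the integer-array paths above, following the stub's comments rather than its instruction sequence (fistp converts with the FPU's current rounding mode; the exponent test maps +/-Infinity to zero; the is_nan path stores zero directly):

    #include <cmath>
    #include <cstdint>

    // Sketch for 32-bit integer external arrays; narrower element types
    // additionally truncate to 8 or 16 bits, as the comment above notes.
    int32_t StoredInt32(double v) {
      if (std::isnan(v)) return 0;           // is_nan path
      if (std::isinf(v)) return 0;           // explicit exponent check
      return static_cast<int32_t>(v);        // ordinary conversion (fistp)
    }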
 
@@ -1619,3 +1621,5 @@
 
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/jump-target-x64.cc b/src/x64/jump-target-x64.cc
index 9b08c1f..1208b0d 100644
--- a/src/x64/jump-target-x64.cc
+++ b/src/x64/jump-target-x64.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_X64)
+
 #include "codegen-inl.h"
 #include "jump-target-inl.h"
 #include "register-allocator-inl.h"
@@ -431,3 +433,5 @@
 
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_X64
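
Every x64 source file touched in this patch gains the same pair of guards, presumably so that a build which feeds all platform sources through the compiler only emits code for the configured target. The resulting file shape, sketched:

    // src/x64/<file>.cc after this patch
    #include "v8.h"

    #if defined(V8_TARGET_ARCH_X64)

    // ... the entire x64 implementation ...

    #endif  // V8_TARGET_ARCH_X64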
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index f9b444b..b7a6aaf 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_X64)
+
 #include "bootstrapper.h"
 #include "codegen-inl.h"
 #include "assembler-x64.h"
@@ -601,7 +603,7 @@
 }
 
 
-void MacroAssembler::SmiCompare(Register  dst, const Operand& src) {
+void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
   cmpq(dst, src);
 }
 
@@ -612,13 +614,7 @@
 
 
 void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
-  if (src->value() == 0) {
-    // Only tagged long smi to have 32-bit representation.
-    cmpq(dst, Immediate(0));
-  } else {
-    Move(kScratchRegister, src);
-    cmpq(dst, kScratchRegister);
-  }
+  cmpl(Operand(dst, kIntSize), Immediate(src->value()));
 }
 
 
@@ -800,7 +796,7 @@
 
 void MacroAssembler::SmiSub(Register dst,
                             Register src1,
-                            Operand const& src2,
+                            const Operand& src2,
                             Label* on_not_smi_result) {
   if (on_not_smi_result == NULL) {
     // No overflow checking. Use only when it's known that
@@ -918,6 +914,13 @@
 }
 
 
+void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
+  if (constant->value() != 0) {
+    addl(Operand(dst, kIntSize), Immediate(constant->value()));
+  }
+}
+
+
 void MacroAssembler::SmiAddConstant(Register dst,
                                     Register src,
                                     Smi* constant,
@@ -1597,13 +1600,7 @@
 
 
 void MacroAssembler::Test(const Operand& src, Smi* source) {
-  intptr_t smi = reinterpret_cast<intptr_t>(source);
-  if (is_int32(smi)) {
-    testl(src, Immediate(static_cast<int32_t>(smi)));
-  } else {
-    Move(kScratchRegister, source);
-    testq(src, kScratchRegister);
-  }
+  testl(Operand(src, kIntSize), Immediate(source->value()));
 }
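
The rewritten SmiCompare(const Operand&, Smi*), the new SmiAddConstant(const Operand&, Smi*), and Test all lean on the same x64 smi encoding: the 32-bit payload occupies the upper half of the 64-bit word and the lower half is all tag zeros, so on a little-endian machine a 32-bit operation at byte offset kIntSize (4) touches exactly the payload. A hedged sketch of that layout:

    #include <cstdint>

    // x64 smi encoding assumed by the cmpl/addl/testl-at-offset-4 tricks.
    int64_t SmiTag(int32_t value) {
      return static_cast<int64_t>(value) << 32;  // low 32 bits stay zero
    }
    int32_t SmiPayload(const int64_t* slot) {
      // Little-endian: the payload starts kIntSize (4) bytes into the word.
      return reinterpret_cast<const int32_t*>(slot)[1];
    }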
 
 
@@ -1730,23 +1727,21 @@
 }
 
 
-void MacroAssembler::AbortIfNotNumber(Register object, const char* msg) {
+void MacroAssembler::AbortIfNotNumber(Register object) {
   Label ok;
   Condition is_smi = CheckSmi(object);
   j(is_smi, &ok);
   Cmp(FieldOperand(object, HeapObject::kMapOffset),
       Factory::heap_number_map());
-  Assert(equal, msg);
+  Assert(equal, "Operand not a number");
   bind(&ok);
 }
 
 
-void MacroAssembler::AbortIfNotSmi(Register object, const char* msg) {
+void MacroAssembler::AbortIfNotSmi(Register object) {
   Label ok;
   Condition is_smi = CheckSmi(object);
-  j(is_smi, &ok);
-  Assert(equal, msg);
-  bind(&ok);
+  Assert(is_smi, "Operand not a smi");
 }
 
 
@@ -2766,3 +2761,5 @@
 }
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 2eeb1fd..b4f3240 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -306,6 +306,10 @@
   // No overflow testing on the result is done.
   void SmiAddConstant(Register dst, Register src, Smi* constant);
 
+  // Add an integer constant to a tagged smi, giving a tagged smi as result.
+  // No overflow testing on the result is done.
+  void SmiAddConstant(const Operand& dst, Smi* constant);
+
   // Add an integer constant to a tagged smi, giving a tagged smi as result,
   // or jumping to a label if the result cannot be represented by a smi.
   void SmiAddConstant(Register dst,
@@ -349,7 +353,7 @@
 
   void SmiSub(Register dst,
               Register src1,
-              Operand const& src2,
+              const Operand& src2,
               Label* on_not_smi_result);
 
   // Multiplies smi values and return the result as a smi,
@@ -533,10 +537,10 @@
   void FCmp();
 
   // Abort execution if argument is not a number. Used in debug code.
-  void AbortIfNotNumber(Register object, const char* msg);
+  void AbortIfNotNumber(Register object);
 
   // Abort execution if argument is not a smi. Used in debug code.
-  void AbortIfNotSmi(Register object, const char* msg);
+  void AbortIfNotSmi(Register object);
 
   // ---------------------------------------------------------------------------
   // Exception handling
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index d9b75b1..383399e 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -26,6 +26,9 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "v8.h"
+
+#if defined(V8_TARGET_ARCH_X64)
+
 #include "serialize.h"
 #include "unicode.h"
 #include "log.h"
@@ -1368,3 +1371,5 @@
 #endif  // V8_INTERPRETED_REGEXP
 
 }}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/register-allocator-x64.cc b/src/x64/register-allocator-x64.cc
index cf29593..1f5467e 100644
--- a/src/x64/register-allocator-x64.cc
+++ b/src/x64/register-allocator-x64.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_X64)
+
 #include "codegen-inl.h"
 #include "register-allocator-inl.h"
 #include "virtual-frame-inl.h"
@@ -85,3 +87,5 @@
 
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 384bca1..8b095cb 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -28,6 +28,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_X64)
+
 #include "ic-inl.h"
 #include "codegen-inl.h"
 #include "stub-cache.h"
@@ -428,7 +430,7 @@
                         Register holder,
                         Register scratch1,
                         Register scratch2,
-                        JSObject* holder_obj,
+                        JSObject* interceptor_holder,
                         LookupResult* lookup,
                         String* name,
                         Label* miss_label) {
@@ -448,7 +450,8 @@
     }
 
     if (!optimize) {
-      CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
+      CompileRegular(masm, receiver, holder, scratch2, interceptor_holder,
+                     miss_label);
       return;
     }
 
@@ -461,12 +464,17 @@
     __ push(holder);
     __ push(name_);
 
+    // Invoke the interceptor.  Note: the map checks from the receiver to
+    // the interceptor's holder have already been compiled (see the caller
+    // of this method).
     CompileCallLoadPropertyWithInterceptor(masm,
                                            receiver,
                                            holder,
                                            name_,
-                                           holder_obj);
+                                           interceptor_holder);
 
+    // Check if the interceptor provided a value for the property.  If so,
+    // return immediately.
     Label interceptor_failed;
     __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
     __ j(equal, &interceptor_failed);
@@ -483,13 +491,17 @@
     __ LeaveInternalFrame();
 
     if (lookup->type() == FIELD) {
-      holder = stub_compiler->CheckPrototypes(holder_obj,
+      // We found a FIELD property in the prototype chain of the
+      // interceptor's holder.  Check that the maps from the interceptor's
+      // holder to the field's holder haven't changed...
+      holder = stub_compiler->CheckPrototypes(interceptor_holder,
                                               holder,
                                               lookup->holder(),
                                               scratch1,
                                               scratch2,
                                               name,
                                               miss_label);
+      // ... and retrieve the field from the field's holder.
       stub_compiler->GenerateFastPropertyLoad(masm,
                                               rax,
                                               holder,
@@ -497,37 +509,47 @@
                                               lookup->GetFieldIndex());
       __ ret(0);
     } else {
+      // We found a CALLBACKS property in the prototype chain of the
+      // interceptor's holder.
       ASSERT(lookup->type() == CALLBACKS);
       ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
       ASSERT(callback != NULL);
       ASSERT(callback->getter() != NULL);
 
+      // Prepare for the tail call.  Push the receiver on the stack after
+      // the return address.
       Label cleanup;
-      __ pop(scratch2);
+      __ pop(scratch2);  // return address
       __ push(receiver);
       __ push(scratch2);
 
-      holder = stub_compiler->CheckPrototypes(holder_obj, holder,
+      // Check that the maps from the interceptor's holder to the
+      // callback's holder haven't changed.
+      holder = stub_compiler->CheckPrototypes(interceptor_holder, holder,
                                               lookup->holder(), scratch1,
                                               scratch2,
                                               name,
                                               &cleanup);
 
-      __ pop(scratch2);  // save old return address
+      // Continue the tail call preparation: push the remaining parameters
+      // after the return address.
+      __ pop(scratch2);  // return address
       __ push(holder);
       __ Move(holder, Handle<AccessorInfo>(callback));
       __ push(holder);
       __ push(FieldOperand(holder, AccessorInfo::kDataOffset));
       __ push(name_);
-      __ push(scratch2);  // restore old return address
+      __ push(scratch2);  // restore return address
 
+      // Tail call to runtime.
       ExternalReference ref =
           ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
       __ TailCallExternalReference(ref, 5, 1);
 
+      // Cleanup code: we pushed the receiver after the return address and
+      // need to remove it from there.
       __ bind(&cleanup);
-      __ pop(scratch1);
-      __ pop(scratch2);
+      __ pop(scratch1);  // return address
+      __ pop(scratch2);  // receiver
       __ push(scratch1);
     }
   }
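
After the second pop/push sequence the stack is arranged exactly as a direct call into the IC utility would leave it. A hedged picture of the layout at the tail call (top of stack first, 8-byte slots):

    // rsp[0]   return address (re-pushed scratch2)
    // rsp[8]   name_
    // rsp[16]  AccessorInfo::kDataOffset field
    // rsp[24]  Handle<AccessorInfo> callback
    // rsp[32]  holder
    // rsp[40]  receiver (pushed by the first sequence)
    // ...the five arguments consumed by TailCallExternalReference(ref, 5, 1).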
@@ -537,10 +559,10 @@
                       Register receiver,
                       Register holder,
                       Register scratch,
-                      JSObject* holder_obj,
+                      JSObject* interceptor_holder,
                       Label* miss_label) {
     __ pop(scratch);  // save old return address
-    PushInterceptorArguments(masm, receiver, holder, name_, holder_obj);
+    PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
     __ push(scratch);  // restore old return address
 
     ExternalReference ref = ExternalReference(
@@ -702,7 +724,7 @@
                         Register receiver,
                         Register scratch1,
                         Register scratch2,
-                        JSObject* holder_obj,
+                        JSObject* interceptor_holder,
                         LookupResult* lookup,
                         String* name,
                         const CallOptimization& optimization,
@@ -715,10 +737,13 @@
     bool can_do_fast_api_call = false;
     if (optimization.is_simple_api_call() &&
         !lookup->holder()->IsGlobalObject()) {
-      depth1 = optimization.GetPrototypeDepthOfExpectedType(object, holder_obj);
+      depth1 =
+          optimization.GetPrototypeDepthOfExpectedType(object,
+                                                       interceptor_holder);
       if (depth1 == kInvalidProtoDepth) {
-        depth2 = optimization.GetPrototypeDepthOfExpectedType(holder_obj,
-                                                              lookup->holder());
+        depth2 =
+            optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
+                                                         lookup->holder());
       }
       can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
                              (depth2 != kInvalidProtoDepth);
@@ -731,24 +756,32 @@
       ReserveSpaceForFastApiCall(masm, scratch1);
     }
 
+    // Check that the maps from the receiver to the interceptor's holder
+    // haven't changed, so that we can invoke the interceptor.
     Label miss_cleanup;
     Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
     Register holder =
-        stub_compiler_->CheckPrototypes(object, receiver, holder_obj,
+        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
                                         scratch1, scratch2, name,
                                         depth1, miss);
 
+    // Invoke the interceptor; if it provides a value, branch to
+    // |regular_invoke|.
     Label regular_invoke;
-    LoadWithInterceptor(masm, receiver, holder, holder_obj, &regular_invoke);
+    LoadWithInterceptor(masm, receiver, holder, interceptor_holder,
+                        &regular_invoke);
 
-    // Generate code for the failed interceptor case.
+    // The interceptor returned nothing for this property.  Try to use the
+    // cached constant function.
 
-    // Check the lookup is still valid.
-    stub_compiler_->CheckPrototypes(holder_obj, receiver,
+    // Check that the maps from the interceptor's holder to the constant
+    // function's holder haven't changed, so that we can use the cached
+    // constant function.
+    stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
                                     lookup->holder(),
                                     scratch1, scratch2, name,
                                     depth2, miss);
 
+    // Invoke function.
     if (can_do_fast_api_call) {
       GenerateFastApiCall(masm, optimization, arguments_.immediate());
     } else {
@@ -756,12 +789,14 @@
                         JUMP_FUNCTION);
     }
 
+    // Deferred code for the fast API call case: free the preallocated
+    // space.
     if (can_do_fast_api_call) {
       __ bind(&miss_cleanup);
       FreeSpaceForFastApiCall(masm, scratch1);
       __ jmp(miss_label);
     }
 
+    // Invoke a regular function.
     __ bind(&regular_invoke);
     if (can_do_fast_api_call) {
       FreeSpaceForFastApiCall(masm, scratch1);
@@ -774,10 +809,10 @@
                       Register scratch1,
                       Register scratch2,
                       String* name,
-                      JSObject* holder_obj,
+                      JSObject* interceptor_holder,
                       Label* miss_label) {
     Register holder =
-        stub_compiler_->CheckPrototypes(object, receiver, holder_obj,
+        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
                                         scratch1, scratch2, name,
                                         miss_label);
 
@@ -789,7 +824,7 @@
                              receiver,
                              holder,
                              name_,
-                             holder_obj);
+                             interceptor_holder);
 
     __ CallExternalReference(
         ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall)),
@@ -2027,23 +2062,18 @@
                                                   String* name) {
   // ----------- S t a t e -------------
   //  -- rax     : value
+  //  -- rcx     : key
+  //  -- rdx     : receiver
   //  -- rsp[0]  : return address
-  //  -- rsp[8]  : key
-  //  -- rsp[16] : receiver
   // -----------------------------------
   Label miss;
 
   __ IncrementCounter(&Counters::keyed_store_field, 1);
 
-  // Get the name from the stack.
-  __ movq(rcx, Operand(rsp, 1 * kPointerSize));
   // Check that the name has not changed.
   __ Cmp(rcx, Handle<String>(name));
   __ j(not_equal, &miss);
 
-  // Get the receiver from the stack.
-  __ movq(rdx, Operand(rsp, 2 * kPointerSize));
-
   // Generate store field code.  Preserves receiver and name on jump to miss.
   GenerateStoreField(masm(),
                      object,
@@ -2367,3 +2397,5 @@
 #undef __
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/virtual-frame-x64.cc b/src/x64/virtual-frame-x64.cc
index 1e4374b..db316bb 100644
--- a/src/x64/virtual-frame-x64.cc
+++ b/src/x64/virtual-frame-x64.cc
@@ -27,6 +27,8 @@
 
 #include "v8.h"
 
+#if defined(V8_TARGET_ARCH_X64)
+
 #include "codegen-inl.h"
 #include "register-allocator-inl.h"
 #include "scopes.h"
@@ -1029,6 +1031,46 @@
 #endif
 
 
+// This function assumes that the only results that could be in a_reg or b_reg
+// are a and b.  Other results can be live, but must not be in a_reg or b_reg.
+void VirtualFrame::MoveResultsToRegisters(Result* a,
+                                          Result* b,
+                                          Register a_reg,
+                                          Register b_reg) {
+  ASSERT(!a_reg.is(b_reg));
+  // Assert that cgen()->allocator()->count(a_reg) is accounted for by a and b.
+  ASSERT(cgen()->allocator()->count(a_reg) <= 2);
+  ASSERT(cgen()->allocator()->count(a_reg) != 2 || a->reg().is(a_reg));
+  ASSERT(cgen()->allocator()->count(a_reg) != 2 || b->reg().is(a_reg));
+  ASSERT(cgen()->allocator()->count(a_reg) != 1 ||
+         (a->is_register() && a->reg().is(a_reg)) ||
+         (b->is_register() && b->reg().is(a_reg)));
+  // Assert that cgen()->allocator()->count(b_reg) is accounted for by a and b.
+  ASSERT(cgen()->allocator()->count(b_reg) <= 2);
+  ASSERT(cgen()->allocator()->count(b_reg) != 2 || a->reg().is(b_reg));
+  ASSERT(cgen()->allocator()->count(b_reg) != 2 || b->reg().is(b_reg));
+  ASSERT(cgen()->allocator()->count(b_reg) != 1 ||
+         (a->is_register() && a->reg().is(b_reg)) ||
+         (b->is_register() && b->reg().is(b_reg)));
+
+  if (a->is_register() && a->reg().is(a_reg)) {
+    b->ToRegister(b_reg);
+  } else if (!cgen()->allocator()->is_used(a_reg)) {
+    a->ToRegister(a_reg);
+    b->ToRegister(b_reg);
+  } else if (cgen()->allocator()->is_used(b_reg)) {
+    // a must be in b_reg, b in a_reg.
+    __ xchg(a_reg, b_reg);
+    // Results a and b will be invalidated, so it is ok if they are switched.
+  } else {
+    b->ToRegister(b_reg);
+    a->ToRegister(a_reg);
+  }
+  a->Unuse();
+  b->Unuse();
+}
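
The branch structure above reduces to a four-case table; a hedged restatement (the use-count asserts guarantee these are the only possibilities):

    // 1. a already sits in a_reg          -> just move b into b_reg.
    // 2. a_reg is unused                  -> move a, then b.
    // 3. a_reg and b_reg are both in use  -> by the asserts, a is in b_reg
    //    and b is in a_reg, so a single xchg places both; the Results are
    //    invalidated afterwards, so swapping their registers is safe.
    // 4. otherwise b_reg is unused        -> move b first (vacating a_reg,
    //    which b must occupy in this case), then move a.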
+
+
 Result VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
   // Name and receiver are on the top of the frame.  The IC expects
   // name in rcx and receiver on the stack.  It does not drop the
@@ -1051,15 +1093,52 @@
 }
 
 
-Result VirtualFrame::CallKeyedStoreIC() {
-  // Value, key, and receiver are on the top of the frame.  The IC
-  // expects value in rax and key and receiver on the stack.  It does
-  // not drop the key and receiver.
-  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-  Result value = Pop();
-  PrepareForCall(2, 0);  // Two stack args, neither callee-dropped.
-  value.ToRegister(rax);
-  value.Unuse();
+Result VirtualFrame::CallCommonStoreIC(Handle<Code> ic,
+                                       Result* value,
+                                       Result* key,
+                                       Result* receiver) {
+  // The IC expects value in rax, key in rcx, and receiver in rdx.
+  PrepareForCall(0, 0);
+  // If one of the three registers is free, or a value is already
+  // in the correct register, move the remaining two values using
+  // MoveResultsToRegisters().
+  if (!cgen()->allocator()->is_used(rax) ||
+      (value->is_register() && value->reg().is(rax))) {
+    if (!cgen()->allocator()->is_used(rax)) {
+      value->ToRegister(rax);
+    }
+    MoveResultsToRegisters(key, receiver, rcx, rdx);
+    value->Unuse();
+  } else if (!cgen()->allocator()->is_used(rcx) ||
+             (key->is_register() && key->reg().is(rcx))) {
+    if (!cgen()->allocator()->is_used(rcx)) {
+      key->ToRegister(rcx);
+    }
+    MoveResultsToRegisters(value, receiver, rax, rdx);
+    key->Unuse();
+  } else if (!cgen()->allocator()->is_used(rdx) ||
+             (receiver->is_register() && receiver->reg().is(rdx))) {
+    if (!cgen()->allocator()->is_used(rdx)) {
+      receiver->ToRegister(rdx);
+    }
+    MoveResultsToRegisters(key, value, rcx, rax);
+    receiver->Unuse();
+  } else {
+    // Otherwise, no register is free, and no value is in the correct place.
+    // We have one of the two circular permutations of rax, rcx, rdx.
+    ASSERT(value->is_register());
+    if (value->reg().is(rcx)) {
+      __ xchg(rax, rdx);
+      __ xchg(rax, rcx);
+    } else {
+      __ xchg(rax, rcx);
+      __ xchg(rax, rdx);
+    }
+    value->Unuse();
+    key->Unuse();
+    receiver->Unuse();
+  }
+
   return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
 }
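
The two xchg sequences implement the two 3-cycles of (rax, rcx, rdx). A hedged stand-alone demonstration with plain ints in place of register contents:

    #include <cassert>
    #include <utility>

    enum { kValue = 0, kKey = 1, kReceiver = 2 };

    int main() {
      // Permutation with the value in rcx: rax=receiver, rcx=value, rdx=key.
      int rax = kReceiver, rcx = kValue, rdx = kKey;
      std::swap(rax, rdx);   // __ xchg(rax, rdx)
      std::swap(rax, rcx);   // __ xchg(rax, rcx)
      assert(rax == kValue && rcx == kKey && rdx == kReceiver);

      // The opposite cycle: rax=key, rcx=receiver, rdx=value.
      rax = kKey; rcx = kReceiver; rdx = kValue;
      std::swap(rax, rcx);   // __ xchg(rax, rcx)
      std::swap(rax, rdx);   // __ xchg(rax, rdx)
      assert(rax == kValue && rcx == kKey && rdx == kReceiver);
      return 0;
    }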
 
@@ -1106,51 +1185,6 @@
 }
 
 
-Result VirtualFrame::CallStoreIC() {
-  // Name, value, and receiver are on top of the frame.  The IC
-  // expects name in rcx, value in rax, and receiver in edx.
-  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-  Result name = Pop();
-  Result value = Pop();
-  Result receiver = Pop();
-  PrepareForCall(0, 0);
-
-  // Optimized for case in which name is a constant value.
-  if (name.is_register() && (name.reg().is(rdx) || name.reg().is(rax))) {
-    if (!is_used(rcx)) {
-      name.ToRegister(rcx);
-    } else if (!is_used(rbx)) {
-      name.ToRegister(rbx);
-    } else {
-      ASSERT(!is_used(rdi));  // Only three results are live, so rdi is free.
-      name.ToRegister(rdi);
-    }
-  }
-  // Now name is not in edx or eax, so we can fix them, then move name to ecx.
-  if (value.is_register() && value.reg().is(rdx)) {
-    if (receiver.is_register() && receiver.reg().is(rax)) {
-      // Wrong registers.
-      __ xchg(rax, rdx);
-    } else {
-      // Register rax is free for value, which frees rcx for receiver.
-      value.ToRegister(rax);
-      receiver.ToRegister(rdx);
-    }
-  } else {
-    // Register rcx is free for receiver, which guarantees rax is free for
-    // value.
-    receiver.ToRegister(rdx);
-    value.ToRegister(rax);
-  }
-  // Receiver and value are in the right place, so rcx is free for name.
-  name.ToRegister(rcx);
-  name.Unuse();
-  value.Unuse();
-  receiver.Unuse();
-  return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
-}
-
-
 void VirtualFrame::PushTryHandler(HandlerType type) {
   ASSERT(cgen()->HasValidEntryRegisters());
   // Grow the expression stack by handler size less one (the return
@@ -1163,3 +1197,5 @@
 #undef __
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/virtual-frame-x64.h b/src/x64/virtual-frame-x64.h
index 529f47a..1c9751b 100644
--- a/src/x64/virtual-frame-x64.h
+++ b/src/x64/virtual-frame-x64.h
@@ -145,6 +145,9 @@
   // (ie, they all have frame-external references).
   Register SpillAnyRegister();
 
+  // Spill the top element of the frame to memory.
+  void SpillTop() { SpillElementAt(element_count() - 1); }
+
   // Sync the range of elements in [begin, end] with memory.
   void SyncRange(int begin, int end);
 
@@ -333,13 +336,33 @@
   // frame.  They are not dropped.
   Result CallKeyedLoadIC(RelocInfo::Mode mode);
 
-  // Call store IC.  Name, value, and receiver are found on top of the
-  // frame.  Receiver is not dropped.
-  Result CallStoreIC();
+
+  // Calling a store IC and calling a keyed store IC differ only in which
+  // IC is called and in the order of the three arguments on the frame.
+  Result CallCommonStoreIC(Handle<Code> ic,
+                           Result* value,
+                           Result* key,
+                           Result* receiver);
+
+  // Call store IC.  Name, value, and receiver are found on top
+  // of the frame.  All are dropped.
+  Result CallStoreIC() {
+    Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+    Result name = Pop();
+    Result value = Pop();
+    Result receiver = Pop();
+    return CallCommonStoreIC(ic, &value, &name, &receiver);
+  }
 
   // Call keyed store IC.  Value, key, and receiver are found on top
-  // of the frame.  Key and receiver are not dropped.
-  Result CallKeyedStoreIC();
+  // of the frame.  All are dropped.
+  Result CallKeyedStoreIC() {
+    Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+    Result value = Pop();
+    Result key = Pop();
+    Result receiver = Pop();
+    return CallCommonStoreIC(ic, &value, &key, &receiver);
+  }
 
   // Call call IC.  Function name, arguments, and receiver are found on top
   // of the frame and dropped by the call.
@@ -548,6 +571,14 @@
   // Register counts are correctly updated.
   int InvalidateFrameSlotAt(int index);
 
+  // This function assumes that a and b are the only results that could be in
+  // the registers a_reg or b_reg.  Other results can be live, but must not
+  // be in the registers a_reg or b_reg.  The results a and b are invalidated.
+  void MoveResultsToRegisters(Result* a,
+                              Result* b,
+                              Register a_reg,
+                              Register b_reg);
+
   // Call a code stub that has already been prepared for calling (via
   // PrepareForCall).
   Result RawCallStub(CodeStub* stub);
diff --git a/test/cctest/SConscript b/test/cctest/SConscript
index 2cf0b12..876c104 100644
--- a/test/cctest/SConscript
+++ b/test/cctest/SConscript
@@ -71,6 +71,7 @@
     'test-strings.cc',
     'test-threads.cc',
     'test-thread-termination.cc',
+    'test-unbound-queue.cc',
     'test-utils.cc',
     'test-version.cc'
   ],
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index ea9e6e1..46eaccd 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -76,6 +76,11 @@
 }
 
 
+static void ExpectTrue(const char* code) {
+  ExpectBoolean(code, true);
+}
+
+
 static void ExpectObject(const char* code, Local<Value> expected) {
   Local<Value> result = CompileRun(code);
   CHECK(result->Equals(expected));
@@ -2506,7 +2511,7 @@
 
   // Uses getOwnPropertyDescriptor to check the configurable status
   Local<Script> script_desc
-    = Script::Compile(v8_str("var prop =Object.getOwnPropertyDescriptor( "
+    = Script::Compile(v8_str("var prop = Object.getOwnPropertyDescriptor( "
                              "obj, 'x');"
                              "prop.configurable;"));
   Local<Value> result = script_desc->Run();
@@ -2592,8 +2597,167 @@
 }
 
 
+static v8::Handle<v8::Object> GetGlobalProperty(LocalContext* context,
+                                                const char* name) {
+  return v8::Handle<v8::Object>::Cast((*context)->Global()->Get(v8_str(name)));
+}
 
 
+THREADED_TEST(DefineAPIAccessorOnObject) {
+  v8::HandleScope scope;
+  Local<ObjectTemplate> templ = ObjectTemplate::New();
+  LocalContext context;
+
+  context->Global()->Set(v8_str("obj1"), templ->NewInstance());
+  CompileRun("var obj2 = {};");
+
+  CHECK(CompileRun("obj1.x")->IsUndefined());
+  CHECK(CompileRun("obj2.x")->IsUndefined());
+
+  CHECK(GetGlobalProperty(&context, "obj1")->
+      SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
+
+  ExpectString("obj1.x", "x");
+  CHECK(CompileRun("obj2.x")->IsUndefined());
+
+  CHECK(GetGlobalProperty(&context, "obj2")->
+      SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
+
+  ExpectString("obj1.x", "x");
+  ExpectString("obj2.x", "x");
+
+  ExpectTrue("Object.getOwnPropertyDescriptor(obj1, 'x').configurable");
+  ExpectTrue("Object.getOwnPropertyDescriptor(obj2, 'x').configurable");
+
+  CompileRun("Object.defineProperty(obj1, 'x',"
+             "{ get: function() { return 'y'; }, configurable: true })");
+
+  ExpectString("obj1.x", "y");
+  ExpectString("obj2.x", "x");
+
+  CompileRun("Object.defineProperty(obj2, 'x',"
+             "{ get: function() { return 'y'; }, configurable: true })");
+
+  ExpectString("obj1.x", "y");
+  ExpectString("obj2.x", "y");
+
+  ExpectTrue("Object.getOwnPropertyDescriptor(obj1, 'x').configurable");
+  ExpectTrue("Object.getOwnPropertyDescriptor(obj2, 'x').configurable");
+
+  CHECK(GetGlobalProperty(&context, "obj1")->
+      SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
+  CHECK(GetGlobalProperty(&context, "obj2")->
+      SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
+
+  ExpectString("obj1.x", "x");
+  ExpectString("obj2.x", "x");
+
+  ExpectTrue("Object.getOwnPropertyDescriptor(obj1, 'x').configurable");
+  ExpectTrue("Object.getOwnPropertyDescriptor(obj2, 'x').configurable");
+
+  // Define getters/setters, but now make them not configurable.
+  CompileRun("Object.defineProperty(obj1, 'x',"
+             "{ get: function() { return 'z'; }, configurable: false })");
+  CompileRun("Object.defineProperty(obj2, 'x',"
+             "{ get: function() { return 'z'; }, configurable: false })");
+
+  ExpectTrue("!Object.getOwnPropertyDescriptor(obj1, 'x').configurable");
+  ExpectTrue("!Object.getOwnPropertyDescriptor(obj2, 'x').configurable");
+
+  ExpectString("obj1.x", "z");
+  ExpectString("obj2.x", "z");
+
+  CHECK(!GetGlobalProperty(&context, "obj1")->
+      SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
+  CHECK(!GetGlobalProperty(&context, "obj2")->
+      SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
+
+  ExpectString("obj1.x", "z");
+  ExpectString("obj2.x", "z");
+}
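
Boiled down, the new capability these tests exercise is SetAccessor directly on a v8::Object instance, with a boolean result signalling whether the accessor could be installed. A minimal hedged sketch reusing this file's helpers (v8_str, GetXValue):

    // Sketch, not a complete test.
    v8::Handle<v8::Object> obj = GetGlobalProperty(&context, "obj1");
    bool installed = obj->SetAccessor(v8_str("x"), GetXValue, NULL,
                                      v8_str("donut"));
    // installed is false when 'x' already exists and is non-configurable,
    // as the DontDelete test below demonstrates.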
+
+
+THREADED_TEST(DontDeleteAPIAccessorsCannotBeOverriden) {
+  v8::HandleScope scope;
+  Local<ObjectTemplate> templ = ObjectTemplate::New();
+  LocalContext context;
+
+  context->Global()->Set(v8_str("obj1"), templ->NewInstance());
+  CompileRun("var obj2 = {};");
+
+  CHECK(GetGlobalProperty(&context, "obj1")->SetAccessor(
+        v8_str("x"),
+        GetXValue, NULL,
+        v8_str("donut"), v8::DEFAULT, v8::DontDelete));
+  CHECK(GetGlobalProperty(&context, "obj2")->SetAccessor(
+        v8_str("x"),
+        GetXValue, NULL,
+        v8_str("donut"), v8::DEFAULT, v8::DontDelete));
+
+  ExpectString("obj1.x", "x");
+  ExpectString("obj2.x", "x");
+
+  ExpectTrue("!Object.getOwnPropertyDescriptor(obj1, 'x').configurable");
+  ExpectTrue("!Object.getOwnPropertyDescriptor(obj2, 'x').configurable");
+
+  CHECK(!GetGlobalProperty(&context, "obj1")->
+      SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
+  CHECK(!GetGlobalProperty(&context, "obj2")->
+      SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
+
+  {
+    v8::TryCatch try_catch;
+    CompileRun("Object.defineProperty(obj1, 'x',"
+        "{get: function() { return 'func'; }})");
+    CHECK(try_catch.HasCaught());
+    String::AsciiValue exception_value(try_catch.Exception());
+    CHECK_EQ(*exception_value,
+            "TypeError: Cannot redefine property: defineProperty");
+  }
+  {
+    v8::TryCatch try_catch;
+    CompileRun("Object.defineProperty(obj2, 'x',"
+        "{get: function() { return 'func'; }})");
+    CHECK(try_catch.HasCaught());
+    String::AsciiValue exception_value(try_catch.Exception());
+    CHECK_EQ(*exception_value,
+            "TypeError: Cannot redefine property: defineProperty");
+  }
+}
+
+
+static v8::Handle<Value> Get239Value(Local<String> name,
+                                     const AccessorInfo& info) {
+  ApiTestFuzzer::Fuzz();
+  CHECK_EQ(info.Data(), v8_str("donut"));
+  CHECK_EQ(name, v8_str("239"));
+  return name;
+}
+
+
+THREADED_TEST(ElementAPIAccessor) {
+  v8::HandleScope scope;
+  Local<ObjectTemplate> templ = ObjectTemplate::New();
+  LocalContext context;
+
+  context->Global()->Set(v8_str("obj1"), templ->NewInstance());
+  CompileRun("var obj2 = {};");
+
+  CHECK(GetGlobalProperty(&context, "obj1")->SetAccessor(
+        v8_str("239"),
+        Get239Value, NULL,
+        v8_str("donut")));
+  CHECK(GetGlobalProperty(&context, "obj2")->SetAccessor(
+        v8_str("239"),
+        Get239Value, NULL,
+        v8_str("donut")));
+
+  ExpectString("obj1[239]", "239");
+  ExpectString("obj2[239]", "239");
+  ExpectString("obj1['239']", "239");
+  ExpectString("obj2['239']", "239");
+}
+
 
 v8::Persistent<Value> xValue;
 
@@ -8003,8 +8167,8 @@
   // TODO(155): This test would break without the initialization of V8. This is
   // a workaround for now to make this test not fail.
   v8::V8::Initialize();
-  const char *script = "function foo(a) { return a+1; }";
-  v8::ScriptData *sd =
+  const char* script = "function foo(a) { return a+1; }";
+  v8::ScriptData* sd =
       v8::ScriptData::PreCompile(script, i::StrLength(script));
   CHECK_NE(sd->Length(), 0);
   CHECK_NE(sd->Data(), NULL);
@@ -8015,8 +8179,8 @@
 
 TEST(PreCompileWithError) {
   v8::V8::Initialize();
-  const char *script = "function foo(a) { return 1 * * 2; }";
-  v8::ScriptData *sd =
+  const char* script = "function foo(a) { return 1 * * 2; }";
+  v8::ScriptData* sd =
       v8::ScriptData::PreCompile(script, i::StrLength(script));
   CHECK(sd->HasError());
   delete sd;
@@ -8025,14 +8189,53 @@
 
 TEST(Regress31661) {
   v8::V8::Initialize();
-  const char *script = " The Definintive Guide";
-  v8::ScriptData *sd =
+  const char* script = " The Definintive Guide";
+  v8::ScriptData* sd =
       v8::ScriptData::PreCompile(script, i::StrLength(script));
   CHECK(sd->HasError());
   delete sd;
 }
 
 
+// Tests that ScriptData can be serialized and deserialized.
+TEST(PreCompileSerialization) {
+  v8::V8::Initialize();
+  const char* script = "function foo(a) { return a+1; }";
+  v8::ScriptData* sd =
+      v8::ScriptData::PreCompile(script, i::StrLength(script));
+
+  // Serialize.
+  int serialized_data_length = sd->Length();
+  char* serialized_data = i::NewArray<char>(serialized_data_length);
+  memcpy(serialized_data, sd->Data(), serialized_data_length);
+
+  // Deserialize.
+  v8::ScriptData* deserialized_sd =
+      v8::ScriptData::New(serialized_data, serialized_data_length);
+
+  // Verify that the original is the same as the deserialized.
+  CHECK_EQ(sd->Length(), deserialized_sd->Length());
+  CHECK_EQ(0, memcmp(sd->Data(), deserialized_sd->Data(), sd->Length()));
+  CHECK_EQ(sd->HasError(), deserialized_sd->HasError());
+
+  delete sd;
+  delete deserialized_sd;
+}
+
+
+// Attempts to deserialize bad data.
+TEST(PreCompileDeserializationError) {
+  v8::V8::Initialize();
+  const char* data = "DONT CARE";
+  int invalid_size = 3;
+  v8::ScriptData* sd = v8::ScriptData::New(data, invalid_size);
+
+  CHECK_EQ(0, sd->Length());
+
+  delete sd;
+}
+
+
 // This tests that we do not allow dictionary load/call inline caches
 // to use functions that have not yet been compiled.  The potential
 // problem of loading a function that has not yet been compiled can
diff --git a/test/cctest/test-circular-queue.cc b/test/cctest/test-circular-queue.cc
index 3fa49bf..ce9a42e 100644
--- a/test/cctest/test-circular-queue.cc
+++ b/test/cctest/test-circular-queue.cc
@@ -1,6 +1,6 @@
 // Copyright 2010 the V8 project authors. All rights reserved.
 //
-// Tests of circular queues.
+// Tests of the circular queue.
 
 #include "v8.h"
 #include "circular-queue-inl.h"
@@ -8,53 +8,9 @@
 
 namespace i = v8::internal;
 
-using i::CircularQueue;
 using i::SamplingCircularQueue;
 
 
-TEST(SingleRecordCircularQueue) {
-  typedef int Record;
-  CircularQueue<Record> cq(sizeof(Record) * 2);
-  CHECK(cq.IsEmpty());
-  cq.Enqueue(1);
-  CHECK(!cq.IsEmpty());
-  Record rec = 0;
-  cq.Dequeue(&rec);
-  CHECK_EQ(1, rec);
-  CHECK(cq.IsEmpty());
-}
-
-
-TEST(MultipleRecordsCircularQueue) {
-  typedef int Record;
-  const int kQueueSize = 10;
-  CircularQueue<Record> cq(sizeof(Record) * (kQueueSize + 1));
-  CHECK(cq.IsEmpty());
-  cq.Enqueue(1);
-  CHECK(!cq.IsEmpty());
-  for (int i = 2; i <= 5; ++i) {
-    cq.Enqueue(i);
-    CHECK(!cq.IsEmpty());
-  }
-  Record rec = 0;
-  for (int i = 1; i <= 4; ++i) {
-    CHECK(!cq.IsEmpty());
-    cq.Dequeue(&rec);
-    CHECK_EQ(i, rec);
-  }
-  for (int i = 6; i <= 12; ++i) {
-    cq.Enqueue(i);
-    CHECK(!cq.IsEmpty());
-  }
-  for (int i = 5; i <= 12; ++i) {
-    CHECK(!cq.IsEmpty());
-    cq.Dequeue(&rec);
-    CHECK_EQ(i, rec);
-  }
-  CHECK(cq.IsEmpty());
-}
-
-
 TEST(SamplingCircularQueue) {
   typedef SamplingCircularQueue::Cell Record;
   const int kRecordsPerChunk = 4;
diff --git a/test/cctest/test-cpu-profiler.cc b/test/cctest/test-cpu-profiler.cc
index 6133cdb..f587fc8 100644
--- a/test/cctest/test-cpu-profiler.cc
+++ b/test/cctest/test-cpu-profiler.cc
@@ -114,7 +114,8 @@
   processor.CodeMoveEvent(ToAddress(0x1400), ToAddress(0x1500));
   processor.CodeCreateEvent(i::Logger::STUB_TAG, 3, ToAddress(0x1600), 0x10);
   processor.CodeDeleteEvent(ToAddress(0x1600));
-  processor.FunctionCreateEvent(ToAddress(0x1700), ToAddress(0x1000));
+  processor.FunctionCreateEvent(ToAddress(0x1700), ToAddress(0x1000),
+                                CodeEntry::kNoSecurityToken);
   // Enqueue a tick event to enable code events processing.
   EnqueueTickSampleEvent(&processor, ToAddress(0x1000));
 
@@ -176,7 +177,8 @@
 
   processor.Stop();
   processor.Join();
-  CpuProfile* profile = profiles.StopProfiling("", 1);
+  CpuProfile* profile =
+      profiles.StopProfiling(CodeEntry::kNoSecurityToken, "", 1);
   CHECK_NE(NULL, profile);
 
   // Check call trees.
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index d90be8e..4b4c950 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -6196,7 +6196,28 @@
   v8::Local<v8::Context> context1 = v8::Debug::GetDebugContext();
   v8::Local<v8::Context> context2 = v8::Debug::GetDebugContext();
   CHECK_EQ(*context1, *context2);
-  // Make sure debugger is unloaded before running other tests.
-  v8::internal::ForceUnloadDebugger();
+}
+
+
+static v8::Handle<v8::Value> expected_callback_data;
+static void DebugEventContextChecker(const v8::Debug::EventDetails& details) {
+  CHECK(details.GetEventContext() == expected_context);
+  CHECK_EQ(expected_callback_data, details.GetCallbackData());
+}
+
+// Check that the event details contain the context where the debug event
+// occurred.
+TEST(DebugEventContext) {
+  v8::HandleScope scope;
+  expected_callback_data = v8::Int32::New(2010);
+  v8::Debug::SetDebugEventListener2(DebugEventContextChecker,
+                                    expected_callback_data);
+  expected_context = v8::Context::New();
+  v8::Context::Scope context_scope(expected_context);
+  v8::Script::Compile(v8::String::New("(function(){debugger;})();"))->Run();
+  expected_context.Dispose();
+  expected_context.Clear();
+  v8::Debug::SetDebugEventListener(NULL);
+  expected_callback_data = v8::Handle<v8::Value>();
   CheckDebuggerUnloaded();
 }
+
diff --git a/test/cctest/test-disasm-ia32.cc b/test/cctest/test-disasm-ia32.cc
index 02e64b4..f94cd45 100644
--- a/test/cctest/test-disasm-ia32.cc
+++ b/test/cctest/test-disasm-ia32.cc
@@ -237,6 +237,7 @@
   __ cld();
   __ rep_movs();
   __ rep_stos();
+  __ stos();
 
   __ sub(edx, Operand(ebx, ecx, times_4, 10000));
   __ sub(edx, Operand(ebx));
diff --git a/test/cctest/test-log-stack-tracer.cc b/test/cctest/test-log-stack-tracer.cc
index 4d9d759..6da1a75 100644
--- a/test/cctest/test-log-stack-tracer.cc
+++ b/test/cctest/test-log-stack-tracer.cc
@@ -66,28 +66,6 @@
 }
 
 
-static void CheckRetAddrIsInFunction(const char* func_name,
-                                     Address ret_addr,
-                                     Address func_start_addr,
-                                     unsigned int func_len) {
-  printf("CheckRetAddrIsInFunction \"%s\": %p %p %p\n",
-         func_name, func_start_addr, ret_addr, func_start_addr + func_len);
-  CHECK_GE(ret_addr, func_start_addr);
-  CHECK_GE(func_start_addr + func_len, ret_addr);
-}
-
-
-static void CheckRetAddrIsInJSFunction(const char* func_name,
-                                       Address ret_addr,
-                                       Handle<JSFunction> func) {
-  v8::internal::Code* func_code = func->code();
-  CheckRetAddrIsInFunction(
-      func_name, ret_addr,
-      func_code->instruction_start(),
-      func_code->ExecutableSize());
-}
-
-
 // --- T r a c e   E x t e n s i o n ---
 
 class TraceExtension : public v8::Extension {
@@ -209,11 +187,16 @@
 }
 
 
-static void CheckRetAddrIsInJSFunction(const char* func_name,
-                                               Address ret_addr) {
-  CheckRetAddrIsInJSFunction(func_name,
-                             ret_addr,
-                             GetGlobalJSFunction(func_name));
+static void CheckObjectIsJSFunction(const char* func_name,
+                                    Address addr) {
+  i::Object* obj = reinterpret_cast<i::Object*>(addr);
+  CHECK(obj->IsJSFunction());
+  CHECK(JSFunction::cast(obj)->shared()->name()->IsString());
+  i::SmartPointer<char> found_name =
+      i::String::cast(JSFunction::cast(obj)->shared()->name())->ToCString();
+  CHECK_EQ(func_name, *found_name);
 }
 
 
@@ -272,6 +255,7 @@
   Handle<JSFunction> func = CompileFunction(trace_call_buf.start());
   CHECK(!func.is_null());
   i::FLAG_allow_natives_syntax = allow_natives_syntax;
+  func->shared()->set_name(*NewString(func_name));
 
 #ifdef DEBUG
   v8::internal::Code* func_code = func->code();
@@ -289,6 +273,11 @@
 // StackTracer uses Top::c_entry_fp as a starting point for stack
 // walking.
 TEST(CFromJSStackTrace) {
+  // TODO(711) The hack of replacing the inline runtime function
+  // RandomHeapNumber with GetFrameNumber does not work with the way the full
+  // compiler generates inline runtime calls.
+  i::FLAG_always_full_compiler = false;
+
   TickSample sample;
   InitTraceEnv(&sample);
 
@@ -313,10 +302,8 @@
   //           StackTracer::Trace
   CHECK_GT(sample.frames_count, 1);
   // Stack tracing will start from the first JS function, i.e. "JSFuncDoTrace"
-  CheckRetAddrIsInJSFunction("JSFuncDoTrace",
-                             sample.stack[0]);
-  CheckRetAddrIsInJSFunction("JSTrace",
-                             sample.stack[1]);
+  CheckObjectIsJSFunction("JSFuncDoTrace", sample.stack[0]);
+  CheckObjectIsJSFunction("JSTrace", sample.stack[1]);
 }
 
 
@@ -326,6 +313,11 @@
 // Top::c_entry_fp value. In this case, StackTracer uses passed frame
 // pointer value as a starting point for stack walking.
 TEST(PureJSStackTrace) {
+  // TODO(711) The hack of replacing the inline runtime function
+  // RandomHeapNumber with GetFrameNumber does not work with the way the full
+  // compiler generates inline runtime calls.
+  i::FLAG_always_full_compiler = false;
+
   TickSample sample;
   InitTraceEnv(&sample);
 
@@ -359,10 +351,8 @@
            sample.function);
   CHECK_GT(sample.frames_count, 1);
   // Stack sampling will start from the caller of JSFuncDoTrace, i.e. "JSTrace"
-  CheckRetAddrIsInJSFunction("JSTrace",
-                             sample.stack[0]);
-  CheckRetAddrIsInJSFunction("OuterJSTrace",
-                             sample.stack[1]);
+  CheckObjectIsJSFunction("JSTrace", sample.stack[0]);
+  CheckObjectIsJSFunction("OuterJSTrace", sample.stack[1]);
 }
 
 
diff --git a/test/cctest/test-macro-assembler-x64.cc b/test/cctest/test-macro-assembler-x64.cc
index 8924ba7..dd97498 100755
--- a/test/cctest/test-macro-assembler-x64.cc
+++ b/test/cctest/test-macro-assembler-x64.cc
@@ -61,6 +61,7 @@
 using v8::internal::r13;
 using v8::internal::r14;
 using v8::internal::r15;
+using v8::internal::times_pointer_size;
 using v8::internal::FUNCTION_CAST;
 using v8::internal::CodeDesc;
 using v8::internal::less_equal;
@@ -75,6 +76,8 @@
 using v8::internal::Smi;
 using v8::internal::kSmiTagMask;
 using v8::internal::kSmiValueSize;
+using v8::internal::kPointerSize;
+using v8::internal::kIntSize;
 
 // Test the x64 assembler by compiling some simple functions into
 // a buffer and executing them.  These tests do not initialize the
@@ -2053,4 +2056,358 @@
 }
 
 
+TEST(OperandOffset) {
+  int data[256];
+  for (int i = 0; i < 256; i++) { data[i] = i * 0x01010101; }
+
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+                                      &actual_size,
+                                      true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, static_cast<int>(actual_size));
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  __ push(r12);
+  __ push(r13);
+  __ push(rbx);
+  __ push(rbp);
+  __ push(Immediate(0x100));  // <-- rbp
+  __ movq(rbp, rsp);
+  __ push(Immediate(0x101));
+  __ push(Immediate(0x102));
+  __ push(Immediate(0x103));
+  __ push(Immediate(0x104));
+  __ push(Immediate(0x105));  // <-- rbx
+  __ push(Immediate(0x106));
+  __ push(Immediate(0x107));
+  __ push(Immediate(0x108));
+  __ push(Immediate(0x109));  // <-- rsp
+  // rbp points at the 0x100 slot (rsp[9]).
+  // r12 points at the 0x106 slot (rsp[3]).
+  // rbx points at the 0x105 slot (rsp[4]).
+  // r13 points at the 0x103 slot (rsp[6]).
+  __ lea(r12, Operand(rsp, 3 * kPointerSize));
+  __ lea(r13, Operand(rbp, -3 * kPointerSize));
+  __ lea(rbx, Operand(rbp, -5 * kPointerSize));
+  __ movl(rcx, Immediate(2));
+  __ movq(r8, reinterpret_cast<uintptr_t>(&data[128]), RelocInfo::NONE);
+  __ movl(rax, Immediate(1));
+
+  Operand sp0 = Operand(rsp, 0);
+
+  // Test 1.
+  __ movl(rdx, sp0);  // Sanity check.
+  __ cmpl(rdx, Immediate(0x109));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  // Test 2.
+  // Zero to non-zero displacement.
+  __ movl(rdx, Operand(sp0, 2 * kPointerSize));
+  __ cmpl(rdx, Immediate(0x107));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  Operand sp2 = Operand(rsp, 2 * kPointerSize);
+
+  // Test 3.
+  __ movl(rdx, sp2);  // Sanity check.
+  __ cmpl(rdx, Immediate(0x107));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(sp2, 2 * kPointerSize));
+  __ cmpl(rdx, Immediate(0x105));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  // Non-zero to zero displacement.
+  __ movl(rdx, Operand(sp2, -2 * kPointerSize));
+  __ cmpl(rdx, Immediate(0x109));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  Operand sp2c2 = Operand(rsp, rcx, times_pointer_size, 2 * kPointerSize);
+
+  // Test 6.
+  __ movl(rdx, sp2c2);  // Sanity check.
+  __ cmpl(rdx, Immediate(0x105));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(sp2c2, 2 * kPointerSize));
+  __ cmpl(rdx, Immediate(0x103));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  // Non-zero to zero displacement.
+  __ movl(rdx, Operand(sp2c2, -2 * kPointerSize));
+  __ cmpl(rdx, Immediate(0x107));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+
+  Operand bp0 = Operand(rbp, 0);
+
+  // Test 9.
+  __ movl(rdx, bp0);  // Sanity check.
+  __ cmpl(rdx, Immediate(0x100));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  // Zero to non-zero displacement.
+  __ movl(rdx, Operand(bp0, -2 * kPointerSize));
+  __ cmpl(rdx, Immediate(0x102));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  Operand bp2 = Operand(rbp, -2 * kPointerSize);
+
+  // Test 11.
+  __ movl(rdx, bp2);  // Sanity check.
+  __ cmpl(rdx, Immediate(0x102));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  // Non-zero to zero displacement.
+  __ movl(rdx, Operand(bp2, 2 * kPointerSize));
+  __ cmpl(rdx, Immediate(0x100));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(bp2, -2 * kPointerSize));
+  __ cmpl(rdx, Immediate(0x104));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  Operand bp2c4 = Operand(rbp, rcx, times_pointer_size, -4 * kPointerSize);
+
+  // Test 14.
+  __ movl(rdx, bp2c4);  // Sanity check.
+  __ cmpl(rdx, Immediate(0x102));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(bp2c4, 2 * kPointerSize));
+  __ cmpl(rdx, Immediate(0x100));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(bp2c4, -2 * kPointerSize));
+  __ cmpl(rdx, Immediate(0x104));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  Operand bx0 = Operand(rbx, 0);
+
+  // Test 17.
+  __ movl(rdx, bx0);  // Sanity check.
+  __ cmpl(rdx, Immediate(0x105));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(bx0, 5 * kPointerSize));
+  __ cmpl(rdx, Immediate(0x100));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(bx0, -4 * kPointerSize));
+  __ cmpl(rdx, Immediate(0x109));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  Operand bx2 = Operand(rbx, 2 * kPointerSize);
+
+  // Test 20.
+  __ movl(rdx, bx2);  // Sanity check.
+  __ cmpl(rdx, Immediate(0x103));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(bx2, 2 * kPointerSize));
+  __ cmpl(rdx, Immediate(0x101));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  // Non-zero to zero displacement.
+  __ movl(rdx, Operand(bx2, -2 * kPointerSize));
+  __ cmpl(rdx, Immediate(0x105));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  Operand bx2c2 = Operand(rbx, rcx, times_pointer_size, -2 * kPointerSize);
+
+  // Test 23.
+  __ movl(rdx, bx2c2);  // Sanity check.
+  __ cmpl(rdx, Immediate(0x105));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(bx2c2, 2 * kPointerSize));
+  __ cmpl(rdx, Immediate(0x103));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(bx2c2, -2 * kPointerSize));
+  __ cmpl(rdx, Immediate(0x107));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  Operand r80 = Operand(r8, 0);
+
+  // Test 26.
+  __ movl(rdx, r80);  // Sanity check.
+  __ cmpl(rdx, Immediate(0x80808080));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r80, -8 * kIntSize));
+  __ cmpl(rdx, Immediate(0x78787878));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r80, 8 * kIntSize));
+  __ cmpl(rdx, Immediate(0x88888888));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r80, -64 * kIntSize));
+  __ cmpl(rdx, Immediate(0x40404040));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r80, 64 * kIntSize));
+  __ cmpl(rdx, Immediate(0xC0C0C0C0));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  Operand r88 = Operand(r8, 8 * kIntSize);
+
+  // Test 31.
+  __ movl(rdx, r88);  // Sanity check.
+  __ cmpl(rdx, Immediate(0x88888888));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r88, -8 * kIntSize));
+  __ cmpl(rdx, Immediate(0x80808080));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r88, 8 * kIntSize));
+  __ cmpl(rdx, Immediate(0x90909090));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r88, -64 * kIntSize));
+  __ cmpl(rdx, Immediate(0x48484848));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r88, 64 * kIntSize));
+  __ cmpl(rdx, Immediate(0xC8C8C8C8));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+
+  Operand r864 = Operand(r8, 64 * kIntSize);
+
+  // Test 36.
+  __ movl(rdx, r864);  // Sanity check.
+  __ cmpl(rdx, Immediate(0xC0C0C0C0));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r864, -8 * kIntSize));
+  __ cmpl(rdx, Immediate(0xB8B8B8B8));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r864, 8 * kIntSize));
+  __ cmpl(rdx, Immediate(0xC8C8C8C8));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r864, -64 * kIntSize));
+  __ cmpl(rdx, Immediate(0x80808080));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r864, 32 * kIntSize));
+  __ cmpl(rdx, Immediate(0xE0E0E0E0));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  // 32-bit offset to 8-bit offset.
+  __ movl(rdx, Operand(r864, -60 * kIntSize));
+  __ cmpl(rdx, Immediate(0x84848484));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r864, 60 * kIntSize));
+  __ cmpl(rdx, Immediate(0xFCFCFCFC));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  // Test unaligned offsets.
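+  // Offsets of +/-126 still fit the 8-bit displacement encoding, while
+  // +/-254 forces the 32-bit form, so both encodings are exercised.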
+
+  // Test 43.
+  __ movl(rdx, Operand(r80, 2));
+  __ cmpl(rdx, Immediate(0x81818080));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r80, -2));
+  __ cmpl(rdx, Immediate(0x80807F7F));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r80, 126));
+  __ cmpl(rdx, Immediate(0xA0A09F9F));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r80, -126));
+  __ cmpl(rdx, Immediate(0x61616060));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r80, 254));
+  __ cmpl(rdx, Immediate(0xC0C0BFBF));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r80, -254));
+  __ cmpl(rdx, Immediate(0x41414040));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  // Success.
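+  // rax has tracked the test number throughout; a failing check jumps to
+  // &exit with that number still in rax, so a nonzero C++ return value
+  // identifies the failing check. Clear rax to signal success.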
+
+  __ movl(rax, Immediate(0));
+  __ bind(&exit);
+  __ lea(rsp, Operand(rbp, kPointerSize));
+  __ pop(rbp);
+  __ pop(rbx);
+  __ pop(r13);
+  __ pop(r12);
+  __ ret(0);
+
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
 #undef __
diff --git a/test/cctest/test-profile-generator.cc b/test/cctest/test-profile-generator.cc
index e5850c9..b438d25 100644
--- a/test/cctest/test-profile-generator.cc
+++ b/test/cctest/test-profile-generator.cc
@@ -19,22 +19,64 @@
 using i::ProfileGenerator;
 using i::SampleRateCalculator;
 using i::TickSample;
+using i::TokenEnumerator;
 using i::Vector;
 
 
+namespace v8 {
+namespace internal {
+
+class TokenEnumeratorTester {
+ public:
+  static i::List<bool>* token_removed(TokenEnumerator* te) {
+    return &te->token_removed_;
+  }
+};
+
+} }  // namespace v8::internal
+
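+// The tester above exposes TokenEnumerator's private token_removed_ list
+// (presumably via a friend declaration) so the test can observe when the
+// GC has cleared a token's weak handle.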
+TEST(TokenEnumerator) {
+  TokenEnumerator te;
+  CHECK_EQ(CodeEntry::kNoSecurityToken, te.GetTokenId(NULL));
+  v8::HandleScope hs;
+  v8::Local<v8::String> token1(v8::String::New("1"));
+  CHECK_EQ(0, te.GetTokenId(*v8::Utils::OpenHandle(*token1)));
+  CHECK_EQ(0, te.GetTokenId(*v8::Utils::OpenHandle(*token1)));
+  v8::Local<v8::String> token2(v8::String::New("2"));
+  CHECK_EQ(1, te.GetTokenId(*v8::Utils::OpenHandle(*token2)));
+  CHECK_EQ(1, te.GetTokenId(*v8::Utils::OpenHandle(*token2)));
+  CHECK_EQ(0, te.GetTokenId(*v8::Utils::OpenHandle(*token1)));
+  {
+    v8::HandleScope hs;
+    v8::Local<v8::String> token3(v8::String::New("3"));
+    CHECK_EQ(2, te.GetTokenId(*v8::Utils::OpenHandle(*token3)));
+    CHECK_EQ(1, te.GetTokenId(*v8::Utils::OpenHandle(*token2)));
+    CHECK_EQ(0, te.GetTokenId(*v8::Utils::OpenHandle(*token1)));
+  }
+  CHECK(!i::TokenEnumeratorTester::token_removed(&te)->at(2));
+  i::Heap::CollectAllGarbage(false);
+  CHECK(i::TokenEnumeratorTester::token_removed(&te)->at(2));
+  CHECK_EQ(1, te.GetTokenId(*v8::Utils::OpenHandle(*token2)));
+  CHECK_EQ(0, te.GetTokenId(*v8::Utils::OpenHandle(*token1)));
+}
+
+
 TEST(ProfileNodeFindOrAddChild) {
   ProfileNode node(NULL, NULL);
-  CodeEntry entry1(i::Logger::FUNCTION_TAG, "", "aaa", "", 0);
+  CodeEntry entry1(
+      i::Logger::FUNCTION_TAG, "", "aaa", "", 0, CodeEntry::kNoSecurityToken);
   ProfileNode* childNode1 = node.FindOrAddChild(&entry1);
   CHECK_NE(NULL, childNode1);
   CHECK_EQ(childNode1, node.FindOrAddChild(&entry1));
-  CodeEntry entry2(i::Logger::FUNCTION_TAG, "", "bbb", "", 0);
+  CodeEntry entry2(
+      i::Logger::FUNCTION_TAG, "", "bbb", "", 0, CodeEntry::kNoSecurityToken);
   ProfileNode* childNode2 = node.FindOrAddChild(&entry2);
   CHECK_NE(NULL, childNode2);
   CHECK_NE(childNode1, childNode2);
   CHECK_EQ(childNode1, node.FindOrAddChild(&entry1));
   CHECK_EQ(childNode2, node.FindOrAddChild(&entry2));
-  CodeEntry entry3(i::Logger::FUNCTION_TAG, "", "ccc", "", 0);
+  CodeEntry entry3(
+      i::Logger::FUNCTION_TAG, "", "ccc", "", 0, CodeEntry::kNoSecurityToken);
   ProfileNode* childNode3 = node.FindOrAddChild(&entry3);
   CHECK_NE(NULL, childNode3);
   CHECK_NE(childNode1, childNode3);
@@ -75,9 +117,12 @@
 }  // namespace
 
 TEST(ProfileTreeAddPathFromStart) {
-  CodeEntry entry1(i::Logger::FUNCTION_TAG, "", "aaa", "", 0);
-  CodeEntry entry2(i::Logger::FUNCTION_TAG, "", "bbb", "", 0);
-  CodeEntry entry3(i::Logger::FUNCTION_TAG, "", "ccc", "", 0);
+  CodeEntry entry1(
+      i::Logger::FUNCTION_TAG, "", "aaa", "", 0, CodeEntry::kNoSecurityToken);
+  CodeEntry entry2(
+      i::Logger::FUNCTION_TAG, "", "bbb", "", 0, CodeEntry::kNoSecurityToken);
+  CodeEntry entry3(
+      i::Logger::FUNCTION_TAG, "", "ccc", "", 0, CodeEntry::kNoSecurityToken);
   ProfileTree tree;
   ProfileTreeTestHelper helper(&tree);
   CHECK_EQ(NULL, helper.Walk(&entry1));
@@ -142,9 +187,12 @@
 
 
 TEST(ProfileTreeAddPathFromEnd) {
-  CodeEntry entry1(i::Logger::FUNCTION_TAG, "", "aaa", "", 0);
-  CodeEntry entry2(i::Logger::FUNCTION_TAG, "", "bbb", "", 0);
-  CodeEntry entry3(i::Logger::FUNCTION_TAG, "", "ccc", "", 0);
+  CodeEntry entry1(
+      i::Logger::FUNCTION_TAG, "", "aaa", "", 0, CodeEntry::kNoSecurityToken);
+  CodeEntry entry2(
+      i::Logger::FUNCTION_TAG, "", "bbb", "", 0, CodeEntry::kNoSecurityToken);
+  CodeEntry entry3(
+      i::Logger::FUNCTION_TAG, "", "ccc", "", 0, CodeEntry::kNoSecurityToken);
   ProfileTree tree;
   ProfileTreeTestHelper helper(&tree);
   CHECK_EQ(NULL, helper.Walk(&entry1));
@@ -222,11 +270,30 @@
   CHECK_EQ(1, empty_tree.root()->total_ticks());
   CHECK_EQ(1, empty_tree.root()->self_ticks());
 
-  CodeEntry entry1(i::Logger::FUNCTION_TAG, "", "aaa", "", 0);
-  CodeEntry entry2(i::Logger::FUNCTION_TAG, "", "bbb", "", 0);
+  CodeEntry entry1(
+      i::Logger::FUNCTION_TAG, "", "aaa", "", 0, CodeEntry::kNoSecurityToken);
   CodeEntry* e1_path[] = {&entry1};
   Vector<CodeEntry*> e1_path_vec(
       e1_path, sizeof(e1_path) / sizeof(e1_path[0]));
+
+  ProfileTree single_child_tree;
+  single_child_tree.AddPathFromStart(e1_path_vec);
+  single_child_tree.root()->IncrementSelfTicks();
+  CHECK_EQ(0, single_child_tree.root()->total_ticks());
+  CHECK_EQ(1, single_child_tree.root()->self_ticks());
+  ProfileTreeTestHelper single_child_helper(&single_child_tree);
+  ProfileNode* node1 = single_child_helper.Walk(&entry1);
+  CHECK_NE(NULL, node1);
+  CHECK_EQ(0, node1->total_ticks());
+  CHECK_EQ(1, node1->self_ticks());
+  single_child_tree.CalculateTotalTicks();
+  CHECK_EQ(2, single_child_tree.root()->total_ticks());
+  CHECK_EQ(1, single_child_tree.root()->self_ticks());
+  CHECK_EQ(1, node1->total_ticks());
+  CHECK_EQ(1, node1->self_ticks());
+
+  CodeEntry entry2(
+      i::Logger::FUNCTION_TAG, "", "bbb", "", 0, CodeEntry::kNoSecurityToken);
   CodeEntry* e1_e2_path[] = {&entry1, &entry2};
   Vector<CodeEntry*> e1_e2_path_vec(
       e1_e2_path, sizeof(e1_e2_path) / sizeof(e1_e2_path[0]));
@@ -241,7 +308,7 @@
   // Results in {root,0,0} -> {entry1,0,2} -> {entry2,0,3}
   CHECK_EQ(0, flat_tree.root()->total_ticks());
   CHECK_EQ(0, flat_tree.root()->self_ticks());
-  ProfileNode* node1 = flat_helper.Walk(&entry1);
+  node1 = flat_helper.Walk(&entry1);
   CHECK_NE(NULL, node1);
   CHECK_EQ(0, node1->total_ticks());
   CHECK_EQ(2, node1->self_ticks());
@@ -261,7 +328,8 @@
   CodeEntry* e2_path[] = {&entry2};
   Vector<CodeEntry*> e2_path_vec(
       e2_path, sizeof(e2_path) / sizeof(e2_path[0]));
-  CodeEntry entry3(i::Logger::FUNCTION_TAG, "", "ccc", "", 0);
+  CodeEntry entry3(
+      i::Logger::FUNCTION_TAG, "", "ccc", "", 0, CodeEntry::kNoSecurityToken);
   CodeEntry* e3_path[] = {&entry3};
   Vector<CodeEntry*> e3_path_vec(
       e3_path, sizeof(e3_path) / sizeof(e3_path[0]));
@@ -316,16 +384,119 @@
 }
 
 
+TEST(ProfileTreeFilteredClone) {
+  ProfileTree source_tree;
+  const int token0 = 0, token1 = 1, token2 = 2;
+  CodeEntry entry1(i::Logger::FUNCTION_TAG, "", "aaa", "", 0, token0);
+  CodeEntry entry2(i::Logger::FUNCTION_TAG, "", "bbb", "", 0, token1);
+  CodeEntry entry3(i::Logger::FUNCTION_TAG, "", "ccc", "", 0, token0);
+  CodeEntry entry4(
+      i::Logger::FUNCTION_TAG, "", "ddd", "", 0,
+      CodeEntry::kInheritsSecurityToken);
+
+  {
+    CodeEntry* e1_e2_path[] = {&entry1, &entry2};
+    Vector<CodeEntry*> e1_e2_path_vec(
+        e1_e2_path, sizeof(e1_e2_path) / sizeof(e1_e2_path[0]));
+    source_tree.AddPathFromStart(e1_e2_path_vec);
+    CodeEntry* e2_e4_path[] = {&entry2, &entry4};
+    Vector<CodeEntry*> e2_e4_path_vec(
+        e2_e4_path, sizeof(e2_e4_path) / sizeof(e2_e4_path[0]));
+    source_tree.AddPathFromStart(e2_e4_path_vec);
+    CodeEntry* e3_e1_path[] = {&entry3, &entry1};
+    Vector<CodeEntry*> e3_e1_path_vec(
+        e3_e1_path, sizeof(e3_e1_path) / sizeof(e3_e1_path[0]));
+    source_tree.AddPathFromStart(e3_e1_path_vec);
+    CodeEntry* e3_e2_path[] = {&entry3, &entry2};
+    Vector<CodeEntry*> e3_e2_path_vec(
+        e3_e2_path, sizeof(e3_e2_path) / sizeof(e3_e2_path[0]));
+    source_tree.AddPathFromStart(e3_e2_path_vec);
+    source_tree.CalculateTotalTicks();
+    // Results in               -> {entry1,0,1,0} -> {entry2,1,1,1}
+    //            {root,0,4,-1} -> {entry2,0,1,1} -> {entry4,1,1,inherits}
+    //                          -> {entry3,0,2,0} -> {entry1,1,1,0}
+    //                                            -> {entry2,1,1,1}
+    CHECK_EQ(4, source_tree.root()->total_ticks());
+    CHECK_EQ(0, source_tree.root()->self_ticks());
+  }
+
+  {
+    ProfileTree token0_tree;
+    token0_tree.FilteredClone(&source_tree, token0);
+    // Should be                -> {entry1,1,1,0}
+    //            {root,1,4,-1} -> {entry3,1,2,0} -> {entry1,1,1,0}
+    // [self ticks from filtered nodes are attributed to their parents]
+    CHECK_EQ(4, token0_tree.root()->total_ticks());
+    CHECK_EQ(1, token0_tree.root()->self_ticks());
+    ProfileTreeTestHelper token0_helper(&token0_tree);
+    ProfileNode* node1 = token0_helper.Walk(&entry1);
+    CHECK_NE(NULL, node1);
+    CHECK_EQ(1, node1->total_ticks());
+    CHECK_EQ(1, node1->self_ticks());
+    CHECK_EQ(NULL, token0_helper.Walk(&entry2));
+    ProfileNode* node3 = token0_helper.Walk(&entry3);
+    CHECK_NE(NULL, node3);
+    CHECK_EQ(2, node3->total_ticks());
+    CHECK_EQ(1, node3->self_ticks());
+    ProfileNode* node3_1 = token0_helper.Walk(&entry3, &entry1);
+    CHECK_NE(NULL, node3_1);
+    CHECK_EQ(1, node3_1->total_ticks());
+    CHECK_EQ(1, node3_1->self_ticks());
+    CHECK_EQ(NULL, token0_helper.Walk(&entry3, &entry2));
+  }
+
+  {
+    ProfileTree token1_tree;
+    token1_tree.FilteredClone(&source_tree, token1);
+    // Should be
+    //            {root,1,4,-1} -> {entry2,2,3,1} -> {entry4,1,1,inherits}
+    // [child nodes referring to the same entry get merged and
+    //  their self times summed up]
+    CHECK_EQ(4, token1_tree.root()->total_ticks());
+    CHECK_EQ(1, token1_tree.root()->self_ticks());
+    ProfileTreeTestHelper token1_helper(&token1_tree);
+    CHECK_EQ(NULL, token1_helper.Walk(&entry1));
+    CHECK_EQ(NULL, token1_helper.Walk(&entry3));
+    ProfileNode* node2 = token1_helper.Walk(&entry2);
+    CHECK_NE(NULL, node2);
+    CHECK_EQ(3, node2->total_ticks());
+    CHECK_EQ(2, node2->self_ticks());
+    ProfileNode* node2_4 = token1_helper.Walk(&entry2, &entry4);
+    CHECK_NE(NULL, node2_4);
+    CHECK_EQ(1, node2_4->total_ticks());
+    CHECK_EQ(1, node2_4->self_ticks());
+  }
+
+  {
+    ProfileTree token2_tree;
+    token2_tree.FilteredClone(&source_tree, token2);
+    // Should be
+    //            {root,4,4,-1}
+    // [no nodes, all ticks get migrated into root node]
+    CHECK_EQ(4, token2_tree.root()->total_ticks());
+    CHECK_EQ(4, token2_tree.root()->self_ticks());
+    ProfileTreeTestHelper token2_helper(&token2_tree);
+    CHECK_EQ(NULL, token2_helper.Walk(&entry1));
+    CHECK_EQ(NULL, token2_helper.Walk(&entry2));
+    CHECK_EQ(NULL, token2_helper.Walk(&entry3));
+  }
+}
+
+
 static inline i::Address ToAddress(int n) {
   return reinterpret_cast<i::Address>(n);
 }
 
 TEST(CodeMapAddCode) {
   CodeMap code_map;
-  CodeEntry entry1(i::Logger::FUNCTION_TAG, "", "aaa", "", 0);
-  CodeEntry entry2(i::Logger::FUNCTION_TAG, "", "bbb", "", 0);
-  CodeEntry entry3(i::Logger::FUNCTION_TAG, "", "ccc", "", 0);
-  CodeEntry entry4(i::Logger::FUNCTION_TAG, "", "ddd", "", 0);
+  CodeEntry entry1(
+      i::Logger::FUNCTION_TAG, "", "aaa", "", 0, CodeEntry::kNoSecurityToken);
+  CodeEntry entry2(
+      i::Logger::FUNCTION_TAG, "", "bbb", "", 0, CodeEntry::kNoSecurityToken);
+  CodeEntry entry3(
+      i::Logger::FUNCTION_TAG, "", "ccc", "", 0, CodeEntry::kNoSecurityToken);
+  CodeEntry entry4(
+      i::Logger::FUNCTION_TAG, "", "ddd", "", 0, CodeEntry::kNoSecurityToken);
   code_map.AddCode(ToAddress(0x1500), &entry1, 0x200);
   code_map.AddCode(ToAddress(0x1700), &entry2, 0x100);
   code_map.AddCode(ToAddress(0x1900), &entry3, 0x50);
@@ -352,8 +523,10 @@
 
 TEST(CodeMapMoveAndDeleteCode) {
   CodeMap code_map;
-  CodeEntry entry1(i::Logger::FUNCTION_TAG, "", "aaa", "", 0);
-  CodeEntry entry2(i::Logger::FUNCTION_TAG, "", "bbb", "", 0);
+  CodeEntry entry1(
+      i::Logger::FUNCTION_TAG, "", "aaa", "", 0, CodeEntry::kNoSecurityToken);
+  CodeEntry entry2(
+      i::Logger::FUNCTION_TAG, "", "bbb", "", 0, CodeEntry::kNoSecurityToken);
   code_map.AddCode(ToAddress(0x1500), &entry1, 0x200);
   code_map.AddCode(ToAddress(0x1700), &entry2, 0x100);
   CHECK_EQ(&entry1, code_map.FindEntry(ToAddress(0x1500)));
@@ -425,7 +598,8 @@
   sample3.frames_count = 2;
   generator.RecordTickSample(sample3);
 
-  CpuProfile* profile = profiles.StopProfiling("", 1);
+  CpuProfile* profile =
+      profiles.StopProfiling(CodeEntry::kNoSecurityToken, "", 1);
   CHECK_NE(NULL, profile);
   ProfileTreeTestHelper top_down_test_helper(profile->top_down());
   CHECK_EQ(NULL, top_down_test_helper.Walk(entry2));
diff --git a/test/cctest/test-thread-termination.cc b/test/cctest/test-thread-termination.cc
index 83a1e19..aed7466 100644
--- a/test/cctest/test-thread-termination.cc
+++ b/test/cctest/test-thread-termination.cc
@@ -308,3 +308,48 @@
   v8::Script::Compile(source)->Run();
   context.Dispose();
 }
+
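+// Presumably installed as the global 'loop' function by CreateGlobalTemplate
+// below, alongside TerminateCurrentThread as 'terminate'.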
+v8::Handle<v8::Value> ReenterAfterTermination(const v8::Arguments& args) {
+  v8::TryCatch try_catch;
+  CHECK(!v8::V8::IsExecutionTerminating());
+  v8::Script::Compile(v8::String::New("function f() {"
+                                      "  var term = true;"
+                                      "  try {"
+                                      "    while(true) {"
+                                      "      if (term) terminate();"
+                                      "      term = false;"
+                                      "    }"
+                                      "    fail();"
+                                      "  } catch(e) {"
+                                      "    fail();"
+                                      "  }"
+                                      "}"
+                                      "f()"))->Run();
+  CHECK(try_catch.HasCaught());
+  CHECK(try_catch.Exception()->IsNull());
+  CHECK(try_catch.Message().IsEmpty());
+  CHECK(!try_catch.CanContinue());
+  CHECK(v8::V8::IsExecutionTerminating());
+  v8::Script::Compile(v8::String::New("function f() { fail(); } f()"))->Run();
+  return v8::Undefined();
+}
+
+// Test that reentry into V8 while the termination exception is still pending
+// (has not yet unwound the 0-level JS frame) does not crash.
+TEST(TerminateAndReenterFromThreadItself) {
+  v8::HandleScope scope;
+  v8::Handle<v8::ObjectTemplate> global =
+      CreateGlobalTemplate(TerminateCurrentThread, ReenterAfterTermination);
+  v8::Persistent<v8::Context> context = v8::Context::New(NULL, global);
+  v8::Context::Scope context_scope(context);
+  CHECK(!v8::V8::IsExecutionTerminating());
+  v8::Handle<v8::String> source =
+      v8::String::New("try { loop(); fail(); } catch(e) { fail(); }");
+  v8::Script::Compile(source)->Run();
+  CHECK(!v8::V8::IsExecutionTerminating());
+  // Check we can run JS again after termination.
+  CHECK(v8::Script::Compile(v8::String::New("function f() { return true; }"
+                                            "f()"))->Run()->IsTrue());
+  context.Dispose();
+}
+
diff --git a/test/cctest/test-unbound-queue.cc b/test/cctest/test-unbound-queue.cc
new file mode 100644
index 0000000..df5509e
--- /dev/null
+++ b/test/cctest/test-unbound-queue.cc
@@ -0,0 +1,54 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+//
+// Tests of the unbound queue.
+
+#include "v8.h"
+#include "unbound-queue-inl.h"
+#include "cctest.h"
+
+namespace i = v8::internal;
+
+using i::UnboundQueue;
+
+
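+// UnboundQueue is presumably a single-producer, single-consumer queue with
+// no capacity bound; these tests only check FIFO ordering from one thread.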
+TEST(SingleRecord) {
+  typedef int Record;
+  UnboundQueue<Record> cq;
+  CHECK(cq.IsEmpty());
+  cq.Enqueue(1);
+  CHECK(!cq.IsEmpty());
+  Record rec = 0;
+  cq.Dequeue(&rec);
+  CHECK_EQ(1, rec);
+  CHECK(cq.IsEmpty());
+}
+
+
+TEST(MultipleRecords) {
+  typedef int Record;
+  UnboundQueue<Record> cq;
+  CHECK(cq.IsEmpty());
+  cq.Enqueue(1);
+  CHECK(!cq.IsEmpty());
+  for (int i = 2; i <= 5; ++i) {
+    cq.Enqueue(i);
+    CHECK(!cq.IsEmpty());
+  }
+  Record rec = 0;
+  for (int i = 1; i <= 4; ++i) {
+    CHECK(!cq.IsEmpty());
+    cq.Dequeue(&rec);
+    CHECK_EQ(i, rec);
+  }
+  for (int i = 6; i <= 12; ++i) {
+    cq.Enqueue(i);
+    CHECK(!cq.IsEmpty());
+  }
+  for (int i = 5; i <= 12; ++i) {
+    CHECK(!cq.IsEmpty());
+    cq.Dequeue(&rec);
+    CHECK_EQ(i, rec);
+  }
+  CHECK(cq.IsEmpty());
+}
+
diff --git a/test/mjsunit/compiler/assignment.js b/test/mjsunit/compiler/assignment.js
index ee2d323..6aded4e 100644
--- a/test/mjsunit/compiler/assignment.js
+++ b/test/mjsunit/compiler/assignment.js
@@ -262,3 +262,15 @@
 }
 
 bar_loop();
+
+
+// Test for assignment using a keyed store ic:
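+// The object key is converted with ToString, so the store below writes i
+// at i["[object Object]"].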
+function store_i_in_element_i_of_object_i() {
+  var i = new Object();
+  i[i] = i;
+}
+
+// Run three times to exercise caches.
+store_i_in_element_i_of_object_i();
+store_i_in_element_i_of_object_i();
+store_i_in_element_i_of_object_i();
diff --git a/test/mjsunit/object-define-property.js b/test/mjsunit/object-define-property.js
index 43b1c7f..46bfb34 100644
--- a/test/mjsunit/object-define-property.js
+++ b/test/mjsunit/object-define-property.js
@@ -53,36 +53,46 @@
   assertTrue(/called on non-object/.test(e));
 }
 
-// Object
+// Object.
 var obj1 = {};
 
-// Values
+// Values.
 var val1 = 0;
 var val2 = 0;
 var val3 = 0;
 
-// Descriptors
+function setter1() { val1++; }
+function getter1() { return val1; }
+
+function setter2() { val2++; }
+function getter2() { return val2; }
+
+function setter3() { val3++; }
+function getter3() { return val3; }
+
+
+// Descriptors.
 var emptyDesc = {};
 
 var accessorConfigurable = { 
-    set: function() { val1++; },
-    get: function() { return val1; },
+    set: setter1,
+    get: getter1,
     configurable: true
 };
 
 var accessorNoConfigurable = {
-    set: function() { val2++; },
-    get: function() { return val2; },
+    set: setter2,
+    get: getter2,
     configurable: false 
 };
 
 var accessorOnlySet = {
-  set: function() { val3++; },
+  set: setter3,
   configurable: true
 };
 
 var accessorOnlyGet = {
-  get: function() { return val3; },
+  get: getter3,
   configurable: true
 };
 
@@ -200,7 +210,7 @@
 assertEquals(4, val2);
 assertEquals(4, obj1.bar);
 
-// Define an accessor that has only a setter
+// Define an accessor that has only a setter.
 Object.defineProperty(obj1, "setOnly", accessorOnlySet);
 desc = Object.getOwnPropertyDescriptor(obj1, "setOnly");
 assertTrue(desc.configurable);
@@ -212,7 +222,7 @@
 assertEquals(1, obj1.setOnly = 1);
 assertEquals(1, val3);
 
-// Add a getter - should not touch the setter
+// Add a getter - should not touch the setter.
 Object.defineProperty(obj1, "setOnly", accessorOnlyGet);
 desc = Object.getOwnPropertyDescriptor(obj1, "setOnly");
 assertTrue(desc.configurable);
@@ -256,7 +266,7 @@
 assertEquals(obj1.foobar, 1000);
 
 
-// Redefine to writable descriptor - now writing to foobar should be allowed
+// Redefine to writable descriptor - now writing to foobar should be allowed.
 Object.defineProperty(obj1, "foobar", dataWritable);
 desc = Object.getOwnPropertyDescriptor(obj1, "foobar");
 assertEquals(obj1.foobar, 3000);
@@ -279,7 +289,7 @@
 assertEquals(obj1.foobar, 2000);
 assertEquals(desc.value, 2000);
 assertFalse(desc.configurable);
-assertFalse(desc.writable);
+assertTrue(desc.writable);
 assertFalse(desc.enumerable);
 assertEquals(desc.get, undefined);
 assertEquals(desc.set, undefined);
@@ -307,7 +317,7 @@
 assertEquals(obj1.foobar, 2000);
 assertEquals(desc.value, 2000);
 assertFalse(desc.configurable);
-assertFalse(desc.writable);
+assertTrue(desc.writable);
 assertFalse(desc.enumerable);
 assertEquals(desc.get, undefined);
 assertEquals(desc.set, undefined);
@@ -375,7 +385,7 @@
 
 
 // Redefinition of an accessor defined using __defineGetter__ and 
-// __defineSetter__
+// __defineSetter__.
 function get(){return this.x}
 function set(x){this.x=x};
 
@@ -442,7 +452,7 @@
 assertEquals(5, val1);
 assertEquals(5, obj4.bar);
 
-// Make sure an error is thrown when trying to access to redefined function
+// Make sure an error is thrown when trying to access to redefined function.
 try {
   obj4.bar();
   assertTrue(false);
@@ -453,7 +463,7 @@
 
 // Test runtime calls to DefineOrRedefineDataProperty and
 // DefineOrRedefineAccessorProperty - make sure we don't 
-// crash
+// crash.
 try {
   %DefineOrRedefineAccessorProperty(0, 0, 0, 0, 0);
 } catch (e) {
@@ -497,3 +507,210 @@
 } catch (e) {
   assertTrue(/illegal access/.test(e));
 }
+
+// Test that all possible differences in step 6 of DefineOwnProperty are
+// exercised, i.e., any difference between the given property descriptor and
+// the existing property must not silently succeed, but must throw an error
+// when the existing property is non-configurable.
+
+var obj5 = {};
+// Enumerable will default to false.
+Object.defineProperty(obj5, 'foo', accessorNoConfigurable);
+desc = Object.getOwnPropertyDescriptor(obj5, 'foo');
+// First, test that we are actually allowed to set the accessor if all
+// values of the descriptor are the same as the existing one.
+Object.defineProperty(obj5, 'foo', accessorNoConfigurable);
+
+// Different setter.
+var descDifferent = {
+  configurable:false,
+  enumerable:false,
+  set: setter1,
+  get: getter2
+};
+
+try {
+  Object.defineProperty(obj5, 'foo', descDifferent);
+  assertTrue(false);
+} catch (e) {
+  assertTrue(/Cannot redefine property/.test(e));
+}
+
+// Different getter.
+descDifferent = {
+  configurable:false,
+  enumerable:false,
+  set: setter2,
+  get: getter1
+};
+
+try {
+  Object.defineProperty(obj5, 'foo', descDifferent);
+  assertTrue(false);
+} catch (e) {
+  assertTrue(/Cannot redefine property/.test(e));
+}
+
+// Different enumerable.
+descDifferent = {
+  configurable:false,
+  enumerable:true,
+  set: setter2,
+  get: getter2
+};
+
+try {
+  Object.defineProperty(obj5, 'foo', descDifferent);
+  assertTrue(false);
+} catch (e) {
+  assertTrue(/Cannot redefine property/.test(e));
+}
+
+// Different configurable.
+descDifferent = {
+  configurable:true,
+  enumerable:false,
+  set: setter2,
+  get: getter2
+};
+
+try {
+  Object.defineProperty(obj5, 'foo', descDifferent);
+  assertTrue(false);
+} catch (e) {
+  assertTrue(/Cannot redefine property/.test(e));
+}
+
+// No difference.
+descDifferent = {
+  configurable:false,
+  enumerable:false,
+  set: setter2,
+  get: getter2
+};
+// Make sure we can still redefine if all properties are the same.
+Object.defineProperty(obj5, 'foo', descDifferent);
+
+// Make sure that obj5 still holds the original values.
+desc = Object.getOwnPropertyDescriptor(obj5, 'foo');
+assertEquals(desc.get, getter2);
+assertEquals(desc.set, setter2);
+assertFalse(desc.enumerable);
+assertFalse(desc.configurable);
+
+
+// Also exercise step 6 on a data property; writable and enumerable
+// default to false.
+Object.defineProperty(obj5, 'bar', dataNoConfigurable);
+
+// Test that redefinition with the same property descriptor is possible.
+Object.defineProperty(obj5, 'bar', dataNoConfigurable);
+
+// Different value.
+descDifferent = {
+  configurable:false,
+  enumerable:false,
+  writable: false,
+  value: 1999
+};
+
+try {
+  Object.defineProperty(obj5, 'bar', descDifferent);
+  assertTrue(false);
+} catch (e) {
+  assertTrue(/Cannot redefine property/.test(e));
+}
+
+// Different writable.
+descDifferent = {
+  configurable:false,
+  enumerable:false,
+  writable: true,
+  value: 2000
+};
+
+try {
+  Object.defineProperty(obj5, 'bar', descDifferent);
+  assertTrue(false);
+} catch (e) {
+  assertTrue(/Cannot redefine property/.test(e));
+}
+
+
+// Different enumerable.
+descDifferent = {
+  configurable:false,
+  enumerable:true,
+  writable:false,
+  value: 2000
+};
+
+try {
+  Object.defineProperty(obj5, 'bar', descDifferent);
+  assertTrue(false);
+} catch (e) {
+  assertTrue(/Cannot redefine property/.test(e));
+}
+
+
+// Different configurable.
+descDifferent = {
+  configurable:true,
+  enumerable:false,
+  writable:false,
+  value: 2000
+};
+
+try {
+  Object.defineProperty(obj5, 'bar', descDifferent);
+  assertTrue(false);
+} catch (e) {
+  assertTrue(/Cannot redefine property/.test(e));
+}
+
+// No difference.
+descDifferent = {
+  configurable:false,
+  enumerable:false,
+  writable:false,
+  value:2000
+};
+// Make sure we can still redefine if all properties are the same.
+Object.defineProperty(obj5, 'bar', descDifferent);
+
+// Make sure that obj5 still holds the original values.
+desc = Object.getOwnPropertyDescriptor(obj5, 'bar');
+assertEquals(desc.value, 2000);
+assertFalse(desc.writable);
+assertFalse(desc.enumerable);
+assertFalse(desc.configurable);
+
+
+// Make sure that we can't overwrite +0 with -0 and vice versa.
+var descMinusZero = {value: -0, configurable: false};
+var descPlusZero = {value: +0, configurable: false};
+
+Object.defineProperty(obj5, 'minuszero', descMinusZero);
+
+// Make sure we can redefine with -0.
+Object.defineProperty(obj5, 'minuszero', descMinusZero);
+
+try {
+  Object.defineProperty(obj5, 'minuszero', descPlusZero);
+  assertUnreachable();
+} catch (e) {
+  assertTrue(/Cannot redefine property/.test(e));
+}
+
+
+Object.defineProperty(obj5, 'pluszero', descPlusZero);
+
+// Make sure we can redefine with +0.
+Object.defineProperty(obj5, 'pluszero', descPlusZero);
+
+try {
+  Object.defineProperty(obj5, 'pluszero', descMinusZero);
+  assertUnreachable();
+} catch (e) {
+  assertTrue(/Cannot redefine property/.test(e));
+}
diff --git a/test/mjsunit/regress/regress-712.js b/test/mjsunit/regress/regress-712.js
new file mode 100644
index 0000000..b26b94a
--- /dev/null
+++ b/test/mjsunit/regress/regress-712.js
@@ -0,0 +1,38 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This regression test ensures that calling Object.defineProperty with an
+// empty property descriptor on an existing non-configurable property does
+// not override that property.
+// See: http://code.google.com/p/v8/issues/detail?id=712
+
+var obj = {};
+Object.defineProperty(obj, "x", { get: function() { return "42"; },
+                                  configurable: false });
+assertEquals(obj.x, "42");
+Object.defineProperty(obj, 'x', {});
+assertEquals(obj.x, "42");
diff --git a/test/mjsunit/regress/regress-720.js b/test/mjsunit/regress/regress-720.js
new file mode 100644
index 0000000..97e1284
--- /dev/null
+++ b/test/mjsunit/regress/regress-720.js
@@ -0,0 +1,36 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This regression test is used to ensure that Object.defineProperty
+// keeps the existing value of the writable flag if none is given
+// in the provided descriptor. 
+// See: http://code.google.com/p/v8/issues/detail?id=720
+
+var o = {x: 10};
+Object.defineProperty(o, "x", {value: 5});
+var desc = Object.getOwnPropertyDescriptor(o, "x");
+assertTrue(desc["writable"]);
diff --git a/test/mjsunit/samevalue.js b/test/mjsunit/samevalue.js
new file mode 100644
index 0000000..6cb35e6
--- /dev/null
+++ b/test/mjsunit/samevalue.js
@@ -0,0 +1,102 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// Flags: --expose-natives_as natives
+// Test the SameValue internal method.
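+// Per ES5 9.12, SameValue agrees with === except that SameValue(NaN, NaN)
+// is true and SameValue(+0, -0) is false.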
+
+var obj1 = {x: 10, y: 11, z: "test"};
+var obj2 = {x: 10, y: 11, z: "test"};
+
+assertTrue(natives.SameValue(0, 0));
+assertTrue(natives.SameValue(+0, +0));
+assertTrue(natives.SameValue(-0, -0));
+assertTrue(natives.SameValue(1, 1));
+assertTrue(natives.SameValue(2, 2));
+assertTrue(natives.SameValue(-1, -1));
+assertTrue(natives.SameValue(0.5, 0.5));
+assertTrue(natives.SameValue(true, true));
+assertTrue(natives.SameValue(false, false));
+assertTrue(natives.SameValue(NaN, NaN));
+assertTrue(natives.SameValue(null, null));
+assertTrue(natives.SameValue("foo", "foo"));
+assertTrue(natives.SameValue(obj1, obj1));
+// Undefined values.
+assertTrue(natives.SameValue());
+assertTrue(natives.SameValue(undefined, undefined));
+
+assertFalse(natives.SameValue(0,1));
+assertFalse(natives.SameValue("foo", "bar"));
+assertFalse(natives.SameValue(obj1, obj2));
+assertFalse(natives.SameValue(true, false));
+
+assertFalse(natives.SameValue(obj1, true));
+assertFalse(natives.SameValue(obj1, "foo"));
+assertFalse(natives.SameValue(obj1, 1));
+assertFalse(natives.SameValue(obj1, undefined));
+assertFalse(natives.SameValue(obj1, NaN));
+
+assertFalse(natives.SameValue(undefined, true));
+assertFalse(natives.SameValue(undefined, "foo"));
+assertFalse(natives.SameValue(undefined, 1));
+assertFalse(natives.SameValue(undefined, obj1));
+assertFalse(natives.SameValue(undefined, NaN));
+
+assertFalse(natives.SameValue(NaN, true));
+assertFalse(natives.SameValue(NaN, "foo"));
+assertFalse(natives.SameValue(NaN, 1));
+assertFalse(natives.SameValue(NaN, obj1));
+assertFalse(natives.SameValue(NaN, undefined));
+
+assertFalse(natives.SameValue("foo", true));
+assertFalse(natives.SameValue("foo", 1));
+assertFalse(natives.SameValue("foo", obj1));
+assertFalse(natives.SameValue("foo", undefined));
+assertFalse(natives.SameValue("foo", NaN));
+
+assertFalse(natives.SameValue(true, 1));
+assertFalse(natives.SameValue(true, obj1));
+assertFalse(natives.SameValue(true, undefined));
+assertFalse(natives.SameValue(true, NaN));
+assertFalse(natives.SameValue(true, "foo"));
+
+assertFalse(natives.SameValue(1, true));
+assertFalse(natives.SameValue(1, obj1));
+assertFalse(natives.SameValue(1, undefined));
+assertFalse(natives.SameValue(1, NaN));
+assertFalse(natives.SameValue(1, "foo"));
+
+// Special string cases.
+assertFalse(natives.SameValue("1", 1));
+assertFalse(natives.SameValue("true", true));
+assertFalse(natives.SameValue("false", false));
+assertFalse(natives.SameValue("undefined", undefined));
+assertFalse(natives.SameValue("NaN", NaN));
+
+// -0 and +0 should be different.
+assertFalse(natives.SameValue(+0, -0));
+assertFalse(natives.SameValue(-0, +0));
diff --git a/test/mozilla/mozilla.status b/test/mozilla/mozilla.status
index c92bfa6..b4ec444 100644
--- a/test/mozilla/mozilla.status
+++ b/test/mozilla/mozilla.status
@@ -161,6 +161,7 @@
 # In Denmark the adjustment starts one week earlier!.
 # Tests based on shell that use dates in this gap are flaky.
 ecma/Date/15.9.5.10-1: PASS || FAIL
+ecma/Date/15.9.5.10-2: PASS || TIMEOUT if ($arch == arm && $mode == debug)
 ecma/Date/15.9.5.12-1: PASS || FAIL
 ecma/Date/15.9.5.14: PASS || FAIL
 ecma/Date/15.9.5.34-1: PASS || FAIL
diff --git a/tools/gc-nvp-trace-processor.py b/tools/gc-nvp-trace-processor.py
new file mode 100644
index 0000000..3721b01
--- /dev/null
+++ b/tools/gc-nvp-trace-processor.py
@@ -0,0 +1,282 @@
+#!/usr/bin/env python
+#
+# Copyright 2010 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+#
+# This is a utility for plotting charts based on GC traces produced by V8 when
+# run with flags --trace-gc --trace-gc-nvp. Relies on gnuplot for actual
+# plotting.
+#
+# Usage: gc-nvp-trace-processor.py <GC-trace-filename>
+#
+
+
+from __future__ import with_statement
+import sys, types, re, subprocess
+
+def flatten(l):
+  flat = []
+  for i in l: flat.extend(i)
+  return flat
+
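+# Parses one name=value GC trace line into a dict of ints, e.g.
+# split_nvp("pause=10 mark=7 sweep=3") -> {'pause': 10, 'mark': 7, 'sweep': 3}.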
+def split_nvp(s):
+  t = {}
+  for m in re.finditer(r"(\w+)=(-?\d+)", s):
+    t[m.group(1)] = int(m.group(2))
+  return t
+
+def parse_gc_trace(input):
+  trace = []
+  with open(input) as f:
+    for line in f:
+      info = split_nvp(line)
+      if info and 'pause' in info and info['pause'] > 0:
+        info['i'] = len(trace)
+        trace.append(info)
+  return trace
+
+def extract_field_names(script):
+  fields = { 'data': True, 'in': True }
+  field_count = len(fields)
+
+  for m in re.finditer(r"\$(\w+)", script):
+    field_name = m.group(1)
+    if field_name not in fields:
+      fields[field_name] = field_count
+      field_count = field_count + 1
+
+  return fields
+
+def gnuplot(script):
+  gnuplot = subprocess.Popen(["gnuplot"], stdin=subprocess.PIPE)
+  gnuplot.stdin.write(script)
+  gnuplot.stdin.close()
+  gnuplot.wait()
+
+x1y1 = 'x1y1'
+x1y2 = 'x1y2'
+x2y1 = 'x2y1'
+x2y2 = 'x2y2'
+
+class Item(object):
+  def __init__(self, title, field, axis = x1y1, **keywords):
+    self.title = title
+    self.axis = axis
+    self.props = keywords
+    if type(field) is types.ListType:
+      self.field = field
+    else:
+      self.field = [field]
+
+  def fieldrefs(self):
+    return self.field
+
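+  # Renders this item as one clause of a gnuplot 'plot' command, e.g.
+  #   "~datafile" using 1:2 title "GC Time" axis x1y1 with lines lc rgb "red"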
+  def to_gnuplot(self, context):
+    args = ['"%s"' % context.datafile,
+            'using %s' % context.format_fieldref(self.field),
+            'title "%s"' % self.title,
+            'axis %s' % self.axis]
+    if 'style' in self.props:
+      args.append('with %s' % self.props['style'])
+    if 'lc' in self.props:
+      args.append('lc rgb "%s"' % self.props['lc'])
+    if 'fs' in self.props:
+      args.append('fs %s' % self.props['fs'])
+    return ' '.join(args)
+
+class Plot(object):
+  def __init__(self, *items):
+    self.items = items
+
+  def fieldrefs(self):
+    return flatten([item.fieldrefs() for item in self.items])
+
+  def to_gnuplot(self, ctx):
+    return 'plot ' + ', '.join([item.to_gnuplot(ctx) for item in self.items])
+
+class Set(object):
+  def __init__(self, value):
+    self.value = value
+
+  def to_gnuplot(self, ctx):
+    return 'set ' + self.value
+
+  def fieldrefs(self):
+    return []
+
+class Context(object):
+  def __init__(self, datafile, field_to_index):
+    self.datafile = datafile
+    self.field_to_index = field_to_index
+
+  def format_fieldref(self, fieldref):
+    return ':'.join([str(self.field_to_index[field]) for field in fieldref])
+
+def collect_fields(plot):
+  field_to_index = {}
+  fields = []
+
+  def add_field(field):
+    if field not in field_to_index:
+      fields.append(field)
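+      # gnuplot column numbers are 1-based, so take the index after append.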
+      field_to_index[field] = len(fields)
+
+  for field in flatten([item.fieldrefs() for item in plot]):
+    add_field(field)
+
+  return (fields, field_to_index)
+
+def is_y2_used(plot):
+  for subplot in plot:
+    if isinstance(subplot, Plot):
+      for item in subplot.items:
+        if item.axis == x1y2 or item.axis == x2y2:
+          return True
+  return False
+
+def get_field(trace_line, field):
+  t = type(field)
+  if t is types.StringType:
+    return trace_line[field]
+  elif t is types.FunctionType:
+    return field(trace_line)
+
+def generate_datafile(datafile_name, trace, fields):
+  with open(datafile_name, 'w') as datafile:
+    for line in trace:
+      data_line = [str(get_field(line, field)) for field in fields]
+      datafile.write('\t'.join(data_line))
+      datafile.write('\n')
+
+def generate_script_and_datafile(plot, trace, datafile, output):
+  (fields, field_to_index) = collect_fields(plot)
+  generate_datafile(datafile, trace, fields)
+  script = [
+      'set terminal png',
+      'set output "%s"' % output,
+      'set autoscale',
+      'set ytics nomirror',
+      'set xtics nomirror',
+      'set key below'
+  ]
+
+  if is_y2_used(plot):
+    script.append('set autoscale y2')
+    script.append('set y2tics')
+
+  context = Context(datafile, field_to_index)
+
+  for item in plot:
+    script.append(item.to_gnuplot(context))
+
+  return '\n'.join(script)
+
+def plot_all(plots, trace, prefix):
+  charts = []
+
+  for plot in plots:
+    outfilename = "%s_%d.png" % (prefix, len(charts))
+    charts.append(outfilename)
+    script = generate_script_and_datafile(plot, trace, '~datafile', outfilename)
+    print 'Plotting %s...' % outfilename
+    gnuplot(script)
+
+  return charts
+
+def reclaimed_bytes(row):
+  return row['total_size_before'] - row['total_size_after']
+
+plots = [
+  [
+    Set('style fill solid 0.5 noborder'),
+    Set('style histogram rowstacked'),
+    Set('style data histograms'),
+    Plot(Item('Marking', 'mark', lc = 'purple'),
+         Item('Sweep', 'sweep', lc = 'blue'),
+         Item('Compaction', 'compact', lc = 'red'),
+         Item('Other',
+              lambda r: r['pause'] - r['mark'] - r['sweep'] - r['compact'],
+              lc = 'grey'))
+  ],
+  [
+    Set('style histogram rowstacked'),
+    Set('style data histograms'),
+    Plot(Item('Heap Size (before GC)', 'total_size_before', x1y2,
+              fs = 'solid 0.4 noborder',
+              lc = 'green'),
+         Item('Total holes (before GC)', 'holes_size_before', x1y2,
+              fs = 'solid 0.4 noborder',
+              lc = 'red'),
+         Item('GC Time', ['i', 'pause'], style = 'lines', lc = 'red'))
+  ],
+  [
+    Set('style histogram rowstacked'),
+    Set('style data histograms'),
+    Plot(Item('Heap Size (after GC)', 'total_size_after', x1y2,
+              fs = 'solid 0.4 noborder',
+              lc = 'green'),
+         Item('Total holes (after GC)', 'holes_size_after', x1y2,
+              fs = 'solid 0.4 noborder',
+              lc = 'red'),
+         Item('GC Time', ['i', 'pause'],
+              style = 'lines',
+              lc = 'red'))
+  ],
+  [
+    Set('style fill solid 0.5 noborder'),
+    Set('style data histograms'),
+    Plot(Item('Allocated', 'allocated'),
+         Item('Reclaimed', reclaimed_bytes),
+         Item('Promoted', 'promoted', style = 'lines', lc = 'black'))
+  ],
+]
+
+def process_trace(filename):
+  trace = parse_gc_trace(filename)
+  total_gc = reduce(lambda t,r: t + r['pause'], trace, 0)
+  max_gc = reduce(lambda t,r: max(t, r['pause']), trace, 0)
+  avg_gc = total_gc / len(trace)
+
+  charts = plot_all(plots, trace, filename)
+
+  with open(filename + '.html', 'w') as out:
+    out.write('<html><body>')
+    out.write('Total in GC: <b>%d</b><br/>' % total_gc)
+    out.write('Max in GC: <b>%d</b><br/>' % max_gc)
+    out.write('Avg in GC: <b>%d</b><br/>' % avg_gc)
+    for chart in charts:
+      out.write('<img src="%s">' % chart)
+    out.write('</body></html>')
+
+  print "%s generated." % (filename + '.html')
+
+if len(sys.argv) != 2:
+  print "Usage: %s <GC-trace-filename>" % sys.argv[0]
+  sys.exit(1)
+
+process_trace(sys.argv[1])
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index aa32e0c..a92576e 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -39,19 +39,28 @@
       'ENABLE_VMSTATE_TRACKING',
     ],
     'conditions': [
-      ['v8_target_arch=="arm"', {
-        'defines': [
-          'V8_TARGET_ARCH_ARM',
-        ],
-      }],
-      ['v8_target_arch=="ia32"', {
-        'defines': [
-          'V8_TARGET_ARCH_IA32',
-        ],
-      }],
-      ['v8_target_arch=="x64"', {
-        'defines': [
-          'V8_TARGET_ARCH_X64',
+      ['OS!="mac"', {
+        # TODO(mark): The OS!="mac" conditional is temporary. It can be
+        # removed once the Mac Chromium build stops setting target_arch to
+        # ia32 and instead sets it to mac. Other checks in this file for
+        # OS=="mac" can be removed at that time as well. This can be cleaned
+        # up once http://crbug.com/44205 is fixed.
+        'conditions': [
+          ['v8_target_arch=="arm"', {
+            'defines': [
+              'V8_TARGET_ARCH_ARM',
+            ],
+          }],
+          ['v8_target_arch=="ia32"', {
+            'defines': [
+              'V8_TARGET_ARCH_IA32',
+            ],
+          }],
+          ['v8_target_arch=="x64"', {
+            'defines': [
+              'V8_TARGET_ARCH_X64',
+            ],
+          }],
         ],
       }],
     ],
@@ -403,6 +412,8 @@
         '../../src/top.h',
         '../../src/type-info.cc',
         '../../src/type-info.h',
+        '../../src/unbound-queue-inl.h',
+        '../../src/unbound-queue.h',
         '../../src/unicode-inl.h',
         '../../src/unicode.cc',
         '../../src/unicode.h',
@@ -477,7 +488,7 @@
             }]
           ]
         }],
-        ['v8_target_arch=="ia32"', {
+        ['v8_target_arch=="ia32" or v8_target_arch=="mac" or OS=="mac"', {
           'include_dirs+': [
             '../../src/ia32',
           ],
@@ -513,7 +524,7 @@
             '../../src/ia32/virtual-frame-ia32.h',
           ],
         }],
-        ['v8_target_arch=="x64"', {
+        ['v8_target_arch=="x64" or v8_target_arch=="mac" or OS=="mac"', {
           'include_dirs+': [
             '../../src/x64',
           ],
diff --git a/tools/v8.xcodeproj/project.pbxproj b/tools/v8.xcodeproj/project.pbxproj
index 1e9d1e7..48d63b7 100644
--- a/tools/v8.xcodeproj/project.pbxproj
+++ b/tools/v8.xcodeproj/project.pbxproj
@@ -237,7 +237,6 @@
 		9FA38BC51175B2E500C4CD55 /* full-codegen-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BC21175B2E500C4CD55 /* full-codegen-ia32.cc */; };
 		9FA38BC61175B2E500C4CD55 /* jump-target-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BC31175B2E500C4CD55 /* jump-target-ia32.cc */; };
 		9FA38BC71175B2E500C4CD55 /* virtual-frame-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BC41175B2E500C4CD55 /* virtual-frame-ia32.cc */; };
-		9FA38BCE1175B30400C4CD55 /* assembler-thumb2.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BC91175B30400C4CD55 /* assembler-thumb2.cc */; };
 		9FA38BCF1175B30400C4CD55 /* full-codegen-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BCB1175B30400C4CD55 /* full-codegen-arm.cc */; };
 		9FA38BD01175B30400C4CD55 /* jump-target-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BCC1175B30400C4CD55 /* jump-target-arm.cc */; };
 		9FA38BD11175B30400C4CD55 /* virtual-frame-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 9FA38BCD1175B30400C4CD55 /* virtual-frame-arm.cc */; };
@@ -619,9 +618,6 @@
 		9FA38BC21175B2E500C4CD55 /* full-codegen-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "full-codegen-ia32.cc"; path = "ia32/full-codegen-ia32.cc"; sourceTree = "<group>"; };
 		9FA38BC31175B2E500C4CD55 /* jump-target-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "jump-target-ia32.cc"; path = "ia32/jump-target-ia32.cc"; sourceTree = "<group>"; };
 		9FA38BC41175B2E500C4CD55 /* virtual-frame-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "virtual-frame-ia32.cc"; path = "ia32/virtual-frame-ia32.cc"; sourceTree = "<group>"; };
-		9FA38BC81175B30400C4CD55 /* assembler-thumb2-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "assembler-thumb2-inl.h"; path = "arm/assembler-thumb2-inl.h"; sourceTree = "<group>"; };
-		9FA38BC91175B30400C4CD55 /* assembler-thumb2.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "assembler-thumb2.cc"; path = "arm/assembler-thumb2.cc"; sourceTree = "<group>"; };
-		9FA38BCA1175B30400C4CD55 /* assembler-thumb2.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "assembler-thumb2.h"; path = "arm/assembler-thumb2.h"; sourceTree = "<group>"; };
 		9FA38BCB1175B30400C4CD55 /* full-codegen-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "full-codegen-arm.cc"; path = "arm/full-codegen-arm.cc"; sourceTree = "<group>"; };
 		9FA38BCC1175B30400C4CD55 /* jump-target-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "jump-target-arm.cc"; path = "arm/jump-target-arm.cc"; sourceTree = "<group>"; };
 		9FA38BCD1175B30400C4CD55 /* virtual-frame-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "virtual-frame-arm.cc"; path = "arm/virtual-frame-arm.cc"; sourceTree = "<group>"; };
@@ -631,6 +627,8 @@
 		9FBE03E410BD412600F8BFBA /* fast-codegen-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "fast-codegen-arm.cc"; path = "arm/fast-codegen-arm.cc"; sourceTree = "<group>"; };
 		9FC86ABB0F5FEDAC00F22668 /* oprofile-agent.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "oprofile-agent.cc"; sourceTree = "<group>"; };
 		9FC86ABC0F5FEDAC00F22668 /* oprofile-agent.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "oprofile-agent.h"; sourceTree = "<group>"; };
+		9FF7A28211A642EA0051B8F2 /* unbound-queue-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "unbound-queue-inl.h"; sourceTree = "<group>"; };
+		9FF7A28311A642EA0051B8F2 /* unbound-queue.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "unbound-queue.h"; sourceTree = "<group>"; };
 /* End PBXFileReference section */
 
 /* Begin PBXFrameworksBuildPhase section */
@@ -734,9 +732,6 @@
 				897FF1000E719B8F00D62E90 /* assembler-ia32-inl.h */,
 				897FF1010E719B8F00D62E90 /* assembler-ia32.cc */,
 				897FF1020E719B8F00D62E90 /* assembler-ia32.h */,
-				9FA38BC81175B30400C4CD55 /* assembler-thumb2-inl.h */,
-				9FA38BC91175B30400C4CD55 /* assembler-thumb2.cc */,
-				9FA38BCA1175B30400C4CD55 /* assembler-thumb2.h */,
 				897FF1030E719B8F00D62E90 /* assembler.cc */,
 				897FF1040E719B8F00D62E90 /* assembler.h */,
 				897FF1050E719B8F00D62E90 /* ast.cc */,
@@ -977,6 +972,8 @@
 				897FF1910E719B8F00D62E90 /* top.h */,
 				9FA38BAE1175B2D200C4CD55 /* type-info.cc */,
 				9FA38BAF1175B2D200C4CD55 /* type-info.h */,
+				9FF7A28211A642EA0051B8F2 /* unbound-queue-inl.h */,
+				9FF7A28311A642EA0051B8F2 /* unbound-queue.h */,
 				897FF1920E719B8F00D62E90 /* unicode-inl.h */,
 				897FF1930E719B8F00D62E90 /* unicode.cc */,
 				897FF1940E719B8F00D62E90 /* unicode.h */,
@@ -1394,7 +1391,6 @@
 				89F23C400E78D5B2006B2466 /* allocation.cc in Sources */,
 				89F23C410E78D5B2006B2466 /* api.cc in Sources */,
 				89F23C970E78D5E3006B2466 /* assembler-arm.cc in Sources */,
-				9FA38BCE1175B30400C4CD55 /* assembler-thumb2.cc in Sources */,
 				89F23C430E78D5B2006B2466 /* assembler.cc in Sources */,
 				89F23C440E78D5B2006B2466 /* ast.cc in Sources */,
 				89F23C450E78D5B2006B2466 /* bootstrapper.cc in Sources */,
diff --git a/tools/visual_studio/v8_base.vcproj b/tools/visual_studio/v8_base.vcproj
index 004e16e..2571b65 100644
--- a/tools/visual_studio/v8_base.vcproj
+++ b/tools/visual_studio/v8_base.vcproj
@@ -144,6 +144,30 @@
 					/>
 				</FileConfiguration>
 			</File>
+			<File
+				RelativePath="..\..\src\dtoa.cc"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\dtoa.h"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\fast-dtoa.cc"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\fast-dtoa.h"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\fixed-dtoa.cc"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\fixed-dtoa.h"
+				>
+			</File>
 		</Filter>
 		<Filter
 			Name="src"
@@ -961,6 +985,14 @@
 				>
 			</File>
 			<File
+				RelativePath="..\..\src\unbound-queue-inl.h"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\unbound-queue.h"
+				>
+			</File>
+			<File
 				RelativePath="..\..\src\unicode-inl.h"
 				>
 			</File>
diff --git a/tools/visual_studio/v8_base_arm.vcproj b/tools/visual_studio/v8_base_arm.vcproj
index 39cd42a..a3c5970 100644
--- a/tools/visual_studio/v8_base_arm.vcproj
+++ b/tools/visual_studio/v8_base_arm.vcproj
@@ -953,6 +953,14 @@
 				>
 			</File>
 			<File
+				RelativePath="..\..\src\unbound-queue-inl.h"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\unbound-queue.h"
+				>
+			</File>
+			<File
 				RelativePath="..\..\src\unicode-inl.h"
 				>
 			</File>
diff --git a/tools/visual_studio/v8_base_x64.vcproj b/tools/visual_studio/v8_base_x64.vcproj
index 4607817..708b380 100644
--- a/tools/visual_studio/v8_base_x64.vcproj
+++ b/tools/visual_studio/v8_base_x64.vcproj
@@ -938,6 +938,14 @@
 				>
 			</File>
 			<File
+				RelativePath="..\..\src\unbound-queue-inl.h"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\unbound-queue.h"
+				>
+			</File>
+			<File
 				RelativePath="..\..\src\unicode-inl.h"
 				>
 			</File>
diff --git a/tools/visual_studio/v8_cctest.vcproj b/tools/visual_studio/v8_cctest.vcproj
index 424d226..cca6eba 100644
--- a/tools/visual_studio/v8_cctest.vcproj
+++ b/tools/visual_studio/v8_cctest.vcproj
@@ -248,6 +248,10 @@
 			>
 		</File>
 		<File
+			RelativePath="..\..\test\cctest\test-unbound-queue.cc"
+			>
+		</File>
+		<File
 			RelativePath="..\..\test\cctest\test-utils.cc"
 			>
 		</File>