Version 2.2.22

Added ES5 Object.isExtensible and Object.preventExtensions.

Enabled building V8 as a DLL.

Fixed a bug in date code where -0 was not interpreted as 0 (issue 736).

Performance improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@5017 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 8b58faa..c29c717 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,15 @@
+2010-07-05: Version 2.2.22
+
+        Added ES5 Object.isExtensible and Object.preventExtensions.
+
+        Enabled building V8 as a DLL.
+
+        Fixed a bug in date code where -0 was not interpreted as 0
+        (issue 736).
+
+        Performance improvements on all platforms.
+
+
 2010-06-30: Version 2.2.21
 
         Fix bug in externalizing some ASCII strings (Chromium issue 47824).
@@ -13,18 +25,21 @@
         Provide actual breakpoints locations in response to setBreakpoint
         and listBreakpoints requests.
 
+
 2010-06-28: Version 2.2.20
+
         Fix bug with for-in on x64 platform (issue 748).
 
         Fix crash bug on x64 platform (issue 756).
-        
+
         Fix bug in Object.getOwnPropertyNames. (chromium issue 41243).
 
-        Fix a bug on ARM  that caused the result of 1 << x to be 
+        Fix a bug on ARM that caused the result of 1 << x to be
         miscalculated for some inputs.
 
         Performance improvements on all platforms.
 
+
 2010-06-23: Version 2.2.19
 
         Fix bug that causes the build to break when profillingsupport=off
@@ -64,11 +79,11 @@
 
 2010-06-09: Version 2.2.16
 
-        Removed the SetExternalStringDiposeCallback API. Changed the 
+        Removed the SetExternalStringDiposeCallback API. Changed the
         disposal of external string resources to call a virtual Dispose
         method on the resource.
 
-        Added support for more precise break points when debugging and 
+        Added support for more precise break points when debugging and
         stepping.
 
         Memory usage improvements on all platforms.
diff --git a/include/v8.h b/include/v8.h
index b625618..ca4a247 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -61,10 +61,6 @@
 // the V8 DLL USING_V8_SHARED needs to be defined. When either building the V8
 // static library or building a program which uses the V8 static library neither
 // BUILDING_V8_SHARED nor USING_V8_SHARED should be defined.
-// The reason for having both V8EXPORT and V8EXPORT_INLINE is that classes which
-// have their code inside this header file need to have __declspec(dllexport)
-// when building the DLL but cannot have __declspec(dllimport) when building
-// a program which uses the DLL.
 #if defined(BUILDING_V8_SHARED) && defined(USING_V8_SHARED)
 #error both BUILDING_V8_SHARED and USING_V8_SHARED are set - please check the\
   build configuration to ensure that at most one of these is set
@@ -72,13 +68,10 @@
 
 #ifdef BUILDING_V8_SHARED
 #define V8EXPORT __declspec(dllexport)
-#define V8EXPORT_INLINE __declspec(dllexport)
 #elif USING_V8_SHARED
 #define V8EXPORT __declspec(dllimport)
-#define V8EXPORT_INLINE
 #else
 #define V8EXPORT
-#define V8EXPORT_INLINE
 #endif  // BUILDING_V8_SHARED
 
 #else  // _WIN32
@@ -90,10 +83,8 @@
 // export symbols when we are building a static library.
 #if defined(__GNUC__) && (__GNUC__ >= 4) && defined(V8_SHARED)
 #define V8EXPORT __attribute__ ((visibility("default")))
-#define V8EXPORT_INLINE __attribute__ ((visibility("default")))
 #else  // defined(__GNUC__) && (__GNUC__ >= 4)
 #define V8EXPORT
-#define V8EXPORT_INLINE
 #endif  // defined(__GNUC__) && (__GNUC__ >= 4)
 
 #endif  // _WIN32
@@ -185,7 +176,7 @@
  * behind the scenes and the same rules apply to these values as to
  * their handles.
  */
-template <class T> class V8EXPORT_INLINE Handle {
+template <class T> class Handle {
  public:
 
   /**
@@ -196,7 +187,7 @@
   /**
    * Creates a new handle for the specified value.
    */
-  explicit Handle(T* val) : val_(val) { }
+  inline explicit Handle(T* val) : val_(val) { }
 
   /**
    * Creates a handle for the contents of the specified handle.  This
@@ -221,16 +212,16 @@
   /**
    * Returns true if the handle is empty.
    */
-  bool IsEmpty() const { return val_ == 0; }
+  inline bool IsEmpty() const { return val_ == 0; }
 
-  T* operator->() const { return val_; }
+  inline T* operator->() const { return val_; }
 
-  T* operator*() const { return val_; }
+  inline T* operator*() const { return val_; }
 
   /**
    * Sets the handle to be empty. IsEmpty() will then return true.
    */
-  void Clear() { this->val_ = 0; }
+  inline void Clear() { this->val_ = 0; }
 
   /**
    * Checks whether two handles are the same.
@@ -238,7 +229,7 @@
    * to which they refer are identical.
    * The handles' references are not checked.
    */
-  template <class S> bool operator==(Handle<S> that) const {
+  template <class S> inline bool operator==(Handle<S> that) const {
     internal::Object** a = reinterpret_cast<internal::Object**>(**this);
     internal::Object** b = reinterpret_cast<internal::Object**>(*that);
     if (a == 0) return b == 0;
@@ -252,7 +243,7 @@
    * the objects to which they refer are different.
    * The handles' references are not checked.
    */
-  template <class S> bool operator!=(Handle<S> that) const {
+  template <class S> inline bool operator!=(Handle<S> that) const {
     return !operator==(that);
   }
 
@@ -281,7 +272,7 @@
  * handle scope are destroyed when the handle scope is destroyed.  Hence it
  * is not necessary to explicitly deallocate local handles.
  */
-template <class T> class V8EXPORT_INLINE Local : public Handle<T> {
+template <class T> class Local : public Handle<T> {
  public:
   inline Local();
   template <class S> inline Local(Local<S> that)
@@ -332,7 +323,7 @@
  * different storage cells but rather two references to the same
  * storage cell.
  */
-template <class T> class V8EXPORT_INLINE Persistent : public Handle<T> {
+template <class T> class Persistent : public Handle<T> {
  public:
 
   /**
@@ -563,11 +554,12 @@
 /**
  * The origin, within a file, of a script.
  */
-class V8EXPORT ScriptOrigin {
+class ScriptOrigin {
  public:
-  ScriptOrigin(Handle<Value> resource_name,
-               Handle<Integer> resource_line_offset = Handle<Integer>(),
-               Handle<Integer> resource_column_offset = Handle<Integer>())
+  inline ScriptOrigin(
+      Handle<Value> resource_name,
+      Handle<Integer> resource_line_offset = Handle<Integer>(),
+      Handle<Integer> resource_column_offset = Handle<Integer>())
       : resource_name_(resource_name),
         resource_line_offset_(resource_line_offset),
         resource_column_offset_(resource_column_offset) { }
@@ -841,30 +833,30 @@
 /**
  * The superclass of all JavaScript values and objects.
  */
-class V8EXPORT Value : public Data {
+class Value : public Data {
  public:
 
   /**
    * Returns true if this value is the undefined value.  See ECMA-262
    * 4.3.10.
    */
-  bool IsUndefined() const;
+  V8EXPORT bool IsUndefined() const;
 
   /**
    * Returns true if this value is the null value.  See ECMA-262
    * 4.3.11.
    */
-  bool IsNull() const;
+  V8EXPORT bool IsNull() const;
 
    /**
    * Returns true if this value is true.
    */
-  bool IsTrue() const;
+  V8EXPORT bool IsTrue() const;
 
   /**
    * Returns true if this value is false.
    */
-  bool IsFalse() const;
+  V8EXPORT bool IsFalse() const;
 
   /**
    * Returns true if this value is an instance of the String type.
@@ -875,92 +867,92 @@
   /**
    * Returns true if this value is a function.
    */
-  bool IsFunction() const;
+  V8EXPORT bool IsFunction() const;
 
   /**
    * Returns true if this value is an array.
    */
-  bool IsArray() const;
+  V8EXPORT bool IsArray() const;
 
   /**
    * Returns true if this value is an object.
    */
-  bool IsObject() const;
+  V8EXPORT bool IsObject() const;
 
   /**
    * Returns true if this value is boolean.
    */
-  bool IsBoolean() const;
+  V8EXPORT bool IsBoolean() const;
 
   /**
    * Returns true if this value is a number.
    */
-  bool IsNumber() const;
+  V8EXPORT bool IsNumber() const;
 
   /**
    * Returns true if this value is external.
    */
-  bool IsExternal() const;
+  V8EXPORT bool IsExternal() const;
 
   /**
    * Returns true if this value is a 32-bit signed integer.
    */
-  bool IsInt32() const;
+  V8EXPORT bool IsInt32() const;
 
   /**
    * Returns true if this value is a 32-bit unsigned integer.
    */
-  bool IsUint32() const;
+  V8EXPORT bool IsUint32() const;
 
   /**
    * Returns true if this value is a Date.
    */
-  bool IsDate() const;
+  V8EXPORT bool IsDate() const;
 
-  Local<Boolean> ToBoolean() const;
-  Local<Number> ToNumber() const;
-  Local<String> ToString() const;
-  Local<String> ToDetailString() const;
-  Local<Object> ToObject() const;
-  Local<Integer> ToInteger() const;
-  Local<Uint32> ToUint32() const;
-  Local<Int32> ToInt32() const;
+  V8EXPORT Local<Boolean> ToBoolean() const;
+  V8EXPORT Local<Number> ToNumber() const;
+  V8EXPORT Local<String> ToString() const;
+  V8EXPORT Local<String> ToDetailString() const;
+  V8EXPORT Local<Object> ToObject() const;
+  V8EXPORT Local<Integer> ToInteger() const;
+  V8EXPORT Local<Uint32> ToUint32() const;
+  V8EXPORT Local<Int32> ToInt32() const;
 
   /**
    * Attempts to convert a string to an array index.
    * Returns an empty handle if the conversion fails.
    */
-  Local<Uint32> ToArrayIndex() const;
+  V8EXPORT Local<Uint32> ToArrayIndex() const;
 
-  bool BooleanValue() const;
-  double NumberValue() const;
-  int64_t IntegerValue() const;
-  uint32_t Uint32Value() const;
-  int32_t Int32Value() const;
+  V8EXPORT bool BooleanValue() const;
+  V8EXPORT double NumberValue() const;
+  V8EXPORT int64_t IntegerValue() const;
+  V8EXPORT uint32_t Uint32Value() const;
+  V8EXPORT int32_t Int32Value() const;
 
   /** JS == */
-  bool Equals(Handle<Value> that) const;
-  bool StrictEquals(Handle<Value> that) const;
+  V8EXPORT bool Equals(Handle<Value> that) const;
+  V8EXPORT bool StrictEquals(Handle<Value> that) const;
 
  private:
   inline bool QuickIsString() const;
-  bool FullIsString() const;
+  V8EXPORT bool FullIsString() const;
 };
 
 
 /**
  * The superclass of primitive values.  See ECMA-262 4.3.2.
  */
-class V8EXPORT Primitive : public Value { };
+class Primitive : public Value { };
 
 
 /**
  * A primitive boolean value (ECMA-262, 4.3.14).  Either the true
  * or false value.
  */
-class V8EXPORT Boolean : public Primitive {
+class Boolean : public Primitive {
  public:
-  bool Value() const;
+  V8EXPORT bool Value() const;
   static inline Handle<Boolean> New(bool value);
 };
 
@@ -968,19 +960,19 @@
 /**
  * A JavaScript string value (ECMA-262, 4.3.17).
  */
-class V8EXPORT String : public Primitive {
+class String : public Primitive {
  public:
 
   /**
    * Returns the number of characters in this string.
    */
-  int Length() const;
+  V8EXPORT int Length() const;
 
   /**
    * Returns the number of bytes in the UTF-8 encoded
    * representation of this string.
    */
-  int Utf8Length() const;
+  V8EXPORT int Utf8Length() const;
 
   /**
    * Write the contents of the string to an external buffer.
@@ -1007,33 +999,33 @@
     HINT_MANY_WRITES_EXPECTED = 1
   };
 
-  int Write(uint16_t* buffer,
-            int start = 0,
-            int length = -1,
-            WriteHints hints = NO_HINTS) const;  // UTF-16
-  int WriteAscii(char* buffer,
-                 int start = 0,
-                 int length = -1,
-                 WriteHints hints = NO_HINTS) const;  // ASCII
-  int WriteUtf8(char* buffer,
-                int length = -1,
-                int* nchars_ref = NULL,
-                WriteHints hints = NO_HINTS) const; // UTF-8
+  V8EXPORT int Write(uint16_t* buffer,
+                     int start = 0,
+                     int length = -1,
+                     WriteHints hints = NO_HINTS) const;  // UTF-16
+  V8EXPORT int WriteAscii(char* buffer,
+                          int start = 0,
+                          int length = -1,
+                          WriteHints hints = NO_HINTS) const;  // ASCII
+  V8EXPORT int WriteUtf8(char* buffer,
+                         int length = -1,
+                         int* nchars_ref = NULL,
+                         WriteHints hints = NO_HINTS) const;  // UTF-8
 
   /**
    * A zero length string.
    */
-  static v8::Local<v8::String> Empty();
+  V8EXPORT static v8::Local<v8::String> Empty();
 
   /**
    * Returns true if the string is external
    */
-  bool IsExternal() const;
+  V8EXPORT bool IsExternal() const;
 
   /**
    * Returns true if the string is both external and ascii
    */
-  bool IsExternalAscii() const;
+  V8EXPORT bool IsExternalAscii() const;
 
   class V8EXPORT ExternalStringResourceBase {
    public:
@@ -1124,7 +1116,7 @@
    * Get the ExternalAsciiStringResource for an external ascii string.
    * Returns NULL if IsExternalAscii() doesn't return true.
    */
-  ExternalAsciiStringResource* GetExternalAsciiStringResource() const;
+  V8EXPORT ExternalAsciiStringResource* GetExternalAsciiStringResource() const;
 
   static inline String* Cast(v8::Value* obj);
 
@@ -1137,19 +1129,20 @@
    * 'strlen' to determine the buffer length, it might be
    * wrong if 'data' contains a null character.
    */
-  static Local<String> New(const char* data, int length = -1);
+  V8EXPORT static Local<String> New(const char* data, int length = -1);
 
   /** Allocates a new string from utf16 data.*/
-  static Local<String> New(const uint16_t* data, int length = -1);
+  V8EXPORT static Local<String> New(const uint16_t* data, int length = -1);
 
   /** Creates a symbol. Returns one if it exists already.*/
-  static Local<String> NewSymbol(const char* data, int length = -1);
+  V8EXPORT static Local<String> NewSymbol(const char* data, int length = -1);
 
   /**
    * Creates a new string by concatenating the left and the right strings
    * passed in as parameters.
    */
-  static Local<String> Concat(Handle<String> left, Handle<String>right);
+  V8EXPORT static Local<String> Concat(Handle<String> left,
+                                       Handle<String>right);
 
   /**
    * Creates a new external string using the data defined in the given
@@ -1159,7 +1152,7 @@
    * should the underlying buffer be deallocated or modified except through the
    * destructor of the external string resource.
    */
-  static Local<String> NewExternal(ExternalStringResource* resource);
+  V8EXPORT static Local<String> NewExternal(ExternalStringResource* resource);
 
   /**
    * Associate an external string resource with this string by transforming it
@@ -1170,7 +1163,7 @@
    * The string is not modified if the operation fails. See NewExternal for
    * information on the lifetime of the resource.
    */
-  bool MakeExternal(ExternalStringResource* resource);
+  V8EXPORT bool MakeExternal(ExternalStringResource* resource);
 
   /**
    * Creates a new external string using the ascii data defined in the given
@@ -1180,7 +1173,8 @@
    * should the underlying buffer be deallocated or modified except through the
    * destructor of the external string resource.
    */
-  static Local<String> NewExternal(ExternalAsciiStringResource* resource);
+  V8EXPORT static Local<String> NewExternal(
+      ExternalAsciiStringResource* resource);
 
   /**
    * Associate an external string resource with this string by transforming it
@@ -1191,18 +1185,20 @@
    * The string is not modified if the operation fails. See NewExternal for
    * information on the lifetime of the resource.
    */
-  bool MakeExternal(ExternalAsciiStringResource* resource);
+  V8EXPORT bool MakeExternal(ExternalAsciiStringResource* resource);
 
   /**
    * Returns true if this string can be made external.
    */
-  bool CanMakeExternal();
+  V8EXPORT bool CanMakeExternal();
 
   /** Creates an undetectable string from the supplied ascii or utf-8 data.*/
-  static Local<String> NewUndetectable(const char* data, int length = -1);
+  V8EXPORT static Local<String> NewUndetectable(const char* data,
+                                                int length = -1);
 
   /** Creates an undetectable string from the supplied utf-16 data.*/
-  static Local<String> NewUndetectable(const uint16_t* data, int length = -1);
+  V8EXPORT static Local<String> NewUndetectable(const uint16_t* data,
+                                                int length = -1);
 
   /**
    * Converts an object to a utf8-encoded character array.  Useful if
@@ -1273,21 +1269,21 @@
   };
 
  private:
-  void VerifyExternalStringResource(ExternalStringResource* val) const;
-  static void CheckCast(v8::Value* obj);
+  V8EXPORT void VerifyExternalStringResource(ExternalStringResource* val) const;
+  V8EXPORT static void CheckCast(v8::Value* obj);
 };
 
 
 /**
  * A JavaScript number value (ECMA-262, 4.3.20)
  */
-class V8EXPORT Number : public Primitive {
+class Number : public Primitive {
  public:
-  double Value() const;
-  static Local<Number> New(double value);
+  V8EXPORT double Value() const;
+  V8EXPORT static Local<Number> New(double value);
   static inline Number* Cast(v8::Value* obj);
  private:
-  Number();
+  V8EXPORT Number();
   static void CheckCast(v8::Value* obj);
 };
 
@@ -1295,56 +1291,56 @@
 /**
  * A JavaScript value representing a signed integer.
  */
-class V8EXPORT Integer : public Number {
+class Integer : public Number {
  public:
-  static Local<Integer> New(int32_t value);
-  static Local<Integer> NewFromUnsigned(uint32_t value);
-  int64_t Value() const;
+  V8EXPORT static Local<Integer> New(int32_t value);
+  V8EXPORT static Local<Integer> NewFromUnsigned(uint32_t value);
+  V8EXPORT int64_t Value() const;
   static inline Integer* Cast(v8::Value* obj);
  private:
-  Integer();
-  static void CheckCast(v8::Value* obj);
+  V8EXPORT Integer();
+  V8EXPORT static void CheckCast(v8::Value* obj);
 };
 
 
 /**
  * A JavaScript value representing a 32-bit signed integer.
  */
-class V8EXPORT Int32 : public Integer {
+class Int32 : public Integer {
  public:
-  int32_t Value() const;
+  V8EXPORT int32_t Value() const;
  private:
-  Int32();
+  V8EXPORT Int32();
 };
 
 
 /**
  * A JavaScript value representing a 32-bit unsigned integer.
  */
-class V8EXPORT Uint32 : public Integer {
+class Uint32 : public Integer {
  public:
-  uint32_t Value() const;
+  V8EXPORT uint32_t Value() const;
  private:
-  Uint32();
+  V8EXPORT Uint32();
 };
 
 
 /**
  * An instance of the built-in Date constructor (ECMA-262, 15.9).
  */
-class V8EXPORT Date : public Value {
+class Date : public Value {
  public:
-  static Local<Value> New(double time);
+  V8EXPORT static Local<Value> New(double time);
 
   /**
    * A specialization of Value::NumberValue that is more efficient
    * because we know the structure of this object.
    */
-  double NumberValue() const;
+  V8EXPORT double NumberValue() const;
 
   static inline Date* Cast(v8::Value* obj);
  private:
-  static void CheckCast(v8::Value* obj);
+  V8EXPORT static void CheckCast(v8::Value* obj);
 };
 
 
@@ -1403,14 +1399,14 @@
 /**
  * A JavaScript object (ECMA-262, 4.3.3)
  */
-class V8EXPORT Object : public Value {
+class Object : public Value {
  public:
-  bool Set(Handle<Value> key,
-           Handle<Value> value,
-           PropertyAttribute attribs = None);
+  V8EXPORT bool Set(Handle<Value> key,
+                    Handle<Value> value,
+                    PropertyAttribute attribs = None);
 
-  bool Set(uint32_t index,
-           Handle<Value> value);
+  V8EXPORT bool Set(uint32_t index,
+                    Handle<Value> value);
 
   // Sets a local property on this object bypassing interceptors and
   // overriding accessors or read-only properties.
@@ -1420,34 +1416,34 @@
   // will only be returned if the interceptor doesn't return a value.
   //
   // Note also that this only works for named properties.
-  bool ForceSet(Handle<Value> key,
-                Handle<Value> value,
-                PropertyAttribute attribs = None);
+  V8EXPORT bool ForceSet(Handle<Value> key,
+                         Handle<Value> value,
+                         PropertyAttribute attribs = None);
 
-  Local<Value> Get(Handle<Value> key);
+  V8EXPORT Local<Value> Get(Handle<Value> key);
 
-  Local<Value> Get(uint32_t index);
+  V8EXPORT Local<Value> Get(uint32_t index);
 
   // TODO(1245389): Replace the type-specific versions of these
   // functions with generic ones that accept a Handle<Value> key.
-  bool Has(Handle<String> key);
+  V8EXPORT bool Has(Handle<String> key);
 
-  bool Delete(Handle<String> key);
+  V8EXPORT bool Delete(Handle<String> key);
 
   // Delete a property on this object bypassing interceptors and
   // ignoring dont-delete attributes.
-  bool ForceDelete(Handle<Value> key);
+  V8EXPORT bool ForceDelete(Handle<Value> key);
 
-  bool Has(uint32_t index);
+  V8EXPORT bool Has(uint32_t index);
 
-  bool Delete(uint32_t index);
+  V8EXPORT bool Delete(uint32_t index);
 
-  bool SetAccessor(Handle<String> name,
-                   AccessorGetter getter,
-                   AccessorSetter setter = 0,
-                   Handle<Value> data = Handle<Value>(),
-                   AccessControl settings = DEFAULT,
-                   PropertyAttribute attribute = None);
+  V8EXPORT bool SetAccessor(Handle<String> name,
+                            AccessorGetter getter,
+                            AccessorSetter setter = 0,
+                            Handle<Value> data = Handle<Value>(),
+                            AccessControl settings = DEFAULT,
+                            PropertyAttribute attribute = None);
 
   /**
    * Returns an array containing the names of the enumerable properties
@@ -1455,78 +1451,80 @@
    * array returned by this method contains the same values as would
    * be enumerated by a for-in statement over this object.
    */
-  Local<Array> GetPropertyNames();
+  V8EXPORT Local<Array> GetPropertyNames();
 
   /**
    * Get the prototype object.  This does not skip objects marked to
    * be skipped by __proto__ and it does not consult the security
    * handler.
    */
-  Local<Value> GetPrototype();
+  V8EXPORT Local<Value> GetPrototype();
 
   /**
    * Set the prototype object.  This does not skip objects marked to
    * be skipped by __proto__ and it does not consult the security
    * handler.
    */
-  bool SetPrototype(Handle<Value> prototype);
+  V8EXPORT bool SetPrototype(Handle<Value> prototype);
 
   /**
    * Finds an instance of the given function template in the prototype
    * chain.
    */
-  Local<Object> FindInstanceInPrototypeChain(Handle<FunctionTemplate> tmpl);
+  V8EXPORT Local<Object> FindInstanceInPrototypeChain(
+      Handle<FunctionTemplate> tmpl);
 
   /**
    * Call builtin Object.prototype.toString on this object.
    * This is different from Value::ToString() that may call
    * user-defined toString function. This one does not.
    */
-  Local<String> ObjectProtoToString();
+  V8EXPORT Local<String> ObjectProtoToString();
 
   /** Gets the number of internal fields for this Object. */
-  int InternalFieldCount();
+  V8EXPORT int InternalFieldCount();
   /** Gets the value in an internal field. */
   inline Local<Value> GetInternalField(int index);
   /** Sets the value in an internal field. */
-  void SetInternalField(int index, Handle<Value> value);
+  V8EXPORT void SetInternalField(int index, Handle<Value> value);
 
   /** Gets a native pointer from an internal field. */
   inline void* GetPointerFromInternalField(int index);
 
   /** Sets a native pointer in an internal field. */
-  void SetPointerInInternalField(int index, void* value);
+  V8EXPORT void SetPointerInInternalField(int index, void* value);
 
   // Testers for local properties.
-  bool HasRealNamedProperty(Handle<String> key);
-  bool HasRealIndexedProperty(uint32_t index);
-  bool HasRealNamedCallbackProperty(Handle<String> key);
+  V8EXPORT bool HasRealNamedProperty(Handle<String> key);
+  V8EXPORT bool HasRealIndexedProperty(uint32_t index);
+  V8EXPORT bool HasRealNamedCallbackProperty(Handle<String> key);
 
   /**
    * If result.IsEmpty() no real property was located in the prototype chain.
    * This means interceptors in the prototype chain are not called.
    */
-  Local<Value> GetRealNamedPropertyInPrototypeChain(Handle<String> key);
+  V8EXPORT Local<Value> GetRealNamedPropertyInPrototypeChain(
+      Handle<String> key);
 
   /**
    * If result.IsEmpty() no real property was located on the object or
    * in the prototype chain.
    * This means interceptors in the prototype chain are not called.
    */
-  Local<Value> GetRealNamedProperty(Handle<String> key);
+  V8EXPORT Local<Value> GetRealNamedProperty(Handle<String> key);
 
   /** Tests for a named lookup interceptor.*/
-  bool HasNamedLookupInterceptor();
+  V8EXPORT bool HasNamedLookupInterceptor();
 
   /** Tests for an index lookup interceptor.*/
-  bool HasIndexedLookupInterceptor();
+  V8EXPORT bool HasIndexedLookupInterceptor();
 
   /**
    * Turns on access check on the object if the object is an instance of
    * a template that has access check callbacks. If an object has no
    * access check info, the object cannot be accessed by anyone.
    */
-  void TurnOnAccessCheck();
+  V8EXPORT void TurnOnAccessCheck();
 
   /**
    * Returns the identity hash for this object. The current implemenation uses
@@ -1535,7 +1533,7 @@
    * The return value will never be 0. Also, it is not guaranteed to be
    * unique.
    */
-  int GetIdentityHash();
+  V8EXPORT int GetIdentityHash();
 
   /**
    * Access hidden properties on JavaScript objects. These properties are
@@ -1543,9 +1541,9 @@
    * C++ API. Hidden properties introduced by V8 internally (for example the
    * identity hash) are prefixed with "v8::".
    */
-  bool SetHiddenValue(Handle<String> key, Handle<Value> value);
-  Local<Value> GetHiddenValue(Handle<String> key);
-  bool DeleteHiddenValue(Handle<String> key);
+  V8EXPORT bool SetHiddenValue(Handle<String> key, Handle<Value> value);
+  V8EXPORT Local<Value> GetHiddenValue(Handle<String> key);
+  V8EXPORT bool DeleteHiddenValue(Handle<String> key);
 
   /**
    * Returns true if this is an instance of an api function (one
@@ -1554,13 +1552,13 @@
    * conservative and may return true for objects that haven't actually
    * been modified.
    */
-  bool IsDirty();
+  V8EXPORT bool IsDirty();
 
   /**
    * Clone this object with a fast but shallow copy.  Values will point
    * to the same values as the original object.
    */
-  Local<Object> Clone();
+  V8EXPORT Local<Object> Clone();
 
   /**
    * Set the backing store of the indexed properties to be managed by the
@@ -1569,7 +1567,7 @@
    * Note: The embedding program still owns the data and needs to ensure that
    *       the backing store is preserved while V8 has a reference.
    */
-  void SetIndexedPropertiesToPixelData(uint8_t* data, int length);
+  V8EXPORT void SetIndexedPropertiesToPixelData(uint8_t* data, int length);
   bool HasIndexedPropertiesInPixelData();
   uint8_t* GetIndexedPropertiesPixelData();
   int GetIndexedPropertiesPixelDataLength();
@@ -1581,21 +1579,22 @@
    * Note: The embedding program still owns the data and needs to ensure that
    *       the backing store is preserved while V8 has a reference.
    */
-  void SetIndexedPropertiesToExternalArrayData(void* data,
-                                               ExternalArrayType array_type,
-                                               int number_of_elements);
+  V8EXPORT void SetIndexedPropertiesToExternalArrayData(
+      void* data,
+      ExternalArrayType array_type,
+      int number_of_elements);
   bool HasIndexedPropertiesInExternalArrayData();
   void* GetIndexedPropertiesExternalArrayData();
   ExternalArrayType GetIndexedPropertiesExternalArrayDataType();
   int GetIndexedPropertiesExternalArrayDataLength();
 
-  static Local<Object> New();
+  V8EXPORT static Local<Object> New();
   static inline Object* Cast(Value* obj);
  private:
-  Object();
-  static void CheckCast(Value* obj);
-  Local<Value> CheckedGetInternalField(int index);
-  void* SlowGetPointerFromInternalField(int index);
+  V8EXPORT Object();
+  V8EXPORT static void CheckCast(Value* obj);
+  V8EXPORT Local<Value> CheckedGetInternalField(int index);
+  V8EXPORT void* SlowGetPointerFromInternalField(int index);
 
   /**
    * If quick access to the internal field is possible this method
@@ -1608,20 +1607,20 @@
 /**
  * An instance of the built-in array constructor (ECMA-262, 15.4.2).
  */
-class V8EXPORT Array : public Object {
+class Array : public Object {
  public:
-  uint32_t Length() const;
+  V8EXPORT uint32_t Length() const;
 
   /**
    * Clones an element at index |index|.  Returns an empty
    * handle if cloning fails (for any reason).
    */
-  Local<Object> CloneElementAt(uint32_t index);
+  V8EXPORT Local<Object> CloneElementAt(uint32_t index);
 
-  static Local<Array> New(int length = 0);
+  V8EXPORT static Local<Array> New(int length = 0);
   static inline Array* Cast(Value* obj);
  private:
-  Array();
+  V8EXPORT Array();
   static void CheckCast(Value* obj);
 };
 
@@ -1629,25 +1628,27 @@
 /**
  * A JavaScript function object (ECMA-262, 15.3).
  */
-class V8EXPORT Function : public Object {
+class Function : public Object {
  public:
-  Local<Object> NewInstance() const;
-  Local<Object> NewInstance(int argc, Handle<Value> argv[]) const;
-  Local<Value> Call(Handle<Object> recv, int argc, Handle<Value> argv[]);
-  void SetName(Handle<String> name);
-  Handle<Value> GetName() const;
+  V8EXPORT Local<Object> NewInstance() const;
+  V8EXPORT Local<Object> NewInstance(int argc, Handle<Value> argv[]) const;
+  V8EXPORT Local<Value> Call(Handle<Object> recv,
+                             int argc,
+                             Handle<Value> argv[]);
+  V8EXPORT void SetName(Handle<String> name);
+  V8EXPORT Handle<Value> GetName() const;
 
   /**
    * Returns zero based line number of function body and
    * kLineOffsetNotFound if no information available.
    */
-  int GetScriptLineNumber() const;
-  ScriptOrigin GetScriptOrigin() const;
+  V8EXPORT int GetScriptLineNumber() const;
+  V8EXPORT ScriptOrigin GetScriptOrigin() const;
   static inline Function* Cast(Value* obj);
-  static const int kLineOffsetNotFound;
+  V8EXPORT static const int kLineOffsetNotFound;
  private:
-  Function();
-  static void CheckCast(Value* obj);
+  V8EXPORT Function();
+  V8EXPORT static void CheckCast(Value* obj);
 };
 
 
@@ -1662,19 +1663,19 @@
  * value Unwrap should be used, all other operations on that object will lead
  * to unpredictable results.
  */
-class V8EXPORT External : public Value {
+class External : public Value {
  public:
-  static Local<Value> Wrap(void* data);
+  V8EXPORT static Local<Value> Wrap(void* data);
   static inline void* Unwrap(Handle<Value> obj);
 
-  static Local<External> New(void* value);
+  V8EXPORT static Local<External> New(void* value);
   static inline External* Cast(Value* obj);
-  void* Value() const;
+  V8EXPORT void* Value() const;
  private:
-  External();
-  static void CheckCast(v8::Value* obj);
+  V8EXPORT External();
+  V8EXPORT static void CheckCast(v8::Value* obj);
   static inline void* QuickUnwrap(Handle<v8::Value> obj);
-  static void* FullUnwrap(Handle<v8::Value> obj);
+  V8EXPORT static void* FullUnwrap(Handle<v8::Value> obj);
 };
 
 
@@ -1704,7 +1705,7 @@
  * including the receiver, the number and values of arguments, and
  * the holder of the function.
  */
-class V8EXPORT Arguments {
+class Arguments {
  public:
   inline int Length() const;
   inline Local<Value> operator[](int i) const;
@@ -1714,7 +1715,6 @@
   inline bool IsConstructCall() const;
   inline Local<Value> Data() const;
  private:
-  Arguments();
   friend class ImplementationUtilities;
   inline Arguments(Local<Value> data,
                    Local<Object> holder,
@@ -3001,7 +3001,7 @@
    * Stack-allocated class which sets the execution context for all
    * operations executed within a local scope.
    */
-  class V8EXPORT Scope {
+  class Scope {
    public:
     inline Scope(Handle<Context> context) : context_(context) {
       context_->Enter();
@@ -3320,6 +3320,17 @@
   V8::ClearWeak(reinterpret_cast<internal::Object**>(**this));
 }
 
+
+Arguments::Arguments(v8::Local<v8::Value> data,
+                     v8::Local<v8::Object> holder,
+                     v8::Local<v8::Function> callee,
+                     bool is_construct_call,
+                     void** values, int length)
+    : data_(data), holder_(holder), callee_(callee),
+      is_construct_call_(is_construct_call),
+      values_(values), length_(length) { }
+
+
 Local<Value> Arguments::operator[](int i) const {
   if (i < 0 || length_ <= i) return Local<Value>(*Undefined());
   return Local<Value>(reinterpret_cast<Value*>(values_ - i));
@@ -3580,7 +3591,6 @@
 
 
 #undef V8EXPORT
-#undef V8EXPORT_INLINE
 #undef TYPE_CHECK
 
 
diff --git a/src/api.h b/src/api.h
index e7b1394..5c67136 100644
--- a/src/api.h
+++ b/src/api.h
@@ -134,16 +134,6 @@
 };
 
 
-v8::Arguments::Arguments(v8::Local<v8::Value> data,
-                         v8::Local<v8::Object> holder,
-                         v8::Local<v8::Function> callee,
-                         bool is_construct_call,
-                         void** values, int length)
-    : data_(data), holder_(holder), callee_(callee),
-      is_construct_call_(is_construct_call),
-      values_(values), length_(length) { }
-
-
 enum ExtensionTraversalState {
   UNVISITED, VISITED, INSTALLED
 };
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index c8170b3..f5ff43a 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -1802,6 +1802,16 @@
 
 
 void Assembler::vmov(const DwVfpRegister dst,
+                     const DwVfpRegister src,
+                     const Condition cond) {
+  // Dd = Dm
+  // Instruction details available in ARM DDI 0406B, A8-642.
+  emit(cond | 0xE*B24 | 0xB*B20 |
+       dst.code()*B12 | 0x5*B9 | B8 | B6 | src.code());
+}
+
+
+void Assembler::vmov(const DwVfpRegister dst,
                      const Register src1,
                      const Register src2,
                      const Condition cond) {
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 8a4173d..6a4fb23 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -930,6 +930,10 @@
             const Register base,
             int offset,  // Offset must be a multiple of 4.
             const Condition cond = al);
+
+  void vmov(const DwVfpRegister dst,
+            const DwVfpRegister src,
+            const Condition cond = al);
   void vmov(const DwVfpRegister dst,
             const Register src1,
             const Register src2,
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index f923c09..4d18727 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -748,37 +748,43 @@
                               JumpTarget* false_target) {
   // Note: The generated code snippet does not change stack variables.
   //       Only the condition code should be set.
+  bool known_smi = frame_->KnownSmiAt(0);
   Register tos = frame_->PopToRegister();
 
   // Fast case checks
 
   // Check if the value is 'false'.
-  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
-  __ cmp(tos, ip);
-  false_target->Branch(eq);
+  if (!known_smi) {
+    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+    __ cmp(tos, ip);
+    false_target->Branch(eq);
 
-  // Check if the value is 'true'.
-  __ LoadRoot(ip, Heap::kTrueValueRootIndex);
-  __ cmp(tos, ip);
-  true_target->Branch(eq);
+    // Check if the value is 'true'.
+    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+    __ cmp(tos, ip);
+    true_target->Branch(eq);
 
-  // Check if the value is 'undefined'.
-  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-  __ cmp(tos, ip);
-  false_target->Branch(eq);
+    // Check if the value is 'undefined'.
+    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+    __ cmp(tos, ip);
+    false_target->Branch(eq);
+  }
 
   // Check if the value is a smi.
   __ cmp(tos, Operand(Smi::FromInt(0)));
-  false_target->Branch(eq);
-  __ tst(tos, Operand(kSmiTagMask));
-  true_target->Branch(eq);
 
-  // Slow case: call the runtime.
-  frame_->EmitPush(tos);
-  frame_->CallRuntime(Runtime::kToBool, 1);
-  // Convert the result (r0) to a condition code.
-  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
-  __ cmp(r0, ip);
+  if (!known_smi) {
+    false_target->Branch(eq);
+    __ tst(tos, Operand(kSmiTagMask));
+    true_target->Branch(eq);
+
+    // Slow case: call the runtime.
+    frame_->EmitPush(tos);
+    frame_->CallRuntime(Runtime::kToBool, 1);
+    // Convert the result (r0) to a condition code.
+    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+    __ cmp(r0, ip);
+  }
 
   cc_reg_ = ne;
 }
@@ -1745,11 +1751,15 @@
     val = node->fun();  // NULL if we don't have a function
   }
 
+
   if (val != NULL) {
+    WriteBarrierCharacter wb_info =
+        val->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI;
+    if (val->AsLiteral() != NULL) wb_info = NEVER_NEWSPACE;
     // Set initial value.
     Reference target(this, node->proxy());
     Load(val);
-    target.SetValue(NOT_CONST_INIT);
+    target.SetValue(NOT_CONST_INIT, wb_info);
 
     // Get rid of the assigned value (declarations are statements).
     frame_->Drop();
@@ -2485,13 +2495,13 @@
       if (each.size() > 0) {
         __ ldr(r0, frame_->ElementAt(each.size()));
         frame_->EmitPush(r0);
-        each.SetValue(NOT_CONST_INIT);
+        each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
         frame_->Drop(2);
       } else {
         // If the reference was to a slot we rely on the convenient property
         // that it doesn't matter whether a value (eg, r3 pushed above) is
         // right on top of or right underneath a zero-sized reference.
-        each.SetValue(NOT_CONST_INIT);
+        each.SetValue(NOT_CONST_INIT, UNLIKELY_SMI);
         frame_->Drop();
       }
     }
@@ -3646,6 +3656,8 @@
   // Evaluate the receiver subexpression.
   Load(prop->obj());
 
+  WriteBarrierCharacter wb_info;
+
   // Change to slow case in the beginning of an initialization block to
   // avoid the quadratic behavior of repeatedly adding fast properties.
   if (node->starts_initialization_block()) {
@@ -3667,7 +3679,7 @@
   // [tos]   : key
   // [tos+1] : receiver
   // [tos+2] : receiver if at the end of an initialization block
-
+  //
   // Evaluate the right-hand side.
   if (node->is_compound()) {
     // For a compound assignment the right-hand side is a binary operation
@@ -3699,9 +3711,13 @@
                              overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE,
                              inline_smi);
     }
+    wb_info = node->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI;
   } else {
     // For non-compound assignment just load the right-hand side.
     Load(node->value());
+    wb_info = node->value()->AsLiteral() != NULL ?
+        NEVER_NEWSPACE :
+        (node->value()->type()->IsLikelySmi() ? LIKELY_SMI : UNLIKELY_SMI);
   }
 
   // Stack layout:
@@ -3713,7 +3729,7 @@
   // Perform the assignment.  It is safe to ignore constants here.
   ASSERT(node->op() != Token::INIT_CONST);
   CodeForSourcePosition(node->position());
-  EmitKeyedStore(prop->key()->type());
+  EmitKeyedStore(prop->key()->type(), wb_info);
   frame_->EmitPush(r0);
 
   // Stack layout:
@@ -4291,7 +4307,7 @@
   } else {
     CpuFeatures::Scope scope(VFP3);
     JumpTarget runtime, done;
-    Label not_minus_half, allocate_return;
+    Label exponent_nonsmi, base_nonsmi, powi, not_minus_half, allocate_return;
 
     Register scratch1 = VirtualFrame::scratch0();
     Register scratch2 = VirtualFrame::scratch1();
@@ -4299,18 +4315,74 @@
     // Get base and exponent to registers.
     Register exponent = frame_->PopToRegister();
     Register base = frame_->PopToRegister(exponent);
+    Register heap_number_map = no_reg;
 
     // Set the frame for the runtime jump target. The code below jumps to the
     // jump target label so the frame needs to be established before that.
     ASSERT(runtime.entry_frame() == NULL);
     runtime.set_entry_frame(frame_);
 
-    __ BranchOnSmi(exponent, runtime.entry_label());
+    __ BranchOnNotSmi(exponent, &exponent_nonsmi);
+    __ BranchOnNotSmi(base, &base_nonsmi);
 
+    heap_number_map = r6;
+    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+    // Exponent is a smi and base is a smi. Get the smi value into vfp register
+    // d1.
+    __ SmiToDoubleVFPRegister(base, d1, scratch1, s0);
+    __ b(&powi);
+
+    __ bind(&base_nonsmi);
+    // Exponent is smi and base is non smi. Get the double value from the base
+    // into vfp register d1.
+    __ ObjectToDoubleVFPRegister(base, d1,
+                                 scratch1, scratch2, heap_number_map, s0,
+                                 runtime.entry_label());
+
+    __ bind(&powi);
+
+    // Load 1.0 into d0.
+    __ mov(scratch2, Operand(0x3ff00000));
+    __ mov(scratch1, Operand(0));
+    __ vmov(d0, scratch1, scratch2);
+
+    // Get the absolute untagged value of the exponent and use that for the
+    // calculation.
+    __ mov(scratch1, Operand(exponent, ASR, kSmiTagSize), SetCC);
+    __ rsb(scratch1, scratch1, Operand(0), LeaveCC, mi);  // Negate if negative.
+    __ vmov(d2, d0, mi);  // 1.0 needed in d2 later if exponent is negative.
+
+    // Run through all the bits in the exponent. The result is calculated in d0
+    // and d1 holds base^(bit^2).
+    Label more_bits;
+    __ bind(&more_bits);
+    __ mov(scratch1, Operand(scratch1, LSR, 1), SetCC);
+    __ vmul(d0, d0, d1, cs);  // Multiply with base^(bit^2) if bit is set.
+    __ vmul(d1, d1, d1, ne);  // Don't bother calculating next d1 if done.
+    __ b(ne, &more_bits);
+
+    // If exponent is positive we are done.
+    __ cmp(exponent, Operand(0));
+    __ b(ge, &allocate_return);
+
+    // If exponent is negative result is 1/result (d2 already holds 1.0 in that
+    // case). However if d0 has reached infinity this will not provide the
+    // correct result, so call runtime if that is the case.
+    __ mov(scratch2, Operand(0x7FF00000));
+    __ mov(scratch1, Operand(0));
+    __ vmov(d1, scratch1, scratch2);  // Load infinity into d1.
+    __ vcmp(d0, d1);
+    __ vmrs(pc);
+    runtime.Branch(eq);  // d0 reached infinity.
+    __ vdiv(d0, d2, d0);
+    __ b(&allocate_return);
+
+    __ bind(&exponent_nonsmi);
     // Special handling of raising to the power of -0.5 and 0.5. First check
     // that the value is a heap number and that the lower bits (which for both
     // values are zero).
-    Register heap_number_map = r6;
+    heap_number_map = r6;
     __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
     __ ldr(scratch1, FieldMemOperand(exponent, HeapObject::kMapOffset));
     __ ldr(scratch2, FieldMemOperand(exponent, HeapNumber::kMantissaOffset));
@@ -4319,7 +4391,7 @@
     __ tst(scratch2, scratch2);
     runtime.Branch(ne);
 
-    // Load the e
+    // Load the higher bits (which contains the floating point exponent).
     __ ldr(scratch1, FieldMemOperand(exponent, HeapNumber::kExponentOffset));
 
     // Compare exponent with -0.5.
@@ -4356,8 +4428,10 @@
     __ vsqrt(d0, d0);
 
     __ bind(&allocate_return);
-    __ AllocateHeapNumberWithValue(
-        base, d0, scratch1, scratch2, heap_number_map, runtime.entry_label());
+    Register scratch3 = r5;
+    __ AllocateHeapNumberWithValue(scratch3, d0, scratch1, scratch2,
+                                   heap_number_map, runtime.entry_label());
+    __ mov(base, scratch3);
     done.Jump();
 
     runtime.Bind();
@@ -5451,7 +5525,7 @@
       __ sub(value, value, Operand(Smi::FromInt(1)));
     }
     frame_->EmitPush(value);
-    target.SetValue(NOT_CONST_INIT);
+    target.SetValue(NOT_CONST_INIT, LIKELY_SMI);
     if (is_postfix) frame_->Pop();
     ASSERT_EQ(original_height + 1, frame_->height());
     return;
@@ -5550,7 +5624,7 @@
     // Set the target with the result, leaving the result on
     // top of the stack.  Removes the target from the stack if
     // it has a non-zero size.
-    if (!is_const) target.SetValue(NOT_CONST_INIT);
+    if (!is_const) target.SetValue(NOT_CONST_INIT, LIKELY_SMI);
   }
 
   // Postfix: Discard the new value and use the old.
@@ -6283,7 +6357,8 @@
 }
 
 
-void CodeGenerator::EmitKeyedStore(StaticType* key_type) {
+void CodeGenerator::EmitKeyedStore(StaticType* key_type,
+                                   WriteBarrierCharacter wb_info) {
   // Generate inlined version of the keyed store if the code is in a loop
   // and the key is likely to be a smi.
   if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
@@ -6299,25 +6374,45 @@
     __ IncrementCounter(&Counters::keyed_store_inline, 1,
                         scratch1, scratch2);
 
+
+
     // Load the value, key and receiver from the stack.
+    bool value_is_harmless = frame_->KnownSmiAt(0);
+    if (wb_info == NEVER_NEWSPACE) value_is_harmless = true;
+    bool key_is_smi = frame_->KnownSmiAt(1);
     Register value = frame_->PopToRegister();
     Register key = frame_->PopToRegister(value);
     VirtualFrame::SpilledScope spilled(frame_);
     Register receiver = r2;
     frame_->EmitPop(receiver);
 
+#ifdef DEBUG
+    bool we_remembered_the_write_barrier = value_is_harmless;
+#endif
+
     // The deferred code expects value, key and receiver in registers.
     DeferredReferenceSetKeyedValue* deferred =
         new DeferredReferenceSetKeyedValue(value, key, receiver);
 
     // Check that the value is a smi. As this inlined code does not set the
     // write barrier it is only possible to store smi values.
-    __ tst(value, Operand(kSmiTagMask));
-    deferred->Branch(ne);
+    if (!value_is_harmless) {
+      // If the value is not likely to be a Smi then let's test the fixed array
+      // for new space instead.  See below.
+      if (wb_info == LIKELY_SMI) {
+        __ tst(value, Operand(kSmiTagMask));
+        deferred->Branch(ne);
+#ifdef DEBUG
+        we_remembered_the_write_barrier = true;
+#endif
+      }
+    }
 
-    // Check that the key is a smi.
-    __ tst(key, Operand(kSmiTagMask));
-    deferred->Branch(ne);
+    if (!key_is_smi) {
+      // Check that the key is a smi.
+      __ tst(key, Operand(kSmiTagMask));
+      deferred->Branch(ne);
+    }
 
     // Check that the receiver is a heap object.
     __ tst(receiver, Operand(kSmiTagMask));
@@ -6333,24 +6428,35 @@
     __ cmp(scratch1, key);
     deferred->Branch(ls);  // Unsigned less equal.
 
+    // Get the elements array from the receiver.
+    __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
+    if (!value_is_harmless && wb_info != LIKELY_SMI) {
+      Label ok;
+      __ and_(scratch2, scratch1, Operand(ExternalReference::new_space_mask()));
+      __ cmp(scratch2, Operand(ExternalReference::new_space_start()));
+      __ tst(value, Operand(kSmiTagMask), ne);
+      deferred->Branch(ne);
+#ifdef DEBUG
+      we_remembered_the_write_barrier = true;
+#endif
+    }
+    // Check that the elements array is not a dictionary.
+    __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
     // The following instructions are the part of the inlined store keyed
     // property code which can be patched. Therefore the exact number of
     // instructions generated need to be fixed, so the constant pool is blocked
     // while generating this code.
     { Assembler::BlockConstPoolScope block_const_pool(masm_);
-      // Get the elements array from the receiver and check that it
-      // is not a dictionary.
-      __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
-      __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
+#ifdef DEBUG
+      Label check_inlined_codesize;
+      masm_->bind(&check_inlined_codesize);
+#endif
+
       // Read the fixed array map from the constant pool (not from the root
       // array) so that the value can be patched.  When debugging, we patch this
       // comparison to always fail so that we will hit the IC call in the
       // deferred code which will allow the debugger to break for fast case
       // stores.
-#ifdef DEBUG
-    Label check_inlined_codesize;
-    masm_->bind(&check_inlined_codesize);
-#endif
       __ mov(scratch3, Operand(Factory::fixed_array_map()));
       __ cmp(scratch2, scratch3);
       deferred->Branch(ne);
@@ -6367,6 +6473,8 @@
                 masm_->InstructionsGeneratedSince(&check_inlined_codesize));
     }
 
+    ASSERT(we_remembered_the_write_barrier);
+
     deferred->BindExit();
   } else {
     frame()->CallKeyedStoreIC();
@@ -6464,7 +6572,7 @@
 }
 
 
-void Reference::SetValue(InitState init_state) {
+void Reference::SetValue(InitState init_state, WriteBarrierCharacter wb_info) {
   ASSERT(!is_illegal());
   ASSERT(!cgen_->has_cc());
   MacroAssembler* masm = cgen_->masm();
@@ -6496,7 +6604,7 @@
       Property* property = expression_->AsProperty();
       ASSERT(property != NULL);
       cgen_->CodeForSourcePosition(property->position());
-      cgen_->EmitKeyedStore(property->key()->type());
+      cgen_->EmitKeyedStore(property->key()->type(), wb_info);
       frame->EmitPush(r0);
       set_unloaded();
       break;
@@ -7170,22 +7278,42 @@
 
 
 // Fast negative check for symbol-to-symbol equality.
-static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) {
+static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
+                                         Label* possible_strings,
+                                         Label* not_both_strings) {
   // r2 is object type of r0.
   // Ensure that no non-strings have the symbol bit set.
-  ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
+  Label object_test;
   ASSERT(kSymbolTag != 0);
+  __ tst(r2, Operand(kIsNotStringMask));
+  __ b(ne, &object_test);
   __ tst(r2, Operand(kIsSymbolMask));
-  __ b(eq, slow);
-  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
-  __ ldrb(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
+  __ b(eq, possible_strings);
+  __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
+  __ b(ge, not_both_strings);
   __ tst(r3, Operand(kIsSymbolMask));
-  __ b(eq, slow);
+  __ b(eq, possible_strings);
 
   // Both are symbols.  We already checked they weren't the same pointer
   // so they are not equal.
   __ mov(r0, Operand(1));   // Non-zero indicates not equal.
   __ mov(pc, Operand(lr));  // Return.
+
+  __ bind(&object_test);
+  __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
+  __ b(lt, not_both_strings);
+  __ CompareObjectType(r1, r2, r3, FIRST_JS_OBJECT_TYPE);
+  __ b(lt, not_both_strings);
+  // If both objects are undetectable, they are equal.  Otherwise, they
+  // are not equal, since they are different objects and an object is not
+  // equal to undefined.
+  __ ldr(r3, FieldMemOperand(r0, HeapObject::kMapOffset));
+  __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
+  __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
+  __ and_(r0, r2, Operand(r3));
+  __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
+  __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
+  __ mov(pc, Operand(lr));  // Return.
 }
 
 
@@ -7301,7 +7429,8 @@
 
 
 void RecordWriteStub::Generate(MacroAssembler* masm) {
-  __ RecordWriteHelper(object_, Operand(offset_), offset_, scratch_);
+  __ add(offset_, object_, Operand(offset_));
+  __ RecordWriteHelper(object_, offset_, scratch_);
   __ Ret();
 }
 
@@ -7398,9 +7527,10 @@
   // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
   // symbols.
   if (cc_ == eq && !strict_) {
-    // Either jumps to slow or returns the answer.  Assumes that r2 is the type
-    // of r0 on entry.
-    EmitCheckForSymbols(masm, &flat_string_check);
+    // Returns an answer for two symbols or two detectable objects.
+    // Otherwise jumps to string case or not both strings case.
+    // Assumes that r2 is the type of r0 on entry.
+    EmitCheckForSymbolsOrObjects(masm, &flat_string_check, &slow);
   }
 
   // Check for both being sequential ASCII strings, and inline if that is the
@@ -7913,7 +8043,11 @@
       // The code below for writing into heap numbers isn't capable of writing
       // the register as an unsigned int so we go to slow case if we hit this
       // case.
-      __ b(mi, &slow);
+      if (CpuFeatures::IsSupported(VFP3)) {
+        __ b(mi, &result_not_a_smi);
+      } else {
+        __ b(mi, &slow);
+      }
       break;
     case Token::SHL:
       // Use only the 5 least significant bits of the shift count.
@@ -7957,10 +8091,24 @@
   // result.
   __ mov(r0, Operand(r5));
 
-  // Tail call that writes the int32 in r2 to the heap number in r0, using
-  // r3 as scratch.  r0 is preserved and returned.
-  WriteInt32ToHeapNumberStub stub(r2, r0, r3);
-  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+  if (CpuFeatures::IsSupported(VFP3)) {
+    // Convert the int32 in r2 to the heap number in r0. r3 is corrupted.
+    CpuFeatures::Scope scope(VFP3);
+    __ vmov(s0, r2);
+    if (op_ == Token::SHR) {
+      __ vcvt_f64_u32(d0, s0);
+    } else {
+      __ vcvt_f64_s32(d0, s0);
+    }
+    __ sub(r3, r0, Operand(kHeapObjectTag));
+    __ vstr(d0, r3, HeapNumber::kValueOffset);
+    __ Ret();
+  } else {
+    // Tail call that writes the int32 in r2 to the heap number in r0, using
+    // r3 as scratch.  r0 is preserved and returned.
+    WriteInt32ToHeapNumberStub stub(r2, r0, r3);
+    __ TailCallStub(&stub);
+  }
 
   if (mode_ != NO_OVERWRITE) {
     __ bind(&have_to_allocate);
@@ -8809,12 +8957,21 @@
       __ mov(r0, Operand(r2));
     }
 
-    // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
-    // have to set up a frame.
-    WriteInt32ToHeapNumberStub stub(r1, r0, r2);
-    __ push(lr);
-    __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
-    __ pop(lr);
+    if (CpuFeatures::IsSupported(VFP3)) {
+      // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
+      CpuFeatures::Scope scope(VFP3);
+      __ vmov(s0, r1);
+      __ vcvt_f64_s32(d0, s0);
+      __ sub(r2, r0, Operand(kHeapObjectTag));
+      __ vstr(d0, r2, HeapNumber::kValueOffset);
+    } else {
+      // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
+      // have to set up a frame.
+      WriteInt32ToHeapNumberStub stub(r1, r0, r2);
+      __ push(lr);
+      __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+      __ pop(lr);
+    }
   } else {
     UNIMPLEMENTED();
   }
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 2d8a935..855723d 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -44,6 +44,7 @@
 enum InitState { CONST_INIT, NOT_CONST_INIT };
 enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
 enum GenerateInlineSmi { DONT_GENERATE_INLINE_SMI, GENERATE_INLINE_SMI };
+enum WriteBarrierCharacter { UNLIKELY_SMI, LIKELY_SMI, NEVER_NEWSPACE };
 
 
 // -------------------------------------------------------------------------
@@ -100,7 +101,7 @@
   // on the expression stack.  The  value is stored in the location specified
   // by the reference, and is left on top of the stack, after the reference
   // is popped from beneath it (unloaded).
-  void SetValue(InitState init_state);
+  void SetValue(InitState init_state, WriteBarrierCharacter wb);
 
   // This is in preparation for something that uses the reference on the stack.
   // If we need this reference afterwards get then dup it now.  Otherwise mark
@@ -384,7 +385,7 @@
 
   // Store a keyed property. Key and receiver are on the stack and the value is
   // in r0. Result is returned in r0.
-  void EmitKeyedStore(StaticType* key_type);
+  void EmitKeyedStore(StaticType* key_type, WriteBarrierCharacter wb_info);
 
   void LoadFromGlobalSlotCheckExtensions(Slot* slot,
                                          TypeofState typeof_state,
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index fb17d45..a52417b 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -1047,7 +1047,14 @@
   if (instr->Bit(4) == 0) {
     if (instr->Opc1Field() == 0x7) {
       // Other data processing instructions
-      if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
+      if ((instr->Opc2Field() == 0x0) && (instr->Opc3Field() == 0x1)) {
+        // vmov register to register.
+        if (instr->SzField() == 0x1) {
+          Format(instr, "vmov.f64'cond 'Dd, 'Dm");
+        } else {
+          Unknown(instr);  // Not used by V8.
+        }
+      } else if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
         DecodeVCVTBetweenDoubleAndSingle(instr);
       } else if ((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) {
         DecodeVCVTBetweenFloatingPointAndInteger(instr);
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 0af1036..97e6148 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -64,12 +64,12 @@
 
 // Generated code falls through if the receiver is a regular non-global
 // JS object with slow properties and no interceptors.
-static void GenerateDictionaryLoadReceiverCheck(MacroAssembler* masm,
-                                                Register receiver,
-                                                Register elements,
-                                                Register t0,
-                                                Register t1,
-                                                Label* miss) {
+static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
+                                                  Register receiver,
+                                                  Register elements,
+                                                  Register t0,
+                                                  Register t1,
+                                                  Label* miss) {
   // Register usage:
   //   receiver: holds the receiver on entry and is unchanged.
   //   elements: holds the property dictionary on fall through.
@@ -105,33 +105,16 @@
 }
 
 
-// Helper function used from LoadIC/CallIC GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is done.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           done
-// result:   Register for the result. It is only updated if a jump to the miss
-//           label is not done. Can be the same as elements or name clobbering
-//           one of these in the case of not jumping to the miss label.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
-                                   Label* miss,
-                                   Register elements,
-                                   Register name,
-                                   Register result,
-                                   Register scratch1,
-                                   Register scratch2) {
-  // Main use of the scratch registers.
-  // scratch1: Used as temporary and to hold the capacity of the property
-  //           dictionary.
-  // scratch2: Used as temporary.
-
-  Label done;
-
+// Probe the string dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found. Jump to
+// the |miss| label otherwise.
+static void GenerateStringDictionaryProbes(MacroAssembler* masm,
+                                           Label* miss,
+                                           Label* done,
+                                           Register elements,
+                                           Register name,
+                                           Register scratch1,
+                                           Register scratch2) {
   // Compute the capacity mask.
   const int kCapacityOffset = StringDictionary::kHeaderSize +
       StringDictionary::kCapacityIndex * kPointerSize;
@@ -170,16 +153,56 @@
     __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
     __ cmp(name, Operand(ip));
     if (i != kProbes - 1) {
-      __ b(eq, &done);
+      __ b(eq, done);
     } else {
       __ b(ne, miss);
     }
   }
+}
 
-  // Check that the value is a normal property.
+
+// Helper function used from LoadIC/CallIC GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+//           label is done.
+// name:     Property name. It is not clobbered if a jump to the miss label is
+//           done
+// result:   Register for the result. It is only updated if a jump to the miss
+//           label is not done. Can be the same as elements or name clobbering
+//           one of these in the case of not jumping to the miss label.
+// The two scratch registers need to be different from elements, name and
+// result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryLoad(MacroAssembler* masm,
+                                   Label* miss,
+                                   Register elements,
+                                   Register name,
+                                   Register result,
+                                   Register scratch1,
+                                   Register scratch2) {
+  // Main use of the scratch registers.
+  // scratch1: Used as temporary and to hold the capacity of the property
+  //           dictionary.
+  // scratch2: Used as temporary.
+  Label done;
+
+  // Probe the dictionary.
+  GenerateStringDictionaryProbes(masm,
+                                 miss,
+                                 &done,
+                                 elements,
+                                 name,
+                                 scratch1,
+                                 scratch2);
+
+  // If probing finds an entry check that the value is a normal
+  // property.
   __ bind(&done);  // scratch2 == elements + 4 * index
-  __ ldr(scratch1,
-         FieldMemOperand(scratch2, kElementsStartOffset + 2 * kPointerSize));
+  const int kElementsStartOffset = StringDictionary::kHeaderSize +
+      StringDictionary::kElementsStartIndex * kPointerSize;
+  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+  __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
   __ tst(scratch1, Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
   __ b(ne, miss);
 
@@ -189,6 +212,63 @@
 }
 
 
+// Helper function used from StoreIC::GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+//           label is done.
+// name:     Property name. It is not clobbered if a jump to the miss label is
+//           done
+// value:    The value to store.
+// The two scratch registers need to be different from elements, name and
+// value.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryStore(MacroAssembler* masm,
+                                    Label* miss,
+                                    Register elements,
+                                    Register name,
+                                    Register value,
+                                    Register scratch1,
+                                    Register scratch2) {
+  // Main use of the scratch registers.
+  // scratch1: Used as temporary and to hold the capacity of the property
+  //           dictionary.
+  // scratch2: Used as temporary.
+  Label done;
+
+  // Probe the dictionary.
+  GenerateStringDictionaryProbes(masm,
+                                 miss,
+                                 &done,
+                                 elements,
+                                 name,
+                                 scratch1,
+                                 scratch2);
+
+  // If probing finds an entry in the dictionary check that the value
+  // is a normal property that is not read only.
+  __ bind(&done);  // scratch2 == elements + 4 * index
+  const int kElementsStartOffset = StringDictionary::kHeaderSize +
+      StringDictionary::kElementsStartIndex * kPointerSize;
+  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+  const int kTypeAndReadOnlyMask
+      = (PropertyDetails::TypeField::mask() |
+         PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
+  __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+  __ tst(scratch1, Operand(kTypeAndReadOnlyMask));
+  __ b(ne, miss);
+
+  // Store the value at the masked, scaled index and return.
+  const int kValueOffset = kElementsStartOffset + kPointerSize;
+  __ add(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
+  __ str(value, MemOperand(scratch2));
+
+  // Update the write barrier. Make sure not to clobber the value.
+  __ mov(scratch1, value);
+  __ RecordWrite(elements, scratch2, scratch1);
+}
+
+
 static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
                                          Label* miss,
                                          Register elements,
@@ -560,7 +640,7 @@
   // Get the receiver of the function from the stack into r1.
   __ ldr(r1, MemOperand(sp, argc * kPointerSize));
 
-  GenerateDictionaryLoadReceiverCheck(masm, r1, r0, r3, r4, &miss);
+  GenerateStringDictionaryReceiverCheck(masm, r1, r0, r3, r4, &miss);
 
   // r0: elements
   // Search the dictionary - put result in register r1.
@@ -815,7 +895,7 @@
   // -----------------------------------
   Label miss;
 
-  GenerateDictionaryLoadReceiverCheck(masm, r0, r1, r3, r4, &miss);
+  GenerateStringDictionaryReceiverCheck(masm, r0, r1, r3, r4, &miss);
 
   // r1: elements
   GenerateDictionaryLoad(masm, &miss, r1, r2, r0, r3, r4);
@@ -2138,6 +2218,27 @@
 }
 
 
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0    : value
+  //  -- r1    : receiver
+  //  -- r2    : name
+  //  -- lr    : return address
+  // -----------------------------------
+  Label miss;
+
+  GenerateStringDictionaryReceiverCheck(masm, r1, r3, r4, r5, &miss);
+
+  GenerateDictionaryStore(masm, &miss, r3, r2, r0, r4, r5);
+  __ IncrementCounter(&Counters::store_normal_hit, 1, r4, r5);
+  __ Ret();
+
+  __ bind(&miss);
+  __ IncrementCounter(&Counters::store_normal_miss, 1, r4, r5);
+  GenerateMiss(masm);
+}
+
+
 #undef __
 
 
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 630e0b8..81fc11e 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -310,32 +310,28 @@
 
 
 void MacroAssembler::RecordWriteHelper(Register object,
-                                       Operand offset,
-                                       Register scratch0,
-                                       Register scratch1) {
+                                       Register address,
+                                       Register scratch) {
   if (FLAG_debug_code) {
     // Check that the object is not in new space.
     Label not_in_new_space;
-    InNewSpace(object, scratch1, ne, &not_in_new_space);
+    InNewSpace(object, scratch, ne, &not_in_new_space);
     Abort("new-space object passed to RecordWriteHelper");
     bind(&not_in_new_space);
   }
 
-  // Add offset into the object.
-  add(scratch0, object, offset);
-
   // Calculate page address.
   Bfc(object, 0, kPageSizeBits);
 
   // Calculate region number.
-  Ubfx(scratch0, scratch0, Page::kRegionSizeLog2,
+  Ubfx(address, address, Page::kRegionSizeLog2,
        kPageSizeBits - Page::kRegionSizeLog2);
 
   // Mark region dirty.
-  ldr(scratch1, MemOperand(object, Page::kDirtyFlagOffset));
+  ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
   mov(ip, Operand(1));
-  orr(scratch1, scratch1, Operand(ip, LSL, scratch0));
-  str(scratch1, MemOperand(object, Page::kDirtyFlagOffset));
+  orr(scratch, scratch, Operand(ip, LSL, address));
+  str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
 }
 
 
@@ -368,8 +364,11 @@
   // region marks for new space pages.
   InNewSpace(object, scratch0, eq, &done);
 
+  // Add offset into the object.
+  add(scratch0, object, offset);
+
   // Record the actual write.
-  RecordWriteHelper(object, offset, scratch0, scratch1);
+  RecordWriteHelper(object, scratch0, scratch1);
 
   bind(&done);
 
@@ -383,6 +382,38 @@
 }
 
 
+// Will clobber 4 registers: object, address, scratch, ip.  The
+// register 'object' contains a heap object pointer.  The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWrite(Register object,
+                                 Register address,
+                                 Register scratch) {
+  // The compiled code assumes that record write doesn't change the
+  // context register, so we check that none of the clobbered
+  // registers are cp.
+  ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
+
+  Label done;
+
+  // First, test that the object is not in the new space.  We cannot set
+  // region marks for new space pages.
+  InNewSpace(object, scratch, eq, &done);
+
+  // Record the actual write.
+  RecordWriteHelper(object, address, scratch);
+
+  bind(&done);
+
+  // Clobber all input registers when running with the debug-code flag
+  // turned on to provoke errors.
+  if (FLAG_debug_code) {
+    mov(object, Operand(BitCast<int32_t>(kZapValue)));
+    mov(address, Operand(BitCast<int32_t>(kZapValue)));
+    mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
+  }
+}
+
+
 void MacroAssembler::Ldrd(Register dst1, Register dst2,
                           const MemOperand& src, Condition cond) {
   ASSERT(src.rm().is(no_reg));
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index c3f45a6..d57c565 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -137,22 +137,32 @@
                   Label* branch);
 
 
-  // For the page containing |object| mark the region covering [object+offset]
+  // For the page containing |object| mark the region covering [address]
   // dirty. The object address must be in the first 8K of an allocated page.
   void RecordWriteHelper(Register object,
-                         Operand offset,
-                         Register scratch0,
-                         Register scratch1);
+                         Register address,
+                         Register scratch);
 
-  // For the page containing |object| mark the region covering [object+offset]
-  // dirty. The object address must be in the first 8K of an allocated page.
-  // The 'scratch' registers are used in the implementation and all 3 registers
-  // are clobbered by the operation, as well as the ip register.
+  // For the page containing |object| mark the region covering
+  // [object+offset] dirty. The object address must be in the first 8K
+  // of an allocated page.  The 'scratch' registers are used in the
+  // implementation and all 3 registers are clobbered by the
+  // operation, as well as the ip register. RecordWrite updates the
+  // write barrier even when storing smis.
   void RecordWrite(Register object,
                    Operand offset,
                    Register scratch0,
                    Register scratch1);
 
+  // For the page containing |object| mark the region covering
+  // [address] dirty. The object address must be in the first 8K of an
+  // allocated page.  All 3 registers are clobbered by the operation,
+  // as well as the ip register. RecordWrite updates the write barrier
+  // even when storing smis.
+  void RecordWrite(Register object,
+                   Register address,
+                   Register scratch);
+
   // Push two registers.  Pushes leftmost register first (to highest address).
   void Push(Register src1, Register src2, Condition cond = al) {
     ASSERT(!src1.is(src2));
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 6240cd4..f09ce00 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -2276,7 +2276,14 @@
   if (instr->Bit(4) == 0) {
     if (instr->Opc1Field() == 0x7) {
       // Other data processing instructions
-      if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
+      if ((instr->Opc2Field() == 0x0) && (instr->Opc3Field() == 0x1)) {
+        // vmov register to register.
+        if (instr->SzField() == 0x1) {
+          set_d_register_from_double(vd, get_double_from_d_register(vm));
+        } else {
+          UNREACHABLE();  // Not used by V8.
+        }
+      } else if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) {
         DecodeVCVTBetweenDoubleAndSingle(instr);
       } else if ((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) {
         DecodeVCVTBetweenFloatingPointAndInteger(instr);
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 3e5ba11..0e649cc 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -741,7 +741,8 @@
                                        Register scratch,
                                        String* name,
                                        int save_at_depth,
-                                       Label* miss) {
+                                       Label* miss,
+                                       Register extra) {
   // Check that the maps haven't changed.
   Register result =
       masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch,
diff --git a/src/builtins.cc b/src/builtins.cc
index c8d4e09..ad52ea1 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -1263,6 +1263,11 @@
 }
 
 
+static void Generate_StoreIC_Normal(MacroAssembler* masm) {
+  StoreIC::GenerateNormal(masm);
+}
+
+
 static void Generate_StoreIC_Megamorphic(MacroAssembler* masm) {
   StoreIC::GenerateMegamorphic(masm);
 }
diff --git a/src/builtins.h b/src/builtins.h
index 1fab375..3dcab62 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -98,6 +98,7 @@
                                                                           \
   V(StoreIC_Initialize,         STORE_IC, UNINITIALIZED)                  \
   V(StoreIC_ArrayLength,        STORE_IC, MONOMORPHIC)                    \
+  V(StoreIC_Normal,             STORE_IC, MONOMORPHIC)                    \
   V(StoreIC_Megamorphic,        STORE_IC, MEGAMORPHIC)                    \
                                                                           \
   V(KeyedStoreIC_Initialize,    KEYED_STORE_IC, UNINITIALIZED)            \
diff --git a/src/date.js b/src/date.js
index e780cb8..83fca27 100644
--- a/src/date.js
+++ b/src/date.js
@@ -347,9 +347,10 @@
 function MakeDay(year, month, date) {
   if (!$isFinite(year) || !$isFinite(month) || !$isFinite(date)) return $NaN;
 
-  year = TO_INTEGER(year);
-  month = TO_INTEGER(month);
-  date = TO_INTEGER(date);
+  // Convert to integer and map -0 to 0.
+  year = TO_INTEGER_MAP_MINUS_ZERO(year);
+  month = TO_INTEGER_MAP_MINUS_ZERO(month);
+  date = TO_INTEGER_MAP_MINUS_ZERO(date);
 
   if (year < kMinYear || year > kMaxYear ||
       month < kMinMonth || month > kMaxMonth ||
diff --git a/src/debug-debugger.js b/src/debug-debugger.js
index c808c87..47a3c8e 100644
--- a/src/debug-debugger.js
+++ b/src/debug-debugger.js
@@ -2070,6 +2070,7 @@
     return response.failed('Missing arguments');
   }
   var script_id = request.arguments.script_id;
+  var preview_only = !!request.arguments.preview_only;
   
   var scripts = %DebugGetLoadedScripts();
 
@@ -2092,18 +2093,9 @@
 
   var new_source = request.arguments.new_source;
   
-  try {
-    Debug.LiveEdit.SetScriptSource(the_script, new_source, change_log);
-  } catch (e) {
-    if (e instanceof Debug.LiveEdit.Failure) {
-      // Let's treat it as a "success" so that body with change_log will be
-      // sent back. "change_log" will have "failure" field set.
-      change_log.push( { failure: true, message: e.toString() } ); 
-    } else {
-      throw e;
-    }
-  }
-  response.body = {change_log: change_log};
+  var result_description = Debug.LiveEdit.SetScriptSource(the_script,
+      new_source, preview_only, change_log);
+  response.body = {change_log: change_log, result: result_description};
 };
 
 
diff --git a/src/debug.cc b/src/debug.cc
index d513b31..1dc6275 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -684,6 +684,12 @@
 
 void Debug::HandleWeakDebugInfo(v8::Persistent<v8::Value> obj, void* data) {
   DebugInfoListNode* node = reinterpret_cast<DebugInfoListNode*>(data);
+  // We need to clear all breakpoints associated with the function to restore
+  // original code and avoid patching the code twice later because
+  // the function will live in the heap until next gc, and can be found by
+  // Runtime::FindSharedFunctionInfoInScript.
+  BreakLocationIterator it(node->debug_info(), ALL_BREAK_LOCATIONS);
+  it.ClearAllDebugBreak();
   RemoveDebugInfo(node->debug_info());
 #ifdef DEBUG
   node = Debug::debug_info_list_;
@@ -854,7 +860,7 @@
   HandleScope scope;
   ASSERT(args.length() == 0);
 
-  thread_local_.frames_are_dropped_ = false;
+  thread_local_.frame_drop_mode_ = FRAMES_UNTOUCHED;
 
   // Get the top-most JavaScript frame.
   JavaScriptFrameIterator it;
@@ -932,12 +938,22 @@
     PrepareStep(step_action, step_count);
   }
 
-  if (thread_local_.frames_are_dropped_) {
-    // We must have been calling IC stub. Do not return there anymore.
+  if (thread_local_.frame_drop_mode_ == FRAMES_UNTOUCHED) {
+    SetAfterBreakTarget(frame);
+  } else if (thread_local_.frame_drop_mode_ == FRAME_DROPPED_IN_IC_CALL) {
+    // We must have been calling IC stub. Do not go there anymore.
     Code* plain_return = Builtins::builtin(Builtins::PlainReturn_LiveEdit);
     thread_local_.after_break_target_ = plain_return->entry();
+  } else if (thread_local_.frame_drop_mode_ ==
+      FRAME_DROPPED_IN_DEBUG_SLOT_CALL) {
+    // Debug break slot stub does not return normally, instead it manually
+    // cleans the stack and jumps. We should patch the jump address.
+    Code* plain_return = Builtins::builtin(Builtins::FrameDropper_LiveEdit);
+    thread_local_.after_break_target_ = plain_return->entry();
+  } else if (thread_local_.frame_drop_mode_ == FRAME_DROPPED_IN_DIRECT_CALL) {
+    // Nothing to do, after_break_target is not used here.
   } else {
-    SetAfterBreakTarget(frame);
+    UNREACHABLE();
   }
 
   return Heap::undefined_value();
@@ -1749,8 +1765,9 @@
 }
 
 
-void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id) {
-  thread_local_.frames_are_dropped_ = true;
+void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
+                                  FrameDropMode mode) {
+  thread_local_.frame_drop_mode_ = mode;
   thread_local_.break_frame_id_ = new_break_frame_id;
 }
 
diff --git a/src/debug.h b/src/debug.h
index 6019294..fb92692 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -400,7 +400,22 @@
   // Called from stub-cache.cc.
   static void GenerateCallICDebugBreak(MacroAssembler* masm);
 
-  static void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id);
+  // Describes how exactly a frame has been dropped from stack.
+  enum FrameDropMode {
+    // No frame has been dropped.
+    FRAMES_UNTOUCHED,
+    // The top JS frame had been calling IC stub. IC stub mustn't be called now.
+    FRAME_DROPPED_IN_IC_CALL,
+    // The top JS frame had been calling debug break slot stub. Patch the
+    // address this stub jumps to in the end.
+    FRAME_DROPPED_IN_DEBUG_SLOT_CALL,
+    // The top JS frame had been calling some C++ function. The return address
+    // gets patched automatically.
+    FRAME_DROPPED_IN_DIRECT_CALL
+  };
+
+  static void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
+                                    FrameDropMode mode);
 
   static void SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
                                      Handle<Code> code);
@@ -471,8 +486,9 @@
     // Storage location for jump when exiting debug break calls.
     Address after_break_target_;
 
-    // Indicates that LiveEdit has patched the stack.
-    bool frames_are_dropped_;
+    // Stores the way how LiveEdit has patched the stack. It is used when
+    // debugger returns control back to user script.
+    FrameDropMode frame_drop_mode_;
 
     // Top debugger entry.
     EnterDebugger* debugger_entry_;
diff --git a/src/globals.h b/src/globals.h
index 6cf2626..aea8858 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -463,6 +463,12 @@
 };
 
 
+enum InlineCacheHolderFlag {
+  OWN_MAP,  // For fast properties objects.
+  PROTOTYPE_MAP  // For slow properties objects (except GlobalObjects).
+};
+
+
 // Type of properties.
 // Order of properties is significant.
 // Must fit in the BitField PropertyDetails::TypeField.
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 1a847c1..fa09dd8 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -11656,7 +11656,7 @@
 
 
 void CompareStub::Generate(MacroAssembler* masm) {
-  Label call_builtin, done;
+  Label check_unequal_objects, done;
 
   // NOTICE! This code is only reached after a smi-fast-case check, so
   // it is certain that at least one operand isn't a smi.
@@ -11689,15 +11689,15 @@
       Label heap_number;
       __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
              Immediate(Factory::heap_number_map()));
-      if (cc_ == equal) {
-        __ j(equal, &heap_number);
-        // Identical objects are equal for operators ==, !=, and ===.
-        __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
-        __ ret(0);
-      } else {
-        // Identical objects must call ToPrimitive for <, <=, >, and >=.
-        __ j(not_equal, &not_identical);
+      __ j(equal, &heap_number);
+      if (cc_ != equal) {
+        // Call runtime on identical JSObjects.  Otherwise return equal.
+        __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+        __ j(above_equal, &not_identical);
       }
+      __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+      __ ret(0);
+
       __ bind(&heap_number);
       // It is a heap number, so return non-equal if it's NaN and equal if
       // it's not NaN.
@@ -11734,79 +11734,75 @@
     __ bind(&not_identical);
   }
 
-  if (cc_ == equal) {  // Both strict and non-strict.
+  // Strict equality can quickly decide whether objects are equal.
+  // Non-strict object equality is slower, so it is handled later in the stub.
+  if (cc_ == equal && strict_) {
     Label slow;  // Fallthrough label.
-
+    Label not_smis;
     // If we're doing a strict equality comparison, we don't have to do
     // type conversion, so we generate code to do fast comparison for objects
     // and oddballs. Non-smi numbers and strings still go through the usual
     // slow-case code.
-    if (strict_) {
-      // If either is a Smi (we know that not both are), then they can only
-      // be equal if the other is a HeapNumber. If so, use the slow case.
-      {
-        Label not_smis;
-        ASSERT_EQ(0, kSmiTag);
-        ASSERT_EQ(0, Smi::FromInt(0));
-        __ mov(ecx, Immediate(kSmiTagMask));
-        __ and_(ecx, Operand(eax));
-        __ test(ecx, Operand(edx));
-        __ j(not_zero, &not_smis);
-        // One operand is a smi.
+    // If either is a Smi (we know that not both are), then they can only
+    // be equal if the other is a HeapNumber. If so, use the slow case.
+    ASSERT_EQ(0, kSmiTag);
+    ASSERT_EQ(0, Smi::FromInt(0));
+    __ mov(ecx, Immediate(kSmiTagMask));
+    __ and_(ecx, Operand(eax));
+    __ test(ecx, Operand(edx));
+    __ j(not_zero, &not_smis);
+    // One operand is a smi.
 
-        // Check whether the non-smi is a heap number.
-        ASSERT_EQ(1, kSmiTagMask);
-        // ecx still holds eax & kSmiTag, which is either zero or one.
-        __ sub(Operand(ecx), Immediate(0x01));
-        __ mov(ebx, edx);
-        __ xor_(ebx, Operand(eax));
-        __ and_(ebx, Operand(ecx));  // ebx holds either 0 or eax ^ edx.
-        __ xor_(ebx, Operand(eax));
-        // if eax was smi, ebx is now edx, else eax.
+    // Check whether the non-smi is a heap number.
+    ASSERT_EQ(1, kSmiTagMask);
+    // ecx still holds eax & kSmiTag, which is either zero or one.
+    __ sub(Operand(ecx), Immediate(0x01));
+    __ mov(ebx, edx);
+    __ xor_(ebx, Operand(eax));
+    __ and_(ebx, Operand(ecx));  // ebx holds either 0 or eax ^ edx.
+    __ xor_(ebx, Operand(eax));
+    // if eax was smi, ebx is now edx, else eax.
 
-        // Check if the non-smi operand is a heap number.
-        __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
-               Immediate(Factory::heap_number_map()));
-        // If heap number, handle it in the slow case.
-        __ j(equal, &slow);
-        // Return non-equal (ebx is not zero)
-        __ mov(eax, ebx);
-        __ ret(0);
+    // Check if the non-smi operand is a heap number.
+    __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+           Immediate(Factory::heap_number_map()));
+    // If heap number, handle it in the slow case.
+    __ j(equal, &slow);
+    // Return non-equal (ebx is not zero)
+    __ mov(eax, ebx);
+    __ ret(0);
 
-        __ bind(&not_smis);
-      }
+    __ bind(&not_smis);
+    // If either operand is a JSObject or an oddball value, then they are not
+    // equal since their pointers are different
+    // There is no test for undetectability in strict equality.
 
-      // If either operand is a JSObject or an oddball value, then they are not
-      // equal since their pointers are different
-      // There is no test for undetectability in strict equality.
+    // Get the type of the first operand.
+    // If the first object is a JS object, we have done pointer comparison.
+    Label first_non_object;
+    ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+    __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+    __ j(below, &first_non_object);
 
-      // Get the type of the first operand.
-      // If the first object is a JS object, we have done pointer comparison.
-      Label first_non_object;
-      ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-      __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
-      __ j(below, &first_non_object);
+    // Return non-zero (eax is not zero)
+    Label return_not_equal;
+    ASSERT(kHeapObjectTag != 0);
+    __ bind(&return_not_equal);
+    __ ret(0);
 
-      // Return non-zero (eax is not zero)
-      Label return_not_equal;
-      ASSERT(kHeapObjectTag != 0);
-      __ bind(&return_not_equal);
-      __ ret(0);
+    __ bind(&first_non_object);
+    // Check for oddballs: true, false, null, undefined.
+    __ CmpInstanceType(ecx, ODDBALL_TYPE);
+    __ j(equal, &return_not_equal);
 
-      __ bind(&first_non_object);
-      // Check for oddballs: true, false, null, undefined.
-      __ CmpInstanceType(ecx, ODDBALL_TYPE);
-      __ j(equal, &return_not_equal);
+    __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ecx);
+    __ j(above_equal, &return_not_equal);
 
-      __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ecx);
-      __ j(above_equal, &return_not_equal);
+    // Check for oddballs: true, false, null, undefined.
+    __ CmpInstanceType(ecx, ODDBALL_TYPE);
+    __ j(equal, &return_not_equal);
 
-      // Check for oddballs: true, false, null, undefined.
-      __ CmpInstanceType(ecx, ODDBALL_TYPE);
-      __ j(equal, &return_not_equal);
-
-      // Fall through to the general case.
-    }
+    // Fall through to the general case.
     __ bind(&slow);
   }
 
@@ -11893,7 +11889,8 @@
 
   __ bind(&check_for_strings);
 
-  __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &call_builtin);
+  __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
+                                         &check_unequal_objects);
 
   // Inline comparison of ascii strings.
   StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
@@ -11906,7 +11903,44 @@
   __ Abort("Unexpected fall-through from string comparison");
 #endif
 
-  __ bind(&call_builtin);
+  __ bind(&check_unequal_objects);
+  if (cc_ == equal && !strict_) {
+    // Non-strict equality.  Objects are unequal if
+    // they are both JSObjects and not undetectable,
+    // and their pointers are different.
+    Label not_both_objects;
+    Label return_unequal;
+    // At most one is a smi, so we can test for smi by adding the two.
+    // A smi plus a heap object has the low bit set, a heap object plus
+    // a heap object has the low bit clear.
+    ASSERT_EQ(0, kSmiTag);
+    ASSERT_EQ(1, kSmiTagMask);
+    __ lea(ecx, Operand(eax, edx, times_1, 0));
+    __ test(ecx, Immediate(kSmiTagMask));
+    __ j(not_zero, &not_both_objects);
+    __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+    __ j(below, &not_both_objects);
+    __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ebx);
+    __ j(below, &not_both_objects);
+    // We do not bail out after this point.  Both are JSObjects, and
+    // they are equal if and only if both are undetectable.
+    // The and of the undetectable flags is 1 if and only if they are equal.
+    __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+              1 << Map::kIsUndetectable);
+    __ j(zero, &return_unequal);
+    __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
+              1 << Map::kIsUndetectable);
+    __ j(zero, &return_unequal);
+    // The objects are both undetectable, so they both compare as the value
+    // undefined, and are equal.
+    __ Set(eax, Immediate(EQUAL));
+    __ bind(&return_unequal);
+    // Return non-equal by returning the non-zero object pointer in eax,
+    // or return equal if we fell through to here.
+    __ ret(2 * kPointerSize);  // eax, edx were pushed
+    __ bind(&not_both_objects);
+  }
+
   // must swap argument order
   __ pop(ecx);
   __ pop(edx);
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 62f878c..062f0f2 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -61,11 +61,11 @@
 
 // Generated code falls through if the receiver is a regular non-global
 // JS object with slow properties and no interceptors.
-static void GenerateDictionaryLoadReceiverCheck(MacroAssembler* masm,
-                                                Register receiver,
-                                                Register r0,
-                                                Register r1,
-                                                Label* miss) {
+static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
+                                                  Register receiver,
+                                                  Register r0,
+                                                  Register r1,
+                                                  Label* miss) {
   // Register usage:
   //   receiver: holds the receiver on entry and is unchanged.
   //   r0: used to hold receiver instance type.
@@ -98,36 +98,17 @@
 }
 
 
-// Helper function used to load a property from a dictionary backing storage.
-// This function may return false negatives, so miss_label
-// must always call a backup property load that is complete.
-// This function is safe to call if name is not a symbol, and will jump to
-// the miss_label in that case.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
-                                   Label* miss_label,
-                                   Register elements,
-                                   Register name,
-                                   Register r0,
-                                   Register r1,
-                                   Register result) {
-  // Register use:
-  //
-  // elements - holds the property dictionary on entry and is unchanged.
-  //
-  // name - holds the name of the property on entry and is unchanged.
-  //
-  // Scratch registers:
-  //
-  // r0   - used for the index into the property dictionary
-  //
-  // r1   - used to hold the capacity of the property dictionary.
-  //
-  // result - holds the result on exit.
-
-  Label done;
-
+// Probe the string dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found leaving the
+// index into the dictionary in |r0|. Jump to the |miss| label
+// otherwise.
+static void GenerateStringDictionaryProbes(MacroAssembler* masm,
+                                           Label* miss,
+                                           Label* done,
+                                           Register elements,
+                                           Register name,
+                                           Register r0,
+                                           Register r1) {
   // Compute the capacity mask.
   const int kCapacityOffset =
       StringDictionary::kHeaderSize +
@@ -160,14 +141,61 @@
     __ cmp(name, Operand(elements, r0, times_4,
                          kElementsStartOffset - kHeapObjectTag));
     if (i != kProbes - 1) {
-      __ j(equal, &done, taken);
+      __ j(equal, done, taken);
     } else {
-      __ j(not_equal, miss_label, not_taken);
+      __ j(not_equal, miss, not_taken);
     }
   }
+}
 
-  // Check that the value is a normal property.
+
+
+// Helper function used to load a property from a dictionary backing
+// storage. This function may fail to load a property even though it is
+// in the dictionary, so code at miss_label must always call a backup
+// property load that is complete. This function is safe to call if
+// name is not a symbol, and will jump to the miss_label in that
+// case. The generated code assumes that the receiver has slow
+// properties, is not a global object and does not have interceptors.
+static void GenerateDictionaryLoad(MacroAssembler* masm,
+                                   Label* miss_label,
+                                   Register elements,
+                                   Register name,
+                                   Register r0,
+                                   Register r1,
+                                   Register result) {
+  // Register use:
+  //
+  // elements - holds the property dictionary on entry and is unchanged.
+  //
+  // name - holds the name of the property on entry and is unchanged.
+  //
+  // Scratch registers:
+  //
+  // r0   - used for the index into the property dictionary
+  //
+  // r1   - used to hold the capacity of the property dictionary.
+  //
+  // result - holds the result on exit.
+
+  Label done;
+
+  // Probe the dictionary.
+  GenerateStringDictionaryProbes(masm,
+                                 miss_label,
+                                 &done,
+                                 elements,
+                                 name,
+                                 r0,
+                                 r1);
+
+  // If probing finds an entry in the dictionary, r0 contains the
+  // index into the dictionary. Check that the value is a normal
+  // property.
   __ bind(&done);
+  const int kElementsStartOffset =
+      StringDictionary::kHeaderSize +
+      StringDictionary::kElementsStartIndex * kPointerSize;
   const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
   __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
           Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
@@ -179,6 +207,69 @@
 }
 
 
+// Helper function used to store a property to a dictionary backing
+// storage. This function may fail to store a property even though it
+// is in the dictionary, so code at miss_label must always call a
+// backup property store that is complete. This function is safe to
+// call if name is not a symbol, and will jump to the miss_label in
+// that case. The generated code assumes that the receiver has slow
+// properties, is not a global object and does not have interceptors.
+static void GenerateDictionaryStore(MacroAssembler* masm,
+                                    Label* miss_label,
+                                    Register elements,
+                                    Register name,
+                                    Register value,
+                                    Register r0,
+                                    Register r1) {
+  // Register use:
+  //
+  // elements - holds the property dictionary on entry and is clobbered.
+  //
+  // name - holds the name of the property on entry and is unchanged.
+  //
+  // value - holds the value to store and is unchanged.
+  //
+  // r0 - used for index into the property dictionary and is clobbered.
+  //
+  // r1 - used to hold the capacity of the property dictionary and is clobbered.
+  Label done;
+
+
+  // Probe the dictionary.
+  GenerateStringDictionaryProbes(masm,
+                                 miss_label,
+                                 &done,
+                                 elements,
+                                 name,
+                                 r0,
+                                 r1);
+
+  // If probing finds an entry in the dictionary, r0 contains the
+  // index into the dictionary. Check that the value is a normal
+  // property that is not read only.
+  __ bind(&done);
+  const int kElementsStartOffset =
+      StringDictionary::kHeaderSize +
+      StringDictionary::kElementsStartIndex * kPointerSize;
+  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+  const int kTypeAndReadOnlyMask
+      = (PropertyDetails::TypeField::mask() |
+         PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
+  __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
+          Immediate(kTypeAndReadOnlyMask));
+  __ j(not_zero, miss_label, not_taken);
+
+  // Store the value at the masked, scaled index.
+  const int kValueOffset = kElementsStartOffset + kPointerSize;
+  __ lea(r0, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
+  __ mov(Operand(r0, 0), value);
+
+  // Update write barrier. Make sure not to clobber the value.
+  __ mov(r1, value);
+  __ RecordWrite(elements, r0, r1);
+}
+
+
 static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
                                          Label* miss,
                                          Register elements,
@@ -1238,7 +1329,7 @@
   // Get the receiver of the function from the stack; 1 ~ return address.
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
 
-  GenerateDictionaryLoadReceiverCheck(masm, edx, eax, ebx, &miss);
+  GenerateStringDictionaryReceiverCheck(masm, edx, eax, ebx, &miss);
 
   // eax: elements
   // Search the dictionary placing the result in edi.
@@ -1517,7 +1608,7 @@
   // -----------------------------------
   Label miss;
 
-  GenerateDictionaryLoadReceiverCheck(masm, eax, edx, ebx, &miss);
+  GenerateStringDictionaryReceiverCheck(masm, eax, edx, ebx, &miss);
 
   // edx: elements
   // Search the dictionary placing the result in eax.
@@ -1775,6 +1866,36 @@
 }
 
 
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- ecx    : name
+  //  -- edx    : receiver
+  //  -- esp[0] : return address
+  // -----------------------------------
+
+  Label miss, restore_miss;
+
+  GenerateStringDictionaryReceiverCheck(masm, edx, ebx, edi, &miss);
+
+  // A lot of registers are needed for storing to slow case
+  // objects. Push and restore receiver but rely on
+  // GenerateDictionaryStore preserving the value and name.
+  __ push(edx);
+  GenerateDictionaryStore(masm, &restore_miss, ebx, ecx, eax, edx, edi);
+  __ Drop(1);
+  __ IncrementCounter(&Counters::store_normal_hit, 1);
+  __ ret(0);
+
+  __ bind(&restore_miss);
+  __ pop(edx);
+
+  __ bind(&miss);
+  __ IncrementCounter(&Counters::store_normal_miss, 1);
+  GenerateMiss(masm);
+}
+
+
 // Defined in ic.cc.
 Object* KeyedStoreIC_Miss(Arguments args);
 
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index b83f9bc..b3f7c21 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -98,11 +98,6 @@
 }
 
 
-// For page containing |object| mark region covering [object+offset] dirty.
-// object is the object being stored into, value is the object being stored.
-// If offset is zero, then the scratch register contains the array index into
-// the elements array represented as a Smi.
-// All registers are clobbered by the operation.
 void MacroAssembler::RecordWrite(Register object, int offset,
                                  Register value, Register scratch) {
   // The compiled code assumes that record write doesn't change the
@@ -153,6 +148,39 @@
 }
 
 
+void MacroAssembler::RecordWrite(Register object,
+                                 Register address,
+                                 Register value) {
+  // The compiled code assumes that record write doesn't change the
+  // context register, so we check that none of the clobbered
+  // registers are esi.
+  ASSERT(!object.is(esi) && !value.is(esi) && !address.is(esi));
+
+  // First, check if a write barrier is even needed. The tests below
+  // catch stores of Smis and stores into young gen.
+  Label done;
+
+  // Skip barrier if writing a smi.
+  ASSERT_EQ(0, kSmiTag);
+  test(value, Immediate(kSmiTagMask));
+  j(zero, &done);
+
+  InNewSpace(object, value, equal, &done);
+
+  RecordWriteHelper(object, address, value);
+
+  bind(&done);
+
+  // Clobber all input registers when running with the debug-code flag
+  // turned on to provoke errors.
+  if (FLAG_debug_code) {
+    mov(object, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(address, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
+  }
+}
+
+
 void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
   cmp(esp,
       Operand::StaticVariable(ExternalReference::address_of_stack_limit()));
@@ -514,97 +542,6 @@
 }
 
 
-Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
-                                   JSObject* holder, Register holder_reg,
-                                   Register scratch,
-                                   int save_at_depth,
-                                   Label* miss) {
-  // Make sure there's no overlap between scratch and the other
-  // registers.
-  ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg));
-
-  // Keep track of the current object in register reg.
-  Register reg = object_reg;
-  int depth = 0;
-
-  if (save_at_depth == depth) {
-    mov(Operand(esp, kPointerSize), object_reg);
-  }
-
-  // Check the maps in the prototype chain.
-  // Traverse the prototype chain from the object and do map checks.
-  while (object != holder) {
-    depth++;
-
-    // Only global objects and objects that do not require access
-    // checks are allowed in stubs.
-    ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
-    JSObject* prototype = JSObject::cast(object->GetPrototype());
-    if (Heap::InNewSpace(prototype)) {
-      // Get the map of the current object.
-      mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
-      cmp(Operand(scratch), Immediate(Handle<Map>(object->map())));
-      // Branch on the result of the map check.
-      j(not_equal, miss, not_taken);
-      // Check access rights to the global object.  This has to happen
-      // after the map check so that we know that the object is
-      // actually a global object.
-      if (object->IsJSGlobalProxy()) {
-        CheckAccessGlobalProxy(reg, scratch, miss);
-
-        // Restore scratch register to be the map of the object.
-        // We load the prototype from the map in the scratch register.
-        mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
-      }
-      // The prototype is in new space; we cannot store a reference
-      // to it in the code. Load it from the map.
-      reg = holder_reg;  // from now the object is in holder_reg
-      mov(reg, FieldOperand(scratch, Map::kPrototypeOffset));
-    } else {
-      // Check the map of the current object.
-      cmp(FieldOperand(reg, HeapObject::kMapOffset),
-          Immediate(Handle<Map>(object->map())));
-      // Branch on the result of the map check.
-      j(not_equal, miss, not_taken);
-      // Check access rights to the global object.  This has to happen
-      // after the map check so that we know that the object is
-      // actually a global object.
-      if (object->IsJSGlobalProxy()) {
-        CheckAccessGlobalProxy(reg, scratch, miss);
-      }
-      // The prototype is in old space; load it directly.
-      reg = holder_reg;  // from now the object is in holder_reg
-      mov(reg, Handle<JSObject>(prototype));
-    }
-
-    if (save_at_depth == depth) {
-      mov(Operand(esp, kPointerSize), reg);
-    }
-
-    // Go to the next object in the prototype chain.
-    object = prototype;
-  }
-
-  // Check the holder map.
-  cmp(FieldOperand(reg, HeapObject::kMapOffset),
-      Immediate(Handle<Map>(holder->map())));
-  j(not_equal, miss, not_taken);
-
-  // Log the check depth.
-  LOG(IntEvent("check-maps-depth", depth + 1));
-
-  // Perform security check for access to the global object and return
-  // the holder register.
-  ASSERT(object == holder);
-  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-  if (object->IsJSGlobalProxy()) {
-    CheckAccessGlobalProxy(reg, scratch, miss);
-  }
-  return reg;
-}
-
-
 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                             Register scratch,
                                             Label* miss) {
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 2018721..02cfd4d 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -73,16 +73,27 @@
                   Condition cc,  // equal for new space, not_equal otherwise.
                   Label* branch);
 
-  // For page containing |object| mark region covering [object+offset] dirty.
-  // object is the object being stored into, value is the object being stored.
-  // If offset is zero, then the scratch register contains the array index into
-  // the elements array represented as a Smi.
-  // All registers are clobbered by the operation.
+  // For page containing |object| mark region covering [object+offset]
+  // dirty. |object| is the object being stored into, |value| is the
+  // object being stored. If offset is zero, then the scratch register
+  // contains the array index into the elements array represented as a
+  // Smi. All registers are clobbered by the operation. RecordWrite
+  // filters out smis so it does not update the write barrier if the
+  // value is a smi.
   void RecordWrite(Register object,
                    int offset,
                    Register value,
                    Register scratch);
 
+  // For page containing |object| mark region covering |address|
+  // dirty. |object| is the object being stored into, |value| is the
+  // object being stored. All registers are clobbered by the
+  // operation. RecordWrite filters out smis so it does not update the
+  // write barrier if the value is a smi.
+  void RecordWrite(Register object,
+                   Register address,
+                   Register value);
+
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // ---------------------------------------------------------------------------
   // Debugger Support
@@ -233,24 +244,6 @@
   // ---------------------------------------------------------------------------
   // Inline caching support
 
-  // Generates code that verifies that the maps of objects in the
-  // prototype chain of object hasn't changed since the code was
-  // generated and branches to the miss label if any map has. If
-  // necessary the function also generates code for security check
-  // in case of global object holders. The scratch and holder
-  // registers are always clobbered, but the object register is only
-  // clobbered if it the same as the holder register. The function
-  // returns a register containing the holder - either object_reg or
-  // holder_reg.
-  // The function can optionally (when save_at_depth !=
-  // kInvalidProtoDepth) save the object at the given depth by moving
-  // it to [esp + kPointerSize].
-  Register CheckMaps(JSObject* object, Register object_reg,
-                     JSObject* holder, Register holder_reg,
-                     Register scratch,
-                     int save_at_depth,
-                     Label* miss);
-
   // Generate code for checking access rights - used for security checks
   // on access to global objects across environments. The holder register
   // is left untouched, but the scratch register is clobbered.
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index bab0435..26361d1 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -101,6 +101,110 @@
 }
 
 
+// Helper function used to check that the dictionary doesn't contain
+// the property. This function may return false negatives, so the code at
+// miss_label must always perform a backup property check that is complete.
+// This function is safe to call if the receiver has fast properties.
+// Name must be a symbol and receiver must be a heap object.
+static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+                                             Label* miss_label,
+                                             Register receiver,
+                                             String* name,
+                                             Register r0,
+                                             Register extra) {
+  ASSERT(name->IsSymbol());
+  __ IncrementCounter(&Counters::negative_lookups, 1);
+  __ IncrementCounter(&Counters::negative_lookups_miss, 1);
+
+  Label done;
+  __ mov(r0, FieldOperand(receiver, HeapObject::kMapOffset));
+
+  const int kInterceptorOrAccessCheckNeededMask =
+      (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+  // Bail out if the receiver has a named interceptor or requires access checks.
+  __ test(FieldOperand(r0, Map::kBitFieldOffset),
+          Immediate(kInterceptorOrAccessCheckNeededMask));
+  __ j(not_zero, miss_label, not_taken);
+
+  __ CmpInstanceType(r0, FIRST_JS_OBJECT_TYPE);
+  __ j(below, miss_label, not_taken);
+
+  // Load properties array.
+  Register properties = r0;
+  __ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
+
+  // Check that the properties array is a dictionary.
+  __ cmp(FieldOperand(properties, HeapObject::kMapOffset),
+         Immediate(Factory::hash_table_map()));
+  __ j(not_equal, miss_label);
+
+  // Compute the capacity mask.
+  const int kCapacityOffset =
+      StringDictionary::kHeaderSize +
+      StringDictionary::kCapacityIndex * kPointerSize;
+
+  // Generate an unrolled loop that performs a few probes before
+  // giving up.
+  static const int kProbes = 4;
+  const int kElementsStartOffset =
+      StringDictionary::kHeaderSize +
+      StringDictionary::kElementsStartIndex * kPointerSize;
+
+  // If names of slots in range from 1 to kProbes - 1 for the hash value are
+  // not equal to the name and kProbes-th slot is not used (its name is the
+  // undefined value), it guarantees the hash table doesn't contain the
+  // property. It's true even if some slots represent deleted properties
+  // (their names are the null value).
+  for (int i = 0; i < kProbes; i++) {
+    // r0 points to properties hash.
+    // Compute the masked index: (hash + i + i * i) & mask.
+    if (extra.is(no_reg)) {
+      __ push(receiver);
+    }
+    Register index = extra.is(no_reg) ? receiver : extra;
+    // Capacity is smi 2^n.
+    __ mov(index, FieldOperand(properties, kCapacityOffset));
+    __ dec(index);
+    __ and_(Operand(index),
+            Immediate(Smi::FromInt(name->Hash() +
+                                   StringDictionary::GetProbeOffset(i))));
+
+    // Scale the index by multiplying by the entry size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    __ lea(index, Operand(index, index, times_2, 0));  // index *= 3.
+
+    Register entity_name = extra.is(no_reg) ? properties : extra;
+    // Having undefined at this place means the name is not contained.
+    ASSERT_EQ(kSmiTagSize, 1);
+    __ mov(entity_name, Operand(properties, index, times_half_pointer_size,
+                                kElementsStartOffset - kHeapObjectTag));
+    __ cmp(entity_name, Factory::undefined_value());
+    if (extra.is(no_reg)) {
+      // 'receiver' shares a register with 'entity_name'.
+      __ pop(receiver);
+    }
+    if (i != kProbes - 1) {
+      __ j(equal, &done, taken);
+
+      // Stop if found the property.
+      __ cmp(entity_name, Handle<String>(name));
+      __ j(equal, miss_label, not_taken);
+
+      if (extra.is(no_reg)) {
+        // Restore the properties if their register was occupied by the name.
+        __ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
+      }
+    } else {
+      // Give up probing if still not found the undefined value.
+      __ j(not_equal, miss_label, not_taken);
+    }
+  }
+
+  __ bind(&done);
+  __ DecrementCounter(&Counters::negative_lookups_miss, 1);
+}
+
+
 void StubCache::GenerateProbe(MacroAssembler* masm,
                               Code::Flags flags,
                               Register receiver,
@@ -723,6 +827,33 @@
 }
 
 
+// Calls GenerateCheckPropertyCell for each global object in the prototype chain
+// from object to (but not including) holder.
+static Object* GenerateCheckPropertyCells(MacroAssembler* masm,
+                                          JSObject* object,
+                                          JSObject* holder,
+                                          String* name,
+                                          Register scratch,
+                                          Label* miss) {
+  JSObject* current = object;
+  while (current != holder) {
+    if (current->IsGlobalObject()) {
+      Object* cell = GenerateCheckPropertyCell(masm,
+                                               GlobalObject::cast(current),
+                                               name,
+                                               scratch,
+                                               miss);
+      if (cell->IsFailure()) {
+        return cell;
+      }
+    }
+    ASSERT(current->IsJSObject());
+    current = JSObject::cast(current->GetPrototype());
+  }
+  return NULL;
+}
+
+
 #undef __
 #define __ ACCESS_MASM(masm())
 
@@ -733,33 +864,129 @@
                                        Register holder_reg,
                                        Register scratch,
                                        String* name,
-                                       int push_at_depth,
-                                       Label* miss) {
-  // Check that the maps haven't changed.
-  Register result =
-      masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch,
-                        push_at_depth, miss);
+                                       int save_at_depth,
+                                       Label* miss,
+                                       Register extra) {
+  // Make sure there's no overlap between holder and object registers.
+  ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg));
+  ASSERT(!extra.is(object_reg) && !extra.is(holder_reg) && !extra.is(scratch));
+  // Keep track of the current object in register reg.
+  Register reg = object_reg;
+  JSObject* current = object;
+  int depth = 0;
+
+  if (save_at_depth == depth) {
+    __ mov(Operand(esp, kPointerSize), reg);
+  }
+
+  // Traverse the prototype chain and check the maps in the prototype chain for
+  // fast and global objects or do negative lookup for normal objects.
+  while (current != holder) {
+    depth++;
+
+    // Only global objects and objects that do not require access
+    // checks are allowed in stubs.
+    ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
+
+    ASSERT(current->GetPrototype()->IsJSObject());
+    JSObject* prototype = JSObject::cast(current->GetPrototype());
+    if (!current->HasFastProperties() &&
+        !current->IsJSGlobalObject() &&
+        !current->IsJSGlobalProxy()) {
+      if (!name->IsSymbol()) {
+        Object* lookup_result = Heap::LookupSymbol(name);
+        if (lookup_result->IsFailure()) {
+          set_failure(Failure::cast(lookup_result));
+          return reg;
+        } else {
+          name = String::cast(lookup_result);
+        }
+      }
+      ASSERT(current->property_dictionary()->FindEntry(name) ==
+             StringDictionary::kNotFound);
+
+      GenerateDictionaryNegativeLookup(masm(),
+                                       miss,
+                                       reg,
+                                       name,
+                                       scratch,
+                                       extra);
+      __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+      reg = holder_reg;  // from now the object is in holder_reg
+      __ mov(reg, FieldOperand(scratch, Map::kPrototypeOffset));
+    } else if (Heap::InNewSpace(prototype)) {
+      // Get the map of the current object.
+      __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+      __ cmp(Operand(scratch), Immediate(Handle<Map>(current->map())));
+      // Branch on the result of the map check.
+      __ j(not_equal, miss, not_taken);
+      // Check access rights to the global object.  This has to happen
+      // after the map check so that we know that the object is
+      // actually a global object.
+      if (current->IsJSGlobalProxy()) {
+        __ CheckAccessGlobalProxy(reg, scratch, miss);
+
+        // Restore scratch register to be the map of the object.
+        // We load the prototype from the map in the scratch register.
+        __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+      }
+      // The prototype is in new space; we cannot store a reference
+      // to it in the code. Load it from the map.
+      reg = holder_reg;  // from now the object is in holder_reg
+      __ mov(reg, FieldOperand(scratch, Map::kPrototypeOffset));
+    } else {
+      // Check the map of the current object.
+      __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+             Immediate(Handle<Map>(current->map())));
+      // Branch on the result of the map check.
+      __ j(not_equal, miss, not_taken);
+      // Check access rights to the global object.  This has to happen
+      // after the map check so that we know that the object is
+      // actually a global object.
+      if (current->IsJSGlobalProxy()) {
+        __ CheckAccessGlobalProxy(reg, scratch, miss);
+      }
+      // The prototype is in old space; load it directly.
+      reg = holder_reg;  // from now the object is in holder_reg
+      __ mov(reg, Handle<JSObject>(prototype));
+    }
+
+    if (save_at_depth == depth) {
+      __ mov(Operand(esp, kPointerSize), reg);
+    }
+
+    // Go to the next object in the prototype chain.
+    current = prototype;
+  }
+  ASSERT(current == holder);
+
+  // Log the check depth.
+  LOG(IntEvent("check-maps-depth", depth + 1));
+
+  // Check the holder map.
+  __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+         Immediate(Handle<Map>(holder->map())));
+  __ j(not_equal, miss, not_taken);
+
+  // Perform security check for access to the global object.
+  ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
+  if (holder->IsJSGlobalProxy()) {
+    __ CheckAccessGlobalProxy(reg, scratch, miss);
+  };
 
   // If we've skipped any global objects, it's not enough to verify
   // that their maps haven't changed.  We also need to check that the
   // property cell for the property is still empty.
-  while (object != holder) {
-    if (object->IsGlobalObject()) {
-      Object* cell = GenerateCheckPropertyCell(masm(),
-                                               GlobalObject::cast(object),
-                                               name,
-                                               scratch,
-                                               miss);
-      if (cell->IsFailure()) {
-        set_failure(Failure::cast(cell));
-        return result;
-      }
-    }
-    object = JSObject::cast(object->GetPrototype());
-  }
+  Object* result = GenerateCheckPropertyCells(masm(),
+                                              object,
+                                              holder,
+                                              name,
+                                              scratch,
+                                              miss);
+  if (result->IsFailure()) set_failure(Failure::cast(result));
 
   // Return the register containing the holder.
-  return result;
+  return reg;
 }
 
 
@@ -1083,7 +1310,8 @@
   __ j(zero, &miss, not_taken);
 
   // Do the right check and compute the holder register.
-  Register reg = CheckPrototypes(object, edx, holder, ebx, eax, name, &miss);
+  Register reg = CheckPrototypes(object, edx, holder, ebx, eax,
+                                 name, &miss, edi);
 
   GenerateFastPropertyLoad(masm(), edi, reg, holder, index);
 
@@ -1145,7 +1373,7 @@
 
   CheckPrototypes(JSObject::cast(object), edx,
                   holder, ebx,
-                  eax, name, &miss);
+                  eax, name, &miss, edi);
 
   if (argc == 0) {
     // Noop, return the length.
@@ -1291,7 +1519,7 @@
   __ j(zero, &miss);
   CheckPrototypes(JSObject::cast(object), edx,
                   holder, ebx,
-                  eax, name, &miss);
+                  eax, name, &miss, edi);
 
   // Get the elements array of the object.
   __ mov(ebx, FieldOperand(edx, JSArray::kElementsOffset));
@@ -1366,7 +1594,7 @@
                                             Context::STRING_FUNCTION_INDEX,
                                             eax);
   CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
-                  ebx, edx, name, &miss);
+                  ebx, edx, name, &miss, edi);
 
   Register receiver = ebx;
   Register index = edi;
@@ -1431,7 +1659,7 @@
                                             Context::STRING_FUNCTION_INDEX,
                                             eax);
   CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
-                  ebx, edx, name, &miss);
+                  ebx, edx, name, &miss, edi);
 
   Register receiver = eax;
   Register index = edi;
@@ -1536,7 +1764,7 @@
 
       // Check that the maps haven't changed.
       CheckPrototypes(JSObject::cast(object), edx, holder,
-                      ebx, eax, name, depth, &miss);
+                      ebx, eax, name, depth, &miss, edi);
 
       // Patch the receiver on the stack with the global proxy if
       // necessary.
@@ -1559,7 +1787,7 @@
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::STRING_FUNCTION_INDEX, eax);
         CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
-                        ebx, edx, name, &miss);
+                        ebx, edx, name, &miss, edi);
       }
       break;
 
@@ -1579,7 +1807,7 @@
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::NUMBER_FUNCTION_INDEX, eax);
         CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
-                        ebx, edx, name, &miss);
+                        ebx, edx, name, &miss, edi);
       }
       break;
     }
@@ -1600,7 +1828,7 @@
         GenerateDirectLoadGlobalFunctionPrototype(
             masm(), Context::BOOLEAN_FUNCTION_INDEX, eax);
         CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
-                        ebx, edx, name, &miss);
+                        ebx, edx, name, &miss, edi);
       }
       break;
     }
@@ -1722,7 +1950,7 @@
   }
 
   // Check that the maps haven't changed.
-  CheckPrototypes(object, edx, holder, ebx, eax, name, &miss);
+  CheckPrototypes(object, edx, holder, ebx, eax, name, &miss, edi);
 
   // Get the value from the cell.
   __ mov(edi, Immediate(Handle<JSGlobalPropertyCell>(cell)));
@@ -1993,6 +2221,8 @@
   __ test(eax, Immediate(kSmiTagMask));
   __ j(zero, &miss, not_taken);
 
+  ASSERT(last->IsGlobalObject() || last->HasFastProperties());
+
   // Check the maps of the full prototype chain. Also check that
   // global property cells up to (but not including) the last object
   // in the prototype chain are empty.
@@ -2140,7 +2370,7 @@
   }
 
   // Check that the maps haven't changed.
-  CheckPrototypes(object, eax, holder, ebx, edx, name, &miss);
+  CheckPrototypes(object, eax, holder, ebx, edx, name, &miss, edi);
 
   // Get the value from the cell.
   __ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
diff --git a/src/ic-inl.h b/src/ic-inl.h
index 131f77b..70bbaf8 100644
--- a/src/ic-inl.h
+++ b/src/ic-inl.h
@@ -80,11 +80,38 @@
 }
 
 
-Map* IC::GetCodeCacheMapForObject(Object* object) {
-  if (object->IsJSObject()) return JSObject::cast(object)->map();
+InlineCacheHolderFlag IC::GetCodeCacheForObject(Object* object,
+                                                JSObject* holder) {
+  if (object->IsJSObject()) {
+    return GetCodeCacheForObject(JSObject::cast(object), holder);
+  }
   // If the object is a value, we use the prototype map for the cache.
   ASSERT(object->IsString() || object->IsNumber() || object->IsBoolean());
-  return JSObject::cast(object->GetPrototype())->map();
+  return PROTOTYPE_MAP;
+}
+
+
+InlineCacheHolderFlag IC::GetCodeCacheForObject(JSObject* object,
+                                                JSObject* holder) {
+  // Fast-properties and global objects store stubs in their own maps.
+  // Slow properties objects use prototype's map (unless the property is its own
+  // when holder == object). It works because slow properties objects having
+  // the same prototype (or a prototype with the same map) and not having
+  // the property are interchangeable for such a stub.
+  if (holder != object &&
+      !object->HasFastProperties() &&
+      !object->IsJSGlobalProxy() &&
+      !object->IsJSGlobalObject()) {
+    return PROTOTYPE_MAP;
+  }
+  return OWN_MAP;
+}
+
+
+Map* IC::GetCodeCacheMap(Object* object, InlineCacheHolderFlag holder) {
+  Object* map_owner = (holder == OWN_MAP ? object : object->GetPrototype());
+  ASSERT(map_owner->IsJSObject());
+  return JSObject::cast(map_owner)->map();
 }
 
 
diff --git a/src/ic.cc b/src/ic.cc
index 4b77d92..cdb06ac 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -134,13 +134,45 @@
 }
 #endif
 
+
+static bool HasNormalObjectsInPrototypeChain(LookupResult* lookup,
+                                             Object* receiver) {
+  Object* end = lookup->IsProperty() ? lookup->holder() : Heap::null_value();
+  for (Object* current = receiver;
+       current != end;
+       current = current->GetPrototype()) {
+    if (current->IsJSObject() &&
+        !JSObject::cast(current)->HasFastProperties() &&
+        !current->IsJSGlobalProxy() &&
+        !current->IsJSGlobalObject()) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+
 IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) {
   IC::State state = target->ic_state();
 
   if (state != MONOMORPHIC) return state;
   if (receiver->IsUndefined() || receiver->IsNull()) return state;
 
-  Map* map = GetCodeCacheMapForObject(receiver);
+  InlineCacheHolderFlag cache_holder =
+      Code::ExtractCacheHolderFromFlags(target->flags());
+
+
+  if (cache_holder == OWN_MAP && !receiver->IsJSObject()) {
+    // The stub was generated for JSObject but called for non-JSObject.
+    // IC::GetCodeCacheMap is not applicable.
+    return MONOMORPHIC;
+  } else if (cache_holder == PROTOTYPE_MAP &&
+             receiver->GetPrototype()->IsNull()) {
+    // IC::GetCodeCacheMap is not applicable.
+    return MONOMORPHIC;
+  }
+  Map* map = IC::GetCodeCacheMap(receiver, cache_holder);
 
   // Decide whether the inline cache failed because of changes to the
   // receiver itself or changes to one of its prototypes.
@@ -487,12 +519,24 @@
 
 
 void CallICBase::UpdateCaches(LookupResult* lookup,
-                          State state,
-                          Handle<Object> object,
-                          Handle<String> name) {
+                              State state,
+                              Handle<Object> object,
+                              Handle<String> name) {
   // Bail out if we didn't find a result.
   if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
 
+#ifndef V8_TARGET_ARCH_IA32
+  // Normal objects only implemented for IA32 by now.
+  if (HasNormalObjectsInPrototypeChain(lookup, *object)) return;
+#else
+  if (lookup->holder() != *object &&
+      HasNormalObjectsInPrototypeChain(lookup, object->GetPrototype())) {
+    // Suppress optimization for prototype chains with slow properties objects
+    // in the middle.
+    return;
+  }
+#endif
+
   // Compute the number of arguments.
   int argc = target()->arguments_count();
   InLoopFlag in_loop = target()->ic_in_loop();
@@ -590,8 +634,13 @@
       state == MONOMORPHIC_PROTOTYPE_FAILURE) {
     set_target(Code::cast(code));
   } else if (state == MEGAMORPHIC) {
+    // Cache code holding map should be consistent with
+    // GenerateMonomorphicCacheProbe. It is not the map which holds the stub.
+    Map* map = JSObject::cast(object->IsJSObject() ? *object :
+                              object->GetPrototype())->map();
+
     // Update the stub cache.
-    StubCache::Set(*name, GetCodeCacheMapForObject(*object), Code::cast(code));
+    StubCache::Set(*name, map, Code::cast(code));
   }
 
 #ifdef DEBUG
@@ -795,6 +844,8 @@
   if (!object->IsJSObject()) return;
   Handle<JSObject> receiver = Handle<JSObject>::cast(object);
 
+  if (HasNormalObjectsInPrototypeChain(lookup, *object)) return;
+
   // Compute the code stub for this load.
   Object* code = NULL;
   if (state == UNINITIALIZED) {
@@ -836,7 +887,7 @@
           // property must be found in the receiver for the stub to be
           // applicable.
           if (lookup->holder() != *receiver) return;
-          code = StubCache::ComputeLoadNormal(*name, *receiver);
+          code = StubCache::ComputeLoadNormal();
         }
         break;
       }
@@ -871,8 +922,12 @@
   } else if (state == MONOMORPHIC) {
     set_target(megamorphic_stub());
   } else if (state == MEGAMORPHIC) {
-    // Update the stub cache.
-    StubCache::Set(*name, GetCodeCacheMapForObject(*object), Code::cast(code));
+    // Cache code holding map should be consistent with
+    // GenerateMonomorphicCacheProbe.
+    Map* map = JSObject::cast(object->IsJSObject() ? *object :
+                              object->GetPrototype())->map();
+
+    StubCache::Set(*name, map, Code::cast(code));
   }
 
 #ifdef DEBUG
@@ -1018,6 +1073,8 @@
   if (!object->IsJSObject()) return;
   Handle<JSObject> receiver = Handle<JSObject>::cast(object);
 
+  if (HasNormalObjectsInPrototypeChain(lookup, *object)) return;
+
   // Compute the code stub for this load.
   Object* code = NULL;
 
@@ -1198,16 +1255,18 @@
       break;
     }
     case NORMAL: {
-      if (!receiver->IsGlobalObject()) {
-        return;
+      if (receiver->IsGlobalObject()) {
+        // The stub generated for the global object picks the value directly
+        // from the property cell. So the property must be directly on the
+        // global object.
+        Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
+        JSGlobalPropertyCell* cell =
+            JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
+        code = StubCache::ComputeStoreGlobal(*name, *global, cell);
+      } else {
+        if (lookup->holder() != *receiver) return;
+        code = StubCache::ComputeStoreNormal();
       }
-      // The stub generated for the global object picks the value directly
-      // from the property cell. So the property must be directly on the
-      // global object.
-      Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
-      JSGlobalPropertyCell* cell =
-          JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
-      code = StubCache::ComputeStoreGlobal(*name, *global, cell);
       break;
     }
     case CALLBACKS: {
diff --git a/src/ic.h b/src/ic.h
index 738b6f4..0d5df96 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -117,9 +117,14 @@
     return ComputeMode() == RelocInfo::CODE_TARGET_CONTEXT;
   }
 
-  // Returns the map to use for caching stubs for a given object.
-  // This method should not be called with undefined or null.
-  static inline Map* GetCodeCacheMapForObject(Object* object);
+  // Determines which map must be used for keeping the code stub.
+  // These methods should not be called with undefined or null.
+  static inline InlineCacheHolderFlag GetCodeCacheForObject(Object* object,
+                                                            JSObject* holder);
+  static inline InlineCacheHolderFlag GetCodeCacheForObject(JSObject* object,
+                                                            JSObject* holder);
+  static inline Map* GetCodeCacheMap(Object* object,
+                                     InlineCacheHolderFlag holder);
 
  protected:
   Address fp() const { return fp_; }
@@ -384,6 +389,7 @@
   static void GenerateMiss(MacroAssembler* masm);
   static void GenerateMegamorphic(MacroAssembler* masm);
   static void GenerateArrayLength(MacroAssembler* masm);
+  static void GenerateNormal(MacroAssembler* masm);
 
  private:
   // Update the inline cache and the global stub cache based on the
diff --git a/src/liveedit-debugger.js b/src/liveedit-debugger.js
index 34d5c0d..c8c6f08 100644
--- a/src/liveedit-debugger.js
+++ b/src/liveedit-debugger.js
@@ -51,7 +51,8 @@
   // Applies the change to the script.
   // The change is in form of list of chunks encoded in a single array as
   // a series of triplets (pos1_start, pos1_end, pos2_end)
-  function ApplyPatchMultiChunk(script, diff_array, new_source, change_log) {
+  function ApplyPatchMultiChunk(script, diff_array, new_source, preview_only,
+      change_log) {
 
     var old_source = script.source;
 
@@ -96,7 +97,7 @@
       }
 
       // Recursively collects all newly compiled functions that are going into
-      // business and should be have link to the actual script updated.
+      // business and should have link to the actual script updated.
       function CollectNew(node_list) {
         for (var i = 0; i < node_list.length; i++) {
           link_to_original_script_list.push(node_list[i]);
@@ -121,6 +122,20 @@
       }
     }
 
+    var preview_description = {
+        change_tree: DescribeChangeTree(root_old_node),
+        textual_diff: {
+          old_len: old_source.length,
+          new_len: new_source.length,
+          chunks: diff_array
+        },
+        updated: false
+    };
+    
+    if (preview_only) {
+      return preview_description;
+    }
+    
     HarvestTodo(root_old_node);
     
     // Collect shared infos for functions whose code need to be patched.
@@ -132,13 +147,15 @@
       }
     }
 
-    // Check that function being patched is not currently on stack.
-    CheckStackActivations(replaced_function_infos, change_log);
-  
-  
     // We haven't changed anything before this line yet.
     // Committing all changes.
     
+    // Check that function being patched is not currently on stack or drop them.
+    var dropped_functions_number =
+        CheckStackActivations(replaced_function_infos, change_log);
+    
+    preview_description.stack_modified = dropped_functions_number != 0; 
+  
     // Start with breakpoints. Convert their line/column positions and 
     // temporary remove.
     var break_points_restorer = TemporaryRemoveBreakPoints(script, change_log);
@@ -166,6 +183,8 @@
         LinkToOldScript(link_to_old_script_list[i], old_script,
             link_to_old_script_report);
       }
+      
+      preview_description.created_script_name = old_script_name;
     }
     
     // Link to an actual script all the functions that we are going to use.
@@ -189,6 +208,9 @@
     }
     
     break_points_restorer(pos_translator, old_script);
+    
+    preview_description.updated = true;
+    return preview_description;
   }
   // Function is public.
   this.ApplyPatchMultiChunk = ApplyPatchMultiChunk;
@@ -494,6 +516,16 @@
     this.new_end_pos = void 0;
     this.corresponding_node = void 0;
     this.unmatched_new_nodes = void 0;
+    
+    // 'Textual' correspondence/matching is weaker than 'pure'
+    // correspondence/matching. We need 'textual' level for visual presentation
+    // in UI, we use 'pure' level for actual code manipulation.
+    // Sometimes only function body is changed (functions in old and new script
+    // textually correspond), but we cannot patch the code, so we see them
+    // as an old function deleted and new function created.  
+    this.textual_corresponding_node = void 0;
+    this.textually_unmatched_new_nodes = void 0;
+    
     this.live_shared_info_wrapper = void 0;
   }
   
@@ -640,6 +672,7 @@
       var new_children = new_node.children;
       
       var unmatched_new_nodes_list = [];
+      var textually_unmatched_new_nodes_list = [];
 
       var old_index = 0;
       var new_index = 0;
@@ -650,6 +683,7 @@
           if (new_children[new_index].info.start_position <
               old_children[old_index].new_start_pos) {
             unmatched_new_nodes_list.push(new_children[new_index]);
+            textually_unmatched_new_nodes_list.push(new_children[new_index]);
             new_index++;
           } else if (new_children[new_index].info.start_position ==
               old_children[old_index].new_start_pos) {
@@ -657,6 +691,8 @@
                 old_children[old_index].new_end_pos) {
               old_children[old_index].corresponding_node =
                   new_children[new_index];
+              old_children[old_index].textual_corresponding_node =
+                  new_children[new_index];
               if (old_children[old_index].status != FunctionStatus.UNCHANGED) {
                 ProcessChildren(old_children[old_index],
                     new_children[new_index]);
@@ -673,6 +709,7 @@
                   "No corresponding function in new script found";
               old_node.status = FunctionStatus.CHANGED;
               unmatched_new_nodes_list.push(new_children[new_index]);
+              textually_unmatched_new_nodes_list.push(new_children[new_index]);
             }
             new_index++;
             old_index++;
@@ -694,21 +731,28 @@
       
       while (new_index < new_children.length) {
         unmatched_new_nodes_list.push(new_children[new_index]);
+        textually_unmatched_new_nodes_list.push(new_children[new_index]);
         new_index++;
       }
       
       if (old_node.status == FunctionStatus.CHANGED) {
-        if (!CompareFunctionExpectations(old_node.info, new_node.info)) {
+        var why_wrong_expectations =
+            WhyFunctionExpectationsDiffer(old_node.info, new_node.info);
+        if (why_wrong_expectations) {
           old_node.status = FunctionStatus.DAMAGED;
-          old_node.status_explanation = "Changed code expectations";
+          old_node.status_explanation = why_wrong_expectations;
         }
       }
       old_node.unmatched_new_nodes = unmatched_new_nodes_list;
+      old_node.textually_unmatched_new_nodes =
+          textually_unmatched_new_nodes_list;
     }
 
     ProcessChildren(old_code_tree, new_code_tree);
     
     old_code_tree.corresponding_node = new_code_tree;
+    old_code_tree.textual_corresponding_node = new_code_tree;
+
     Assert(old_code_tree.status != FunctionStatus.DAMAGED,
         "Script became damaged");
   }
@@ -792,27 +836,37 @@
   }
   
   // Compares a function interface old and new version, whether it
-  // changed or not.
-  function CompareFunctionExpectations(function_info1, function_info2) {
+  // changed or not. Returns explanation if they differ.
+  function WhyFunctionExpectationsDiffer(function_info1, function_info2) {
     // Check that function has the same number of parameters (there may exist
     // an adapter, that won't survive function parameter number change).
     if (function_info1.param_num != function_info2.param_num) {
-      return false;
+      return "Changed parameter number: " + function_info1.param_num + 
+          " and " + function_info2.param_num;
     }
     var scope_info1 = function_info1.scope_info;
     var scope_info2 = function_info2.scope_info;
-  
-    if (!scope_info1) {
-      return !scope_info2;
+
+    var scope_info1_text; 
+    var scope_info2_text; 
+    
+    if (scope_info1) {
+      scope_info1_text = scope_info1.toString(); 
+    } else {
+      scope_info1_text = "";
     }
-  
-    if (scope_info1.length != scope_info2.length) {
-      return false;
+    if (scope_info2) {
+      scope_info2_text = scope_info2.toString(); 
+    } else {
+      scope_info2_text = "";
     }
-  
-    // Check that outer scope structure is not changed. Otherwise the function
-    // will not properly work with existing scopes.
-    return scope_info1.toString() == scope_info2.toString();
+    
+    if (scope_info1_text != scope_info2_text) {
+      return "Incompatible variable maps: [" + scope_info1_text +
+          "] and [" + scope_info2_text + "]";  
+    }
+    // No differences. Return undefined.
+    return;
   }
   
   // Minifier forward declaration.
@@ -856,6 +910,8 @@
       change_log.push( { functions_on_stack: problems } );
       throw new Failure("Blocked by functions on stack");
     }
+    
+    return dropped.length;
   }
   
   // A copy of the FunctionPatchabilityStatus enum from liveedit.h
@@ -897,14 +953,11 @@
   this.GetPcFromSourcePos = GetPcFromSourcePos;
 
   // LiveEdit main entry point: changes a script text to a new string.
-  function SetScriptSource(script, new_source, change_log) {
+  function SetScriptSource(script, new_source, preview_only, change_log) {
     var old_source = script.source;
     var diff = CompareStringsLinewise(old_source, new_source);
-    if (diff.length == 0) {
-      change_log.push( { empty_diff: true } );
-      return;
-    }
-    ApplyPatchMultiChunk(script, diff, new_source, change_log);
+    return ApplyPatchMultiChunk(script, diff, new_source, preview_only,
+        change_log);
   }
   // Function is public.
   this.SetScriptSource = SetScriptSource;
@@ -931,7 +984,67 @@
     
     return ApplyPatchMultiChunk(script,
         [ change_pos, change_pos + change_len, change_pos + new_str.length],
-        new_source, change_log);
+        new_source, false, change_log);
+  }
+  
+  // Creates JSON description for a change tree.
+  function DescribeChangeTree(old_code_tree) {
+    
+    function ProcessOldNode(node) {
+      var child_infos = [];
+      for (var i = 0; i < node.children.length; i++) {
+        var child = node.children[i];
+        if (child.status != FunctionStatus.UNCHANGED) {
+          child_infos.push(ProcessOldNode(child));
+        }
+      }
+      var new_child_infos = [];
+      if (node.textually_unmatched_new_nodes) {
+        for (var i = 0; i < node.textually_unmatched_new_nodes.length; i++) {
+          var child = node.textually_unmatched_new_nodes[i];
+          new_child_infos.push(ProcessNewNode(child));
+        }
+      }
+      var res = {
+        name: node.info.function_name,
+        positions: DescribePositions(node),
+        status: node.status,
+        children: child_infos,
+        new_children: new_child_infos  
+      };
+      if (node.status_explanation) {
+        res.status_explanation = node.status_explanation;
+      }
+      if (node.textual_corresponding_node) {
+        res.new_positions = DescribePositions(node.textual_corresponding_node);
+      }
+      return res;
+    }
+    
+    function ProcessNewNode(node) {
+      var child_infos = [];
+      // Do not list ancestors.
+      if (false) {
+        for (var i = 0; i < node.children.length; i++) {
+          child_infos.push(ProcessNewNode(node.children[i]));
+        }
+      }
+      var res = {
+        name: node.info.function_name,
+        positions: DescribePositions(node),
+        children: child_infos,
+      };
+      return res;
+    }
+    
+    function DescribePositions(node) {
+      return {
+        start_position: node.info.start_position,
+        end_position: node.info.end_position
+      };
+    }
+    
+    return ProcessOldNode(old_code_tree);
   }
 
   
diff --git a/src/liveedit.cc b/src/liveedit.cc
index 950f8e0..04631a3 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -1187,7 +1187,12 @@
 // Returns error message or NULL.
 static const char* DropFrames(Vector<StackFrame*> frames,
                               int top_frame_index,
-                              int bottom_js_frame_index) {
+                              int bottom_js_frame_index,
+                              Debug::FrameDropMode* mode) {
+  if (Debug::kFrameDropperFrameSize < 0) {
+    return "Stack manipulations are not supported in this architecture.";
+  }
+
   StackFrame* pre_top_frame = frames[top_frame_index - 1];
   StackFrame* top_frame = frames[top_frame_index];
   StackFrame* bottom_js_frame = frames[bottom_js_frame_index];
@@ -1198,12 +1203,18 @@
   if (pre_top_frame->code()->is_inline_cache_stub() &&
       pre_top_frame->code()->ic_state() == DEBUG_BREAK) {
     // OK, we can drop inline cache calls.
+    *mode = Debug::FRAME_DROPPED_IN_IC_CALL;
+  } else if (pre_top_frame->code() == Debug::debug_break_slot()) {
+    // OK, we can drop debug break slot.
+    *mode = Debug::FRAME_DROPPED_IN_DEBUG_SLOT_CALL;
   } else if (pre_top_frame->code() ==
       Builtins::builtin(Builtins::FrameDropper_LiveEdit)) {
     // OK, we can drop our own code.
+    *mode = Debug::FRAME_DROPPED_IN_DIRECT_CALL;
   } else if (pre_top_frame->code()->kind() == Code::STUB &&
       pre_top_frame->code()->major_key()) {
-    // Unit Test entry, it's fine, we support this case.
+    // Entry from our unit tests, it's fine, we support this case.
+    *mode = Debug::FRAME_DROPPED_IN_DIRECT_CALL;
   } else {
     return "Unknown structure of stack above changing function";
   }
@@ -1316,8 +1327,9 @@
     return NULL;
   }
 
+  Debug::FrameDropMode drop_mode = Debug::FRAMES_UNTOUCHED;
   const char* error_message = DropFrames(frames, top_frame_index,
-                                         bottom_js_frame_index);
+                                         bottom_js_frame_index, &drop_mode);
 
   if (error_message != NULL) {
     return error_message;
@@ -1331,7 +1343,7 @@
       break;
     }
   }
-  Debug::FramesHaveBeenDropped(new_id);
+  Debug::FramesHaveBeenDropped(new_id, drop_mode);
 
   // Replace "blocked on active" with "replaced on active" status.
   for (int i = 0; i < array_len; i++) {
diff --git a/src/macros.py b/src/macros.py
index 7d97918..32c9651 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -120,6 +120,7 @@
 # Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
 macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
 macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : ToInteger(arg));
+macro TO_INTEGER_MAP_MINUS_ZERO(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToIntegerMapMinusZero(ToNumber(arg)));
 macro TO_INT32(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : (arg >> 0));
 macro TO_UINT32(arg) = (arg >>> 0);
 macro TO_STRING_INLINE(arg) = (IS_STRING(%IS_VAR(arg)) ? arg : NonStringToString(arg));
diff --git a/src/messages.js b/src/messages.js
index a46af4a..7bac3b2 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -196,6 +196,7 @@
       circular_structure:           "Converting circular structure to JSON",
       obj_ctor_property_non_object: "Object.%0 called on non-object",
       array_indexof_not_defined:    "Array.getIndexOf: Argument undefined",
+      object_not_extensible:        "Can't add property %0, object is not extensible",
       illegal_access:               "illegal access"
     };
   }
diff --git a/src/objects-inl.h b/src/objects-inl.h
index f9def82..79f2c97 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -1335,6 +1335,21 @@
 }
 
 
+bool JSObject::HasFastProperties() {
+  return !properties()->IsDictionary();
+}
+
+
+int JSObject::MaxFastProperties() {
+  // Allow extra fast properties if the object has more than
+  // kMaxFastProperties in-object properties. When this is the case,
+  // it is very unlikely that the object is being used as a dictionary
+  // and there is a good chance that allowing more map transitions
+  // will be worth it.
+  return Max(map()->inobject_properties(), kMaxFastProperties);
+}
+
+
 void Struct::InitializeBody(int object_size) {
   Object* value = Heap::undefined_value();
   for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
@@ -1343,11 +1358,6 @@
 }
 
 
-bool JSObject::HasFastProperties() {
-  return !properties()->IsDictionary();
-}
-
-
 bool Object::ToArrayIndex(uint32_t* index) {
   if (IsSmi()) {
     int value = Smi::cast(this)->value();
@@ -2189,6 +2199,20 @@
 }
 
 
+void Map::set_is_extensible(bool value) {
+  if (value) {
+    set_bit_field2(bit_field2() | (1 << kIsExtensible));
+  } else {
+    set_bit_field2(bit_field2() & ~(1 << kIsExtensible));
+  }
+}
+
+bool Map::is_extensible() {
+  return ((1 << kIsExtensible) & bit_field2()) != 0;
+}
+
+
+
 Code::Flags Code::flags() {
   return static_cast<Flags>(READ_INT_FIELD(this, kFlagsOffset));
 }
@@ -2263,13 +2287,15 @@
                                InLoopFlag in_loop,
                                InlineCacheState ic_state,
                                PropertyType type,
-                               int argc) {
+                               int argc,
+                               InlineCacheHolderFlag holder) {
   // Compute the bit mask.
   int bits = kind << kFlagsKindShift;
   if (in_loop) bits |= kFlagsICInLoopMask;
   bits |= ic_state << kFlagsICStateShift;
   bits |= type << kFlagsTypeShift;
   bits |= argc << kFlagsArgumentsCountShift;
+  if (holder == PROTOTYPE_MAP) bits |= kFlagsCacheInPrototypeMapMask;
   // Cast to flags and validate result before returning it.
   Flags result = static_cast<Flags>(bits);
   ASSERT(ExtractKindFromFlags(result) == kind);
@@ -2283,9 +2309,10 @@
 
 Code::Flags Code::ComputeMonomorphicFlags(Kind kind,
                                           PropertyType type,
+                                          InlineCacheHolderFlag holder,
                                           InLoopFlag in_loop,
                                           int argc) {
-  return ComputeFlags(kind, in_loop, MONOMORPHIC, type, argc);
+  return ComputeFlags(kind, in_loop, MONOMORPHIC, type, argc, holder);
 }
 
 
@@ -2318,6 +2345,12 @@
 }
 
 
+InlineCacheHolderFlag Code::ExtractCacheHolderFromFlags(Flags flags) {
+  int bits = (flags & kFlagsCacheInPrototypeMapMask);
+  return bits != 0 ? PROTOTYPE_MAP : OWN_MAP;
+}
+
+
 Code::Flags Code::RemoveTypeFromFlags(Flags flags) {
   int bits = flags & ~kFlagsTypeMask;
   return static_cast<Flags>(bits);
diff --git a/src/objects.cc b/src/objects.cc
index 883b28e..8288f63 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -1276,7 +1276,7 @@
   }
 
   if (map()->unused_property_fields() == 0) {
-    if (properties()->length() > kMaxFastProperties) {
+    if (properties()->length() > MaxFastProperties()) {
       Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
       if (obj->IsFailure()) return obj;
       return AddSlowProperty(name, value, attributes);
@@ -1386,6 +1386,11 @@
                               Object* value,
                               PropertyAttributes attributes) {
   ASSERT(!IsJSGlobalProxy());
+  if (!map()->is_extensible()) {
+    Handle<Object> args[1] = {Handle<String>(name)};
+    return Top::Throw(*Factory::NewTypeError("object_not_extensible",
+                                               HandleVector(args, 1)));
+  }
   if (HasFastProperties()) {
     // Ensure the descriptor array does not get too big.
     if (map()->instance_descriptors()->number_of_descriptors() <
@@ -1474,7 +1479,7 @@
                                            Object* new_value,
                                            PropertyAttributes attributes) {
   if (map()->unused_property_fields() == 0 &&
-      properties()->length() > kMaxFastProperties) {
+      properties()->length() > MaxFastProperties()) {
     Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
     if (obj->IsFailure()) return obj;
     return ReplaceSlowProperty(name, new_value, attributes);
@@ -1746,8 +1751,6 @@
       result->DictionaryResult(this, entry);
       return;
     }
-    // Slow case object skipped during lookup. Do not use inline caching.
-    if (!IsGlobalObject()) result->DisallowCaching();
   }
   result->NotFound();
 }
@@ -2576,6 +2579,25 @@
 }
 
 
+Object* JSObject::PreventExtensions() {
+  // If there are fast elements we normalize.
+  if (HasFastElements()) {
+    NormalizeElements();
+  }
+  // Make sure that we never go back to fast case.
+  element_dictionary()->set_requires_slow_elements();
+
+  // Do a map transition, other objects with this map may still
+  // be extensible.
+  Object* new_map = map()->CopyDropTransitions();
+  if (new_map->IsFailure()) return new_map;
+  Map::cast(new_map)->set_is_extensible(false);
+  set_map(Map::cast(new_map));
+  ASSERT(!map()->is_extensible());
+  return new_map;
+}
+
+
 // Tests for the fast common case for property enumeration:
 // - This object and all prototypes has an enum cache (which means that it has
 //   no interceptors and needs no access checks).
@@ -3076,7 +3098,7 @@
   Object* descriptors = instance_descriptors()->RemoveTransitions();
   if (descriptors->IsFailure()) return descriptors;
   cast(new_map)->set_instance_descriptors(DescriptorArray::cast(descriptors));
-  return cast(new_map);
+  return new_map;
 }
 
 
@@ -6209,6 +6231,15 @@
             return value;
           }
         }
+        // When we set the is_extensible flag to false we always force
+        // the element into dictionary mode (and force them to stay there).
+        if (!map()->is_extensible()) {
+          Handle<Object> number(Heap::NumberFromUint32(index));
+          Handle<String> index_string(Factory::NumberToString(number));
+          Handle<Object> args[1] = { index_string };
+          return Top::Throw(*Factory::NewTypeError("object_not_extensible",
+                                                   HandleVector(args, 1)));
+        }
         Object* result = dictionary->AtNumberPut(index, value);
         if (result->IsFailure()) return result;
         if (elms != FixedArray::cast(result)) {
diff --git a/src/objects.h b/src/objects.h
index 0ad6f14..15cfd5c 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1367,6 +1367,7 @@
   // Returns the index'th element.
   // The undefined object if index is out of bounds.
   Object* GetElementWithReceiver(JSObject* receiver, uint32_t index);
+  Object* GetElementWithInterceptor(JSObject* receiver, uint32_t index);
 
   Object* SetFastElementsCapacityAndLength(int capacity, int length);
   Object* SetSlowElements(Object* length);
@@ -1516,6 +1517,10 @@
   // Casting.
   static inline JSObject* cast(Object* obj);
 
+  // Disalow further properties to be added to the object.
+  Object* PreventExtensions();
+
+
   // Dispatched behavior.
   void JSObjectIterateBody(int object_size, ObjectVisitor* v);
   void JSObjectShortPrint(StringStream* accumulator);
@@ -1547,6 +1552,11 @@
 #endif
   Object* SlowReverseLookup(Object* value);
 
+  // Maximal number of fast properties for the JSObject. Used to
+  // restrict the number of map transitions to avoid an explosion in
+  // the number of maps for objects used as dictionaries.
+  inline int MaxFastProperties();
+
   // Maximal number of elements (numbered 0 .. kMaxElementCount - 1).
   // Also maximal value of JSArray's length property.
   static const uint32_t kMaxElementCount = 0xffffffffu;
@@ -1568,8 +1578,6 @@
 
   STATIC_CHECK(kHeaderSize == Internals::kJSObjectHeaderSize);
 
-  Object* GetElementWithInterceptor(JSObject* receiver, uint32_t index);
-
  private:
   Object* GetElementWithCallback(Object* receiver,
                                  Object* structure,
@@ -2765,11 +2773,13 @@
                                    InLoopFlag in_loop = NOT_IN_LOOP,
                                    InlineCacheState ic_state = UNINITIALIZED,
                                    PropertyType type = NORMAL,
-                                   int argc = -1);
+                                   int argc = -1,
+                                   InlineCacheHolderFlag holder = OWN_MAP);
 
   static inline Flags ComputeMonomorphicFlags(
       Kind kind,
       PropertyType type,
+      InlineCacheHolderFlag holder = OWN_MAP,
       InLoopFlag in_loop = NOT_IN_LOOP,
       int argc = -1);
 
@@ -2778,6 +2788,7 @@
   static inline InLoopFlag ExtractICInLoopFromFlags(Flags flags);
   static inline PropertyType ExtractTypeFromFlags(Flags flags);
   static inline int ExtractArgumentsCountFromFlags(Flags flags);
+  static inline InlineCacheHolderFlag ExtractCacheHolderFromFlags(Flags flags);
   static inline Flags RemoveTypeFromFlags(Flags flags);
 
   // Convert a target address into a code object.
@@ -2864,16 +2875,18 @@
   static const int kFlagsICInLoopShift       = 3;
   static const int kFlagsTypeShift           = 4;
   static const int kFlagsKindShift           = 7;
-  static const int kFlagsArgumentsCountShift = 11;
+  static const int kFlagsICHolderShift       = 11;
+  static const int kFlagsArgumentsCountShift = 12;
 
   static const int kFlagsICStateMask        = 0x00000007;  // 00000000111
   static const int kFlagsICInLoopMask       = 0x00000008;  // 00000001000
   static const int kFlagsTypeMask           = 0x00000070;  // 00001110000
   static const int kFlagsKindMask           = 0x00000780;  // 11110000000
-  static const int kFlagsArgumentsCountMask = 0xFFFFF800;
+  static const int kFlagsCacheInPrototypeMapMask = 0x00000800;
+  static const int kFlagsArgumentsCountMask = 0xFFFFF000;
 
   static const int kFlagsNotUsedInLookup =
-      (kFlagsICInLoopMask | kFlagsTypeMask);
+      (kFlagsICInLoopMask | kFlagsTypeMask | kFlagsCacheInPrototypeMapMask);
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
@@ -2980,13 +2993,8 @@
     return ((1 << kHasInstanceCallHandler) & bit_field()) != 0;
   }
 
-  inline void set_is_extensible() {
-    set_bit_field2(bit_field2() | (1 << kIsExtensible));
-  }
-
-  inline bool is_extensible() {
-    return ((1 << kIsExtensible) & bit_field2()) != 0;
-  }
+  inline void set_is_extensible(bool value);
+  inline bool is_extensible();
 
   // Tells whether the instance has fast elements.
   void set_has_fast_elements(bool value) {
diff --git a/src/rewriter.cc b/src/rewriter.cc
index c97408e..73301b9 100644
--- a/src/rewriter.cc
+++ b/src/rewriter.cc
@@ -87,11 +87,13 @@
 
 
 void AstOptimizer::VisitExpressionStatement(ExpressionStatement* node) {
+  node->expression()->set_no_negative_zero(true);
   Visit(node->expression());
 }
 
 
 void AstOptimizer::VisitIfStatement(IfStatement* node) {
+  node->condition()->set_no_negative_zero(true);
   Visit(node->condition());
   Visit(node->then_statement());
   if (node->HasElseStatement()) {
@@ -101,6 +103,7 @@
 
 
 void AstOptimizer::VisitDoWhileStatement(DoWhileStatement* node) {
+  node->cond()->set_no_negative_zero(true);
   Visit(node->cond());
   Visit(node->body());
 }
@@ -108,6 +111,7 @@
 
 void AstOptimizer::VisitWhileStatement(WhileStatement* node) {
   has_function_literal_ = false;
+  node->cond()->set_no_negative_zero(true);
   Visit(node->cond());
   node->may_have_function_literal_ = has_function_literal_;
   Visit(node->body());
@@ -120,6 +124,7 @@
   }
   if (node->cond() != NULL) {
     has_function_literal_ = false;
+    node->cond()->set_no_negative_zero(true);
     Visit(node->cond());
     node->may_have_function_literal_ = has_function_literal_;
   }
@@ -151,6 +156,7 @@
 
 
 void AstOptimizer::VisitSwitchStatement(SwitchStatement* node) {
+  node->tag()->set_no_negative_zero(true);
   Visit(node->tag());
   for (int i = 0; i < node->cases()->length(); i++) {
     CaseClause* clause = node->cases()->at(i);
@@ -444,6 +450,7 @@
   if (FLAG_safe_int32_compiler) {
     switch (node->op()) {
       case Token::BIT_NOT:
+        node->expression()->set_no_negative_zero(true);
         node->expression()->set_to_int32(true);
         // Fall through.
       case Token::ADD:
@@ -476,10 +483,49 @@
 }
 
 
+static bool CouldBeNegativeZero(AstNode* node) {
+  Literal* literal = node->AsLiteral();
+  if (literal != NULL) {
+    Handle<Object> handle = literal->handle();
+    if (handle->IsString() || handle->IsSmi()) {
+      return false;
+    } else if (handle->IsHeapNumber()) {
+      double double_value = HeapNumber::cast(*handle)->value();
+      if (double_value != 0) {
+        return false;
+      }
+    }
+  }
+  BinaryOperation* binary = node->AsBinaryOperation();
+  if (binary != NULL && Token::IsBitOp(binary->op())) {
+    return false;
+  }
+  return true;
+}
+
+
+static bool CouldBePositiveZero(AstNode* node) {
+  Literal* literal = node->AsLiteral();
+  if (literal != NULL) {
+    Handle<Object> handle = literal->handle();
+    if (handle->IsSmi()) {
+      if (Smi::cast(*handle) != Smi::FromInt(0)) {
+        return false;
+      }
+    } else if (handle->IsHeapNumber()) {
+      // Heap number literal can't be +0, because that's a Smi.
+      return false;
+    }
+  }
+  return true;
+}
+
+
 void AstOptimizer::VisitBinaryOperation(BinaryOperation* node) {
   // Depending on the operation we can propagate this node's type down the
   // AST nodes.
-  switch (node->op()) {
+  Token::Value op = node->op();
+  switch (op) {
     case Token::COMMA:
     case Token::OR:
       node->left()->set_no_negative_zero(true);
@@ -503,23 +549,54 @@
       node->left()->set_no_negative_zero(true);
       node->right()->set_no_negative_zero(true);
       break;
+    case Token::MUL: {
+      VariableProxy* lvar_proxy = node->left()->AsVariableProxy();
+      VariableProxy* rvar_proxy = node->right()->AsVariableProxy();
+      if (lvar_proxy != NULL && rvar_proxy != NULL) {
+        Variable* lvar = lvar_proxy->AsVariable();
+        Variable* rvar = rvar_proxy->AsVariable();
+        if (lvar != NULL && rvar != NULL) {
+          if (lvar->mode() == Variable::VAR && rvar->mode() == Variable::VAR) {
+            Slot* lslot = lvar->slot();
+            Slot* rslot = rvar->slot();
+            if (lslot->type() == rslot->type() &&
+                (lslot->type() == Slot::PARAMETER ||
+                 lslot->type() == Slot::LOCAL) &&
+                lslot->index() == rslot->index()) {
+              // A number squared doesn't give negative zero.
+              node->set_no_negative_zero(true);
+            }
+          }
+        }
+      }
+    }  // Fall through.
     case Token::ADD:
     case Token::SUB:
-    case Token::MUL:
     case Token::DIV:
-    case Token::MOD:
+    case Token::MOD: {
       if (node->type()->IsLikelySmi()) {
         node->left()->type()->SetAsLikelySmiIfUnknown();
         node->right()->type()->SetAsLikelySmiIfUnknown();
       }
-      node->left()->set_no_negative_zero(node->no_negative_zero());
-      node->right()->set_no_negative_zero(node->no_negative_zero());
+      if (op == Token::ADD && (!CouldBeNegativeZero(node->left()) ||
+                               !CouldBeNegativeZero(node->right()))) {
+        node->left()->set_no_negative_zero(true);
+        node->right()->set_no_negative_zero(true);
+      } else if (op == Token::SUB && (!CouldBeNegativeZero(node->left()) ||
+                                      !CouldBePositiveZero(node->right()))) {
+        node->left()->set_no_negative_zero(true);
+        node->right()->set_no_negative_zero(true);
+      } else {
+        node->left()->set_no_negative_zero(node->no_negative_zero());
+        node->right()->set_no_negative_zero(node->no_negative_zero());
+      }
       if (node->op() == Token::DIV) {
         node->right()->set_no_negative_zero(false);
       } else if (node->op() == Token::MOD) {
         node->right()->set_no_negative_zero(true);
       }
       break;
+    }
     default:
       UNREACHABLE();
       break;
@@ -530,7 +607,7 @@
 
   // After visiting the operand nodes we have to check if this node's type
   // can be updated. If it does, then we can push that information down
-  // towards the leafs again if the new information is an upgrade over the
+  // towards the leaves again if the new information is an upgrade over the
   // previous type of the operand nodes.
   if (node->type()->IsUnknown()) {
     if (node->left()->type()->IsLikelySmi() ||
@@ -590,7 +667,7 @@
 
 void AstOptimizer::VisitCompareOperation(CompareOperation* node) {
   if (node->type()->IsKnown()) {
-    // Propagate useful information down towards the leafs.
+    // Propagate useful information down towards the leaves.
     node->left()->type()->SetAsLikelySmiIfUnknown();
     node->right()->type()->SetAsLikelySmiIfUnknown();
   }
@@ -604,7 +681,7 @@
 
   // After visiting the operand nodes we have to check if this node's type
   // can be updated. If it does, then we can push that information down
-  // towards the leafs again if the new information is an upgrade over the
+  // towards the leaves again if the new information is an upgrade over the
   // previous type of the operand nodes.
   if (node->type()->IsUnknown()) {
     if (node->left()->type()->IsLikelySmi() ||
diff --git a/src/runtime.cc b/src/runtime.cc
index 22e80b3..a3eb09f 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -678,6 +678,12 @@
 }
 
 
+static Object* Runtime_PreventExtensions(Arguments args) {
+  ASSERT(args.length() == 1);
+  CONVERT_CHECKED(JSObject, obj, args[0]);
+  return obj->PreventExtensions();
+}
+
 static Object* Runtime_IsExtensible(Arguments args) {
   ASSERT(args.length() == 1);
   CONVERT_CHECKED(JSObject, obj, args[0]);
@@ -5362,9 +5368,6 @@
 }
 
 
-
-
-
 static Object* Runtime_NumberToIntegerMapMinusZero(Arguments args) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 1);
diff --git a/src/runtime.h b/src/runtime.h
index 3d4df1b..5719fc8 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -72,6 +72,7 @@
   F(GetOwnProperty, 2, 1) \
   \
   F(IsExtensible, 1, 1) \
+  F(PreventExtensions, 1, 1) \
   \
   /* Utilities */ \
   F(GetFunctionDelegate, 1, 1) \
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index ffa92dd..a654a08 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -94,6 +94,7 @@
 
 
 Object* StubCache::ComputeLoadNonexistent(String* name, JSObject* receiver) {
+  ASSERT(receiver->IsGlobalObject() || receiver->HasFastProperties());
   // If no global objects are present in the prototype chain, the load
   // nonexistent IC stub can be shared for all names for a given map
   // and we use the empty string for the map cache in that case.  If
@@ -129,14 +130,16 @@
                                     JSObject* receiver,
                                     JSObject* holder,
                                     int field_index) {
+  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+  Map* map = receiver->map();
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, FIELD);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  Object* code = map->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
     LoadStubCompiler compiler;
     code = compiler.CompileLoadField(receiver, holder, field_index, name);
     if (code->IsFailure()) return code;
     PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
-    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    Object* result = map->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
   return code;
@@ -148,14 +151,16 @@
                                        JSObject* holder,
                                        AccessorInfo* callback) {
   ASSERT(v8::ToCData<Address>(callback->getter()) != 0);
+  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+  Map* map = receiver->map();
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, CALLBACKS);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  Object* code = map->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
     LoadStubCompiler compiler;
     code = compiler.CompileLoadCallback(name, receiver, holder, callback);
     if (code->IsFailure()) return code;
     PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
-    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    Object* result = map->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
   return code;
@@ -166,15 +171,17 @@
                                        JSObject* receiver,
                                        JSObject* holder,
                                        Object* value) {
+  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+  Map* map = receiver->map();
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::LOAD_IC, CONSTANT_FUNCTION);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  Object* code = map->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
     LoadStubCompiler compiler;
     code = compiler.CompileLoadConstant(receiver, holder, value, name);
     if (code->IsFailure()) return code;
     PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
-    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    Object* result = map->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
   return code;
@@ -184,21 +191,23 @@
 Object* StubCache::ComputeLoadInterceptor(String* name,
                                           JSObject* receiver,
                                           JSObject* holder) {
+  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+  Map* map = receiver->map();
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, INTERCEPTOR);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  Object* code = map->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
     LoadStubCompiler compiler;
     code = compiler.CompileLoadInterceptor(receiver, holder, name);
     if (code->IsFailure()) return code;
     PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
-    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    Object* result = map->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
   return code;
 }
 
 
-Object* StubCache::ComputeLoadNormal(String* name, JSObject* receiver) {
+Object* StubCache::ComputeLoadNormal() {
   return Builtins::builtin(Builtins::LoadIC_Normal);
 }
 
@@ -208,8 +217,10 @@
                                      GlobalObject* holder,
                                      JSGlobalPropertyCell* cell,
                                      bool is_dont_delete) {
+  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+  Map* map = receiver->map();
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, NORMAL);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  Object* code = map->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
     LoadStubCompiler compiler;
     code = compiler.CompileLoadGlobal(receiver,
@@ -219,7 +230,7 @@
                                       is_dont_delete);
     if (code->IsFailure()) return code;
     PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
-    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    Object* result = map->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
   return code;
@@ -230,14 +241,16 @@
                                          JSObject* receiver,
                                          JSObject* holder,
                                          int field_index) {
+  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+  Map* map = receiver->map();
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, FIELD);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  Object* code = map->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
     KeyedLoadStubCompiler compiler;
     code = compiler.CompileLoadField(name, receiver, holder, field_index);
     if (code->IsFailure()) return code;
     PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
-    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    Object* result = map->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
   return code;
@@ -248,15 +261,17 @@
                                             JSObject* receiver,
                                             JSObject* holder,
                                             Object* value) {
+  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+  Map* map = receiver->map();
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CONSTANT_FUNCTION);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  Object* code = map->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
     KeyedLoadStubCompiler compiler;
     code = compiler.CompileLoadConstant(name, receiver, holder, value);
     if (code->IsFailure()) return code;
     PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
-    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    Object* result = map->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
   return code;
@@ -266,15 +281,17 @@
 Object* StubCache::ComputeKeyedLoadInterceptor(String* name,
                                                JSObject* receiver,
                                                JSObject* holder) {
+  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+  Map* map = receiver->map();
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, INTERCEPTOR);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  Object* code = map->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
     KeyedLoadStubCompiler compiler;
     code = compiler.CompileLoadInterceptor(receiver, holder, name);
     if (code->IsFailure()) return code;
     PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
-    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    Object* result = map->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
   return code;
@@ -285,15 +302,17 @@
                                             JSObject* receiver,
                                             JSObject* holder,
                                             AccessorInfo* callback) {
+  ASSERT(IC::GetCodeCacheForObject(receiver, holder) == OWN_MAP);
+  Map* map = receiver->map();
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  Object* code = map->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
     KeyedLoadStubCompiler compiler;
     code = compiler.CompileLoadCallback(name, receiver, holder, callback);
     if (code->IsFailure()) return code;
     PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
-    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    Object* result = map->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
   return code;
@@ -305,13 +324,15 @@
                                                JSArray* receiver) {
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  ASSERT(receiver->IsJSObject());
+  Map* map = receiver->map();
+  Object* code = map->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
     KeyedLoadStubCompiler compiler;
     code = compiler.CompileLoadArrayLength(name);
     if (code->IsFailure()) return code;
     PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
-    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    Object* result = map->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
   return code;
@@ -322,13 +343,14 @@
                                                 String* receiver) {
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  Map* map = receiver->map();
+  Object* code = map->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
     KeyedLoadStubCompiler compiler;
     code = compiler.CompileLoadStringLength(name);
     if (code->IsFailure()) return code;
     PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
-    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    Object* result = map->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
   return code;
@@ -339,13 +361,14 @@
                                                      JSFunction* receiver) {
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CALLBACKS);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  Map* map = receiver->map();
+  Object* code = map->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
     KeyedLoadStubCompiler compiler;
     code = compiler.CompileLoadFunctionPrototype(name);
     if (code->IsFailure()) return code;
     PROFILE(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
-    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    Object* result = map->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
   return code;
@@ -371,6 +394,11 @@
 }
 
 
+Object* StubCache::ComputeStoreNormal() {
+  return Builtins::builtin(Builtins::StoreIC_Normal);
+}
+
+
 Object* StubCache::ComputeStoreGlobal(String* name,
                                       GlobalObject* receiver,
                                       JSGlobalPropertyCell* cell) {
@@ -380,7 +408,7 @@
     StoreStubCompiler compiler;
     code = compiler.CompileStoreGlobal(receiver, cell, name);
     if (code->IsFailure()) return code;
-    PROFILE(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
+    PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -451,7 +479,9 @@
                                        JSObject* holder,
                                        JSFunction* function) {
   // Compute the check type and the map.
-  Map* map = IC::GetCodeCacheMapForObject(object);
+  InlineCacheHolderFlag cache_holder =
+      IC::GetCodeCacheForObject(object, holder);
+  Map* map = IC::GetCodeCacheMap(object, cache_holder);
 
   // Compute check type based on receiver/holder.
   StubCompiler::CheckType check = StubCompiler::RECEIVER_MAP_CHECK;
@@ -466,6 +496,7 @@
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(kind,
                                     CONSTANT_FUNCTION,
+                                    cache_holder,
                                     in_loop,
                                     argc);
   Object* code = map->FindInCodeCache(name, flags);
@@ -476,7 +507,7 @@
     // caches.
     if (!function->is_compiled()) return Failure::InternalError();
     // Compile the stub - only create stubs for fully compiled functions.
-    CallStubCompiler compiler(argc, in_loop, kind);
+    CallStubCompiler compiler(argc, in_loop, kind, cache_holder);
     code = compiler.CompileCallConstant(object, holder, function, name, check);
     if (code->IsFailure()) return code;
     ASSERT_EQ(flags, Code::cast(code)->flags());
@@ -497,7 +528,9 @@
                                     JSObject* holder,
                                     int index) {
   // Compute the check type and the map.
-  Map* map = IC::GetCodeCacheMapForObject(object);
+  InlineCacheHolderFlag cache_holder =
+      IC::GetCodeCacheForObject(object, holder);
+  Map* map = IC::GetCodeCacheMap(object, cache_holder);
 
   // TODO(1233596): We cannot do receiver map check for non-JS objects
   // because they may be represented as immediates without a
@@ -508,11 +541,12 @@
 
   Code::Flags flags = Code::ComputeMonomorphicFlags(kind,
                                                     FIELD,
+                                                    cache_holder,
                                                     in_loop,
                                                     argc);
   Object* code = map->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
-    CallStubCompiler compiler(argc, in_loop, kind);
+    CallStubCompiler compiler(argc, in_loop, kind, cache_holder);
     code = compiler.CompileCallField(JSObject::cast(object),
                                      holder,
                                      index,
@@ -534,8 +568,9 @@
                                           Object* object,
                                           JSObject* holder) {
   // Compute the check type and the map.
-  // If the object is a value, we use the prototype map for the cache.
-  Map* map = IC::GetCodeCacheMapForObject(object);
+  InlineCacheHolderFlag cache_holder =
+      IC::GetCodeCacheForObject(object, holder);
+  Map* map = IC::GetCodeCacheMap(object, cache_holder);
 
   // TODO(1233596): We cannot do receiver map check for non-JS objects
   // because they may be represented as immediates without a
@@ -547,11 +582,12 @@
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(kind,
                                     INTERCEPTOR,
+                                    cache_holder,
                                     NOT_IN_LOOP,
                                     argc);
   Object* code = map->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
-    CallStubCompiler compiler(argc, NOT_IN_LOOP, kind);
+    CallStubCompiler compiler(argc, NOT_IN_LOOP, kind, cache_holder);
     code = compiler.CompileCallInterceptor(JSObject::cast(object),
                                            holder,
                                            name);
@@ -585,25 +621,29 @@
                                      GlobalObject* holder,
                                      JSGlobalPropertyCell* cell,
                                      JSFunction* function) {
+  InlineCacheHolderFlag cache_holder =
+      IC::GetCodeCacheForObject(receiver, holder);
+  Map* map = IC::GetCodeCacheMap(receiver, cache_holder);
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(kind,
                                     NORMAL,
+                                    cache_holder,
                                     in_loop,
                                     argc);
-  Object* code = receiver->map()->FindInCodeCache(name, flags);
+  Object* code = map->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
     // If the function hasn't been compiled yet, we cannot do it now
     // because it may cause GC. To avoid this issue, we return an
     // internal error which will make sure we do not update any
     // caches.
     if (!function->is_compiled()) return Failure::InternalError();
-    CallStubCompiler compiler(argc, in_loop, kind);
+    CallStubCompiler compiler(argc, in_loop, kind, cache_holder);
     code = compiler.CompileCallGlobal(receiver, holder, cell, function, name);
     if (code->IsFailure()) return code;
     ASSERT_EQ(flags, Code::cast(code)->flags());
     PROFILE(CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
                             Code::cast(code), name));
-    Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
+    Object* result = map->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
   return code;
@@ -1203,6 +1243,17 @@
 }
 
 
+CallStubCompiler::CallStubCompiler(int argc,
+                                   InLoopFlag in_loop,
+                                   Code::Kind kind,
+                                   InlineCacheHolderFlag cache_holder)
+    : arguments_(argc)
+    , in_loop_(in_loop)
+    , kind_(kind)
+    , cache_holder_(cache_holder) {
+}
+
+
 Object* CallStubCompiler::CompileCustomCall(int generator_id,
                                             Object* object,
                                             JSObject* holder,
@@ -1230,6 +1281,7 @@
   int argc = arguments_.immediate();
   Code::Flags flags = Code::ComputeMonomorphicFlags(kind_,
                                                     type,
+                                                    cache_holder_,
                                                     in_loop_,
                                                     argc);
   return GetCodeWithFlags(flags, name);
diff --git a/src/stub-cache.h b/src/stub-cache.h
index fcfffcf..856904a 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -77,7 +77,7 @@
                                         JSObject* receiver,
                                         JSObject* holder);
 
-  static Object* ComputeLoadNormal(String* name, JSObject* receiver);
+  static Object* ComputeLoadNormal();
 
 
   static Object* ComputeLoadGlobal(String* name,
@@ -121,6 +121,8 @@
                                    int field_index,
                                    Map* transition = NULL);
 
+  static Object* ComputeStoreNormal();
+
   static Object* ComputeStoreGlobal(String* name,
                                     GlobalObject* receiver,
                                     JSGlobalPropertyCell* cell);
@@ -407,8 +409,21 @@
 
   static void GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind);
 
-  // Check the integrity of the prototype chain to make sure that the
-  // current IC is still valid.
+  // Generates code that verifies that the property holder has not changed
+  // (checking maps of objects in the prototype chain for fast and global
+  // objects or doing negative lookup for slow objects, ensures that the
+  // property cells for global objects are still empty) and checks that the map
+  // of the holder has not changed. If necessary the function also generates
+  // code for security check in case of global object holders. Helps to make
+  // sure that the current IC is still valid.
+  //
+  // The scratch and holder registers are always clobbered, but the object
+  // register is only clobbered if it is the same as the holder register. The
+  // function returns a register containing the holder - either object_reg or
+  // holder_reg.
+  // The function can optionally (when save_at_depth !=
+  // kInvalidProtoDepth) save the object at the given depth by moving
+  // it to [esp + kPointerSize].
 
   Register CheckPrototypes(JSObject* object,
                            Register object_reg,
@@ -416,9 +431,10 @@
                            Register holder_reg,
                            Register scratch,
                            String* name,
-                           Label* miss) {
+                           Label* miss,
+                           Register extra = no_reg) {
     return CheckPrototypes(object, object_reg, holder, holder_reg, scratch,
-                           name, kInvalidProtoDepth, miss);
+                           name, kInvalidProtoDepth, miss, extra);
   }
 
   Register CheckPrototypes(JSObject* object,
@@ -428,7 +444,8 @@
                            Register scratch,
                            String* name,
                            int save_at_depth,
-                           Label* miss);
+                           Label* miss,
+                           Register extra = no_reg);
 
  protected:
   Object* GetCodeWithFlags(Code::Flags flags, const char* name);
@@ -611,8 +628,10 @@
     kNumCallGenerators
   };
 
-  CallStubCompiler(int argc, InLoopFlag in_loop, Code::Kind kind)
-      : arguments_(argc), in_loop_(in_loop), kind_(kind) { }
+  CallStubCompiler(int argc,
+                   InLoopFlag in_loop,
+                   Code::Kind kind,
+                   InlineCacheHolderFlag cache_holder);
 
   Object* CompileCallField(JSObject* object,
                            JSObject* holder,
@@ -653,6 +672,7 @@
   const ParameterCount arguments_;
   const InLoopFlag in_loop_;
   const Code::Kind kind_;
+  const InlineCacheHolderFlag cache_holder_;
 
   const ParameterCount& arguments() { return arguments_; }
 
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 93fecd1..509de3d 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -153,6 +153,8 @@
   SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss)                \
   SC(named_store_global_inline, V8.NamedStoreGlobalInline)            \
   SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss)   \
+  SC(store_normal_miss, V8.StoreNormalMiss)                           \
+  SC(store_normal_hit, V8.StoreNormalHit)                             \
   SC(call_miss, V8.CallMiss)                                          \
   SC(keyed_call_miss, V8.KeyedCallMiss)                               \
   SC(load_miss, V8.LoadMiss)                                          \
@@ -166,6 +168,8 @@
   SC(constructed_objects, V8.ConstructedObjects)                      \
   SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime)       \
   SC(constructed_objects_stub, V8.ConstructedObjectsStub)             \
+  SC(negative_lookups, V8.NegativeLookups)                            \
+  SC(negative_lookups_miss, V8.NegativeLookupsMiss)                   \
   SC(array_function_runtime, V8.ArrayFunctionRuntime)                 \
   SC(array_function_native, V8.ArrayFunctionNative)                   \
   SC(for_in, V8.ForIn)                                                \
diff --git a/src/v8dll-main.cc b/src/v8dll-main.cc
new file mode 100644
index 0000000..3d4b3a3
--- /dev/null
+++ b/src/v8dll-main.cc
@@ -0,0 +1,39 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <windows.h>
+
+#include "../include/v8.h"
+
+extern "C" {
+BOOL WINAPI DllMain(HANDLE hinstDLL,
+                    DWORD dwReason,
+                    LPVOID lpvReserved) {
+  // Do nothing.
+  return TRUE;
+}
+}
diff --git a/src/v8natives.js b/src/v8natives.js
index 24d5e7c..487faab 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -745,6 +745,27 @@
 }
 
 
+// ES5 section 15.2.3.10
+function ObjectPreventExtension(obj) {
+  if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
+      !IS_UNDETECTABLE(obj)) {
+    throw MakeTypeError("obj_ctor_property_non_object", ["preventExtension"]);
+  }
+  %PreventExtensions(obj);
+  return obj;
+}
+
+
+// ES5 section 15.2.3.13
+function ObjectIsExtensible(obj) {
+  if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
+      !IS_UNDETECTABLE(obj)) {
+    throw MakeTypeError("obj_ctor_property_non_object", ["preventExtension"]);
+  }
+  return %IsExtensible(obj);
+}
+
+
 %SetCode($Object, function(x) {
   if (%_IsConstructCall()) {
     if (x == null) return this;
@@ -780,7 +801,9 @@
     "defineProperties", ObjectDefineProperties,
     "getPrototypeOf", ObjectGetPrototypeOf,
     "getOwnPropertyDescriptor", ObjectGetOwnPropertyDescriptor,
-    "getOwnPropertyNames", ObjectGetOwnPropertyNames
+    "getOwnPropertyNames", ObjectGetOwnPropertyNames,
+    "isExtensible", ObjectIsExtensible,
+    "preventExtensions", ObjectPreventExtension
   ));
 }
 
diff --git a/src/version.cc b/src/version.cc
index 68c2e73..db604e0 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     2
 #define MINOR_VERSION     2
-#define BUILD_NUMBER      21
+#define BUILD_NUMBER      22
 #define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false
 
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index ff655c7..a38ebaf 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -1238,10 +1238,6 @@
   __ movq(rbx, r8);
 #endif  // _WIN64
 
-  // Set up the roots register.
-  ExternalReference roots_address = ExternalReference::roots_address();
-  __ movq(kRootRegister, roots_address);
-
   // Current stack contents:
   // [rsp + 2 * kPointerSize ... ]: Internal frame
   // [rsp + kPointerSize]         : function
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index a6d31be..3b1aeae 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -592,7 +592,6 @@
       && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0))
       && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
       && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
-      && (allocator()->count(r15) == (frame()->is_used(r15) ? 1 : 0))
       && (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0));
 }
 #endif
@@ -1600,11 +1599,133 @@
 }
 
 
+void CodeGenerator::GenerateFastSmiLoop(ForStatement* node) {
+  // A fast smi loop is a for loop with an initializer
+  // that is a simple assignment of a smi to a stack variable,
+  // a test that is a simple test of that variable against a smi constant,
+  // and a step that is an increment/decrement of the variable, and
+  // where the variable isn't modified in the loop body.
+  // This guarantees that the variable is always a smi.
+
+  Variable* loop_var = node->loop_variable();
+  Smi* initial_value = *Handle<Smi>::cast(node->init()
+      ->StatementAsSimpleAssignment()->value()->AsLiteral()->handle());
+  Smi* limit_value = *Handle<Smi>::cast(
+      node->cond()->AsCompareOperation()->right()->AsLiteral()->handle());
+  Token::Value compare_op =
+      node->cond()->AsCompareOperation()->op();
+  bool increments =
+      node->next()->StatementAsCountOperation()->op() == Token::INC;
+
+  // Check that the condition isn't initially false.
+  bool initially_false = false;
+  int initial_int_value = initial_value->value();
+  int limit_int_value = limit_value->value();
+  switch (compare_op) {
+    case Token::LT:
+      initially_false = initial_int_value >= limit_int_value;
+      break;
+    case Token::LTE:
+      initially_false = initial_int_value > limit_int_value;
+      break;
+    case Token::GT:
+      initially_false = initial_int_value <= limit_int_value;
+      break;
+    case Token::GTE:
+      initially_false = initial_int_value < limit_int_value;
+      break;
+    default:
+      UNREACHABLE();
+  }
+  if (initially_false) return;
+
+  // Only check loop condition at the end.
+
+  Visit(node->init());
+
+  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
+  // Set type and stack height of BreakTargets.
+  node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+
+  IncrementLoopNesting();
+  loop.Bind();
+
+  // Set number type of the loop variable to smi.
+  CheckStack();  // TODO(1222600): ignore if body contains calls.
+
+  SetTypeForStackSlot(loop_var->slot(), TypeInfo::Smi());
+  Visit(node->body());
+
+  if (node->continue_target()->is_linked()) {
+    node->continue_target()->Bind();
+  }
+
+  if (has_valid_frame()) {
+    CodeForStatementPosition(node);
+    Slot* loop_var_slot = loop_var->slot();
+    if (loop_var_slot->type() == Slot::LOCAL) {
+      frame_->PushLocalAt(loop_var_slot->index());
+    } else {
+      ASSERT(loop_var_slot->type() == Slot::PARAMETER);
+      frame_->PushParameterAt(loop_var_slot->index());
+    }
+    Result loop_var_result = frame_->Pop();
+    if (!loop_var_result.is_register()) {
+      loop_var_result.ToRegister();
+    }
+
+    if (increments) {
+      __ SmiAddConstant(loop_var_result.reg(),
+                        loop_var_result.reg(),
+                        Smi::FromInt(1));
+    } else {
+      __ SmiSubConstant(loop_var_result.reg(),
+                        loop_var_result.reg(),
+                        Smi::FromInt(1));
+    }
+
+    {
+      __ SmiCompare(loop_var_result.reg(), limit_value);
+      Condition condition;
+      switch (compare_op) {
+        case Token::LT:
+          condition = less;
+          break;
+        case Token::LTE:
+          condition = less_equal;
+          break;
+        case Token::GT:
+          condition = greater;
+          break;
+        case Token::GTE:
+          condition = greater_equal;
+          break;
+        default:
+          condition = never;
+          UNREACHABLE();
+      }
+      loop.Branch(condition);
+    }
+    loop_var_result.Unuse();
+  }
+  if (node->break_target()->is_linked()) {
+    node->break_target()->Bind();
+  }
+  DecrementLoopNesting();
+}
+
+
 void CodeGenerator::VisitForStatement(ForStatement* node) {
   ASSERT(!in_spilled_code());
   Comment cmnt(masm_, "[ ForStatement");
   CodeForStatementPosition(node);
 
+  if (node->is_fast_smi_loop()) {
+    GenerateFastSmiLoop(node);
+    return;
+  }
+
   // Compile the init expression if present.
   if (node->init() != NULL) {
     Visit(node->init());
@@ -1694,16 +1815,6 @@
 
   CheckStack();  // TODO(1222600): ignore if body contains calls.
 
-  // We know that the loop index is a smi if it is not modified in the
-  // loop body and it is checked against a constant limit in the loop
-  // condition.  In this case, we reset the static type information of the
-  // loop index to smi before compiling the body, the update expression, and
-  // the bottom check of the loop condition.
-  if (node->is_fast_smi_loop()) {
-    // Set number type of the loop variable to smi.
-    SetTypeForStackSlot(node->loop_variable()->slot(), TypeInfo::Smi());
-  }
-
   Visit(node->body());
 
   // If there is an update expression, compile it if necessary.
@@ -1723,13 +1834,6 @@
     }
   }
 
-  // Set the type of the loop variable to smi before compiling the test
-  // expression if we are in a fast smi loop condition.
-  if (node->is_fast_smi_loop() && has_valid_frame()) {
-    // Set number type of the loop variable to smi.
-    SetTypeForStackSlot(node->loop_variable()->slot(), TypeInfo::Smi());
-  }
-
   // Based on the condition analysis, compile the backward jump as
   // necessary.
   switch (info) {
@@ -3501,17 +3605,16 @@
       __ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
     }
     if (is_increment) {
-      __ SmiAddConstant(kScratchRegister,
+      __ SmiAddConstant(new_value.reg(),
                         new_value.reg(),
                         Smi::FromInt(1),
                         deferred->entry_label());
     } else {
-      __ SmiSubConstant(kScratchRegister,
+      __ SmiSubConstant(new_value.reg(),
                         new_value.reg(),
                         Smi::FromInt(1),
                         deferred->entry_label());
     }
-    __ movq(new_value.reg(), kScratchRegister);
     deferred->BindExit();
 
     // Postfix count operations return their input converted to
@@ -8622,26 +8725,26 @@
   __ bind(&seq_ascii_string);
   // rax: subject string (sequential ascii)
   // rcx: RegExp data (FixedArray)
-  __ movq(r12, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset));
+  __ movq(r11, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset));
   __ Set(rdi, 1);  // Type is ascii.
   __ jmp(&check_code);
 
   __ bind(&seq_two_byte_string);
   // rax: subject string (flat two-byte)
   // rcx: RegExp data (FixedArray)
-  __ movq(r12, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset));
+  __ movq(r11, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset));
   __ Set(rdi, 0);  // Type is two byte.
 
   __ bind(&check_code);
   // Check that the irregexp code has been generated for the actual string
   // encoding. If it has, the field contains a code object otherwise it contains
   // the hole.
-  __ CmpObjectType(r12, CODE_TYPE, kScratchRegister);
+  __ CmpObjectType(r11, CODE_TYPE, kScratchRegister);
   __ j(not_equal, &runtime);
 
   // rax: subject string
   // rdi: encoding of subject string (1 if ascii, 0 if two_byte);
-  // r12: code
+  // r11: code
   // Load used arguments before starting to push arguments for call to native
   // RegExp code to avoid handling changing stack height.
   __ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));
@@ -8649,7 +8752,7 @@
   // rax: subject string
   // rbx: previous index
   // rdi: encoding of subject string (1 if ascii 0 if two_byte);
-  // r12: code
+  // r11: code
   // All checks done. Now push arguments for native regexp code.
   __ IncrementCounter(&Counters::regexp_entry_native, 1);
 
@@ -8699,7 +8802,7 @@
   // rax: subject string
   // rbx: previous index
   // rdi: encoding of subject string (1 if ascii 0 if two_byte);
-  // r12: code
+  // r11: code
 
   // Argument 4: End of string data
   // Argument 3: Start of string data
@@ -8723,8 +8826,8 @@
   __ movq(arg1, rax);
 
   // Locate the code entry and call it.
-  __ addq(r12, Immediate(Code::kHeaderSize - kHeapObjectTag));
-  __ CallCFunction(r12, kRegExpExecuteArguments);
+  __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
+  __ CallCFunction(r11, kRegExpExecuteArguments);
 
   // rsi is caller save, as it is used to pass parameter.
   __ pop(rsi);
@@ -8938,7 +9041,7 @@
 
 
 void CompareStub::Generate(MacroAssembler* masm) {
-  Label call_builtin, done;
+  Label check_unequal_objects, done;
   // The compare stub returns a positive, negative, or zero 64-bit integer
   // value in rax, corresponding to result of comparing the two inputs.
   // NOTICE! This code is only reached after a smi-fast-case check, so
@@ -8975,14 +9078,14 @@
       // If it's not a heap number, then return equal for (in)equality operator.
       __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
              Factory::heap_number_map());
-      if (cc_ == equal) {
-        __ j(equal, &heap_number);
-        __ Set(rax, EQUAL);
-        __ ret(0);
-      } else {
-        // Identical objects must still be converted to primitive for < and >.
-        __ j(not_equal, &not_identical);
+      __ j(equal, &heap_number);
+      if (cc_ != equal) {
+        // Call runtime on identical JSObjects.  Otherwise return equal.
+        __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+        __ j(above_equal, &not_identical);
       }
+      __ Set(rax, EQUAL);
+      __ ret(0);
 
       __ bind(&heap_number);
       // It is a heap number, so return  equal if it's not NaN.
@@ -9113,7 +9216,8 @@
 
   __ bind(&check_for_strings);
 
-  __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &call_builtin);
+  __ JumpIfNotBothSequentialAsciiStrings(
+      rdx, rax, rcx, rbx, &check_unequal_objects);
 
   // Inline comparison of ascii strings.
   StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
@@ -9128,7 +9232,40 @@
   __ Abort("Unexpected fall-through from string comparison");
 #endif
 
-  __ bind(&call_builtin);
+  __ bind(&check_unequal_objects);
+  if (cc_ == equal && !strict_) {
+    // Not strict equality.  Objects are unequal if
+    // they are both JSObjects and not undetectable,
+    // and their pointers are different.
+    Label not_both_objects, return_unequal;
+    // At most one is a smi, so we can test for smi by adding the two.
+    // A smi plus a heap object has the low bit set, a heap object plus
+    // a heap object has the low bit clear.
+    ASSERT_EQ(0, kSmiTag);
+    ASSERT_EQ(V8_UINT64_C(1), kSmiTagMask);
+    __ lea(rcx, Operand(rax, rdx, times_1, 0));
+    __ testb(rcx, Immediate(kSmiTagMask));
+    __ j(not_zero, &not_both_objects);
+    __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
+    __ j(below, &not_both_objects);
+    __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
+    __ j(below, &not_both_objects);
+    __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
+             Immediate(1 << Map::kIsUndetectable));
+    __ j(zero, &return_unequal);
+    __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+             Immediate(1 << Map::kIsUndetectable));
+    __ j(zero, &return_unequal);
+    // The objects are both undetectable, so they both compare as the value
+    // undefined, and are equal.
+    __ Set(rax, EQUAL);
+    __ bind(&return_unequal);
+    // Return non-equal by returning the non-zero object pointer in rax,
+    // or return equal if we fell through to here.
+    __ ret(2 * kPointerSize);  // rax, rdx were pushed
+    __ bind(&not_both_objects);
+  }
+
   // must swap argument order
   __ pop(rcx);
   __ pop(rdx);
@@ -9488,7 +9625,7 @@
   // rbp: frame pointer  (restored after C call).
   // rsp: stack pointer  (restored after C call).
   // r14: number of arguments including receiver (C callee-saved).
-  // r15: pointer to the first argument (C callee-saved).
+  // r12: pointer to the first argument (C callee-saved).
   //      This pointer is reused in LeaveExitFrame(), so it is stored in a
   //      callee-saved register.
 
@@ -9529,7 +9666,7 @@
   // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
   // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
   __ movq(Operand(rsp, 4 * kPointerSize), r14);  // argc.
-  __ movq(Operand(rsp, 5 * kPointerSize), r15);  // argv.
+  __ movq(Operand(rsp, 5 * kPointerSize), r12);  // argv.
   if (result_size_ < 2) {
     // Pass a pointer to the Arguments object as the first argument.
     // Return result in single register (rax).
@@ -9545,7 +9682,7 @@
 #else  // _WIN64
   // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
   __ movq(rdi, r14);  // argc.
-  __ movq(rsi, r15);  // argv.
+  __ movq(rsi, r12);  // argv.
 #endif
   __ call(rbx);
   // Result is in rax - do not destroy this register!
@@ -9747,7 +9884,7 @@
   // rbp: frame pointer of exit frame  (restored after C call).
   // rsp: stack pointer (restored after C call).
   // r14: number of arguments including receiver (C callee-saved).
-  // r15: argv pointer (C callee-saved).
+  // r12: argv pointer (C callee-saved).
 
   Label throw_normal_exception;
   Label throw_termination_exception;
@@ -9807,24 +9944,38 @@
 
   // Push the stack frame type marker twice.
   int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
-  __ Push(Smi::FromInt(marker));  // context slot
-  __ Push(Smi::FromInt(marker));  // function slot
-  // Save callee-saved registers (X64 calling conventions).
+  // Scratch register is neither callee-save, nor an argument register on any
+  // platform. It's free to use at this point.
+  // Cannot use smi-register for loading yet.
+  __ movq(kScratchRegister,
+          reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
+          RelocInfo::NONE);
+  __ push(kScratchRegister);  // context slot
+  __ push(kScratchRegister);  // function slot
+  // Save callee-saved registers (X64/Win64 calling conventions).
   __ push(r12);
   __ push(r13);
   __ push(r14);
   __ push(r15);
-  __ push(rdi);
-  __ push(rsi);
+#ifdef _WIN64
+  __ push(rdi);  // Only callee save in Win64 ABI, argument in AMD64 ABI.
+  __ push(rsi);  // Only callee save in Win64 ABI, argument in AMD64 ABI.
+#endif
   __ push(rbx);
-  // TODO(X64): Push XMM6-XMM15 (low 64 bits) as well, or make them
-  // callee-save in JS code as well.
+  // TODO(X64): On Win64, if we ever use XMM6-XMM15, the low 64 bits are
+  // callee save as well.
 
   // Save copies of the top frame descriptor on the stack.
   ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
   __ load_rax(c_entry_fp);
   __ push(rax);
 
+  // Set up the roots and smi constant registers.
+  // Needs to be done before any further smi loads.
+  ExternalReference roots_address = ExternalReference::roots_address();
+  __ movq(kRootRegister, roots_address);
+  __ InitializeSmiConstantRegister();
+
 #ifdef ENABLE_LOGGING_AND_PROFILING
   // If this is the outermost JS call, set js_entry_sp value.
   ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
@@ -9895,8 +10046,11 @@
 
   // Restore callee-saved registers (X64 conventions).
   __ pop(rbx);
+#ifdef _WIN64
+  // Callee saved in Win64 ABI, arguments/volatile in AMD64 ABI.
   __ pop(rsi);
   __ pop(rdi);
+#endif
   __ pop(r15);
   __ pop(r14);
   __ pop(r13);
@@ -11130,7 +11284,7 @@
 
   // Check that both strings are non-external ascii strings.
   __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
-                                                 &string_add_runtime);
+                                                  &string_add_runtime);
 
   // Get the two characters forming the sub string.
   __ movzxbq(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
@@ -11140,7 +11294,7 @@
   // just allocate a new one.
   Label make_two_character_string, make_flat_ascii_string;
   StringHelper::GenerateTwoCharacterSymbolTableProbe(
-      masm, rbx, rcx, r14, r12, rdi, r15, &make_two_character_string);
+      masm, rbx, rcx, r14, r11, rdi, r12, &make_two_character_string);
   __ IncrementCounter(&Counters::string_add_native, 1);
   __ ret(2 * kPointerSize);
 
@@ -11232,7 +11386,7 @@
 
   __ bind(&make_flat_ascii_string);
   // Both strings are ascii strings. As they are short they are both flat.
-  __ AllocateAsciiString(rcx, rbx, rdi, r14, r15, &string_add_runtime);
+  __ AllocateAsciiString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
   // rcx: result string
   __ movq(rbx, rcx);
   // Locate first character of result.
@@ -11269,7 +11423,7 @@
   __ j(not_zero, &string_add_runtime);
   // Both strings are two byte strings. As they are short they are both
   // flat.
-  __ AllocateTwoByteString(rcx, rbx, rdi, r14, r15, &string_add_runtime);
+  __ AllocateTwoByteString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
   // rcx: result string
   __ movq(rbx, rcx);
   // Locate first character of result.
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index cd03d2a..b9a3b70 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -393,6 +393,9 @@
   // target (which can not be done more than once).
   void GenerateReturnSequence(Result* return_value);
 
+  // Generate code for a fast smi loop.
+  void GenerateFastSmiLoop(ForStatement* node);
+
   // Returns the arguments allocation mode.
   ArgumentsAllocationMode ArgumentsMode();
 
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
index a92b248..9991981 100644
--- a/src/x64/frames-x64.h
+++ b/src/x64/frames-x64.h
@@ -56,7 +56,11 @@
 
 class EntryFrameConstants : public AllStatic {
  public:
+#ifdef _WIN64
   static const int kCallerFPOffset      = -10 * kPointerSize;
+#else
+  static const int kCallerFPOffset      = -8 * kPointerSize;
+#endif
   static const int kArgvOffset          = 6 * kPointerSize;
 };
 
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 31a806a..d04a7dc 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -61,11 +61,11 @@
 
 // Generated code falls through if the receiver is a regular non-global
 // JS object with slow properties and no interceptors.
-static void GenerateDictionaryLoadReceiverCheck(MacroAssembler* masm,
-                                                Register receiver,
-                                                Register r0,
-                                                Register r1,
-                                                Label* miss) {
+static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
+                                                  Register receiver,
+                                                  Register r0,
+                                                  Register r1,
+                                                  Label* miss) {
   // Register usage:
   //   receiver: holds the receiver on entry and is unchanged.
   //   r0: used to hold receiver instance type.
@@ -98,34 +98,17 @@
 }
 
 
-// Helper function used to load a property from a dictionary backing storage.
-// This function may return false negatives, so miss_label
-// must always call a backup property load that is complete.
-// This function is safe to call if name is not a symbol, and will jump to
-// the miss_label in that case.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
-                                   Label* miss_label,
-                                   Register elements,
-                                   Register name,
-                                   Register r0,
-                                   Register r1,
-                                   Register result) {
-  // Register use:
-  //
-  // elements - holds the property dictionary on entry and is unchanged.
-  //
-  // name - holds the name of the property on entry and is unchanged.
-  //
-  // r0   - used to hold the capacity of the property dictionary.
-  //
-  // r1   - used to hold the index into the property dictionary.
-  //
-  // result - holds the result on exit if the load succeeded.
-
-  Label done;
-
+// Probe the string dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found leaving the
+// index into the dictionary in |r1|. Jump to the |miss| label
+// otherwise.
+static void GenerateStringDictionaryProbes(MacroAssembler* masm,
+                                           Label* miss,
+                                           Label* done,
+                                           Register elements,
+                                           Register name,
+                                           Register r0,
+                                           Register r1) {
   // Compute the capacity mask.
   const int kCapacityOffset =
       StringDictionary::kHeaderSize +
@@ -157,14 +140,58 @@
     __ cmpq(name, Operand(elements, r1, times_pointer_size,
                           kElementsStartOffset - kHeapObjectTag));
     if (i != kProbes - 1) {
-      __ j(equal, &done);
+      __ j(equal, done);
     } else {
-      __ j(not_equal, miss_label);
+      __ j(not_equal, miss);
     }
   }
+}
 
-  // Check that the value is a normal property.
+
+// Helper function used to load a property from a dictionary backing storage.
+// This function may return false negatives, so miss_label
+// must always call a backup property load that is complete.
+// This function is safe to call if name is not a symbol, and will jump to
+// the miss_label in that case.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryLoad(MacroAssembler* masm,
+                                   Label* miss_label,
+                                   Register elements,
+                                   Register name,
+                                   Register r0,
+                                   Register r1,
+                                   Register result) {
+  // Register use:
+  //
+  // elements - holds the property dictionary on entry and is unchanged.
+  //
+  // name - holds the name of the property on entry and is unchanged.
+  //
+  // r0   - used to hold the capacity of the property dictionary.
+  //
+  // r1   - used to hold the index into the property dictionary.
+  //
+  // result - holds the result on exit if the load succeeded.
+
+  Label done;
+
+  // Probe the dictionary.
+  GenerateStringDictionaryProbes(masm,
+                                 miss_label,
+                                 &done,
+                                 elements,
+                                 name,
+                                 r0,
+                                 r1);
+
+  // If probing finds an entry in the dictionary, r1 contains the
+  // index into the dictionary. Check that the value is a normal
+  // property.
   __ bind(&done);
+  const int kElementsStartOffset =
+      StringDictionary::kHeaderSize +
+      StringDictionary::kElementsStartIndex * kPointerSize;
   const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
   __ Test(Operand(elements, r1, times_pointer_size,
                   kDetailsOffset - kHeapObjectTag),
@@ -179,6 +206,75 @@
 }
 
 
+// Helper function used to store a property to a dictionary backing
+// storage. This function may fail to store a property even though it
+// is in the dictionary, so code at miss_label must always call a
+// backup property store that is complete. This function is safe to
+// call if name is not a symbol, and will jump to the miss_label in
+// that case. The generated code assumes that the receiver has slow
+// properties, is not a global object and does not have interceptors.
+static void GenerateDictionaryStore(MacroAssembler* masm,
+                                    Label* miss_label,
+                                    Register elements,
+                                    Register name,
+                                    Register value,
+                                    Register scratch0,
+                                    Register scratch1) {
+  // Register use:
+  //
+  // elements - holds the property dictionary on entry and is clobbered.
+  //
+  // name - holds the name of the property on entry and is unchanged.
+  //
+  // value - holds the value to store and is unchanged.
+  //
+  // scratch0 - used for index into the property dictionary and is clobbered.
+  //
+  // scratch1 - used to hold the capacity of the property dictionary and is
+  //            clobbered.
+  Label done;
+
+  // Probe the dictionary.
+  GenerateStringDictionaryProbes(masm,
+                                 miss_label,
+                                 &done,
+                                 elements,
+                                 name,
+                                 scratch0,
+                                 scratch1);
+
+  // If probing finds an entry in the dictionary, scratch1 contains the
+  // index into the dictionary. Check that the value is a normal
+  // property that is not read only.
+  __ bind(&done);
+  const int kElementsStartOffset =
+      StringDictionary::kHeaderSize +
+      StringDictionary::kElementsStartIndex * kPointerSize;
+  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+  const int kTypeAndReadOnlyMask
+      = (PropertyDetails::TypeField::mask() |
+         PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
+  __ Test(Operand(elements,
+                  scratch1,
+                  times_pointer_size,
+                  kDetailsOffset - kHeapObjectTag),
+          Smi::FromInt(kTypeAndReadOnlyMask));
+  __ j(not_zero, miss_label);
+
+  // Store the value at the masked, scaled index.
+  const int kValueOffset = kElementsStartOffset + kPointerSize;
+  __ lea(scratch1, Operand(elements,
+                           scratch1,
+                           times_pointer_size,
+                           kValueOffset - kHeapObjectTag));
+  __ movq(Operand(scratch1, 0), value);
+
+  // Update write barrier. Make sure not to clobber the value.
+  __ movq(scratch0, value);
+  __ RecordWrite(elements, scratch1, scratch0);
+}
+
+
 static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
                                          Label* miss,
                                          Register elements,
@@ -1332,7 +1428,7 @@
   // Get the receiver of the function from the stack.
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
 
-  GenerateDictionaryLoadReceiverCheck(masm, rdx, rax, rbx, &miss);
+  GenerateStringDictionaryReceiverCheck(masm, rdx, rax, rbx, &miss);
 
   // rax: elements
   // Search the dictionary placing the result in rdi.
@@ -1616,7 +1712,7 @@
   // -----------------------------------
   Label miss;
 
-  GenerateDictionaryLoadReceiverCheck(masm, rax, rdx, rbx, &miss);
+  GenerateStringDictionaryReceiverCheck(masm, rax, rdx, rbx, &miss);
 
   //  rdx: elements
   // Search the dictionary placing the result in rax.
@@ -1760,6 +1856,28 @@
 }
 
 
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rcx    : name
+  //  -- rdx    : receiver
+  //  -- rsp[0] : return address
+  // -----------------------------------
+
+  Label miss, restore_miss;
+
+  GenerateStringDictionaryReceiverCheck(masm, rdx, rbx, rdi, &miss);
+
+  GenerateDictionaryStore(masm, &miss, rbx, rcx, rax, r8, r9);
+  __ IncrementCounter(&Counters::store_normal_hit, 1);
+  __ ret(0);
+
+  __ bind(&miss);
+  __ IncrementCounter(&Counters::store_normal_miss, 1);
+  GenerateMiss(masm);
+}
+
+
 #undef __
 
 
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 3b2c789..76200d7 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -105,12 +105,6 @@
 }
 
 
-// For page containing |object| mark region covering [object+offset] dirty.
-// object is the object being stored into, value is the object being stored.
-// If offset is zero, then the index register contains the array index into
-// the elements array represented a zero extended int32. Otherwise it can be
-// used as a scratch register.
-// All registers are clobbered by the operation.
 void MacroAssembler::RecordWrite(Register object,
                                  int offset,
                                  Register value,
@@ -141,6 +135,35 @@
 }
 
 
+void MacroAssembler::RecordWrite(Register object,
+                                 Register address,
+                                 Register value) {
+  // The compiled code assumes that record write doesn't change the
+  // context register, so we check that none of the clobbered
+  // registers are rsi.
+  ASSERT(!object.is(rsi) && !value.is(rsi) && !address.is(rsi));
+
+  // First, check if a write barrier is even needed. The tests below
+  // catch stores of Smis and stores into young gen.
+  Label done;
+  JumpIfSmi(value, &done);
+
+  InNewSpace(object, value, equal, &done);
+
+  RecordWriteHelper(object, address, value);
+
+  bind(&done);
+
+  // Clobber all input registers when running with the debug-code flag
+  // turned on to provoke errors.
+  if (FLAG_debug_code) {
+    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+    movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+  }
+}
+
+
 void MacroAssembler::RecordWriteNonSmi(Register object,
                                        int offset,
                                        Register scratch,
@@ -444,7 +467,7 @@
 
 void MacroAssembler::Set(Register dst, int64_t x) {
   if (x == 0) {
-    xor_(dst, dst);
+    xorl(dst, dst);
   } else if (is_int32(x)) {
     movq(dst, Immediate(static_cast<int32_t>(x)));
   } else if (is_uint32(x)) {
@@ -454,7 +477,6 @@
   }
 }
 
-
 void MacroAssembler::Set(const Operand& dst, int64_t x) {
   if (is_int32(x)) {
     movq(dst, Immediate(static_cast<int32_t>(x)));
@@ -469,6 +491,78 @@
 
 static int kSmiShift = kSmiTagSize + kSmiShiftSize;
 
+Register MacroAssembler::GetSmiConstant(Smi* source) {
+  int value = source->value();
+  if (value == 0) {
+    xorl(kScratchRegister, kScratchRegister);
+    return kScratchRegister;
+  }
+  if (value == 1) {
+    return kSmiConstantRegister;
+  }
+  LoadSmiConstant(kScratchRegister, source);
+  return kScratchRegister;
+}
+
+void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
+  if (FLAG_debug_code) {
+    movq(dst,
+         reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
+         RelocInfo::NONE);
+    cmpq(dst, kSmiConstantRegister);
+    if (allow_stub_calls()) {
+      Assert(equal, "Uninitialized kSmiConstantRegister");
+    } else {
+      Label ok;
+      j(equal, &ok);
+      int3();
+      bind(&ok);
+    }
+  }
+  if (source->value() == 0) {
+    xorl(dst, dst);
+    return;
+  }
+  int value = source->value();
+  bool negative = value < 0;
+  unsigned int uvalue = negative ? -value : value;
+
+  switch (uvalue) {
+    case 9:
+      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
+      break;
+    case 8:
+      xorl(dst, dst);
+      lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
+      break;
+    case 4:
+      xorl(dst, dst);
+      lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
+      break;
+    case 5:
+      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
+      break;
+    case 3:
+      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
+      break;
+    case 2:
+      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
+      break;
+    case 1:
+      movq(dst, kSmiConstantRegister);
+      break;
+    case 0:
+      UNREACHABLE();
+      return;
+    default:
+      movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE);
+      return;
+  }
+  if (negative) {
+    neg(dst);
+  }
+}
+
 void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
   ASSERT_EQ(0, kSmiTag);
   if (!dst.is(src)) {
@@ -629,9 +723,10 @@
 
 Condition MacroAssembler::CheckPositiveSmi(Register src) {
   ASSERT_EQ(0, kSmiTag);
+  // Make mask 0x8000000000000001 and test that both bits are zero.
   movq(kScratchRegister, src);
   rol(kScratchRegister, Immediate(1));
-  testl(kScratchRegister, Immediate(0x03));
+  testb(kScratchRegister, Immediate(3));
   return zero;
 }
 
@@ -660,7 +755,6 @@
 }
 
 
-
 Condition MacroAssembler::CheckEitherSmi(Register first, Register second) {
   if (first.is(second)) {
     return CheckSmi(first);
@@ -673,11 +767,10 @@
 
 
 Condition MacroAssembler::CheckIsMinSmi(Register src) {
-  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  movq(kScratchRegister, src);
-  rol(kScratchRegister, Immediate(1));
-  cmpq(kScratchRegister, Immediate(1));
-  return equal;
+  ASSERT(!src.is(kScratchRegister));
+  // If we overflow by subtracting one, it's the minimal smi value.
+  cmpq(src, kSmiConstantRegister);
+  return overflow;
 }
 
 
@@ -690,8 +783,8 @@
 Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
   // An unsigned 32-bit integer value is valid as long as the high bit
   // is not set.
-  testq(src, Immediate(0x80000000));
-  return zero;
+  testl(src, src);
+  return positive;
 }
 
 
@@ -784,10 +877,10 @@
     }
     Assert(no_overflow, "Smi subtraction overflow");
   } else if (dst.is(src1)) {
-    movq(kScratchRegister, src1);
-    subq(kScratchRegister, src2);
+    movq(kScratchRegister, src2);
+    cmpq(src1, kScratchRegister);
     j(overflow, on_not_smi_result);
-    movq(src1, kScratchRegister);
+    subq(src1, kScratchRegister);
   } else {
     movq(dst, src1);
     subq(dst, src2);
@@ -860,7 +953,7 @@
 
   JumpIfNotSmi(src, on_not_smi_result);
   Register tmp = (dst.is(src) ? kScratchRegister : dst);
-  Move(tmp, constant);
+  LoadSmiConstant(tmp, constant);
   addq(tmp, src);
   j(overflow, on_not_smi_result);
   if (dst.is(src)) {
@@ -874,14 +967,46 @@
     if (!dst.is(src)) {
       movq(dst, src);
     }
+    return;
   } else if (dst.is(src)) {
     ASSERT(!dst.is(kScratchRegister));
-
-    Move(kScratchRegister, constant);
-    addq(dst, kScratchRegister);
+    switch (constant->value()) {
+      case 1:
+        addq(dst, kSmiConstantRegister);
+        return;
+      case 2:
+        lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
+        return;
+      case 4:
+        lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
+        return;
+      case 8:
+        lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
+        return;
+      default:
+        Register constant_reg = GetSmiConstant(constant);
+        addq(dst, constant_reg);
+        return;
+    }
   } else {
-    Move(dst, constant);
-    addq(dst, src);
+    switch (constant->value()) {
+      case 1:
+        lea(dst, Operand(src, kSmiConstantRegister, times_1, 0));
+        return;
+      case 2:
+        lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
+        return;
+      case 4:
+        lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
+        return;
+      case 8:
+        lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
+        return;
+      default:
+        LoadSmiConstant(dst, constant);
+        addq(dst, src);
+        return;
+    }
   }
 }
 
@@ -904,12 +1029,12 @@
   } else if (dst.is(src)) {
     ASSERT(!dst.is(kScratchRegister));
 
-    Move(kScratchRegister, constant);
-    addq(kScratchRegister, dst);
+    LoadSmiConstant(kScratchRegister, constant);
+    addq(kScratchRegister, src);
     j(overflow, on_not_smi_result);
     movq(dst, kScratchRegister);
   } else {
-    Move(dst, constant);
+    LoadSmiConstant(dst, constant);
     addq(dst, src);
     j(overflow, on_not_smi_result);
   }
@@ -923,19 +1048,17 @@
     }
   } else if (dst.is(src)) {
     ASSERT(!dst.is(kScratchRegister));
-
-    Move(kScratchRegister, constant);
-    subq(dst, kScratchRegister);
+    Register constant_reg = GetSmiConstant(constant);
+    subq(dst, constant_reg);
   } else {
-    // Subtract by adding the negative, to do it in two operations.
     if (constant->value() == Smi::kMinValue) {
-      Move(dst, constant);
+      LoadSmiConstant(dst, constant);
       // Adding and subtracting the min-value gives the same result, it only
       // differs on the overflow bit, which we don't check here.
       addq(dst, src);
     } else {
       // Subtract by adding the negation.
-      Move(dst, Smi::FromInt(-constant->value()));
+      LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
       addq(dst, src);
     }
   }
@@ -957,11 +1080,11 @@
       // We test the non-negativeness before doing the subtraction.
       testq(src, src);
       j(not_sign, on_not_smi_result);
-      Move(kScratchRegister, constant);
+      LoadSmiConstant(kScratchRegister, constant);
       subq(dst, kScratchRegister);
     } else {
       // Subtract by adding the negation.
-      Move(kScratchRegister, Smi::FromInt(-constant->value()));
+      LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
       addq(kScratchRegister, dst);
       j(overflow, on_not_smi_result);
       movq(dst, kScratchRegister);
@@ -972,13 +1095,13 @@
       // We test the non-negativeness before doing the subtraction.
       testq(src, src);
       j(not_sign, on_not_smi_result);
-      Move(dst, constant);
+      LoadSmiConstant(dst, constant);
       // Adding and subtracting the min-value gives the same result, it only
       // differs on the overflow bit, which we don't check here.
       addq(dst, src);
     } else {
       // Subtract by adding the negation.
-      Move(dst, Smi::FromInt(-(constant->value())));
+      LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
       addq(dst, src);
       j(overflow, on_not_smi_result);
     }
@@ -1132,10 +1255,10 @@
     xor_(dst, dst);
   } else if (dst.is(src)) {
     ASSERT(!dst.is(kScratchRegister));
-    Move(kScratchRegister, constant);
-    and_(dst, kScratchRegister);
+    Register constant_reg = GetSmiConstant(constant);
+    and_(dst, constant_reg);
   } else {
-    Move(dst, constant);
+    LoadSmiConstant(dst, constant);
     and_(dst, src);
   }
 }
@@ -1152,10 +1275,10 @@
 void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
   if (dst.is(src)) {
     ASSERT(!dst.is(kScratchRegister));
-    Move(kScratchRegister, constant);
-    or_(dst, kScratchRegister);
+    Register constant_reg = GetSmiConstant(constant);
+    or_(dst, constant_reg);
   } else {
-    Move(dst, constant);
+    LoadSmiConstant(dst, constant);
     or_(dst, src);
   }
 }
@@ -1172,10 +1295,10 @@
 void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
   if (dst.is(src)) {
     ASSERT(!dst.is(kScratchRegister));
-    Move(kScratchRegister, constant);
-    xor_(dst, kScratchRegister);
+    Register constant_reg = GetSmiConstant(constant);
+    xor_(dst, constant_reg);
   } else {
-    Move(dst, constant);
+    LoadSmiConstant(dst, constant);
     xor_(dst, src);
   }
 }
@@ -1343,6 +1466,7 @@
   // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
 }
 
+
 SmiIndex MacroAssembler::SmiToIndex(Register dst,
                                     Register src,
                                     int shift) {
@@ -1568,8 +1692,8 @@
   if (is_int32(smi)) {
     push(Immediate(static_cast<int32_t>(smi)));
   } else {
-    Set(kScratchRegister, smi);
-    push(kScratchRegister);
+    Register constant = GetSmiConstant(source);
+    push(constant);
   }
 }
 
@@ -2109,10 +2233,10 @@
   movq(rax, rsi);
   store_rax(context_address);
 
-  // Setup argv in callee-saved register r15. It is reused in LeaveExitFrame,
+  // Setup argv in callee-saved register r12. It is reused in LeaveExitFrame,
   // so it must be retained across the C-call.
   int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
-  lea(r15, Operand(rbp, r14, times_pointer_size, offset));
+  lea(r12, Operand(rbp, r14, times_pointer_size, offset));
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Save the state of all registers to the stack from the memory
@@ -2158,7 +2282,7 @@
 
 void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode, int result_size) {
   // Registers:
-  // r15 : argv
+  // r12 : argv
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Restore the memory copy of the registers by digging them out from
   // the stack. This is needed to allow nested break points.
@@ -2178,7 +2302,7 @@
 
   // Pop everything up to and including the arguments and the receiver
   // from the caller stack.
-  lea(rsp, Operand(r15, 1 * kPointerSize));
+  lea(rsp, Operand(r12, 1 * kPointerSize));
 
   // Restore current context from top and clear it in debug mode.
   ExternalReference context_address(Top::k_context_address);
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 44573f3..a256ab8 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -47,8 +47,11 @@
 // Default scratch register used by MacroAssembler (and other code that needs
 // a spare register). The register isn't callee save, and not used by the
 // function calling convention.
-static const Register kScratchRegister = { 10 };  // r10.
-static const Register kRootRegister = { 13 };     // r13
+static const Register kScratchRegister = { 10 };      // r10.
+static const Register kSmiConstantRegister = { 15 };  // r15 (callee save).
+static const Register kRootRegister = { 13 };         // r13 (callee save).
+// Value of smi in kSmiConstantRegister.
+static const int kSmiConstantRegisterValue = 1;
 
 // Convenience for platform-independent signatures.
 typedef Operand MemOperand;
@@ -93,16 +96,27 @@
                   Condition cc,
                   Label* branch);
 
-  // For page containing |object| mark region covering [object+offset] dirty.
-  // object is the object being stored into, value is the object being stored.
-  // If offset is zero, then the scratch register contains the array index into
-  // the elements array represented as a Smi.
-  // All registers are clobbered by the operation.
+  // For page containing |object| mark region covering [object+offset]
+  // dirty. |object| is the object being stored into, |value| is the
+  // object being stored. If |offset| is zero, then the |scratch|
+  // register contains the array index into the elements array
+  // represented as a Smi. All registers are clobbered by the
+  // operation. RecordWrite filters out smis so it does not update the
+  // write barrier if the value is a smi.
   void RecordWrite(Register object,
                    int offset,
                    Register value,
                    Register scratch);
 
+  // For page containing |object| mark region covering [address]
+  // dirty. |object| is the object being stored into, |value| is the
+  // object being stored. All registers are clobbered by the
+  // operation.  RecordWrite filters out smis so it does not update
+  // the write barrier if the value is a smi.
+  void RecordWrite(Register object,
+                   Register address,
+                   Register value);
+
   // For page containing |object| mark region covering [object+offset] dirty.
   // The value is known to not be a smi.
   // object is the object being stored into, value is the object being stored.
@@ -191,6 +205,12 @@
   // ---------------------------------------------------------------------------
   // Smi tagging, untagging and operations on tagged smis.
 
+  void InitializeSmiConstantRegister() {
+    movq(kSmiConstantRegister,
+         reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
+         RelocInfo::NONE);
+  }
+
   // Conversions between tagged smi values and non-tagged integer values.
 
   // Tag an integer value. The result must be known to be a valid smi value.
@@ -458,11 +478,12 @@
 
   // Basic Smi operations.
   void Move(Register dst, Smi* source) {
-    Set(dst, reinterpret_cast<int64_t>(source));
+    LoadSmiConstant(dst, source);
   }
 
   void Move(const Operand& dst, Smi* source) {
-    Set(dst, reinterpret_cast<int64_t>(source));
+    Register constant = GetSmiConstant(source);
+    movq(dst, constant);
   }
 
   void Push(Smi* smi);
@@ -809,6 +830,14 @@
  private:
   bool generating_stub_;
   bool allow_stub_calls_;
+
+  // Returns a register holding the smi value. The register MUST NOT be
+  // modified. It may be the "smi 1 constant" register.
+  Register GetSmiConstant(Smi* value);
+
+  // Moves the smi value to the destination register.
+  void LoadSmiConstant(Register dst, Smi* value);
+
   // This handle will be patched with the code object on installation.
   Handle<Object> code_object_;
 
diff --git a/src/x64/register-allocator-x64-inl.h b/src/x64/register-allocator-x64-inl.h
index c7c18b3..c6bea3a 100644
--- a/src/x64/register-allocator-x64-inl.h
+++ b/src/x64/register-allocator-x64-inl.h
@@ -38,7 +38,8 @@
 
 bool RegisterAllocator::IsReserved(Register reg) {
   return reg.is(rsp) || reg.is(rbp) || reg.is(rsi) ||
-      reg.is(kScratchRegister) || reg.is(kRootRegister);
+      reg.is(kScratchRegister) || reg.is(kRootRegister) ||
+      reg.is(kSmiConstantRegister);
 }
 
 
@@ -58,11 +59,11 @@
     5,   // r8
     6,   // r9
     -1,  // r10  Scratch register.
-    9,   // r11
-    10,  // r12
+    8,   // r11
+    9,   // r12
     -1,  // r13  Roots array.  This is callee saved.
     7,   // r14
-    8    // r15
+    -1   // r15  Smi constant register.
   };
   return kNumbers[reg.code()];
 }
@@ -71,7 +72,7 @@
 Register RegisterAllocator::ToRegister(int num) {
   ASSERT(num >= 0 && num < kNumRegisters);
   const Register kRegisters[] =
-      { rax, rbx, rcx, rdx, rdi, r8, r9, r14, r15, r11, r12 };
+      { rax, rbx, rcx, rdx, rdi, r8, r9, r14, r11, r12 };
   return kRegisters[num];
 }
 
diff --git a/src/x64/register-allocator-x64.h b/src/x64/register-allocator-x64.h
index 8d666d2..a2884d9 100644
--- a/src/x64/register-allocator-x64.h
+++ b/src/x64/register-allocator-x64.h
@@ -33,7 +33,7 @@
 
 class RegisterAllocatorConstants : public AllStatic {
  public:
-  static const int kNumRegisters = 11;
+  static const int kNumRegisters = 10;
   static const int kInvalidRegister = -1;
 };
 
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 1e103ac..ab75b96 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -2125,7 +2125,8 @@
                                        Register scratch,
                                        String* name,
                                        int save_at_depth,
-                                       Label* miss) {
+                                       Label* miss,
+                                       Register extra) {
   // Check that the maps haven't changed.
   Register result =
       masm()->CheckMaps(object,
diff --git a/src/x64/virtual-frame-x64.h b/src/x64/virtual-frame-x64.h
index 0549e3c..adf47e2 100644
--- a/src/x64/virtual-frame-x64.h
+++ b/src/x64/virtual-frame-x64.h
@@ -388,6 +388,13 @@
   // Duplicate the top element of the frame.
   void Dup() { PushFrameSlotAt(element_count() - 1); }
 
+  // Duplicate the n'th element from the top of the frame.
+  // Dup(1) is equivalent to Dup().
+  void Dup(int n) {
+    ASSERT(n > 0);
+    PushFrameSlotAt(element_count() - n);
+  }
+
   // Pop an element from the top of the expression stack.  Returns a
   // Result, which may be a constant or a register.
   Result Pop();
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index be5ecba..745babc 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -3335,6 +3335,42 @@
 }
 
 
+
+THREADED_TEST(ExtensibleOnUndetectable) {
+  v8::HandleScope scope;
+  LocalContext env;
+
+  Local<v8::FunctionTemplate> desc =
+      v8::FunctionTemplate::New(0, v8::Handle<Value>());
+  desc->InstanceTemplate()->MarkAsUndetectable();  // undetectable
+
+  Local<v8::Object> obj = desc->GetFunction()->NewInstance();
+  env->Global()->Set(v8_str("undetectable"), obj);
+
+  Local<String> source = v8_str("undetectable.x = 42;"
+                                "undetectable.x");
+
+  Local<Script> script = Script::Compile(source);
+
+  CHECK_EQ(v8::Integer::New(42), script->Run());
+
+  ExpectBoolean("Object.isExtensible(undetectable)", true);
+
+  source = v8_str("Object.preventExtensions(undetectable);");
+  script = Script::Compile(source);
+  script->Run();
+  ExpectBoolean("Object.isExtensible(undetectable)", false);
+
+  source = v8_str("undetectable.y = 2000;");
+  script = Script::Compile(source);
+  v8::TryCatch try_catch;
+  Local<Value> result = script->Run();
+  CHECK(result.IsEmpty());
+  CHECK(try_catch.HasCaught());
+}
+
+
+
 THREADED_TEST(UndetectableString) {
   v8::HandleScope scope;
   LocalContext env;
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index e689637..8ebf752 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -2075,6 +2075,39 @@
 }
 
 
+// Test that it is possible to add and remove break points in a top level
+// function which has no references but has not been collected yet.
+TEST(ScriptBreakPointTopLevelCrash) {
+  v8::HandleScope scope;
+  DebugLocalContext env;
+  env.ExposeDebug();
+
+  v8::Debug::SetDebugEventListener(DebugEventBreakPointHitCount,
+                                   v8::Undefined());
+
+  v8::Local<v8::String> script_source = v8::String::New(
+    "function f() {\n"
+    "  return 0;\n"
+    "}\n"
+    "f()");
+
+  int sbp1 = SetScriptBreakPointByNameFromJS("test.html", 3, -1);
+  {
+    v8::HandleScope scope;
+    break_point_hit_count = 0;
+    v8::Script::Compile(script_source, v8::String::New("test.html"))->Run();
+    CHECK_EQ(1, break_point_hit_count);
+  }
+
+  int sbp2 = SetScriptBreakPointByNameFromJS("test.html", 3, -1);
+  ClearBreakPointFromJS(sbp1);
+  ClearBreakPointFromJS(sbp2);
+
+  v8::Debug::SetDebugEventListener(NULL);
+  CheckDebuggerUnloaded();
+}
+
+
 // Test that it is possible to remove the last break point for a function
 // inside the break handling of that break point.
 TEST(RemoveBreakPointInBreak) {
@@ -2129,7 +2162,7 @@
 }
 
 
-// Test setting a breakpoint on the  debugger statement.
+// Test setting a breakpoint on the debugger statement.
 TEST(DebuggerStatementBreakpoint) {
     break_point_hit_count = 0;
     v8::HandleScope scope;
diff --git a/test/cctest/test-disasm-arm.cc b/test/cctest/test-disasm-arm.cc
index 18e641d..f890fc1 100644
--- a/test/cctest/test-disasm-arm.cc
+++ b/test/cctest/test-disasm-arm.cc
@@ -408,6 +408,11 @@
 
   if (CpuFeatures::IsSupported(VFP3)) {
     CpuFeatures::Scope scope(VFP3);
+    COMPARE(vmov(d0, d1),
+            "eeb00b41       vmov.f64 d0, d1");
+    COMPARE(vmov(d3, d3, eq),
+            "0eb03b43       vmov.f64eq d3, d3");
+
     COMPARE(vadd(d0, d1, d2),
             "ee310b02       vadd.f64 d0, d1, d2");
     COMPARE(vadd(d3, d4, d5, mi),
diff --git a/test/cctest/test-macro-assembler-x64.cc b/test/cctest/test-macro-assembler-x64.cc
index dd97498..3d2b91b 100755
--- a/test/cctest/test-macro-assembler-x64.cc
+++ b/test/cctest/test-macro-assembler-x64.cc
@@ -57,10 +57,9 @@
 using v8::internal::r8;
 using v8::internal::r9;
 using v8::internal::r11;
-using v8::internal::r12;  // Remember: r12..r15 are callee save!
+using v8::internal::r12;
 using v8::internal::r13;
 using v8::internal::r14;
-using v8::internal::r15;
 using v8::internal::times_pointer_size;
 using v8::internal::FUNCTION_CAST;
 using v8::internal::CodeDesc;
@@ -92,6 +91,24 @@
 
 #define __ masm->
 
+
+static void EntryCode(MacroAssembler* masm) {
+  // Smi constant register is callee save.
+  __ push(v8::internal::kSmiConstantRegister);
+  __ InitializeSmiConstantRegister();
+}
+
+
+static void ExitCode(MacroAssembler* masm) {
+  // Return -1 if kSmiConstantRegister was clobbered during the test.
+  __ Move(rdx, Smi::FromInt(1));
+  __ cmpq(rdx, v8::internal::kSmiConstantRegister);
+  __ movq(rdx, Immediate(-1));
+  __ cmovq(not_equal, rax, rdx);
+  __ pop(v8::internal::kSmiConstantRegister);
+}
+
+
 TEST(Smi) {
   // Check that C++ Smi operations work as expected.
   int64_t test_numbers[] = {
@@ -139,6 +156,7 @@
   MacroAssembler assembler(buffer, static_cast<int>(actual_size));
   MacroAssembler* masm = &assembler;  // Create a pointer for the __ macro.
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   TestMoveSmi(masm, &exit, 1, Smi::FromInt(0));
@@ -156,6 +174,7 @@
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -225,6 +244,7 @@
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   TestSmiCompare(masm, &exit, 0x10, 0, 0);
@@ -249,6 +269,7 @@
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -272,6 +293,7 @@
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   __ movq(rax, Immediate(1));  // Test number.
@@ -349,6 +371,7 @@
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -397,6 +420,7 @@
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   int64_t twice_max = static_cast<int64_t>(Smi::kMaxValue) * 2;
@@ -416,6 +440,7 @@
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -438,6 +463,7 @@
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
   Condition cond;
 
@@ -613,6 +639,7 @@
   __ xor_(rax, rax);
 
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -683,6 +710,7 @@
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   TestSmiNeg(masm, &exit, 0x10, 0);
@@ -696,6 +724,7 @@
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -768,6 +797,7 @@
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   // No-overflow tests.
@@ -782,6 +812,7 @@
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -955,6 +986,7 @@
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   SmiSubTest(masm, &exit, 0x10, 1, 2);
@@ -977,6 +1009,7 @@
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -1042,6 +1075,7 @@
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   TestSmiMul(masm, &exit, 0x10, 0, 0);
@@ -1061,6 +1095,7 @@
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -1081,51 +1116,51 @@
 #endif
   bool fraction = !division_by_zero && !overflow && (x % y != 0);
   __ Move(r11, Smi::FromInt(x));
-  __ Move(r12, Smi::FromInt(y));
+  __ Move(r14, Smi::FromInt(y));
   if (!fraction && !overflow && !negative_zero && !division_by_zero) {
     // Division succeeds
     __ movq(rcx, r11);
-    __ movq(r15, Immediate(id));
+    __ movq(r12, Immediate(id));
     int result = x / y;
     __ Move(r8, Smi::FromInt(result));
-    __ SmiDiv(r9, rcx, r12, exit);
-    // Might have destroyed rcx and r12.
-    __ incq(r15);
+    __ SmiDiv(r9, rcx, r14, exit);
+    // Might have destroyed rcx and r14.
+    __ incq(r12);
     __ SmiCompare(r9, r8);
     __ j(not_equal, exit);
 
-    __ incq(r15);
+    __ incq(r12);
     __ movq(rcx, r11);
-    __ Move(r12, Smi::FromInt(y));
+    __ Move(r14, Smi::FromInt(y));
     __ SmiCompare(rcx, r11);
     __ j(not_equal, exit);
 
-    __ incq(r15);
-    __ SmiDiv(rcx, rcx, r12, exit);
+    __ incq(r12);
+    __ SmiDiv(rcx, rcx, r14, exit);
 
-    __ incq(r15);
+    __ incq(r12);
     __ SmiCompare(rcx, r8);
     __ j(not_equal, exit);
   } else {
     // Division fails.
-    __ movq(r15, Immediate(id + 8));
+    __ movq(r12, Immediate(id + 8));
 
     Label fail_ok, fail_ok2;
     __ movq(rcx, r11);
-    __ SmiDiv(r9, rcx, r12, &fail_ok);
+    __ SmiDiv(r9, rcx, r14, &fail_ok);
     __ jmp(exit);
     __ bind(&fail_ok);
 
-    __ incq(r15);
+    __ incq(r12);
     __ SmiCompare(rcx, r11);
     __ j(not_equal, exit);
 
-    __ incq(r15);
-    __ SmiDiv(rcx, rcx, r12, &fail_ok2);
+    __ incq(r12);
+    __ SmiDiv(rcx, rcx, r14, &fail_ok2);
     __ jmp(exit);
     __ bind(&fail_ok2);
 
-    __ incq(r15);
+    __ incq(r12);
     __ SmiCompare(rcx, r11);
     __ j(not_equal, exit);
   }
@@ -1145,10 +1180,11 @@
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
+  __ push(r14);
   __ push(r12);
-  __ push(r15);
   TestSmiDiv(masm, &exit, 0x10, 1, 1);
   TestSmiDiv(masm, &exit, 0x20, 1, 0);
   TestSmiDiv(masm, &exit, 0x30, -1, 0);
@@ -1170,11 +1206,12 @@
   TestSmiDiv(masm, &exit, 0x130, Smi::kMinValue, Smi::kMinValue);
   TestSmiDiv(masm, &exit, 0x140, Smi::kMinValue, -1);
 
-  __ xor_(r15, r15);  // Success.
+  __ xor_(r12, r12);  // Success.
   __ bind(&exit);
-  __ movq(rax, r15);
-  __ pop(r15);
+  __ movq(rax, r12);
   __ pop(r12);
+  __ pop(r14);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -1192,47 +1229,47 @@
   bool negative_zero = (!fraction && x < 0);
   __ Move(rcx, Smi::FromInt(x));
   __ movq(r11, rcx);
-  __ Move(r12, Smi::FromInt(y));
+  __ Move(r14, Smi::FromInt(y));
   if (!division_overflow && !negative_zero && !division_by_zero) {
     // Modulo succeeds
-    __ movq(r15, Immediate(id));
+    __ movq(r12, Immediate(id));
     int result = x % y;
     __ Move(r8, Smi::FromInt(result));
-    __ SmiMod(r9, rcx, r12, exit);
+    __ SmiMod(r9, rcx, r14, exit);
 
-    __ incq(r15);
+    __ incq(r12);
     __ SmiCompare(r9, r8);
     __ j(not_equal, exit);
 
-    __ incq(r15);
+    __ incq(r12);
     __ SmiCompare(rcx, r11);
     __ j(not_equal, exit);
 
-    __ incq(r15);
-    __ SmiMod(rcx, rcx, r12, exit);
+    __ incq(r12);
+    __ SmiMod(rcx, rcx, r14, exit);
 
-    __ incq(r15);
+    __ incq(r12);
     __ SmiCompare(rcx, r8);
     __ j(not_equal, exit);
   } else {
     // Modulo fails.
-    __ movq(r15, Immediate(id + 8));
+    __ movq(r12, Immediate(id + 8));
 
     Label fail_ok, fail_ok2;
-    __ SmiMod(r9, rcx, r12, &fail_ok);
+    __ SmiMod(r9, rcx, r14, &fail_ok);
     __ jmp(exit);
     __ bind(&fail_ok);
 
-    __ incq(r15);
+    __ incq(r12);
     __ SmiCompare(rcx, r11);
     __ j(not_equal, exit);
 
-    __ incq(r15);
-    __ SmiMod(rcx, rcx, r12, &fail_ok2);
+    __ incq(r12);
+    __ SmiMod(rcx, rcx, r14, &fail_ok2);
     __ jmp(exit);
     __ bind(&fail_ok2);
 
-    __ incq(r15);
+    __ incq(r12);
     __ SmiCompare(rcx, r11);
     __ j(not_equal, exit);
   }
@@ -1252,10 +1289,11 @@
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
+  __ push(r14);
   __ push(r12);
-  __ push(r15);
   TestSmiMod(masm, &exit, 0x10, 1, 1);
   TestSmiMod(masm, &exit, 0x20, 1, 0);
   TestSmiMod(masm, &exit, 0x30, -1, 0);
@@ -1277,11 +1315,12 @@
   TestSmiMod(masm, &exit, 0x130, Smi::kMinValue, Smi::kMinValue);
   TestSmiMod(masm, &exit, 0x140, Smi::kMinValue, -1);
 
-  __ xor_(r15, r15);  // Success.
+  __ xor_(r12, r12);  // Success.
   __ bind(&exit);
-  __ movq(rax, r15);
-  __ pop(r15);
+  __ movq(rax, r12);
   __ pop(r12);
+  __ pop(r14);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -1336,7 +1375,7 @@
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 3,
                                       &actual_size,
                                       true));
   CHECK(buffer);
@@ -1345,6 +1384,7 @@
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   TestSmiIndex(masm, &exit, 0x10, 0);
@@ -1355,6 +1395,7 @@
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -1411,6 +1452,7 @@
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);  // Avoid inline checks.
+  EntryCode(masm);
   Label exit;
 
   TestSelectNonSmi(masm, &exit, 0x10, 0, 0);
@@ -1425,6 +1467,7 @@
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -1487,6 +1530,7 @@
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   TestSmiAnd(masm, &exit, 0x10, 0, 0);
@@ -1503,6 +1547,7 @@
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -1565,6 +1610,7 @@
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   TestSmiOr(masm, &exit, 0x10, 0, 0);
@@ -1583,6 +1629,7 @@
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -1645,6 +1692,7 @@
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   TestSmiXor(masm, &exit, 0x10, 0, 0);
@@ -1663,6 +1711,7 @@
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -1709,6 +1758,7 @@
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   TestSmiNot(masm, &exit, 0x10, 0);
@@ -1722,6 +1772,7 @@
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -1793,7 +1844,7 @@
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 3,
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 4,
                                       &actual_size,
                                       true));
   CHECK(buffer);
@@ -1802,6 +1853,7 @@
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   TestSmiShiftLeft(masm, &exit, 0x10, 0);
@@ -1814,6 +1866,7 @@
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -1896,7 +1949,7 @@
   // Allocate an executable page of memory.
   size_t actual_size;
   byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 3,
                                       &actual_size,
                                       true));
   CHECK(buffer);
@@ -1905,6 +1958,7 @@
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   TestSmiShiftLogicalRight(masm, &exit, 0x10, 0);
@@ -1917,6 +1971,7 @@
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -1971,6 +2026,7 @@
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   TestSmiShiftArithmeticRight(masm, &exit, 0x10, 0);
@@ -1983,6 +2039,7 @@
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -2032,6 +2089,7 @@
 
   MacroAssembler* masm = &assembler;
   masm->set_allow_stub_calls(false);
+  EntryCode(masm);
   Label exit;
 
   TestPositiveSmiPowerUp(masm, &exit, 0x20, 0);
@@ -2046,6 +2104,7 @@
 
   __ xor_(rax, rax);  // Success.
   __ bind(&exit);
+  ExitCode(masm);
   __ ret(0);
 
   CodeDesc desc;
@@ -2074,8 +2133,9 @@
   masm->set_allow_stub_calls(false);
   Label exit;
 
-  __ push(r12);
+  EntryCode(masm);
   __ push(r13);
+  __ push(r14);
   __ push(rbx);
   __ push(rbp);
   __ push(Immediate(0x100));  // <-- rbp
@@ -2093,7 +2153,7 @@
   // r12 = rsp[3]
   // rbx = rsp[5]
   // r13 = rsp[7]
-  __ lea(r12, Operand(rsp, 3 * kPointerSize));
+  __ lea(r14, Operand(rsp, 3 * kPointerSize));
   __ lea(r13, Operand(rbp, -3 * kPointerSize));
   __ lea(rbx, Operand(rbp, -5 * kPointerSize));
   __ movl(rcx, Immediate(2));
@@ -2396,8 +2456,9 @@
   __ lea(rsp, Operand(rbp, kPointerSize));
   __ pop(rbp);
   __ pop(rbx);
+  __ pop(r14);
   __ pop(r13);
-  __ pop(r12);
+  ExitCode(masm);
   __ ret(0);
 
 
diff --git a/test/es5conform/es5conform.status b/test/es5conform/es5conform.status
index 8e1e941..e461349 100644
--- a/test/es5conform/es5conform.status
+++ b/test/es5conform/es5conform.status
@@ -44,7 +44,6 @@
 chapter11/11.4/11.4.1//11.4.1-4.a-5: FAIL
 chapter11/11.4/11.4.1//11.4.1-4.a-7: FAIL
 
-
 # We do not have a global object called 'global' as required by tests.
 chapter15/15.1: FAIL_OK
 
@@ -52,14 +51,10 @@
 chapter15/15.2/15.2.3/15.2.3.8: UNIMPLEMENTED
 # NOT IMPLEMENTED: freeze
 chapter15/15.2/15.2.3/15.2.3.9: UNIMPLEMENTED
-# NOT IMPLEMENTED: preventExtensions
-chapter15/15.2/15.2.3/15.2.3.10: UNIMPLEMENTED
 # NOT IMPLEMENTED: isSealed
 chapter15/15.2/15.2.3/15.2.3.11: UNIMPLEMENTED
 # NOT IMPLEMENTED: isFrozen
 chapter15/15.2/15.2.3/15.2.3.12: UNIMPLEMENTED
-# NOT IMPLEMENTED: isExtensible
-chapter15/15.2/15.2.3/15.2.3.13: UNIMPLEMENTED
 
 # NOT IMPLEMENTED: seal
 chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-20: UNIMPLEMENTED
@@ -67,18 +62,12 @@
 # NOT IMPLEMENTED: freeze
 chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-21: UNIMPLEMENTED
 
-# NOT IMPLEMENTED: preventExtensions
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-22: UNIMPLEMENTED
-
 # NOT IMPLEMENTED: isSealed
 chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-23: UNIMPLEMENTED
 
 # NOT IMPLEMENTED: isFrozen
 chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-24: UNIMPLEMENTED
 
-# NOT IMPLEMENTED: isExtensible
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-25: UNIMPLEMENTED
-
 # NOT IMPLEMENTED: bind
 chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-38: UNIMPLEMENTED
 
diff --git a/test/mjsunit/call-stub.js b/test/mjsunit/call-stub.js
new file mode 100644
index 0000000..a9132a6
--- /dev/null
+++ b/test/mjsunit/call-stub.js
@@ -0,0 +1,51 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function Hash() {
+  for (var i = 0; i < 100; i++) {
+    this['a' + i] = i;
+  }
+
+  delete this.a50;  // Ensure it's a normal object.
+}
+
+Hash.prototype.m = function() {
+  return 1;
+};
+
+var h = new Hash();
+
+for (var i = 1; i < 100; i++) {
+  if (i == 50) {
+    h.m = function() {
+      return 2;
+    };
+  } else if (i == 70) {
+    delete h.m;
+  }
+  assertEquals(i < 50 || i >= 70 ? 1 : 2, h.m());
+}
diff --git a/test/mjsunit/date.js b/test/mjsunit/date.js
index b264a19..57fc5a0 100644
--- a/test/mjsunit/date.js
+++ b/test/mjsunit/date.js
@@ -154,6 +154,15 @@
 
 testToLocaleTimeString();
 
+// Test that -0 is treated correctly in MakeDay.
+var d = new Date();
+assertDoesNotThrow("d.setDate(-0)");
+assertDoesNotThrow("new Date(-0, -0, -0, -0, -0, -0. -0)");
+assertDoesNotThrow("new Date(0x40000000, 0x40000000, 0x40000000," +
+                   "0x40000000, 0x40000000, 0x40000000, 0x40000000)")
+assertDoesNotThrow("new Date(-0x40000001, -0x40000001, -0x40000001," +
+                   "-0x40000001, -0x40000001, -0x40000001, -0x40000001)")
+
 
 // Modified test from WebKit
 // LayoutTests/fast/js/script-tests/date-utc-timeclip.js:
diff --git a/test/mjsunit/debug-liveedit-3.js b/test/mjsunit/debug-liveedit-3.js
index b68e38d..b210657 100644
--- a/test/mjsunit/debug-liveedit-3.js
+++ b/test/mjsunit/debug-liveedit-3.js
@@ -57,7 +57,8 @@
 print("new source: " + new_source);
 
 var change_log = new Array();
-Debug.LiveEdit.SetScriptSource(script, new_source, change_log);
+var result = Debug.LiveEdit.SetScriptSource(script, new_source, false, change_log);
+print("Result: " + JSON.stringify(result) + "\n");
 print("Change log: " + JSON.stringify(change_log) + "\n");
 
 assertEquals(8, z6());
diff --git a/test/mjsunit/debug-liveedit-breakpoints.js b/test/mjsunit/debug-liveedit-breakpoints.js
index 5c61cf4..f01a8c4 100644
--- a/test/mjsunit/debug-liveedit-breakpoints.js
+++ b/test/mjsunit/debug-liveedit-breakpoints.js
@@ -72,7 +72,8 @@
 print("new source: " + new_source);
 
 var change_log = new Array();
-Debug.LiveEdit.SetScriptSource(script, new_source, change_log);
+var result = Debug.LiveEdit.SetScriptSource(script, new_source, false, change_log);
+print("Result: " + JSON.stringify(result) + "\n");
 print("Change log: " + JSON.stringify(change_log) + "\n");
 
 var breaks = Debug.scriptBreakPoints();
diff --git a/test/mjsunit/debug-liveedit-newsource.js b/test/mjsunit/debug-liveedit-newsource.js
index db256a4..7b8945a 100644
--- a/test/mjsunit/debug-liveedit-newsource.js
+++ b/test/mjsunit/debug-liveedit-newsource.js
@@ -57,7 +57,8 @@
 print("new source: " + new_source);
 
 var change_log = new Array();
-Debug.LiveEdit.SetScriptSource(script, new_source, change_log);
+var result = Debug.LiveEdit.SetScriptSource(script, new_source, false, change_log);
+print("Result: " + JSON.stringify(result) + "\n");
 print("Change log: " + JSON.stringify(change_log) + "\n");
 
 assertEquals("Capybara", ChooseAnimal());
diff --git a/test/mjsunit/math-pow.js b/test/mjsunit/math-pow.js
new file mode 100644
index 0000000..e732955
--- /dev/null
+++ b/test/mjsunit/math-pow.js
@@ -0,0 +1,129 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Tests the special cases specified by ES 15.8.2.13
+
+// Simple sanity check
+assertEquals(4, Math.pow(2, 2));
+assertEquals(2147483648, Math.pow(2, 31));
+assertEquals(0.25, Math.pow(2, -2));
+assertEquals(0.0625, Math.pow(2, -4));
+assertEquals(1, Math.pow(1, 100));
+assertEquals(0, Math.pow(0, 1000));
+
+// Spec tests
+assertEquals(NaN, Math.pow(2, NaN));
+assertEquals(NaN, Math.pow(+0, NaN));
+assertEquals(NaN, Math.pow(-0, NaN));
+assertEquals(NaN, Math.pow(Infinity, NaN));
+assertEquals(NaN, Math.pow(-Infinity, NaN));
+
+assertEquals(1, Math.pow(NaN, +0));
+assertEquals(1, Math.pow(NaN, -0));
+
+assertEquals(NaN, Math.pow(NaN, NaN));
+assertEquals(NaN, Math.pow(NaN, 2.2));
+assertEquals(NaN, Math.pow(NaN, 1));
+assertEquals(NaN, Math.pow(NaN, -1));
+assertEquals(NaN, Math.pow(NaN, -2.2));
+assertEquals(NaN, Math.pow(NaN, Infinity));
+assertEquals(NaN, Math.pow(NaN, -Infinity));
+
+assertEquals(Infinity, Math.pow(1.1, Infinity));
+assertEquals(Infinity, Math.pow(-1.1, Infinity));
+assertEquals(Infinity, Math.pow(2, Infinity));
+assertEquals(Infinity, Math.pow(-2, Infinity));
+
+assertEquals(+0, Math.pow(1.1, -Infinity));
+assertEquals(+0, Math.pow(-1.1, -Infinity));
+assertEquals(+0, Math.pow(2, -Infinity));
+assertEquals(+0, Math.pow(-2, -Infinity));
+
+assertEquals(NaN, Math.pow(1, Infinity));
+assertEquals(NaN, Math.pow(1, -Infinity));
+assertEquals(NaN, Math.pow(-1, Infinity));
+assertEquals(NaN, Math.pow(-1, -Infinity));
+
+assertEquals(+0, Math.pow(0.1, Infinity));
+assertEquals(+0, Math.pow(-0.1, Infinity));
+assertEquals(+0, Math.pow(0.999, Infinity));
+assertEquals(+0, Math.pow(-0.999, Infinity));
+
+assertEquals(Infinity, Math.pow(0.1, -Infinity));
+assertEquals(Infinity, Math.pow(-0.1, -Infinity));
+assertEquals(Infinity, Math.pow(0.999, -Infinity));
+assertEquals(Infinity, Math.pow(-0.999, -Infinity));
+
+assertEquals(Infinity, Math.pow(Infinity, 0.1));
+assertEquals(Infinity, Math.pow(Infinity, 2));
+
+assertEquals(+0, Math.pow(Infinity, -0.1));
+assertEquals(+0, Math.pow(Infinity, -2));
+
+assertEquals(-Infinity, Math.pow(-Infinity, 3));
+assertEquals(-Infinity, Math.pow(-Infinity, 13));
+
+assertEquals(Infinity, Math.pow(-Infinity, 3.1));
+assertEquals(Infinity, Math.pow(-Infinity, 2));
+
+assertEquals(-0, Math.pow(-Infinity, -3));
+assertEquals(-0, Math.pow(-Infinity, -13));
+
+assertEquals(+0, Math.pow(-Infinity, -3.1));
+assertEquals(+0, Math.pow(-Infinity, -2));
+
+assertEquals(+0, Math.pow(+0, 1.1));
+assertEquals(+0, Math.pow(+0, 2));
+
+assertEquals(Infinity, Math.pow(+0, -1.1));
+assertEquals(Infinity, Math.pow(+0, -2));
+
+assertEquals(-0, Math.pow(-0, 3));
+assertEquals(-0, Math.pow(-0, 13));
+
+assertEquals(+0, Math.pow(-0, 3.1));
+assertEquals(+0, Math.pow(-0, 2));
+
+assertEquals(-Infinity, Math.pow(-0, -3));
+assertEquals(-Infinity, Math.pow(-0, -13));
+
+assertEquals(Infinity, Math.pow(-0, -3.1));
+assertEquals(Infinity, Math.pow(-0, -2));
+
+assertEquals(NaN, Math.pow(-0.00001, 1.1));
+assertEquals(NaN, Math.pow(-0.00001, -1.1));
+assertEquals(NaN, Math.pow(-1.1, 1.1));
+assertEquals(NaN, Math.pow(-1.1, -1.1));
+assertEquals(NaN, Math.pow(-2, 1.1));
+assertEquals(NaN, Math.pow(-2, -1.1));
+assertEquals(NaN, Math.pow(-1000, 1.1));
+assertEquals(NaN, Math.pow(-1000, -1.1));
+
+// Tests from Sputnik S8.5_A13_T1.
+assertTrue((1*((Math.pow(2,53))-1)*(Math.pow(2,-1074))) === 4.4501477170144023e-308);
+assertTrue((1*(Math.pow(2,52))*(Math.pow(2,-1074))) === 2.2250738585072014e-308);
+assertTrue((-1*(Math.pow(2,52))*(Math.pow(2,-1074))) === -2.2250738585072014e-308);
diff --git a/test/mjsunit/object-prevent-extensions.js b/test/mjsunit/object-prevent-extensions.js
new file mode 100644
index 0000000..ebc2cfa6
--- /dev/null
+++ b/test/mjsunit/object-prevent-extensions.js
@@ -0,0 +1,157 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Tests the Object.preventExtensions method - ES 15.2.3.10
+
+
+var obj1 = {};
+// Extensible defaults to true.
+assertTrue(Object.isExtensible(obj1));
+Object.preventExtensions(obj1);
+
+// Make sure the is_extensible flag is set. 
+assertFalse(Object.isExtensible(obj1));
+// Try adding a new property.
+try {
+  obj1.x = 42;
+  assertUnreachable();
+} catch (e) {
+  assertTrue(/object is not extensible/.test(e));
+}
+assertEquals(undefined, obj1.x);
+
+// Try adding a new element.
+try {
+  obj1[1] = 42;
+  assertUnreachable();
+} catch (e) {
+  assertTrue(/object is not extensible/.test(e));
+}
+assertEquals(undefined, obj1[1]);
+
+
+// Try when the object has an existing property.
+var obj2 = {};
+assertTrue(Object.isExtensible(obj2));
+obj2.x = 42;
+assertEquals(42, obj2.x);
+assertTrue(Object.isExtensible(obj2));
+
+Object.preventExtensions(obj2);
+assertEquals(42, obj2.x);
+
+try {
+  obj2.y = 42;
+  assertUnreachable();
+} catch (e) {
+  assertTrue(/object is not extensible/.test(e));
+}
+
+// obj2.y should still be undefined.
+assertEquals(undefined, obj2.y);
+// Make sure we can still write values to obj.x.
+obj2.x = 43;
+assertEquals(43, obj2.x)
+
+try {
+  obj2.y = new function() { return 42; };
+  assertUnreachable();
+} catch (e) {
+  assertTrue(/object is not extensible/.test(e));
+}
+// obj2.y should still be undefined.
+assertEquals(undefined, obj2.y);
+assertEquals(43, obj2.x)
+
+try {
+  Object.defineProperty(obj2, "y", {value: 42});
+} catch (e) {
+  assertTrue(/object is not extensible/.test(e));
+}
+
+// obj2.y should still be undefined.
+assertEquals(undefined, obj2.y);
+assertEquals(43, obj2.x);
+
+try {
+  obj2[1] = 42;
+} catch (e) {
+  assertTrue(/object is not extensible/.test(e));
+}
+
+assertEquals(undefined, obj2[1]);
+
+var arr = new Array();
+arr[1] = 10;
+
+Object.preventExtensions(arr);
+
+try {
+  arr[2] = 42;
+  assertUnreachable();
+} catch (e) {
+  assertTrue(/object is not extensible/.test(e));
+}
+assertEquals(10, arr[1]);
+
+// We should still be able to change exiting elements.
+arr[1]= 42;
+assertEquals(42, arr[1]);
+
+
+// Test the the extensible flag is not inherited.
+var parent = {};
+parent.x = 42;
+Object.preventExtensions(parent);
+
+var child = Object.create(parent);
+
+// We should be able to add new properties to the child object.
+child.y = 42;
+
+// This should have no influence on the parent class.
+try {
+  parent.y = 29;
+  assertUnreachable();
+} catch (e) {
+  assertTrue(/object is not extensible/.test(e));
+}
+
+
+// Test that attributes on functions are also handled correctly.
+function foo() {
+  return 42;
+}
+
+Object.preventExtensions(foo);
+
+try {
+  foo.x = 29;
+  assertUnreachable();
+} catch (e) {
+  assertTrue(/object is not extensible/.test(e));
+}
diff --git a/test/mjsunit/store-dictionary.js b/test/mjsunit/store-dictionary.js
new file mode 100644
index 0000000..45e254b
--- /dev/null
+++ b/test/mjsunit/store-dictionary.js
@@ -0,0 +1,65 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test dictionary store ICs.
+
+// Function that stores property 'x' on an object.
+function store(obj) { obj.x = 42; }
+
+// Create object and force it to dictionary mode by deleting property.
+var o = { x: 32, y: 33 };
+delete o.y;
+
+// Make the store ic in the 'store' function go into dictionary store
+// case.
+for (var i = 0; i < 3; i++) {
+  store(o);
+}
+assertEquals(42, o.x);
+
+// Test that READ_ONLY property attribute is respected. Make 'x'
+// READ_ONLY.
+Object.defineProperty(o, 'x', { value: 32, writable: false });
+
+// Attempt to store using the store ic in the 'store' function.
+store(o);
+
+// Check that the store did not change the value.
+assertEquals(32, o.x);
+
+// Check that bail-out code works.
+// Smi.
+store(1);
+// Fast case object.
+o = new Object();
+store(o);
+assertEquals(42, o.x);
+// Slow case object without x property.
+delete o.x;
+store(o);
+assertEquals(42, o.x);
+
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index a92576e..8c7ae37 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -75,7 +75,14 @@
         'msvs_settings': {
           'VCCLCompilerTool': {
             'Optimizations': '0',
-            'RuntimeLibrary': '1',
+            
+            'conditions': [
+              ['OS=="win" and component=="shared_library"', {
+                'RuntimeLibrary': '3',  # /MDd
+              }, {
+                'RuntimeLibrary': '1',  # /MTd
+              }],
+            ],
           },
           'VCLinkerTool': {
             'LinkIncremental': '2',
@@ -129,13 +136,20 @@
             },
             'msvs_settings': {
               'VCCLCompilerTool': {
-                'RuntimeLibrary': '0',
                 'Optimizations': '2',
                 'InlineFunctionExpansion': '2',
                 'EnableIntrinsicFunctions': 'true',
                 'FavorSizeOrSpeed': '0',
                 'OmitFramePointers': 'true',
                 'StringPooling': 'true',
+                
+                'conditions': [
+                  ['OS=="win" and component=="shared_library"', {
+                    'RuntimeLibrary': '2',  #/MD
+                  }, {
+                    'RuntimeLibrary': '0',  #/MT
+                  }],
+                ],
               },
               'VCLinkerTool': {
                 'LinkIncremental': '1',
@@ -152,7 +166,6 @@
   'targets': [
     {
       'target_name': 'v8',
-      'type': 'none',
       'conditions': [
         ['v8_use_snapshot=="true"', {
           'dependencies': ['v8_snapshot'],
@@ -160,6 +173,18 @@
         {
           'dependencies': ['v8_nosnapshot'],
         }],
+        ['OS=="win" and component=="shared_library"', {
+          'type': '<(component)',
+          'sources': [
+            '../../src/v8dll-main.cc',
+          ],
+          'defines': [
+            'BUILDING_V8_SHARED'
+          ],
+        },
+        {
+          'type': 'none',
+        }],
       ],
       'direct_dependent_settings': {
         'include_dirs': [
@@ -170,6 +195,13 @@
     {
       'target_name': 'v8_snapshot',
       'type': '<(library)',
+      'conditions': [
+        ['OS=="win" and component=="shared_library"', {
+          'defines': [
+            'BUILDING_V8_SHARED',
+          ],
+        }],
+      ],
       'dependencies': [
         'mksnapshot#host',
         'js2c#host',
@@ -216,7 +248,12 @@
         ['v8_target_arch=="arm" and host_arch=="x64" and _toolset=="host"', {
           'cflags': ['-m32'],
           'ldflags': ['-m32'],
-        }]
+        }],
+        ['OS=="win" and component=="shared_library"', {
+          'defines': [
+            'BUILDING_V8_SHARED',
+          ],
+        }],
       ]
     },
     {
@@ -614,6 +651,11 @@
             'libraries': [ '-lwinmm.lib' ],
           },
         }],
+        ['OS=="win" and component=="shared_library"', {
+          'defines': [
+            'BUILDING_V8_SHARED'
+          ],
+        }],
       ],
     },
     {
@@ -692,10 +734,15 @@
         '../../samples/shell.cc',
       ],
       'conditions': [
-        [ 'OS=="win"', {
+        ['OS=="win"', {
           # This could be gotten by not setting chromium_code, if that's OK.
           'defines': ['_CRT_SECURE_NO_WARNINGS'],
         }],
+        ['OS=="win" and component=="shared_library"', {
+          'defines': [
+            'USING_V8_SHARED',
+          ],
+        }],
       ],
     },
   ],