Version 3.6.5

New incremental garbage collector.

Removed the hard heap size limit (the soft heap size limit is still
700/1400 MB by default).

Implemented ES5 generic Array.prototype.toString (Issue 1361).
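
A self-contained embedder sketch of the new generic behaviour (V8 3.x-era
C++ API; the program and the JS one-liner are purely illustrative):

    #include <stdio.h>
    #include <v8.h>

    int main() {
      v8::HandleScope handle_scope;
      v8::Persistent<v8::Context> context = v8::Context::New();
      v8::Context::Scope context_scope(context);

      // With the generic ES5 Array.prototype.toString, a non-array receiver
      // whose join() is callable is stringified through that join().
      v8::Handle<v8::Value> result = v8::Script::Compile(v8::String::New(
          "Array.prototype.toString.call("
          "{ join: function() { return 'joined'; } })"))->Run();

      v8::String::Utf8Value str(result);
      printf("%s\n", *str);  // prints "joined"

      context.Dispose();
      return 0;
    }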

V8 now allows surrogate pair codes in decodeURIComponent (Issue 1415).

Fixed x64 RegExp start-of-string bug (Issues 1746, 1748).

Fixed propertyIsEnumerable for numeric properties (Issue 1692).

Fixed the MinGW and Windows 2000 builds.

Fixed "Prototype chain is not searched if named property handler does
not set a property" (Issue 1636).

Made RegExp.prototype itself a RegExp object (Issue 1217).
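
A short sketch of the observable change (assumes a HandleScope and an entered
Context, as in the self-contained example further up):

    v8::Handle<v8::Value> result = v8::Script::Compile(v8::String::New(
        "Object.prototype.toString.call(RegExp.prototype)"))->Run();
    v8::String::Utf8Value str(result);
    printf("%s\n", *str);  // now prints "[object RegExp]"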

Disallowed future reserved words as labels in strict mode.
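
For example, a future reserved word such as "interface" can no longer be used
as a label in strict code (same assumed embedding setup as above):

    v8::TryCatch try_catch;
    v8::Handle<v8::Script> script = v8::Script::Compile(v8::String::New(
        "'use strict'; interface: 1;"));
    if (script.IsEmpty()) {
      // Compilation failed with a SyntaxError, as strict mode now requires.
      v8::String::Utf8Value error(try_catch.Exception());
      printf("%s\n", *error);
    }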

Fixed string split to correctly coerce the separator to a string
(Issue 1711).
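
Sketch of the corrected coercion (same assumed embedding setup as above; the
object separator is converted with ToString before splitting):

    v8::Handle<v8::Value> result = v8::Script::Compile(v8::String::New(
        "'a,b,c'.split({ toString: function() { return ','; } }).length"))
            ->Run();
    printf("%d\n", result->Int32Value());  // separator coerced to ',' => 3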

API: Added an optional source length field to the Extension
constructor.
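
A hedged sketch of the extended constructor (the extension name, source, and
class below are made up; the final argument is the new optional source length,
and its default of -1 keeps the old "use strlen(source)" behaviour):

    #include <v8.h>

    static const char kAnswerSource[] = "function answer() { return 42; }";

    // Passing the length explicitly means the source buffer need not be
    // NUL-terminated. Per v8.h, the strings handed to the constructor must
    // live as long as the Extension itself.
    class AnswerExtension : public v8::Extension {
     public:
      AnswerExtension()
          : v8::Extension("v8/answer",                   // name
                          kAnswerSource,                 // source
                          0, NULL,                       // dependencies
                          sizeof(kAnswerSource) - 1) {}  // source_length
    };

    static AnswerExtension answer_extension;
    static v8::DeclareExtension answer_declaration(&answer_extension);

A context then opts in to the extension through the v8::ExtensionConfiguration
argument of v8::Context::New().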

API: Added Debug::DisableAgent to match existing Debug::EnableAgent
(Issue 1573).
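
A minimal sketch pairing the new call with the existing Debug::EnableAgent
(the agent name and port are arbitrary):

    #include <v8.h>
    #include <v8-debug.h>

    void RunWithDebugAgent() {
      // Start the built-in TCP/IP debug agent (existing API).
      v8::Debug::EnableAgent("my-embedder", 5858);
      // ... run scripts, let debugger clients attach ...
      // New in this release: stop the agent and close its TCP/IP connection.
      v8::Debug::DisableAgent();
    }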

Added "native" target to Makefile for the benefit of Linux distros.

Fixed: debugger stops stepping outside evaluate (Issue 1639).

More work on ES-Harmony proxies.  Still hidden behind a flag.

Bug fixes and performance improvements on all platforms.

Review URL: http://codereview.chromium.org/8139027

git-svn-id: http://v8.googlecode.com/svn/trunk@9534 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 99495dd..5d3f272 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,45 @@
+2011-10-05: Version 3.6.5
+
+        New incremental garbage collector.
+
+        Removed the hard heap size limit (the soft heap size limit is still
+        700/1400 MB by default).
+
+        Implemented ES5 generic Array.prototype.toString (Issue 1361).
+
+        V8 now allows surrogate pair codes in decodeURIComponent (Issue 1415).
+
+        Fixed x64 RegExp start-of-string bug (Issues 1746, 1748).
+
+        Fixed propertyIsEnumerable for numeric properties (Issue 1692).
+
+        Fixed the MinGW and Windows 2000 builds.
+
+        Fixed "Prototype chain is not searched if named property handler does
+        not set a property" (Issue 1636).
+
+        Made RegExp.prototype itself a RegExp object (Issue 1217).
+
+        Disallowed future reserved words as labels in strict mode.
+
+        Fixed string split to correctly coerce the separator to a string
+        (Issue 1711).
+
+        API: Added an optional source length field to the Extension
+        constructor.
+
+        API: Added Debug::DisableAgent to match existing Debug::EnableAgent
+        (Issue 1573).
+
+        Added "native" target to Makefile for the benefit of Linux distros.
+
+        Fixed: debugger stops stepping outside evaluate (Issue 1639).
+
+        More work on ES-Harmony proxies.  Still hidden behind a flag.
+
+        Bug fixes and performance improvements on all platforms.
+
+
 2011-09-15: Version 3.6.4
 
         Fixed d8's broken readline history.
diff --git a/Makefile b/Makefile
index a7b2731..ebf6831 100644
--- a/Makefile
+++ b/Makefile
@@ -50,6 +50,10 @@
 ifeq ($(disassembler), on)
   GYPFLAGS += -Dv8_enable_disassembler=1
 endif
+# objectprint=on
+ifeq ($(objectprint), on)
+  GYPFLAGS += -Dv8_object_print=1
+endif
 # snapshot=off
 ifeq ($(snapshot), off)
   GYPFLAGS += -Dv8_use_snapshot='false'
@@ -78,6 +82,7 @@
 # - any arch listed in ARCHES (see below)
 # - any mode listed in MODES
 # - every combination <arch>.<mode>, e.g. "ia32.release"
+# - "native": current host's architecture, release mode
 # - any of the above with .check appended, e.g. "ia32.release.check"
 # - default (no target specified): build all ARCHES and MODES
 # - "check": build all targets and run all tests
@@ -103,7 +108,7 @@
 # File where previously used GYPFLAGS are stored.
 ENVFILE = $(OUTDIR)/environment
 
-.PHONY: all check clean dependencies $(ENVFILE).new \
+.PHONY: all check clean dependencies $(ENVFILE).new native \
         $(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \
         $(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES))
 
@@ -124,6 +129,11 @@
 	                     python -c "print raw_input().capitalize()") \
 	         builddir="$(shell pwd)/$(OUTDIR)/$@"
 
+native: $(OUTDIR)/Makefile-native
+	@$(MAKE) -C "$(OUTDIR)" -f Makefile-native \
+	         CXX="$(CXX)" LINK="$(LINK)" BUILDTYPE=Release \
+	         builddir="$(shell pwd)/$(OUTDIR)/$@"
+
 # Test targets.
 check: all
 	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR)
@@ -140,6 +150,10 @@
 	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR) \
 	    --arch-and-mode=$(basename $@)
 
+native.check: native
+	@tools/test-wrapper-gypbuild.py $(TESTJOBS) --outdir=$(OUTDIR)/native \
+	    --arch-and-mode=.
+
 # Clean targets. You can clean each architecture individually, or everything.
 $(addsuffix .clean,$(ARCHES)):
 	rm -f $(OUTDIR)/Makefile-$(basename $@)
@@ -147,7 +161,12 @@
 	rm -rf $(OUTDIR)/$(basename $@).debug
 	find $(OUTDIR) -regex '.*\(host\|target\)-$(basename $@)\.mk' -delete
 
-clean: $(addsuffix .clean,$(ARCHES))
+native.clean:
+	rm -f $(OUTDIR)/Makefile-native
+	rm -rf $(OUTDIR)/native
+	find $(OUTDIR) -regex '.*\(host\|target\)-native\.mk' -delete
+
+clean: $(addsuffix .clean,$(ARCHES)) native.clean
 
 # GYP file generation targets.
 $(OUTDIR)/Makefile-ia32: $(GYPFILES) $(ENVFILE)
@@ -165,6 +184,10 @@
 	              -Ibuild/standalone.gypi --depth=. -Ibuild/armu.gypi \
 	              -S-arm $(GYPFLAGS)
 
+$(OUTDIR)/Makefile-native: $(GYPFILES) $(ENVFILE)
+	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
+	              -Ibuild/standalone.gypi --depth=. -S-native $(GYPFLAGS)
+
 # Replaces the old with the new environment file if they're different, which
 # will trigger GYP to regenerate Makefiles.
 $(ENVFILE): $(ENVFILE).new
diff --git a/build/common.gypi b/build/common.gypi
index 4e896e0..209e089 100644
--- a/build/common.gypi
+++ b/build/common.gypi
@@ -60,6 +60,8 @@
 
     'v8_enable_disassembler%': 0,
 
+    'v8_object_print%': 0,
+
     'v8_enable_gdbjit%': 0,
 
     # Enable profiling support. Only required on Windows.
@@ -84,6 +86,9 @@
       ['v8_enable_disassembler==1', {
         'defines': ['ENABLE_DISASSEMBLER',],
       }],
+      ['v8_object_print==1', {
+        'defines': ['OBJECT_PRINT',],
+      }],
       ['v8_enable_gdbjit==1', {
         'defines': ['ENABLE_GDB_JIT_INTERFACE',],
       }],
@@ -184,6 +189,9 @@
           }],
         ],
       }],
+      ['OS=="solaris"', {
+        'defines': [ '__C99FEATURES__=1' ],  # isinf() etc.
+      }],
     ],
     'configurations': {
       'Debug': {
@@ -261,7 +269,7 @@
           }],
           ['OS=="win"', {
             'msvs_configuration_attributes': {
-              'OutputDirectory': '$(SolutionDir)$(ConfigurationName)',
+              'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
               'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
               'CharacterSet': '1',
             },
diff --git a/build/standalone.gypi b/build/standalone.gypi
index cb5e133..cdece8c 100644
--- a/build/standalone.gypi
+++ b/build/standalone.gypi
@@ -35,20 +35,24 @@
     'msvs_multi_core_compile%': '1',
     'variables': {
       'variables': {
-        'conditions': [
-          [ 'OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
-            # This handles the Linux platforms we generally deal with. Anything
-            # else gets passed through, which probably won't work very well; such
-            # hosts should pass an explicit target_arch to gyp.
-            'host_arch%':
-              '<!(uname -m | sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/")',
-          }, {  # OS!="linux" and OS!="freebsd" and OS!="openbsd"
-            'host_arch%': 'ia32',
-          }],
-        ],
+        'variables': {
+          'conditions': [
+            [ 'OS=="linux" or OS=="freebsd" or OS=="openbsd"', {
+              # This handles the Linux platforms we generally deal with. Anything
+              # else gets passed through, which probably won't work very well; such
+              # hosts should pass an explicit target_arch to gyp.
+              'host_arch%':
+                '<!(uname -m | sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/")',
+            }, {  # OS!="linux" and OS!="freebsd" and OS!="openbsd"
+              'host_arch%': 'ia32',
+            }],
+          ],
+        },
+        'host_arch%': '<(host_arch)',
+        'target_arch%': '<(host_arch)',
       },
       'host_arch%': '<(host_arch)',
-      'target_arch%': '<(host_arch)',
+      'target_arch%': '<(target_arch)',
       'v8_target_arch%': '<(target_arch)',
     },
     'host_arch%': '<(host_arch)',
diff --git a/include/v8-debug.h b/include/v8-debug.h
old mode 100644
new mode 100755
index 504cbfe..9e85dc4
--- a/include/v8-debug.h
+++ b/include/v8-debug.h
@@ -340,6 +340,11 @@
                           bool wait_for_connection = false);
 
   /**
+    * Disable the V8 builtin debug agent. The TCP/IP connection will be closed.
+    */
+  static void DisableAgent();
+
+  /**
    * Makes V8 process all pending debug messages.
    *
    * From V8 point of view all debug messages come asynchronously (e.g. from
diff --git a/include/v8-profiler.h b/include/v8-profiler.h
index 4febcb9..f67646f 100644
--- a/include/v8-profiler.h
+++ b/include/v8-profiler.h
@@ -307,6 +307,12 @@
    * path from the snapshot root to the current node.
    */
   const HeapGraphNode* GetDominatorNode() const;
+
+  /**
+   * Finds and returns a value from the heap corresponding to this node,
+   * if the value is still reachable.
+   */
+  Handle<Value> GetHeapValue() const;
 };
 
 
diff --git a/include/v8.h b/include/v8.h
index 4b7f6e7..73b7fbe 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -1171,7 +1171,8 @@
    * Get the ExternalAsciiStringResource for an external ASCII string.
    * Returns NULL if IsExternalAscii() doesn't return true.
    */
-  V8EXPORT ExternalAsciiStringResource* GetExternalAsciiStringResource() const;
+  V8EXPORT const ExternalAsciiStringResource* GetExternalAsciiStringResource()
+      const;
 
   static inline String* Cast(v8::Value* obj);
 
@@ -2451,24 +2452,42 @@
 
 // --- Extensions ---
 
+class V8EXPORT ExternalAsciiStringResourceImpl
+    : public String::ExternalAsciiStringResource {
+ public:
+  ExternalAsciiStringResourceImpl() : data_(0), length_(0) {}
+  ExternalAsciiStringResourceImpl(const char* data, size_t length)
+      : data_(data), length_(length) {}
+  const char* data() const { return data_; }
+  size_t length() const { return length_; }
+
+ private:
+  const char* data_;
+  size_t length_;
+};
 
 /**
  * Ignore
  */
 class V8EXPORT Extension {  // NOLINT
  public:
+  // Note that the strings passed into this constructor must live as long
+  // as the Extension itself.
   Extension(const char* name,
             const char* source = 0,
             int dep_count = 0,
-            const char** deps = 0);
+            const char** deps = 0,
+            int source_length = -1);
   virtual ~Extension() { }
   virtual v8::Handle<v8::FunctionTemplate>
       GetNativeFunction(v8::Handle<v8::String> name) {
     return v8::Handle<v8::FunctionTemplate>();
   }
 
-  const char* name() { return name_; }
-  const char* source() { return source_; }
+  const char* name() const { return name_; }
+  size_t source_length() const { return source_length_; }
+  const String::ExternalAsciiStringResource* source() const {
+    return &source_; }
   int dependency_count() { return dep_count_; }
   const char** dependencies() { return deps_; }
   void set_auto_enable(bool value) { auto_enable_ = value; }
@@ -2476,7 +2495,8 @@
 
  private:
   const char* name_;
-  const char* source_;
+  size_t source_length_;  // Expected to be initialized before source_.
+  ExternalAsciiStringResourceImpl source_;
   int dep_count_;
   const char** deps_;
   bool auto_enable_;
@@ -3498,9 +3518,9 @@
  *
  * v8::Locker is a scoped lock object. While it's
  * active (i.e. between its construction and destruction) the current thread is
- * allowed to use the locked isolate. V8 guarantees that an isolate can be locked
- * by at most one thread at any time. In other words, the scope of a v8::Locker is
- * a critical section.
+ * allowed to use the locked isolate. V8 guarantees that an isolate can be
+ * locked by at most one thread at any time. In other words, the scope of a
+ * v8::Locker is a critical section.
  *
  * Sample usage:
 * \code
@@ -3602,8 +3622,8 @@
   static void StopPreemption();
 
   /**
-   * Returns whether or not the locker for a given isolate, or default isolate if NULL is given,
-   * is locked by the current thread.
+   * Returns whether or not the locker for a given isolate, or default isolate
+   * if NULL is given, is locked by the current thread.
    */
   static bool IsLocked(Isolate* isolate = NULL);
 
@@ -3769,7 +3789,7 @@
   static const int kFullStringRepresentationMask = 0x07;
   static const int kExternalTwoByteRepresentationTag = 0x02;
 
-  static const int kJSObjectType = 0xa3;
+  static const int kJSObjectType = 0xa6;
   static const int kFirstNonstringType = 0x80;
   static const int kForeignType = 0x85;
 
diff --git a/src/SConscript b/src/SConscript
old mode 100644
new mode 100755
index 52607f1..f3ae807
--- a/src/SConscript
+++ b/src/SConscript
@@ -84,6 +84,7 @@
     hydrogen.cc
     hydrogen-instructions.cc
     ic.cc
+    incremental-marking.cc
     inspector.cc
     interpreter-irregexp.cc
     isolate.cc
@@ -133,6 +134,7 @@
     v8utils.cc
     variables.cc
     version.cc
+    store-buffer.cc
     zone.cc
     extensions/gc-extension.cc
     extensions/externalize-string-extension.cc
diff --git a/src/api.cc b/src/api.cc
index 5c8a314..7266390 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -185,7 +185,10 @@
   int end_marker;
   heap_stats.end_marker = &end_marker;
   i::Isolate* isolate = i::Isolate::Current();
-  isolate->heap()->RecordStats(&heap_stats, take_snapshot);
+  // BUG(1718):
+  // Don't use the take_snapshot since we don't support HeapIterator here
+  // without doing a special GC.
+  isolate->heap()->RecordStats(&heap_stats, false);
   i::V8::SetFatalError();
   FatalErrorCallback callback = GetFatalErrorHandler();
   {
@@ -501,9 +504,12 @@
 Extension::Extension(const char* name,
                      const char* source,
                      int dep_count,
-                     const char** deps)
+                     const char** deps,
+                     int source_length)
     : name_(name),
-      source_(source),
+      source_length_(source_length >= 0 ?
+                  source_length : (source ? strlen(source) : 0)),
+      source_(source, source_length_),
       dep_count_(dep_count),
       deps_(deps),
       auto_enable_(false) { }
@@ -3204,21 +3210,10 @@
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
-      self,
-      i::JSObject::ALLOW_CREATION));
-  i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
+  i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
   i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
-  EXCEPTION_PREAMBLE(isolate);
-  i::Handle<i::Object> obj = i::SetProperty(
-      hidden_props,
-      key_obj,
-      value_obj,
-      static_cast<PropertyAttributes>(None),
-      i::kNonStrictMode);
-  has_pending_exception = obj.is_null();
-  EXCEPTION_BAILOUT_CHECK(isolate, false);
-  return true;
+  i::Handle<i::Object> result = i::SetHiddenProperty(self, key_obj, value_obj);
+  return *result == *self;
 }
 
 
@@ -3228,20 +3223,9 @@
              return Local<v8::Value>());
   ENTER_V8(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
-      self,
-      i::JSObject::OMIT_CREATION));
-  if (hidden_props->IsUndefined()) {
-    return v8::Local<v8::Value>();
-  }
   i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
-  EXCEPTION_PREAMBLE(isolate);
-  i::Handle<i::Object> result = i::GetProperty(hidden_props, key_obj);
-  has_pending_exception = result.is_null();
-  EXCEPTION_BAILOUT_CHECK(isolate, v8::Local<v8::Value>());
-  if (result->IsUndefined()) {
-    return v8::Local<v8::Value>();
-  }
+  i::Handle<i::Object> result(self->GetHiddenProperty(*key_obj));
+  if (result->IsUndefined()) return v8::Local<v8::Value>();
   return Utils::ToLocal(result);
 }
 
@@ -3252,15 +3236,9 @@
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  i::Handle<i::Object> hidden_props(i::GetHiddenProperties(
-      self,
-      i::JSObject::OMIT_CREATION));
-  if (hidden_props->IsUndefined()) {
-    return true;
-  }
-  i::Handle<i::JSObject> js_obj(i::JSObject::cast(*hidden_props));
   i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
-  return i::DeleteProperty(js_obj, key_obj)->IsTrue();
+  self->DeleteHiddenProperty(*key_obj);
+  return true;
 }
 
 
@@ -3310,22 +3288,12 @@
   i::Handle<i::ExternalArray> array =
       isolate->factory()->NewExternalArray(length, array_type, data);
 
-  // If the object already has external elements, create a new, unique
-  // map if the element type is now changing, because assumptions about
-  // generated code based on the receiver's map will be invalid.
-  i::Handle<i::HeapObject> elements(object->elements());
-  bool cant_reuse_map =
-      elements->map()->IsUndefined() ||
-      !elements->map()->has_external_array_elements() ||
-      elements->map() != isolate->heap()->MapForExternalArrayType(array_type);
-  if (cant_reuse_map) {
-    i::Handle<i::Map> external_array_map =
-        isolate->factory()->GetElementsTransitionMap(
-            i::Handle<i::Map>(object->map()),
-            GetElementsKindFromExternalArrayType(array_type),
-            object->HasFastProperties());
-    object->set_map(*external_array_map);
-  }
+  i::Handle<i::Map> external_array_map =
+      isolate->factory()->GetElementsTransitionMap(
+          object,
+          GetElementsKindFromExternalArrayType(array_type));
+
+  object->set_map(*external_array_map);
   object->set_elements(*array);
 }
 
@@ -3799,10 +3767,11 @@
 void v8::String::VerifyExternalStringResource(
     v8::String::ExternalStringResource* value) const {
   i::Handle<i::String> str = Utils::OpenHandle(this);
-  v8::String::ExternalStringResource* expected;
+  const v8::String::ExternalStringResource* expected;
   if (i::StringShape(*str).IsExternalTwoByte()) {
-    void* resource = i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
-    expected = reinterpret_cast<ExternalStringResource*>(resource);
+    const void* resource =
+        i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
+    expected = reinterpret_cast<const ExternalStringResource*>(resource);
   } else {
     expected = NULL;
   }
@@ -3810,7 +3779,7 @@
 }
 
 
-v8::String::ExternalAsciiStringResource*
+const v8::String::ExternalAsciiStringResource*
       v8::String::GetExternalAsciiStringResource() const {
   i::Handle<i::String> str = Utils::OpenHandle(this);
   if (IsDeadCheck(str->GetIsolate(),
@@ -3818,8 +3787,9 @@
     return NULL;
   }
   if (i::StringShape(*str).IsExternalAscii()) {
-    void* resource = i::Handle<i::ExternalAsciiString>::cast(str)->resource();
-    return reinterpret_cast<ExternalAsciiStringResource*>(resource);
+    const void* resource =
+        i::Handle<i::ExternalAsciiString>::cast(str)->resource();
+    return reinterpret_cast<const ExternalAsciiStringResource*>(resource);
   } else {
     return NULL;
   }
@@ -4009,7 +3979,7 @@
 void v8::V8::LowMemoryNotification() {
   i::Isolate* isolate = i::Isolate::Current();
   if (!isolate->IsInitialized()) return;
-  isolate->heap()->CollectAllGarbage(true);
+  isolate->heap()->CollectAllAvailableGarbage();
 }
 
 
@@ -5480,6 +5450,12 @@
                                                        wait_for_connection);
 }
 
+
+void Debug::DisableAgent() {
+  return i::Isolate::Current()->debugger()->StopAgent();
+}
+
+
 void Debug::ProcessDebugMessages() {
   i::Execution::ProcessDebugMesssages(true);
 }
@@ -5804,6 +5780,16 @@
 }
 
 
+v8::Handle<v8::Value> HeapGraphNode::GetHeapValue() const {
+  i::Isolate* isolate = i::Isolate::Current();
+  IsDeadCheck(isolate, "v8::HeapGraphNode::GetHeapValue");
+  i::Handle<i::HeapObject> object = ToInternal(this)->GetHeapObject();
+  return v8::Handle<Value>(!object.is_null() ?
+                           ToApi<Value>(object) : ToApi<Value>(
+                               isolate->factory()->undefined_value()));
+}
+
+
 static i::HeapSnapshot* ToInternal(const HeapSnapshot* snapshot) {
   return const_cast<i::HeapSnapshot*>(
       reinterpret_cast<const i::HeapSnapshot*>(snapshot));
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index 3e19a45..54c291d 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -77,6 +77,11 @@
 void RelocInfo::set_target_address(Address target) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
   Assembler::set_target_address_at(pc_, target);
+  if (host() != NULL && IsCodeTarget(rmode_)) {
+    Object* target_code = Code::GetCodeFromTargetAddress(target);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target_code));
+  }
 }
 
 
@@ -101,6 +106,10 @@
 void RelocInfo::set_target_object(Object* target) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
+  if (host() != NULL && target->IsHeapObject()) {
+    host()->GetHeap()->incremental_marking()->RecordWrite(
+        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+  }
 }
 
 
@@ -131,6 +140,12 @@
   ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
   Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
   Memory::Address_at(pc_) = address;
+  if (host() != NULL) {
+    // TODO(1550) We are passing NULL as a slot because the cell can never
+    // be on an evacuation candidate.
+    host()->GetHeap()->incremental_marking()->RecordWrite(
+        host(), NULL, cell);
+  }
 }
 
 
@@ -147,6 +162,11 @@
   ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
+  if (host() != NULL) {
+    Object* target_code = Code::GetCodeFromTargetAddress(target);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target_code));
+  }
 }
 
 
@@ -195,7 +215,7 @@
 void RelocInfo::Visit(ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    visitor->VisitPointer(target_object_address());
+    visitor->VisitEmbeddedPointer(host(), target_object_address());
   } else if (RelocInfo::IsCodeTarget(mode)) {
     visitor->VisitCodeTarget(this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
@@ -221,7 +241,7 @@
 void RelocInfo::Visit(Heap* heap) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    StaticVisitor::VisitPointer(heap, target_object_address());
+    StaticVisitor::VisitEmbeddedPointer(heap, host(), target_object_address());
   } else if (RelocInfo::IsCodeTarget(mode)) {
     StaticVisitor::VisitCodeTarget(heap, this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 0ec3692..329493a 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -78,7 +78,9 @@
 
 
 void CpuFeatures::Probe() {
-  ASSERT(!initialized_);
+  unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
+                                CpuFeaturesImpliedByCompiler());
+  ASSERT(supported_ == 0 || supported_ == standard_features);
 #ifdef DEBUG
   initialized_ = true;
 #endif
@@ -86,8 +88,7 @@
   // Get the features implied by the OS and the compiler settings. This is the
   // minimal set of features which is also alowed for generated code in the
   // snapshot.
-  supported_ |= OS::CpuFeaturesImpliedByPlatform();
-  supported_ |= CpuFeaturesImpliedByCompiler();
+  supported_ |= standard_features;
 
   if (Serializer::enabled()) {
     // No probing for features if we might serialize (generate snapshot).
@@ -2505,7 +2506,8 @@
 
 
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
-  RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
+  // We do not try to reuse pool constants.
+  RelocInfo rinfo(pc_, rmode, data, NULL);
   if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
     // Adjust code for new modes.
     ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
@@ -2537,7 +2539,7 @@
     }
     ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
     if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
-      RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId());
+      RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId(), NULL);
       ClearRecordedAstId();
       reloc_info_writer.Write(&reloc_info_with_ast_id);
     } else {
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 9a58693..d19b64d 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -1209,6 +1209,10 @@
   PositionsRecorder* positions_recorder() { return &positions_recorder_; }
 
   // Read/patch instructions
+  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
+  void instr_at_put(int pos, Instr instr) {
+    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
+  }
   static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
   static void instr_at_put(byte* pc, Instr instr) {
     *reinterpret_cast<Instr*>(pc) = instr;
@@ -1263,12 +1267,6 @@
 
   int buffer_space() const { return reloc_info_writer.pos() - pc_; }
 
-  // Read/patch instructions
-  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
-  void instr_at_put(int pos, Instr instr) {
-    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
-  }
-
   // Decode branch instruction at pos and return branch target pos
   int target_at(int pos);
 
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 60d2081..32b7896 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -582,10 +582,11 @@
   __ bind(&convert_argument);
   __ push(function);  // Preserve the function.
   __ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
-  __ EnterInternalFrame();
-  __ push(r0);
-  __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(r0);
+    __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+  }
   __ pop(function);
   __ mov(argument, r0);
   __ b(&argument_is_string);
@@ -601,10 +602,11 @@
   // create a string wrapper.
   __ bind(&gc_required);
   __ IncrementCounter(counters->string_ctor_gc_required(), 1, r3, r4);
-  __ EnterInternalFrame();
-  __ push(argument);
-  __ CallRuntime(Runtime::kNewStringWrapper, 1);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(argument);
+    __ CallRuntime(Runtime::kNewStringWrapper, 1);
+  }
   __ Ret();
 }
 
@@ -617,12 +619,12 @@
   //  -- sp[...]: constructor arguments
   // -----------------------------------
 
-  Label non_function_call;
+  Label slow, non_function_call;
   // Check that the function is not a smi.
   __ JumpIfSmi(r1, &non_function_call);
   // Check that the function is a JSFunction.
   __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
-  __ b(ne, &non_function_call);
+  __ b(ne, &slow);
 
   // Jump to the function-specific construct stub.
   __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
@@ -631,10 +633,19 @@
 
   // r0: number of arguments
   // r1: called object
+  // r2: object type
+  Label do_call;
+  __ bind(&slow);
+  __ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE));
+  __ b(ne, &non_function_call);
+  __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+  __ jmp(&do_call);
+
   __ bind(&non_function_call);
+  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+  __ bind(&do_call);
   // Set expected number of arguments to zero (not changing r0).
   __ mov(r2, Operand(0, RelocInfo::NONE));
-  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
   __ SetCallKind(r5, CALL_AS_METHOD);
   __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
           RelocInfo::CODE_TARGET);
@@ -650,321 +661,329 @@
   Isolate* isolate = masm->isolate();
 
   // Enter a construct frame.
-  __ EnterConstructFrame();
+  {
+    FrameScope scope(masm, StackFrame::CONSTRUCT);
 
-  // Preserve the two incoming parameters on the stack.
-  __ mov(r0, Operand(r0, LSL, kSmiTagSize));
-  __ push(r0);  // Smi-tagged arguments count.
-  __ push(r1);  // Constructor function.
+    // Preserve the two incoming parameters on the stack.
+    __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+    __ push(r0);  // Smi-tagged arguments count.
+    __ push(r1);  // Constructor function.
 
-  // Try to allocate the object without transitioning into C code. If any of the
-  // preconditions is not met, the code bails out to the runtime call.
-  Label rt_call, allocated;
-  if (FLAG_inline_new) {
-    Label undo_allocation;
+    // Try to allocate the object without transitioning into C code. If any of
+    // the preconditions is not met, the code bails out to the runtime call.
+    Label rt_call, allocated;
+    if (FLAG_inline_new) {
+      Label undo_allocation;
 #ifdef ENABLE_DEBUGGER_SUPPORT
-    ExternalReference debug_step_in_fp =
-        ExternalReference::debug_step_in_fp_address(isolate);
-    __ mov(r2, Operand(debug_step_in_fp));
-    __ ldr(r2, MemOperand(r2));
-    __ tst(r2, r2);
-    __ b(ne, &rt_call);
+      ExternalReference debug_step_in_fp =
+          ExternalReference::debug_step_in_fp_address(isolate);
+      __ mov(r2, Operand(debug_step_in_fp));
+      __ ldr(r2, MemOperand(r2));
+      __ tst(r2, r2);
+      __ b(ne, &rt_call);
 #endif
 
-    // Load the initial map and verify that it is in fact a map.
-    // r1: constructor function
-    __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
-    __ JumpIfSmi(r2, &rt_call);
-    __ CompareObjectType(r2, r3, r4, MAP_TYPE);
-    __ b(ne, &rt_call);
+      // Load the initial map and verify that it is in fact a map.
+      // r1: constructor function
+      __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+      __ JumpIfSmi(r2, &rt_call);
+      __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+      __ b(ne, &rt_call);
 
-    // Check that the constructor is not constructing a JSFunction (see comments
-    // in Runtime_NewObject in runtime.cc). In which case the initial map's
-    // instance type would be JS_FUNCTION_TYPE.
-    // r1: constructor function
-    // r2: initial map
-    __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
-    __ b(eq, &rt_call);
+      // Check that the constructor is not constructing a JSFunction (see
+      // comments in Runtime_NewObject in runtime.cc). In which case the
+      // initial map's instance type would be JS_FUNCTION_TYPE.
+      // r1: constructor function
+      // r2: initial map
+      __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
+      __ b(eq, &rt_call);
 
-    if (count_constructions) {
-      Label allocate;
-      // Decrease generous allocation count.
-      __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
-      MemOperand constructor_count =
-          FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset);
-      __ ldrb(r4, constructor_count);
-      __ sub(r4, r4, Operand(1), SetCC);
-      __ strb(r4, constructor_count);
-      __ b(ne, &allocate);
-
-      __ Push(r1, r2);
-
-      __ push(r1);  // constructor
-      // The call will replace the stub, so the countdown is only done once.
-      __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
-      __ pop(r2);
-      __ pop(r1);
-
-      __ bind(&allocate);
-    }
-
-    // Now allocate the JSObject on the heap.
-    // r1: constructor function
-    // r2: initial map
-    __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
-    __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
-
-    // Allocated the JSObject, now initialize the fields. Map is set to initial
-    // map and properties and elements are set to empty fixed array.
-    // r1: constructor function
-    // r2: initial map
-    // r3: object size
-    // r4: JSObject (not tagged)
-    __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
-    __ mov(r5, r4);
-    ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
-    __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
-    ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
-    __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
-    ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
-    __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
-
-    // Fill all the in-object properties with the appropriate filler.
-    // r1: constructor function
-    // r2: initial map
-    // r3: object size (in words)
-    // r4: JSObject (not tagged)
-    // r5: First in-object property of JSObject (not tagged)
-    __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2));  // End of object.
-    ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
-    { Label loop, entry;
       if (count_constructions) {
+        Label allocate;
+        // Decrease generous allocation count.
+        __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+        MemOperand constructor_count =
+            FieldMemOperand(r3, SharedFunctionInfo::kConstructionCountOffset);
+        __ ldrb(r4, constructor_count);
+        __ sub(r4, r4, Operand(1), SetCC);
+        __ strb(r4, constructor_count);
+        __ b(ne, &allocate);
+
+        __ Push(r1, r2);
+
+        __ push(r1);  // constructor
+        // The call will replace the stub, so the countdown is only done once.
+        __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+        __ pop(r2);
+        __ pop(r1);
+
+        __ bind(&allocate);
+      }
+
+      // Now allocate the JSObject on the heap.
+      // r1: constructor function
+      // r2: initial map
+      __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
+      __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
+
+      // Allocated the JSObject, now initialize the fields. Map is set to
+      // initial map and properties and elements are set to empty fixed array.
+      // r1: constructor function
+      // r2: initial map
+      // r3: object size
+      // r4: JSObject (not tagged)
+      __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
+      __ mov(r5, r4);
+      ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+      __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
+      ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+      __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+      ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
+      __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
+
+      // Fill all the in-object properties with the appropriate filler.
+      // r1: constructor function
+      // r2: initial map
+      // r3: object size (in words)
+      // r4: JSObject (not tagged)
+      // r5: First in-object property of JSObject (not tagged)
+      __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2));  // End of object.
+      ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
+      __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
+      if (count_constructions) {
+        __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
+        __ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+                kBitsPerByte);
+        __ add(r0, r5, Operand(r0, LSL, kPointerSizeLog2));
+        // r0: offset of first field after pre-allocated fields
+        if (FLAG_debug_code) {
+          __ cmp(r0, r6);
+          __ Assert(le, "Unexpected number of pre-allocated property fields.");
+        }
+        __ InitializeFieldsWithFiller(r5, r0, r7);
         // To allow for truncation.
         __ LoadRoot(r7, Heap::kOnePointerFillerMapRootIndex);
-      } else {
-        __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
       }
-      __ b(&entry);
-      __ bind(&loop);
-      __ str(r7, MemOperand(r5, kPointerSize, PostIndex));
-      __ bind(&entry);
-      __ cmp(r5, r6);
-      __ b(lt, &loop);
+      __ InitializeFieldsWithFiller(r5, r6, r7);
+
+      // Add the object tag to make the JSObject real, so that we can continue
+      // and jump into the continuation code at any time from now on. Any
+      // failures need to undo the allocation, so that the heap is in a
+      // consistent state and verifiable.
+      __ add(r4, r4, Operand(kHeapObjectTag));
+
+      // Check if a non-empty properties array is needed. Continue with the
+      // allocated object if not; fall through to the runtime call if it is.
+      // r1: constructor function
+      // r4: JSObject
+      // r5: start of next object (not tagged)
+      __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
+      // The field instance sizes contains both pre-allocated property fields
+      // and in-object properties.
+      __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
+      __ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+              kBitsPerByte);
+      __ add(r3, r3, Operand(r6));
+      __ Ubfx(r6, r0, Map::kInObjectPropertiesByte * kBitsPerByte,
+              kBitsPerByte);
+      __ sub(r3, r3, Operand(r6), SetCC);
+
+      // Done if no extra properties are to be allocated.
+      __ b(eq, &allocated);
+      __ Assert(pl, "Property allocation count failed.");
+
+      // Scale the number of elements by pointer size and add the header for
+      // FixedArrays to the start of the next object calculation from above.
+      // r1: constructor
+      // r3: number of elements in properties array
+      // r4: JSObject
+      // r5: start of next object
+      __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
+      __ AllocateInNewSpace(
+          r0,
+          r5,
+          r6,
+          r2,
+          &undo_allocation,
+          static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
+
+      // Initialize the FixedArray.
+      // r1: constructor
+      // r3: number of elements in properties array
+      // r4: JSObject
+      // r5: FixedArray (not tagged)
+      __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
+      __ mov(r2, r5);
+      ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+      __ str(r6, MemOperand(r2, kPointerSize, PostIndex));
+      ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+      __ mov(r0, Operand(r3, LSL, kSmiTagSize));
+      __ str(r0, MemOperand(r2, kPointerSize, PostIndex));
+
+      // Initialize the fields to undefined.
+      // r1: constructor function
+      // r2: First element of FixedArray (not tagged)
+      // r3: number of elements in properties array
+      // r4: JSObject
+      // r5: FixedArray (not tagged)
+      __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2));  // End of object.
+      ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+      { Label loop, entry;
+        if (count_constructions) {
+          __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
+        } else if (FLAG_debug_code) {
+          __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
+          __ cmp(r7, r8);
+          __ Assert(eq, "Undefined value not loaded.");
+        }
+        __ b(&entry);
+        __ bind(&loop);
+        __ str(r7, MemOperand(r2, kPointerSize, PostIndex));
+        __ bind(&entry);
+        __ cmp(r2, r6);
+        __ b(lt, &loop);
+      }
+
+      // Store the initialized FixedArray into the properties field of
+      // the JSObject
+      // r1: constructor function
+      // r4: JSObject
+      // r5: FixedArray (not tagged)
+      __ add(r5, r5, Operand(kHeapObjectTag));  // Add the heap tag.
+      __ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset));
+
+      // Continue with JSObject being successfully allocated
+      // r1: constructor function
+      // r4: JSObject
+      __ jmp(&allocated);
+
+      // Undo the setting of the new top so that the heap is verifiable. For
+      // example, the map's unused properties potentially do not match the
+      // allocated objects unused properties.
+      // r4: JSObject (previous new top)
+      __ bind(&undo_allocation);
+      __ UndoAllocationInNewSpace(r4, r5);
     }
 
-    // Add the object tag to make the JSObject real, so that we can continue and
-    // jump into the continuation code at any time from now on. Any failures
-    // need to undo the allocation, so that the heap is in a consistent state
-    // and verifiable.
-    __ add(r4, r4, Operand(kHeapObjectTag));
-
-    // Check if a non-empty properties array is needed. Continue with allocated
-    // object if not fall through to runtime call if it is.
+    // Allocate the new receiver object using the runtime call.
     // r1: constructor function
+    __ bind(&rt_call);
+    __ push(r1);  // argument for Runtime_NewObject
+    __ CallRuntime(Runtime::kNewObject, 1);
+    __ mov(r4, r0);
+
+    // Receiver for constructor call allocated.
     // r4: JSObject
-    // r5: start of next object (not tagged)
-    __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
-    // The field instance sizes contains both pre-allocated property fields and
-    // in-object properties.
-    __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
-    __ Ubfx(r6, r0, Map::kPreAllocatedPropertyFieldsByte * 8, 8);
-    __ add(r3, r3, Operand(r6));
-    __ Ubfx(r6, r0, Map::kInObjectPropertiesByte * 8, 8);
-    __ sub(r3, r3, Operand(r6), SetCC);
+    __ bind(&allocated);
+    __ push(r4);
 
-    // Done if no extra properties are to be allocated.
-    __ b(eq, &allocated);
-    __ Assert(pl, "Property allocation count failed.");
+    // Push the function and the allocated receiver from the stack.
+    // sp[0]: receiver (newly allocated object)
+    // sp[1]: constructor function
+    // sp[2]: number of arguments (smi-tagged)
+    __ ldr(r1, MemOperand(sp, kPointerSize));
+    __ push(r1);  // Constructor function.
+    __ push(r4);  // Receiver.
 
-    // Scale the number of elements by pointer size and add the header for
-    // FixedArrays to the start of the next object calculation from above.
-    // r1: constructor
-    // r3: number of elements in properties array
-    // r4: JSObject
-    // r5: start of next object
-    __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
-    __ AllocateInNewSpace(
-        r0,
-        r5,
-        r6,
-        r2,
-        &undo_allocation,
-        static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
-
-    // Initialize the FixedArray.
-    // r1: constructor
-    // r3: number of elements in properties array
-    // r4: JSObject
-    // r5: FixedArray (not tagged)
-    __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
-    __ mov(r2, r5);
-    ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
-    __ str(r6, MemOperand(r2, kPointerSize, PostIndex));
-    ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
-    __ mov(r0, Operand(r3, LSL, kSmiTagSize));
-    __ str(r0, MemOperand(r2, kPointerSize, PostIndex));
-
-    // Initialize the fields to undefined.
+    // Reload the number of arguments from the stack.
     // r1: constructor function
-    // r2: First element of FixedArray (not tagged)
-    // r3: number of elements in properties array
-    // r4: JSObject
-    // r5: FixedArray (not tagged)
-    __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2));  // End of object.
-    ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
-    { Label loop, entry;
-      if (count_constructions) {
-        __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
-      } else if (FLAG_debug_code) {
-        __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
-        __ cmp(r7, r8);
-        __ Assert(eq, "Undefined value not loaded.");
-      }
-      __ b(&entry);
-      __ bind(&loop);
-      __ str(r7, MemOperand(r2, kPointerSize, PostIndex));
-      __ bind(&entry);
-      __ cmp(r2, r6);
-      __ b(lt, &loop);
+    // sp[0]: receiver
+    // sp[1]: constructor function
+    // sp[2]: receiver
+    // sp[3]: constructor function
+    // sp[4]: number of arguments (smi-tagged)
+    __ ldr(r3, MemOperand(sp, 4 * kPointerSize));
+
+    // Setup pointer to last argument.
+    __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+    // Setup number of arguments for function call below
+    __ mov(r0, Operand(r3, LSR, kSmiTagSize));
+
+    // Copy arguments and receiver to the expression stack.
+    // r0: number of arguments
+    // r2: address of last argument (caller sp)
+    // r1: constructor function
+    // r3: number of arguments (smi-tagged)
+    // sp[0]: receiver
+    // sp[1]: constructor function
+    // sp[2]: receiver
+    // sp[3]: constructor function
+    // sp[4]: number of arguments (smi-tagged)
+    Label loop, entry;
+    __ b(&entry);
+    __ bind(&loop);
+    __ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1));
+    __ push(ip);
+    __ bind(&entry);
+    __ sub(r3, r3, Operand(2), SetCC);
+    __ b(ge, &loop);
+
+    // Call the function.
+    // r0: number of arguments
+    // r1: constructor function
+    if (is_api_function) {
+      __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+      Handle<Code> code =
+          masm->isolate()->builtins()->HandleApiCallConstruct();
+      ParameterCount expected(0);
+      __ InvokeCode(code, expected, expected,
+                    RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
+    } else {
+      ParameterCount actual(r0);
+      __ InvokeFunction(r1, actual, CALL_FUNCTION,
+                        NullCallWrapper(), CALL_AS_METHOD);
     }
 
-    // Store the initialized FixedArray into the properties field of
-    // the JSObject
-    // r1: constructor function
-    // r4: JSObject
-    // r5: FixedArray (not tagged)
-    __ add(r5, r5, Operand(kHeapObjectTag));  // Add the heap tag.
-    __ str(r5, FieldMemOperand(r4, JSObject::kPropertiesOffset));
+    // Pop the function from the stack.
+    // sp[0]: constructor function
+    // sp[2]: receiver
+    // sp[3]: constructor function
+    // sp[4]: number of arguments (smi-tagged)
+    __ pop();
 
-    // Continue with JSObject being successfully allocated
-    // r1: constructor function
-    // r4: JSObject
-    __ jmp(&allocated);
+    // Restore context from the frame.
+    // r0: result
+    // sp[0]: receiver
+    // sp[1]: constructor function
+    // sp[2]: number of arguments (smi-tagged)
+    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 
-    // Undo the setting of the new top so that the heap is verifiable. For
-    // example, the map's unused properties potentially do not match the
-    // allocated objects unused properties.
-    // r4: JSObject (previous new top)
-    __ bind(&undo_allocation);
-    __ UndoAllocationInNewSpace(r4, r5);
+    // If the result is an object (in the ECMA sense), we should get rid
+    // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+    // on page 74.
+    Label use_receiver, exit;
+
+    // If the result is a smi, it is *not* an object in the ECMA sense.
+    // r0: result
+    // sp[0]: receiver (newly allocated object)
+    // sp[1]: constructor function
+    // sp[2]: number of arguments (smi-tagged)
+    __ JumpIfSmi(r0, &use_receiver);
+
+    // If the type of the result (stored in its map) is less than
+    // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+    __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE);
+    __ b(ge, &exit);
+
+    // Throw away the result of the constructor invocation and use the
+    // on-stack receiver as the result.
+    __ bind(&use_receiver);
+    __ ldr(r0, MemOperand(sp));
+
+    // Remove receiver from the stack, remove caller arguments, and
+    // return.
+    __ bind(&exit);
+    // r0: result
+    // sp[0]: receiver (newly allocated object)
+    // sp[1]: constructor function
+    // sp[2]: number of arguments (smi-tagged)
+    __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
+
+    // Leave construct frame.
   }
 
-  // Allocate the new receiver object using the runtime call.
-  // r1: constructor function
-  __ bind(&rt_call);
-  __ push(r1);  // argument for Runtime_NewObject
-  __ CallRuntime(Runtime::kNewObject, 1);
-  __ mov(r4, r0);
-
-  // Receiver for constructor call allocated.
-  // r4: JSObject
-  __ bind(&allocated);
-  __ push(r4);
-
-  // Push the function and the allocated receiver from the stack.
-  // sp[0]: receiver (newly allocated object)
-  // sp[1]: constructor function
-  // sp[2]: number of arguments (smi-tagged)
-  __ ldr(r1, MemOperand(sp, kPointerSize));
-  __ push(r1);  // Constructor function.
-  __ push(r4);  // Receiver.
-
-  // Reload the number of arguments from the stack.
-  // r1: constructor function
-  // sp[0]: receiver
-  // sp[1]: constructor function
-  // sp[2]: receiver
-  // sp[3]: constructor function
-  // sp[4]: number of arguments (smi-tagged)
-  __ ldr(r3, MemOperand(sp, 4 * kPointerSize));
-
-  // Setup pointer to last argument.
-  __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-
-  // Setup number of arguments for function call below
-  __ mov(r0, Operand(r3, LSR, kSmiTagSize));
-
-  // Copy arguments and receiver to the expression stack.
-  // r0: number of arguments
-  // r2: address of last argument (caller sp)
-  // r1: constructor function
-  // r3: number of arguments (smi-tagged)
-  // sp[0]: receiver
-  // sp[1]: constructor function
-  // sp[2]: receiver
-  // sp[3]: constructor function
-  // sp[4]: number of arguments (smi-tagged)
-  Label loop, entry;
-  __ b(&entry);
-  __ bind(&loop);
-  __ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1));
-  __ push(ip);
-  __ bind(&entry);
-  __ sub(r3, r3, Operand(2), SetCC);
-  __ b(ge, &loop);
-
-  // Call the function.
-  // r0: number of arguments
-  // r1: constructor function
-  if (is_api_function) {
-    __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-    Handle<Code> code =
-        masm->isolate()->builtins()->HandleApiCallConstruct();
-    ParameterCount expected(0);
-    __ InvokeCode(code, expected, expected,
-                  RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
-  } else {
-    ParameterCount actual(r0);
-    __ InvokeFunction(r1, actual, CALL_FUNCTION,
-                      NullCallWrapper(), CALL_AS_METHOD);
-  }
-
-  // Pop the function from the stack.
-  // sp[0]: constructor function
-  // sp[2]: receiver
-  // sp[3]: constructor function
-  // sp[4]: number of arguments (smi-tagged)
-  __ pop();
-
-  // Restore context from the frame.
-  // r0: result
-  // sp[0]: receiver
-  // sp[1]: constructor function
-  // sp[2]: number of arguments (smi-tagged)
-  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
-  // If the result is an object (in the ECMA sense), we should get rid
-  // of the receiver and use the result; see ECMA-262 section 13.2.2-7
-  // on page 74.
-  Label use_receiver, exit;
-
-  // If the result is a smi, it is *not* an object in the ECMA sense.
-  // r0: result
-  // sp[0]: receiver (newly allocated object)
-  // sp[1]: constructor function
-  // sp[2]: number of arguments (smi-tagged)
-  __ JumpIfSmi(r0, &use_receiver);
-
-  // If the type of the result (stored in its map) is less than
-  // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
-  __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE);
-  __ b(ge, &exit);
-
-  // Throw away the result of the constructor invocation and use the
-  // on-stack receiver as the result.
-  __ bind(&use_receiver);
-  __ ldr(r0, MemOperand(sp));
-
-  // Remove receiver from the stack, remove caller arguments, and
-  // return.
-  __ bind(&exit);
-  // r0: result
-  // sp[0]: receiver (newly allocated object)
-  // sp[1]: constructor function
-  // sp[2]: number of arguments (smi-tagged)
-  __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
-  __ LeaveConstructFrame();
   __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
   __ add(sp, sp, Operand(kPointerSize));
   __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r1, r2);
@@ -997,63 +1016,64 @@
   // r4: argv
   // r5-r7, cp may be clobbered
 
-  // Clear the context before we push it when entering the JS frame.
+  // Clear the context before we push it when entering the internal frame.
   __ mov(cp, Operand(0, RelocInfo::NONE));
 
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Set up the context from the function argument.
-  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+    // Set up the context from the function argument.
+    __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
 
-  // Set up the roots register.
-  ExternalReference roots_address =
-      ExternalReference::roots_address(masm->isolate());
-  __ mov(r10, Operand(roots_address));
+    // Set up the roots register.
+    ExternalReference roots_address =
+        ExternalReference::roots_address(masm->isolate());
+    __ mov(r10, Operand(roots_address));
 
-  // Push the function and the receiver onto the stack.
-  __ push(r1);
-  __ push(r2);
+    // Push the function and the receiver onto the stack.
+    __ push(r1);
+    __ push(r2);
 
-  // Copy arguments to the stack in a loop.
-  // r1: function
-  // r3: argc
-  // r4: argv, i.e. points to first arg
-  Label loop, entry;
-  __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2));
-  // r2 points past last arg.
-  __ b(&entry);
-  __ bind(&loop);
-  __ ldr(r0, MemOperand(r4, kPointerSize, PostIndex));  // read next parameter
-  __ ldr(r0, MemOperand(r0));  // dereference handle
-  __ push(r0);  // push parameter
-  __ bind(&entry);
-  __ cmp(r4, r2);
-  __ b(ne, &loop);
+    // Copy arguments to the stack in a loop.
+    // r1: function
+    // r3: argc
+    // r4: argv, i.e. points to first arg
+    Label loop, entry;
+    __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2));
+    // r2 points past last arg.
+    __ b(&entry);
+    __ bind(&loop);
+    __ ldr(r0, MemOperand(r4, kPointerSize, PostIndex));  // read next parameter
+    __ ldr(r0, MemOperand(r0));  // dereference handle
+    __ push(r0);  // push parameter
+    __ bind(&entry);
+    __ cmp(r4, r2);
+    __ b(ne, &loop);
 
-  // Initialize all JavaScript callee-saved registers, since they will be seen
-  // by the garbage collector as part of handlers.
-  __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
-  __ mov(r5, Operand(r4));
-  __ mov(r6, Operand(r4));
-  __ mov(r7, Operand(r4));
-  if (kR9Available == 1) {
-    __ mov(r9, Operand(r4));
+    // Initialize all JavaScript callee-saved registers, since they will be seen
+    // by the garbage collector as part of handlers.
+    __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+    __ mov(r5, Operand(r4));
+    __ mov(r6, Operand(r4));
+    __ mov(r7, Operand(r4));
+    if (kR9Available == 1) {
+      __ mov(r9, Operand(r4));
+    }
+
+    // Invoke the code and pass argc as r0.
+    __ mov(r0, Operand(r3));
+    if (is_construct) {
+      __ Call(masm->isolate()->builtins()->JSConstructCall());
+    } else {
+      ParameterCount actual(r0);
+      __ InvokeFunction(r1, actual, CALL_FUNCTION,
+                        NullCallWrapper(), CALL_AS_METHOD);
+    }
+    // Exit the JS frame and remove the parameters (except function), and
+    // return.
+    // Respect ABI stack constraint.
   }
-
-  // Invoke the code and pass argc as r0.
-  __ mov(r0, Operand(r3));
-  if (is_construct) {
-    __ Call(masm->isolate()->builtins()->JSConstructCall());
-  } else {
-    ParameterCount actual(r0);
-    __ InvokeFunction(r1, actual, CALL_FUNCTION,
-                      NullCallWrapper(), CALL_AS_METHOD);
-  }
-
-  // Exit the JS frame and remove the parameters (except function), and return.
-  // Respect ABI stack constraint.
-  __ LeaveInternalFrame();
   __ Jump(lr);
 
   // r0: result
@@ -1072,26 +1092,27 @@
 
 void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Preserve the function.
-  __ push(r1);
-  // Push call kind information.
-  __ push(r5);
+    // Preserve the function.
+    __ push(r1);
+    // Push call kind information.
+    __ push(r5);
 
-  // Push the function on the stack as the argument to the runtime function.
-  __ push(r1);
-  __ CallRuntime(Runtime::kLazyCompile, 1);
-  // Calculate the entry point.
-  __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+    // Push the function on the stack as the argument to the runtime function.
+    __ push(r1);
+    __ CallRuntime(Runtime::kLazyCompile, 1);
+    // Calculate the entry point.
+    __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
 
-  // Restore call kind information.
-  __ pop(r5);
-  // Restore saved function.
-  __ pop(r1);
+    // Restore call kind information.
+    __ pop(r5);
+    // Restore saved function.
+    __ pop(r1);
 
-  // Tear down temporary frame.
-  __ LeaveInternalFrame();
+    // Tear down internal frame.
+  }
 
   // Do a tail-call of the compiled function.
   __ Jump(r2);
@@ -1100,26 +1121,27 @@
 
 void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Preserve the function.
-  __ push(r1);
-  // Push call kind information.
-  __ push(r5);
+    // Preserve the function.
+    __ push(r1);
+    // Push call kind information.
+    __ push(r5);
 
-  // Push the function on the stack as the argument to the runtime function.
-  __ push(r1);
-  __ CallRuntime(Runtime::kLazyRecompile, 1);
-  // Calculate the entry point.
-  __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+    // Push the function on the stack as the argument to the runtime function.
+    __ push(r1);
+    __ CallRuntime(Runtime::kLazyRecompile, 1);
+    // Calculate the entry point.
+    __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
 
-  // Restore call kind information.
-  __ pop(r5);
-  // Restore saved function.
-  __ pop(r1);
+    // Restore call kind information.
+    __ pop(r5);
+    // Restore saved function.
+    __ pop(r1);
 
-  // Tear down temporary frame.
-  __ LeaveInternalFrame();
+    // Tear down internal frame.
+  }
 
   // Do a tail-call of the compiled function.
   __ Jump(r2);
@@ -1128,12 +1150,13 @@
 
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
-  __ EnterInternalFrame();
-  // Pass the function and deoptimization type to the runtime system.
-  __ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
-  __ push(r0);
-  __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Pass the function and deoptimization type to the runtime system.
+    __ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
+    __ push(r0);
+    __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+  }
 
   // Get the full codegen state from the stack and untag it -> r6.
   __ ldr(r6, MemOperand(sp, 0 * kPointerSize));
@@ -1173,9 +1196,10 @@
   // the registers without worrying about which of them contain
   // pointers. This seems a bit fragile.
   __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
-  __ EnterInternalFrame();
-  __ CallRuntime(Runtime::kNotifyOSR, 0);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ CallRuntime(Runtime::kNotifyOSR, 0);
+  }
   __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit());
   __ Ret();
 }
@@ -1191,10 +1215,11 @@
   // Lookup the function in the JavaScript frame and push it as an
   // argument to the on-stack replacement function.
   __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  __ EnterInternalFrame();
-  __ push(r0);
-  __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(r0);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+  }
 
   // If the result was -1 it means that we couldn't optimize the
   // function. Just return and continue in the unoptimized version.
@@ -1276,17 +1301,23 @@
     __ b(ge, &shift_arguments);
 
     __ bind(&convert_to_object);
-    __ EnterInternalFrame();  // In order to preserve argument count.
-    __ mov(r0, Operand(r0, LSL, kSmiTagSize));  // Smi-tagged.
-    __ push(r0);
 
-    __ push(r2);
-    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-    __ mov(r2, r0);
+    {
+      // Enter an internal frame in order to preserve argument count.
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ mov(r0, Operand(r0, LSL, kSmiTagSize));  // Smi-tagged.
+      __ push(r0);
 
-    __ pop(r0);
-    __ mov(r0, Operand(r0, ASR, kSmiTagSize));
-    __ LeaveInternalFrame();
+      __ push(r2);
+      __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+      __ mov(r2, r0);
+
+      __ pop(r0);
+      __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+
+      // Exit the internal frame.
+    }
+
     // Restore the function to r1, and the flag to r4.
     __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
     __ mov(r4, Operand(0, RelocInfo::NONE));
@@ -1406,156 +1437,157 @@
   const int kRecvOffset     =  3 * kPointerSize;
   const int kFunctionOffset =  4 * kPointerSize;
 
-  __ EnterInternalFrame();
+  {
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
 
-  __ ldr(r0, MemOperand(fp, kFunctionOffset));  // get the function
-  __ push(r0);
-  __ ldr(r0, MemOperand(fp, kArgsOffset));  // get the args array
-  __ push(r0);
-  __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+    __ ldr(r0, MemOperand(fp, kFunctionOffset));  // get the function
+    __ push(r0);
+    __ ldr(r0, MemOperand(fp, kArgsOffset));  // get the args array
+    __ push(r0);
+    __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
 
-  // Check the stack for overflow. We are not trying to catch
-  // interruptions (e.g. debug break and preemption) here, so the "real stack
-  // limit" is checked.
-  Label okay;
-  __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
-  // Make r2 the space we have left. The stack might already be overflowed
-  // here which will cause r2 to become negative.
-  __ sub(r2, sp, r2);
-  // Check if the arguments will overflow the stack.
-  __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ b(gt, &okay);  // Signed comparison.
+    // Check the stack for overflow. We are not trying to catch
+    // interruptions (e.g. debug break and preemption) here, so the "real stack
+    // limit" is checked.
+    Label okay;
+    __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
+    // Make r2 the space we have left. The stack might already be overflowed
+    // here which will cause r2 to become negative.
+    __ sub(r2, sp, r2);
+    // Check if the arguments will overflow the stack.
+    __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+    __ b(gt, &okay);  // Signed comparison.
 
-  // Out of stack space.
-  __ ldr(r1, MemOperand(fp, kFunctionOffset));
-  __ push(r1);
-  __ push(r0);
-  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
-  // End of stack check.
+    // Out of stack space.
+    __ ldr(r1, MemOperand(fp, kFunctionOffset));
+    __ push(r1);
+    __ push(r0);
+    __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+    // End of stack check.
 
-  // Push current limit and index.
-  __ bind(&okay);
-  __ push(r0);  // limit
-  __ mov(r1, Operand(0, RelocInfo::NONE));  // initial index
-  __ push(r1);
+    // Push current limit and index.
+    __ bind(&okay);
+    __ push(r0);  // limit
+    __ mov(r1, Operand(0, RelocInfo::NONE));  // initial index
+    __ push(r1);
 
-  // Get the receiver.
-  __ ldr(r0, MemOperand(fp, kRecvOffset));
+    // Get the receiver.
+    __ ldr(r0, MemOperand(fp, kRecvOffset));
 
-  // Check that the function is a JS function (otherwise it must be a proxy).
-  Label push_receiver;
-  __ ldr(r1, MemOperand(fp, kFunctionOffset));
-  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
-  __ b(ne, &push_receiver);
+    // Check that the function is a JS function (otherwise it must be a proxy).
+    Label push_receiver;
+    __ ldr(r1, MemOperand(fp, kFunctionOffset));
+    __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+    __ b(ne, &push_receiver);
 
-  // Change context eagerly to get the right global object if necessary.
-  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-  // Load the shared function info while the function is still in r1.
-  __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+    // Change context eagerly to get the right global object if necessary.
+    __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+    // Load the shared function info while the function is still in r1.
+    __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
 
-  // Compute the receiver.
-  // Do not transform the receiver for strict mode functions.
-  Label call_to_object, use_global_receiver;
-  __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
-  __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
-                           kSmiTagSize)));
-  __ b(ne, &push_receiver);
+    // Compute the receiver.
+    // Do not transform the receiver for strict mode functions.
+    Label call_to_object, use_global_receiver;
+    __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
+    __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+                             kSmiTagSize)));
+    __ b(ne, &push_receiver);
 
-  // Do not transform the receiver for strict mode functions.
-  __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
-  __ b(ne, &push_receiver);
+    // Do not transform the receiver for native functions.
+    __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+    __ b(ne, &push_receiver);
 
-  // Compute the receiver in non-strict mode.
-  __ JumpIfSmi(r0, &call_to_object);
-  __ LoadRoot(r1, Heap::kNullValueRootIndex);
-  __ cmp(r0, r1);
-  __ b(eq, &use_global_receiver);
-  __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
-  __ cmp(r0, r1);
-  __ b(eq, &use_global_receiver);
+    // Compute the receiver in non-strict mode.
+    __ JumpIfSmi(r0, &call_to_object);
+    __ LoadRoot(r1, Heap::kNullValueRootIndex);
+    __ cmp(r0, r1);
+    __ b(eq, &use_global_receiver);
+    __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+    __ cmp(r0, r1);
+    __ b(eq, &use_global_receiver);
 
-  // Check if the receiver is already a JavaScript object.
-  // r0: receiver
-  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-  __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
-  __ b(ge, &push_receiver);
+    // Check if the receiver is already a JavaScript object.
+    // r0: receiver
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
+    __ b(ge, &push_receiver);
 
-  // Convert the receiver to a regular object.
-  // r0: receiver
-  __ bind(&call_to_object);
-  __ push(r0);
-  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-  __ b(&push_receiver);
+    // Convert the receiver to a regular object.
+    // r0: receiver
+    __ bind(&call_to_object);
+    __ push(r0);
+    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+    __ b(&push_receiver);
 
-  // Use the current global receiver object as the receiver.
-  __ bind(&use_global_receiver);
-  const int kGlobalOffset =
-      Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
-  __ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
-  __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
-  __ ldr(r0, FieldMemOperand(r0, kGlobalOffset));
-  __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
+    // Use the current global receiver object as the receiver.
+    __ bind(&use_global_receiver);
+    const int kGlobalOffset =
+        Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+    __ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
+    __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
+    __ ldr(r0, FieldMemOperand(r0, kGlobalOffset));
+    __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
 
-  // Push the receiver.
-  // r0: receiver
-  __ bind(&push_receiver);
-  __ push(r0);
+    // Push the receiver.
+    // r0: receiver
+    __ bind(&push_receiver);
+    __ push(r0);
 
-  // Copy all arguments from the array to the stack.
-  Label entry, loop;
-  __ ldr(r0, MemOperand(fp, kIndexOffset));
-  __ b(&entry);
+    // Copy all arguments from the array to the stack.
+    Label entry, loop;
+    __ ldr(r0, MemOperand(fp, kIndexOffset));
+    __ b(&entry);
 
-  // Load the current argument from the arguments array and push it to the
-  // stack.
-  // r0: current argument index
-  __ bind(&loop);
-  __ ldr(r1, MemOperand(fp, kArgsOffset));
-  __ push(r1);
-  __ push(r0);
+    // Load the current argument from the arguments array and push it to the
+    // stack.
+    // r0: current argument index
+    __ bind(&loop);
+    __ ldr(r1, MemOperand(fp, kArgsOffset));
+    __ push(r1);
+    __ push(r0);
 
-  // Call the runtime to access the property in the arguments array.
-  __ CallRuntime(Runtime::kGetProperty, 2);
-  __ push(r0);
+    // Call the runtime to access the property in the arguments array.
+    __ CallRuntime(Runtime::kGetProperty, 2);
+    __ push(r0);
 
-  // Use inline caching to access the arguments.
-  __ ldr(r0, MemOperand(fp, kIndexOffset));
-  __ add(r0, r0, Operand(1 << kSmiTagSize));
-  __ str(r0, MemOperand(fp, kIndexOffset));
+    // Advance the index (a Smi) to the next argument.
+    __ ldr(r0, MemOperand(fp, kIndexOffset));
+    __ add(r0, r0, Operand(1 << kSmiTagSize));
+    __ str(r0, MemOperand(fp, kIndexOffset));
 
-  // Test if the copy loop has finished copying all the elements from the
-  // arguments object.
-  __ bind(&entry);
-  __ ldr(r1, MemOperand(fp, kLimitOffset));
-  __ cmp(r0, r1);
-  __ b(ne, &loop);
+    // Test if the copy loop has finished copying all the elements from the
+    // arguments object.
+    __ bind(&entry);
+    __ ldr(r1, MemOperand(fp, kLimitOffset));
+    __ cmp(r0, r1);
+    __ b(ne, &loop);
 
-  // Invoke the function.
-  Label call_proxy;
-  ParameterCount actual(r0);
-  __ mov(r0, Operand(r0, ASR, kSmiTagSize));
-  __ ldr(r1, MemOperand(fp, kFunctionOffset));
-  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
-  __ b(ne, &call_proxy);
-  __ InvokeFunction(r1, actual, CALL_FUNCTION,
-                    NullCallWrapper(), CALL_AS_METHOD);
+    // Invoke the function.
+    Label call_proxy;
+    ParameterCount actual(r0);
+    __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+    __ ldr(r1, MemOperand(fp, kFunctionOffset));
+    __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
+    __ b(ne, &call_proxy);
+    __ InvokeFunction(r1, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
 
-  // Tear down the internal frame and remove function, receiver and args.
-  __ LeaveInternalFrame();
-  __ add(sp, sp, Operand(3 * kPointerSize));
-  __ Jump(lr);
+    frame_scope.GenerateLeaveFrame();
+    __ add(sp, sp, Operand(3 * kPointerSize));
+    __ Jump(lr);
 
-  // Invoke the function proxy.
-  __ bind(&call_proxy);
-  __ push(r1);  // add function proxy as last argument
-  __ add(r0, r0, Operand(1));
-  __ mov(r2, Operand(0, RelocInfo::NONE));
-  __ SetCallKind(r5, CALL_AS_METHOD);
-  __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
-  __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
-          RelocInfo::CODE_TARGET);
+    // Invoke the function proxy.
+    __ bind(&call_proxy);
+    __ push(r1);  // add function proxy as last argument
+    __ add(r0, r0, Operand(1));
+    __ mov(r2, Operand(0, RelocInfo::NONE));
+    __ SetCallKind(r5, CALL_AS_METHOD);
+    __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
+    __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+            RelocInfo::CODE_TARGET);
 
-  __ LeaveInternalFrame();
+    // Tear down the internal frame and remove function, receiver and args.
+  }
   __ add(sp, sp, Operand(3 * kPointerSize));
   __ Jump(lr);
 }
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index e65f6d9..9163329 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -838,9 +838,11 @@
     __ vmov(d0, r0, r1);
     __ vmov(d1, r2, r3);
   }
-  // Call C routine that may not cause GC or other trouble.
-  __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
-                   0, 2);
+  {
+    AllowExternalCallThatCantCauseGC scope(masm);
+    __ CallCFunction(
+        ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
+  }
   // Store answer in the overwritable heap number. Double returned in
   // registers r0 and r1 or in d0.
   if (masm->use_eabi_hardfloat()) {
@@ -857,6 +859,29 @@
 }
 
 
+bool WriteInt32ToHeapNumberStub::IsPregenerated() {
+  // These variants are compiled ahead of time.  See next method.
+  if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) {
+    return true;
+  }
+  if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) {
+    return true;
+  }
+  // Other register combinations are generated as and when they are needed,
+  // so it is unsafe to call them from stubs (we can't generate a stub while
+  // we are generating a stub).
+  return false;
+}
+
+
+void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
+  WriteInt32ToHeapNumberStub stub1(r1, r0, r2);
+  WriteInt32ToHeapNumberStub stub2(r2, r0, r3);
+  stub1.GetCode()->set_is_pregenerated(true);
+  stub2.GetCode()->set_is_pregenerated(true);
+}
+
+
 // See comment for class.
 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
   Label max_negative_int;
@@ -1197,6 +1222,8 @@
       __ vmov(d0, r0, r1);
       __ vmov(d1, r2, r3);
     }
+
+    AllowExternalCallThatCantCauseGC scope(masm);
     __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
                      0, 2);
     __ pop(pc);  // Return.
@@ -1214,7 +1241,7 @@
     // If either operand is a JS object or an oddball value, then they are
     // not equal since their pointers are different.
     // There is no test for undetectability in strict equality.
-    STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+    STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
     Label first_non_object;
     // Get the type of the first operand into r2 and compare it with
     // FIRST_SPEC_OBJECT_TYPE.
@@ -1606,6 +1633,8 @@
 // The stub expects its argument in the tos_ register and returns its result in
 // it, too: zero for false, and a non-zero value for true.
 void ToBooleanStub::Generate(MacroAssembler* masm) {
+  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
+  // we cannot call anything that could cause a GC from this stub.
   // This stub uses VFP3 instructions.
   CpuFeatures::Scope scope(VFP3);
 
@@ -1713,6 +1742,41 @@
 }
 
 
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+  // We don't allow a GC during a store buffer overflow so there is no need to
+  // store the registers in any particular way, but we do have to store and
+  // restore them.
+  __ stm(db_w, sp, kCallerSaved | lr.bit());
+  if (save_doubles_ == kSaveFPRegs) {
+    CpuFeatures::Scope scope(VFP3);
+    __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
+    for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
+      DwVfpRegister reg = DwVfpRegister::from_code(i);
+      __ vstr(reg, MemOperand(sp, i * kDoubleSize));
+    }
+  }
+  const int argument_count = 1;
+  const int fp_argument_count = 0;
+  const Register scratch = r1;
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
+  __ mov(r0, Operand(ExternalReference::isolate_address()));
+  __ CallCFunction(
+      ExternalReference::store_buffer_overflow_function(masm->isolate()),
+      argument_count);
+  if (save_doubles_ == kSaveFPRegs) {
+    CpuFeatures::Scope scope(VFP3);
+    for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
+      DwVfpRegister reg = DwVfpRegister::from_code(i);
+      __ vldr(reg, MemOperand(sp, i * kDoubleSize));
+    }
+    __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
+  }
+  __ ldm(ia_w, sp, kCallerSaved | pc.bit());  // Also pop pc to get Ret(0).
+}
+
+
 void UnaryOpStub::PrintName(StringStream* stream) {
   const char* op_name = Token::Name(op_);
   const char* overwrite_name = NULL;  // Make g++ happy.
@@ -1866,12 +1930,13 @@
     __ jmp(&heapnumber_allocated);
 
     __ bind(&slow_allocate_heapnumber);
-    __ EnterInternalFrame();
-    __ push(r0);
-    __ CallRuntime(Runtime::kNumberAlloc, 0);
-    __ mov(r1, Operand(r0));
-    __ pop(r0);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(r0);
+      __ CallRuntime(Runtime::kNumberAlloc, 0);
+      __ mov(r1, Operand(r0));
+      __ pop(r0);
+    }
 
     __ bind(&heapnumber_allocated);
     __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
@@ -1912,13 +1977,14 @@
     __ jmp(&heapnumber_allocated);
 
     __ bind(&slow_allocate_heapnumber);
-    __ EnterInternalFrame();
-    __ push(r0);  // Push the heap number, not the untagged int32.
-    __ CallRuntime(Runtime::kNumberAlloc, 0);
-    __ mov(r2, r0);  // Move the new heap number into r2.
-    // Get the heap number into r0, now that the new heap number is in r2.
-    __ pop(r0);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(r0);  // Push the heap number, not the untagged int32.
+      __ CallRuntime(Runtime::kNumberAlloc, 0);
+      __ mov(r2, r0);  // Move the new heap number into r2.
+      // Get the heap number into r0, now that the new heap number is in r2.
+      __ pop(r0);
+    }
 
     // Convert the heap number in r0 to an untagged integer in r1.
     // This can't go slow-case because it's the same number we already
@@ -2028,6 +2094,10 @@
 
 
 void BinaryOpStub::Generate(MacroAssembler* masm) {
+  // Explicitly allow generation of nested stubs. It is safe here because
+  // generation code does not use any raw pointers.
+  AllowStubCallsScope allow_stub_calls(masm, true);
+
   switch (operands_type_) {
     case BinaryOpIC::UNINITIALIZED:
       GenerateTypeTransition(masm);
@@ -3133,10 +3203,11 @@
     __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
     __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
     __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
-    __ EnterInternalFrame();
-    __ push(r0);
-    __ CallRuntime(RuntimeFunction(), 1);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(r0);
+      __ CallRuntime(RuntimeFunction(), 1);
+    }
     __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
     __ Ret();
 
@@ -3149,14 +3220,15 @@
 
     // We return the value in d2 without adding it to the cache, but
     // we cause a scavenging GC so that future allocations will succeed.
-    __ EnterInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
 
-    // Allocate an aligned object larger than a HeapNumber.
-    ASSERT(4 * kPointerSize >= HeapNumber::kSize);
-    __ mov(scratch0, Operand(4 * kPointerSize));
-    __ push(scratch0);
-    __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
-    __ LeaveInternalFrame();
+      // Allocate an aligned object larger than a HeapNumber.
+      ASSERT(4 * kPointerSize >= HeapNumber::kSize);
+      __ mov(scratch0, Operand(4 * kPointerSize));
+      __ push(scratch0);
+      __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+    }
     __ Ret();
   }
 }
@@ -3173,6 +3245,7 @@
   } else {
     __ vmov(r0, r1, d2);
   }
+  AllowExternalCallThatCantCauseGC scope(masm);
   switch (type_) {
     case TranscendentalCache::SIN:
       __ CallCFunction(ExternalReference::math_sin_double_function(isolate),
@@ -3268,11 +3341,14 @@
     __ push(lr);
     __ PrepareCallCFunction(1, 1, scratch);
     __ SetCallCDoubleArguments(double_base, exponent);
-    __ CallCFunction(
-        ExternalReference::power_double_int_function(masm->isolate()),
-        1, 1);
-    __ pop(lr);
-    __ GetCFunctionDoubleResult(double_result);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm);
+      __ CallCFunction(
+          ExternalReference::power_double_int_function(masm->isolate()),
+          1, 1);
+      __ pop(lr);
+      __ GetCFunctionDoubleResult(double_result);
+    }
     __ vstr(double_result,
             FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
     __ mov(r0, heapnumber);
@@ -3298,11 +3374,14 @@
     __ push(lr);
     __ PrepareCallCFunction(0, 2, scratch);
     __ SetCallCDoubleArguments(double_base, double_exponent);
-    __ CallCFunction(
-        ExternalReference::power_double_double_function(masm->isolate()),
-        0, 2);
-    __ pop(lr);
-    __ GetCFunctionDoubleResult(double_result);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm);
+      __ CallCFunction(
+          ExternalReference::power_double_double_function(masm->isolate()),
+          0, 2);
+      __ pop(lr);
+      __ GetCFunctionDoubleResult(double_result);
+    }
     __ vstr(double_result,
             FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
     __ mov(r0, heapnumber);
@@ -3319,6 +3398,37 @@
 }
 
 
+bool CEntryStub::IsPregenerated() {
+  return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
+          result_size_ == 1;
+}
+
+
+void CodeStub::GenerateStubsAheadOfTime() {
+  CEntryStub::GenerateAheadOfTime();
+  WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
+  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
+  RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+}
+
+
+void CodeStub::GenerateFPStubs() {
+  CEntryStub save_doubles(1, kSaveFPRegs);
+  Handle<Code> code = save_doubles.GetCode();
+  code->set_is_pregenerated(true);
+  StoreBufferOverflowStub stub(kSaveFPRegs);
+  stub.GetCode()->set_is_pregenerated(true);
+  code->GetIsolate()->set_fp_stubs_generated(true);
+}
+
+
+void CEntryStub::GenerateAheadOfTime() {
+  CEntryStub stub(1, kDontSaveFPRegs);
+  Handle<Code> code = stub.GetCode();
+  code->set_is_pregenerated(true);
+}
+
+
 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
   __ Throw(r0);
 }
@@ -3430,8 +3540,7 @@
   __ b(eq, throw_out_of_memory_exception);
 
   // Retrieve the pending exception and clear the variable.
-  __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
-  __ ldr(r3, MemOperand(ip));
+  __ mov(r3, Operand(isolate->factory()->the_hole_value()));
   __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                        isolate)));
   __ ldr(r0, MemOperand(ip));
@@ -3469,6 +3578,7 @@
   __ sub(r6, r6, Operand(kPointerSize));
 
   // Enter the exit frame that transitions from JavaScript to C++.
+  FrameScope scope(masm, StackFrame::MANUAL);
   __ EnterExitFrame(save_doubles_);
 
   // Setup argc and the builtin function in callee-saved registers.
@@ -3613,8 +3723,7 @@
   // saved values before returning a failure to C.
 
   // Clear any pending exceptions.
-  __ mov(ip, Operand(ExternalReference::the_hole_value_location(isolate)));
-  __ ldr(r5, MemOperand(ip));
+  __ mov(r5, Operand(isolate->factory()->the_hole_value()));
   __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                        isolate)));
   __ str(r5, MemOperand(ip));
@@ -3851,10 +3960,11 @@
     }
   __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
   } else {
-    __ EnterInternalFrame();
-    __ Push(r0, r1);
-    __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(r0, r1);
+      __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+    }
     __ cmp(r0, Operand::Zero());
     __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
     __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
@@ -4480,8 +4590,7 @@
 
   // For arguments 4 and 3 get string length, calculate start of string data and
   // calculate the shift of the index (0 for ASCII and 1 for two byte).
-  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
-  __ add(r8, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
   __ eor(r3, r3, Operand(1));
   // Load the length from the original subject string from the previous stack
   // frame. Therefore we have to use fp, which points exactly to two pointer
@@ -4532,8 +4641,7 @@
   // stack overflow (on the backtrack stack) was detected in RegExp code but
   // haven't created the exception yet. Handle that in the runtime system.
   // TODO(592): Rerunning the RegExp to get the stack overflow exception.
-  __ mov(r1, Operand(ExternalReference::the_hole_value_location(isolate)));
-  __ ldr(r1, MemOperand(r1, 0));
+  __ mov(r1, Operand(isolate->factory()->the_hole_value()));
   __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                        isolate)));
   __ ldr(r0, MemOperand(r2, 0));
@@ -4575,16 +4683,25 @@
   __ str(r2, FieldMemOperand(last_match_info_elements,
                              RegExpImpl::kLastCaptureCountOffset));
   // Store last subject and last input.
-  __ mov(r3, last_match_info_elements);  // Moved up to reduce latency.
   __ str(subject,
          FieldMemOperand(last_match_info_elements,
                          RegExpImpl::kLastSubjectOffset));
-  __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7);
+  __ mov(r2, subject);
+  __ RecordWriteField(last_match_info_elements,
+                      RegExpImpl::kLastSubjectOffset,
+                      r2,
+                      r7,
+                      kLRHasNotBeenSaved,
+                      kDontSaveFPRegs);
   __ str(subject,
          FieldMemOperand(last_match_info_elements,
                          RegExpImpl::kLastInputOffset));
-  __ mov(r3, last_match_info_elements);
-  __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7);
+  __ RecordWriteField(last_match_info_elements,
+                      RegExpImpl::kLastInputOffset,
+                      subject,
+                      r7,
+                      kLRHasNotBeenSaved,
+                      kDontSaveFPRegs);
 
   // Get the static offsets vector filled by the native regexp code.
   ExternalReference address_of_static_offsets_vector =
@@ -4712,6 +4829,22 @@
 }
 
 
+void CallFunctionStub::FinishCode(Code* code) {
+  code->set_has_function_cache(false);
+}
+
+
+void CallFunctionStub::Clear(Heap* heap, Address address) {
+  UNREACHABLE();
+}
+
+
+Object* CallFunctionStub::GetCachedValue(Address address) {
+  UNREACHABLE();
+  return NULL;
+}
+
+
 void CallFunctionStub::Generate(MacroAssembler* masm) {
   Label slow, non_function;
 
@@ -6425,12 +6558,13 @@
   // Call the runtime system in a fresh internal frame.
   ExternalReference miss =
       ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
-  __ EnterInternalFrame();
-  __ Push(r1, r0);
-  __ mov(ip, Operand(Smi::FromInt(op_)));
-  __ push(ip);
-  __ CallExternalReference(miss, 3);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(r1, r0);
+    __ mov(ip, Operand(Smi::FromInt(op_)));
+    __ push(ip);
+    __ CallExternalReference(miss, 3);
+  }
   // Compute the entry point of the rewritten stub.
   __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
   // Restore registers.
@@ -6613,6 +6747,8 @@
 
 
 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
+  // we cannot call anything that could cause a GC from this stub.
   // Registers:
   //  result: StringDictionary to probe
   //  r1: key
@@ -6702,6 +6838,267 @@
 }
 
 
+struct AheadOfTimeWriteBarrierStubList {
+  Register object, value, address;
+  RememberedSetAction action;
+};
+
+
+struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
+  // Used in RegExpExecStub.
+  { r6, r4, r7, EMIT_REMEMBERED_SET },
+  { r6, r2, r7, EMIT_REMEMBERED_SET },
+  // Used in CompileArrayPushCall.
+  // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
+  // Also used in KeyedStoreIC::GenerateGeneric.
+  { r3, r4, r5, EMIT_REMEMBERED_SET },
+  // Used in CompileStoreGlobal.
+  { r4, r1, r2, OMIT_REMEMBERED_SET },
+  // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
+  { r1, r2, r3, EMIT_REMEMBERED_SET },
+  { r3, r2, r1, EMIT_REMEMBERED_SET },
+  // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
+  { r2, r1, r3, EMIT_REMEMBERED_SET },
+  { r3, r1, r2, EMIT_REMEMBERED_SET },
+  // KeyedStoreStubCompiler::GenerateStoreFastElement.
+  { r4, r2, r3, EMIT_REMEMBERED_SET },
+  // Null termination.
+  { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
+};
+
+
+bool RecordWriteStub::IsPregenerated() {
+  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+       !entry->object.is(no_reg);
+       entry++) {
+    if (object_.is(entry->object) &&
+        value_.is(entry->value) &&
+        address_.is(entry->address) &&
+        remembered_set_action_ == entry->action &&
+        save_fp_regs_mode_ == kDontSaveFPRegs) {
+      return true;
+    }
+  }
+  return false;
+}
+
+
+bool StoreBufferOverflowStub::IsPregenerated() {
+  return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
+}
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
+  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
+  stub1.GetCode()->set_is_pregenerated(true);
+}
+
+
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+       !entry->object.is(no_reg);
+       entry++) {
+    RecordWriteStub stub(entry->object,
+                         entry->value,
+                         entry->address,
+                         entry->action,
+                         kDontSaveFPRegs);
+    stub.GetCode()->set_is_pregenerated(true);
+  }
+}
+
+
+// Takes the input in 3 registers: address_, value_ and object_.  A pointer to
+// the value has just been written into the object, now this stub makes sure
+// we keep the GC informed.  The word in the object where the value has been
+// written is in the address register.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+  Label skip_to_incremental_noncompacting;
+  Label skip_to_incremental_compacting;
+
+  // The first two instructions are generated with labels so as to get the
+  // offset fixed up correctly by the bind(Label*) call.  We patch them back
+  // and forth between a compare instruction (a nop in this position) and the
+  // real branch when we start and stop incremental heap marking.
+  // See RecordWriteStub::Patch for details.
+  __ b(&skip_to_incremental_noncompacting);
+  __ b(&skip_to_incremental_compacting);
+
+  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  }
+  __ Ret();
+
+  __ bind(&skip_to_incremental_noncompacting);
+  GenerateIncremental(masm, INCREMENTAL);
+
+  __ bind(&skip_to_incremental_compacting);
+  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
+  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
+  ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
+  ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
+  PatchBranchIntoNop(masm, 0);
+  PatchBranchIntoNop(masm, Assembler::kInstrSize);
+}
+
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+  regs_.Save(masm);
+
+  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+    Label dont_need_remembered_set;
+
+    __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
+    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
+                           regs_.scratch0(),
+                           &dont_need_remembered_set);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch0(),
+                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
+                     ne,
+                     &dont_need_remembered_set);
+
+    // First notify the incremental marker if necessary, then update the
+    // remembered set.
+    CheckNeedsToInformIncrementalMarker(
+        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
+    InformIncrementalMarker(masm, mode);
+    regs_.Restore(masm);
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+
+    __ bind(&dont_need_remembered_set);
+  }
+
+  CheckNeedsToInformIncrementalMarker(
+      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
+  InformIncrementalMarker(masm, mode);
+  regs_.Restore(masm);
+  __ Ret();
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+  int argument_count = 3;
+  __ PrepareCallCFunction(argument_count, regs_.scratch0());
+  Register address =
+      r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
+  ASSERT(!address.is(regs_.object()));
+  ASSERT(!address.is(r0));
+  __ Move(address, regs_.address());
+  __ Move(r0, regs_.object());
+  if (mode == INCREMENTAL_COMPACTION) {
+    __ Move(r1, address);
+  } else {
+    ASSERT(mode == INCREMENTAL);
+    __ ldr(r1, MemOperand(address, 0));
+  }
+  __ mov(r2, Operand(ExternalReference::isolate_address()));
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  if (mode == INCREMENTAL_COMPACTION) {
+    __ CallCFunction(
+        ExternalReference::incremental_evacuation_record_write_function(
+            masm->isolate()),
+        argument_count);
+  } else {
+    ASSERT(mode == INCREMENTAL);
+    __ CallCFunction(
+        ExternalReference::incremental_marking_record_write_function(
+            masm->isolate()),
+        argument_count);
+  }
+  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+}
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+    MacroAssembler* masm,
+    OnNoNeedToInformIncrementalMarker on_no_need,
+    Mode mode) {
+  Label on_black;
+  Label need_incremental;
+  Label need_incremental_pop_scratch;
+
+  // Let's look at the color of the object:  If it is not black we don't have
+  // to inform the incremental marker.
+  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ Ret();
+  }
+
+  __ bind(&on_black);
+
+  // Get the value from the slot.
+  __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
+
+  if (mode == INCREMENTAL_COMPACTION) {
+    Label ensure_not_white;
+
+    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kEvacuationCandidateMask,
+                     eq,
+                     &ensure_not_white);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
+                     eq,
+                     &need_incremental);
+
+    __ bind(&ensure_not_white);
+  }
+
+  // We need extra registers for this, so we push the object and the address
+  // register temporarily.
+  __ Push(regs_.object(), regs_.address());
+  __ EnsureNotWhite(regs_.scratch0(),  // The value.
+                    regs_.scratch1(),  // Scratch.
+                    regs_.object(),  // Scratch.
+                    regs_.address(),  // Scratch.
+                    &need_incremental_pop_scratch);
+  __ Pop(regs_.object(), regs_.address());
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ Ret();
+  }
+
+  __ bind(&need_incremental_pop_scratch);
+  __ Pop(regs_.object(), regs_.address());
+
+  __ bind(&need_incremental);
+
+  // Fall through when we need to inform the incremental marker.
+}
+
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
index 557f7e6..3ba75ba 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -58,6 +58,25 @@
 };
 
 
+class StoreBufferOverflowStub: public CodeStub {
+ public:
+  explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
+      : save_doubles_(save_fp) { }
+
+  void Generate(MacroAssembler* masm);
+
+  virtual bool IsPregenerated();
+  static void GenerateFixedRegStubsAheadOfTime();
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+  SaveFPRegsMode save_doubles_;
+
+  Major MajorKey() { return StoreBufferOverflow; }
+  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+};
+
+
 class UnaryOpStub: public CodeStub {
  public:
   UnaryOpStub(Token::Value op,
@@ -323,6 +342,9 @@
         the_heap_number_(the_heap_number),
         scratch_(scratch) { }
 
+  bool IsPregenerated();
+  static void GenerateFixedRegStubsAheadOfTime();
+
  private:
   Register the_int_;
   Register the_heap_number_;
@@ -371,6 +393,225 @@
 };
 
 
+class RecordWriteStub: public CodeStub {
+ public:
+  RecordWriteStub(Register object,
+                  Register value,
+                  Register address,
+                  RememberedSetAction remembered_set_action,
+                  SaveFPRegsMode fp_mode)
+      : object_(object),
+        value_(value),
+        address_(address),
+        remembered_set_action_(remembered_set_action),
+        save_fp_regs_mode_(fp_mode),
+        regs_(object,   // An input reg.
+              address,  // An input reg.
+              value) {  // One scratch reg.
+  }
+
+  enum Mode {
+    STORE_BUFFER_ONLY,
+    INCREMENTAL,
+    INCREMENTAL_COMPACTION
+  };
+
+  virtual bool IsPregenerated();
+  static void GenerateFixedRegStubsAheadOfTime();
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
+  static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
+    masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20));
+    ASSERT(Assembler::IsTstImmediate(masm->instr_at(pos)));
+  }
+
+  static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
+    masm->instr_at_put(pos, (masm->instr_at(pos) & ~(B24 | B20)) | B27);
+    ASSERT(Assembler::IsBranch(masm->instr_at(pos)));
+  }
+
+  static Mode GetMode(Code* stub) {
+    Instr first_instruction = Assembler::instr_at(stub->instruction_start());
+    Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
+                                                   Assembler::kInstrSize);
+
+    if (Assembler::IsBranch(first_instruction)) {
+      return INCREMENTAL;
+    }
+
+    ASSERT(Assembler::IsTstImmediate(first_instruction));
+
+    if (Assembler::IsBranch(second_instruction)) {
+      return INCREMENTAL_COMPACTION;
+    }
+
+    ASSERT(Assembler::IsTstImmediate(second_instruction));
+
+    return STORE_BUFFER_ONLY;
+  }
+
+  static void Patch(Code* stub, Mode mode) {
+    MacroAssembler masm(NULL,
+                        stub->instruction_start(),
+                        stub->instruction_size());
+    switch (mode) {
+      case STORE_BUFFER_ONLY:
+        ASSERT(GetMode(stub) == INCREMENTAL ||
+               GetMode(stub) == INCREMENTAL_COMPACTION);
+        PatchBranchIntoNop(&masm, 0);
+        PatchBranchIntoNop(&masm, Assembler::kInstrSize);
+        break;
+      case INCREMENTAL:
+        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        PatchNopIntoBranch(&masm, 0);
+        break;
+      case INCREMENTAL_COMPACTION:
+        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        PatchNopIntoBranch(&masm, Assembler::kInstrSize);
+        break;
+    }
+    ASSERT(GetMode(stub) == mode);
+    CPU::FlushICache(stub->instruction_start(), 2 * Assembler::kInstrSize);
+  }
+
+ private:
+  // This is a helper class for freeing up 3 scratch registers.  The input is
+  // two registers that must be preserved and one scratch register provided by
+  // the caller.
+  class RegisterAllocation {
+   public:
+    RegisterAllocation(Register object,
+                       Register address,
+                       Register scratch0)
+        : object_(object),
+          address_(address),
+          scratch0_(scratch0) {
+      ASSERT(!AreAliased(scratch0, object, address, no_reg));
+      scratch1_ = GetRegThatIsNotOneOf(object_, address_, scratch0_);
+    }
+
+    void Save(MacroAssembler* masm) {
+      ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
+      // We don't have to save scratch0_ because it was given to us as
+      // a scratch register.
+      masm->push(scratch1_);
+    }
+
+    void Restore(MacroAssembler* masm) {
+      masm->pop(scratch1_);
+    }
+
+    // If we have to call into C then we need to save and restore all caller-
+    // saved registers that were not already preserved.  The scratch registers
+    // will be restored by other means so we don't bother pushing them here.
+    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+      masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
+      if (mode == kSaveFPRegs) {
+        CpuFeatures::Scope scope(VFP3);
+        masm->sub(sp,
+                  sp,
+                  Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
+        // Save all VFP registers except d0.
+        for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
+          DwVfpRegister reg = DwVfpRegister::from_code(i);
+          masm->vstr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
+        }
+      }
+    }
+
+    inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
+                                           SaveFPRegsMode mode) {
+      if (mode == kSaveFPRegs) {
+        CpuFeatures::Scope scope(VFP3);
+        // Restore all VFP registers except d0.
+        for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
+          DwVfpRegister reg = DwVfpRegister::from_code(i);
+          masm->vldr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
+        }
+        masm->add(sp,
+                  sp,
+                  Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
+      }
+      masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
+    }
+
+    inline Register object() { return object_; }
+    inline Register address() { return address_; }
+    inline Register scratch0() { return scratch0_; }
+    inline Register scratch1() { return scratch1_; }
+
+   private:
+    Register object_;
+    Register address_;
+    Register scratch0_;
+    Register scratch1_;
+
+    Register GetRegThatIsNotOneOf(Register r1,
+                                  Register r2,
+                                  Register r3) {
+      for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+        Register candidate = Register::FromAllocationIndex(i);
+        if (candidate.is(r1)) continue;
+        if (candidate.is(r2)) continue;
+        if (candidate.is(r3)) continue;
+        return candidate;
+      }
+      UNREACHABLE();
+      return no_reg;
+    }
+    friend class RecordWriteStub;
+  };
+
+  enum OnNoNeedToInformIncrementalMarker {
+    kReturnOnNoNeedToInformIncrementalMarker,
+    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+  };
+
+  void Generate(MacroAssembler* masm);
+  void GenerateIncremental(MacroAssembler* masm, Mode mode);
+  void CheckNeedsToInformIncrementalMarker(
+      MacroAssembler* masm,
+      OnNoNeedToInformIncrementalMarker on_no_need,
+      Mode mode);
+  void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+
+  Major MajorKey() { return RecordWrite; }
+
+  int MinorKey() {
+    return ObjectBits::encode(object_.code()) |
+        ValueBits::encode(value_.code()) |
+        AddressBits::encode(address_.code()) |
+        RememberedSetActionBits::encode(remembered_set_action_) |
+        SaveFPRegsModeBits::encode(save_fp_regs_mode_);
+  }
+
+  bool MustBeInStubCache() {
+    // All stubs must be registered in the stub cache, otherwise the
+    // incremental marker would not be able to find and patch them.
+    return true;
+  }
+
+  void Activate(Code* code) {
+    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+  }
+
+  class ObjectBits: public BitField<int, 0, 4> {};
+  class ValueBits: public BitField<int, 4, 4> {};
+  class AddressBits: public BitField<int, 8, 4> {};
+  class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
+  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
+
+  Register object_;
+  Register value_;
+  Register address_;
+  RememberedSetAction remembered_set_action_;
+  SaveFPRegsMode save_fp_regs_mode_;
+  Label slow_;
+  RegisterAllocation regs_;
+};
+
+
 // Enter C code from generated RegExp code in a way that allows
 // the C code to fix the return address in case of a GC.
 // Currently only needed on ARM.
@@ -575,6 +816,8 @@
                                      Register r0,
                                      Register r1);
 
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
  private:
   static const int kInlinedProbes = 4;
   static const int kTotalProbes = 20;
@@ -587,7 +830,7 @@
       StringDictionary::kHeaderSize +
       StringDictionary::kElementsStartIndex * kPointerSize;
 
-  Major MajorKey() { return StringDictionaryNegativeLookup; }
+  Major MajorKey() { return StringDictionaryLookup; }
 
   int MinorKey() {
     return LookupModeBits::encode(mode_);
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index bf748a9..3993ed0 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -38,12 +38,16 @@
 // Platform-specific RuntimeCallHelper functions.
 
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
-  masm->EnterInternalFrame();
+  masm->EnterFrame(StackFrame::INTERNAL);
+  ASSERT(!masm->has_frame());
+  masm->set_has_frame(true);
 }
 
 
 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-  masm->LeaveInternalFrame();
+  masm->LeaveFrame(StackFrame::INTERNAL);
+  ASSERT(masm->has_frame());
+  masm->set_has_frame(false);
 }
 
 
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index d27982a..1c0d508 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -69,16 +69,6 @@
                               int pos,
                               bool right_here = false);
 
-  // Constants related to patching of inlined load/store.
-  static int GetInlinedKeyedLoadInstructionsAfterPatch() {
-    return FLAG_debug_code ? 32 : 13;
-  }
-  static const int kInlinedKeyedStoreInstructionsAfterPatch = 8;
-  static int GetInlinedNamedStoreInstructionsAfterPatch() {
-    ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1);
-    return Isolate::Current()->inlined_write_barrier_size() + 4;
-  }
-
  private:
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
index 07a2272..b866f9c 100644
--- a/src/arm/debug-arm.cc
+++ b/src/arm/debug-arm.cc
@@ -132,55 +132,57 @@
 static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
                                           RegList object_regs,
                                           RegList non_object_regs) {
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Store the registers containing live values on the expression stack to
-  // make sure that these are correctly updated during GC. Non object values
-  // are stored as a smi causing it to be untouched by GC.
-  ASSERT((object_regs & ~kJSCallerSaved) == 0);
-  ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
-  ASSERT((object_regs & non_object_regs) == 0);
-  if ((object_regs | non_object_regs) != 0) {
-    for (int i = 0; i < kNumJSCallerSaved; i++) {
-      int r = JSCallerSavedCode(i);
-      Register reg = { r };
-      if ((non_object_regs & (1 << r)) != 0) {
-        if (FLAG_debug_code) {
-          __ tst(reg, Operand(0xc0000000));
-          __ Assert(eq, "Unable to encode value as smi");
+    // Store the registers containing live values on the expression stack to
+    // make sure that these are correctly updated during GC. Non-object values
+    // are stored as smis so that they are left untouched by the GC.
+    ASSERT((object_regs & ~kJSCallerSaved) == 0);
+    ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+    ASSERT((object_regs & non_object_regs) == 0);
+    if ((object_regs | non_object_regs) != 0) {
+      for (int i = 0; i < kNumJSCallerSaved; i++) {
+        int r = JSCallerSavedCode(i);
+        Register reg = { r };
+        if ((non_object_regs & (1 << r)) != 0) {
+          if (FLAG_debug_code) {
+            __ tst(reg, Operand(0xc0000000));
+            __ Assert(eq, "Unable to encode value as smi");
+          }
+          __ mov(reg, Operand(reg, LSL, kSmiTagSize));
         }
-        __ mov(reg, Operand(reg, LSL, kSmiTagSize));
       }
+      __ stm(db_w, sp, object_regs | non_object_regs);
     }
-    __ stm(db_w, sp, object_regs | non_object_regs);
-  }
 
 #ifdef DEBUG
-  __ RecordComment("// Calling from debug break to runtime - come in - over");
+    __ RecordComment("// Calling from debug break to runtime - come in - over");
 #endif
-  __ mov(r0, Operand(0, RelocInfo::NONE));  // no arguments
-  __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
+    __ mov(r0, Operand(0, RelocInfo::NONE));  // no arguments
+    __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
 
-  CEntryStub ceb(1);
-  __ CallStub(&ceb);
+    CEntryStub ceb(1);
+    __ CallStub(&ceb);
 
-  // Restore the register values from the expression stack.
-  if ((object_regs | non_object_regs) != 0) {
-    __ ldm(ia_w, sp, object_regs | non_object_regs);
-    for (int i = 0; i < kNumJSCallerSaved; i++) {
-      int r = JSCallerSavedCode(i);
-      Register reg = { r };
-      if ((non_object_regs & (1 << r)) != 0) {
-        __ mov(reg, Operand(reg, LSR, kSmiTagSize));
-      }
-      if (FLAG_debug_code &&
-          (((object_regs |non_object_regs) & (1 << r)) == 0)) {
-        __ mov(reg, Operand(kDebugZapValue));
+    // Restore the register values from the expression stack.
+    if ((object_regs | non_object_regs) != 0) {
+      __ ldm(ia_w, sp, object_regs | non_object_regs);
+      for (int i = 0; i < kNumJSCallerSaved; i++) {
+        int r = JSCallerSavedCode(i);
+        Register reg = { r };
+        if ((non_object_regs & (1 << r)) != 0) {
+          __ mov(reg, Operand(reg, LSR, kSmiTagSize));
+        }
+        if (FLAG_debug_code &&
+            (((object_regs | non_object_regs) & (1 << r)) == 0)) {
+          __ mov(reg, Operand(kDebugZapValue));
+        }
       }
     }
-  }
 
-  __ LeaveInternalFrame();
+    // Leave the internal frame.
+  }
 
   // Now that the break point has been handled, resume normal execution by
   // jumping to the target address intended by the caller and that was
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 00357f7..bb03d74 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -112,12 +112,19 @@
   }
 #endif
 
+  Isolate* isolate = code->GetIsolate();
+
   // Add the deoptimizing code to the list.
   DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
-  DeoptimizerData* data = code->GetIsolate()->deoptimizer_data();
+  DeoptimizerData* data = isolate->deoptimizer_data();
   node->set_next(data->deoptimizing_code_list_);
   data->deoptimizing_code_list_ = node;
 
+  // We might be in the middle of incremental marking with compaction.
+  // Tell the collector to treat this code object in a special way and
+  // ignore all slots that might have been recorded on it.
+  isolate->heap()->mark_compact_collector()->InvalidateCode(code);
+
   // Set the code for the function to non-optimized version.
   function->ReplaceCode(function->shared()->code());
 
@@ -134,7 +141,8 @@
 }
 
 
-void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
+void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
+                                        Address pc_after,
                                         Code* check_code,
                                         Code* replacement_code) {
   const int kInstrSize = Assembler::kInstrSize;
@@ -169,6 +177,13 @@
          reinterpret_cast<uint32_t>(check_code->entry()));
   Memory::uint32_at(stack_check_address_pointer) =
       reinterpret_cast<uint32_t>(replacement_code->entry());
+
+  RelocInfo rinfo(pc_after - 2 * kInstrSize,
+                  RelocInfo::CODE_TARGET,
+                  0,
+                  unoptimized_code);
+  unoptimized_code->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+      unoptimized_code, &rinfo, replacement_code);
 }
 
 
@@ -193,6 +208,9 @@
          reinterpret_cast<uint32_t>(replacement_code->entry()));
   Memory::uint32_at(stack_check_address_pointer) =
       reinterpret_cast<uint32_t>(check_code->entry());
+
+  check_code->GetHeap()->incremental_marking()->
+      RecordCodeTargetPatch(pc_after - 2 * kInstrSize, check_code);
 }
 
 
@@ -632,7 +650,10 @@
   __ mov(r5, Operand(ExternalReference::isolate_address()));
   __ str(r5, MemOperand(sp, 1 * kPointerSize));  // Isolate.
   // Call Deoptimizer::New().
-  __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+  {
+    AllowExternalCallThatCantCauseGC scope(masm());
+    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+  }
 
   // Preserve "deoptimizer" object in register r0 and get the input
   // frame descriptor pointer to r1 (deoptimizer->input_);
@@ -686,8 +707,11 @@
   // r0: deoptimizer object; r1: scratch.
   __ PrepareCallCFunction(1, r1);
   // Call Deoptimizer::ComputeOutputFrames().
-  __ CallCFunction(
-      ExternalReference::compute_output_frames_function(isolate), 1);
+  {
+    AllowExternalCallThatCantCauseGC scope(masm());
+    __ CallCFunction(
+        ExternalReference::compute_output_frames_function(isolate), 1);
+  }
   __ pop(r0);  // Restore deoptimizer object (class Deoptimizer).
 
   // Replace the current (input) frame with the output frames.
diff --git a/src/arm/frames-arm.h b/src/arm/frames-arm.h
index 26bbd82..c66ceee 100644
--- a/src/arm/frames-arm.h
+++ b/src/arm/frames-arm.h
@@ -70,6 +70,16 @@
   1 << 10 |  // r10 v7
   1 << 11;   // r11 v8 (fp in JavaScript code)
 
+// When calling into C++ (only for C++ calls that can't cause a GC).
+// The call code will take care of lr, fp, etc.
+static const RegList kCallerSaved =
+  1 <<  0 |  // r0
+  1 <<  1 |  // r1
+  1 <<  2 |  // r2
+  1 <<  3 |  // r3
+  1 <<  9;   // r9
+
+
 static const int kNumCalleeSaved = 7 + kR9Available;
 
 // Double registers d8 to d15 are callee-saved.
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 50ed8b1..2ee1594 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -39,6 +39,7 @@
 #include "stub-cache.h"
 
 #include "arm/code-stubs-arm.h"
+#include "arm/macro-assembler-arm.h"
 
 namespace v8 {
 namespace internal {
@@ -155,6 +156,11 @@
     __ bind(&ok);
   }
 
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done below).
+  FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
   int locals_count = info->scope()->num_stack_slots();
 
   __ Push(lr, fp, cp, r1);
@@ -200,13 +206,12 @@
         // Load parameter from stack.
         __ ldr(r0, MemOperand(fp, parameter_offset));
         // Store it in the context.
-        __ mov(r1, Operand(Context::SlotOffset(var->index())));
-        __ str(r0, MemOperand(cp, r1));
-        // Update the write barrier. This clobbers all involved
-        // registers, so we have to use two more registers to avoid
-        // clobbering cp.
-        __ mov(r2, Operand(cp));
-        __ RecordWrite(r2, Operand(r1), r3, r0);
+        MemOperand target = ContextOperand(cp, var->index());
+        __ str(r0, target);
+
+        // Update the write barrier.
+        __ RecordWriteContextSlot(
+            cp, target.offset(), r0, r3, kLRHasBeenSaved, kDontSaveFPRegs);
       }
     }
   }
@@ -665,12 +670,15 @@
   ASSERT(!scratch1.is(src));
   MemOperand location = VarOperand(var, scratch0);
   __ str(src, location);
+
   // Emit the write barrier code if the location is in the heap.
   if (var->IsContextSlot()) {
-    __ RecordWrite(scratch0,
-                   Operand(Context::SlotOffset(var->index())),
-                   scratch1,
-                   src);
+    __ RecordWriteContextSlot(scratch0,
+                              location.offset(),
+                              src,
+                              scratch1,
+                              kLRHasBeenSaved,
+                              kDontSaveFPRegs);
   }
 }
 
@@ -746,8 +754,14 @@
         __ str(result_register(), ContextOperand(cp, variable->index()));
         int offset = Context::SlotOffset(variable->index());
         // We know that we have written a function, which is not a smi.
-        __ mov(r1, Operand(cp));
-        __ RecordWrite(r1, Operand(offset), r2, result_register());
+        __ RecordWriteContextSlot(cp,
+                                  offset,
+                                  result_register(),
+                                  r2,
+                                  kLRHasBeenSaved,
+                                  kDontSaveFPRegs,
+                                  EMIT_REMEMBERED_SET,
+                                  OMIT_SMI_CHECK);
         PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
       } else if (mode == Variable::CONST || mode == Variable::LET) {
         Comment cmnt(masm_, "[ Declaration");
@@ -1490,14 +1504,25 @@
     VisitForAccumulatorValue(subexpr);
 
     // Store the subexpression value in the array's elements.
-    __ ldr(r1, MemOperand(sp));  // Copy of array literal.
-    __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
+    __ ldr(r6, MemOperand(sp));  // Copy of array literal.
+    __ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset));
     int offset = FixedArray::kHeaderSize + (i * kPointerSize);
     __ str(result_register(), FieldMemOperand(r1, offset));
 
+    Label no_map_change;
+    __ JumpIfSmi(result_register(), &no_map_change);
     // Update the write barrier for the array store with r0 as the scratch
     // register.
-    __ RecordWrite(r1, Operand(offset), r2, result_register());
+    __ RecordWriteField(
+        r1, offset, result_register(), r2, kLRHasBeenSaved, kDontSaveFPRegs,
+        EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+    if (FLAG_smi_only_arrays) {
+      __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+      __ CheckFastSmiOnlyElements(r3, r2, &no_map_change);
+      __ push(r6);  // Copy of array literal.
+      __ CallRuntime(Runtime::kNonSmiElementStored, 1);
+    }
+    __ bind(&no_map_change);
 
     PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
   }
@@ -1869,7 +1894,8 @@
         // RecordWrite may destroy all its register arguments.
         __ mov(r3, result_register());
         int offset = Context::SlotOffset(var->index());
-        __ RecordWrite(r1, Operand(offset), r2, r3);
+        __ RecordWriteContextSlot(
+            r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
       }
     }
 
@@ -1887,7 +1913,9 @@
       __ str(r0, location);
       if (var->IsContextSlot()) {
         __ mov(r3, r0);
-        __ RecordWrite(r1, Operand(Context::SlotOffset(var->index())), r2, r3);
+        int offset = Context::SlotOffset(var->index());
+        __ RecordWriteContextSlot(
+            r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
       }
     } else {
       ASSERT(var->IsLookupSlot());
@@ -2662,20 +2690,24 @@
 
   // Check that the object is a JS object but take special care of JS
   // functions to make sure they have 'Function' as their class.
+  // Assume that there are only two callable types, and one of them is at
+  // either end of the type range for JS object types. Saves extra comparisons.
+  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
   __ CompareObjectType(r0, r0, r1, FIRST_SPEC_OBJECT_TYPE);
   // Map is now in r0.
   __ b(lt, &null);
+  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                FIRST_SPEC_OBJECT_TYPE + 1);
+  __ b(eq, &function);
 
-  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
-  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
-  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
-  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
-  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
-                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
-  __ cmp(r1, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE));
-  __ b(ge, &function);
+  __ cmp(r1, Operand(LAST_SPEC_OBJECT_TYPE));
+  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                LAST_SPEC_OBJECT_TYPE - 1);
+  __ b(eq, &function);
+  // Assume that there is no larger type.
+  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
 
-  // Check if the constructor in the map is a function.
+  // Check if the constructor in the map is a JS function.
   __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
   __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
   __ b(ne, &non_function_constructor);
@@ -2853,7 +2885,9 @@
   __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
   // Update the write barrier.  Save the value as it will be
   // overwritten by the write barrier code and is needed afterward.
-  __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3);
+  __ mov(r2, r0);
+  __ RecordWriteField(
+      r1, JSValue::kValueOffset, r2, r3, kLRHasBeenSaved, kDontSaveFPRegs);
 
   __ bind(&done);
   context()->Plug(r0);
@@ -3141,16 +3175,31 @@
   __ str(scratch1, MemOperand(index2, 0));
   __ str(scratch2, MemOperand(index1, 0));
 
-  Label new_space;
-  __ InNewSpace(elements, scratch1, eq, &new_space);
+  Label no_remembered_set;
+  __ CheckPageFlag(elements,
+                   scratch1,
+                   1 << MemoryChunk::SCAN_ON_SCAVENGE,
+                   ne,
+                   &no_remembered_set);
   // Possible optimization: do a check that both values are Smis
   // (or them and test against Smi mask.)
 
-  __ mov(scratch1, elements);
-  __ RecordWriteHelper(elements, index1, scratch2);
-  __ RecordWriteHelper(scratch1, index2, scratch2);  // scratch1 holds elements.
+  // We are swapping two objects in an array and the incremental marker never
+  // pauses in the middle of scanning a single object.  Therefore the
+  // incremental marker is not disturbed, so we don't need to call the
+  // RecordWrite stub that notifies the incremental marker.
+  __ RememberedSetHelper(elements,
+                         index1,
+                         scratch2,
+                         kDontSaveFPRegs,
+                         MacroAssembler::kFallThroughAtEnd);
+  __ RememberedSetHelper(elements,
+                         index2,
+                         scratch2,
+                         kDontSaveFPRegs,
+                         MacroAssembler::kFallThroughAtEnd);
 
-  __ bind(&new_space);
+  __ bind(&no_remembered_set);
   // We are done. Drop elements from the stack, and return undefined.
   __ Drop(3);
   __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
@@ -3898,10 +3947,14 @@
 
 
 void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
-                                                 Handle<String> check,
-                                                 Label* if_true,
-                                                 Label* if_false,
-                                                 Label* fall_through) {
+                                                 Handle<String> check) {
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
   { AccumulatorValueContext context(this);
     VisitForTypeofValue(expr);
   }
@@ -3942,9 +3995,11 @@
 
   } else if (check->Equals(isolate()->heap()->function_symbol())) {
     __ JumpIfSmi(r0, if_false);
-    __ CompareObjectType(r0, r1, r0, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
-    Split(ge, if_true, if_false, fall_through);
-
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+    __ CompareObjectType(r0, r0, r1, JS_FUNCTION_TYPE);
+    __ b(eq, if_true);
+    __ cmp(r1, Operand(JS_FUNCTION_PROXY_TYPE));
+    Split(eq, if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->object_symbol())) {
     __ JumpIfSmi(r0, if_false);
     if (!FLAG_harmony_typeof) {
@@ -3963,18 +4018,7 @@
   } else {
     if (if_false != fall_through) __ jmp(if_false);
   }
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
-                                                    Label* if_true,
-                                                    Label* if_false,
-                                                    Label* fall_through) {
-  VisitForAccumulatorValue(expr);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-
-  __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
-  Split(eq, if_true, if_false, fall_through);
+  context()->Plug(if_true, if_false);
 }
 
 
@@ -3982,9 +4026,12 @@
   Comment cmnt(masm_, "[ CompareOperation");
   SetSourcePosition(expr->position());
 
+  // First we try a fast inlined version of the compare when one of
+  // the operands is a literal.
+  if (TryLiteralCompare(expr)) return;
+
   // Always perform the comparison for its control flow.  Pack the result
   // into the expression's context after the comparison is performed.
-
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
@@ -3992,13 +4039,6 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  // First we try a fast inlined version of the compare when one of
-  // the operands is a literal.
-  if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
-    context()->Plug(if_true, if_false);
-    return;
-  }
-
   Token::Value op = expr->op();
   VisitForStackValue(expr->left());
   switch (op) {
@@ -4085,8 +4125,9 @@
 }
 
 
-void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
-  Comment cmnt(masm_, "[ CompareToNull");
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+                                              Expression* sub_expr,
+                                              NilValue nil) {
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
@@ -4094,15 +4135,21 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  VisitForAccumulatorValue(expr->expression());
+  VisitForAccumulatorValue(sub_expr);
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-  __ LoadRoot(r1, Heap::kNullValueRootIndex);
+  Heap::RootListIndex nil_value = nil == kNullValue ?
+      Heap::kNullValueRootIndex :
+      Heap::kUndefinedValueRootIndex;
+  __ LoadRoot(r1, nil_value);
   __ cmp(r0, r1);
-  if (expr->is_strict()) {
+  if (expr->op() == Token::EQ_STRICT) {
     Split(eq, if_true, if_false, fall_through);
   } else {
+    Heap::RootListIndex other_nil_value = nil == kNullValue ?
+        Heap::kUndefinedValueRootIndex :
+        Heap::kNullValueRootIndex;
     __ b(eq, if_true);
-    __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+    __ LoadRoot(r1, other_nil_value);
     __ cmp(r0, r1);
     __ b(eq, if_true);
     __ JumpIfSmi(r0, if_false);
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 2e49cae..879b515 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -208,7 +208,8 @@
 
   // Update the write barrier. Make sure not to clobber the value.
   __ mov(scratch1, value);
-  __ RecordWrite(elements, scratch2, scratch1);
+  __ RecordWrite(
+      elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
 }
 
 
@@ -504,21 +505,22 @@
   // Get the receiver of the function from the stack.
   __ ldr(r3, MemOperand(sp, argc * kPointerSize));
 
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Push the receiver and the name of the function.
-  __ Push(r3, r2);
+    // Push the receiver and the name of the function.
+    __ Push(r3, r2);
 
-  // Call the entry.
-  __ mov(r0, Operand(2));
-  __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate)));
+    // Call the entry.
+    __ mov(r0, Operand(2));
+    __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate)));
 
-  CEntryStub stub(1);
-  __ CallStub(&stub);
+    CEntryStub stub(1);
+    __ CallStub(&stub);
 
-  // Move result to r1 and leave the internal frame.
-  __ mov(r1, Operand(r0));
-  __ LeaveInternalFrame();
+    // Move result to r1 and leave the internal frame.
+    __ mov(r1, Operand(r0));
+  }
 
   // Check if the receiver is a global object of some sort.
   // This can happen only for regular CallIC but not KeyedCallIC.
@@ -650,12 +652,13 @@
   // This branch is taken when calling KeyedCallIC_Miss is neither required
   // nor beneficial.
   __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, r0, r3);
-  __ EnterInternalFrame();
-  __ push(r2);  // save the key
-  __ Push(r1, r2);  // pass the receiver and the key
-  __ CallRuntime(Runtime::kKeyedGetProperty, 2);
-  __ pop(r2);  // restore the key
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(r2);  // save the key
+    __ Push(r1, r2);  // pass the receiver and the key
+    __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+    __ pop(r2);  // restore the key
+  }
   __ mov(r1, r0);
   __ jmp(&do_call);
 
@@ -908,7 +911,8 @@
       GenerateMappedArgumentsLookup(masm, r2, r1, r3, r4, r5, &notin, &slow);
   __ str(r0, mapped_location);
   __ add(r6, r3, r5);
-  __ RecordWrite(r3, r6, r9);
+  __ mov(r9, r0);
+  __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
   __ Ret();
   __ bind(&notin);
   // The unmapped lookup expects that the parameter map is in r3.
@@ -916,7 +920,8 @@
       GenerateUnmappedArgumentsLookup(masm, r1, r3, r4, &slow);
   __ str(r0, unmapped_location);
   __ add(r6, r3, r4);
-  __ RecordWrite(r3, r6, r9);
+  __ mov(r9, r0);
+  __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
   __ Ret();
   __ bind(&slow);
   GenerateMiss(masm, false);
@@ -1292,12 +1297,8 @@
   __ cmp(r4, Operand(JS_ARRAY_TYPE));
   __ b(eq, &array);
   // Check that the object is some kind of JSObject.
-  __ cmp(r4, Operand(FIRST_JS_RECEIVER_TYPE));
+  __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
   __ b(lt, &slow);
-  __ cmp(r4, Operand(JS_PROXY_TYPE));
-  __ b(eq, &slow);
-  __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
-  __ b(eq, &slow);
 
   // Object case: Check key against length in the elements array.
   __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
@@ -1353,17 +1354,36 @@
   // Fall through to fast case.
 
   __ bind(&fast);
-  // Fast case, store the value to the elements backing store.
-  __ add(r5, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ add(r5, r5, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ str(value, MemOperand(r5));
-  // Skip write barrier if the written value is a smi.
-  __ tst(value, Operand(kSmiTagMask));
-  __ Ret(eq);
-  // Update write barrier for the elements array address.
-  __ sub(r4, r5, Operand(elements));
-  __ RecordWrite(elements, Operand(r4), r5, r6);
+  Register scratch_value = r4;
+  Register address = r5;
 
+  Label non_smi_value;
+  __ JumpIfNotSmi(value, &non_smi_value);
+  // It's irrelevant whether the array is smi-only or not when writing a smi.
+  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ str(value, MemOperand(address));
+  __ Ret();
+
+  __ bind(&non_smi_value);
+  if (FLAG_smi_only_arrays) {
+    // Escape to slow case when writing non-smi into smi-only array.
+    __ ldr(scratch_value, FieldMemOperand(receiver, HeapObject::kMapOffset));
+    __ CheckFastObjectElements(scratch_value, scratch_value, &slow);
+  }
+  // Fast elements array, store the value to the elements backing store.
+  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ str(value, MemOperand(address));
+  // Update write barrier for the elements array address.
+  __ mov(scratch_value, value);  // Preserve the value which is returned.
+  __ RecordWrite(elements,
+                 address,
+                 scratch_value,
+                 kLRHasNotBeenSaved,
+                 kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET,
+                 OMIT_SMI_CHECK);
   __ Ret();
 }
 
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 30ccd05..8495939 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -212,10 +212,11 @@
 }
 
 
-void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
+void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if ");
   InputAt(0)->PrintTo(stream);
-  stream->Add(is_strict() ? " === null" : " == null");
+  stream->Add(kind() == kStrictEquality ? " === " : " == ");
+  stream->Add(nil() == kNullValue ? "null" : "undefined");
   stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
 }
 
@@ -711,7 +712,9 @@
 
 LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
   HEnvironment* hydrogen_env = current_block_->last_environment();
-  instr->set_environment(CreateEnvironment(hydrogen_env));
+  int argument_index_accumulator = 0;
+  instr->set_environment(CreateEnvironment(hydrogen_env,
+                                           &argument_index_accumulator));
   return instr;
 }
 
@@ -994,10 +997,13 @@
 }
 
 
-LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
+LEnvironment* LChunkBuilder::CreateEnvironment(
+    HEnvironment* hydrogen_env,
+    int* argument_index_accumulator) {
   if (hydrogen_env == NULL) return NULL;
 
-  LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
+  LEnvironment* outer =
+      CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
   int ast_id = hydrogen_env->ast_id();
   ASSERT(ast_id != AstNode::kNoNumber);
   int value_count = hydrogen_env->length();
@@ -1007,7 +1013,6 @@
                                           argument_count_,
                                           value_count,
                                           outer);
-  int argument_index = 0;
   for (int i = 0; i < value_count; ++i) {
     if (hydrogen_env->is_special_index(i)) continue;
 
@@ -1016,7 +1021,7 @@
     if (value->IsArgumentsObject()) {
       op = NULL;
     } else if (value->IsPushArgument()) {
-      op = new LArgument(argument_index++);
+      op = new LArgument((*argument_index_accumulator)++);
     } else {
       op = UseAny(value);
     }
@@ -1444,9 +1449,9 @@
 }
 
 
-LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
+LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  return new LIsNullAndBranch(UseRegisterAtStart(instr->value()));
+  return new LIsNilAndBranch(UseRegisterAtStart(instr->value()));
 }
 
 
@@ -1734,7 +1739,7 @@
 
 LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
   LLoadGlobalCell* result = new LLoadGlobalCell;
-  return instr->check_hole_value()
+  return instr->RequiresHoleCheck()
       ? AssignEnvironment(DefineAsRegister(result))
       : DefineAsRegister(result);
 }
@@ -1748,14 +1753,11 @@
 
 
 LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
-  if (instr->check_hole_value()) {
-    LOperand* temp = TempRegister();
-    LOperand* value = UseRegister(instr->value());
-    return AssignEnvironment(new LStoreGlobalCell(value, temp));
-  } else {
-    LOperand* value = UseRegisterAtStart(instr->value());
-    return new LStoreGlobalCell(value, NULL);
-  }
+  LOperand* temp = TempRegister();
+  LOperand* value = UseTempRegister(instr->value());
+  LInstruction* result = new LStoreGlobalCell(value, temp);
+  if (instr->RequiresHoleCheck()) result = AssignEnvironment(result);
+  return result;
 }
 
 
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 8c18760..73c7e45 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -107,7 +107,7 @@
   V(Integer32ToDouble)                          \
   V(InvokeFunction)                             \
   V(IsConstructCallAndBranch)                   \
-  V(IsNullAndBranch)                            \
+  V(IsNilAndBranch)                             \
   V(IsObjectAndBranch)                          \
   V(IsSmiAndBranch)                             \
   V(IsUndetectableAndBranch)                    \
@@ -627,16 +627,17 @@
 };
 
 
-class LIsNullAndBranch: public LControlInstruction<1, 0> {
+class LIsNilAndBranch: public LControlInstruction<1, 0> {
  public:
-  explicit LIsNullAndBranch(LOperand* value) {
+  explicit LIsNilAndBranch(LOperand* value) {
     inputs_[0] = value;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
+  DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
 
-  bool is_strict() const { return hydrogen()->is_strict(); }
+  EqualityKind kind() const { return hydrogen()->kind(); }
+  NilValue nil() const { return hydrogen()->nil(); }
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -2159,7 +2160,8 @@
       LInstruction* instr, int ast_id);
   void ClearInstructionPendingDeoptimizationEnvironment();
 
-  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
+  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
+                                  int* argument_index_accumulator);
 
   void VisitInstruction(HInstruction* current);
 
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index f5d7449..70ef884 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -82,6 +82,14 @@
   status_ = GENERATING;
   CpuFeatures::Scope scope1(VFP3);
   CpuFeatures::Scope scope2(ARMv7);
+
+  CodeStub::GenerateFPStubs();
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // NONE indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done in GeneratePrologue).
+  FrameScope frame_scope(masm_, StackFrame::NONE);
+
   return GeneratePrologue() &&
       GenerateBody() &&
       GenerateDeferredCode() &&
@@ -206,13 +214,11 @@
         // Load parameter from stack.
         __ ldr(r0, MemOperand(fp, parameter_offset));
         // Store it in the context.
-        __ mov(r1, Operand(Context::SlotOffset(var->index())));
-        __ str(r0, MemOperand(cp, r1));
-        // Update the write barrier. This clobbers all involved
-        // registers, so we have to use two more registers to avoid
-        // clobbering cp.
-        __ mov(r2, Operand(cp));
-        __ RecordWrite(r2, Operand(r1), r3, r0);
+        MemOperand target = ContextOperand(cp, var->index());
+        __ str(r0, target);
+        // Update the write barrier. This clobbers r3 and r0.
+        __ RecordWriteContextSlot(
+            cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs);
       }
     }
     Comment(";;; End allocate local context");
@@ -262,6 +268,9 @@
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
       __ bind(code->entry());
+      Comment(";;; Deferred code @%d: %s.",
+              code->instruction_index(),
+              code->instr()->Mnemonic());
       code->Generate();
       __ jmp(code->exit());
     }
@@ -739,7 +748,7 @@
     int deoptimization_index) {
   ASSERT(expected_safepoint_kind_ == kind);
 
-  const ZoneList<LOperand*>* operands = pointers->operands();
+  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
       kind, arguments, deoptimization_index);
   for (int i = 0; i < operands->length(); i++) {
@@ -1032,6 +1041,7 @@
     virtual void Generate() {
       codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV);
     }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LDivI* instr_;
   };
@@ -1743,25 +1753,35 @@
 }
 
 
-void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
+void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
   Register scratch = scratch0();
   Register reg = ToRegister(instr->InputAt(0));
-
-  // TODO(fsc): If the expression is known to be a smi, then it's
-  // definitely not null. Jump to the false block.
-
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
   int false_block = chunk_->LookupDestination(instr->false_block_id());
 
-  __ LoadRoot(ip, Heap::kNullValueRootIndex);
+  // If the expression is known to be untagged or a smi, then it's definitely
+  // not null, and it can't be an undetectable object.
+  if (instr->hydrogen()->representation().IsSpecialization() ||
+      instr->hydrogen()->type().IsSmi()) {
+    EmitGoto(false_block);
+    return;
+  }
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
+      Heap::kNullValueRootIndex :
+      Heap::kUndefinedValueRootIndex;
+  __ LoadRoot(ip, nil_value);
   __ cmp(reg, ip);
-  if (instr->is_strict()) {
+  if (instr->kind() == kStrictEquality) {
     EmitBranch(true_block, false_block, eq);
   } else {
+    Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
+        Heap::kUndefinedValueRootIndex :
+        Heap::kNullValueRootIndex;
     Label* true_label = chunk_->GetAssemblyLabel(true_block);
     Label* false_label = chunk_->GetAssemblyLabel(false_block);
     __ b(eq, true_label);
-    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+    __ LoadRoot(ip, other_nil_value);
     __ cmp(reg, ip);
     __ b(eq, true_label);
     __ JumpIfSmi(reg, false_label);
@@ -1918,28 +1938,36 @@
   ASSERT(!input.is(temp));
   ASSERT(!temp.is(temp2));  // But input and temp2 may be the same register.
   __ JumpIfSmi(input, is_false);
-  __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
-  __ b(lt, is_false);
 
-  // Map is now in temp.
-  // Functions have class 'Function'.
-  __ CompareInstanceType(temp, temp2, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
   if (class_name->IsEqualTo(CStrVector("Function"))) {
-    __ b(ge, is_true);
+    // Assuming the following assertions, we can use the same compares to test
+    // for both being a function type and being in the object type range.
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                  FIRST_SPEC_OBJECT_TYPE + 1);
+    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                  LAST_SPEC_OBJECT_TYPE - 1);
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
+    __ b(lt, is_false);
+    __ b(eq, is_true);
+    __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
+    __ b(eq, is_true);
   } else {
-    __ b(ge, is_false);
+    // Faster code path to avoid two compares: subtract lower bound from the
+    // actual type and do a signed compare with the width of the type range.
+    __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
+    __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+                          FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    __ b(gt, is_false);
   }
 
+  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
   // Check if the constructor in the map is a function.
   __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
 
-  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type and
-  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
-  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
-  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
-  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
-                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
-
   // Objects with a non-function constructor have class 'Object'.
   __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
   if (class_name->IsEqualTo(CStrVector("Object"))) {
@@ -2016,9 +2044,8 @@
     virtual void Generate() {
       codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
     }
-
+    virtual LInstruction* instr() { return instr_; }
     Label* map_check() { return &map_check_; }
-
    private:
     LInstanceOfKnownGlobal* instr_;
     Label map_check_;
@@ -2180,7 +2207,7 @@
   Register result = ToRegister(instr->result());
   __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
   __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
-  if (instr->hydrogen()->check_hole_value()) {
+  if (instr->hydrogen()->RequiresHoleCheck()) {
     __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
     __ cmp(result, ip);
     DeoptimizeIf(eq, instr->environment());
@@ -2203,6 +2230,7 @@
 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
   Register value = ToRegister(instr->InputAt(0));
   Register scratch = scratch0();
+  Register scratch2 = ToRegister(instr->TempAt(0));
 
   // Load the cell.
   __ mov(scratch, Operand(Handle<Object>(instr->hydrogen()->cell())));
@@ -2211,8 +2239,7 @@
   // been deleted from the property dictionary. In that case, we need
   // to update the property details in the property dictionary to mark
   // it as no longer deleted.
-  if (instr->hydrogen()->check_hole_value()) {
-    Register scratch2 = ToRegister(instr->TempAt(0));
+  if (instr->hydrogen()->RequiresHoleCheck()) {
     __ ldr(scratch2,
            FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
     __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
@@ -2222,6 +2249,15 @@
 
   // Store the value.
   __ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+
+  // Cells are always in the remembered set.
+  __ RecordWriteField(scratch,
+                      JSGlobalPropertyCell::kValueOffset,
+                      value,
+                      scratch2,
+                      kLRHasBeenSaved,
+                      kSaveFPRegs,
+                      OMIT_REMEMBERED_SET);
 }
 
 
@@ -2247,10 +2283,15 @@
 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
   Register context = ToRegister(instr->context());
   Register value = ToRegister(instr->value());
-  __ str(value, ContextOperand(context, instr->slot_index()));
+  MemOperand target = ContextOperand(context, instr->slot_index());
+  __ str(value, target);
   if (instr->needs_write_barrier()) {
-    int offset = Context::SlotOffset(instr->slot_index());
-    __ RecordWrite(context, Operand(offset), value, scratch0());
+    __ RecordWriteContextSlot(context,
+                              target.offset(),
+                              value,
+                              scratch0(),
+                              kLRHasBeenSaved,
+                              kSaveFPRegs);
   }
 }
 
@@ -2500,13 +2541,9 @@
            Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
   }
 
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    // TODO(danno): If no hole check is required, there is no need to allocate
-    // elements into a temporary register, instead scratch can be used.
-    __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
-    __ cmp(scratch, Operand(kHoleNanUpper32));
-    DeoptimizeIf(eq, instr->environment());
-  }
+  __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+  __ cmp(scratch, Operand(kHoleNanUpper32));
+  DeoptimizeIf(eq, instr->environment());
 
   __ vldr(result, elements, 0);
 }
@@ -2577,6 +2614,7 @@
       case EXTERNAL_DOUBLE_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case FAST_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
       case DICTIONARY_ELEMENTS:
       case NON_STRICT_ARGUMENTS_ELEMENTS:
         UNREACHABLE();
@@ -2906,6 +2944,7 @@
     virtual void Generate() {
       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
     }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LUnaryMathOperation* instr_;
   };
@@ -3202,7 +3241,7 @@
   ASSERT(ToRegister(instr->result()).is(r0));
 
   int arity = instr->arity();
-  CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT);
+  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   __ Drop(1);
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3262,7 +3301,8 @@
     __ str(value, FieldMemOperand(object, offset));
     if (instr->needs_write_barrier()) {
       // Update the write barrier for the object for in-object properties.
-      __ RecordWrite(object, Operand(offset), value, scratch);
+      __ RecordWriteField(
+          object, offset, value, scratch, kLRHasBeenSaved, kSaveFPRegs);
     }
   } else {
     __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
@@ -3270,7 +3310,8 @@
     if (instr->needs_write_barrier()) {
       // Update the write barrier for the properties array.
       // object is used as a scratch register.
-      __ RecordWrite(scratch, Operand(offset), value, object);
+      __ RecordWriteField(
+          scratch, offset, value, object, kLRHasBeenSaved, kSaveFPRegs);
     }
   }
 }
@@ -3301,6 +3342,13 @@
   Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
   Register scratch = scratch0();
 
+  // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
+  // conversion, so it deopts in that case.
+  if (instr->hydrogen()->ValueNeedsSmiCheck()) {
+    __ tst(value, Operand(kSmiTagMask));
+    DeoptimizeIf(ne, instr->environment());
+  }
+
   // Do the store.
   if (instr->key()->IsConstantOperand()) {
     ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@@ -3315,8 +3363,8 @@
 
   if (instr->hydrogen()->NeedsWriteBarrier()) {
     // Compute address of modified element and store it into key register.
-    __ add(key, scratch, Operand(FixedArray::kHeaderSize));
-    __ RecordWrite(elements, key, value);
+    __ add(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+    __ RecordWrite(elements, key, value, kLRHasBeenSaved, kSaveFPRegs);
   }
 }
 
@@ -3417,6 +3465,7 @@
       case EXTERNAL_DOUBLE_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case FAST_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
       case DICTIONARY_ELEMENTS:
       case NON_STRICT_ARGUMENTS_ELEMENTS:
         UNREACHABLE();
@@ -3452,6 +3501,7 @@
     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LStringCharCodeAt* instr_;
   };
@@ -3575,6 +3625,7 @@
     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LStringCharFromCode* instr_;
   };
@@ -3646,6 +3697,7 @@
     DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LNumberTagI* instr_;
   };
@@ -3711,6 +3763,7 @@
     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LNumberTagD* instr_;
   };
@@ -3819,16 +3872,6 @@
 }
 
 
-class DeferredTaggedToI: public LDeferredCode {
- public:
-  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
-      : LDeferredCode(codegen), instr_(instr) { }
-  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- private:
-  LTaggedToI* instr_;
-};
-
-
 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
   Register input_reg = ToRegister(instr->InputAt(0));
   Register scratch1 = scratch0();
@@ -3911,6 +3954,16 @@
 
 
 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+  class DeferredTaggedToI: public LDeferredCode {
+   public:
+    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LTaggedToI* instr_;
+  };
+
   LOperand* input = instr->InputAt(0);
   ASSERT(input->IsRegister());
   ASSERT(input->Equals(instr->result()));
@@ -4343,10 +4396,12 @@
     final_branch_condition = ne;
 
   } else if (type_name->Equals(heap()->function_symbol())) {
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
     __ JumpIfSmi(input, false_label);
-    __ CompareObjectType(input, input, scratch,
-                         FIRST_CALLABLE_SPEC_OBJECT_TYPE);
-    final_branch_condition = ge;
+    __ CompareObjectType(input, scratch, input, JS_FUNCTION_TYPE);
+    __ b(eq, true_label);
+    __ cmp(input, Operand(JS_FUNCTION_PROXY_TYPE));
+    final_branch_condition = eq;
 
   } else if (type_name->Equals(heap()->object_symbol())) {
     __ JumpIfSmi(input, false_label);
@@ -4468,6 +4523,7 @@
     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LStackCheck* instr_;
   };
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index ead8489..711e459 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -376,16 +376,20 @@
 class LDeferredCode: public ZoneObject {
  public:
   explicit LDeferredCode(LCodeGen* codegen)
-      : codegen_(codegen), external_exit_(NULL) {
+      : codegen_(codegen),
+        external_exit_(NULL),
+        instruction_index_(codegen->current_instruction_) {
     codegen->AddDeferredCode(this);
   }
 
   virtual ~LDeferredCode() { }
   virtual void Generate() = 0;
+  virtual LInstruction* instr() = 0;
 
   void SetExit(Label *exit) { external_exit_ = exit; }
   Label* entry() { return &entry_; }
   Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+  int instruction_index() const { return instruction_index_; }
 
  protected:
   LCodeGen* codegen() const { return codegen_; }
@@ -396,6 +400,7 @@
   Label entry_;
   Label exit_;
   Label* external_exit_;
+  int instruction_index_;
 };
 
 } }  // namespace v8::internal
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index f37f310..316c889 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -42,7 +42,8 @@
 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
     : Assembler(arg_isolate, buffer, size),
       generating_stub_(false),
-      allow_stub_calls_(true) {
+      allow_stub_calls_(true),
+      has_frame_(false) {
   if (isolate() != NULL) {
     code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                   isolate());
@@ -406,32 +407,6 @@
 }
 
 
-void MacroAssembler::RecordWriteHelper(Register object,
-                                       Register address,
-                                       Register scratch) {
-  if (emit_debug_code()) {
-    // Check that the object is not in new space.
-    Label not_in_new_space;
-    InNewSpace(object, scratch, ne, &not_in_new_space);
-    Abort("new-space object passed to RecordWriteHelper");
-    bind(&not_in_new_space);
-  }
-
-  // Calculate page address.
-  Bfc(object, 0, kPageSizeBits);
-
-  // Calculate region number.
-  Ubfx(address, address, Page::kRegionSizeLog2,
-       kPageSizeBits - Page::kRegionSizeLog2);
-
-  // Mark region dirty.
-  ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
-  mov(ip, Operand(1));
-  orr(scratch, scratch, Operand(ip, LSL, address));
-  str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
-}
-
-
 void MacroAssembler::InNewSpace(Register object,
                                 Register scratch,
                                 Condition cond,
@@ -443,38 +418,52 @@
 }
 
 
-// Will clobber 4 registers: object, offset, scratch, ip.  The
-// register 'object' contains a heap object pointer.  The heap object
-// tag is shifted away.
-void MacroAssembler::RecordWrite(Register object,
-                                 Operand offset,
-                                 Register scratch0,
-                                 Register scratch1) {
-  // The compiled code assumes that record write doesn't change the
-  // context register, so we check that none of the clobbered
-  // registers are cp.
-  ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
-
+void MacroAssembler::RecordWriteField(
+    Register object,
+    int offset,
+    Register value,
+    Register dst,
+    LinkRegisterStatus lr_status,
+    SaveFPRegsMode save_fp,
+    RememberedSetAction remembered_set_action,
+    SmiCheck smi_check) {
+  // First, check if a write barrier is even needed. The tests below
+  // catch stores of Smis.
   Label done;
 
-  // First, test that the object is not in the new space.  We cannot set
-  // region marks for new space pages.
-  InNewSpace(object, scratch0, eq, &done);
+  // Skip barrier if writing a smi.
+  if (smi_check == INLINE_SMI_CHECK) {
+    JumpIfSmi(value, &done);
+  }
 
-  // Add offset into the object.
-  add(scratch0, object, offset);
+  // Although the object register is tagged, the offset is relative to the start
+  // of the object, so the offset must be a multiple of kPointerSize.
+  ASSERT(IsAligned(offset, kPointerSize));
 
-  // Record the actual write.
-  RecordWriteHelper(object, scratch0, scratch1);
+  add(dst, object, Operand(offset - kHeapObjectTag));
+  if (emit_debug_code()) {
+    Label ok;
+    tst(dst, Operand((1 << kPointerSizeLog2) - 1));
+    b(eq, &ok);
+    stop("Unaligned cell in write barrier");
+    bind(&ok);
+  }
+
+  RecordWrite(object,
+              dst,
+              value,
+              lr_status,
+              save_fp,
+              remembered_set_action,
+              OMIT_SMI_CHECK);
 
   bind(&done);
 
-  // Clobber all input registers when running with the debug-code flag
+  // Clobber the clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(object, Operand(BitCast<int32_t>(kZapValue)));
-    mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
-    mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
+    mov(value, Operand(BitCast<int32_t>(kZapValue + 4)));
+    mov(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
   }
 }
 
@@ -484,29 +473,94 @@
 // tag is shifted away.
 void MacroAssembler::RecordWrite(Register object,
                                  Register address,
-                                 Register scratch) {
+                                 Register value,
+                                 LinkRegisterStatus lr_status,
+                                 SaveFPRegsMode fp_mode,
+                                 RememberedSetAction remembered_set_action,
+                                 SmiCheck smi_check) {
   // The compiled code assumes that record write doesn't change the
   // context register, so we check that none of the clobbered
   // registers are cp.
-  ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
+  ASSERT(!address.is(cp) && !value.is(cp));
 
   Label done;
 
-  // First, test that the object is not in the new space.  We cannot set
-  // region marks for new space pages.
-  InNewSpace(object, scratch, eq, &done);
+  if (smi_check == INLINE_SMI_CHECK) {
+    ASSERT_EQ(0, kSmiTag);
+    tst(value, Operand(kSmiTagMask));
+    b(eq, &done);
+  }
+
+  CheckPageFlag(value,
+                value,  // Used as scratch.
+                MemoryChunk::kPointersToHereAreInterestingMask,
+                eq,
+                &done);
+  CheckPageFlag(object,
+                value,  // Used as scratch.
+                MemoryChunk::kPointersFromHereAreInterestingMask,
+                eq,
+                &done);
 
   // Record the actual write.
-  RecordWriteHelper(object, address, scratch);
+  if (lr_status == kLRHasNotBeenSaved) {
+    push(lr);
+  }
+  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+  CallStub(&stub);
+  if (lr_status == kLRHasNotBeenSaved) {
+    pop(lr);
+  }
 
   bind(&done);
 
-  // Clobber all input registers when running with the debug-code flag
+  // Clobber the clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(object, Operand(BitCast<int32_t>(kZapValue)));
-    mov(address, Operand(BitCast<int32_t>(kZapValue)));
-    mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
+    mov(address, Operand(BitCast<int32_t>(kZapValue + 12)));
+    mov(value, Operand(BitCast<int32_t>(kZapValue + 16)));
+  }
+}
+
+
+void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
+                                         Register address,
+                                         Register scratch,
+                                         SaveFPRegsMode fp_mode,
+                                         RememberedSetFinalAction and_then) {
+  Label done;
+  if (FLAG_debug_code) {
+    Label ok;
+    JumpIfNotInNewSpace(object, scratch, &ok);
+    stop("Remembered set pointer is in new space");
+    bind(&ok);
+  }
+  // Load store buffer top.
+  ExternalReference store_buffer =
+      ExternalReference::store_buffer_top(isolate());
+  mov(ip, Operand(store_buffer));
+  ldr(scratch, MemOperand(ip));
+  // Store pointer to buffer and increment buffer top.
+  str(address, MemOperand(scratch, kPointerSize, PostIndex));
+  // Write back new top of buffer.
+  str(scratch, MemOperand(ip));
+  // Call stub on end of buffer.
+  // Check for end of buffer.
+  tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
+  if (and_then == kFallThroughAtEnd) {
+    b(eq, &done);
+  } else {
+    ASSERT(and_then == kReturnAtEnd);
+    Ret(ne);
+  }
+  push(lr);
+  StoreBufferOverflowStub store_buffer_overflow =
+      StoreBufferOverflowStub(fp_mode);
+  CallStub(&store_buffer_overflow);
+  pop(lr);
+  bind(&done);
+  if (and_then == kReturnAtEnd) {
+    Ret();
   }
 }
 
@@ -961,6 +1015,9 @@
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper,
                                 CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   Label done;
 
   InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
@@ -988,6 +1045,9 @@
                                 RelocInfo::Mode rmode,
                                 InvokeFlag flag,
                                 CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   Label done;
 
   InvokePrologue(expected, actual, code, no_reg, &done, flag,
@@ -1011,6 +1071,9 @@
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   // Contract with called JS functions requires that function is passed in r1.
   ASSERT(fun.is(r1));
 
@@ -1035,6 +1098,9 @@
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
                                     CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   ASSERT(function->is_compiled());
 
   // Get the function and setup the context.
@@ -1090,10 +1156,10 @@
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 void MacroAssembler::DebugBreak() {
-  ASSERT(allow_stub_calls());
   mov(r0, Operand(0, RelocInfo::NONE));
   mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
   CEntryStub ces(1);
+  ASSERT(AllowThisStubCall(&ces));
   Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
 }
 #endif
@@ -1793,13 +1859,37 @@
 void MacroAssembler::CheckFastElements(Register map,
                                        Register scratch,
                                        Label* fail) {
-  STATIC_ASSERT(FAST_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
   ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
   cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
   b(hi, fail);
 }
 
 
+void MacroAssembler::CheckFastObjectElements(Register map,
+                                             Register scratch,
+                                             Label* fail) {
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
+  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+  cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
+  b(ls, fail);
+  cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
+  b(hi, fail);
+}
+
+
+void MacroAssembler::CheckFastSmiOnlyElements(Register map,
+                                              Register scratch,
+                                              Label* fail) {
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+  cmp(scratch, Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
+  b(hi, fail);
+}
+
+
 void MacroAssembler::CheckMap(Register obj,
                               Register scratch,
                               Handle<Map> map,
@@ -1895,13 +1985,13 @@
 
 
 void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
-  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
+  ASSERT(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
   Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond);
 }
 
 
 MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond) {
-  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
+  ASSERT(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
   Object* result;
   { MaybeObject* maybe_result = stub->TryGetCode();
     if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -1913,13 +2003,12 @@
 
 
 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
-  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
+  ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
   Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
 }
 
 
 MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond) {
-  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
   Object* result;
   { MaybeObject* maybe_result = stub->TryGetCode();
     if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -2022,6 +2111,12 @@
 }
 
 
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
+  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
+}
+
+
 void MacroAssembler::IllegalOperation(int num_arguments) {
   if (num_arguments > 0) {
     add(sp, sp, Operand(num_arguments * kPointerSize));
@@ -2417,8 +2512,7 @@
   const Runtime::Function* function = Runtime::FunctionForId(id);
   mov(r0, Operand(function->nargs));
   mov(r1, Operand(ExternalReference(function, isolate())));
-  CEntryStub stub(1);
-  stub.SaveDoubles();
+  CEntryStub stub(1, kSaveFPRegs);
   CallStub(&stub);
 }
 
@@ -2491,6 +2585,9 @@
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
+  // You can't call a builtin without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   GetBuiltinEntry(r2, id);
   if (flag == CALL_FUNCTION) {
     call_wrapper.BeforeCall(CallSize(r2));
@@ -2622,14 +2719,20 @@
     RecordComment(msg);
   }
 #endif
-  // Disable stub call restrictions to always allow calls to abort.
-  AllowStubCallsScope allow_scope(this, true);
 
   mov(r0, Operand(p0));
   push(r0);
   mov(r0, Operand(Smi::FromInt(p1 - p0)));
   push(r0);
-  CallRuntime(Runtime::kAbort, 2);
+  // CallRuntime requires a frame, so fake one if necessary to always allow
+  // calls to abort.
+  if (!has_frame_) {
+    // We don't actually want to generate a pile of code for this, so just
+    // claim there is a stack frame, without generating one.
+    FrameScope scope(this, StackFrame::NONE);
+    CallRuntime(Runtime::kAbort, 2);
+  } else {
+    CallRuntime(Runtime::kAbort, 2);
+  }
   // will not return here
   if (is_const_pool_blocked()) {
     // If the calling code cares about the exact number of
@@ -2930,6 +3033,19 @@
 }
 
 
+void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
+                                                Register end_offset,
+                                                Register filler) {
+  Label loop, entry;
+  b(&entry);
+  bind(&loop);
+  str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
+  bind(&entry);
+  cmp(start_offset, end_offset);
+  b(lt, &loop);
+}
+
+
 void MacroAssembler::CountLeadingZeros(Register zeros,   // Answer.
                                        Register source,  // Input.
                                        Register scratch) {
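
The new InitializeFieldsWithFiller above is the usual branch-to-entry fill
loop: jump straight to the comparison, store with a post-indexed write that
bumps start_offset by one word, and keep looping while start_offset is below
end_offset.  The same loop in plain C++ (a sketch, not generated code; the
FillWords name is invented):

    #include <cstdint>
    #include <cassert>

    // Store `filler` into every word of [start, end); on exit start == end,
    // matching the register contract documented for the macro.
    void FillWords(uintptr_t*& start, uintptr_t* end, uintptr_t filler) {
      while (start < end) {   // cmp(start_offset, end_offset); b(lt, &loop)
        *start++ = filler;    // str(filler, MemOperand(start_offset, kPointerSize, PostIndex))
      }
    }

    int main() {
      uintptr_t words[4] = {1, 2, 3, 4};
      uintptr_t* cursor = words;
      FillWords(cursor, words + 4, 0);
      assert(cursor == words + 4 && words[3] == 0);
      return 0;
    }
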
@@ -3089,23 +3205,15 @@
 void MacroAssembler::CallCFunction(ExternalReference function,
                                    int num_reg_arguments,
                                    int num_double_arguments) {
-  CallCFunctionHelper(no_reg,
-                      function,
-                      ip,
-                      num_reg_arguments,
-                      num_double_arguments);
+  mov(ip, Operand(function));
+  CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
 }
 
 
 void MacroAssembler::CallCFunction(Register function,
-                                     Register scratch,
-                                     int num_reg_arguments,
-                                     int num_double_arguments) {
-  CallCFunctionHelper(function,
-                      ExternalReference::the_hole_value_location(isolate()),
-                      scratch,
-                      num_reg_arguments,
-                      num_double_arguments);
+                                   int num_reg_arguments,
+                                   int num_double_arguments) {
+  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
 }
 
 
@@ -3116,17 +3224,15 @@
 
 
 void MacroAssembler::CallCFunction(Register function,
-                                   Register scratch,
                                    int num_arguments) {
-  CallCFunction(function, scratch, num_arguments, 0);
+  CallCFunction(function, num_arguments, 0);
 }
 
 
 void MacroAssembler::CallCFunctionHelper(Register function,
-                                         ExternalReference function_reference,
-                                         Register scratch,
                                          int num_reg_arguments,
                                          int num_double_arguments) {
+  ASSERT(has_frame());
   // Make sure that the stack is aligned before calling a C function unless
   // running in the simulator. The simulator has its own alignment check which
   // provides more information.
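
A brief aside on the alignment comment above: the ARM EABI expects sp to be
8-byte aligned at public call boundaries, so code that has pushed an odd
number of words before a C call has to pad.  A rough stand-alone
illustration of the rounding involved, under that assumption; the macro
assembler establishes the alignment when setting up the call rather than
masking sp like this:

    #include <cstdint>
    #include <cstdio>

    // Round a stack pointer down to the assumed 8-byte EABI boundary.
    uintptr_t AlignDownForCall(uintptr_t sp, uintptr_t alignment = 8) {
      return sp & ~(alignment - 1);
    }

    int main() {
      printf("%#lx\n", (unsigned long)AlignDownForCall(0x7ffc1234));  // prints 0x7ffc1230
      return 0;
    }
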
@@ -3150,10 +3256,6 @@
   // Just call directly. The function called cannot cause a GC, or
   // allow preemption, so the return address in the link register
   // stays correct.
-  if (function.is(no_reg)) {
-    mov(scratch, Operand(function_reference));
-    function = scratch;
-  }
   Call(function);
   int stack_passed_arguments = CalculateStackPassedWords(
       num_reg_arguments, num_double_arguments);
@@ -3185,6 +3287,185 @@
 }
 
 
+void MacroAssembler::CheckPageFlag(
+    Register object,
+    Register scratch,
+    int mask,
+    Condition cc,
+    Label* condition_met) {
+  and_(scratch, object, Operand(~Page::kPageAlignmentMask));
+  ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+  tst(scratch, Operand(mask));
+  b(cc, condition_met);
+}
+
+
+void MacroAssembler::JumpIfBlack(Register object,
+                                 Register scratch0,
+                                 Register scratch1,
+                                 Label* on_black) {
+  HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
+  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+}
+
+
+void MacroAssembler::HasColor(Register object,
+                              Register bitmap_scratch,
+                              Register mask_scratch,
+                              Label* has_color,
+                              int first_bit,
+                              int second_bit) {
+  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
+
+  GetMarkBits(object, bitmap_scratch, mask_scratch);
+
+  Label other_color, word_boundary;
+  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  tst(ip, Operand(mask_scratch));
+  b(first_bit == 1 ? eq : ne, &other_color);
+  // Shift left 1 by adding.
+  add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
+  b(eq, &word_boundary);
+  tst(ip, Operand(mask_scratch));
+  b(second_bit == 1 ? ne : eq, has_color);
+  jmp(&other_color);
+
+  bind(&word_boundary);
+  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
+  tst(ip, Operand(1));
+  b(second_bit == 1 ? ne : eq, has_color);
+  bind(&other_color);
+}
+
+
+// Detect some, but not all, common pointer-free objects.  This is used by the
+// incremental write barrier which doesn't care about oddballs (they are always
+// marked black immediately so this code is not hit).
+void MacroAssembler::JumpIfDataObject(Register value,
+                                      Register scratch,
+                                      Label* not_data_object) {
+  Label is_data_object;
+  ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+  CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
+  b(eq, &is_data_object);
+  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  // If it's a string and it's not a cons string then it's an object containing
+  // no GC pointers.
+  ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+  tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
+  b(ne, not_data_object);
+  bind(&is_data_object);
+}
+
+
+void MacroAssembler::GetMarkBits(Register addr_reg,
+                                 Register bitmap_reg,
+                                 Register mask_reg) {
+  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
+  and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
+  Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
+  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
+  Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
+  add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
+  mov(ip, Operand(1));
+  mov(mask_reg, Operand(ip, LSL, mask_reg));
+}
+
+
+void MacroAssembler::EnsureNotWhite(
+    Register value,
+    Register bitmap_scratch,
+    Register mask_scratch,
+    Register load_scratch,
+    Label* value_is_white_and_not_data) {
+  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
+  GetMarkBits(value, bitmap_scratch, mask_scratch);
+
+  // If the value is black or grey we don't need to do anything.
+  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+  Label done;
+
+  // Since both black and grey have a 1 in the first position and white does
+  // not have a 1 there we only need to check one bit.
+  ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  tst(mask_scratch, load_scratch);
+  b(ne, &done);
+
+  if (FLAG_debug_code) {
+    // Check for impossible bit pattern.
+    Label ok;
+    // LSL may overflow, making the check conservative.
+    tst(load_scratch, Operand(mask_scratch, LSL, 1));
+    b(eq, &ok);
+    stop("Impossible marking bit pattern");
+    bind(&ok);
+  }
+
+  // Value is white.  We check whether it is data that doesn't need scanning.
+  // Currently only checks for HeapNumber and non-cons strings.
+  Register map = load_scratch;  // Holds map while checking type.
+  Register length = load_scratch;  // Holds length of object after testing type.
+  Label is_data_object;
+
+  // Check for heap-number
+  ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+  mov(length, Operand(HeapNumber::kSize), LeaveCC, eq);
+  b(eq, &is_data_object);
+
+  // Check for strings.
+  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  // If it's a string and it's not a cons string then it's an object containing
+  // no GC pointers.
+  Register instance_type = load_scratch;
+  ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
+  b(ne, value_is_white_and_not_data);
+  // It's a non-indirect (non-cons and non-slice) string.
+  // If it's external, the length is just ExternalString::kSize.
+  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+  // External strings are the only ones with the kExternalStringTag bit
+  // set.
+  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
+  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+  tst(instance_type, Operand(kExternalStringTag));
+  mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
+  b(ne, &is_data_object);
+
+  // Sequential string, either ASCII or UC16.
+  // For ASCII (char-size of 1) we shift the smi tag away to get the length.
+  // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
+  // getting the length multiplied by 2.
+  ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
+  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  ldr(ip, FieldMemOperand(value, String::kLengthOffset));
+  tst(instance_type, Operand(kStringEncodingMask));
+  mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
+  add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
+  and_(length, length, Operand(~kObjectAlignmentMask));
+
+  bind(&is_data_object);
+  // Value is a data object, and it is white.  Mark it black.  Since we know
+  // that the object is white we can make it black by flipping one bit.
+  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  orr(ip, ip, Operand(mask_scratch));
+  str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+
+  and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
+  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+  add(ip, ip, Operand(length));
+  str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+
+  bind(&done);
+}
+
+
 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
   Usat(output_reg, 8, Operand(input_reg));
 }
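
To make the marking helpers in this hunk easier to follow: every
page-aligned chunk carries a bitmap with one mark bit per pointer-size word,
grouped into word-sized cells, and each object owns the two consecutive bits
at its start address (the ASSERTs spell out the encoding: white "00", black
"10", grey "11", and "01" is impossible).  GetMarkBits splits the address
into chunk base, cell index and bit-within-cell; EnsureNotWhite then ORs the
mask into the cell to blacken a white data object and bumps the chunk's
live-byte counter.  A stand-alone sketch of the address split and the colour
decode, using assumed constants (4-byte pointers, 1 MB chunks, 32-bit cells)
rather than V8's definitions:

    #include <cstdint>
    #include <cstdio>

    const uintptr_t kPointerSizeLog2 = 2;   // assumed 32-bit build
    const uintptr_t kBitsPerCellLog2 = 5;   // 32 mark bits per cell
    const uintptr_t kPageSizeBits = 20;     // assumed 1 MB chunks
    const uintptr_t kPageAlignmentMask = (uintptr_t(1) << kPageSizeBits) - 1;

    // Mirror of GetMarkBits: chunk base, cell index within the bitmap, and a
    // mask with a single bit set for the word containing `addr`.
    void SplitMarkBits(uintptr_t addr, uintptr_t* chunk, uintptr_t* cell,
                       uint32_t* mask) {
      *chunk = addr & ~kPageAlignmentMask;
      uintptr_t bit = (addr >> kPointerSizeLog2) &
                      ((uintptr_t(1) << kBitsPerCellLog2) - 1);
      const uintptr_t kLowBits = kPointerSizeLog2 + kBitsPerCellLog2;
      *cell = (addr & kPageAlignmentMask) >> kLowBits;
      *mask = uint32_t(1) << bit;
    }

    // Decode the two consecutive mark bits checked by HasColor/EnsureNotWhite.
    const char* ColorName(int first_bit, int second_bit) {
      if (first_bit == 0 && second_bit == 0) return "white";
      if (first_bit == 1 && second_bit == 0) return "black";
      if (first_bit == 1 && second_bit == 1) return "grey";
      return "impossible";
    }

    int main() {
      uintptr_t chunk, cell;
      uint32_t mask;
      SplitMarkBits(0x1234ABC8, &chunk, &cell, &mask);
      printf("chunk=%#lx cell=%lu mask=%#x %s\n",
             (unsigned long)chunk, (unsigned long)cell, mask, ColorName(1, 0));
      return 0;
    }
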
@@ -3234,6 +3515,17 @@
 }
 
 
+bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
+  if (r1.is(r2)) return true;
+  if (r1.is(r3)) return true;
+  if (r1.is(r4)) return true;
+  if (r2.is(r3)) return true;
+  if (r2.is(r4)) return true;
+  if (r3.is(r4)) return true;
+  return false;
+}
+
+
 CodePatcher::CodePatcher(byte* address, int instructions)
     : address_(address),
       instructions_(instructions),
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 6084fde..4027f26 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -29,6 +29,7 @@
 #define V8_ARM_MACRO_ASSEMBLER_ARM_H_
 
 #include "assembler.h"
+#include "frames.h"
 #include "v8globals.h"
 
 namespace v8 {
@@ -79,6 +80,14 @@
 };
 
 
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
+
+
+bool AreAliased(Register r1, Register r2, Register r3, Register r4);
+
+
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
  public:
@@ -157,40 +166,126 @@
                  Heap::RootListIndex index,
                  Condition cond = al);
 
+  // ---------------------------------------------------------------------------
+  // GC Support
 
-  // Check if object is in new space.
-  // scratch can be object itself, but it will be clobbered.
-  void InNewSpace(Register object,
-                  Register scratch,
-                  Condition cond,  // eq for new space, ne otherwise
-                  Label* branch);
+  void IncrementalMarkingRecordWriteHelper(Register object,
+                                           Register value,
+                                           Register address);
 
+  enum RememberedSetFinalAction {
+    kReturnAtEnd,
+    kFallThroughAtEnd
+  };
 
-  // For the page containing |object| mark the region covering [address]
-  // dirty. The object address must be in the first 8K of an allocated page.
-  void RecordWriteHelper(Register object,
-                         Register address,
-                         Register scratch);
+  // Record in the remembered set the fact that we have a pointer to new space
+  // at the address pointed to by the addr register.  Only works if addr is not
+  // in new space.
+  void RememberedSetHelper(Register object,  // Used for debug code.
+                           Register addr,
+                           Register scratch,
+                           SaveFPRegsMode save_fp,
+                           RememberedSetFinalAction and_then);
 
-  // For the page containing |object| mark the region covering
-  // [object+offset] dirty. The object address must be in the first 8K
-  // of an allocated page.  The 'scratch' registers are used in the
-  // implementation and all 3 registers are clobbered by the
-  // operation, as well as the ip register. RecordWrite updates the
-  // write barrier even when storing smis.
-  void RecordWrite(Register object,
-                   Operand offset,
+  void CheckPageFlag(Register object,
+                     Register scratch,
+                     int mask,
+                     Condition cc,
+                     Label* condition_met);
+
+  // Check if object is in new space.  Jumps if the object is not in new space.
+  // The register scratch can be object itself, but scratch will be clobbered.
+  void JumpIfNotInNewSpace(Register object,
+                           Register scratch,
+                           Label* branch) {
+    InNewSpace(object, scratch, ne, branch);
+  }
+
+  // Check if object is in new space.  Jumps if the object is in new space.
+  // The register scratch can be object itself, but it will be clobbered.
+  void JumpIfInNewSpace(Register object,
+                        Register scratch,
+                        Label* branch) {
+    InNewSpace(object, scratch, eq, branch);
+  }
+
+  // Check if an object has a given incremental marking color.
+  void HasColor(Register object,
+                Register scratch0,
+                Register scratch1,
+                Label* has_color,
+                int first_bit,
+                int second_bit);
+
+  void JumpIfBlack(Register object,
                    Register scratch0,
-                   Register scratch1);
+                   Register scratch1,
+                   Label* on_black);
 
-  // For the page containing |object| mark the region covering
-  // [address] dirty. The object address must be in the first 8K of an
-  // allocated page.  All 3 registers are clobbered by the operation,
-  // as well as the ip register. RecordWrite updates the write barrier
-  // even when storing smis.
-  void RecordWrite(Register object,
-                   Register address,
-                   Register scratch);
+  // Checks the color of an object.  If the object is already grey or black
+  // then we just fall through, since it is already live.  If it is white and
+  // we can determine that it doesn't need to be scanned, then we just mark it
+  // black and fall through.  For the rest we jump to the label so the
+  // incremental marker can fix its assumptions.
+  void EnsureNotWhite(Register object,
+                      Register scratch1,
+                      Register scratch2,
+                      Register scratch3,
+                      Label* object_is_white_and_not_data);
+
+  // Detects conservatively whether an object is data-only, i.e. it does not
+  // need to be scanned by the garbage collector.
+  void JumpIfDataObject(Register value,
+                        Register scratch,
+                        Label* not_data_object);
+
+  // Notify the garbage collector that we wrote a pointer into an object.
+  // |object| is the object being stored into, |value| is the object being
+  // stored.  value and scratch registers are clobbered by the operation.
+  // The offset is the offset from the start of the object, not the offset from
+  // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
+  void RecordWriteField(
+      Register object,
+      int offset,
+      Register value,
+      Register scratch,
+      LinkRegisterStatus lr_status,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK);
+
+  // As above, but the offset has the tag presubtracted.  For use with
+  // MemOperand(reg, off).
+  inline void RecordWriteContextSlot(
+      Register context,
+      int offset,
+      Register value,
+      Register scratch,
+      LinkRegisterStatus lr_status,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK) {
+    RecordWriteField(context,
+                     offset + kHeapObjectTag,
+                     value,
+                     scratch,
+                     lr_status,
+                     save_fp,
+                     remembered_set_action,
+                     smi_check);
+  }
+
+  // For a given |object| notify the garbage collector that the slot |address|
+  // has been written.  |value| is the object being stored. The value and
+  // address registers are clobbered by the operation.
+  void RecordWrite(
+      Register object,
+      Register address,
+      Register value,
+      LinkRegisterStatus lr_status,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK);
 
   // Push a handle.
   void Push(Handle<Object> handle);
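
The RecordWriteField / RecordWrite / RecordWriteContextSlot trio declared
above replaces the old region-marking RecordWrite overloads: callers now
pass the value register, state whether lr has already been saved and whether
FP registers must be preserved, and can opt out of the smi check or the
remembered-set update (the stub-cache changes later in this patch show the
typical str-then-RecordWriteField pattern).  Conceptually the remembered-set
half of the barrier only has work to do for old-to-new pointer stores (the
incremental-marking half is separate); a stand-alone model of that decision,
not V8 code:

    #include <cstdio>

    // Sketch of when a remembered-set entry is needed.  INLINE_SMI_CHECK
    // corresponds to the first test; OMIT_SMI_CHECK means the caller already
    // knows the value is a heap object.
    struct Store {
      bool value_is_smi;
      bool value_in_new_space;
      bool holder_in_new_space;
    };

    bool NeedsRememberedSetEntry(const Store& s) {
      if (s.value_is_smi) return false;          // smis carry no pointer
      if (!s.value_in_new_space) return false;   // only pointers to new space are recorded
      return !s.holder_in_new_space;             // old->new stores go through the store buffer
    }

    int main() {
      Store s = {false, true, false};
      printf("record: %d\n", NeedsRememberedSetEntry(s));
      return 0;
    }
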
@@ -318,16 +413,6 @@
             const double imm,
             const Condition cond = al);
 
-
-  // ---------------------------------------------------------------------------
-  // Activation frames
-
-  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
-  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
-
-  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
-  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
-
   // Enter exit frame.
   // stack_space - extra stack space, used for alignment before call to C.
   void EnterExitFrame(bool save_doubles, int stack_space = 0);
@@ -569,6 +654,13 @@
                  Register length,
                  Register scratch);
 
+  // Initialize fields with filler values.  Fields from |start_offset| up to
+  // but not including |end_offset| are overwritten with the value in
+  // |filler|.  At the end of the loop, |start_offset| takes the value of
+  // |end_offset|.
+  void InitializeFieldsWithFiller(Register start_offset,
+                                  Register end_offset,
+                                  Register filler);
+
   // ---------------------------------------------------------------------------
   // Support functions.
 
@@ -608,6 +700,18 @@
                          Register scratch,
                          Label* fail);
 
+  // Check if a map for a JSObject indicates that the object can have both smi
+  // and HeapObject elements.  Jump to the specified label if it does not.
+  void CheckFastObjectElements(Register map,
+                               Register scratch,
+                               Label* fail);
+
+  // Check if a map for a JSObject indicates that the object has fast smi only
+  // elements.  Jump to the specified label if it does not.
+  void CheckFastSmiOnlyElements(Register map,
+                                Register scratch,
+                                Label* fail);
+
   // Check if the map of an object is equal to a specified map (either
   // given directly or as an index into the root list) and branch to
   // label if not. Skip the smi check if not required (object is known
@@ -830,11 +934,11 @@
   // return address (unless this is somehow accounted for by the called
   // function).
   void CallCFunction(ExternalReference function, int num_arguments);
-  void CallCFunction(Register function, Register scratch, int num_arguments);
+  void CallCFunction(Register function, int num_arguments);
   void CallCFunction(ExternalReference function,
                      int num_reg_arguments,
                      int num_double_arguments);
-  void CallCFunction(Register function, Register scratch,
+  void CallCFunction(Register function,
                      int num_reg_arguments,
                      int num_double_arguments);
 
@@ -902,6 +1006,9 @@
   bool generating_stub() { return generating_stub_; }
   void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
   bool allow_stub_calls() { return allow_stub_calls_; }
+  void set_has_frame(bool value) { has_frame_ = value; }
+  bool has_frame() { return has_frame_; }
+  inline bool AllowThisStubCall(CodeStub* stub);
 
   // EABI variant for double arguments in use.
   bool use_eabi_hardfloat() {
@@ -1048,10 +1155,12 @@
 
   void LoadInstanceDescriptors(Register map, Register descriptors);
 
+  // Activation support.
+  void EnterFrame(StackFrame::Type type);
+  void LeaveFrame(StackFrame::Type type);
+
  private:
   void CallCFunctionHelper(Register function,
-                           ExternalReference function_reference,
-                           Register scratch,
                            int num_reg_arguments,
                            int num_double_arguments);
 
@@ -1067,16 +1176,25 @@
                       const CallWrapper& call_wrapper,
                       CallKind call_kind);
 
-  // Activation support.
-  void EnterFrame(StackFrame::Type type);
-  void LeaveFrame(StackFrame::Type type);
-
   void InitializeNewString(Register string,
                            Register length,
                            Heap::RootListIndex map_index,
                            Register scratch1,
                            Register scratch2);
 
+  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+  void InNewSpace(Register object,
+                  Register scratch,
+                  Condition cond,  // eq for new space, ne otherwise.
+                  Label* branch);
+
+  // Helper for finding the mark bits for an address.  Afterwards, the
+  // bitmap register points at the word with the mark bits and the mask
+  // register has a single bit set at the position of the first mark bit.
+  // Leaves addr_reg unchanged.
+  inline void GetMarkBits(Register addr_reg,
+                          Register bitmap_reg,
+                          Register mask_reg);
+
   // Compute memory operands for safepoint stack slots.
   static int SafepointRegisterStackIndex(int reg_code);
   MemOperand SafepointRegisterSlot(Register reg);
@@ -1084,6 +1202,7 @@
 
   bool generating_stub_;
   bool allow_stub_calls_;
+  bool has_frame_;
   // This handle will be patched with the code object on installation.
   Handle<Object> code_object_;
 
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index cd76edb..c876467 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -371,9 +371,12 @@
     // Isolate.
     __ mov(r3, Operand(ExternalReference::isolate_address()));
 
-    ExternalReference function =
-        ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
-    __ CallCFunction(function, argument_count);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm_);
+      ExternalReference function =
+          ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+      __ CallCFunction(function, argument_count);
+    }
 
     // Check if function returned non-zero for success or zero for failure.
     __ cmp(r0, Operand(0, RelocInfo::NONE));
@@ -611,6 +614,12 @@
 
   // Entry code:
   __ bind(&entry_label_);
+
+  // Tell the system that we have a stack frame.  Because the type is MANUAL,
+  // no code is generated.
+  FrameScope scope(masm_, StackFrame::MANUAL);
+
+  // Actually emit code to start a new stack frame.
   // Push arguments
   // Save callee-save registers.
   // Start new stack frame.
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 6af5355..5704202 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -1618,6 +1618,8 @@
   ProcessPUW(instr, num_regs, kPointerSize, &start_address, &end_address);
 
   intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
+  // Catch null pointers a little earlier.
+  ASSERT(start_address > 8191 || start_address < 0);
   int reg = 0;
   while (rlist != 0) {
     if ((rlist & 1) != 0) {
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index f856592..6c55366 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -431,7 +431,13 @@
 
     // Update the write barrier for the array address.
     // Pass the now unused name_reg as a scratch register.
-    __ RecordWrite(receiver_reg, Operand(offset), name_reg, scratch);
+    __ mov(name_reg, r0);
+    __ RecordWriteField(receiver_reg,
+                        offset,
+                        name_reg,
+                        scratch,
+                        kLRHasNotBeenSaved,
+                        kDontSaveFPRegs);
   } else {
     // Write to the properties array.
     int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -444,7 +450,13 @@
 
     // Update the write barrier for the array address.
     // Ok to clobber receiver_reg and name_reg, since we return.
-    __ RecordWrite(scratch, Operand(offset), name_reg, receiver_reg);
+    __ mov(name_reg, r0);
+    __ RecordWriteField(scratch,
+                        offset,
+                        name_reg,
+                        receiver_reg,
+                        kLRHasNotBeenSaved,
+                        kDontSaveFPRegs);
   }
 
   // Return the value (register r0).
@@ -553,9 +565,10 @@
 }
 
 
-static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
-                                      const CallOptimization& optimization,
-                                      int argc) {
+static MaybeObject* GenerateFastApiDirectCall(
+    MacroAssembler* masm,
+    const CallOptimization& optimization,
+    int argc) {
   // ----------- S t a t e -------------
   //  -- sp[0]              : holder (set by CheckPrototypes)
   //  -- sp[4]              : callee js function
@@ -591,6 +604,8 @@
   ApiFunction fun(api_function_address);
 
   const int kApiStackSpace = 4;
+
+  FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ EnterExitFrame(false, kApiStackSpace);
 
   // r0 = v8::Arguments&
@@ -616,9 +631,11 @@
   ExternalReference ref = ExternalReference(&fun,
                                             ExternalReference::DIRECT_API_CALL,
                                             masm->isolate());
+  AllowExternalCallThatCantCauseGC scope(masm);
   return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
 }
 
+
 class CallInterceptorCompiler BASE_EMBEDDED {
  public:
   CallInterceptorCompiler(StubCompiler* stub_compiler,
@@ -794,7 +811,7 @@
                                         miss_label);
 
     // Call a runtime function to load the interceptor property.
-    __ EnterInternalFrame();
+    FrameScope scope(masm, StackFrame::INTERNAL);
     // Save the name_ register across the call.
     __ push(name_);
 
@@ -811,7 +828,8 @@
 
     // Restore the name_ register.
     __ pop(name_);
-    __ LeaveInternalFrame();
+
+    // Leave the internal frame.
   }
 
   void LoadWithInterceptor(MacroAssembler* masm,
@@ -820,18 +838,19 @@
                            JSObject* holder_obj,
                            Register scratch,
                            Label* interceptor_succeeded) {
-    __ EnterInternalFrame();
-    __ Push(holder, name_);
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(holder, name_);
 
-    CompileCallLoadPropertyWithInterceptor(masm,
-                                           receiver,
-                                           holder,
-                                           name_,
-                                           holder_obj);
+      CompileCallLoadPropertyWithInterceptor(masm,
+                                             receiver,
+                                             holder,
+                                             name_,
+                                             holder_obj);
 
-    __ pop(name_);  // Restore the name.
-    __ pop(receiver);  // Restore the holder.
-    __ LeaveInternalFrame();
+      __ pop(name_);  // Restore the name.
+      __ pop(receiver);  // Restore the holder.
+    }
 
     // If interceptor returns no-result sentinel, call the constant function.
     __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
@@ -1228,7 +1247,10 @@
   ApiFunction fun(getter_address);
 
   const int kApiStackSpace = 1;
+
+  FrameScope frame_scope(masm(), StackFrame::MANUAL);
   __ EnterExitFrame(false, kApiStackSpace);
+
   // Create AccessorInfo instance on the stack above the exit frame with
   // scratch2 (internal::Object **args_) as the data.
   __ str(scratch2, MemOperand(sp, 1 * kPointerSize));
@@ -1288,42 +1310,44 @@
 
     // Save necessary data before invoking an interceptor.
     // Requires a frame to make GC aware of pushed pointers.
-    __ EnterInternalFrame();
+    {
+      FrameScope frame_scope(masm(), StackFrame::INTERNAL);
 
-    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-      // CALLBACKS case needs a receiver to be passed into C++ callback.
-      __ Push(receiver, holder_reg, name_reg);
-    } else {
-      __ Push(holder_reg, name_reg);
+      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+        // CALLBACKS case needs a receiver to be passed into C++ callback.
+        __ Push(receiver, holder_reg, name_reg);
+      } else {
+        __ Push(holder_reg, name_reg);
+      }
+
+      // Invoke an interceptor.  Note: map checks from receiver to
+      // interceptor's holder have been compiled before (see a caller
+      // of this method).
+      CompileCallLoadPropertyWithInterceptor(masm(),
+                                             receiver,
+                                             holder_reg,
+                                             name_reg,
+                                             interceptor_holder);
+
+      // Check if interceptor provided a value for property.  If it's
+      // the case, return immediately.
+      Label interceptor_failed;
+      __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
+      __ cmp(r0, scratch1);
+      __ b(eq, &interceptor_failed);
+      frame_scope.GenerateLeaveFrame();
+      __ Ret();
+
+      __ bind(&interceptor_failed);
+      __ pop(name_reg);
+      __ pop(holder_reg);
+      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+        __ pop(receiver);
+      }
+
+      // Leave the internal frame.
     }
 
-    // Invoke an interceptor.  Note: map checks from receiver to
-    // interceptor's holder has been compiled before (see a caller
-    // of this method.)
-    CompileCallLoadPropertyWithInterceptor(masm(),
-                                           receiver,
-                                           holder_reg,
-                                           name_reg,
-                                           interceptor_holder);
-
-    // Check if interceptor provided a value for property.  If it's
-    // the case, return immediately.
-    Label interceptor_failed;
-    __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
-    __ cmp(r0, scratch1);
-    __ b(eq, &interceptor_failed);
-    __ LeaveInternalFrame();
-    __ Ret();
-
-    __ bind(&interceptor_failed);
-    __ pop(name_reg);
-    __ pop(holder_reg);
-    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-      __ pop(receiver);
-    }
-
-    __ LeaveInternalFrame();
-
     // Check that the maps from interceptor's holder to lookup's holder
     // haven't changed.  And load lookup's holder into |holder| register.
     if (interceptor_holder != lookup->holder()) {
@@ -1556,7 +1580,7 @@
                 DONT_DO_SMI_CHECK);
 
     if (argc == 1) {  // Otherwise fall through to call the builtin.
-      Label exit, with_write_barrier, attempt_to_grow_elements;
+      Label attempt_to_grow_elements;
 
       // Get the array's length into r0 and calculate new length.
       __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
@@ -1571,11 +1595,15 @@
       __ cmp(r0, r4);
       __ b(gt, &attempt_to_grow_elements);
 
+      // Check if value is a smi.
+      Label with_write_barrier;
+      __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
+      __ JumpIfNotSmi(r4, &with_write_barrier);
+
       // Save new length.
       __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
 
       // Push the element.
-      __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
       // We may need a register containing the address end_elements below,
       // so write back the value in end_elements.
       __ add(end_elements, elements,
@@ -1585,14 +1613,33 @@
       __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
 
       // Check for a smi.
-      __ JumpIfNotSmi(r4, &with_write_barrier);
-      __ bind(&exit);
       __ Drop(argc + 1);
       __ Ret();
 
       __ bind(&with_write_barrier);
-      __ InNewSpace(elements, r4, eq, &exit);
-      __ RecordWriteHelper(elements, end_elements, r4);
+
+      if (FLAG_smi_only_arrays) {
+        __ ldr(r6, FieldMemOperand(receiver, HeapObject::kMapOffset));
+        __ CheckFastSmiOnlyElements(r6, r6, &call_builtin);
+      }
+
+      // Save new length.
+      __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+      // Push the element.
+      // We may need a register containing the address end_elements below,
+      // so write back the value in end_elements.
+      __ add(end_elements, elements,
+             Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+      __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
+
+      __ RecordWrite(elements,
+                     end_elements,
+                     r4,
+                     kLRHasNotBeenSaved,
+                     kDontSaveFPRegs,
+                     EMIT_REMEMBERED_SET,
+                     OMIT_SMI_CHECK);
       __ Drop(argc + 1);
       __ Ret();
 
@@ -1604,6 +1651,17 @@
         __ b(&call_builtin);
       }
 
+      __ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize));
+      if (FLAG_smi_only_arrays) {
+        // Growing elements that are SMI-only requires special handling in case
+        // the new element is non-Smi. For now, delegate to the builtin.
+        Label no_fast_elements_check;
+        __ JumpIfSmi(r2, &no_fast_elements_check);
+        __ ldr(r7, FieldMemOperand(receiver, HeapObject::kMapOffset));
+        __ CheckFastObjectElements(r7, r7, &call_builtin);
+        __ bind(&no_fast_elements_check);
+      }
+
       Isolate* isolate = masm()->isolate();
       ExternalReference new_space_allocation_top =
           ExternalReference::new_space_allocation_top_address(isolate);
@@ -1630,8 +1688,7 @@
       // Update new_space_allocation_top.
       __ str(r6, MemOperand(r7));
       // Push the argument.
-      __ ldr(r6, MemOperand(sp, (argc - 1) * kPointerSize));
-      __ str(r6, MemOperand(end_elements));
+      __ str(r2, MemOperand(end_elements));
       // Fill the rest with holes.
       __ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
       for (int i = 1; i < kAllocationDelta; i++) {
@@ -2713,6 +2770,15 @@
   // Store the value in the cell.
   __ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
 
+  __ mov(r1, r0);
+  __ RecordWriteField(r4,
+                      JSGlobalPropertyCell::kValueOffset,
+                      r1,
+                      r2,
+                      kLRHasNotBeenSaved,
+                      kDontSaveFPRegs,
+                      OMIT_REMEMBERED_SET);
+
   Counters* counters = masm()->isolate()->counters();
   __ IncrementCounter(counters->named_store_global_inline(), 1, r4, r3);
   __ Ret();
@@ -3454,6 +3520,7 @@
     case EXTERNAL_FLOAT_ELEMENTS:
     case EXTERNAL_DOUBLE_ELEMENTS:
     case FAST_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3540,6 +3607,7 @@
       }
       break;
     case FAST_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3880,6 +3948,7 @@
       }
       break;
     case FAST_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3943,6 +4012,7 @@
           case EXTERNAL_FLOAT_ELEMENTS:
           case EXTERNAL_DOUBLE_ELEMENTS:
           case FAST_ELEMENTS:
+          case FAST_SMI_ONLY_ELEMENTS:
           case FAST_DOUBLE_ELEMENTS:
           case DICTIONARY_ELEMENTS:
           case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -4082,6 +4152,7 @@
           case EXTERNAL_FLOAT_ELEMENTS:
           case EXTERNAL_DOUBLE_ELEMENTS:
           case FAST_ELEMENTS:
+          case FAST_SMI_ONLY_ELEMENTS:
           case FAST_DOUBLE_ELEMENTS:
           case DICTIONARY_ELEMENTS:
           case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -4234,8 +4305,10 @@
 }
 
 
-void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
-                                                      bool is_js_array) {
+void KeyedStoreStubCompiler::GenerateStoreFastElement(
+    MacroAssembler* masm,
+    bool is_js_array,
+    ElementsKind elements_kind) {
   // ----------- S t a t e -------------
   //  -- r0    : value
   //  -- r1    : key
@@ -4277,15 +4350,33 @@
   __ cmp(key_reg, scratch);
   __ b(hs, &miss_force_generic);
 
-  __ add(scratch,
-         elements_reg, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
-  __ str(value_reg,
-         MemOperand(scratch, key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
-  __ RecordWrite(scratch,
-                 Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize),
-                 receiver_reg , elements_reg);
-
+  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+    __ JumpIfNotSmi(value_reg, &miss_force_generic);
+    __ add(scratch,
+           elements_reg,
+           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+    __ add(scratch,
+           scratch,
+           Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
+    __ str(value_reg, MemOperand(scratch));
+  } else {
+    ASSERT(elements_kind == FAST_ELEMENTS);
+    __ add(scratch,
+           elements_reg,
+           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+    __ add(scratch,
+           scratch,
+           Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize));
+    __ str(value_reg, MemOperand(scratch));
+    __ mov(receiver_reg, value_reg);
+    __ RecordWrite(elements_reg,  // Object.
+                   scratch,       // Address.
+                   receiver_reg,  // Value.
+                   kLRHasNotBeenSaved,
+                   kDontSaveFPRegs);
+  }
   // value_reg (r0) is preserved.
   // Done.
   __ Ret();
diff --git a/src/array.js b/src/array.js
index 4dd23c8..397adc7 100644
--- a/src/array.js
+++ b/src/array.js
@@ -201,17 +201,14 @@
 
 
 function ConvertToLocaleString(e) {
-  if (e == null) {
+  if (IS_NULL_OR_UNDEFINED(e)) {
     return '';
   } else {
-    // e_obj's toLocaleString might be overwritten, check if it is a function.
-    // Call ToString if toLocaleString is not a function.
-    // See issue 877615.
+    // According to ES5, section 15.4.4.3, the toLocaleString conversion
+    // must throw a TypeError if ToObject(e).toLocaleString isn't
+    // callable.
     var e_obj = ToObject(e);
-    if (IS_SPEC_FUNCTION(e_obj.toLocaleString))
-      return ToString(e_obj.toLocaleString());
-    else
-      return ToString(e);
+    return %ToString(e_obj.toLocaleString());
   }
 }
 
@@ -381,18 +378,31 @@
 
 
 function ArrayToString() {
-  if (!IS_ARRAY(this)) {
-    throw new $TypeError('Array.prototype.toString is not generic');
+  var array;
+  var func;
+  if (IS_ARRAY(this)) {
+    func = this.join;
+    if (func === ArrayJoin) {
+      return Join(this, this.length, ',', ConvertToString);
+    }
+    array = this;
+  } else {
+    array = ToObject(this);
+    func = array.join;
   }
-  return Join(this, this.length, ',', ConvertToString);
+  if (!IS_SPEC_FUNCTION(func)) {
+    return %_CallFunction(array, ObjectToString);
+  }
+  return %_CallFunction(array, func);
 }
 
 
 function ArrayToLocaleString() {
-  if (!IS_ARRAY(this)) {
-    throw new $TypeError('Array.prototype.toString is not generic');
-  }
-  return Join(this, this.length, ',', ConvertToLocaleString);
+  var array = ToObject(this);
+  var arrayLen = array.length;
+  var len = TO_UINT32(arrayLen);
+  if (len === 0) return "";
+  return Join(array, len, ',', ConvertToLocaleString);
 }
 
 
diff --git a/src/assembler.cc b/src/assembler.cc
index ad5f350..bda85e6 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -38,6 +38,7 @@
 #include "deoptimizer.h"
 #include "execution.h"
 #include "ic-inl.h"
+#include "incremental-marking.h"
 #include "factory.h"
 #include "runtime.h"
 #include "runtime-profiler.h"
@@ -47,6 +48,7 @@
 #include "ast.h"
 #include "regexp-macro-assembler.h"
 #include "platform.h"
+#include "store-buffer.h"
 // Include native regexp-macro-assembler.
 #ifndef V8_INTERPRETED_REGEXP
 #if V8_TARGET_ARCH_IA32
@@ -516,6 +518,7 @@
 
 
 RelocIterator::RelocIterator(Code* code, int mode_mask) {
+  rinfo_.host_ = code;
   rinfo_.pc_ = code->instruction_start();
   rinfo_.data_ = 0;
   // Relocation info is read backwards.
@@ -736,9 +739,38 @@
   : address_(table_ref.address()) {}
 
 
+ExternalReference ExternalReference::
+    incremental_marking_record_write_function(Isolate* isolate) {
+  return ExternalReference(Redirect(
+      isolate,
+      FUNCTION_ADDR(IncrementalMarking::RecordWriteFromCode)));
+}
+
+
+ExternalReference ExternalReference::
+    incremental_evacuation_record_write_function(Isolate* isolate) {
+  return ExternalReference(Redirect(
+      isolate,
+      FUNCTION_ADDR(IncrementalMarking::RecordWriteForEvacuationFromCode)));
+}
+
+
+ExternalReference ExternalReference::
+    store_buffer_overflow_function(Isolate* isolate) {
+  return ExternalReference(Redirect(
+      isolate,
+      FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow)));
+}
+
+
+ExternalReference ExternalReference::flush_icache_function(Isolate* isolate) {
+  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(CPU::FlushICache)));
+}
+
+
 ExternalReference ExternalReference::perform_gc_function(Isolate* isolate) {
-  return ExternalReference(Redirect(isolate,
-                                    FUNCTION_ADDR(Runtime::PerformGC)));
+  return
+      ExternalReference(Redirect(isolate, FUNCTION_ADDR(Runtime::PerformGC)));
 }
 
 
@@ -802,17 +834,6 @@
 }
 
 
-ExternalReference ExternalReference::the_hole_value_location(Isolate* isolate) {
-  return ExternalReference(isolate->factory()->the_hole_value().location());
-}
-
-
-ExternalReference ExternalReference::arguments_marker_location(
-    Isolate* isolate) {
-  return ExternalReference(isolate->factory()->arguments_marker().location());
-}
-
-
 ExternalReference ExternalReference::roots_address(Isolate* isolate) {
   return ExternalReference(isolate->heap()->roots_address());
 }
@@ -840,9 +861,14 @@
 }
 
 
+ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
+  return ExternalReference(isolate->heap()->store_buffer()->TopAddress());
+}
+
+
 ExternalReference ExternalReference::new_space_mask(Isolate* isolate) {
-  Address mask = reinterpret_cast<Address>(isolate->heap()->NewSpaceMask());
-  return ExternalReference(mask);
+  return ExternalReference(reinterpret_cast<Address>(
+      isolate->heap()->NewSpaceMask()));
 }
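
The new store_buffer_top and store_buffer_overflow_function references exist
so that generated write-barrier code can append slot addresses to the store
buffer directly and call out when it fills up (RememberedSetHelper in the
ARM macro assembler above is one user).  A toy model of that mechanism, with
an invented capacity and overflow policy; only the TopAddress/overflow shape
mirrors the real StoreBuffer:

    #include <cstdint>
    #include <cstdio>

    class ToyStoreBuffer {
     public:
      ToyStoreBuffer() : top_(slots_) {}
      uintptr_t** TopAddress() { return &top_; }      // what store_buffer_top exposes
      void Record(uintptr_t slot_address) {           // what the generated helper effectively does
        *top_++ = slot_address;
        if (top_ == slots_ + kCapacity) Overflow();   // store_buffer_overflow_function
      }

     private:
      void Overflow() {                               // stand-in for StoreBuffer::StoreBufferOverflow
        printf("draining %d entries\n", static_cast<int>(top_ - slots_));
        top_ = slots_;
      }
      static const int kCapacity = 1024;              // invented for the sketch
      uintptr_t slots_[kCapacity];
      uintptr_t* top_;
    };

    int main() {
      ToyStoreBuffer buffer;
      uintptr_t dummy_slot = 0;
      for (int i = 0; i < 1500; i++) buffer.Record((uintptr_t)&dummy_slot);
      return 0;
    }
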
 
 
diff --git a/src/assembler.h b/src/assembler.h
index d58034d..01c3a70 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -143,6 +143,9 @@
 };
 
 
+enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };
+
+
 // -----------------------------------------------------------------------------
 // Relocation information
 
@@ -216,8 +219,9 @@
 
 
   RelocInfo() {}
-  RelocInfo(byte* pc, Mode rmode, intptr_t data)
-      : pc_(pc), rmode_(rmode), data_(data) {
+
+  RelocInfo(byte* pc, Mode rmode, intptr_t data, Code* host)
+      : pc_(pc), rmode_(rmode), data_(data), host_(host) {
   }
 
   static inline bool IsConstructCall(Mode mode) {
@@ -258,6 +262,7 @@
   void set_pc(byte* pc) { pc_ = pc; }
   Mode rmode() const {  return rmode_; }
   intptr_t data() const { return data_; }
+  Code* host() const { return host_; }
 
   // Apply a relocation by delta bytes
   INLINE(void apply(intptr_t delta));
@@ -353,6 +358,7 @@
   byte* pc_;
   Mode rmode_;
   intptr_t data_;
+  Code* host_;
 #ifdef V8_TARGET_ARCH_MIPS
   // Code and Embedded Object pointers in mips are stored split
   // across two consecutive 32-bit instructions. Heap management
@@ -561,6 +567,13 @@
   // pattern. This means that they have to be added to the
   // ExternalReferenceTable in serialize.cc manually.
 
+  static ExternalReference incremental_marking_record_write_function(
+      Isolate* isolate);
+  static ExternalReference incremental_evacuation_record_write_function(
+      Isolate* isolate);
+  static ExternalReference store_buffer_overflow_function(
+      Isolate* isolate);
+  static ExternalReference flush_icache_function(Isolate* isolate);
   static ExternalReference perform_gc_function(Isolate* isolate);
   static ExternalReference fill_heap_number_with_random_function(
       Isolate* isolate);
@@ -577,12 +590,6 @@
   static ExternalReference keyed_lookup_cache_keys(Isolate* isolate);
   static ExternalReference keyed_lookup_cache_field_offsets(Isolate* isolate);
 
-  // Static variable Factory::the_hole_value.location()
-  static ExternalReference the_hole_value_location(Isolate* isolate);
-
-  // Static variable Factory::arguments_marker.location()
-  static ExternalReference arguments_marker_location(Isolate* isolate);
-
   // Static variable Heap::roots_address()
   static ExternalReference roots_address(Isolate* isolate);
 
@@ -606,6 +613,10 @@
   static ExternalReference new_space_start(Isolate* isolate);
   static ExternalReference new_space_mask(Isolate* isolate);
   static ExternalReference heap_always_allocate_scope_depth(Isolate* isolate);
+  static ExternalReference new_space_mark_bits(Isolate* isolate);
+
+  // Write barrier.
+  static ExternalReference store_buffer_top(Isolate* isolate);
 
   // Used for fast allocation in generated code.
   static ExternalReference new_space_allocation_top_address(Isolate* isolate);
diff --git a/src/ast.cc b/src/ast.cc
index 418cc43..d493814 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -327,59 +327,80 @@
 }
 
 
+static bool IsTypeof(Expression* expr) {
+  UnaryOperation* maybe_unary = expr->AsUnaryOperation();
+  return maybe_unary != NULL && maybe_unary->op() == Token::TYPEOF;
+}
+
+
+// Check for the pattern: typeof <expression> equals <string literal>.
+static bool MatchLiteralCompareTypeof(Expression* left,
+                                      Token::Value op,
+                                      Expression* right,
+                                      Expression** expr,
+                                      Handle<String>* check) {
+  if (IsTypeof(left) && right->IsStringLiteral() && Token::IsEqualityOp(op)) {
+    *expr = left->AsUnaryOperation()->expression();
+    *check = Handle<String>::cast(right->AsLiteral()->handle());
+    return true;
+  }
+  return false;
+}
+
+
 bool CompareOperation::IsLiteralCompareTypeof(Expression** expr,
                                               Handle<String>* check) {
-  if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return false;
+  return MatchLiteralCompareTypeof(left_, op_, right_, expr, check) ||
+      MatchLiteralCompareTypeof(right_, op_, left_, expr, check);
+}
 
-  UnaryOperation* left_unary = left_->AsUnaryOperation();
-  UnaryOperation* right_unary = right_->AsUnaryOperation();
-  Literal* left_literal = left_->AsLiteral();
-  Literal* right_literal = right_->AsLiteral();
 
-  // Check for the pattern: typeof <expression> == <string literal>.
-  if (left_unary != NULL && left_unary->op() == Token::TYPEOF &&
-      right_literal != NULL && right_literal->handle()->IsString()) {
-    *expr = left_unary->expression();
-    *check = Handle<String>::cast(right_literal->handle());
+static bool IsVoidOfLiteral(Expression* expr) {
+  UnaryOperation* maybe_unary = expr->AsUnaryOperation();
+  return maybe_unary != NULL &&
+      maybe_unary->op() == Token::VOID &&
+      maybe_unary->expression()->AsLiteral() != NULL;
+}
+
+
+// Check for the pattern: void <literal> equals <expression>
+static bool MatchLiteralCompareUndefined(Expression* left,
+                                         Token::Value op,
+                                         Expression* right,
+                                         Expression** expr) {
+  if (IsVoidOfLiteral(left) && Token::IsEqualityOp(op)) {
+    *expr = right;
     return true;
   }
-
-  // Check for the pattern: <string literal> == typeof <expression>.
-  if (right_unary != NULL && right_unary->op() == Token::TYPEOF &&
-      left_literal != NULL && left_literal->handle()->IsString()) {
-    *expr = right_unary->expression();
-    *check = Handle<String>::cast(left_literal->handle());
-    return true;
-  }
-
   return false;
 }
 
 
 bool CompareOperation::IsLiteralCompareUndefined(Expression** expr) {
-  if (op_ != Token::EQ_STRICT) return false;
+  return MatchLiteralCompareUndefined(left_, op_, right_, expr) ||
+      MatchLiteralCompareUndefined(right_, op_, left_, expr);
+}
 
-  UnaryOperation* left_unary = left_->AsUnaryOperation();
-  UnaryOperation* right_unary = right_->AsUnaryOperation();
 
-  // Check for the pattern: <expression> === void <literal>.
-  if (right_unary != NULL && right_unary->op() == Token::VOID &&
-      right_unary->expression()->AsLiteral() != NULL) {
-    *expr = left_;
+// Check for the pattern: null equals <expression>
+static bool MatchLiteralCompareNull(Expression* left,
+                                    Token::Value op,
+                                    Expression* right,
+                                    Expression** expr) {
+  if (left->IsNullLiteral() && Token::IsEqualityOp(op)) {
+    *expr = right;
     return true;
   }
-
-  // Check for the pattern: void <literal> === <expression>.
-  if (left_unary != NULL && left_unary->op() == Token::VOID &&
-      left_unary->expression()->AsLiteral() != NULL) {
-    *expr = right_;
-    return true;
-  }
-
   return false;
 }
 
 
+bool CompareOperation::IsLiteralCompareNull(Expression** expr) {
+  return MatchLiteralCompareNull(left_, op_, right_, expr) ||
+      MatchLiteralCompareNull(right_, op_, left_, expr);
+}
+
+
 // ----------------------------------------------------------------------------
 // Inlining support
 
@@ -529,7 +550,9 @@
 
 
 bool VariableProxy::IsInlineable() const {
-  return var()->IsUnallocated() || var()->IsStackAllocated();
+  return var()->IsUnallocated()
+      || var()->IsStackAllocated()
+      || var()->IsContextSlot();
 }
 
 
@@ -598,11 +621,6 @@
 }
 
 
-bool CompareToNull::IsInlineable() const {
-  return expression()->IsInlineable();
-}
-
-
 bool CountOperation::IsInlineable() const {
   return expression()->IsInlineable();
 }
@@ -746,37 +764,41 @@
 
 void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
                               CallKind call_kind) {
-  Property* property = expression()->AsProperty();
-  ASSERT(property != NULL);
-  // Specialize for the receiver types seen at runtime.
-  Literal* key = property->key()->AsLiteral();
-  ASSERT(key != NULL && key->handle()->IsString());
-  Handle<String> name = Handle<String>::cast(key->handle());
-  receiver_types_.Clear();
-  oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_);
-#ifdef DEBUG
-  if (FLAG_enable_slow_asserts) {
-    int length = receiver_types_.length();
-    for (int i = 0; i < length; i++) {
-      Handle<Map> map = receiver_types_.at(i);
-      ASSERT(!map.is_null() && *map != NULL);
-    }
-  }
-#endif
   is_monomorphic_ = oracle->CallIsMonomorphic(this);
-  check_type_ = oracle->GetCallCheckType(this);
-  if (is_monomorphic_) {
-    Handle<Map> map;
-    if (receiver_types_.length() > 0) {
-      ASSERT(check_type_ == RECEIVER_MAP_CHECK);
-      map = receiver_types_.at(0);
-    } else {
-      ASSERT(check_type_ != RECEIVER_MAP_CHECK);
-      holder_ = Handle<JSObject>(
-          oracle->GetPrototypeForPrimitiveCheck(check_type_));
-      map = Handle<Map>(holder_->map());
+  Property* property = expression()->AsProperty();
+  if (property == NULL) {
+    // Function call.  Specialize for monomorphic calls.
+    if (is_monomorphic_) target_ = oracle->GetCallTarget(this);
+  } else {
+    // Method call.  Specialize for the receiver types seen at runtime.
+    Literal* key = property->key()->AsLiteral();
+    ASSERT(key != NULL && key->handle()->IsString());
+    Handle<String> name = Handle<String>::cast(key->handle());
+    receiver_types_.Clear();
+    oracle->CallReceiverTypes(this, name, call_kind, &receiver_types_);
+#ifdef DEBUG
+    if (FLAG_enable_slow_asserts) {
+      int length = receiver_types_.length();
+      for (int i = 0; i < length; i++) {
+        Handle<Map> map = receiver_types_.at(i);
+        ASSERT(!map.is_null() && *map != NULL);
+      }
     }
-    is_monomorphic_ = ComputeTarget(map, name);
+#endif
+    check_type_ = oracle->GetCallCheckType(this);
+    if (is_monomorphic_) {
+      Handle<Map> map;
+      if (receiver_types_.length() > 0) {
+        ASSERT(check_type_ == RECEIVER_MAP_CHECK);
+        map = receiver_types_.at(0);
+      } else {
+        ASSERT(check_type_ != RECEIVER_MAP_CHECK);
+        holder_ = Handle<JSObject>(
+            oracle->GetPrototypeForPrimitiveCheck(check_type_));
+        map = Handle<Map>(holder_->map());
+      }
+      is_monomorphic_ = ComputeTarget(map, name);
+    }
   }
 }
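
IsLiteralCompareNull above runs the same matcher twice with the operands
swapped, so both "x == null" and "null == x" are recognized and *expr ends up
pointing at the non-literal operand either way. A standalone sketch of that
symmetric-matching pattern (the toy types and names below are illustrative,
not V8's):

    #include <cassert>
    #include <string>

    // Toy expression node: either a null literal or a named variable.
    struct Expr {
      bool is_null_literal;
      std::string name;
    };

    // One-directional match: "<null literal> <equality op> <expression>".
    // On success, *out receives the non-literal side.
    static bool MatchNullCompare(const Expr* left, bool is_equality_op,
                                 const Expr* right, const Expr** out) {
      if (left->is_null_literal && is_equality_op) {
        *out = right;
        return true;
      }
      return false;
    }

    // Symmetric wrapper: try both operand orders, as the AST code above does.
    static bool IsNullCompare(const Expr* left, bool is_equality_op,
                              const Expr* right, const Expr** out) {
      return MatchNullCompare(left, is_equality_op, right, out) ||
             MatchNullCompare(right, is_equality_op, left, out);
    }

    int main() {
      Expr null_lit{true, ""};
      Expr x{false, "x"};
      const Expr* matched = nullptr;
      assert(IsNullCompare(&x, true, &null_lit, &matched) && matched == &x);
      assert(IsNullCompare(&null_lit, true, &x, &matched) && matched == &x);
      return 0;
    }
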
 
diff --git a/src/ast.h b/src/ast.h
index b56205f..00cfd7f 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -90,7 +90,6 @@
   V(CountOperation)                             \
   V(BinaryOperation)                            \
   V(CompareOperation)                           \
-  V(CompareToNull)                              \
   V(ThisFunction)
 
 #define AST_NODE_LIST(V)                        \
@@ -289,6 +288,12 @@
   // True iff the expression is a literal represented as a smi.
   virtual bool IsSmiLiteral() { return false; }
 
+  // True iff the expression is a string literal.
+  virtual bool IsStringLiteral() { return false; }
+
+  // True iff the expression is the null literal.
+  virtual bool IsNullLiteral() { return false; }
+
   // Type feedback information for assignments and properties.
   virtual bool IsMonomorphic() {
     UNREACHABLE();
@@ -891,6 +896,8 @@
 
   virtual bool IsTrivial() { return true; }
   virtual bool IsSmiLiteral() { return handle_->IsSmi(); }
+  virtual bool IsStringLiteral() { return handle_->IsString(); }
+  virtual bool IsNullLiteral() { return handle_->IsNull(); }
 
   // Check if this literal is identical to the other literal.
   bool IsIdenticalTo(const Literal* other) const {
@@ -1465,6 +1472,7 @@
   // Match special cases.
   bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
   bool IsLiteralCompareUndefined(Expression** expr);
+  bool IsLiteralCompareNull(Expression** expr);
 
  private:
   Token::Value op_;
@@ -1477,25 +1485,6 @@
 };
 
 
-class CompareToNull: public Expression {
- public:
-  CompareToNull(Isolate* isolate, bool is_strict, Expression* expression)
-      : Expression(isolate), is_strict_(is_strict), expression_(expression) { }
-
-  DECLARE_NODE_TYPE(CompareToNull)
-
-  virtual bool IsInlineable() const;
-
-  bool is_strict() const { return is_strict_; }
-  Token::Value op() const { return is_strict_ ? Token::EQ_STRICT : Token::EQ; }
-  Expression* expression() const { return expression_; }
-
- private:
-  bool is_strict_;
-  Expression* expression_;
-};
-
-
 class Conditional: public Expression {
  public:
   Conditional(Isolate* isolate,
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index f07e625..dc722cb 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -34,6 +34,7 @@
 #include "debug.h"
 #include "execution.h"
 #include "global-handles.h"
+#include "isolate-inl.h"
 #include "macro-assembler.h"
 #include "natives.h"
 #include "objects-visiting.h"
@@ -995,6 +996,26 @@
         initial_map->instance_size() + 5 * kPointerSize);
     initial_map->set_instance_descriptors(*descriptors);
     initial_map->set_visitor_id(StaticVisitorBase::GetVisitorId(*initial_map));
+
+    // RegExp prototype object is itself a RegExp.
+    Handle<Map> proto_map = factory->CopyMapDropTransitions(initial_map);
+    proto_map->set_prototype(global_context()->initial_object_prototype());
+    Handle<JSObject> proto = factory->NewJSObjectFromMap(proto_map);
+    proto->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex,
+                                 heap->empty_string());
+    proto->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex,
+                                 heap->false_value());
+    proto->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex,
+                                 heap->false_value());
+    proto->InObjectPropertyAtPut(JSRegExp::kMultilineFieldIndex,
+                                 heap->false_value());
+    proto->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
+                                 Smi::FromInt(0),
+                                 SKIP_WRITE_BARRIER);  // It's a Smi.
+    initial_map->set_prototype(*proto);
+    factory->SetRegExpIrregexpData(Handle<JSRegExp>::cast(proto),
+                                   JSRegExp::IRREGEXP, factory->empty_string(),
+                                   JSRegExp::Flags(0), 0);
   }
 
   {  // -- J S O N
@@ -1076,6 +1097,11 @@
     elements->set(0, *array);
     array = factory->NewFixedArray(0);
     elements->set(1, *array);
+    Handle<Map> non_strict_arguments_elements_map =
+        factory->GetElementsTransitionMap(result,
+                                          NON_STRICT_ARGUMENTS_ELEMENTS);
+    result->set_map(*non_strict_arguments_elements_map);
+    ASSERT(result->HasNonStrictArgumentsElements());
     result->set_elements(*elements);
     global_context()->set_aliased_arguments_boilerplate(*result);
   }
@@ -1327,6 +1353,8 @@
                  configure_instance_fun);
   INSTALL_NATIVE(JSFunction, "GetStackTraceLine", get_stack_trace_line_fun);
   INSTALL_NATIVE(JSObject, "functionCache", function_cache);
+  INSTALL_NATIVE(JSFunction, "ToCompletePropertyDescriptor",
+                 to_complete_property_descriptor);
 }
 
 void Genesis::InstallExperimentalNativeFunctions() {
@@ -1555,6 +1583,18 @@
         isolate()->builtins()->builtin(Builtins::kArrayConstructCode));
     array_function->shared()->DontAdaptArguments();
 
+    // InternalArrays should not use Smi-Only array optimizations. There are too
+    // many places in the C++ runtime code (e.g. RegEx) that assume that
+    // elements in InternalArrays can be set to non-Smi values without going
+    // through a common bottleneck that would make the SMI_ONLY -> FAST_ELEMENT
+    // transition easy to trap. Moreover, they rarely are smi-only.
+    MaybeObject* maybe_map =
+        array_function->initial_map()->CopyDropTransitions();
+    Map* new_map;
+    if (!maybe_map->To<Map>(&new_map)) return maybe_map;
+    new_map->set_elements_kind(FAST_ELEMENTS);
+    array_function->set_initial_map(new_map);
+
     // Make "length" magic on instances.
     Handle<DescriptorArray> array_descriptors =
         factory()->CopyAppendForeignDescriptor(
@@ -1938,14 +1978,15 @@
     if (!InstallExtension(extension->dependencies()[i])) return false;
   }
   Isolate* isolate = Isolate::Current();
-  Vector<const char> source = CStrVector(extension->source());
-  Handle<String> source_code = isolate->factory()->NewStringFromAscii(source);
-  bool result = CompileScriptCached(CStrVector(extension->name()),
-                                    source_code,
-                                    isolate->bootstrapper()->extensions_cache(),
-                                    extension,
-                                    Handle<Context>(isolate->context()),
-                                    false);
+  Handle<String> source_code =
+      isolate->factory()->NewExternalStringFromAscii(extension->source());
+  bool result = CompileScriptCached(
+      CStrVector(extension->name()),
+      source_code,
+      isolate->bootstrapper()->extensions_cache(),
+      extension,
+      Handle<Context>(isolate->context()),
+      false);
   ASSERT(isolate->has_pending_exception() != result);
   if (!result) {
     isolate->clear_pending_exception();
diff --git a/src/builtins.cc b/src/builtins.cc
index e6a0699..5104f6d 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -33,6 +33,7 @@
 #include "builtins.h"
 #include "gdb-jit.h"
 #include "ic-inl.h"
+#include "mark-compact.h"
 #include "vm-state-inl.h"
 
 namespace v8 {
@@ -202,7 +203,7 @@
   }
 
   // 'array' now contains the JSArray we should initialize.
-  ASSERT(array->HasFastElements());
+  ASSERT(array->HasFastTypeElements());
 
   // Optimize the case where there is one argument and the argument is a
   // small smi.
@@ -215,7 +216,8 @@
         { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len);
           if (!maybe_obj->ToObject(&obj)) return maybe_obj;
         }
-        array->SetContent(FixedArray::cast(obj));
+        MaybeObject* maybe_obj = array->SetContent(FixedArray::cast(obj));
+        if (maybe_obj->IsFailure()) return maybe_obj;
         return array;
       }
     }
@@ -239,6 +241,13 @@
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
 
+  // Set length and elements on the array.
+  if (FLAG_smi_only_arrays) {
+    MaybeObject* maybe_object =
+        array->EnsureCanContainElements(FixedArray::cast(obj));
+    if (maybe_object->IsFailure()) return maybe_object;
+  }
+
   AssertNoAllocation no_gc;
   FixedArray* elms = FixedArray::cast(obj);
   WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
@@ -247,7 +256,6 @@
     elms->set(index, args[index+1], mode);
   }
 
-  // Set length and elements on the array.
   array->set_elements(FixedArray::cast(obj));
   array->set_length(len);
 
@@ -295,6 +303,7 @@
   if (mode == UPDATE_WRITE_BARRIER) {
     heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
   }
+  heap->incremental_marking()->RecordWrites(dst);
 }
 
 
@@ -313,6 +322,7 @@
   if (mode == UPDATE_WRITE_BARRIER) {
     heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
   }
+  heap->incremental_marking()->RecordWrites(dst);
 }
 
 
@@ -358,6 +368,14 @@
   former_start[to_trim] = heap->fixed_array_map();
   former_start[to_trim + 1] = Smi::FromInt(len - to_trim);
 
+  // Maintain marking consistency for HeapObjectIterator and
+  // IncrementalMarking.
+  int size_delta = to_trim * kPointerSize;
+  if (heap->marking()->TransferMark(elms->address(),
+                                    elms->address() + size_delta)) {
+    MemoryChunk::IncrementLiveBytes(elms->address(), -size_delta);
+  }
+
   return FixedArray::cast(HeapObject::FromAddress(
       elms->address() + to_trim * kPointerSize));
 }
@@ -423,7 +441,7 @@
   for (int i = 0; i < n_args; i++) {
     argv[i] = args.at<Object>(i + 1).location();
   }
-  bool pending_exception = false;
+  bool pending_exception;
   Handle<Object> result = Execution::Call(function,
                                           args.receiver(),
                                           n_args,
@@ -475,9 +493,11 @@
     FillWithHoles(heap, new_elms, new_length, capacity);
 
     elms = new_elms;
-    array->set_elements(elms);
   }
 
+  MaybeObject* maybe = array->EnsureCanContainElements(&args, 1, to_add);
+  if (maybe->IsFailure()) return maybe;
+
   // Add the provided values.
   AssertNoAllocation no_gc;
   WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
@@ -485,6 +505,10 @@
     elms->set(index + len, args[index + 1], mode);
   }
 
+  if (elms != array->elements()) {
+    array->set_elements(elms);
+  }
+
   // Set the length.
   array->set_length(Smi::FromInt(new_length));
   return Smi::FromInt(new_length);
@@ -539,7 +563,7 @@
   }
   FixedArray* elms = FixedArray::cast(elms_obj);
   JSArray* array = JSArray::cast(receiver);
-  ASSERT(array->HasFastElements());
+  ASSERT(array->HasFastTypeElements());
 
   int len = Smi::cast(array->length())->value();
   if (len == 0) return heap->undefined_value();
@@ -551,9 +575,7 @@
   }
 
   if (!heap->lo_space()->Contains(elms)) {
-    // As elms still in the same space they used to be,
-    // there is no need to update region dirty mark.
-    array->set_elements(LeftTrimFixedArray(heap, elms, 1), SKIP_WRITE_BARRIER);
+    array->set_elements(LeftTrimFixedArray(heap, elms, 1));
   } else {
     // Shift the elements.
     AssertNoAllocation no_gc;
@@ -583,7 +605,7 @@
   }
   FixedArray* elms = FixedArray::cast(elms_obj);
   JSArray* array = JSArray::cast(receiver);
-  ASSERT(array->HasFastElements());
+  ASSERT(array->HasFastTypeElements());
 
   int len = Smi::cast(array->length())->value();
   int to_add = args.length() - 1;
@@ -592,6 +614,12 @@
   // we should never hit this case.
   ASSERT(to_add <= (Smi::kMaxValue - len));
 
+  if (FLAG_smi_only_arrays) {
+    MaybeObject* maybe_object =
+        array->EnsureCanContainElements(&args, 1, to_add);
+    if (maybe_object->IsFailure()) return maybe_object;
+  }
+
   if (new_length > elms->length()) {
     // New backing storage is needed.
     int capacity = new_length + (new_length >> 1) + 16;
@@ -600,13 +628,11 @@
       if (!maybe_obj->ToObject(&obj)) return maybe_obj;
     }
     FixedArray* new_elms = FixedArray::cast(obj);
-
     AssertNoAllocation no_gc;
     if (len > 0) {
       CopyElements(heap, &no_gc, new_elms, to_add, elms, 0, len);
     }
     FillWithHoles(heap, new_elms, new_length, capacity);
-
     elms = new_elms;
     array->set_elements(elms);
   } else {
@@ -634,7 +660,7 @@
   int len = -1;
   if (receiver->IsJSArray()) {
     JSArray* array = JSArray::cast(receiver);
-    if (!array->HasFastElements() ||
+    if (!array->HasFastTypeElements() ||
         !IsJSArrayFastElementMovingAllowed(heap, array)) {
       return CallJsBuiltin(isolate, "ArraySlice", args);
     }
@@ -650,7 +676,7 @@
     bool is_arguments_object_with_fast_elements =
         receiver->IsJSObject()
         && JSObject::cast(receiver)->map() == arguments_map
-        && JSObject::cast(receiver)->HasFastElements();
+        && JSObject::cast(receiver)->HasFastTypeElements();
     if (!is_arguments_object_with_fast_elements) {
       return CallJsBuiltin(isolate, "ArraySlice", args);
     }
@@ -721,6 +747,12 @@
   }
   FixedArray* result_elms = FixedArray::cast(result);
 
+  if (FLAG_smi_only_arrays) {
+    MaybeObject* maybe_object =
+        result_array->EnsureCanContainElements(result_elms);
+    if (maybe_object->IsFailure()) return maybe_object;
+  }
+
   AssertNoAllocation no_gc;
   CopyElements(heap, &no_gc, result_elms, 0, elms, k, result_len);
 
@@ -748,7 +780,7 @@
   }
   FixedArray* elms = FixedArray::cast(elms_obj);
   JSArray* array = JSArray::cast(receiver);
-  ASSERT(array->HasFastElements());
+  ASSERT(array->HasFastTypeElements());
 
   int len = Smi::cast(array->length())->value();
 
@@ -826,8 +858,14 @@
 
   int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
 
+  if (FLAG_smi_only_arrays) {
+    MaybeObject* maybe = array->EnsureCanContainElements(&args, 3, item_count);
+    if (maybe->IsFailure()) return maybe;
+  }
+
   int new_length = len - actual_delete_count + item_count;
 
+  bool elms_changed = false;
   if (item_count < actual_delete_count) {
     // Shrink the array.
     const bool trim_array = !heap->lo_space()->Contains(elms) &&
@@ -842,7 +880,8 @@
       }
 
       elms = LeftTrimFixedArray(heap, elms, delta);
-      array->set_elements(elms, SKIP_WRITE_BARRIER);
+
+      elms_changed = true;
     } else {
       AssertNoAllocation no_gc;
       MoveElements(heap, &no_gc,
@@ -882,7 +921,7 @@
       FillWithHoles(heap, new_elms, new_length, capacity);
 
       elms = new_elms;
-      array->set_elements(elms);
+      elms_changed = true;
     } else {
       AssertNoAllocation no_gc;
       MoveElements(heap, &no_gc,
@@ -898,6 +937,10 @@
     elms->set(k, args[3 + k - actual_start], mode);
   }
 
+  if (elms_changed) {
+    array->set_elements(elms);
+  }
+
   // Set the length.
   array->set_length(Smi::FromInt(new_length));
 
@@ -920,7 +963,7 @@
   int result_len = 0;
   for (int i = 0; i < n_arguments; i++) {
     Object* arg = args[i];
-    if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastElements()
+    if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastTypeElements()
         || JSArray::cast(arg)->GetPrototype() != array_proto) {
       return CallJsBuiltin(isolate, "ArrayConcat", args);
     }
@@ -956,6 +999,19 @@
   }
   FixedArray* result_elms = FixedArray::cast(result);
 
+  if (FLAG_smi_only_arrays) {
+    for (int i = 0; i < n_arguments; i++) {
+      JSArray* array = JSArray::cast(args[i]);
+      int len = Smi::cast(array->length())->value();
+      if (len > 0) {
+        FixedArray* elms = FixedArray::cast(array->elements());
+        MaybeObject* maybe_object =
+            result_array->EnsureCanContainElements(elms);
+        if (maybe_object->IsFailure()) return maybe_object;
+      }
+    }
+  }
+
   // Copy data.
   AssertNoAllocation no_gc;
   int start_pos = 0;
@@ -1607,20 +1663,22 @@
   const BuiltinDesc* functions = BuiltinFunctionTable::functions();
 
   // For now we generate builtin adaptor code into a stack-allocated
-  // buffer, before copying it into individual code objects.
-  byte buffer[4*KB];
+  // buffer, before copying it into individual code objects. Be careful
+  // with alignment; some platforms don't like unaligned code.
+  union { int force_alignment; byte buffer[4*KB]; } u;
 
   // Traverse the list of builtins and generate an adaptor in a
   // separate code object for each one.
   for (int i = 0; i < builtin_count; i++) {
     if (create_heap_objects) {
-      MacroAssembler masm(isolate, buffer, sizeof buffer);
+      MacroAssembler masm(isolate, u.buffer, sizeof u.buffer);
       // Generate the code/adaptor.
       typedef void (*Generator)(MacroAssembler*, int, BuiltinExtraArguments);
       Generator g = FUNCTION_CAST<Generator>(functions[i].generator);
       // We pass all arguments to the generator, but it may not use all of
       // them.  This works because the first arguments are on top of the
       // stack.
+      ASSERT(!masm.has_frame());
       g(&masm, functions[i].name, functions[i].extra_args);
       // Move the code into the object heap.
       CodeDesc desc;
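
The buffer change above wraps the stack-allocated code buffer in a union with
an int so the bytes get at least int alignment before code is generated into
them. A small standalone sketch of the same trick; the alignas line is just a
C++11 aside showing the equivalent explicit request, not something the patch
uses:

    #include <cstdio>

    int main() {
      // Pre-C++11 trick from the patch: union the byte buffer with an int so
      // the whole object inherits the int's alignment requirement.
      union {
        int force_alignment;
        unsigned char buffer[4 * 1024];
      } u;

      // C++11 equivalent: ask for the alignment explicitly.
      alignas(alignof(int)) unsigned char buffer11[4 * 1024];

      std::printf("union buffer at %p, alignas buffer at %p\n",
                  static_cast<void*>(u.buffer), static_cast<void*>(buffer11));
      return 0;
    }
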
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 00da4cb..4bc2603 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -52,11 +52,12 @@
   // Update the static counter each time a new code stub is generated.
   masm->isolate()->counters()->code_stubs()->Increment();
 
-  // Nested stubs are not allowed for leafs.
-  AllowStubCallsScope allow_scope(masm, AllowsStubCalls());
+  // Nested stubs are not allowed for leaves.
+  AllowStubCallsScope allow_scope(masm, false);
 
   // Generate the code for the stub.
   masm->set_generating_stub(true);
+  NoCurrentFrameScope scope(masm);
   Generate(masm);
 }
 
@@ -127,8 +128,10 @@
             GetKey(),
             new_object);
     heap->public_set_code_stubs(*dict);
-
     code = *new_object;
+    Activate(code);
+  } else {
+    CHECK(IsPregenerated() == code->is_pregenerated());
   }
 
   ASSERT(!NeedsImmovableCode() || heap->lo_space()->Contains(code));
@@ -166,7 +169,11 @@
         heap->code_stubs()->AtNumberPut(GetKey(), code);
     if (maybe_new_object->ToObject(&new_object)) {
       heap->public_set_code_stubs(NumberDictionary::cast(new_object));
+    } else if (MustBeInStubCache()) {
+      return maybe_new_object;
     }
+
+    Activate(code);
   }
 
   return code;
@@ -188,6 +195,11 @@
 }
 
 
+void CodeStub::PrintName(StringStream* stream) {
+  stream->Add("%s", MajorName(MajorKey(), false));
+}
+
+
 int ICCompareStub::MinorKey() {
   return OpField::encode(op_ - Token::EQ) | StateField::encode(state_);
 }
@@ -245,6 +257,7 @@
 void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
   switch (elements_kind_) {
     case FAST_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
       KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
       break;
     case FAST_DOUBLE_ELEMENTS:
@@ -274,7 +287,11 @@
 void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
   switch (elements_kind_) {
     case FAST_ELEMENTS:
-      KeyedStoreStubCompiler::GenerateStoreFastElement(masm, is_js_array_);
+    case FAST_SMI_ONLY_ELEMENTS: {
+      KeyedStoreStubCompiler::GenerateStoreFastElement(masm,
+                                                       is_js_array_,
+                                                       elements_kind_);
+    }
       break;
     case FAST_DOUBLE_ELEMENTS:
       KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
@@ -302,24 +319,20 @@
 
 
 void ArgumentsAccessStub::PrintName(StringStream* stream) {
-  const char* type_name = NULL;  // Make g++ happy.
+  stream->Add("ArgumentsAccessStub_");
   switch (type_) {
-    case READ_ELEMENT: type_name = "ReadElement"; break;
-    case NEW_NON_STRICT_FAST: type_name = "NewNonStrictFast"; break;
-    case NEW_NON_STRICT_SLOW: type_name = "NewNonStrictSlow"; break;
-    case NEW_STRICT: type_name = "NewStrict"; break;
+    case READ_ELEMENT: stream->Add("ReadElement"); break;
+    case NEW_NON_STRICT_FAST: stream->Add("NewNonStrictFast"); break;
+    case NEW_NON_STRICT_SLOW: stream->Add("NewNonStrictSlow"); break;
+    case NEW_STRICT: stream->Add("NewStrict"); break;
   }
-  stream->Add("ArgumentsAccessStub_%s", type_name);
 }
 
 
 void CallFunctionStub::PrintName(StringStream* stream) {
-  const char* flags_name = NULL;  // Make g++ happy.
-  switch (flags_) {
-    case NO_CALL_FUNCTION_FLAGS: flags_name = ""; break;
-    case RECEIVER_MIGHT_BE_IMPLICIT: flags_name = "_Implicit"; break;
-  }
-  stream->Add("CallFunctionStub_Args%d%s", argc_, flags_name);
+  stream->Add("CallFunctionStub_Args%d", argc_);
+  if (ReceiverMightBeImplicit()) stream->Add("_Implicit");
+  if (RecordCallTarget()) stream->Add("_Recording");
 }
 
 
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 64c89b9..9d4baf4 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -45,15 +45,11 @@
   V(Compare)                             \
   V(CompareIC)                           \
   V(MathPow)                             \
+  V(RecordWrite)                         \
+  V(StoreBufferOverflow)                 \
+  V(RegExpExec)                          \
   V(TranscendentalCache)                 \
   V(Instanceof)                          \
-  /* All stubs above this line only exist in a few versions, which are  */  \
-  /* generated ahead of time.  Therefore compiling a call to one of     */  \
-  /* them can't cause a new stub to be compiled, so compiling a call to */  \
-  /* them is GC safe.  The ones below this line exist in many variants  */  \
-  /* so code compiling a call to one can cause a GC.  This means they   */  \
-  /* can't be called from other stubs, since stub generation code is    */  \
-  /* not GC safe.                                                       */  \
   V(ConvertToDouble)                     \
   V(WriteInt32ToHeapNumber)              \
   V(StackCheck)                          \
@@ -65,7 +61,6 @@
   V(ToNumber)                            \
   V(CounterOp)                           \
   V(ArgumentsAccess)                     \
-  V(RegExpExec)                          \
   V(RegExpConstructResult)               \
   V(NumberToString)                      \
   V(CEntry)                              \
@@ -73,7 +68,7 @@
   V(KeyedLoadElement)                    \
   V(KeyedStoreElement)                   \
   V(DebuggerStatement)                   \
-  V(StringDictionaryNegativeLookup)
+  V(StringDictionaryLookup)
 
 // List of code stubs only used on ARM platforms.
 #ifdef V8_TARGET_ARCH_ARM
@@ -142,6 +137,27 @@
 
   virtual ~CodeStub() {}
 
+  bool CompilingCallsToThisStubIsGCSafe() {
+    bool is_pregenerated = IsPregenerated();
+    Code* code = NULL;
+    CHECK(!is_pregenerated || FindCodeInCache(&code));
+    return is_pregenerated;
+  }
+
+  // See comment above, where Instanceof is defined.
+  virtual bool IsPregenerated() { return false; }
+
+  static void GenerateStubsAheadOfTime();
+  static void GenerateFPStubs();
+
+  // Some stubs put untagged junk on the stack that cannot be scanned by the
+  // GC.  This means that we must be statically sure that no GC can occur while
+  // they are running.  If that is the case they should override this to return
+  // false, which will cause an assertion if we try to call something that can
+  // GC or if we try to put a stack frame on top of the junk, which would not
+  // result in a traversable stack.
+  virtual bool SometimesSetsUpAFrame() { return true; }
+
  protected:
   static const int kMajorBits = 6;
   static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;
@@ -164,6 +180,14 @@
   // Finish the code object after it has been generated.
   virtual void FinishCode(Code* code) { }
 
+  // Returns true if TryGetCode should fail when it cannot register the
+  // newly generated stub in the stub cache.
+  virtual bool MustBeInStubCache() { return false; }
+
+  // Activates a newly generated stub. Called after the stub has been
+  // registered in the stub cache.
+  virtual void Activate(Code* code) { }
+
   // Returns information for computing the number key.
   virtual Major MajorKey() = 0;
   virtual int MinorKey() = 0;
@@ -178,9 +202,7 @@
 
   // Returns a name for logging/debugging purposes.
   SmartArrayPointer<const char> GetName();
-  virtual void PrintName(StringStream* stream) {
-    stream->Add("%s", MajorName(MajorKey(), false));
-  }
+  virtual void PrintName(StringStream* stream);
 
   // Returns whether the code generated for this stub needs to be allocated as
   // a fixed (non-moveable) code object.
@@ -193,9 +215,6 @@
            MajorKeyBits::encode(MajorKey());
   }
 
-  // See comment above, where Instanceof is defined.
-  bool AllowsStubCalls() { return MajorKey() <= Instanceof; }
-
   class MajorKeyBits: public BitField<uint32_t, 0, kMajorBits> {};
   class MinorKeyBits: public BitField<uint32_t, kMajorBits, kMinorBits> {};
 
@@ -531,11 +550,18 @@
 
 class CEntryStub : public CodeStub {
  public:
-  explicit CEntryStub(int result_size)
-      : result_size_(result_size), save_doubles_(false) { }
+  explicit CEntryStub(int result_size,
+                      SaveFPRegsMode save_doubles = kDontSaveFPRegs)
+      : result_size_(result_size), save_doubles_(save_doubles) { }
 
   void Generate(MacroAssembler* masm);
-  void SaveDoubles() { save_doubles_ = true; }
+
+  // The version of this stub that doesn't save doubles is generated ahead of
+  // time, so it's OK to call it from other stubs that can't cope with GC during
+  // their code generation.  On machines that always have fp registers (x64) we
+  // can generate both variants ahead of time.
+  virtual bool IsPregenerated();
+  static void GenerateAheadOfTime();
 
  private:
   void GenerateCore(MacroAssembler* masm,
@@ -550,7 +576,7 @@
 
   // Number of pointers/values returned.
   const int result_size_;
-  bool save_doubles_;
+  SaveFPRegsMode save_doubles_;
 
   Major MajorKey() { return CEntry; }
   int MinorKey();
@@ -647,10 +673,32 @@
 
   void Generate(MacroAssembler* masm);
 
+  virtual void FinishCode(Code* code);
+
+  static void Clear(Heap* heap, Address address);
+
+  static Object* GetCachedValue(Address address);
+
   static int ExtractArgcFromMinorKey(int minor_key) {
     return ArgcBits::decode(minor_key);
   }
 
+  // The object that indicates an uninitialized cache.
+  static Handle<Object> UninitializedSentinel(Isolate* isolate) {
+    return isolate->factory()->the_hole_value();
+  }
+
+  // A raw version of the uninitialized sentinel that's safe to read during
+  // garbage collection (e.g., for patching the cache).
+  static Object* RawUninitializedSentinel(Heap* heap) {
+    return heap->raw_unchecked_the_hole_value();
+  }
+
+  // The object that indicates a megamorphic state.
+  static Handle<Object> MegamorphicSentinel(Isolate* isolate) {
+    return isolate->factory()->undefined_value();
+  }
+
  private:
   int argc_;
   CallFunctionFlags flags_;
@@ -658,8 +706,8 @@
   virtual void PrintName(StringStream* stream);
 
   // Minor key encoding in 32 bits with Bitfield <Type, shift, size>.
-  class FlagBits: public BitField<CallFunctionFlags, 0, 1> {};
-  class ArgcBits: public BitField<unsigned, 1, 32 - 1> {};
+  class FlagBits: public BitField<CallFunctionFlags, 0, 2> {};
+  class ArgcBits: public BitField<unsigned, 2, 32 - 2> {};
 
   Major MajorKey() { return CallFunction; }
   int MinorKey() {
@@ -670,6 +718,10 @@
   bool ReceiverMightBeImplicit() {
     return (flags_ & RECEIVER_MIGHT_BE_IMPLICIT) != 0;
   }
+
+  bool RecordCallTarget() {
+    return (flags_ & RECORD_CALL_TARGET) != 0;
+  }
 };
 
 
@@ -934,6 +986,8 @@
   virtual int GetCodeKind() { return Code::TO_BOOLEAN_IC; }
   virtual void PrintName(StringStream* stream);
 
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
  private:
   Major MajorKey() { return ToBoolean; }
   int MinorKey() { return (tos_.code() << NUMBER_OF_TYPES) | types_.ToByte(); }
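
The widened FlagBits/ArgcBits above make room for a second call-function flag
(RECORD_CALL_TARGET) in the stub's 32-bit minor key. A standalone sketch of
this style of BitField packing; the template and enum below are illustrative
stand-ins, not V8's actual BitField or CallFunctionFlags:

    #include <cassert>
    #include <cstdint>

    // 'size' bits of a 32-bit key, starting at bit 'shift'.
    template <typename T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1u) << shift;
      static uint32_t encode(T value) {
        return (static_cast<uint32_t>(value) << shift) & kMask;
      }
      static T decode(uint32_t key) {
        return static_cast<T>((key & kMask) >> shift);
      }
    };

    enum CallFlags { NO_FLAGS = 0, IMPLICIT_RECEIVER = 1, RECORD_TARGET = 2 };

    // Two bits for the flags, the remaining 30 bits for the argument count.
    typedef BitField<CallFlags, 0, 2> FlagBits;
    typedef BitField<uint32_t, 2, 30> ArgcBits;

    int main() {
      uint32_t key = FlagBits::encode(RECORD_TARGET) | ArgcBits::encode(5);
      assert(FlagBits::decode(key) == RECORD_TARGET);
      assert(ArgcBits::decode(key) == 5u);
      return 0;
    }
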
diff --git a/src/codegen.cc b/src/codegen.cc
index cdc9ba1..ceea7b9 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -218,8 +218,8 @@
 
 
 int CEntryStub::MinorKey() {
+  int result = (save_doubles_ == kSaveFPRegs) ? 1 : 0;
   ASSERT(result_size_ == 1 || result_size_ == 2);
-  int result = save_doubles_ ? 1 : 0;
 #ifdef _WIN64
   return result | ((result_size_ == 1) ? 0 : 2);
 #else
diff --git a/src/compiler-intrinsics.h b/src/compiler-intrinsics.h
new file mode 100644
index 0000000..3b9c59e
--- /dev/null
+++ b/src/compiler-intrinsics.h
@@ -0,0 +1,77 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_COMPILER_INTRINSICS_H_
+#define V8_COMPILER_INTRINSICS_H_
+
+namespace v8 {
+namespace internal {
+
+class CompilerIntrinsics {
+ public:
+  // Returns number of zero bits preceding least significant 1 bit.
+  // Undefined for zero value.
+  INLINE(static int CountTrailingZeros(uint32_t value));
+
+  // Returns number of zero bits following most significant 1 bit.
+  // Undefined for zero value.
+  INLINE(static int CountLeadingZeros(uint32_t value));
+};
+
+#ifdef __GNUC__
+int CompilerIntrinsics::CountTrailingZeros(uint32_t value) {
+  return __builtin_ctz(value);
+}
+
+int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
+  return __builtin_clz(value);
+}
+
+#elif defined(_MSC_VER)
+
+#pragma intrinsic(_BitScanForward)
+#pragma intrinsic(_BitScanReverse)
+
+int CompilerIntrinsics::CountTrailingZeros(uint32_t value) {
+  unsigned long result;  //NOLINT
+  _BitScanForward(&result, static_cast<long>(value));  //NOLINT
+  return static_cast<int>(result);
+}
+
+int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
+  unsigned long result;  //NOLINT
+  _BitScanReverse(&result, static_cast<long>(value));  //NOLINT
+  return 31 - static_cast<int>(result);
+}
+
+#else
+#error Unsupported compiler
+#endif
+
+} }  // namespace v8::internal
+
+#endif  // V8_COMPILER_INTRINSICS_H_
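
Both intrinsics above are undefined for a zero argument, so callers must
guarantee a non-zero input. A small usage sketch of what the two counts mean
for a 32-bit value, assuming the GCC/Clang builtin path of the header (the
MSVC path uses _BitScanForward/_BitScanReverse instead):

    #include <cassert>
    #include <cstdint>

    // Same contract as CompilerIntrinsics: the argument must be non-zero.
    static int CountTrailingZeros(uint32_t value) { return __builtin_ctz(value); }
    static int CountLeadingZeros(uint32_t value) { return __builtin_clz(value); }

    int main() {
      // 0x8 == 0b1000: three zero bits below the lowest set bit and
      // 28 zero bits above the highest set bit.
      assert(CountTrailingZeros(0x8u) == 3);
      assert(CountLeadingZeros(0x8u) == 28);

      // A power of two can be reconstructed from its trailing-zero count.
      assert((1u << CountTrailingZeros(0x8u)) == 0x8u);
      return 0;
    }
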
diff --git a/src/compiler.cc b/src/compiler.cc
index ba6bb42..5a86b4e 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -36,6 +36,7 @@
 #include "full-codegen.h"
 #include "gdb-jit.h"
 #include "hydrogen.h"
+#include "isolate-inl.h"
 #include "lithium.h"
 #include "liveedit.h"
 #include "parser.h"
diff --git a/src/contexts.cc b/src/contexts.cc
index 4f93abd..007d30d 100644
--- a/src/contexts.cc
+++ b/src/contexts.cc
@@ -86,14 +86,14 @@
 
 Handle<Object> Context::Lookup(Handle<String> name,
                                ContextLookupFlags flags,
-                               int* index_,
+                               int* index,
                                PropertyAttributes* attributes,
                                BindingFlags* binding_flags) {
   Isolate* isolate = GetIsolate();
   Handle<Context> context(this, isolate);
 
   bool follow_context_chain = (flags & FOLLOW_CONTEXT_CHAIN) != 0;
-  *index_ = -1;
+  *index = -1;
   *attributes = ABSENT;
   *binding_flags = MISSING_BINDING;
 
@@ -110,70 +110,50 @@
       PrintF("\n");
     }
 
-    // Check extension/with/global object.
-    if (!context->IsBlockContext() && context->has_extension()) {
-      if (context->IsCatchContext()) {
-        // Catch contexts have the variable name in the extension slot.
-        if (name->Equals(String::cast(context->extension()))) {
-          if (FLAG_trace_contexts) {
-            PrintF("=> found in catch context\n");
-          }
-          *index_ = Context::THROWN_OBJECT_INDEX;
-          *attributes = NONE;
-          *binding_flags = MUTABLE_IS_INITIALIZED;
-          return context;
-        }
+    // 1. Check global objects, subjects of with, and extension objects.
+    if (context->IsGlobalContext() ||
+        context->IsWithContext() ||
+        (context->IsFunctionContext() && context->has_extension())) {
+      Handle<JSObject> object(JSObject::cast(context->extension()), isolate);
+      // Context extension objects needs to behave as if they have no
+      // prototype.  So even if we want to follow prototype chains, we need
+      // to only do a local lookup for context extension objects.
+      if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
+          object->IsJSContextExtensionObject()) {
+        *attributes = object->GetLocalPropertyAttribute(*name);
       } else {
-        ASSERT(context->IsGlobalContext() ||
-               context->IsFunctionContext() ||
-               context->IsWithContext());
-        // Global, function, and with contexts may have an object in the
-        // extension slot.
-        Handle<JSObject> extension(JSObject::cast(context->extension()),
-                                   isolate);
-        // Context extension objects needs to behave as if they have no
-        // prototype.  So even if we want to follow prototype chains, we
-        // need to only do a local lookup for context extension objects.
-        if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
-            extension->IsJSContextExtensionObject()) {
-          *attributes = extension->GetLocalPropertyAttribute(*name);
-        } else {
-          *attributes = extension->GetPropertyAttribute(*name);
+        *attributes = object->GetPropertyAttribute(*name);
+      }
+      if (*attributes != ABSENT) {
+        if (FLAG_trace_contexts) {
+          PrintF("=> found property in context object %p\n",
+                 reinterpret_cast<void*>(*object));
         }
-        if (*attributes != ABSENT) {
-          // property found
-          if (FLAG_trace_contexts) {
-            PrintF("=> found property in context object %p\n",
-                   reinterpret_cast<void*>(*extension));
-          }
-          return extension;
-        }
+        return object;
       }
     }
 
-    // Check serialized scope information of functions and blocks. Only
-    // functions can have parameters, and a function name.
+    // 2. Check the context proper if it has slots.
     if (context->IsFunctionContext() || context->IsBlockContext()) {
-      // We may have context-local slots.  Check locals in the context.
+      // Use serialized scope information of functions and blocks to search
+      // for the context index.
       Handle<SerializedScopeInfo> scope_info;
       if (context->IsFunctionContext()) {
         scope_info = Handle<SerializedScopeInfo>(
             context->closure()->shared()->scope_info(), isolate);
       } else {
-        ASSERT(context->IsBlockContext());
         scope_info = Handle<SerializedScopeInfo>(
             SerializedScopeInfo::cast(context->extension()), isolate);
       }
-
       Variable::Mode mode;
-      int index = scope_info->ContextSlotIndex(*name, &mode);
-      ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS);
-      if (index >= 0) {
+      int slot_index = scope_info->ContextSlotIndex(*name, &mode);
+      ASSERT(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS);
+      if (slot_index >= 0) {
         if (FLAG_trace_contexts) {
           PrintF("=> found local in context slot %d (mode = %d)\n",
-                 index, mode);
+                 slot_index, mode);
         }
-        *index_ = index;
+        *index = slot_index;
         // Note: Fixed context slots are statically allocated by the compiler.
         // Statically allocated variables always have a statically known mode,
         // which is the mode with which they were declared when added to the
@@ -206,22 +186,34 @@
 
       // Check the slot corresponding to the intermediate context holding
       // only the function name variable.
-      if (follow_context_chain) {
-        int index = scope_info->FunctionContextSlotIndex(*name);
-        if (index >= 0) {
+      if (follow_context_chain && context->IsFunctionContext()) {
+        int function_index = scope_info->FunctionContextSlotIndex(*name);
+        if (function_index >= 0) {
           if (FLAG_trace_contexts) {
             PrintF("=> found intermediate function in context slot %d\n",
-                   index);
+                   function_index);
           }
-          *index_ = index;
+          *index = function_index;
           *attributes = READ_ONLY;
           *binding_flags = IMMUTABLE_IS_INITIALIZED;
           return context;
         }
       }
+
+    } else if (context->IsCatchContext()) {
+      // Catch contexts have the variable name in the extension slot.
+      if (name->Equals(String::cast(context->extension()))) {
+        if (FLAG_trace_contexts) {
+          PrintF("=> found in catch context\n");
+        }
+        *index = Context::THROWN_OBJECT_INDEX;
+        *attributes = NONE;
+        *binding_flags = MUTABLE_IS_INITIALIZED;
+        return context;
+      }
     }
 
-    // Proceed with the previous context.
+    // 3. Prepare to continue with the previous (next outermost) context.
     if (context->IsGlobalContext()) {
       follow_context_chain = false;
     } else {
diff --git a/src/contexts.h b/src/contexts.h
index 505f86c..b80475f 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -134,6 +134,8 @@
   V(MAP_CACHE_INDEX, Object, map_cache) \
   V(CONTEXT_DATA_INDEX, Object, data) \
   V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
+  V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \
+    to_complete_property_descriptor) \
   V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
   V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
   V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap)
@@ -252,6 +254,7 @@
     OUT_OF_MEMORY_INDEX,
     CONTEXT_DATA_INDEX,
     ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
+    TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX,
     DERIVED_HAS_TRAP_INDEX,
     DERIVED_GET_TRAP_INDEX,
     DERIVED_SET_TRAP_INDEX,
@@ -330,12 +333,6 @@
   // Mark the global context with out of memory.
   inline void mark_out_of_memory();
 
-  // The exception holder is the object used as a with object in
-  // the implementation of a catch block.
-  bool is_exception_holder(Object* object) {
-    return IsCatchContext() && extension() == object;
-  }
-
   // A global context holds a list of all functions which have been optimized.
   void AddOptimizedFunction(JSFunction* function);
   void RemoveOptimizedFunction(JSFunction* function);
@@ -355,29 +352,25 @@
 #undef GLOBAL_CONTEXT_FIELD_ACCESSORS
 
   // Lookup the slot called name, starting with the current context.
-  // There are 4 possible outcomes:
+  // There are three possibilities:
   //
-  // 1) index_ >= 0 && result->IsContext():
-  //    most common case, the result is a Context, and index is the
-  //    context slot index, and the slot exists.
-  //    attributes == READ_ONLY for the function name variable, NONE otherwise.
+  // 1) result->IsContext():
+  //    The binding was found in a context.  *index is always the
+  //    non-negative slot index.  *attributes is NONE for var and let
+  //    declarations, READ_ONLY for const declarations (never ABSENT).
   //
-  // 2) index_ >= 0 && result->IsJSObject():
-  //    the result is the JSObject arguments object, the index is the parameter
-  //    index, i.e., key into the arguments object, and the property exists.
-  //    attributes != ABSENT.
+  // 2) result->IsJSObject():
+  //    The binding was found as a named property in a context extension
+  //    object (i.e., was introduced via eval), as a property on the subject
+  //    of with, or as a property of the global object.  *index is -1 and
+  //    *attributes is not ABSENT.
   //
-  // 3) index_ < 0 && result->IsJSObject():
-  //    the result is the JSObject extension context or the global object,
-  //    and the name is the property name, and the property exists.
-  //    attributes != ABSENT.
-  //
-  // 4) index_ < 0 && result.is_null():
-  //    there was no context found with the corresponding property.
-  //    attributes == ABSENT.
+  // 3) result.is_null():
+  //    There was no binding found, *index is always -1 and *attributes is
+  //    always ABSENT.
   Handle<Object> Lookup(Handle<String> name,
                         ContextLookupFlags flags,
-                        int* index_,
+                        int* index,
                         PropertyAttributes* attributes,
                         BindingFlags* binding_flags);
 
diff --git a/src/conversions-inl.h b/src/conversions-inl.h
index 41cf0d5..8bc11bf 100644
--- a/src/conversions-inl.h
+++ b/src/conversions-inl.h
@@ -47,7 +47,7 @@
 namespace internal {
 
 static inline double JunkStringValue() {
-  return std::numeric_limits<double>::quiet_NaN();
+  return BitCast<double, uint64_t>(kQuietNaNMask);
 }
 
 
diff --git a/src/conversions.h b/src/conversions.h
index e51ad65..31aaf6b 100644
--- a/src/conversions.h
+++ b/src/conversions.h
@@ -28,8 +28,6 @@
 #ifndef V8_CONVERSIONS_H_
 #define V8_CONVERSIONS_H_
 
-#include <limits>
-
 #include "utils.h"
 
 namespace v8 {
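
JunkStringValue now bit-casts a fixed quiet-NaN bit pattern instead of asking
std::numeric_limits for one, which is why the <limits> include can go. A
standalone sketch of the bit-cast approach; the 0x7FF8000000000000 constant is
the conventional IEEE-754 quiet-NaN pattern and is an assumption of this
sketch, not necessarily the value of V8's kQuietNaNMask:

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <cstring>

    // In the spirit of V8's BitCast: reinterpret the bits via memcpy.
    static double DoubleFromBits(uint64_t bits) {
      double result;
      std::memcpy(&result, &bits, sizeof(result));
      return result;
    }

    int main() {
      // Exponent all ones plus the top mantissa bit set: a quiet NaN.
      const uint64_t kQuietNaNBits = 0x7FF8000000000000ULL;
      double junk = DoubleFromBits(kQuietNaNBits);
      assert(std::isnan(junk));
      return 0;
    }
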
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index 6549028..d74c034 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -551,12 +551,12 @@
     sampler->Stop();
     need_to_stop_sampler_ = false;
   }
+  NoBarrier_Store(&is_profiling_, false);
   processor_->Stop();
   processor_->Join();
   delete processor_;
   delete generator_;
   processor_ = NULL;
-  NoBarrier_Store(&is_profiling_, false);
   generator_ = NULL;
   logger->logging_nesting_ = saved_logging_nesting_;
 }
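
The reordering above clears is_profiling_ before the processor is stopped and
joined, so the profiler stops advertising itself as live while its processor
thread shuts down. A minimal sketch of the same stop-flag-before-join ordering
with standard atomics and threads (all names here are illustrative):

    #include <atomic>
    #include <chrono>
    #include <cstdio>
    #include <thread>

    std::atomic<bool> is_profiling(false);
    std::atomic<int> samples(0);

    // Producers check the flag before handing work to the processor.
    static void MaybeRecordSample() {
      if (is_profiling.load(std::memory_order_acquire)) {
        samples.fetch_add(1, std::memory_order_relaxed);
      }
    }

    int main() {
      is_profiling.store(true, std::memory_order_release);
      std::thread producer([] {
        for (int i = 0; i < 1000; ++i) MaybeRecordSample();
      });

      std::this_thread::sleep_for(std::chrono::milliseconds(1));

      // Shutdown: clear the flag first, then stop and join the worker,
      // mirroring the reordering in the cpu-profiler.cc hunk above.
      is_profiling.store(false, std::memory_order_release);
      producer.join();

      std::printf("recorded %d samples\n", samples.load());
      return 0;
    }
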
diff --git a/src/d8-debug.cc b/src/d8-debug.cc
index adefba7..8fbc876 100644
--- a/src/d8-debug.cc
+++ b/src/d8-debug.cc
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,6 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#ifdef ENABLE_DEBUGGER_SUPPORT
 
 #include "d8.h"
 #include "d8-debug.h"
@@ -367,3 +368,5 @@
 
 
 }  // namespace v8
+
+#endif  // ENABLE_DEBUGGER_SUPPORT
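
Wrapping the whole file in ENABLE_DEBUGGER_SUPPORT means d8-debug.cc compiles
to an (almost) empty translation unit when debugger support is configured out,
with no build-system changes needed. A trivial sketch of the same pattern for
a hypothetical feature file:

    #include <cstdio>

    // With the macro undefined, everything between the guards drops out of
    // the build, which is how d8-debug.cc is excluded above.
    #ifdef ENABLE_DEBUGGER_SUPPORT
    static void RunRemoteDebugger() { std::puts("remote debugger active"); }
    #endif  // ENABLE_DEBUGGER_SUPPORT

    int main() {
    #ifdef ENABLE_DEBUGGER_SUPPORT
      RunRemoteDebugger();
    #else
      std::puts("built without debugger support");
    #endif
      return 0;
    }
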
diff --git a/src/d8.cc b/src/d8.cc
index 55f0d4c..a516576 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -146,11 +146,11 @@
                           Handle<Value> name,
                           bool print_result,
                           bool report_exceptions) {
-#ifndef V8_SHARED
+#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
   bool FLAG_debugger = i::FLAG_debugger;
 #else
   bool FLAG_debugger = false;
-#endif  // V8_SHARED
+#endif  // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
   HandleScope handle_scope;
   TryCatch try_catch;
   options.script_executed = true;
@@ -594,6 +594,7 @@
   Context::Scope utility_scope(utility_context_);
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
+  if (i::FLAG_debugger) printf("JavaScript debugger enabled\n");
   // Install the debugger object in the utility scope
   i::Debug* debug = i::Isolate::Current()->debug();
   debug->Load();
@@ -816,7 +817,7 @@
 
 
 static FILE* FOpen(const char* path, const char* mode) {
-#if (defined(_WIN32) || defined(_WIN64))
+#if defined(_MSC_VER) && (defined(_WIN32) || defined(_WIN64))
   FILE* result;
   if (fopen_s(&result, path, mode) == 0) {
     return result;
@@ -900,9 +901,6 @@
 #ifndef V8_SHARED
   console = LineEditor::Get();
   printf("V8 version %s [console: %s]\n", V8::GetVersion(), console->name());
-  if (i::FLAG_debugger) {
-    printf("JavaScript debugger enabled\n");
-  }
   console->Open();
   while (true) {
     i::SmartArrayPointer<char> input = console->Prompt(Shell::kPrompt);
@@ -1253,14 +1251,22 @@
     Locker lock;
     HandleScope scope;
     Persistent<Context> context = CreateEvaluationContext();
+    if (options.last_run) {
+      // Keep using the same context in the interactive shell.
+      evaluation_context_ = context;
+#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
+      // If the interactive debugger is enabled make sure to activate
+      // it before running the files passed on the command line.
+      if (i::FLAG_debugger) {
+        InstallUtilityScript();
+      }
+#endif  // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
+    }
     {
       Context::Scope cscope(context);
       options.isolate_sources[0].Execute();
     }
-    if (options.last_run) {
-      // Keep using the same context in the interactive shell
-      evaluation_context_ = context;
-    } else {
+    if (!options.last_run) {
       context.Dispose();
     }
 
@@ -1331,9 +1337,11 @@
   if (( options.interactive_shell
       || !options.script_executed )
       && !options.test_shell ) {
-#ifndef V8_SHARED
-    InstallUtilityScript();
-#endif  // V8_SHARED
+#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
+    if (!i::FLAG_debugger) {
+      InstallUtilityScript();
+    }
+#endif  // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
     RunShell();
   }
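
The FOpen guard above now requires _MSC_VER in addition to _WIN32/_WIN64, so a
non-MSVC Windows toolchain (MinGW, presumably, given the MinGW build fix in
this release) falls back to plain fopen rather than the MSVC-specific fopen_s.
A small sketch of that portable-open pattern:

    #include <cstdio>

    // fopen_s only when building with the Microsoft compiler; plain fopen
    // everywhere else, including MinGW, which also defines _WIN32.
    static FILE* PortableOpen(const char* path, const char* mode) {
    #if defined(_MSC_VER)
      FILE* result = NULL;
      if (fopen_s(&result, path, mode) == 0) return result;
      return NULL;
    #else
      return std::fopen(path, mode);
    #endif
    }

    int main() {
      FILE* f = PortableOpen("does-not-exist.txt", "r");
      if (f == NULL) {
        std::printf("open failed, as expected for a missing file\n");
      } else {
        std::fclose(f);
      }
      return 0;
    }
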
 
diff --git a/src/debug.cc b/src/debug.cc
index a229d39..fb7e337 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -40,6 +40,7 @@
 #include "global-handles.h"
 #include "ic.h"
 #include "ic-inl.h"
+#include "isolate-inl.h"
 #include "list.h"
 #include "messages.h"
 #include "natives.h"
@@ -401,15 +402,15 @@
   // Step in can only be prepared if currently positioned on an IC call,
   // construct call or CallFunction stub call.
   Address target = rinfo()->target_address();
-  Handle<Code> code(Code::GetCodeFromTargetAddress(target));
-  if (code->is_call_stub() || code->is_keyed_call_stub()) {
+  Handle<Code> target_code(Code::GetCodeFromTargetAddress(target));
+  if (target_code->is_call_stub() || target_code->is_keyed_call_stub()) {
     // Step in through IC call is handled by the runtime system. Therefore make
     // sure that any current IC is cleared and the runtime system is
     // called. If the executing code has a debug break at the location change
     // the call in the original code as it is the code there that will be
     // executed in place of the debug break call.
-    Handle<Code> stub = ComputeCallDebugPrepareStepIn(code->arguments_count(),
-                                                      code->kind());
+    Handle<Code> stub = ComputeCallDebugPrepareStepIn(
+        target_code->arguments_count(), target_code->kind());
     if (IsDebugBreak()) {
       original_rinfo()->set_target_address(stub->entry());
     } else {
@@ -419,7 +420,7 @@
 #ifdef DEBUG
     // All the following stuff is needed only for assertion checks so the code
     // is wrapped in ifdef.
-    Handle<Code> maybe_call_function_stub = code;
+    Handle<Code> maybe_call_function_stub = target_code;
     if (IsDebugBreak()) {
       Address original_target = original_rinfo()->target_address();
       maybe_call_function_stub =
@@ -436,8 +437,9 @@
     // Step in through CallFunction stub should also be prepared by caller of
     // this function (Debug::PrepareStep) which should flood target function
     // with breakpoints.
-    ASSERT(RelocInfo::IsConstructCall(rmode()) || code->is_inline_cache_stub()
-           || is_call_function_stub);
+    ASSERT(RelocInfo::IsConstructCall(rmode()) ||
+           target_code->is_inline_cache_stub() ||
+           is_call_function_stub);
 #endif
   }
 }
@@ -474,11 +476,11 @@
   RelocInfo::Mode mode = rmode();
   if (RelocInfo::IsCodeTarget(mode)) {
     Address target = rinfo()->target_address();
-    Handle<Code> code(Code::GetCodeFromTargetAddress(target));
+    Handle<Code> target_code(Code::GetCodeFromTargetAddress(target));
 
     // Patch the code to invoke the builtin debug break function matching the
     // calling convention used by the call site.
-    Handle<Code> dbgbrk_code(Debug::FindDebugBreak(code, mode));
+    Handle<Code> dbgbrk_code(Debug::FindDebugBreak(target_code, mode));
     rinfo()->set_target_address(dbgbrk_code->entry());
   }
 }
@@ -772,7 +774,7 @@
 
   // Execute the shared function in the debugger context.
   Handle<Context> context = isolate->global_context();
-  bool caught_exception = false;
+  bool caught_exception;
   Handle<JSFunction> function =
       factory->NewFunctionFromSharedFunctionInfo(function_info, context);
 
@@ -1103,7 +1105,7 @@
   Handle<Object> break_id = factory->NewNumberFromInt(Debug::break_id());
 
   // Call HandleBreakPointx.
-  bool caught_exception = false;
+  bool caught_exception;
   const int argc = 2;
   Object** argv[argc] = {
     break_id.location(),
@@ -1732,6 +1734,10 @@
   if (!has_break_points_) {
     Deoptimizer::DeoptimizeAll();
 
+    // We are going to iterate the heap to find all functions without
+    // debug break slots.
+    isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+
     AssertNoAllocation no_allocation;
     Builtins* builtins = isolate_->builtins();
     Code* lazy_compile = builtins->builtin(Builtins::kLazyCompile);
@@ -1997,9 +2003,10 @@
 
   // Perform two GCs to get rid of all unreferenced scripts. The first GC gets
   // rid of all the cached script wrappers and the second gets rid of the
-  // scripts which are no longer referenced.
-  heap->CollectAllGarbage(false);
-  heap->CollectAllGarbage(false);
+  // scripts which are no longer referenced.  The second also sweeps precisely,
+  // which saves us doing yet another GC to make the heap iterable.
+  heap->CollectAllGarbage(Heap::kNoGCFlags);
+  heap->CollectAllGarbage(Heap::kMakeHeapIterableMask);
 
   ASSERT(script_cache_ == NULL);
   script_cache_ = new ScriptCache();
@@ -2007,6 +2014,8 @@
   // Scan heap for Script objects.
   int count = 0;
   HeapIterator iterator;
+  AssertNoAllocation no_allocation;
+
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     if (obj->IsScript() && Script::cast(obj)->HasValidSource()) {
       script_cache_->Add(Handle<Script>(Script::cast(obj)));
@@ -2047,7 +2056,7 @@
 
   // Perform GC to get unreferenced scripts evicted from the cache before
   // returning the content.
-  isolate_->heap()->CollectAllGarbage(false);
+  isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags);
 
   // Get the scripts from the cache.
   return script_cache_->GetScripts();
@@ -2345,7 +2354,7 @@
   Handle<JSValue> wrapper = GetScriptWrapper(script);
 
   // Call UpdateScriptBreakPoints expect no exceptions.
-  bool caught_exception = false;
+  bool caught_exception;
   const int argc = 1;
   Object** argv[argc] = { reinterpret_cast<Object**>(wrapper.location()) };
   Execution::TryCall(Handle<JSFunction>::cast(update_script_break_points),
@@ -2486,7 +2495,7 @@
                           exec_state.location(),
                           Handle<Object>::cast(event_data).location(),
                           event_listener_data_.location() };
-  bool caught_exception = false;
+  bool caught_exception;
   Execution::TryCall(fun, isolate_->global(), argc, argv, &caught_exception);
   // Silently ignore exceptions from debug event listeners.
 }
@@ -2929,6 +2938,94 @@
 }
 
 
+EnterDebugger::EnterDebugger()
+    : isolate_(Isolate::Current()),
+      prev_(isolate_->debug()->debugger_entry()),
+      it_(isolate_),
+      has_js_frames_(!it_.done()),
+      save_(isolate_) {
+  Debug* debug = isolate_->debug();
+  ASSERT(prev_ != NULL || !debug->is_interrupt_pending(PREEMPT));
+  ASSERT(prev_ != NULL || !debug->is_interrupt_pending(DEBUGBREAK));
+
+  // Link recursive debugger entry.
+  debug->set_debugger_entry(this);
+
+  // Store the previous break id and frame id.
+  break_id_ = debug->break_id();
+  break_frame_id_ = debug->break_frame_id();
+
+  // Create the new break info. If there are no JavaScript frames there is no
+  // break frame id.
+  if (has_js_frames_) {
+    debug->NewBreak(it_.frame()->id());
+  } else {
+    debug->NewBreak(StackFrame::NO_ID);
+  }
+
+  // Make sure that debugger is loaded and enter the debugger context.
+  load_failed_ = !debug->Load();
+  if (!load_failed_) {
+    // NOTE the member variable save which saves the previous context before
+    // this change.
+    isolate_->set_context(*debug->debug_context());
+  }
+}
+
+
+EnterDebugger::~EnterDebugger() {
+  ASSERT(Isolate::Current() == isolate_);
+  Debug* debug = isolate_->debug();
+
+  // Restore to the previous break state.
+  debug->SetBreak(break_frame_id_, break_id_);
+
+  // Check for leaving the debugger.
+  if (prev_ == NULL) {
+    // Clear mirror cache when leaving the debugger. Skip this if there is a
+    // pending exception as clearing the mirror cache calls back into
+    // JavaScript. This can happen if the v8::Debug::Call is used in which
+    // case the exception should end up in the calling code.
+    if (!isolate_->has_pending_exception()) {
+      // Try to avoid any pending debug break breaking in the clear mirror
+      // cache JavaScript code.
+      if (isolate_->stack_guard()->IsDebugBreak()) {
+        debug->set_interrupts_pending(DEBUGBREAK);
+        isolate_->stack_guard()->Continue(DEBUGBREAK);
+      }
+      debug->ClearMirrorCache();
+    }
+
+    // Request preemption and debug break when leaving the last debugger entry
+    // if any of these were recorded while debugging.
+    if (debug->is_interrupt_pending(PREEMPT)) {
+      // This re-scheduling of preemption is to avoid starvation in some
+      // debugging scenarios.
+      debug->clear_interrupt_pending(PREEMPT);
+      isolate_->stack_guard()->Preempt();
+    }
+    if (debug->is_interrupt_pending(DEBUGBREAK)) {
+      debug->clear_interrupt_pending(DEBUGBREAK);
+      isolate_->stack_guard()->DebugBreak();
+    }
+
+    // If there are commands in the queue when leaving the debugger request
+    // that these commands are processed.
+    if (isolate_->debugger()->HasCommands()) {
+      isolate_->stack_guard()->DebugCommand();
+    }
+
+    // If leaving the debugger with the debugger no longer active unload it.
+    if (!isolate_->debugger()->IsDebuggerActive()) {
+      isolate_->debugger()->UnloadDebugger();
+    }
+  }
+
+  // Leaving this debugger entry.
+  debug->set_debugger_entry(prev_);
+}
+
+
 MessageImpl MessageImpl::NewEvent(DebugEvent event,
                                   bool running,
                                   Handle<JSObject> exec_state,
diff --git a/src/debug.h b/src/debug.h
index a098040..caccede 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -869,91 +869,8 @@
 // some reason could not be entered FailedToEnter will return true.
 class EnterDebugger BASE_EMBEDDED {
  public:
-  EnterDebugger()
-      : isolate_(Isolate::Current()),
-        prev_(isolate_->debug()->debugger_entry()),
-        it_(isolate_),
-        has_js_frames_(!it_.done()),
-        save_(isolate_) {
-    Debug* debug = isolate_->debug();
-    ASSERT(prev_ != NULL || !debug->is_interrupt_pending(PREEMPT));
-    ASSERT(prev_ != NULL || !debug->is_interrupt_pending(DEBUGBREAK));
-
-    // Link recursive debugger entry.
-    debug->set_debugger_entry(this);
-
-    // Store the previous break id and frame id.
-    break_id_ = debug->break_id();
-    break_frame_id_ = debug->break_frame_id();
-
-    // Create the new break info. If there is no JavaScript frames there is no
-    // break frame id.
-    if (has_js_frames_) {
-      debug->NewBreak(it_.frame()->id());
-    } else {
-      debug->NewBreak(StackFrame::NO_ID);
-    }
-
-    // Make sure that debugger is loaded and enter the debugger context.
-    load_failed_ = !debug->Load();
-    if (!load_failed_) {
-      // NOTE the member variable save which saves the previous context before
-      // this change.
-      isolate_->set_context(*debug->debug_context());
-    }
-  }
-
-  ~EnterDebugger() {
-    ASSERT(Isolate::Current() == isolate_);
-    Debug* debug = isolate_->debug();
-
-    // Restore to the previous break state.
-    debug->SetBreak(break_frame_id_, break_id_);
-
-    // Check for leaving the debugger.
-    if (prev_ == NULL) {
-      // Clear mirror cache when leaving the debugger. Skip this if there is a
-      // pending exception as clearing the mirror cache calls back into
-      // JavaScript. This can happen if the v8::Debug::Call is used in which
-      // case the exception should end up in the calling code.
-      if (!isolate_->has_pending_exception()) {
-        // Try to avoid any pending debug break breaking in the clear mirror
-        // cache JavaScript code.
-        if (isolate_->stack_guard()->IsDebugBreak()) {
-          debug->set_interrupts_pending(DEBUGBREAK);
-          isolate_->stack_guard()->Continue(DEBUGBREAK);
-        }
-        debug->ClearMirrorCache();
-      }
-
-      // Request preemption and debug break when leaving the last debugger entry
-      // if any of these where recorded while debugging.
-      if (debug->is_interrupt_pending(PREEMPT)) {
-        // This re-scheduling of preemption is to avoid starvation in some
-        // debugging scenarios.
-        debug->clear_interrupt_pending(PREEMPT);
-        isolate_->stack_guard()->Preempt();
-      }
-      if (debug->is_interrupt_pending(DEBUGBREAK)) {
-        debug->clear_interrupt_pending(DEBUGBREAK);
-        isolate_->stack_guard()->DebugBreak();
-      }
-
-      // If there are commands in the queue when leaving the debugger request
-      // that these commands are processed.
-      if (isolate_->debugger()->HasCommands()) {
-        isolate_->stack_guard()->DebugCommand();
-      }
-
-      // If leaving the debugger with the debugger no longer active unload it.
-      if (!isolate_->debugger()->IsDebuggerActive()) {
-        isolate_->debugger()->UnloadDebugger();
-      }
-    }
-
-    // Leaving this debugger entry.
-    debug->set_debugger_entry(prev_);
-  }
+  EnterDebugger();
+  ~EnterDebugger();
 
   // Check whether the debugger could be entered.
   inline bool FailedToEnter() { return load_failed_; }
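
The debug.h hunk above replaces EnterDebugger's large inline constructor and
destructor with plain declarations; the bodies move to debug.cc. A minimal
sketch of the same header/implementation split, with an invented Tracer class
standing in for EnterDebugger:

    // tracer.h -- only declarations stay in the header.
    class Tracer {
     public:
      Tracer();    // bodies now live in tracer.cc
      ~Tracer();
     private:
      int depth_;
    };

    // tracer.cc -- the heavy bodies are compiled once, not at every include.
    Tracer::Tracer() : depth_(0) {
      // link into the debugger-entry chain, save break state, ...
    }

    Tracer::~Tracer() {
      // restore break state, unlink from the chain, ...
    }

Keeping only declarations in the header shortens compilation of every file
that includes it and lets the definitions use internals the header need not
pull in.
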
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index 5feb73d..b052275 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -52,11 +52,13 @@
 
 DeoptimizerData::~DeoptimizerData() {
   if (eager_deoptimization_entry_code_ != NULL) {
-    eager_deoptimization_entry_code_->Free(EXECUTABLE);
+    Isolate::Current()->memory_allocator()->Free(
+        eager_deoptimization_entry_code_);
     eager_deoptimization_entry_code_ = NULL;
   }
   if (lazy_deoptimization_entry_code_ != NULL) {
-    lazy_deoptimization_entry_code_->Free(EXECUTABLE);
+    Isolate::Current()->memory_allocator()->Free(
+        lazy_deoptimization_entry_code_);
     lazy_deoptimization_entry_code_ = NULL;
   }
 }
@@ -71,6 +73,8 @@
 #endif
 
 
+// We rely on this function not causing a GC.  It is called from generated code
+// without having a real stack frame in place.
 Deoptimizer* Deoptimizer::New(JSFunction* function,
                               BailoutType type,
                               unsigned bailout_id,
@@ -319,6 +323,8 @@
       input_(NULL),
       output_count_(0),
       output_(NULL),
+      frame_alignment_marker_(isolate->heap()->frame_alignment_marker()),
+      has_alignment_padding_(0),
       deferred_heap_numbers_(0) {
   if (FLAG_trace_deopt && type != OSR) {
     if (type == DEBUGGER) {
@@ -343,6 +349,26 @@
   if (type == EAGER) {
     ASSERT(from == NULL);
     optimized_code_ = function_->code();
+    if (FLAG_trace_deopt && FLAG_code_comments) {
+      // Print instruction associated with this bailout.
+      const char* last_comment = NULL;
+      int mask = RelocInfo::ModeMask(RelocInfo::COMMENT)
+          | RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+      for (RelocIterator it(optimized_code_, mask); !it.done(); it.next()) {
+        RelocInfo* info = it.rinfo();
+        if (info->rmode() == RelocInfo::COMMENT) {
+          last_comment = reinterpret_cast<const char*>(info->data());
+        }
+        if (info->rmode() == RelocInfo::RUNTIME_ENTRY) {
+          unsigned id = Deoptimizer::GetDeoptimizationId(
+              info->target_address(), Deoptimizer::EAGER);
+          if (id == bailout_id && last_comment != NULL) {
+            PrintF("            %s\n", last_comment);
+            break;
+          }
+        }
+      }
+    }
   } else if (type == LAZY) {
     optimized_code_ = FindDeoptimizingCodeFromAddress(from);
     ASSERT(optimized_code_ != NULL);
@@ -386,7 +412,7 @@
 Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
   ASSERT(id >= 0);
   if (id >= kNumberOfEntries) return NULL;
-  LargeObjectChunk* base = NULL;
+  MemoryChunk* base = NULL;
   DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
   if (type == EAGER) {
     if (data->eager_deoptimization_entry_code_ == NULL) {
@@ -400,12 +426,12 @@
     base = data->lazy_deoptimization_entry_code_;
   }
   return
-      static_cast<Address>(base->GetStartAddress()) + (id * table_entry_size_);
+      static_cast<Address>(base->body()) + (id * table_entry_size_);
 }
 
 
 int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
-  LargeObjectChunk* base = NULL;
+  MemoryChunk* base = NULL;
   DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
   if (type == EAGER) {
     base = data->eager_deoptimization_entry_code_;
@@ -413,14 +439,14 @@
     base = data->lazy_deoptimization_entry_code_;
   }
   if (base == NULL ||
-      addr < base->GetStartAddress() ||
-      addr >= base->GetStartAddress() +
+      addr < base->body() ||
+      addr >= base->body() +
           (kNumberOfEntries * table_entry_size_)) {
     return kNotDeoptimizationEntry;
   }
   ASSERT_EQ(0,
-      static_cast<int>(addr - base->GetStartAddress()) % table_entry_size_);
-  return static_cast<int>(addr - base->GetStartAddress()) / table_entry_size_;
+      static_cast<int>(addr - base->body()) % table_entry_size_);
+  return static_cast<int>(addr - base->body()) / table_entry_size_;
 }
 
 
@@ -462,6 +488,8 @@
 }
 
 
+// We rely on this function not causing a GC.  It is called from generated code
+// without having a real stack frame in place.
 void Deoptimizer::DoComputeOutputFrames() {
   if (bailout_type_ == OSR) {
     DoComputeOsrOutputFrame();
@@ -613,11 +641,13 @@
       intptr_t input_value = input_->GetRegister(input_reg);
       if (FLAG_trace_deopt) {
         PrintF(
-            "    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s\n",
+            "    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s ",
             output_[frame_index]->GetTop() + output_offset,
             output_offset,
             input_value,
             converter.NameOfCPURegister(input_reg));
+        reinterpret_cast<Object*>(input_value)->ShortPrint();
+        PrintF("\n");
       }
       output_[frame_index]->SetFrameSlot(output_offset, input_value);
       return;
@@ -675,10 +705,12 @@
       if (FLAG_trace_deopt) {
         PrintF("    0x%08" V8PRIxPTR ": ",
                output_[frame_index]->GetTop() + output_offset);
-        PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d]\n",
+        PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d] ",
                output_offset,
                input_value,
                input_offset);
+        reinterpret_cast<Object*>(input_value)->ShortPrint();
+        PrintF("\n");
       }
       output_[frame_index]->SetFrameSlot(output_offset, input_value);
       return;
@@ -953,7 +985,10 @@
   for (uint32_t i = 0; i < table_length; ++i) {
     uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
     Address pc_after = unoptimized_code->instruction_start() + pc_offset;
-    PatchStackCheckCodeAt(pc_after, check_code, replacement_code);
+    PatchStackCheckCodeAt(unoptimized_code,
+                          pc_after,
+                          check_code,
+                          replacement_code);
     stack_check_cursor += 2 * kIntSize;
   }
 }
@@ -1039,7 +1074,7 @@
 }
 
 
-LargeObjectChunk* Deoptimizer::CreateCode(BailoutType type) {
+MemoryChunk* Deoptimizer::CreateCode(BailoutType type) {
   // We cannot run this if the serializer is enabled because this will
   // cause us to emit relocation information for the external
   // references. This is fine because the deoptimizer's code section
@@ -1053,12 +1088,15 @@
   masm.GetCode(&desc);
   ASSERT(desc.reloc_size == 0);
 
-  LargeObjectChunk* chunk = LargeObjectChunk::New(desc.instr_size, EXECUTABLE);
+  MemoryChunk* chunk =
+      Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size,
+                                                            EXECUTABLE,
+                                                            NULL);
   if (chunk == NULL) {
     V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table");
   }
-  memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size);
-  CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size);
+  memcpy(chunk->body(), desc.buffer, desc.instr_size);
+  CPU::FlushICache(chunk->body(), desc.instr_size);
   return chunk;
 }
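
The entry-table arithmetic above is unchanged by the LargeObjectChunk-to-
MemoryChunk switch: an entry's address is the table base plus
id * table_entry_size_, and GetDeoptimizationId inverts that. A standalone
sketch of the mapping, with made-up table parameters (the real entry count and
size are architecture specific):

    #include <cassert>
    #include <cstdint>

    typedef uint8_t* Address;

    // Illustrative values only; not V8's.
    const int kNumberOfEntries = 4096;
    const int kTableEntrySize = 10;

    // Entry id -> address: the table is a run of identically sized stubs.
    Address GetDeoptimizationEntry(Address base, int id) {
      assert(0 <= id && id < kNumberOfEntries);
      return base + id * kTableEntrySize;
    }

    // Address -> entry id, or -1 when the address is outside the table.
    int GetDeoptimizationId(Address base, Address addr) {
      if (addr < base || addr >= base + kNumberOfEntries * kTableEntrySize) {
        return -1;
      }
      assert((addr - base) % kTableEntrySize == 0);
      return static_cast<int>((addr - base) / kTableEntrySize);
    }

    int main() {
      static uint8_t table[kNumberOfEntries * kTableEntrySize];
      assert(GetDeoptimizationId(table, GetDeoptimizationEntry(table, 7)) == 7);
      return 0;
    }
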
 
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index 8641261..3cf7046 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -86,8 +86,8 @@
 #endif
 
  private:
-  LargeObjectChunk* eager_deoptimization_entry_code_;
-  LargeObjectChunk* lazy_deoptimization_entry_code_;
+  MemoryChunk* eager_deoptimization_entry_code_;
+  MemoryChunk* lazy_deoptimization_entry_code_;
   Deoptimizer* current_;
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
@@ -173,7 +173,8 @@
 
   // Patch stack guard check at instruction before pc_after in
   // the unoptimized code to unconditionally call replacement_code.
-  static void PatchStackCheckCodeAt(Address pc_after,
+  static void PatchStackCheckCodeAt(Code* unoptimized_code,
+                                    Address pc_after,
                                     Code* check_code,
                                     Code* replacement_code);
 
@@ -211,6 +212,11 @@
     return OFFSET_OF(Deoptimizer, output_count_);
   }
   static int output_offset() { return OFFSET_OF(Deoptimizer, output_); }
+  static int frame_alignment_marker_offset() {
+    return OFFSET_OF(Deoptimizer, frame_alignment_marker_); }
+  static int has_alignment_padding_offset() {
+    return OFFSET_OF(Deoptimizer, has_alignment_padding_);
+  }
 
   static int GetDeoptimizedCodeCount(Isolate* isolate);
 
@@ -285,7 +291,7 @@
 
   void AddDoubleValue(intptr_t slot_address, double value);
 
-  static LargeObjectChunk* CreateCode(BailoutType type);
+  static MemoryChunk* CreateCode(BailoutType type);
   static void GenerateDeoptimizationEntries(
       MacroAssembler* masm, int count, BailoutType type);
 
@@ -315,6 +321,10 @@
   // Array of output frame descriptions.
   FrameDescription** output_;
 
+  // Frames can be dynamically padded on ia32 to align untagged doubles.
+  Object* frame_alignment_marker_;
+  intptr_t has_alignment_padding_;
+
   List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_;
 
   static const int table_entry_size_;
diff --git a/src/disassembler.cc b/src/disassembler.cc
index 1e67b4c..e3b40ab 100644
--- a/src/disassembler.cc
+++ b/src/disassembler.cc
@@ -200,7 +200,7 @@
     // Print all the reloc info for this instruction which are not comments.
     for (int i = 0; i < pcs.length(); i++) {
       // Put together the reloc info
-      RelocInfo relocinfo(pcs[i], rmodes[i], datas[i]);
+      RelocInfo relocinfo(pcs[i], rmodes[i], datas[i], NULL);
 
       // Indent the printing of the reloc info.
       if (i == 0) {
diff --git a/src/elements.cc b/src/elements.cc
index e4ecfe8..5e7a84e 100644
--- a/src/elements.cc
+++ b/src/elements.cc
@@ -227,7 +227,9 @@
  public:
   static MaybeObject* DeleteCommon(JSObject* obj,
                                    uint32_t key) {
-    ASSERT(obj->HasFastElements() || obj->HasFastArgumentsElements());
+    ASSERT(obj->HasFastElements() ||
+           obj->HasFastSmiOnlyElements() ||
+           obj->HasFastArgumentsElements());
     Heap* heap = obj->GetHeap();
     FixedArray* backing_store = FixedArray::cast(obj->elements());
     if (backing_store->map() == heap->non_strict_arguments_elements_map()) {
@@ -596,6 +598,9 @@
 
 void ElementsAccessor::InitializeOncePerProcess() {
   static struct ConcreteElementsAccessors {
+    // Use the fast element handler for smi-only arrays. The implementation is
+    // currently identical.
+    FastElementsAccessor fast_smi_elements_handler;
     FastElementsAccessor fast_elements_handler;
     FastDoubleElementsAccessor fast_double_elements_handler;
     DictionaryElementsAccessor dictionary_elements_handler;
@@ -612,6 +617,7 @@
   } element_accessors;
 
   static ElementsAccessor* accessor_array[] = {
+    &element_accessors.fast_smi_elements_handler,
     &element_accessors.fast_elements_handler,
     &element_accessors.fast_double_elements_handler,
     &element_accessors.dictionary_elements_handler,
@@ -627,6 +633,9 @@
     &element_accessors.pixel_elements_handler
   };
 
+  STATIC_ASSERT((sizeof(accessor_array) / sizeof(*accessor_array)) ==
+                kElementsKindCount);
+
   elements_accessors_ = accessor_array;
 }
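
The new STATIC_ASSERT above ties the accessor table's length to
kElementsKindCount, so adding an elements kind without adding an accessor
fails at compile time. A self-contained sketch of the same pattern, using an
invented enum and C++11 static_assert in place of V8's macro:

    // Illustrative kinds; the real table is keyed by V8's ElementsKind values.
    enum ElementsKindSketch {
      FAST_SMI_ONLY_ELEMENTS,
      FAST_ELEMENTS,
      FAST_DOUBLE_ELEMENTS,
      DICTIONARY_ELEMENTS,
      ELEMENTS_KIND_COUNT
    };

    static const char* kAccessorNames[] = {
      "fast smi-only", "fast", "fast double", "dictionary"
    };

    // Compilation fails if a kind is added without extending the table.
    static_assert(sizeof(kAccessorNames) / sizeof(*kAccessorNames) ==
                  ELEMENTS_KIND_COUNT,
                  "one accessor per elements kind");

    int main() { return 0; }
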
 
diff --git a/src/execution.cc b/src/execution.cc
index f36d4e4..2021c0f 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -33,6 +33,7 @@
 #include "bootstrapper.h"
 #include "codegen.h"
 #include "debug.h"
+#include "isolate-inl.h"
 #include "runtime-profiler.h"
 #include "simulator.h"
 #include "v8threads.h"
@@ -88,11 +89,9 @@
 
   Handle<Code> code;
   if (construct) {
-    JSConstructEntryStub stub;
-    code = stub.GetCode();
+    code = isolate->factory()->js_construct_entry_code();
   } else {
-    JSEntryStub stub;
-    code = stub.GetCode();
+    code = isolate->factory()->js_entry_code();
   }
 
   // Convert calls on global objects to be calls on the global
@@ -151,6 +150,8 @@
                                Object*** args,
                                bool* pending_exception,
                                bool convert_receiver) {
+  *pending_exception = false;
+
   if (!callable->IsJSFunction()) {
     callable = TryGetFunctionDelegate(callable, pending_exception);
     if (*pending_exception) return callable;
@@ -195,6 +196,7 @@
   v8::TryCatch catcher;
   catcher.SetVerbose(false);
   catcher.SetCaptureMessage(false);
+  *caught_exception = false;
 
   Handle<Object> result = Invoke(false, func, receiver, argc, args,
                                  caught_exception);
@@ -377,7 +379,7 @@
 
 bool StackGuard::IsInterrupted() {
   ExecutionAccess access(isolate_);
-  return thread_local_.interrupt_flags_ & INTERRUPT;
+  return (thread_local_.interrupt_flags_ & INTERRUPT) != 0;
 }
 
 
@@ -403,7 +405,7 @@
 
 bool StackGuard::IsTerminateExecution() {
   ExecutionAccess access(isolate_);
-  return thread_local_.interrupt_flags_ & TERMINATE;
+  return (thread_local_.interrupt_flags_ & TERMINATE) != 0;
 }
 
 
@@ -416,7 +418,7 @@
 
 bool StackGuard::IsRuntimeProfilerTick() {
   ExecutionAccess access(isolate_);
-  return thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK;
+  return (thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK) != 0;
 }
 
 
@@ -433,6 +435,22 @@
 }
 
 
+bool StackGuard::IsGCRequest() {
+  ExecutionAccess access(isolate_);
+  return (thread_local_.interrupt_flags_ & GC_REQUEST) != 0;
+}
+
+
+void StackGuard::RequestGC() {
+  ExecutionAccess access(isolate_);
+  thread_local_.interrupt_flags_ |= GC_REQUEST;
+  if (thread_local_.postpone_interrupts_nesting_ == 0) {
+    thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
+    isolate_->heap()->SetStackLimits();
+  }
+}
+
+
 #ifdef ENABLE_DEBUGGER_SUPPORT
 bool StackGuard::IsDebugBreak() {
   ExecutionAccess access(isolate_);
@@ -740,7 +758,7 @@
                           Handle<Object>::cast(fun).location(),
                           pos.location(),
                           is_global.location() };
-  bool caught_exception = false;
+  bool caught_exception;
   Handle<Object> result =
       TryCall(isolate->get_stack_trace_line_fun(),
               isolate->js_builtins_object(), argc, args,
@@ -852,6 +870,12 @@
 MaybeObject* Execution::HandleStackGuardInterrupt() {
   Isolate* isolate = Isolate::Current();
   StackGuard* stack_guard = isolate->stack_guard();
+
+  if (stack_guard->IsGCRequest()) {
+    isolate->heap()->CollectAllGarbage(false);
+    stack_guard->Continue(GC_REQUEST);
+  }
+
   isolate->counters()->stack_interrupts()->Increment();
   if (stack_guard->IsRuntimeProfilerTick()) {
     isolate->counters()->runtime_profiler_ticks()->Increment();
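
The GC_REQUEST plumbing above follows the existing interrupt pattern:
RequestGC sets a bit in the guard's flag word and lowers the stack limits so
the next stack check lands in HandleStackGuardInterrupt, which performs the
collection and clears the bit with Continue. A standalone sketch of just the
flag bookkeeping (the class and the limit_armed_ stand-in are illustrative,
not V8 code):

    #include <cstdio>

    enum InterruptFlag {          // illustrative subset of the real enum
      INTERRUPT  = 1 << 0,
      GC_REQUEST = 1 << 6
    };

    class StackGuardSketch {
     public:
      StackGuardSketch() : interrupt_flags_(0), limit_armed_(false) {}

      bool IsGCRequest() { return (interrupt_flags_ & GC_REQUEST) != 0; }

      void RequestGC() {
        interrupt_flags_ |= GC_REQUEST;
        limit_armed_ = true;        // stands in for lowering jslimit_/climit_
      }

      void Continue(InterruptFlag after_what) {
        interrupt_flags_ &= ~after_what;
        if (interrupt_flags_ == 0) limit_armed_ = false;
      }

      // Called from the (simulated) stack-check slow path.
      void HandleInterrupt() {
        if (IsGCRequest()) {
          std::printf("collecting garbage\n");  // heap->CollectAllGarbage(...)
          Continue(GC_REQUEST);
        }
      }

     private:
      int interrupt_flags_;
      bool limit_armed_;
    };

    int main() {
      StackGuardSketch guard;
      guard.RequestGC();
      guard.HandleInterrupt();     // collects once, then the flag is clear
      return guard.IsGCRequest();  // 0
    }

The explicit "!= 0" in the rewritten testers above also makes the bitmask
checks yield a proper bool instead of relying on implicit int-to-bool
conversion.
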
diff --git a/src/execution.h b/src/execution.h
index 5cd7141..9fa6e78 100644
--- a/src/execution.h
+++ b/src/execution.h
@@ -41,7 +41,8 @@
   DEBUGCOMMAND = 1 << 2,
   PREEMPT = 1 << 3,
   TERMINATE = 1 << 4,
-  RUNTIME_PROFILER_TICK = 1 << 5
+  RUNTIME_PROFILER_TICK = 1 << 5,
+  GC_REQUEST = 1 << 6
 };
 
 class Execution : public AllStatic {
@@ -196,6 +197,8 @@
   bool IsDebugCommand();
   void DebugCommand();
 #endif
+  bool IsGCRequest();
+  void RequestGC();
   void Continue(InterruptFlag after_what);
 
   // This provides an asynchronous read of the stack limits for the current
diff --git a/src/extensions/gc-extension.cc b/src/extensions/gc-extension.cc
index 3740c27..48e8c42 100644
--- a/src/extensions/gc-extension.cc
+++ b/src/extensions/gc-extension.cc
@@ -40,12 +40,7 @@
 
 
 v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
-  bool compact = false;
-  // All allocation spaces other than NEW_SPACE have the same effect.
-  if (args.Length() >= 1 && args[0]->IsBoolean()) {
-    compact = args[0]->BooleanValue();
-  }
-  HEAP->CollectAllGarbage(compact);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
   return v8::Undefined();
 }
 
diff --git a/src/factory.cc b/src/factory.cc
index 9728926..252c97c 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -234,7 +234,7 @@
 
 
 Handle<String> Factory::NewExternalStringFromAscii(
-    ExternalAsciiString::Resource* resource) {
+    const ExternalAsciiString::Resource* resource) {
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateExternalStringFromAscii(resource),
@@ -243,7 +243,7 @@
 
 
 Handle<String> Factory::NewExternalStringFromTwoByte(
-    ExternalTwoByteString::Resource* resource) {
+    const ExternalTwoByteString::Resource* resource) {
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateExternalStringFromTwoByte(resource),
@@ -404,10 +404,12 @@
 }
 
 
-Handle<Map> Factory::NewMap(InstanceType type, int instance_size) {
+Handle<Map> Factory::NewMap(InstanceType type,
+                            int instance_size,
+                            ElementsKind elements_kind) {
   CALL_HEAP_FUNCTION(
       isolate(),
-      isolate()->heap()->AllocateMap(type, instance_size),
+      isolate()->heap()->AllocateMap(type, instance_size, elements_kind),
       Map);
 }
 
@@ -455,23 +457,11 @@
 }
 
 
-Handle<Map> Factory::GetFastElementsMap(Handle<Map> src) {
-  CALL_HEAP_FUNCTION(isolate(), src->GetFastElementsMap(), Map);
-}
-
-
-Handle<Map> Factory::GetSlowElementsMap(Handle<Map> src) {
-  CALL_HEAP_FUNCTION(isolate(), src->GetSlowElementsMap(), Map);
-}
-
-
 Handle<Map> Factory::GetElementsTransitionMap(
-    Handle<Map> src,
-    ElementsKind elements_kind,
-    bool safe_to_add_transition) {
+    Handle<JSObject> src,
+    ElementsKind elements_kind) {
   CALL_HEAP_FUNCTION(isolate(),
-                     src->GetElementsTransitionMap(elements_kind,
-                                                   safe_to_add_transition),
+                     src->GetElementsTransitionMap(elements_kind),
                      Map);
 }
 
@@ -722,7 +712,12 @@
   if (force_initial_map ||
       type != JS_OBJECT_TYPE ||
       instance_size != JSObject::kHeaderSize) {
-    Handle<Map> initial_map = NewMap(type, instance_size);
+    ElementsKind default_elements_kind = FLAG_smi_only_arrays
+        ? FAST_SMI_ONLY_ELEMENTS
+        : FAST_ELEMENTS;
+    Handle<Map> initial_map = NewMap(type,
+                                     instance_size,
+                                     default_elements_kind);
     function->set_initial_map(*initial_map);
     initial_map->set_constructor(*function);
   }
@@ -908,11 +903,26 @@
   Handle<JSArray> result =
       Handle<JSArray>::cast(NewJSObject(isolate()->array_function(),
                                         pretenure));
-  result->SetContent(*elements);
+  SetContent(result, elements);
   return result;
 }
 
 
+void Factory::SetContent(Handle<JSArray> array,
+                         Handle<FixedArray> elements) {
+  CALL_HEAP_FUNCTION_VOID(
+      isolate(),
+      array->SetContent(*elements));
+}
+
+
+void Factory::EnsureCanContainNonSmiElements(Handle<JSArray> array) {
+  CALL_HEAP_FUNCTION_VOID(
+      isolate(),
+      array->EnsureCanContainNonSmiElements());
+}
+
+
 Handle<JSProxy> Factory::NewJSProxy(Handle<Object> handler,
                                     Handle<Object> prototype) {
   CALL_HEAP_FUNCTION(
@@ -938,6 +948,13 @@
 }
 
 
+void Factory::SetIdentityHash(Handle<JSObject> object, Object* hash) {
+  CALL_HEAP_FUNCTION_VOID(
+      isolate(),
+      object->SetIdentityHash(hash, ALLOW_CREATION));
+}
+
+
 Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
     Handle<String> name,
     int number_of_literals,
@@ -990,6 +1007,12 @@
 }
 
 
+Handle<String> Factory::Uint32ToString(uint32_t value) {
+  CALL_HEAP_FUNCTION(isolate(),
+                     isolate()->heap()->Uint32ToString(value), String);
+}
+
+
 Handle<NumberDictionary> Factory::DictionaryAtNumberPut(
     Handle<NumberDictionary> dictionary,
     uint32_t key,
@@ -1299,4 +1322,13 @@
 }
 
 
+Handle<Object> Factory::GlobalConstantFor(Handle<String> name) {
+  Heap* h = isolate()->heap();
+  if (name->Equals(h->undefined_symbol())) return undefined_value();
+  if (name->Equals(h->nan_symbol())) return nan_value();
+  if (name->Equals(h->infinity_symbol())) return infinity_value();
+  return Handle<Object>::null();
+}
+
+
 } }  // namespace v8::internal
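
GlobalConstantFor above maps the names of the three non-writable,
non-configurable globals to their values and signals "unknown" with a null
handle. A trivial standalone sketch of that shape, using C strings and a null
pointer in place of handles:

    #include <cstdio>
    #include <cstring>

    // Returns a description for a known global constant, or NULL (the
    // analogue of the factory's null handle) for any other name.
    static const char* GlobalConstantFor(const char* name) {
      if (std::strcmp(name, "undefined") == 0) return "the undefined value";
      if (std::strcmp(name, "NaN") == 0) return "the NaN value";
      if (std::strcmp(name, "Infinity") == 0) return "the Infinity value";
      return NULL;
    }

    int main() {
      std::printf("%s\n", GlobalConstantFor("NaN"));
      std::printf("%d\n", GlobalConstantFor("foo") == NULL);  // 1: unknown
      return 0;
    }
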
diff --git a/src/factory.h b/src/factory.h
index 71ae750..85f46c5 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -145,9 +145,9 @@
   // not make sense to have a UTF-8 factory function for external strings,
   // because we cannot change the underlying buffer.
   Handle<String> NewExternalStringFromAscii(
-      ExternalAsciiString::Resource* resource);
+      const ExternalAsciiString::Resource* resource);
   Handle<String> NewExternalStringFromTwoByte(
-      ExternalTwoByteString::Resource* resource);
+      const ExternalTwoByteString::Resource* resource);
 
   // Create a global (but otherwise uninitialized) context.
   Handle<Context> NewGlobalContext();
@@ -203,7 +203,9 @@
   Handle<JSGlobalPropertyCell> NewJSGlobalPropertyCell(
       Handle<Object> value);
 
-  Handle<Map> NewMap(InstanceType type, int instance_size);
+  Handle<Map> NewMap(InstanceType type,
+                     int instance_size,
+                     ElementsKind elements_kind = FAST_ELEMENTS);
 
   Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
 
@@ -215,13 +217,8 @@
 
   Handle<Map> CopyMapDropTransitions(Handle<Map> map);
 
-  Handle<Map> GetFastElementsMap(Handle<Map> map);
-
-  Handle<Map> GetSlowElementsMap(Handle<Map> map);
-
-  Handle<Map> GetElementsTransitionMap(Handle<Map> map,
-                                       ElementsKind elements_kind,
-                                       bool safe_to_add_transition);
+  Handle<Map> GetElementsTransitionMap(Handle<JSObject> object,
+                                       ElementsKind elements_kind);
 
   Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
 
@@ -258,12 +255,18 @@
       Handle<FixedArray> elements,
       PretenureFlag pretenure = NOT_TENURED);
 
+  void SetContent(Handle<JSArray> array, Handle<FixedArray> elements);
+
+  void EnsureCanContainNonSmiElements(Handle<JSArray> array);
+
   Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
 
   // Change the type of the argument into a JS object/function and reinitialize.
   void BecomeJSObject(Handle<JSReceiver> object);
   void BecomeJSFunction(Handle<JSReceiver> object);
 
+  void SetIdentityHash(Handle<JSObject> object, Object* hash);
+
   Handle<JSFunction> NewFunction(Handle<String> name,
                                  Handle<Object> prototype);
 
@@ -356,6 +359,7 @@
       PropertyAttributes attributes);
 
   Handle<String> NumberToString(Handle<Object> number);
+  Handle<String> Uint32ToString(uint32_t value);
 
   enum ApiInstanceType {
     JavaScriptObject,
@@ -442,6 +446,11 @@
                              JSRegExp::Flags flags,
                              int capture_count);
 
+  // Returns the value for a known global constant (a property of the global
+  // object which is neither configurable nor writable) like 'undefined'.
+  // Returns a null handle when the given name is unknown.
+  Handle<Object> GlobalConstantFor(Handle<String> name);
+
  private:
   Isolate* isolate() { return reinterpret_cast<Isolate*>(this); }
 
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 7df2b0b..3d0d5bb 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -104,6 +104,7 @@
 
 // Flags for experimental implementation features.
 DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
+DEFINE_bool(smi_only_arrays, false, "tracks arrays with only smi values")
 DEFINE_bool(string_slices, false, "use string slices")
 
 // Flags for Crankshaft.
@@ -253,10 +254,16 @@
             "print cumulative GC statistics in name=value format on exit")
 DEFINE_bool(trace_gc_verbose, false,
             "print more details following each garbage collection")
+DEFINE_bool(trace_fragmentation, false,
+            "report fragmentation for old pointer and data pages")
 DEFINE_bool(collect_maps, true,
             "garbage collect maps from which no objects can be reached")
 DEFINE_bool(flush_code, true,
             "flush code that we expect not to use again before full gc")
+DEFINE_bool(incremental_marking, true, "use incremental marking")
+DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps")
+DEFINE_bool(trace_incremental_marking, false,
+            "trace progress of the incremental marking")
 
 // v8.cc
 DEFINE_bool(use_idle_notification, true,
@@ -276,8 +283,13 @@
 
 // mark-compact.cc
 DEFINE_bool(always_compact, false, "Perform compaction on every full GC")
+DEFINE_bool(lazy_sweeping, true,
+            "Use lazy sweeping for old pointer and data spaces")
+DEFINE_bool(cleanup_caches_in_maps_at_gc, true,
+            "Flush code caches in maps during mark compact cycle.")
 DEFINE_bool(never_compact, false,
             "Never perform compaction on full GC - testing only")
+DEFINE_bool(compact_code_space, false, "Compact code space")
 DEFINE_bool(cleanup_code_caches_at_gc, true,
             "Flush inline caches prior to mark compact collection and "
             "flush code caches in maps during mark compact cycle.")
@@ -348,11 +360,15 @@
 
 DEFINE_bool(help, false, "Print usage message, including flags, on console")
 DEFINE_bool(dump_counters, false, "Dump counters on exit")
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
 DEFINE_bool(debugger, false, "Enable JavaScript debugger")
 DEFINE_bool(remote_debugger, false, "Connect JavaScript debugger to the "
                                     "debugger agent in another process")
 DEFINE_bool(debugger_agent, false, "Enable debugger agent")
 DEFINE_int(debugger_port, 5858, "Port to use for remote debugging")
+#endif  // ENABLE_DEBUGGER_SUPPORT
+
 DEFINE_string(map_counters, "", "Map counters to a file")
 DEFINE_args(js_arguments, JSArguments(),
             "Pass all remaining arguments to the script. Alias for \"--\".")
@@ -425,6 +441,11 @@
 // ic.cc
 DEFINE_bool(trace_ic, false, "trace inline cache state transitions")
 
+// mark-compact.cc
+DEFINE_bool(force_marking_deque_overflows, false,
+            "force overflows of marking deque by reducing it's size "
+            "to 64 words")
+
 // objects.cc
 DEFINE_bool(trace_normalization,
             false,
@@ -444,6 +465,9 @@
 
 DEFINE_bool(trace_isolates, false, "trace isolate state changes")
 
+DEFINE_bool(trace_live_byte_count, false,
+            "trace updates to page live byte count")
+
 // VM state
 DEFINE_bool(log_state_changes, false, "Log state changes.")
 
diff --git a/src/frames-inl.h b/src/frames-inl.h
index 7ba79bf..4b8a4be 100644
--- a/src/frames-inl.h
+++ b/src/frames-inl.h
@@ -77,6 +77,21 @@
 }
 
 
+inline bool StackHandler::is_entry() const {
+  return state() == ENTRY;
+}
+
+
+inline bool StackHandler::is_try_catch() const {
+  return state() == TRY_CATCH;
+}
+
+
+inline bool StackHandler::is_try_finally() const {
+  return state() == TRY_FINALLY;
+}
+
+
 inline StackHandler::State StackHandler::state() const {
   const int offset = StackHandlerConstants::kStateOffset;
   return static_cast<State>(Memory::int_at(address() + offset));
@@ -105,8 +120,33 @@
 }
 
 
+inline Code* StackFrame::LookupCode() const {
+  return GetContainingCode(isolate(), pc());
+}
+
+
 inline Code* StackFrame::GetContainingCode(Isolate* isolate, Address pc) {
-  return isolate->pc_to_code_cache()->GetCacheEntry(pc)->code;
+  return isolate->inner_pointer_to_code_cache()->GetCacheEntry(pc)->code;
+}
+
+
+inline EntryFrame::EntryFrame(StackFrameIterator* iterator)
+    : StackFrame(iterator) {
+}
+
+
+inline EntryConstructFrame::EntryConstructFrame(StackFrameIterator* iterator)
+    : EntryFrame(iterator) {
+}
+
+
+inline ExitFrame::ExitFrame(StackFrameIterator* iterator)
+    : StackFrame(iterator) {
+}
+
+
+inline StandardFrame::StandardFrame(StackFrameIterator* iterator)
+    : StackFrame(iterator) {
 }
 
 
@@ -155,6 +195,11 @@
 }
 
 
+inline JavaScriptFrame::JavaScriptFrame(StackFrameIterator* iterator)
+    : StandardFrame(iterator) {
+}
+
+
 Address JavaScriptFrame::GetParameterSlot(int index) const {
   int param_count = ComputeParametersCount();
   ASSERT(-1 <= index && index < param_count);
@@ -190,6 +235,26 @@
 }
 
 
+inline OptimizedFrame::OptimizedFrame(StackFrameIterator* iterator)
+    : JavaScriptFrame(iterator) {
+}
+
+
+inline ArgumentsAdaptorFrame::ArgumentsAdaptorFrame(
+    StackFrameIterator* iterator) : JavaScriptFrame(iterator) {
+}
+
+
+inline InternalFrame::InternalFrame(StackFrameIterator* iterator)
+    : StandardFrame(iterator) {
+}
+
+
+inline ConstructFrame::ConstructFrame(StackFrameIterator* iterator)
+    : InternalFrame(iterator) {
+}
+
+
 template<typename Iterator>
 inline JavaScriptFrameIteratorTemp<Iterator>::JavaScriptFrameIteratorTemp(
     Isolate* isolate)
diff --git a/src/frames.cc b/src/frames.cc
index bebd10a..412a59c 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -366,16 +366,17 @@
 
 
 Code* StackFrame::GetSafepointData(Isolate* isolate,
-                                   Address pc,
+                                   Address inner_pointer,
                                    SafepointEntry* safepoint_entry,
                                    unsigned* stack_slots) {
-  PcToCodeCache::PcToCodeCacheEntry* entry =
-      isolate->pc_to_code_cache()->GetCacheEntry(pc);
+  InnerPointerToCodeCache::InnerPointerToCodeCacheEntry* entry =
+      isolate->inner_pointer_to_code_cache()->GetCacheEntry(inner_pointer);
   if (!entry->safepoint_entry.is_valid()) {
-    entry->safepoint_entry = entry->code->GetSafepointEntry(pc);
+    entry->safepoint_entry = entry->code->GetSafepointEntry(inner_pointer);
     ASSERT(entry->safepoint_entry.is_valid());
   } else {
-    ASSERT(entry->safepoint_entry.Equals(entry->code->GetSafepointEntry(pc)));
+    ASSERT(entry->safepoint_entry.Equals(
+        entry->code->GetSafepointEntry(inner_pointer)));
   }
 
   // Fill in the results and return the code.
@@ -392,11 +393,16 @@
 }
 
 
+#ifdef DEBUG
+static bool GcSafeCodeContains(HeapObject* object, Address addr);
+#endif
+
+
 void StackFrame::IteratePc(ObjectVisitor* v,
                            Address* pc_address,
                            Code* holder) {
   Address pc = *pc_address;
-  ASSERT(holder->contains(pc));
+  ASSERT(GcSafeCodeContains(holder, pc));
   unsigned pc_offset = static_cast<unsigned>(pc - holder->instruction_start());
   Object* code = holder;
   v->VisitPointer(&code);
@@ -819,7 +825,8 @@
   // back to a slow search in this case to find the original optimized
   // code object.
   if (!code->contains(pc())) {
-    code = isolate()->pc_to_code_cache()->GcSafeFindCodeForPc(pc());
+    code = isolate()->inner_pointer_to_code_cache()->
+        GcSafeFindCodeForInnerPointer(pc());
   }
   ASSERT(code != NULL);
   ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
@@ -881,6 +888,11 @@
 }
 
 
+int ArgumentsAdaptorFrame::GetNumberOfIncomingArguments() const {
+  return Smi::cast(GetExpression(0))->value();
+}
+
+
 Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
   return fp() + StandardFrameConstants::kCallerSPOffset;
 }
@@ -1155,52 +1167,89 @@
 // -------------------------------------------------------------------------
 
 
-Code* PcToCodeCache::GcSafeCastToCode(HeapObject* object, Address pc) {
+static Map* GcSafeMapOfCodeSpaceObject(HeapObject* object) {
+  MapWord map_word = object->map_word();
+  return map_word.IsForwardingAddress() ?
+      map_word.ToForwardingAddress()->map() : map_word.ToMap();
+}
+
+
+static int GcSafeSizeOfCodeSpaceObject(HeapObject* object) {
+  return object->SizeFromMap(GcSafeMapOfCodeSpaceObject(object));
+}
+
+
+#ifdef DEBUG
+static bool GcSafeCodeContains(HeapObject* code, Address addr) {
+  Map* map = GcSafeMapOfCodeSpaceObject(code);
+  ASSERT(map == code->GetHeap()->code_map());
+  Address start = code->address();
+  Address end = code->address() + code->SizeFromMap(map);
+  return start <= addr && addr < end;
+}
+#endif
+
+
+Code* InnerPointerToCodeCache::GcSafeCastToCode(HeapObject* object,
+                                                Address inner_pointer) {
   Code* code = reinterpret_cast<Code*>(object);
-  ASSERT(code != NULL && code->contains(pc));
+  ASSERT(code != NULL && GcSafeCodeContains(code, inner_pointer));
   return code;
 }
 
 
-Code* PcToCodeCache::GcSafeFindCodeForPc(Address pc) {
+Code* InnerPointerToCodeCache::GcSafeFindCodeForInnerPointer(
+    Address inner_pointer) {
   Heap* heap = isolate_->heap();
-  // Check if the pc points into a large object chunk.
-  LargeObjectChunk* chunk = heap->lo_space()->FindChunkContainingPc(pc);
-  if (chunk != NULL) return GcSafeCastToCode(chunk->GetObject(), pc);
+  // Check if the inner pointer points into a large object chunk.
+  LargePage* large_page = heap->lo_space()->FindPageContainingPc(inner_pointer);
+  if (large_page != NULL) {
+    return GcSafeCastToCode(large_page->GetObject(), inner_pointer);
+  }
 
-  // Iterate through the 8K page until we reach the end or find an
-  // object starting after the pc.
-  Page* page = Page::FromAddress(pc);
-  HeapObjectIterator iterator(page, heap->GcSafeSizeOfOldObjectFunction());
-  HeapObject* previous = NULL;
+  // Iterate through the page until we reach the end or find an object starting
+  // after the inner pointer.
+  Page* page = Page::FromAddress(inner_pointer);
+
+  Address addr = page->skip_list()->StartFor(inner_pointer);
+
+  Address top = heap->code_space()->top();
+  Address limit = heap->code_space()->limit();
+
   while (true) {
-    HeapObject* next = iterator.next();
-    if (next == NULL || next->address() >= pc) {
-      return GcSafeCastToCode(previous, pc);
+    if (addr == top && addr != limit) {
+      addr = limit;
+      continue;
     }
-    previous = next;
+
+    HeapObject* obj = HeapObject::FromAddress(addr);
+    int obj_size = GcSafeSizeOfCodeSpaceObject(obj);
+    Address next_addr = addr + obj_size;
+    if (next_addr > inner_pointer) return GcSafeCastToCode(obj, inner_pointer);
+    addr = next_addr;
   }
 }
 
 
-PcToCodeCache::PcToCodeCacheEntry* PcToCodeCache::GetCacheEntry(Address pc) {
+InnerPointerToCodeCache::InnerPointerToCodeCacheEntry*
+    InnerPointerToCodeCache::GetCacheEntry(Address inner_pointer) {
   isolate_->counters()->pc_to_code()->Increment();
-  ASSERT(IsPowerOf2(kPcToCodeCacheSize));
+  ASSERT(IsPowerOf2(kInnerPointerToCodeCacheSize));
   uint32_t hash = ComputeIntegerHash(
-      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(pc)));
-  uint32_t index = hash & (kPcToCodeCacheSize - 1);
-  PcToCodeCacheEntry* entry = cache(index);
-  if (entry->pc == pc) {
+      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(inner_pointer)));
+  uint32_t index = hash & (kInnerPointerToCodeCacheSize - 1);
+  InnerPointerToCodeCacheEntry* entry = cache(index);
+  if (entry->inner_pointer == inner_pointer) {
     isolate_->counters()->pc_to_code_cached()->Increment();
-    ASSERT(entry->code == GcSafeFindCodeForPc(pc));
+    ASSERT(entry->code == GcSafeFindCodeForInnerPointer(inner_pointer));
   } else {
     // Because this code may be interrupted by a profiling signal that
-    // also queries the cache, we cannot update pc before the code has
-    // been set. Otherwise, we risk trying to use a cache entry before
+    // also queries the cache, we cannot update inner_pointer before the code
+    // has been set. Otherwise, we risk trying to use a cache entry before
     // the code has been computed.
-    entry->code = GcSafeFindCodeForPc(pc);
+    entry->code = GcSafeFindCodeForInnerPointer(inner_pointer);
     entry->safepoint_entry.Reset();
-    entry->pc = pc;
+    entry->inner_pointer = inner_pointer;
   }
   return entry;
 }
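
InnerPointerToCodeCache above is a direct-mapped, power-of-two-sized cache:
the inner pointer is hashed, masked with kInnerPointerToCodeCacheSize - 1, and
the selected entry is refilled on a miss, writing the payload before the key
so a racing profiler never pairs a fresh key with stale data. A generic sketch
of that cache shape with an invented payload and a toy hash:

    #include <cstdint>
    #include <cstring>

    struct Entry {       // illustrative payload; the real entry also caches
      uintptr_t key;     // a SafepointEntry
      int value;
    };

    class DirectMappedCache {
     public:
      DirectMappedCache() { Flush(); }

      void Flush() { std::memset(cache_, 0, sizeof(cache_)); }

      Entry* GetCacheEntry(uintptr_t key) {
        static_assert((kSize & (kSize - 1)) == 0, "size must be a power of 2");
        uint32_t hash = static_cast<uint32_t>(key * 2654435761u);  // toy hash
        Entry* entry = &cache_[hash & (kSize - 1)];
        if (entry->key != key) {
          // Fill the payload before the key, mirroring the ordering noted in
          // the real code for profiling-signal safety.
          entry->value = SlowLookup(key);  // cf. GcSafeFindCodeForInnerPointer
          entry->key = key;
        }
        return entry;
      }

     private:
      static int SlowLookup(uintptr_t key) { return static_cast<int>(key % 97); }
      static const int kSize = 1024;
      Entry cache_[kSize];
    };

    int main() {
      DirectMappedCache cache;
      Entry* first = cache.GetCacheEntry(42);   // miss: slow lookup
      Entry* again = cache.GetCacheEntry(42);   // hit: same entry
      return first->value == again->value ? 0 : 1;
    }
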
diff --git a/src/frames.h b/src/frames.h
index fed11c4..1d65826 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -49,36 +49,36 @@
 class ThreadLocalTop;
 class Isolate;
 
-class PcToCodeCache {
+class InnerPointerToCodeCache {
  public:
-  struct PcToCodeCacheEntry {
-    Address pc;
+  struct InnerPointerToCodeCacheEntry {
+    Address inner_pointer;
     Code* code;
     SafepointEntry safepoint_entry;
   };
 
-  explicit PcToCodeCache(Isolate* isolate) : isolate_(isolate) {
+  explicit InnerPointerToCodeCache(Isolate* isolate) : isolate_(isolate) {
     Flush();
   }
 
-  Code* GcSafeFindCodeForPc(Address pc);
-  Code* GcSafeCastToCode(HeapObject* object, Address pc);
+  Code* GcSafeFindCodeForInnerPointer(Address inner_pointer);
+  Code* GcSafeCastToCode(HeapObject* object, Address inner_pointer);
 
   void Flush() {
     memset(&cache_[0], 0, sizeof(cache_));
   }
 
-  PcToCodeCacheEntry* GetCacheEntry(Address pc);
+  InnerPointerToCodeCacheEntry* GetCacheEntry(Address inner_pointer);
 
  private:
-  PcToCodeCacheEntry* cache(int index) { return &cache_[index]; }
+  InnerPointerToCodeCacheEntry* cache(int index) { return &cache_[index]; }
 
   Isolate* isolate_;
 
-  static const int kPcToCodeCacheSize = 1024;
-  PcToCodeCacheEntry cache_[kPcToCodeCacheSize];
+  static const int kInnerPointerToCodeCacheSize = 1024;
+  InnerPointerToCodeCacheEntry cache_[kInnerPointerToCodeCacheSize];
 
-  DISALLOW_COPY_AND_ASSIGN(PcToCodeCache);
+  DISALLOW_COPY_AND_ASSIGN(InnerPointerToCodeCache);
 };
 
 
@@ -106,9 +106,9 @@
   static inline StackHandler* FromAddress(Address address);
 
   // Testers
-  bool is_entry() { return state() == ENTRY; }
-  bool is_try_catch() { return state() == TRY_CATCH; }
-  bool is_try_finally() { return state() == TRY_FINALLY; }
+  inline bool is_entry() const;
+  inline bool is_try_catch() const;
+  inline bool is_try_finally() const;
 
  private:
   // Accessors.
@@ -139,7 +139,10 @@
   enum Type {
     NONE = 0,
     STACK_FRAME_TYPE_LIST(DECLARE_TYPE)
-    NUMBER_OF_TYPES
+    NUMBER_OF_TYPES,
+    // Used by FrameScope to indicate that the stack frame is constructed
+    // manually and the FrameScope does not need to emit code.
+    MANUAL
   };
 #undef DECLARE_TYPE
 
@@ -215,9 +218,7 @@
   virtual Code* unchecked_code() const = 0;
 
   // Get the code associated with this frame.
-  Code* LookupCode() const {
-    return GetContainingCode(isolate(), pc());
-  }
+  inline Code* LookupCode() const;
 
   // Get the code object that contains the given pc.
   static inline Code* GetContainingCode(Isolate* isolate, Address pc);
@@ -299,7 +300,7 @@
   virtual void SetCallerFp(Address caller_fp);
 
  protected:
-  explicit EntryFrame(StackFrameIterator* iterator) : StackFrame(iterator) { }
+  inline explicit EntryFrame(StackFrameIterator* iterator);
 
   // The caller stack pointer for entry frames is always zero. The
   // real information about the caller frame is available through the
@@ -326,8 +327,7 @@
   }
 
  protected:
-  explicit EntryConstructFrame(StackFrameIterator* iterator)
-      : EntryFrame(iterator) { }
+  inline explicit EntryConstructFrame(StackFrameIterator* iterator);
 
  private:
   friend class StackFrameIterator;
@@ -361,7 +361,7 @@
   static void FillState(Address fp, Address sp, State* state);
 
  protected:
-  explicit ExitFrame(StackFrameIterator* iterator) : StackFrame(iterator) { }
+  inline explicit ExitFrame(StackFrameIterator* iterator);
 
   virtual Address GetCallerStackPointer() const;
 
@@ -394,8 +394,7 @@
   }
 
  protected:
-  explicit StandardFrame(StackFrameIterator* iterator)
-      : StackFrame(iterator) { }
+  inline explicit StandardFrame(StackFrameIterator* iterator);
 
   virtual void ComputeCallerState(State* state) const;
 
@@ -514,8 +513,7 @@
   }
 
  protected:
-  explicit JavaScriptFrame(StackFrameIterator* iterator)
-      : StandardFrame(iterator) { }
+  inline explicit JavaScriptFrame(StackFrameIterator* iterator);
 
   virtual Address GetCallerStackPointer() const;
 
@@ -552,8 +550,7 @@
   DeoptimizationInputData* GetDeoptimizationData(int* deopt_index);
 
  protected:
-  explicit OptimizedFrame(StackFrameIterator* iterator)
-      : JavaScriptFrame(iterator) { }
+  inline explicit OptimizedFrame(StackFrameIterator* iterator);
 
  private:
   friend class StackFrameIterator;
@@ -581,12 +578,9 @@
                      int index) const;
 
  protected:
-  explicit ArgumentsAdaptorFrame(StackFrameIterator* iterator)
-      : JavaScriptFrame(iterator) { }
+  inline explicit ArgumentsAdaptorFrame(StackFrameIterator* iterator);
 
-  virtual int GetNumberOfIncomingArguments() const {
-    return Smi::cast(GetExpression(0))->value();
-  }
+  virtual int GetNumberOfIncomingArguments() const;
 
   virtual Address GetCallerStackPointer() const;
 
@@ -611,8 +605,7 @@
   }
 
  protected:
-  explicit InternalFrame(StackFrameIterator* iterator)
-      : StandardFrame(iterator) { }
+  inline explicit InternalFrame(StackFrameIterator* iterator);
 
   virtual Address GetCallerStackPointer() const;
 
@@ -633,8 +626,7 @@
   }
 
  protected:
-  explicit ConstructFrame(StackFrameIterator* iterator)
-      : InternalFrame(iterator) { }
+  inline explicit ConstructFrame(StackFrameIterator* iterator);
 
  private:
   friend class StackFrameIterator;
@@ -715,15 +707,19 @@
 
   inline JavaScriptFrameIteratorTemp(Isolate* isolate, StackFrame::Id id);
 
-  JavaScriptFrameIteratorTemp(Address fp, Address sp,
-                              Address low_bound, Address high_bound) :
+  JavaScriptFrameIteratorTemp(Address fp,
+                              Address sp,
+                              Address low_bound,
+                              Address high_bound) :
       iterator_(fp, sp, low_bound, high_bound) {
     if (!done()) Advance();
   }
 
   JavaScriptFrameIteratorTemp(Isolate* isolate,
-                              Address fp, Address sp,
-                              Address low_bound, Address high_bound) :
+                              Address fp,
+                              Address sp,
+                              Address low_bound,
+                              Address high_bound) :
       iterator_(isolate, fp, sp, low_bound, high_bound) {
     if (!done()) Advance();
   }
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 8073874..e822588 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -244,11 +244,6 @@
 }
 
 
-void BreakableStatementChecker::VisitCompareToNull(CompareToNull* expr) {
-  Visit(expr->expression());
-}
-
-
 void BreakableStatementChecker::VisitCompareOperation(CompareOperation* expr) {
   Visit(expr->left());
   Visit(expr->right());
@@ -291,8 +286,10 @@
   code->set_optimizable(info->IsOptimizable());
   cgen.PopulateDeoptimizationData(code);
   code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
+#ifdef ENABLE_DEBUGGER_SUPPORT
   code->set_has_debug_break_slots(
       info->isolate()->debugger()->IsDebuggerActive());
+#endif  // ENABLE_DEBUGGER_SUPPORT
   code->set_allow_osr_at_loop_nesting_level(0);
   code->set_stack_check_table_offset(table_offset);
   CodeGenerator::PrintCode(code, info);
@@ -1321,19 +1318,21 @@
 }
 
 
-bool FullCodeGenerator::TryLiteralCompare(CompareOperation* compare,
-                                          Label* if_true,
-                                          Label* if_false,
-                                          Label* fall_through) {
-  Expression *expr;
+bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) {
+  Expression *sub_expr;
   Handle<String> check;
-  if (compare->IsLiteralCompareTypeof(&expr, &check)) {
-    EmitLiteralCompareTypeof(expr, check, if_true, if_false, fall_through);
+  if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
+    EmitLiteralCompareTypeof(sub_expr, check);
     return true;
   }
 
-  if (compare->IsLiteralCompareUndefined(&expr)) {
-    EmitLiteralCompareUndefined(expr, if_true, if_false, fall_through);
+  if (expr->IsLiteralCompareUndefined(&sub_expr)) {
+    EmitLiteralCompareNil(expr, sub_expr, kUndefinedValue);
+    return true;
+  }
+
+  if (expr->IsLiteralCompareNull(&sub_expr)) {
+    EmitLiteralCompareNil(expr, sub_expr, kNullValue);
     return true;
   }
 
diff --git a/src/full-codegen.h b/src/full-codegen.h
index 803c618..6958dae 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -391,25 +391,16 @@
   // Try to perform a comparison as a fast inlined literal compare if
   // the operands allow it.  Returns true if the compare operations
   // has been matched and all code generated; false otherwise.
-  bool TryLiteralCompare(CompareOperation* compare,
-                         Label* if_true,
-                         Label* if_false,
-                         Label* fall_through);
+  bool TryLiteralCompare(CompareOperation* compare);
 
   // Platform-specific code for comparing the type of a value with
   // a given literal string.
-  void EmitLiteralCompareTypeof(Expression* expr,
-                                Handle<String> check,
-                                Label* if_true,
-                                Label* if_false,
-                                Label* fall_through);
+  void EmitLiteralCompareTypeof(Expression* expr, Handle<String> check);
 
-  // Platform-specific code for strict equality comparison with
-  // the undefined value.
-  void EmitLiteralCompareUndefined(Expression* expr,
-                                   Label* if_true,
-                                   Label* if_false,
-                                   Label* fall_through);
+  // Platform-specific code for equality comparison with a nil-like value.
+  void EmitLiteralCompareNil(CompareOperation* expr,
+                             Expression* sub_expr,
+                             NilValue nil);
 
   // Bailout support.
   void PrepareForBailout(Expression* node, State state);
diff --git a/src/func-name-inferrer.h b/src/func-name-inferrer.h
index bec3a5c..1a57268 100644
--- a/src/func-name-inferrer.h
+++ b/src/func-name-inferrer.h
@@ -70,6 +70,12 @@
     }
   }
 
+  void RemoveLastFunction() {
+    if (IsOpen() && !funcs_to_infer_.is_empty()) {
+      funcs_to_infer_.RemoveLast();
+    }
+  }
+
   // Infers a function name and leaves names collection state.
   void Infer() {
     ASSERT(IsOpen());
diff --git a/src/globals.h b/src/globals.h
index 6c6966a..d0c78d6 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -255,6 +255,10 @@
 const int kBinary32MantissaBits = 23;
 const int kBinary32ExponentShift = 23;
 
+// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
+// other bits set.
+const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
+
 // ASCII/UC16 constants
 // Code-point values in Unicode 4.0 are 21 bits wide.
 typedef uint16_t uc16;
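
kQuietNaNMask above covers bit 51 (the quiet bit on x86 and ARM) through bit
62 (the top exponent bit). A small standalone check that a double has all of
those bits set, i.e. is a quiet NaN on such platforms:

    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // True when the exponent bits and the top mantissa bit are all set.
    static bool HasQuietNaNBits(double d) {
      const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      return (bits & kQuietNaNMask) == kQuietNaNMask;
    }

    int main() {
      std::printf("NaN: %d\n", HasQuietNaNBits(std::nan("")));  // 1
      std::printf("1.0: %d\n", HasQuietNaNBits(1.0));           // 0
      return 0;
    }
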
diff --git a/src/handles.cc b/src/handles.cc
index 35c363c..407a3b5 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -421,17 +421,18 @@
 }
 
 
-Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
-                                   JSObject::HiddenPropertiesFlag flag) {
+Handle<Object> SetHiddenProperty(Handle<JSObject> obj,
+                                 Handle<String> key,
+                                 Handle<Object> value) {
   CALL_HEAP_FUNCTION(obj->GetIsolate(),
-                     obj->GetHiddenProperties(flag),
+                     obj->SetHiddenProperty(*key, *value),
                      Object);
 }
 
 
-int GetIdentityHash(Handle<JSObject> obj) {
+int GetIdentityHash(Handle<JSReceiver> obj) {
   CALL_AND_RETRY(obj->GetIsolate(),
-                 obj->GetIdentityHash(JSObject::ALLOW_CREATION),
+                 obj->GetIdentityHash(ALLOW_CREATION),
                  return Smi::cast(__object__)->value(),
                  return 0);
 }
@@ -886,7 +887,7 @@
 
 
 Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
-                                               Handle<JSObject> key,
+                                               Handle<JSReceiver> key,
                                                Handle<Object> value) {
   CALL_HEAP_FUNCTION(table->GetIsolate(),
                      table->Put(*key, *value),
diff --git a/src/handles.h b/src/handles.h
index 7eaf4de..d5521f8 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -263,14 +263,13 @@
 
 Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value);
 
-// Return the object's hidden properties object. If the object has no hidden
-// properties and HiddenPropertiesFlag::ALLOW_CREATION is passed, then a new
-// hidden property object will be allocated. Otherwise Heap::undefined_value
-// is returned.
-Handle<Object> GetHiddenProperties(Handle<JSObject> obj,
-                                   JSObject::HiddenPropertiesFlag flag);
+// Sets a hidden property on an object. Returns obj on success, undefined
+// if trying to set the property on a detached proxy.
+Handle<Object> SetHiddenProperty(Handle<JSObject> obj,
+                                 Handle<String> key,
+                                 Handle<Object> value);
 
-int GetIdentityHash(Handle<JSObject> obj);
+int GetIdentityHash(Handle<JSReceiver> obj);
 
 Handle<Object> DeleteElement(Handle<JSObject> obj, uint32_t index);
 Handle<Object> DeleteProperty(Handle<JSObject> obj, Handle<String> prop);
@@ -348,7 +347,7 @@
 Handle<Object> PreventExtensions(Handle<JSObject> object);
 
 Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
-                                               Handle<JSObject> key,
+                                               Handle<JSReceiver> key,
                                                Handle<Object> value);
 
 // Does lazy compilation of the given function. Returns true on success and
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 7b666af..4bd893e 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -33,15 +33,26 @@
 #include "list-inl.h"
 #include "objects.h"
 #include "v8-counters.h"
+#include "store-buffer.h"
+#include "store-buffer-inl.h"
 
 namespace v8 {
 namespace internal {
 
 void PromotionQueue::insert(HeapObject* target, int size) {
+  if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(rear_))) {
+    NewSpacePage* rear_page =
+        NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_));
+    ASSERT(!rear_page->prev_page()->is_anchor());
+    rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->body_limit());
+  }
   *(--rear_) = reinterpret_cast<intptr_t>(target);
   *(--rear_) = size;
   // Assert no overflow into live objects.
-  ASSERT(reinterpret_cast<Address>(rear_) >= HEAP->new_space()->top());
+#ifdef DEBUG
+  SemiSpace::AssertValidRange(HEAP->new_space()->top(),
+                              reinterpret_cast<Address>(rear_));
+#endif
 }
 
 
@@ -84,7 +95,7 @@
   // Allocate string.
   Object* result;
   { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
-                   ? lo_space_->AllocateRaw(size)
+                   ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
                    : old_data_space_->AllocateRaw(size);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
@@ -117,7 +128,7 @@
   // Allocate string.
   Object* result;
   { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
-                   ? lo_space_->AllocateRaw(size)
+                   ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
                    : old_data_space_->AllocateRaw(size);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
@@ -181,7 +192,7 @@
   } else if (CODE_SPACE == space) {
     result = code_space_->AllocateRaw(size_in_bytes);
   } else if (LO_SPACE == space) {
-    result = lo_space_->AllocateRaw(size_in_bytes);
+    result = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
   } else if (CELL_SPACE == space) {
     result = cell_space_->AllocateRaw(size_in_bytes);
   } else {
@@ -265,6 +276,11 @@
 }
 
 
+bool Heap::InNewSpace(Address addr) {
+  return new_space_.Contains(addr);
+}
+
+
 bool Heap::InFromSpace(Object* object) {
   return new_space_.FromSpaceContains(object);
 }
@@ -275,29 +291,36 @@
 }
 
 
+bool Heap::OldGenerationAllocationLimitReached() {
+  if (!incremental_marking()->IsStopped()) return false;
+  return OldGenerationSpaceAvailable() < 0;
+}
+
+
 bool Heap::ShouldBePromoted(Address old_address, int object_size) {
   // An object should be promoted if:
   // - the object has survived a scavenge operation or
   // - to space is already 25% full.
-  return old_address < new_space_.age_mark()
-      || (new_space_.Size() + object_size) >= (new_space_.Capacity() >> 2);
+  NewSpacePage* page = NewSpacePage::FromAddress(old_address);
+  Address age_mark = new_space_.age_mark();
+  bool below_mark = page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
+      (!page->ContainsLimit(age_mark) || old_address < age_mark);
+  return below_mark || (new_space_.Size() + object_size) >=
+                        (new_space_.EffectiveCapacity() >> 2);
 }
 
 
 void Heap::RecordWrite(Address address, int offset) {
-  if (new_space_.Contains(address)) return;
-  ASSERT(!new_space_.FromSpaceContains(address));
-  SLOW_ASSERT(Contains(address + offset));
-  Page::FromAddress(address)->MarkRegionDirty(address + offset);
+  if (!InNewSpace(address)) store_buffer_.Mark(address + offset);
 }
 
 
 void Heap::RecordWrites(Address address, int start, int len) {
-  if (new_space_.Contains(address)) return;
-  ASSERT(!new_space_.FromSpaceContains(address));
-  Page* page = Page::FromAddress(address);
-  page->SetRegionMarks(page->GetRegionMarks() |
-      page->GetRegionMaskForSpan(address + start, len * kPointerSize));
+  if (!InNewSpace(address)) {
+    for (int i = 0; i < len; i++) {
+      store_buffer_.Mark(address + start + i * kPointerSize);
+    }
+  }
 }
 
 
@@ -343,31 +366,6 @@
 }
 
 
-void Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
-                                                   Address src,
-                                                   int byte_size) {
-  ASSERT(IsAligned(byte_size, kPointerSize));
-
-  Page* page = Page::FromAddress(dst);
-  uint32_t marks = page->GetRegionMarks();
-
-  for (int remaining = byte_size / kPointerSize;
-       remaining > 0;
-       remaining--) {
-    Memory::Object_at(dst) = Memory::Object_at(src);
-
-    if (InNewSpace(Memory::Object_at(dst))) {
-      marks |= page->GetRegionMaskForAddress(dst);
-    }
-
-    dst += kPointerSize;
-    src += kPointerSize;
-  }
-
-  page->SetRegionMarks(marks);
-}
-
-
 void Heap::MoveBlock(Address dst, Address src, int byte_size) {
   ASSERT(IsAligned(byte_size, kPointerSize));
 
@@ -387,16 +385,6 @@
 }
 
 
-void Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
-                                                   Address src,
-                                                   int byte_size) {
-  ASSERT(IsAligned(byte_size, kPointerSize));
-  ASSERT((dst < src) || (dst >= (src + byte_size)));
-
-  CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, byte_size);
-}
-
-
 void Heap::ScavengePointer(HeapObject** p) {
   ScavengeObject(p, *p);
 }
@@ -414,7 +402,9 @@
   // If the first word is a forwarding address, the object has already been
   // copied.
   if (first_word.IsForwardingAddress()) {
-    *p = first_word.ToForwardingAddress();
+    HeapObject* dest = first_word.ToForwardingAddress();
+    ASSERT(HEAP->InFromSpace(*p));
+    *p = dest;
     return;
   }
 
@@ -459,7 +449,7 @@
         amount_of_external_allocated_memory_ -
         amount_of_external_allocated_memory_at_last_global_gc_;
     if (amount_since_last_global_gc > external_allocation_limit_) {
-      CollectAllGarbage(false);
+      CollectAllGarbage(kNoGCFlags);
     }
   } else {
     // Avoid underflow.
@@ -476,6 +466,7 @@
   roots_[kLastScriptIdRootIndex] = last_script_id;
 }
 
+
 Isolate* Heap::isolate() {
   return reinterpret_cast<Isolate*>(reinterpret_cast<intptr_t>(this) -
       reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(4)->heap()) + 4);
@@ -688,15 +679,6 @@
 }
 
 
-void MarkCompactCollector::SetMark(HeapObject* obj) {
-  tracer_->increment_marked_count();
-#ifdef DEBUG
-  UpdateLiveObjectCount(obj);
-#endif
-  obj->SetMark();
-}
-
-
 } }  // namespace v8::internal
 
 #endif  // V8_HEAP_INL_H_
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index 7e613e9..46c63c2 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -114,7 +114,6 @@
   bool generation_completed = true;
   switch (s_type) {
     case HeapSnapshot::kFull: {
-      HEAP->CollectAllGarbage(true);
       HeapSnapshotGenerator generator(result, control);
       generation_completed = generator.GenerateSnapshot();
       break;
diff --git a/src/heap.cc b/src/heap.cc
index d018593..d1f48ac 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -36,13 +36,16 @@
 #include "deoptimizer.h"
 #include "global-handles.h"
 #include "heap-profiler.h"
+#include "incremental-marking.h"
 #include "liveobjectlist-inl.h"
 #include "mark-compact.h"
 #include "natives.h"
 #include "objects-visiting.h"
+#include "objects-visiting-inl.h"
 #include "runtime-profiler.h"
 #include "scopeinfo.h"
 #include "snapshot.h"
+#include "store-buffer.h"
 #include "v8threads.h"
 #include "vm-state-inl.h"
 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
@@ -58,10 +61,6 @@
 namespace internal {
 
 
-static const intptr_t kMinimumPromotionLimit = 2 * MB;
-static const intptr_t kMinimumAllocationLimit = 8 * MB;
-
-
 static Mutex* gc_initializer_mutex = OS::CreateMutex();
 
 
@@ -70,27 +69,21 @@
 // semispace_size_ should be a power of 2 and old_generation_size_ should be
 // a multiple of Page::kPageSize.
 #if defined(ANDROID)
-      reserved_semispace_size_(2*MB),
-      max_semispace_size_(2*MB),
-      initial_semispace_size_(128*KB),
-      max_old_generation_size_(192*MB),
-      max_executable_size_(max_old_generation_size_),
+#define LUMP_OF_MEMORY (128 * KB)
       code_range_size_(0),
 #elif defined(V8_TARGET_ARCH_X64)
-      reserved_semispace_size_(16*MB),
-      max_semispace_size_(16*MB),
-      initial_semispace_size_(1*MB),
-      max_old_generation_size_(1400*MB),
-      max_executable_size_(256*MB),
+#define LUMP_OF_MEMORY (2 * MB)
       code_range_size_(512*MB),
 #else
-      reserved_semispace_size_(8*MB),
-      max_semispace_size_(8*MB),
-      initial_semispace_size_(512*KB),
-      max_old_generation_size_(700*MB),
-      max_executable_size_(128*MB),
+#define LUMP_OF_MEMORY MB
       code_range_size_(0),
 #endif
+      reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
+      max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
+      initial_semispace_size_(Max(LUMP_OF_MEMORY, Page::kPageSize)),
+      max_old_generation_size_(700ul * LUMP_OF_MEMORY),
+      max_executable_size_(128l * LUMP_OF_MEMORY),
+
 // Variables set based on semispace_size_ and old_generation_size_ in
 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
 // Will be 4 * reserved_semispace_size_ to ensure that young
@@ -100,6 +93,7 @@
       always_allocate_scope_depth_(0),
       linear_allocation_scope_depth_(0),
       contexts_disposed_(0),
+      scan_on_scavenge_pages_(0),
       new_space_(this),
       old_pointer_space_(NULL),
       old_data_space_(NULL),
@@ -109,7 +103,6 @@
       lo_space_(NULL),
       gc_state_(NOT_IN_GC),
       gc_post_processing_depth_(0),
-      mc_count_(0),
       ms_count_(0),
       gc_count_(0),
       unflattened_strings_length_(0),
@@ -121,10 +114,13 @@
 #endif  // DEBUG
       old_gen_promotion_limit_(kMinimumPromotionLimit),
       old_gen_allocation_limit_(kMinimumAllocationLimit),
+      old_gen_limit_factor_(1),
+      size_of_old_gen_at_last_old_space_gc_(0),
       external_allocation_limit_(0),
       amount_of_external_allocated_memory_(0),
       amount_of_external_allocated_memory_at_last_global_gc_(0),
       old_gen_exhausted_(false),
+      store_buffer_rebuilder_(store_buffer()),
       hidden_symbol_(NULL),
       global_gc_prologue_callback_(NULL),
       global_gc_epilogue_callback_(NULL),
@@ -141,12 +137,14 @@
       min_in_mutator_(kMaxInt),
       alive_after_last_gc_(0),
       last_gc_end_timestamp_(0.0),
-      page_watermark_invalidated_mark_(1 << Page::WATERMARK_INVALIDATED),
+      store_buffer_(this),
+      marking_(this),
+      incremental_marking_(this),
       number_idle_notifications_(0),
       last_idle_notification_gc_count_(0),
       last_idle_notification_gc_count_init_(false),
       configured_(false),
-      is_safe_to_read_maps_(true) {
+      chunks_queued_for_free_(NULL) {
   // Allow build-time customization of the max semispace size. Building
   // V8 with snapshots and a non-default max semispace size is much
   // easier if you can define it as part of the build environment.
@@ -224,29 +222,10 @@
 
 
 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
-  ASSERT(!HEAP->InNewSpace(object));  // Code only works for old objects.
-  ASSERT(!HEAP->mark_compact_collector()->are_map_pointers_encoded());
-  MapWord map_word = object->map_word();
-  map_word.ClearMark();
-  map_word.ClearOverflow();
-  return object->SizeFromMap(map_word.ToMap());
-}
-
-
-int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
-  ASSERT(!HEAP->InNewSpace(object));  // Code only works for old objects.
-  ASSERT(HEAP->mark_compact_collector()->are_map_pointers_encoded());
-  uint32_t marker = Memory::uint32_at(object->address());
-  if (marker == MarkCompactCollector::kSingleFreeEncoding) {
-    return kIntSize;
-  } else if (marker == MarkCompactCollector::kMultiFreeEncoding) {
-    return Memory::int_at(object->address() + kIntSize);
-  } else {
-    MapWord map_word = object->map_word();
-    Address map_address = map_word.DecodeMapAddress(HEAP->map_space());
-    Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
-    return object->SizeFromMap(map);
+  if (IntrusiveMarking::IsMarked(object)) {
+    return IntrusiveMarking::SizeOfMarkedObject(object);
   }
+  return object->SizeFromMap(object->map());
 }
 
 
@@ -400,6 +379,7 @@
 #endif  // DEBUG
 
   LiveObjectList::GCPrologue();
+  store_buffer()->GCPrologue();
 }
 
 intptr_t Heap::SizeOfObjects() {
@@ -412,6 +392,7 @@
 }
 
 void Heap::GarbageCollectionEpilogue() {
+  store_buffer()->GCEpilogue();
   LiveObjectList::GCEpilogue();
 #ifdef DEBUG
   allow_allocation(true);
@@ -443,13 +424,13 @@
 }
 
 
-void Heap::CollectAllGarbage(bool force_compaction) {
+void Heap::CollectAllGarbage(int flags) {
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
-  mark_compact_collector_.SetForceCompaction(force_compaction);
+  mark_compact_collector_.SetFlags(flags);
   CollectGarbage(OLD_POINTER_SPACE);
-  mark_compact_collector_.SetForceCompaction(false);
+  mark_compact_collector_.SetFlags(kNoGCFlags);
 }
 
 
@@ -457,8 +438,6 @@
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
-  mark_compact_collector()->SetForceCompaction(true);
-
   // Major GC would invoke weak handle callbacks on weakly reachable
   // handles, but won't collect weakly reachable objects until next
   // major GC.  Therefore if we collect aggressively and weak handle callback
@@ -467,13 +446,14 @@
   // Note: as weak callbacks can execute arbitrary code, we cannot
   // hope that eventually there will be no weak callbacks invocations.
   // Therefore stop recollecting after several attempts.
+  mark_compact_collector()->SetFlags(kMakeHeapIterableMask);
   const int kMaxNumberOfAttempts = 7;
   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
     if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
       break;
     }
   }
-  mark_compact_collector()->SetForceCompaction(false);
+  mark_compact_collector()->SetFlags(kNoGCFlags);
 }
 
 
@@ -490,6 +470,23 @@
   allocation_timeout_ = Max(6, FLAG_gc_interval);
 #endif
 
+  if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
+    if (FLAG_trace_incremental_marking) {
+      PrintF("[IncrementalMarking] Scavenge during marking.\n");
+    }
+  }
+
+  if (collector == MARK_COMPACTOR &&
+      !mark_compact_collector()->PreciseSweepingRequired() &&
+      !incremental_marking()->IsStopped() &&
+      !incremental_marking()->should_hurry() &&
+      FLAG_incremental_marking_steps) {
+    if (FLAG_trace_incremental_marking) {
+      PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
+    }
+    collector = SCAVENGER;
+  }
+
   bool next_gc_likely_to_collect_more = false;
 
   { GCTracer tracer(this);
@@ -512,13 +509,24 @@
     GarbageCollectionEpilogue();
   }
 
+  ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
+  if (incremental_marking()->IsStopped()) {
+    if (incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) {
+      incremental_marking()->Start();
+    }
+  }
+
   return next_gc_likely_to_collect_more;
 }
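
The two policy changes in this function amount to a small decision rule: a
requested mark-sweep is downgraded to a scavenge while incremental marking
is still making useful progress, and incremental marking is (re)started
after a collection once the next GC looks likely to be a full one.  A
hedged, standalone sketch of both decisions (the enum, struct and predicate
names are invented for illustration; in V8 the inputs come from
IncrementalMarking and MarkCompactCollector):

enum class Collector { kScavenger, kMarkCompactor };

struct GcPolicyInputs {
  bool incremental_marking_stopped;
  bool incremental_marking_should_hurry;
  bool precise_sweeping_required;
  bool incremental_steps_enabled;
};

// A requested mark-sweep is downgraded to a scavenge while the incremental
// marker is active, not in a hurry, and allowed to take steps; otherwise the
// request is honoured as-is.
Collector SelectCollector(Collector requested, const GcPolicyInputs& in) {
  if (requested == Collector::kMarkCompactor &&
      !in.precise_sweeping_required &&
      !in.incremental_marking_stopped &&
      !in.incremental_marking_should_hurry &&
      in.incremental_steps_enabled) {
    return Collector::kScavenger;
  }
  return requested;
}

// Companion rule: after a collection, incremental marking is started only
// when it is stopped, judged worth activating, and the next GC is likely to
// be a full one.
bool ShouldStartIncrementalMarking(bool marking_stopped,
                                   bool worth_activating,
                                   bool next_gc_likely_full) {
  return marking_stopped && worth_activating && next_gc_likely_full;
}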
 
 
 void Heap::PerformScavenge() {
   GCTracer tracer(this);
-  PerformGarbageCollection(SCAVENGER, &tracer);
+  if (incremental_marking()->IsStopped()) {
+    PerformGarbageCollection(SCAVENGER, &tracer);
+  } else {
+    PerformGarbageCollection(MARK_COMPACTOR, &tracer);
+  }
 }
 
 
@@ -610,13 +618,6 @@
 
   // Committing memory to from space failed.
   // Try shrinking and try again.
-  PagedSpaces spaces;
-  for (PagedSpace* space = spaces.next();
-       space != NULL;
-       space = spaces.next()) {
-    space->RelinkPageListInChunkOrder(true);
-  }
-
   Shrink();
   if (new_space_.CommitFromSpaceIfNeeded()) return;
 
@@ -647,7 +648,10 @@
 
 
 void Heap::ClearNormalizedMapCaches() {
-  if (isolate_->bootstrapper()->IsActive()) return;
+  if (isolate_->bootstrapper()->IsActive() &&
+      !incremental_marking()->IsMarking()) {
+    return;
+  }
 
   Object* context = global_contexts_list_;
   while (!context->IsUndefined()) {
@@ -657,24 +661,6 @@
 }
 
 
-#ifdef DEBUG
-
-enum PageWatermarkValidity {
-  ALL_VALID,
-  ALL_INVALID
-};
-
-static void VerifyPageWatermarkValidity(PagedSpace* space,
-                                        PageWatermarkValidity validity) {
-  PageIterator it(space, PageIterator::PAGES_IN_USE);
-  bool expected_value = (validity == ALL_VALID);
-  while (it.has_next()) {
-    Page* page = it.next();
-    ASSERT(page->IsWatermarkValid() == expected_value);
-  }
-}
-#endif
-
 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
   double survival_rate =
       (static_cast<double>(young_survivors_after_last_gc_) * 100) /
@@ -727,6 +713,13 @@
 
   int start_new_space_size = Heap::new_space()->SizeAsInt();
 
+  if (IsHighSurvivalRate()) {
+    // We speed up the incremental marker if it is running so that it
+    // does not fall behind the rate of promotion, which would cause a
+    // constantly growing old space.
+    incremental_marking()->NotifyOfHighPromotionRate();
+  }
+
   if (collector == MARK_COMPACTOR) {
     // Perform mark-sweep with optional compaction.
     MarkCompact(tracer);
@@ -736,11 +729,7 @@
 
     UpdateSurvivalRateTrend(start_new_space_size);
 
-    intptr_t old_gen_size = PromotedSpaceSize();
-    old_gen_promotion_limit_ =
-        old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
-    old_gen_allocation_limit_ =
-        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
+    size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSize();
 
     if (high_survival_rate_during_scavenges &&
         IsStableOrIncreasingSurvivalTrend()) {
@@ -750,10 +739,16 @@
       // In this case we aggressively raise old generation memory limits to
       // postpone subsequent mark-sweep collection and thus trade memory
       // space for the mutation speed.
-      old_gen_promotion_limit_ *= 2;
-      old_gen_allocation_limit_ *= 2;
+      old_gen_limit_factor_ = 2;
+    } else {
+      old_gen_limit_factor_ = 1;
     }
 
+    old_gen_promotion_limit_ =
+        OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
+    old_gen_allocation_limit_ =
+        OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
+
     old_gen_exhausted_ = false;
   } else {
     tracer_ = tracer;
@@ -782,9 +777,7 @@
         amount_of_external_allocated_memory_;
   }
 
-  GCCallbackFlags callback_flags = tracer->is_compacting()
-      ? kGCCallbackFlagCompacted
-      : kNoGCCallbackFlags;
+  GCCallbackFlags callback_flags = kNoGCCallbackFlags;
   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
     if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
       gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
@@ -808,34 +801,24 @@
 
   mark_compact_collector_.Prepare(tracer);
 
-  bool is_compacting = mark_compact_collector_.IsCompacting();
+  ms_count_++;
+  tracer->set_full_gc_count(ms_count_);
 
-  if (is_compacting) {
-    mc_count_++;
-  } else {
-    ms_count_++;
-  }
-  tracer->set_full_gc_count(mc_count_ + ms_count_);
+  MarkCompactPrologue();
 
-  MarkCompactPrologue(is_compacting);
-
-  is_safe_to_read_maps_ = false;
   mark_compact_collector_.CollectGarbage();
-  is_safe_to_read_maps_ = true;
 
   LOG(isolate_, ResourceEvent("markcompact", "end"));
 
   gc_state_ = NOT_IN_GC;
 
-  Shrink();
-
   isolate_->counters()->objs_since_last_full()->Set(0);
 
   contexts_disposed_ = 0;
 }
 
 
-void Heap::MarkCompactPrologue(bool is_compacting) {
+void Heap::MarkCompactPrologue() {
   // At any old GC clear the keyed lookup cache to enable collection of unused
   // maps.
   isolate_->keyed_lookup_cache()->Clear();
@@ -847,7 +830,8 @@
 
   CompletelyClearInstanceofCache();
 
-  if (is_compacting) FlushNumberStringCache();
+  // TODO(1605): Select a heuristic for flushing the NumberString cache with
+  // FlushNumberStringCache.
   if (FLAG_cleanup_code_caches_at_gc) {
     polymorphic_code_cache()->set_cache(undefined_value());
   }
@@ -857,13 +841,8 @@
 
 
 Object* Heap::FindCodeObject(Address a) {
-  Object* obj = NULL;  // Initialization to please compiler.
-  { MaybeObject* maybe_obj = code_space_->FindObject(a);
-    if (!maybe_obj->ToObject(&obj)) {
-      obj = lo_space_->FindObject(a)->ToObjectUnchecked();
-    }
-  }
-  return obj;
+  return isolate()->inner_pointer_to_code_cache()->
+      GcSafeFindCodeForInnerPointer(a);
 }
 
 
@@ -911,14 +890,18 @@
   // do not expect them.
   VerifyNonPointerSpacePointersVisitor v;
   HeapObjectIterator code_it(HEAP->code_space());
-  for (HeapObject* object = code_it.next();
-       object != NULL; object = code_it.next())
+  for (HeapObject* object = code_it.Next();
+       object != NULL; object = code_it.Next())
     object->Iterate(&v);
 
-  HeapObjectIterator data_it(HEAP->old_data_space());
-  for (HeapObject* object = data_it.next();
-       object != NULL; object = data_it.next())
-    object->Iterate(&v);
+  // The old data space is normally swept conservatively, so the iterator
+  // does not work on it and we normally skip this verification step.
+  if (!HEAP->old_data_space()->was_swept_conservatively()) {
+    HeapObjectIterator data_it(HEAP->old_data_space());
+    for (HeapObject* object = data_it.Next();
+         object != NULL; object = data_it.Next())
+      object->Iterate(&v);
+  }
 }
 #endif
 
@@ -940,6 +923,64 @@
 }
 
 
+void Heap::ScavengeStoreBufferCallback(
+    Heap* heap,
+    MemoryChunk* page,
+    StoreBufferEvent event) {
+  heap->store_buffer_rebuilder_.Callback(page, event);
+}
+
+
+void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
+  if (event == kStoreBufferStartScanningPagesEvent) {
+    start_of_current_page_ = NULL;
+    current_page_ = NULL;
+  } else if (event == kStoreBufferScanningPageEvent) {
+    if (current_page_ != NULL) {
+      // If this page already overflowed the store buffer during this iteration,
+      if (current_page_->scan_on_scavenge()) {
+        // then we should wipe out the entries that have been added for it.
+        store_buffer_->SetTop(start_of_current_page_);
+      } else if (store_buffer_->Top() - start_of_current_page_ >=
+                 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
+        // Did we find too many pointers in the previous page?  The heuristic is
+        // that no page can take more than 1/5 of the remaining slots in the store
+        // buffer.
+        current_page_->set_scan_on_scavenge(true);
+        store_buffer_->SetTop(start_of_current_page_);
+      } else {
+        // In this case the page we scanned took a reasonable number of slots in
+        // the store buffer.  It has now been rehabilitated and is no longer
+        // marked scan_on_scavenge.
+        ASSERT(!current_page_->scan_on_scavenge());
+      }
+    }
+    start_of_current_page_ = store_buffer_->Top();
+    current_page_ = page;
+  } else if (event == kStoreBufferFullEvent) {
+    // The current page overflowed the store buffer again.  Wipe out its entries
+    // in the store buffer and mark it scan-on-scavenge again.  This may happen
+    // several times while scanning.
+    if (current_page_ == NULL) {
+      // Store Buffer overflowed while scanning promoted objects.  These are not
+      // in any particular page, though they are likely to be clustered by the
+      // allocation routines.
+      store_buffer_->HandleFullness();
+    } else {
+      // Store Buffer overflowed while scanning a particular old space page for
+      // pointers to new space.
+      ASSERT(current_page_ == page);
+      ASSERT(page != NULL);
+      current_page_->set_scan_on_scavenge(true);
+      ASSERT(start_of_current_page_ != store_buffer_->Top());
+      store_buffer_->SetTop(start_of_current_page_);
+    }
+  } else {
+    UNREACHABLE();
+  }
+}
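
The interesting part of the callback above is the rewind heuristic:
comparing a page's freshly recorded entries against a quarter of the space
still free in the buffer is the same as saying the page consumed roughly a
fifth of the slots that were free when its scan began.  A self-contained
sketch of that check, with the buffer modelled as a plain array of slot
addresses (the names and types are illustrative, not V8's):

#include <cassert>
#include <cstdint>

// Given the slice of the store buffer filled while scanning one page, decide
// whether to keep those entries or to rewind the buffer and mark the page
// scan-on-scavenge instead.
struct PageScanDecision {
  bool mark_scan_on_scavenge;
  uintptr_t* new_top;  // where the store buffer top should end up
};

PageScanDecision DecidePageTreatment(uintptr_t* start_of_page_entries,
                                     uintptr_t* top,
                                     uintptr_t* limit) {
  assert(start_of_page_entries <= top && top <= limit);
  // A page keeps its entries only if they take less than a quarter of the
  // space still free in the buffer; otherwise rescanning the page at
  // scavenge time is cheaper than letting it crowd out every other page.
  if (top - start_of_page_entries >= (limit - top) >> 2) {
    return {true, start_of_page_entries};  // rewind; rescan the page later
  }
  return {false, top};  // keep the recorded slots
}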
+
+
 void Heap::Scavenge() {
 #ifdef DEBUG
   if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
@@ -947,22 +988,6 @@
 
   gc_state_ = SCAVENGE;
 
-  SwitchScavengingVisitorsTableIfProfilingWasEnabled();
-
-  Page::FlipMeaningOfInvalidatedWatermarkFlag(this);
-#ifdef DEBUG
-  VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
-  VerifyPageWatermarkValidity(map_space_, ALL_VALID);
-#endif
-
-  // We do not update an allocation watermark of the top page during linear
-  // allocation to avoid overhead. So to maintain the watermark invariant
-  // we have to manually cache the watermark and mark the top page as having an
-  // invalid watermark. This guarantees that dirty regions iteration will use a
-  // correct watermark even if a linear allocation happens.
-  old_pointer_space_->FlushTopPageWatermark();
-  map_space_->FlushTopPageWatermark();
-
   // Implements Cheney's copying algorithm
   LOG(isolate_, ResourceEvent("scavenge", "begin"));
 
@@ -974,6 +999,13 @@
 
   CheckNewSpaceExpansionCriteria();
 
+  SelectScavengingVisitorsTable();
+
+  incremental_marking()->PrepareForScavenge();
+
+  old_pointer_space()->AdvanceSweeper(new_space_.Size());
+  old_data_space()->AdvanceSweeper(new_space_.Size());
+
   // Flip the semispaces.  After flipping, to space is empty, from space has
   // live objects.
   new_space_.Flip();
@@ -996,32 +1028,29 @@
   // for the addresses of promoted objects: every object promoted
   // frees up its size in bytes from the top of the new space, and
   // objects are at least one pointer in size.
-  Address new_space_front = new_space_.ToSpaceLow();
-  promotion_queue_.Initialize(new_space_.ToSpaceHigh());
+  Address new_space_front = new_space_.ToSpaceStart();
+  promotion_queue_.Initialize(new_space_.ToSpaceEnd());
 
-  is_safe_to_read_maps_ = false;
+#ifdef DEBUG
+  store_buffer()->Clean();
+#endif
+
   ScavengeVisitor scavenge_visitor(this);
   // Copy roots.
   IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
 
-  // Copy objects reachable from the old generation.  By definition,
-  // there are no intergenerational pointers in code or data spaces.
-  IterateDirtyRegions(old_pointer_space_,
-                      &Heap::IteratePointersInDirtyRegion,
-                      &ScavengePointer,
-                      WATERMARK_CAN_BE_INVALID);
-
-  IterateDirtyRegions(map_space_,
-                      &IteratePointersInDirtyMapsRegion,
-                      &ScavengePointer,
-                      WATERMARK_CAN_BE_INVALID);
-
-  lo_space_->IterateDirtyRegions(&ScavengePointer);
+  // Copy objects reachable from the old generation.
+  {
+    StoreBufferRebuildScope scope(this,
+                                  store_buffer(),
+                                  &ScavengeStoreBufferCallback);
+    store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
+  }
 
   // Copy objects reachable from cells by scavenging cell values directly.
   HeapObjectIterator cell_iterator(cell_space_);
-  for (HeapObject* cell = cell_iterator.next();
-       cell != NULL; cell = cell_iterator.next()) {
+  for (HeapObject* cell = cell_iterator.Next();
+       cell != NULL; cell = cell_iterator.Next()) {
     if (cell->IsJSGlobalPropertyCell()) {
       Address value_address =
           reinterpret_cast<Address>(cell) +
@@ -1046,14 +1075,16 @@
 
   LiveObjectList::UpdateReferencesForScavengeGC();
   isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
+  incremental_marking()->UpdateMarkingDequeAfterScavenge();
 
   ASSERT(new_space_front == new_space_.top());
 
-  is_safe_to_read_maps_ = true;
-
   // Set age mark.
   new_space_.set_age_mark(new_space_.top());
 
+  new_space_.LowerInlineAllocationLimit(
+      new_space_.inline_allocation_limit_step());
+
   // Update how much has survived scavenge.
   IncrementYoungSurvivorsCounter(static_cast<int>(
       (PromotedSpaceSize() - survived_watermark) + new_space_.Size()));
@@ -1112,35 +1143,56 @@
 }
 
 
+void Heap::UpdateReferencesInExternalStringTable(
+    ExternalStringTableUpdaterCallback updater_func) {
+
+  // Update old space string references.
+  if (external_string_table_.old_space_strings_.length() > 0) {
+    Object** start = &external_string_table_.old_space_strings_[0];
+    Object** end = start + external_string_table_.old_space_strings_.length();
+    for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
+  }
+
+  UpdateNewSpaceReferencesInExternalStringTable(updater_func);
+}
+
+
 static Object* ProcessFunctionWeakReferences(Heap* heap,
                                              Object* function,
                                              WeakObjectRetainer* retainer) {
-  Object* head = heap->undefined_value();
+  Object* undefined = heap->undefined_value();
+  Object* head = undefined;
   JSFunction* tail = NULL;
   Object* candidate = function;
-  while (candidate != heap->undefined_value()) {
+  while (candidate != undefined) {
     // Check whether to keep the candidate in the list.
     JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
     Object* retain = retainer->RetainAs(candidate);
     if (retain != NULL) {
-      if (head == heap->undefined_value()) {
+      if (head == undefined) {
         // First element in the list.
-        head = candidate_function;
+        head = retain;
       } else {
         // Subsequent elements in the list.
         ASSERT(tail != NULL);
-        tail->set_next_function_link(candidate_function);
+        tail->set_next_function_link(retain);
       }
       // Retained function is new tail.
+      candidate_function = reinterpret_cast<JSFunction*>(retain);
       tail = candidate_function;
+
+      ASSERT(retain->IsUndefined() || retain->IsJSFunction());
+
+      if (retain == undefined) break;
     }
+
     // Move to next element in the list.
     candidate = candidate_function->next_function_link();
   }
 
   // Terminate the list if there is one or more elements.
   if (tail != NULL) {
-    tail->set_next_function_link(heap->undefined_value());
+    tail->set_next_function_link(undefined);
   }
 
   return head;
@@ -1148,28 +1200,32 @@
 
 
 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
-  Object* head = undefined_value();
+  Object* undefined = undefined_value();
+  Object* head = undefined;
   Context* tail = NULL;
   Object* candidate = global_contexts_list_;
-  while (candidate != undefined_value()) {
+  while (candidate != undefined) {
     // Check whether to keep the candidate in the list.
     Context* candidate_context = reinterpret_cast<Context*>(candidate);
     Object* retain = retainer->RetainAs(candidate);
     if (retain != NULL) {
-      if (head == undefined_value()) {
+      if (head == undefined) {
         // First element in the list.
-        head = candidate_context;
+        head = retain;
       } else {
         // Subsequent elements in the list.
         ASSERT(tail != NULL);
         tail->set_unchecked(this,
                             Context::NEXT_CONTEXT_LINK,
-                            candidate_context,
+                            retain,
                             UPDATE_WRITE_BARRIER);
       }
       // Retained context is new tail.
+      candidate_context = reinterpret_cast<Context*>(retain);
       tail = candidate_context;
 
+      if (retain == undefined) break;
+
       // Process the weak list of optimized functions for the context.
       Object* function_list_head =
           ProcessFunctionWeakReferences(
@@ -1181,6 +1237,7 @@
                                        function_list_head,
                                        UPDATE_WRITE_BARRIER);
     }
+
     // Move to next element in the list.
     candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
   }
@@ -1212,35 +1269,45 @@
 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
                          Address new_space_front) {
   do {
-    ASSERT(new_space_front <= new_space_.top());
-
+    SemiSpace::AssertValidRange(new_space_front, new_space_.top());
     // The addresses new_space_front and new_space_.top() define a
     // queue of unprocessed copied objects.  Process them until the
     // queue is empty.
-    while (new_space_front < new_space_.top()) {
-      HeapObject* object = HeapObject::FromAddress(new_space_front);
-      new_space_front += NewSpaceScavenger::IterateBody(object->map(), object);
+    while (new_space_front != new_space_.top()) {
+      if (!NewSpacePage::IsAtEnd(new_space_front)) {
+        HeapObject* object = HeapObject::FromAddress(new_space_front);
+        new_space_front +=
+          NewSpaceScavenger::IterateBody(object->map(), object);
+      } else {
+        new_space_front =
+            NewSpacePage::FromLimit(new_space_front)->next_page()->body();
+      }
     }
 
     // Promote and process all the to-be-promoted objects.
-    while (!promotion_queue_.is_empty()) {
-      HeapObject* target;
-      int size;
-      promotion_queue_.remove(&target, &size);
+    {
+      StoreBufferRebuildScope scope(this,
+                                    store_buffer(),
+                                    &ScavengeStoreBufferCallback);
+      while (!promotion_queue()->is_empty()) {
+        HeapObject* target;
+        int size;
+        promotion_queue()->remove(&target, &size);
 
-      // Promoted object might be already partially visited
-      // during dirty regions iteration. Thus we search specificly
-      // for pointers to from semispace instead of looking for pointers
-      // to new space.
-      ASSERT(!target->IsMap());
-      IterateAndMarkPointersToFromSpace(target->address(),
-                                        target->address() + size,
-                                        &ScavengePointer);
+        // Promoted object might be already partially visited
+        // during old space pointer iteration. Thus we search specifically
+        // for pointers to from semispace instead of looking for pointers
+        // to new space.
+        ASSERT(!target->IsMap());
+        IterateAndMarkPointersToFromSpace(target->address(),
+                                          target->address() + size,
+                                          &ScavengeObject);
+      }
     }
 
     // Take another spin if there are now unswept objects in new space
     // (there are currently no more unswept promoted objects).
-  } while (new_space_front < new_space_.top());
+  } while (new_space_front != new_space_.top());
 
   return new_space_front;
 }
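
With a paged new space the Cheney scan pointer can no longer be compared
with '<': on reaching the end of a page it has to hop to the body of the
next page, which is why the loop above now compares with '!='.  A simplified
sketch of that page-hopping scan (the page type and helper below are
stand-ins, not V8's NewSpacePage API):

#include <cstdint>

// Stand-in for a new-space page: an object area plus a link to the next
// page.
struct ToyPage {
  uintptr_t body_start;
  uintptr_t body_end;
  ToyPage* next;
};

// Page-aware Cheney scan: advance through object bodies and, on reaching a
// page boundary, jump to the body of the next page.  visit_object is assumed
// to return the byte size of the object it visited, and this simplified loop
// assumes 'top' never coincides with a page boundary.
template <typename VisitFn>
uintptr_t ScanUnprocessed(uintptr_t front, uintptr_t top,
                          ToyPage* current_page, VisitFn visit_object) {
  while (front != top) {
    if (front == current_page->body_end) {
      current_page = current_page->next;
      front = current_page->body_start;
    } else {
      front += visit_object(front);
    }
  }
  return front;
}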
@@ -1252,26 +1319,11 @@
 };
 
 
-typedef void (*ScavengingCallback)(Map* map,
-                                   HeapObject** slot,
-                                   HeapObject* object);
+enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
 
 
-static Atomic32 scavenging_visitors_table_mode_;
-static VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
-
-
-INLINE(static void DoScavengeObject(Map* map,
-                                    HeapObject** slot,
-                                    HeapObject* obj));
-
-
-void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
-  scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
-}
-
-
-template<LoggingAndProfiling logging_and_profiling_mode>
+template<MarksHandling marks_handling,
+         LoggingAndProfiling logging_and_profiling_mode>
 class ScavengingVisitor : public StaticVisitorBase {
  public:
   static void Initialize() {
@@ -1306,9 +1358,13 @@
                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
                     Visit);
 
-    table_.Register(kVisitJSFunction,
-                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                        template VisitSpecialized<JSFunction::kSize>);
+    if (marks_handling == IGNORE_MARKS) {
+      table_.Register(kVisitJSFunction,
+                      &ObjectEvacuationStrategy<POINTER_OBJECT>::
+                          template VisitSpecialized<JSFunction::kSize>);
+    } else {
+      table_.Register(kVisitJSFunction, &EvacuateJSFunction);
+    }
 
     table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
                                    kVisitDataObject,
@@ -1373,10 +1429,15 @@
       }
     }
 
+    if (marks_handling == TRANSFER_MARKS) {
+      if (Marking::TransferColor(source, target)) {
+        MemoryChunk::IncrementLiveBytes(target->address(), size);
+      }
+    }
+
     return target;
   }
 
-
   template<ObjectContents object_contents, SizeRestriction size_restriction>
   static inline void EvacuateObject(Map* map,
                                     HeapObject** slot,
@@ -1386,13 +1447,14 @@
            (object_size <= Page::kMaxHeapObjectSize));
     ASSERT(object->Size() == object_size);
 
-    Heap* heap = map->heap();
+    Heap* heap = map->GetHeap();
     if (heap->ShouldBePromoted(object->address(), object_size)) {
       MaybeObject* maybe_result;
 
       if ((size_restriction != SMALL) &&
           (object_size > Page::kMaxHeapObjectSize)) {
-        maybe_result = heap->lo_space()->AllocateRawFixedArray(object_size);
+        maybe_result = heap->lo_space()->AllocateRaw(object_size,
+                                                     NOT_EXECUTABLE);
       } else {
         if (object_contents == DATA_OBJECT) {
           maybe_result = heap->old_data_space()->AllocateRaw(object_size);
@@ -1414,13 +1476,36 @@
         return;
       }
     }
-    Object* result =
-        heap->new_space()->AllocateRaw(object_size)->ToObjectUnchecked();
+    MaybeObject* allocation = heap->new_space()->AllocateRaw(object_size);
+    Object* result = allocation->ToObjectUnchecked();
+
     *slot = MigrateObject(heap, object, HeapObject::cast(result), object_size);
     return;
   }
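
EvacuateObject above boils down to a target-space decision: oversized
objects go to large-object space, promoted objects go to an old space chosen
by whether they may contain pointers, and everything else is copied within
new space.  A sketch of that choice in isolation (the enum and parameter
names are illustrative; max_regular_object_size stands in for
Page::kMaxHeapObjectSize):

enum class EvacuationTarget {
  kNewSpace,
  kOldPointerSpace,
  kOldDataSpace,
  kLargeObjectSpace
};

EvacuationTarget ChooseEvacuationTarget(bool should_be_promoted,
                                        bool may_contain_pointers,
                                        int object_size,
                                        int max_regular_object_size) {
  if (should_be_promoted) {
    if (object_size > max_regular_object_size) {
      return EvacuationTarget::kLargeObjectSpace;
    }
    return may_contain_pointers ? EvacuationTarget::kOldPointerSpace
                                : EvacuationTarget::kOldDataSpace;
  }
  return EvacuationTarget::kNewSpace;
}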
 
 
+  static inline void EvacuateJSFunction(Map* map,
+                                        HeapObject** slot,
+                                        HeapObject* object) {
+    ObjectEvacuationStrategy<POINTER_OBJECT>::
+        template VisitSpecialized<JSFunction::kSize>(map, slot, object);
+
+    HeapObject* target = *slot;
+    MarkBit mark_bit = Marking::MarkBitFrom(target);
+    if (Marking::IsBlack(mark_bit)) {
+      // This object is black and it might not be rescanned by marker.
+      // We should explicitly record code entry slot for compaction because
+      // promotion queue processing (IterateAndMarkPointersToFromSpace) will
+      // miss it as it is not HeapObject-tagged.
+      Address code_entry_slot =
+          target->address() + JSFunction::kCodeEntryOffset;
+      Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
+      map->GetHeap()->mark_compact_collector()->
+          RecordCodeEntrySlot(code_entry_slot, code);
+    }
+  }
+
+
   static inline void EvacuateFixedArray(Map* map,
                                         HeapObject** slot,
                                         HeapObject* object) {
@@ -1479,14 +1564,17 @@
                                                HeapObject* object) {
     ASSERT(IsShortcutCandidate(map->instance_type()));
 
-    if (ConsString::cast(object)->unchecked_second() ==
-        map->heap()->empty_string()) {
+    Heap* heap = map->GetHeap();
+
+    if (marks_handling == IGNORE_MARKS &&
+        ConsString::cast(object)->unchecked_second() ==
+        heap->empty_string()) {
       HeapObject* first =
           HeapObject::cast(ConsString::cast(object)->unchecked_first());
 
       *slot = first;
 
-      if (!map->heap()->InNewSpace(first)) {
+      if (!heap->InNewSpace(first)) {
         object->set_map_word(MapWord::FromForwardingAddress(first));
         return;
       }
@@ -1500,7 +1588,7 @@
         return;
       }
 
-      DoScavengeObject(first->map(), slot, first);
+      heap->DoScavengeObject(first->map(), slot, first);
       object->set_map_word(MapWord::FromForwardingAddress(*slot));
       return;
     }
@@ -1531,45 +1619,49 @@
 };
 
 
-template<LoggingAndProfiling logging_and_profiling_mode>
+template<MarksHandling marks_handling,
+         LoggingAndProfiling logging_and_profiling_mode>
 VisitorDispatchTable<ScavengingCallback>
-    ScavengingVisitor<logging_and_profiling_mode>::table_;
+    ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
 
 
 static void InitializeScavengingVisitorsTables() {
-  ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::Initialize();
-  ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::Initialize();
-  scavenging_visitors_table_.CopyFrom(
-      ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::GetTable());
-  scavenging_visitors_table_mode_ = LOGGING_AND_PROFILING_DISABLED;
+  ScavengingVisitor<TRANSFER_MARKS,
+                    LOGGING_AND_PROFILING_DISABLED>::Initialize();
+  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
+  ScavengingVisitor<TRANSFER_MARKS,
+                    LOGGING_AND_PROFILING_ENABLED>::Initialize();
+  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
 }
 
 
-void Heap::SwitchScavengingVisitorsTableIfProfilingWasEnabled() {
-  if (scavenging_visitors_table_mode_ == LOGGING_AND_PROFILING_ENABLED) {
-    // Table was already updated by some isolate.
-    return;
-  }
-
-  if (isolate()->logger()->is_logging() |
+void Heap::SelectScavengingVisitorsTable() {
+  bool logging_and_profiling =
+      isolate()->logger()->is_logging() ||
       CpuProfiler::is_profiling(isolate()) ||
       (isolate()->heap_profiler() != NULL &&
-       isolate()->heap_profiler()->is_profiling())) {
-    // If one of the isolates is doing scavenge at this moment of time
-    // it might see this table in an inconsitent state when
-    // some of the callbacks point to
-    // ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED> and others
-    // to ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>.
-    // However this does not lead to any bugs as such isolate does not have
-    // profiling enabled and any isolate with enabled profiling is guaranteed
-    // to see the table in the consistent state.
-    scavenging_visitors_table_.CopyFrom(
-        ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::GetTable());
+       isolate()->heap_profiler()->is_profiling());
 
-    // We use Release_Store to prevent reordering of this write before writes
-    // to the table.
-    Release_Store(&scavenging_visitors_table_mode_,
-                  LOGGING_AND_PROFILING_ENABLED);
+  if (!incremental_marking()->IsMarking()) {
+    if (!logging_and_profiling) {
+      scavenging_visitors_table_.CopyFrom(
+          ScavengingVisitor<IGNORE_MARKS,
+                            LOGGING_AND_PROFILING_DISABLED>::GetTable());
+    } else {
+      scavenging_visitors_table_.CopyFrom(
+          ScavengingVisitor<IGNORE_MARKS,
+                            LOGGING_AND_PROFILING_ENABLED>::GetTable());
+    }
+  } else {
+    if (!logging_and_profiling) {
+      scavenging_visitors_table_.CopyFrom(
+          ScavengingVisitor<TRANSFER_MARKS,
+                            LOGGING_AND_PROFILING_DISABLED>::GetTable());
+    } else {
+      scavenging_visitors_table_.CopyFrom(
+          ScavengingVisitor<TRANSFER_MARKS,
+                            LOGGING_AND_PROFILING_ENABLED>::GetTable());
+    }
   }
 }
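
The selection above picks one of four statically instantiated visitor tables
from two independent booleans: whether incremental marking is active (marks
must be transferred to the copied object) and whether logging or profiling
is on.  A compact way to see the shape of that dispatch, with a dummy table
type standing in for V8's VisitorDispatchTable instances:

enum MarksHandling { IGNORE_MARKS, TRANSFER_MARKS };
enum Profiling { PROFILING_DISABLED, PROFILING_ENABLED };

// Dummy per-configuration "table"; the real ones are filled in by
// ScavengingVisitor<marks, logging>::Initialize().
template <MarksHandling marks, Profiling profiling>
struct ToyTable {
  static int id() { return marks * 2 + profiling; }
};

// Sketch of the 2x2 selection performed by SelectScavengingVisitorsTable.
int SelectTableId(bool marking_active, bool profiling_active) {
  if (marking_active) {
    return profiling_active
               ? ToyTable<TRANSFER_MARKS, PROFILING_ENABLED>::id()
               : ToyTable<TRANSFER_MARKS, PROFILING_DISABLED>::id();
  }
  return profiling_active
             ? ToyTable<IGNORE_MARKS, PROFILING_ENABLED>::id()
             : ToyTable<IGNORE_MARKS, PROFILING_DISABLED>::id();
}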
 
@@ -1579,7 +1671,7 @@
   MapWord first_word = object->map_word();
   ASSERT(!first_word.IsForwardingAddress());
   Map* map = first_word.ToMap();
-  DoScavengeObject(map, p, object);
+  map->GetHeap()->DoScavengeObject(map, p, object);
 }
 
 
@@ -1605,7 +1697,9 @@
 }
 
 
-MaybeObject* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
+MaybeObject* Heap::AllocateMap(InstanceType instance_type,
+                               int instance_size,
+                               ElementsKind elements_kind) {
   Object* result;
   { MaybeObject* maybe_result = AllocateRawMap();
     if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -1627,7 +1721,7 @@
   map->set_unused_property_fields(0);
   map->set_bit_field(0);
   map->set_bit_field2(1 << Map::kIsExtensible);
-  map->set_elements_kind(FAST_ELEMENTS);
+  map->set_elements_kind(elements_kind);
 
   // If the map object is aligned fill the padding area with Smi 0 objects.
   if (Map::kPadStart < Map::kSize) {
@@ -1707,7 +1801,7 @@
   }
   set_empty_fixed_array(FixedArray::cast(obj));
 
-  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
+  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
   set_null_value(obj);
@@ -1798,6 +1892,12 @@
   }
   set_byte_array_map(Map::cast(obj));
 
+  { MaybeObject* maybe_obj =
+        AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
+    if (!maybe_obj->ToObject(&obj)) return false;
+  }
+  set_free_space_map(Map::cast(obj));
+
   { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
@@ -1998,7 +2098,7 @@
                                  Object* to_number,
                                  byte kind) {
   Object* result;
-  { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_DATA_SPACE);
+  { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   return Oddball::cast(result)->Initialize(to_string, to_number, kind);
@@ -2011,7 +2111,13 @@
   { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
-  set_neander_map(Map::cast(obj));
+  // Don't use Smi-only elements optimizations for objects with the neander
+  // map. There are too many cases where element values are set directly with a
+  // bottleneck to trap the Smi-only -> fast elements transition, and there
+  // appears to be no benefit in optimizing this case.
+  Map* new_neander_map = Map::cast(obj);
+  new_neander_map->set_elements_kind(FAST_ELEMENTS);
+  set_neander_map(new_neander_map);
 
   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
     if (!maybe_obj->ToObject(&obj)) return false;
@@ -2056,6 +2162,12 @@
   // To workaround the problem, make separate functions without inlining.
   Heap::CreateJSEntryStub();
   Heap::CreateJSConstructEntryStub();
+
+  // Create stubs that should be there, so we don't unexpectedly have to
+  // create them if we need them during the creation of another stub.
+  // Stub creation mixes raw pointers and handles in an unsafe manner so
+  // we cannot create stubs while we are creating stubs.
+  CodeStub::GenerateStubsAheadOfTime();
 }
 
 
@@ -2074,7 +2186,12 @@
   }
   set_nan_value(obj);
 
-  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
+  { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
+    if (!maybe_obj->ToObject(&obj)) return false;
+  }
+  set_infinity_value(obj);
+
+  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
   set_undefined_value(obj);
@@ -2126,26 +2243,34 @@
   set_the_hole_value(obj);
 
   { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
-                                           Smi::FromInt(-4),
+                                           Smi::FromInt(-2),
                                            Oddball::kArgumentMarker);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
   set_arguments_marker(obj);
 
   { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
-                                           Smi::FromInt(-2),
+                                           Smi::FromInt(-3),
                                            Oddball::kOther);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
   set_no_interceptor_result_sentinel(obj);
 
   { MaybeObject* maybe_obj = CreateOddball("termination_exception",
-                                           Smi::FromInt(-3),
+                                           Smi::FromInt(-4),
                                            Oddball::kOther);
     if (!maybe_obj->ToObject(&obj)) return false;
   }
   set_termination_exception(obj);
 
+  { MaybeObject* maybe_obj = CreateOddball("frame_alignment_marker",
+                                           Smi::FromInt(-5),
+                                           Oddball::kOther);
+    if (!maybe_obj->ToObject(&obj)) return false;
+  }
+  set_frame_alignment_marker(obj);
+  STATIC_ASSERT(Oddball::kLeastHiddenOddballNumber == -5);
+
   // Allocate the empty string.
   { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
     if (!maybe_obj->ToObject(&obj)) return false;
@@ -2422,6 +2547,15 @@
 }
 
 
+MaybeObject* Heap::Uint32ToString(uint32_t value,
+                                  bool check_number_string_cache) {
+  Object* number;
+  MaybeObject* maybe = NumberFromUint32(value);
+  if (!maybe->To<Object>(&number)) return maybe;
+  return NumberToString(number, check_number_string_cache);
+}
+
+
 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
   return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
 }
@@ -2737,25 +2871,23 @@
   // Make an attempt to flatten the buffer to reduce access time.
   buffer = buffer->TryFlattenGetString();
 
-  // TODO(1626): For now slicing external strings is not supported.  However,
-  // a flat cons string can have an external string as first part in some cases.
-  // Therefore we have to single out this case as well.
   if (!FLAG_string_slices ||
-      (buffer->IsConsString() &&
-        (!buffer->IsFlat() ||
-         !ConsString::cast(buffer)->first()->IsSeqString())) ||
-      buffer->IsExternalString() ||
+      !buffer->IsFlat() ||
       length < SlicedString::kMinLength ||
       pretenure == TENURED) {
     Object* result;
-    { MaybeObject* maybe_result = buffer->IsAsciiRepresentation()
-                     ? AllocateRawAsciiString(length, pretenure)
-                     : AllocateRawTwoByteString(length, pretenure);
+    // WriteToFlat takes care of the case when an indirect string has a
+    // different encoding from its underlying string.  These encodings may
+    // differ because of externalization.
+    bool is_ascii = buffer->IsAsciiRepresentation();
+    { MaybeObject* maybe_result = is_ascii
+                                  ? AllocateRawAsciiString(length, pretenure)
+                                  : AllocateRawTwoByteString(length, pretenure);
       if (!maybe_result->ToObject(&result)) return maybe_result;
     }
     String* string_result = String::cast(result);
     // Copy the characters into the new object.
-    if (buffer->IsAsciiRepresentation()) {
+    if (is_ascii) {
       ASSERT(string_result->IsAsciiRepresentation());
       char* dest = SeqAsciiString::cast(string_result)->GetChars();
       String::WriteToFlat(buffer, dest, start, end);
@@ -2768,12 +2900,17 @@
   }
 
   ASSERT(buffer->IsFlat());
-  ASSERT(!buffer->IsExternalString());
 #if DEBUG
   buffer->StringVerify();
 #endif
 
   Object* result;
+  // When slicing an indirect string we use its encoding for a newly created
+  // slice and don't check the encoding of the underlying string.  This is safe
+  // even if the encodings are different because of externalization.  If an
+  // indirect ASCII string is pointing to a two-byte string, the two-byte char
+  // codes of the underlying string must still fit into ASCII (because
+  // externalization must not change char codes).
   { Map* map = buffer->IsAsciiRepresentation()
                  ? sliced_ascii_string_map()
                  : sliced_string_map();
@@ -2799,13 +2936,14 @@
     sliced_string->set_parent(buffer);
     sliced_string->set_offset(start);
   }
-  ASSERT(sliced_string->parent()->IsSeqString());
+  ASSERT(sliced_string->parent()->IsSeqString() ||
+         sliced_string->parent()->IsExternalString());
   return result;
 }
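
After this change the substring path has only two outcomes: copy the
characters into a fresh sequential string, or wrap the already-flat parent
in a SlicedString.  The choice reduces to a handful of predicates; a
standalone sketch of it (the function is illustrative, although the flag and
constant it names mirror the ones used above):

// Slicing is used only for long enough, non-tenured substrings of a flat
// string and only when the string_slices flag is on; every other case falls
// back to copying.
bool ShouldMakeSlice(bool string_slices_enabled,
                     bool buffer_is_flat,
                     int length,
                     int min_sliced_length,  // SlicedString::kMinLength
                     bool pretenured) {
  return string_slices_enabled &&
         buffer_is_flat &&
         length >= min_sliced_length &&
         !pretenured;
}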
 
 
 MaybeObject* Heap::AllocateExternalStringFromAscii(
-    ExternalAsciiString::Resource* resource) {
+    const ExternalAsciiString::Resource* resource) {
   size_t length = resource->length();
   if (length > static_cast<size_t>(String::kMaxLength)) {
     isolate()->context()->mark_out_of_memory();
@@ -2828,7 +2966,7 @@
 
 
 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
-    ExternalTwoByteString::Resource* resource) {
+    const ExternalTwoByteString::Resource* resource) {
   size_t length = resource->length();
   if (length > static_cast<size_t>(String::kMaxLength)) {
     isolate()->context()->mark_out_of_memory();
@@ -2892,7 +3030,7 @@
   Object* result;
   { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
                    ? old_data_space_->AllocateRaw(size)
-                   : lo_space_->AllocateRaw(size);
+                   : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
 
@@ -2928,8 +3066,8 @@
   } else if (size == 2 * kPointerSize) {
     filler->set_map(two_pointer_filler_map());
   } else {
-    filler->set_map(byte_array_map());
-    ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
+    filler->set_map(free_space_map());
+    FreeSpace::cast(filler)->set_size(size);
   }
 }
 
@@ -2975,7 +3113,7 @@
   // Large code objects and code objects which should stay at a fixed address
   // are allocated in large object space.
   if (obj_size > MaxObjectSizeInPagedSpace() || immovable) {
-    maybe_result = lo_space_->AllocateRawCode(obj_size);
+    maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
   } else {
     maybe_result = code_space_->AllocateRaw(obj_size);
   }
@@ -3020,7 +3158,7 @@
   int obj_size = code->Size();
   MaybeObject* maybe_result;
   if (obj_size > MaxObjectSizeInPagedSpace()) {
-    maybe_result = lo_space_->AllocateRawCode(obj_size);
+    maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
   } else {
     maybe_result = code_space_->AllocateRaw(obj_size);
   }
@@ -3063,7 +3201,7 @@
 
   MaybeObject* maybe_result;
   if (new_obj_size > MaxObjectSizeInPagedSpace()) {
-    maybe_result = lo_space_->AllocateRawCode(new_obj_size);
+    maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
   } else {
     maybe_result = code_space_->AllocateRaw(new_obj_size);
   }
@@ -3112,9 +3250,9 @@
 }
 
 
-MaybeObject* Heap::InitializeFunction(JSFunction* function,
-                                      SharedFunctionInfo* shared,
-                                      Object* prototype) {
+void Heap::InitializeFunction(JSFunction* function,
+                              SharedFunctionInfo* shared,
+                              Object* prototype) {
   ASSERT(!prototype->IsMap());
   function->initialize_properties();
   function->initialize_elements();
@@ -3124,7 +3262,6 @@
   function->set_context(undefined_value());
   function->set_literals(empty_fixed_array());
   function->set_next_function_link(undefined_value());
-  return function;
 }
 
 
@@ -3134,8 +3271,18 @@
   // different context.
   JSFunction* object_function =
       function->context()->global_context()->object_function();
+
+  // Each function prototype gets a copy of the object function map.
+  // This avoids unwanted sharing of maps between prototypes of different
+  // constructors.
+  Map* new_map;
+  ASSERT(object_function->has_initial_map());
+  { MaybeObject* maybe_map =
+        object_function->initial_map()->CopyDropTransitions();
+    if (!maybe_map->To<Map>(&new_map)) return maybe_map;
+  }
   Object* prototype;
-  { MaybeObject* maybe_prototype = AllocateJSObject(object_function);
+  { MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
     if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
   }
   // When creating the prototype for the function we must set its
@@ -3160,7 +3307,8 @@
   { MaybeObject* maybe_result = Allocate(function_map, space);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
-  return InitializeFunction(JSFunction::cast(result), shared, prototype);
+  InitializeFunction(JSFunction::cast(result), shared, prototype);
+  return result;
 }
 
 
@@ -3330,6 +3478,9 @@
   // We cannot always fill with one_pointer_filler_map because objects
   // created from API functions expect their internal fields to be initialized
   // with undefined_value.
+  // Pre-allocated fields need to be initialized with undefined_value as well
+  // so that object accesses before the constructor completes (e.g. in the
+  // debugger) will not cause a crash.
   if (map->constructor()->IsJSFunction() &&
       JSFunction::cast(map->constructor())->shared()->
           IsInobjectSlackTrackingInProgress()) {
@@ -3339,7 +3490,7 @@
   } else {
     filler = Heap::undefined_value();
   }
-  obj->InitializeBody(map->instance_size(), filler);
+  obj->InitializeBody(map, Heap::undefined_value(), filler);
 }
 
 
@@ -3377,7 +3528,8 @@
   InitializeJSObjectFromMap(JSObject::cast(obj),
                             FixedArray::cast(properties),
                             map);
-  ASSERT(JSObject::cast(obj)->HasFastElements());
+  ASSERT(JSObject::cast(obj)->HasFastSmiOnlyElements() ||
+         JSObject::cast(obj)->HasFastElements());
   return obj;
 }
 
@@ -3420,6 +3572,7 @@
   if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
   result->set_handler(handler);
+  result->set_hash(undefined_value());
   return result;
 }
 
@@ -3443,6 +3596,7 @@
   if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
   result->set_handler(handler);
+  result->set_hash(undefined_value());
   result->set_call_trap(call_trap);
   result->set_construct_trap(construct_trap);
   return result;
@@ -3559,6 +3713,7 @@
               object_size);
   }
 
+  ASSERT(JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
   FixedArray* properties = FixedArray::cast(source->properties());
   // Update elements if necessary.
@@ -3591,13 +3746,13 @@
 
 MaybeObject* Heap::ReinitializeJSReceiver(
     JSReceiver* object, InstanceType type, int size) {
-  ASSERT(type >= FIRST_JS_RECEIVER_TYPE);
+  ASSERT(type >= FIRST_JS_OBJECT_TYPE);
 
   // Allocate fresh map.
   // TODO(rossberg): Once we optimize proxies, cache these maps.
   Map* map;
-  MaybeObject* maybe_map_obj = AllocateMap(type, size);
-  if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
+  MaybeObject* maybe = AllocateMap(type, size);
+  if (!maybe->To<Map>(&map)) return maybe;
 
   // Check that the receiver has at least the size of the fresh object.
   int size_difference = object->map()->instance_size() - map->instance_size();
@@ -3608,30 +3763,35 @@
   // Allocate the backing storage for the properties.
   int prop_size = map->unused_property_fields() - map->inobject_properties();
   Object* properties;
-  { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
-    if (!maybe_properties->ToObject(&properties)) return maybe_properties;
+  maybe = AllocateFixedArray(prop_size, TENURED);
+  if (!maybe->ToObject(&properties)) return maybe;
+
+  // Functions require some allocation, which might fail here.
+  SharedFunctionInfo* shared = NULL;
+  if (type == JS_FUNCTION_TYPE) {
+    String* name;
+    maybe = LookupAsciiSymbol("<freezing call trap>");
+    if (!maybe->To<String>(&name)) return maybe;
+    maybe = AllocateSharedFunctionInfo(name);
+    if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
   }
 
+  // Because of possible retries of this function after failure,
+  // we must NOT fail after this point, where we have changed the type!
+
   // Reset the map for the object.
   object->set_map(map);
+  JSObject* jsobj = JSObject::cast(object);
 
   // Reinitialize the object from the constructor map.
-  InitializeJSObjectFromMap(JSObject::cast(object),
-                            FixedArray::cast(properties), map);
+  InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
 
   // Functions require some minimal initialization.
   if (type == JS_FUNCTION_TYPE) {
-    String* name;
-    MaybeObject* maybe_name = LookupAsciiSymbol("<freezing call trap>");
-    if (!maybe_name->To<String>(&name)) return maybe_name;
-    SharedFunctionInfo* shared;
-    MaybeObject* maybe_shared = AllocateSharedFunctionInfo(name);
-    if (!maybe_shared->To<SharedFunctionInfo>(&shared)) return maybe_shared;
-    JSFunction* func;
-    MaybeObject* maybe_func =
-        InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
-    if (!maybe_func->To<JSFunction>(&func)) return maybe_func;
-    func->set_context(isolate()->context()->global_context());
+    map->set_function_with_prototype(true);
+    InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
+    JSFunction::cast(object)->set_context(
+        isolate()->context()->global_context());
   }
 
   // Put in filler if the new object is smaller than the old.
@@ -3814,7 +3974,7 @@
   // Allocate string.
   Object* result;
   { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
-                   ? lo_space_->AllocateRaw(size)
+                   ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
                    : old_data_space_->AllocateRaw(size);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
@@ -3931,7 +4091,7 @@
   int size = FixedArray::SizeFor(length);
   return size <= kMaxObjectSizeInNewSpace
       ? new_space_.AllocateRaw(size)
-      : lo_space_->AllocateRawFixedArray(size);
+      : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
 }
 
 
@@ -4262,6 +4422,21 @@
 }
 
 
+bool Heap::IsHeapIterable() {
+  return (!old_pointer_space()->was_swept_conservatively() &&
+          !old_data_space()->was_swept_conservatively());
+}
+
+
+void Heap::EnsureHeapIsIterable() {
+  ASSERT(IsAllocationAllowed());
+  if (!IsHeapIterable()) {
+    CollectAllGarbage(kMakeHeapIterableMask);
+  }
+  ASSERT(IsHeapIterable());
+}
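With lazy, conservative sweeping a paged space can temporarily be non-iterable, so a full heap
walk must now be preceded by EnsureHeapIsIterable(). A minimal sketch of a caller, assuming the
internal heap.h declarations in this patch (the wrapper name is hypothetical):

  void PrintHeapObjects(Heap* heap) {
    // May trigger CollectAllGarbage(kMakeHeapIterableMask) to force a
    // precise sweep before iteration.
    heap->EnsureHeapIsIterable();
    HeapIterator iterator;
    for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
      obj->ShortPrint();
      PrintF("\n");
    }
  }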
+
+
 bool Heap::IdleNotification() {
   static const int kIdlesBeforeScavenge = 4;
   static const int kIdlesBeforeMarkSweep = 7;
@@ -4292,7 +4467,7 @@
   if (number_idle_notifications_ == kIdlesBeforeScavenge) {
     if (contexts_disposed_ > 0) {
       HistogramTimerScope scope(isolate_->counters()->gc_context());
-      CollectAllGarbage(false);
+      CollectAllGarbage(kNoGCFlags);
     } else {
       CollectGarbage(NEW_SPACE);
     }
@@ -4304,12 +4479,12 @@
     // generated code for cached functions.
     isolate_->compilation_cache()->Clear();
 
-    CollectAllGarbage(false);
+    CollectAllGarbage(kNoGCFlags);
     new_space_.Shrink();
     last_idle_notification_gc_count_ = gc_count_;
 
   } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
-    CollectAllGarbage(true);
+    CollectAllGarbage(kNoGCFlags);
     new_space_.Shrink();
     last_idle_notification_gc_count_ = gc_count_;
     number_idle_notifications_ = 0;
@@ -4319,7 +4494,7 @@
       contexts_disposed_ = 0;
     } else {
       HistogramTimerScope scope(isolate_->counters()->gc_context());
-      CollectAllGarbage(false);
+      CollectAllGarbage(kNoGCFlags);
       last_idle_notification_gc_count_ = gc_count_;
     }
     // If this is the first idle notification, we reset the
@@ -4339,8 +4514,11 @@
 
   // Make sure that we have no pending context disposals and
   // conditionally uncommit from space.
-  ASSERT(contexts_disposed_ == 0);
+  // Take into account that we might have decided to delay full collection
+  // because incremental marking is in progress.
+  ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped());
   if (uncommit) UncommitFromSpace();
+
   return finished;
 }
 
@@ -4374,11 +4552,11 @@
   USE(title);
   PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
          title, gc_count_);
-  PrintF("mark-compact GC : %d\n", mc_count_);
   PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
          old_gen_promotion_limit_);
   PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
          old_gen_allocation_limit_);
+  PrintF("old_gen_limit_factor_ %d\n", old_gen_limit_factor_);
 
   PrintF("\n");
   PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
@@ -4455,69 +4633,18 @@
 
 
 #ifdef DEBUG
-static void DummyScavengePointer(HeapObject** p) {
-}
-
-
-static void VerifyPointersUnderWatermark(
-    PagedSpace* space,
-    DirtyRegionCallback visit_dirty_region) {
-  PageIterator it(space, PageIterator::PAGES_IN_USE);
-
-  while (it.has_next()) {
-    Page* page = it.next();
-    Address start = page->ObjectAreaStart();
-    Address end = page->AllocationWatermark();
-
-    HEAP->IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
-                              start,
-                              end,
-                              visit_dirty_region,
-                              &DummyScavengePointer);
-  }
-}
-
-
-static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
-  LargeObjectIterator it(space);
-  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
-    if (object->IsFixedArray()) {
-      Address slot_address = object->address();
-      Address end = object->address() + object->Size();
-
-      while (slot_address < end) {
-        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
-        // When we are not in GC the Heap::InNewSpace() predicate
-        // checks that pointers which satisfy predicate point into
-        // the active semispace.
-        HEAP->InNewSpace(*slot);
-        slot_address += kPointerSize;
-      }
-    }
-  }
-}
-
-
 void Heap::Verify() {
   ASSERT(HasBeenSetup());
 
+  store_buffer()->Verify();
+
   VerifyPointersVisitor visitor;
   IterateRoots(&visitor, VISIT_ONLY_STRONG);
 
   new_space_.Verify();
 
-  VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
-  old_pointer_space_->Verify(&dirty_regions_visitor);
-  map_space_->Verify(&dirty_regions_visitor);
-
-  VerifyPointersUnderWatermark(old_pointer_space_,
-                               &IteratePointersInDirtyRegion);
-  VerifyPointersUnderWatermark(map_space_,
-                               &IteratePointersInDirtyMapsRegion);
-  VerifyPointersUnderWatermark(lo_space_);
-
-  VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
-  VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
+  old_pointer_space_->Verify(&visitor);
+  map_space_->Verify(&visitor);
 
   VerifyPointersVisitor no_dirty_regions_visitor;
   old_data_space_->Verify(&no_dirty_regions_visitor);
@@ -4526,6 +4653,7 @@
 
   lo_space_->Verify();
 }
+
 #endif  // DEBUG
 
 
@@ -4621,277 +4749,223 @@
 
 #ifdef DEBUG
 void Heap::ZapFromSpace() {
-  ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsFailure());
-  for (Address a = new_space_.FromSpaceLow();
-       a < new_space_.FromSpaceHigh();
-       a += kPointerSize) {
-    Memory::Address_at(a) = kFromSpaceZapValue;
+  NewSpacePageIterator it(new_space_.FromSpaceStart(),
+                          new_space_.FromSpaceEnd());
+  while (it.has_next()) {
+    NewSpacePage* page = it.next();
+    for (Address cursor = page->body(), limit = page->body_limit();
+         cursor < limit;
+         cursor += kPointerSize) {
+      Memory::Address_at(cursor) = kFromSpaceZapValue;
+    }
   }
 }
 #endif  // DEBUG
 
 
-bool Heap::IteratePointersInDirtyRegion(Heap* heap,
-                                        Address start,
-                                        Address end,
-                                        ObjectSlotCallback copy_object_func) {
-  Address slot_address = start;
-  bool pointers_to_new_space_found = false;
-
-  while (slot_address < end) {
-    Object** slot = reinterpret_cast<Object**>(slot_address);
-    if (heap->InNewSpace(*slot)) {
-      ASSERT((*slot)->IsHeapObject());
-      copy_object_func(reinterpret_cast<HeapObject**>(slot));
-      if (heap->InNewSpace(*slot)) {
-        ASSERT((*slot)->IsHeapObject());
-        pointers_to_new_space_found = true;
-      }
-    }
-    slot_address += kPointerSize;
-  }
-  return pointers_to_new_space_found;
-}
-
-
-// Compute start address of the first map following given addr.
-static inline Address MapStartAlign(Address addr) {
-  Address page = Page::FromAddress(addr)->ObjectAreaStart();
-  return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
-}
-
-
-// Compute end address of the first map preceding given addr.
-static inline Address MapEndAlign(Address addr) {
-  Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
-  return page + ((addr - page) / Map::kSize * Map::kSize);
-}
-
-
-static bool IteratePointersInDirtyMaps(Address start,
-                                       Address end,
-                                       ObjectSlotCallback copy_object_func) {
-  ASSERT(MapStartAlign(start) == start);
-  ASSERT(MapEndAlign(end) == end);
-
-  Address map_address = start;
-  bool pointers_to_new_space_found = false;
-
-  Heap* heap = HEAP;
-  while (map_address < end) {
-    ASSERT(!heap->InNewSpace(Memory::Object_at(map_address)));
-    ASSERT(Memory::Object_at(map_address)->IsMap());
-
-    Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
-    Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
-
-    if (Heap::IteratePointersInDirtyRegion(heap,
-                                           pointer_fields_start,
-                                           pointer_fields_end,
-                                           copy_object_func)) {
-      pointers_to_new_space_found = true;
-    }
-
-    map_address += Map::kSize;
-  }
-
-  return pointers_to_new_space_found;
-}
-
-
-bool Heap::IteratePointersInDirtyMapsRegion(
-    Heap* heap,
-    Address start,
-    Address end,
-    ObjectSlotCallback copy_object_func) {
-  Address map_aligned_start = MapStartAlign(start);
-  Address map_aligned_end   = MapEndAlign(end);
-
-  bool contains_pointers_to_new_space = false;
-
-  if (map_aligned_start != start) {
-    Address prev_map = map_aligned_start - Map::kSize;
-    ASSERT(Memory::Object_at(prev_map)->IsMap());
-
-    Address pointer_fields_start =
-        Max(start, prev_map + Map::kPointerFieldsBeginOffset);
-
-    Address pointer_fields_end =
-        Min(prev_map + Map::kPointerFieldsEndOffset, end);
-
-    contains_pointers_to_new_space =
-      IteratePointersInDirtyRegion(heap,
-                                   pointer_fields_start,
-                                   pointer_fields_end,
-                                   copy_object_func)
-        || contains_pointers_to_new_space;
-  }
-
-  contains_pointers_to_new_space =
-    IteratePointersInDirtyMaps(map_aligned_start,
-                               map_aligned_end,
-                               copy_object_func)
-      || contains_pointers_to_new_space;
-
-  if (map_aligned_end != end) {
-    ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
-
-    Address pointer_fields_start =
-        map_aligned_end + Map::kPointerFieldsBeginOffset;
-
-    Address pointer_fields_end =
-        Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);
-
-    contains_pointers_to_new_space =
-      IteratePointersInDirtyRegion(heap,
-                                   pointer_fields_start,
-                                   pointer_fields_end,
-                                   copy_object_func)
-        || contains_pointers_to_new_space;
-  }
-
-  return contains_pointers_to_new_space;
-}
-
-
 void Heap::IterateAndMarkPointersToFromSpace(Address start,
                                              Address end,
                                              ObjectSlotCallback callback) {
   Address slot_address = start;
-  Page* page = Page::FromAddress(start);
 
-  uint32_t marks = page->GetRegionMarks();
+  // We are not collecting slots on new space objects during mutation,
+  // thus we have to scan for pointers to evacuation candidates when we
+  // promote objects. But we should not record any slots in non-black
+  // objects. A grey object's slots would be rescanned anyway, and a
+  // white object might not survive until the end of the collection, so
+  // recording its slots would violate the invariant.
+  bool record_slots = false;
+  if (incremental_marking()->IsCompacting()) {
+    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
+    record_slots = Marking::IsBlack(mark_bit);
+  }
 
   while (slot_address < end) {
     Object** slot = reinterpret_cast<Object**>(slot_address);
-    if (InFromSpace(*slot)) {
-      ASSERT((*slot)->IsHeapObject());
-      callback(reinterpret_cast<HeapObject**>(slot));
-      if (InNewSpace(*slot)) {
-        ASSERT((*slot)->IsHeapObject());
-        marks |= page->GetRegionMaskForAddress(slot_address);
+    Object* object = *slot;
+    // If the store buffer becomes overfull we mark pages as being exempt from
+    // the store buffer.  These pages are scanned to find pointers that point
+    // to the new space.  In that case we may hit newly promoted objects and
+    // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
+    if (object->IsHeapObject()) {
+      if (Heap::InFromSpace(object)) {
+        callback(reinterpret_cast<HeapObject**>(slot),
+                 HeapObject::cast(object));
+        Object* new_object = *slot;
+        if (InNewSpace(new_object)) {
+          ASSERT(Heap::InToSpace(new_object));
+          ASSERT(new_object->IsHeapObject());
+          store_buffer_.EnterDirectlyIntoStoreBuffer(
+              reinterpret_cast<Address>(slot));
+        }
+        ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
+      } else if (record_slots &&
+                 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
+        mark_compact_collector()->RecordSlot(slot, slot, object);
       }
     }
     slot_address += kPointerSize;
   }
-
-  page->SetRegionMarks(marks);
 }
 
 
-uint32_t Heap::IterateDirtyRegions(
-    uint32_t marks,
-    Address area_start,
-    Address area_end,
-    DirtyRegionCallback visit_dirty_region,
-    ObjectSlotCallback copy_object_func) {
-  uint32_t newmarks = 0;
-  uint32_t mask = 1;
+#ifdef DEBUG
+typedef bool (*CheckStoreBufferFilter)(Object** addr);
 
-  if (area_start >= area_end) {
-    return newmarks;
-  }
 
-  Address region_start = area_start;
-
-  // area_start does not necessarily coincide with start of the first region.
-  // Thus to calculate the beginning of the next region we have to align
-  // area_start by Page::kRegionSize.
-  Address second_region =
-      reinterpret_cast<Address>(
-          reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
-          ~Page::kRegionAlignmentMask);
-
-  // Next region might be beyond area_end.
-  Address region_end = Min(second_region, area_end);
-
-  if (marks & mask) {
-    if (visit_dirty_region(this, region_start, region_end, copy_object_func)) {
-      newmarks |= mask;
-    }
-  }
-  mask <<= 1;
-
-  // Iterate subsequent regions which fully lay inside [area_start, area_end[.
-  region_start = region_end;
-  region_end = region_start + Page::kRegionSize;
-
-  while (region_end <= area_end) {
-    if (marks & mask) {
-      if (visit_dirty_region(this,
-                             region_start,
-                             region_end,
-                             copy_object_func)) {
-        newmarks |= mask;
-      }
-    }
-
-    region_start = region_end;
-    region_end = region_start + Page::kRegionSize;
-
-    mask <<= 1;
-  }
-
-  if (region_start != area_end) {
-    // A small piece of area left uniterated because area_end does not coincide
-    // with region end. Check whether region covering last part of area is
-    // dirty.
-    if (marks & mask) {
-      if (visit_dirty_region(this, region_start, area_end, copy_object_func)) {
-        newmarks |= mask;
-      }
-    }
-  }
-
-  return newmarks;
+bool IsAMapPointerAddress(Object** addr) {
+  uintptr_t a = reinterpret_cast<uintptr_t>(addr);
+  int mod = a % Map::kSize;
+  return mod >= Map::kPointerFieldsBeginOffset &&
+         mod < Map::kPointerFieldsEndOffset;
 }
 
 
+bool EverythingsAPointer(Object** addr) {
+  return true;
+}
 
-void Heap::IterateDirtyRegions(
-    PagedSpace* space,
-    DirtyRegionCallback visit_dirty_region,
-    ObjectSlotCallback copy_object_func,
-    ExpectedPageWatermarkState expected_page_watermark_state) {
 
-  PageIterator it(space, PageIterator::PAGES_IN_USE);
-
-  while (it.has_next()) {
-    Page* page = it.next();
-    uint32_t marks = page->GetRegionMarks();
-
-    if (marks != Page::kAllRegionsCleanMarks) {
-      Address start = page->ObjectAreaStart();
-
-      // Do not try to visit pointers beyond page allocation watermark.
-      // Page can contain garbage pointers there.
-      Address end;
-
-      if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
-          page->IsWatermarkValid()) {
-        end = page->AllocationWatermark();
-      } else {
-        end = page->CachedAllocationWatermark();
-      }
-
-      ASSERT(space == old_pointer_space_ ||
-             (space == map_space_ &&
-              ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
-
-      page->SetRegionMarks(IterateDirtyRegions(marks,
-                                               start,
-                                               end,
-                                               visit_dirty_region,
-                                               copy_object_func));
+static void CheckStoreBuffer(Heap* heap,
+                             Object** current,
+                             Object** limit,
+                             Object**** store_buffer_position,
+                             Object*** store_buffer_top,
+                             CheckStoreBufferFilter filter,
+                             Address special_garbage_start,
+                             Address special_garbage_end) {
+  Map* free_space_map = heap->free_space_map();
+  for ( ; current < limit; current++) {
+    Object* o = *current;
+    Address current_address = reinterpret_cast<Address>(current);
+    // Skip free space.
+    if (o == free_space_map) {
+      FreeSpace* free_space =
+          FreeSpace::cast(HeapObject::FromAddress(current_address));
+      int skip = free_space->Size();
+      ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
+      ASSERT(skip > 0);
+      current_address += skip - kPointerSize;
+      current = reinterpret_cast<Object**>(current_address);
+      continue;
     }
-
-    // Mark page watermark as invalid to maintain watermark validity invariant.
-    // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
-    page->InvalidateWatermark(true);
+    // Skip the current linear allocation space, between top and limit, which
+    // is not marked with the free space map but can contain junk.
+    if (current_address == special_garbage_start &&
+        special_garbage_end != special_garbage_start) {
+      current_address = special_garbage_end - kPointerSize;
+      current = reinterpret_cast<Object**>(current_address);
+      continue;
+    }
+    if (!(*filter)(current)) continue;
+    ASSERT(current_address < special_garbage_start ||
+           current_address >= special_garbage_end);
+    ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
+    // We have to check that the pointer does not point into new space
+    // without trying to cast it to a heap object since the hash field of
+    // a string can contain values like 1 and 3 which are tagged null
+    // pointers.
+    if (!heap->InNewSpace(o)) continue;
+    while (**store_buffer_position < current &&
+           *store_buffer_position < store_buffer_top) {
+      (*store_buffer_position)++;
+    }
+    if (**store_buffer_position != current ||
+        *store_buffer_position == store_buffer_top) {
+      Object** obj_start = current;
+      while (!(*obj_start)->IsMap()) obj_start--;
+      UNREACHABLE();
+    }
   }
 }
 
 
+// Check that the store buffer contains all intergenerational pointers by
+// scanning a page and ensuring that all pointers to young space are in the
+// store buffer.
+void Heap::OldPointerSpaceCheckStoreBuffer() {
+  OldSpace* space = old_pointer_space();
+  PageIterator pages(space);
+
+  store_buffer()->SortUniq();
+
+  while (pages.has_next()) {
+    Page* page = pages.next();
+    Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart());
+
+    Address end = page->ObjectAreaEnd();
+
+    Object*** store_buffer_position = store_buffer()->Start();
+    Object*** store_buffer_top = store_buffer()->Top();
+
+    Object** limit = reinterpret_cast<Object**>(end);
+    CheckStoreBuffer(this,
+                     current,
+                     limit,
+                     &store_buffer_position,
+                     store_buffer_top,
+                     &EverythingsAPointer,
+                     space->top(),
+                     space->limit());
+  }
+}
+
+
+void Heap::MapSpaceCheckStoreBuffer() {
+  MapSpace* space = map_space();
+  PageIterator pages(space);
+
+  store_buffer()->SortUniq();
+
+  while (pages.has_next()) {
+    Page* page = pages.next();
+    Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart());
+
+    Address end = page->ObjectAreaEnd();
+
+    Object*** store_buffer_position = store_buffer()->Start();
+    Object*** store_buffer_top = store_buffer()->Top();
+
+    Object** limit = reinterpret_cast<Object**>(end);
+    CheckStoreBuffer(this,
+                     current,
+                     limit,
+                     &store_buffer_position,
+                     store_buffer_top,
+                     &IsAMapPointerAddress,
+                     space->top(),
+                     space->limit());
+  }
+}
+
+
+void Heap::LargeObjectSpaceCheckStoreBuffer() {
+  LargeObjectIterator it(lo_space());
+  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+    // We only have code, sequential strings, or fixed arrays in large
+    // object space, and only fixed arrays can possibly contain pointers to
+    // the young generation.
+    if (object->IsFixedArray()) {
+      Object*** store_buffer_position = store_buffer()->Start();
+      Object*** store_buffer_top = store_buffer()->Top();
+      Object** current = reinterpret_cast<Object**>(object->address());
+      Object** limit =
+          reinterpret_cast<Object**>(object->address() + object->Size());
+      CheckStoreBuffer(this,
+                       current,
+                       limit,
+                       &store_buffer_position,
+                       store_buffer_top,
+                       &EverythingsAPointer,
+                       NULL,
+                       NULL);
+    }
+  }
+}
+#endif
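A hypothetical debug harness, shown only to illustrate how the three checks above fit together;
the wrapper itself is not part of this patch:

  #ifdef DEBUG
  static void CheckAllStoreBuffers(Heap* heap) {
    // Each check sorts and deduplicates the store buffer, then scans its
    // space and asserts that every old-to-new pointer has a matching entry.
    heap->OldPointerSpaceCheckStoreBuffer();
    heap->MapSpaceCheckStoreBuffer();
    heap->LargeObjectSpaceCheckStoreBuffer();
  }
  #endif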
+
+
 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
   IterateStrongRoots(v, mode);
   IterateWeakRoots(v, mode);
@@ -4941,8 +5015,7 @@
   // Iterate over the builtin code objects and code stubs in the
   // heap. Note that it is not necessary to iterate over code objects
   // on scavenge collections.
-  if (mode != VISIT_ALL_IN_SCAVENGE &&
-      mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
+  if (mode != VISIT_ALL_IN_SCAVENGE) {
     isolate_->builtins()->IterateBuiltins(v);
   }
   v->Synchronize("builtins");
@@ -4986,11 +5059,20 @@
 // and through the API, we should gracefully handle the case that the heap
 // size is not big enough to fit all the initial objects.
 bool Heap::ConfigureHeap(int max_semispace_size,
-                         int max_old_gen_size,
-                         int max_executable_size) {
+                         intptr_t max_old_gen_size,
+                         intptr_t max_executable_size) {
   if (HasBeenSetup()) return false;
 
-  if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;
+  if (max_semispace_size > 0) {
+    if (max_semispace_size < Page::kPageSize) {
+      max_semispace_size = Page::kPageSize;
+      if (FLAG_trace_gc) {
+        PrintF("Max semispace size cannot be less than %dkbytes",
+               Page::kPageSize >> 10);
+      }
+    }
+    max_semispace_size_ = max_semispace_size;
+  }
 
   if (Snapshot::IsEnabled()) {
     // If we are using a snapshot we always reserve the default amount
@@ -5000,6 +5082,10 @@
     // than the default reserved semispace size.
     if (max_semispace_size_ > reserved_semispace_size_) {
       max_semispace_size_ = reserved_semispace_size_;
+      if (FLAG_trace_gc) {
+        PrintF("Max semispace size cannot be more than %dkbytes",
+               reserved_semispace_size_ >> 10);
+      }
     }
   } else {
     // If we are not using snapshots we reserve space for the actual
@@ -5025,8 +5111,12 @@
   initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
   external_allocation_limit_ = 10 * max_semispace_size_;
 
-  // The old generation is paged.
-  max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);
+  // The old generation is paged and needs at least one page for each space.
+  int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
+  max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
+                                                       Page::kPageSize),
+                                 RoundUp(max_old_generation_size_,
+                                         Page::kPageSize));
 
   configured_ = true;
   return true;
@@ -5034,9 +5124,9 @@
 
 
 bool Heap::ConfigureHeapDefault() {
-  return ConfigureHeap(FLAG_max_new_space_size / 2 * KB,
-                       FLAG_max_old_space_size * MB,
-                       FLAG_max_executable_size * MB);
+  return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
+                       static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
+                       static_cast<intptr_t>(FLAG_max_executable_size) * MB);
 }
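For context, a sketch of how an embedder reaches ConfigureHeap through the public API; the
accessor names come from v8.h, while the values (and their units, see v8.h) are purely
illustrative:

  #include "v8.h"

  void ConfigureExampleHeap() {
    v8::ResourceConstraints constraints;
    constraints.set_max_young_space_size(2 * 1024);  // illustrative value
    constraints.set_max_old_space_size(192);         // illustrative value
    constraints.set_max_executable_size(128);        // illustrative value
    // Must be called before V8 is initialized; ends up in Heap::ConfigureHeap.
    v8::SetResourceConstraints(&constraints);
  }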
 
 
@@ -5064,7 +5154,7 @@
   *stats->os_error = OS::GetLastError();
       isolate()->memory_allocator()->Available();
   if (take_snapshot) {
-    HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
+    HeapIterator iterator;
     for (HeapObject* obj = iterator.next();
          obj != NULL;
          obj = iterator.next()) {
@@ -5280,31 +5370,21 @@
   gc_initializer_mutex->Lock();
   static bool initialized_gc = false;
   if (!initialized_gc) {
-    initialized_gc = true;
-    InitializeScavengingVisitorsTables();
-    NewSpaceScavenger::Initialize();
-    MarkCompactCollector::Initialize();
+    initialized_gc = true;
+    InitializeScavengingVisitorsTables();
+    NewSpaceScavenger::Initialize();
+    MarkCompactCollector::Initialize();
   }
   gc_initializer_mutex->Unlock();
 
   MarkMapPointersAsEncoded(false);
 
-  // Setup memory allocator and reserve a chunk of memory for new
-  // space.  The chunk is double the size of the requested reserved
-  // new space size to ensure that we can find a pair of semispaces that
-  // are contiguous and aligned to their size.
+  // Setup memory allocator.
   if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize()))
       return false;
-  void* chunk =
-      isolate_->memory_allocator()->ReserveInitialChunk(
-          4 * reserved_semispace_size_);
-  if (chunk == NULL) return false;
 
-  // Align the pair of semispaces to their size, which must be a power
-  // of 2.
-  Address new_space_start =
-      RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
-  if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
+  // Setup new space.
+  if (!new_space_.Setup(reserved_semispace_size_, max_semispace_size_)) {
     return false;
   }
 
@@ -5315,7 +5395,7 @@
                    OLD_POINTER_SPACE,
                    NOT_EXECUTABLE);
   if (old_pointer_space_ == NULL) return false;
-  if (!old_pointer_space_->Setup(NULL, 0)) return false;
+  if (!old_pointer_space_->Setup()) return false;
 
   // Initialize old data space.
   old_data_space_ =
@@ -5324,7 +5404,7 @@
                    OLD_DATA_SPACE,
                    NOT_EXECUTABLE);
   if (old_data_space_ == NULL) return false;
-  if (!old_data_space_->Setup(NULL, 0)) return false;
+  if (!old_data_space_->Setup()) return false;
 
   // Initialize the code space, set its maximum capacity to the old
   // generation size. It needs executable memory.
@@ -5339,21 +5419,20 @@
   code_space_ =
       new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
   if (code_space_ == NULL) return false;
-  if (!code_space_->Setup(NULL, 0)) return false;
+  if (!code_space_->Setup()) return false;
 
   // Initialize map space.
-  map_space_ = new MapSpace(this, FLAG_use_big_map_space
-      ? max_old_generation_size_
-      : MapSpace::kMaxMapPageIndex * Page::kPageSize,
-      FLAG_max_map_space_pages,
-      MAP_SPACE);
+  map_space_ = new MapSpace(this,
+                            max_old_generation_size_,
+                            FLAG_max_map_space_pages,
+                            MAP_SPACE);
   if (map_space_ == NULL) return false;
-  if (!map_space_->Setup(NULL, 0)) return false;
+  if (!map_space_->Setup()) return false;
 
   // Initialize global property cell space.
   cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
   if (cell_space_ == NULL) return false;
-  if (!cell_space_->Setup(NULL, 0)) return false;
+  if (!cell_space_->Setup()) return false;
 
   // The large object code space may contain code or data.  We set the memory
   // to be non-executable here for safety, but this means we need to enable it
@@ -5361,7 +5440,6 @@
   lo_space_ = new LargeObjectSpace(this, LO_SPACE);
   if (lo_space_ == NULL) return false;
   if (!lo_space_->Setup()) return false;
-
   if (create_heap_objects) {
     // Create initial maps.
     if (!CreateInitialMaps()) return false;
@@ -5376,6 +5454,8 @@
   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
 
+  store_buffer()->Setup();
+
   return true;
 }
 
@@ -5402,7 +5482,6 @@
     PrintF("\n\n");
     PrintF("gc_count=%d ", gc_count_);
     PrintF("mark_sweep_count=%d ", ms_count_);
-    PrintF("mark_compact_count=%d ", mc_count_);
     PrintF("max_gc_pause=%d ", get_max_gc_pause());
     PrintF("min_in_mutator=%d ", get_min_in_mutator());
     PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
@@ -5452,6 +5531,9 @@
     lo_space_ = NULL;
   }
 
+  store_buffer()->TearDown();
+  incremental_marking()->TearDown();
+
   isolate_->memory_allocator()->TearDown();
 
 #ifdef DEBUG
@@ -5465,7 +5547,7 @@
   // Try to shrink all paged spaces.
   PagedSpaces spaces;
   for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
-    space->Shrink();
+    space->ReleaseAllUnusedPages();
 }
 
 
@@ -5668,45 +5750,6 @@
 };
 
 
-class FreeListNodesFilter : public HeapObjectsFilter {
- public:
-  FreeListNodesFilter() {
-    MarkFreeListNodes();
-  }
-
-  bool SkipObject(HeapObject* object) {
-    if (object->IsMarked()) {
-      object->ClearMark();
-      return true;
-    } else {
-      return false;
-    }
-  }
-
- private:
-  void MarkFreeListNodes() {
-    Heap* heap = HEAP;
-    heap->old_pointer_space()->MarkFreeListNodes();
-    heap->old_data_space()->MarkFreeListNodes();
-    MarkCodeSpaceFreeListNodes(heap);
-    heap->map_space()->MarkFreeListNodes();
-    heap->cell_space()->MarkFreeListNodes();
-  }
-
-  void MarkCodeSpaceFreeListNodes(Heap* heap) {
-    // For code space, using FreeListNode::IsFreeListNode is OK.
-    HeapObjectIterator iter(heap->code_space());
-    for (HeapObject* obj = iter.next_object();
-         obj != NULL;
-         obj = iter.next_object()) {
-      if (FreeListNode::IsFreeListNode(obj)) obj->SetMark();
-    }
-  }
-
-  AssertNoAllocation no_alloc;
-};
-
-
 class UnreachableObjectsFilter : public HeapObjectsFilter {
  public:
   UnreachableObjectsFilter() {
@@ -5714,8 +5757,8 @@
   }
 
   bool SkipObject(HeapObject* object) {
-    if (object->IsMarked()) {
-      object->ClearMark();
+    if (IntrusiveMarking::IsMarked(object)) {
+      IntrusiveMarking::ClearMark(object);
       return true;
     } else {
       return false;
@@ -5731,8 +5774,8 @@
       for (Object** p = start; p < end; p++) {
         if (!(*p)->IsHeapObject()) continue;
         HeapObject* obj = HeapObject::cast(*p);
-        if (obj->IsMarked()) {
-          obj->ClearMark();
+        if (IntrusiveMarking::IsMarked(obj)) {
+          IntrusiveMarking::ClearMark(obj);
           list_.Add(obj);
         }
       }
@@ -5754,7 +5797,7 @@
     for (HeapObject* obj = iterator.next();
          obj != NULL;
          obj = iterator.next()) {
-      obj->SetMark();
+      IntrusiveMarking::SetMark(obj);
     }
     UnmarkingVisitor visitor;
     HEAP->IterateRoots(&visitor, VISIT_ALL);
@@ -5788,10 +5831,11 @@
 void HeapIterator::Init() {
   // Start the iteration.
   space_iterator_ = filtering_ == kNoFiltering ? new SpaceIterator :
-      new SpaceIterator(MarkCompactCollector::SizeOfMarkedObject);
+      new SpaceIterator(Isolate::Current()->heap()->
+                        GcSafeSizeOfOldObjectFunction());
   switch (filtering_) {
     case kFilterFreeListNodes:
-      filter_ = new FreeListNodesFilter;
+      // TODO(gc): Not handled.
       break;
     case kFilterUnreachable:
       filter_ = new UnreachableObjectsFilter;
@@ -5928,6 +5972,11 @@
 }
 
 
+static bool SafeIsGlobalContext(HeapObject* obj) {
+  return obj->map() == obj->GetHeap()->raw_unchecked_global_context_map();
+}
+
+
 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
   if (!(*p)->IsHeapObject()) return;
 
@@ -5946,7 +5995,7 @@
     return;
   }
 
-  bool is_global_context = obj->IsGlobalContext();
+  bool is_global_context = SafeIsGlobalContext(obj);
 
   // not visited yet
   Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
@@ -6054,7 +6103,7 @@
   for (OldSpace* space = spaces.next();
        space != NULL;
        space = spaces.next()) {
-    holes_size += space->Waste() + space->AvailableFree();
+    holes_size += space->Waste() + space->Available();
   }
   return holes_size;
 }
@@ -6065,17 +6114,10 @@
       start_size_(0),
       gc_count_(0),
       full_gc_count_(0),
-      is_compacting_(false),
-      marked_count_(0),
       allocated_since_last_gc_(0),
       spent_in_mutator_(0),
       promoted_objects_size_(0),
       heap_(heap) {
-  // These two fields reflect the state of the previous full collection.
-  // Set them before they are changed by the collector.
-  previous_has_compacted_ = heap_->mark_compact_collector_.HasCompacted();
-  previous_marked_count_ =
-      heap_->mark_compact_collector_.previous_marked_count();
   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
   start_time_ = OS::TimeCurrentMillis();
   start_size_ = heap_->SizeOfObjects();
@@ -6092,6 +6134,14 @@
   if (heap_->last_gc_end_timestamp_ > 0) {
     spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
   }
+
+  steps_count_ = heap_->incremental_marking()->steps_count();
+  steps_took_ = heap_->incremental_marking()->steps_took();
+  longest_step_ = heap_->incremental_marking()->longest_step();
+  steps_count_since_last_gc_ =
+      heap_->incremental_marking()->steps_count_since_last_gc();
+  steps_took_since_last_gc_ =
+      heap_->incremental_marking()->steps_took_since_last_gc();
 }
 
 
@@ -6126,7 +6176,21 @@
            SizeOfHeapObjects());
 
     if (external_time > 0) PrintF("%d / ", external_time);
-    PrintF("%d ms.\n", time);
+    PrintF("%d ms", time);
+    if (steps_count_ > 0) {
+      if (collector_ == SCAVENGER) {
+        PrintF(" (+ %d ms in %d steps since last GC)",
+               static_cast<int>(steps_took_since_last_gc_),
+               steps_count_since_last_gc_);
+      } else {
+        PrintF(" (+ %d ms in %d steps since start of marking, "
+                   "biggest step %f ms)",
+               static_cast<int>(steps_took_),
+               steps_count_,
+               longest_step_);
+      }
+    }
+    PrintF(".\n");
   } else {
     PrintF("pause=%d ", time);
     PrintF("mutator=%d ",
@@ -6138,8 +6202,7 @@
         PrintF("s");
         break;
       case MARK_COMPACTOR:
-        PrintF("%s",
-               heap_->mark_compact_collector_.HasCompacted() ? "mc" : "ms");
+        PrintF("ms");
         break;
       default:
         UNREACHABLE();
@@ -6161,6 +6224,14 @@
     PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
     PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
 
+    if (collector_ == SCAVENGER) {
+      PrintF("stepscount=%d ", steps_count_since_last_gc_);
+      PrintF("stepstook=%d ", static_cast<int>(steps_took_since_last_gc_));
+    } else {
+      PrintF("stepscount=%d ", steps_count_);
+      PrintF("stepstook=%d ", static_cast<int>(steps_took_));
+    }
+
     PrintF("\n");
   }
 
@@ -6173,8 +6244,7 @@
     case SCAVENGER:
       return "Scavenge";
     case MARK_COMPACTOR:
-      return heap_->mark_compact_collector_.HasCompacted() ? "Mark-compact"
-                                                           : "Mark-sweep";
+      return "Mark-sweep";
   }
   return "Unknown GC";
 }
@@ -6281,4 +6351,52 @@
 }
 
 
+void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
+  chunk->set_next_chunk(chunks_queued_for_free_);
+  chunks_queued_for_free_ = chunk;
+}
+
+
+void Heap::FreeQueuedChunks() {
+  if (chunks_queued_for_free_ == NULL) return;
+  MemoryChunk* next;
+  MemoryChunk* chunk;
+  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
+    next = chunk->next_chunk();
+    chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
+
+    if (chunk->owner()->identity() == LO_SPACE) {
+      // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
+      // If FromAnyPointerAddress encounters a slot that belongs to a large
+      // chunk queued for deletion it will fail to find the chunk, because
+      // it searches the list of pages owned by the large object space and
+      // queued chunks have already been detached from that list.
+      // To work around this we split the large chunk into normal kPageSize
+      // aligned pieces and initialize the owner field and flags of every
+      // piece. If FromAnyPointerAddress encounters a slot that belongs to
+      // one of these smaller pieces it will treat it as a slot on a normal
+      // Page.
+      MemoryChunk* inner = MemoryChunk::FromAddress(
+          chunk->address() + Page::kPageSize);
+      MemoryChunk* inner_last = MemoryChunk::FromAddress(
+          chunk->address() + chunk->size() - 1);
+      while (inner <= inner_last) {
+        // Size of a large chunk is always a multiple of
+        // OS::AllocationAlignment() so there is always
+        // enough space for a fake MemoryChunk header.
+        inner->set_owner(lo_space());
+        inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
+        inner = MemoryChunk::FromAddress(
+            inner->address() + Page::kPageSize);
+      }
+    }
+  }
+  isolate_->heap()->store_buffer()->Compact();
+  isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
+  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
+    next = chunk->next_chunk();
+    isolate_->memory_allocator()->Free(chunk);
+  }
+  chunks_queued_for_free_ = NULL;
+}
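To make the workaround above concrete, a hypothetical layout (addresses and sizes illustrative):

  // A queued three-page large chunk starting at address A gets fake
  // MemoryChunk headers written at A + Page::kPageSize and at
  // A + 2 * Page::kPageSize, each with owner == lo_space() and the
  // ABOUT_TO_BE_FREED flag set.  MemoryChunk::FromAnyPointerAddress then
  // resolves any interior slot to one of these chunks, which
  // StoreBuffer::Filter(MemoryChunk::ABOUT_TO_BE_FREED) recognizes and drops.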
+
 } }  // namespace v8::internal
diff --git a/src/heap.h b/src/heap.h
index d81ff6c..8672db2 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -32,11 +32,15 @@
 
 #include "allocation.h"
 #include "globals.h"
+#include "incremental-marking.h"
 #include "list.h"
 #include "mark-compact.h"
+#include "objects-visiting.h"
 #include "spaces.h"
 #include "splay-tree-inl.h"
+#include "store-buffer.h"
 #include "v8-counters.h"
+#include "v8globals.h"
 
 namespace v8 {
 namespace internal {
@@ -49,19 +53,19 @@
 
 // Defines all the roots in Heap.
 #define STRONG_ROOT_LIST(V)                                      \
-  /* Put the byte array map early.  We need it to be in place by the time   */ \
-  /* the deserializer hits the next page, since it wants to put a byte      */ \
-  /* array in the unused space at the end of the page.                      */ \
   V(Map, byte_array_map, ByteArrayMap)                                         \
+  V(Map, free_space_map, FreeSpaceMap)                                         \
   V(Map, one_pointer_filler_map, OnePointerFillerMap)                          \
   V(Map, two_pointer_filler_map, TwoPointerFillerMap)                          \
   /* Cluster the most popular ones in a few cache lines here at the top.    */ \
+  V(Smi, store_buffer_top, StoreBufferTop)                                     \
   V(Object, undefined_value, UndefinedValue)                                   \
   V(Object, the_hole_value, TheHoleValue)                                      \
   V(Object, null_value, NullValue)                                             \
   V(Object, true_value, TrueValue)                                             \
   V(Object, false_value, FalseValue)                                           \
   V(Object, arguments_marker, ArgumentsMarker)                                 \
+  V(Object, frame_alignment_marker, FrameAlignmentMarker)                      \
   V(Map, heap_number_map, HeapNumberMap)                                       \
   V(Map, global_context_map, GlobalContextMap)                                 \
   V(Map, fixed_array_map, FixedArrayMap)                                       \
@@ -123,6 +127,7 @@
   V(Map, message_object_map, JSMessageObjectMap)                               \
   V(Map, foreign_map, ForeignMap)                                              \
   V(Object, nan_value, NanValue)                                               \
+  V(Object, infinity_value, InfinityValue)                                     \
   V(Object, minus_zero_value, MinusZeroValue)                                  \
   V(Map, neander_map, NeanderMap)                                              \
   V(JSObject, message_listeners, MessageListeners)                             \
@@ -226,7 +231,9 @@
   V(closure_symbol, "(closure)")                                         \
   V(use_strict, "use strict")                                            \
   V(dot_symbol, ".")                                                     \
-  V(anonymous_function_symbol, "(anonymous function)")
+  V(anonymous_function_symbol, "(anonymous function)")                   \
+  V(infinity_symbol, "Infinity")                                         \
+  V(minus_infinity_symbol, "-Infinity")
 
 // Forward declarations.
 class GCTracer;
@@ -238,10 +245,26 @@
 typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
                                                       Object** pointer);
 
-typedef bool (*DirtyRegionCallback)(Heap* heap,
-                                    Address start,
-                                    Address end,
-                                    ObjectSlotCallback copy_object_func);
+class StoreBufferRebuilder {
+ public:
+  explicit StoreBufferRebuilder(StoreBuffer* store_buffer)
+      : store_buffer_(store_buffer) {
+  }
+
+  void Callback(MemoryChunk* page, StoreBufferEvent event);
+
+ private:
+  StoreBuffer* store_buffer_;
+
+  // We record in this variable how full the store buffer was when we started
+  // iterating over the current page, finding pointers to new space.  If the
+  // store buffer overflows again we can exempt the page from the store buffer
+  // by rewinding to this point instead of having to search the store buffer.
+  Object*** start_of_current_page_;
+  // The current page we are scanning in the store buffer iterator.
+  MemoryChunk* current_page_;
+};
+
 
 
 // The all static Heap captures the interface to the global object heap.
@@ -259,22 +282,37 @@
   PromotionQueue() : front_(NULL), rear_(NULL) { }
 
   void Initialize(Address start_address) {
+    // Assumes that a NewSpacePage exactly fits a number of promotion queue
+    // entries (where each is a pair of intptr_t). This allows us to simplify
+    // the test for when to switch pages.
+    ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
+           == 0);
+    ASSERT(NewSpacePage::IsAtEnd(start_address));
     front_ = rear_ = reinterpret_cast<intptr_t*>(start_address);
   }
 
-  bool is_empty() { return front_ <= rear_; }
+  bool is_empty() { return front_ == rear_; }
 
   inline void insert(HeapObject* target, int size);
 
   void remove(HeapObject** target, int* size) {
+    ASSERT(!is_empty());
+    if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) {
+      NewSpacePage* front_page =
+          NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
+      ASSERT(!front_page->prev_page()->is_anchor());
+      front_ =
+          reinterpret_cast<intptr_t*>(front_page->prev_page()->body_limit());
+    }
     *target = reinterpret_cast<HeapObject*>(*(--front_));
     *size = static_cast<int>(*(--front_));
     // Assert no underflow.
-    ASSERT(front_ >= rear_);
+    SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
+                                reinterpret_cast<Address>(front_));
   }
 
  private:
-  // The front of the queue is higher in memory than the rear.
+  // The front of the queue is higher in the memory page chain than the rear.
   intptr_t* front_;
   intptr_t* rear_;
 
@@ -282,6 +320,11 @@
 };
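A minimal usage sketch of the promotion queue, assuming the declarations above; the drain loop
is illustrative only and not a quote of the scavenger:

  static void DrainPromotionQueue(Heap* heap) {
    // remove() transparently hops to the previous NewSpacePage when front_
    // reaches the start of a page.
    HeapObject* target;
    int size;
    while (!heap->promotion_queue()->is_empty()) {
      heap->promotion_queue()->remove(&target, &size);
      // ... re-scan the promoted object of `size` bytes for new-space pointers.
    }
  }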
 
 
+typedef void (*ScavengingCallback)(Map* map,
+                                   HeapObject** slot,
+                                   HeapObject* object);
+
+
 // External strings table is a place where all external strings are
 // registered.  We need to keep track of such strings to properly
 // finalize them.
@@ -327,8 +370,8 @@
   // Configure heap size before setup. Return false if the heap has been
   // setup already.
   bool ConfigureHeap(int max_semispace_size,
-                     int max_old_gen_size,
-                     int max_executable_size);
+                     intptr_t max_old_gen_size,
+                     intptr_t max_executable_size);
   bool ConfigureHeapDefault();
 
   // Initializes the global object heap. If create_heap_objects is true,
@@ -456,6 +499,7 @@
   // size, but keeping the original prototype.  The receiver must have at least
   // the size of the new object.  The object is reinitialized and behaves as an
   // object that has been freshly allocated.
+  // Returns a failure if an error occurred, otherwise the object.
   MUST_USE_RESULT MaybeObject* ReinitializeJSReceiver(JSReceiver* object,
                                                       InstanceType type,
                                                       int size);
@@ -484,8 +528,10 @@
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed.
   // Please note this function does not perform a garbage collection.
-  MUST_USE_RESULT MaybeObject* AllocateMap(InstanceType instance_type,
-                                           int instance_size);
+  MUST_USE_RESULT MaybeObject* AllocateMap(
+      InstanceType instance_type,
+      int instance_size,
+      ElementsKind elements_kind = FAST_ELEMENTS);
 
   // Allocates a partial map for bootstrapping.
   MUST_USE_RESULT MaybeObject* AllocatePartialMap(InstanceType instance_type,
@@ -796,9 +842,9 @@
   // failed.
   // Please note this does not perform a garbage collection.
   MUST_USE_RESULT MaybeObject* AllocateExternalStringFromAscii(
-      ExternalAsciiString::Resource* resource);
+      const ExternalAsciiString::Resource* resource);
   MUST_USE_RESULT MaybeObject* AllocateExternalStringFromTwoByte(
-      ExternalTwoByteString::Resource* resource);
+      const ExternalTwoByteString::Resource* resource);
 
   // Finalizes an external string by deleting the associated external
   // data and clearing the resource pointer.
@@ -885,13 +931,24 @@
   // collect more garbage.
   inline bool CollectGarbage(AllocationSpace space);
 
-  // Performs a full garbage collection. Force compaction if the
-  // parameter is true.
-  void CollectAllGarbage(bool force_compaction);
+  static const int kNoGCFlags = 0;
+  static const int kMakeHeapIterableMask = 1;
+
+  // Performs a full garbage collection.  If (flags & kMakeHeapIterableMask) is
+  // non-zero, then the slower precise sweeper is used, which leaves the heap
+  // in a state where we can iterate over the heap visiting all objects.
+  void CollectAllGarbage(int flags);
 
   // Last hope GC, should try to squeeze as much as possible.
   void CollectAllAvailableGarbage();
 
+  // Check whether the heap is currently iterable.
+  bool IsHeapIterable();
+
+  // Ensure that we have swept all spaces in such a way that we can iterate
+  // over all objects.  May cause a GC.
+  void EnsureHeapIsIterable();
+
   // Notify the heap that a context has been disposed.
   int NotifyContextDisposed() { return ++contexts_disposed_; }
 
@@ -899,6 +956,20 @@
   // ensure correct callback for weak global handles.
   void PerformScavenge();
 
+  inline void increment_scan_on_scavenge_pages() {
+    scan_on_scavenge_pages_++;
+    if (FLAG_gc_verbose) {
+      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
+    }
+  }
+
+  inline void decrement_scan_on_scavenge_pages() {
+    scan_on_scavenge_pages_--;
+    if (FLAG_gc_verbose) {
+      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
+    }
+  }
+
   PromotionQueue* promotion_queue() { return &promotion_queue_; }
 
 #ifdef DEBUG
@@ -925,6 +996,8 @@
 
   // Heap root getters.  We have versions with and without type::cast() here.
   // You can't use type::cast during GC because the assert fails.
+  // TODO(1490): Try removing the unchecked accessors, now that GC marking does
+  // not corrupt the stack.
 #define ROOT_ACCESSOR(type, name, camel_name)                                  \
   type* name() {                                                               \
     return type::cast(roots_[k##camel_name##RootIndex]);                       \
@@ -965,60 +1038,16 @@
   // Iterates over all the other roots in the heap.
   void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
 
-  enum ExpectedPageWatermarkState {
-    WATERMARK_SHOULD_BE_VALID,
-    WATERMARK_CAN_BE_INVALID
-  };
-
-  // For each dirty region on a page in use from an old space call
-  // visit_dirty_region callback.
-  // If either visit_dirty_region or callback can cause an allocation
-  // in old space and changes in allocation watermark then
-  // can_preallocate_during_iteration should be set to true.
-  // All pages will be marked as having invalid watermark upon
-  // iteration completion.
-  void IterateDirtyRegions(
-      PagedSpace* space,
-      DirtyRegionCallback visit_dirty_region,
-      ObjectSlotCallback callback,
-      ExpectedPageWatermarkState expected_page_watermark_state);
-
-  // Interpret marks as a bitvector of dirty marks for regions of size
-  // Page::kRegionSize aligned by Page::kRegionAlignmentMask and covering
-  // memory interval from start to top. For each dirty region call a
-  // visit_dirty_region callback. Return updated bitvector of dirty marks.
-  uint32_t IterateDirtyRegions(uint32_t marks,
-                               Address start,
-                               Address end,
-                               DirtyRegionCallback visit_dirty_region,
-                               ObjectSlotCallback callback);
-
   // Iterate pointers to from semispace of new space found in memory interval
   // from start to end.
-  // Update dirty marks for page containing start address.
   void IterateAndMarkPointersToFromSpace(Address start,
                                          Address end,
                                          ObjectSlotCallback callback);
 
-  // Iterate pointers to new space found in memory interval from start to end.
-  // Return true if pointers to new space was found.
-  static bool IteratePointersInDirtyRegion(Heap* heap,
-                                           Address start,
-                                           Address end,
-                                           ObjectSlotCallback callback);
-
-
-  // Iterate pointers to new space found in memory interval from start to end.
-  // This interval is considered to belong to the map space.
-  // Return true if pointers to new space was found.
-  static bool IteratePointersInDirtyMapsRegion(Heap* heap,
-                                               Address start,
-                                               Address end,
-                                               ObjectSlotCallback callback);
-
-
   // Returns whether the object resides in new space.
   inline bool InNewSpace(Object* object);
+  inline bool InNewSpace(Address addr);
+  inline bool InNewSpacePage(Address addr);
   inline bool InFromSpace(Object* object);
   inline bool InToSpace(Object* object);
 
@@ -1057,12 +1086,20 @@
     roots_[kEmptyScriptRootIndex] = script;
   }
 
+  void public_set_store_buffer_top(Address* top) {
+    roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top);
+  }
+
   // Update the next script id.
   inline void SetLastScriptId(Object* last_script_id);
 
   // Generated code can embed this address to get access to the roots.
   Object** roots_address() { return roots_; }
 
+  Address* store_buffer_top_address() {
+    return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
+  }
+
   // Get address of global contexts list for serialization support.
   Object** global_contexts_list_address() {
     return &global_contexts_list_;
@@ -1075,6 +1112,10 @@
   // Verify the heap is in its normal state before or after a GC.
   void Verify();
 
+  void OldPointerSpaceCheckStoreBuffer();
+  void MapSpaceCheckStoreBuffer();
+  void LargeObjectSpaceCheckStoreBuffer();
+
   // Report heap statistics.
   void ReportHeapStatistics(const char* title);
   void ReportCodeStatistics(const char* title);
@@ -1170,22 +1211,51 @@
   MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length,
                                                      PretenureFlag pretenure);
 
+  inline intptr_t PromotedTotalSize() {
+    return PromotedSpaceSize() + PromotedExternalMemorySize();
+  }
+
   // True if we have reached the allocation limit in the old generation that
   // should force the next GC (caused normally) to be a full one.
-  bool OldGenerationPromotionLimitReached() {
-    return (PromotedSpaceSize() + PromotedExternalMemorySize())
-           > old_gen_promotion_limit_;
+  inline bool OldGenerationPromotionLimitReached() {
+    return PromotedTotalSize() > old_gen_promotion_limit_;
   }
 
-  intptr_t OldGenerationSpaceAvailable() {
-    return old_gen_allocation_limit_ -
-           (PromotedSpaceSize() + PromotedExternalMemorySize());
+  inline intptr_t OldGenerationSpaceAvailable() {
+    return old_gen_allocation_limit_ - PromotedTotalSize();
   }
 
-  // True if we have reached the allocation limit in the old generation that
-  // should artificially cause a GC right now.
-  bool OldGenerationAllocationLimitReached() {
-    return OldGenerationSpaceAvailable() < 0;
+  static const intptr_t kMinimumPromotionLimit = 5 * Page::kPageSize;
+  static const intptr_t kMinimumAllocationLimit =
+      8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
+
+  // When we sweep lazily we initially guess that there is no garbage on the
+  // heap and set the limits for the next GC accordingly.  As we sweep we find
+  // out that some of the pages contained garbage and we have to adjust the
+  // size of the heap downwards.  This means the limits that control the
+  // timing of the next GC also need to be adjusted downwards.
+  void LowerOldGenLimits(intptr_t adjustment) {
+    size_of_old_gen_at_last_old_space_gc_ -= adjustment;
+    old_gen_promotion_limit_ =
+        OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
+    old_gen_allocation_limit_ =
+        OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
+  }
+
+  intptr_t OldGenPromotionLimit(intptr_t old_gen_size) {
+    intptr_t limit =
+        Max(old_gen_size + old_gen_size / 3, kMinimumPromotionLimit);
+    limit += new_space_.Capacity();
+    limit *= old_gen_limit_factor_;
+    return limit;
+  }
+
+  intptr_t OldGenAllocationLimit(intptr_t old_gen_size) {
+    intptr_t limit =
+        Max(old_gen_size + old_gen_size / 2, kMinimumAllocationLimit);
+    limit += new_space_.Capacity();
+    limit *= old_gen_limit_factor_;
+    return limit;
   }
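A worked illustration with hypothetical numbers: for an old_gen_size of 96 MB, a new-space
capacity of 8 MB, and old_gen_limit_factor_ of 1, OldGenPromotionLimit yields
max(96 MB + 32 MB, kMinimumPromotionLimit) + 8 MB = 136 MB, and OldGenAllocationLimit yields
max(96 MB + 48 MB, kMinimumAllocationLimit) + 8 MB = 152 MB.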
 
   // Can be called when the embedding application is idle.
@@ -1213,6 +1283,8 @@
 
   MUST_USE_RESULT MaybeObject* NumberToString(
       Object* number, bool check_number_string_cache = true);
+  MUST_USE_RESULT MaybeObject* Uint32ToString(
+      uint32_t value, bool check_number_string_cache = true);
 
   Map* MapForExternalArrayType(ExternalArrayType array_type);
   RootListIndex RootIndexForExternalArrayType(
@@ -1224,18 +1296,10 @@
   // by pointer size.
   static inline void CopyBlock(Address dst, Address src, int byte_size);
 
-  inline void CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
-                                                      Address src,
-                                                      int byte_size);
-
   // Optimized version of memmove for blocks with pointer size aligned sizes and
   // pointer size aligned addresses.
   static inline void MoveBlock(Address dst, Address src, int byte_size);
 
-  inline void MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
-                                                      Address src,
-                                                      int byte_size);
-
   // Check new space expansion criteria and expand semispaces if it was hit.
   void CheckNewSpaceExpansionCriteria();
 
@@ -1244,9 +1308,31 @@
     survived_since_last_expansion_ += survived;
   }
 
+  inline bool NextGCIsLikelyToBeFull() {
+    if (FLAG_gc_global) return true;
+
+    intptr_t total_promoted = PromotedTotalSize();
+
+    intptr_t adjusted_promotion_limit =
+        old_gen_promotion_limit_ - new_space_.Capacity();
+
+    if (total_promoted >= adjusted_promotion_limit) return true;
+
+    intptr_t adjusted_allocation_limit =
+        old_gen_allocation_limit_ - new_space_.Capacity() / 5;
+
+    if (PromotedSpaceSize() >= adjusted_allocation_limit) return true;
+
+    return false;
+  }
+
+
   void UpdateNewSpaceReferencesInExternalStringTable(
       ExternalStringTableUpdaterCallback updater_func);
 
+  void UpdateReferencesInExternalStringTable(
+      ExternalStringTableUpdaterCallback updater_func);
+
   void ProcessWeakReferences(WeakObjectRetainer* retainer);
 
   // Helper function that governs the promotion policy from new space to
@@ -1263,6 +1349,9 @@
 
   GCTracer* tracer() { return tracer_; }
 
+  // Returns the size of objects residing in non-new spaces.
+  intptr_t PromotedSpaceSize();
+
   double total_regexp_code_generated() { return total_regexp_code_generated_; }
   void IncreaseTotalRegexpCodeGenerated(int size) {
     total_regexp_code_generated_ += size;
@@ -1281,6 +1370,18 @@
     return &mark_compact_collector_;
   }
 
+  StoreBuffer* store_buffer() {
+    return &store_buffer_;
+  }
+
+  Marking* marking() {
+    return &marking_;
+  }
+
+  IncrementalMarking* incremental_marking() {
+    return &incremental_marking_;
+  }
+
   ExternalStringTable* external_string_table() {
     return &external_string_table_;
   }
@@ -1291,16 +1392,28 @@
   }
 
   inline Isolate* isolate();
-  bool is_safe_to_read_maps() { return is_safe_to_read_maps_; }
 
-  void CallGlobalGCPrologueCallback() {
+  inline void CallGlobalGCPrologueCallback() {
     if (global_gc_prologue_callback_ != NULL) global_gc_prologue_callback_();
   }
 
-  void CallGlobalGCEpilogueCallback() {
+  inline void CallGlobalGCEpilogueCallback() {
     if (global_gc_epilogue_callback_ != NULL) global_gc_epilogue_callback_();
   }
 
+  inline bool OldGenerationAllocationLimitReached();
+
+  inline void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
+    scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
+  }
+
+  void QueueMemoryChunkForFree(MemoryChunk* chunk);
+  void FreeQueuedChunks();
+
+  // Completely clear the Instanceof cache (to stop it keeping objects alive
+  // around a GC).
+  inline void CompletelyClearInstanceofCache();
+
  private:
   Heap();
 
@@ -1308,12 +1421,12 @@
   // more expedient to get at the isolate directly from within Heap methods.
   Isolate* isolate_;
 
+  intptr_t code_range_size_;
   int reserved_semispace_size_;
   int max_semispace_size_;
   int initial_semispace_size_;
   intptr_t max_old_generation_size_;
   intptr_t max_executable_size_;
-  intptr_t code_range_size_;
 
   // For keeping track of how much data has survived
   // scavenge since last new space expansion.
@@ -1328,6 +1441,8 @@
   // For keeping track of context disposals.
   int contexts_disposed_;
 
+  int scan_on_scavenge_pages_;
+
 #if defined(V8_TARGET_ARCH_X64)
   static const int kMaxObjectSizeInNewSpace = 1024*KB;
 #else
@@ -1344,13 +1459,9 @@
   HeapState gc_state_;
   int gc_post_processing_depth_;
 
-  // Returns the size of object residing in non new spaces.
-  intptr_t PromotedSpaceSize();
-
   // Returns the amount of external memory registered since last global gc.
   int PromotedExternalMemorySize();
 
-  int mc_count_;  // how many mark-compact collections happened
   int ms_count_;  // how many mark-sweep collections happened
   unsigned int gc_count_;  // how many gc happened
 
@@ -1389,6 +1500,13 @@
   // every allocation in large object space.
   intptr_t old_gen_allocation_limit_;
 
+  // Sometimes the heuristics dictate that those limits are increased.  This
+  // variable records that fact.
+  int old_gen_limit_factor_;
+
+  // Used to adjust the limits that control the timing of the next GC.
+  intptr_t size_of_old_gen_at_last_old_space_gc_;
+
   // Limit on the amount of externally allocated memory allowed
   // between global GCs. If reached a global GC is forced.
   intptr_t external_allocation_limit_;
@@ -1408,6 +1526,8 @@
 
   Object* global_contexts_list_;
 
+  StoreBufferRebuilder store_buffer_rebuilder_;
+
   struct StringTypeTable {
     InstanceType type;
     int size;
@@ -1465,13 +1585,11 @@
   // Support for computing object sizes during GC.
   HeapObjectCallback gc_safe_size_of_old_object_;
   static int GcSafeSizeOfOldObject(HeapObject* object);
-  static int GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object);
 
   // Update the GC state. Called from the mark-compact collector.
   void MarkMapPointersAsEncoded(bool encoded) {
-    gc_safe_size_of_old_object_ = encoded
-        ? &GcSafeSizeOfOldObjectWithEncodedMap
-        : &GcSafeSizeOfOldObject;
+    ASSERT(!encoded);
+    gc_safe_size_of_old_object_ = &GcSafeSizeOfOldObject;
   }
 
   // Checks whether a global GC is necessary
@@ -1483,11 +1601,10 @@
   bool PerformGarbageCollection(GarbageCollector collector,
                                 GCTracer* tracer);
 
-  static const intptr_t kMinimumPromotionLimit = 2 * MB;
-  static const intptr_t kMinimumAllocationLimit = 8 * MB;
 
   inline void UpdateOldSpaceLimits();
 
+
   // Allocate an uninitialized object in map space.  The behavior is identical
   // to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
   // have to test the allocation space argument and (b) can reduce code size
@@ -1522,8 +1639,6 @@
   // Allocate empty fixed double array.
   MUST_USE_RESULT MaybeObject* AllocateEmptyFixedDoubleArray();
 
-  void SwitchScavengingVisitorsTableIfProfilingWasEnabled();
-
   // Performs a minor collection in new generation.
   void Scavenge();
 
@@ -1532,16 +1647,15 @@
       Object** pointer);
 
   Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
+  static void ScavengeStoreBufferCallback(Heap* heap,
+                                          MemoryChunk* page,
+                                          StoreBufferEvent event);
 
   // Performs a major collection in the whole heap.
   void MarkCompact(GCTracer* tracer);
 
   // Code to be run before and after mark-compact.
-  void MarkCompactPrologue(bool is_compacting);
-
-  // Completely clear the Instanceof cache (to stop it keeping objects alive
-  // around a GC).
-  inline void CompletelyClearInstanceofCache();
+  void MarkCompactPrologue();
 
   // Record statistics before and after garbage collection.
   void ReportStatisticsBeforeGC();
@@ -1551,12 +1665,11 @@
   static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
 
   // Initializes a function with a shared part and prototype.
-  // Returns the function.
   // Note: this code was factored out of AllocateFunction such that
   // other parts of the VM could use it. Specifically, functions that create
   // instances of type JS_FUNCTION_TYPE benefit from the use of this function.
   // Please note this does not perform a garbage collection.
-  MUST_USE_RESULT inline MaybeObject* InitializeFunction(
+  inline void InitializeFunction(
       JSFunction* function,
       SharedFunctionInfo* shared,
       Object* prototype);
@@ -1621,6 +1734,8 @@
     return high_survival_rate_period_length_ > 0;
   }
 
+  void SelectScavengingVisitorsTable();
+
   static const int kInitialSymbolTableSize = 2048;
   static const int kInitialEvalCacheSize = 64;
 
@@ -1640,10 +1755,11 @@
 
   MarkCompactCollector mark_compact_collector_;
 
-  // This field contains the meaning of the WATERMARK_INVALIDATED flag.
-  // Instead of clearing this flag from all pages we just flip
-  // its meaning at the beginning of a scavenge.
-  intptr_t page_watermark_invalidated_mark_;
+  StoreBuffer store_buffer_;
+
+  Marking marking_;
+
+  IncrementalMarking incremental_marking_;
 
   int number_idle_notifications_;
   unsigned int last_idle_notification_gc_count_;
@@ -1658,7 +1774,9 @@
 
   ExternalStringTable external_string_table_;
 
-  bool is_safe_to_read_maps_;
+  VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
+
+  MemoryChunk* chunks_queued_for_free_;
 
   friend class Factory;
   friend class GCTracer;
@@ -1757,29 +1875,6 @@
     }
   }
 };
-
-
-// Visitor class to verify interior pointers in spaces that use region marks
-// to keep track of intergenerational references.
-// As VerifyPointersVisitor but also checks that dirty marks are set
-// for regions covering intergenerational references.
-class VerifyPointersAndDirtyRegionsVisitor: public ObjectVisitor {
- public:
-  void VisitPointers(Object** start, Object** end) {
-    for (Object** current = start; current < end; current++) {
-      if ((*current)->IsHeapObject()) {
-        HeapObject* object = HeapObject::cast(*current);
-        ASSERT(HEAP->Contains(object));
-        ASSERT(object->map()->IsMap());
-        if (HEAP->InNewSpace(object)) {
-          ASSERT(HEAP->InToSpace(object));
-          Address addr = reinterpret_cast<Address>(current);
-          ASSERT(Page::FromAddress(addr)->IsRegionDirty(addr));
-        }
-      }
-    }
-  }
-};
 #endif
 
 
@@ -2112,16 +2207,6 @@
   // Sets the full GC count.
   void set_full_gc_count(int count) { full_gc_count_ = count; }
 
-  // Sets the flag that this is a compacting full GC.
-  void set_is_compacting() { is_compacting_ = true; }
-  bool is_compacting() const { return is_compacting_; }
-
-  // Increment and decrement the count of marked objects.
-  void increment_marked_count() { ++marked_count_; }
-  void decrement_marked_count() { --marked_count_; }
-
-  int marked_count() { return marked_count_; }
-
   void increment_promoted_objects_size(int object_size) {
     promoted_objects_size_ += object_size;
   }
@@ -2146,23 +2231,6 @@
   // A count (including this one) of the number of full garbage collections.
   int full_gc_count_;
 
-  // True if the current GC is a compacting full collection, false
-  // otherwise.
-  bool is_compacting_;
-
-  // True if the *previous* full GC cwas a compacting collection (will be
-  // false if there has not been a previous full GC).
-  bool previous_has_compacted_;
-
-  // On a full GC, a count of the number of marked objects.  Incremented
-  // when an object is marked and decremented when an object's mark bit is
-  // cleared.  Will be zero on a scavenge collection.
-  int marked_count_;
-
-  // The count from the end of the previous full GC.  Will be zero if there
-  // was no previous full GC.
-  int previous_marked_count_;
-
   // Amounts of time spent in different scopes during GC.
   double scopes_[Scope::kNumberOfScopes];
 
@@ -2181,6 +2249,13 @@
   // Size of objects promoted during the current collection.
   intptr_t promoted_objects_size_;
 
+  // Incremental marking steps counters.
+  int steps_count_;
+  double steps_took_;
+  double longest_step_;
+  int steps_count_since_last_gc_;
+  double steps_took_since_last_gc_;
+
   Heap* heap_;
 };
 
@@ -2292,6 +2367,46 @@
 };
 
 
+// Intrusive object marking uses the least significant bit of a heap
+// object's map word to mark objects.  Normally all map words have the
+// least significant bit set because they contain a tagged map pointer.
+// If the bit is not set, the object is marked.  All objects should be
+// unmarked before resuming JavaScript execution.
+class IntrusiveMarking {
+ public:
+  static bool IsMarked(HeapObject* object) {
+    return (object->map_word().ToRawValue() & kNotMarkedBit) == 0;
+  }
+
+  static void ClearMark(HeapObject* object) {
+    uintptr_t map_word = object->map_word().ToRawValue();
+    object->set_map_word(MapWord::FromRawValue(map_word | kNotMarkedBit));
+    ASSERT(!IsMarked(object));
+  }
+
+  static void SetMark(HeapObject* object) {
+    uintptr_t map_word = object->map_word().ToRawValue();
+    object->set_map_word(MapWord::FromRawValue(map_word & ~kNotMarkedBit));
+    ASSERT(IsMarked(object));
+  }
+
+  static Map* MapOfMarkedObject(HeapObject* object) {
+    uintptr_t map_word = object->map_word().ToRawValue();
+    return MapWord::FromRawValue(map_word | kNotMarkedBit).ToMap();
+  }
+
+  static int SizeOfMarkedObject(HeapObject* object) {
+    return object->SizeFromMap(MapOfMarkedObject(object));
+  }
+
+ private:
+  static const uintptr_t kNotMarkedBit = 0x1;
+  STATIC_ASSERT((kHeapObjectTag & kNotMarkedBit) != 0);
+};
+
+
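// Standalone sketch of the map-word bit trick used by IntrusiveMarking above,
// written with plain integers instead of V8 types (kTag and the map word are
// hypothetical stand-ins): a map pointer is heap-object tagged, so its low
// bit is 1; clearing the bit marks the object and OR-ing it back recovers the
// original map pointer.

#include <cassert>
#include <cstdint>

int main() {
  const std::uintptr_t kTag = 0x1;              // stands in for kHeapObjectTag
  std::uintptr_t map_word = 0x12340000 | kTag;  // hypothetical tagged map word

  std::uintptr_t marked = map_word & ~kTag;     // SetMark: clear the low bit
  assert((marked & kTag) == 0);                 // IsMarked
  assert((marked | kTag) == map_word);          // MapOfMarkedObject recovers it

  std::uintptr_t unmarked = marked | kTag;      // ClearMark: restore the bit
  assert(unmarked == map_word);
  return 0;
}
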
 #if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
 // Helper class for tracing paths to a search target Object from all roots.
 // The TracePathFrom() method can be used to trace paths from a specific
@@ -2350,7 +2465,6 @@
 };
 #endif  // DEBUG || LIVE_OBJECT_LIST
 
-
 } }  // namespace v8::internal
 
 #undef HEAP
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 5630ce3..fd0c3bb 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -707,6 +707,14 @@
 }
 
 
+void HIsNilAndBranch::PrintDataTo(StringStream* stream) {
+  value()->PrintNameTo(stream);
+  stream->Add(kind() == kStrictEquality ? " === " : " == ");
+  stream->Add(nil() == kNullValue ? "null" : "undefined");
+  HControlInstruction::PrintDataTo(stream);
+}
+
+
 void HReturn::PrintDataTo(StringStream* stream) {
   value()->PrintNameTo(stream);
 }
@@ -777,15 +785,22 @@
   value()->PrintNameTo(stream);
   stream->Add(" == ");
   stream->Add(type_literal_->GetFlatContent().ToAsciiVector());
+  HControlInstruction::PrintDataTo(stream);
+}
+
+
+void HTypeof::PrintDataTo(StringStream* stream) {
+  value()->PrintNameTo(stream);
 }
 
 
 void HChange::PrintDataTo(StringStream* stream) {
   HUnaryOperation::PrintDataTo(stream);
-  stream->Add(" %s to %s", from_.Mnemonic(), to().Mnemonic());
+  stream->Add(" %s to %s", from().Mnemonic(), to().Mnemonic());
 
   if (CanTruncateToInt32()) stream->Add(" truncating-int32");
   if (CheckFlag(kBailoutOnMinusZero)) stream->Add(" -0?");
+  if (CheckFlag(kDeoptimizeOnUndefined)) stream->Add(" deopt-on-undefined");
 }
 
 
@@ -857,6 +872,23 @@
 }
 
 
+const char* HCheckInstanceType::GetCheckName() {
+  switch (check_) {
+    case IS_SPEC_OBJECT: return "object";
+    case IS_JS_ARRAY: return "array";
+    case IS_STRING: return "string";
+    case IS_SYMBOL: return "symbol";
+  }
+  UNREACHABLE();
+  return "";
+}
+
+void HCheckInstanceType::PrintDataTo(StringStream* stream) {
+  stream->Add("%s ", GetCheckName());
+  HUnaryOperation::PrintDataTo(stream);
+}
+
+
 void HCallStub::PrintDataTo(StringStream* stream) {
   stream->Add("%s ",
               CodeStub::MajorName(major_key_, false));
@@ -1311,6 +1343,14 @@
 }
 
 
+void HCompareObjectEqAndBranch::PrintDataTo(StringStream* stream) {
+  left()->PrintNameTo(stream);
+  stream->Add(" ");
+  right()->PrintNameTo(stream);
+  HControlInstruction::PrintDataTo(stream);
+}
+
+
 void HGoto::PrintDataTo(StringStream* stream) {
   stream->Add("B%d", SuccessorAt(0)->block_id());
 }
@@ -1425,7 +1465,7 @@
 }
 
 
-bool HLoadKeyedFastElement::RequiresHoleCheck() const {
+bool HLoadKeyedFastElement::RequiresHoleCheck() {
   for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
     HValue* use = it.value();
     if (!use->IsChange()) return true;
@@ -1442,11 +1482,6 @@
 }
 
 
-bool HLoadKeyedFastDoubleElement::RequiresHoleCheck() const {
-  return true;
-}
-
-
 void HLoadKeyedGeneric::PrintDataTo(StringStream* stream) {
   object()->PrintNameTo(stream);
   stream->Add("[");
@@ -1488,6 +1523,7 @@
       stream->Add("pixel");
       break;
     case FAST_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -1582,6 +1618,7 @@
     case EXTERNAL_PIXEL_ELEMENTS:
       stream->Add("pixel");
       break;
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
@@ -1598,7 +1635,18 @@
 
 void HLoadGlobalCell::PrintDataTo(StringStream* stream) {
   stream->Add("[%p]", *cell());
-  if (check_hole_value()) stream->Add(" (deleteable/read-only)");
+  if (!details_.IsDontDelete()) stream->Add(" (deleteable)");
+  if (details_.IsReadOnly()) stream->Add(" (read-only)");
+}
+
+
+bool HLoadGlobalCell::RequiresHoleCheck() {
+  if (details_.IsDontDelete() && !details_.IsReadOnly()) return false;
+  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+    HValue* use = it.value();
+    if (!use->IsChange()) return true;
+  }
+  return false;
 }
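
// Reading of the rule above (flag combinations only, no particular JavaScript
// source implied): a DontDelete, writable cell never needs the hole check; a
// deletable or read-only cell keeps it unless every use of the load is an
// HChange.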
 
 
@@ -1610,6 +1658,8 @@
 void HStoreGlobalCell::PrintDataTo(StringStream* stream) {
   stream->Add("[%p] = ", *cell());
   value()->PrintNameTo(stream);
+  if (!details_.IsDontDelete()) stream->Add(" (deleteable)");
+  if (details_.IsReadOnly()) stream->Add(" (read-only)");
 }
 
 
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 1bc28ba..6b43f53 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -118,7 +118,7 @@
   V(InstanceOfKnownGlobal)                     \
   V(InvokeFunction)                            \
   V(IsConstructCallAndBranch)                  \
-  V(IsNullAndBranch)                           \
+  V(IsNilAndBranch)                            \
   V(IsObjectAndBranch)                         \
   V(IsSmiAndBranch)                            \
   V(IsUndetectableAndBranch)                   \
@@ -625,7 +625,7 @@
   void ComputeInitialRange();
 
   // Representation helpers.
-  virtual Representation RequiredInputRepresentation(int index) const = 0;
+  virtual Representation RequiredInputRepresentation(int index) = 0;
 
   virtual Representation InferredRepresentation() {
     return representation();
@@ -841,7 +841,7 @@
 
 class HBlockEntry: public HTemplateInstruction<0> {
  public:
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -854,7 +854,7 @@
 // HSoftDeoptimize does not end a basic block as opposed to HDeoptimize.
 class HSoftDeoptimize: public HTemplateInstruction<0> {
  public:
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -866,7 +866,7 @@
  public:
   explicit HDeoptimize(int environment_length) : values_(environment_length) { }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -908,10 +908,10 @@
 class HGoto: public HTemplateControlInstruction<1, 0> {
  public:
   explicit HGoto(HBasicBlock* target) {
-        SetSuccessorAt(0, target);
-      }
+    SetSuccessorAt(0, target);
+  }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -951,7 +951,7 @@
       : HUnaryControlInstruction(value, NULL, NULL) { }
 
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -983,7 +983,7 @@
 
   Handle<Map> map() const { return map_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1000,7 +1000,7 @@
     SetOperandAt(0, value);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1014,7 +1014,7 @@
 
 class HAbnormalExit: public HTemplateControlInstruction<0, 0> {
  public:
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -1049,7 +1049,7 @@
     SetAllSideEffects();
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1064,7 +1064,7 @@
  public:
   explicit HUseConst(HValue* old_value) : HUnaryOperation(old_value) { }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -1083,7 +1083,7 @@
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return representation();  // Same as the output representation.
   }
 
@@ -1094,27 +1094,27 @@
 class HChange: public HUnaryOperation {
  public:
   HChange(HValue* value,
-          Representation from,
           Representation to,
           bool is_truncating,
           bool deoptimize_on_undefined)
-      : HUnaryOperation(value),
-        from_(from),
-        deoptimize_on_undefined_(deoptimize_on_undefined) {
-    ASSERT(!from.IsNone() && !to.IsNone());
-    ASSERT(!from.Equals(to));
+      : HUnaryOperation(value) {
+    ASSERT(!value->representation().IsNone() && !to.IsNone());
+    ASSERT(!value->representation().Equals(to));
     set_representation(to);
     SetFlag(kUseGVN);
+    if (deoptimize_on_undefined) SetFlag(kDeoptimizeOnUndefined);
     if (is_truncating) SetFlag(kTruncatingToInt32);
   }
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
-  Representation from() const { return from_; }
-  Representation to() const { return representation(); }
-  bool deoptimize_on_undefined() const { return deoptimize_on_undefined_; }
-  virtual Representation RequiredInputRepresentation(int index) const {
-    return from_;
+  Representation from() { return value()->representation(); }
+  Representation to() { return representation(); }
+  bool deoptimize_on_undefined() const {
+    return CheckFlag(kDeoptimizeOnUndefined);
+  }
+  virtual Representation RequiredInputRepresentation(int index) {
+    return from();
   }
 
   virtual Range* InferRange();
@@ -1124,16 +1124,7 @@
   DECLARE_CONCRETE_INSTRUCTION(Change)
 
  protected:
-  virtual bool DataEquals(HValue* other) {
-    if (!other->IsChange()) return false;
-    HChange* change = HChange::cast(other);
-    return to().Equals(change->to())
-        && deoptimize_on_undefined() == change->deoptimize_on_undefined();
-  }
-
- private:
-  Representation from_;
-  bool deoptimize_on_undefined_;
+  virtual bool DataEquals(HValue* other) { return true; }
 };
 
 
@@ -1145,7 +1136,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -1164,7 +1155,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -1223,7 +1214,7 @@
   virtual int OperandCount() { return values_.length(); }
   virtual HValue* OperandAt(int index) { return values_[index]; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -1268,7 +1259,7 @@
 
   HValue* context() { return OperandAt(0); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1306,7 +1297,7 @@
   FunctionLiteral* function() const { return function_; }
   CallKind call_kind() const { return call_kind_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -1323,7 +1314,7 @@
  public:
   HLeaveInlined() {}
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -1337,7 +1328,7 @@
     set_representation(Representation::Tagged());
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1354,7 +1345,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -1372,7 +1363,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -1392,7 +1383,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(OuterContext);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1410,7 +1401,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(GlobalObject)
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1429,7 +1420,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(GlobalReceiver)
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1465,7 +1456,7 @@
     SetOperandAt(0, value);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1485,7 +1476,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1500,7 +1491,7 @@
       : HBinaryCall(context, function, argument_count) {
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1525,7 +1516,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -1542,7 +1533,7 @@
       : HBinaryCall(context, key, argument_count) {
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1566,7 +1557,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(CallNamed)
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1583,7 +1574,7 @@
 
   HValue* context() { return value(); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1602,7 +1593,7 @@
   HValue* context() { return value(); }
   Handle<String> name() const { return name_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1622,7 +1613,7 @@
 
   Handle<JSFunction> target() const { return target_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -1639,7 +1630,7 @@
       : HBinaryCall(context, constructor, argument_count) {
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1666,7 +1657,7 @@
   const Runtime::Function* function() const { return c_function_; }
   Handle<String> name() const { return name_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1692,7 +1683,7 @@
     SetFlag(kDependsOnMaps);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1716,7 +1707,7 @@
     SetFlag(kDependsOnArrayLengths);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1735,7 +1726,7 @@
     SetFlag(kDependsOnMaps);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1754,7 +1745,7 @@
     SetFlag(kTruncatingToInt32);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Integer32();
   }
   virtual HType CalculateInferredType();
@@ -1804,7 +1795,7 @@
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     if (index == 0) {
       return Representation::Tagged();
     } else {
@@ -1861,7 +1852,7 @@
     SetFlag(kDependsOnMaps);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1884,7 +1875,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -1908,7 +1899,7 @@
     SetFlag(kDependsOnMaps);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
   virtual void PrintDataTo(StringStream* stream);
@@ -1938,7 +1929,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
   virtual void PrintDataTo(StringStream* stream);
@@ -1978,7 +1969,9 @@
     return new HCheckInstanceType(value, IS_SYMBOL);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual void PrintDataTo(StringStream* stream);
+
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2008,6 +2001,8 @@
     LAST_INTERVAL_CHECK = IS_JS_ARRAY
   };
 
+  const char* GetCheckName();
+
   HCheckInstanceType(HValue* value, Check check)
       : HUnaryOperation(value), check_(check) {
     set_representation(Representation::Tagged());
@@ -2025,7 +2020,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2071,7 +2066,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps)
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -2102,7 +2097,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
   virtual HType CalculateInferredType();
@@ -2151,7 +2146,7 @@
   }
 
   virtual Range* InferRange();
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return representation();
   }
   virtual HType CalculateInferredType();
@@ -2243,7 +2238,7 @@
     SetFlag(kIsArguments);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -2259,7 +2254,20 @@
 
   bool InOldSpace() const { return !HEAP->InNewSpace(*handle_); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  bool ImmortalImmovable() const {
+    Heap* heap = HEAP;
+    if (*handle_ == heap->undefined_value()) return true;
+    if (*handle_ == heap->null_value()) return true;
+    if (*handle_ == heap->true_value()) return true;
+    if (*handle_ == heap->false_value()) return true;
+    if (*handle_ == heap->the_hole_value()) return true;
+    if (*handle_ == heap->minus_zero_value()) return true;
+    if (*handle_ == heap->nan_value()) return true;
+    if (*handle_ == heap->empty_string()) return true;
+    return false;
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -2367,7 +2375,7 @@
     SetAllSideEffects();
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     // The length is untagged, all other inputs are tagged.
     return (index == 2)
         ? Representation::Integer32()
@@ -2394,7 +2402,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements)
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -2410,7 +2418,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2433,7 +2441,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     // The arguments elements is considered tagged.
     return index == 0
         ? Representation::Tagged()
@@ -2459,7 +2467,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Integer32();
   }
 
@@ -2484,7 +2492,7 @@
     SetAllSideEffects();
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return index == 0
         ? Representation::Tagged()
         : representation();
@@ -2522,7 +2530,7 @@
   }
 
   virtual HType CalculateInferredType();
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return index == 0
         ? Representation::Tagged()
         : representation();
@@ -2549,7 +2557,7 @@
     SetAllSideEffects();
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2587,7 +2595,7 @@
     return input_representation_;
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return input_representation_;
   }
   virtual void PrintDataTo(StringStream* stream);
@@ -2610,7 +2618,9 @@
   HValue* left() { return OperandAt(0); }
   HValue* right() { return OperandAt(1); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual void PrintDataTo(StringStream* stream);
+
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2629,7 +2639,7 @@
   HValue* left() { return value(); }
   int right() const { return right_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Integer32();
   }
 
@@ -2641,21 +2651,25 @@
 };
 
 
-class HIsNullAndBranch: public HUnaryControlInstruction {
+class HIsNilAndBranch: public HUnaryControlInstruction {
  public:
-  HIsNullAndBranch(HValue* value, bool is_strict)
-      : HUnaryControlInstruction(value, NULL, NULL), is_strict_(is_strict) { }
+  HIsNilAndBranch(HValue* value, EqualityKind kind, NilValue nil)
+      : HUnaryControlInstruction(value, NULL, NULL), kind_(kind), nil_(nil) { }
 
-  bool is_strict() const { return is_strict_; }
+  EqualityKind kind() const { return kind_; }
+  NilValue nil() const { return nil_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual void PrintDataTo(StringStream* stream);
+
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch)
+  DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch)
 
  private:
-  bool is_strict_;
+  EqualityKind kind_;
+  NilValue nil_;
 };
 
 
@@ -2664,7 +2678,7 @@
   explicit HIsObjectAndBranch(HValue* value)
     : HUnaryControlInstruction(value, NULL, NULL) { }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2679,7 +2693,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch)
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2693,7 +2707,7 @@
   explicit HIsUndetectableAndBranch(HValue* value)
       : HUnaryControlInstruction(value, NULL, NULL) { }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2703,7 +2717,7 @@
 
 class HIsConstructCallAndBranch: public HTemplateControlInstruction<2, 0> {
  public:
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -2725,7 +2739,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2742,7 +2756,7 @@
   explicit HHasCachedArrayIndexAndBranch(HValue* value)
       : HUnaryControlInstruction(value, NULL, NULL) { }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2757,7 +2771,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2776,7 +2790,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch)
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2800,7 +2814,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch)
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2817,7 +2831,7 @@
     SetAllSideEffects();
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2845,7 +2859,7 @@
   HValue* left() { return OperandAt(1); }
   Handle<JSFunction> function() { return function_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -2870,7 +2884,7 @@
   HValue* left() { return OperandAt(0); }
   HValue* right() { return OperandAt(1); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return index == 0
       ? Representation::Double()
       : Representation::None();
@@ -3099,7 +3113,7 @@
 
   int ast_id() const { return ast_id_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -3120,7 +3134,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -3152,7 +3166,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3168,7 +3182,7 @@
  public:
   HUnknownOSRValue() { set_representation(Representation::Tagged()); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -3178,15 +3192,15 @@
 
 class HLoadGlobalCell: public HTemplateInstruction<0> {
  public:
-  HLoadGlobalCell(Handle<JSGlobalPropertyCell> cell, bool check_hole_value)
-      : cell_(cell), check_hole_value_(check_hole_value) {
+  HLoadGlobalCell(Handle<JSGlobalPropertyCell> cell, PropertyDetails details)
+      : cell_(cell), details_(details) {
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
     SetFlag(kDependsOnGlobalVars);
   }
 
   Handle<JSGlobalPropertyCell>  cell() const { return cell_; }
-  bool check_hole_value() const { return check_hole_value_; }
+  bool RequiresHoleCheck();
 
   virtual void PrintDataTo(StringStream* stream);
 
@@ -3195,7 +3209,7 @@
     return reinterpret_cast<intptr_t>(*cell_);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::None();
   }
 
@@ -3209,7 +3223,7 @@
 
  private:
   Handle<JSGlobalPropertyCell> cell_;
-  bool check_hole_value_;
+  PropertyDetails details_;
 };
 
 
@@ -3234,7 +3248,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3250,17 +3264,19 @@
  public:
   HStoreGlobalCell(HValue* value,
                    Handle<JSGlobalPropertyCell> cell,
-                   bool check_hole_value)
+                   PropertyDetails details)
       : HUnaryOperation(value),
         cell_(cell),
-        check_hole_value_(check_hole_value) {
+        details_(details) {
     SetFlag(kChangesGlobalVars);
   }
 
   Handle<JSGlobalPropertyCell> cell() const { return cell_; }
-  bool check_hole_value() const { return check_hole_value_; }
+  bool RequiresHoleCheck() {
+    return !details_.IsDontDelete() || details_.IsReadOnly();
+  }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
   virtual void PrintDataTo(StringStream* stream);
@@ -3269,7 +3285,7 @@
 
  private:
   Handle<JSGlobalPropertyCell> cell_;
-  bool check_hole_value_;
+  PropertyDetails details_;
 };
 
 
@@ -3297,7 +3313,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3320,7 +3336,7 @@
 
   int slot_index() const { return slot_index_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3342,7 +3358,7 @@
 static inline bool StoringValueNeedsWriteBarrier(HValue* value) {
   return !value->type().IsBoolean()
       && !value->type().IsSmi()
-      && !(value->IsConstant() && HConstant::cast(value)->InOldSpace());
+      && !(value->IsConstant() && HConstant::cast(value)->ImmortalImmovable());
 }
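
// Worked example of the predicate above (the rationale is an assumption, not
// spelled out in this patch): storing the constants undefined, null or the
// empty string can skip the write barrier because ImmortalImmovable() reports
// them as roots the collector never moves or frees, whereas a constant heap
// number such as 3.5 that merely lives in old space no longer qualifies and
// keeps the barrier.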
 
 
@@ -3363,7 +3379,7 @@
     return StoringValueNeedsWriteBarrier(value());
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3396,7 +3412,7 @@
   bool is_in_object() const { return is_in_object_; }
   int offset() const { return offset_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
   virtual void PrintDataTo(StringStream* stream);
@@ -3428,7 +3444,7 @@
   Handle<String> name() { return name_; }
   bool need_generic() { return need_generic_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3463,7 +3479,7 @@
   HValue* object() { return OperandAt(1); }
   Handle<Object> name() const { return name_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3487,7 +3503,7 @@
 
   HValue* function() { return OperandAt(0); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3511,7 +3527,7 @@
   HValue* object() { return OperandAt(0); }
   HValue* key() { return OperandAt(1); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     // The key is supposed to be Integer32.
     return index == 0
       ? Representation::Tagged()
@@ -3520,7 +3536,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  bool RequiresHoleCheck() const;
+  bool RequiresHoleCheck();
 
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement)
 
@@ -3542,7 +3558,7 @@
   HValue* elements() { return OperandAt(0); }
   HValue* key() { return OperandAt(1); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     // The key is supposed to be Integer32.
     return index == 0
       ? Representation::Tagged()
@@ -3551,8 +3567,6 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  bool RequiresHoleCheck() const;
-
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement)
 
  protected:
@@ -3582,7 +3596,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     // The key is supposed to be Integer32, but the base pointer
     // for the element load is a naked pointer.
     return index == 0
@@ -3625,7 +3639,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3654,7 +3668,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedField)
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
   virtual void PrintDataTo(StringStream* stream);
@@ -3703,7 +3717,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3717,14 +3731,16 @@
 
 class HStoreKeyedFastElement: public HTemplateInstruction<3> {
  public:
-  HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val) {
+  HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val,
+                         ElementsKind elements_kind = FAST_ELEMENTS)
+      : elements_kind_(elements_kind) {
     SetOperandAt(0, obj);
     SetOperandAt(1, key);
     SetOperandAt(2, val);
     SetFlag(kChangesArrayElements);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     // The key is supposed to be Integer32.
     return index == 1
         ? Representation::Integer32()
@@ -3734,14 +3750,28 @@
   HValue* object() { return OperandAt(0); }
   HValue* key() { return OperandAt(1); }
   HValue* value() { return OperandAt(2); }
+  bool value_is_smi() {
+    return elements_kind_ == FAST_SMI_ONLY_ELEMENTS;
+  }
 
   bool NeedsWriteBarrier() {
-    return StoringValueNeedsWriteBarrier(value());
+    if (value_is_smi()) {
+      return false;
+    } else {
+      return StoringValueNeedsWriteBarrier(value());
+    }
+  }
+
+  bool ValueNeedsSmiCheck() {
+    return value_is_smi();
   }
 
   virtual void PrintDataTo(StringStream* stream);
 
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement)
+
+ private:
+  ElementsKind elements_kind_;
 };
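
// Sketch of the intended behaviour for the new FAST_SMI_ONLY_ELEMENTS case
// (assumed reading; "a[i] = 1" into a smi-only backing store is hypothetical):
// the stored value is a small integer, so no write barrier is emitted, but
// ValueNeedsSmiCheck() tells the backend to verify that the value really is a
// smi so the smi-only invariant of the elements array is preserved.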
 
 
@@ -3756,7 +3786,7 @@
     SetFlag(kChangesDoubleArrayElements);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     if (index == 1) {
       return Representation::Integer32();
     } else if (index == 2) {
@@ -3795,7 +3825,7 @@
 
   virtual void PrintDataTo(StringStream* stream);
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     if (index == 0) {
       return Representation::External();
     } else {
@@ -3843,7 +3873,7 @@
   HValue* context() { return OperandAt(3); }
   bool strict_mode() { return strict_mode_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3865,7 +3895,7 @@
     SetFlag(kDependsOnMaps);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -3891,7 +3921,7 @@
     SetFlag(kDependsOnMaps);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     // The index is supposed to be Integer32.
     return index == 2
         ? Representation::Integer32()
@@ -3922,7 +3952,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return index == 0
         ? Representation::Tagged()
         : Representation::Integer32();
@@ -3945,7 +3975,7 @@
     SetFlag(kDependsOnMaps);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -4001,7 +4031,7 @@
 
   bool IsCopyOnWrite() const;
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -4035,7 +4065,7 @@
   bool fast_elements() const { return fast_elements_; }
   bool has_function() const { return has_function_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -4064,7 +4094,7 @@
   Handle<String> pattern() { return pattern_; }
   Handle<String> flags() { return flags_; }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -4088,7 +4118,7 @@
 
   HValue* context() { return OperandAt(0); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -4114,7 +4144,9 @@
   HValue* context() { return OperandAt(0); }
   HValue* value() { return OperandAt(1); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual void PrintDataTo(StringStream* stream);
+
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -4132,7 +4164,7 @@
     set_representation(Representation::Tagged());
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -4146,7 +4178,7 @@
     set_representation(Representation::Tagged());
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -4162,7 +4194,7 @@
     SetAllSideEffects();
   }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
@@ -4189,7 +4221,7 @@
   HValue* key() { return OperandAt(1); }
   HValue* object() { return OperandAt(2); }
 
-  virtual Representation RequiredInputRepresentation(int index) const {
+  virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index cca168a..64a6ae5 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -422,7 +422,7 @@
 };
 
 
-void HGraph::Verify() const {
+void HGraph::Verify(bool do_full_verify) const {
   for (int i = 0; i < blocks_.length(); i++) {
     HBasicBlock* block = blocks_.at(i);
 
@@ -473,25 +473,27 @@
   // Check special property of first block to have no predecessors.
   ASSERT(blocks_.at(0)->predecessors()->is_empty());
 
-  // Check that the graph is fully connected.
-  ReachabilityAnalyzer analyzer(entry_block_, blocks_.length(), NULL);
-  ASSERT(analyzer.visited_count() == blocks_.length());
+  if (do_full_verify) {
+    // Check that the graph is fully connected.
+    ReachabilityAnalyzer analyzer(entry_block_, blocks_.length(), NULL);
+    ASSERT(analyzer.visited_count() == blocks_.length());
 
-  // Check that entry block dominator is NULL.
-  ASSERT(entry_block_->dominator() == NULL);
+    // Check that entry block dominator is NULL.
+    ASSERT(entry_block_->dominator() == NULL);
 
-  // Check dominators.
-  for (int i = 0; i < blocks_.length(); ++i) {
-    HBasicBlock* block = blocks_.at(i);
-    if (block->dominator() == NULL) {
-      // Only start block may have no dominator assigned to.
-      ASSERT(i == 0);
-    } else {
-      // Assert that block is unreachable if dominator must not be visited.
-      ReachabilityAnalyzer dominator_analyzer(entry_block_,
-                                              blocks_.length(),
-                                              block->dominator());
-      ASSERT(!dominator_analyzer.reachable()->Contains(block->block_id()));
+    // Check dominators.
+    for (int i = 0; i < blocks_.length(); ++i) {
+      HBasicBlock* block = blocks_.at(i);
+      if (block->dominator() == NULL) {
+        // Only the start block may have no dominator assigned to it.
+        ASSERT(i == 0);
+      } else {
+        // Assert that block is unreachable if dominator must not be visited.
+        ReachabilityAnalyzer dominator_analyzer(entry_block_,
+                                                blocks_.length(),
+                                                block->dominator());
+        ASSERT(!dominator_analyzer.reachable()->Contains(block->block_id()));
+      }
     }
   }
 }
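
The hunk above splits graph verification into cheap per-block checks that always run and an expensive whole-graph pass (reachability plus dominator checks) that only runs when do_full_verify is true. A minimal, self-contained sketch of that two-tier idea, using invented types rather than V8's, might look like this:

// Hypothetical sketch: cheap structural checks always run, the expensive
// whole-graph reachability pass only when requested.
#include <cassert>
#include <cstddef>
#include <vector>

struct ToyBlock {
  std::vector<int> successors;    // indices into ToyGraph::blocks
  std::vector<int> predecessors;
};

struct ToyGraph {
  std::vector<ToyBlock> blocks;   // blocks[0] is the entry block

  void Verify(bool do_full_verify) const {
    // Cheap check: the entry block must have no predecessors.
    assert(!blocks.empty() && blocks[0].predecessors.empty());
    if (!do_full_verify) return;

    // Expensive check: every block must be reachable from the entry.
    std::vector<bool> seen(blocks.size(), false);
    std::vector<int> worklist(1, 0);
    std::size_t visited = 0;
    while (!worklist.empty()) {
      int b = worklist.back();
      worklist.pop_back();
      if (seen[b]) continue;
      seen[b] = true;
      ++visited;
      for (int s : blocks[b].successors) worklist.push_back(s);
    }
    assert(visited == blocks.size());
  }
};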
@@ -850,7 +852,7 @@
 }
 
 
-bool HGraph::CheckPhis() {
+bool HGraph::CheckArgumentsPhiUses() {
   int block_count = blocks_.length();
   for (int i = 0; i < block_count; ++i) {
     for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
@@ -863,13 +865,11 @@
 }
 
 
-bool HGraph::CollectPhis() {
+bool HGraph::CheckConstPhiUses() {
   int block_count = blocks_.length();
-  phi_list_ = new ZoneList<HPhi*>(block_count);
   for (int i = 0; i < block_count; ++i) {
     for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
       HPhi* phi = blocks_[i]->phis()->at(j);
-      phi_list_->Add(phi);
       // Check for the hole value (from an uninitialized const).
       for (int k = 0; k < phi->OperandCount(); k++) {
         if (phi->OperandAt(k) == GetConstantHole()) return false;
@@ -880,6 +880,18 @@
 }
 
 
+void HGraph::CollectPhis() {
+  int block_count = blocks_.length();
+  phi_list_ = new ZoneList<HPhi*>(block_count);
+  for (int i = 0; i < block_count; ++i) {
+    for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
+      HPhi* phi = blocks_[i]->phis()->at(j);
+      phi_list_->Add(phi);
+    }
+  }
+}
+
+
 void HGraph::InferTypes(ZoneList<HValue*>* worklist) {
   BitVector in_worklist(GetMaximumValueID());
   for (int i = 0; i < worklist->length(); ++i) {
@@ -1848,7 +1860,7 @@
   }
 
   if (new_value == NULL) {
-    new_value = new(zone()) HChange(value, value->representation(), to,
+    new_value = new(zone()) HChange(value, to,
                                     is_truncating, deoptimize_on_undefined);
   }
 
@@ -2320,17 +2332,24 @@
 
   graph()->OrderBlocks();
   graph()->AssignDominators();
+
+#ifdef DEBUG
+  // Do a full verify after building the graph and computing dominators.
+  graph()->Verify(true);
+#endif
+
   graph()->PropagateDeoptimizingMark();
+  if (!graph()->CheckConstPhiUses()) {
+    Bailout("Unsupported phi use of const variable");
+    return NULL;
+  }
   graph()->EliminateRedundantPhis();
-  if (!graph()->CheckPhis()) {
-    Bailout("Unsupported phi use of arguments object");
+  if (!graph()->CheckArgumentsPhiUses()) {
+    Bailout("Unsupported phi use of arguments");
     return NULL;
   }
   if (FLAG_eliminate_dead_phis) graph()->EliminateUnreachablePhis();
-  if (!graph()->CollectPhis()) {
-    Bailout("Unsupported phi use of uninitialized constant");
-    return NULL;
-  }
+  graph()->CollectPhis();
 
   HInferRepresentation rep(graph());
   rep.Analyze();
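
The pipeline change above separates validation from collection: CheckConstPhiUses rejects phis fed by the hole value of an uninitialized const before redundant-phi elimination, CheckArgumentsPhiUses rejects unsupported uses of the arguments object, and CollectPhis afterwards only gathers the surviving phis. A rough, self-contained sketch of the check-then-collect split (toy types, not V8's) could be:

// Hypothetical sketch: one pass rejects unsupported phi inputs, a later pass
// merely collects the phis that remain.
#include <vector>

struct ToyPhi { std::vector<int> operands; };   // toy operand ids
constexpr int kHoleSentinel = -1;               // stand-in for the hole value

bool CheckConstPhiUses(const std::vector<ToyPhi>& phis) {
  for (const ToyPhi& phi : phis)
    for (int op : phi.operands)
      if (op == kHoleSentinel) return false;    // bail out of optimization
  return true;
}

std::vector<const ToyPhi*> CollectPhis(const std::vector<ToyPhi>& phis) {
  std::vector<const ToyPhi*> list;
  for (const ToyPhi& phi : phis) list.push_back(&phi);  // no checking here
  return list;
}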
@@ -3127,6 +3146,16 @@
   }
   switch (variable->location()) {
     case Variable::UNALLOCATED: {
+      // Handle known global constants like 'undefined' specially to avoid a
+      // load from a global cell for them.
+      Handle<Object> constant_value =
+          isolate()->factory()->GlobalConstantFor(variable->name());
+      if (!constant_value.is_null()) {
+        HConstant* instr =
+            new(zone()) HConstant(constant_value, Representation::Tagged());
+        return ast_context()->ReturnInstruction(instr, expr->id());
+      }
+
       LookupResult lookup;
       GlobalPropertyAccess type =
           LookupGlobalProperty(variable, &lookup, false);
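
The added fast path above turns a lookup of a well-known global constant such as 'undefined' into a compile-time constant instead of a load from a global property cell. Which other names are covered is not shown in this hunk, so the extra entries in the sketch below are illustrative only (invented helper, not V8's factory API):

// Hypothetical illustration: known global constants short-circuit to a
// constant; everything else falls back to the cell / generic load paths.
#include <optional>
#include <string>

enum class KnownGlobal { kUndefined, kNaN, kInfinity };

std::optional<KnownGlobal> KnownGlobalConstantFor(const std::string& name) {
  if (name == "undefined") return KnownGlobal::kUndefined;
  if (name == "NaN") return KnownGlobal::kNaN;            // illustrative
  if (name == "Infinity") return KnownGlobal::kInfinity;  // illustrative
  return std::nullopt;  // no shortcut; emit the normal global load
}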
@@ -3139,8 +3168,8 @@
       if (type == kUseCell) {
         Handle<GlobalObject> global(info()->global_object());
         Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
-        bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
-        HLoadGlobalCell* instr = new(zone()) HLoadGlobalCell(cell, check_hole);
+        HLoadGlobalCell* instr =
+            new(zone()) HLoadGlobalCell(cell, lookup.GetPropertyDetails());
         return ast_context()->ReturnInstruction(instr, expr->id());
       } else {
         HValue* context = environment()->LookupContext();
@@ -3317,7 +3346,47 @@
     HValue* key = AddInstruction(
         new(zone()) HConstant(Handle<Object>(Smi::FromInt(i)),
                               Representation::Integer32()));
-    AddInstruction(new(zone()) HStoreKeyedFastElement(elements, key, value));
+    if (FLAG_smi_only_arrays) {
+      HInstruction* elements_kind =
+          AddInstruction(new(zone()) HElementsKind(literal));
+      HBasicBlock* store_fast = graph()->CreateBasicBlock();
+      // Two empty blocks to satisfy edge split form.
+      HBasicBlock* store_fast_edgesplit1 = graph()->CreateBasicBlock();
+      HBasicBlock* store_fast_edgesplit2 = graph()->CreateBasicBlock();
+      HBasicBlock* store_generic = graph()->CreateBasicBlock();
+      HBasicBlock* check_smi_only_elements = graph()->CreateBasicBlock();
+      HBasicBlock* join = graph()->CreateBasicBlock();
+
+      HIsSmiAndBranch* smicheck = new(zone()) HIsSmiAndBranch(value);
+      smicheck->SetSuccessorAt(0, store_fast_edgesplit1);
+      smicheck->SetSuccessorAt(1, check_smi_only_elements);
+      current_block()->Finish(smicheck);
+      store_fast_edgesplit1->Finish(new(zone()) HGoto(store_fast));
+
+      set_current_block(check_smi_only_elements);
+      HCompareConstantEqAndBranch* smi_elements_check =
+          new(zone()) HCompareConstantEqAndBranch(elements_kind,
+                                                  FAST_SMI_ONLY_ELEMENTS,
+                                                  Token::EQ_STRICT);
+      smi_elements_check->SetSuccessorAt(0, store_generic);
+      smi_elements_check->SetSuccessorAt(1, store_fast_edgesplit2);
+      current_block()->Finish(smi_elements_check);
+      store_fast_edgesplit2->Finish(new(zone()) HGoto(store_fast));
+
+      set_current_block(store_fast);
+      AddInstruction(new(zone()) HStoreKeyedFastElement(elements, key, value));
+      store_fast->Goto(join);
+
+      set_current_block(store_generic);
+      AddInstruction(BuildStoreKeyedGeneric(literal, key, value));
+      store_generic->Goto(join);
+
+      join->SetJoinId(expr->id());
+      set_current_block(join);
+    } else {
+      AddInstruction(new(zone()) HStoreKeyedFastElement(elements, key, value));
+    }
+
     AddSimulate(expr->GetIdForElement(i));
   }
   return ast_context()->ReturnValue(Pop());
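
With --smi-only-arrays the literal store above becomes a small diamond: smi values always take the fast keyed store, and non-smi values take it only once the backing store has left FAST_SMI_ONLY_ELEMENTS; otherwise the generic store runs so the elements kind can transition. Reduced to plain control flow (hypothetical helper, not the hydrogen graph), the decision is:

// Hypothetical sketch of the store diamond built above.
enum class ToyElementsKind { kFastSmiOnly, kFast, kFastDouble, kDictionary };

template <typename Value, typename FastStore, typename GenericStore>
void StoreLiteralElement(bool value_is_smi, ToyElementsKind kind,
                         const Value& value, FastStore store_fast,
                         GenericStore store_generic) {
  if (value_is_smi || kind != ToyElementsKind::kFastSmiOnly) {
    store_fast(value);       // both branches join again after the store
  } else {
    store_generic(value);    // may transition the elements kind
  }
}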
@@ -3561,10 +3630,10 @@
   LookupResult lookup;
   GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
   if (type == kUseCell) {
-    bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
     Handle<GlobalObject> global(info()->global_object());
     Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
-    HInstruction* instr = new(zone()) HStoreGlobalCell(value, cell, check_hole);
+    HInstruction* instr =
+        new(zone()) HStoreGlobalCell(value, cell, lookup.GetPropertyDetails());
     instr->set_position(position);
     AddInstruction(instr);
     if (instr->HasSideEffects()) AddSimulate(ast_id);
@@ -3928,6 +3997,7 @@
       case EXTERNAL_FLOAT_ELEMENTS:
       case EXTERNAL_DOUBLE_ELEMENTS:
         break;
+      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case DICTIONARY_ELEMENTS:
@@ -3944,6 +4014,30 @@
 }
 
 
+HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
+                                                    HValue* checked_key,
+                                                    HValue* val,
+                                                    ElementsKind elements_kind,
+                                                    bool is_store) {
+  if (is_store) {
+    ASSERT(val != NULL);
+    if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+      return new(zone()) HStoreKeyedFastDoubleElement(
+          elements, checked_key, val);
+    } else {  // FAST_ELEMENTS or FAST_SMI_ONLY_ELEMENTS.
+      return new(zone()) HStoreKeyedFastElement(
+          elements, checked_key, val, elements_kind);
+    }
+  }
+  // It's an element load (!is_store).
+  if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+    return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key);
+  } else {  // FAST_ELEMENTS or FAST_SMI_ONLY_ELEMENTS.
+    return new(zone()) HLoadKeyedFastElement(elements, checked_key);
+  }
+}
+
+
 HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
                                                            HValue* key,
                                                            HValue* val,
@@ -3951,17 +4045,20 @@
                                                            bool is_store) {
   ASSERT(expr->IsMonomorphic());
   Handle<Map> map = expr->GetMonomorphicReceiverType();
-  if (!map->has_fast_elements() &&
-      !map->has_fast_double_elements() &&
+  AddInstruction(new(zone()) HCheckNonSmi(object));
+  HInstruction* mapcheck = AddInstruction(new(zone()) HCheckMap(object, map));
+  bool fast_smi_only_elements = map->has_fast_smi_only_elements();
+  bool fast_elements = map->has_fast_elements();
+  bool fast_double_elements = map->has_fast_double_elements();
+  if (!fast_smi_only_elements &&
+      !fast_elements &&
+      !fast_double_elements &&
       !map->has_external_array_elements()) {
     return is_store ? BuildStoreKeyedGeneric(object, key, val)
                     : BuildLoadKeyedGeneric(object, key);
   }
-  AddInstruction(new(zone()) HCheckNonSmi(object));
-  HInstruction* mapcheck = AddInstruction(new(zone()) HCheckMap(object, map));
   HInstruction* elements = AddInstruction(new(zone()) HLoadElements(object));
-  bool fast_double_elements = map->has_fast_double_elements();
-  if (is_store && map->has_fast_elements()) {
+  if (is_store && (fast_elements || fast_smi_only_elements)) {
     AddInstruction(new(zone()) HCheckMap(
         elements, isolate()->factory()->fixed_array_map()));
   }
@@ -3976,28 +4073,15 @@
     return BuildExternalArrayElementAccess(external_elements, checked_key,
                                            val, map->elements_kind(), is_store);
   }
-  ASSERT(map->has_fast_elements() || fast_double_elements);
+  ASSERT(fast_smi_only_elements || fast_elements || fast_double_elements);
   if (map->instance_type() == JS_ARRAY_TYPE) {
     length = AddInstruction(new(zone()) HJSArrayLength(object, mapcheck));
   } else {
     length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
   }
   checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
-  if (is_store) {
-    if (fast_double_elements) {
-      return new(zone()) HStoreKeyedFastDoubleElement(elements,
-                                                      checked_key,
-                                                      val);
-    } else {
-      return new(zone()) HStoreKeyedFastElement(elements, checked_key, val);
-    }
-  } else {
-    if (fast_double_elements) {
-      return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key);
-    } else {
-      return new(zone()) HLoadKeyedFastElement(elements, checked_key);
-    }
-  }
+  return BuildFastElementAccess(elements, checked_key, val,
+                                map->elements_kind(), is_store);
 }
 
 
@@ -4039,14 +4123,20 @@
   HLoadExternalArrayPointer* external_elements = NULL;
   HInstruction* checked_key = NULL;
 
-  // FAST_ELEMENTS is assumed to be the first case.
-  STATIC_ASSERT(FAST_ELEMENTS == 0);
+  // Generated code assumes that FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS,
+  // FAST_DOUBLE_ELEMENTS and DICTIONARY_ELEMENTS are handled before external
+  // arrays.
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+  STATIC_ASSERT(FAST_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+  STATIC_ASSERT(FAST_DOUBLE_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+  STATIC_ASSERT(DICTIONARY_ELEMENTS < FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND);
 
-  for (ElementsKind elements_kind = FAST_ELEMENTS;
+  for (ElementsKind elements_kind = FIRST_ELEMENTS_KIND;
        elements_kind <= LAST_ELEMENTS_KIND;
        elements_kind = ElementsKind(elements_kind + 1)) {
-    // After having handled FAST_ELEMENTS and DICTIONARY_ELEMENTS, we
-    // need to add some code that's executed for all external array cases.
+    // After having handled FAST_ELEMENTS, FAST_SMI_ONLY_ELEMENTS,
+    // FAST_DOUBLE_ELEMENTS and DICTIONARY_ELEMENTS, we need to add some code
+    // that's executed for all external array cases.
     STATIC_ASSERT(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND ==
                   LAST_ELEMENTS_KIND);
     if (elements_kind == FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND
@@ -4068,15 +4158,25 @@
 
       set_current_block(if_true);
       HInstruction* access;
-      if (elements_kind == FAST_ELEMENTS ||
+      if (elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+          elements_kind == FAST_ELEMENTS ||
           elements_kind == FAST_DOUBLE_ELEMENTS) {
-        bool fast_double_elements =
-            elements_kind == FAST_DOUBLE_ELEMENTS;
-        if (is_store && elements_kind == FAST_ELEMENTS) {
+        if (is_store && elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+          AddInstruction(new(zone()) HCheckSmi(val));
+        }
+        if (is_store && elements_kind != FAST_DOUBLE_ELEMENTS) {
           AddInstruction(new(zone()) HCheckMap(
               elements, isolate()->factory()->fixed_array_map(),
               elements_kind_branch));
         }
+        // TODO(jkummerow): The need for these two blocks could be avoided
+        // in one of two ways:
+        // (1) Introduce ElementsKinds for JSArrays that are distinct from
+        //     those for fast objects.
+        // (2) Put the common instructions into a third "join" block. This
+        //     requires additional AST IDs that we can deopt to from inside
+        //     that join block. They must be added to the Property class (when
+        //     it's a keyed property) and registered in the full codegen.
         HBasicBlock* if_jsarray = graph()->CreateBasicBlock();
         HBasicBlock* if_fastobject = graph()->CreateBasicBlock();
         HHasInstanceTypeAndBranch* typecheck =
@@ -4086,29 +4186,15 @@
         current_block()->Finish(typecheck);
 
         set_current_block(if_jsarray);
-        HInstruction* length = new(zone()) HJSArrayLength(object, typecheck);
-        AddInstruction(length);
+        HInstruction* length;
+        length = AddInstruction(new(zone()) HJSArrayLength(object, typecheck));
         checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
-        if (is_store) {
-          if (fast_double_elements) {
-            access = AddInstruction(
-                new(zone()) HStoreKeyedFastDoubleElement(elements,
-                                                         checked_key,
-                                                         val));
-          } else {
-            access = AddInstruction(
-                new(zone()) HStoreKeyedFastElement(elements, checked_key, val));
-          }
-        } else {
-          if (fast_double_elements) {
-            access = AddInstruction(
-                new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key));
-          } else {
-            access = AddInstruction(
-                new(zone()) HLoadKeyedFastElement(elements, checked_key));
-          }
+        access = AddInstruction(BuildFastElementAccess(
+            elements, checked_key, val, elements_kind, is_store));
+        if (!is_store) {
           Push(access);
         }
+
         *has_side_effects |= access->HasSideEffects();
         if (position != -1) {
           access->set_position(position);
@@ -4118,25 +4204,8 @@
         set_current_block(if_fastobject);
         length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
         checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
-        if (is_store) {
-          if (fast_double_elements) {
-            access = AddInstruction(
-                new(zone()) HStoreKeyedFastDoubleElement(elements,
-                                                         checked_key,
-                                                         val));
-          } else {
-            access = AddInstruction(
-                new(zone()) HStoreKeyedFastElement(elements, checked_key, val));
-          }
-        } else {
-          if (fast_double_elements) {
-            access = AddInstruction(
-                new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key));
-          } else {
-            access = AddInstruction(
-                new(zone()) HLoadKeyedFastElement(elements, checked_key));
-          }
-        }
+        access = AddInstruction(BuildFastElementAccess(
+            elements, checked_key, val, elements_kind, is_store));
       } else if (elements_kind == DICTIONARY_ELEMENTS) {
         if (is_store) {
           access = AddInstruction(BuildStoreKeyedGeneric(object, key, val));
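
The STATIC_ASSERTs above pin down an ordering assumption in the elements-kind enum: all fast kinds and DICTIONARY_ELEMENTS must enumerate before the first external-array kind, because the loop handles them before falling into the shared external-array code. A compile-time guard of that shape, sketched with an invented enum and C++11 static_assert, would be:

// Hypothetical sketch of the compile-time ordering guard.
enum ToyKind {
  kToyFastSmiOnlyElements,
  kToyFastElements,
  kToyFastDoubleElements,
  kToyDictionaryElements,
  kToyFirstExternalArrayKind   // external-array kinds start here
};

static_assert(kToyFastSmiOnlyElements < kToyFirstExternalArrayKind,
              "smi-only elements handled before external arrays");
static_assert(kToyFastElements < kToyFirstExternalArrayKind,
              "fast elements handled before external arrays");
static_assert(kToyFastDoubleElements < kToyFirstExternalArrayKind,
              "double elements handled before external arrays");
static_assert(kToyDictionaryElements < kToyFirstExternalArrayKind,
              "dictionary elements handled before external arrays");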
@@ -4474,20 +4543,25 @@
     return false;
   }
 
-  // No context change required.
   CompilationInfo* outer_info = info();
+#if !defined(V8_TARGET_ARCH_IA32)
+  // Target must be able to use caller's context.
   if (target->context() != outer_info->closure()->context() ||
       outer_info->scope()->contains_with() ||
       outer_info->scope()->num_heap_slots() > 0) {
     TraceInline(target, caller, "target requires context change");
     return false;
   }
+#endif
+
 
   // Don't inline deeper than kMaxInliningLevels calls.
   HEnvironment* env = environment();
   int current_level = 1;
   while (env->outer() != NULL) {
-    if (current_level == Compiler::kMaxInliningLevels) {
+    if (current_level == (FLAG_limit_inlining
+                          ? Compiler::kMaxInliningLevels
+                          : 2 * Compiler::kMaxInliningLevels)) {
       TraceInline(target, caller, "inline depth limit reached");
       return false;
     }
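
The depth check above now allows inlining twice as deep unless --limit_inlining is set, in which case the original cap applies. Restated as a tiny helper (the constant's value is invented here, not V8's):

// Hypothetical restatement of the inlining depth cap chosen above.
constexpr int kMaxInliningLevels = 8;   // illustrative value only

int MaxInlineDepth(bool limit_inlining) {
  return limit_inlining ? kMaxInliningLevels : 2 * kMaxInliningLevels;
}

bool InlineDepthLimitReached(int current_level, bool limit_inlining) {
  return current_level >= MaxInlineDepth(limit_inlining);
}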
@@ -4602,6 +4676,17 @@
                                      function,
                                      undefined,
                                      call_kind);
+#ifdef V8_TARGET_ARCH_IA32
+  // IA32 only, overwrite the caller's context in the deoptimization
+  // environment with the correct one.
+  //
+  // TODO(kmillikin): implement the same inlining on other platforms so we
+  // can remove the unsightly ifdefs in this function.
+  HConstant* context = new HConstant(Handle<Context>(target->context()),
+                                     Representation::Tagged());
+  AddInstruction(context);
+  inner_env->BindContext(context);
+#endif
   HBasicBlock* body_entry = CreateBasicBlock(inner_env);
   current_block()->Goto(body_entry);
   body_entry->SetJoinId(expr->ReturnId());
@@ -4922,8 +5007,8 @@
     }
 
   } else {
+    expr->RecordTypeFeedback(oracle(), CALL_AS_FUNCTION);
     VariableProxy* proxy = expr->expression()->AsVariableProxy();
-    // FIXME.
     bool global_call = proxy != NULL && proxy->var()->IsUnallocated();
 
     if (global_call) {
@@ -4975,6 +5060,46 @@
         Drop(argument_count);
       }
 
+    } else if (expr->IsMonomorphic()) {
+      // The function is on the stack in the unoptimized code during
+      // evaluation of the arguments.
+      CHECK_ALIVE(VisitForValue(expr->expression()));
+      HValue* function = Top();
+      HValue* context = environment()->LookupContext();
+      HGlobalObject* global = new(zone()) HGlobalObject(context);
+      HGlobalReceiver* receiver = new(zone()) HGlobalReceiver(global);
+      AddInstruction(global);
+      PushAndAdd(receiver);
+      CHECK_ALIVE(VisitExpressions(expr->arguments()));
+      AddInstruction(new(zone()) HCheckFunction(function, expr->target()));
+      if (TryInline(expr)) {
+        // The function is lingering in the deoptimization environment.
+        // Handle it by case analysis on the AST context.
+        if (ast_context()->IsEffect()) {
+          Drop(1);
+        } else if (ast_context()->IsValue()) {
+          HValue* result = Pop();
+          Drop(1);
+          Push(result);
+        } else if (ast_context()->IsTest()) {
+          TestContext* context = TestContext::cast(ast_context());
+          if (context->if_true()->HasPredecessor()) {
+            context->if_true()->last_environment()->Drop(1);
+          }
+          if (context->if_false()->HasPredecessor()) {
+            context->if_false()->last_environment()->Drop(1);
+          }
+        } else {
+          UNREACHABLE();
+        }
+        return;
+      } else {
+        call = PreProcessCall(new(zone()) HInvokeFunction(context,
+                                                          function,
+                                                          argument_count));
+        Drop(1);  // The function.
+      }
+
     } else {
       CHECK_ALIVE(VisitArgument(expr->expression()));
       HValue* context = environment()->LookupContext();
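
The monomorphic call path above leaves the callee on the simulated stack while the receiver and arguments are evaluated; if the call is inlined, that lingering value has to be dropped from whichever environments remain, including both branch environments when the call sits in a test context. A stripped-down restatement (toy environment type, not V8's):

// Hypothetical sketch: drop the lingering callee from each surviving
// branch environment after a successful inline in a test context.
struct ToyEnv { int height = 0; void Drop(int n) { height -= n; } };

void DropLingeringFunction(ToyEnv* if_true_env, ToyEnv* if_false_env) {
  if (if_true_env != nullptr) if_true_env->Drop(1);
  if (if_false_env != nullptr) if_false_env->Drop(1);
}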
@@ -5668,26 +5793,36 @@
 }
 
 
-void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* compare_expr,
-                                               Expression* expr,
+void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
+                                               Expression* sub_expr,
                                                Handle<String> check) {
-  CHECK_ALIVE(VisitForTypeOf(expr));
-  HValue* expr_value = Pop();
-  HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(expr_value, check);
-  instr->set_position(compare_expr->position());
-  return ast_context()->ReturnControl(instr, compare_expr->id());
+  CHECK_ALIVE(VisitForTypeOf(sub_expr));
+  HValue* value = Pop();
+  HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(value, check);
+  instr->set_position(expr->position());
+  return ast_context()->ReturnControl(instr, expr->id());
 }
 
 
-void HGraphBuilder::HandleLiteralCompareUndefined(
-    CompareOperation* compare_expr, Expression* expr) {
-  CHECK_ALIVE(VisitForValue(expr));
-  HValue* lhs = Pop();
-  HValue* rhs = graph()->GetConstantUndefined();
-  HCompareObjectEqAndBranch* instr =
-      new(zone()) HCompareObjectEqAndBranch(lhs, rhs);
-  instr->set_position(compare_expr->position());
-  return ast_context()->ReturnControl(instr, compare_expr->id());
+bool HGraphBuilder::TryLiteralCompare(CompareOperation* expr) {
+  Expression *sub_expr;
+  Handle<String> check;
+  if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
+    HandleLiteralCompareTypeof(expr, sub_expr, check);
+    return true;
+  }
+
+  if (expr->IsLiteralCompareUndefined(&sub_expr)) {
+    HandleLiteralCompareNil(expr, sub_expr, kUndefinedValue);
+    return true;
+  }
+
+  if (expr->IsLiteralCompareNull(&sub_expr)) {
+    HandleLiteralCompareNil(expr, sub_expr, kNullValue);
+    return true;
+  }
+
+  return false;
 }
 
 
@@ -5709,17 +5844,7 @@
   }
 
   // Check for special cases that compare against literals.
-  Expression *sub_expr;
-  Handle<String> check;
-  if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
-    HandleLiteralCompareTypeof(expr, sub_expr, check);
-    return;
-  }
-
-  if (expr->IsLiteralCompareUndefined(&sub_expr)) {
-    HandleLiteralCompareUndefined(expr, sub_expr);
-    return;
-  }
+  if (TryLiteralCompare(expr)) return;
 
   TypeInfo type_info = oracle()->CompareType(expr);
   // Check if this expression was ever executed according to type feedback.
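
TryLiteralCompare above folds the literal-comparison special cases (typeof checks, comparisons against undefined, and now against null) behind a single predicate: each recognizer either emits its specialized branch and returns true, or declines so the caller falls through to the generic compare. The pattern, sketched generically with invented types:

// Hypothetical sketch of the recognize-or-decline dispatch.
#include <functional>
#include <vector>

struct ToyExpr { /* ... */ };

bool TryLiteralCompare(
    const ToyExpr& expr,
    const std::vector<std::function<bool(const ToyExpr&)>>& recognizers) {
  for (const auto& recognize : recognizers)
    if (recognize(expr)) return true;  // special case handled
  return false;                        // caller emits the generic compare
}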
@@ -5824,14 +5949,18 @@
 }
 
 
-void HGraphBuilder::VisitCompareToNull(CompareToNull* expr) {
+void HGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
+                                            Expression* sub_expr,
+                                            NilValue nil) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
-  CHECK_ALIVE(VisitForValue(expr->expression()));
+  CHECK_ALIVE(VisitForValue(sub_expr));
   HValue* value = Pop();
-  HIsNullAndBranch* instr =
-      new(zone()) HIsNullAndBranch(value, expr->is_strict());
+  EqualityKind kind =
+      expr->op() == Token::EQ_STRICT ? kStrictEquality : kNonStrictEquality;
+  HIsNilAndBranch* instr = new(zone()) HIsNilAndBranch(value, kind, nil);
+  instr->set_position(expr->position());
   return ast_context()->ReturnControl(instr, expr->id());
 }
 
@@ -5914,9 +6043,7 @@
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
   HHasInstanceTypeAndBranch* result =
-      new(zone()) HHasInstanceTypeAndBranch(value,
-                                            JS_FUNCTION_TYPE,
-                                            JS_FUNCTION_PROXY_TYPE);
+      new(zone()) HHasInstanceTypeAndBranch(value, JS_FUNCTION_TYPE);
   return ast_context()->ReturnControl(result, call->id());
 }
 
@@ -6816,7 +6943,7 @@
   }
 
 #ifdef DEBUG
-  if (graph_ != NULL) graph_->Verify();
+  if (graph_ != NULL) graph_->Verify(false);  // No full verify.
   if (allocator_ != NULL) allocator_->Verify();
 #endif
 }
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 03fbc73..8b507c2 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -243,11 +243,13 @@
 
   // Returns false if there are phi-uses of the arguments-object
   // which are not supported by the optimizing compiler.
-  bool CheckPhis();
+  bool CheckArgumentsPhiUses();
 
-  // Returns false if there are phi-uses of hole values comming
-  // from uninitialized consts.
-  bool CollectPhis();
+  // Returns false if there are phi-uses of an uninitialized const
+  // which are not supported by the optimizing compiler.
+  bool CheckConstPhiUses();
+
+  void CollectPhis();
 
   Handle<Code> Compile(CompilationInfo* info);
 
@@ -283,7 +285,7 @@
   }
 
 #ifdef DEBUG
-  void Verify() const;
+  void Verify(bool do_full_verify) const;
 #endif
 
  private:
@@ -910,11 +912,13 @@
                                   HValue* receiver,
                                   SmallMapList* types,
                                   Handle<String> name);
-  void HandleLiteralCompareTypeof(CompareOperation* compare_expr,
-                                  Expression* expr,
+  bool TryLiteralCompare(CompareOperation* expr);
+  void HandleLiteralCompareTypeof(CompareOperation* expr,
+                                  Expression* sub_expr,
                                   Handle<String> check);
-  void HandleLiteralCompareUndefined(CompareOperation* compare_expr,
-                                     Expression* expr);
+  void HandleLiteralCompareNil(CompareOperation* expr,
+                               Expression* sub_expr,
+                               NilValue nil);
 
   HStringCharCodeAt* BuildStringCharCodeAt(HValue* context,
                                            HValue* string,
@@ -938,6 +942,11 @@
       HValue* val,
       ElementsKind elements_kind,
       bool is_store);
+  HInstruction* BuildFastElementAccess(HValue* elements,
+                                       HValue* checked_key,
+                                       HValue* val,
+                                       ElementsKind elements_kind,
+                                       bool is_store);
 
   HInstruction* BuildMonomorphicElementAccess(HValue* object,
                                               HValue* key,
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index 0ca2d6b..ed277e5 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -89,8 +89,13 @@
 
 
 void RelocInfo::set_target_address(Address target) {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
   Assembler::set_target_address_at(pc_, target);
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  if (host() != NULL && IsCodeTarget(rmode_)) {
+    Object* target_code = Code::GetCodeFromTargetAddress(target);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target_code));
+  }
 }
 
 
@@ -116,6 +121,10 @@
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   Memory::Object_at(pc_) = target;
   CPU::FlushICache(pc_, sizeof(Address));
+  if (host() != NULL && target->IsHeapObject()) {
+    host()->GetHeap()->incremental_marking()->RecordWrite(
+        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+  }
 }
 
 
@@ -147,6 +156,12 @@
   Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
   Memory::Address_at(pc_) = address;
   CPU::FlushICache(pc_, sizeof(Address));
+  if (host() != NULL) {
+    // TODO(1550) We are passing NULL as a slot because the cell can never be
+    // on an evacuation candidate.
+    host()->GetHeap()->incremental_marking()->RecordWrite(
+        host(), NULL, cell);
+  }
 }
 
 
@@ -161,6 +176,11 @@
   ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   Assembler::set_target_address_at(pc_ + 1, target);
+  if (host() != NULL) {
+    Object* target_code = Code::GetCodeFromTargetAddress(target);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target_code));
+  }
 }
 
 
@@ -194,7 +214,7 @@
 void RelocInfo::Visit(ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    visitor->VisitPointer(target_object_address());
+    visitor->VisitEmbeddedPointer(host(), target_object_address());
     CPU::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
     visitor->VisitCodeTarget(this);
@@ -222,7 +242,7 @@
 void RelocInfo::Visit(Heap* heap) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    StaticVisitor::VisitPointer(heap, target_object_address());
+    StaticVisitor::VisitEmbeddedPointer(heap, host(), target_object_address());
     CPU::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
     StaticVisitor::VisitCodeTarget(heap, this);
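
The RelocInfo changes above are part of the incremental-GC work: whenever code is patched to reference a new heap object, the write is reported to the incremental marker so a concurrently built mark set cannot miss the newly referenced object. A minimal sketch of that write-barrier idea, with invented types rather than V8's heap classes:

// Hypothetical sketch: report every patched reference to the marker.
#include <unordered_set>

struct ToyHeapObject {};

struct ToyIncrementalMarking {
  bool active = false;
  std::unordered_set<const ToyHeapObject*> marked;

  void RecordWrite(const ToyHeapObject* host, const ToyHeapObject* value) {
    if (!active || value == nullptr) return;
    (void)host;             // a real barrier also remembers the host's slot
    marked.insert(value);   // grey the newly referenced object
  }
};

void PatchEmbeddedPointer(ToyHeapObject** slot, ToyHeapObject* new_value,
                          const ToyHeapObject* host,
                          ToyIncrementalMarking* marking) {
  *slot = new_value;                         // the actual patch
  marking->RecordWrite(host, new_value);     // barrier after the store
}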
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 9996474..66a9884 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -55,6 +55,8 @@
 uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
 
 
+// The Probe method needs executable memory, so it uses Heap::CreateCode.
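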
+// Allocation failure is silent and leads to a safe default.
 void CpuFeatures::Probe() {
   ASSERT(!initialized_);
   ASSERT(supported_ == 0);
@@ -86,23 +88,23 @@
   __ pushfd();
   __ push(ecx);
   __ push(ebx);
-  __ mov(ebp, Operand(esp));
+  __ mov(ebp, esp);
 
   // If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
   __ pushfd();
   __ pop(eax);
-  __ mov(edx, Operand(eax));
+  __ mov(edx, eax);
   __ xor_(eax, 0x200000);  // Flip bit 21.
   __ push(eax);
   __ popfd();
   __ pushfd();
   __ pop(eax);
-  __ xor_(eax, Operand(edx));  // Different if CPUID is supported.
+  __ xor_(eax, edx);  // Different if CPUID is supported.
   __ j(not_zero, &cpuid);
 
   // CPUID not supported. Clear the supported features in edx:eax.
-  __ xor_(eax, Operand(eax));
-  __ xor_(edx, Operand(edx));
+  __ xor_(eax, eax);
+  __ xor_(edx, edx);
   __ jmp(&done);
 
   // Invoke CPUID with 1 in eax to get feature information in
@@ -118,13 +120,13 @@
 
   // Move the result from ecx:edx to edx:eax and make sure to mark the
   // CPUID feature as supported.
-  __ mov(eax, Operand(edx));
+  __ mov(eax, edx);
   __ or_(eax, 1 << CPUID);
-  __ mov(edx, Operand(ecx));
+  __ mov(edx, ecx);
 
   // Done.
   __ bind(&done);
-  __ mov(esp, Operand(ebp));
+  __ mov(esp, ebp);
   __ pop(ebx);
   __ pop(ecx);
   __ popfd();
@@ -286,6 +288,18 @@
       && ((buf_[0] & 0x07) == reg.code());  // register codes match.
 }
 
+
+bool Operand::is_reg_only() const {
+  return (buf_[0] & 0xF8) == 0xC0;  // Addressing mode is register only.
+}
+
+
+Register Operand::reg() const {
+  ASSERT(is_reg_only());
+  return Register::from_code(buf_[0] & 0x07);
+}
+
+
 // -----------------------------------------------------------------------------
 // Implementation of Assembler.
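
is_reg_only() and reg() above read the Operand's first buffer byte as a ModRM byte: mod == 0b11 means register-direct addressing, and the low three bits carry the register code (the middle reg field is evidently still zero at this point, which is why the mask is 0xF8 rather than 0xC0). A standalone decoder for exactly that test:

// Hypothetical decoder for the register-only ModRM check used above.
#include <cassert>
#include <cstdint>

bool IsRegisterOnly(uint8_t modrm) {
  return (modrm & 0xF8) == 0xC0;  // mod == 11 and reg field (bits 5..3) == 000
}

int RegisterCode(uint8_t modrm) {
  assert(IsRegisterOnly(modrm));
  return modrm & 0x07;            // r/m field holds the register number
}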
 
@@ -701,6 +715,13 @@
 }
 
 
+void Assembler::add(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x01);
+  emit_operand(src, dst);
+}
+
+
 void Assembler::add(const Operand& dst, const Immediate& x) {
   ASSERT(reloc_info_writer.last_pc() != NULL);
   EnsureSpace ensure_space(this);
@@ -741,25 +762,29 @@
 
 void Assembler::cmpb(const Operand& op, int8_t imm8) {
   EnsureSpace ensure_space(this);
-  EMIT(0x80);
-  emit_operand(edi, op);  // edi == 7
+  if (op.is_reg(eax)) {
+    EMIT(0x3C);
+  } else {
+    EMIT(0x80);
+    emit_operand(edi, op);  // edi == 7
+  }
   EMIT(imm8);
 }
 
 
-void Assembler::cmpb(const Operand& dst, Register src) {
-  ASSERT(src.is_byte_register());
+void Assembler::cmpb(const Operand& op, Register reg) {
+  ASSERT(reg.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0x38);
-  emit_operand(src, dst);
+  emit_operand(reg, op);
 }
 
 
-void Assembler::cmpb(Register dst, const Operand& src) {
-  ASSERT(dst.is_byte_register());
+void Assembler::cmpb(Register reg, const Operand& op) {
+  ASSERT(reg.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0x3A);
-  emit_operand(dst, src);
+  emit_operand(reg, op);
 }
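
cmpb() above now uses the dedicated accumulator form when the operand is al/eax: "cmp al, imm8" is encoded as the single opcode byte 0x3C followed by the immediate, while other byte registers need the 0x80 /7 group opcode plus a ModRM byte. A hypothetical emitter making the same choice for register operands:

// Hypothetical sketch of the short-form vs. ModRM-form encoding choice.
#include <cstdint>
#include <vector>

void EmitCmpByteRegImm(std::vector<uint8_t>* code, int reg_code, int8_t imm8) {
  if (reg_code == 0) {            // al
    code->push_back(0x3C);        // cmp al, imm8 (short accumulator form)
  } else {
    code->push_back(0x80);        // group-1 opcode, imm8 form
    code->push_back(static_cast<uint8_t>(0xC0 | (7 << 3) | reg_code));
                                  // ModRM: mod=11, /7 selects CMP
  }
  code->push_back(static_cast<uint8_t>(imm8));
}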
 
 
@@ -1069,18 +1094,6 @@
 }
 
 
-void Assembler::subb(const Operand& op, int8_t imm8) {
-  EnsureSpace ensure_space(this);
-  if (op.is_reg(eax)) {
-    EMIT(0x2c);
-  } else {
-    EMIT(0x80);
-    emit_operand(ebp, op);  // ebp == 5
-  }
-  EMIT(imm8);
-}
-
-
 void Assembler::sub(const Operand& dst, const Immediate& x) {
   EnsureSpace ensure_space(this);
   emit_arith(5, dst, x);
@@ -1094,14 +1107,6 @@
 }
 
 
-void Assembler::subb(Register dst, const Operand& src) {
-  ASSERT(dst.code() < 4);
-  EnsureSpace ensure_space(this);
-  EMIT(0x2A);
-  emit_operand(dst, src);
-}
-
-
 void Assembler::sub(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
   EMIT(0x29);
@@ -1158,6 +1163,10 @@
 
 
 void Assembler::test_b(const Operand& op, uint8_t imm8) {
+  if (op.is_reg_only() && op.reg().code() >= 4) {
+    test(op, Immediate(imm8));
+    return;
+  }
   EnsureSpace ensure_space(this);
   EMIT(0xF6);
   emit_operand(eax, op);
@@ -1178,10 +1187,10 @@
 }
 
 
-void Assembler::xor_(const Operand& src, Register dst) {
+void Assembler::xor_(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
   EMIT(0x31);
-  emit_operand(dst, src);
+  emit_operand(src, dst);
 }
 
 
@@ -2471,7 +2480,7 @@
       return;
     }
   }
-  RelocInfo rinfo(pc_, rmode, data);
+  RelocInfo rinfo(pc_, rmode, data, NULL);
   reloc_info_writer.Write(&rinfo);
 }
 
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 4698e3e..4dfde5f 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -75,6 +75,8 @@
   static inline Register FromAllocationIndex(int index);
 
   static Register from_code(int code) {
+    ASSERT(code >= 0);
+    ASSERT(code < kNumRegisters);
     Register r = { code };
     return r;
   }
@@ -300,9 +302,6 @@
 
 class Operand BASE_EMBEDDED {
  public:
-  // reg
-  INLINE(explicit Operand(Register reg));
-
   // XMM reg
   INLINE(explicit Operand(XMMRegister xmm_reg));
 
@@ -347,12 +346,16 @@
   // Returns true if this Operand is a wrapper for the specified register.
   bool is_reg(Register reg) const;
 
+  // Returns true if this Operand is a wrapper for one register.
+  bool is_reg_only() const;
+
+  // Asserts that this Operand is a wrapper for one register and returns the
+  // register.
+  Register reg() const;
+
  private:
-  byte buf_[6];
-  // The number of bytes in buf_.
-  unsigned int len_;
-  // Only valid if len_ > 4.
-  RelocInfo::Mode rmode_;
+  // reg
+  INLINE(explicit Operand(Register reg));
 
   // Set the ModRM byte without an encoded 'reg' register. The
   // register is encoded later as part of the emit_operand operation.
@@ -362,7 +365,15 @@
   inline void set_disp8(int8_t disp);
   inline void set_dispr(int32_t disp, RelocInfo::Mode rmode);
 
+  byte buf_[6];
+  // The number of bytes in buf_.
+  unsigned int len_;
+  // Only valid if len_ > 4.
+  RelocInfo::Mode rmode_;
+
   friend class Assembler;
+  friend class MacroAssembler;
+  friend class LCodeGen;
 };
 
 
@@ -671,7 +682,9 @@
   void leave();
 
   // Moves
+  void mov_b(Register dst, Register src) { mov_b(dst, Operand(src)); }
   void mov_b(Register dst, const Operand& src);
+  void mov_b(Register dst, int8_t imm8) { mov_b(Operand(dst), imm8); }
   void mov_b(const Operand& dst, int8_t imm8);
   void mov_b(const Operand& dst, Register src);
 
@@ -687,17 +700,24 @@
   void mov(const Operand& dst, Handle<Object> handle);
   void mov(const Operand& dst, Register src);
 
+  void movsx_b(Register dst, Register src) { movsx_b(dst, Operand(src)); }
   void movsx_b(Register dst, const Operand& src);
 
+  void movsx_w(Register dst, Register src) { movsx_w(dst, Operand(src)); }
   void movsx_w(Register dst, const Operand& src);
 
+  void movzx_b(Register dst, Register src) { movzx_b(dst, Operand(src)); }
   void movzx_b(Register dst, const Operand& src);
 
+  void movzx_w(Register dst, Register src) { movzx_w(dst, Operand(src)); }
   void movzx_w(Register dst, const Operand& src);
 
   // Conditional moves
   void cmov(Condition cc, Register dst, int32_t imm32);
   void cmov(Condition cc, Register dst, Handle<Object> handle);
+  void cmov(Condition cc, Register dst, Register src) {
+    cmov(cc, dst, Operand(src));
+  }
   void cmov(Condition cc, Register dst, const Operand& src);
 
   // Flag management.
@@ -715,24 +735,31 @@
   void adc(Register dst, int32_t imm32);
   void adc(Register dst, const Operand& src);
 
+  void add(Register dst, Register src) { add(dst, Operand(src)); }
   void add(Register dst, const Operand& src);
+  void add(const Operand& dst, Register src);
+  void add(Register dst, const Immediate& imm) { add(Operand(dst), imm); }
   void add(const Operand& dst, const Immediate& x);
 
   void and_(Register dst, int32_t imm32);
   void and_(Register dst, const Immediate& x);
+  void and_(Register dst, Register src) { and_(dst, Operand(src)); }
   void and_(Register dst, const Operand& src);
-  void and_(const Operand& src, Register dst);
+  void and_(const Operand& dst, Register src);
   void and_(const Operand& dst, const Immediate& x);
 
+  void cmpb(Register reg, int8_t imm8) { cmpb(Operand(reg), imm8); }
   void cmpb(const Operand& op, int8_t imm8);
-  void cmpb(Register src, const Operand& dst);
-  void cmpb(const Operand& dst, Register src);
+  void cmpb(Register reg, const Operand& op);
+  void cmpb(const Operand& op, Register reg);
   void cmpb_al(const Operand& op);
   void cmpw_ax(const Operand& op);
   void cmpw(const Operand& op, Immediate imm16);
   void cmp(Register reg, int32_t imm32);
   void cmp(Register reg, Handle<Object> handle);
+  void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); }
   void cmp(Register reg, const Operand& op);
+  void cmp(Register reg, const Immediate& imm) { cmp(Operand(reg), imm); }
   void cmp(const Operand& op, const Immediate& imm);
   void cmp(const Operand& op, Handle<Object> handle);
 
@@ -748,6 +775,7 @@
 
   // Signed multiply instructions.
   void imul(Register src);                               // edx:eax = eax * src.
+  void imul(Register dst, Register src) { imul(dst, Operand(src)); }
   void imul(Register dst, const Operand& src);           // dst = dst * src.
   void imul(Register dst, Register src, int32_t imm32);  // dst = src * imm32.
 
@@ -764,8 +792,10 @@
   void not_(Register dst);
 
   void or_(Register dst, int32_t imm32);
+  void or_(Register dst, Register src) { or_(dst, Operand(src)); }
   void or_(Register dst, const Operand& src);
   void or_(const Operand& dst, Register src);
+  void or_(Register dst, const Immediate& imm) { or_(Operand(dst), imm); }
   void or_(const Operand& dst, const Immediate& x);
 
   void rcl(Register dst, uint8_t imm8);
@@ -776,35 +806,42 @@
 
   void sbb(Register dst, const Operand& src);
 
+  void shld(Register dst, Register src) { shld(dst, Operand(src)); }
   void shld(Register dst, const Operand& src);
 
   void shl(Register dst, uint8_t imm8);
   void shl_cl(Register dst);
 
+  void shrd(Register dst, Register src) { shrd(dst, Operand(src)); }
   void shrd(Register dst, const Operand& src);
 
   void shr(Register dst, uint8_t imm8);
   void shr_cl(Register dst);
 
-  void subb(const Operand& dst, int8_t imm8);
-  void subb(Register dst, const Operand& src);
+  void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); }
   void sub(const Operand& dst, const Immediate& x);
+  void sub(Register dst, Register src) { sub(dst, Operand(src)); }
   void sub(Register dst, const Operand& src);
   void sub(const Operand& dst, Register src);
 
   void test(Register reg, const Immediate& imm);
+  void test(Register reg0, Register reg1) { test(reg0, Operand(reg1)); }
   void test(Register reg, const Operand& op);
   void test_b(Register reg, const Operand& op);
   void test(const Operand& op, const Immediate& imm);
+  void test_b(Register reg, uint8_t imm8) { test_b(Operand(reg), imm8); }
   void test_b(const Operand& op, uint8_t imm8);
 
   void xor_(Register dst, int32_t imm32);
+  void xor_(Register dst, Register src) { xor_(dst, Operand(src)); }
   void xor_(Register dst, const Operand& src);
-  void xor_(const Operand& src, Register dst);
+  void xor_(const Operand& dst, Register src);
+  void xor_(Register dst, const Immediate& imm) { xor_(Operand(dst), imm); }
   void xor_(const Operand& dst, const Immediate& x);
 
   // Bit operations.
   void bt(const Operand& dst, Register src);
+  void bts(Register dst, Register src) { bts(Operand(dst), src); }
   void bts(const Operand& dst, Register src);
 
   // Miscellaneous
@@ -835,6 +872,7 @@
   void call(Label* L);
   void call(byte* entry, RelocInfo::Mode rmode);
   int CallSize(const Operand& adr);
+  void call(Register reg) { call(Operand(reg)); }
   void call(const Operand& adr);
   int CallSize(Handle<Code> code, RelocInfo::Mode mode);
   void call(Handle<Code> code,
@@ -845,6 +883,7 @@
   // unconditional jump to L
   void jmp(Label* L, Label::Distance distance = Label::kFar);
   void jmp(byte* entry, RelocInfo::Mode rmode);
+  void jmp(Register reg) { jmp(Operand(reg)); }
   void jmp(const Operand& adr);
   void jmp(Handle<Code> code, RelocInfo::Mode rmode);
 
@@ -929,6 +968,7 @@
   void cvttss2si(Register dst, const Operand& src);
   void cvttsd2si(Register dst, const Operand& src);
 
+  void cvtsi2sd(XMMRegister dst, Register src) { cvtsi2sd(dst, Operand(src)); }
   void cvtsi2sd(XMMRegister dst, const Operand& src);
   void cvtss2sd(XMMRegister dst, XMMRegister src);
   void cvtsd2ss(XMMRegister dst, XMMRegister src);
@@ -969,12 +1009,14 @@
   void movdbl(XMMRegister dst, const Operand& src);
   void movdbl(const Operand& dst, XMMRegister src);
 
+  void movd(XMMRegister dst, Register src) { movd(dst, Operand(src)); }
   void movd(XMMRegister dst, const Operand& src);
-  void movd(const Operand& src, XMMRegister dst);
+  void movd(Register dst, XMMRegister src) { movd(Operand(dst), src); }
+  void movd(const Operand& dst, XMMRegister src);
   void movsd(XMMRegister dst, XMMRegister src);
 
   void movss(XMMRegister dst, const Operand& src);
-  void movss(const Operand& src, XMMRegister dst);
+  void movss(const Operand& dst, XMMRegister src);
   void movss(XMMRegister dst, XMMRegister src);
 
   void pand(XMMRegister dst, XMMRegister src);
@@ -987,11 +1029,17 @@
   void psrlq(XMMRegister reg, int8_t shift);
   void psrlq(XMMRegister dst, XMMRegister src);
   void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle);
+  void pextrd(Register dst, XMMRegister src, int8_t offset) {
+    pextrd(Operand(dst), src, offset);
+  }
   void pextrd(const Operand& dst, XMMRegister src, int8_t offset);
+  void pinsrd(XMMRegister dst, Register src, int8_t offset) {
+    pinsrd(dst, Operand(src), offset);
+  }
   void pinsrd(XMMRegister dst, const Operand& src, int8_t offset);
 
   // Parallel XMM operations.
-  void movntdqa(XMMRegister src, const Operand& dst);
+  void movntdqa(XMMRegister dst, const Operand& src);
   void movntdq(const Operand& dst, XMMRegister src);
   // Prefetch src position into cache level.
   // Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
@@ -1045,6 +1093,9 @@
   static const int kMaximalBufferSize = 512*MB;
   static const int kMinimalBufferSize = 4*KB;
 
+  byte byte_at(int pos)  { return buffer_[pos]; }
+  void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
+
  protected:
   bool emit_debug_code() const { return emit_debug_code_; }
 
@@ -1057,9 +1108,8 @@
 
   byte* addr_at(int pos) { return buffer_ + pos; }
 
+
  private:
-  byte byte_at(int pos)  { return buffer_[pos]; }
-  void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
   uint32_t long_at(int pos)  {
     return *reinterpret_cast<uint32_t*>(addr_at(pos));
   }
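
Most of the header changes above add thin Register overloads that wrap the register in an Operand and forward to the existing implementation, so call sites can write add(eax, ebx) instead of add(eax, Operand(ebx)). The pattern in isolation (toy assembler, not V8's):

// Hypothetical sketch of the forwarding-overload convenience pattern.
struct ToyRegister { int code; };

struct ToyOperand {
  explicit ToyOperand(ToyRegister r) : code(r.code) {}
  int code;
};

struct MiniAssembler {
  void add(ToyRegister dst, const ToyOperand& src) {
    (void)dst; (void)src;                 // emit 0x03 /r in a real assembler
  }
  void add(ToyRegister dst, ToyRegister src) { add(dst, ToyOperand(src)); }
};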
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 310ea3d..53ade3a 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -69,7 +69,7 @@
 
   // JumpToExternalReference expects eax to contain the number of arguments
   // including the receiver and the extra arguments.
-  __ add(Operand(eax), Immediate(num_extra_args + 1));
+  __ add(eax, Immediate(num_extra_args + 1));
   __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
 }
 
@@ -80,25 +80,34 @@
   //  -- edi: constructor function
   // -----------------------------------
 
-  Label non_function_call;
+  Label slow, non_function_call;
   // Check that function is not a smi.
   __ JumpIfSmi(edi, &non_function_call);
   // Check that function is a JSFunction.
   __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
-  __ j(not_equal, &non_function_call);
+  __ j(not_equal, &slow);
 
   // Jump to the function-specific construct stub.
   __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
   __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kConstructStubOffset));
   __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
-  __ jmp(Operand(ebx));
+  __ jmp(ebx);
 
   // edi: called object
   // eax: number of arguments
+  // ecx: object map
+  Label do_call;
+  __ bind(&slow);
+  __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
+  __ j(not_equal, &non_function_call);
+  __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+  __ jmp(&do_call);
+
   __ bind(&non_function_call);
+  __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+  __ bind(&do_call);
   // Set expected number of arguments to zero (not changing eax).
   __ Set(ebx, Immediate(0));
-  __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
   Handle<Code> arguments_adaptor =
       masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
   __ SetCallKind(ecx, CALL_AS_METHOD);
@@ -113,265 +122,272 @@
   ASSERT(!is_api_function || !count_constructions);
 
   // Enter a construct frame.
-  __ EnterConstructFrame();
+  {
+    FrameScope scope(masm, StackFrame::CONSTRUCT);
 
-  // Store a smi-tagged arguments count on the stack.
-  __ SmiTag(eax);
-  __ push(eax);
+    // Store a smi-tagged arguments count on the stack.
+    __ SmiTag(eax);
+    __ push(eax);
 
-  // Push the function to invoke on the stack.
-  __ push(edi);
+    // Push the function to invoke on the stack.
+    __ push(edi);
 
-  // Try to allocate the object without transitioning into C code. If any of the
-  // preconditions is not met, the code bails out to the runtime call.
-  Label rt_call, allocated;
-  if (FLAG_inline_new) {
-    Label undo_allocation;
+    // Try to allocate the object without transitioning into C code. If any of
+    // the preconditions is not met, the code bails out to the runtime call.
+    Label rt_call, allocated;
+    if (FLAG_inline_new) {
+      Label undo_allocation;
 #ifdef ENABLE_DEBUGGER_SUPPORT
-    ExternalReference debug_step_in_fp =
-        ExternalReference::debug_step_in_fp_address(masm->isolate());
-    __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
-    __ j(not_equal, &rt_call);
+      ExternalReference debug_step_in_fp =
+          ExternalReference::debug_step_in_fp_address(masm->isolate());
+      __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
+      __ j(not_equal, &rt_call);
 #endif
 
-    // Verified that the constructor is a JSFunction.
-    // Load the initial map and verify that it is in fact a map.
-    // edi: constructor
-    __ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
-    // Will both indicate a NULL and a Smi
-    __ JumpIfSmi(eax, &rt_call);
-    // edi: constructor
-    // eax: initial map (if proven valid below)
-    __ CmpObjectType(eax, MAP_TYPE, ebx);
-    __ j(not_equal, &rt_call);
+      // Verified that the constructor is a JSFunction.
+      // Load the initial map and verify that it is in fact a map.
+      // edi: constructor
+      __ mov(eax, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+      // Will both indicate a NULL and a Smi
+      __ JumpIfSmi(eax, &rt_call);
+      // edi: constructor
+      // eax: initial map (if proven valid below)
+      __ CmpObjectType(eax, MAP_TYPE, ebx);
+      __ j(not_equal, &rt_call);
 
-    // Check that the constructor is not constructing a JSFunction (see comments
-    // in Runtime_NewObject in runtime.cc). In which case the initial map's
-    // instance type would be JS_FUNCTION_TYPE.
-    // edi: constructor
-    // eax: initial map
-    __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
-    __ j(equal, &rt_call);
+      // Check that the constructor is not constructing a JSFunction (see
+      // comments in Runtime_NewObject in runtime.cc). In which case the
+      // initial map's instance type would be JS_FUNCTION_TYPE.
+      // edi: constructor
+      // eax: initial map
+      __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
+      __ j(equal, &rt_call);
 
-    if (count_constructions) {
-      Label allocate;
-      // Decrease generous allocation count.
-      __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-      __ dec_b(FieldOperand(ecx, SharedFunctionInfo::kConstructionCountOffset));
-      __ j(not_zero, &allocate);
-
-      __ push(eax);
-      __ push(edi);
-
-      __ push(edi);  // constructor
-      // The call will replace the stub, so the countdown is only done once.
-      __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
-      __ pop(edi);
-      __ pop(eax);
-
-      __ bind(&allocate);
-    }
-
-    // Now allocate the JSObject on the heap.
-    // edi: constructor
-    // eax: initial map
-    __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
-    __ shl(edi, kPointerSizeLog2);
-    __ AllocateInNewSpace(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
-    // Allocated the JSObject, now initialize the fields.
-    // eax: initial map
-    // ebx: JSObject
-    // edi: start of next object
-    __ mov(Operand(ebx, JSObject::kMapOffset), eax);
-    Factory* factory = masm->isolate()->factory();
-    __ mov(ecx, factory->empty_fixed_array());
-    __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
-    __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
-    // Set extra fields in the newly allocated object.
-    // eax: initial map
-    // ebx: JSObject
-    // edi: start of next object
-    { Label loop, entry;
-      // To allow for truncation.
       if (count_constructions) {
-        __ mov(edx, factory->one_pointer_filler_map());
-      } else {
-        __ mov(edx, factory->undefined_value());
+        Label allocate;
+        // Decrease generous allocation count.
+        __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+        __ dec_b(FieldOperand(ecx,
+                              SharedFunctionInfo::kConstructionCountOffset));
+        __ j(not_zero, &allocate);
+
+        __ push(eax);
+        __ push(edi);
+
+        __ push(edi);  // constructor
+        // The call will replace the stub, so the countdown is only done once.
+        __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+        __ pop(edi);
+        __ pop(eax);
+
+        __ bind(&allocate);
       }
+
+      // Now allocate the JSObject on the heap.
+      // edi: constructor
+      // eax: initial map
+      __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
+      __ shl(edi, kPointerSizeLog2);
+      __ AllocateInNewSpace(
+          edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
+      // Allocated the JSObject, now initialize the fields.
+      // eax: initial map
+      // ebx: JSObject
+      // edi: start of next object
+      __ mov(Operand(ebx, JSObject::kMapOffset), eax);
+      Factory* factory = masm->isolate()->factory();
+      __ mov(ecx, factory->empty_fixed_array());
+      __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
+      __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
+      // Set extra fields in the newly allocated object.
+      // eax: initial map
+      // ebx: JSObject
+      // edi: start of next object
       __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
-      __ jmp(&entry);
-      __ bind(&loop);
-      __ mov(Operand(ecx, 0), edx);
-      __ add(Operand(ecx), Immediate(kPointerSize));
-      __ bind(&entry);
-      __ cmp(ecx, Operand(edi));
-      __ j(less, &loop);
-    }
-
-    // Add the object tag to make the JSObject real, so that we can continue and
-    // jump into the continuation code at any time from now on. Any failures
-    // need to undo the allocation, so that the heap is in a consistent state
-    // and verifiable.
-    // eax: initial map
-    // ebx: JSObject
-    // edi: start of next object
-    __ or_(Operand(ebx), Immediate(kHeapObjectTag));
-
-    // Check if a non-empty properties array is needed.
-    // Allocate and initialize a FixedArray if it is.
-    // eax: initial map
-    // ebx: JSObject
-    // edi: start of next object
-    // Calculate the total number of properties described by the map.
-    __ movzx_b(edx, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
-    __ movzx_b(ecx, FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
-    __ add(edx, Operand(ecx));
-    // Calculate unused properties past the end of the in-object properties.
-    __ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset));
-    __ sub(edx, Operand(ecx));
-    // Done if no extra properties are to be allocated.
-    __ j(zero, &allocated);
-    __ Assert(positive, "Property allocation count failed.");
-
-    // Scale the number of elements by pointer size and add the header for
-    // FixedArrays to the start of the next object calculation from above.
-    // ebx: JSObject
-    // edi: start of next object (will be start of FixedArray)
-    // edx: number of elements in properties array
-    __ AllocateInNewSpace(FixedArray::kHeaderSize,
-                          times_pointer_size,
-                          edx,
-                          edi,
-                          ecx,
-                          no_reg,
-                          &undo_allocation,
-                          RESULT_CONTAINS_TOP);
-
-    // Initialize the FixedArray.
-    // ebx: JSObject
-    // edi: FixedArray
-    // edx: number of elements
-    // ecx: start of next object
-    __ mov(eax, factory->fixed_array_map());
-    __ mov(Operand(edi, FixedArray::kMapOffset), eax);  // setup the map
-    __ SmiTag(edx);
-    __ mov(Operand(edi, FixedArray::kLengthOffset), edx);  // and length
-
-    // Initialize the fields to undefined.
-    // ebx: JSObject
-    // edi: FixedArray
-    // ecx: start of next object
-    { Label loop, entry;
       __ mov(edx, factory->undefined_value());
-      __ lea(eax, Operand(edi, FixedArray::kHeaderSize));
-      __ jmp(&entry);
-      __ bind(&loop);
-      __ mov(Operand(eax, 0), edx);
-      __ add(Operand(eax), Immediate(kPointerSize));
-      __ bind(&entry);
-      __ cmp(eax, Operand(ecx));
-      __ j(below, &loop);
+      if (count_constructions) {
+        __ movzx_b(esi,
+                   FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
+        __ lea(esi,
+               Operand(ebx, esi, times_pointer_size, JSObject::kHeaderSize));
+        // esi: offset of first field after pre-allocated fields
+        if (FLAG_debug_code) {
+          __ cmp(esi, edi);
+          __ Assert(less_equal,
+                    "Unexpected number of pre-allocated property fields.");
+        }
+        __ InitializeFieldsWithFiller(ecx, esi, edx);
+        __ mov(edx, factory->one_pointer_filler_map());
+      }
+      __ InitializeFieldsWithFiller(ecx, edi, edx);
+
+      // Add the object tag to make the JSObject real, so that we can continue
+      // and jump into the continuation code at any time from now on. Any
+      // failures need to undo the allocation, so that the heap is in a
+      // consistent state and verifiable.
+      // eax: initial map
+      // ebx: JSObject
+      // edi: start of next object
+      __ or_(ebx, Immediate(kHeapObjectTag));
+
+      // Check if a non-empty properties array is needed.
+      // Allocate and initialize a FixedArray if it is.
+      // eax: initial map
+      // ebx: JSObject
+      // edi: start of next object
+      // Calculate the total number of properties described by the map.
+      __ movzx_b(edx, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
+      __ movzx_b(ecx,
+                 FieldOperand(eax, Map::kPreAllocatedPropertyFieldsOffset));
+      __ add(edx, ecx);
+      // Calculate unused properties past the end of the in-object properties.
+      __ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset));
+      __ sub(edx, ecx);
+      // Done if no extra properties are to be allocated.
+      __ j(zero, &allocated);
+      __ Assert(positive, "Property allocation count failed.");
+
+      // Scale the number of elements by pointer size and add the header for
+      // FixedArrays to the start of the next object calculation from above.
+      // ebx: JSObject
+      // edi: start of next object (will be start of FixedArray)
+      // edx: number of elements in properties array
+      __ AllocateInNewSpace(FixedArray::kHeaderSize,
+                            times_pointer_size,
+                            edx,
+                            edi,
+                            ecx,
+                            no_reg,
+                            &undo_allocation,
+                            RESULT_CONTAINS_TOP);
+
+      // Initialize the FixedArray.
+      // ebx: JSObject
+      // edi: FixedArray
+      // edx: number of elements
+      // ecx: start of next object
+      __ mov(eax, factory->fixed_array_map());
+      __ mov(Operand(edi, FixedArray::kMapOffset), eax);  // setup the map
+      __ SmiTag(edx);
+      __ mov(Operand(edi, FixedArray::kLengthOffset), edx);  // and length
+
+      // Initialize the fields to undefined.
+      // ebx: JSObject
+      // edi: FixedArray
+      // ecx: start of next object
+      { Label loop, entry;
+        __ mov(edx, factory->undefined_value());
+        __ lea(eax, Operand(edi, FixedArray::kHeaderSize));
+        __ jmp(&entry);
+        __ bind(&loop);
+        __ mov(Operand(eax, 0), edx);
+        __ add(eax, Immediate(kPointerSize));
+        __ bind(&entry);
+        __ cmp(eax, ecx);
+        __ j(below, &loop);
+      }
+
+      // Store the initialized FixedArray into the properties field of
+      // the JSObject
+      // ebx: JSObject
+      // edi: FixedArray
+      __ or_(edi, Immediate(kHeapObjectTag));  // add the heap tag
+      __ mov(FieldOperand(ebx, JSObject::kPropertiesOffset), edi);
+
+
+      // Continue with JSObject being successfully allocated
+      // ebx: JSObject
+      __ jmp(&allocated);
+
+      // Undo the setting of the new top so that the heap is verifiable. For
+      // example, the map's unused properties potentially do not match the
+      // allocated objects unused properties.
+      // ebx: JSObject (previous new top)
+      __ bind(&undo_allocation);
+      __ UndoAllocationInNewSpace(ebx);
     }
 
-    // Store the initialized FixedArray into the properties field of
-    // the JSObject
-    // ebx: JSObject
-    // edi: FixedArray
-    __ or_(Operand(edi), Immediate(kHeapObjectTag));  // add the heap tag
-    __ mov(FieldOperand(ebx, JSObject::kPropertiesOffset), edi);
+    // Allocate the new receiver object using the runtime call.
+    __ bind(&rt_call);
+    // Must restore edi (constructor) before calling runtime.
+    __ mov(edi, Operand(esp, 0));
+    // edi: function (constructor)
+    __ push(edi);
+    __ CallRuntime(Runtime::kNewObject, 1);
+    __ mov(ebx, eax);  // store result in ebx
 
+    // New object allocated.
+    // ebx: newly allocated object
+    __ bind(&allocated);
+    // Retrieve the function from the stack.
+    __ pop(edi);
 
-    // Continue with JSObject being successfully allocated
-    // ebx: JSObject
-    __ jmp(&allocated);
+    // Retrieve smi-tagged arguments count from the stack.
+    __ mov(eax, Operand(esp, 0));
+    __ SmiUntag(eax);
 
-    // Undo the setting of the new top so that the heap is verifiable. For
-    // example, the map's unused properties potentially do not match the
-    // allocated objects unused properties.
-    // ebx: JSObject (previous new top)
-    __ bind(&undo_allocation);
-    __ UndoAllocationInNewSpace(ebx);
+    // Push the allocated receiver to the stack. We need two copies
+    // because we may have to return the original one and the calling
+    // conventions dictate that the called function pops the receiver.
+    __ push(ebx);
+    __ push(ebx);
+
+    // Setup pointer to last argument.
+    __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
+
+    // Copy arguments and receiver to the expression stack.
+    Label loop, entry;
+    __ mov(ecx, eax);
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ push(Operand(ebx, ecx, times_4, 0));
+    __ bind(&entry);
+    __ dec(ecx);
+    __ j(greater_equal, &loop);
+
+    // Call the function.
+    if (is_api_function) {
+      __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+      Handle<Code> code =
+          masm->isolate()->builtins()->HandleApiCallConstruct();
+      ParameterCount expected(0);
+      __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
+                    CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+    } else {
+      ParameterCount actual(eax);
+      __ InvokeFunction(edi, actual, CALL_FUNCTION,
+                        NullCallWrapper(), CALL_AS_METHOD);
+    }
+
+    // Restore context from the frame.
+    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+
+    // If the result is an object (in the ECMA sense), we should get rid
+    // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+    // on page 74.
+    Label use_receiver, exit;
+
+    // If the result is a smi, it is *not* an object in the ECMA sense.
+    __ JumpIfSmi(eax, &use_receiver);
+
+    // If the type of the result (stored in its map) is less than
+    // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+    __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+    __ j(above_equal, &exit);
+
+    // Throw away the result of the constructor invocation and use the
+    // on-stack receiver as the result.
+    __ bind(&use_receiver);
+    __ mov(eax, Operand(esp, 0));
+
+    // Restore the arguments count and leave the construct frame.
+    __ bind(&exit);
+    __ mov(ebx, Operand(esp, kPointerSize));  // Get arguments count.
+
+    // Leave construct frame.
   }
 
-  // Allocate the new receiver object using the runtime call.
-  __ bind(&rt_call);
-  // Must restore edi (constructor) before calling runtime.
-  __ mov(edi, Operand(esp, 0));
-  // edi: function (constructor)
-  __ push(edi);
-  __ CallRuntime(Runtime::kNewObject, 1);
-  __ mov(ebx, Operand(eax));  // store result in ebx
-
-  // New object allocated.
-  // ebx: newly allocated object
-  __ bind(&allocated);
-  // Retrieve the function from the stack.
-  __ pop(edi);
-
-  // Retrieve smi-tagged arguments count from the stack.
-  __ mov(eax, Operand(esp, 0));
-  __ SmiUntag(eax);
-
-  // Push the allocated receiver to the stack. We need two copies
-  // because we may have to return the original one and the calling
-  // conventions dictate that the called function pops the receiver.
-  __ push(ebx);
-  __ push(ebx);
-
-  // Setup pointer to last argument.
-  __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
-
-  // Copy arguments and receiver to the expression stack.
-  Label loop, entry;
-  __ mov(ecx, Operand(eax));
-  __ jmp(&entry);
-  __ bind(&loop);
-  __ push(Operand(ebx, ecx, times_4, 0));
-  __ bind(&entry);
-  __ dec(ecx);
-  __ j(greater_equal, &loop);
-
-  // Call the function.
-  if (is_api_function) {
-    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-    Handle<Code> code =
-        masm->isolate()->builtins()->HandleApiCallConstruct();
-    ParameterCount expected(0);
-    __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
-                  CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-  } else {
-    ParameterCount actual(eax);
-    __ InvokeFunction(edi, actual, CALL_FUNCTION,
-                      NullCallWrapper(), CALL_AS_METHOD);
-  }
-
-  // Restore context from the frame.
-  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-
-  // If the result is an object (in the ECMA sense), we should get rid
-  // of the receiver and use the result; see ECMA-262 section 13.2.2-7
-  // on page 74.
-  Label use_receiver, exit;
-
-  // If the result is a smi, it is *not* an object in the ECMA sense.
-  __ JumpIfSmi(eax, &use_receiver);
-
-  // If the type of the result (stored in its map) is less than
-  // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
-  __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
-  __ j(above_equal, &exit);
-
-  // Throw away the result of the constructor invocation and use the
-  // on-stack receiver as the result.
-  __ bind(&use_receiver);
-  __ mov(eax, Operand(esp, 0));
-
-  // Restore the arguments count and leave the construct frame.
-  __ bind(&exit);
-  __ mov(ebx, Operand(esp, kPointerSize));  // get arguments count
-  __ LeaveConstructFrame();
-
   // Remove caller arguments from the stack and return.
   STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
   __ pop(ecx);
@@ -399,57 +415,58 @@
 
 static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
                                              bool is_construct) {
-  // Clear the context before we push it when entering the JS frame.
+  // Clear the context before we push it when entering the internal frame.
   __ Set(esi, Immediate(0));
 
-  // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Load the previous frame pointer (ebx) to access C arguments
-  __ mov(ebx, Operand(ebp, 0));
+    // Load the previous frame pointer (ebx) to access C arguments
+    __ mov(ebx, Operand(ebp, 0));
 
-  // Get the function from the frame and setup the context.
-  __ mov(ecx, Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
-  __ mov(esi, FieldOperand(ecx, JSFunction::kContextOffset));
+    // Get the function from the frame and setup the context.
+    __ mov(ecx, Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
+    __ mov(esi, FieldOperand(ecx, JSFunction::kContextOffset));
 
-  // Push the function and the receiver onto the stack.
-  __ push(ecx);
-  __ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset));
+    // Push the function and the receiver onto the stack.
+    __ push(ecx);
+    __ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset));
 
-  // Load the number of arguments and setup pointer to the arguments.
-  __ mov(eax, Operand(ebx, EntryFrameConstants::kArgcOffset));
-  __ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset));
+    // Load the number of arguments and setup pointer to the arguments.
+    __ mov(eax, Operand(ebx, EntryFrameConstants::kArgcOffset));
+    __ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset));
 
-  // Copy arguments to the stack in a loop.
-  Label loop, entry;
-  __ Set(ecx, Immediate(0));
-  __ jmp(&entry);
-  __ bind(&loop);
-  __ mov(edx, Operand(ebx, ecx, times_4, 0));  // push parameter from argv
-  __ push(Operand(edx, 0));  // dereference handle
-  __ inc(Operand(ecx));
-  __ bind(&entry);
-  __ cmp(ecx, Operand(eax));
-  __ j(not_equal, &loop);
+    // Copy arguments to the stack in a loop.
+    Label loop, entry;
+    __ Set(ecx, Immediate(0));
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ mov(edx, Operand(ebx, ecx, times_4, 0));  // push parameter from argv
+    __ push(Operand(edx, 0));  // dereference handle
+    __ inc(ecx);
+    __ bind(&entry);
+    __ cmp(ecx, eax);
+    __ j(not_equal, &loop);
 
-  // Get the function from the stack and call it.
-  __ mov(edi, Operand(esp, eax, times_4, +1 * kPointerSize));  // +1 ~ receiver
+    // Get the function from the stack and call it.
+    // kPointerSize for the receiver.
+    __ mov(edi, Operand(esp, eax, times_4, kPointerSize));
 
-  // Invoke the code.
-  if (is_construct) {
-    __ call(masm->isolate()->builtins()->JSConstructCall(),
-            RelocInfo::CODE_TARGET);
-  } else {
-    ParameterCount actual(eax);
-    __ InvokeFunction(edi, actual, CALL_FUNCTION,
-                      NullCallWrapper(), CALL_AS_METHOD);
+    // Invoke the code.
+    if (is_construct) {
+      __ call(masm->isolate()->builtins()->JSConstructCall(),
+              RelocInfo::CODE_TARGET);
+    } else {
+      ParameterCount actual(eax);
+      __ InvokeFunction(edi, actual, CALL_FUNCTION,
+                        NullCallWrapper(), CALL_AS_METHOD);
+    }
+
+    // Exit the internal frame. Notice that this also removes the empty
+    // context and the function left on the stack by the code
+    // invocation.
   }
-
-  // Exit the JS frame. Notice that this also removes the empty
-  // context and the function left on the stack by the code
-  // invocation.
-  __ LeaveInternalFrame();
-  __ ret(1 * kPointerSize);  // remove receiver
+  __ ret(kPointerSize);  // Remove receiver.
 }
 
 
@@ -464,68 +481,68 @@
 
 
 void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
-  // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Push a copy of the function.
-  __ push(edi);
-  // Push call kind information.
-  __ push(ecx);
+    // Push a copy of the function.
+    __ push(edi);
+    // Push call kind information.
+    __ push(ecx);
 
-  __ push(edi);  // Function is also the parameter to the runtime call.
-  __ CallRuntime(Runtime::kLazyCompile, 1);
+    __ push(edi);  // Function is also the parameter to the runtime call.
+    __ CallRuntime(Runtime::kLazyCompile, 1);
 
-  // Restore call kind information.
-  __ pop(ecx);
-  // Restore receiver.
-  __ pop(edi);
+    // Restore call kind information.
+    __ pop(ecx);
+    // Restore receiver.
+    __ pop(edi);
 
-  // Tear down temporary frame.
-  __ LeaveInternalFrame();
+    // Tear down internal frame.
+  }
 
   // Do a tail-call of the compiled function.
   __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
-  __ jmp(Operand(eax));
+  __ jmp(eax);
 }
 
 
 void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
-  // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Push a copy of the function onto the stack.
-  __ push(edi);
-  // Push call kind information.
-  __ push(ecx);
+    // Push a copy of the function onto the stack.
+    __ push(edi);
+    // Push call kind information.
+    __ push(ecx);
 
-  __ push(edi);  // Function is also the parameter to the runtime call.
-  __ CallRuntime(Runtime::kLazyRecompile, 1);
+    __ push(edi);  // Function is also the parameter to the runtime call.
+    __ CallRuntime(Runtime::kLazyRecompile, 1);
 
-  // Restore call kind information.
-  __ pop(ecx);
-  // Restore receiver.
-  __ pop(edi);
+    // Restore call kind information.
+    __ pop(ecx);
+    // Restore receiver.
+    __ pop(edi);
 
-  // Tear down temporary frame.
-  __ LeaveInternalFrame();
+    // Tear down internal frame.
+  }
 
   // Do a tail-call of the compiled function.
   __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
-  __ jmp(Operand(eax));
+  __ jmp(eax);
 }
 
 
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
-  // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Pass the function and deoptimization type to the runtime system.
-  __ push(Immediate(Smi::FromInt(static_cast<int>(type))));
-  __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+    // Pass the function and deoptimization type to the runtime system.
+    __ push(Immediate(Smi::FromInt(static_cast<int>(type))));
+    __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
 
-  // Tear down temporary frame.
-  __ LeaveInternalFrame();
+    // Tear down internal frame.
+  }
 
   // Get the full codegen state from the stack and untag it.
   __ mov(ecx, Operand(esp, 1 * kPointerSize));
@@ -566,9 +583,10 @@
   // the registers without worrying about which of them contain
   // pointers. This seems a bit fragile.
   __ pushad();
-  __ EnterInternalFrame();
-  __ CallRuntime(Runtime::kNotifyOSR, 0);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ CallRuntime(Runtime::kNotifyOSR, 0);
+  }
   __ popad();
   __ ret(0);
 }
@@ -579,7 +597,7 @@
 
   // 1. Make sure we have at least one argument.
   { Label done;
-    __ test(eax, Operand(eax));
+    __ test(eax, eax);
     __ j(not_zero, &done);
     __ pop(ebx);
     __ push(Immediate(factory->undefined_value()));
@@ -631,18 +649,21 @@
     __ j(above_equal, &shift_arguments);
 
     __ bind(&convert_to_object);
-    __ EnterInternalFrame();  // In order to preserve argument count.
-    __ SmiTag(eax);
-    __ push(eax);
 
-    __ push(ebx);
-    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-    __ mov(ebx, eax);
-    __ Set(edx, Immediate(0));  // restore
+    {  // In order to preserve the argument count.
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ SmiTag(eax);
+      __ push(eax);
 
-    __ pop(eax);
-    __ SmiUntag(eax);
-    __ LeaveInternalFrame();
+      __ push(ebx);
+      __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+      __ mov(ebx, eax);
+      __ Set(edx, Immediate(0));  // restore
+
+      __ pop(eax);
+      __ SmiUntag(eax);
+    }
+
     // Restore the function to edi.
     __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
     __ jmp(&patch_receiver);
@@ -695,11 +716,11 @@
   // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
   //     or a function proxy via CALL_FUNCTION_PROXY.
   { Label function, non_proxy;
-    __ test(edx, Operand(edx));
+    __ test(edx, edx);
     __ j(zero, &function);
     __ Set(ebx, Immediate(0));
     __ SetCallKind(ecx, CALL_AS_METHOD);
-    __ cmp(Operand(edx), Immediate(1));
+    __ cmp(edx, Immediate(1));
     __ j(not_equal, &non_proxy);
 
     __ pop(edx);   // return address
@@ -726,13 +747,13 @@
   __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
   __ SmiUntag(ebx);
   __ SetCallKind(ecx, CALL_AS_METHOD);
-  __ cmp(eax, Operand(ebx));
+  __ cmp(eax, ebx);
   __ j(not_equal,
        masm->isolate()->builtins()->ArgumentsAdaptorTrampoline());
 
   ParameterCount expected(0);
-  __ InvokeCode(Operand(edx), expected, expected, JUMP_FUNCTION,
-                NullCallWrapper(), CALL_AS_METHOD);
+  __ InvokeCode(edx, expected, expected, JUMP_FUNCTION, NullCallWrapper(),
+                CALL_AS_METHOD);
 }
 
 
@@ -740,155 +761,156 @@
   static const int kArgumentsOffset = 2 * kPointerSize;
   static const int kReceiverOffset = 3 * kPointerSize;
   static const int kFunctionOffset = 4 * kPointerSize;
+  {
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
 
-  __ EnterInternalFrame();
+    __ push(Operand(ebp, kFunctionOffset));  // push this
+    __ push(Operand(ebp, kArgumentsOffset));  // push arguments
+    __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
 
-  __ push(Operand(ebp, kFunctionOffset));  // push this
-  __ push(Operand(ebp, kArgumentsOffset));  // push arguments
-  __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+    // Check the stack for overflow. We are not trying to catch
+    // interruptions (e.g. debug break and preemption) here, so the "real stack
+    // limit" is checked.
+    Label okay;
+    ExternalReference real_stack_limit =
+        ExternalReference::address_of_real_stack_limit(masm->isolate());
+    __ mov(edi, Operand::StaticVariable(real_stack_limit));
+    // Make ecx the space we have left. The stack might already be overflowed
+    // here which will cause ecx to become negative.
+    __ mov(ecx, esp);
+    __ sub(ecx, edi);
+    // Make edx the space we need for the array when it is unrolled onto the
+    // stack.
+    __ mov(edx, eax);
+    __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
+    // Check if the arguments will overflow the stack.
+    __ cmp(ecx, edx);
+    __ j(greater, &okay);  // Signed comparison.
 
-  // Check the stack for overflow. We are not trying to catch
-  // interruptions (e.g. debug break and preemption) here, so the "real stack
-  // limit" is checked.
-  Label okay;
-  ExternalReference real_stack_limit =
-      ExternalReference::address_of_real_stack_limit(masm->isolate());
-  __ mov(edi, Operand::StaticVariable(real_stack_limit));
-  // Make ecx the space we have left. The stack might already be overflowed
-  // here which will cause ecx to become negative.
-  __ mov(ecx, Operand(esp));
-  __ sub(ecx, Operand(edi));
-  // Make edx the space we need for the array when it is unrolled onto the
-  // stack.
-  __ mov(edx, Operand(eax));
-  __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
-  // Check if the arguments will overflow the stack.
-  __ cmp(ecx, Operand(edx));
-  __ j(greater, &okay);  // Signed comparison.
+    // Out of stack space.
+    __ push(Operand(ebp, 4 * kPointerSize));  // push this
+    __ push(eax);
+    __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+    __ bind(&okay);
+    // End of stack check.
 
-  // Out of stack space.
-  __ push(Operand(ebp, 4 * kPointerSize));  // push this
-  __ push(eax);
-  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
-  __ bind(&okay);
-  // End of stack check.
+    // Push current index and limit.
+    const int kLimitOffset =
+        StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+    const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
+    __ push(eax);  // limit
+    __ push(Immediate(0));  // index
 
-  // Push current index and limit.
-  const int kLimitOffset =
-      StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
-  const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
-  __ push(eax);  // limit
-  __ push(Immediate(0));  // index
+    // Get the receiver.
+    __ mov(ebx, Operand(ebp, kReceiverOffset));
 
-  // Get the receiver.
-  __ mov(ebx, Operand(ebp, kReceiverOffset));
+    // Check that the function is a JS function (otherwise it must be a proxy).
+    Label push_receiver;
+    __ mov(edi, Operand(ebp, kFunctionOffset));
+    __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+    __ j(not_equal, &push_receiver);
 
-  // Check that the function is a JS function (otherwise it must be a proxy).
-  Label push_receiver;
-  __ mov(edi, Operand(ebp, kFunctionOffset));
-  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
-  __ j(not_equal, &push_receiver);
+    // Change context eagerly to get the right global object if necessary.
+    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
 
-  // Change context eagerly to get the right global object if necessary.
-  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+    // Compute the receiver.
+    // Do not transform the receiver for strict mode functions.
+    Label call_to_object, use_global_receiver;
+    __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+    __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
+              1 << SharedFunctionInfo::kStrictModeBitWithinByte);
+    __ j(not_equal, &push_receiver);
 
-  // Compute the receiver.
-  // Do not transform the receiver for strict mode functions.
-  Label call_to_object, use_global_receiver;
-  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-  __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
-            1 << SharedFunctionInfo::kStrictModeBitWithinByte);
-  __ j(not_equal, &push_receiver);
+    Factory* factory = masm->isolate()->factory();
 
-  Factory* factory = masm->isolate()->factory();
+    // Do not transform the receiver for natives (shared already in ecx).
+    __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
+              1 << SharedFunctionInfo::kNativeBitWithinByte);
+    __ j(not_equal, &push_receiver);
 
-  // Do not transform the receiver for natives (shared already in ecx).
-  __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
-            1 << SharedFunctionInfo::kNativeBitWithinByte);
-  __ j(not_equal, &push_receiver);
+    // Compute the receiver in non-strict mode.
+    // Call ToObject on the receiver if it is not an object, or use the
+    // global object if it is null or undefined.
+    __ JumpIfSmi(ebx, &call_to_object);
+    __ cmp(ebx, factory->null_value());
+    __ j(equal, &use_global_receiver);
+    __ cmp(ebx, factory->undefined_value());
+    __ j(equal, &use_global_receiver);
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
+    __ j(above_equal, &push_receiver);
 
-  // Compute the receiver in non-strict mode.
-  // Call ToObject on the receiver if it is not an object, or use the
-  // global object if it is null or undefined.
-  __ JumpIfSmi(ebx, &call_to_object);
-  __ cmp(ebx, factory->null_value());
-  __ j(equal, &use_global_receiver);
-  __ cmp(ebx, factory->undefined_value());
-  __ j(equal, &use_global_receiver);
-  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-  __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
-  __ j(above_equal, &push_receiver);
+    __ bind(&call_to_object);
+    __ push(ebx);
+    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+    __ mov(ebx, eax);
+    __ jmp(&push_receiver);
 
-  __ bind(&call_to_object);
-  __ push(ebx);
-  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-  __ mov(ebx, Operand(eax));
-  __ jmp(&push_receiver);
+    // Use the current global receiver object as the receiver.
+    __ bind(&use_global_receiver);
+    const int kGlobalOffset =
+        Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+    __ mov(ebx, FieldOperand(esi, kGlobalOffset));
+    __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalContextOffset));
+    __ mov(ebx, FieldOperand(ebx, kGlobalOffset));
+    __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
 
-  // Use the current global receiver object as the receiver.
-  __ bind(&use_global_receiver);
-  const int kGlobalOffset =
-      Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
-  __ mov(ebx, FieldOperand(esi, kGlobalOffset));
-  __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalContextOffset));
-  __ mov(ebx, FieldOperand(ebx, kGlobalOffset));
-  __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+    // Push the receiver.
+    __ bind(&push_receiver);
+    __ push(ebx);
 
-  // Push the receiver.
-  __ bind(&push_receiver);
-  __ push(ebx);
+    // Copy all arguments from the array to the stack.
+    Label entry, loop;
+    __ mov(eax, Operand(ebp, kIndexOffset));
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ mov(edx, Operand(ebp, kArgumentsOffset));  // load arguments
 
-  // Copy all arguments from the array to the stack.
-  Label entry, loop;
-  __ mov(eax, Operand(ebp, kIndexOffset));
-  __ jmp(&entry);
-  __ bind(&loop);
-  __ mov(edx, Operand(ebp, kArgumentsOffset));  // load arguments
+    // Use inline caching to speed up access to arguments.
+    Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Initialize();
+    __ call(ic, RelocInfo::CODE_TARGET);
+    // It is important that we do not have a test instruction after the
+    // call.  A test instruction after the call is used to indicate that
+    // we have generated an inline version of the keyed load.  In this
+    // case, we know that we are not generating a test instruction next.
 
-  // Use inline caching to speed up access to arguments.
-  Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Initialize();
-  __ call(ic, RelocInfo::CODE_TARGET);
-  // It is important that we do not have a test instruction after the
-  // call.  A test instruction after the call is used to indicate that
-  // we have generated an inline version of the keyed load.  In this
-  // case, we know that we are not generating a test instruction next.
+    // Push the nth argument.
+    __ push(eax);
 
-  // Push the nth argument.
-  __ push(eax);
+    // Update the index on the stack and in register eax.
+    __ mov(eax, Operand(ebp, kIndexOffset));
+    __ add(eax, Immediate(1 << kSmiTagSize));
+    __ mov(Operand(ebp, kIndexOffset), eax);
 
-  // Update the index on the stack and in register eax.
-  __ mov(eax, Operand(ebp, kIndexOffset));
-  __ add(Operand(eax), Immediate(1 << kSmiTagSize));
-  __ mov(Operand(ebp, kIndexOffset), eax);
+    __ bind(&entry);
+    __ cmp(eax, Operand(ebp, kLimitOffset));
+    __ j(not_equal, &loop);
 
-  __ bind(&entry);
-  __ cmp(eax, Operand(ebp, kLimitOffset));
-  __ j(not_equal, &loop);
+    // Invoke the function.
+    Label call_proxy;
+    ParameterCount actual(eax);
+    __ SmiUntag(eax);
+    __ mov(edi, Operand(ebp, kFunctionOffset));
+    __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+    __ j(not_equal, &call_proxy);
+    __ InvokeFunction(edi, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
 
-  // Invoke the function.
-  Label call_proxy;
-  ParameterCount actual(eax);
-  __ SmiUntag(eax);
-  __ mov(edi, Operand(ebp, kFunctionOffset));
-  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
-  __ j(not_equal, &call_proxy);
-  __ InvokeFunction(edi, actual, CALL_FUNCTION,
-                    NullCallWrapper(), CALL_AS_METHOD);
+    frame_scope.GenerateLeaveFrame();
+    __ ret(3 * kPointerSize);  // remove this, receiver, and arguments
 
-  __ LeaveInternalFrame();
-  __ ret(3 * kPointerSize);  // remove this, receiver, and arguments
+    // Invoke the function proxy.
+    __ bind(&call_proxy);
+    __ push(edi);  // add function proxy as last argument
+    __ inc(eax);
+    __ Set(ebx, Immediate(0));
+    __ SetCallKind(ecx, CALL_AS_METHOD);
+    __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
+    __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+            RelocInfo::CODE_TARGET);
 
-  // Invoke the function proxy.
-  __ bind(&call_proxy);
-  __ push(edi);  // add function proxy as last argument
-  __ inc(eax);
-  __ Set(ebx, Immediate(0));
-  __ SetCallKind(ecx, CALL_AS_METHOD);
-  __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
-  __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
-          RelocInfo::CODE_TARGET);
-
-  __ LeaveInternalFrame();
+    // Leave internal frame.
+  }
   __ ret(3 * kPointerSize);  // remove this, receiver, and arguments
 }
 
@@ -983,9 +1005,9 @@
     __ jmp(&entry);
     __ bind(&loop);
     __ mov(Operand(scratch1, 0), factory->the_hole_value());
-    __ add(Operand(scratch1), Immediate(kPointerSize));
+    __ add(scratch1, Immediate(kPointerSize));
     __ bind(&entry);
-    __ cmp(scratch1, Operand(scratch2));
+    __ cmp(scratch1, scratch2);
     __ j(below, &loop);
   }
 }
@@ -1082,7 +1104,7 @@
     __ bind(&loop);
     __ stos();
     __ bind(&entry);
-    __ cmp(edi, Operand(elements_array_end));
+    __ cmp(edi, elements_array_end);
     __ j(below, &loop);
     __ bind(&done);
   }
@@ -1120,7 +1142,7 @@
   __ push(eax);
 
   // Check for array construction with zero arguments.
-  __ test(eax, Operand(eax));
+  __ test(eax, eax);
   __ j(not_zero, &argc_one_or_more);
 
   __ bind(&empty_array);
@@ -1147,7 +1169,7 @@
   __ j(not_equal, &argc_two_or_more);
   STATIC_ASSERT(kSmiTag == 0);
   __ mov(ecx, Operand(esp, (push_count + 1) * kPointerSize));
-  __ test(ecx, Operand(ecx));
+  __ test(ecx, ecx);
   __ j(not_zero, &not_empty_array);
 
   // The single argument passed is zero, so we jump to the code above used to
@@ -1160,7 +1182,7 @@
     __ mov(eax, Operand(esp, i * kPointerSize));
     __ mov(Operand(esp, (i + 1) * kPointerSize), eax);
   }
-  __ add(Operand(esp), Immediate(2 * kPointerSize));  // Drop two stack slots.
+  __ add(esp, Immediate(2 * kPointerSize));  // Drop two stack slots.
   __ push(Immediate(0));  // Treat this as a call with argc of zero.
   __ jmp(&empty_array);
 
@@ -1250,7 +1272,7 @@
   __ bind(&loop);
   __ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
   __ mov(Operand(edx, 0), eax);
-  __ add(Operand(edx), Immediate(kPointerSize));
+  __ add(edx, Immediate(kPointerSize));
   __ bind(&entry);
   __ dec(ecx);
   __ j(greater_equal, &loop);
@@ -1356,14 +1378,14 @@
 
   if (FLAG_debug_code) {
     __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, ecx);
-    __ cmp(edi, Operand(ecx));
+    __ cmp(edi, ecx);
     __ Assert(equal, "Unexpected String function");
   }
 
   // Load the first argument into eax and get rid of the rest
   // (including the receiver).
   Label no_arguments;
-  __ test(eax, Operand(eax));
+  __ test(eax, eax);
   __ j(zero, &no_arguments);
   __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
   __ pop(ecx);
@@ -1439,12 +1461,13 @@
   // Invoke the conversion builtin and put the result into ebx.
   __ bind(&convert_argument);
   __ IncrementCounter(counters->string_ctor_conversions(), 1);
-  __ EnterInternalFrame();
-  __ push(edi);  // Preserve the function.
-  __ push(eax);
-  __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
-  __ pop(edi);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(edi);  // Preserve the function.
+    __ push(eax);
+    __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+    __ pop(edi);
+  }
   __ mov(ebx, eax);
   __ jmp(&argument_is_string);
 
@@ -1461,17 +1484,18 @@
   // create a string wrapper.
   __ bind(&gc_required);
   __ IncrementCounter(counters->string_ctor_gc_required(), 1);
-  __ EnterInternalFrame();
-  __ push(ebx);
-  __ CallRuntime(Runtime::kNewStringWrapper, 1);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(ebx);
+    __ CallRuntime(Runtime::kNewStringWrapper, 1);
+  }
   __ ret(0);
 }
 
 
 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   __ push(ebp);
-  __ mov(ebp, Operand(esp));
+  __ mov(ebp, esp);
 
   // Store the arguments adaptor context sentinel.
   __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
@@ -1515,7 +1539,7 @@
   __ IncrementCounter(masm->isolate()->counters()->arguments_adaptors(), 1);
 
   Label enough, too_few;
-  __ cmp(eax, Operand(ebx));
+  __ cmp(eax, ebx);
   __ j(less, &too_few);
   __ cmp(ebx, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
   __ j(equal, &dont_adapt_arguments);
@@ -1533,8 +1557,8 @@
     __ bind(&copy);
     __ inc(edi);
     __ push(Operand(eax, 0));
-    __ sub(Operand(eax), Immediate(kPointerSize));
-    __ cmp(edi, Operand(ebx));
+    __ sub(eax, Immediate(kPointerSize));
+    __ cmp(edi, ebx);
     __ j(less, &copy);
     __ jmp(&invoke);
   }
@@ -1547,17 +1571,17 @@
     const int offset = StandardFrameConstants::kCallerSPOffset;
     __ lea(edi, Operand(ebp, eax, times_4, offset));
     // ebx = expected - actual.
-    __ sub(ebx, Operand(eax));
+    __ sub(ebx, eax);
     // eax = -actual - 1
     __ neg(eax);
-    __ sub(Operand(eax), Immediate(1));
+    __ sub(eax, Immediate(1));
 
     Label copy;
     __ bind(&copy);
     __ inc(eax);
     __ push(Operand(edi, 0));
-    __ sub(Operand(edi), Immediate(kPointerSize));
-    __ test(eax, Operand(eax));
+    __ sub(edi, Immediate(kPointerSize));
+    __ test(eax, eax);
     __ j(not_zero, &copy);
 
     // Fill remaining expected arguments with undefined values.
@@ -1565,7 +1589,7 @@
     __ bind(&fill);
     __ inc(eax);
     __ push(Immediate(masm->isolate()->factory()->undefined_value()));
-    __ cmp(eax, Operand(ebx));
+    __ cmp(eax, ebx);
     __ j(less, &fill);
   }
 
@@ -1573,7 +1597,7 @@
   __ bind(&invoke);
   // Restore function pointer.
   __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-  __ call(Operand(edx));
+  __ call(edx);
 
   // Leave frame and return.
   LeaveArgumentsAdaptorFrame(masm);
@@ -1583,13 +1607,13 @@
   // Dont adapt arguments.
   // -------------------------------------------
   __ bind(&dont_adapt_arguments);
-  __ jmp(Operand(edx));
+  __ jmp(edx);
 }
 
 
 void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
   CpuFeatures::TryForceFeatureScope scope(SSE2);
-  if (!CpuFeatures::IsSupported(SSE2)) {
+  if (!CpuFeatures::IsSupported(SSE2) && FLAG_debug_code) {
     __ Abort("Unreachable code: Cannot optimize without SSE2 support.");
     return;
   }
@@ -1616,15 +1640,16 @@
 
   // Pass the function to optimize as the argument to the on-stack
   // replacement runtime function.
-  __ EnterInternalFrame();
-  __ push(eax);
-  __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(eax);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+  }
 
   // If the result was -1 it means that we couldn't optimize the
   // function. Just return and continue in the unoptimized version.
   Label skip;
-  __ cmp(Operand(eax), Immediate(Smi::FromInt(-1)));
+  __ cmp(eax, Immediate(Smi::FromInt(-1)));
   __ j(not_equal, &skip, Label::kNear);
   __ ret(0);
 
@@ -1638,7 +1663,9 @@
   __ j(above_equal, &ok, Label::kNear);
   StackCheckStub stub;
   __ TailCallStub(&stub);
-  __ Abort("Unreachable code: returned from tail call.");
+  if (FLAG_debug_code) {
+    __ Abort("Unreachable code: returned from tail call.");
+  }
   __ bind(&ok);
   __ ret(0);
 
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 1009aaf..76089dc 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -49,7 +49,7 @@
   __ bind(&check_heap_number);
   __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
   Factory* factory = masm->isolate()->factory();
-  __ cmp(Operand(ebx), Immediate(factory->heap_number_map()));
+  __ cmp(ebx, Immediate(factory->heap_number_map()));
   __ j(not_equal, &call_builtin, Label::kNear);
   __ ret(0);
 
@@ -150,7 +150,7 @@
   }
 
   // Return and remove the on-stack parameter.
-  __ mov(esi, Operand(eax));
+  __ mov(esi, eax);
   __ ret(1 * kPointerSize);
 
   // Need to collect. Call into runtime system.
@@ -239,6 +239,8 @@
 // The stub expects its argument on the stack and returns its result in tos_:
 // zero for false, and a non-zero value for true.
 void ToBooleanStub::Generate(MacroAssembler* masm) {
+  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
+  // we cannot call anything that could cause a GC from this stub.
   Label patch;
   Factory* factory = masm->isolate()->factory();
   const Register argument = eax;
@@ -336,6 +338,41 @@
 }
 
 
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+  // We don't allow a GC during a store buffer overflow so there is no need to
+  // store the registers in any particular way, but we do have to store and
+  // restore them.
+  __ pushad();
+  if (save_doubles_ == kSaveFPRegs) {
+    CpuFeatures::Scope scope(SSE2);
+    __ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+      XMMRegister reg = XMMRegister::from_code(i);
+      __ movdbl(Operand(esp, i * kDoubleSize), reg);
+    }
+  }
+  const int argument_count = 1;
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  __ PrepareCallCFunction(argument_count, ecx);
+  __ mov(Operand(esp, 0 * kPointerSize),
+         Immediate(ExternalReference::isolate_address()));
+  __ CallCFunction(
+      ExternalReference::store_buffer_overflow_function(masm->isolate()),
+      argument_count);
+  if (save_doubles_ == kSaveFPRegs) {
+    CpuFeatures::Scope scope(SSE2);
+    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+      XMMRegister reg = XMMRegister::from_code(i);
+      __ movdbl(reg, Operand(esp, i * kDoubleSize));
+    }
+    __ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+  }
+  __ popad();
+  __ ret(0);
+}
+
+
 void ToBooleanStub::CheckOddball(MacroAssembler* masm,
                                  Type type,
                                  Heap::RootListIndex value,
@@ -470,27 +507,27 @@
     // Check whether the exponent is too big for a 64 bit signed integer.
     static const uint32_t kTooBigExponent =
         (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
-    __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
+    __ cmp(scratch2, Immediate(kTooBigExponent));
     __ j(greater_equal, conversion_failure);
     // Load x87 register with heap number.
     __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
     // Reserve space for 64 bit answer.
-    __ sub(Operand(esp), Immediate(sizeof(uint64_t)));  // Nolint.
+    __ sub(esp, Immediate(sizeof(uint64_t)));  // Nolint.
     // Do conversion, which cannot fail because we checked the exponent.
     __ fisttp_d(Operand(esp, 0));
     __ mov(ecx, Operand(esp, 0));  // Load low word of answer into ecx.
-    __ add(Operand(esp), Immediate(sizeof(uint64_t)));  // Nolint.
+    __ add(esp, Immediate(sizeof(uint64_t)));  // Nolint.
   } else {
     // Load ecx with zero.  We use this either for the final shift or
     // for the answer.
-    __ xor_(ecx, Operand(ecx));
+    __ xor_(ecx, ecx);
     // Check whether the exponent matches a 32 bit signed int that cannot be
     // represented by a Smi.  A non-smi 32 bit integer is 1.xxx * 2^30 so the
     // exponent is 30 (biased).  This is the exponent that we are fastest at and
     // also the highest exponent we can handle here.
     const uint32_t non_smi_exponent =
         (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
-    __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
+    __ cmp(scratch2, Immediate(non_smi_exponent));
     // If we have a match of the int32-but-not-Smi exponent then skip some
     // logic.
     __ j(equal, &right_exponent, Label::kNear);
@@ -503,7 +540,7 @@
       // >>> operator has a tendency to generate numbers with an exponent of 31.
       const uint32_t big_non_smi_exponent =
           (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
-      __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent));
+      __ cmp(scratch2, Immediate(big_non_smi_exponent));
       __ j(not_equal, conversion_failure);
       // We have the big exponent, typically from >>>.  This means the number is
       // in the range 2^31 to 2^32 - 1.  Get the top bits of the mantissa.
@@ -522,9 +559,9 @@
       // Shift down 21 bits to get the most significant 11 bits or the low
       // mantissa word.
       __ shr(ecx, 32 - big_shift_distance);
-      __ or_(ecx, Operand(scratch2));
+      __ or_(ecx, scratch2);
       // We have the answer in ecx, but we may need to negate it.
-      __ test(scratch, Operand(scratch));
+      __ test(scratch, scratch);
       __ j(positive, &done, Label::kNear);
       __ neg(ecx);
       __ jmp(&done, Label::kNear);
@@ -538,14 +575,14 @@
     // it rounds to zero.
     const uint32_t zero_exponent =
         (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
-    __ sub(Operand(scratch2), Immediate(zero_exponent));
+    __ sub(scratch2, Immediate(zero_exponent));
     // ecx already has a Smi zero.
     __ j(less, &done, Label::kNear);
 
     // We have a shifted exponent between 0 and 30 in scratch2.
     __ shr(scratch2, HeapNumber::kExponentShift);
     __ mov(ecx, Immediate(30));
-    __ sub(ecx, Operand(scratch2));
+    __ sub(ecx, scratch2);
 
     __ bind(&right_exponent);
     // Here ecx is the shift, scratch is the exponent word.
@@ -565,19 +602,19 @@
     // Shift down 22 bits to get the most significant 10 bits or the low
     // mantissa word.
     __ shr(scratch2, 32 - shift_distance);
-    __ or_(scratch2, Operand(scratch));
+    __ or_(scratch2, scratch);
     // Move down according to the exponent.
     __ shr_cl(scratch2);
     // Now the unsigned answer is in scratch2.  We need to move it to ecx and
     // we may need to fix the sign.
     Label negative;
-    __ xor_(ecx, Operand(ecx));
+    __ xor_(ecx, ecx);
     __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
     __ j(greater, &negative, Label::kNear);
     __ mov(ecx, scratch2);
     __ jmp(&done, Label::kNear);
     __ bind(&negative);
-    __ sub(ecx, Operand(scratch2));
+    __ sub(ecx, scratch2);
     __ bind(&done);
   }
 }
@@ -679,13 +716,13 @@
   __ JumpIfNotSmi(eax, non_smi, non_smi_near);
 
   // We can't handle -0 with smis, so use a type transition for that case.
-  __ test(eax, Operand(eax));
+  __ test(eax, eax);
   __ j(zero, slow, slow_near);
 
   // Try optimistic subtraction '0 - value', saving operand in eax for undo.
-  __ mov(edx, Operand(eax));
+  __ mov(edx, eax);
   __ Set(eax, Immediate(0));
-  __ sub(eax, Operand(edx));
+  __ sub(eax, edx);
   __ j(overflow, undo, undo_near);
   __ ret(0);
 }
@@ -706,7 +743,7 @@
 
 
 void UnaryOpStub::GenerateSmiCodeUndo(MacroAssembler* masm) {
-  __ mov(eax, Operand(edx));
+  __ mov(eax, edx);
 }
 
 
@@ -760,7 +797,7 @@
     __ xor_(FieldOperand(eax, HeapNumber::kExponentOffset),
             Immediate(HeapNumber::kSignMask));  // Flip sign.
   } else {
-    __ mov(edx, Operand(eax));
+    __ mov(edx, eax);
     // edx: operand
 
     Label slow_allocate_heapnumber, heapnumber_allocated;
@@ -768,11 +805,12 @@
     __ jmp(&heapnumber_allocated, Label::kNear);
 
     __ bind(&slow_allocate_heapnumber);
-    __ EnterInternalFrame();
-    __ push(edx);
-    __ CallRuntime(Runtime::kNumberAlloc, 0);
-    __ pop(edx);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(edx);
+      __ CallRuntime(Runtime::kNumberAlloc, 0);
+      __ pop(edx);
+    }
 
     __ bind(&heapnumber_allocated);
     // eax: allocated 'empty' number
@@ -815,15 +853,16 @@
     __ jmp(&heapnumber_allocated);
 
     __ bind(&slow_allocate_heapnumber);
-    __ EnterInternalFrame();
-    // Push the original HeapNumber on the stack. The integer value can't
-    // be stored since it's untagged and not in the smi range (so we can't
-    // smi-tag it). We'll recalculate the value after the GC instead.
-    __ push(ebx);
-    __ CallRuntime(Runtime::kNumberAlloc, 0);
-    // New HeapNumber is in eax.
-    __ pop(edx);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      // Push the original HeapNumber on the stack. The integer value can't
+      // be stored since it's untagged and not in the smi range (so we can't
+      // smi-tag it). We'll recalculate the value after the GC instead.
+      __ push(ebx);
+      __ CallRuntime(Runtime::kNumberAlloc, 0);
+      // New HeapNumber is in eax.
+      __ pop(edx);
+    }
     // IntegerConvert uses ebx and edi as scratch registers.
     // This conversion won't go slow-case.
     IntegerConvert(masm, edx, CpuFeatures::IsSupported(SSE3), slow);
@@ -833,7 +872,7 @@
   }
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatures::Scope use_sse2(SSE2);
-    __ cvtsi2sd(xmm0, Operand(ecx));
+    __ cvtsi2sd(xmm0, ecx);
     __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
   } else {
     __ push(ecx);
@@ -947,6 +986,10 @@
 
 
 void BinaryOpStub::Generate(MacroAssembler* masm) {
+  // Explicitly allow generation of nested stubs. It is safe here because
+  // generation code does not use any raw pointers.
+  AllowStubCallsScope allow_stub_calls(masm, true);
+
   switch (operands_type_) {
     case BinaryOpIC::UNINITIALIZED:
       GenerateTypeTransition(masm);
@@ -1022,7 +1065,7 @@
       // eax in case the result is not a smi.
       ASSERT(!left.is(ecx) && !right.is(ecx));
       __ mov(ecx, right);
-      __ or_(right, Operand(left));  // Bitwise or is commutative.
+      __ or_(right, left);  // Bitwise or is commutative.
       combined = right;
       break;
 
@@ -1034,7 +1077,7 @@
     case Token::DIV:
     case Token::MOD:
       __ mov(combined, right);
-      __ or_(combined, Operand(left));
+      __ or_(combined, left);
       break;
 
     case Token::SHL:
@@ -1044,7 +1087,7 @@
       // for the smi check register.
       ASSERT(!left.is(ecx) && !right.is(ecx));
       __ mov(ecx, right);
-      __ or_(right, Operand(left));
+      __ or_(right, left);
       combined = right;
       break;
 
@@ -1067,12 +1110,12 @@
 
     case Token::BIT_XOR:
       ASSERT(right.is(eax));
-      __ xor_(right, Operand(left));  // Bitwise xor is commutative.
+      __ xor_(right, left);  // Bitwise xor is commutative.
       break;
 
     case Token::BIT_AND:
       ASSERT(right.is(eax));
-      __ and_(right, Operand(left));  // Bitwise and is commutative.
+      __ and_(right, left);  // Bitwise and is commutative.
       break;
 
     case Token::SHL:
@@ -1121,12 +1164,12 @@
 
     case Token::ADD:
       ASSERT(right.is(eax));
-      __ add(right, Operand(left));  // Addition is commutative.
+      __ add(right, left);  // Addition is commutative.
       __ j(overflow, &use_fp_on_smis);
       break;
 
     case Token::SUB:
-      __ sub(left, Operand(right));
+      __ sub(left, right);
       __ j(overflow, &use_fp_on_smis);
       __ mov(eax, left);
       break;
@@ -1140,7 +1183,7 @@
       // Remove tag from one of the operands (but keep sign).
       __ SmiUntag(right);
       // Do multiplication.
-      __ imul(right, Operand(left));  // Multiplication is commutative.
+      __ imul(right, left);  // Multiplication is commutative.
       __ j(overflow, &use_fp_on_smis);
       // Check for negative zero result.  Use combined = left | right.
       __ NegativeZeroTest(right, combined, &use_fp_on_smis);
@@ -1151,7 +1194,7 @@
       // save the left operand.
       __ mov(edi, left);
       // Check for 0 divisor.
-      __ test(right, Operand(right));
+      __ test(right, right);
       __ j(zero, &use_fp_on_smis);
       // Sign extend left into edx:eax.
       ASSERT(left.is(eax));
@@ -1167,7 +1210,7 @@
       // Check for negative zero result.  Use combined = left | right.
       __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
       // Check that the remainder is zero.
-      __ test(edx, Operand(edx));
+      __ test(edx, edx);
       __ j(not_zero, &use_fp_on_smis);
       // Tag the result and store it in register eax.
       __ SmiTag(eax);
@@ -1175,7 +1218,7 @@
 
     case Token::MOD:
       // Check for 0 divisor.
-      __ test(right, Operand(right));
+      __ test(right, right);
       __ j(zero, &not_smis);
 
       // Sign extend left into edx:eax.
@@ -1226,11 +1269,11 @@
         break;
       case Token::ADD:
         // Revert right = right + left.
-        __ sub(right, Operand(left));
+        __ sub(right, left);
         break;
       case Token::SUB:
         // Revert left = left - right.
-        __ add(left, Operand(right));
+        __ add(left, right);
         break;
       case Token::MUL:
         // Right was clobbered but a copy is in ebx.
@@ -1268,7 +1311,7 @@
           ASSERT_EQ(Token::SHL, op_);
           if (CpuFeatures::IsSupported(SSE2)) {
             CpuFeatures::Scope use_sse2(SSE2);
-            __ cvtsi2sd(xmm0, Operand(left));
+            __ cvtsi2sd(xmm0, left);
             __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
           } else {
             __ mov(Operand(esp, 1 * kPointerSize), left);
@@ -1290,11 +1333,11 @@
         switch (op_) {
           case Token::ADD:
             // Revert right = right + left.
-            __ sub(right, Operand(left));
+            __ sub(right, left);
             break;
           case Token::SUB:
             // Revert left = left - right.
-            __ add(left, Operand(right));
+            __ add(left, right);
             break;
           case Token::MUL:
             // Right was clobbered but a copy is in ebx.
@@ -1486,7 +1529,7 @@
         // Check result type if it is currently Int32.
         if (result_type_ <= BinaryOpIC::INT32) {
           __ cvttsd2si(ecx, Operand(xmm0));
-          __ cvtsi2sd(xmm2, Operand(ecx));
+          __ cvtsi2sd(xmm2, ecx);
           __ ucomisd(xmm0, xmm2);
           __ j(not_zero, &not_int32);
           __ j(carry, &not_int32);
@@ -1548,9 +1591,9 @@
       FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_,
                                                         &not_int32);
       switch (op_) {
-        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
-        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
-        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+        case Token::BIT_OR:  __ or_(eax, ecx); break;
+        case Token::BIT_AND: __ and_(eax, ecx); break;
+        case Token::BIT_XOR: __ xor_(eax, ecx); break;
         case Token::SAR: __ sar_cl(eax); break;
         case Token::SHL: __ shl_cl(eax); break;
         case Token::SHR: __ shr_cl(eax); break;
@@ -1574,7 +1617,7 @@
       if (op_ != Token::SHR) {
         __ bind(&non_smi_result);
         // Allocate a heap number if needed.
-        __ mov(ebx, Operand(eax));  // ebx: result
+        __ mov(ebx, eax);  // ebx: result
         Label skip_allocation;
         switch (mode_) {
           case OVERWRITE_LEFT:
@@ -1594,7 +1637,7 @@
         // Store the result in the HeapNumber and return.
         if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatures::Scope use_sse2(SSE2);
-          __ cvtsi2sd(xmm0, Operand(ebx));
+          __ cvtsi2sd(xmm0, ebx);
           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
         } else {
           __ mov(Operand(esp, 1 * kPointerSize), ebx);
@@ -1675,7 +1718,7 @@
   __ cmp(edx, factory->undefined_value());
   __ j(not_equal, &check, Label::kNear);
   if (Token::IsBitOp(op_)) {
-    __ xor_(edx, Operand(edx));
+    __ xor_(edx, edx);
   } else {
     __ mov(edx, Immediate(factory->nan_value()));
   }
@@ -1684,7 +1727,7 @@
   __ cmp(eax, factory->undefined_value());
   __ j(not_equal, &done, Label::kNear);
   if (Token::IsBitOp(op_)) {
-    __ xor_(eax, Operand(eax));
+    __ xor_(eax, eax);
   } else {
     __ mov(eax, Immediate(factory->nan_value()));
   }
@@ -1762,9 +1805,9 @@
                                                   use_sse3_,
                                                   &not_floats);
       switch (op_) {
-        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
-        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
-        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+        case Token::BIT_OR:  __ or_(eax, ecx); break;
+        case Token::BIT_AND: __ and_(eax, ecx); break;
+        case Token::BIT_XOR: __ xor_(eax, ecx); break;
         case Token::SAR: __ sar_cl(eax); break;
         case Token::SHL: __ shl_cl(eax); break;
         case Token::SHR: __ shr_cl(eax); break;
@@ -1788,7 +1831,7 @@
       if (op_ != Token::SHR) {
         __ bind(&non_smi_result);
         // Allocate a heap number if needed.
-        __ mov(ebx, Operand(eax));  // ebx: result
+        __ mov(ebx, eax);  // ebx: result
         Label skip_allocation;
         switch (mode_) {
           case OVERWRITE_LEFT:
@@ -1808,7 +1851,7 @@
         // Store the result in the HeapNumber and return.
         if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatures::Scope use_sse2(SSE2);
-          __ cvtsi2sd(xmm0, Operand(ebx));
+          __ cvtsi2sd(xmm0, ebx);
           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
         } else {
           __ mov(Operand(esp, 1 * kPointerSize), ebx);
@@ -1961,9 +2004,9 @@
                                                   use_sse3_,
                                                   &call_runtime);
       switch (op_) {
-        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
-        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
-        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+        case Token::BIT_OR:  __ or_(eax, ecx); break;
+        case Token::BIT_AND: __ and_(eax, ecx); break;
+        case Token::BIT_XOR: __ xor_(eax, ecx); break;
         case Token::SAR: __ sar_cl(eax); break;
         case Token::SHL: __ shl_cl(eax); break;
         case Token::SHR: __ shr_cl(eax); break;
@@ -1987,7 +2030,7 @@
       if (op_ != Token::SHR) {
         __ bind(&non_smi_result);
         // Allocate a heap number if needed.
-        __ mov(ebx, Operand(eax));  // ebx: result
+        __ mov(ebx, eax);  // ebx: result
         Label skip_allocation;
         switch (mode_) {
           case OVERWRITE_LEFT:
@@ -2007,7 +2050,7 @@
         // Store the result in the HeapNumber and return.
         if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatures::Scope use_sse2(SSE2);
-          __ cvtsi2sd(xmm0, Operand(ebx));
+          __ cvtsi2sd(xmm0, ebx);
           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
         } else {
           __ mov(Operand(esp, 1 * kPointerSize), ebx);
@@ -2117,10 +2160,10 @@
       __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
       // Now edx can be overwritten losing one of the arguments as we are
       // now done and will not need it any more.
-      __ mov(edx, Operand(ebx));
+      __ mov(edx, ebx);
       __ bind(&skip_allocation);
       // Use object in edx as a result holder
-      __ mov(eax, Operand(edx));
+      __ mov(eax, edx);
       break;
     }
     case OVERWRITE_RIGHT:
@@ -2178,7 +2221,7 @@
     // Then load the low and high words of the double into ebx, edx.
     STATIC_ASSERT(kSmiTagSize == 1);
     __ sar(eax, 1);
-    __ sub(Operand(esp), Immediate(2 * kPointerSize));
+    __ sub(esp, Immediate(2 * kPointerSize));
     __ mov(Operand(esp, 0), eax);
     __ fild_s(Operand(esp, 0));
     __ fst_d(Operand(esp, 0));
@@ -2189,7 +2232,7 @@
     // Check if input is a HeapNumber.
     __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
     Factory* factory = masm->isolate()->factory();
-    __ cmp(Operand(ebx), Immediate(factory->heap_number_map()));
+    __ cmp(ebx, Immediate(factory->heap_number_map()));
     __ j(not_equal, &runtime_call);
     // Input is a HeapNumber. Push it on the FPU stack and load its
     // low and high words into ebx, edx.
@@ -2201,12 +2244,12 @@
   } else {  // UNTAGGED.
     if (CpuFeatures::IsSupported(SSE4_1)) {
       CpuFeatures::Scope sse4_scope(SSE4_1);
-      __ pextrd(Operand(edx), xmm1, 0x1);  // copy xmm1[63..32] to edx.
+      __ pextrd(edx, xmm1, 0x1);  // copy xmm1[63..32] to edx.
     } else {
       __ pshufd(xmm0, xmm1, 0x1);
-      __ movd(Operand(edx), xmm0);
+      __ movd(edx, xmm0);
     }
-    __ movd(Operand(ebx), xmm1);
+    __ movd(ebx, xmm1);
   }
 
   // ST[0] or xmm1  == double value
@@ -2215,15 +2258,15 @@
   // Compute hash (the shifts are arithmetic):
   //   h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
   __ mov(ecx, ebx);
-  __ xor_(ecx, Operand(edx));
+  __ xor_(ecx, edx);
   __ mov(eax, ecx);
   __ sar(eax, 16);
-  __ xor_(ecx, Operand(eax));
+  __ xor_(ecx, eax);
   __ mov(eax, ecx);
   __ sar(eax, 8);
-  __ xor_(ecx, Operand(eax));
+  __ xor_(ecx, eax);
   ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
-  __ and_(Operand(ecx),
+  __ and_(ecx,
           Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
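For reference, the cache-index computation above can be written as the following C++ sketch (illustration only; cache_size stands for TranscendentalCache::SubCache::kCacheSize, which the ASSERT above requires to be a power of two):

    #include <cstdint>

    // Mirrors the generated code: hash the two 32-bit halves of the double
    // and mask the result down to a cache slot.  The generated code uses
    // arithmetic right shifts (sar); signed >> is arithmetic on the
    // compilers that matter here.
    static inline int TranscendentalCacheIndex(uint32_t low, uint32_t high,
                                               int cache_size) {
      int32_t h = static_cast<int32_t>(low ^ high);
      h ^= h >> 16;
      h ^= h >> 8;
      return h & (cache_size - 1);
    }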
 
   // ST[0] or xmm1 == double value.
@@ -2238,7 +2281,7 @@
   __ mov(eax, Operand(eax, cache_array_index));
   // Eax points to the cache for the type type_.
   // If NULL, the cache hasn't been initialized yet, so go through runtime.
-  __ test(eax, Operand(eax));
+  __ test(eax, eax);
   __ j(zero, &runtime_call_clear_stack);
 #ifdef DEBUG
   // Check that the layout of cache elements match expectations.
@@ -2281,10 +2324,10 @@
     __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
   } else {  // UNTAGGED.
     __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
-    __ sub(Operand(esp), Immediate(kDoubleSize));
+    __ sub(esp, Immediate(kDoubleSize));
     __ movdbl(Operand(esp, 0), xmm1);
     __ fld_d(Operand(esp, 0));
-    __ add(Operand(esp), Immediate(kDoubleSize));
+    __ add(esp, Immediate(kDoubleSize));
   }
   GenerateOperation(masm);
   __ mov(Operand(ecx, 0), ebx);
@@ -2299,20 +2342,21 @@
 
     // Skip cache and return answer directly, only in untagged case.
     __ bind(&skip_cache);
-    __ sub(Operand(esp), Immediate(kDoubleSize));
+    __ sub(esp, Immediate(kDoubleSize));
     __ movdbl(Operand(esp, 0), xmm1);
     __ fld_d(Operand(esp, 0));
     GenerateOperation(masm);
     __ fstp_d(Operand(esp, 0));
     __ movdbl(xmm1, Operand(esp, 0));
-    __ add(Operand(esp), Immediate(kDoubleSize));
+    __ add(esp, Immediate(kDoubleSize));
     // We return the value in xmm1 without adding it to the cache, but
     // we cause a scavenging GC so that future allocations will succeed.
-    __ EnterInternalFrame();
-    // Allocate an unused object bigger than a HeapNumber.
-    __ push(Immediate(Smi::FromInt(2 * kDoubleSize)));
-    __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      // Allocate an unused object bigger than a HeapNumber.
+      __ push(Immediate(Smi::FromInt(2 * kDoubleSize)));
+      __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+    }
     __ Ret();
   }
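The explicit EnterInternalFrame/LeaveInternalFrame pairs here (and in the runtime_call path below) are replaced by a scoped FrameScope, so the frame is torn down when the C++ block ends instead of by a second explicit call. A minimal sketch of the RAII idea behind it, using hypothetical stand-in types rather than V8's actual FrameScope:

    #include <cstdio>

    // Hypothetical stand-in for the macro assembler's frame bookkeeping.
    struct FakeMasm {
      void EnterInternalFrame() { std::printf("enter internal frame\n"); }
      void LeaveInternalFrame() { std::printf("leave internal frame\n"); }
    };

    // The destructor guarantees the frame is left when the scope closes,
    // which is what the FrameScope blocks introduced in this patch rely on.
    class ScopedInternalFrame {
     public:
      explicit ScopedInternalFrame(FakeMasm* masm) : masm_(masm) {
        masm_->EnterInternalFrame();
      }
      ~ScopedInternalFrame() { masm_->LeaveInternalFrame(); }
     private:
      FakeMasm* masm_;
    };

    int main() {
      FakeMasm masm;
      {
        ScopedInternalFrame scope(&masm);
        std::printf("emit code that runs inside the frame\n");
      }  // frame left automatically here
      return 0;
    }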
 
@@ -2329,10 +2373,11 @@
     __ bind(&runtime_call);
     __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
     __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
-    __ EnterInternalFrame();
-    __ push(eax);
-    __ CallRuntime(RuntimeFunction(), 1);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(eax);
+      __ CallRuntime(RuntimeFunction(), 1);
+    }
     __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
     __ Ret();
   }
@@ -2364,13 +2409,13 @@
     // If argument is outside the range -2^63..2^63, fsin/cos doesn't
     // work. We must reduce it to the appropriate range.
     __ mov(edi, edx);
-    __ and_(Operand(edi), Immediate(0x7ff00000));  // Exponent only.
+    __ and_(edi, Immediate(0x7ff00000));  // Exponent only.
     int supported_exponent_limit =
         (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
-    __ cmp(Operand(edi), Immediate(supported_exponent_limit));
+    __ cmp(edi, Immediate(supported_exponent_limit));
     __ j(below, &in_range, Label::kNear);
     // Check for infinity and NaN. Both return NaN for sin.
-    __ cmp(Operand(edi), Immediate(0x7ff00000));
+    __ cmp(edi, Immediate(0x7ff00000));
     Label non_nan_result;
     __ j(not_equal, &non_nan_result, Label::kNear);
     // Input is +/-Infinity or NaN. Result is NaN.
@@ -2379,7 +2424,7 @@
     __ push(Immediate(0x7ff80000));
     __ push(Immediate(0));
     __ fld_d(Operand(esp, 0));
-    __ add(Operand(esp), Immediate(2 * kPointerSize));
+    __ add(esp, Immediate(2 * kPointerSize));
     __ jmp(&done, Label::kNear);
 
     __ bind(&non_nan_result);
@@ -2395,7 +2440,7 @@
       __ fwait();
       __ fnstsw_ax();
       // Clear if Illegal Operand or Zero Division exceptions are set.
-      __ test(Operand(eax), Immediate(5));
+      __ test(eax, Immediate(5));
       __ j(zero, &no_exceptions, Label::kNear);
       __ fnclex();
       __ bind(&no_exceptions);
@@ -2408,7 +2453,7 @@
       __ fprem1();
       __ fwait();
       __ fnstsw_ax();
-      __ test(Operand(eax), Immediate(0x400 /* C2 */));
+      __ test(eax, Immediate(0x400 /* C2 */));
       // If C2 is set, computation only has partial result. Loop to
       // continue computation.
       __ j(not_zero, &partial_remainder_loop);
@@ -2541,13 +2586,13 @@
 
   __ bind(&load_smi_edx);
   __ SmiUntag(edx);  // Untag smi before converting to float.
-  __ cvtsi2sd(xmm0, Operand(edx));
+  __ cvtsi2sd(xmm0, edx);
   __ SmiTag(edx);  // Retag smi for heap number overwriting test.
   __ jmp(&load_eax);
 
   __ bind(&load_smi_eax);
   __ SmiUntag(eax);  // Untag smi before converting to float.
-  __ cvtsi2sd(xmm1, Operand(eax));
+  __ cvtsi2sd(xmm1, eax);
   __ SmiTag(eax);  // Retag smi for heap number overwriting test.
 
   __ bind(&done);
@@ -2571,12 +2616,12 @@
   __ jmp(not_numbers);  // Argument in eax is not a number.
   __ bind(&load_smi_edx);
   __ SmiUntag(edx);  // Untag smi before converting to float.
-  __ cvtsi2sd(xmm0, Operand(edx));
+  __ cvtsi2sd(xmm0, edx);
   __ SmiTag(edx);  // Retag smi for heap number overwriting test.
   __ jmp(&load_eax);
   __ bind(&load_smi_eax);
   __ SmiUntag(eax);  // Untag smi before converting to float.
-  __ cvtsi2sd(xmm1, Operand(eax));
+  __ cvtsi2sd(xmm1, eax);
   __ SmiTag(eax);  // Retag smi for heap number overwriting test.
   __ jmp(&done, Label::kNear);
   __ bind(&load_float_eax);
@@ -2592,11 +2637,11 @@
   __ mov(scratch, left);
   ASSERT(!scratch.is(right));  // We're about to clobber scratch.
   __ SmiUntag(scratch);
-  __ cvtsi2sd(xmm0, Operand(scratch));
+  __ cvtsi2sd(xmm0, scratch);
 
   __ mov(scratch, right);
   __ SmiUntag(scratch);
-  __ cvtsi2sd(xmm1, Operand(scratch));
+  __ cvtsi2sd(xmm1, scratch);
 }
 
 
@@ -2604,12 +2649,12 @@
                                                     Label* non_int32,
                                                     Register scratch) {
   __ cvttsd2si(scratch, Operand(xmm0));
-  __ cvtsi2sd(xmm2, Operand(scratch));
+  __ cvtsi2sd(xmm2, scratch);
   __ ucomisd(xmm0, xmm2);
   __ j(not_zero, non_int32);
   __ j(carry, non_int32);
   __ cvttsd2si(scratch, Operand(xmm1));
-  __ cvtsi2sd(xmm2, Operand(scratch));
+  __ cvtsi2sd(xmm2, scratch);
   __ ucomisd(xmm1, xmm2);
   __ j(not_zero, non_int32);
   __ j(carry, non_int32);
@@ -2717,7 +2762,7 @@
 
   // Save 1 in xmm3 - we need this several times later on.
   __ mov(ecx, Immediate(1));
-  __ cvtsi2sd(xmm3, Operand(ecx));
+  __ cvtsi2sd(xmm3, ecx);
 
   Label exponent_nonsmi;
   Label base_nonsmi;
@@ -2728,7 +2773,7 @@
   // Optimized version when both exponent and base are smis.
   Label powi;
   __ SmiUntag(edx);
-  __ cvtsi2sd(xmm0, Operand(edx));
+  __ cvtsi2sd(xmm0, edx);
   __ jmp(&powi);
   // exponent is smi and base is a heapnumber.
   __ bind(&base_nonsmi);
@@ -2770,11 +2815,11 @@
 
   // base has the original value of the exponent - if the exponent  is
   // negative return 1/result.
-  __ test(edx, Operand(edx));
+  __ test(edx, edx);
   __ j(positive, &allocate_return);
   // Special case if xmm1 has reached infinity.
   __ mov(ecx, Immediate(0x7FB00000));
-  __ movd(xmm0, Operand(ecx));
+  __ movd(xmm0, ecx);
   __ cvtss2sd(xmm0, xmm0);
   __ ucomisd(xmm0, xmm1);
   __ j(equal, &call_runtime);
@@ -2797,7 +2842,7 @@
   Label handle_special_cases;
   __ JumpIfNotSmi(edx, &base_not_smi, Label::kNear);
   __ SmiUntag(edx);
-  __ cvtsi2sd(xmm0, Operand(edx));
+  __ cvtsi2sd(xmm0, edx);
   __ jmp(&handle_special_cases, Label::kNear);
 
   __ bind(&base_not_smi);
@@ -2806,7 +2851,7 @@
   __ j(not_equal, &call_runtime);
   __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
   __ and_(ecx, HeapNumber::kExponentMask);
-  __ cmp(Operand(ecx), Immediate(HeapNumber::kExponentMask));
+  __ cmp(ecx, Immediate(HeapNumber::kExponentMask));
   // base is NaN or +/-Infinity
   __ j(greater_equal, &call_runtime);
   __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
@@ -2817,7 +2862,7 @@
   // Test for -0.5.
   // Load xmm2 with -0.5.
   __ mov(ecx, Immediate(0xBF000000));
-  __ movd(xmm2, Operand(ecx));
+  __ movd(xmm2, ecx);
   __ cvtss2sd(xmm2, xmm2);
   // xmm2 now has -0.5.
   __ ucomisd(xmm2, xmm1);
@@ -2873,13 +2918,13 @@
   Label adaptor;
   __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
-  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(equal, &adaptor, Label::kNear);
 
   // Check index against formal parameters count limit passed in
   // through register eax. Use unsigned comparison to get negative
   // check for free.
-  __ cmp(edx, Operand(eax));
+  __ cmp(edx, eax);
   __ j(above_equal, &slow, Label::kNear);
 
   // Read the argument from the stack and return it.
@@ -2895,7 +2940,7 @@
   // comparison to get negative check for free.
   __ bind(&adaptor);
   __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ cmp(edx, Operand(ecx));
+  __ cmp(edx, ecx);
   __ j(above_equal, &slow, Label::kNear);
 
   // Read the argument from the stack and return it.
@@ -2926,7 +2971,7 @@
   Label runtime;
   __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
-  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(not_equal, &runtime, Label::kNear);
 
   // Patch the arguments.length and the parameters pointer.
@@ -2957,7 +3002,7 @@
   Label adaptor_frame, try_allocate;
   __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
-  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(equal, &adaptor_frame, Label::kNear);
 
   // No adaptor, parameter count = argument count.
@@ -2976,7 +3021,7 @@
   // esp[4] = parameter count (tagged)
   // esp[8] = address of receiver argument
   // Compute the mapped parameter count = min(ebx, ecx) in ebx.
-  __ cmp(ebx, Operand(ecx));
+  __ cmp(ebx, ecx);
   __ j(less_equal, &try_allocate, Label::kNear);
   __ mov(ebx, ecx);
 
@@ -2990,7 +3035,7 @@
   const int kParameterMapHeaderSize =
       FixedArray::kHeaderSize + 2 * kPointerSize;
   Label no_parameter_map;
-  __ test(ebx, Operand(ebx));
+  __ test(ebx, ebx);
   __ j(zero, &no_parameter_map, Label::kNear);
   __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
   __ bind(&no_parameter_map);
@@ -2999,7 +3044,7 @@
   __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
 
   // 3. Arguments object.
-  __ add(Operand(ebx), Immediate(Heap::kArgumentsObjectSize));
+  __ add(ebx, Immediate(Heap::kArgumentsObjectSize));
 
   // Do the allocation of all three objects in one go.
   __ AllocateInNewSpace(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
@@ -3014,7 +3059,7 @@
   __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
   __ mov(ebx, Operand(esp, 0 * kPointerSize));
-  __ test(ebx, Operand(ebx));
+  __ test(ebx, ebx);
   __ j(not_zero, &has_mapped_parameters, Label::kNear);
   __ mov(edi, Operand(edi,
          Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX)));
@@ -3069,7 +3114,7 @@
 
   // Initialize parameter map. If there are no mapped arguments, we're done.
   Label skip_parameter_map;
-  __ test(ebx, Operand(ebx));
+  __ test(ebx, ebx);
   __ j(zero, &skip_parameter_map);
 
   __ mov(FieldOperand(edi, FixedArray::kMapOffset),
@@ -3093,7 +3138,7 @@
   __ mov(eax, Operand(esp, 2 * kPointerSize));
   __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
   __ add(ebx, Operand(esp, 4 * kPointerSize));
-  __ sub(ebx, Operand(eax));
+  __ sub(ebx, eax);
   __ mov(ecx, FACTORY->the_hole_value());
   __ mov(edx, edi);
   __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
@@ -3110,12 +3155,12 @@
   __ jmp(&parameters_test, Label::kNear);
 
   __ bind(&parameters_loop);
-  __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+  __ sub(eax, Immediate(Smi::FromInt(1)));
   __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
   __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
-  __ add(Operand(ebx), Immediate(Smi::FromInt(1)));
+  __ add(ebx, Immediate(Smi::FromInt(1)));
   __ bind(&parameters_test);
-  __ test(eax, Operand(eax));
+  __ test(eax, eax);
   __ j(not_zero, &parameters_loop, Label::kNear);
   __ pop(ecx);
 
@@ -3135,18 +3180,18 @@
   Label arguments_loop, arguments_test;
   __ mov(ebx, Operand(esp, 1 * kPointerSize));
   __ mov(edx, Operand(esp, 4 * kPointerSize));
-  __ sub(Operand(edx), ebx);  // Is there a smarter way to do negative scaling?
-  __ sub(Operand(edx), ebx);
+  __ sub(edx, ebx);  // Is there a smarter way to do negative scaling?
+  __ sub(edx, ebx);
   __ jmp(&arguments_test, Label::kNear);
 
   __ bind(&arguments_loop);
-  __ sub(Operand(edx), Immediate(kPointerSize));
+  __ sub(edx, Immediate(kPointerSize));
   __ mov(eax, Operand(edx, 0));
   __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
-  __ add(Operand(ebx), Immediate(Smi::FromInt(1)));
+  __ add(ebx, Immediate(Smi::FromInt(1)));
 
   __ bind(&arguments_test);
-  __ cmp(ebx, Operand(ecx));
+  __ cmp(ebx, ecx);
   __ j(less, &arguments_loop, Label::kNear);
 
   // Restore.
@@ -3174,7 +3219,7 @@
   Label adaptor_frame, try_allocate, runtime;
   __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
-  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(equal, &adaptor_frame, Label::kNear);
 
   // Get the length from the frame.
@@ -3193,11 +3238,11 @@
   // the arguments object and the elements array.
   Label add_arguments_object;
   __ bind(&try_allocate);
-  __ test(ecx, Operand(ecx));
+  __ test(ecx, ecx);
   __ j(zero, &add_arguments_object, Label::kNear);
   __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
   __ bind(&add_arguments_object);
-  __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSizeStrict));
+  __ add(ecx, Immediate(Heap::kArgumentsObjectSizeStrict));
 
   // Do the allocation of both objects in one go.
   __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
@@ -3224,7 +3269,7 @@
 
   // If there are no actual arguments, we're done.
   Label done;
-  __ test(ecx, Operand(ecx));
+  __ test(ecx, ecx);
   __ j(zero, &done, Label::kNear);
 
   // Get the parameters pointer from the stack.
@@ -3246,8 +3291,8 @@
   __ bind(&loop);
   __ mov(ebx, Operand(edx, -1 * kPointerSize));  // Skip receiver.
   __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
-  __ add(Operand(edi), Immediate(kPointerSize));
-  __ sub(Operand(edx), Immediate(kPointerSize));
+  __ add(edi, Immediate(kPointerSize));
+  __ sub(edx, Immediate(kPointerSize));
   __ dec(ecx);
   __ j(not_zero, &loop);
 
@@ -3294,7 +3339,7 @@
   ExternalReference address_of_regexp_stack_memory_size =
       ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
   __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
-  __ test(ebx, Operand(ebx));
+  __ test(ebx, ebx);
   __ j(zero, &runtime);
 
   // Check that the first argument is a JSRegExp object.
@@ -3315,7 +3360,7 @@
   // ecx: RegExp data (FixedArray)
   // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
   __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
-  __ cmp(Operand(ebx), Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
+  __ cmp(ebx, Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
   __ j(not_equal, &runtime);
 
   // ecx: RegExp data (FixedArray)
@@ -3325,7 +3370,7 @@
   // uses the assumption that smis are 2 * their untagged value.
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
-  __ add(Operand(edx), Immediate(2));  // edx was a smi.
+  __ add(edx, Immediate(2));  // edx was a smi.
   // Check that the static offsets vector buffer is large enough.
   __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
   __ j(above, &runtime);
@@ -3347,7 +3392,7 @@
   // string length. A negative value will be greater (unsigned comparison).
   __ mov(eax, Operand(esp, kPreviousIndexOffset));
   __ JumpIfNotSmi(eax, &runtime);
-  __ cmp(eax, Operand(ebx));
+  __ cmp(eax, ebx);
   __ j(above_equal, &runtime);
 
   // ecx: RegExp data (FixedArray)
@@ -3367,8 +3412,8 @@
   // additional information.
   __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
   __ SmiUntag(eax);
-  __ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead));
-  __ cmp(edx, Operand(eax));
+  __ add(edx, Immediate(RegExpImpl::kLastMatchOverhead));
+  __ cmp(edx, eax);
   __ j(greater, &runtime);
 
   // Reset offset for possibly sliced string.
@@ -3385,8 +3430,7 @@
   STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
   __ j(zero, &seq_two_byte_string, Label::kNear);
   // Any other flat string must be a flat ascii string.
-  __ and_(Operand(ebx),
-          Immediate(kIsNotStringMask | kStringRepresentationMask));
+  __ and_(ebx, Immediate(kIsNotStringMask | kStringRepresentationMask));
   __ j(zero, &seq_ascii_string, Label::kNear);
 
   // Check for flat cons string or sliced string.
@@ -3398,7 +3442,7 @@
   Label cons_string, check_encoding;
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
-  __ cmp(Operand(ebx), Immediate(kExternalStringTag));
+  __ cmp(ebx, Immediate(kExternalStringTag));
   __ j(less, &cons_string);
   __ j(equal, &runtime);
 
@@ -3504,14 +3548,14 @@
   // Prepare start and end index of the input.
   // Load the length from the original sliced string if that is the case.
   __ mov(esi, FieldOperand(esi, String::kLengthOffset));
-  __ add(esi, Operand(edi));  // Calculate input end wrt offset.
+  __ add(esi, edi);  // Calculate input end wrt offset.
   __ SmiUntag(edi);
-  __ add(ebx, Operand(edi));  // Calculate input start wrt offset.
+  __ add(ebx, edi);  // Calculate input start wrt offset.
 
   // ebx: start index of the input string
   // esi: end index of the input string
   Label setup_two_byte, setup_rest;
-  __ test(ecx, Operand(ecx));
+  __ test(ecx, ecx);
   __ j(zero, &setup_two_byte, Label::kNear);
   __ SmiUntag(esi);
   __ lea(ecx, FieldOperand(eax, esi, times_1, SeqAsciiString::kHeaderSize));
@@ -3531,8 +3575,8 @@
   __ bind(&setup_rest);
 
   // Locate the code entry and call it.
-  __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
-  __ call(Operand(edx));
+  __ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+  __ call(edx);
 
   // Drop arguments and come back to JS mode.
   __ LeaveApiExitFrame();
@@ -3553,11 +3597,9 @@
   // TODO(592): Rerunning the RegExp to get the stack overflow exception.
   ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
                                       masm->isolate());
-  __ mov(edx,
-         Operand::StaticVariable(ExternalReference::the_hole_value_location(
-             masm->isolate())));
+  __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
   __ mov(eax, Operand::StaticVariable(pending_exception));
-  __ cmp(edx, Operand(eax));
+  __ cmp(edx, eax);
   __ j(equal, &runtime);
   // For exception, throw the exception again.
 
@@ -3578,7 +3620,7 @@
 
   __ bind(&failure);
   // For failure to match, return null.
-  __ mov(Operand(eax), factory->null_value());
+  __ mov(eax, factory->null_value());
   __ ret(4 * kPointerSize);
 
   // Load RegExp data.
@@ -3589,7 +3631,7 @@
   // Calculate number of capture registers (number_of_captures + 1) * 2.
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
-  __ add(Operand(edx), Immediate(2));  // edx was a smi.
+  __ add(edx, Immediate(2));  // edx was a smi.
 
   // edx: Number of capture registers
   // Load last_match_info which is still known to be a fast case JSArray.
@@ -3605,12 +3647,18 @@
   // Store last subject and last input.
   __ mov(eax, Operand(esp, kSubjectOffset));
   __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
-  __ mov(ecx, ebx);
-  __ RecordWrite(ecx, RegExpImpl::kLastSubjectOffset, eax, edi);
+  __ RecordWriteField(ebx,
+                      RegExpImpl::kLastSubjectOffset,
+                      eax,
+                      edi,
+                      kDontSaveFPRegs);
   __ mov(eax, Operand(esp, kSubjectOffset));
   __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
-  __ mov(ecx, ebx);
-  __ RecordWrite(ecx, RegExpImpl::kLastInputOffset, eax, edi);
+  __ RecordWriteField(ebx,
+                      RegExpImpl::kLastInputOffset,
+                      eax,
+                      edi,
+                      kDontSaveFPRegs);
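The old RecordWrite calls become RecordWriteField calls that name the host object, the field offset, and an FP-register save mode; with the new incremental garbage collector every pointer store into a heap object is followed by a barrier of roughly this shape. A conceptual C++ sketch, with every name a hypothetical stand-in rather than V8's heap API:

    #include <vector>

    struct HeapObj {};

    struct SketchHeap {
      std::vector<HeapObj**> store_buffer;   // remembered old-to-new slots
      bool incremental_marking;

      SketchHeap() : incremental_marking(false) {}

      // Stand-ins for the real space checks and marking hook.
      bool InNewSpace(HeapObj*) const { return false; }
      void MarkingBarrier(HeapObj*, HeapObj*) {}

      // Roughly what a RecordWriteField-style barrier does after the store.
      void WriteField(HeapObj* host, HeapObj** slot, HeapObj* value) {
        *slot = value;
        if (InNewSpace(value) && !InNewSpace(host)) {
          store_buffer.push_back(slot);      // scavenger revisits this slot
        }
        if (incremental_marking) {
          MarkingBarrier(host, value);       // preserve the marking invariant
        }
      }
    };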
 
   // Get the static offsets vector filled by the native regexp code.
   ExternalReference address_of_static_offsets_vector =
@@ -3624,7 +3672,7 @@
   // Capture register counter starts from number of capture registers and
   // counts down until wrapping after zero.
   __ bind(&next_capture);
-  __ sub(Operand(edx), Immediate(1));
+  __ sub(edx, Immediate(1));
   __ j(negative, &done, Label::kNear);
   // Read the value from the static offsets vector buffer.
   __ mov(edi, Operand(ecx, edx, times_int_size, 0));
@@ -3655,7 +3703,7 @@
   Label done;
   __ mov(ebx, Operand(esp, kPointerSize * 3));
   __ JumpIfNotSmi(ebx, &slowcase);
-  __ cmp(Operand(ebx), Immediate(Smi::FromInt(kMaxInlineLength)));
+  __ cmp(ebx, Immediate(Smi::FromInt(kMaxInlineLength)));
   __ j(above, &slowcase);
   // Smi-tagging is equivalent to multiplying by 2.
   STATIC_ASSERT(kSmiTag == 0);
@@ -3715,10 +3763,10 @@
   // ebx: Start of elements in FixedArray.
   // edx: the hole.
   Label loop;
-  __ test(ecx, Operand(ecx));
+  __ test(ecx, ecx);
   __ bind(&loop);
   __ j(less_equal, &done, Label::kNear);  // Jump if ecx is negative or zero.
-  __ sub(Operand(ecx), Immediate(1));
+  __ sub(ecx, Immediate(1));
   __ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
   __ jmp(&loop);
 
@@ -3752,7 +3800,7 @@
   // contains two elements (number and string) for each cache entry.
   __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
   __ shr(mask, kSmiTagSize + 1);  // Untag length and divide it by two.
-  __ sub(Operand(mask), Immediate(1));  // Make mask.
+  __ sub(mask, Immediate(1));  // Make mask.
 
   // Calculate the entry in the number string cache. The hash value in the
   // number string cache for smis is just the smi value, and the hash for
@@ -3778,7 +3826,7 @@
     __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
     __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
     // Object is heap number and hash is now in scratch. Calculate cache index.
-    __ and_(scratch, Operand(mask));
+    __ and_(scratch, mask);
     Register index = scratch;
     Register probe = mask;
     __ mov(probe,
@@ -3804,7 +3852,7 @@
 
   __ bind(&smi_hash_calculated);
   // Object is smi and hash is now in scratch. Calculate cache index.
-  __ and_(scratch, Operand(mask));
+  __ and_(scratch, mask);
   Register index = scratch;
   // Check if the entry is the smi we are looking for.
   __ cmp(object,
@@ -3856,10 +3904,10 @@
   // Compare two smis if required.
   if (include_smi_compare_) {
     Label non_smi, smi_done;
-    __ mov(ecx, Operand(edx));
-    __ or_(ecx, Operand(eax));
+    __ mov(ecx, edx);
+    __ or_(ecx, eax);
     __ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
-    __ sub(edx, Operand(eax));  // Return on the result of the subtraction.
+    __ sub(edx, eax);  // Return on the result of the subtraction.
     __ j(no_overflow, &smi_done, Label::kNear);
     __ not_(edx);  // Correct sign in case of overflow. edx is never 0 here.
     __ bind(&smi_done);
@@ -3867,8 +3915,8 @@
     __ ret(0);
     __ bind(&non_smi);
   } else if (FLAG_debug_code) {
-    __ mov(ecx, Operand(edx));
-    __ or_(ecx, Operand(eax));
+    __ mov(ecx, edx);
+    __ or_(ecx, eax);
     __ test(ecx, Immediate(kSmiTagMask));
     __ Assert(not_zero, "Unexpected smi operands.");
   }
@@ -3880,7 +3928,7 @@
   // for NaN and undefined.
   {
     Label not_identical;
-    __ cmp(eax, Operand(edx));
+    __ cmp(eax, edx);
     __ j(not_equal, &not_identical);
 
     if (cc_ != equal) {
@@ -3929,7 +3977,7 @@
       __ Set(eax, Immediate(0));
       // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
       // bits.
-      __ add(edx, Operand(edx));
+      __ add(edx, edx);
       __ cmp(edx, kQuietNaNHighBitsMask << 1);
       if (cc_ == equal) {
         STATIC_ASSERT(EQUAL != 1);
@@ -3963,19 +4011,19 @@
     STATIC_ASSERT(kSmiTag == 0);
     ASSERT_EQ(0, Smi::FromInt(0));
     __ mov(ecx, Immediate(kSmiTagMask));
-    __ and_(ecx, Operand(eax));
-    __ test(ecx, Operand(edx));
+    __ and_(ecx, eax);
+    __ test(ecx, edx);
     __ j(not_zero, &not_smis, Label::kNear);
     // One operand is a smi.
 
     // Check whether the non-smi is a heap number.
     STATIC_ASSERT(kSmiTagMask == 1);
     // ecx still holds eax & kSmiTag, which is either zero or one.
-    __ sub(Operand(ecx), Immediate(0x01));
+    __ sub(ecx, Immediate(0x01));
     __ mov(ebx, edx);
-    __ xor_(ebx, Operand(eax));
-    __ and_(ebx, Operand(ecx));  // ebx holds either 0 or eax ^ edx.
-    __ xor_(ebx, Operand(eax));
+    __ xor_(ebx, eax);
+    __ and_(ebx, ecx);  // ebx holds either 0 or eax ^ edx.
+    __ xor_(ebx, eax);
     // if eax was smi, ebx is now edx, else eax.
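The four instructions above implement a branchless select: given kSmiTag == 0 and kSmiTagMask == 1 (see the STATIC_ASSERTs), the sequence picks whichever operand might be a heap number. In C++ it amounts to the following, for illustration only:

    #include <cstdint>

    // Returns b if a is a smi (low tag bit clear), otherwise returns a.
    static inline uint32_t SelectPossibleHeapNumber(uint32_t a, uint32_t b) {
      uint32_t mask = (a & 1u) - 1u;   // all ones when a is a smi, else zero
      return a ^ ((a ^ b) & mask);     // branch-free "mask ? b : a"
    }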
 
     // Check if the non-smi operand is a heap number.
@@ -4037,9 +4085,9 @@
       // Return a result of -1, 0, or 1, based on EFLAGS.
       __ mov(eax, 0);  // equal
       __ mov(ecx, Immediate(Smi::FromInt(1)));
-      __ cmov(above, eax, Operand(ecx));
+      __ cmov(above, eax, ecx);
       __ mov(ecx, Immediate(Smi::FromInt(-1)));
-      __ cmov(below, eax, Operand(ecx));
+      __ cmov(below, eax, ecx);
       __ ret(0);
     } else {
       FloatingPointHelper::CheckFloatOperands(
@@ -4198,25 +4246,49 @@
 }
 
 
+void CallFunctionStub::FinishCode(Code* code) {
+  code->set_has_function_cache(RecordCallTarget());
+}
+
+
+void CallFunctionStub::Clear(Heap* heap, Address address) {
+  ASSERT(Memory::uint8_at(address + kPointerSize) == Assembler::kTestEaxByte);
+  // 1 ~ size of the test eax opcode.
+  Object* cell = Memory::Object_at(address + kPointerSize + 1);
+  // Low-level because clearing happens during GC.
+  reinterpret_cast<JSGlobalPropertyCell*>(cell)->set_value(
+      RawUninitializedSentinel(heap));
+}
+
+
+Object* CallFunctionStub::GetCachedValue(Address address) {
+  ASSERT(Memory::uint8_at(address + kPointerSize) == Assembler::kTestEaxByte);
+  // 1 ~ size of the test eax opcode.
+  Object* cell = Memory::Object_at(address + kPointerSize + 1);
+  return JSGlobalPropertyCell::cast(cell)->value();
+}
+
+
 void CallFunctionStub::Generate(MacroAssembler* masm) {
+  Isolate* isolate = masm->isolate();
   Label slow, non_function;
 
   // The receiver might implicitly be the global object. This is
   // indicated by passing the hole as the receiver to the call
   // function stub.
   if (ReceiverMightBeImplicit()) {
-    Label call;
+    Label receiver_ok;
     // Get the receiver from the stack.
     // +1 ~ return address
     __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
     // Call as function is indicated with the hole.
-    __ cmp(eax, masm->isolate()->factory()->the_hole_value());
-    __ j(not_equal, &call, Label::kNear);
+    __ cmp(eax, isolate->factory()->the_hole_value());
+    __ j(not_equal, &receiver_ok, Label::kNear);
     // Patch the receiver on the stack with the global receiver object.
     __ mov(ebx, GlobalObjectOperand());
     __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
     __ mov(Operand(esp, (argc_ + 1) * kPointerSize), ebx);
-    __ bind(&call);
+    __ bind(&receiver_ok);
   }
 
   // Get the function to call from the stack.
@@ -4229,12 +4301,53 @@
   __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
   __ j(not_equal, &slow);
 
+  if (RecordCallTarget()) {
+    // Cache the called function in a global property cell in the
+    // instruction stream after the call.  Cache states are uninitialized,
+    // monomorphic (indicated by a JSFunction), and megamorphic.
+    Label initialize, call;
+    // Load the cache cell address into ebx and the cache state into ecx.
+    __ mov(ebx, Operand(esp, 0));  // Return address.
+    __ mov(ebx, Operand(ebx, 1));  // 1 ~ sizeof 'test eax' opcode in bytes.
+    __ mov(ecx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
+
+    // A monomorphic cache hit or an already megamorphic state: invoke the
+    // function without changing the state.
+    __ cmp(ecx, edi);
+    __ j(equal, &call, Label::kNear);
+    __ cmp(ecx, Immediate(MegamorphicSentinel(isolate)));
+    __ j(equal, &call, Label::kNear);
+
+    // A monomorphic miss (i.e., here the cache is not uninitialized) goes
+    // megamorphic.
+    __ cmp(ecx, Immediate(UninitializedSentinel(isolate)));
+    __ j(equal, &initialize, Label::kNear);
+    // MegamorphicSentinel is a root so no write-barrier is needed.
+    __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
+           Immediate(MegamorphicSentinel(isolate)));
+    __ jmp(&call, Label::kNear);
+
+    // An uninitialized cache is patched with the function.
+    __ bind(&initialize);
+    __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), edi);
+    __ mov(ecx, edi);
+    __ RecordWriteField(ebx,
+                        JSGlobalPropertyCell::kValueOffset,
+                        ecx,
+                        edx,
+                        kDontSaveFPRegs,
+                        OMIT_REMEMBERED_SET,  // Cells are rescanned.
+                        OMIT_SMI_CHECK);
+
+    __ bind(&call);
+  }
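The RecordCallTarget block above keeps a per-call-site cache cell that moves from uninitialized to monomorphic (the cached JSFunction) to megamorphic. The transition logic, written out in C++ for illustration; the sentinel objects here are stand-ins for the real heap sentinels:

    // Illustration only: the cache-cell state machine emitted above.
    static const int kUninitializedSentinel = 0;   // stand-in sentinel
    static const int kMegamorphicSentinel = 0;     // stand-in sentinel

    static inline const void* UpdateCallTargetCache(const void* cell_value,
                                                    const void* function) {
      if (cell_value == function) return cell_value;              // monomorphic hit
      if (cell_value == &kMegamorphicSentinel) return cell_value; // already generic
      if (cell_value == &kUninitializedSentinel) return function; // first call: cache it
      return &kMegamorphicSentinel;                // monomorphic miss: go megamorphic
    }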
+
   // Fast-case: Just invoke the function.
   ParameterCount actual(argc_);
 
   if (ReceiverMightBeImplicit()) {
     Label call_as_function;
-    __ cmp(eax, masm->isolate()->factory()->the_hole_value());
+    __ cmp(eax, isolate->factory()->the_hole_value());
     __ j(equal, &call_as_function);
     __ InvokeFunction(edi,
                       actual,
@@ -4251,6 +4364,14 @@
 
   // Slow-case: Non-function called.
   __ bind(&slow);
+  if (RecordCallTarget()) {
+    // If there is a call target cache, mark it megamorphic in the
+    // non-function case.
+    __ mov(ebx, Operand(esp, 0));
+    __ mov(ebx, Operand(ebx, 1));
+    __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
+           Immediate(MegamorphicSentinel(isolate)));
+  }
   // Check for function proxy.
   __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
   __ j(not_equal, &non_function);
@@ -4262,8 +4383,7 @@
   __ SetCallKind(ecx, CALL_AS_FUNCTION);
   __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
   {
-    Handle<Code> adaptor =
-      masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+    Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
     __ jmp(adaptor, RelocInfo::CODE_TARGET);
   }
 
@@ -4275,8 +4395,7 @@
   __ Set(ebx, Immediate(0));
   __ SetCallKind(ecx, CALL_AS_METHOD);
   __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
-  Handle<Code> adaptor =
-      masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+  Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
   __ jmp(adaptor, RelocInfo::CODE_TARGET);
 }
 
@@ -4286,6 +4405,35 @@
 }
 
 
+bool CEntryStub::IsPregenerated() {
+  return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
+          result_size_ == 1;
+}
+
+
+void CodeStub::GenerateStubsAheadOfTime() {
+  CEntryStub::GenerateAheadOfTime();
+  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
+  // It is important that the store buffer overflow stubs are generated first.
+  RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+}
+
+
+void CodeStub::GenerateFPStubs() {
+  CEntryStub save_doubles(1, kSaveFPRegs);
+  Handle<Code> code = save_doubles.GetCode();
+  code->set_is_pregenerated(true);
+  code->GetIsolate()->set_fp_stubs_generated(true);
+}
+
+
+void CEntryStub::GenerateAheadOfTime() {
+  CEntryStub stub(1, kDontSaveFPRegs);
+  Handle<Code> code = stub.GetCode();
+  code->set_is_pregenerated(true);
+}
+
+
 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
   __ Throw(eax);
 }
@@ -4332,7 +4480,7 @@
   __ mov(Operand(esp, 1 * kPointerSize), esi);  // argv.
   __ mov(Operand(esp, 2 * kPointerSize),
          Immediate(ExternalReference::isolate_address()));
-  __ call(Operand(ebx));
+  __ call(ebx);
   // Result is in eax or edx:eax - do not destroy these registers!
 
   if (always_allocate_scope) {
@@ -4364,8 +4512,7 @@
   // should have returned some failure value.
   if (FLAG_debug_code) {
     __ push(edx);
-    __ mov(edx, Operand::StaticVariable(
-        ExternalReference::the_hole_value_location(masm->isolate())));
+    __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
     Label okay;
     __ cmp(edx, Operand::StaticVariable(pending_exception_address));
     // Cannot use check here as it attempts to generate call into runtime.
@@ -4376,7 +4523,7 @@
   }
 
   // Exit the JavaScript to C++ exit frame.
-  __ LeaveExitFrame(save_doubles_);
+  __ LeaveExitFrame(save_doubles_ == kSaveFPRegs);
   __ ret(0);
 
   // Handling of failure.
@@ -4393,10 +4540,8 @@
   __ j(equal, throw_out_of_memory_exception);
 
   // Retrieve the pending exception and clear the variable.
-  ExternalReference the_hole_location =
-      ExternalReference::the_hole_value_location(masm->isolate());
   __ mov(eax, Operand::StaticVariable(pending_exception_address));
-  __ mov(edx, Operand::StaticVariable(the_hole_location));
+  __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
   __ mov(Operand::StaticVariable(pending_exception_address), edx);
 
   // Special handling of termination exceptions which are uncatchable
@@ -4431,7 +4576,7 @@
   // a garbage collection and retrying the builtin (twice).
 
   // Enter the exit frame that transitions from JavaScript to C++.
-  __ EnterExitFrame(save_doubles_);
+  __ EnterExitFrame(save_doubles_ == kSaveFPRegs);
 
   // eax: result parameter for PerformGC, if any (setup below)
   // ebx: pointer to builtin function  (C callee-saved)
@@ -4487,7 +4632,7 @@
 
   // Setup frame.
   __ push(ebp);
-  __ mov(ebp, Operand(esp));
+  __ mov(ebp, esp);
 
   // Push marker in two places.
   int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
@@ -4531,9 +4676,7 @@
   __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
 
   // Clear any pending exceptions.
-  ExternalReference the_hole_location =
-      ExternalReference::the_hole_value_location(masm->isolate());
-  __ mov(edx, Operand::StaticVariable(the_hole_location));
+  __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
   __ mov(Operand::StaticVariable(pending_exception), edx);
 
   // Fake a receiver (NULL).
@@ -4555,7 +4698,7 @@
   }
   __ mov(edx, Operand(edx, 0));  // deref address
   __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
-  __ call(Operand(edx));
+  __ call(edx);
 
   // Unlink this frame from the handler chain.
   __ PopTryHandler();
@@ -4563,8 +4706,7 @@
   __ bind(&exit);
   // Check if the current stack frame is marked as the outermost JS frame.
   __ pop(ebx);
-  __ cmp(Operand(ebx),
-         Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+  __ cmp(ebx, Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
   __ j(not_equal, &not_outermost_js_2);
   __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
   __ bind(&not_outermost_js_2);
@@ -4578,7 +4720,7 @@
   __ pop(ebx);
   __ pop(esi);
   __ pop(edi);
-  __ add(Operand(esp), Immediate(2 * kPointerSize));  // remove markers
+  __ add(esp, Immediate(2 * kPointerSize));  // remove markers
 
   // Restore frame pointer and return.
   __ pop(ebp);
@@ -4694,10 +4836,10 @@
   __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
   Label loop, is_instance, is_not_instance;
   __ bind(&loop);
-  __ cmp(scratch, Operand(prototype));
+  __ cmp(scratch, prototype);
   __ j(equal, &is_instance, Label::kNear);
   Factory* factory = masm->isolate()->factory();
-  __ cmp(Operand(scratch), Immediate(factory->null_value()));
+  __ cmp(scratch, Immediate(factory->null_value()));
   __ j(equal, &is_not_instance, Label::kNear);
   __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
   __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
@@ -4788,13 +4930,14 @@
     __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
   } else {
     // Call the builtin and convert 0/1 to true/false.
-    __ EnterInternalFrame();
-    __ push(object);
-    __ push(function);
-    __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(object);
+      __ push(function);
+      __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+    }
     Label true_value, done;
-    __ test(eax, Operand(eax));
+    __ test(eax, eax);
     __ j(zero, &true_value, Label::kNear);
     __ mov(eax, factory->false_value());
     __ jmp(&done, Label::kNear);
@@ -5110,7 +5253,7 @@
   Label second_not_zero_length, both_not_zero_length;
   __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
   STATIC_ASSERT(kSmiTag == 0);
-  __ test(ecx, Operand(ecx));
+  __ test(ecx, ecx);
   __ j(not_zero, &second_not_zero_length, Label::kNear);
   // Second string is empty, result is first string which is already in eax.
   Counters* counters = masm->isolate()->counters();
@@ -5119,7 +5262,7 @@
   __ bind(&second_not_zero_length);
   __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
   STATIC_ASSERT(kSmiTag == 0);
-  __ test(ebx, Operand(ebx));
+  __ test(ebx, ebx);
   __ j(not_zero, &both_not_zero_length, Label::kNear);
   // First string is empty, result is second string which is in edx.
   __ mov(eax, edx);
@@ -5134,13 +5277,13 @@
   // Look at the length of the result of adding the two strings.
   Label string_add_flat_result, longer_than_two;
   __ bind(&both_not_zero_length);
-  __ add(ebx, Operand(ecx));
+  __ add(ebx, ecx);
   STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
   // Handle exceptionally long strings in the runtime system.
   __ j(overflow, &string_add_runtime);
   // Use the symbol table when adding two one character strings, as it
   // helps later optimizations to return a symbol here.
-  __ cmp(Operand(ebx), Immediate(Smi::FromInt(2)));
+  __ cmp(ebx, Immediate(Smi::FromInt(2)));
   __ j(not_equal, &longer_than_two);
 
   // Check that both strings are non-external ascii strings.
@@ -5177,7 +5320,7 @@
                          &string_add_runtime);
   // Pack both characters in ebx.
   __ shl(ecx, kBitsPerByte);
-  __ or_(ebx, Operand(ecx));
+  __ or_(ebx, ecx);
   // Set the characters in the new string.
   __ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx);
   __ IncrementCounter(counters->string_add_native(), 1);
@@ -5185,7 +5328,7 @@
 
   __ bind(&longer_than_two);
   // Check if resulting string will be flat.
-  __ cmp(Operand(ebx), Immediate(Smi::FromInt(String::kMinNonFlatLength)));
+  __ cmp(ebx, Immediate(Smi::FromInt(String::kMinNonFlatLength)));
   __ j(below, &string_add_flat_result);
 
   // If result is not supposed to be flat allocate a cons string object. If both
@@ -5195,7 +5338,7 @@
   __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
   __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
   __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
-  __ and_(ecx, Operand(edi));
+  __ and_(ecx, edi);
   STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
   STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
   __ test(ecx, Immediate(kStringEncodingMask));
@@ -5223,7 +5366,7 @@
   __ j(not_zero, &ascii_data);
   __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
   __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-  __ xor_(edi, Operand(ecx));
+  __ xor_(edi, ecx);
   STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
   __ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
   __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
@@ -5271,12 +5414,12 @@
   // eax: result string
   __ mov(ecx, eax);
   // Locate first character of result.
-  __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(ecx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   // Load first argument and locate first character.
   __ mov(edx, Operand(esp, 2 * kPointerSize));
   __ mov(edi, FieldOperand(edx, String::kLengthOffset));
   __ SmiUntag(edi);
-  __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   // eax: result string
   // ecx: first character of result
   // edx: first char of first argument
@@ -5286,7 +5429,7 @@
   __ mov(edx, Operand(esp, 1 * kPointerSize));
   __ mov(edi, FieldOperand(edx, String::kLengthOffset));
   __ SmiUntag(edi);
-  __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   // eax: result string
   // ecx: next character of result
   // edx: first char of second argument
@@ -5310,13 +5453,13 @@
   // eax: result string
   __ mov(ecx, eax);
   // Locate first character of result.
-  __ add(Operand(ecx),
+  __ add(ecx,
          Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   // Load first argument and locate first character.
   __ mov(edx, Operand(esp, 2 * kPointerSize));
   __ mov(edi, FieldOperand(edx, String::kLengthOffset));
   __ SmiUntag(edi);
-  __ add(Operand(edx),
+  __ add(edx,
          Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   // eax: result string
   // ecx: first character of result
@@ -5327,7 +5470,7 @@
   __ mov(edx, Operand(esp, 1 * kPointerSize));
   __ mov(edi, FieldOperand(edx, String::kLengthOffset));
   __ SmiUntag(edi);
-  __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   // eax: result string
   // ecx: next character of result
   // edx: first char of second argument
@@ -5403,15 +5546,15 @@
   if (ascii) {
     __ mov_b(scratch, Operand(src, 0));
     __ mov_b(Operand(dest, 0), scratch);
-    __ add(Operand(src), Immediate(1));
-    __ add(Operand(dest), Immediate(1));
+    __ add(src, Immediate(1));
+    __ add(dest, Immediate(1));
   } else {
     __ mov_w(scratch, Operand(src, 0));
     __ mov_w(Operand(dest, 0), scratch);
-    __ add(Operand(src), Immediate(2));
-    __ add(Operand(dest), Immediate(2));
+    __ add(src, Immediate(2));
+    __ add(dest, Immediate(2));
   }
-  __ sub(Operand(count), Immediate(1));
+  __ sub(count, Immediate(1));
   __ j(not_zero, &loop);
 }
 
@@ -5434,7 +5577,7 @@
 
   // Nothing to do for zero characters.
   Label done;
-  __ test(count, Operand(count));
+  __ test(count, count);
   __ j(zero, &done);
 
   // Make count the number of bytes to copy.
@@ -5459,7 +5602,7 @@
 
   // Check if there are more bytes to copy.
   __ bind(&last_bytes);
-  __ test(count, Operand(count));
+  __ test(count, count);
   __ j(zero, &done);
 
   // Copy remaining characters.
@@ -5467,9 +5610,9 @@
   __ bind(&loop);
   __ mov_b(scratch, Operand(src, 0));
   __ mov_b(Operand(dest, 0), scratch);
-  __ add(Operand(src), Immediate(1));
-  __ add(Operand(dest), Immediate(1));
-  __ sub(Operand(count), Immediate(1));
+  __ add(src, Immediate(1));
+  __ add(dest, Immediate(1));
+  __ sub(count, Immediate(1));
   __ j(not_zero, &loop);
 
   __ bind(&done);
@@ -5491,12 +5634,12 @@
   // different hash algorithm. Don't try to look for these in the symbol table.
   Label not_array_index;
   __ mov(scratch, c1);
-  __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
-  __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
+  __ sub(scratch, Immediate(static_cast<int>('0')));
+  __ cmp(scratch, Immediate(static_cast<int>('9' - '0')));
   __ j(above, &not_array_index, Label::kNear);
   __ mov(scratch, c2);
-  __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
-  __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
+  __ sub(scratch, Immediate(static_cast<int>('0')));
+  __ cmp(scratch, Immediate(static_cast<int>('9' - '0')));
   __ j(below_equal, not_probed);
 
   __ bind(&not_array_index);
@@ -5509,7 +5652,7 @@
   // Collect the two characters in a register.
   Register chars = c1;
   __ shl(c2, kBitsPerByte);
-  __ or_(chars, Operand(c2));
+  __ or_(chars, c2);
 
   // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
   // hash:  hash of two character string.
@@ -5526,7 +5669,7 @@
   Register mask = scratch2;
   __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
   __ SmiUntag(mask);
-  __ sub(Operand(mask), Immediate(1));
+  __ sub(mask, Immediate(1));
 
   // Registers
   // chars:        two character string, char 1 in byte 0 and char 2 in byte 1.
@@ -5543,9 +5686,9 @@
     // Calculate entry in symbol table.
     __ mov(scratch, hash);
     if (i > 0) {
-      __ add(Operand(scratch), Immediate(SymbolTable::GetProbeOffset(i)));
+      __ add(scratch, Immediate(SymbolTable::GetProbeOffset(i)));
     }
-    __ and_(scratch, Operand(mask));
+    __ and_(scratch, mask);
 
     // Load the entry from the symbol table.
     Register candidate = scratch;  // Scratch register contains candidate.
@@ -5582,7 +5725,7 @@
     // Check if the two characters match.
     __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
     __ and_(temp, 0x0000ffff);
-    __ cmp(chars, Operand(temp));
+    __ cmp(chars, temp);
     __ j(equal, &found_in_symbol_table);
     __ bind(&next_probe_pop_mask[i]);
     __ pop(mask);
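The probing loop above is a bounded open-addressing lookup over a power-of-two symbol table: each attempt adds a probe offset to the hash, masks it down to an index, and gives up to the runtime after a fixed number of tries. A compact C++ sketch of the same search, with hypothetical types and a probe_offset stand-in for SymbolTable::GetProbeOffset:

    #include <cstddef>
    #include <cstdint>

    // Hypothetical entry: a slot is empty (undefined), deleted (the hole),
    // or holds a symbol whose first two characters are packed into 16 bits.
    struct Slot { bool undefined; bool hole; uint32_t first_two_chars; };

    static const Slot* FindTwoCharSymbol(const Slot* table, size_t capacity,
                                         uint32_t hash, uint32_t chars,
                                         int max_probes,
                                         uint32_t (*probe_offset)(int)) {
      uint32_t mask = static_cast<uint32_t>(capacity) - 1;  // capacity is 2^n
      for (int i = 0; i < max_probes; i++) {
        uint32_t index = (hash + (i > 0 ? probe_offset(i) : 0)) & mask;
        const Slot& slot = table[index];
        if (slot.undefined) return NULL;  // empty slot: symbol not in table
        if (!slot.hole && slot.first_two_chars == chars) return &slot;
        // deleted slot or a different symbol: keep probing
      }
      return NULL;  // give up; the caller falls back to the runtime
    }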
@@ -5609,11 +5752,11 @@
   // hash = character + (character << 10);
   __ mov(hash, character);
   __ shl(hash, 10);
-  __ add(hash, Operand(character));
+  __ add(hash, character);
   // hash ^= hash >> 6;
   __ mov(scratch, hash);
   __ sar(scratch, 6);
-  __ xor_(hash, Operand(scratch));
+  __ xor_(hash, scratch);
 }
 
 
@@ -5622,15 +5765,15 @@
                                             Register character,
                                             Register scratch) {
   // hash += character;
-  __ add(hash, Operand(character));
+  __ add(hash, character);
   // hash += hash << 10;
   __ mov(scratch, hash);
   __ shl(scratch, 10);
-  __ add(hash, Operand(scratch));
+  __ add(hash, scratch);
   // hash ^= hash >> 6;
   __ mov(scratch, hash);
   __ sar(scratch, 6);
-  __ xor_(hash, Operand(scratch));
+  __ xor_(hash, scratch);
 }
 
 
@@ -5640,19 +5783,19 @@
   // hash += hash << 3;
   __ mov(scratch, hash);
   __ shl(scratch, 3);
-  __ add(hash, Operand(scratch));
+  __ add(hash, scratch);
   // hash ^= hash >> 11;
   __ mov(scratch, hash);
   __ sar(scratch, 11);
-  __ xor_(hash, Operand(scratch));
+  __ xor_(hash, scratch);
   // hash += hash << 15;
   __ mov(scratch, hash);
   __ shl(scratch, 15);
-  __ add(hash, Operand(scratch));
+  __ add(hash, scratch);
 
   // if (hash == 0) hash = 27;
   Label hash_not_zero;
-  __ test(hash, Operand(hash));
+  __ test(hash, hash);
   __ j(not_zero, &hash_not_zero, Label::kNear);
   __ mov(hash, Immediate(27));
   __ bind(&hash_not_zero);
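For reference, the three hash steps emitted above (initialize with the first character, fold in each further character, finalize) read as follows in plain C++. This is a sketch for illustration; the function names are descriptive rather than V8's, and the Asr helper mirrors the sar instructions under the usual two's-complement behavior of the targeted compilers:

    #include <cstdint>

    // Arithmetic right shift, matching the sar instructions above.
    static inline uint32_t Asr(uint32_t v, int s) {
      return static_cast<uint32_t>(static_cast<int32_t>(v) >> s);
    }

    static inline uint32_t HashInit(uint32_t c) {
      uint32_t hash = c + (c << 10);
      hash ^= Asr(hash, 6);
      return hash;
    }

    static inline uint32_t HashAddCharacter(uint32_t hash, uint32_t c) {
      hash += c;
      hash += hash << 10;
      hash ^= Asr(hash, 6);
      return hash;
    }

    static inline uint32_t HashGetHash(uint32_t hash) {
      hash += hash << 3;
      hash ^= Asr(hash, 11);
      hash += hash << 15;
      if (hash == 0) hash = 27;
      return hash;
    }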
@@ -5684,7 +5827,7 @@
   __ JumpIfNotSmi(ecx, &runtime);
   __ mov(edx, Operand(esp, 2 * kPointerSize));  // From index.
   __ JumpIfNotSmi(edx, &runtime);
-  __ sub(ecx, Operand(edx));
+  __ sub(ecx, edx);
   __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
   Label return_eax;
   __ j(equal, &return_eax);
@@ -5816,13 +5959,13 @@
   __ mov(edx, esi);  // esi used by following code.
   // Locate first character of result.
   __ mov(edi, eax);
-  __ add(Operand(edi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(edi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   // Load string argument and locate character of sub string start.
   __ mov(esi, Operand(esp, 3 * kPointerSize));
-  __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(esi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   __ mov(ebx, Operand(esp, 2 * kPointerSize));  // from
   __ SmiUntag(ebx);
-  __ add(esi, Operand(ebx));
+  __ add(esi, ebx);
 
   // eax: result string
   // ecx: result length
@@ -5851,18 +5994,17 @@
   __ mov(edx, esi);  // esi used by following code.
   // Locate first character of result.
   __ mov(edi, eax);
-  __ add(Operand(edi),
+  __ add(edi,
          Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   // Load string argument and locate character of sub string start.
   __ mov(esi, Operand(esp, 3 * kPointerSize));
-  __ add(Operand(esi),
-         Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  __ add(esi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   __ mov(ebx, Operand(esp, 2 * kPointerSize));  // from
   // As from is a smi it is 2 times the value which matches the size of a two
   // byte character.
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
-  __ add(esi, Operand(ebx));
+  __ add(esi, ebx);
 
   // eax: result string
   // ecx: result length
@@ -5902,7 +6044,7 @@
   Label compare_chars;
   __ bind(&check_zero_length);
   STATIC_ASSERT(kSmiTag == 0);
-  __ test(length, Operand(length));
+  __ test(length, length);
   __ j(not_zero, &compare_chars, Label::kNear);
   __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
   __ ret(0);
@@ -5937,14 +6079,14 @@
 
   __ j(less_equal, &left_shorter, Label::kNear);
   // Right string is shorter. Change scratch1 to be length of right string.
-  __ sub(scratch1, Operand(length_delta));
+  __ sub(scratch1, length_delta);
   __ bind(&left_shorter);
 
   Register min_length = scratch1;
 
   // If either length is zero, just compare lengths.
   Label compare_lengths;
-  __ test(min_length, Operand(min_length));
+  __ test(min_length, min_length);
   __ j(zero, &compare_lengths, Label::kNear);
 
   // Compare characters.
@@ -5954,7 +6096,7 @@
 
   // Compare lengths -  strings up to min-length are equal.
   __ bind(&compare_lengths);
-  __ test(length_delta, Operand(length_delta));
+  __ test(length_delta, length_delta);
   __ j(not_zero, &result_not_equal, Label::kNear);
 
   // Result is EQUAL.
@@ -6003,7 +6145,7 @@
   __ mov_b(scratch, Operand(left, index, times_1, 0));
   __ cmpb(scratch, Operand(right, index, times_1, 0));
   __ j(not_equal, chars_not_equal, chars_not_equal_near);
-  __ add(Operand(index), Immediate(1));
+  __ add(index, Immediate(1));
   __ j(not_zero, &loop);
 }
 
@@ -6020,7 +6162,7 @@
   __ mov(eax, Operand(esp, 1 * kPointerSize));  // right
 
   Label not_same;
-  __ cmp(edx, Operand(eax));
+  __ cmp(edx, eax);
   __ j(not_equal, &not_same, Label::kNear);
   STATIC_ASSERT(EQUAL == 0);
   STATIC_ASSERT(kSmiTag == 0);
@@ -6036,7 +6178,7 @@
   // Compare flat ascii strings.
   // Drop arguments from the stack.
   __ pop(ecx);
-  __ add(Operand(esp), Immediate(2 * kPointerSize));
+  __ add(esp, Immediate(2 * kPointerSize));
   __ push(ecx);
   GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
 
@@ -6050,16 +6192,16 @@
 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
   ASSERT(state_ == CompareIC::SMIS);
   Label miss;
-  __ mov(ecx, Operand(edx));
-  __ or_(ecx, Operand(eax));
+  __ mov(ecx, edx);
+  __ or_(ecx, eax);
   __ JumpIfNotSmi(ecx, &miss, Label::kNear);
 
   if (GetCondition() == equal) {
     // For equality we do not care about the sign of the result.
-    __ sub(eax, Operand(edx));
+    __ sub(eax, edx);
   } else {
     Label done;
-    __ sub(edx, Operand(eax));
+    __ sub(edx, eax);
     __ j(no_overflow, &done, Label::kNear);
     // Correct sign of result in case of overflow.
     __ not_(edx);
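The subtract-and-complement trick above yields a value with the correct comparison sign even when the subtraction overflows: for tagged smis (even 32-bit words) the overflowed difference is never -1, so flipping its bits restores the right sign without ever producing zero. A C++ rendering for illustration, using the GCC/Clang __builtin_sub_overflow builtin:

    #include <cstdint>

    // a and b are assumed to be tagged smis, i.e. even 32-bit words.
    static inline int32_t CompareSmis(int32_t a, int32_t b) {
      int32_t diff;
      if (__builtin_sub_overflow(a, b, &diff)) {
        diff = ~diff;  // overflow flipped the sign; ~ flips it back, never 0
      }
      return diff;     // negative, zero, or positive, like the smi comparison
    }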
@@ -6079,8 +6221,8 @@
   Label generic_stub;
   Label unordered;
   Label miss;
-  __ mov(ecx, Operand(edx));
-  __ and_(ecx, Operand(eax));
+  __ mov(ecx, edx);
+  __ and_(ecx, eax);
   __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
 
   __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
@@ -6108,9 +6250,9 @@
     // Performing mov, because xor would destroy the flag register.
     __ mov(eax, 0);  // equal
     __ mov(ecx, Immediate(Smi::FromInt(1)));
-    __ cmov(above, eax, Operand(ecx));
+    __ cmov(above, eax, ecx);
     __ mov(ecx, Immediate(Smi::FromInt(-1)));
-    __ cmov(below, eax, Operand(ecx));
+    __ cmov(below, eax, ecx);
     __ ret(0);
 
     __ bind(&unordered);
@@ -6137,9 +6279,9 @@
 
   // Check that both operands are heap objects.
   Label miss;
-  __ mov(tmp1, Operand(left));
+  __ mov(tmp1, left);
   STATIC_ASSERT(kSmiTag == 0);
-  __ and_(tmp1, Operand(right));
+  __ and_(tmp1, right);
   __ JumpIfSmi(tmp1, &miss, Label::kNear);
 
   // Check that both operands are symbols.
@@ -6148,13 +6290,13 @@
   __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
   __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
   STATIC_ASSERT(kSymbolTag != 0);
-  __ and_(tmp1, Operand(tmp2));
+  __ and_(tmp1, tmp2);
   __ test(tmp1, Immediate(kIsSymbolMask));
   __ j(zero, &miss, Label::kNear);
 
   // Symbols are compared by identity.
   Label done;
-  __ cmp(left, Operand(right));
+  __ cmp(left, right);
   // Make sure eax is non-zero. At this point input operands are
   // guaranteed to be non-zero.
   ASSERT(right.is(eax));
@@ -6183,9 +6325,9 @@
   Register tmp3 = edi;
 
   // Check that both operands are heap objects.
-  __ mov(tmp1, Operand(left));
+  __ mov(tmp1, left);
   STATIC_ASSERT(kSmiTag == 0);
-  __ and_(tmp1, Operand(right));
+  __ and_(tmp1, right);
   __ JumpIfSmi(tmp1, &miss);
 
   // Check that both operands are strings. This leaves the instance
@@ -6196,13 +6338,13 @@
   __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
   __ mov(tmp3, tmp1);
   STATIC_ASSERT(kNotStringTag != 0);
-  __ or_(tmp3, Operand(tmp2));
+  __ or_(tmp3, tmp2);
   __ test(tmp3, Immediate(kIsNotStringMask));
   __ j(not_zero, &miss);
 
   // Fast check for identical strings.
   Label not_same;
-  __ cmp(left, Operand(right));
+  __ cmp(left, right);
   __ j(not_equal, &not_same, Label::kNear);
   STATIC_ASSERT(EQUAL == 0);
   STATIC_ASSERT(kSmiTag == 0);
@@ -6216,7 +6358,7 @@
   // because we already know they are not identical.
   Label do_compare;
   STATIC_ASSERT(kSymbolTag != 0);
-  __ and_(tmp1, Operand(tmp2));
+  __ and_(tmp1, tmp2);
   __ test(tmp1, Immediate(kIsSymbolMask));
   __ j(zero, &do_compare, Label::kNear);
   // Make sure eax is non-zero. At this point input operands are
@@ -6249,8 +6391,8 @@
 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
   ASSERT(state_ == CompareIC::OBJECTS);
   Label miss;
-  __ mov(ecx, Operand(edx));
-  __ and_(ecx, Operand(eax));
+  __ mov(ecx, edx);
+  __ and_(ecx, eax);
   __ JumpIfSmi(ecx, &miss, Label::kNear);
 
   __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
@@ -6259,7 +6401,7 @@
   __ j(not_equal, &miss, Label::kNear);
 
   ASSERT(GetCondition() == equal);
-  __ sub(eax, Operand(edx));
+  __ sub(eax, edx);
   __ ret(0);
 
   __ bind(&miss);
@@ -6274,15 +6416,16 @@
   __ push(eax);
   __ push(ecx);
 
-  // Call the runtime system in a fresh internal frame.
-  ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
-                                             masm->isolate());
-  __ EnterInternalFrame();
-  __ push(edx);
-  __ push(eax);
-  __ push(Immediate(Smi::FromInt(op_)));
-  __ CallExternalReference(miss, 3);
-  __ LeaveInternalFrame();
+  {
+    // Call the runtime system in a fresh internal frame.
+    ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
+                                               masm->isolate());
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(edx);
+    __ push(eax);
+    __ push(Immediate(Smi::FromInt(op_)));
+    __ CallExternalReference(miss, 3);
+  }
 
   // Compute the entry point of the rewritten stub.
   __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
@@ -6294,7 +6437,7 @@
   __ push(ecx);
 
   // Do a tail call to the rewritten stub.
-  __ jmp(Operand(edi));
+  __ jmp(edi);
 }
 
 
@@ -6323,8 +6466,8 @@
     // Capacity is smi 2^n.
     __ mov(index, FieldOperand(properties, kCapacityOffset));
     __ dec(index);
-    __ and_(Operand(index),
-           Immediate(Smi::FromInt(name->Hash() +
+    __ and_(index,
+            Immediate(Smi::FromInt(name->Hash() +
                                    StringDictionary::GetProbeOffset(i))));
 
     // Scale the index by multiplying by the entry size.
@@ -6357,7 +6500,7 @@
   __ push(Immediate(name->Hash()));
   MaybeObject* result = masm->TryCallStub(&stub);
   if (result->IsFailure()) return result;
-  __ test(r0, Operand(r0));
+  __ test(r0, r0);
   __ j(not_zero, miss);
   __ jmp(done);
   return result;
@@ -6390,9 +6533,9 @@
     __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
     __ shr(r0, String::kHashShift);
     if (i > 0) {
-      __ add(Operand(r0), Immediate(StringDictionary::GetProbeOffset(i)));
+      __ add(r0, Immediate(StringDictionary::GetProbeOffset(i)));
     }
-    __ and_(r0, Operand(r1));
+    __ and_(r0, r1);
 
     // Scale the index by multiplying by the entry size.
     ASSERT(StringDictionary::kEntrySize == 3);
@@ -6416,13 +6559,15 @@
   __ push(r0);
   __ CallStub(&stub);
 
-  __ test(r1, Operand(r1));
+  __ test(r1, r1);
   __ j(zero, miss);
   __ jmp(done);
 }
 
 
 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
+  // we cannot call anything that could cause a GC from this stub.
   // Stack frame on entry:
   //  esp[0 * kPointerSize]: return address.
   //  esp[1 * kPointerSize]: key's hash.
@@ -6453,8 +6598,7 @@
     // Compute the masked index: (hash + i + i * i) & mask.
     __ mov(scratch, Operand(esp, 2 * kPointerSize));
     if (i > 0) {
-      __ add(Operand(scratch),
-             Immediate(StringDictionary::GetProbeOffset(i)));
+      __ add(scratch, Immediate(StringDictionary::GetProbeOffset(i)));
     }
     __ and_(scratch, Operand(esp, 0));
 
@@ -6510,6 +6654,275 @@
 }
 
 
+struct AheadOfTimeWriteBarrierStubList {
+  Register object, value, address;
+  RememberedSetAction action;
+};
+
+
+struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
+  // Used in RegExpExecStub.
+  { ebx, eax, edi, EMIT_REMEMBERED_SET },
+  // Used in CompileArrayPushCall.
+  { ebx, ecx, edx, EMIT_REMEMBERED_SET },
+  { ebx, edi, edx, OMIT_REMEMBERED_SET },
+  // Used in CompileStoreGlobal and CallFunctionStub.
+  { ebx, ecx, edx, OMIT_REMEMBERED_SET },
+  // Used in StoreStubCompiler::CompileStoreField and
+  // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
+  { edx, ecx, ebx, EMIT_REMEMBERED_SET },
+  // GenerateStoreField calls the stub with two different permutations of
+  // registers.  This is the second.
+  { ebx, ecx, edx, EMIT_REMEMBERED_SET },
+  // StoreIC::GenerateNormal via GenerateDictionaryStore.
+  { ebx, edi, edx, EMIT_REMEMBERED_SET },
+  // KeyedStoreIC::GenerateGeneric.
+  { ebx, edx, ecx, EMIT_REMEMBERED_SET },
+  // KeyedStoreStubCompiler::GenerateStoreFastElement.
+  { edi, edx, ecx, EMIT_REMEMBERED_SET },
+  // Null termination.
+  { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET }
+};
+
+
+bool RecordWriteStub::IsPregenerated() {
+  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+       !entry->object.is(no_reg);
+       entry++) {
+    if (object_.is(entry->object) &&
+        value_.is(entry->value) &&
+        address_.is(entry->address) &&
+        remembered_set_action_ == entry->action &&
+        save_fp_regs_mode_ == kDontSaveFPRegs) {
+      return true;
+    }
+  }
+  return false;
+}
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
+  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
+  stub1.GetCode()->set_is_pregenerated(true);
+
+  CpuFeatures::TryForceFeatureScope scope(SSE2);
+  if (CpuFeatures::IsSupported(SSE2)) {
+    StoreBufferOverflowStub stub2(kSaveFPRegs);
+    stub2.GetCode()->set_is_pregenerated(true);
+  }
+}
+
+
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+       !entry->object.is(no_reg);
+       entry++) {
+    RecordWriteStub stub(entry->object,
+                         entry->value,
+                         entry->address,
+                         entry->action,
+                         kDontSaveFPRegs);
+    stub.GetCode()->set_is_pregenerated(true);
+  }
+}
+
+
+// Takes the input in 3 registers: address_, value_ and object_.  A pointer to
+// the value has just been written into the object; now this stub makes sure
+// we keep the GC informed.  The word in the object where the value has been
+// written is in the address register.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+  Label skip_to_incremental_noncompacting;
+  Label skip_to_incremental_compacting;
+
+  // The first two instructions are generated with labels so as to get the
+  // offsets fixed up correctly by the bind(Label*) call.  We patch them back
+  // and forth between compare instructions (nops in this position) and the
+  // real branches when we start and stop incremental heap marking.
+  __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
+  __ jmp(&skip_to_incremental_compacting, Label::kFar);
+
+  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ ret(0);
+  }
+
+  __ bind(&skip_to_incremental_noncompacting);
+  GenerateIncremental(masm, INCREMENTAL);
+
+  __ bind(&skip_to_incremental_compacting);
+  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
+  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
+  masm->set_byte_at(0, kTwoByteNopInstruction);
+  masm->set_byte_at(2, kFiveByteNopInstruction);
+}
+
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+  regs_.Save(masm);
+
+  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+    Label dont_need_remembered_set;
+
+    __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
+    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
+                           regs_.scratch0(),
+                           &dont_need_remembered_set);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch0(),
+                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
+                     not_zero,
+                     &dont_need_remembered_set);
+
+    // First notify the incremental marker if necessary, then update the
+    // remembered set.
+    CheckNeedsToInformIncrementalMarker(
+        masm,
+        kUpdateRememberedSetOnNoNeedToInformIncrementalMarker,
+        mode);
+    InformIncrementalMarker(masm, mode);
+    regs_.Restore(masm);
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+
+    __ bind(&dont_need_remembered_set);
+  }
+
+  CheckNeedsToInformIncrementalMarker(
+      masm,
+      kReturnOnNoNeedToInformIncrementalMarker,
+      mode);
+  InformIncrementalMarker(masm, mode);
+  regs_.Restore(masm);
+  __ ret(0);
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+  int argument_count = 3;
+  __ PrepareCallCFunction(argument_count, regs_.scratch0());
+  __ mov(Operand(esp, 0 * kPointerSize), regs_.object());
+  if (mode == INCREMENTAL_COMPACTION) {
+    __ mov(Operand(esp, 1 * kPointerSize), regs_.address());  // Slot.
+  } else {
+    ASSERT(mode == INCREMENTAL);
+    __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
+    __ mov(Operand(esp, 1 * kPointerSize), regs_.scratch0());  // Value.
+  }
+  __ mov(Operand(esp, 2 * kPointerSize),
+         Immediate(ExternalReference::isolate_address()));
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  if (mode == INCREMENTAL_COMPACTION) {
+    __ CallCFunction(
+        ExternalReference::incremental_evacuation_record_write_function(
+            masm->isolate()),
+        argument_count);
+  } else {
+    ASSERT(mode == INCREMENTAL);
+    __ CallCFunction(
+        ExternalReference::incremental_marking_record_write_function(
+            masm->isolate()),
+        argument_count);
+  }
+  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+}
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+    MacroAssembler* masm,
+    OnNoNeedToInformIncrementalMarker on_no_need,
+    Mode mode) {
+  Label object_is_black, need_incremental, need_incremental_pop_object;
+
+  // Let's look at the color of the object: if it is not black, we don't have
+  // to inform the incremental marker.
+  __ JumpIfBlack(regs_.object(),
+                 regs_.scratch0(),
+                 regs_.scratch1(),
+                 &object_is_black,
+                 Label::kNear);
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ ret(0);
+  }
+
+  __ bind(&object_is_black);
+
+  // Get the value from the slot.
+  __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
+
+  if (mode == INCREMENTAL_COMPACTION) {
+    Label ensure_not_white;
+
+    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kEvacuationCandidateMask,
+                     zero,
+                     &ensure_not_white,
+                     Label::kNear);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
+                     not_zero,
+                     &ensure_not_white,
+                     Label::kNear);
+
+    __ jmp(&need_incremental);
+
+    __ bind(&ensure_not_white);
+  }
+
+  // We need an extra register for this, so we push the object register
+  // temporarily.
+  __ push(regs_.object());
+  __ EnsureNotWhite(regs_.scratch0(),  // The value.
+                    regs_.scratch1(),  // Scratch.
+                    regs_.object(),  // Scratch.
+                    &need_incremental_pop_object,
+                    Label::kNear);
+  __ pop(regs_.object());
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ ret(0);
+  }
+
+  __ bind(&need_incremental_pop_object);
+  __ pop(regs_.object());
+
+  __ bind(&need_incremental);
+
+  // Fall through when we need to inform the incremental marker.
+}
+
+
 #undef __
 
 } }  // namespace v8::internal
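
The CheckNeedsToInformIncrementalMarker code added above encodes a small
decision tree in hand-written assembly.  As a reading aid, here is a minimal
C++ sketch of the same decision; the boolean parameters stand in for V8's
mark-bit and page-flag queries (JumpIfBlack, CheckPageFlag, EnsureNotWhite)
and the function itself is illustrative, not part of V8:

    // Sketch only: mirrors the branch structure of the stub, not V8's API.
    bool NeedsToInformIncrementalMarker(bool object_is_black,
                                        bool compacting,
                                        bool value_on_evacuation_candidate,
                                        bool object_skips_slot_recording,
                                        bool value_is_white) {
      // A non-black object has not been scanned yet (or will be rescanned),
      // so the store needs no special treatment beyond the remembered set.
      if (!object_is_black) return false;
      // When compacting, a pointer into an evacuation candidate must be
      // recorded unless the object's page opts out of slot recording.
      if (compacting && value_on_evacuation_candidate &&
          !object_skips_slot_recording) {
        return true;
      }
      // Otherwise the marker only needs to hear about stores of still-white
      // (unmarked) values into an already-black object.
      return value_is_white;
    }

In the generated code the "false" outcomes fall through to RememberedSetHelper
or a plain ret, matching the two on_no_need branches above.
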
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index fa255da..2a7d316 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -60,6 +60,25 @@
 };
 
 
+class StoreBufferOverflowStub: public CodeStub {
+ public:
+  explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
+      : save_doubles_(save_fp) { }
+
+  void Generate(MacroAssembler* masm);
+
+  virtual bool IsPregenerated() { return true; }
+  static void GenerateFixedRegStubsAheadOfTime();
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+  SaveFPRegsMode save_doubles_;
+
+  Major MajorKey() { return StoreBufferOverflow; }
+  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+};
+
+
 class UnaryOpStub: public CodeStub {
  public:
   UnaryOpStub(Token::Value op,
@@ -418,6 +437,8 @@
                                      Register r0,
                                      Register r1);
 
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
  private:
   static const int kInlinedProbes = 4;
   static const int kTotalProbes = 20;
@@ -430,7 +451,7 @@
       StringDictionary::kHeaderSize +
       StringDictionary::kElementsStartIndex * kPointerSize;
 
-  Major MajorKey() { return StringDictionaryNegativeLookup; }
+  Major MajorKey() { return StringDictionaryLookup; }
 
   int MinorKey() {
     return DictionaryBits::encode(dictionary_.code()) |
@@ -451,6 +472,272 @@
 };
 
 
+class RecordWriteStub: public CodeStub {
+ public:
+  RecordWriteStub(Register object,
+                  Register value,
+                  Register address,
+                  RememberedSetAction remembered_set_action,
+                  SaveFPRegsMode fp_mode)
+      : object_(object),
+        value_(value),
+        address_(address),
+        remembered_set_action_(remembered_set_action),
+        save_fp_regs_mode_(fp_mode),
+        regs_(object,   // An input reg.
+              address,  // An input reg.
+              value) {  // One scratch reg.
+  }
+
+  enum Mode {
+    STORE_BUFFER_ONLY,
+    INCREMENTAL,
+    INCREMENTAL_COMPACTION
+  };
+
+  virtual bool IsPregenerated();
+  static void GenerateFixedRegStubsAheadOfTime();
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
+  static const byte kTwoByteNopInstruction = 0x3c;  // Cmpb al, #imm8.
+  static const byte kTwoByteJumpInstruction = 0xeb;  // Jmp #imm8.
+
+  static const byte kFiveByteNopInstruction = 0x3d;  // Cmpl eax, #imm32.
+  static const byte kFiveByteJumpInstruction = 0xe9;  // Jmp #imm32.
+
+  static Mode GetMode(Code* stub) {
+    byte first_instruction = stub->instruction_start()[0];
+    byte second_instruction = stub->instruction_start()[2];
+
+    if (first_instruction == kTwoByteJumpInstruction) {
+      return INCREMENTAL;
+    }
+
+    ASSERT(first_instruction == kTwoByteNopInstruction);
+
+    if (second_instruction == kFiveByteJumpInstruction) {
+      return INCREMENTAL_COMPACTION;
+    }
+
+    ASSERT(second_instruction == kFiveByteNopInstruction);
+
+    return STORE_BUFFER_ONLY;
+  }
+
+  static void Patch(Code* stub, Mode mode) {
+    switch (mode) {
+      case STORE_BUFFER_ONLY:
+        ASSERT(GetMode(stub) == INCREMENTAL ||
+               GetMode(stub) == INCREMENTAL_COMPACTION);
+        stub->instruction_start()[0] = kTwoByteNopInstruction;
+        stub->instruction_start()[2] = kFiveByteNopInstruction;
+        break;
+      case INCREMENTAL:
+        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        stub->instruction_start()[0] = kTwoByteJumpInstruction;
+        break;
+      case INCREMENTAL_COMPACTION:
+        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        stub->instruction_start()[0] = kTwoByteNopInstruction;
+        stub->instruction_start()[2] = kFiveByteJumpInstruction;
+        break;
+    }
+    ASSERT(GetMode(stub) == mode);
+    CPU::FlushICache(stub->instruction_start(), 7);
+  }
+
+ private:
+  // This is a helper class for freeing up 3 scratch registers, where the third
+  // is always ecx (needed for shift operations).  The input is two registers
+  // that must be preserved and one scratch register provided by the caller.
+  class RegisterAllocation {
+   public:
+    RegisterAllocation(Register object,
+                       Register address,
+                       Register scratch0)
+        : object_orig_(object),
+          address_orig_(address),
+          scratch0_orig_(scratch0),
+          object_(object),
+          address_(address),
+          scratch0_(scratch0) {
+      ASSERT(!AreAliased(scratch0, object, address, no_reg));
+      scratch1_ = GetRegThatIsNotEcxOr(object_, address_, scratch0_);
+      if (scratch0.is(ecx)) {
+        scratch0_ = GetRegThatIsNotEcxOr(object_, address_, scratch1_);
+      }
+      if (object.is(ecx)) {
+        object_ = GetRegThatIsNotEcxOr(address_, scratch0_, scratch1_);
+      }
+      if (address.is(ecx)) {
+        address_ = GetRegThatIsNotEcxOr(object_, scratch0_, scratch1_);
+      }
+      ASSERT(!AreAliased(scratch0_, object_, address_, ecx));
+    }
+
+    void Save(MacroAssembler* masm) {
+      ASSERT(!address_orig_.is(object_));
+      ASSERT(object_.is(object_orig_) || address_.is(address_orig_));
+      ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
+      ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
+      ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
+      // We don't have to save scratch0_orig_ because it was given to us as
+      // a scratch register.  But if we had to switch to a different reg then
+      // we should save the new scratch0_.
+      if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
+      if (!ecx.is(scratch0_orig_) &&
+          !ecx.is(object_orig_) &&
+          !ecx.is(address_orig_)) {
+        masm->push(ecx);
+      }
+      masm->push(scratch1_);
+      if (!address_.is(address_orig_)) {
+        masm->push(address_);
+        masm->mov(address_, address_orig_);
+      }
+      if (!object_.is(object_orig_)) {
+        masm->push(object_);
+        masm->mov(object_, object_orig_);
+      }
+    }
+
+    void Restore(MacroAssembler* masm) {
+      // These will have been preserved the entire time, so we just need to move
+      // them back.  Only in one case is the orig_ reg different from the plain
+      // one, since only one of them can alias with ecx.
+      if (!object_.is(object_orig_)) {
+        masm->mov(object_orig_, object_);
+        masm->pop(object_);
+      }
+      if (!address_.is(address_orig_)) {
+        masm->mov(address_orig_, address_);
+        masm->pop(address_);
+      }
+      masm->pop(scratch1_);
+      if (!ecx.is(scratch0_orig_) &&
+          !ecx.is(object_orig_) &&
+          !ecx.is(address_orig_)) {
+        masm->pop(ecx);
+      }
+      if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
+    }
+
+    // If we have to call into C then we need to save and restore all caller-
+    // saved registers that were not already preserved.  The caller saved
+    // registers are eax, ecx and edx.  The three scratch registers (incl. ecx)
+    // will be restored by other means so we don't bother pushing them here.
+    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+      if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax);
+      if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx);
+      if (mode == kSaveFPRegs) {
+        CpuFeatures::Scope scope(SSE2);
+        masm->sub(esp,
+                  Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
+        // Save all XMM registers except XMM0.
+        for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
+          XMMRegister reg = XMMRegister::from_code(i);
+          masm->movdbl(Operand(esp, (i - 1) * kDoubleSize), reg);
+        }
+      }
+    }
+
+    inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
+                                           SaveFPRegsMode mode) {
+      if (mode == kSaveFPRegs) {
+        CpuFeatures::Scope scope(SSE2);
+        // Restore all XMM registers except XMM0.
+        for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
+          XMMRegister reg = XMMRegister::from_code(i);
+          masm->movdbl(reg, Operand(esp, (i - 1) * kDoubleSize));
+        }
+        masm->add(esp,
+                  Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1)));
+      }
+      if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx);
+      if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->pop(eax);
+    }
+
+    inline Register object() { return object_; }
+    inline Register address() { return address_; }
+    inline Register scratch0() { return scratch0_; }
+    inline Register scratch1() { return scratch1_; }
+
+   private:
+    Register object_orig_;
+    Register address_orig_;
+    Register scratch0_orig_;
+    Register object_;
+    Register address_;
+    Register scratch0_;
+    Register scratch1_;
+    // Third scratch register is always ecx.
+
+    Register GetRegThatIsNotEcxOr(Register r1,
+                                  Register r2,
+                                  Register r3) {
+      for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+        Register candidate = Register::FromAllocationIndex(i);
+        if (candidate.is(ecx)) continue;
+        if (candidate.is(r1)) continue;
+        if (candidate.is(r2)) continue;
+        if (candidate.is(r3)) continue;
+        return candidate;
+      }
+      UNREACHABLE();
+      return no_reg;
+    }
+    friend class RecordWriteStub;
+  };
+
+  enum OnNoNeedToInformIncrementalMarker {
+    kReturnOnNoNeedToInformIncrementalMarker,
+    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+  };
+  void Generate(MacroAssembler* masm);
+  void GenerateIncremental(MacroAssembler* masm, Mode mode);
+  void CheckNeedsToInformIncrementalMarker(
+      MacroAssembler* masm,
+      OnNoNeedToInformIncrementalMarker on_no_need,
+      Mode mode);
+  void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+
+  Major MajorKey() { return RecordWrite; }
+
+  int MinorKey() {
+    return ObjectBits::encode(object_.code()) |
+        ValueBits::encode(value_.code()) |
+        AddressBits::encode(address_.code()) |
+        RememberedSetActionBits::encode(remembered_set_action_) |
+        SaveFPRegsModeBits::encode(save_fp_regs_mode_);
+  }
+
+  bool MustBeInStubCache() {
+    // All stubs must be registered in the stub cache;
+    // otherwise the IncrementalMarker would not be able
+    // to find and patch them.
+    return true;
+  }
+
+  void Activate(Code* code) {
+    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+  }
+
+  class ObjectBits: public BitField<int, 0, 3> {};
+  class ValueBits: public BitField<int, 3, 3> {};
+  class AddressBits: public BitField<int, 6, 3> {};
+  class RememberedSetActionBits: public BitField<RememberedSetAction, 9, 1> {};
+  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 10, 1> {};
+
+  Register object_;
+  Register value_;
+  Register address_;
+  RememberedSetAction remembered_set_action_;
+  SaveFPRegsMode save_fp_regs_mode_;
+  RegisterAllocation regs_;
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_IA32_CODE_STUBS_IA32_H_
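
The patchable-prologue constants above work because "cmp al, imm8" (0x3c)
occupies exactly the two bytes of a short jmp (0xeb rel8) and "cmp eax, imm32"
(0x3d) occupies exactly the five bytes of a near jmp (0xe9 rel32); the jump
displacements survive as harmless immediate operands while marking is off, so
flipping a single opcode byte switches the stub between fall-through and
branch without resizing any code.  A hedged sketch of the decoding side,
mirroring GetMode (the standalone enum and function here are illustrative):

    enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

    // Decode the first seven bytes of a RecordWrite stub into its mode.
    Mode DecodeRecordWriteMode(const unsigned char* code) {
      if (code[0] == 0xeb) return INCREMENTAL;             // short jmp is live
      // code[0] == 0x3c: the two-byte cmp "nop"; look at the second slot.
      if (code[2] == 0xe9) return INCREMENTAL_COMPACTION;  // near jmp is live
      // code[2] == 0x3d: the five-byte cmp "nop".
      return STORE_BUFFER_ONLY;
    }
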
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 3a657bd..f901b6f 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -39,12 +39,16 @@
 // Platform-specific RuntimeCallHelper functions.
 
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
-  masm->EnterInternalFrame();
+  masm->EnterFrame(StackFrame::INTERNAL);
+  ASSERT(!masm->has_frame());
+  masm->set_has_frame(true);
 }
 
 
 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-  masm->LeaveInternalFrame();
+  masm->LeaveFrame(StackFrame::INTERNAL);
+  ASSERT(masm->has_frame());
+  masm->set_has_frame(false);
 }
 
 
@@ -108,14 +112,14 @@
     __ mov(edx, dst);
     __ and_(edx, 0xF);
     __ neg(edx);
-    __ add(Operand(edx), Immediate(16));
-    __ add(dst, Operand(edx));
-    __ add(src, Operand(edx));
-    __ sub(Operand(count), edx);
+    __ add(edx, Immediate(16));
+    __ add(dst, edx);
+    __ add(src, edx);
+    __ sub(count, edx);
 
     // edi is now aligned. Check if esi is also aligned.
     Label unaligned_source;
-    __ test(Operand(src), Immediate(0x0F));
+    __ test(src, Immediate(0x0F));
     __ j(not_zero, &unaligned_source);
     {
       // Copy loop for aligned source and destination.
@@ -130,11 +134,11 @@
         __ prefetch(Operand(src, 0x20), 1);
         __ movdqa(xmm0, Operand(src, 0x00));
         __ movdqa(xmm1, Operand(src, 0x10));
-        __ add(Operand(src), Immediate(0x20));
+        __ add(src, Immediate(0x20));
 
         __ movdqa(Operand(dst, 0x00), xmm0);
         __ movdqa(Operand(dst, 0x10), xmm1);
-        __ add(Operand(dst), Immediate(0x20));
+        __ add(dst, Immediate(0x20));
 
         __ dec(loop_count);
         __ j(not_zero, &loop);
@@ -142,12 +146,12 @@
 
       // At most 31 bytes to copy.
       Label move_less_16;
-      __ test(Operand(count), Immediate(0x10));
+      __ test(count, Immediate(0x10));
       __ j(zero, &move_less_16);
       __ movdqa(xmm0, Operand(src, 0));
-      __ add(Operand(src), Immediate(0x10));
+      __ add(src, Immediate(0x10));
       __ movdqa(Operand(dst, 0), xmm0);
-      __ add(Operand(dst), Immediate(0x10));
+      __ add(dst, Immediate(0x10));
       __ bind(&move_less_16);
 
       // At most 15 bytes to copy. Copy 16 bytes at end of string.
@@ -176,11 +180,11 @@
         __ prefetch(Operand(src, 0x20), 1);
         __ movdqu(xmm0, Operand(src, 0x00));
         __ movdqu(xmm1, Operand(src, 0x10));
-        __ add(Operand(src), Immediate(0x20));
+        __ add(src, Immediate(0x20));
 
         __ movdqa(Operand(dst, 0x00), xmm0);
         __ movdqa(Operand(dst, 0x10), xmm1);
-        __ add(Operand(dst), Immediate(0x20));
+        __ add(dst, Immediate(0x20));
 
         __ dec(loop_count);
         __ j(not_zero, &loop);
@@ -188,12 +192,12 @@
 
       // At most 31 bytes to copy.
       Label move_less_16;
-      __ test(Operand(count), Immediate(0x10));
+      __ test(count, Immediate(0x10));
       __ j(zero, &move_less_16);
       __ movdqu(xmm0, Operand(src, 0));
-      __ add(Operand(src), Immediate(0x10));
+      __ add(src, Immediate(0x10));
       __ movdqa(Operand(dst, 0), xmm0);
-      __ add(Operand(dst), Immediate(0x10));
+      __ add(dst, Immediate(0x10));
       __ bind(&move_less_16);
 
       // At most 15 bytes to copy. Copy 16 bytes at end of string.
@@ -228,10 +232,10 @@
     __ mov(edx, dst);
     __ and_(edx, 0x03);
     __ neg(edx);
-    __ add(Operand(edx), Immediate(4));  // edx = 4 - (dst & 3)
-    __ add(dst, Operand(edx));
-    __ add(src, Operand(edx));
-    __ sub(Operand(count), edx);
+    __ add(edx, Immediate(4));  // edx = 4 - (dst & 3)
+    __ add(dst, edx);
+    __ add(src, edx);
+    __ sub(count, edx);
     // edi is now aligned, ecx holds the number of remaining bytes to copy.
 
     __ mov(edx, count);
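
The MemCopy hunks above only strip redundant Operand() wrappers, but the
arithmetic they touch is the interesting part: the destination is advanced to
a 16-byte boundary before the SSE2 loop, and the source's resulting alignment
decides between movdqa and movdqu loads.  A small C++ sketch of that step,
under the assumption (not visible in this hunk) that the bytes being skipped
have already been copied by an earlier unaligned move; the helper name is
made up:

    #include <cstddef>
    #include <cstdint>

    // Advance dst/src by 16 - (dst & 15) bytes, mirroring the neg/add-16
    // sequence above, so that dst becomes 16-byte aligned.  Returns true when
    // src is then also aligned (movdqa is safe), false otherwise (movdqu).
    static bool AlignCopyDestination(std::uint8_t*& dst,
                                     const std::uint8_t*& src,
                                     std::size_t& count) {
      std::size_t skew =
          16 - (reinterpret_cast<std::uintptr_t>(dst) & 0xF);  // 1..16
      dst += skew;
      src += skew;
      count -= skew;
      return (reinterpret_cast<std::uintptr_t>(src) & 0xF) == 0;
    }
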
diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc
index 2389948..d7184ed 100644
--- a/src/ia32/debug-ia32.cc
+++ b/src/ia32/debug-ia32.cc
@@ -100,63 +100,64 @@
                                           RegList non_object_regs,
                                           bool convert_call_to_jmp) {
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Store the registers containing live values on the expression stack to
-  // make sure that these are correctly updated during GC. Non object values
-  // are stored as a smi causing it to be untouched by GC.
-  ASSERT((object_regs & ~kJSCallerSaved) == 0);
-  ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
-  ASSERT((object_regs & non_object_regs) == 0);
-  for (int i = 0; i < kNumJSCallerSaved; i++) {
-    int r = JSCallerSavedCode(i);
-    Register reg = { r };
-    if ((object_regs & (1 << r)) != 0) {
-      __ push(reg);
-    }
-    if ((non_object_regs & (1 << r)) != 0) {
-      if (FLAG_debug_code) {
-        __ test(reg, Immediate(0xc0000000));
-        __ Assert(zero, "Unable to encode value as smi");
+    // Store the registers containing live values on the expression stack to
+    // make sure that these are correctly updated during GC. Non-object values
+    // are stored as smis so that they are untouched by the GC.
+    ASSERT((object_regs & ~kJSCallerSaved) == 0);
+    ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+    ASSERT((object_regs & non_object_regs) == 0);
+    for (int i = 0; i < kNumJSCallerSaved; i++) {
+      int r = JSCallerSavedCode(i);
+      Register reg = { r };
+      if ((object_regs & (1 << r)) != 0) {
+        __ push(reg);
       }
-      __ SmiTag(reg);
-      __ push(reg);
+      if ((non_object_regs & (1 << r)) != 0) {
+        if (FLAG_debug_code) {
+          __ test(reg, Immediate(0xc0000000));
+          __ Assert(zero, "Unable to encode value as smi");
+        }
+        __ SmiTag(reg);
+        __ push(reg);
+      }
     }
-  }
 
 #ifdef DEBUG
-  __ RecordComment("// Calling from debug break to runtime - come in - over");
+    __ RecordComment("// Calling from debug break to runtime - come in - over");
 #endif
-  __ Set(eax, Immediate(0));  // No arguments.
-  __ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate())));
+    __ Set(eax, Immediate(0));  // No arguments.
+    __ mov(ebx, Immediate(ExternalReference::debug_break(masm->isolate())));
 
-  CEntryStub ceb(1);
-  __ CallStub(&ceb);
+    CEntryStub ceb(1);
+    __ CallStub(&ceb);
 
-  // Restore the register values containing object pointers from the expression
-  // stack.
-  for (int i = kNumJSCallerSaved; --i >= 0;) {
-    int r = JSCallerSavedCode(i);
-    Register reg = { r };
-    if (FLAG_debug_code) {
-      __ Set(reg, Immediate(kDebugZapValue));
+    // Restore the register values containing object pointers from the
+    // expression stack.
+    for (int i = kNumJSCallerSaved; --i >= 0;) {
+      int r = JSCallerSavedCode(i);
+      Register reg = { r };
+      if (FLAG_debug_code) {
+        __ Set(reg, Immediate(kDebugZapValue));
+      }
+      if ((object_regs & (1 << r)) != 0) {
+        __ pop(reg);
+      }
+      if ((non_object_regs & (1 << r)) != 0) {
+        __ pop(reg);
+        __ SmiUntag(reg);
+      }
     }
-    if ((object_regs & (1 << r)) != 0) {
-      __ pop(reg);
-    }
-    if ((non_object_regs & (1 << r)) != 0) {
-      __ pop(reg);
-      __ SmiUntag(reg);
-    }
+
+    // Get rid of the internal frame.
   }
 
-  // Get rid of the internal frame.
-  __ LeaveInternalFrame();
-
   // If this call did not replace a call but patched other code then there will
   // be an unwanted return address left on the stack. Here we get rid of that.
   if (convert_call_to_jmp) {
-    __ add(Operand(esp), Immediate(kPointerSize));
+    __ add(esp, Immediate(kPointerSize));
   }
 
   // Now that the break point has been handled, resume normal execution by
@@ -298,7 +299,7 @@
   __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
 
   // Re-run JSFunction, edi is function, esi is context.
-  __ jmp(Operand(edx));
+  __ jmp(edx);
 }
 
 const bool Debug::kFrameDropperSupported = true;
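
A pattern runs through the last two files: explicit EnterInternalFrame() /
LeaveInternalFrame() pairs are replaced either by a FrameScope block or by
EnterFrame/LeaveFrame plus has_frame bookkeeping.  The shape is plain RAII;
the sketch below is not V8's FrameScope, it just restates the MacroAssembler
calls already visible in the codegen-ia32.cc hunk as a scoped object.

    // Illustrative only, assuming the MacroAssembler interface used in
    // StubRuntimeCallHelper::BeforeCall/AfterCall above.
    class ScopedInternalFrame {
     public:
      explicit ScopedInternalFrame(MacroAssembler* masm) : masm_(masm) {
        masm_->EnterFrame(StackFrame::INTERNAL);
        masm_->set_has_frame(true);
      }
      ~ScopedInternalFrame() {
        masm_->set_has_frame(false);
        masm_->LeaveFrame(StackFrame::INTERNAL);
      }
     private:
      MacroAssembler* masm_;
    };

The has_frame flag ties in with the SometimesSetsUpAFrame() overrides added
earlier: as the comment in StringDictionaryLookupStub::Generate notes, a stub
that never sets up a frame must not call anything that could cause a GC.
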
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index e23f3e9..02cc4eb 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -116,7 +116,7 @@
         new_reloc->GetDataStartAddress() + padding, 0);
     intptr_t comment_string
         = reinterpret_cast<intptr_t>(RelocInfo::kFillerCommentString);
-    RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string);
+    RelocInfo rinfo(0, RelocInfo::COMMENT, comment_string, NULL);
     for (int i = 0; i < additional_comments; ++i) {
 #ifdef DEBUG
       byte* pos_before = reloc_info_writer.pos();
@@ -174,7 +174,8 @@
       // We use RUNTIME_ENTRY for deoptimization bailouts.
       RelocInfo rinfo(curr_address + 1,  // 1 after the call opcode.
                       RelocInfo::RUNTIME_ENTRY,
-                      reinterpret_cast<intptr_t>(deopt_entry));
+                      reinterpret_cast<intptr_t>(deopt_entry),
+                      NULL);
       reloc_info_writer.Write(&rinfo);
       ASSERT_GE(reloc_info_writer.pos(),
                 reloc_info->address() + ByteArray::kHeaderSize);
@@ -205,6 +206,11 @@
   node->set_next(data->deoptimizing_code_list_);
   data->deoptimizing_code_list_ = node;
 
+  // We might be in the middle of incremental marking with compaction.
+  // Tell collector to treat this code object in a special way and
+  // ignore all slots that might have been recorded on it.
+  isolate->heap()->mark_compact_collector()->InvalidateCode(code);
+
   // Set the code for the function to non-optimized version.
   function->ReplaceCode(function->shared()->code());
 
@@ -221,7 +227,8 @@
 }
 
 
-void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
+void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
+                                        Address pc_after,
                                         Code* check_code,
                                         Code* replacement_code) {
   Address call_target_address = pc_after - kIntSize;
@@ -250,6 +257,13 @@
   *(call_target_address - 2) = 0x90;  // nop
   Assembler::set_target_address_at(call_target_address,
                                    replacement_code->entry());
+
+  RelocInfo rinfo(call_target_address,
+                  RelocInfo::CODE_TARGET,
+                  0,
+                  unoptimized_code);
+  unoptimized_code->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+      unoptimized_code, &rinfo, replacement_code);
 }
 
 
@@ -268,6 +282,9 @@
   *(call_target_address - 2) = 0x07;  // offset
   Assembler::set_target_address_at(call_target_address,
                                    check_code->entry());
+
+  check_code->GetHeap()->incremental_marking()->
+      RecordCodeTargetPatch(call_target_address, check_code);
 }
 
 
@@ -415,7 +432,14 @@
     output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
   } else {
     // Setup the frame pointer and the context pointer.
-    output_[0]->SetRegister(ebp.code(), input_->GetRegister(ebp.code()));
+    // All OSR stack frames are dynamically aligned to an 8-byte boundary.
+    int frame_pointer = input_->GetRegister(ebp.code());
+    if ((frame_pointer & 0x4) == 0) {
+      // Return address at FP + 4 should be aligned, so FP mod 8 should be 4.
+      frame_pointer -= kPointerSize;
+      has_alignment_padding_ = 1;
+    }
+    output_[0]->SetRegister(ebp.code(), frame_pointer);
     output_[0]->SetRegister(esi.code(), input_->GetRegister(esi.code()));
 
     unsigned pc_offset = data->OsrPcOffset()->value();
@@ -480,9 +504,11 @@
   // top address and the current frame's size.
   uint32_t top_address;
   if (is_bottommost) {
-    // 2 = context and function in the frame.
-    top_address =
-        input_->GetRegister(ebp.code()) - (2 * kPointerSize) - height_in_bytes;
+    // If the optimized frame had alignment padding, adjust the frame pointer
+    // to point to the new position of the old frame pointer after padding
+    // is removed. Subtract 2 * kPointerSize for the context and function slots.
+    top_address = input_->GetRegister(ebp.code()) - (2 * kPointerSize) -
+        height_in_bytes + has_alignment_padding_ * kPointerSize;
   } else {
     top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
   }
@@ -533,7 +559,9 @@
   }
   output_frame->SetFrameSlot(output_offset, value);
   intptr_t fp_value = top_address + output_offset;
-  ASSERT(!is_bottommost || input_->GetRegister(ebp.code()) == fp_value);
+  ASSERT(!is_bottommost ||
+      input_->GetRegister(ebp.code()) + has_alignment_padding_ * kPointerSize
+      == fp_value);
   output_frame->SetFp(fp_value);
   if (is_topmost) output_frame->SetRegister(ebp.code(), fp_value);
   if (FLAG_trace_deopt) {
@@ -638,7 +666,7 @@
 
   const int kDoubleRegsSize = kDoubleSize *
                               XMMRegister::kNumAllocatableRegisters;
-  __ sub(Operand(esp), Immediate(kDoubleRegsSize));
+  __ sub(esp, Immediate(kDoubleRegsSize));
   for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
     XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
     int offset = i * kDoubleSize;
@@ -662,7 +690,7 @@
     __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
     __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
   }
-  __ sub(edx, Operand(ebp));
+  __ sub(edx, ebp);
   __ neg(edx);
 
   // Allocate a new deoptimizer object.
@@ -675,7 +703,10 @@
   __ mov(Operand(esp, 4 * kPointerSize), edx);  // Fp-to-sp delta.
   __ mov(Operand(esp, 5 * kPointerSize),
          Immediate(ExternalReference::isolate_address()));
-  __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+  {
+    AllowExternalCallThatCantCauseGC scope(masm());
+    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+  }
 
   // Preserve deoptimizer object in register eax and get the input
   // frame descriptor pointer.
@@ -698,15 +729,15 @@
 
   // Remove the bailout id and the double registers from the stack.
   if (type() == EAGER) {
-    __ add(Operand(esp), Immediate(kDoubleRegsSize + kPointerSize));
+    __ add(esp, Immediate(kDoubleRegsSize + kPointerSize));
   } else {
-    __ add(Operand(esp), Immediate(kDoubleRegsSize + 2 * kPointerSize));
+    __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
   }
 
   // Compute a pointer to the unwinding limit in register ecx; that is
   // the first stack slot not part of the input frame.
   __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
-  __ add(ecx, Operand(esp));
+  __ add(ecx, esp);
 
   // Unwind the stack down to - but not including - the unwinding
   // limit and copy the contents of the activation frame to the input
@@ -715,18 +746,43 @@
   Label pop_loop;
   __ bind(&pop_loop);
   __ pop(Operand(edx, 0));
-  __ add(Operand(edx), Immediate(sizeof(uint32_t)));
-  __ cmp(ecx, Operand(esp));
+  __ add(edx, Immediate(sizeof(uint32_t)));
+  __ cmp(ecx, esp);
   __ j(not_equal, &pop_loop);
 
+  // If frame was dynamically aligned, pop padding.
+  Label sentinel, sentinel_done;
+  __ pop(ecx);
+  __ cmp(ecx, Operand(eax, Deoptimizer::frame_alignment_marker_offset()));
+  __ j(equal, &sentinel);
+  __ push(ecx);
+  __ jmp(&sentinel_done);
+  __ bind(&sentinel);
+  __ mov(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
+         Immediate(1));
+  __ bind(&sentinel_done);
   // Compute the output frame in the deoptimizer.
   __ push(eax);
   __ PrepareCallCFunction(1, ebx);
   __ mov(Operand(esp, 0 * kPointerSize), eax);
-  __ CallCFunction(
-      ExternalReference::compute_output_frames_function(isolate), 1);
+  {
+    AllowExternalCallThatCantCauseGC scope(masm());
+    __ CallCFunction(
+        ExternalReference::compute_output_frames_function(isolate), 1);
+  }
   __ pop(eax);
 
+  if (type() == OSR) {
+    // If alignment padding is added, push the sentinel.
+    Label no_osr_padding;
+    __ cmp(Operand(eax, Deoptimizer::has_alignment_padding_offset()),
+           Immediate(0));
+    __ j(equal, &no_osr_padding, Label::kNear);
+    __ push(Operand(eax, Deoptimizer::frame_alignment_marker_offset()));
+    __ bind(&no_osr_padding);
+  }
+
+
   // Replace the current frame with the output frames.
   Label outer_push_loop, inner_push_loop;
   // Outer loop state: eax = current FrameDescription**, edx = one past the
@@ -739,12 +795,12 @@
   __ mov(ebx, Operand(eax, 0));
   __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
   __ bind(&inner_push_loop);
-  __ sub(Operand(ecx), Immediate(sizeof(uint32_t)));
+  __ sub(ecx, Immediate(sizeof(uint32_t)));
   __ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
-  __ test(ecx, Operand(ecx));
+  __ test(ecx, ecx);
   __ j(not_zero, &inner_push_loop);
-  __ add(Operand(eax), Immediate(kPointerSize));
-  __ cmp(eax, Operand(edx));
+  __ add(eax, Immediate(kPointerSize));
+  __ cmp(eax, edx);
   __ j(below, &outer_push_loop);
 
   // In case of OSR, we have to restore the XMM registers.
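
The OSR change above rests on one invariant: with 4-byte stack slots the
return address sits at fp + 4 and should be 8-byte aligned, so a well-aligned
frame pointer satisfies fp % 8 == 4.  If the incoming ebp is instead 8-byte
aligned, the deoptimizer assumes one word of dynamic alignment padding and
records it.  A self-contained sketch of that adjustment (struct and function
names are illustrative):

    #include <cstdint>

    struct OsrFrameFixup {
      std::uint32_t frame_pointer;
      int has_alignment_padding;  // mirrors the flag tested later via the sentinel
    };

    OsrFrameFixup AdjustOsrFramePointer(std::uint32_t ebp_value) {
      const std::uint32_t kPointerSize = 4;  // ia32
      OsrFrameFixup r = { ebp_value, 0 };
      if ((ebp_value & 0x4) == 0) {
        // fp % 8 == 0: the return address at fp + 4 would be misaligned,
        // so account for one word of alignment padding.
        r.frame_pointer -= kPointerSize;
        r.has_alignment_padding = 1;
      }
      return r;
    }
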
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index a936277..04edc5f 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -55,6 +55,7 @@
 
 
 static const ByteMnemonic two_operands_instr[] = {
+  {0x01, "add", OPER_REG_OP_ORDER},
   {0x03, "add", REG_OPER_OP_ORDER},
   {0x09, "or", OPER_REG_OP_ORDER},
   {0x0B, "or", REG_OPER_OP_ORDER},
@@ -117,6 +118,19 @@
 };
 
 
+// Generally we don't want to generate these because they are subject to partial
+// register stalls.  They are included for completeness, and because the cmp
+// variant is used by the RecordWrite stub.  Since cmp does not update the
+// register, it is not subject to partial register stalls.
+static ByteMnemonic byte_immediate_instr[] = {
+  {0x0c, "or", UNSET_OP_ORDER},
+  {0x24, "and", UNSET_OP_ORDER},
+  {0x34, "xor", UNSET_OP_ORDER},
+  {0x3c, "cmp", UNSET_OP_ORDER},
+  {-1, "", UNSET_OP_ORDER}
+};
+
+
 static const char* const jump_conditional_mnem[] = {
   /*0*/ "jo", "jno", "jc", "jnc",
   /*4*/ "jz", "jnz", "jna", "ja",
@@ -149,7 +163,8 @@
   REGISTER_INSTR,
   MOVE_REG_INSTR,
   CALL_JUMP_INSTR,
-  SHORT_IMMEDIATE_INSTR
+  SHORT_IMMEDIATE_INSTR,
+  BYTE_IMMEDIATE_INSTR
 };
 
 
@@ -198,6 +213,7 @@
   CopyTable(zero_operands_instr, ZERO_OPERANDS_INSTR);
   CopyTable(call_jump_instr, CALL_JUMP_INSTR);
   CopyTable(short_immediate_instr, SHORT_IMMEDIATE_INSTR);
+  CopyTable(byte_immediate_instr, BYTE_IMMEDIATE_INSTR);
   AddJumpConditionalShort();
   SetTableRange(REGISTER_INSTR, 0x40, 0x47, "inc");
   SetTableRange(REGISTER_INSTR, 0x48, 0x4F, "dec");
@@ -912,6 +928,12 @@
       break;
     }
 
+    case BYTE_IMMEDIATE_INSTR: {
+      AppendToBuffer("%s al, 0x%x", idesc.mnem, data[1]);
+      data += 2;
+      break;
+    }
+
     case NO_INSTR:
       processed = false;
       break;
@@ -1346,11 +1368,6 @@
         data += 2;
         break;
 
-      case 0x2C:
-        AppendToBuffer("subb eax,0x%x", *reinterpret_cast<uint8_t*>(data+1));
-        data += 2;
-        break;
-
       case 0xA9:
         AppendToBuffer("test eax,0x%x", *reinterpret_cast<int32_t*>(data+1));
         data += 5;
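
The new BYTE_IMMEDIATE_INSTR table exists mainly so the disassembler can
render the "cmp al, imm8" byte that the RecordWrite stub uses as its patchable
nop; each entry is a two-byte "op al, imm8" encoding.  A sketch of the
handling, with a hypothetical function and output buffer in place of
DisassemblerIA32's internals:

    #include <cstdio>
    #include <cstddef>

    // Returns the number of bytes consumed, or 0 if data[0] is not one of the
    // byte-immediate AL forms from the table above.
    static int DisassembleByteImmediate(const unsigned char* data,
                                        char* out, std::size_t out_size) {
      const char* mnem = NULL;
      switch (data[0]) {
        case 0x0c: mnem = "or";  break;
        case 0x24: mnem = "and"; break;
        case 0x34: mnem = "xor"; break;
        case 0x3c: mnem = "cmp"; break;  // the RecordWrite stub's two-byte "nop"
        default:   return 0;
      }
      std::snprintf(out, out_size, "%s al, 0x%x", mnem, data[1]);
      return 2;  // opcode byte + 8-bit immediate
    }
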
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 81c9ccb..d45a9cd 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -138,7 +138,7 @@
   // function calls.
   if (info->is_strict_mode() || info->is_native()) {
     Label ok;
-    __ test(ecx, Operand(ecx));
+    __ test(ecx, ecx);
     __ j(zero, &ok, Label::kNear);
     // +1 for return address.
     int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
@@ -147,6 +147,11 @@
     __ bind(&ok);
   }
 
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done below).
+  FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
   __ push(ebp);  // Caller's frame pointer.
   __ mov(ebp, esp);
   __ push(esi);  // Callee's context.
@@ -200,11 +205,12 @@
         // Store it in the context.
         int context_offset = Context::SlotOffset(var->index());
         __ mov(Operand(esi, context_offset), eax);
-        // Update the write barrier. This clobbers all involved
-        // registers, so we have use a third register to avoid
-        // clobbering esi.
-        __ mov(ecx, esi);
-        __ RecordWrite(ecx, context_offset, eax, ebx);
+        // Update the write barrier. This clobbers eax and ebx.
+        __ RecordWriteContextSlot(esi,
+                                  context_offset,
+                                  eax,
+                                  ebx,
+                                  kDontSaveFPRegs);
       }
     }
   }
@@ -365,10 +371,10 @@
 
 void FullCodeGenerator::verify_stack_height() {
   ASSERT(FLAG_verify_stack_height);
-  __ sub(Operand(ebp), Immediate(kPointerSize * stack_height()));
-  __ cmp(ebp, Operand(esp));
+  __ sub(ebp, Immediate(kPointerSize * stack_height()));
+  __ cmp(ebp, esp);
   __ Assert(equal, "Full codegen stack height not as expected.");
-  __ add(Operand(ebp), Immediate(kPointerSize * stack_height()));
+  __ add(ebp, Immediate(kPointerSize * stack_height()));
 }
 
 
@@ -597,7 +603,7 @@
   ToBooleanStub stub(result_register());
   __ push(result_register());
   __ CallStub(&stub, condition->test_id());
-  __ test(result_register(), Operand(result_register()));
+  __ test(result_register(), result_register());
   // The stub returns nonzero for true.
   Split(not_zero, if_true, if_false, fall_through);
 }
@@ -661,11 +667,12 @@
   ASSERT(!scratch1.is(src));
   MemOperand location = VarOperand(var, scratch0);
   __ mov(location, src);
+
   // Emit the write barrier code if the location is in the heap.
   if (var->IsContextSlot()) {
     int offset = Context::SlotOffset(var->index());
     ASSERT(!scratch0.is(esi) && !src.is(esi) && !scratch1.is(esi));
-    __ RecordWrite(scratch0, offset, src, scratch1);
+    __ RecordWriteContextSlot(scratch0, offset, src, scratch1, kDontSaveFPRegs);
   }
 }
 
@@ -738,9 +745,14 @@
         Comment cmnt(masm_, "[ Declaration");
         VisitForAccumulatorValue(function);
         __ mov(ContextOperand(esi, variable->index()), result_register());
-        int offset = Context::SlotOffset(variable->index());
-        __ mov(ebx, esi);
-        __ RecordWrite(ebx, offset, result_register(), ecx);
+        // We know that we have written a function, which is not a smi.
+        __ RecordWriteContextSlot(esi,
+                                  Context::SlotOffset(variable->index()),
+                                  result_register(),
+                                  ecx,
+                                  kDontSaveFPRegs,
+                                  EMIT_REMEMBERED_SET,
+                                  OMIT_SMI_CHECK);
         PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
       } else if (mode == Variable::CONST || mode == Variable::LET) {
         Comment cmnt(masm_, "[ Declaration");
@@ -835,10 +847,10 @@
     if (inline_smi_code) {
       Label slow_case;
       __ mov(ecx, edx);
-      __ or_(ecx, Operand(eax));
+      __ or_(ecx, eax);
       patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
 
-      __ cmp(edx, Operand(eax));
+      __ cmp(edx, eax);
       __ j(not_equal, &next_test);
       __ Drop(1);  // Switch value is no longer needed.
       __ jmp(clause->body_target());
@@ -850,7 +862,7 @@
     Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
     __ call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
     patch_site.EmitPatchInfo();
-    __ test(eax, Operand(eax));
+    __ test(eax, eax);
     __ j(not_equal, &next_test);
     __ Drop(1);  // Switch value is no longer needed.
     __ jmp(clause->body_target());
@@ -939,7 +951,7 @@
 
   // For all objects but the receiver, check that the cache is empty.
   Label check_prototype;
-  __ cmp(ecx, Operand(eax));
+  __ cmp(ecx, eax);
   __ j(equal, &check_prototype, Label::kNear);
   __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
   __ cmp(edx, isolate()->factory()->empty_fixed_array());
@@ -1021,9 +1033,9 @@
   __ push(ecx);  // Enumerable.
   __ push(ebx);  // Current entry.
   __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
-  __ test(eax, Operand(eax));
+  __ test(eax, eax);
   __ j(equal, loop_statement.continue_label());
-  __ mov(ebx, Operand(eax));
+  __ mov(ebx, eax);
 
   // Update the 'each' property or variable from the possibly filtered
   // entry in register ebx.
@@ -1047,7 +1059,7 @@
 
   // Remove the pointers stored on the stack.
   __ bind(loop_statement.break_label());
-  __ add(Operand(esp), Immediate(5 * kPointerSize));
+  __ add(esp, Immediate(5 * kPointerSize));
 
   decrement_stack_height(ForIn::kElementCount);
   // Exit and decrement the loop depth.
@@ -1480,8 +1492,20 @@
     int offset = FixedArray::kHeaderSize + (i * kPointerSize);
     __ mov(FieldOperand(ebx, offset), result_register());
 
+    Label no_map_change;
+    __ JumpIfSmi(result_register(), &no_map_change);
     // Update the write barrier for the array store.
-    __ RecordWrite(ebx, offset, result_register(), ecx);
+    __ RecordWriteField(ebx, offset, result_register(), ecx,
+                        kDontSaveFPRegs,
+                        EMIT_REMEMBERED_SET,
+                        OMIT_SMI_CHECK);
+    if (FLAG_smi_only_arrays) {
+      __ mov(edi, FieldOperand(ebx, JSObject::kMapOffset));
+      __ CheckFastSmiOnlyElements(edi, &no_map_change, Label::kNear);
+      __ push(Operand(esp, 0));
+      __ CallRuntime(Runtime::kNonSmiElementStored, 1);
+    }
+    __ bind(&no_map_change);
 
     PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
   }
@@ -1641,7 +1665,7 @@
   __ pop(edx);
   decrement_stack_height();
   __ mov(ecx, eax);
-  __ or_(eax, Operand(edx));
+  __ or_(eax, edx);
   JumpPatchSite patch_site(masm_);
   patch_site.EmitJumpIfSmi(eax, &smi_case, Label::kNear);
 
@@ -1691,32 +1715,32 @@
       break;
     }
     case Token::ADD:
-      __ add(eax, Operand(ecx));
+      __ add(eax, ecx);
       __ j(overflow, &stub_call);
       break;
     case Token::SUB:
-      __ sub(eax, Operand(ecx));
+      __ sub(eax, ecx);
       __ j(overflow, &stub_call);
       break;
     case Token::MUL: {
       __ SmiUntag(eax);
-      __ imul(eax, Operand(ecx));
+      __ imul(eax, ecx);
       __ j(overflow, &stub_call);
-      __ test(eax, Operand(eax));
+      __ test(eax, eax);
       __ j(not_zero, &done, Label::kNear);
       __ mov(ebx, edx);
-      __ or_(ebx, Operand(ecx));
+      __ or_(ebx, ecx);
       __ j(negative, &stub_call);
       break;
     }
     case Token::BIT_OR:
-      __ or_(eax, Operand(ecx));
+      __ or_(eax, ecx);
       break;
     case Token::BIT_AND:
-      __ and_(eax, Operand(ecx));
+      __ and_(eax, ecx);
       break;
     case Token::BIT_XOR:
-      __ xor_(eax, Operand(ecx));
+      __ xor_(eax, ecx);
       break;
     default:
       UNREACHABLE();
@@ -1859,7 +1883,8 @@
       __ mov(location, eax);
       if (var->IsContextSlot()) {
         __ mov(edx, eax);
-        __ RecordWrite(ecx, Context::SlotOffset(var->index()), edx, ebx);
+        int offset = Context::SlotOffset(var->index());
+        __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
       }
     }
 
@@ -1877,7 +1902,8 @@
       __ mov(location, eax);
       if (var->IsContextSlot()) {
         __ mov(edx, eax);
-        __ RecordWrite(ecx, Context::SlotOffset(var->index()), edx, ebx);
+        int offset = Context::SlotOffset(var->index());
+        __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
       }
     } else {
       ASSERT(var->IsLookupSlot());
@@ -2069,8 +2095,29 @@
   }
   // Record source position for debugger.
   SetSourcePosition(expr->position());
+
+  // Record call targets in unoptimized code, but not in the snapshot.
+  bool record_call_target = !Serializer::enabled();
+  if (record_call_target) {
+    flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
+  }
   CallFunctionStub stub(arg_count, flags);
   __ CallStub(&stub);
+  if (record_call_target) {
+    // There is a one element cache in the instruction stream.
+#ifdef DEBUG
+    int return_site_offset = masm()->pc_offset();
+#endif
+    Handle<Object> uninitialized =
+        CallFunctionStub::UninitializedSentinel(isolate());
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+    __ test(eax, Immediate(cell));
+    // Patching code in the stub assumes the opcode is 1 byte and there is
+    // a word for a pointer in the operand.
+    ASSERT(masm()->pc_offset() - return_site_offset >= 1 + kPointerSize);
+  }
+
   RecordJSReturnSite(expr);
   // Restore context register.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2438,9 +2485,9 @@
   STATIC_ASSERT(kPointerSize == 4);
   __ lea(ecx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
   // Calculate location of the first key name.
-  __ add(Operand(ebx),
-           Immediate(FixedArray::kHeaderSize +
-                     DescriptorArray::kFirstIndex * kPointerSize));
+  __ add(ebx,
+         Immediate(FixedArray::kHeaderSize +
+                   DescriptorArray::kFirstIndex * kPointerSize));
   // Loop through all the keys in the descriptor array. If one of these is the
   // symbol valueOf the result is false.
   Label entry, loop;
@@ -2449,9 +2496,9 @@
   __ mov(edx, FieldOperand(ebx, 0));
   __ cmp(edx, FACTORY->value_of_symbol());
   __ j(equal, if_false);
-  __ add(Operand(ebx), Immediate(kPointerSize));
+  __ add(ebx, Immediate(kPointerSize));
   __ bind(&entry);
-  __ cmp(ebx, Operand(ecx));
+  __ cmp(ebx, ecx);
   __ j(not_equal, &loop);
 
   // Reload map as register ebx was used as temporary above.
@@ -2591,7 +2638,7 @@
 
   __ pop(ebx);
   decrement_stack_height();
-  __ cmp(eax, Operand(ebx));
+  __ cmp(eax, ebx);
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
@@ -2647,20 +2694,24 @@
 
   // Check that the object is a JS object but take special care of JS
   // functions to make sure they have 'Function' as their class.
+  // Assume that there are only two callable types, and one of them is at
+  // either end of the type range for JS object types. Saves extra comparisons.
+  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
   __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, eax);
   // Map is now in eax.
   __ j(below, &null);
+  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                FIRST_SPEC_OBJECT_TYPE + 1);
+  __ j(equal, &function);
 
-  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
-  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
-  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
-  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
-  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
-                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
-  __ CmpInstanceType(eax, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
-  __ j(above_equal, &function);
+  __ CmpInstanceType(eax, LAST_SPEC_OBJECT_TYPE);
+  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                LAST_SPEC_OBJECT_TYPE - 1);
+  __ j(equal, &function);
+  // Assume that there is no larger type.
+  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
 
-  // Check if the constructor in the map is a function.
+  // Check if the constructor in the map is a JS function.
   __ mov(eax, FieldOperand(eax, Map::kConstructorOffset));
   __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
   __ j(not_equal, &non_function_constructor);
@@ -2741,8 +2792,8 @@
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatures::Scope fscope(SSE2);
     __ mov(ebx, Immediate(0x49800000));  // 1.0 x 2^20 as single.
-    __ movd(xmm1, Operand(ebx));
-    __ movd(xmm0, Operand(eax));
+    __ movd(xmm1, ebx);
+    __ movd(xmm0, eax);
     __ cvtss2sd(xmm1, xmm1);
     __ xorps(xmm0, xmm1);
     __ subsd(xmm0, xmm1);
@@ -2843,10 +2894,11 @@
 
   // Store the value.
   __ mov(FieldOperand(ebx, JSValue::kValueOffset), eax);
+
   // Update the write barrier.  Save the value as it will be
   // overwritten by the write barrier code and is needed afterward.
   __ mov(edx, eax);
-  __ RecordWrite(ebx, JSValue::kValueOffset, edx, ecx);
+  __ RecordWriteField(ebx, JSValue::kValueOffset, edx, ecx, kDontSaveFPRegs);
 
   __ bind(&done);
   context()->Plug(eax);
@@ -3119,14 +3171,14 @@
   __ mov(index_1, Operand(esp, 1 * kPointerSize));
   __ mov(index_2, Operand(esp, 0));
   __ mov(temp, index_1);
-  __ or_(temp, Operand(index_2));
+  __ or_(temp, index_2);
   __ JumpIfNotSmi(temp, &slow_case);
 
   // Check that both indices are valid.
   __ mov(temp, FieldOperand(object, JSArray::kLengthOffset));
-  __ cmp(temp, Operand(index_1));
+  __ cmp(temp, index_1);
   __ j(below_equal, &slow_case);
-  __ cmp(temp, Operand(index_2));
+  __ cmp(temp, index_2);
   __ j(below_equal, &slow_case);
 
   // Bring addresses into index1 and index2.
@@ -3139,16 +3191,35 @@
   __ mov(Operand(index_2, 0), object);
   __ mov(Operand(index_1, 0), temp);
 
-  Label new_space;
-  __ InNewSpace(elements, temp, equal, &new_space);
+  Label no_remembered_set;
+  __ CheckPageFlag(elements,
+                   temp,
+                   1 << MemoryChunk::SCAN_ON_SCAVENGE,
+                   not_zero,
+                   &no_remembered_set,
+                   Label::kNear);
+  // Possible optimization: do a check that both values are Smis
+  // (OR them together and test the result against the Smi mask).
 
-  __ mov(object, elements);
-  __ RecordWriteHelper(object, index_1, temp);
-  __ RecordWriteHelper(elements, index_2, temp);
+  // We are swapping two objects in an array and the incremental marker never
+  // pauses in the middle of scanning a single object.  Therefore the
+  // incremental marker is not disturbed, so we don't need to call the
+  // RecordWrite stub that notifies the incremental marker.
+  __ RememberedSetHelper(elements,
+                         index_1,
+                         temp,
+                         kDontSaveFPRegs,
+                         MacroAssembler::kFallThroughAtEnd);
+  __ RememberedSetHelper(elements,
+                         index_2,
+                         temp,
+                         kDontSaveFPRegs,
+                         MacroAssembler::kFallThroughAtEnd);
 
-  __ bind(&new_space);
+  __ bind(&no_remembered_set);
+
   // We are done. Drop elements from the stack, and return undefined.
-  __ add(Operand(esp), Immediate(3 * kPointerSize));
+  __ add(esp, Immediate(3 * kPointerSize));
   __ mov(eax, isolate()->factory()->undefined_value());
   __ jmp(&done);
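
The barrier above boils down to one page-flag test followed by two
remembered-set insertions. A hedged, self-contained sketch of that decision --
the types and names below are illustrative stand-ins, not the V8 API:

    #include <vector>

    struct PageInfo { bool scan_on_scavenge; };

    // Swapping two elements of the same array never introduces a pointer the
    // incremental marker has not already seen, so only the scavenger's
    // remembered set needs new entries -- and none at all when the whole page
    // is rescanned on scavenge anyway.
    void RecordSwap(const PageInfo& elements_page,
                    std::vector<void**>* remembered_set,
                    void** slot_1, void** slot_2) {
      if (elements_page.scan_on_scavenge) return;  // page rescanned wholesale
      remembered_set->push_back(slot_1);           // cf. RememberedSetHelper
      remembered_set->push_back(slot_2);
    }
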
 
@@ -3221,11 +3292,11 @@
   __ pop(left);
 
   Label done, fail, ok;
-  __ cmp(left, Operand(right));
+  __ cmp(left, right);
   __ j(equal, &ok);
   // Fail if either is a non-HeapObject.
   __ mov(tmp, left);
-  __ and_(Operand(tmp), right);
+  __ and_(tmp, right);
   __ JumpIfSmi(tmp, &fail);
   __ mov(tmp, FieldOperand(left, HeapObject::kMapOffset));
   __ CmpInstanceType(tmp, JS_REGEXP_TYPE);
@@ -3316,7 +3387,7 @@
   Operand separator_operand = Operand(esp, 2 * kPointerSize);
   Operand result_operand = Operand(esp, 1 * kPointerSize);
   Operand array_length_operand = Operand(esp, 0);
-  __ sub(Operand(esp), Immediate(2 * kPointerSize));
+  __ sub(esp, Immediate(2 * kPointerSize));
   __ cld();
   // Check that the array is a JSArray
   __ JumpIfSmi(array, &bailout);
@@ -3352,7 +3423,7 @@
   // Live loop registers: index, array_length, string,
   //                      scratch, string_length, elements.
   if (FLAG_debug_code) {
-    __ cmp(index, Operand(array_length));
+    __ cmp(index, array_length);
     __ Assert(less, "No empty arrays here in EmitFastAsciiArrayJoin");
   }
   __ bind(&loop);
@@ -3370,8 +3441,8 @@
   __ add(string_length,
          FieldOperand(string, SeqAsciiString::kLengthOffset));
   __ j(overflow, &bailout);
-  __ add(Operand(index), Immediate(1));
-  __ cmp(index, Operand(array_length));
+  __ add(index, Immediate(1));
+  __ cmp(index, array_length);
   __ j(less, &loop);
 
   // If array_length is 1, return elements[0], a string.
@@ -3405,10 +3476,10 @@
   // to string_length.
   __ mov(scratch, separator_operand);
   __ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset));
-  __ sub(string_length, Operand(scratch));  // May be negative, temporarily.
+  __ sub(string_length, scratch);  // May be negative, temporarily.
   __ imul(scratch, array_length_operand);
   __ j(overflow, &bailout);
-  __ add(string_length, Operand(scratch));
+  __ add(string_length, scratch);
   __ j(overflow, &bailout);
 
   __ shr(string_length, 1);
@@ -3449,7 +3520,7 @@
   __ lea(string,
          FieldOperand(string, SeqAsciiString::kHeaderSize));
   __ CopyBytes(string, result_pos, string_length, scratch);
-  __ add(Operand(index), Immediate(1));
+  __ add(index, Immediate(1));
   __ bind(&loop_1_condition);
   __ cmp(index, array_length_operand);
   __ j(less, &loop_1);  // End while (index < length).
@@ -3490,7 +3561,7 @@
   __ lea(string,
          FieldOperand(string, SeqAsciiString::kHeaderSize));
   __ CopyBytes(string, result_pos, string_length, scratch);
-  __ add(Operand(index), Immediate(1));
+  __ add(index, Immediate(1));
 
   __ cmp(index, array_length_operand);
   __ j(less, &loop_2);  // End while (index < length).
@@ -3531,7 +3602,7 @@
   __ lea(string,
          FieldOperand(string, SeqAsciiString::kHeaderSize));
   __ CopyBytes(string, result_pos, string_length, scratch);
-  __ add(Operand(index), Immediate(1));
+  __ add(index, Immediate(1));
 
   __ cmp(index, array_length_operand);
   __ j(less, &loop_3);  // End while (index < length).
@@ -3543,7 +3614,7 @@
   __ bind(&done);
   __ mov(eax, result_operand);
   // Drop temp values from the stack, and restore context register.
-  __ add(Operand(esp), Immediate(3 * kPointerSize));
+  __ add(esp, Immediate(3 * kPointerSize));
 
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   decrement_stack_height();
@@ -3823,9 +3894,9 @@
 
   if (ShouldInlineSmiCase(expr->op())) {
     if (expr->op() == Token::INC) {
-      __ add(Operand(eax), Immediate(Smi::FromInt(1)));
+      __ add(eax, Immediate(Smi::FromInt(1)));
     } else {
-      __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+      __ sub(eax, Immediate(Smi::FromInt(1)));
     }
     __ j(overflow, &stub_call, Label::kNear);
     // We could eliminate this smi check if we split the code at
@@ -3835,9 +3906,9 @@
     __ bind(&stub_call);
     // Call stub. Undo operation first.
     if (expr->op() == Token::INC) {
-      __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+      __ sub(eax, Immediate(Smi::FromInt(1)));
     } else {
-      __ add(Operand(eax), Immediate(Smi::FromInt(1)));
+      __ add(eax, Immediate(Smi::FromInt(1)));
     }
   }
 
@@ -3956,10 +4027,14 @@
 
 
 void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
-                                                 Handle<String> check,
-                                                 Label* if_true,
-                                                 Label* if_false,
-                                                 Label* fall_through) {
+                                                 Handle<String> check) {
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
   { AccumulatorValueContext context(this);
     VisitForTypeofValue(expr);
   }
@@ -3998,8 +4073,11 @@
     Split(not_zero, if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->function_symbol())) {
     __ JumpIfSmi(eax, if_false);
-    __ CmpObjectType(eax, FIRST_CALLABLE_SPEC_OBJECT_TYPE, edx);
-    Split(above_equal, if_true, if_false, fall_through);
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+    __ CmpObjectType(eax, JS_FUNCTION_TYPE, edx);
+    __ j(equal, if_true);
+    __ CmpInstanceType(edx, JS_FUNCTION_PROXY_TYPE);
+    Split(equal, if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->object_symbol())) {
     __ JumpIfSmi(eax, if_false);
     if (!FLAG_harmony_typeof) {
@@ -4017,18 +4095,7 @@
   } else {
     if (if_false != fall_through) __ jmp(if_false);
   }
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
-                                                    Label* if_true,
-                                                    Label* if_false,
-                                                    Label* fall_through) {
-  VisitForAccumulatorValue(expr);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-
-  __ cmp(eax, isolate()->factory()->undefined_value());
-  Split(equal, if_true, if_false, fall_through);
+  context()->Plug(if_true, if_false);
 }
 
 
@@ -4036,9 +4103,12 @@
   Comment cmnt(masm_, "[ CompareOperation");
   SetSourcePosition(expr->position());
 
+  // First we try a fast inlined version of the compare when one of
+  // the operands is a literal.
+  if (TryLiteralCompare(expr)) return;
+
   // Always perform the comparison for its control flow.  Pack the result
   // into the expression's context after the comparison is performed.
-
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
@@ -4046,16 +4116,9 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  // First we try a fast inlined version of the compare when one of
-  // the operands is a literal.
-  if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
-    context()->Plug(if_true, if_false);
-    return;
-  }
-
   Token::Value op = expr->op();
   VisitForStackValue(expr->left());
-  switch (expr->op()) {
+  switch (op) {
     case Token::IN:
       VisitForStackValue(expr->right());
       __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
@@ -4071,7 +4134,7 @@
       __ CallStub(&stub);
       decrement_stack_height(2);
       PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-      __ test(eax, Operand(eax));
+      __ test(eax, eax);
       // The stub returns 0 for true.
       Split(zero, if_true, if_false, fall_through);
       break;
@@ -4080,11 +4143,8 @@
     default: {
       VisitForAccumulatorValue(expr->right());
       Condition cc = no_condition;
-      bool strict = false;
       switch (op) {
         case Token::EQ_STRICT:
-          strict = true;
-          // Fall through
         case Token::EQ:
           cc = equal;
           __ pop(edx);
@@ -4120,10 +4180,10 @@
       JumpPatchSite patch_site(masm_);
       if (inline_smi_code) {
         Label slow_case;
-        __ mov(ecx, Operand(edx));
-        __ or_(ecx, Operand(eax));
+        __ mov(ecx, edx);
+        __ or_(ecx, eax);
         patch_site.EmitJumpIfNotSmi(ecx, &slow_case, Label::kNear);
-        __ cmp(edx, Operand(eax));
+        __ cmp(edx, eax);
         Split(cc, if_true, if_false, NULL);
         __ bind(&slow_case);
       }
@@ -4135,7 +4195,7 @@
       patch_site.EmitPatchInfo();
 
       PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-      __ test(eax, Operand(eax));
+      __ test(eax, eax);
       Split(cc, if_true, if_false, fall_through);
     }
   }
@@ -4146,7 +4206,9 @@
 }
 
 
-void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+                                              Expression* sub_expr,
+                                              NilValue nil) {
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
@@ -4154,15 +4216,20 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  VisitForAccumulatorValue(expr->expression());
+  VisitForAccumulatorValue(sub_expr);
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-
-  __ cmp(eax, isolate()->factory()->null_value());
-  if (expr->is_strict()) {
+  Handle<Object> nil_value = nil == kNullValue ?
+      isolate()->factory()->null_value() :
+      isolate()->factory()->undefined_value();
+  __ cmp(eax, nil_value);
+  if (expr->op() == Token::EQ_STRICT) {
     Split(equal, if_true, if_false, fall_through);
   } else {
+    Handle<Object> other_nil_value = nil == kNullValue ?
+        isolate()->factory()->undefined_value() :
+        isolate()->factory()->null_value();
     __ j(equal, if_true);
-    __ cmp(eax, isolate()->factory()->undefined_value());
+    __ cmp(eax, other_nil_value);
     __ j(equal, if_true);
     __ JumpIfSmi(eax, if_false);
     // It can be an undetectable object.
@@ -4229,7 +4296,7 @@
   // Cook return address on top of stack (smi encoded Code* delta)
   ASSERT(!result_register().is(edx));
   __ pop(edx);
-  __ sub(Operand(edx), Immediate(masm_->CodeObject()));
+  __ sub(edx, Immediate(masm_->CodeObject()));
   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
   STATIC_ASSERT(kSmiTag == 0);
   __ SmiTag(edx);
@@ -4245,8 +4312,8 @@
   // Uncook return address.
   __ pop(edx);
   __ SmiUntag(edx);
-  __ add(Operand(edx), Immediate(masm_->CodeObject()));
-  __ jmp(Operand(edx));
+  __ add(edx, Immediate(masm_->CodeObject()));
+  __ jmp(edx);
 }
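
The cook/uncook pair above replaces the raw return address saved on the stack
with a smi-encoded offset into the code object, so the GC sees a harmless smi
rather than a pointer into the middle of generated code, and the address can be
rebuilt later from the (possibly relocated) code object. Conceptually, with
plain integers standing in for the tagged values (a sketch, not the generated
code):

    #include <cstdint>

    // "Cook": keep a relocatable, smi-tagged offset instead of a raw address.
    uintptr_t CookReturnAddress(uintptr_t return_address, uintptr_t code_start) {
      uintptr_t delta = return_address - code_start;  // offset into the code
      return delta << 1;                              // smi-encode (tag == 0)
    }

    // "Uncook": recover a jump target from the current code object address.
    uintptr_t UncookReturnAddress(uintptr_t cooked, uintptr_t code_start) {
      return (cooked >> 1) + code_start;
    }
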
 
 
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 9b5cc56..4c72e56 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -212,7 +212,7 @@
 
   // Update write barrier. Make sure not to clobber the value.
   __ mov(r1, value);
-  __ RecordWrite(elements, r0, r1);
+  __ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
 }
 
 
@@ -326,7 +326,7 @@
   // Fast case: Do the load.
   STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
   __ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
-  __ cmp(Operand(scratch), Immediate(FACTORY->the_hole_value()));
+  __ cmp(scratch, Immediate(FACTORY->the_hole_value()));
   // In case the loaded value is the_hole we have to consult GetProperty
   // to ensure the prototype chain is searched.
   __ j(equal, out_of_range);
@@ -394,8 +394,8 @@
   // Check if element is in the range of mapped arguments. If not, jump
   // to the unmapped lookup with the parameter map in scratch1.
   __ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
-  __ sub(Operand(scratch2), Immediate(Smi::FromInt(2)));
-  __ cmp(key, Operand(scratch2));
+  __ sub(scratch2, Immediate(Smi::FromInt(2)));
+  __ cmp(key, scratch2);
   __ j(greater_equal, unmapped_case);
 
   // Load element index and check whether it is the hole.
@@ -432,7 +432,7 @@
   Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
   __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
   __ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
-  __ cmp(key, Operand(scratch));
+  __ cmp(key, scratch);
   __ j(greater_equal, slow_case);
   return FieldOperand(backing_store,
                       key,
@@ -534,7 +534,7 @@
   __ shr(ecx, KeyedLookupCache::kMapHashShift);
   __ mov(edi, FieldOperand(eax, String::kHashFieldOffset));
   __ shr(edi, String::kHashShift);
-  __ xor_(ecx, Operand(edi));
+  __ xor_(ecx, edi);
   __ and_(ecx, KeyedLookupCache::kCapacityMask);
 
   // Load the key (consisting of map and symbol) from the cache and
@@ -545,7 +545,7 @@
   __ shl(edi, kPointerSizeLog2 + 1);
   __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
   __ j(not_equal, &slow);
-  __ add(Operand(edi), Immediate(kPointerSize));
+  __ add(edi, Immediate(kPointerSize));
   __ cmp(eax, Operand::StaticArray(edi, times_1, cache_keys));
   __ j(not_equal, &slow);
 
@@ -559,12 +559,12 @@
   __ mov(edi,
          Operand::StaticArray(ecx, times_pointer_size, cache_field_offsets));
   __ movzx_b(ecx, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
-  __ sub(edi, Operand(ecx));
+  __ sub(edi, ecx);
   __ j(above_equal, &property_array_property);
 
   // Load in-object property.
   __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
-  __ add(ecx, Operand(edi));
+  __ add(ecx, edi);
   __ mov(eax, FieldOperand(edx, ecx, times_pointer_size, 0));
   __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
   __ ret(0);
@@ -651,8 +651,8 @@
   // Check that it has indexed interceptor and access checks
   // are not enabled for this object.
   __ movzx_b(ecx, FieldOperand(ecx, Map::kBitFieldOffset));
-  __ and_(Operand(ecx), Immediate(kSlowCaseBitFieldMask));
-  __ cmp(Operand(ecx), Immediate(1 << Map::kHasIndexedInterceptor));
+  __ and_(ecx, Immediate(kSlowCaseBitFieldMask));
+  __ cmp(ecx, Immediate(1 << Map::kHasIndexedInterceptor));
   __ j(not_zero, &slow);
 
   // Everything is fine, call runtime.
@@ -710,7 +710,7 @@
   __ mov(mapped_location, eax);
   __ lea(ecx, mapped_location);
   __ mov(edx, eax);
-  __ RecordWrite(ebx, ecx, edx);
+  __ RecordWrite(ebx, ecx, edx, kDontSaveFPRegs);
   __ Ret();
   __ bind(&notin);
   // The unmapped lookup expects that the parameter map is in ebx.
@@ -719,7 +719,7 @@
   __ mov(unmapped_location, eax);
   __ lea(edi, unmapped_location);
   __ mov(edx, eax);
-  __ RecordWrite(ebx, edi, edx);
+  __ RecordWrite(ebx, edi, edx, kDontSaveFPRegs);
   __ Ret();
   __ bind(&slow);
   GenerateMiss(masm, false);
@@ -734,7 +734,9 @@
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
-  Label slow, fast, array, extra;
+  Label slow, fast_object_with_map_check, fast_object_without_map_check;
+  Label fast_double_with_map_check, fast_double_without_map_check;
+  Label check_if_double_array, array, extra;
 
   // Check that the object isn't a smi.
   __ JumpIfSmi(edx, &slow);
@@ -750,22 +752,18 @@
   __ CmpInstanceType(edi, JS_ARRAY_TYPE);
   __ j(equal, &array);
   // Check that the object is some kind of JSObject.
-  __ CmpInstanceType(edi, FIRST_JS_RECEIVER_TYPE);
+  __ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE);
   __ j(below, &slow);
-  __ CmpInstanceType(edi, JS_PROXY_TYPE);
-  __ j(equal, &slow);
-  __ CmpInstanceType(edi, JS_FUNCTION_PROXY_TYPE);
-  __ j(equal, &slow);
 
   // Object case: Check key against length in the elements array.
   // eax: value
   // edx: JSObject
   // ecx: key (a smi)
-  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
-  // Check that the object is in fast mode and writable.
-  __ CheckMap(edi, FACTORY->fixed_array_map(), &slow, DONT_DO_SMI_CHECK);
-  __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
-  __ j(below, &fast);
+  // edi: receiver map
+  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
+  // Check array bounds. Both the key and the length of FixedArray are smis.
+  __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
+  __ j(below, &fast_object_with_map_check);
 
   // Slow case: call runtime.
   __ bind(&slow);
@@ -778,16 +776,28 @@
   // eax: value
   // edx: receiver, a JSArray
   // ecx: key, a smi.
-  // edi: receiver->elements, a FixedArray
+  // ebx: receiver->elements, a FixedArray
+  // edi: receiver map
   // flags: compare (ecx, edx.length())
   // do not leave holes in the array:
   __ j(not_equal, &slow);
-  __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
+  __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
   __ j(above_equal, &slow);
-  // Add 1 to receiver->length, and go to fast array write.
+  __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
+  __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
+  __ j(not_equal, &check_if_double_array);
+  // Add 1 to receiver->length, and go to common element store code for Objects.
   __ add(FieldOperand(edx, JSArray::kLengthOffset),
          Immediate(Smi::FromInt(1)));
-  __ jmp(&fast);
+  __ jmp(&fast_object_without_map_check);
+
+  __ bind(&check_if_double_array);
+  __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
+  __ j(not_equal, &slow);
+  // Add 1 to receiver->length, and go to common element store code for doubles.
+  __ add(FieldOperand(edx, JSArray::kLengthOffset),
+         Immediate(Smi::FromInt(1)));
+  __ jmp(&fast_double_without_map_check);
 
   // Array case: Get the length and the elements array from the JS
   // array. Check that the array is in fast mode (and writable); if it
@@ -796,24 +806,56 @@
   // eax: value
   // edx: receiver, a JSArray
   // ecx: key, a smi.
-  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
-  __ CheckMap(edi, FACTORY->fixed_array_map(), &slow, DONT_DO_SMI_CHECK);
+  // edi: receiver map
+  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
 
-  // Check the key against the length in the array, compute the
-  // address to store into and fall through to fast case.
+  // Check the key against the length in the array and fall through to the
+  // common store code.
   __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset));  // Compare smis.
   __ j(above_equal, &extra);
 
-  // Fast case: Do the store.
-  __ bind(&fast);
+  // Fast case: Do the store, could be either Object or double.
+  __ bind(&fast_object_with_map_check);
   // eax: value
   // ecx: key (a smi)
   // edx: receiver
-  // edi: FixedArray receiver->elements
-  __ mov(CodeGenerator::FixedArrayElementOperand(edi, ecx), eax);
+  // ebx: FixedArray receiver->elements
+  // edi: receiver map
+  __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
+  __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
+  __ j(not_equal, &fast_double_with_map_check);
+  __ bind(&fast_object_without_map_check);
+  // Smi stores don't require further checks.
+  Label non_smi_value;
+  __ JumpIfNotSmi(eax, &non_smi_value);
+  // It's irrelevant whether the array is smi-only when writing a smi.
+  __ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
+  __ ret(0);
+
+  __ bind(&non_smi_value);
+  if (FLAG_smi_only_arrays) {
+    // Escape to slow case when writing non-smi into smi-only array.
+    __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
+    __ CheckFastObjectElements(edi, &slow, Label::kNear);
+  }
+
+  // Fast elements array, store the value to the elements backing store.
+  __ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
   // Update write barrier for the elements array address.
-  __ mov(edx, Operand(eax));
-  __ RecordWrite(edi, 0, edx, ecx);
+  __ mov(edx, eax);  // Preserve the value which is returned.
+  __ RecordWriteArray(
+      ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ ret(0);
+
+  __ bind(&fast_double_with_map_check);
+  // Check for fast double array case. If this fails, call through to the
+  // runtime.
+  __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
+  __ j(not_equal, &slow);
+  __ bind(&fast_double_without_map_check);
+  // If the value is a number, store it as a double in the FastDoubleElements
+  // array.
+  __ StoreNumberToDoubleElements(eax, ebx, ecx, edx, xmm0, &slow, false);
   __ ret(0);
 }
 
@@ -951,22 +993,22 @@
   // Get the receiver of the function from the stack; 1 ~ return address.
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
 
-  // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Push the receiver and the name of the function.
-  __ push(edx);
-  __ push(ecx);
+    // Push the receiver and the name of the function.
+    __ push(edx);
+    __ push(ecx);
 
-  // Call the entry.
-  CEntryStub stub(1);
-  __ mov(eax, Immediate(2));
-  __ mov(ebx, Immediate(ExternalReference(IC_Utility(id), masm->isolate())));
-  __ CallStub(&stub);
+    // Call the entry.
+    CEntryStub stub(1);
+    __ mov(eax, Immediate(2));
+    __ mov(ebx, Immediate(ExternalReference(IC_Utility(id), masm->isolate())));
+    __ CallStub(&stub);
 
-  // Move result to edi and exit the internal frame.
-  __ mov(edi, eax);
-  __ LeaveInternalFrame();
+    // Move result to edi and exit the internal frame.
+    __ mov(edi, eax);
+  }
 
   // Check if the receiver is a global object of some sort.
   // This can happen only for regular CallIC but not KeyedCallIC.
@@ -1111,13 +1153,17 @@
   // This branch is taken when calling KeyedCallIC_Miss is neither required
   // nor beneficial.
   __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1);
-  __ EnterInternalFrame();
-  __ push(ecx);  // save the key
-  __ push(edx);  // pass the receiver
-  __ push(ecx);  // pass the key
-  __ CallRuntime(Runtime::kKeyedGetProperty, 2);
-  __ pop(ecx);  // restore the key
-  __ LeaveInternalFrame();
+
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(ecx);  // save the key
+    __ push(edx);  // pass the receiver
+    __ push(ecx);  // pass the key
+    __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+    __ pop(ecx);  // restore the key
+    // Leave the internal frame.
+  }
+
   __ mov(edi, eax);
   __ jmp(&do_call);
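
Both hunks above trade the explicit EnterInternalFrame/LeaveInternalFrame pair
for a block-scoped FrameScope, tying frame entry and exit to a C++ scope rather
than to hand-matched calls. A minimal sketch of that RAII shape (simplified;
the real FrameScope also tracks assembler state such as the has_frame_ flag
added to the MacroAssembler further down in this patch):

    class InternalFrameScope {
     public:
      explicit InternalFrameScope(MacroAssembler* masm) : masm_(masm) {
        masm_->EnterFrame(StackFrame::INTERNAL);   // emitted on scope entry
      }
      ~InternalFrameScope() {
        masm_->LeaveFrame(StackFrame::INTERNAL);   // emitted when scope closes
      }
     private:
      MacroAssembler* masm_;
    };
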
 
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 4e3ea98..9e1fd34 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -70,6 +70,17 @@
   ASSERT(is_unused());
   status_ = GENERATING;
   CpuFeatures::Scope scope(SSE2);
+
+  CodeStub::GenerateFPStubs();
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done in GeneratePrologue).
+  FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
+  dynamic_frame_alignment_ = chunk()->num_double_slots() > 2 ||
+                             info()->osr_ast_id() != AstNode::kNoNumber;
+
   return GeneratePrologue() &&
       GenerateBody() &&
       GenerateDeferredCode() &&
@@ -144,6 +155,29 @@
     __ bind(&ok);
   }
 
+  if (dynamic_frame_alignment_) {
+    Label do_not_pad, align_loop;
+    STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
+    // Align esp to a multiple of 2 * kPointerSize.
+    __ test(esp, Immediate(kPointerSize));
+    __ j(zero, &do_not_pad, Label::kNear);
+    __ push(Immediate(0));
+    __ mov(ebx, esp);
+    // Copy arguments, receiver, and return address.
+    __ mov(ecx, Immediate(scope()->num_parameters() + 2));
+
+    __ bind(&align_loop);
+    __ mov(eax, Operand(ebx, 1 * kPointerSize));
+    __ mov(Operand(ebx, 0), eax);
+    __ add(Operand(ebx), Immediate(kPointerSize));
+    __ dec(ecx);
+    __ j(not_zero, &align_loop, Label::kNear);
+    __ mov(Operand(ebx, 0),
+           Immediate(isolate()->factory()->frame_alignment_marker()));
+
+    __ bind(&do_not_pad);
+  }
+
   __ push(ebp);  // Caller's frame pointer.
   __ mov(ebp, esp);
   __ push(esi);  // Callee's context.
@@ -204,11 +238,12 @@
         // Store it in the context.
         int context_offset = Context::SlotOffset(var->index());
         __ mov(Operand(esi, context_offset), eax);
-        // Update the write barrier. This clobbers all involved
-        // registers, so we have to use a third register to avoid
-        // clobbering esi.
-        __ mov(ecx, esi);
-        __ RecordWrite(ecx, context_offset, eax, ebx);
+        // Update the write barrier. This clobbers eax and ebx.
+        __ RecordWriteContextSlot(esi,
+                                  context_offset,
+                                  eax,
+                                  ebx,
+                                  kDontSaveFPRegs);
       }
     }
     Comment(";;; End allocate local context");
@@ -260,6 +295,9 @@
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
       __ bind(code->entry());
+      Comment(";;; Deferred code @%d: %s.",
+              code->instruction_index(),
+              code->instr()->Mnemonic());
       code->Generate();
       __ jmp(code->exit());
     }
@@ -481,14 +519,18 @@
                                        int argc,
                                        LInstruction* instr,
                                        LOperand* context) {
-  ASSERT(context->IsRegister() || context->IsStackSlot());
   if (context->IsRegister()) {
     if (!ToRegister(context).is(esi)) {
       __ mov(esi, ToRegister(context));
     }
-  } else {
-    // Context is stack slot.
+  } else if (context->IsStackSlot()) {
     __ mov(esi, ToOperand(context));
+  } else if (context->IsConstantOperand()) {
+    Handle<Object> literal =
+        chunk_->LookupLiteral(LConstantOperand::cast(context));
+    LoadHeapObject(esi, Handle<Context>::cast(literal));
+  } else {
+    UNREACHABLE();
   }
 
   __ CallRuntimeSaveDoubles(id);
@@ -669,7 +711,7 @@
     int arguments,
     int deoptimization_index) {
   ASSERT(kind == expected_safepoint_kind_);
-  const ZoneList<LOperand*>* operands = pointers->operands();
+  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
       kind, arguments, deoptimization_index);
   for (int i = 0; i < operands->length(); i++) {
@@ -1200,8 +1242,13 @@
 
 
 void LCodeGen::DoConstantT(LConstantT* instr) {
-  ASSERT(instr->result()->IsRegister());
-  __ Set(ToRegister(instr->result()), Immediate(instr->value()));
+  Register reg = ToRegister(instr->result());
+  Handle<Object> handle = instr->value();
+  if (handle->IsHeapObject()) {
+    LoadHeapObject(reg, Handle<HeapObject>::cast(handle));
+  } else {
+    __ Set(reg, Immediate(handle));
+  }
 }
 
 
@@ -1577,23 +1624,33 @@
 }
 
 
-void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
+void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
   Register reg = ToRegister(instr->InputAt(0));
-
-  // TODO(fsc): If the expression is known to be a smi, then it's
-  // definitely not null. Jump to the false block.
-
-  int true_block = chunk_->LookupDestination(instr->true_block_id());
   int false_block = chunk_->LookupDestination(instr->false_block_id());
 
-  __ cmp(reg, factory()->null_value());
-  if (instr->is_strict()) {
+  // If the expression is known to be untagged or a smi, then it's definitely
+  // not null, and it can't be an undetectable object.
+  if (instr->hydrogen()->representation().IsSpecialization() ||
+      instr->hydrogen()->type().IsSmi()) {
+    EmitGoto(false_block);
+    return;
+  }
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  Handle<Object> nil_value = instr->nil() == kNullValue ?
+      factory()->null_value() :
+      factory()->undefined_value();
+  __ cmp(reg, nil_value);
+  if (instr->kind() == kStrictEquality) {
     EmitBranch(true_block, false_block, equal);
   } else {
+    Handle<Object> other_nil_value = instr->nil() == kNullValue ?
+        factory()->undefined_value() :
+        factory()->null_value();
     Label* true_label = chunk_->GetAssemblyLabel(true_block);
     Label* false_label = chunk_->GetAssemblyLabel(false_block);
     __ j(equal, true_label);
-    __ cmp(reg, factory()->undefined_value());
+    __ cmp(reg, other_nil_value);
     __ j(equal, true_label);
     __ JumpIfSmi(reg, false_label);
     // Check for undetectable objects by looking in the bit field in
@@ -1745,28 +1802,36 @@
   ASSERT(!input.is(temp));
   ASSERT(!temp.is(temp2));  // But input and temp2 may be the same register.
   __ JumpIfSmi(input, is_false);
-  __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
-  __ j(below, is_false);
 
-  // Map is now in temp.
-  // Functions have class 'Function'.
-  __ CmpInstanceType(temp, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
   if (class_name->IsEqualTo(CStrVector("Function"))) {
-    __ j(above_equal, is_true);
+    // Assuming the following assertions, we can use the same compares to test
+    // for both being a function type and being in the object type range.
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                  FIRST_SPEC_OBJECT_TYPE + 1);
+    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                  LAST_SPEC_OBJECT_TYPE - 1);
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
+    __ j(below, is_false);
+    __ j(equal, is_true);
+    __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
+    __ j(equal, is_true);
   } else {
-    __ j(above_equal, is_false);
+    // Faster code path to avoid two compares: subtract the lower bound from
+    // the actual type and do an unsigned compare with the width of the range.
+    __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
+    __ mov(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
+    __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    __ cmpb(Operand(temp2),
+            static_cast<int8_t>(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+                                FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    __ j(above, is_false);
   }
 
+  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
   // Check if the constructor in the map is a function.
   __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
-
-  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
-  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
-  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
-  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
-  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
-                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
-
   // Objects with a non-function constructor have class 'Object'.
   __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
   if (class_name->IsEqualTo(CStrVector("Object"))) {
@@ -1851,9 +1916,8 @@
     virtual void Generate() {
       codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
     }
-
+    virtual LInstruction* instr() { return instr_; }
     Label* map_check() { return &map_check_; }
-
    private:
     LInstanceOfKnownGlobal* instr_;
     Label map_check_;
@@ -1991,6 +2055,17 @@
   }
   __ mov(esp, ebp);
   __ pop(ebp);
+  if (dynamic_frame_alignment_) {
+    Label aligned;
+    // The frame alignment marker (padding) is below the arguments and the
+    // receiver, so its return-address-relative offset is
+    // (num_arguments + 2) words.
+    __ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize),
+           Immediate(factory()->frame_alignment_marker()));
+    __ j(not_equal, &aligned);
+    __ Ret((GetParameterCount() + 2) * kPointerSize, ecx);
+    __ bind(&aligned);
+  }
   __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
 }
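
For orientation, when the prologue inserts the padding word the incoming part
of the stack at the return site looks roughly as follows (a sketch assuming n
parameters; kPointerSize is V8's word size, 4 on ia32):

    //   esp + 0 * kPointerSize        return address
    //   esp + 1 * kPointerSize        parameters (n slots)
    //   ...
    //   esp + (n + 1) * kPointerSize  receiver
    //   esp + (n + 2) * kPointerSize  frame_alignment_marker (padding word)

    // Offset of the padding word relative to esp at the return site, matching
    // the epilogue check above (illustrative helper, not V8 code).
    static int FrameAlignmentMarkerOffset(int parameter_count) {
      // return address (1 word) + receiver (1 word) + the parameters.
      return (parameter_count + 2) * kPointerSize;
    }

The widened Ret in the padded path pops that marker word together with the
arguments.
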
 
@@ -1998,7 +2073,7 @@
 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
   Register result = ToRegister(instr->result());
   __ mov(result, Operand::Cell(instr->hydrogen()->cell()));
-  if (instr->hydrogen()->check_hole_value()) {
+  if (instr->hydrogen()->RequiresHoleCheck()) {
     __ cmp(result, factory()->the_hole_value());
     DeoptimizeIf(equal, instr->environment());
   }
@@ -2019,20 +2094,34 @@
 
 
 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
+  Register object = ToRegister(instr->TempAt(0));
+  Register address = ToRegister(instr->TempAt(1));
   Register value = ToRegister(instr->InputAt(0));
-  Operand cell_operand = Operand::Cell(instr->hydrogen()->cell());
+  ASSERT(!value.is(object));
+  Handle<JSGlobalPropertyCell> cell_handle(instr->hydrogen()->cell());
+
+  int offset = JSGlobalPropertyCell::kValueOffset;
+  __ mov(object, Immediate(cell_handle));
 
   // If the cell we are storing to contains the hole it could have
   // been deleted from the property dictionary. In that case, we need
   // to update the property details in the property dictionary to mark
   // it as no longer deleted. We deoptimize in that case.
-  if (instr->hydrogen()->check_hole_value()) {
-    __ cmp(cell_operand, factory()->the_hole_value());
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ cmp(FieldOperand(object, offset), factory()->the_hole_value());
     DeoptimizeIf(equal, instr->environment());
   }
 
   // Store the value.
-  __ mov(cell_operand, value);
+  __ mov(FieldOperand(object, offset), value);
+
+  // Cells are always in the remembered set.
+  __ RecordWriteField(object,
+                      offset,
+                      value,
+                      address,
+                      kSaveFPRegs,
+                      OMIT_REMEMBERED_SET);
 }
 
 
@@ -2063,7 +2152,7 @@
   if (instr->needs_write_barrier()) {
     Register temp = ToRegister(instr->TempAt(0));
     int offset = Context::SlotOffset(instr->slot_index());
-    __ RecordWrite(context, offset, value, temp);
+    __ RecordWriteContextSlot(context, offset, value, temp, kSaveFPRegs);
   }
 }
 
@@ -2280,16 +2369,14 @@
     LLoadKeyedFastDoubleElement* instr) {
   XMMRegister result = ToDoubleRegister(instr->result());
 
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
-        sizeof(kHoleNanLower32);
-    Operand hole_check_operand = BuildFastArrayOperand(
-        instr->elements(), instr->key(),
-        FAST_DOUBLE_ELEMENTS,
-        offset);
-    __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
-    DeoptimizeIf(equal, instr->environment());
-  }
+  int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
+      sizeof(kHoleNanLower32);
+  Operand hole_check_operand = BuildFastArrayOperand(
+      instr->elements(), instr->key(),
+      FAST_DOUBLE_ELEMENTS,
+      offset);
+  __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
+  DeoptimizeIf(equal, instr->environment());
 
   Operand double_load_operand = BuildFastArrayOperand(
       instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
@@ -2359,6 +2446,7 @@
         break;
       case EXTERNAL_FLOAT_ELEMENTS:
       case EXTERNAL_DOUBLE_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case DICTIONARY_ELEMENTS:
@@ -2680,6 +2768,7 @@
     virtual void Generate() {
       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
     }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LUnaryMathOperation* instr_;
   };
@@ -3005,7 +3094,7 @@
   ASSERT(ToRegister(instr->result()).is(eax));
 
   int arity = instr->arity();
-  CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT);
+  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   __ Drop(1);
 }
@@ -3062,7 +3151,7 @@
     if (instr->needs_write_barrier()) {
       Register temp = ToRegister(instr->TempAt(0));
       // Update the write barrier for the object for in-object properties.
-      __ RecordWrite(object, offset, value, temp);
+      __ RecordWriteField(object, offset, value, temp, kSaveFPRegs);
     }
   } else {
     Register temp = ToRegister(instr->TempAt(0));
@@ -3071,7 +3160,7 @@
     if (instr->needs_write_barrier()) {
       // Update the write barrier for the properties array.
       // object is used as a scratch register.
-      __ RecordWrite(temp, offset, value, object);
+      __ RecordWriteField(temp, offset, value, object, kSaveFPRegs);
     }
   }
 }
@@ -3130,6 +3219,7 @@
         break;
       case EXTERNAL_FLOAT_ELEMENTS:
       case EXTERNAL_DOUBLE_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case DICTIONARY_ELEMENTS:
@@ -3146,6 +3236,13 @@
   Register elements = ToRegister(instr->object());
   Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
 
+  // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
+  // conversion, so it deopts in that case.
+  if (instr->hydrogen()->ValueNeedsSmiCheck()) {
+    __ test(value, Immediate(kSmiTagMask));
+    DeoptimizeIf(not_zero, instr->environment());
+  }
+
   // Do the store.
   if (instr->key()->IsConstantOperand()) {
     ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@@ -3168,7 +3265,7 @@
                         key,
                         times_pointer_size,
                         FixedArray::kHeaderSize));
-    __ RecordWrite(elements, key, value);
+    __ RecordWrite(elements, key, value, kSaveFPRegs);
   }
 }
 
@@ -3212,6 +3309,7 @@
     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LStringCharCodeAt* instr_;
   };
@@ -3334,6 +3432,7 @@
     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LStringCharFromCode* instr_;
   };
@@ -3413,6 +3512,7 @@
     DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LNumberTagI* instr_;
   };
@@ -3480,6 +3580,7 @@
     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LNumberTagD* instr_;
   };
@@ -3581,16 +3682,6 @@
 }
 
 
-class DeferredTaggedToI: public LDeferredCode {
- public:
-  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
-      : LDeferredCode(codegen), instr_(instr) { }
-  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- private:
-  LTaggedToI* instr_;
-};
-
-
 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
   Label done, heap_number;
   Register input_reg = ToRegister(instr->InputAt(0));
@@ -3672,6 +3763,16 @@
 
 
 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+  class DeferredTaggedToI: public LDeferredCode {
+   public:
+    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LTaggedToI* instr_;
+  };
+
   LOperand* input = instr->InputAt(0);
   ASSERT(input->IsRegister());
   ASSERT(input->Equals(instr->result()));
@@ -3882,9 +3983,16 @@
 
 
 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
-  ASSERT(instr->InputAt(0)->IsRegister());
-  Operand operand = ToOperand(instr->InputAt(0));
-  __ cmp(operand, instr->hydrogen()->target());
+  Handle<JSFunction> target = instr->hydrogen()->target();
+  if (isolate()->heap()->InNewSpace(*target)) {
+    Register reg = ToRegister(instr->value());
+    Handle<JSGlobalPropertyCell> cell =
+        isolate()->factory()->NewJSGlobalPropertyCell(target);
+    __ cmp(reg, Operand::Cell(cell));
+  } else {
+    Operand operand = ToOperand(instr->value());
+    __ cmp(operand, instr->hydrogen()->target());
+  }
   DeoptimizeIf(not_equal, instr->environment());
 }
 
@@ -4188,10 +4296,12 @@
     final_branch_condition = not_zero;
 
   } else if (type_name->Equals(heap()->function_symbol())) {
-    STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
     __ JumpIfSmi(input, false_label);
-    __ CmpObjectType(input, FIRST_CALLABLE_SPEC_OBJECT_TYPE, input);
-    final_branch_condition = above_equal;
+    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
+    __ j(equal, true_label);
+    __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
+    final_branch_condition = equal;
 
   } else if (type_name->Equals(heap()->object_symbol())) {
     __ JumpIfSmi(input, false_label);
@@ -4303,6 +4413,7 @@
     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LStackCheck* instr_;
   };
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 6156327..6037c08 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -58,6 +58,7 @@
         inlined_function_count_(0),
         scope_(info->scope()),
         status_(UNUSED),
+        dynamic_frame_alignment_(false),
         deferred_(8),
         osr_pc_offset_(-1),
         deoptimization_reloc_size(),
@@ -133,6 +134,10 @@
   int strict_mode_flag() const {
     return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
   }
+  bool dynamic_frame_alignment() const { return dynamic_frame_alignment_; }
+  void set_dynamic_frame_alignment(bool value) {
+    dynamic_frame_alignment_ = value;
+  }
 
   LChunk* chunk() const { return chunk_; }
   Scope* scope() const { return scope_; }
@@ -297,6 +302,7 @@
   int inlined_function_count_;
   Scope* const scope_;
   Status status_;
+  bool dynamic_frame_alignment_;
   TranslationBuffer translations_;
   ZoneList<LDeferredCode*> deferred_;
   int osr_pc_offset_;
@@ -346,16 +352,20 @@
 class LDeferredCode: public ZoneObject {
  public:
   explicit LDeferredCode(LCodeGen* codegen)
-      : codegen_(codegen), external_exit_(NULL) {
+      : codegen_(codegen),
+        external_exit_(NULL),
+        instruction_index_(codegen->current_instruction_) {
     codegen->AddDeferredCode(this);
   }
 
   virtual ~LDeferredCode() { }
   virtual void Generate() = 0;
+  virtual LInstruction* instr() = 0;
 
   void SetExit(Label *exit) { external_exit_ = exit; }
   Label* entry() { return &entry_; }
   Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+  int instruction_index() const { return instruction_index_; }
 
  protected:
   LCodeGen* codegen() const { return codegen_; }
@@ -366,6 +376,7 @@
   Label entry_;
   Label exit_;
   Label* external_exit_;
+  int instruction_index_;
 };
 
 } }  // namespace v8::internal
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 3dc220d..856106c 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -214,10 +214,11 @@
 }
 
 
-void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
+void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if ");
   InputAt(0)->PrintTo(stream);
-  stream->Add(is_strict() ? " === null" : " == null");
+  stream->Add(kind() == kStrictEquality ? " === " : " == ");
+  stream->Add(nil() == kNullValue ? "null" : "undefined");
   stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
 }
 
@@ -351,7 +352,11 @@
 
 int LChunk::GetNextSpillIndex(bool is_double) {
   // Skip a slot when allocating a double-width slot.
-  if (is_double) spill_slot_count_++;
+  if (is_double) {
+    spill_slot_count_ |= 1;  // Make it odd, so incrementing makes it even.
+    spill_slot_count_++;
+    num_double_slots_++;
+  }
   return spill_slot_count_++;
 }
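
To make the rounding above concrete, a standalone worked example (the starting
value 4 is only an illustration): a double-width request first forces the
counter odd and then increments it, so the slot index returned for a double is
always even -- which is what lets the frame-alignment changes elsewhere in this
patch keep double spill slots 8-byte aligned.

    #include <cstdio>

    // Mirrors GetNextSpillIndex's arithmetic for a double-width request.
    int main() {
      int spill_slot_count = 4;                // assumed starting value
      spill_slot_count |= 1;                   // make it odd       -> 5
      spill_slot_count++;                      // now even          -> 6
      int double_index = spill_slot_count++;   // double gets 6, counter -> 7
      std::printf("double index %d, next free slot %d\n",
                  double_index, spill_slot_count);
      return 0;
    }
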
 
@@ -707,7 +712,9 @@
 
 LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
   HEnvironment* hydrogen_env = current_block_->last_environment();
-  instr->set_environment(CreateEnvironment(hydrogen_env));
+  int argument_index_accumulator = 0;
+  instr->set_environment(CreateEnvironment(hydrogen_env,
+                                           &argument_index_accumulator));
   return instr;
 }
 
@@ -994,10 +1001,13 @@
 }
 
 
-LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
+LEnvironment* LChunkBuilder::CreateEnvironment(
+    HEnvironment* hydrogen_env,
+    int* argument_index_accumulator) {
   if (hydrogen_env == NULL) return NULL;
 
-  LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
+  LEnvironment* outer =
+      CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
   int ast_id = hydrogen_env->ast_id();
   ASSERT(ast_id != AstNode::kNoNumber);
   int value_count = hydrogen_env->length();
@@ -1007,7 +1017,6 @@
                                           argument_count_,
                                           value_count,
                                           outer);
-  int argument_index = 0;
   for (int i = 0; i < value_count; ++i) {
     if (hydrogen_env->is_special_index(i)) continue;
 
@@ -1016,7 +1025,7 @@
     if (value->IsArgumentsObject()) {
       op = NULL;
     } else if (value->IsPushArgument()) {
-      op = new LArgument(argument_index++);
+      op = new LArgument((*argument_index_accumulator)++);
     } else {
       op = UseAny(value);
     }
@@ -1471,10 +1480,10 @@
 }
 
 
-LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
+LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
   // We only need a temp register for non-strict compare.
-  LOperand* temp = instr->is_strict() ? NULL : TempRegister();
-  return new LIsNullAndBranch(UseRegisterAtStart(instr->value()), temp);
+  LOperand* temp = instr->kind() == kStrictEquality ? NULL : TempRegister();
+  return new LIsNilAndBranch(UseRegisterAtStart(instr->value()), temp);
 }
 
 
@@ -1683,7 +1692,13 @@
 
 
 LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
-  LOperand* value = UseAtStart(instr->value());
+  // If the target is in new space, we'll emit a global cell compare and so
+  // want the value in a register.  If the target gets promoted before we
+  // emit code, we will still get the register but will do an immediate
+  // compare instead of the cell compare.  This is safe.
+  LOperand* value = Isolate::Current()->heap()->InNewSpace(*instr->target())
+      ? UseRegisterAtStart(instr->value())
+      : UseAtStart(instr->value());
   return AssignEnvironment(new LCheckFunction(value));
 }
 
@@ -1770,7 +1785,7 @@
 
 LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
   LLoadGlobalCell* result = new LLoadGlobalCell;
-  return instr->check_hole_value()
+  return instr->RequiresHoleCheck()
       ? AssignEnvironment(DefineAsRegister(result))
       : DefineAsRegister(result);
 }
@@ -1786,8 +1801,10 @@
 
 LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
   LStoreGlobalCell* result =
-      new LStoreGlobalCell(UseRegisterAtStart(instr->value()));
-  return instr->check_hole_value() ? AssignEnvironment(result) : result;
+      new LStoreGlobalCell(UseTempRegister(instr->value()),
+                           TempRegister(),
+                           TempRegister());
+  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
 }
 
 
@@ -1808,15 +1825,13 @@
 
 
 LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
-  LOperand* context;
   LOperand* value;
   LOperand* temp;
+  LOperand* context = UseRegister(instr->context());
   if (instr->NeedsWriteBarrier()) {
-    context = UseTempRegister(instr->context());
     value = UseTempRegister(instr->value());
     temp = TempRegister();
   } else {
-    context = UseRegister(instr->context());
     value = UseRegister(instr->value());
     temp = NULL;
   }
@@ -1944,7 +1959,7 @@
   ASSERT(instr->object()->representation().IsTagged());
   ASSERT(instr->key()->representation().IsInteger32());
 
-  LOperand* obj = UseTempRegister(instr->object());
+  LOperand* obj = UseRegister(instr->object());
   LOperand* val = needs_write_barrier
       ? UseTempRegister(instr->value())
       : UseRegisterAtStart(instr->value());
@@ -2021,9 +2036,14 @@
 LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
   bool needs_write_barrier = instr->NeedsWriteBarrier();
 
-  LOperand* obj = needs_write_barrier
-      ? UseTempRegister(instr->object())
-      : UseRegisterAtStart(instr->object());
+  LOperand* obj;
+  if (needs_write_barrier) {
+    obj = instr->is_in_object()
+        ? UseRegister(instr->object())
+        : UseTempRegister(instr->object());
+  } else {
+    obj = UseRegisterAtStart(instr->object());
+  }
 
   LOperand* val = needs_write_barrier
       ? UseTempRegister(instr->value())
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index 038049c..3a06ac3 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -101,7 +101,7 @@
   V(Integer32ToDouble)                          \
   V(InvokeFunction)                             \
   V(IsConstructCallAndBranch)                   \
-  V(IsNullAndBranch)                            \
+  V(IsNilAndBranch)                             \
   V(IsObjectAndBranch)                          \
   V(IsSmiAndBranch)                             \
   V(IsUndetectableAndBranch)                    \
@@ -615,17 +615,18 @@
 };
 
 
-class LIsNullAndBranch: public LControlInstruction<1, 1> {
+class LIsNilAndBranch: public LControlInstruction<1, 1> {
  public:
-  LIsNullAndBranch(LOperand* value, LOperand* temp) {
+  LIsNilAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
     temps_[0] = temp;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
+  DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
 
-  bool is_strict() const { return hydrogen()->is_strict(); }
+  EqualityKind kind() const { return hydrogen()->kind(); }
+  NilValue nil() const { return hydrogen()->nil(); }
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -1230,10 +1231,12 @@
 };
 
 
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 0> {
+class LStoreGlobalCell: public LTemplateInstruction<0, 1, 2> {
  public:
-  explicit LStoreGlobalCell(LOperand* value) {
+  explicit LStoreGlobalCell(LOperand* value, LOperand* temp1, LOperand* temp2) {
     inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
@@ -1798,6 +1801,8 @@
     inputs_[0] = value;
   }
 
+  LOperand* value() { return inputs_[0]; }
+
   DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
   DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
 };
@@ -2070,6 +2075,7 @@
       graph_(graph),
       instructions_(32),
       pointer_maps_(8),
+      num_double_slots_(0),
       inlined_closures_(1) { }
 
   void AddInstruction(LInstruction* instruction, HBasicBlock* block);
@@ -2083,6 +2089,8 @@
   int ParameterAt(int index);
   int GetParameterStackSlot(int index) const;
   int spill_slot_count() const { return spill_slot_count_; }
+  int num_double_slots() const { return num_double_slots_; }
+
   CompilationInfo* info() const { return info_; }
   HGraph* graph() const { return graph_; }
   const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
@@ -2124,6 +2132,7 @@
   HGraph* const graph_;
   ZoneList<LInstruction*> instructions_;
   ZoneList<LPointerMap*> pointer_maps_;
+  int num_double_slots_;
   ZoneList<Handle<JSFunction> > inlined_closures_;
 };
 
@@ -2259,7 +2268,8 @@
       LInstruction* instr, int ast_id);
   void ClearInstructionPendingDeoptimizationEnvironment();
 
-  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
+  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
+                                  int* argument_index_accumulator);
 
   void VisitInstruction(HInstruction* current);
 
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 837112a..3aaa22a 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -44,7 +44,8 @@
 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
     : Assembler(arg_isolate, buffer, size),
       generating_stub_(false),
-      allow_stub_calls_(true) {
+      allow_stub_calls_(true),
+      has_frame_(false) {
   if (isolate() != NULL) {
     code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                   isolate());
@@ -52,33 +53,75 @@
 }
 
 
-void MacroAssembler::RecordWriteHelper(Register object,
-                                       Register addr,
-                                       Register scratch) {
-  if (emit_debug_code()) {
-    // Check that the object is not in new space.
-    Label not_in_new_space;
-    InNewSpace(object, scratch, not_equal, &not_in_new_space);
-    Abort("new-space object passed to RecordWriteHelper");
-    bind(&not_in_new_space);
+void MacroAssembler::InNewSpace(
+    Register object,
+    Register scratch,
+    Condition cc,
+    Label* condition_met,
+    Label::Distance condition_met_distance) {
+  ASSERT(cc == equal || cc == not_equal);
+  if (scratch.is(object)) {
+    and_(scratch, Immediate(~Page::kPageAlignmentMask));
+  } else {
+    mov(scratch, Immediate(~Page::kPageAlignmentMask));
+    and_(scratch, object);
   }
+  // Check that we can use a test_b.
+  ASSERT(MemoryChunk::IN_FROM_SPACE < 8);
+  ASSERT(MemoryChunk::IN_TO_SPACE < 8);
+  int mask = (1 << MemoryChunk::IN_FROM_SPACE)
+           | (1 << MemoryChunk::IN_TO_SPACE);
+  // If non-zero, the page belongs to new-space.
+  test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
+         static_cast<uint8_t>(mask));
+  j(cc, condition_met, condition_met_distance);
+}
 
-  // Compute the page start address from the heap object pointer, and reuse
-  // the 'object' register for it.
-  and_(object, ~Page::kPageAlignmentMask);
 
-  // Compute number of region covering addr. See Page::GetRegionNumberForAddress
-  // method for more details.
-  shr(addr, Page::kRegionSizeLog2);
-  and_(addr, Page::kPageAlignmentMask >> Page::kRegionSizeLog2);
-
-  // Set dirty mark for region.
-  // Bit tests with a memory operand should be avoided on Intel processors,
-  // as they usually have long latency and multiple uops. We load the bit base
-  // operand to a register at first and store it back after bit set.
-  mov(scratch, Operand(object, Page::kDirtyFlagOffset));
-  bts(Operand(scratch), addr);
-  mov(Operand(object, Page::kDirtyFlagOffset), scratch);
+void MacroAssembler::RememberedSetHelper(
+    Register object,  // Only used for debug checks.
+    Register addr,
+    Register scratch,
+    SaveFPRegsMode save_fp,
+    MacroAssembler::RememberedSetFinalAction and_then) {
+  Label done;
+  if (FLAG_debug_code) {
+    Label ok;
+    JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+  }
+  // Load store buffer top.
+  ExternalReference store_buffer =
+      ExternalReference::store_buffer_top(isolate());
+  mov(scratch, Operand::StaticVariable(store_buffer));
+  // Store pointer to buffer.
+  mov(Operand(scratch, 0), addr);
+  // Increment buffer top.
+  add(scratch, Immediate(kPointerSize));
+  // Write back new top of buffer.
+  mov(Operand::StaticVariable(store_buffer), scratch);
+  // Call stub on end of buffer.
+  // Check for end of buffer.
+  test(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
+  if (and_then == kReturnAtEnd) {
+    Label buffer_overflowed;
+    j(not_equal, &buffer_overflowed, Label::kNear);
+    ret(0);
+    bind(&buffer_overflowed);
+  } else {
+    ASSERT(and_then == kFallThroughAtEnd);
+    j(equal, &done, Label::kNear);
+  }
+  StoreBufferOverflowStub store_buffer_overflow =
+      StoreBufferOverflowStub(save_fp);
+  CallStub(&store_buffer_overflow);
+  if (and_then == kReturnAtEnd) {
+    ret(0);
+  } else {
+    ASSERT(and_then == kFallThroughAtEnd);
+    bind(&done);
+  }
 }
 
 
@@ -112,100 +155,144 @@
 }
 
 
-void MacroAssembler::InNewSpace(Register object,
-                                Register scratch,
-                                Condition cc,
-                                Label* branch,
-                                Label::Distance branch_near) {
-  ASSERT(cc == equal || cc == not_equal);
-  if (Serializer::enabled()) {
-    // Can't do arithmetic on external references if it might get serialized.
-    mov(scratch, Operand(object));
-    // The mask isn't really an address.  We load it as an external reference in
-    // case the size of the new space is different between the snapshot maker
-    // and the running system.
-    and_(Operand(scratch),
-         Immediate(ExternalReference::new_space_mask(isolate())));
-    cmp(Operand(scratch),
-        Immediate(ExternalReference::new_space_start(isolate())));
-    j(cc, branch, branch_near);
-  } else {
-    int32_t new_space_start = reinterpret_cast<int32_t>(
-        ExternalReference::new_space_start(isolate()).address());
-    lea(scratch, Operand(object, -new_space_start));
-    and_(scratch, isolate()->heap()->NewSpaceMask());
-    j(cc, branch, branch_near);
+void MacroAssembler::RecordWriteArray(Register object,
+                                      Register value,
+                                      Register index,
+                                      SaveFPRegsMode save_fp,
+                                      RememberedSetAction remembered_set_action,
+                                      SmiCheck smi_check) {
+  // First, check if a write barrier is even needed. The tests below
+  // catch stores of Smis.
+  Label done;
+
+  // Skip barrier if writing a smi.
+  if (smi_check == INLINE_SMI_CHECK) {
+    ASSERT_EQ(0, kSmiTag);
+    test(value, Immediate(kSmiTagMask));
+    j(zero, &done);
+  }
+
+  // Array access: calculate the destination address in the same manner as
+  // KeyedStoreIC::GenerateGeneric.  Multiply a smi by 2 to get an offset
+  // into an array of words.
+  Register dst = index;
+  lea(dst, Operand(object, index, times_half_pointer_size,
+                   FixedArray::kHeaderSize - kHeapObjectTag));
+
+  RecordWrite(
+      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
+
+  bind(&done);
+
+  // Clobber clobbered input registers when running with the debug-code flag
+  // turned on to provoke errors.
+  if (emit_debug_code()) {
+    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(index, Immediate(BitCast<int32_t>(kZapValue)));
   }
 }
 
 
-void MacroAssembler::RecordWrite(Register object,
-                                 int offset,
-                                 Register value,
-                                 Register scratch) {
+void MacroAssembler::RecordWriteField(
+    Register object,
+    int offset,
+    Register value,
+    Register dst,
+    SaveFPRegsMode save_fp,
+    RememberedSetAction remembered_set_action,
+    SmiCheck smi_check) {
   // First, check if a write barrier is even needed. The tests below
-  // catch stores of Smis and stores into young gen.
+  // catch stores of Smis.
   Label done;
 
   // Skip barrier if writing a smi.
-  STATIC_ASSERT(kSmiTag == 0);
-  JumpIfSmi(value, &done, Label::kNear);
-
-  InNewSpace(object, value, equal, &done, Label::kNear);
-
-  // The offset is relative to a tagged or untagged HeapObject pointer,
-  // so either offset or offset + kHeapObjectTag must be a
-  // multiple of kPointerSize.
-  ASSERT(IsAligned(offset, kPointerSize) ||
-         IsAligned(offset + kHeapObjectTag, kPointerSize));
-
-  Register dst = scratch;
-  if (offset != 0) {
-    lea(dst, Operand(object, offset));
-  } else {
-    // Array access: calculate the destination address in the same manner as
-    // KeyedStoreIC::GenerateGeneric.  Multiply a smi by 2 to get an offset
-    // into an array of words.
-    STATIC_ASSERT(kSmiTagSize == 1);
-    STATIC_ASSERT(kSmiTag == 0);
-    lea(dst, Operand(object, dst, times_half_pointer_size,
-                     FixedArray::kHeaderSize - kHeapObjectTag));
+  if (smi_check == INLINE_SMI_CHECK) {
+    JumpIfSmi(value, &done, Label::kNear);
   }
-  RecordWriteHelper(object, dst, value);
+
+  // Although the object register is tagged, the offset is relative to the start
+  // of the object, so the offset must be a multiple of kPointerSize.
+  ASSERT(IsAligned(offset, kPointerSize));
+
+  lea(dst, FieldOperand(object, offset));
+  if (emit_debug_code()) {
+    Label ok;
+    test_b(dst, (1 << kPointerSizeLog2) - 1);
+    j(zero, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+  }
+
+  RecordWrite(
+      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
 
   bind(&done);
 
-  // Clobber all input registers when running with the debug-code flag
+  // Clobber clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(object, Immediate(BitCast<int32_t>(kZapValue)));
     mov(value, Immediate(BitCast<int32_t>(kZapValue)));
-    mov(scratch, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(dst, Immediate(BitCast<int32_t>(kZapValue)));
   }
 }
 
 
 void MacroAssembler::RecordWrite(Register object,
                                  Register address,
-                                 Register value) {
+                                 Register value,
+                                 SaveFPRegsMode fp_mode,
+                                 RememberedSetAction remembered_set_action,
+                                 SmiCheck smi_check) {
+  ASSERT(!object.is(value));
+  ASSERT(!object.is(address));
+  ASSERT(!value.is(address));
+  if (emit_debug_code()) {
+    AbortIfSmi(object);
+  }
+
+  if (remembered_set_action == OMIT_REMEMBERED_SET &&
+      !FLAG_incremental_marking) {
+    return;
+  }
+
+  if (FLAG_debug_code) {
+    Label ok;
+    cmp(value, Operand(address, 0));
+    j(equal, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+  }
+
   // First, check if a write barrier is even needed. The tests below
   // catch stores of Smis and stores into young gen.
   Label done;
 
-  // Skip barrier if writing a smi.
-  STATIC_ASSERT(kSmiTag == 0);
-  JumpIfSmi(value, &done, Label::kNear);
+  if (smi_check == INLINE_SMI_CHECK) {
+    // Skip barrier if writing a smi.
+    JumpIfSmi(value, &done, Label::kNear);
+  }
 
-  InNewSpace(object, value, equal, &done);
+  CheckPageFlag(value,
+                value,  // Used as scratch.
+                MemoryChunk::kPointersToHereAreInterestingMask,
+                zero,
+                &done,
+                Label::kNear);
+  CheckPageFlag(object,
+                value,  // Used as scratch.
+                MemoryChunk::kPointersFromHereAreInterestingMask,
+                zero,
+                &done,
+                Label::kNear);
 
-  RecordWriteHelper(object, address, value);
+  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+  CallStub(&stub);
 
   bind(&done);
 
-  // Clobber all input registers when running with the debug-code flag
+  // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(object, Immediate(BitCast<int32_t>(kZapValue)));
     mov(address, Immediate(BitCast<int32_t>(kZapValue)));
     mov(value, Immediate(BitCast<int32_t>(kZapValue)));
   }
@@ -224,7 +311,7 @@
 
 void MacroAssembler::Set(Register dst, const Immediate& x) {
   if (x.is_zero()) {
-    xor_(dst, Operand(dst));  // Shorter than mov.
+    xor_(dst, dst);  // Shorter than mov.
   } else {
     mov(dst, x);
   }
@@ -287,13 +374,111 @@
 void MacroAssembler::CheckFastElements(Register map,
                                        Label* fail,
                                        Label::Distance distance) {
-  STATIC_ASSERT(FAST_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
   cmpb(FieldOperand(map, Map::kBitField2Offset),
        Map::kMaximumBitField2FastElementValue);
   j(above, fail, distance);
 }
 
 
+void MacroAssembler::CheckFastObjectElements(Register map,
+                                             Label* fail,
+                                             Label::Distance distance) {
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
+  cmpb(FieldOperand(map, Map::kBitField2Offset),
+       Map::kMaximumBitField2FastSmiOnlyElementValue);
+  j(below_equal, fail, distance);
+  cmpb(FieldOperand(map, Map::kBitField2Offset),
+       Map::kMaximumBitField2FastElementValue);
+  j(above, fail, distance);
+}
+
+
+void MacroAssembler::CheckFastSmiOnlyElements(Register map,
+                                              Label* fail,
+                                              Label::Distance distance) {
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  cmpb(FieldOperand(map, Map::kBitField2Offset),
+       Map::kMaximumBitField2FastSmiOnlyElementValue);
+  j(above, fail, distance);
+}
+
+
+void MacroAssembler::StoreNumberToDoubleElements(
+    Register maybe_number,
+    Register elements,
+    Register key,
+    Register scratch1,
+    XMMRegister scratch2,
+    Label* fail,
+    bool specialize_for_processor) {
+  Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value;
+  JumpIfSmi(maybe_number, &smi_value, Label::kNear);
+
+  CheckMap(maybe_number,
+           isolate()->factory()->heap_number_map(),
+           fail,
+           DONT_DO_SMI_CHECK);
+
+  // Double value, canonicalize NaN.
+  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
+  cmp(FieldOperand(maybe_number, offset),
+      Immediate(kNaNOrInfinityLowerBoundUpper32));
+  j(greater_equal, &maybe_nan, Label::kNear);
+
+  bind(&not_nan);
+  ExternalReference canonical_nan_reference =
+      ExternalReference::address_of_canonical_non_hole_nan();
+  if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
+    CpuFeatures::Scope use_sse2(SSE2);
+    movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
+    bind(&have_double_value);
+    movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize),
+           scratch2);
+  } else {
+    fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset));
+    bind(&have_double_value);
+    fstp_d(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize));
+  }
+  jmp(&done);
+
+  bind(&maybe_nan);
+  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
+  // it's an Infinity, and the non-NaN code path applies.
+  j(greater, &is_nan, Label::kNear);
+  cmp(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
+  j(zero, &not_nan);
+  bind(&is_nan);
+  if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
+    CpuFeatures::Scope use_sse2(SSE2);
+    movdbl(scratch2, Operand::StaticVariable(canonical_nan_reference));
+  } else {
+    fld_d(Operand::StaticVariable(canonical_nan_reference));
+  }
+  jmp(&have_double_value, Label::kNear);
+
+  bind(&smi_value);
+  // Value is a smi. Convert to a double and store.
+  // Preserve original value.
+  mov(scratch1, maybe_number);
+  SmiUntag(scratch1);
+  if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
+    CpuFeatures::Scope fscope(SSE2);
+    cvtsi2sd(scratch2, scratch1);
+    movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize),
+           scratch2);
+  } else {
+    push(scratch1);
+    fild_s(Operand(esp, 0));
+    pop(scratch1);
+    fstp_d(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize));
+  }
+  bind(&done);
+}
+
+
 void MacroAssembler::CheckMap(Register obj,
                               Handle<Map> map,
                               Label* fail,
@@ -345,7 +530,7 @@
                                             Register scratch,
                                             Label* fail) {
   movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
-  sub(Operand(scratch), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+  sub(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
   cmp(scratch,
       LAST_NONCALLABLE_SPEC_OBJECT_TYPE - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
   j(above, fail);
@@ -402,7 +587,7 @@
 
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   push(ebp);
-  mov(ebp, Operand(esp));
+  mov(ebp, esp);
   push(esi);
   push(Immediate(Smi::FromInt(type)));
   push(Immediate(CodeObject()));
@@ -429,7 +614,7 @@
   ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
   ASSERT(ExitFrameConstants::kCallerFPOffset ==  0 * kPointerSize);
   push(ebp);
-  mov(ebp, Operand(esp));
+  mov(ebp, esp);
 
   // Reserve room for entry stack pointer and push the code object.
   ASSERT(ExitFrameConstants::kSPOffset  == -1 * kPointerSize);
@@ -451,14 +636,14 @@
   if (save_doubles) {
     CpuFeatures::Scope scope(SSE2);
     int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize;
-    sub(Operand(esp), Immediate(space));
+    sub(esp, Immediate(space));
     const int offset = -2 * kPointerSize;
     for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
       XMMRegister reg = XMMRegister::from_code(i);
       movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
     }
   } else {
-    sub(Operand(esp), Immediate(argc * kPointerSize));
+    sub(esp, Immediate(argc * kPointerSize));
   }
 
   // Get the required frame alignment for the OS.
@@ -478,7 +663,7 @@
 
   // Setup argc and argv in callee-saved registers.
   int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
-  mov(edi, Operand(eax));
+  mov(edi, eax);
   lea(esi, Operand(ebp, eax, times_4, offset));
 
   // Reserve space for argc, argv and isolate.
@@ -532,7 +717,7 @@
 
 
 void MacroAssembler::LeaveApiExitFrame() {
-  mov(esp, Operand(ebp));
+  mov(esp, ebp);
   pop(ebp);
 
   LeaveExitFrameEpilogue();
@@ -580,7 +765,7 @@
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
   pop(Operand::StaticVariable(ExternalReference(Isolate::kHandlerAddress,
                                                 isolate())));
-  add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
+  add(esp, Immediate(StackHandlerConstants::kSize - kPointerSize));
 }
 
 
@@ -612,7 +797,7 @@
   // (edx == ENTRY) == (ebp == 0) == (esi == 0), so we could test any
   // of them.
   Label skip;
-  cmp(Operand(edx), Immediate(StackHandler::ENTRY));
+  cmp(edx, Immediate(StackHandler::ENTRY));
   j(equal, &skip, Label::kNear);
   mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
   bind(&skip);
@@ -696,7 +881,7 @@
 
   // When generating debug code, make sure the lexical context is set.
   if (emit_debug_code()) {
-    cmp(Operand(scratch), Immediate(0));
+    cmp(scratch, Immediate(0));
     Check(not_equal, "we should not have an empty lexical context");
   }
   // Load the global context of the current context.
@@ -784,23 +969,23 @@
   mov(r1, r0);
   not_(r0);
   shl(r1, 15);
-  add(r0, Operand(r1));
+  add(r0, r1);
   // hash = hash ^ (hash >> 12);
   mov(r1, r0);
   shr(r1, 12);
-  xor_(r0, Operand(r1));
+  xor_(r0, r1);
   // hash = hash + (hash << 2);
   lea(r0, Operand(r0, r0, times_4, 0));
   // hash = hash ^ (hash >> 4);
   mov(r1, r0);
   shr(r1, 4);
-  xor_(r0, Operand(r1));
+  xor_(r0, r1);
   // hash = hash * 2057;
   imul(r0, r0, 2057);
   // hash = hash ^ (hash >> 16);
   mov(r1, r0);
   shr(r1, 16);
-  xor_(r0, Operand(r1));
+  xor_(r0, r1);
 
   // Compute capacity mask.
   mov(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset));
@@ -814,9 +999,9 @@
     mov(r2, r0);
     // Compute the masked index: (hash + i + i * i) & mask.
     if (i > 0) {
-      add(Operand(r2), Immediate(NumberDictionary::GetProbeOffset(i)));
+      add(r2, Immediate(NumberDictionary::GetProbeOffset(i)));
     }
-    and_(r2, Operand(r1));
+    and_(r2, r1);
 
     // Scale the index by multiplying by the entry size.
     ASSERT(NumberDictionary::kEntrySize == 3);
@@ -872,7 +1057,7 @@
   if (scratch.is(no_reg)) {
     mov(result, Operand::StaticVariable(new_space_allocation_top));
   } else {
-    mov(Operand(scratch), Immediate(new_space_allocation_top));
+    mov(scratch, Immediate(new_space_allocation_top));
     mov(result, Operand(scratch, 0));
   }
 }
@@ -931,7 +1116,7 @@
   if (!top_reg.is(result)) {
     mov(top_reg, result);
   }
-  add(Operand(top_reg), Immediate(object_size));
+  add(top_reg, Immediate(object_size));
   j(carry, gc_required);
   cmp(top_reg, Operand::StaticVariable(new_space_allocation_limit));
   j(above, gc_required);
@@ -942,12 +1127,12 @@
   // Tag result if requested.
   if (top_reg.is(result)) {
     if ((flags & TAG_OBJECT) != 0) {
-      sub(Operand(result), Immediate(object_size - kHeapObjectTag));
+      sub(result, Immediate(object_size - kHeapObjectTag));
     } else {
-      sub(Operand(result), Immediate(object_size));
+      sub(result, Immediate(object_size));
     }
   } else if ((flags & TAG_OBJECT) != 0) {
-    add(Operand(result), Immediate(kHeapObjectTag));
+    add(result, Immediate(kHeapObjectTag));
   }
 }
 
@@ -985,7 +1170,7 @@
   // We assume that element_count*element_size + header_size does not
   // overflow.
   lea(result_end, Operand(element_count, element_size, header_size));
-  add(result_end, Operand(result));
+  add(result_end, result);
   j(carry, gc_required);
   cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
   j(above, gc_required);
@@ -1030,7 +1215,7 @@
   if (!object_size.is(result_end)) {
     mov(result_end, object_size);
   }
-  add(result_end, Operand(result));
+  add(result_end, result);
   j(carry, gc_required);
   cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
   j(above, gc_required);
@@ -1050,7 +1235,7 @@
       ExternalReference::new_space_allocation_top_address(isolate());
 
   // Make sure the object has no tag before resetting top.
-  and_(Operand(object), Immediate(~kHeapObjectTagMask));
+  and_(object, Immediate(~kHeapObjectTagMask));
 #ifdef DEBUG
   cmp(object, Operand::StaticVariable(new_space_allocation_top));
   Check(below, "Undo allocation of non allocated memory");
@@ -1089,7 +1274,7 @@
   ASSERT(kShortSize == 2);
   // scratch1 = length * 2 + kObjectAlignmentMask.
   lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
-  and_(Operand(scratch1), Immediate(~kObjectAlignmentMask));
+  and_(scratch1, Immediate(~kObjectAlignmentMask));
 
   // Allocate two byte string in new space.
   AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
@@ -1123,8 +1308,8 @@
   ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
   mov(scratch1, length);
   ASSERT(kCharSize == 1);
-  add(Operand(scratch1), Immediate(kObjectAlignmentMask));
-  and_(Operand(scratch1), Immediate(~kObjectAlignmentMask));
+  add(scratch1, Immediate(kObjectAlignmentMask));
+  and_(scratch1, Immediate(~kObjectAlignmentMask));
 
   // Allocate ascii string in new space.
   AllocateInNewSpace(SeqAsciiString::kHeaderSize,
@@ -1258,7 +1443,7 @@
                                Register scratch) {
   Label loop, done, short_string, short_loop;
   // Experimentation shows that the short string loop is faster if length < 10.
-  cmp(Operand(length), Immediate(10));
+  cmp(length, Immediate(10));
   j(less_equal, &short_string);
 
   ASSERT(source.is(esi));
@@ -1273,12 +1458,12 @@
   mov(scratch, ecx);
   shr(ecx, 2);
   rep_movs();
-  and_(Operand(scratch), Immediate(0x3));
-  add(destination, Operand(scratch));
+  and_(scratch, Immediate(0x3));
+  add(destination, scratch);
   jmp(&done);
 
   bind(&short_string);
-  test(length, Operand(length));
+  test(length, length);
   j(zero, &done);
 
   bind(&short_loop);
@@ -1293,13 +1478,27 @@
 }
 
 
+void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
+                                                Register end_offset,
+                                                Register filler) {
+  Label loop, entry;
+  jmp(&entry);
+  bind(&loop);
+  mov(Operand(start_offset, 0), filler);
+  add(start_offset, Immediate(kPointerSize));
+  bind(&entry);
+  cmp(start_offset, end_offset);
+  j(less, &loop);
+}
+
+
 void MacroAssembler::NegativeZeroTest(Register result,
                                       Register op,
                                       Label* then_label) {
   Label ok;
-  test(result, Operand(result));
+  test(result, result);
   j(not_zero, &ok);
-  test(op, Operand(op));
+  test(op, op);
   j(sign, then_label);
   bind(&ok);
 }
@@ -1311,10 +1510,10 @@
                                       Register scratch,
                                       Label* then_label) {
   Label ok;
-  test(result, Operand(result));
+  test(result, result);
   j(not_zero, &ok);
-  mov(scratch, Operand(op1));
-  or_(scratch, Operand(op2));
+  mov(scratch, op1);
+  or_(scratch, op2);
   j(sign, then_label);
   bind(&ok);
 }
@@ -1344,7 +1543,7 @@
   // If the prototype or initial map is the hole, don't return it and
   // simply miss the cache instead. This will allow us to allocate a
   // prototype object on-demand in the runtime system.
-  cmp(Operand(result), Immediate(isolate()->factory()->the_hole_value()));
+  cmp(result, Immediate(isolate()->factory()->the_hole_value()));
   j(equal, miss);
 
   // If the function does not have an initial map, we're done.
@@ -1367,13 +1566,13 @@
 
 
 void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
-  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
+  ASSERT(AllowThisStubCall(stub));  // Calls are not allowed in some stubs.
   call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
 }
 
 
 MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
+  ASSERT(AllowThisStubCall(stub));  // Calls are not allowed in some stubs.
   Object* result;
   { MaybeObject* maybe_result = stub->TryGetCode();
     if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -1384,13 +1583,12 @@
 
 
 void MacroAssembler::TailCallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
+  ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
   jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
 }
 
 
 MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
   Object* result;
   { MaybeObject* maybe_result = stub->TryGetCode();
     if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -1406,9 +1604,15 @@
 }
 
 
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
+  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
+}
+
+
 void MacroAssembler::IllegalOperation(int num_arguments) {
   if (num_arguments > 0) {
-    add(Operand(esp), Immediate(num_arguments * kPointerSize));
+    add(esp, Immediate(num_arguments * kPointerSize));
   }
   mov(eax, Immediate(isolate()->factory()->undefined_value()));
 }
@@ -1442,8 +1646,7 @@
   const Runtime::Function* function = Runtime::FunctionForId(id);
   Set(eax, Immediate(function->nargs));
   mov(ebx, Immediate(ExternalReference(function, isolate())));
-  CEntryStub ces(1);
-  ces.SaveDoubles();
+  CEntryStub ces(1, kSaveFPRegs);
   CallStub(&ces);
 }
 
@@ -1623,7 +1826,7 @@
   Label leave_exit_frame;
 
   // Check if the result handle holds 0.
-  test(eax, Operand(eax));
+  test(eax, eax);
   j(zero, &empty_handle);
   // It was non-zero.  Dereference to get the result value.
   mov(eax, Operand(eax, 0));
@@ -1664,7 +1867,7 @@
   mov(edi, eax);
   mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address()));
   mov(eax, Immediate(delete_extensions));
-  call(Operand(eax));
+  call(eax);
   mov(eax, edi);
   jmp(&leave_exit_frame);
 
@@ -1698,10 +1901,10 @@
   if (call_kind == CALL_AS_FUNCTION) {
     // Set to some non-zero smi by updating the least significant
     // byte.
-    mov_b(Operand(dst), 1 << kSmiTagSize);
+    mov_b(dst, 1 << kSmiTagSize);
   } else {
     // Set to smi zero by clearing the register.
-    xor_(dst, Operand(dst));
+    xor_(dst, dst);
   }
 }
 
@@ -1746,7 +1949,7 @@
     } else if (!expected.reg().is(actual.reg())) {
       // Both expected and actual are in (different) registers. This
       // is the case when we invoke functions using call and apply.
-      cmp(expected.reg(), Operand(actual.reg()));
+      cmp(expected.reg(), actual.reg());
       j(equal, &invoke);
       ASSERT(actual.reg().is(eax));
       ASSERT(expected.reg().is(ebx));
@@ -1758,7 +1961,7 @@
         isolate()->builtins()->ArgumentsAdaptorTrampoline();
     if (!code_constant.is_null()) {
       mov(edx, Immediate(code_constant));
-      add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
+      add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
     } else if (!code_operand.is_reg(edx)) {
       mov(edx, code_operand);
     }
@@ -1784,6 +1987,9 @@
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper,
                                 CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   Label done;
   InvokePrologue(expected, actual, Handle<Code>::null(), code,
                  &done, flag, Label::kNear, call_wrapper,
@@ -1809,8 +2015,11 @@
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper,
                                 CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   Label done;
-  Operand dummy(eax);
+  Operand dummy(eax, 0);
   InvokePrologue(expected, actual, code, dummy, &done, flag, Label::kNear,
                  call_wrapper, call_kind);
   if (flag == CALL_FUNCTION) {
@@ -1832,6 +2041,9 @@
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   ASSERT(fun.is(edi));
   mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
   mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
@@ -1849,6 +2061,9 @@
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   ASSERT(function->is_compiled());
   // Get the function and setup the context.
   mov(edi, Immediate(Handle<JSFunction>(function)));
@@ -1872,8 +2087,8 @@
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
-  // Calls are not allowed in some stubs.
-  ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
+  // You can't call a builtin without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
 
   // Rely on the assertion to check that the number of provided
   // arguments match the expected number of arguments. Fake a
@@ -1884,6 +2099,7 @@
              expected, expected, flag, call_wrapper, CALL_AS_METHOD);
 }
 
+
 void MacroAssembler::GetBuiltinFunction(Register target,
                                         Builtins::JavaScript id) {
   // Load the JavaScript builtin function from the builtins object.
@@ -1893,6 +2109,7 @@
                            JSBuiltinsObject::OffsetOfFunctionWithId(id)));
 }
 
+
 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
   ASSERT(!target.is(edi));
   // Load the JavaScript builtin function from the builtins object.
@@ -1994,7 +2211,7 @@
     ret(bytes_dropped);
   } else {
     pop(scratch);
-    add(Operand(esp), Immediate(bytes_dropped));
+    add(esp, Immediate(bytes_dropped));
     push(scratch);
     ret(0);
   }
@@ -2005,7 +2222,7 @@
 
 void MacroAssembler::Drop(int stack_elements) {
   if (stack_elements > 0) {
-    add(Operand(esp), Immediate(stack_elements * kPointerSize));
+    add(esp, Immediate(stack_elements * kPointerSize));
   }
 }
 
@@ -2148,13 +2365,19 @@
     RecordComment(msg);
   }
 #endif
-  // Disable stub call restrictions to always allow calls to abort.
-  AllowStubCallsScope allow_scope(this, true);
 
   push(eax);
   push(Immediate(p0));
   push(Immediate(reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0))));
-  CallRuntime(Runtime::kAbort, 2);
+  // Disable stub call restrictions to always allow calls to abort.
+  if (!has_frame_) {
+    // We don't actually want to generate a pile of code for this, so just
+    // claim there is a stack frame, without generating one.
+    FrameScope scope(this, StackFrame::NONE);
+    CallRuntime(Runtime::kAbort, 2);
+  } else {
+    CallRuntime(Runtime::kAbort, 2);
+  }
   // will not return here
   int3();
 }
@@ -2177,7 +2400,7 @@
   ASSERT(is_uintn(power + HeapNumber::kExponentBias,
                   HeapNumber::kExponentBits));
   mov(scratch, Immediate(power + HeapNumber::kExponentBias));
-  movd(dst, Operand(scratch));
+  movd(dst, scratch);
   psllq(dst, HeapNumber::kMantissaBits);
 }
 
@@ -2203,8 +2426,8 @@
                                                          Label* failure) {
   // Check that both objects are not smis.
   STATIC_ASSERT(kSmiTag == 0);
-  mov(scratch1, Operand(object1));
-  and_(scratch1, Operand(object2));
+  mov(scratch1, object1);
+  and_(scratch1, object2);
   JumpIfSmi(scratch1, failure);
 
   // Load instance type for both strings.
@@ -2233,12 +2456,12 @@
     // Make stack end at alignment and make room for num_arguments words
     // and the original value of esp.
     mov(scratch, esp);
-    sub(Operand(esp), Immediate((num_arguments + 1) * kPointerSize));
+    sub(esp, Immediate((num_arguments + 1) * kPointerSize));
     ASSERT(IsPowerOf2(frame_alignment));
     and_(esp, -frame_alignment);
     mov(Operand(esp, num_arguments * kPointerSize), scratch);
   } else {
-    sub(Operand(esp), Immediate(num_arguments * kPointerSize));
+    sub(esp, Immediate(num_arguments * kPointerSize));
   }
 }
 
@@ -2246,27 +2469,39 @@
 void MacroAssembler::CallCFunction(ExternalReference function,
                                    int num_arguments) {
   // Trashing eax is ok as it will be the return value.
-  mov(Operand(eax), Immediate(function));
+  mov(eax, Immediate(function));
   CallCFunction(eax, num_arguments);
 }
 
 
 void MacroAssembler::CallCFunction(Register function,
                                    int num_arguments) {
+  ASSERT(has_frame());
   // Check stack alignment.
   if (emit_debug_code()) {
     CheckStackAlignment();
   }
 
-  call(Operand(function));
+  call(function);
   if (OS::ActivationFrameAlignment() != 0) {
     mov(esp, Operand(esp, num_arguments * kPointerSize));
   } else {
-    add(Operand(esp), Immediate(num_arguments * kPointerSize));
+    add(esp, Immediate(num_arguments * kPointerSize));
   }
 }
 
 
+bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
+  if (r1.is(r2)) return true;
+  if (r1.is(r3)) return true;
+  if (r1.is(r4)) return true;
+  if (r2.is(r3)) return true;
+  if (r2.is(r4)) return true;
+  if (r3.is(r4)) return true;
+  return false;
+}
+
+
 CodePatcher::CodePatcher(byte* address, int size)
     : address_(address),
       size_(size),
@@ -2288,6 +2523,198 @@
 }
 
 
+void MacroAssembler::CheckPageFlag(
+    Register object,
+    Register scratch,
+    int mask,
+    Condition cc,
+    Label* condition_met,
+    Label::Distance condition_met_distance) {
+  ASSERT(cc == zero || cc == not_zero);
+  if (scratch.is(object)) {
+    and_(scratch, Immediate(~Page::kPageAlignmentMask));
+  } else {
+    mov(scratch, Immediate(~Page::kPageAlignmentMask));
+    and_(scratch, object);
+  }
+  if (mask < (1 << kBitsPerByte)) {
+    test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
+           static_cast<uint8_t>(mask));
+  } else {
+    test(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
+  }
+  j(cc, condition_met, condition_met_distance);
+}
+
+
+void MacroAssembler::JumpIfBlack(Register object,
+                                 Register scratch0,
+                                 Register scratch1,
+                                 Label* on_black,
+                                 Label::Distance on_black_near) {
+  HasColor(object, scratch0, scratch1,
+           on_black, on_black_near,
+           1, 0);  // kBlackBitPattern.
+  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+}
+
+
+void MacroAssembler::HasColor(Register object,
+                              Register bitmap_scratch,
+                              Register mask_scratch,
+                              Label* has_color,
+                              Label::Distance has_color_distance,
+                              int first_bit,
+                              int second_bit) {
+  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, ecx));
+
+  GetMarkBits(object, bitmap_scratch, mask_scratch);
+
+  Label other_color, word_boundary;
+  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  j(first_bit == 1 ? zero : not_zero, &other_color, Label::kNear);
+  add(mask_scratch, mask_scratch);  // Shift left 1 by adding.
+  j(zero, &word_boundary, Label::kNear);
+  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
+  jmp(&other_color, Label::kNear);
+
+  bind(&word_boundary);
+  test_b(Operand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize), 1);
+
+  j(second_bit == 1 ? not_zero : zero, has_color, has_color_distance);
+  bind(&other_color);
+}
+
+
+void MacroAssembler::GetMarkBits(Register addr_reg,
+                                 Register bitmap_reg,
+                                 Register mask_reg) {
+  ASSERT(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
+  mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
+  and_(bitmap_reg, addr_reg);
+  mov(ecx, addr_reg);
+  int shift =
+      Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
+  shr(ecx, shift);
+  and_(ecx,
+       (Page::kPageAlignmentMask >> shift) & ~(Bitmap::kBytesPerCell - 1));
+
+  add(bitmap_reg, ecx);
+  mov(ecx, addr_reg);
+  shr(ecx, kPointerSizeLog2);
+  and_(ecx, (1 << Bitmap::kBitsPerCellLog2) - 1);
+  mov(mask_reg, Immediate(1));
+  shl_cl(mask_reg);
+}
+
+
+void MacroAssembler::EnsureNotWhite(
+    Register value,
+    Register bitmap_scratch,
+    Register mask_scratch,
+    Label* value_is_white_and_not_data,
+    Label::Distance distance) {
+  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
+  GetMarkBits(value, bitmap_scratch, mask_scratch);
+
+  // If the value is black or grey we don't need to do anything.
+  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+  Label done;
+
+  // Since both black and grey have a 1 in the first position and white does
+  // not have a 1 there, we only need to check one bit.
+  test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  j(not_zero, &done, Label::kNear);
+
+  if (FLAG_debug_code) {
+    // Check for impossible bit pattern.
+    Label ok;
+    push(mask_scratch);
+    // shl.  May overflow making the check conservative.
+    add(mask_scratch, mask_scratch);
+    test(mask_scratch, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+    j(zero, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+    pop(mask_scratch);
+  }
+
+  // Value is white.  We check whether it is data that doesn't need scanning.
+  // Currently only checks for HeapNumber and non-cons strings.
+  Register map = ecx;  // Holds map while checking type.
+  Register length = ecx;  // Holds length of object after checking type.
+  Label not_heap_number;
+  Label is_data_object;
+
+  // Check for heap-number
+  mov(map, FieldOperand(value, HeapObject::kMapOffset));
+  cmp(map, FACTORY->heap_number_map());
+  j(not_equal, &not_heap_number, Label::kNear);
+  mov(length, Immediate(HeapNumber::kSize));
+  jmp(&is_data_object, Label::kNear);
+
+  bind(&not_heap_number);
+  // Check for strings.
+  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  // If it's a string and it's not a cons string then it's an object containing
+  // no GC pointers.
+  Register instance_type = ecx;
+  movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
+  test_b(instance_type, kIsIndirectStringMask | kIsNotStringMask);
+  j(not_zero, value_is_white_and_not_data);
+  // It's a non-indirect (non-cons and non-slice) string.
+  // If it's external, the length is just ExternalString::kSize.
+  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+  Label not_external;
+  // External strings are the only ones with the kExternalStringTag bit
+  // set.
+  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
+  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+  test_b(instance_type, kExternalStringTag);
+  j(zero, &not_external, Label::kNear);
+  mov(length, Immediate(ExternalString::kSize));
+  jmp(&is_data_object, Label::kNear);
+
+  bind(&not_external);
+  // Sequential string, either ASCII or UC16.
+  ASSERT(kAsciiStringTag == 0x04);
+  and_(length, Immediate(kStringEncodingMask));
+  xor_(length, Immediate(kStringEncodingMask));
+  add(length, Immediate(0x04));
+  // Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted
+  // by 2. If we multiply the string length as smi by this, it still
+  // won't overflow a 32-bit value.
+  ASSERT_EQ(SeqAsciiString::kMaxSize, SeqTwoByteString::kMaxSize);
+  ASSERT(SeqAsciiString::kMaxSize <=
+         static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
+  imul(length, FieldOperand(value, String::kLengthOffset));
+  shr(length, 2 + kSmiTagSize + kSmiShiftSize);
+  add(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
+  and_(length, Immediate(~kObjectAlignmentMask));
+
+  bind(&is_data_object);
+  // Value is a data object, and it is white.  Mark it black.  Since we know
+  // that the object is white we can make it black by flipping one bit.
+  or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+
+  and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
+  add(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset),
+      length);
+  if (FLAG_debug_code) {
+    mov(length, Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+    cmp(length, Operand(bitmap_scratch, MemoryChunk::kSizeOffset));
+    Check(less_equal, "Live Bytes Count overflow chunk size");
+  }
+
+  bind(&done);
+}
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_IA32
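
The macro-assembler changes above replace the old region-dirty-bit RecordWriteHelper with an incremental-marking write barrier: RememberedSetHelper stores the written slot's address at the store-buffer top, bumps the top pointer, and only calls StoreBufferOverflowStub when the end of the buffer is reached. The following is a conceptual sketch of that fast path; the capacity constant and the overflow handler are stand-ins assumed for the example, not the real StoreBuffer interface.

    // Conceptual sketch only -- not V8 code.  Capacity and overflow handling
    // are illustrative assumptions.
    #include <cstddef>

    constexpr std::size_t kStoreBufferCapacity = 1024;

    struct StoreBufferSketch {
      void* slots[kStoreBufferCapacity];
      std::size_t top = 0;  // index of the next free entry ("buffer top")
    };

    // Stand-in for the StoreBufferOverflowStub call: drain the buffer into
    // whatever coarser structure the collector keeps, then reset the top.
    static void HandleOverflow(StoreBufferSketch* buffer) {
      buffer->top = 0;
    }

    // Mirrors the emitted fast path: record the slot address at the top,
    // increment the top, and take the slow path only when the buffer is full.
    static void RememberSlot(StoreBufferSketch* buffer, void* slot_address) {
      buffer->slots[buffer->top++] = slot_address;
      if (buffer->top == kStoreBufferCapacity) {
        HandleOverflow(buffer);
      }
    }

The generated code gets the same effect more cheaply: it tests a single bit of the incremented top pointer (StoreBuffer::kStoreBufferOverflowBit) instead of comparing against a limit.
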
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 1906644..a1b42c2 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -29,6 +29,7 @@
 #define V8_IA32_MACRO_ASSEMBLER_IA32_H_
 
 #include "assembler.h"
+#include "frames.h"
 #include "v8globals.h"
 
 namespace v8 {
@@ -50,6 +51,13 @@
 // distinguish memory operands from other operands on ia32.
 typedef Operand MemOperand;
 
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+
+
+bool AreAliased(Register r1, Register r2, Register r3, Register r4);
+
+
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
  public:
@@ -61,42 +69,130 @@
 
   // ---------------------------------------------------------------------------
   // GC Support
+  enum RememberedSetFinalAction {
+    kReturnAtEnd,
+    kFallThroughAtEnd
+  };
 
-  // For page containing |object| mark region covering |addr| dirty.
-  // RecordWriteHelper only works if the object is not in new
-  // space.
-  void RecordWriteHelper(Register object,
-                         Register addr,
-                         Register scratch);
+  // Record in the remembered set the fact that we have a pointer to new space
+  // at the address pointed to by the addr register.  Only works if addr is not
+  // in new space.
+  void RememberedSetHelper(Register object,  // Used for debug code.
+                           Register addr,
+                           Register scratch,
+                           SaveFPRegsMode save_fp,
+                           RememberedSetFinalAction and_then);
 
-  // Check if object is in new space.
-  // scratch can be object itself, but it will be clobbered.
-  void InNewSpace(Register object,
-                  Register scratch,
-                  Condition cc,  // equal for new space, not_equal otherwise.
-                  Label* branch,
-                  Label::Distance branch_near = Label::kFar);
+  void CheckPageFlag(Register object,
+                     Register scratch,
+                     int mask,
+                     Condition cc,
+                     Label* condition_met,
+                     Label::Distance condition_met_distance = Label::kFar);
 
-  // For page containing |object| mark region covering [object+offset]
-  // dirty. |object| is the object being stored into, |value| is the
-  // object being stored. If offset is zero, then the scratch register
-  // contains the array index into the elements array represented as a
-  // Smi. All registers are clobbered by the operation. RecordWrite
+  // Check if object is in new space.  Jumps if the object is not in new space.
+  // The register scratch can be object itself, but scratch will be clobbered.
+  void JumpIfNotInNewSpace(Register object,
+                           Register scratch,
+                           Label* branch,
+                           Label::Distance distance = Label::kFar) {
+    InNewSpace(object, scratch, zero, branch, distance);
+  }
+
+  // Check if object is in new space.  Jumps if the object is in new space.
+  // The register scratch can be object itself, but it will be clobbered.
+  void JumpIfInNewSpace(Register object,
+                        Register scratch,
+                        Label* branch,
+                        Label::Distance distance = Label::kFar) {
+    InNewSpace(object, scratch, not_zero, branch, distance);
+  }
+
+  // Check if an object has a given incremental marking color.  Also uses ecx!
+  void HasColor(Register object,
+                Register scratch0,
+                Register scratch1,
+                Label* has_color,
+                Label::Distance has_color_distance,
+                int first_bit,
+                int second_bit);
+
+  void JumpIfBlack(Register object,
+                   Register scratch0,
+                   Register scratch1,
+                   Label* on_black,
+                   Label::Distance on_black_distance = Label::kFar);
+
+  // Checks the color of an object.  If the object is already grey or black
+  // then we just fall through, since it is already live.  If it is white and
+  // we can determine that it doesn't need to be scanned, then we just mark it
+  // black and fall through.  For the rest we jump to the label so the
+  // incremental marker can fix its assumptions.
+  void EnsureNotWhite(Register object,
+                      Register scratch1,
+                      Register scratch2,
+                      Label* object_is_white_and_not_data,
+                      Label::Distance distance);
+
+  // Notify the garbage collector that we wrote a pointer into an object.
+  // |object| is the object being stored into, |value| is the object being
+  // stored.  value and scratch registers are clobbered by the operation.
+  // The offset is the offset from the start of the object, not the offset from
+  // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
+  void RecordWriteField(
+      Register object,
+      int offset,
+      Register value,
+      Register scratch,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK);
+
+  // As above, but the offset has the tag presubtracted.  For use with
+  // Operand(reg, off).
+  void RecordWriteContextSlot(
+      Register context,
+      int offset,
+      Register value,
+      Register scratch,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK) {
+    RecordWriteField(context,
+                     offset + kHeapObjectTag,
+                     value,
+                     scratch,
+                     save_fp,
+                     remembered_set_action,
+                     smi_check);
+  }
+
+  // Notify the garbage collector that we wrote a pointer into a fixed array.
+  // |array| is the array being stored into, |value| is the
+  // object being stored.  |index| is the array index represented as a
+  // Smi. All registers are clobbered by the operation.  RecordWriteArray
   // filters out smis so it does not update the write barrier if the
   // value is a smi.
-  void RecordWrite(Register object,
-                   int offset,
-                   Register value,
-                   Register scratch);
+  void RecordWriteArray(
+      Register array,
+      Register value,
+      Register index,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK);
 
   // For page containing |object| mark region covering |address|
   // dirty. |object| is the object being stored into, |value| is the
-  // object being stored. All registers are clobbered by the
+  // object being stored. The address and value registers are clobbered by the
   // operation. RecordWrite filters out smis so it does not update the
   // write barrier if the value is a smi.
-  void RecordWrite(Register object,
-                   Register address,
-                   Register value);
+  void RecordWrite(
+      Register object,
+      Register address,
+      Register value,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK);
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // ---------------------------------------------------------------------------
@@ -105,15 +201,6 @@
   void DebugBreak();
 #endif
 
-  // ---------------------------------------------------------------------------
-  // Activation frames
-
-  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
-  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
-
-  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
-  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
-
   // Enter specific kind of exit frame. Expects the number of
   // arguments in register eax and sets up the number of arguments in
   // register edi and the pointer to the first argument in register
@@ -159,6 +246,15 @@
   void SetCallKind(Register dst, CallKind kind);
 
   // Invoke the JavaScript function code by either calling or jumping.
+  void InvokeCode(Register code,
+                  const ParameterCount& expected,
+                  const ParameterCount& actual,
+                  InvokeFlag flag,
+                  const CallWrapper& call_wrapper,
+                  CallKind call_kind) {
+    InvokeCode(Operand(code), expected, actual, flag, call_wrapper, call_kind);
+  }
+
   void InvokeCode(const Operand& code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
@@ -225,6 +321,29 @@
                          Label* fail,
                          Label::Distance distance = Label::kFar);
 
+  // Check if a map for a JSObject indicates that the object can have both smi
+  // and HeapObject elements.  Jump to the specified label if it does not.
+  void CheckFastObjectElements(Register map,
+                               Label* fail,
+                               Label::Distance distance = Label::kFar);
+
+  // Check if a map for a JSObject indicates that the object has fast smi only
+  // elements.  Jump to the specified label if it does not.
+  void CheckFastSmiOnlyElements(Register map,
+                                Label* fail,
+                                Label::Distance distance = Label::kFar);
+
+  // Check to see if maybe_number can be stored as a double in
+  // FastDoubleElements. If it can, store it at the index specified by key in
+  // the FastDoubleElements array elements, otherwise jump to fail.
+  void StoreNumberToDoubleElements(Register maybe_number,
+                                   Register elements,
+                                   Register key,
+                                   Register scratch1,
+                                   XMMRegister scratch2,
+                                   Label* fail,
+                                   bool specialize_for_processor);
+
   // Check if the map of an object is equal to a specified map and branch to
   // label if not. Skip the smi check if not required (object is known to be a
   // heap object)
@@ -277,7 +396,7 @@
   void SmiTag(Register reg) {
     STATIC_ASSERT(kSmiTag == 0);
     STATIC_ASSERT(kSmiTagSize == 1);
-    add(reg, Operand(reg));
+    add(reg, reg);
   }
   void SmiUntag(Register reg) {
     sar(reg, kSmiTagSize);
@@ -465,6 +584,13 @@
                  Register length,
                  Register scratch);
 
+  // Initialize fields with filler values.  Fields starting at |start_offset|
+  // not including |end_offset| are overwritten with the value in |filler|.  At
+  // the end of the loop, |start_offset| takes the value of |end_offset|.
+  void InitializeFieldsWithFiller(Register start_offset,
+                                  Register end_offset,
+                                  Register filler);
+
   // ---------------------------------------------------------------------------
   // Support functions.
 
@@ -667,6 +793,9 @@
   bool generating_stub() { return generating_stub_; }
   void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
   bool allow_stub_calls() { return allow_stub_calls_; }
+  void set_has_frame(bool value) { has_frame_ = value; }
+  bool has_frame() { return has_frame_; }
+  inline bool AllowThisStubCall(CodeStub* stub);
 
   // ---------------------------------------------------------------------------
   // String utilities.
@@ -690,9 +819,14 @@
     return SafepointRegisterStackIndex(reg.code());
   }
 
+  // Activation support.
+  void EnterFrame(StackFrame::Type type);
+  void LeaveFrame(StackFrame::Type type);
+
  private:
   bool generating_stub_;
   bool allow_stub_calls_;
+  bool has_frame_;
   // This handle will be patched with the code object on installation.
   Handle<Object> code_object_;
 
@@ -703,14 +837,10 @@
                       const Operand& code_operand,
                       Label* done,
                       InvokeFlag flag,
-                      Label::Distance done_near = Label::kFar,
+                      Label::Distance done_distance,
                       const CallWrapper& call_wrapper = NullCallWrapper(),
                       CallKind call_kind = CALL_AS_METHOD);
 
-  // Activation support.
-  void EnterFrame(StackFrame::Type type);
-  void LeaveFrame(StackFrame::Type type);
-
   void EnterExitFramePrologue();
   void EnterExitFrameEpilogue(int argc, bool save_doubles);
 
@@ -729,6 +859,20 @@
                                                     Register scratch,
                                                     bool gc_allowed);
 
+  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+  void InNewSpace(Register object,
+                  Register scratch,
+                  Condition cc,
+                  Label* condition_met,
+                  Label::Distance condition_met_distance = Label::kFar);
+
+  // Helper for finding the mark bits for an address.  Afterwards, the
+  // bitmap register points at the word with the mark bits and the mask
+  // register holds the position of the first bit.  Uses ecx as scratch and
+  // leaves addr_reg unchanged.
+  inline void GetMarkBits(Register addr_reg,
+                          Register bitmap_reg,
+                          Register mask_reg);
 
   // Compute memory operands for safepoint stack slots.
   Operand SafepointRegisterSlot(Register reg);
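
GetMarkBits, declared above and implemented in the macro-assembler-ia32.cc hunks earlier in this patch, turns an object address into the bitmap cell that holds its mark bits plus a single-bit mask within that cell. A sketch of the arithmetic follows; the page size, pointer size, and cell width are assumptions picked for the example rather than the constants defined by Page and Bitmap.

    // Illustrative sketch only -- constants are assumed, not V8's values.
    #include <cstdint>

    constexpr std::uintptr_t kPageAlignmentMask = (1u << 20) - 1;  // 1MB pages
    constexpr int kPointerSizeLog2 = 2;  // ia32: 4-byte pointers
    constexpr int kBitsPerCellLog2 = 5;  // 32 mark bits per bitmap cell

    struct MarkBitLocation {
      std::uintptr_t cell_index;  // which bitmap cell holds the mark bit
      std::uint32_t mask;         // single-bit mask within that cell
    };

    // One mark bit per pointer-sized word on the page: the cell index and the
    // in-cell bit position both fall out of shifts of the page offset.
    static MarkBitLocation GetMarkBitsSketch(std::uintptr_t addr) {
      std::uintptr_t offset_in_page = addr & kPageAlignmentMask;
      std::uintptr_t word_index = offset_in_page >> kPointerSizeLog2;
      MarkBitLocation loc;
      loc.cell_index = word_index >> kBitsPerCellLog2;
      loc.mask = 1u << (word_index & ((1u << kBitsPerCellLog2) - 1));
      return loc;
    }

JumpIfBlack, HasColor and EnsureNotWhite all start from this pair: the cell is read from the page's bitmap and tested or updated against the mask, which is why those helpers assert that none of their registers alias ecx, the scratch used for the variable shift.
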
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index d175d9e..8b0b9ab 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2008-2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -134,7 +134,7 @@
 
 void RegExpMacroAssemblerIA32::AdvanceCurrentPosition(int by) {
   if (by != 0) {
-    __ add(Operand(edi), Immediate(by * char_size()));
+    __ add(edi, Immediate(by * char_size()));
   }
 }
 
@@ -152,8 +152,8 @@
   CheckPreemption();
   // Pop Code* offset from backtrack stack, add Code* and jump to location.
   Pop(ebx);
-  __ add(Operand(ebx), Immediate(masm_->CodeObject()));
-  __ jmp(Operand(ebx));
+  __ add(ebx, Immediate(masm_->CodeObject()));
+  __ jmp(ebx);
 }
 
 
@@ -219,7 +219,7 @@
   int byte_offset = cp_offset * char_size();
   if (check_end_of_string) {
     // Check that there are at least str.length() characters left in the input.
-    __ cmp(Operand(edi), Immediate(-(byte_offset + byte_length)));
+    __ cmp(edi, Immediate(-(byte_offset + byte_length)));
     BranchOrBacktrack(greater, on_failure);
   }
 
@@ -288,7 +288,7 @@
   Label fallthrough;
   __ cmp(edi, Operand(backtrack_stackpointer(), 0));
   __ j(not_equal, &fallthrough);
-  __ add(Operand(backtrack_stackpointer()), Immediate(kPointerSize));  // Pop.
+  __ add(backtrack_stackpointer(), Immediate(kPointerSize));  // Pop.
   BranchOrBacktrack(no_condition, on_equal);
   __ bind(&fallthrough);
 }
@@ -300,7 +300,7 @@
   Label fallthrough;
   __ mov(edx, register_location(start_reg));  // Index of start of capture
   __ mov(ebx, register_location(start_reg + 1));  // Index of end of capture
-  __ sub(ebx, Operand(edx));  // Length of capture.
+  __ sub(ebx, edx);  // Length of capture.
 
   // The length of a capture should not be negative. This can only happen
   // if the end of the capture is unrecorded, or at a point earlier than
@@ -320,9 +320,9 @@
     __ push(backtrack_stackpointer());
     // After this, the eax, ecx, and edi registers are available.
 
-    __ add(edx, Operand(esi));  // Start of capture
-    __ add(edi, Operand(esi));  // Start of text to match against capture.
-    __ add(ebx, Operand(edi));  // End of text to match against capture.
+    __ add(edx, esi);  // Start of capture
+    __ add(edi, esi);  // Start of text to match against capture.
+    __ add(ebx, edi);  // End of text to match against capture.
 
     Label loop;
     __ bind(&loop);
@@ -339,15 +339,15 @@
     __ movzx_b(ecx, Operand(edx, 0));
     __ or_(ecx, 0x20);
 
-    __ cmp(eax, Operand(ecx));
+    __ cmp(eax, ecx);
     __ j(not_equal, &fail);
 
     __ bind(&loop_increment);
     // Increment pointers into match and capture strings.
-    __ add(Operand(edx), Immediate(1));
-    __ add(Operand(edi), Immediate(1));
+    __ add(edx, Immediate(1));
+    __ add(edi, Immediate(1));
     // Compare to end of match, and loop if not done.
-    __ cmp(edi, Operand(ebx));
+    __ cmp(edi, ebx);
     __ j(below, &loop);
     __ jmp(&success);
 
@@ -361,9 +361,9 @@
     // Restore original value before continuing.
     __ pop(backtrack_stackpointer());
     // Drop original value of character position.
-    __ add(Operand(esp), Immediate(kPointerSize));
+    __ add(esp, Immediate(kPointerSize));
     // Compute new value of character position after the matched part.
-    __ sub(edi, Operand(esi));
+    __ sub(edi, esi);
   } else {
     ASSERT(mode_ == UC16);
     // Save registers before calling C function.
@@ -389,16 +389,19 @@
     // Set byte_offset2.
     // Found by adding negative string-end offset of current position (edi)
     // to end of string.
-    __ add(edi, Operand(esi));
+    __ add(edi, esi);
     __ mov(Operand(esp, 1 * kPointerSize), edi);
     // Set byte_offset1.
     // Start of capture, where edx already holds string-end negative offset.
-    __ add(edx, Operand(esi));
+    __ add(edx, esi);
     __ mov(Operand(esp, 0 * kPointerSize), edx);
 
-    ExternalReference compare =
-        ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
-    __ CallCFunction(compare, argument_count);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm_);
+      ExternalReference compare =
+          ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+      __ CallCFunction(compare, argument_count);
+    }
     // Pop original values before reacting on result value.
     __ pop(ebx);
     __ pop(backtrack_stackpointer());
@@ -406,10 +409,10 @@
     __ pop(esi);
 
     // Check if function returned non-zero for success or zero for failure.
-    __ or_(eax, Operand(eax));
+    __ or_(eax, eax);
     BranchOrBacktrack(zero, on_no_match);
     // On success, increment position by length of capture.
-    __ add(edi, Operand(ebx));
+    __ add(edi, ebx);
   }
   __ bind(&fallthrough);
 }
@@ -425,7 +428,7 @@
   // Find length of back-referenced capture.
   __ mov(edx, register_location(start_reg));
   __ mov(eax, register_location(start_reg + 1));
-  __ sub(eax, Operand(edx));  // Length to check.
+  __ sub(eax, edx);  // Length to check.
   // Fail on partial or illegal capture (start of capture after end of capture).
   BranchOrBacktrack(less, on_no_match);
   // Succeed on empty capture (including no capture)
@@ -433,7 +436,7 @@
 
   // Check that there are sufficient characters left in the input.
   __ mov(ebx, edi);
-  __ add(ebx, Operand(eax));
+  __ add(ebx, eax);
   BranchOrBacktrack(greater, on_no_match);
 
   // Save register to make it available below.
@@ -441,7 +444,7 @@
 
   // Compute pointers to match string and capture string
   __ lea(ebx, Operand(esi, edi, times_1, 0));  // Start of match.
-  __ add(edx, Operand(esi));  // Start of capture.
+  __ add(edx, esi);  // Start of capture.
   __ lea(ecx, Operand(eax, ebx, times_1, 0));  // End of match
 
   Label loop;
@@ -456,10 +459,10 @@
   }
   __ j(not_equal, &fail);
   // Increment pointers into capture and match string.
-  __ add(Operand(edx), Immediate(char_size()));
-  __ add(Operand(ebx), Immediate(char_size()));
+  __ add(edx, Immediate(char_size()));
+  __ add(ebx, Immediate(char_size()));
   // Check if we have reached end of match area.
-  __ cmp(ebx, Operand(ecx));
+  __ cmp(ebx, ecx);
   __ j(below, &loop);
   __ jmp(&success);
 
@@ -471,7 +474,7 @@
   __ bind(&success);
   // Move current character position to position after match.
   __ mov(edi, ecx);
-  __ sub(Operand(edi), esi);
+  __ sub(edi, esi);
   // Restore backtrack stackpointer.
   __ pop(backtrack_stackpointer());
 
@@ -574,17 +577,17 @@
     return true;
   case '.': {
     // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
-    __ mov(Operand(eax), current_character());
-    __ xor_(Operand(eax), Immediate(0x01));
+    __ mov(eax, current_character());
+    __ xor_(eax, Immediate(0x01));
     // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
-    __ sub(Operand(eax), Immediate(0x0b));
+    __ sub(eax, Immediate(0x0b));
     __ cmp(eax, 0x0c - 0x0b);
     BranchOrBacktrack(below_equal, on_no_match);
     if (mode_ == UC16) {
       // Compare original value to 0x2028 and 0x2029, using the already
       // computed (current_char ^ 0x01 - 0x0b). I.e., check for
       // 0x201d (0x2028 - 0x0b) or 0x201e.
-      __ sub(Operand(eax), Immediate(0x2028 - 0x0b));
+      __ sub(eax, Immediate(0x2028 - 0x0b));
       __ cmp(eax, 0x2029 - 0x2028);
       BranchOrBacktrack(below_equal, on_no_match);
     }
@@ -593,7 +596,7 @@
   case 'w': {
     if (mode_ != ASCII) {
       // Table is 128 entries, so all ASCII characters can be tested.
-      __ cmp(Operand(current_character()), Immediate('z'));
+      __ cmp(current_character(), Immediate('z'));
       BranchOrBacktrack(above, on_no_match);
     }
     ASSERT_EQ(0, word_character_map[0]);  // Character '\0' is not a word char.
@@ -607,7 +610,7 @@
     Label done;
     if (mode_ != ASCII) {
       // Table is 128 entries, so all ASCII characters can be tested.
-      __ cmp(Operand(current_character()), Immediate('z'));
+      __ cmp(current_character(), Immediate('z'));
       __ j(above, &done);
     }
     ASSERT_EQ(0, word_character_map[0]);  // Character '\0' is not a word char.
@@ -627,10 +630,10 @@
   case 'n': {
     // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 or 0x2029).
     // The opposite of '.'.
-    __ mov(Operand(eax), current_character());
-    __ xor_(Operand(eax), Immediate(0x01));
+    __ mov(eax, current_character());
+    __ xor_(eax, Immediate(0x01));
     // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
-    __ sub(Operand(eax), Immediate(0x0b));
+    __ sub(eax, Immediate(0x0b));
     __ cmp(eax, 0x0c - 0x0b);
     if (mode_ == ASCII) {
       BranchOrBacktrack(above, on_no_match);
@@ -641,7 +644,7 @@
       // Compare original value to 0x2028 and 0x2029, using the already
       // computed (current_char ^ 0x01 - 0x0b). I.e., check for
       // 0x201d (0x2028 - 0x0b) or 0x201e.
-      __ sub(Operand(eax), Immediate(0x2028 - 0x0b));
+      __ sub(eax, Immediate(0x2028 - 0x0b));
       __ cmp(eax, 1);
       BranchOrBacktrack(above, on_no_match);
       __ bind(&done);
@@ -668,7 +671,12 @@
 
   // Entry code:
   __ bind(&entry_label_);
-  // Start new stack frame.
+
+  // Tell the system that we have a stack frame.  Because the type is MANUAL, no
+  // code is generated.
+  FrameScope scope(masm_, StackFrame::MANUAL);
+
+  // Actually emit code to start a new stack frame.
   __ push(ebp);
   __ mov(ebp, esp);
   // Save callee-save registers. Order here should correspond to order of
@@ -699,7 +707,7 @@
 
   __ bind(&stack_limit_hit);
   CallCheckStackGuardState(ebx);
-  __ or_(eax, Operand(eax));
+  __ or_(eax, eax);
   // If returned value is non-zero, we exit with the returned value as result.
   __ j(not_zero, &exit_label_);
 
@@ -708,13 +716,13 @@
   __ mov(ebx, Operand(ebp, kStartIndex));
 
   // Allocate space on stack for registers.
-  __ sub(Operand(esp), Immediate(num_registers_ * kPointerSize));
+  __ sub(esp, Immediate(num_registers_ * kPointerSize));
   // Load string length.
   __ mov(esi, Operand(ebp, kInputEnd));
   // Load input position.
   __ mov(edi, Operand(ebp, kInputStart));
   // Set up edi to be negative offset from string end.
-  __ sub(edi, Operand(esi));
+  __ sub(edi, esi);
 
   // Set eax to address of char before start of the string.
   // (effectively string position -1).
@@ -736,7 +744,7 @@
     Label init_loop;
     __ bind(&init_loop);
     __ mov(Operand(ebp, ecx, times_1, +0), eax);
-    __ sub(Operand(ecx), Immediate(kPointerSize));
+    __ sub(ecx, Immediate(kPointerSize));
     __ cmp(ecx, kRegisterZero - num_saved_registers_ * kPointerSize);
     __ j(greater, &init_loop);
   }
@@ -777,12 +785,12 @@
       if (mode_ == UC16) {
         __ lea(ecx, Operand(ecx, edx, times_2, 0));
       } else {
-        __ add(ecx, Operand(edx));
+        __ add(ecx, edx);
       }
       for (int i = 0; i < num_saved_registers_; i++) {
         __ mov(eax, register_location(i));
         // Convert to index from start of string, not end.
-        __ add(eax, Operand(ecx));
+        __ add(eax, ecx);
         if (mode_ == UC16) {
           __ sar(eax, 1);  // Convert byte index to character index.
         }
@@ -819,7 +827,7 @@
     __ push(edi);
 
     CallCheckStackGuardState(ebx);
-    __ or_(eax, Operand(eax));
+    __ or_(eax, eax);
     // If returning non-zero, we should end execution with the given
     // result as return value.
     __ j(not_zero, &exit_label_);
@@ -854,7 +862,7 @@
     __ CallCFunction(grow_stack, num_arguments);
     // If return NULL, we have failed to grow the stack, and
     // must exit with a stack-overflow exception.
-    __ or_(eax, Operand(eax));
+    __ or_(eax, eax);
     __ j(equal, &exit_with_exception);
     // Otherwise use return value as new stack pointer.
     __ mov(backtrack_stackpointer(), eax);
@@ -1183,8 +1191,8 @@
 
 void RegExpMacroAssemblerIA32::SafeReturn() {
   __ pop(ebx);
-  __ add(Operand(ebx), Immediate(masm_->CodeObject()));
-  __ jmp(Operand(ebx));
+  __ add(ebx, Immediate(masm_->CodeObject()));
+  __ jmp(ebx);
 }
 
 
@@ -1196,14 +1204,14 @@
 void RegExpMacroAssemblerIA32::Push(Register source) {
   ASSERT(!source.is(backtrack_stackpointer()));
   // Notice: This updates flags, unlike normal Push.
-  __ sub(Operand(backtrack_stackpointer()), Immediate(kPointerSize));
+  __ sub(backtrack_stackpointer(), Immediate(kPointerSize));
   __ mov(Operand(backtrack_stackpointer(), 0), source);
 }
 
 
 void RegExpMacroAssemblerIA32::Push(Immediate value) {
   // Notice: This updates flags, unlike normal Push.
-  __ sub(Operand(backtrack_stackpointer()), Immediate(kPointerSize));
+  __ sub(backtrack_stackpointer(), Immediate(kPointerSize));
   __ mov(Operand(backtrack_stackpointer(), 0), value);
 }
 
@@ -1212,7 +1220,7 @@
   ASSERT(!target.is(backtrack_stackpointer()));
   __ mov(target, Operand(backtrack_stackpointer(), 0));
   // Notice: This updates flags, unlike normal Pop.
-  __ add(Operand(backtrack_stackpointer()), Immediate(kPointerSize));
+  __ add(backtrack_stackpointer(), Immediate(kPointerSize));
 }
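
The FrameScope(masm_, StackFrame::MANUAL) added at the regexp entry code above
follows a pattern used throughout this patch: a MANUAL scope emits no
instructions and only records that a frame exists (see has_frame() in
macro-assembler-ia32.h), so that later stub calls are allowed, while the
prologue itself is still written out by hand.  A condensed sketch, with a
hypothetical GenerateEntry helper standing in for the real code generator:

    void GenerateEntry(MacroAssembler* masm) {
      // Bookkeeping only; no code is generated for a MANUAL frame scope.
      FrameScope scope(masm, StackFrame::MANUAL);
      // The actual frame is still set up explicitly.
      masm->push(ebp);
      masm->mov(ebp, esp);
      // ... emit the body, which may now call stubs ...
    }
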
 
 
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index ab62764..9b8f096 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -66,8 +66,8 @@
     __ j(not_equal, &miss);
 
     // Jump to the first instruction in the code stub.
-    __ add(Operand(extra), Immediate(Code::kHeaderSize - kHeapObjectTag));
-    __ jmp(Operand(extra));
+    __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
+    __ jmp(extra);
 
     __ bind(&miss);
   } else {
@@ -92,8 +92,8 @@
     __ mov(offset, Operand::StaticArray(offset, times_2, value_offset));
 
     // Jump to the first instruction in the code stub.
-    __ add(Operand(offset), Immediate(Code::kHeaderSize - kHeapObjectTag));
-    __ jmp(Operand(offset));
+    __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
+    __ jmp(offset);
 
     // Pop at miss.
     __ bind(&miss);
@@ -204,8 +204,8 @@
   __ add(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
   __ xor_(scratch, flags);
   __ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
-  __ sub(scratch, Operand(name));
-  __ add(Operand(scratch), Immediate(flags));
+  __ sub(scratch, name);
+  __ add(scratch, Immediate(flags));
   __ and_(scratch, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
 
   // Probe the secondary table.
@@ -318,7 +318,7 @@
                                                  Register scratch2,
                                                  Label* miss_label) {
   __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
-  __ mov(eax, Operand(scratch1));
+  __ mov(eax, scratch1);
   __ ret(0);
 }
 
@@ -406,7 +406,7 @@
   //                                          frame.
   // -----------------------------------
   __ pop(scratch);
-  __ add(Operand(esp), Immediate(kPointerSize * kFastApiCallArguments));
+  __ add(esp, Immediate(kPointerSize * kFastApiCallArguments));
   __ push(scratch);
 }
 
@@ -462,7 +462,7 @@
   __ PrepareCallApiFunction(kApiArgc + kApiStackSpace);
 
   __ mov(ApiParameterOperand(1), eax);  // v8::Arguments::implicit_args_.
-  __ add(Operand(eax), Immediate(argc * kPointerSize));
+  __ add(eax, Immediate(argc * kPointerSize));
   __ mov(ApiParameterOperand(2), eax);  // v8::Arguments::values_.
   __ Set(ApiParameterOperand(3), Immediate(argc));  // v8::Arguments::length_.
   // v8::Arguments::is_construct_call_.
@@ -651,7 +651,7 @@
                                         scratch1, scratch2, scratch3, name,
                                         miss_label);
 
-    __ EnterInternalFrame();
+    FrameScope scope(masm, StackFrame::INTERNAL);
     // Save the name_ register across the call.
     __ push(name_);
 
@@ -668,7 +668,8 @@
 
     // Restore the name_ register.
     __ pop(name_);
-    __ LeaveInternalFrame();
+
+    // Leave the internal frame.
   }
 
   void LoadWithInterceptor(MacroAssembler* masm,
@@ -676,19 +677,21 @@
                            Register holder,
                            JSObject* holder_obj,
                            Label* interceptor_succeeded) {
-    __ EnterInternalFrame();
-    __ push(holder);  // Save the holder.
-    __ push(name_);  // Save the name.
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(holder);  // Save the holder.
+      __ push(name_);  // Save the name.
 
-    CompileCallLoadPropertyWithInterceptor(masm,
-                                           receiver,
-                                           holder,
-                                           name_,
-                                           holder_obj);
+      CompileCallLoadPropertyWithInterceptor(masm,
+                                             receiver,
+                                             holder,
+                                             name_,
+                                             holder_obj);
 
-    __ pop(name_);  // Restore the name.
-    __ pop(receiver);  // Restore the holder.
-    __ LeaveInternalFrame();
+      __ pop(name_);  // Restore the name.
+      __ pop(receiver);  // Restore the holder.
+      // Leave the internal frame.
+    }
 
     __ cmp(eax, masm->isolate()->factory()->no_interceptor_result_sentinel());
     __ j(not_equal, interceptor_succeeded);
@@ -786,8 +789,12 @@
 
     // Update the write barrier for the array address.
     // Pass the value being stored in the now unused name_reg.
-    __ mov(name_reg, Operand(eax));
-    __ RecordWrite(receiver_reg, offset, name_reg, scratch);
+    __ mov(name_reg, eax);
+    __ RecordWriteField(receiver_reg,
+                        offset,
+                        name_reg,
+                        scratch,
+                        kDontSaveFPRegs);
   } else {
     // Write to the properties array.
     int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -797,8 +804,12 @@
 
     // Update the write barrier for the array address.
     // Pass the value being stored in the now unused name_reg.
-    __ mov(name_reg, Operand(eax));
-    __ RecordWrite(scratch, offset, name_reg, receiver_reg);
+    __ mov(name_reg, eax);
+    __ RecordWriteField(scratch,
+                        offset,
+                        name_reg,
+                        receiver_reg,
+                        kDontSaveFPRegs);
   }
 
   // Return the value (register eax).
@@ -932,7 +943,7 @@
     } else if (heap()->InNewSpace(prototype)) {
       // Get the map of the current object.
       __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-      __ cmp(Operand(scratch1), Immediate(Handle<Map>(current->map())));
+      __ cmp(scratch1, Immediate(Handle<Map>(current->map())));
       // Branch on the result of the map check.
       __ j(not_equal, miss);
       // Check access rights to the global object.  This has to happen
@@ -1053,7 +1064,7 @@
   __ pop(scratch3);  // Get return address to place it below.
 
   __ push(receiver);  // receiver
-  __ mov(scratch2, Operand(esp));
+  __ mov(scratch2, esp);
   ASSERT(!scratch2.is(reg));
   __ push(reg);  // holder
   // Push data from AccessorInfo.
@@ -1084,7 +1095,7 @@
 
   __ PrepareCallApiFunction(kApiArgc);
   __ mov(ApiParameterOperand(0), ebx);  // name.
-  __ add(Operand(ebx), Immediate(kPointerSize));
+  __ add(ebx, Immediate(kPointerSize));
   __ mov(ApiParameterOperand(1), ebx);  // arguments pointer.
 
   // Emitting a stub call may try to allocate (if the code is not
@@ -1158,40 +1169,42 @@
 
     // Save necessary data before invoking an interceptor.
     // Requires a frame to make GC aware of pushed pointers.
-    __ EnterInternalFrame();
+    {
+      FrameScope frame_scope(masm(), StackFrame::INTERNAL);
 
-    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-      // CALLBACKS case needs a receiver to be passed into C++ callback.
-      __ push(receiver);
+      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+        // CALLBACKS case needs a receiver to be passed into C++ callback.
+        __ push(receiver);
+      }
+      __ push(holder_reg);
+      __ push(name_reg);
+
+      // Invoke an interceptor.  Note: map checks from the receiver to the
+      // interceptor's holder have been compiled before (see a caller of
+      // this method).
+      CompileCallLoadPropertyWithInterceptor(masm(),
+                                             receiver,
+                                             holder_reg,
+                                             name_reg,
+                                             interceptor_holder);
+
+      // Check if the interceptor provided a value for the property.  If it
+      // did, return immediately.
+      Label interceptor_failed;
+      __ cmp(eax, factory()->no_interceptor_result_sentinel());
+      __ j(equal, &interceptor_failed);
+      frame_scope.GenerateLeaveFrame();
+      __ ret(0);
+
+      __ bind(&interceptor_failed);
+      __ pop(name_reg);
+      __ pop(holder_reg);
+      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+        __ pop(receiver);
+      }
+
+      // Leave the internal frame.
     }
-    __ push(holder_reg);
-    __ push(name_reg);
-
-    // Invoke an interceptor.  Note: map checks from receiver to
-    // interceptor's holder has been compiled before (see a caller
-    // of this method.)
-    CompileCallLoadPropertyWithInterceptor(masm(),
-                                           receiver,
-                                           holder_reg,
-                                           name_reg,
-                                           interceptor_holder);
-
-    // Check if interceptor provided a value for property.  If it's
-    // the case, return immediately.
-    Label interceptor_failed;
-    __ cmp(eax, factory()->no_interceptor_result_sentinel());
-    __ j(equal, &interceptor_failed);
-    __ LeaveInternalFrame();
-    __ ret(0);
-
-    __ bind(&interceptor_failed);
-    __ pop(name_reg);
-    __ pop(holder_reg);
-    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-      __ pop(receiver);
-    }
-
-    __ LeaveInternalFrame();
 
     // Check that the maps from interceptor's holder to lookup's holder
     // haven't changed.  And load lookup's holder into holder_reg.
@@ -1259,7 +1272,7 @@
 
 void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
   if (kind_ == Code::KEYED_CALL_IC) {
-    __ cmp(Operand(ecx), Immediate(Handle<String>(name)));
+    __ cmp(ecx, Immediate(Handle<String>(name)));
     __ j(not_equal, miss);
   }
 }
@@ -1316,7 +1329,7 @@
            Immediate(Handle<SharedFunctionInfo>(function->shared())));
     __ j(not_equal, miss);
   } else {
-    __ cmp(Operand(edi), Immediate(Handle<JSFunction>(function)));
+    __ cmp(edi, Immediate(Handle<JSFunction>(function)));
     __ j(not_equal, miss);
   }
 }
@@ -1441,21 +1454,25 @@
     __ j(not_equal, &call_builtin);
 
     if (argc == 1) {  // Otherwise fall through to call builtin.
-      Label exit, with_write_barrier, attempt_to_grow_elements;
+      Label attempt_to_grow_elements, with_write_barrier;
 
       // Get the array's length into eax and calculate new length.
       __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
       STATIC_ASSERT(kSmiTagSize == 1);
       STATIC_ASSERT(kSmiTag == 0);
-      __ add(Operand(eax), Immediate(Smi::FromInt(argc)));
+      __ add(eax, Immediate(Smi::FromInt(argc)));
 
       // Get the element's length into ecx.
       __ mov(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
 
       // Check if we could survive without allocation.
-      __ cmp(eax, Operand(ecx));
+      __ cmp(eax, ecx);
       __ j(greater, &attempt_to_grow_elements);
 
+      // Check if value is a smi.
+      __ mov(ecx, Operand(esp, argc * kPointerSize));
+      __ JumpIfNotSmi(ecx, &with_write_barrier);
+
       // Save new length.
       __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
 
@@ -1463,20 +1480,29 @@
       __ lea(edx, FieldOperand(ebx,
                                eax, times_half_pointer_size,
                                FixedArray::kHeaderSize - argc * kPointerSize));
-      __ mov(ecx, Operand(esp, argc * kPointerSize));
       __ mov(Operand(edx, 0), ecx);
 
-      // Check if value is a smi.
-      __ JumpIfNotSmi(ecx, &with_write_barrier);
-
-      __ bind(&exit);
       __ ret((argc + 1) * kPointerSize);
 
       __ bind(&with_write_barrier);
 
-      __ InNewSpace(ebx, ecx, equal, &exit);
+      if (FLAG_smi_only_arrays) {
+        __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
+        __ CheckFastObjectElements(edi, &call_builtin);
+      }
 
-      __ RecordWriteHelper(ebx, edx, ecx);
+      // Save new length.
+      __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
+
+      // Push the element.
+      __ lea(edx, FieldOperand(ebx,
+                               eax, times_half_pointer_size,
+                               FixedArray::kHeaderSize - argc * kPointerSize));
+      __ mov(Operand(edx, 0), ecx);
+
+      __ RecordWrite(
+          ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
       __ ret((argc + 1) * kPointerSize);
 
       __ bind(&attempt_to_grow_elements);
@@ -1484,6 +1510,21 @@
         __ jmp(&call_builtin);
       }
 
+      __ mov(edi, Operand(esp, argc * kPointerSize));
+      if (FLAG_smi_only_arrays) {
+        // Growing elements that are SMI-only requires special handling in case
+        // the new element is non-Smi. For now, delegate to the builtin.
+        Label no_fast_elements_check;
+        __ JumpIfSmi(edi, &no_fast_elements_check);
+        __ mov(esi, FieldOperand(edx, HeapObject::kMapOffset));
+        __ CheckFastObjectElements(esi, &call_builtin, Label::kFar);
+        __ bind(&no_fast_elements_check);
+      }
+
+      // We could be lucky and the elements array could be at the top of
+      // new-space.  In this case we can just grow it in place by moving the
+      // allocation pointer up.
+
       ExternalReference new_space_allocation_top =
           ExternalReference::new_space_allocation_top_address(isolate());
       ExternalReference new_space_allocation_limit =
@@ -1497,33 +1538,43 @@
       __ lea(edx, FieldOperand(ebx,
                                eax, times_half_pointer_size,
                                FixedArray::kHeaderSize - argc * kPointerSize));
-      __ cmp(edx, Operand(ecx));
+      __ cmp(edx, ecx);
       __ j(not_equal, &call_builtin);
-      __ add(Operand(ecx), Immediate(kAllocationDelta * kPointerSize));
+      __ add(ecx, Immediate(kAllocationDelta * kPointerSize));
       __ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit));
       __ j(above, &call_builtin);
 
       // We fit and could grow elements.
       __ mov(Operand::StaticVariable(new_space_allocation_top), ecx);
-      __ mov(ecx, Operand(esp, argc * kPointerSize));
 
       // Push the argument...
-      __ mov(Operand(edx, 0), ecx);
+      __ mov(Operand(edx, 0), edi);
       // ... and fill the rest with holes.
       for (int i = 1; i < kAllocationDelta; i++) {
         __ mov(Operand(edx, i * kPointerSize),
                Immediate(factory()->the_hole_value()));
       }
 
+      // We know the elements array is in new space, so we don't need the
+      // remembered set, but we just pushed a value onto it, so we may have
+      // to tell the incremental marker to rescan the object that we just
+      // grew.  We don't need to worry about the holes because they are in
+      // old space and already marked black.
+      __ RecordWrite(ebx, edx, edi, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
+
       // Restore receiver to edx as finish sequence assumes it's here.
       __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
 
       // Increment element's and array's sizes.
       __ add(FieldOperand(ebx, FixedArray::kLengthOffset),
              Immediate(Smi::FromInt(kAllocationDelta)));
+
+      // NOTE: This only happens in new space, where we don't care about the
+      // black-byte count on pages.  Otherwise we would also have to update
+      // that count if the object is black.
+
       __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
 
-      // Elements are in new space, so write barrier is not required.
       __ ret((argc + 1) * kPointerSize);
     }
 
@@ -1585,7 +1636,7 @@
 
   // Get the array's length into ecx and calculate new length.
   __ mov(ecx, FieldOperand(edx, JSArray::kLengthOffset));
-  __ sub(Operand(ecx), Immediate(Smi::FromInt(1)));
+  __ sub(ecx, Immediate(Smi::FromInt(1)));
   __ j(negative, &return_undefined);
 
   // Get the last element.
@@ -1594,7 +1645,7 @@
   __ mov(eax, FieldOperand(ebx,
                            ecx, times_half_pointer_size,
                            FixedArray::kHeaderSize));
-  __ cmp(Operand(eax), Immediate(factory()->the_hole_value()));
+  __ cmp(eax, Immediate(factory()->the_hole_value()));
   __ j(equal, &call_builtin);
 
   // Set the array's length.
@@ -2058,10 +2109,10 @@
   __ sar(ebx, kBitsPerInt - 1);
 
   // Do bitwise not or do nothing depending on ebx.
-  __ xor_(eax, Operand(ebx));
+  __ xor_(eax, ebx);
 
   // Add 1 or do nothing depending on ebx.
-  __ sub(eax, Operand(ebx));
+  __ sub(eax, ebx);
 
   // If the result is still negative, go to the slow case.
   // This only happens for the most negative smi.
@@ -2144,7 +2195,7 @@
 
   // Allocate space for v8::Arguments implicit values. Must be initialized
   // before calling any runtime function.
-  __ sub(Operand(esp), Immediate(kFastApiCallArguments * kPointerSize));
+  __ sub(esp, Immediate(kFastApiCallArguments * kPointerSize));
 
   // Check that the maps haven't changed and find a Holder as a side effect.
   CheckPrototypes(JSObject::cast(object), edx, holder,
@@ -2160,7 +2211,7 @@
   if (result->IsFailure()) return result;
 
   __ bind(&miss);
-  __ add(Operand(esp), Immediate(kFastApiCallArguments * kPointerSize));
+  __ add(esp, Immediate(kFastApiCallArguments * kPointerSize));
 
   __ bind(&miss_before_stack_reserved);
   MaybeObject* maybe_result = GenerateMissBranch();
@@ -2599,13 +2650,9 @@
          Immediate(Handle<Map>(object->map())));
   __ j(not_equal, &miss);
 
-
   // Compute the cell operand to use.
-  Operand cell_operand = Operand::Cell(Handle<JSGlobalPropertyCell>(cell));
-  if (Serializer::enabled()) {
-    __ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
-    cell_operand = FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset);
-  }
+  __ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
+  Operand cell_operand = FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset);
 
   // Check that the value in the cell is not the hole. If it is, this
   // cell could have been deleted and reintroducing the global needs
@@ -2616,8 +2663,23 @@
 
   // Store the value in the cell.
   __ mov(cell_operand, eax);
+  Label done;
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, &done);
+
+  __ mov(ecx, eax);
+  __ lea(edx, cell_operand);
+  // Cells are always in the remembered set.
+  __ RecordWrite(ebx,  // Object.
+                 edx,  // Address.
+                 ecx,  // Value.
+                 kDontSaveFPRegs,
+                 OMIT_REMEMBERED_SET,
+                 OMIT_SMI_CHECK);
 
   // Return the value (register eax).
+  __ bind(&done);
+
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->named_store_global_inline(), 1);
   __ ret(0);
@@ -2649,7 +2711,7 @@
   __ IncrementCounter(counters->keyed_store_field(), 1);
 
   // Check that the name has not changed.
-  __ cmp(Operand(ecx), Immediate(Handle<String>(name)));
+  __ cmp(ecx, Immediate(Handle<String>(name)));
   __ j(not_equal, &miss);
 
   // Generate store field code.  Trashes the name register.
@@ -2941,7 +3003,7 @@
   __ IncrementCounter(counters->keyed_load_field(), 1);
 
   // Check that the name has not changed.
-  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ cmp(eax, Immediate(Handle<String>(name)));
   __ j(not_equal, &miss);
 
   GenerateLoadField(receiver, holder, edx, ebx, ecx, edi, index, name, &miss);
@@ -2971,7 +3033,7 @@
   __ IncrementCounter(counters->keyed_load_callback(), 1);
 
   // Check that the name has not changed.
-  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ cmp(eax, Immediate(Handle<String>(name)));
   __ j(not_equal, &miss);
 
   MaybeObject* result = GenerateLoadCallback(receiver, holder, edx, eax, ebx,
@@ -3006,7 +3068,7 @@
   __ IncrementCounter(counters->keyed_load_constant_function(), 1);
 
   // Check that the name has not changed.
-  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ cmp(eax, Immediate(Handle<String>(name)));
   __ j(not_equal, &miss);
 
   GenerateLoadConstant(receiver, holder, edx, ebx, ecx, edi,
@@ -3034,7 +3096,7 @@
   __ IncrementCounter(counters->keyed_load_interceptor(), 1);
 
   // Check that the name has not changed.
-  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ cmp(eax, Immediate(Handle<String>(name)));
   __ j(not_equal, &miss);
 
   LookupResult lookup;
@@ -3070,7 +3132,7 @@
   __ IncrementCounter(counters->keyed_load_array_length(), 1);
 
   // Check that the name has not changed.
-  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ cmp(eax, Immediate(Handle<String>(name)));
   __ j(not_equal, &miss);
 
   GenerateLoadArrayLength(masm(), edx, ecx, &miss);
@@ -3095,7 +3157,7 @@
   __ IncrementCounter(counters->keyed_load_string_length(), 1);
 
   // Check that the name has not changed.
-  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ cmp(eax, Immediate(Handle<String>(name)));
   __ j(not_equal, &miss);
 
   GenerateLoadStringLength(masm(), edx, ecx, ebx, &miss, true);
@@ -3120,7 +3182,7 @@
   __ IncrementCounter(counters->keyed_load_function_prototype(), 1);
 
   // Check that the name has not changed.
-  __ cmp(Operand(eax), Immediate(Handle<String>(name)));
+  __ cmp(eax, Immediate(Handle<String>(name)));
   __ j(not_equal, &miss);
 
   GenerateLoadFunctionPrototype(masm(), edx, ecx, ebx, &miss);
@@ -3298,7 +3360,7 @@
   // Move argc to ebx and retrieve and tag the JSObject to return.
   __ mov(ebx, eax);
   __ pop(eax);
-  __ or_(Operand(eax), Immediate(kHeapObjectTag));
+  __ or_(eax, Immediate(kHeapObjectTag));
 
   // Remove caller arguments and receiver from the stack and return.
   __ pop(ecx);
@@ -3679,10 +3741,10 @@
             // If the value is NaN or +/-infinity, the result is 0x80000000,
             // which is automatically zero when taken mod 2^n, n < 32.
             __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
-            __ sub(Operand(esp), Immediate(2 * kPointerSize));
+            __ sub(esp, Immediate(2 * kPointerSize));
             __ fisttp_d(Operand(esp, 0));
             __ pop(ebx);
-            __ add(Operand(esp), Immediate(kPointerSize));
+            __ add(esp, Immediate(kPointerSize));
           } else {
             ASSERT(CpuFeatures::IsSupported(SSE2));
             CpuFeatures::Scope scope(SSE2);
@@ -3838,8 +3900,10 @@
 }
 
 
-void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
-                                                      bool is_js_array) {
+void KeyedStoreStubCompiler::GenerateStoreFastElement(
+    MacroAssembler* masm,
+    bool is_js_array,
+    ElementsKind elements_kind) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : key
@@ -3870,11 +3934,28 @@
     __ j(above_equal, &miss_force_generic);
   }
 
-  // Do the store and update the write barrier. Make sure to preserve
-  // the value in register eax.
-  __ mov(edx, Operand(eax));
-  __ mov(FieldOperand(edi, ecx, times_2, FixedArray::kHeaderSize), eax);
-  __ RecordWrite(edi, 0, edx, ecx);
+  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+    __ JumpIfNotSmi(eax, &miss_force_generic);
+    // ecx is a smi; use times_half_pointer_size instead of
+    // times_pointer_size.
+    __ mov(FieldOperand(edi,
+                        ecx,
+                        times_half_pointer_size,
+                        FixedArray::kHeaderSize), eax);
+  } else {
+    ASSERT(elements_kind == FAST_ELEMENTS);
+    // Do the store and update the write barrier.
+    // ecx is a smi; use times_half_pointer_size instead of
+    // times_pointer_size.
+    __ lea(ecx, FieldOperand(edi,
+                             ecx,
+                             times_half_pointer_size,
+                             FixedArray::kHeaderSize));
+    __ mov(Operand(ecx, 0), eax);
+    // Make sure to preserve the value in register eax.
+    __ mov(edx, eax);
+    __ RecordWrite(edi, ecx, edx, kDontSaveFPRegs);
+  }
 
   // Done.
   __ ret(0);
@@ -3896,8 +3977,7 @@
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
-  Label miss_force_generic, smi_value, is_nan, maybe_nan;
-  Label have_double_value, not_nan;
+  Label miss_force_generic;
 
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
@@ -3918,59 +3998,13 @@
   }
   __ j(above_equal, &miss_force_generic);
 
-  __ JumpIfSmi(eax, &smi_value, Label::kNear);
-
-  __ CheckMap(eax,
-              masm->isolate()->factory()->heap_number_map(),
-              &miss_force_generic,
-              DONT_DO_SMI_CHECK);
-
-  // Double value, canonicalize NaN.
-  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
-  __ cmp(FieldOperand(eax, offset), Immediate(kNaNOrInfinityLowerBoundUpper32));
-  __ j(greater_equal, &maybe_nan, Label::kNear);
-
-  __ bind(&not_nan);
-  ExternalReference canonical_nan_reference =
-      ExternalReference::address_of_canonical_non_hole_nan();
-  if (CpuFeatures::IsSupported(SSE2)) {
-    CpuFeatures::Scope use_sse2(SSE2);
-    __ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
-    __ bind(&have_double_value);
-    __ movdbl(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize),
-              xmm0);
-    __ ret(0);
-  } else {
-    __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
-    __ bind(&have_double_value);
-    __ fstp_d(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize));
-    __ ret(0);
-  }
-
-  __ bind(&maybe_nan);
-  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
-  // it's an Infinity, and the non-NaN code path applies.
-  __ j(greater, &is_nan, Label::kNear);
-  __ cmp(FieldOperand(eax, HeapNumber::kValueOffset), Immediate(0));
-  __ j(zero, &not_nan);
-  __ bind(&is_nan);
-  if (CpuFeatures::IsSupported(SSE2)) {
-    CpuFeatures::Scope use_sse2(SSE2);
-    __ movdbl(xmm0, Operand::StaticVariable(canonical_nan_reference));
-  } else {
-    __ fld_d(Operand::StaticVariable(canonical_nan_reference));
-  }
-  __ jmp(&have_double_value, Label::kNear);
-
-  __ bind(&smi_value);
-  // Value is a smi. convert to a double and store.
-  // Preserve original value.
-  __ mov(edx, eax);
-  __ SmiUntag(edx);
-  __ push(edx);
-  __ fild_s(Operand(esp, 0));
-  __ pop(edx);
-  __ fstp_d(FieldOperand(edi, ecx, times_4, FixedDoubleArray::kHeaderSize));
+  __ StoreNumberToDoubleElements(eax,
+                                 edi,
+                                 ecx,
+                                 edx,
+                                 xmm0,
+                                 &miss_force_generic,
+                                 true);
   __ ret(0);
 
   // Handle store cache miss, replacing the ic with the generic stub.
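
A note on the write-barrier calls introduced in this file: RecordWriteField
and RecordWrite now take kDontSaveFPRegs plus optional remembered-set and
smi-check arguments, and the comments above state when a stub may pass
OMIT_REMEMBERED_SET (the written-to object is in new space, or is a cell that
is always in the remembered set) or OMIT_SMI_CHECK (the stored value is known
to be a heap object).  A plain C++ sketch of the logic the emitted barrier has
to cover after "*slot = value"; WriteBarrier itself is a hypothetical helper,
while InNewSpace, incremental_marking() and RecordWrite are names from V8 or
from this patch:

    void WriteBarrier(Heap* heap, HeapObject* object, Object** slot,
                      Object* value) {
      if (!value->IsHeapObject()) return;   // the part OMIT_SMI_CHECK skips
      if (heap->InNewSpace(value) && !heap->InNewSpace(object)) {
        // The part OMIT_REMEMBERED_SET skips: remember |slot| so a later
        // scavenge can find this old-to-new pointer.
      }
      // Incremental-marking part; see BaseRecordWrite in
      // src/incremental-marking-inl.h added below.
      heap->incremental_marking()->RecordWrite(object, slot, value);
    }
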
diff --git a/src/ic-inl.h b/src/ic-inl.h
index b4f789c..498cf3a 100644
--- a/src/ic-inl.h
+++ b/src/ic-inl.h
@@ -87,6 +87,8 @@
   }
 #endif
   Assembler::set_target_address_at(address, target->instruction_start());
+  target->GetHeap()->incremental_marking()->RecordCodeTargetPatch(address,
+                                                                  target);
 }
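
The ic-inl.h hunk above makes IC target patching visible to the incremental
marker: rewriting a call target stores a Code pointer into already-generated
(and possibly already-marked) code, so it has to be reported like any other
recorded write.  Condensed into a hypothetical wrapper, with both calls taken
verbatim from the hunk:

    void PatchTarget(Address address, Code* target) {
      Assembler::set_target_address_at(address, target->instruction_start());
      target->GetHeap()->incremental_marking()->RecordCodeTargetPatch(address,
                                                                      target);
    }
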
 
 
diff --git a/src/ic.cc b/src/ic.cc
index 0f76a9a..50b1cde 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -1351,7 +1351,7 @@
 }
 
 
-static bool LookupForWrite(JSReceiver* receiver,
+static bool LookupForWrite(JSObject* receiver,
                            String* name,
                            LookupResult* lookup) {
   receiver->LocalLookup(name, lookup);
@@ -1359,12 +1359,10 @@
     return false;
   }
 
-  if (lookup->type() == INTERCEPTOR) {
-    JSObject* object = JSObject::cast(receiver);
-    if (object->GetNamedInterceptor()->setter()->IsUndefined()) {
-      object->LocalLookupRealNamedProperty(name, lookup);
-      return StoreICableLookup(lookup);
-    }
+  if (lookup->type() == INTERCEPTOR &&
+      receiver->GetNamedInterceptor()->setter()->IsUndefined()) {
+    receiver->LocalLookupRealNamedProperty(name, lookup);
+    return StoreICableLookup(lookup);
   }
 
   return true;
@@ -1376,28 +1374,28 @@
                             Handle<Object> object,
                             Handle<String> name,
                             Handle<Object> value) {
-  // If the object is undefined or null it's illegal to try to set any
-  // properties on it; throw a TypeError in that case.
-  if (object->IsUndefined() || object->IsNull()) {
-    return TypeError("non_object_property_store", object, name);
-  }
+  if (!object->IsJSObject()) {
+    // Handle proxies.
+    if (object->IsJSProxy()) {
+      return JSProxy::cast(*object)->
+          SetProperty(*name, *value, NONE, strict_mode);
+    }
 
-  if (!object->IsJSReceiver()) {
+    // If the object is undefined or null it's illegal to try to set any
+    // properties on it; throw a TypeError in that case.
+    if (object->IsUndefined() || object->IsNull()) {
+      return TypeError("non_object_property_store", object, name);
+    }
+
     // The length property of string values is read-only. Throw in strict mode.
     if (strict_mode == kStrictMode && object->IsString() &&
         name->Equals(isolate()->heap()->length_symbol())) {
       return TypeError("strict_read_only_property", object, name);
     }
-    // Ignore stores where the receiver is not a JSObject.
+    // Ignore other stores where the receiver is not a JSObject.
     return *value;
   }
 
-  // Handle proxies.
-  if (object->IsJSProxy()) {
-    return JSReceiver::cast(*object)->
-        SetProperty(*name, *value, NONE, strict_mode);
-  }
-
   Handle<JSObject> receiver = Handle<JSObject>::cast(object);
 
   // Check if the given name is an array index.
@@ -1675,6 +1673,7 @@
   } else {
     ASSERT(receiver_map->has_dictionary_elements() ||
            receiver_map->has_fast_elements() ||
+           receiver_map->has_fast_smi_only_elements() ||
            receiver_map->has_fast_double_elements() ||
            receiver_map->has_external_array_elements());
     bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
@@ -1690,6 +1689,7 @@
                                              Code* generic_stub) {
   Code* result = NULL;
   if (receiver->HasFastElements() ||
+      receiver->HasFastSmiOnlyElements() ||
       receiver->HasExternalArrayElements() ||
       receiver->HasFastDoubleElements() ||
       receiver->HasDictionaryElements()) {
diff --git a/src/incremental-marking-inl.h b/src/incremental-marking-inl.h
new file mode 100644
index 0000000..43fe0f5
--- /dev/null
+++ b/src/incremental-marking-inl.h
@@ -0,0 +1,155 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_INCREMENTAL_MARKING_INL_H_
+#define V8_INCREMENTAL_MARKING_INL_H_
+
+#include "incremental-marking.h"
+
+namespace v8 {
+namespace internal {
+
+
+bool IncrementalMarking::BaseRecordWrite(HeapObject* obj,
+                                         Object** slot,
+                                         Object* value) {
+  if (IsMarking() && value->IsHeapObject()) {
+    MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
+    if (Marking::IsWhite(value_bit)) {
+      MarkBit obj_bit = Marking::MarkBitFrom(obj);
+      if (Marking::IsBlack(obj_bit)) {
+        BlackToGreyAndUnshift(obj, obj_bit);
+        RestartIfNotMarking();
+      }
+
+      // Object is either grey or white; it will be scanned if it survives.
+      return false;
+    }
+    return true;
+  }
+  return false;
+}
+
+
+void IncrementalMarking::RecordWrite(HeapObject* obj,
+                                     Object** slot,
+                                     Object* value) {
+  if (BaseRecordWrite(obj, slot, value) && is_compacting_ && slot != NULL) {
+    MarkBit obj_bit = Marking::MarkBitFrom(obj);
+    if (Marking::IsBlack(obj_bit)) {
+      // Object is not going to be rescanned, so we need to record the slot.
+      heap_->mark_compact_collector()->RecordSlot(
+          HeapObject::RawField(obj, 0), slot, value);
+    }
+  }
+}
+
+
+void IncrementalMarking::RecordWriteIntoCode(HeapObject* obj,
+                                             RelocInfo* rinfo,
+                                             Object* value) {
+  if (IsMarking() && value->IsHeapObject()) {
+    MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
+    if (Marking::IsWhite(value_bit)) {
+      MarkBit obj_bit = Marking::MarkBitFrom(obj);
+      if (Marking::IsBlack(obj_bit)) {
+        BlackToGreyAndUnshift(obj, obj_bit);
+        RestartIfNotMarking();
+      }
+
+      // Object is either grey or white; it will be scanned if it survives.
+      return;
+    }
+
+    if (is_compacting_) {
+      MarkBit obj_bit = Marking::MarkBitFrom(obj);
+      if (Marking::IsBlack(obj_bit)) {
+        // Object is not going to be rescanned, so we need to record the
+        // slot.
+        heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
+                                                         Code::cast(value));
+      }
+    }
+  }
+}
+
+
+void IncrementalMarking::RecordWrites(HeapObject* obj) {
+  if (IsMarking()) {
+    MarkBit obj_bit = Marking::MarkBitFrom(obj);
+    if (Marking::IsBlack(obj_bit)) {
+      BlackToGreyAndUnshift(obj, obj_bit);
+      RestartIfNotMarking();
+    }
+  }
+}
+
+
+void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
+                                               MarkBit mark_bit) {
+  ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
+  ASSERT(obj->Size() >= 2*kPointerSize);
+  ASSERT(IsMarking());
+  Marking::BlackToGrey(mark_bit);
+  int obj_size = obj->Size();
+  MemoryChunk::IncrementLiveBytes(obj->address(), -obj_size);
+  int64_t old_bytes_rescanned = bytes_rescanned_;
+  bytes_rescanned_ = old_bytes_rescanned + obj_size;
+  if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
+    if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSize()) {
+      // If we have queued twice the heap size for rescanning then we are
+      // going around in circles, scanning the same objects again and again
+      // as the program mutates the heap faster than we can incrementally
+      // trace it.  In this case we switch to non-incremental marking in
+      // order to finish off this marking phase.
+      if (FLAG_trace_gc) {
+        PrintF("Hurrying incremental marking because of lack of progress\n");
+      }
+      allocation_marking_factor_ = kMaxAllocationMarkingFactor;
+    }
+  }
+
+  marking_deque_.UnshiftGrey(obj);
+}
+
+
+void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
+  WhiteToGrey(obj, mark_bit);
+  marking_deque_.PushGrey(obj);
+}
+
+
+void IncrementalMarking::WhiteToGrey(HeapObject* obj, MarkBit mark_bit) {
+  ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
+  ASSERT(obj->Size() >= 2*kPointerSize);
+  ASSERT(IsMarking());
+  Marking::WhiteToGrey(mark_bit);
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_INCREMENTAL_MARKING_INL_H_
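
The barrier functions in incremental-marking-inl.h above are easiest to read
against the usual tri-color marking states; as a recap, an illustrative enum
(not the actual MarkBit encoding):

    enum Color {
      WHITE,  // not yet reached; reclaimed if still white when marking ends
      GREY,   // reached and queued on the marking deque, body not yet scanned
      BLACK   // reached and fully scanned; normally not visited again
    };

BaseRecordWrite preserves the invariant that no black object points directly
at a white object: when such a store happens, the black holder is pushed back
to grey (BlackToGreyAndUnshift) so its body is rescanned, and if too many
bytes get re-queued this way the marker concludes it is not making progress
and hurries to finish marking non-incrementally.
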
diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc
new file mode 100644
index 0000000..95642e9
--- /dev/null
+++ b/src/incremental-marking.cc
@@ -0,0 +1,809 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "incremental-marking.h"
+
+#include "code-stubs.h"
+#include "compilation-cache.h"
+#include "v8conversions.h"
+
+namespace v8 {
+namespace internal {
+
+
+IncrementalMarking::IncrementalMarking(Heap* heap)
+    : heap_(heap),
+      state_(STOPPED),
+      marking_deque_memory_(NULL),
+      steps_count_(0),
+      steps_took_(0),
+      longest_step_(0.0),
+      old_generation_space_available_at_start_of_incremental_(0),
+      old_generation_space_used_at_start_of_incremental_(0),
+      steps_count_since_last_gc_(0),
+      steps_took_since_last_gc_(0),
+      should_hurry_(false),
+      allocation_marking_factor_(0),
+      allocated_(0) {
+}
+
+
+void IncrementalMarking::TearDown() {
+  delete marking_deque_memory_;
+}
+
+
+void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
+                                             Object* value,
+                                             Isolate* isolate) {
+  ASSERT(obj->IsHeapObject());
+
+  // Fast cases should already be covered by RecordWriteStub.
+  ASSERT(value->IsHeapObject());
+  ASSERT(!value->IsHeapNumber());
+  ASSERT(!value->IsString() || value->IsConsString());
+  ASSERT(Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(value))));
+
+  IncrementalMarking* marking = isolate->heap()->incremental_marking();
+  ASSERT(!marking->is_compacting_);
+  marking->RecordWrite(obj, NULL, value);
+}
+
+
+void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj,
+                                                          Object** slot,
+                                                          Isolate* isolate) {
+  IncrementalMarking* marking = isolate->heap()->incremental_marking();
+  ASSERT(marking->is_compacting_);
+  marking->RecordWrite(obj, slot, *slot);
+}
+
+
+void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
+  if (IsMarking()) {
+    Code* host = heap_->isolate()->inner_pointer_to_code_cache()->
+        GcSafeFindCodeForInnerPointer(pc);
+    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
+    RecordWriteIntoCode(host, &rinfo, value);
+  }
+}
+
+
+void IncrementalMarking::RecordWriteOfCodeEntry(JSFunction* host,
+                                                Object** slot,
+                                                Code* value) {
+  if (BaseRecordWrite(host, slot, value) && is_compacting_) {
+    ASSERT(slot != NULL);
+    heap_->mark_compact_collector()->
+        RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value);
+  }
+}
+
+
+
+class IncrementalMarkingMarkingVisitor : public ObjectVisitor {
+ public:
+  IncrementalMarkingMarkingVisitor(Heap* heap,
+                                   IncrementalMarking* incremental_marking)
+      : heap_(heap),
+        incremental_marking_(incremental_marking) {
+  }
+
+  void VisitEmbeddedPointer(Code* host, Object** p) {
+    Object* obj = *p;
+    if (obj->NonFailureIsHeapObject()) {
+      heap_->mark_compact_collector()->RecordSlot(
+          reinterpret_cast<Object**>(host),
+          p,
+          obj);
+      MarkObject(obj);
+    }
+  }
+
+  void VisitCodeTarget(RelocInfo* rinfo) {
+    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
+    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+    heap_->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target));
+    MarkObject(target);
+  }
+
+  void VisitDebugTarget(RelocInfo* rinfo) {
+    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
+            rinfo->IsPatchedReturnSequence()) ||
+           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
+            rinfo->IsPatchedDebugBreakSlotSequence()));
+    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
+    heap_->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target));
+    MarkObject(target);
+  }
+
+  void VisitCodeEntry(Address entry_address) {
+    Object* target = Code::GetObjectFromEntryAddress(entry_address);
+    heap_->mark_compact_collector()->
+        RecordCodeEntrySlot(entry_address, Code::cast(target));
+    MarkObject(target);
+  }
+
+  void VisitPointer(Object** p) {
+    Object* obj = *p;
+    if (obj->NonFailureIsHeapObject()) {
+      heap_->mark_compact_collector()->RecordSlot(p, p, obj);
+      MarkObject(obj);
+    }
+  }
+
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** p = start; p < end; p++) {
+      Object* obj = *p;
+      if (obj->NonFailureIsHeapObject()) {
+        heap_->mark_compact_collector()->RecordSlot(start, p, obj);
+        MarkObject(obj);
+      }
+    }
+  }
+
+ private:
+  // Mark object pointed to by p.
+  INLINE(void MarkObject(Object* obj)) {
+    HeapObject* heap_object = HeapObject::cast(obj);
+    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
+    if (mark_bit.data_only()) {
+      if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
+        MemoryChunk::IncrementLiveBytes(heap_object->address(),
+                                        heap_object->Size());
+      }
+    } else if (Marking::IsWhite(mark_bit)) {
+      incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
+    }
+  }
+
+  Heap* heap_;
+  IncrementalMarking* incremental_marking_;
+};
+
+
+class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
+ public:
+  IncrementalMarkingRootMarkingVisitor(Heap* heap,
+                                       IncrementalMarking* incremental_marking)
+      : heap_(heap),
+        incremental_marking_(incremental_marking) {
+  }
+
+  void VisitPointer(Object** p) {
+    MarkObjectByPointer(p);
+  }
+
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
+  }
+
+ private:
+  void MarkObjectByPointer(Object** p) {
+    Object* obj = *p;
+    if (!obj->IsHeapObject()) return;
+
+    HeapObject* heap_object = HeapObject::cast(obj);
+    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
+    if (mark_bit.data_only()) {
+      if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
+          MemoryChunk::IncrementLiveBytes(heap_object->address(),
+                                          heap_object->Size());
+      }
+    } else {
+      if (Marking::IsWhite(mark_bit)) {
+        incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
+      }
+    }
+  }
+
+  Heap* heap_;
+  IncrementalMarking* incremental_marking_;
+};
+
+
+void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
+                                              bool is_marking,
+                                              bool is_compacting) {
+  if (is_marking) {
+    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+
+    // It's difficult to filter out slots recorded for large objects.
+    if (chunk->owner()->identity() == LO_SPACE &&
+        chunk->size() > static_cast<size_t>(Page::kPageSize) &&
+        is_compacting) {
+      chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
+    }
+  } else if (chunk->owner()->identity() == CELL_SPACE ||
+             chunk->scan_on_scavenge()) {
+    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+  } else {
+    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+  }
+}
+
+
+void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
+                                              bool is_marking) {
+  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+  if (is_marking) {
+    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+  } else {
+    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+  }
+  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
+}
+
+
+void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
+    PagedSpace* space) {
+  PageIterator it(space);
+  while (it.has_next()) {
+    Page* p = it.next();
+    SetOldSpacePageFlags(p, false, false);
+  }
+}
+
+
+void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
+    NewSpace* space) {
+  NewSpacePageIterator it(space);
+  while (it.has_next()) {
+    NewSpacePage* p = it.next();
+    SetNewSpacePageFlags(p, false);
+  }
+}
+
+
+void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
+  DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
+  DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
+  DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
+  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
+  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
+  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
+
+  LargePage* lop = heap_->lo_space()->first_page();
+  while (lop->is_valid()) {
+    SetOldSpacePageFlags(lop, false, false);
+    lop = lop->next_page();
+  }
+}
+
+
+void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
+  PageIterator it(space);
+  while (it.has_next()) {
+    Page* p = it.next();
+    SetOldSpacePageFlags(p, true, is_compacting_);
+  }
+}
+
+
+void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
+  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
+  while (it.has_next()) {
+    NewSpacePage* p = it.next();
+    SetNewSpacePageFlags(p, true);
+  }
+}
+
+
+void IncrementalMarking::ActivateIncrementalWriteBarrier() {
+  ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
+  ActivateIncrementalWriteBarrier(heap_->old_data_space());
+  ActivateIncrementalWriteBarrier(heap_->cell_space());
+  ActivateIncrementalWriteBarrier(heap_->map_space());
+  ActivateIncrementalWriteBarrier(heap_->code_space());
+  ActivateIncrementalWriteBarrier(heap_->new_space());
+
+  LargePage* lop = heap_->lo_space()->first_page();
+  while (lop->is_valid()) {
+    SetOldSpacePageFlags(lop, true, is_compacting_);
+    lop = lop->next_page();
+  }
+}
+
+
+bool IncrementalMarking::WorthActivating() {
+#ifndef DEBUG
+  static const intptr_t kActivationThreshold = 8 * MB;
+#else
+  // TODO(gc) consider setting this to some low value so that some
+  // debug tests run with incremental marking and some without.
+  static const intptr_t kActivationThreshold = 0;
+#endif
+
+  return FLAG_incremental_marking &&
+      heap_->PromotedSpaceSize() > kActivationThreshold;
+}
+
+
+void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
+  ASSERT(RecordWriteStub::GetMode(stub) ==
+         RecordWriteStub::STORE_BUFFER_ONLY);
+
+  if (!IsMarking()) {
+    // The stub is initially generated in STORE_BUFFER_ONLY mode, so there
+    // is nothing to do while incremental marking is not active.
+  } else if (IsCompacting()) {
+    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
+  } else {
+    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
+  }
+}
+
+
+static void PatchIncrementalMarkingRecordWriteStubs(
+    Heap* heap, RecordWriteStub::Mode mode) {
+  NumberDictionary* stubs = heap->code_stubs();
+
+  int capacity = stubs->Capacity();
+  for (int i = 0; i < capacity; i++) {
+    Object* k = stubs->KeyAt(i);
+    if (stubs->IsKey(k)) {
+      uint32_t key = NumberToUint32(k);
+
+      if (CodeStub::MajorKeyFromKey(key) ==
+          CodeStub::RecordWrite) {
+        Object* e = stubs->ValueAt(i);
+        if (e->IsCode()) {
+          RecordWriteStub::Patch(Code::cast(e), mode);
+        }
+      }
+    }
+  }
+}
+
+
+void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
+  if (marking_deque_memory_ == NULL) {
+    marking_deque_memory_ = new VirtualMemory(4 * MB);
+    marking_deque_memory_->Commit(
+        reinterpret_cast<Address>(marking_deque_memory_->address()),
+        marking_deque_memory_->size(),
+        false);  // Not executable.
+  }
+}
+
+
+void IncrementalMarking::Start() {
+  if (FLAG_trace_incremental_marking) {
+    PrintF("[IncrementalMarking] Start\n");
+  }
+  ASSERT(FLAG_incremental_marking);
+  ASSERT(state_ == STOPPED);
+
+  ResetStepCounters();
+
+  if (heap_->old_pointer_space()->IsSweepingComplete() &&
+      heap_->old_data_space()->IsSweepingComplete()) {
+    StartMarking();
+  } else {
+    if (FLAG_trace_incremental_marking) {
+      PrintF("[IncrementalMarking] Start sweeping.\n");
+    }
+    state_ = SWEEPING;
+  }
+
+  heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
+}
+
+
+static void MarkObjectGreyDoNotEnqueue(Object* obj) {
+  if (obj->IsHeapObject()) {
+    HeapObject* heap_obj = HeapObject::cast(obj);
+    MarkBit mark_bit = Marking::MarkBitFrom(heap_obj);
+    if (Marking::IsBlack(mark_bit)) {
+      MemoryChunk::IncrementLiveBytes(heap_obj->address(),
+                                      -heap_obj->Size());
+    }
+    Marking::AnyToGrey(mark_bit);
+  }
+}
+
+
+void IncrementalMarking::StartMarking() {
+  if (FLAG_trace_incremental_marking) {
+    PrintF("[IncrementalMarking] Start marking\n");
+  }
+
+  is_compacting_ = !FLAG_never_compact &&
+      heap_->mark_compact_collector()->StartCompaction();
+
+  state_ = MARKING;
+
+  RecordWriteStub::Mode mode = is_compacting_ ?
+      RecordWriteStub::INCREMENTAL_COMPACTION : RecordWriteStub::INCREMENTAL;
+
+  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
+
+  EnsureMarkingDequeIsCommitted();
+
+  // Initialize marking stack.
+  Address addr = static_cast<Address>(marking_deque_memory_->address());
+  size_t size = marking_deque_memory_->size();
+  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
+  marking_deque_.Initialize(addr, addr + size);
+
+  ActivateIncrementalWriteBarrier();
+
+#ifdef DEBUG
+  // Marking bits are cleared by the sweeper.
+  heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
+#endif
+
+  heap_->CompletelyClearInstanceofCache();
+  heap_->isolate()->compilation_cache()->MarkCompactPrologue();
+
+  if (FLAG_cleanup_code_caches_at_gc) {
+    // We will mark the cache black with a separate pass when we
+    // finish marking.
+    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
+  }
+
+  // Mark strong roots grey.
+  IncrementalMarkingRootMarkingVisitor visitor(heap_, this);
+  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
+
+  // Ready to start incremental marking.
+  if (FLAG_trace_incremental_marking) {
+    PrintF("[IncrementalMarking] Running\n");
+  }
+}
+
+
+void IncrementalMarking::PrepareForScavenge() {
+  if (!IsMarking()) return;
+  NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
+                          heap_->new_space()->FromSpaceEnd());
+  while (it.has_next()) {
+    Bitmap::Clear(it.next());
+  }
+}
+
+
+void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
+  if (!IsMarking()) return;
+
+  int current = marking_deque_.bottom();
+  int mask = marking_deque_.mask();
+  int limit = marking_deque_.top();
+  HeapObject** array = marking_deque_.array();
+  int new_top = current;
+
+  Map* filler_map = heap_->one_pointer_filler_map();
+
+  while (current != limit) {
+    HeapObject* obj = array[current];
+    ASSERT(obj->IsHeapObject());
+    current = ((current + 1) & mask);
+    if (heap_->InNewSpace(obj)) {
+      MapWord map_word = obj->map_word();
+      if (map_word.IsForwardingAddress()) {
+        HeapObject* dest = map_word.ToForwardingAddress();
+        array[new_top] = dest;
+        new_top = ((new_top + 1) & mask);
+        ASSERT(new_top != marking_deque_.bottom());
+        ASSERT(Marking::IsGrey(Marking::MarkBitFrom(obj)));
+      }
+    } else if (obj->map() != filler_map) {
+      // Skip one-word filler objects that appear on the stack when we
+      // perform an in-place array shift.
+      array[new_top] = obj;
+      new_top = ((new_top + 1) & mask);
+      ASSERT(new_top != marking_deque_.bottom());
+      ASSERT(Marking::IsGrey(Marking::MarkBitFrom(obj)));
+    }
+  }
+  marking_deque_.set_top(new_top);
+
+  steps_took_since_last_gc_ = 0;
+  steps_count_since_last_gc_ = 0;
+  longest_step_ = 0.0;
+}
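UpdateMarkingDequeAfterScavenge above rewrites the deque in place: dead new-space entries are dropped and forwarded objects are replaced by their new addresses. Here is a condensed sketch of that filter-and-compact pattern on a power-of-two ring buffer; Entry, forwarded_to and the fixed-size buffer are assumptions made only for this example.

```cpp
// Toy version of rewriting a power-of-two ring buffer in place:
// survivors are kept (possibly redirected), dead entries are dropped.
#include <cstdio>

struct Entry {
  bool dead = false;
  Entry* forwarded_to = nullptr;  // non-null if the object has moved
};

// The buffer size must be a power of two so that `mask = size - 1` works.
int Compact(Entry** array, int bottom, int top, int mask) {
  int new_top = bottom;
  for (int current = bottom; current != top; current = (current + 1) & mask) {
    Entry* e = array[current];
    if (e->dead) continue;                                 // drop
    if (e->forwarded_to != nullptr) e = e->forwarded_to;   // redirect
    array[new_top] = e;
    new_top = (new_top + 1) & mask;
  }
  return new_top;  // caller stores this as the new top
}

int main() {
  Entry a, b, c, moved;
  b.dead = true;
  c.forwarded_to = &moved;
  Entry* ring[4] = {&a, &b, &c, nullptr};
  int new_top = Compact(ring, 0, 3, 3);
  std::printf("kept %d entries, last is moved: %d\n",
              new_top, ring[new_top - 1] == &moved);
}
```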
+
+
+void IncrementalMarking::VisitGlobalContext(Context* ctx, ObjectVisitor* v) {
+  v->VisitPointers(
+      HeapObject::RawField(
+          ctx, Context::MarkCompactBodyDescriptor::kStartOffset),
+      HeapObject::RawField(
+          ctx, Context::MarkCompactBodyDescriptor::kEndOffset));
+
+  MarkCompactCollector* collector = heap_->mark_compact_collector();
+  for (int idx = Context::FIRST_WEAK_SLOT;
+       idx < Context::GLOBAL_CONTEXT_SLOTS;
+       ++idx) {
+    Object** slot =
+        HeapObject::RawField(ctx, FixedArray::OffsetOfElementAt(idx));
+    collector->RecordSlot(slot, slot, *slot);
+  }
+}
+
+
+void IncrementalMarking::Hurry() {
+  if (state() == MARKING) {
+    double start = 0.0;
+    if (FLAG_trace_incremental_marking) {
+      PrintF("[IncrementalMarking] Hurry\n");
+      start = OS::TimeCurrentMillis();
+    }
+    // TODO(gc) hurry can mark objects it encounters black as mutator
+    // was stopped.
+    Map* filler_map = heap_->one_pointer_filler_map();
+    Map* global_context_map = heap_->global_context_map();
+    IncrementalMarkingMarkingVisitor marking_visitor(heap_, this);
+    while (!marking_deque_.IsEmpty()) {
+      HeapObject* obj = marking_deque_.Pop();
+
+      // Explicitly skip one-word fillers. Incremental markbit patterns are
+      // correct only for objects that occupy at least two words.
+      Map* map = obj->map();
+      if (map == filler_map) {
+        continue;
+      } else if (map == global_context_map) {
+        // Global contexts have weak fields.
+        VisitGlobalContext(Context::cast(obj), &marking_visitor);
+      } else {
+        obj->Iterate(&marking_visitor);
+      }
+
+      MarkBit mark_bit = Marking::MarkBitFrom(obj);
+      ASSERT(!Marking::IsBlack(mark_bit));
+      Marking::MarkBlack(mark_bit);
+      MemoryChunk::IncrementLiveBytes(obj->address(), obj->Size());
+    }
+    state_ = COMPLETE;
+    if (FLAG_trace_incremental_marking) {
+      double end = OS::TimeCurrentMillis();
+      PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
+             static_cast<int>(end - start));
+    }
+  }
+
+  if (FLAG_cleanup_code_caches_at_gc) {
+    PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
+    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
+    MemoryChunk::IncrementLiveBytes(poly_cache->address(),
+                                    PolymorphicCodeCache::kSize);
+  }
+
+  Object* context = heap_->global_contexts_list();
+  while (!context->IsUndefined()) {
+    NormalizedMapCache* cache = Context::cast(context)->normalized_map_cache();
+    MarkBit mark_bit = Marking::MarkBitFrom(cache);
+    if (Marking::IsGrey(mark_bit)) {
+      Marking::GreyToBlack(mark_bit);
+      MemoryChunk::IncrementLiveBytes(cache->address(), cache->Size());
+    }
+    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+  }
+}
+
+
+void IncrementalMarking::Abort() {
+  if (IsStopped()) return;
+  if (FLAG_trace_incremental_marking) {
+    PrintF("[IncrementalMarking] Aborting.\n");
+  }
+  heap_->new_space()->LowerInlineAllocationLimit(0);
+  IncrementalMarking::set_should_hurry(false);
+  ResetStepCounters();
+  if (IsMarking()) {
+    PatchIncrementalMarkingRecordWriteStubs(heap_,
+                                            RecordWriteStub::STORE_BUFFER_ONLY);
+    DeactivateIncrementalWriteBarrier();
+
+    if (is_compacting_) {
+      LargeObjectIterator it(heap_->lo_space());
+      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+        Page* p = Page::FromAddress(obj->address());
+        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
+        }
+      }
+    }
+  }
+  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
+  state_ = STOPPED;
+  is_compacting_ = false;
+}
+
+
+void IncrementalMarking::Finalize() {
+  Hurry();
+  state_ = STOPPED;
+  is_compacting_ = false;
+  heap_->new_space()->LowerInlineAllocationLimit(0);
+  IncrementalMarking::set_should_hurry(false);
+  ResetStepCounters();
+  PatchIncrementalMarkingRecordWriteStubs(heap_,
+                                          RecordWriteStub::STORE_BUFFER_ONLY);
+  DeactivateIncrementalWriteBarrier();
+  ASSERT(marking_deque_.IsEmpty());
+  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
+}
+
+
+void IncrementalMarking::MarkingComplete() {
+  state_ = COMPLETE;
+  // We will set the stack guard to request a GC now.  This means the rest of
+  // the GC gets performed as soon as possible (we can't do a GC here in a
+  // record-write context).  If a few things get allocated between now and
+  // then, that shouldn't make us do a scavenge and keep marking incrementally,
+  // so we set the should-hurry flag to indicate that there can't be much work
+  // left to do.
+  set_should_hurry(true);
+  if (FLAG_trace_incremental_marking) {
+    PrintF("[IncrementalMarking] Complete (normal).\n");
+  }
+  heap_->isolate()->stack_guard()->RequestGC();
+}
+
+
+void IncrementalMarking::Step(intptr_t allocated_bytes) {
+  if (heap_->gc_state() != Heap::NOT_IN_GC ||
+      !FLAG_incremental_marking ||
+      !FLAG_incremental_marking_steps ||
+      (state_ != SWEEPING && state_ != MARKING)) {
+    return;
+  }
+
+  allocated_ += allocated_bytes;
+
+  if (allocated_ < kAllocatedThreshold) return;
+
+  intptr_t bytes_to_process = allocated_ * allocation_marking_factor_;
+
+  double start = 0;
+
+  if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
+    start = OS::TimeCurrentMillis();
+  }
+
+  if (state_ == SWEEPING) {
+    if (heap_->old_pointer_space()->AdvanceSweeper(bytes_to_process) &&
+        heap_->old_data_space()->AdvanceSweeper(bytes_to_process)) {
+      StartMarking();
+    }
+  } else if (state_ == MARKING) {
+    Map* filler_map = heap_->one_pointer_filler_map();
+    Map* global_context_map = heap_->global_context_map();
+    IncrementalMarkingMarkingVisitor marking_visitor(heap_, this);
+    while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
+      HeapObject* obj = marking_deque_.Pop();
+
+      // Explicitly skip one-word fillers. Incremental markbit patterns are
+      // correct only for objects that occupy at least two words.
+      Map* map = obj->map();
+      if (map == filler_map) continue;
+
+      ASSERT(Marking::IsGrey(Marking::MarkBitFrom(obj)));
+      int size = obj->SizeFromMap(map);
+      bytes_to_process -= size;
+      MarkBit map_mark_bit = Marking::MarkBitFrom(map);
+      if (Marking::IsWhite(map_mark_bit)) {
+        WhiteToGreyAndPush(map, map_mark_bit);
+      }
+
+      // TODO(gc) switch to static visitor instead of normal visitor.
+      if (map == global_context_map) {
+        // Global contexts have weak fields.
+        Context* ctx = Context::cast(obj);
+
+        // We will mark the cache black with a separate pass when we
+        // finish marking.
+        MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());
+
+        VisitGlobalContext(ctx, &marking_visitor);
+      } else {
+        obj->IterateBody(map->instance_type(), size, &marking_visitor);
+      }
+
+      MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
+      ASSERT(!Marking::IsBlack(obj_mark_bit));
+      Marking::MarkBlack(obj_mark_bit);
+      MemoryChunk::IncrementLiveBytes(obj->address(), size);
+    }
+    if (marking_deque_.IsEmpty()) MarkingComplete();
+  }
+
+  allocated_ = 0;
+
+  steps_count_++;
+  steps_count_since_last_gc_++;
+
+  bool speed_up = false;
+
+  if (old_generation_space_available_at_start_of_incremental_ < 10 * MB ||
+      SpaceLeftInOldSpace() <
+          old_generation_space_available_at_start_of_incremental_ >> 1) {
+    // Half of the space that was available is gone while we were
+    // incrementally marking.
+    speed_up = true;
+    old_generation_space_available_at_start_of_incremental_ =
+        SpaceLeftInOldSpace();
+  }
+
+  if (heap_->PromotedTotalSize() >
+      old_generation_space_used_at_start_of_incremental_ << 1) {
+    // Size of old space doubled while we were incrementally marking.
+    speed_up = true;
+    old_generation_space_used_at_start_of_incremental_ =
+        heap_->PromotedTotalSize();
+  }
+
+  if ((steps_count_ % kAllocationMarkingFactorSpeedupInterval) == 0 &&
+      allocation_marking_factor_ < kMaxAllocationMarkingFactor) {
+    speed_up = true;
+  }
+
+  if (speed_up && 0) {
+    allocation_marking_factor_ += kAllocationMarkingFactorSpeedup;
+    allocation_marking_factor_ =
+        static_cast<int>(allocation_marking_factor_ * 1.3);
+    if (FLAG_trace_gc) {
+      PrintF("Marking speed increased to %d\n", allocation_marking_factor_);
+    }
+  }
+
+  if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
+    double end = OS::TimeCurrentMillis();
+    double delta = (end - start);
+    longest_step_ = Max(longest_step_, delta);
+    steps_took_ += delta;
+    steps_took_since_last_gc_ += delta;
+  }
+}
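To get a feel for the pacing in Step() above, here is a back-of-the-envelope model: each step earns `allocated * allocation_marking_factor_` bytes of marking work, and the factor is raised every kAllocationMarkingFactorSpeedupInterval steps. The constants are copied from incremental-marking.h further down in this diff; the driver loop and the purely additive speed-up are simplifications for illustration (the real code also scales the factor and gates the speed-up on heap growth).

```cpp
// Back-of-the-envelope model of the pacing in IncrementalMarking::Step():
// every kAllocatedThreshold bytes of allocation buys `factor` times as many
// bytes of marking work, and the factor is bumped periodically so that
// marking eventually outruns allocation.
#include <cstdint>
#include <cstdio>

int main() {
  const int64_t kAllocatedThreshold = 65536;  // 64 KB of allocation per step
  const int kSpeedupInterval = 1024;          // steps between speed-ups
  const int kSpeedup = 2;                     // additive bump (simplified)
  int factor = 1;                             // kInitialAllocationMarkingFactor

  int64_t marked = 0;
  for (int step = 1; step <= 4096; ++step) {
    marked += kAllocatedThreshold * factor;   // bytes_to_process for this step
    if (step % kSpeedupInterval == 0) factor += kSpeedup;
  }
  std::printf("after 4096 steps (256 MB allocated): %lld MB marked, factor=%d\n",
              static_cast<long long>(marked >> 20), factor);
}
```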
+
+
+void IncrementalMarking::ResetStepCounters() {
+  steps_count_ = 0;
+  steps_took_ = 0;
+  longest_step_ = 0.0;
+  old_generation_space_available_at_start_of_incremental_ =
+      SpaceLeftInOldSpace();
+  old_generation_space_used_at_start_of_incremental_ =
+      heap_->PromotedTotalSize();
+  steps_count_since_last_gc_ = 0;
+  steps_took_since_last_gc_ = 0;
+  bytes_rescanned_ = 0;
+  allocation_marking_factor_ = kInitialAllocationMarkingFactor;
+}
+
+
+int64_t IncrementalMarking::SpaceLeftInOldSpace() {
+  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSize();
+}
+
+} }  // namespace v8::internal
diff --git a/src/incremental-marking.h b/src/incremental-marking.h
new file mode 100644
index 0000000..30dbbc1
--- /dev/null
+++ b/src/incremental-marking.h
@@ -0,0 +1,254 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_INCREMENTAL_MARKING_H_
+#define V8_INCREMENTAL_MARKING_H_
+
+
+#include "execution.h"
+#include "mark-compact.h"
+#include "objects.h"
+
+namespace v8 {
+namespace internal {
+
+
+class IncrementalMarking {
+ public:
+  enum State {
+    STOPPED,
+    SWEEPING,
+    MARKING,
+    COMPLETE
+  };
+
+  explicit IncrementalMarking(Heap* heap);
+
+  void TearDown();
+
+  State state() {
+    ASSERT(state_ == STOPPED || FLAG_incremental_marking);
+    return state_;
+  }
+
+  bool should_hurry() { return should_hurry_; }
+
+  inline bool IsStopped() { return state() == STOPPED; }
+
+  inline bool IsMarking() { return state() >= MARKING; }
+
+  inline bool IsMarkingIncomplete() { return state() == MARKING; }
+
+  bool WorthActivating();
+
+  void Start();
+
+  void Stop();
+
+  void PrepareForScavenge();
+
+  void UpdateMarkingDequeAfterScavenge();
+
+  void Hurry();
+
+  void Finalize();
+
+  void Abort();
+
+  void MarkingComplete();
+
+  // It's hard to know how much work the incremental marker should do to make
+  // progress in the face of the mutator creating new work for it.  We start
+  // off at a moderate rate of work and gradually increase the speed of the
+  // incremental marker until it completes.
+  // Do some marking every time this much memory has been allocated.
+  static const intptr_t kAllocatedThreshold = 65536;
+  // Start off by marking this many times more memory than has been allocated.
+  static const intptr_t kInitialAllocationMarkingFactor = 1;
+  // But if we are promoting a lot of data we need to mark faster to keep up
+  // with the data that is entering the old space through promotion.
+  static const intptr_t kFastMarking = 3;
+  // After this many steps we increase the marking/allocating factor.
+  static const intptr_t kAllocationMarkingFactorSpeedupInterval = 1024;
+  // This is how much we increase the marking/allocating factor by.
+  static const intptr_t kAllocationMarkingFactorSpeedup = 2;
+  static const intptr_t kMaxAllocationMarkingFactor = 1000000000;
+
+  void OldSpaceStep(intptr_t allocated) {
+    Step(allocated * kFastMarking / kInitialAllocationMarkingFactor);
+  }
+  void Step(intptr_t allocated);
+
+  inline void RestartIfNotMarking() {
+    if (state_ == COMPLETE) {
+      state_ = MARKING;
+      if (FLAG_trace_incremental_marking) {
+        PrintF("[IncrementalMarking] Restarting (new grey objects)\n");
+      }
+    }
+  }
+
+  static void RecordWriteFromCode(HeapObject* obj,
+                                  Object* value,
+                                  Isolate* isolate);
+
+  static void RecordWriteForEvacuationFromCode(HeapObject* obj,
+                                               Object** slot,
+                                               Isolate* isolate);
+
+  inline bool BaseRecordWrite(HeapObject* obj, Object** slot, Object* value);
+
+
+  inline void RecordWrite(HeapObject* obj, Object** slot, Object* value);
+  inline void RecordWriteIntoCode(HeapObject* obj,
+                                  RelocInfo* rinfo,
+                                  Object* value);
+  void RecordCodeTargetPatch(Address pc, HeapObject* value);
+  void RecordWriteOfCodeEntry(JSFunction* host, Object** slot, Code* value);
+
+  inline void RecordWrites(HeapObject* obj);
+
+  inline void BlackToGreyAndUnshift(HeapObject* obj, MarkBit mark_bit);
+
+  inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);
+
+  inline void WhiteToGrey(HeapObject* obj, MarkBit mark_bit);
+
+  // Does white->black or keeps the object grey or black. Returns true if
+  // converting white to black.
+  inline bool MarkBlackOrKeepGrey(MarkBit mark_bit) {
+    ASSERT(!Marking::IsImpossible(mark_bit));
+    if (mark_bit.Get()) {
+      // Grey or black: Keep the color.
+      return false;
+    }
+    mark_bit.Set();
+    ASSERT(Marking::IsBlack(mark_bit));
+    return true;
+  }
+
+  inline int steps_count() {
+    return steps_count_;
+  }
+
+  inline double steps_took() {
+    return steps_took_;
+  }
+
+  inline double longest_step() {
+    return longest_step_;
+  }
+
+  inline int steps_count_since_last_gc() {
+    return steps_count_since_last_gc_;
+  }
+
+  inline double steps_took_since_last_gc() {
+    return steps_took_since_last_gc_;
+  }
+
+  inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
+    SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
+  }
+
+  inline void SetNewSpacePageFlags(NewSpacePage* chunk) {
+    SetNewSpacePageFlags(chunk, IsMarking());
+  }
+
+  MarkingDeque* marking_deque() { return &marking_deque_; }
+
+  bool IsCompacting() { return IsMarking() && is_compacting_; }
+
+  void ActivateGeneratedStub(Code* stub);
+
+  void NotifyOfHighPromotionRate() {
+    if (IsMarking()) {
+      if (allocation_marking_factor_ < kFastMarking) {
+        if (FLAG_trace_gc) {
+          PrintF("Increasing marking speed to %d due to high promotion rate\n",
+                 static_cast<int>(kFastMarking));
+        }
+        allocation_marking_factor_ = kFastMarking;
+      }
+    }
+  }
+
+ private:
+  void set_should_hurry(bool val) {
+    should_hurry_ = val;
+  }
+
+  int64_t SpaceLeftInOldSpace();
+
+  void ResetStepCounters();
+
+  void StartMarking();
+
+  void ActivateIncrementalWriteBarrier(PagedSpace* space);
+  static void ActivateIncrementalWriteBarrier(NewSpace* space);
+  void ActivateIncrementalWriteBarrier();
+
+  static void DeactivateIncrementalWriteBarrierForSpace(PagedSpace* space);
+  static void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
+  void DeactivateIncrementalWriteBarrier();
+
+  static void SetOldSpacePageFlags(MemoryChunk* chunk,
+                                   bool is_marking,
+                                   bool is_compacting);
+
+  static void SetNewSpacePageFlags(NewSpacePage* chunk, bool is_marking);
+
+  void EnsureMarkingDequeIsCommitted();
+
+  void VisitGlobalContext(Context* ctx, ObjectVisitor* v);
+
+  Heap* heap_;
+
+  State state_;
+  bool is_compacting_;
+
+  VirtualMemory* marking_deque_memory_;
+  MarkingDeque marking_deque_;
+
+  int steps_count_;
+  double steps_took_;
+  double longest_step_;
+  int64_t old_generation_space_available_at_start_of_incremental_;
+  int64_t old_generation_space_used_at_start_of_incremental_;
+  int steps_count_since_last_gc_;
+  double steps_took_since_last_gc_;
+  int64_t bytes_rescanned_;
+  bool should_hurry_;
+  int allocation_marking_factor_;
+  intptr_t allocated_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_INCREMENTAL_MARKING_H_
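The RecordWrite* declarations above make up the incremental write barrier. As a hedged, standalone sketch of the underlying idea (toy types, not V8's API): when the mutator stores a pointer to a white object into an already-black object during marking, the barrier re-greys the target so the marker cannot miss it.

```cpp
// Toy incremental write barrier: storing a white target into a black holder
// pushes the target onto the marking deque so incremental marking stays sound.
#include <cstdio>
#include <deque>

enum class Colour { kWhite, kGrey, kBlack };

struct Obj {
  Colour colour = Colour::kWhite;
  Obj* field = nullptr;
};

std::deque<Obj*> marking_deque;

void WriteBarrier(Obj* holder, Obj** slot, Obj* value) {
  *slot = value;
  // Only interesting while marking: a black holder must not point at a
  // white (unreached) object without the collector hearing about it.
  if (holder->colour == Colour::kBlack && value->colour == Colour::kWhite) {
    value->colour = Colour::kGrey;
    marking_deque.push_back(value);
  }
}

int main() {
  Obj holder, target;
  holder.colour = Colour::kBlack;  // already scanned by the marker
  WriteBarrier(&holder, &holder.field, &target);
  std::printf("target queued: %d\n", !marking_deque.empty());
}
```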
diff --git a/src/isolate-inl.h b/src/isolate-inl.h
index aa6b537..d6e6131 100644
--- a/src/isolate-inl.h
+++ b/src/isolate-inl.h
@@ -36,6 +36,21 @@
 namespace internal {
 
 
+SaveContext::SaveContext(Isolate* isolate) : prev_(isolate->save_context()) {
+  if (isolate->context() != NULL) {
+    context_ = Handle<Context>(isolate->context());
+#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
+    dummy_ = Handle<Context>(isolate->context());
+#endif
+  }
+  isolate->set_save_context(this);
+
+  // If there is no JS frame under the current C frame, use the value 0.
+  JavaScriptFrameIterator it(isolate);
+  js_sp_ = it.done() ? 0 : it.frame()->sp();
+}
+
+
 bool Isolate::DebuggerHasBreakPoints() {
 #ifdef ENABLE_DEBUGGER_SUPPORT
   return debug()->has_break_points();
diff --git a/src/isolate.cc b/src/isolate.cc
index fd0f673..951f428 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -1403,11 +1403,12 @@
       in_use_list_(0),
       free_list_(0),
       preallocated_storage_preallocated_(false),
-      pc_to_code_cache_(NULL),
+      inner_pointer_to_code_cache_(NULL),
       write_input_buffer_(NULL),
       global_handles_(NULL),
       context_switcher_(NULL),
       thread_manager_(NULL),
+      fp_stubs_generated_(false),
       string_tracker_(NULL),
       regexp_stack_(NULL),
       embedder_data_(NULL) {
@@ -1575,8 +1576,8 @@
   compilation_cache_ = NULL;
   delete bootstrapper_;
   bootstrapper_ = NULL;
-  delete pc_to_code_cache_;
-  pc_to_code_cache_ = NULL;
+  delete inner_pointer_to_code_cache_;
+  inner_pointer_to_code_cache_ = NULL;
   delete write_input_buffer_;
   write_input_buffer_ = NULL;
 
@@ -1700,7 +1701,7 @@
   context_slot_cache_ = new ContextSlotCache();
   descriptor_lookup_cache_ = new DescriptorLookupCache();
   unicode_cache_ = new UnicodeCache();
-  pc_to_code_cache_ = new PcToCodeCache(this);
+  inner_pointer_to_code_cache_ = new InnerPointerToCodeCache(this);
   write_input_buffer_ = new StringInputBuffer();
   global_handles_ = new GlobalHandles(this);
   bootstrapper_ = new Bootstrapper();
@@ -1767,7 +1768,7 @@
   // If we are deserializing, read the state into the now-empty heap.
   if (des != NULL) {
     des->Deserialize();
-    stub_cache_->Clear();
+    stub_cache_->Initialize(true);
   }
 
   // Deserializing may put strange things in the root array's copy of the
diff --git a/src/isolate.h b/src/isolate.h
index 2582da6..01ab04e 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -66,7 +66,7 @@
 class HeapProfiler;
 class InlineRuntimeFunctionsTable;
 class NoAllocationStringAllocator;
-class PcToCodeCache;
+class InnerPointerToCodeCache;
 class PreallocatedMemoryThread;
 class RegExpStack;
 class SaveContext;
@@ -841,7 +841,9 @@
     return unicode_cache_;
   }
 
-  PcToCodeCache* pc_to_code_cache() { return pc_to_code_cache_; }
+  InnerPointerToCodeCache* inner_pointer_to_code_cache() {
+    return inner_pointer_to_code_cache_;
+  }
 
   StringInputBuffer* write_input_buffer() { return write_input_buffer_; }
 
@@ -879,6 +881,12 @@
 
   RuntimeState* runtime_state() { return &runtime_state_; }
 
+  void set_fp_stubs_generated(bool value) {
+    fp_stubs_generated_ = value;
+  }
+
+  bool fp_stubs_generated() { return fp_stubs_generated_; }
+
   StaticResource<SafeStringInputBuffer>* compiler_safe_string_input_buffer() {
     return &compiler_safe_string_input_buffer_;
   }
@@ -1130,12 +1138,13 @@
   PreallocatedStorage in_use_list_;
   PreallocatedStorage free_list_;
   bool preallocated_storage_preallocated_;
-  PcToCodeCache* pc_to_code_cache_;
+  InnerPointerToCodeCache* inner_pointer_to_code_cache_;
   StringInputBuffer* write_input_buffer_;
   GlobalHandles* global_handles_;
   ContextSwitcher* context_switcher_;
   ThreadManager* thread_manager_;
   RuntimeState runtime_state_;
+  bool fp_stubs_generated_;
   StaticResource<SafeStringInputBuffer> compiler_safe_string_input_buffer_;
   Builtins builtins_;
   StringTracker* string_tracker_;
@@ -1210,19 +1219,7 @@
 // versions of GCC. See V8 issue 122 for details.
 class SaveContext BASE_EMBEDDED {
  public:
-  explicit SaveContext(Isolate* isolate) : prev_(isolate->save_context()) {
-    if (isolate->context() != NULL) {
-      context_ = Handle<Context>(isolate->context());
-#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
-      dummy_ = Handle<Context>(isolate->context());
-#endif
-    }
-    isolate->set_save_context(this);
-
-    // If there is no JS frame under the current C frame, use the value 0.
-    JavaScriptFrameIterator it(isolate);
-    js_sp_ = it.done() ? 0 : it.frame()->sp();
-  }
+  inline explicit SaveContext(Isolate* isolate);
 
   ~SaveContext() {
     if (context_.is_null()) {
diff --git a/src/json-parser.h b/src/json-parser.h
index 68eab65..ca796a6 100644
--- a/src/json-parser.h
+++ b/src/json-parser.h
@@ -165,7 +165,7 @@
 
 template <bool seq_ascii>
 Handle<Object> JsonParser<seq_ascii>::ParseJson(Handle<String> source) {
-  isolate_ = source->map()->isolate();
+  isolate_ = source->map()->GetHeap()->isolate();
   FlattenString(source);
   source_ = source;
   source_length_ = source_->length();
diff --git a/src/jsregexp.h b/src/jsregexp.h
index 54297a4..5281487 100644
--- a/src/jsregexp.h
+++ b/src/jsregexp.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,6 +29,7 @@
 #define V8_JSREGEXP_H_
 
 #include "allocation.h"
+#include "assembler.h"
 #include "zone-inl.h"
 
 namespace v8 {
diff --git a/src/lithium-allocator.cc b/src/lithium-allocator.cc
index 4661106..35281eb 100644
--- a/src/lithium-allocator.cc
+++ b/src/lithium-allocator.cc
@@ -1043,11 +1043,13 @@
       // it into a location different from the operand of a live range
       // covering a branch instruction.
       // Thus we need to manually record a pointer.
-      if (phi->representation().IsTagged()) {
-        LInstruction* branch =
-            InstructionAt(cur_block->last_instruction_index());
-        if (branch->HasPointerMap()) {
+      LInstruction* branch =
+          InstructionAt(cur_block->last_instruction_index());
+      if (branch->HasPointerMap()) {
+        if (phi->representation().IsTagged()) {
           branch->pointer_map()->RecordPointer(phi_operand);
+        } else if (!phi->representation().IsDouble()) {
+          branch->pointer_map()->RecordUntagged(phi_operand);
         }
       }
     }
@@ -1142,10 +1144,13 @@
         // it into a location different from the operand of a live range
         // covering a branch instruction.
         // Thus we need to manually record a pointer.
-        if (HasTaggedValue(range->id())) {
-          LInstruction* branch = InstructionAt(pred->last_instruction_index());
-          if (branch->HasPointerMap()) {
+        LInstruction* branch = InstructionAt(pred->last_instruction_index());
+        if (branch->HasPointerMap()) {
+          if (HasTaggedValue(range->id())) {
             branch->pointer_map()->RecordPointer(cur_op);
+          } else if (!cur_op->IsDoubleStackSlot() &&
+                     !cur_op->IsDoubleRegister()) {
+            branch->pointer_map()->RemovePointer(cur_op);
           }
         }
       }
diff --git a/src/lithium.cc b/src/lithium.cc
index 5410f6f..31b1698 100644
--- a/src/lithium.cc
+++ b/src/lithium.cc
@@ -156,6 +156,27 @@
 }
 
 
+void LPointerMap::RemovePointer(LOperand* op) {
+  // Do not record arguments as pointers.
+  if (op->IsStackSlot() && op->index() < 0) return;
+  ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+  for (int i = 0; i < pointer_operands_.length(); ++i) {
+    if (pointer_operands_[i]->Equals(op)) {
+      pointer_operands_.Remove(i);
+      --i;
+    }
+  }
+}
+
+
+void LPointerMap::RecordUntagged(LOperand* op) {
+  // Do not record arguments as pointers.
+  if (op->IsStackSlot() && op->index() < 0) return;
+  ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+  untagged_operands_.Add(op);
+}
+
+
 void LPointerMap::PrintTo(StringStream* stream) {
   stream->Add("{");
   for (int i = 0; i < pointer_operands_.length(); ++i) {
@@ -182,6 +203,7 @@
     case EXTERNAL_DOUBLE_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
       return 3;
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
diff --git a/src/lithium.h b/src/lithium.h
index 20da21a..1e90804 100644
--- a/src/lithium.h
+++ b/src/lithium.h
@@ -407,9 +407,18 @@
 class LPointerMap: public ZoneObject {
  public:
   explicit LPointerMap(int position)
-      : pointer_operands_(8), position_(position), lithium_position_(-1) { }
+      : pointer_operands_(8),
+        untagged_operands_(0),
+        position_(position),
+        lithium_position_(-1) { }
 
-  const ZoneList<LOperand*>* operands() const { return &pointer_operands_; }
+  const ZoneList<LOperand*>* GetNormalizedOperands() {
+    for (int i = 0; i < untagged_operands_.length(); ++i) {
+      RemovePointer(untagged_operands_[i]);
+    }
+    untagged_operands_.Clear();
+    return &pointer_operands_;
+  }
   int position() const { return position_; }
   int lithium_position() const { return lithium_position_; }
 
@@ -419,10 +428,13 @@
   }
 
   void RecordPointer(LOperand* op);
+  void RemovePointer(LOperand* op);
+  void RecordUntagged(LOperand* op);
   void PrintTo(StringStream* stream);
 
  private:
   ZoneList<LOperand*> pointer_operands_;
+  ZoneList<LOperand*> untagged_operands_;
   int position_;
   int lithium_position_;
 };
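The LPointerMap change above records untagged operands in a side list and only subtracts them from the pointer list when GetNormalizedOperands() is called. A minimal sketch of that normalize-on-read pattern, using plain containers rather than the Lithium classes:

```cpp
// Deferred removal: records go into a side list and are only subtracted from
// the main list when a normalized view is requested.
#include <algorithm>
#include <cstdio>
#include <vector>

class PointerSet {
 public:
  void RecordPointer(int op) { pointers_.push_back(op); }
  void RecordUntagged(int op) { untagged_.push_back(op); }

  // GetNormalizedOperands analogue: purge untagged entries, then hand out
  // the remaining pointer list.
  const std::vector<int>& GetNormalized() {
    for (int op : untagged_) {
      pointers_.erase(std::remove(pointers_.begin(), pointers_.end(), op),
                      pointers_.end());
    }
    untagged_.clear();
    return pointers_;
  }

 private:
  std::vector<int> pointers_;
  std::vector<int> untagged_;
};

int main() {
  PointerSet map;
  map.RecordPointer(1);
  map.RecordPointer(2);
  map.RecordUntagged(2);  // later discovered to hold an untagged value
  std::printf("normalized size: %zu\n", map.GetNormalized().size());  // 1
}
```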
diff --git a/src/liveedit.cc b/src/liveedit.cc
index d44c2fc..6107cbf 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -1000,6 +1000,7 @@
 static void ReplaceCodeObject(Code* original, Code* substitution) {
   ASSERT(!HEAP->InNewSpace(substitution));
 
+  HeapIterator iterator;
   AssertNoAllocation no_allocations_please;
 
   // A zone scope for ReferenceCollectorVisitor.
@@ -1016,7 +1017,6 @@
 
   // Now iterate over all pointers of all objects, including code_target
   // implicit pointers.
-  HeapIterator iterator;
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     obj->Iterate(&visitor);
   }
@@ -1101,6 +1101,8 @@
 
   Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
 
+  HEAP->EnsureHeapIsIterable();
+
   if (IsJSFunctionCode(shared_info->code())) {
     Handle<Code> code = compile_info_wrapper.GetFunctionCode();
     ReplaceCodeObject(shared_info->code(), *code);
@@ -1271,7 +1273,8 @@
 
 // Patch positions in code (changes relocation info section) and possibly
 // returns new instance of code.
-static Handle<Code> PatchPositionsInCode(Handle<Code> code,
+static Handle<Code> PatchPositionsInCode(
+    Handle<Code> code,
     Handle<JSArray> position_change_array) {
 
   RelocInfoBuffer buffer_writer(code->relocation_size(),
@@ -1286,7 +1289,7 @@
         int new_position = TranslatePosition(position,
                                              position_change_array);
         if (position != new_position) {
-          RelocInfo info_copy(rinfo->pc(), rinfo->rmode(), new_position);
+          RelocInfo info_copy(rinfo->pc(), rinfo->rmode(), new_position, NULL);
           buffer_writer.Write(&info_copy);
           continue;
         }
@@ -1333,6 +1336,8 @@
   info->set_end_position(new_function_end);
   info->set_function_token_position(new_function_token_pos);
 
+  HEAP->EnsureHeapIsIterable();
+
   if (IsJSFunctionCode(info->code())) {
     // Patch relocation info section of the code.
     Handle<Code> patched_code = PatchPositionsInCode(Handle<Code>(info->code()),
diff --git a/src/liveobjectlist.cc b/src/liveobjectlist.cc
index 957c051..d62c4d1 100644
--- a/src/liveobjectlist.cc
+++ b/src/liveobjectlist.cc
@@ -1336,7 +1336,9 @@
   // Allocate the JSArray of the elements.
   Handle<JSObject> elements = factory->NewJSObject(isolate->array_function());
   if (elements->IsFailure()) return Object::cast(*elements);
-  Handle<JSArray>::cast(elements)->SetContent(*elements_arr);
+
+  maybe_result = Handle<JSArray>::cast(elements)->SetContent(*elements_arr);
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Set body.elements.
   Handle<String> elements_sym = factory->LookupAsciiSymbol("elements");
@@ -1462,7 +1464,9 @@
   Handle<JSObject> summary_obj =
     factory->NewJSObject(isolate->array_function());
   if (summary_obj->IsFailure()) return Object::cast(*summary_obj);
-  Handle<JSArray>::cast(summary_obj)->SetContent(*summary_arr);
+
+  maybe_result = Handle<JSArray>::cast(summary_obj)->SetContent(*summary_arr);
+  if (maybe_result->IsFailure()) return maybe_result;
 
   // Create the body object.
   Handle<JSObject> body = factory->NewJSObject(isolate->object_function());
@@ -1589,7 +1593,9 @@
 
   // Return the result as a JS array.
   Handle<JSObject> lols = factory->NewJSObject(isolate->array_function());
-  Handle<JSArray>::cast(lols)->SetContent(*list);
+
+  maybe_result = Handle<JSArray>::cast(lols)->SetContent(*list);
+  if (maybe_result->IsFailure()) return maybe_result;
 
   Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
   if (result->IsFailure()) return Object::cast(*result);
@@ -2613,7 +2619,7 @@
     HeapObject* heap_obj = it.Obj();
     if (heap->InFromSpace(heap_obj)) {
       OS::Print(" ERROR: VerifyNotInFromSpace: [%d] obj %p in From space %p\n",
-                i++, heap_obj, heap->new_space()->FromSpaceLow());
+                i++, heap_obj, Heap::new_space()->FromSpaceStart());
     }
   }
 }
diff --git a/src/log.cc b/src/log.cc
index 3d66b5f..bad5fdc 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -1356,12 +1356,12 @@
 
 static int EnumerateCompiledFunctions(Handle<SharedFunctionInfo>* sfis,
                                       Handle<Code>* code_objects) {
+  HeapIterator iterator;
   AssertNoAllocation no_alloc;
   int compiled_funcs_count = 0;
 
   // Iterate the heap to find shared function info objects and record
   // the unoptimized code for them.
-  HeapIterator iterator;
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     if (!obj->IsSharedFunctionInfo()) continue;
     SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
@@ -1519,8 +1519,9 @@
 
 
 void Logger::LogCodeObjects() {
-  AssertNoAllocation no_alloc;
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
   HeapIterator iterator;
+  AssertNoAllocation no_alloc;
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     if (obj->IsCode()) LogCodeObject(obj);
   }
@@ -1573,6 +1574,7 @@
 
 
 void Logger::LogCompiledFunctions() {
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
   HandleScope scope;
   const int compiled_funcs_count = EnumerateCompiledFunctions(NULL, NULL);
   ScopedVector< Handle<SharedFunctionInfo> > sfis(compiled_funcs_count);
@@ -1591,9 +1593,9 @@
 
 
 void Logger::LogAccessorCallbacks() {
-  AssertNoAllocation no_alloc;
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
   HeapIterator iterator;
-  i::Isolate* isolate = ISOLATE;
+  AssertNoAllocation no_alloc;
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     if (!obj->IsAccessorInfo()) continue;
     AccessorInfo* ai = AccessorInfo::cast(obj);
@@ -1601,11 +1603,11 @@
     String* name = String::cast(ai->name());
     Address getter_entry = v8::ToCData<Address>(ai->getter());
     if (getter_entry != 0) {
-      PROFILE(isolate, GetterCallbackEvent(name, getter_entry));
+      PROFILE(ISOLATE, GetterCallbackEvent(name, getter_entry));
     }
     Address setter_entry = v8::ToCData<Address>(ai->setter());
     if (setter_entry != 0) {
-      PROFILE(isolate, SetterCallbackEvent(name, setter_entry));
+      PROFILE(ISOLATE, SetterCallbackEvent(name, setter_entry));
     }
   }
 }
diff --git a/src/log.h b/src/log.h
index fe19810..4d76fc8 100644
--- a/src/log.h
+++ b/src/log.h
@@ -29,6 +29,7 @@
 #define V8_LOG_H_
 
 #include "allocation.h"
+#include "objects.h"
 #include "platform.h"
 #include "log-utils.h"
 
diff --git a/src/macro-assembler.h b/src/macro-assembler.h
index 30838bd..364fdb6 100644
--- a/src/macro-assembler.h
+++ b/src/macro-assembler.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -93,6 +93,63 @@
 namespace v8 {
 namespace internal {
 
+class FrameScope {
+ public:
+  explicit FrameScope(MacroAssembler* masm, StackFrame::Type type)
+      : masm_(masm), type_(type), old_has_frame_(masm->has_frame()) {
+    masm->set_has_frame(true);
+    if (type != StackFrame::MANUAL && type_ != StackFrame::NONE) {
+      masm->EnterFrame(type);
+    }
+  }
+
+  ~FrameScope() {
+    if (type_ != StackFrame::MANUAL && type_ != StackFrame::NONE) {
+      masm_->LeaveFrame(type_);
+    }
+    masm_->set_has_frame(old_has_frame_);
+  }
+
+  // Normally we generate the leave-frame code when this object goes
+  // out of scope.  Sometimes we may need to generate the code somewhere else
+  // in addition.  Calling this will achieve that, but the object stays in
+  // scope, the MacroAssembler is still marked as being in a frame scope, and
+  // the code will be generated again when it goes out of scope.
+  void GenerateLeaveFrame() {
+    masm_->LeaveFrame(type_);
+  }
+
+ private:
+  MacroAssembler* masm_;
+  StackFrame::Type type_;
+  bool old_has_frame_;
+};
+
+
+class AllowExternalCallThatCantCauseGC: public FrameScope {
+ public:
+  explicit AllowExternalCallThatCantCauseGC(MacroAssembler* masm)
+      : FrameScope(masm, StackFrame::NONE) { }
+};
+
+
+class NoCurrentFrameScope {
+ public:
+  explicit NoCurrentFrameScope(MacroAssembler* masm)
+      : masm_(masm), saved_(masm->has_frame()) {
+    masm->set_has_frame(false);
+  }
+
+  ~NoCurrentFrameScope() {
+    masm_->set_has_frame(saved_);
+  }
+
+ private:
+  MacroAssembler* masm_;
+  bool saved_;
+};
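FrameScope above is an RAII helper: constructing it emits the enter-frame code and marks the assembler as having a frame, and the destructor undoes both. A stripped-down sketch of the same shape, with a placeholder Assembler type standing in for MacroAssembler:

```cpp
// RAII frame management in miniature: entering the scope emits the
// enter-frame code, leaving it (even via early return) emits the leave code.
#include <cstdio>

struct Assembler {
  bool has_frame = false;
  void EnterFrame() { std::puts("  enter frame"); }
  void LeaveFrame() { std::puts("  leave frame"); }
};

class FrameScope {
 public:
  explicit FrameScope(Assembler* masm) : masm_(masm), old_(masm->has_frame) {
    masm_->has_frame = true;
    masm_->EnterFrame();
  }
  ~FrameScope() {
    masm_->LeaveFrame();
    masm_->has_frame = old_;  // restore the previous nesting state
  }

 private:
  Assembler* masm_;
  bool old_;
};

void GenerateStub(Assembler* masm) {
  FrameScope scope(masm);        // frame is set up here
  std::puts("  ...stub body...");
}                                // and torn down here

int main() {
  Assembler masm;
  GenerateStub(&masm);
}
```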
+
+
 // Support for "structured" code comments.
 #ifdef DEBUG
 
diff --git a/src/mark-compact-inl.h b/src/mark-compact-inl.h
new file mode 100644
index 0000000..20f11a7
--- /dev/null
+++ b/src/mark-compact-inl.h
@@ -0,0 +1,101 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MARK_COMPACT_INL_H_
+#define V8_MARK_COMPACT_INL_H_
+
+#include "isolate.h"
+#include "memory.h"
+#include "mark-compact.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+MarkBit Marking::MarkBitFrom(Address addr) {
+  MemoryChunk* p = MemoryChunk::FromAddress(addr);
+  return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr),
+                                         p->ContainsOnlyData());
+}
+
+
+void MarkCompactCollector::SetFlags(int flags) {
+  sweep_precisely_ = ((flags & Heap::kMakeHeapIterableMask) != 0);
+}
+
+
+void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
+  ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
+  if (!mark_bit.Get()) {
+    mark_bit.Set();
+    MemoryChunk::IncrementLiveBytes(obj->address(), obj->Size());
+#ifdef DEBUG
+    UpdateLiveObjectCount(obj);
+#endif
+    ProcessNewlyMarkedObject(obj);
+  }
+}
+
+
+void MarkCompactCollector::SetMark(HeapObject* obj, MarkBit mark_bit) {
+  ASSERT(!mark_bit.Get());
+  ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
+  mark_bit.Set();
+  MemoryChunk::IncrementLiveBytes(obj->address(), obj->Size());
+#ifdef DEBUG
+  UpdateLiveObjectCount(obj);
+#endif
+}
+
+
+bool MarkCompactCollector::IsMarked(Object* obj) {
+  ASSERT(obj->IsHeapObject());
+  HeapObject* heap_object = HeapObject::cast(obj);
+  return Marking::MarkBitFrom(heap_object).Get();
+}
+
+
+void MarkCompactCollector::RecordSlot(Object** anchor_slot,
+                                      Object** slot,
+                                      Object* object) {
+  Page* object_page = Page::FromAddress(reinterpret_cast<Address>(object));
+  if (object_page->IsEvacuationCandidate() &&
+      !ShouldSkipEvacuationSlotRecording(anchor_slot)) {
+    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
+                            object_page->slots_buffer_address(),
+                            slot,
+                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
+      EvictEvacuationCandidate(object_page);
+    }
+  }
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_MARK_COMPACT_INL_H_
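RecordSlot above only buffers slots whose target sits on an evacuation candidate page; everything else can be ignored because it will not move. A hedged, container-based sketch of that filtering, with toy Page/Object types in place of V8's:

```cpp
// Only slots whose target lives on a page selected for evacuation are
// remembered; pointers into non-moving pages need no fix-up later.
#include <cstdio>
#include <vector>

struct Page {
  bool evacuation_candidate = false;
  std::vector<void**> slots_buffer;  // slots to rewrite after objects move
};

struct Object {
  Page* page;  // page the object lives on
};

void RecordSlot(void** slot, Object* target) {
  Page* p = target->page;
  if (p->evacuation_candidate) {
    p->slots_buffer.push_back(slot);  // remember where to rewrite the pointer
  }
}

int main() {
  Page moving, fixed;
  moving.evacuation_candidate = true;
  Object a{&moving}, b{&fixed};
  void* slot_a = &a;
  void* slot_b = &b;
  RecordSlot(&slot_a, &a);  // buffered: target may move
  RecordSlot(&slot_b, &b);  // ignored: target stays put
  std::printf("buffered slots on moving page: %zu\n",
              moving.slots_buffer.size());
}
```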
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 3e4a617..450a58c 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -27,20 +27,31 @@
 
 #include "v8.h"
 
+#include "code-stubs.h"
 #include "compilation-cache.h"
+#include "deoptimizer.h"
 #include "execution.h"
-#include "heap-profiler.h"
 #include "gdb-jit.h"
 #include "global-handles.h"
+#include "heap-profiler.h"
 #include "ic-inl.h"
+#include "incremental-marking.h"
 #include "liveobjectlist-inl.h"
 #include "mark-compact.h"
 #include "objects-visiting.h"
+#include "objects-visiting-inl.h"
 #include "stub-cache.h"
 
 namespace v8 {
 namespace internal {
 
+
+const char* Marking::kWhiteBitPattern = "00";
+const char* Marking::kBlackBitPattern = "10";
+const char* Marking::kGreyBitPattern = "11";
+const char* Marking::kImpossibleBitPattern = "01";
+
+
 // -------------------------------------------------------------------------
 // MarkCompactCollector
 
@@ -48,11 +59,12 @@
 #ifdef DEBUG
       state_(IDLE),
 #endif
-      force_compaction_(false),
-      compacting_collection_(false),
-      compact_on_next_gc_(false),
-      previous_marked_count_(0),
+      sweep_precisely_(false),
+      compacting_(false),
+      was_marked_incrementally_(false),
+      collect_maps_(FLAG_collect_maps),
       tracer_(NULL),
+      migration_slots_buffer_(NULL),
 #ifdef DEBUG
       live_young_objects_size_(0),
       live_old_pointer_objects_size_(0),
@@ -68,50 +80,400 @@
       encountered_weak_maps_(NULL) { }
 
 
+#ifdef DEBUG
+class VerifyMarkingVisitor: public ObjectVisitor {
+ public:
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** current = start; current < end; current++) {
+      if ((*current)->IsHeapObject()) {
+        HeapObject* object = HeapObject::cast(*current);
+        ASSERT(HEAP->mark_compact_collector()->IsMarked(object));
+      }
+    }
+  }
+};
+
+
+static void VerifyMarking(Address bottom, Address top) {
+  VerifyMarkingVisitor visitor;
+  HeapObject* object;
+  Address next_object_must_be_here_or_later = bottom;
+
+  for (Address current = bottom;
+       current < top;
+       current += kPointerSize) {
+    object = HeapObject::FromAddress(current);
+    if (MarkCompactCollector::IsMarked(object)) {
+      ASSERT(current >= next_object_must_be_here_or_later);
+      object->Iterate(&visitor);
+      next_object_must_be_here_or_later = current + object->Size();
+    }
+  }
+}
+
+
+static void VerifyMarking(NewSpace* space) {
+  Address end = space->top();
+  NewSpacePageIterator it(space->bottom(), end);
+  // The bottom position is at the start of its page, which allows us to use
+  // page->body() as the start of the range on all pages.
+  ASSERT_EQ(space->bottom(),
+            NewSpacePage::FromAddress(space->bottom())->body());
+  while (it.has_next()) {
+    NewSpacePage* page = it.next();
+    Address limit = it.has_next() ? page->body_limit() : end;
+    ASSERT(limit == end || !page->Contains(end));
+    VerifyMarking(page->body(), limit);
+  }
+}
+
+
+static void VerifyMarking(PagedSpace* space) {
+  PageIterator it(space);
+
+  while (it.has_next()) {
+    Page* p = it.next();
+    VerifyMarking(p->ObjectAreaStart(), p->ObjectAreaEnd());
+  }
+}
+
+
+static void VerifyMarking(Heap* heap) {
+  VerifyMarking(heap->old_pointer_space());
+  VerifyMarking(heap->old_data_space());
+  VerifyMarking(heap->code_space());
+  VerifyMarking(heap->cell_space());
+  VerifyMarking(heap->map_space());
+  VerifyMarking(heap->new_space());
+
+  VerifyMarkingVisitor visitor;
+
+  LargeObjectIterator it(heap->lo_space());
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+    if (MarkCompactCollector::IsMarked(obj)) {
+      obj->Iterate(&visitor);
+    }
+  }
+
+  heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
+}
+
+
+class VerifyEvacuationVisitor: public ObjectVisitor {
+ public:
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** current = start; current < end; current++) {
+      if ((*current)->IsHeapObject()) {
+        HeapObject* object = HeapObject::cast(*current);
+        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
+      }
+    }
+  }
+};
+
+
+static void VerifyEvacuation(Address bottom, Address top) {
+  VerifyEvacuationVisitor visitor;
+  HeapObject* object;
+  Address next_object_must_be_here_or_later = bottom;
+
+  for (Address current = bottom;
+       current < top;
+       current += kPointerSize) {
+    object = HeapObject::FromAddress(current);
+    if (MarkCompactCollector::IsMarked(object)) {
+      ASSERT(current >= next_object_must_be_here_or_later);
+      object->Iterate(&visitor);
+      next_object_must_be_here_or_later = current + object->Size();
+    }
+  }
+}
+
+
+static void VerifyEvacuation(NewSpace* space) {
+  NewSpacePageIterator it(space->bottom(), space->top());
+  VerifyEvacuationVisitor visitor;
+
+  while (it.has_next()) {
+    NewSpacePage* page = it.next();
+    Address current = page->body();
+    Address limit = it.has_next() ? page->body_limit() : space->top();
+    ASSERT(limit == space->top() || !page->Contains(space->top()));
+    while (current < limit) {
+      HeapObject* object = HeapObject::FromAddress(current);
+      object->Iterate(&visitor);
+      current += object->Size();
+    }
+  }
+}
+
+
+static void VerifyEvacuation(PagedSpace* space) {
+  PageIterator it(space);
+
+  while (it.has_next()) {
+    Page* p = it.next();
+    if (p->IsEvacuationCandidate()) continue;
+    VerifyEvacuation(p->ObjectAreaStart(), p->ObjectAreaEnd());
+  }
+}
+
+
+static void VerifyEvacuation(Heap* heap) {
+  VerifyEvacuation(heap->old_pointer_space());
+  VerifyEvacuation(heap->old_data_space());
+  VerifyEvacuation(heap->code_space());
+  VerifyEvacuation(heap->cell_space());
+  VerifyEvacuation(heap->map_space());
+  VerifyEvacuation(heap->new_space());
+
+  VerifyEvacuationVisitor visitor;
+  heap->IterateStrongRoots(&visitor, VISIT_ALL);
+}
+#endif
+
+
+void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
+  p->MarkEvacuationCandidate();
+  evacuation_candidates_.Add(p);
+}
+
+
+bool MarkCompactCollector::StartCompaction() {
+  if (!compacting_) {
+    ASSERT(evacuation_candidates_.length() == 0);
+
+    CollectEvacuationCandidates(heap()->old_pointer_space());
+    CollectEvacuationCandidates(heap()->old_data_space());
+
+    if (FLAG_compact_code_space) {
+      CollectEvacuationCandidates(heap()->code_space());
+    }
+
+    heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
+    heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
+    heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();
+
+    compacting_ = evacuation_candidates_.length() > 0;
+  }
+
+  return compacting_;
+}
+
+
 void MarkCompactCollector::CollectGarbage() {
   // Make sure that Prepare() has been called. The individual steps below will
   // update the state as they proceed.
   ASSERT(state_ == PREPARE_GC);
   ASSERT(encountered_weak_maps_ == Smi::FromInt(0));
 
-  // Prepare has selected whether to compact the old generation or not.
-  // Tell the tracer.
-  if (IsCompacting()) tracer_->set_is_compacting();
-
   MarkLiveObjects();
+  ASSERT(heap_->incremental_marking()->IsStopped());
 
-  if (FLAG_collect_maps) ClearNonLiveTransitions();
+  if (collect_maps_) ClearNonLiveTransitions();
 
   ClearWeakMaps();
 
-  SweepLargeObjectSpace();
-
-  if (IsCompacting()) {
-    GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
-    EncodeForwardingAddresses();
-
-    heap()->MarkMapPointersAsEncoded(true);
-    UpdatePointers();
-    heap()->MarkMapPointersAsEncoded(false);
-    heap()->isolate()->pc_to_code_cache()->Flush();
-
-    RelocateObjects();
-  } else {
-    SweepSpaces();
-    heap()->isolate()->pc_to_code_cache()->Flush();
+#ifdef DEBUG
+  if (FLAG_verify_heap) {
+    VerifyMarking(heap_);
   }
+#endif
+
+  SweepSpaces();
+
+  if (!collect_maps_) ReattachInitialMaps();
+
+  heap_->isolate()->inner_pointer_to_code_cache()->Flush();
 
   Finish();
 
-  // Save the count of marked objects remaining after the collection and
-  // null out the GC tracer.
-  previous_marked_count_ = tracer_->marked_count();
-  ASSERT(previous_marked_count_ == 0);
   tracer_ = NULL;
 }
 
 
+#ifdef DEBUG
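+// Check that all mark bits are clear and no live bytes are recorded before a
+// new marking cycle starts.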
+void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
+  PageIterator it(space);
+
+  while (it.has_next()) {
+    Page* p = it.next();
+    CHECK(p->markbits()->IsClean());
+    CHECK_EQ(0, p->LiveBytes());
+  }
+}
+
+void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
+  NewSpacePageIterator it(space->bottom(), space->top());
+
+  while (it.has_next()) {
+    NewSpacePage* p = it.next();
+    CHECK(p->markbits()->IsClean());
+    CHECK_EQ(0, p->LiveBytes());
+  }
+}
+
+void MarkCompactCollector::VerifyMarkbitsAreClean() {
+  VerifyMarkbitsAreClean(heap_->old_pointer_space());
+  VerifyMarkbitsAreClean(heap_->old_data_space());
+  VerifyMarkbitsAreClean(heap_->code_space());
+  VerifyMarkbitsAreClean(heap_->cell_space());
+  VerifyMarkbitsAreClean(heap_->map_space());
+  VerifyMarkbitsAreClean(heap_->new_space());
+
+  LargeObjectIterator it(heap_->lo_space());
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+    MarkBit mark_bit = Marking::MarkBitFrom(obj);
+    ASSERT(Marking::IsWhite(mark_bit));
+  }
+}
+#endif
+
+
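+// Clear the mark bitmaps of every space and the mark bits of large objects
+// so that all objects are white again.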
+static void ClearMarkbits(PagedSpace* space) {
+  PageIterator it(space);
+
+  while (it.has_next()) {
+    Bitmap::Clear(it.next());
+  }
+}
+
+
+static void ClearMarkbits(NewSpace* space) {
+  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
+
+  while (it.has_next()) {
+    Bitmap::Clear(it.next());
+  }
+}
+
+
+static void ClearMarkbits(Heap* heap) {
+  ClearMarkbits(heap->code_space());
+  ClearMarkbits(heap->map_space());
+  ClearMarkbits(heap->old_pointer_space());
+  ClearMarkbits(heap->old_data_space());
+  ClearMarkbits(heap->cell_space());
+  ClearMarkbits(heap->new_space());
+
+  LargeObjectIterator it(heap->lo_space());
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+    MarkBit mark_bit = Marking::MarkBitFrom(obj);
+    mark_bit.Clear();
+    mark_bit.Next().Clear();
+  }
+}
+
+
+bool Marking::TransferMark(Address old_start, Address new_start) {
+  // This is only used when resizing an object.
+  ASSERT(MemoryChunk::FromAddress(old_start) ==
+         MemoryChunk::FromAddress(new_start));
+
+  // If the mark doesn't move, we don't check the color of the object.
+  // It doesn't matter whether the object is black, since it hasn't changed
+  // size, so the adjustment to the live data count will be zero anyway.
+  if (old_start == new_start) return false;
+
+  MarkBit new_mark_bit = MarkBitFrom(new_start);
+  MarkBit old_mark_bit = MarkBitFrom(old_start);
+
+#ifdef DEBUG
+  ObjectColor old_color = Color(old_mark_bit);
+#endif
+
+  if (Marking::IsBlack(old_mark_bit)) {
+    old_mark_bit.Clear();
+    ASSERT(IsWhite(old_mark_bit));
+    Marking::MarkBlack(new_mark_bit);
+    return true;
+  } else if (Marking::IsGrey(old_mark_bit)) {
+    ASSERT(heap_->incremental_marking()->IsMarking());
+    old_mark_bit.Clear();
+    old_mark_bit.Next().Clear();
+    ASSERT(IsWhite(old_mark_bit));
+    heap_->incremental_marking()->WhiteToGreyAndPush(
+        HeapObject::FromAddress(new_start), new_mark_bit);
+    heap_->incremental_marking()->RestartIfNotMarking();
+  }
+
+#ifdef DEBUG
+  ObjectColor new_color = Color(new_mark_bit);
+  ASSERT(new_color == old_color);
+#endif
+
+  return false;
+}
+
+
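+// Human-readable space name for tracing output.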
+const char* AllocationSpaceName(AllocationSpace space) {
+  switch (space) {
+    case NEW_SPACE: return "NEW_SPACE";
+    case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
+    case OLD_DATA_SPACE: return "OLD_DATA_SPACE";
+    case CODE_SPACE: return "CODE_SPACE";
+    case MAP_SPACE: return "MAP_SPACE";
+    case CELL_SPACE: return "CELL_SPACE";
+    case LO_SPACE: return "LO_SPACE";
+    default:
+      UNREACHABLE();
+  }
+
+  return NULL;
+}
+
+
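+// Walk all pages of the space except the first one and turn sufficiently
+// fragmented pages into evacuation candidates.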
+void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
+  ASSERT(space->identity() == OLD_POINTER_SPACE ||
+         space->identity() == OLD_DATA_SPACE ||
+         space->identity() == CODE_SPACE);
+
+  PageIterator it(space);
+  int count = 0;
+  if (it.has_next()) it.next();  // Never compact the first page.
+  while (it.has_next()) {
+    Page* p = it.next();
+    if (space->IsFragmented(p)) {
+      AddEvacuationCandidate(p);
+      count++;
+    } else {
+      p->ClearEvacuationCandidate();
+    }
+  }
+
+  if (count > 0 && FLAG_trace_fragmentation) {
+    PrintF("Collected %d evacuation candidates for space %s\n",
+           count,
+           AllocationSpaceName(space->identity()));
+  }
+}
+
+
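+// Give up compaction for this cycle: release the recorded slots buffers and
+// clear the evacuation candidate state of every selected page.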
+void MarkCompactCollector::AbortCompaction() {
+  if (compacting_) {
+    int npages = evacuation_candidates_.length();
+    for (int i = 0; i < npages; i++) {
+      Page* p = evacuation_candidates_[i];
+      slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
+      p->ClearEvacuationCandidate();
+      p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
+    }
+    compacting_ = false;
+    evacuation_candidates_.Rewind(0);
+    invalidated_code_.Rewind(0);
+  }
+  ASSERT_EQ(0, evacuation_candidates_.length());
+}
+
+
 void MarkCompactCollector::Prepare(GCTracer* tracer) {
+  was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
+
+  // Disable collection of maps if incremental marking is in progress.
+  // The map collection algorithm relies on a special map transition tree
+  // traversal order which is not implemented for incremental marking.
+  collect_maps_ = FLAG_collect_maps && !was_marked_incrementally_;
+
   // Rather than passing the tracer around we stash it in a static member
   // variable.
   tracer_ = tracer;
@@ -120,16 +482,10 @@
   ASSERT(state_ == IDLE);
   state_ = PREPARE_GC;
 #endif
-  ASSERT(!FLAG_always_compact || !FLAG_never_compact);
 
-  compacting_collection_ =
-      FLAG_always_compact || force_compaction_ || compact_on_next_gc_;
-  compact_on_next_gc_ = false;
+  ASSERT(!FLAG_never_compact || !FLAG_always_compact);
 
-  if (FLAG_never_compact) compacting_collection_ = false;
-  if (!heap()->map_space()->MapPointersEncodable())
-      compacting_collection_ = false;
-  if (FLAG_collect_maps) CreateBackPointers();
+  if (collect_maps_) CreateBackPointers();
 #ifdef ENABLE_GDB_JIT_INTERFACE
   if (FLAG_gdbjit) {
     // If GDBJIT interface is active disable compaction.
@@ -137,13 +493,34 @@
   }
 #endif
 
+  // Clear marking bits for precise sweeping to collect all garbage.
+  if (was_marked_incrementally_ && PreciseSweepingRequired()) {
+    heap()->incremental_marking()->Abort();
+    ClearMarkbits(heap_);
+    AbortCompaction();
+    was_marked_incrementally_ = false;
+  }
+
+  // Don't start compaction if we are in the middle of an incremental
+  // marking cycle: no slots were recorded during incremental marking.
+  if (!FLAG_never_compact && !was_marked_incrementally_) {
+    StartCompaction();
+  }
+
   PagedSpaces spaces;
   for (PagedSpace* space = spaces.next();
-       space != NULL; space = spaces.next()) {
-    space->PrepareForMarkCompact(compacting_collection_);
+       space != NULL;
+       space = spaces.next()) {
+    space->PrepareForMarkCompact();
   }
 
 #ifdef DEBUG
+  if (!was_marked_incrementally_) {
+    VerifyMarkbitsAreClean();
+  }
+#endif
+
+#ifdef DEBUG
   live_bytes_ = 0;
   live_young_objects_size_ = 0;
   live_old_pointer_objects_size_ = 0;
@@ -168,31 +545,6 @@
   heap()->isolate()->stub_cache()->Clear();
 
   heap()->external_string_table_.CleanUp();
-
-  // If we've just compacted old space there's no reason to check the
-  // fragmentation limit. Just return.
-  if (HasCompacted()) return;
-
-  // We compact the old generation on the next GC if it has gotten too
-  // fragmented (ie, we could recover an expected amount of space by
-  // reclaiming the waste and free list blocks).
-  static const int kFragmentationLimit = 15;        // Percent.
-  static const int kFragmentationAllowed = 1 * MB;  // Absolute.
-  intptr_t old_gen_recoverable = 0;
-  intptr_t old_gen_used = 0;
-
-  OldSpaces spaces;
-  for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
-    old_gen_recoverable += space->Waste() + space->AvailableFree();
-    old_gen_used += space->Size();
-  }
-
-  int old_gen_fragmentation =
-      static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
-  if (old_gen_fragmentation > kFragmentationLimit &&
-      old_gen_recoverable > kFragmentationAllowed) {
-    compact_on_next_gc_ = true;
-  }
 }
 
 
@@ -261,13 +613,21 @@
       SharedFunctionInfo* shared = candidate->unchecked_shared();
 
       Code* code = shared->unchecked_code();
-      if (!code->IsMarked()) {
+      MarkBit code_mark = Marking::MarkBitFrom(code);
+      if (!code_mark.Get()) {
         shared->set_code(lazy_compile);
         candidate->set_code(lazy_compile);
       } else {
         candidate->set_code(shared->unchecked_code());
       }
 
+      // We are in the middle of a GC cycle, so the write barrier in the code
+      // setter did not record the slot update, and we have to do that manually.
+      Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
+      Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
+      isolate_->heap()->mark_compact_collector()->
+          RecordCodeEntrySlot(slot, target);
+
       candidate = next_candidate;
     }
 
@@ -285,7 +645,8 @@
       SetNextCandidate(candidate, NULL);
 
       Code* code = candidate->unchecked_code();
-      if (!code->IsMarked()) {
+      MarkBit code_mark = Marking::MarkBitFrom(code);
+      if (!code_mark.Get()) {
         candidate->set_code(lazy_compile);
       }
 
@@ -355,14 +716,13 @@
   // except the maps for the object and its possible substrings might be
   // marked.
   HeapObject* object = HeapObject::cast(*p);
-  MapWord map_word = object->map_word();
-  map_word.ClearMark();
-  InstanceType type = map_word.ToMap()->instance_type();
+  Map* map = object->map();
+  InstanceType type = map->instance_type();
   if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
 
   Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
-  Heap* heap = map_word.ToMap()->heap();
-  if (second != heap->raw_unchecked_empty_string()) {
+  Heap* heap = map->GetHeap();
+  if (second != heap->empty_string()) {
     return object;
   }
 
@@ -404,14 +764,12 @@
                                          FixedArray::BodyDescriptor,
                                          void>::Visit);
 
+    table_.Register(kVisitGlobalContext, &VisitGlobalContext);
+
     table_.Register(kVisitFixedDoubleArray, DataObjectVisitor::Visit);
 
-    table_.Register(kVisitGlobalContext,
-                    &FixedBodyVisitor<StaticMarkingVisitor,
-                                      Context::MarkCompactBodyDescriptor,
-                                      void>::Visit);
-
     table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
+    table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
     table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
     table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
 
@@ -456,7 +814,7 @@
   }
 
   INLINE(static void VisitPointer(Heap* heap, Object** p)) {
-    MarkObjectByPointer(heap, p);
+    MarkObjectByPointer(heap->mark_compact_collector(), p, p);
   }
 
   INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
@@ -466,29 +824,45 @@
       if (VisitUnmarkedObjects(heap, start, end)) return;
       // We are close to a stack overflow, so just mark the objects.
     }
-    for (Object** p = start; p < end; p++) MarkObjectByPointer(heap, p);
-  }
-
-  static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
-    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
-    Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
-    if (FLAG_cleanup_code_caches_at_gc && code->is_inline_cache_stub()) {
-      IC::Clear(rinfo->pc());
-      // Please note targets for cleared inline cached do not have to be
-      // marked since they are contained in HEAP->non_monomorphic_cache().
-    } else {
-      heap->mark_compact_collector()->MarkObject(code);
+    MarkCompactCollector* collector = heap->mark_compact_collector();
+    for (Object** p = start; p < end; p++) {
+      MarkObjectByPointer(collector, start, p);
     }
   }
 
   static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {
     ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
-    Object* cell = rinfo->target_cell();
-    Object* old_cell = cell;
-    VisitPointer(heap, &cell);
-    if (cell != old_cell) {
-      rinfo->set_target_cell(reinterpret_cast<JSGlobalPropertyCell*>(cell));
+    JSGlobalPropertyCell* cell =
+        JSGlobalPropertyCell::cast(rinfo->target_cell());
+    MarkBit mark = Marking::MarkBitFrom(cell);
+    heap->mark_compact_collector()->MarkObject(cell, mark);
+  }
+
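+  // Mark the object referenced by an embedded pointer in a code object and
+  // record the slot so it can be updated if the target moves.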
+  static inline void VisitEmbeddedPointer(Heap* heap, Code* host, Object** p) {
+    MarkObjectByPointer(heap->mark_compact_collector(),
+                        reinterpret_cast<Object**>(host),
+                        p);
+  }
+
+  static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
+    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
+    Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+    if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()) {
+      IC::Clear(rinfo->pc());
+      // Note that targets of cleared inline caches do not have to be
+      // marked since they are contained in HEAP->non_monomorphic_cache().
+      target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+    } else {
+      if (FLAG_cleanup_code_caches_at_gc &&
+          target->kind() == Code::STUB &&
+          target->major_key() == CodeStub::CallFunction &&
+          target->has_function_cache()) {
+        CallFunctionStub::Clear(heap, rinfo->pc());
+      }
+      MarkBit code_mark = Marking::MarkBitFrom(target);
+      heap->mark_compact_collector()->MarkObject(target, code_mark);
     }
+    heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
   }
 
   static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) {
@@ -496,17 +870,21 @@
             rinfo->IsPatchedReturnSequence()) ||
            (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
             rinfo->IsPatchedDebugBreakSlotSequence()));
-    HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
-    heap->mark_compact_collector()->MarkObject(code);
+    Code* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
+    MarkBit code_mark = Marking::MarkBitFrom(target);
+    heap->mark_compact_collector()->MarkObject(target, code_mark);
+    heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
   }
 
   // Mark object pointed to by p.
-  INLINE(static void MarkObjectByPointer(Heap* heap, Object** p)) {
+  INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
+                                         Object** anchor_slot,
+                                         Object** p)) {
     if (!(*p)->IsHeapObject()) return;
     HeapObject* object = ShortCircuitConsString(p);
-    if (!object->IsMarked()) {
-      heap->mark_compact_collector()->MarkUnmarkedObject(object);
-    }
+    collector->RecordSlot(anchor_slot, p, object);
+    MarkBit mark = Marking::MarkBitFrom(object);
+    collector->MarkObject(object, mark);
   }
 
 
@@ -515,12 +893,15 @@
                                          HeapObject* obj)) {
 #ifdef DEBUG
     ASSERT(Isolate::Current()->heap()->Contains(obj));
-    ASSERT(!obj->IsMarked());
+    ASSERT(!HEAP->mark_compact_collector()->IsMarked(obj));
 #endif
     Map* map = obj->map();
-    collector->SetMark(obj);
+    Heap* heap = obj->GetHeap();
+    MarkBit mark = Marking::MarkBitFrom(obj);
+    heap->mark_compact_collector()->SetMark(obj, mark);
     // Mark the map pointer and the body.
-    if (!map->IsMarked()) collector->MarkUnmarkedObject(map);
+    MarkBit map_mark = Marking::MarkBitFrom(map);
+    heap->mark_compact_collector()->MarkObject(map, map_mark);
     IterateBody(map, obj);
   }
 
@@ -536,9 +917,12 @@
     MarkCompactCollector* collector = heap->mark_compact_collector();
     // Visit the unmarked objects.
     for (Object** p = start; p < end; p++) {
-      if (!(*p)->IsHeapObject()) continue;
-      HeapObject* obj = HeapObject::cast(*p);
-      if (obj->IsMarked()) continue;
+      Object* o = *p;
+      if (!o->IsHeapObject()) continue;
+      collector->RecordSlot(start, p, o);
+      HeapObject* obj = HeapObject::cast(o);
+      MarkBit mark = Marking::MarkBitFrom(obj);
+      if (mark.Get()) continue;
       VisitUnmarkedObject(collector, obj);
     }
     return true;
@@ -567,7 +951,7 @@
                               void> StructObjectVisitor;
 
   static void VisitJSWeakMap(Map* map, HeapObject* object) {
-    MarkCompactCollector* collector = map->heap()->mark_compact_collector();
+    MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
     JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(object);
 
     // Enqueue weak map in linked list of encountered weak maps.
@@ -578,25 +962,28 @@
     // Skip visiting the backing hash table containing the mappings.
     int object_size = JSWeakMap::BodyDescriptor::SizeOf(map, object);
     BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
-        map->heap(),
+        map->GetHeap(),
         object,
         JSWeakMap::BodyDescriptor::kStartOffset,
         JSWeakMap::kTableOffset);
     BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
-        map->heap(),
+        map->GetHeap(),
         object,
         JSWeakMap::kTableOffset + kPointerSize,
         object_size);
 
     // Mark the backing hash table without pushing it on the marking stack.
-    ASSERT(!weak_map->unchecked_table()->IsMarked());
-    ASSERT(weak_map->unchecked_table()->map()->IsMarked());
-    collector->SetMark(weak_map->unchecked_table());
+    ASSERT(!MarkCompactCollector::IsMarked(weak_map->unchecked_table()));
+    ASSERT(MarkCompactCollector::IsMarked(weak_map->unchecked_table()->map()));
+
+    HeapObject* unchecked_table = weak_map->unchecked_table();
+    MarkBit mark_bit = Marking::MarkBitFrom(unchecked_table);
+    collector->SetMark(unchecked_table, mark_bit);
   }
 
   static void VisitCode(Map* map, HeapObject* object) {
     reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>(
-        map->heap());
+        map->GetHeap());
   }
 
   // Code flushing support.
@@ -608,7 +995,7 @@
   static const int kRegExpCodeThreshold = 5;
 
   inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
-    Object* undefined = heap->raw_unchecked_undefined_value();
+    Object* undefined = heap->undefined_value();
     return (info->script() != undefined) &&
         (reinterpret_cast<Script*>(info->script())->source() != undefined);
   }
@@ -629,7 +1016,9 @@
 
     // Code is either on stack, in compilation cache or referenced
     // by optimized version of function.
-    if (function->unchecked_code()->IsMarked()) {
+    MarkBit code_mark =
+        Marking::MarkBitFrom(function->unchecked_code());
+    if (code_mark.Get()) {
       shared_info->set_code_age(0);
       return false;
     }
@@ -645,7 +1034,9 @@
   inline static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info) {
     // Code is either on stack, in compilation cache or referenced
     // by optimized version of function.
-    if (shared_info->unchecked_code()->IsMarked()) {
+    MarkBit code_mark =
+        Marking::MarkBitFrom(shared_info->unchecked_code());
+    if (code_mark.Get()) {
       shared_info->set_code_age(0);
       return false;
     }
@@ -658,11 +1049,7 @@
 
     // We never flush code for Api functions.
     Object* function_data = shared_info->function_data();
-    if (function_data->IsHeapObject() &&
-        (SafeMap(function_data)->instance_type() ==
-         FUNCTION_TEMPLATE_INFO_TYPE)) {
-      return false;
-    }
+    if (function_data->IsFunctionTemplateInfo()) return false;
 
     // Only flush code for functions.
     if (shared_info->code()->kind() != Code::FUNCTION) return false;
@@ -695,40 +1082,9 @@
     return true;
   }
 
-
-  static inline Map* SafeMap(Object* obj) {
-    MapWord map_word = HeapObject::cast(obj)->map_word();
-    map_word.ClearMark();
-    map_word.ClearOverflow();
-    return map_word.ToMap();
-  }
-
-
-  static inline bool IsJSBuiltinsObject(Object* obj) {
-    return obj->IsHeapObject() &&
-        (SafeMap(obj)->instance_type() == JS_BUILTINS_OBJECT_TYPE);
-  }
-
-
   static inline bool IsValidNotBuiltinContext(Object* ctx) {
-    if (!ctx->IsHeapObject()) return false;
-
-    Map* map = SafeMap(ctx);
-    Heap* heap = map->heap();
-    if (!(map == heap->raw_unchecked_function_context_map() ||
-          map == heap->raw_unchecked_catch_context_map() ||
-          map == heap->raw_unchecked_with_context_map() ||
-          map == heap->raw_unchecked_global_context_map())) {
-      return false;
-    }
-
-    Context* context = reinterpret_cast<Context*>(ctx);
-
-    if (IsJSBuiltinsObject(context->global())) {
-      return false;
-    }
-
-    return true;
+    return ctx->IsContext() &&
+        !Context::cast(ctx)->global()->IsJSBuiltinsObject();
   }
 
 
@@ -748,13 +1104,15 @@
                                           bool is_ascii) {
     // Make sure that the fixed array is in fact initialized on the RegExp.
     // We could potentially trigger a GC when initializing the RegExp.
-    if (SafeMap(re->data())->instance_type() != FIXED_ARRAY_TYPE) return;
+    if (HeapObject::cast(re->data())->map()->instance_type() !=
+            FIXED_ARRAY_TYPE) return;
 
     // Make sure this is a RegExp that actually contains code.
     if (re->TypeTagUnchecked() != JSRegExp::IRREGEXP) return;
 
     Object* code = re->DataAtUnchecked(JSRegExp::code_index(is_ascii));
-    if (!code->IsSmi() && SafeMap(code)->instance_type() == CODE_TYPE) {
+    if (!code->IsSmi() &&
+        HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
       // Save a copy that can be reinstated if we need the code again.
       re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
                              code,
@@ -790,7 +1148,7 @@
   // If we did not use the code for kRegExpCodeThreshold mark sweep GCs
   // we flush the code.
   static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
-    Heap* heap = map->heap();
+    Heap* heap = map->GetHeap();
     MarkCompactCollector* collector = heap->mark_compact_collector();
     if (!collector->is_code_flushing_enabled()) {
       VisitJSRegExpFields(map, object);
@@ -807,7 +1165,7 @@
 
   static void VisitSharedFunctionInfoAndFlushCode(Map* map,
                                                   HeapObject* object) {
-    MarkCompactCollector* collector = map->heap()->mark_compact_collector();
+    MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
     if (!collector->is_code_flushing_enabled()) {
       VisitSharedFunctionInfoGeneric(map, object);
       return;
@@ -818,7 +1176,7 @@
 
   static void VisitSharedFunctionInfoAndFlushCodeGeneric(
       Map* map, HeapObject* object, bool known_flush_code_candidate) {
-    Heap* heap = map->heap();
+    Heap* heap = map->GetHeap();
     SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
 
     if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
@@ -835,18 +1193,30 @@
 
 
   static void VisitCodeEntry(Heap* heap, Address entry_address) {
-    Object* code = Code::GetObjectFromEntryAddress(entry_address);
-    Object* old_code = code;
-    VisitPointer(heap, &code);
-    if (code != old_code) {
-      Memory::Address_at(entry_address) =
-          reinterpret_cast<Code*>(code)->entry();
+    Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
+    MarkBit mark = Marking::MarkBitFrom(code);
+    heap->mark_compact_collector()->MarkObject(code, mark);
+    heap->mark_compact_collector()->
+        RecordCodeEntrySlot(entry_address, code);
+  }
+
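+  // Visit the strong part of a global context through its body descriptor
+  // and record the weak slots without marking the objects they point to.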
+  static void VisitGlobalContext(Map* map, HeapObject* object) {
+    FixedBodyVisitor<StaticMarkingVisitor,
+                     Context::MarkCompactBodyDescriptor,
+                     void>::Visit(map, object);
+
+    MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
+    for (int idx = Context::FIRST_WEAK_SLOT;
+         idx < Context::GLOBAL_CONTEXT_SLOTS;
+         ++idx) {
+      Object** slot =
+          HeapObject::RawField(object, FixedArray::OffsetOfElementAt(idx));
+      collector->RecordSlot(slot, slot, *slot);
     }
   }
 
-
   static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
-    Heap* heap = map->heap();
+    Heap* heap = map->GetHeap();
     MarkCompactCollector* collector = heap->mark_compact_collector();
     if (!collector->is_code_flushing_enabled()) {
       VisitJSFunction(map, object);
@@ -861,7 +1231,9 @@
     }
 
     if (!flush_code_candidate) {
-      collector->MarkObject(jsfunction->unchecked_shared()->unchecked_code());
+      Code* code = jsfunction->unchecked_shared()->unchecked_code();
+      MarkBit code_mark = Marking::MarkBitFrom(code);
+      heap->mark_compact_collector()->MarkObject(code, code_mark);
 
       if (jsfunction->unchecked_code()->kind() == Code::OPTIMIZED_FUNCTION) {
         // For optimized functions we should retain both non-optimized version
@@ -877,7 +1249,11 @@
              i < count;
              i++) {
           JSFunction* inlined = reinterpret_cast<JSFunction*>(literals->get(i));
-          collector->MarkObject(inlined->unchecked_shared()->unchecked_code());
+          Code* inlined_code = inlined->unchecked_shared()->unchecked_code();
+          MarkBit inlined_code_mark =
+              Marking::MarkBitFrom(inlined_code);
+          heap->mark_compact_collector()->MarkObject(
+              inlined_code, inlined_code_mark);
         }
       }
     }
@@ -902,12 +1278,11 @@
   static inline void VisitJSFunctionFields(Map* map,
                                            JSFunction* object,
                                            bool flush_code_candidate) {
-    Heap* heap = map->heap();
-    MarkCompactCollector* collector = heap->mark_compact_collector();
+    Heap* heap = map->GetHeap();
 
     VisitPointers(heap,
-                  SLOT_ADDR(object, JSFunction::kPropertiesOffset),
-                  SLOT_ADDR(object, JSFunction::kCodeEntryOffset));
+                  HeapObject::RawField(object, JSFunction::kPropertiesOffset),
+                  HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
 
     if (!flush_code_candidate) {
       VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
@@ -917,29 +1292,39 @@
       // Visit shared function info to avoid double checking of it's
       // flushability.
       SharedFunctionInfo* shared_info = object->unchecked_shared();
-      if (!shared_info->IsMarked()) {
+      MarkBit shared_info_mark = Marking::MarkBitFrom(shared_info);
+      if (!shared_info_mark.Get()) {
         Map* shared_info_map = shared_info->map();
-        collector->SetMark(shared_info);
-        collector->MarkObject(shared_info_map);
+        MarkBit shared_info_map_mark =
+            Marking::MarkBitFrom(shared_info_map);
+        heap->mark_compact_collector()->SetMark(shared_info, shared_info_mark);
+        heap->mark_compact_collector()->MarkObject(shared_info_map,
+                                                   shared_info_map_mark);
         VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map,
                                                    shared_info,
                                                    true);
       }
     }
 
-    VisitPointers(heap,
-                  SLOT_ADDR(object,
-                            JSFunction::kCodeEntryOffset + kPointerSize),
-                  SLOT_ADDR(object, JSFunction::kNonWeakFieldsEndOffset));
+    VisitPointers(
+        heap,
+        HeapObject::RawField(object,
+                             JSFunction::kCodeEntryOffset + kPointerSize),
+        HeapObject::RawField(object,
+                             JSFunction::kNonWeakFieldsEndOffset));
 
     // Don't visit the next function list field as it is a weak reference.
+    Object** next_function =
+        HeapObject::RawField(object, JSFunction::kNextFunctionLinkOffset);
+    heap->mark_compact_collector()->RecordSlot(
+        next_function, next_function, *next_function);
   }
 
   static inline void VisitJSRegExpFields(Map* map,
                                          HeapObject* object) {
     int last_property_offset =
         JSRegExp::kSize + kPointerSize * map->inobject_properties();
-    VisitPointers(map->heap(),
+    VisitPointers(map->GetHeap(),
                   SLOT_ADDR(object, JSRegExp::kPropertiesOffset),
                   SLOT_ADDR(object, last_property_offset));
   }
@@ -995,7 +1380,9 @@
 
   void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
     for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
-      collector_->MarkObject(it.frame()->unchecked_code());
+      Code* code = it.frame()->unchecked_code();
+      MarkBit code_bit = Marking::MarkBitFrom(code);
+      collector_->MarkObject(it.frame()->unchecked_code(), code_bit);
     }
   }
 
@@ -1017,8 +1404,10 @@
     Object* obj = *slot;
     if (obj->IsSharedFunctionInfo()) {
       SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
-      collector_->MarkObject(shared->unchecked_code());
-      collector_->MarkObject(shared);
+      MarkBit shared_mark = Marking::MarkBitFrom(shared);
+      MarkBit code_mark = Marking::MarkBitFrom(shared->unchecked_code());
+      collector_->MarkObject(shared->unchecked_code(), code_mark);
+      collector_->MarkObject(shared, shared_mark);
     }
   }
 
@@ -1030,7 +1419,8 @@
 void MarkCompactCollector::PrepareForCodeFlushing() {
   ASSERT(heap() == Isolate::Current()->heap());
 
-  if (!FLAG_flush_code) {
+  // TODO(1609) Currently incremental marker does not support code flushing.
+  if (!FLAG_flush_code || was_marked_incrementally_) {
     EnableCodeFlushing(false);
     return;
   }
@@ -1042,16 +1432,21 @@
     return;
   }
 #endif
+
   EnableCodeFlushing(true);
 
   // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
   // relies on it being marked before any other descriptor array.
-  MarkObject(heap()->raw_unchecked_empty_descriptor_array());
+  HeapObject* descriptor_array = heap()->empty_descriptor_array();
+  MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
+  MarkObject(descriptor_array, descriptor_array_mark);
 
   // Make sure we are not referencing the code from the stack.
   ASSERT(this == heap()->mark_compact_collector());
   for (StackFrameIterator it; !it.done(); it.Advance()) {
-    MarkObject(it.frame()->unchecked_code());
+    Code* code = it.frame()->unchecked_code();
+    MarkBit code_mark = Marking::MarkBitFrom(code);
+    MarkObject(code, code_mark);
   }
 
   // Iterate the archived stacks in all threads to check if
@@ -1064,7 +1459,7 @@
   heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
   heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
 
-  ProcessMarkingStack();
+  ProcessMarkingDeque();
 }
 
 
@@ -1088,19 +1483,21 @@
 
     // Replace flat cons strings in place.
     HeapObject* object = ShortCircuitConsString(p);
-    if (object->IsMarked()) return;
+    MarkBit mark_bit = Marking::MarkBitFrom(object);
+    if (mark_bit.Get()) return;
 
     Map* map = object->map();
     // Mark the object.
-    collector_->SetMark(object);
+    collector_->SetMark(object, mark_bit);
 
     // Mark the map pointer and body, and push them on the marking stack.
-    collector_->MarkObject(map);
+    MarkBit map_mark = Marking::MarkBitFrom(map);
+    collector_->MarkObject(map, map_mark);
     StaticMarkingVisitor::IterateBody(map, object);
 
     // Mark all the objects reachable from the map and body.  May leave
     // overflowed objects in the heap.
-    collector_->EmptyMarkingStack();
+    collector_->EmptyMarkingDeque();
   }
 
   MarkCompactCollector* collector_;
@@ -1116,17 +1513,19 @@
   virtual void VisitPointers(Object** start, Object** end) {
     // Visit all HeapObject pointers in [start, end).
     for (Object** p = start; p < end; p++) {
-      if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
+      Object* o = *p;
+      if (o->IsHeapObject() &&
+          !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
         // Check if the symbol being pruned is an external symbol. We need to
         // delete the associated external data as this symbol is going away.
 
         // Since no objects have yet been moved we can safely access the map of
         // the object.
-        if ((*p)->IsExternalString()) {
+        if (o->IsExternalString()) {
           heap_->FinalizeExternalString(String::cast(*p));
         }
         // Set the entry to null_value (as deleted).
-        *p = heap_->raw_unchecked_null_value();
+        *p = heap_->null_value();
         pointers_removed_++;
       }
     }
@@ -1147,8 +1546,7 @@
 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
  public:
   virtual Object* RetainAs(Object* object) {
-    MapWord first_word = HeapObject::cast(object)->map_word();
-    if (first_word.IsMarked()) {
+    if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
       return object;
     } else {
       return NULL;
@@ -1157,28 +1555,26 @@
 };
 
 
-void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
-  ASSERT(!object->IsMarked());
+void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) {
+  ASSERT(IsMarked(object));
   ASSERT(HEAP->Contains(object));
   if (object->IsMap()) {
     Map* map = Map::cast(object);
     if (FLAG_cleanup_code_caches_at_gc) {
       map->ClearCodeCache(heap());
     }
-    SetMark(map);
 
     // When map collection is enabled we have to mark through map's transitions
     // in a special way to make transition links weak.
     // Only maps for subclasses of JSReceiver can have transitions.
     STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
-    if (FLAG_collect_maps && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
+    if (collect_maps_ && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
       MarkMapContents(map);
     } else {
-      marking_stack_.Push(map);
+      marking_deque_.PushBlack(map);
     }
   } else {
-    SetMark(object);
-    marking_stack_.Push(object);
+    marking_deque_.PushBlack(object);
   }
 }
 
@@ -1187,12 +1583,17 @@
   // Mark prototype transitions array but don't push it into marking stack.
   // This will make references from it weak. We will clean dead prototype
   // transitions in ClearNonLiveTransitions.
-  FixedArray* prototype_transitions = map->unchecked_prototype_transitions();
-  if (!prototype_transitions->IsMarked()) SetMark(prototype_transitions);
+  FixedArray* prototype_transitions = map->prototype_transitions();
+  MarkBit mark = Marking::MarkBitFrom(prototype_transitions);
+  if (!mark.Get()) {
+    mark.Set();
+    MemoryChunk::IncrementLiveBytes(prototype_transitions->address(),
+                                    prototype_transitions->Size());
+  }
 
-  Object* raw_descriptor_array =
-      *HeapObject::RawField(map,
-                            Map::kInstanceDescriptorsOrBitField3Offset);
+  Object** raw_descriptor_array_slot =
+      HeapObject::RawField(map, Map::kInstanceDescriptorsOrBitField3Offset);
+  Object* raw_descriptor_array = *raw_descriptor_array_slot;
   if (!raw_descriptor_array->IsSmi()) {
     MarkDescriptorArray(
         reinterpret_cast<DescriptorArray*>(raw_descriptor_array));
@@ -1206,24 +1607,26 @@
 
   Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
 
-  StaticMarkingVisitor::VisitPointers(map->heap(), start_slot, end_slot);
+  StaticMarkingVisitor::VisitPointers(map->GetHeap(), start_slot, end_slot);
 }
 
 
 void MarkCompactCollector::MarkDescriptorArray(
     DescriptorArray* descriptors) {
-  if (descriptors->IsMarked()) return;
+  MarkBit descriptors_mark = Marking::MarkBitFrom(descriptors);
+  if (descriptors_mark.Get()) return;
   // Empty descriptor array is marked as a root before any maps are marked.
-  ASSERT(descriptors != HEAP->raw_unchecked_empty_descriptor_array());
-  SetMark(descriptors);
+  ASSERT(descriptors != heap()->empty_descriptor_array());
+  SetMark(descriptors, descriptors_mark);
 
   FixedArray* contents = reinterpret_cast<FixedArray*>(
       descriptors->get(DescriptorArray::kContentArrayIndex));
   ASSERT(contents->IsHeapObject());
-  ASSERT(!contents->IsMarked());
+  ASSERT(!IsMarked(contents));
   ASSERT(contents->IsFixedArray());
   ASSERT(contents->length() >= 2);
-  SetMark(contents);
+  MarkBit contents_mark = Marking::MarkBitFrom(contents);
+  SetMark(contents, contents_mark);
   // Contents contains (value, details) pairs.  If the details say that the type
   // of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
   // EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as
@@ -1233,27 +1636,45 @@
     // If the pair (value, details) at index i, i+1 is not
     // a transition or null descriptor, mark the value.
     PropertyDetails details(Smi::cast(contents->get(i + 1)));
-    if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) {
-      HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
-      if (object->IsHeapObject() && !object->IsMarked()) {
-        SetMark(object);
-        marking_stack_.Push(object);
+
+    Object** slot = contents->data_start() + i;
+    Object* value = *slot;
+    if (!value->IsHeapObject()) continue;
+
+    RecordSlot(slot, slot, *slot);
+
+    PropertyType type = details.type();
+    if (type < FIRST_PHANTOM_PROPERTY_TYPE) {
+      HeapObject* object = HeapObject::cast(value);
+      MarkBit mark = Marking::MarkBitFrom(HeapObject::cast(object));
+      if (!mark.Get()) {
+        SetMark(HeapObject::cast(object), mark);
+        marking_deque_.PushBlack(object);
+      }
+    } else if (type == ELEMENTS_TRANSITION && value->IsFixedArray()) {
+      // For maps with multiple elements transitions, the transition maps are
+      // stored in a FixedArray. Keep the fixed array alive but not the maps
+      // that it refers to.
+      HeapObject* object = HeapObject::cast(value);
+      MarkBit mark = Marking::MarkBitFrom(HeapObject::cast(object));
+      if (!mark.Get()) {
+        SetMark(HeapObject::cast(object), mark);
       }
     }
   }
   // The DescriptorArray descriptors contains a pointer to its contents array,
   // but the contents array is already marked.
-  marking_stack_.Push(descriptors);
+  marking_deque_.PushBlack(descriptors);
 }
 
 
 void MarkCompactCollector::CreateBackPointers() {
   HeapObjectIterator iterator(heap()->map_space());
-  for (HeapObject* next_object = iterator.next();
-       next_object != NULL; next_object = iterator.next()) {
-    if (next_object->IsMap()) {  // Could also be ByteArray on free list.
+  for (HeapObject* next_object = iterator.Next();
+       next_object != NULL; next_object = iterator.Next()) {
+    if (next_object->IsMap()) {  // Could also be FreeSpace object on free list.
       Map* map = Map::cast(next_object);
-      STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+      STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
       if (map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
         map->CreateBackPointers();
       } else {
@@ -1264,54 +1685,123 @@
 }
 
 
-static int OverflowObjectSize(HeapObject* obj) {
-  // Recover the normal map pointer, it might be marked as live and
-  // overflowed.
-  MapWord map_word = obj->map_word();
-  map_word.ClearMark();
-  map_word.ClearOverflow();
-  return obj->SizeFromMap(map_word.ToMap());
+// Fill the marking stack with overflowed objects returned by the given
+// iterator.  Stop when the marking stack is filled or the end of the space
+// is reached, whichever comes first.
+template<class T>
+static void DiscoverGreyObjectsWithIterator(Heap* heap,
+                                            MarkingDeque* marking_deque,
+                                            T* it) {
+  // The caller should ensure that the marking stack is initially not full,
+  // so that we don't waste effort pointlessly scanning for objects.
+  ASSERT(!marking_deque->IsFull());
+
+  Map* filler_map = heap->one_pointer_filler_map();
+  for (HeapObject* object = it->Next();
+       object != NULL;
+       object = it->Next()) {
+    MarkBit markbit = Marking::MarkBitFrom(object);
+    if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
+      Marking::GreyToBlack(markbit);
+      MemoryChunk::IncrementLiveBytes(object->address(), object->Size());
+      marking_deque->PushBlack(object);
+      if (marking_deque->IsFull()) return;
+    }
+  }
 }
 
 
-class OverflowedObjectsScanner : public AllStatic {
- public:
-  // Fill the marking stack with overflowed objects returned by the given
-  // iterator.  Stop when the marking stack is filled or the end of the space
-  // is reached, whichever comes first.
-  template<class T>
-  static inline void ScanOverflowedObjects(MarkCompactCollector* collector,
-                                           T* it) {
-    // The caller should ensure that the marking stack is initially not full,
-    // so that we don't waste effort pointlessly scanning for objects.
-    ASSERT(!collector->marking_stack_.is_full());
+static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
 
-    for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
-      if (object->IsOverflowed()) {
-        object->ClearOverflow();
-        ASSERT(object->IsMarked());
-        ASSERT(HEAP->Contains(object));
-        collector->marking_stack_.Push(object);
-        if (collector->marking_stack_.is_full()) return;
-      }
+
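+// Scan the page's mark bitmap for grey objects (bit pattern "11"), turn them
+// black, account for their live bytes and push them on the marking deque
+// until it is full.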
+static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, Page* p) {
+  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+  MarkBit::CellType* cells = p->markbits()->cells();
+
+  int last_cell_index =
+      Bitmap::IndexToCell(
+          Bitmap::CellAlignIndex(
+              p->AddressToMarkbitIndex(p->ObjectAreaEnd())));
+
+  int cell_index = Page::kFirstUsedCell;
+  Address cell_base = p->ObjectAreaStart();
+
+  for (cell_index = Page::kFirstUsedCell;
+       cell_index < last_cell_index;
+       cell_index++, cell_base += 32 * kPointerSize) {
+    ASSERT((unsigned)cell_index ==
+        Bitmap::IndexToCell(
+            Bitmap::CellAlignIndex(
+                p->AddressToMarkbitIndex(cell_base))));
+
+    const MarkBit::CellType current_cell = cells[cell_index];
+    if (current_cell == 0) continue;
+
+    const MarkBit::CellType next_cell = cells[cell_index + 1];
+    MarkBit::CellType grey_objects = current_cell &
+        ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1)));
+
+    int offset = 0;
+    while (grey_objects != 0) {
+      int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects);
+      grey_objects >>= trailing_zeros;
+      offset += trailing_zeros;
+      MarkBit markbit(&cells[cell_index], 1 << offset, false);
+      ASSERT(Marking::IsGrey(markbit));
+      Marking::GreyToBlack(markbit);
+      Address addr = cell_base + offset * kPointerSize;
+      HeapObject* object = HeapObject::FromAddress(addr);
+      MemoryChunk::IncrementLiveBytes(object->address(), object->Size());
+      marking_deque->PushBlack(object);
+      if (marking_deque->IsFull()) return;
+      offset += 2;
+      grey_objects >>= 2;
+    }
+
+    grey_objects >>= (Bitmap::kBitsPerCell - 1);
+  }
+}
+
+
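+// For precisely swept spaces we can iterate objects directly; for
+// conservatively swept spaces we have to scan the mark bitmap of each page.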
+static void DiscoverGreyObjectsInSpace(Heap* heap,
+                                       MarkingDeque* marking_deque,
+                                       PagedSpace* space) {
+  if (!space->was_swept_conservatively()) {
+    HeapObjectIterator it(space);
+    DiscoverGreyObjectsWithIterator(heap, marking_deque, &it);
+  } else {
+    PageIterator it(space);
+    while (it.has_next()) {
+      Page* p = it.next();
+      DiscoverGreyObjectsOnPage(marking_deque, p);
+      if (marking_deque->IsFull()) return;
     }
   }
-};
+}
 
 
 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
-  return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked();
+  Object* o = *p;
+  if (!o->IsHeapObject()) return false;
+  HeapObject* heap_object = HeapObject::cast(o);
+  MarkBit mark = Marking::MarkBitFrom(heap_object);
+  return !mark.Get();
 }
 
 
 void MarkCompactCollector::MarkSymbolTable() {
-  SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table();
+  SymbolTable* symbol_table = heap()->symbol_table();
   // Mark the symbol table itself.
-  SetMark(symbol_table);
+  MarkBit symbol_table_mark = Marking::MarkBitFrom(symbol_table);
+  SetMark(symbol_table, symbol_table_mark);
   // Explicitly mark the prefix.
   MarkingVisitor marker(heap());
   symbol_table->IteratePrefix(&marker);
-  ProcessMarkingStack();
+  ProcessMarkingDeque();
 }
 
 
@@ -1324,9 +1814,9 @@
   MarkSymbolTable();
 
   // There may be overflowed objects in the heap.  Visit them now.
-  while (marking_stack_.overflowed()) {
-    RefillMarkingStack();
-    EmptyMarkingStack();
+  while (marking_deque_.overflowed()) {
+    RefillMarkingDeque();
+    EmptyMarkingDeque();
   }
 }
 
@@ -1344,9 +1834,13 @@
     bool group_marked = false;
     for (size_t j = 0; j < entry->length_; j++) {
       Object* object = *objects[j];
-      if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) {
-        group_marked = true;
-        break;
+      if (object->IsHeapObject()) {
+        HeapObject* heap_object = HeapObject::cast(object);
+        MarkBit mark = Marking::MarkBitFrom(heap_object);
+        if (mark.Get()) {
+          group_marked = true;
+          break;
+        }
       }
     }
 
@@ -1355,17 +1849,21 @@
       continue;
     }
 
-    // An object in the group is marked, so mark all heap objects in
-    // the group.
+    // An object in the group is marked, so mark as grey all white heap
+    // objects in the group.
     for (size_t j = 0; j < entry->length_; ++j) {
-      if ((*objects[j])->IsHeapObject()) {
-        MarkObject(HeapObject::cast(*objects[j]));
+      Object* object = *objects[j];
+      if (object->IsHeapObject()) {
+        HeapObject* heap_object = HeapObject::cast(object);
+        MarkBit mark = Marking::MarkBitFrom(heap_object);
+        MarkObject(heap_object, mark);
       }
     }
 
-    // Once the entire group has been marked, dispose it because it's
-    // not needed anymore.
+    // Once the entire group has been colored grey, set the object group
+    // to NULL so it won't be processed again.
     entry->Dispose();
+    object_groups->at(i) = NULL;
   }
   object_groups->Rewind(last);
 }
@@ -1380,7 +1878,7 @@
     ImplicitRefGroup* entry = ref_groups->at(i);
     ASSERT(entry != NULL);
 
-    if (!(*entry->parent_)->IsMarked()) {
+    if (!IsMarked(*entry->parent_)) {
       (*ref_groups)[last++] = entry;
       continue;
     }
@@ -1389,7 +1887,9 @@
     // A parent object is marked, so mark all child heap objects.
     for (size_t j = 0; j < entry->length_; ++j) {
       if ((*children[j])->IsHeapObject()) {
-        MarkObject(HeapObject::cast(*children[j]));
+        HeapObject* child = HeapObject::cast(*children[j]);
+        MarkBit mark = Marking::MarkBitFrom(child);
+        MarkObject(child, mark);
       }
     }
 
@@ -1405,21 +1905,17 @@
 // Before: the marking stack contains zero or more heap object pointers.
 // After: the marking stack is empty, and all objects reachable from the
 // marking stack have been marked, or are overflowed in the heap.
-void MarkCompactCollector::EmptyMarkingStack() {
-  while (!marking_stack_.is_empty()) {
-    while (!marking_stack_.is_empty()) {
-      HeapObject* object = marking_stack_.Pop();
+void MarkCompactCollector::EmptyMarkingDeque() {
+  while (!marking_deque_.IsEmpty()) {
+    while (!marking_deque_.IsEmpty()) {
+      HeapObject* object = marking_deque_.Pop();
       ASSERT(object->IsHeapObject());
       ASSERT(heap()->Contains(object));
-      ASSERT(object->IsMarked());
-      ASSERT(!object->IsOverflowed());
+      ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
 
-      // Because the object is marked, we have to recover the original map
-      // pointer and use it to mark the object's body.
-      MapWord map_word = object->map_word();
-      map_word.ClearMark();
-      Map* map = map_word.ToMap();
-      MarkObject(map);
+      Map* map = object->map();
+      MarkBit map_mark = Marking::MarkBitFrom(map);
+      MarkObject(map, map_mark);
 
       StaticMarkingVisitor::IterateBody(map, object);
     }
@@ -1436,39 +1932,45 @@
 // before sweeping completes.  If sweeping completes, there are no remaining
 // overflowed objects in the heap so the overflow flag on the markings stack
 // is cleared.
-void MarkCompactCollector::RefillMarkingStack() {
-  ASSERT(marking_stack_.overflowed());
+void MarkCompactCollector::RefillMarkingDeque() {
+  ASSERT(marking_deque_.overflowed());
 
-  SemiSpaceIterator new_it(heap()->new_space(), &OverflowObjectSize);
-  OverflowedObjectsScanner::ScanOverflowedObjects(this, &new_it);
-  if (marking_stack_.is_full()) return;
+  SemiSpaceIterator new_it(heap()->new_space());
+  DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &new_it);
+  if (marking_deque_.IsFull()) return;
 
-  HeapObjectIterator old_pointer_it(heap()->old_pointer_space(),
-                                    &OverflowObjectSize);
-  OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_pointer_it);
-  if (marking_stack_.is_full()) return;
+  DiscoverGreyObjectsInSpace(heap(),
+                             &marking_deque_,
+                             heap()->old_pointer_space());
+  if (marking_deque_.IsFull()) return;
 
-  HeapObjectIterator old_data_it(heap()->old_data_space(), &OverflowObjectSize);
-  OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_data_it);
-  if (marking_stack_.is_full()) return;
+  DiscoverGreyObjectsInSpace(heap(),
+                             &marking_deque_,
+                             heap()->old_data_space());
+  if (marking_deque_.IsFull()) return;
 
-  HeapObjectIterator code_it(heap()->code_space(), &OverflowObjectSize);
-  OverflowedObjectsScanner::ScanOverflowedObjects(this, &code_it);
-  if (marking_stack_.is_full()) return;
+  DiscoverGreyObjectsInSpace(heap(),
+                             &marking_deque_,
+                             heap()->code_space());
+  if (marking_deque_.IsFull()) return;
 
-  HeapObjectIterator map_it(heap()->map_space(), &OverflowObjectSize);
-  OverflowedObjectsScanner::ScanOverflowedObjects(this, &map_it);
-  if (marking_stack_.is_full()) return;
+  DiscoverGreyObjectsInSpace(heap(),
+                             &marking_deque_,
+                             heap()->map_space());
+  if (marking_deque_.IsFull()) return;
 
-  HeapObjectIterator cell_it(heap()->cell_space(), &OverflowObjectSize);
-  OverflowedObjectsScanner::ScanOverflowedObjects(this, &cell_it);
-  if (marking_stack_.is_full()) return;
+  DiscoverGreyObjectsInSpace(heap(),
+                             &marking_deque_,
+                             heap()->cell_space());
+  if (marking_deque_.IsFull()) return;
 
-  LargeObjectIterator lo_it(heap()->lo_space(), &OverflowObjectSize);
-  OverflowedObjectsScanner::ScanOverflowedObjects(this, &lo_it);
-  if (marking_stack_.is_full()) return;
+  LargeObjectIterator lo_it(heap()->lo_space());
+  DiscoverGreyObjectsWithIterator(heap(),
+                                  &marking_deque_,
+                                  &lo_it);
+  if (marking_deque_.IsFull()) return;
 
-  marking_stack_.clear_overflowed();
+  marking_deque_.ClearOverflowed();
 }
 
 
@@ -1476,23 +1978,23 @@
 // stack.  Before: the marking stack contains zero or more heap object
 // pointers.  After: the marking stack is empty and there are no overflowed
 // objects in the heap.
-void MarkCompactCollector::ProcessMarkingStack() {
-  EmptyMarkingStack();
-  while (marking_stack_.overflowed()) {
-    RefillMarkingStack();
-    EmptyMarkingStack();
+void MarkCompactCollector::ProcessMarkingDeque() {
+  EmptyMarkingDeque();
+  while (marking_deque_.overflowed()) {
+    RefillMarkingDeque();
+    EmptyMarkingDeque();
   }
 }
 
 
 void MarkCompactCollector::ProcessExternalMarking() {
   bool work_to_do = true;
-  ASSERT(marking_stack_.is_empty());
+  ASSERT(marking_deque_.IsEmpty());
   while (work_to_do) {
     MarkObjectGroups();
     MarkImplicitRefGroups();
-    work_to_do = !marking_stack_.is_empty();
-    ProcessMarkingStack();
+    work_to_do = !marking_deque_.IsEmpty();
+    ProcessMarkingDeque();
   }
 }
 
@@ -1504,16 +2006,43 @@
   // with the C stack limit check.
   PostponeInterruptsScope postpone(heap()->isolate());
 
+  bool incremental_marking_overflowed = false;
+  IncrementalMarking* incremental_marking = heap_->incremental_marking();
+  if (was_marked_incrementally_) {
+    // Finalize the incremental marking and check whether we had an overflow.
+    // Both markers use grey color to mark overflowed objects, so the
+    // non-incremental marker can deal with them as if the overflow
+    // occurred during normal marking.
+    // But the incremental marker uses a separate marking deque,
+    // so we have to explicitly copy its overflow state.
+    incremental_marking->Finalize();
+    incremental_marking_overflowed =
+        incremental_marking->marking_deque()->overflowed();
+    incremental_marking->marking_deque()->ClearOverflowed();
+  } else {
+    // Abort any pending incremental activities, e.g. incremental sweeping.
+    incremental_marking->Abort();
+  }
+
 #ifdef DEBUG
   ASSERT(state_ == PREPARE_GC);
   state_ = MARK_LIVE_OBJECTS;
 #endif
-  // The to space contains live objects, the from space is used as a marking
-  // stack.
-  marking_stack_.Initialize(heap()->new_space()->FromSpaceLow(),
-                            heap()->new_space()->FromSpaceHigh());
+  // The to space contains live objects, while a page in the from space is
+  // used as the marking deque.
+  Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
+  Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
+  if (FLAG_force_marking_deque_overflows) {
+    marking_deque_end = marking_deque_start + 64 * kPointerSize;
+  }
+  marking_deque_.Initialize(marking_deque_start,
+                            marking_deque_end);
+  ASSERT(!marking_deque_.overflowed());
 
-  ASSERT(!marking_stack_.overflowed());
+  if (incremental_marking_overflowed) {
+    // There are overflowed objects left in the heap after incremental marking.
+    marking_deque_.SetOverflowed();
+  }
 
   PrepareForCodeFlushing();
 
@@ -1535,15 +2064,20 @@
       &IsUnmarkedHeapObject);
   // Then we mark the objects and process the transitive closure.
   heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
-  while (marking_stack_.overflowed()) {
-    RefillMarkingStack();
-    EmptyMarkingStack();
+  while (marking_deque_.overflowed()) {
+    RefillMarkingDeque();
+    EmptyMarkingDeque();
   }
 
   // Repeat host application specific marking to mark unmarked objects
   // reachable from the weak roots.
   ProcessExternalMarking();
 
+  AfterMarking();
+}
+
+
+void MarkCompactCollector::AfterMarking() {
   // Object literal map caches reference symbols (cache keys) and maps
   // (cache values). At this point still useful maps have already been
   // marked. Mark the keys for the alive values before we process the
@@ -1553,7 +2087,7 @@
   // Prune the symbol table removing all symbols only pointed to by the
   // symbol table.  Cannot use symbol_table() here because the symbol
   // table is marked.
-  SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table();
+  SymbolTable* symbol_table = heap()->symbol_table();
   SymbolTableCleaner v(heap());
   symbol_table->IterateElements(&v);
   symbol_table->ElementsRemoved(v.PointersRemoved());
@@ -1582,13 +2116,13 @@
   Object* raw_context = heap()->global_contexts_list_;
   while (raw_context != heap()->undefined_value()) {
     Context* context = reinterpret_cast<Context*>(raw_context);
-    if (context->IsMarked()) {
+    if (IsMarked(context)) {
       HeapObject* raw_map_cache =
           HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
       // A map cache may be reachable from the stack. In this case
       // it's already transitively marked and it's too late to clean
       // up its parts.
-      if (!raw_map_cache->IsMarked() &&
+      if (!IsMarked(raw_map_cache) &&
           raw_map_cache != heap()->undefined_value()) {
         MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
         int existing_elements = map_cache->NumberOfElements();
@@ -1601,8 +2135,7 @@
               raw_key == heap()->null_value()) continue;
           STATIC_ASSERT(MapCache::kEntrySize == 2);
           Object* raw_map = map_cache->get(i + 1);
-          if (raw_map->IsHeapObject() &&
-              HeapObject::cast(raw_map)->IsMarked()) {
+          if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
             ++used_elements;
           } else {
             // Delete useless entries with unmarked maps.
@@ -1618,14 +2151,15 @@
           // extra complexity during GC. We rely on subsequent cache
           // usages (EnsureCapacity) to do this.
           map_cache->ElementsRemoved(existing_elements - used_elements);
-          MarkObject(map_cache);
+          MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
+          MarkObject(map_cache, map_cache_markbit);
         }
       }
     }
     // Move to next element in the list.
     raw_context = context->get(Context::NEXT_CONTEXT_LINK);
   }
-  ProcessMarkingStack();
+  ProcessMarkingDeque();
 }
 
 
@@ -1655,27 +2189,26 @@
 #endif  // DEBUG
 
 
-void MarkCompactCollector::SweepLargeObjectSpace() {
-#ifdef DEBUG
-  ASSERT(state_ == MARK_LIVE_OBJECTS);
-  state_ =
-      compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
-#endif
-  // Deallocate unmarked objects and clear marked bits for marked objects.
-  heap()->lo_space()->FreeUnmarkedObjects();
-}
+void MarkCompactCollector::ReattachInitialMaps() {
+  HeapObjectIterator map_iterator(heap()->map_space());
+  for (HeapObject* obj = map_iterator.Next();
+       obj != NULL;
+       obj = map_iterator.Next()) {
+    if (obj->IsFreeSpace()) continue;
+    Map* map = Map::cast(obj);
 
+    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+    if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;
 
-// Safe to use during marking phase only.
-bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
-  MapWord metamap = object->map_word();
-  metamap.ClearMark();
-  return metamap.ToMap()->instance_type() == MAP_TYPE;
+    if (map->attached_to_shared_function_info()) {
+      JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
+    }
+  }
 }
 
 
 void MarkCompactCollector::ClearNonLiveTransitions() {
-  HeapObjectIterator map_iterator(heap()->map_space(), &SizeOfMarkedObject);
+  HeapObjectIterator map_iterator(heap()->map_space());
   // Iterate over the map space, setting map transitions that go from
   // a marked map to an unmarked map to null transitions.  At the same time,
   // set all the prototype fields of maps back to their original value,
@@ -1686,17 +2219,19 @@
   // scan the descriptor arrays of those maps, not all maps.
   // All of these actions are carried out only on maps of JSObjects
   // and related subtypes.
-  for (HeapObject* obj = map_iterator.next();
-       obj != NULL; obj = map_iterator.next()) {
+  for (HeapObject* obj = map_iterator.Next();
+       obj != NULL; obj = map_iterator.Next()) {
     Map* map = reinterpret_cast<Map*>(obj);
-    if (!map->IsMarked() && map->IsByteArray()) continue;
+    MarkBit map_mark = Marking::MarkBitFrom(map);
+    if (map->IsFreeSpace()) continue;
 
-    ASSERT(SafeIsMap(map));
+    ASSERT(map->IsMap());
     // Only JSObject and subtypes have map transitions and back pointers.
-    STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
-    if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;
+    STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
+    if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
 
-    if (map->IsMarked() && map->attached_to_shared_function_info()) {
+    if (map_mark.Get() &&
+        map->attached_to_shared_function_info()) {
       // This map is used for inobject slack tracking and has been detached
       // from SharedFunctionInfo during the mark phase.
       // Since it survived the GC, reattach it now.
@@ -1705,52 +2240,55 @@
 
     // Clear dead prototype transitions.
     int number_of_transitions = map->NumberOfProtoTransitions();
-    if (number_of_transitions > 0) {
-      FixedArray* prototype_transitions =
-          map->unchecked_prototype_transitions();
-      int new_number_of_transitions = 0;
-      const int header = Map::kProtoTransitionHeaderSize;
-      const int proto_offset =
-          header + Map::kProtoTransitionPrototypeOffset;
-      const int map_offset = header + Map::kProtoTransitionMapOffset;
-      const int step = Map::kProtoTransitionElementsPerEntry;
-      for (int i = 0; i < number_of_transitions; i++) {
-        Object* prototype = prototype_transitions->get(proto_offset + i * step);
-        Object* cached_map = prototype_transitions->get(map_offset + i * step);
-        if (HeapObject::cast(prototype)->IsMarked() &&
-            HeapObject::cast(cached_map)->IsMarked()) {
-          if (new_number_of_transitions != i) {
-            prototype_transitions->set_unchecked(
-                heap_,
-                proto_offset + new_number_of_transitions * step,
-                prototype,
-                UPDATE_WRITE_BARRIER);
-            prototype_transitions->set_unchecked(
-                heap_,
-                map_offset + new_number_of_transitions * step,
-                cached_map,
-                SKIP_WRITE_BARRIER);
-          }
-          new_number_of_transitions++;
+    FixedArray* prototype_transitions = map->prototype_transitions();
+
+    int new_number_of_transitions = 0;
+    const int header = Map::kProtoTransitionHeaderSize;
+    const int proto_offset =
+        header + Map::kProtoTransitionPrototypeOffset;
+    const int map_offset = header + Map::kProtoTransitionMapOffset;
+    const int step = Map::kProtoTransitionElementsPerEntry;
+    for (int i = 0; i < number_of_transitions; i++) {
+      Object* prototype = prototype_transitions->get(proto_offset + i * step);
+      Object* cached_map = prototype_transitions->get(map_offset + i * step);
+      if (IsMarked(prototype) && IsMarked(cached_map)) {
+        if (new_number_of_transitions != i) {
+          prototype_transitions->set_unchecked(
+              heap_,
+              proto_offset + new_number_of_transitions * step,
+              prototype,
+              UPDATE_WRITE_BARRIER);
+          prototype_transitions->set_unchecked(
+              heap_,
+              map_offset + new_number_of_transitions * step,
+              cached_map,
+              SKIP_WRITE_BARRIER);
         }
       }
 
       // Fill slots that became free with undefined value.
-      Object* undefined = heap()->raw_unchecked_undefined_value();
+      Object* undefined = heap()->undefined_value();
       for (int i = new_number_of_transitions * step;
            i < number_of_transitions * step;
            i++) {
+        // The undefined object is on a page that is never compacted and never
+        // in new space so it is OK to skip the write barrier.  Also it's a
+        // root.
         prototype_transitions->set_unchecked(heap_,
                                              header + i,
                                              undefined,
                                              SKIP_WRITE_BARRIER);
+
+        Object** undefined_slot =
+            prototype_transitions->data_start() + i;
+        RecordSlot(undefined_slot, undefined_slot, undefined);
       }
       map->SetNumberOfProtoTransitions(new_number_of_transitions);
     }
 
     // Follow the chain of back pointers to find the prototype.
     Map* current = map;
-    while (SafeIsMap(current)) {
+    while (current->IsMap()) {
       current = reinterpret_cast<Map*>(current->prototype());
       ASSERT(current->IsHeapObject());
     }
@@ -1759,21 +2297,28 @@
     // Follow back pointers, setting them to prototype,
     // clearing map transitions when necessary.
     current = map;
-    bool on_dead_path = !current->IsMarked();
+    bool on_dead_path = !map_mark.Get();
     Object* next;
-    while (SafeIsMap(current)) {
+    while (current->IsMap()) {
       next = current->prototype();
       // There should never be a dead map above a live map.
-      ASSERT(on_dead_path || current->IsMarked());
+      MarkBit current_mark = Marking::MarkBitFrom(current);
+      bool is_alive = current_mark.Get();
+      ASSERT(on_dead_path || is_alive);
 
       // A live map above a dead map indicates a dead transition.
       // This test will always be false on the first iteration.
-      if (on_dead_path && current->IsMarked()) {
+      if (on_dead_path && is_alive) {
         on_dead_path = false;
         current->ClearNonLiveTransitions(heap(), real_prototype);
       }
       *HeapObject::RawField(current, Map::kPrototypeOffset) =
           real_prototype;
+
+      if (is_alive) {
+        Object** slot = HeapObject::RawField(current, Map::kPrototypeOffset);
+        RecordSlot(slot, slot, real_prototype);
+      }
       current = reinterpret_cast<Map*>(next);
     }
   }
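
The prototype-transition cleanup above is built around the usual keep-if-live, copy-down compaction of paired entries. The following self-contained sketch shows that general pattern with illustrative types, not the V8 data layout; the `live` flag stands in for the mark bits consulted by IsMarked().

    #include <cstdio>

    // One (prototype, map) transition entry.
    struct Entry { int prototype; int cached_map; bool live; };

    // Copy surviving pairs down over the slots of dead ones, preserving
    // order; the return value is the new length.
    static int Compact(Entry* entries, int count) {
      int kept = 0;
      for (int i = 0; i < count; i++) {
        if (!entries[i].live) continue;
        if (kept != i) entries[kept] = entries[i];
        kept++;
      }
      return kept;
    }

    int main() {
      Entry transitions[] = { {1, 10, true}, {2, 20, false}, {3, 30, true} };
      int n = Compact(transitions, 3);
      std::printf("%d entries kept, maps %d and %d\n",
                  n, transitions[0].cached_map, transitions[1].cached_map);
      return 0;
    }
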
@@ -1783,13 +2328,13 @@
 void MarkCompactCollector::ProcessWeakMaps() {
   Object* weak_map_obj = encountered_weak_maps();
   while (weak_map_obj != Smi::FromInt(0)) {
-    ASSERT(HeapObject::cast(weak_map_obj)->IsMarked());
+    ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
     JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
     ObjectHashTable* table = weak_map->unchecked_table();
     for (int i = 0; i < table->Capacity(); i++) {
-      if (HeapObject::cast(table->KeyAt(i))->IsMarked()) {
+      if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
         Object* value = table->get(table->EntryToValueIndex(i));
-        StaticMarkingVisitor::MarkObjectByPointer(heap(), &value);
+        StaticMarkingVisitor::VisitPointer(heap(), &value);
         table->set_unchecked(heap(),
                              table->EntryToValueIndex(i),
                              value,
@@ -1804,11 +2349,11 @@
 void MarkCompactCollector::ClearWeakMaps() {
   Object* weak_map_obj = encountered_weak_maps();
   while (weak_map_obj != Smi::FromInt(0)) {
-    ASSERT(HeapObject::cast(weak_map_obj)->IsMarked());
+    ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
     JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
     ObjectHashTable* table = weak_map->unchecked_table();
     for (int i = 0; i < table->Capacity(); i++) {
-      if (!HeapObject::cast(table->KeyAt(i))->IsMarked()) {
+      if (!MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
         table->RemoveEntry(i, heap());
       }
     }
@@ -1818,316 +2363,94 @@
   set_encountered_weak_maps(Smi::FromInt(0));
 }
 
-// -------------------------------------------------------------------------
-// Phase 2: Encode forwarding addresses.
-// When compacting, forwarding addresses for objects in old space and map
-// space are encoded in their map pointer word (along with an encoding of
-// their map pointers).
-//
-// The excact encoding is described in the comments for class MapWord in
-// objects.h.
-//
-// An address range [start, end) can have both live and non-live objects.
-// Maximal non-live regions are marked so they can be skipped on subsequent
-// sweeps of the heap.  A distinguished map-pointer encoding is used to mark
-// free regions of one-word size (in which case the next word is the start
-// of a live object).  A second distinguished map-pointer encoding is used
-// to mark free regions larger than one word, and the size of the free
-// region (including the first word) is written to the second word of the
-// region.
-//
-// Any valid map page offset must lie in the object area of the page, so map
-// page offsets less than Page::kObjectStartOffset are invalid.  We use a
-// pair of distinguished invalid map encodings (for single word and multiple
-// words) to indicate free regions in the page found during computation of
-// forwarding addresses and skipped over in subsequent sweeps.
-
-
-// Encode a free region, defined by the given start address and size, in the
-// first word or two of the region.
-void EncodeFreeRegion(Address free_start, int free_size) {
-  ASSERT(free_size >= kIntSize);
-  if (free_size == kIntSize) {
-    Memory::uint32_at(free_start) = MarkCompactCollector::kSingleFreeEncoding;
-  } else {
-    ASSERT(free_size >= 2 * kIntSize);
-    Memory::uint32_at(free_start) = MarkCompactCollector::kMultiFreeEncoding;
-    Memory::int_at(free_start + kIntSize) = free_size;
-  }
-
-#ifdef DEBUG
-  // Zap the body of the free region.
-  if (FLAG_enable_slow_asserts) {
-    for (int offset = 2 * kIntSize;
-         offset < free_size;
-         offset += kPointerSize) {
-      Memory::Address_at(free_start + offset) = kZapValue;
-    }
-  }
-#endif
-}
-
-
-// Try to promote all objects in new space.  Heap numbers and sequential
-// strings are promoted to the code space, large objects to large object space,
-// and all others to the old space.
-inline MaybeObject* MCAllocateFromNewSpace(Heap* heap,
-                                           HeapObject* object,
-                                           int object_size) {
-  MaybeObject* forwarded;
-  if (object_size > heap->MaxObjectSizeInPagedSpace()) {
-    forwarded = Failure::Exception();
-  } else {
-    OldSpace* target_space = heap->TargetSpace(object);
-    ASSERT(target_space == heap->old_pointer_space() ||
-           target_space == heap->old_data_space());
-    forwarded = target_space->MCAllocateRaw(object_size);
-  }
-  Object* result;
-  if (!forwarded->ToObject(&result)) {
-    result = heap->new_space()->MCAllocateRaw(object_size)->ToObjectUnchecked();
-  }
-  return result;
-}
-
-
-// Allocation functions for the paged spaces call the space's MCAllocateRaw.
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldPointerSpace(
-    Heap *heap,
-    HeapObject* ignore,
-    int object_size) {
-  return heap->old_pointer_space()->MCAllocateRaw(object_size);
-}
-
-
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldDataSpace(
-    Heap* heap,
-    HeapObject* ignore,
-    int object_size) {
-  return heap->old_data_space()->MCAllocateRaw(object_size);
-}
-
-
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromCodeSpace(
-    Heap* heap,
-    HeapObject* ignore,
-    int object_size) {
-  return heap->code_space()->MCAllocateRaw(object_size);
-}
-
-
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromMapSpace(
-    Heap* heap,
-    HeapObject* ignore,
-    int object_size) {
-  return heap->map_space()->MCAllocateRaw(object_size);
-}
-
-
-MUST_USE_RESULT inline MaybeObject* MCAllocateFromCellSpace(
-    Heap* heap, HeapObject* ignore, int object_size) {
-  return heap->cell_space()->MCAllocateRaw(object_size);
-}
-
-
-// The forwarding address is encoded at the same offset as the current
-// to-space object, but in from space.
-inline void EncodeForwardingAddressInNewSpace(Heap* heap,
-                                              HeapObject* old_object,
-                                              int object_size,
-                                              Object* new_object,
-                                              int* ignored) {
-  int offset =
-      heap->new_space()->ToSpaceOffsetForAddress(old_object->address());
-  Memory::Address_at(heap->new_space()->FromSpaceLow() + offset) =
-      HeapObject::cast(new_object)->address();
-}
-
-
-// The forwarding address is encoded in the map pointer of the object as an
-// offset (in terms of live bytes) from the address of the first live object
-// in the page.
-inline void EncodeForwardingAddressInPagedSpace(Heap* heap,
-                                                HeapObject* old_object,
-                                                int object_size,
-                                                Object* new_object,
-                                                int* offset) {
-  // Record the forwarding address of the first live object if necessary.
-  if (*offset == 0) {
-    Page::FromAddress(old_object->address())->mc_first_forwarded =
-        HeapObject::cast(new_object)->address();
-  }
-
-  MapWord encoding =
-      MapWord::EncodeAddress(old_object->map()->address(), *offset);
-  old_object->set_map_word(encoding);
-  *offset += object_size;
-  ASSERT(*offset <= Page::kObjectAreaSize);
-}
-
-
-// Most non-live objects are ignored.
-inline void IgnoreNonLiveObject(HeapObject* object, Isolate* isolate) {}
-
-
-// Function template that, given a range of addresses (eg, a semispace or a
-// paged space page), iterates through the objects in the range to clear
-// mark bits and compute and encode forwarding addresses.  As a side effect,
-// maximal free chunks are marked so that they can be skipped on subsequent
-// sweeps.
-//
-// The template parameters are an allocation function, a forwarding address
-// encoding function, and a function to process non-live objects.
-template<MarkCompactCollector::AllocationFunction Alloc,
-         MarkCompactCollector::EncodingFunction Encode,
-         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
-inline void EncodeForwardingAddressesInRange(MarkCompactCollector* collector,
-                                             Address start,
-                                             Address end,
-                                             int* offset) {
-  // The start address of the current free region while sweeping the space.
-  // This address is set when a transition from live to non-live objects is
-  // encountered.  A value (an encoding of the 'next free region' pointer)
-  // is written to memory at this address when a transition from non-live to
-  // live objects is encountered.
-  Address free_start = NULL;
-
-  // A flag giving the state of the previously swept object.  Initially true
-  // to ensure that free_start is initialized to a proper address before
-  // trying to write to it.
-  bool is_prev_alive = true;
-
-  int object_size;  // Will be set on each iteration of the loop.
-  for (Address current = start; current < end; current += object_size) {
-    HeapObject* object = HeapObject::FromAddress(current);
-    if (object->IsMarked()) {
-      object->ClearMark();
-      collector->tracer()->decrement_marked_count();
-      object_size = object->Size();
-
-      Object* forwarded =
-          Alloc(collector->heap(), object, object_size)->ToObjectUnchecked();
-      Encode(collector->heap(), object, object_size, forwarded, offset);
-
-#ifdef DEBUG
-      if (FLAG_gc_verbose) {
-        PrintF("forward %p -> %p.\n", object->address(),
-               HeapObject::cast(forwarded)->address());
-      }
-#endif
-      if (!is_prev_alive) {  // Transition from non-live to live.
-        EncodeFreeRegion(free_start, static_cast<int>(current - free_start));
-        is_prev_alive = true;
-      }
-    } else {  // Non-live object.
-      object_size = object->Size();
-      ProcessNonLive(object, collector->heap()->isolate());
-      if (is_prev_alive) {  // Transition from live to non-live.
-        free_start = current;
-        is_prev_alive = false;
-      }
-      LiveObjectList::ProcessNonLive(object);
-    }
-  }
-
-  // If we ended on a free region, mark it.
-  if (!is_prev_alive) {
-    EncodeFreeRegion(free_start, static_cast<int>(end - free_start));
-  }
-}
-
-
-// Functions to encode the forwarding pointers in each compactable space.
-void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() {
-  int ignored;
-  EncodeForwardingAddressesInRange<MCAllocateFromNewSpace,
-                                   EncodeForwardingAddressInNewSpace,
-                                   IgnoreNonLiveObject>(
-      this,
-      heap()->new_space()->bottom(),
-      heap()->new_space()->top(),
-      &ignored);
-}
-
-
-template<MarkCompactCollector::AllocationFunction Alloc,
-         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
-void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
-    PagedSpace* space) {
-  PageIterator it(space, PageIterator::PAGES_IN_USE);
-  while (it.has_next()) {
-    Page* p = it.next();
-
-    // The offset of each live object in the page from the first live object
-    // in the page.
-    int offset = 0;
-    EncodeForwardingAddressesInRange<Alloc,
-                                     EncodeForwardingAddressInPagedSpace,
-                                     ProcessNonLive>(
-        this,
-        p->ObjectAreaStart(),
-        p->AllocationTop(),
-        &offset);
-  }
-}
-
 
 // We scavenge new space simultaneously with sweeping. This is done in two
 // passes.
+//
 // The first pass migrates all alive objects from one semispace to another or
-// promotes them to old space. Forwading address is written directly into
-// first word of object without any encoding. If object is dead we are writing
+// promotes them to old space.  The forwarding address is written directly
+// into the first word of the object without any encoding.  If the object is
+// dead we write NULL as the forwarding address.
-// The second pass updates pointers to new space in all spaces. It is possible
-// to encounter pointers to dead objects during traversal of dirty regions we
-// should clear them to avoid encountering them during next dirty regions
-// iteration.
-static void MigrateObject(Heap* heap,
-                          Address dst,
-                          Address src,
-                          int size,
-                          bool to_old_space) {
-  if (to_old_space) {
-    heap->CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
-  } else {
-    heap->CopyBlock(dst, src, size);
-  }
+//
+// The second pass updates pointers to new space in all spaces.  It is possible
+// to encounter pointers to dead new space objects during traversal of pointers
+// to new space.  We should clear them to avoid encountering them during the
+// next pointer iteration.  This is an issue if the store buffer overflows and
+// we have to scan the entire old space, including dead objects, looking for
+// pointers to new space.
+void MarkCompactCollector::MigrateObject(Address dst,
+                                         Address src,
+                                         int size,
+                                         AllocationSpace dest) {
+  HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst));
+  if (dest == OLD_POINTER_SPACE || dest == LO_SPACE) {
+    Address src_slot = src;
+    Address dst_slot = dst;
+    ASSERT(IsAligned(size, kPointerSize));
 
+    for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
+      Object* value = Memory::Object_at(src_slot);
+
+      Memory::Object_at(dst_slot) = value;
+
+      if (heap_->InNewSpace(value)) {
+        heap_->store_buffer()->Mark(dst_slot);
+      } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
+        SlotsBuffer::AddTo(&slots_buffer_allocator_,
+                           &migration_slots_buffer_,
+                           reinterpret_cast<Object**>(dst_slot),
+                           SlotsBuffer::IGNORE_OVERFLOW);
+      }
+
+      src_slot += kPointerSize;
+      dst_slot += kPointerSize;
+    }
+
+    if (compacting_ && HeapObject::FromAddress(dst)->IsJSFunction()) {
+      Address code_entry_slot = dst + JSFunction::kCodeEntryOffset;
+      Address code_entry = Memory::Address_at(code_entry_slot);
+
+      if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
+        SlotsBuffer::AddTo(&slots_buffer_allocator_,
+                           &migration_slots_buffer_,
+                           SlotsBuffer::CODE_ENTRY_SLOT,
+                           code_entry_slot,
+                           SlotsBuffer::IGNORE_OVERFLOW);
+      }
+    }
+  } else if (dest == CODE_SPACE) {
+    PROFILE(heap()->isolate(), CodeMoveEvent(src, dst));
+    heap()->MoveBlock(dst, src, size);
+    SlotsBuffer::AddTo(&slots_buffer_allocator_,
+                       &migration_slots_buffer_,
+                       SlotsBuffer::RELOCATED_CODE_OBJECT,
+                       dst,
+                       SlotsBuffer::IGNORE_OVERFLOW);
+    Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src);
+  } else {
+    ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
+    heap()->MoveBlock(dst, src, size);
+  }
   Memory::Address_at(src) = dst;
 }
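
As a minimal illustration of the forwarding convention described in the comment above (plain arrays standing in for heap objects; not code from this patch): the first word of the evacuated copy holds either the new address or NULL for a dead object, and a later pass rewrites or clears stale pointers from it.

    #include <cstdio>

    int main() {
      // A survivor's old copy keeps the new address in its first word;
      // a dead object keeps NULL there.
      void* new_copy[4] = {0};
      void* old_live[4] = {0};
      void* old_dead[4] = {0};
      old_live[0] = new_copy;   // forwarding address
      old_dead[0] = nullptr;    // dead: nothing to forward to

      // A later pass updates stale pointers by reading that first word,
      // clearing pointers whose target died.
      void* slots[2] = { old_live, old_dead };
      for (int i = 0; i < 2; i++) {
        slots[i] = static_cast<void**>(slots[i])[0];
      }
      std::printf("%p %p\n", slots[0], slots[1]);  // forwarded address, then null
      return 0;
    }
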
 
 
-class StaticPointersToNewGenUpdatingVisitor : public
-  StaticNewSpaceVisitor<StaticPointersToNewGenUpdatingVisitor> {
- public:
-  static inline void VisitPointer(Heap* heap, Object** p) {
-    if (!(*p)->IsHeapObject()) return;
-
-    HeapObject* obj = HeapObject::cast(*p);
-    Address old_addr = obj->address();
-
-    if (heap->new_space()->Contains(obj)) {
-      ASSERT(heap->InFromSpace(*p));
-      *p = HeapObject::FromAddress(Memory::Address_at(old_addr));
-    }
-  }
-};
-
-
 // Visitor for updating pointers from live objects in old spaces to new space.
 // It does not expect to encounter pointers to dead objects.
-class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
+class PointersUpdatingVisitor: public ObjectVisitor {
  public:
-  explicit PointersToNewGenUpdatingVisitor(Heap* heap) : heap_(heap) { }
+  explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { }
 
   void VisitPointer(Object** p) {
-    StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
+    UpdatePointer(p);
   }
 
   void VisitPointers(Object** start, Object** end) {
-    for (Object** p = start; p < end; p++) {
-      StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
-    }
+    for (Object** p = start; p < end; p++) UpdatePointer(p);
+  }
+
+  void VisitEmbeddedPointer(Code* host, Object** p) {
+    UpdatePointer(p);
   }
 
   void VisitCodeTarget(RelocInfo* rinfo) {
@@ -2147,68 +2470,96 @@
     rinfo->set_call_address(Code::cast(target)->instruction_start());
   }
 
+  static inline void UpdateSlot(Heap* heap, Object** slot) {
+    Object* obj = *slot;
+
+    if (!obj->IsHeapObject()) return;
+
+    HeapObject* heap_obj = HeapObject::cast(obj);
+
+    MapWord map_word = heap_obj->map_word();
+    if (map_word.IsForwardingAddress()) {
+      ASSERT(heap->InFromSpace(heap_obj) ||
+             MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
+      HeapObject* target = map_word.ToForwardingAddress();
+      *slot = target;
+      ASSERT(!heap->InFromSpace(target) &&
+             !MarkCompactCollector::IsOnEvacuationCandidate(target));
+    }
+  }
+
  private:
+  inline void UpdatePointer(Object** p) {
+    UpdateSlot(heap_, p);
+  }
+
   Heap* heap_;
 };
 
 
-// Visitor for updating pointers from live objects in old spaces to new space.
-// It can encounter pointers to dead objects in new space when traversing map
-// space (see comment for MigrateObject).
-static void UpdatePointerToNewGen(HeapObject** p) {
-  if (!(*p)->IsHeapObject()) return;
+static void UpdatePointer(HeapObject** p, HeapObject* object) {
+  ASSERT(*p == object);
 
-  Address old_addr = (*p)->address();
-  ASSERT(HEAP->InFromSpace(*p));
+  Address old_addr = object->address();
 
   Address new_addr = Memory::Address_at(old_addr);
 
-  if (new_addr == NULL) {
-    // We encountered pointer to a dead object. Clear it so we will
-    // not visit it again during next iteration of dirty regions.
-    *p = NULL;
-  } else {
+  // The new space sweep will overwrite the map word of dead objects
+  // with NULL. In this case we do not need to transfer this entry to
+  // the store buffer which we are rebuilding.
+  if (new_addr != NULL) {
     *p = HeapObject::FromAddress(new_addr);
+  } else {
+    // We have to zap this pointer, because the store buffer may overflow
+    // later and force us to scan the entire heap; we don't want to find
+    // spurious new space pointers in the old space then.
+    *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0));
   }
 }
 
 
-static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
-                                                                 Object** p) {
-  Address old_addr = HeapObject::cast(*p)->address();
-  Address new_addr = Memory::Address_at(old_addr);
-  return String::cast(HeapObject::FromAddress(new_addr));
+static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
+                                                         Object** p) {
+  MapWord map_word = HeapObject::cast(*p)->map_word();
+
+  if (map_word.IsForwardingAddress()) {
+    return String::cast(map_word.ToForwardingAddress());
+  }
+
+  return String::cast(*p);
 }
 
 
-static bool TryPromoteObject(Heap* heap, HeapObject* object, int object_size) {
+bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
+                                            int object_size) {
   Object* result;
 
-  if (object_size > heap->MaxObjectSizeInPagedSpace()) {
+  if (object_size > heap()->MaxObjectSizeInPagedSpace()) {
     MaybeObject* maybe_result =
-        heap->lo_space()->AllocateRawFixedArray(object_size);
+        heap()->lo_space()->AllocateRaw(object_size, NOT_EXECUTABLE);
     if (maybe_result->ToObject(&result)) {
       HeapObject* target = HeapObject::cast(result);
-      MigrateObject(heap, target->address(), object->address(), object_size,
-                    true);
-      heap->mark_compact_collector()->tracer()->
+      MigrateObject(target->address(),
+                    object->address(),
+                    object_size,
+                    LO_SPACE);
+      heap()->mark_compact_collector()->tracer()->
           increment_promoted_objects_size(object_size);
       return true;
     }
   } else {
-    OldSpace* target_space = heap->TargetSpace(object);
+    OldSpace* target_space = heap()->TargetSpace(object);
 
-    ASSERT(target_space == heap->old_pointer_space() ||
-           target_space == heap->old_data_space());
+    ASSERT(target_space == heap()->old_pointer_space() ||
+           target_space == heap()->old_data_space());
     MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
     if (maybe_result->ToObject(&result)) {
       HeapObject* target = HeapObject::cast(result);
-      MigrateObject(heap,
-                    target->address(),
+      MigrateObject(target->address(),
                     object->address(),
                     object_size,
-                    target_space == heap->old_pointer_space());
-      heap->mark_compact_collector()->tracer()->
+                    target_space->identity());
+      heap()->mark_compact_collector()->tracer()->
           increment_promoted_objects_size(object_size);
       return true;
     }
@@ -2218,84 +2569,524 @@
 }
 
 
-static void SweepNewSpace(Heap* heap, NewSpace* space) {
-  heap->CheckNewSpaceExpansionCriteria();
+void MarkCompactCollector::EvacuateNewSpace() {
+  heap()->CheckNewSpaceExpansionCriteria();
 
-  Address from_bottom = space->bottom();
-  Address from_top = space->top();
+  NewSpace* new_space = heap()->new_space();
+
+  // Store allocation range before flipping semispaces.
+  Address from_bottom = new_space->bottom();
+  Address from_top = new_space->top();
 
   // Flip the semispaces.  After flipping, to space is empty, from space has
   // live objects.
-  space->Flip();
-  space->ResetAllocationInfo();
+  new_space->Flip();
+  new_space->ResetAllocationInfo();
 
-  int size = 0;
   int survivors_size = 0;
 
   // First pass: traverse all objects in inactive semispace, remove marks,
-  // migrate live objects and write forwarding addresses.
-  for (Address current = from_bottom; current < from_top; current += size) {
-    HeapObject* object = HeapObject::FromAddress(current);
-
-    if (object->IsMarked()) {
-      object->ClearMark();
-      heap->mark_compact_collector()->tracer()->decrement_marked_count();
-
-      size = object->Size();
+  // migrate live objects and write forwarding addresses.  This stage puts
+  // new entries in the store buffer and may cause some pages to be marked
+  // scan-on-scavenge.
+  SemiSpaceIterator from_it(from_bottom, from_top);
+  for (HeapObject* object = from_it.Next();
+       object != NULL;
+       object = from_it.Next()) {
+    MarkBit mark_bit = Marking::MarkBitFrom(object);
+    if (mark_bit.Get()) {
+      mark_bit.Clear();
+      // Don't bother decrementing live bytes count. We'll discard the
+      // entire page at the end.
+      int size = object->Size();
       survivors_size += size;
 
       // Aggressively promote young survivors to the old space.
-      if (TryPromoteObject(heap, object, size)) {
+      if (TryPromoteObject(object, size)) {
         continue;
       }
 
       // Promotion failed. Just migrate object to another semispace.
-      // Allocation cannot fail at this point: semispaces are of equal size.
-      Object* target = space->AllocateRaw(size)->ToObjectUnchecked();
+      MaybeObject* allocation = new_space->AllocateRaw(size);
+      if (allocation->IsFailure()) {
+        if (!new_space->AddFreshPage()) {
+          // Shouldn't happen. We are sweeping linearly, and to-space
+          // has the same number of pages as from-space, so there is
+          // always room.
+          UNREACHABLE();
+        }
+        allocation = new_space->AllocateRaw(size);
+        ASSERT(!allocation->IsFailure());
+      }
+      Object* target = allocation->ToObjectUnchecked();
 
-      MigrateObject(heap,
-                    HeapObject::cast(target)->address(),
-                    current,
+      MigrateObject(HeapObject::cast(target)->address(),
+                    object->address(),
                     size,
-                    false);
+                    NEW_SPACE);
     } else {
       // Process the dead object before we write a NULL into its header.
       LiveObjectList::ProcessNonLive(object);
 
-      size = object->Size();
-      Memory::Address_at(current) = NULL;
+      // Mark dead objects in the new space with null in their map field.
+      Memory::Address_at(object->address()) = NULL;
     }
   }
 
+  heap_->IncrementYoungSurvivorsCounter(survivors_size);
+  new_space->set_age_mark(new_space->top());
+}
+
+
+void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
+  AlwaysAllocateScope always_allocate;
+  PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+  ASSERT(p->IsEvacuationCandidate() && !p->WasSwept());
+  MarkBit::CellType* cells = p->markbits()->cells();
+  p->MarkSweptPrecisely();
+
+  int last_cell_index =
+      Bitmap::IndexToCell(
+          Bitmap::CellAlignIndex(
+              p->AddressToMarkbitIndex(p->ObjectAreaEnd())));
+
+  int cell_index = Page::kFirstUsedCell;
+  Address cell_base = p->ObjectAreaStart();
+  int offsets[16];
+
+  for (cell_index = Page::kFirstUsedCell;
+       cell_index < last_cell_index;
+       cell_index++, cell_base += 32 * kPointerSize) {
+    ASSERT((unsigned)cell_index ==
+        Bitmap::IndexToCell(
+            Bitmap::CellAlignIndex(
+                p->AddressToMarkbitIndex(cell_base))));
+    if (cells[cell_index] == 0) continue;
+
+    int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
+    for (int i = 0; i < live_objects; i++) {
+      Address object_addr = cell_base + offsets[i] * kPointerSize;
+      HeapObject* object = HeapObject::FromAddress(object_addr);
+      ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
+
+      int size = object->Size();
+
+      MaybeObject* target = space->AllocateRaw(size);
+      if (target->IsFailure()) {
+        // OS refused to give us memory.
+        V8::FatalProcessOutOfMemory("Evacuation");
+        return;
+      }
+
+      Object* target_object = target->ToObjectUnchecked();
+
+      MigrateObject(HeapObject::cast(target_object)->address(),
+                    object_addr,
+                    size,
+                    space->identity());
+      ASSERT(object->map_word().IsForwardingAddress());
+    }
+
+    // Clear marking bits for current cell.
+    cells[cell_index] = 0;
+  }
+  p->ResetLiveBytes();
+}
+
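
A standalone sketch of the address arithmetic used above, under the assumption of an 8-byte pointer size and a hypothetical object-area start address: each 32-bit mark-bit cell covers 32 words, so a start bit `offset` in cell `cell_index` corresponds to area_start + (cell_index * 32 + offset) * kPointerSize.

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t kWordSize = 8;      // assumption: 64-bit build
      uintptr_t area_start = 0x10000;     // hypothetical object area start
      int cell_index = 2;                 // third 32-bit mark-bit cell
      int offset = 5;                     // bit 5 is set within that cell

      // Each cell covers 32 words, so the object starts 2*32 + 5 = 69 words
      // past the start of the object area.
      uintptr_t object_addr =
          area_start + (cell_index * 32 + offset) * kWordSize;
      std::printf("0x%lx\n", static_cast<unsigned long>(object_addr));  // 0x10228
      return 0;
    }
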
+
+void MarkCompactCollector::EvacuatePages() {
+  int npages = evacuation_candidates_.length();
+  for (int i = 0; i < npages; i++) {
+    Page* p = evacuation_candidates_[i];
+    ASSERT(p->IsEvacuationCandidate() ||
+           p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
+    if (p->IsEvacuationCandidate()) {
+      // During compaction we might have to request a new page.
+      // Check that the space still has room for that.
+      if (static_cast<PagedSpace*>(p->owner())->CanExpand()) {
+        EvacuateLiveObjectsFromPage(p);
+      } else {
+        // Without room for expansion evacuation is not guaranteed to succeed.
+        // Pessimistically abandon unevacuated pages.
+        for (int j = i; j < npages; j++) {
+          Page* page = evacuation_candidates_[j];
+          slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
+          page->ClearEvacuationCandidate();
+          page->SetFlag(Page::RESCAN_ON_EVACUATION);
+        }
+        return;
+      }
+    }
+  }
+}
+
+
+class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
+ public:
+  virtual Object* RetainAs(Object* object) {
+    if (object->IsHeapObject()) {
+      HeapObject* heap_object = HeapObject::cast(object);
+      MapWord map_word = heap_object->map_word();
+      if (map_word.IsForwardingAddress()) {
+        return map_word.ToForwardingAddress();
+      }
+    }
+    return object;
+  }
+};
+
+
+static inline void UpdateSlot(ObjectVisitor* v,
+                              SlotsBuffer::SlotType slot_type,
+                              Address addr) {
+  switch (slot_type) {
+    case SlotsBuffer::CODE_TARGET_SLOT: {
+      RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
+      rinfo.Visit(v);
+      break;
+    }
+    case SlotsBuffer::CODE_ENTRY_SLOT: {
+      v->VisitCodeEntry(addr);
+      break;
+    }
+    case SlotsBuffer::RELOCATED_CODE_OBJECT: {
+      HeapObject* obj = HeapObject::FromAddress(addr);
+      Code::cast(obj)->CodeIterateBody(v);
+      break;
+    }
+    case SlotsBuffer::DEBUG_TARGET_SLOT: {
+      RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
+      if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(v);
+      break;
+    }
+    case SlotsBuffer::JS_RETURN_SLOT: {
+      RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
+      if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(v);
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+enum SweepingMode {
+  SWEEP_ONLY,
+  SWEEP_AND_VISIT_LIVE_OBJECTS
+};
+
+
+enum SkipListRebuildingMode {
+  REBUILD_SKIP_LIST,
+  IGNORE_SKIP_LIST
+};
+
+
+// Sweep a space precisely.  After this has been done the space can
+// be iterated precisely, hitting only the live objects.  Code space
+// is always swept precisely because we want to be able to iterate
+// over it.  Map space is swept precisely, because it is not compacted.
+// Slots in live objects pointing into evacuation candidates are updated
+// if requested.
+template<SweepingMode sweeping_mode, SkipListRebuildingMode skip_list_mode>
+static void SweepPrecisely(PagedSpace* space,
+                           Page* p,
+                           ObjectVisitor* v) {
+  ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
+  ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST,
+            space->identity() == CODE_SPACE);
+  ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
+
+  MarkBit::CellType* cells = p->markbits()->cells();
+  p->MarkSweptPrecisely();
+
+  int last_cell_index =
+      Bitmap::IndexToCell(
+          Bitmap::CellAlignIndex(
+              p->AddressToMarkbitIndex(p->ObjectAreaEnd())));
+
+  int cell_index = Page::kFirstUsedCell;
+  Address free_start = p->ObjectAreaStart();
+  ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
+  Address object_address = p->ObjectAreaStart();
+  int offsets[16];
+
+  SkipList* skip_list = p->skip_list();
+  int curr_region = -1;
+  if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
+    skip_list->Clear();
+  }
+
+  for (cell_index = Page::kFirstUsedCell;
+       cell_index < last_cell_index;
+       cell_index++, object_address += 32 * kPointerSize) {
+    ASSERT((unsigned)cell_index ==
+        Bitmap::IndexToCell(
+            Bitmap::CellAlignIndex(
+                p->AddressToMarkbitIndex(object_address))));
+    int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
+    int live_index = 0;
+    for ( ; live_objects != 0; live_objects--) {
+      Address free_end = object_address + offsets[live_index++] * kPointerSize;
+      if (free_end != free_start) {
+        space->Free(free_start, static_cast<int>(free_end - free_start));
+      }
+      HeapObject* live_object = HeapObject::FromAddress(free_end);
+      ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
+      Map* map = live_object->map();
+      int size = live_object->SizeFromMap(map);
+      if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
+        live_object->IterateBody(map->instance_type(), size, v);
+      }
+      if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
+        int new_region_start =
+            SkipList::RegionNumber(free_end);
+        int new_region_end =
+            SkipList::RegionNumber(free_end + size - kPointerSize);
+        if (new_region_start != curr_region ||
+            new_region_end != curr_region) {
+          skip_list->AddObject(free_end, size);
+          curr_region = new_region_end;
+        }
+      }
+      free_start = free_end + size;
+    }
+    // Clear marking bits for current cell.
+    cells[cell_index] = 0;
+  }
+  if (free_start != p->ObjectAreaEnd()) {
+    space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start));
+  }
+  p->ResetLiveBytes();
+}
+
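
Stripped of mark bitmaps and skip lists, the precise sweep above reduces to walking live object starts in address order and freeing the gaps between them. A self-contained sketch with illustrative word offsets (not the V8 representation):

    #include <cstdio>

    struct Live { int start; int size; };  // word offsets within the page area

    // Walk live objects in address order and report the gaps between them,
    // which is what the sweep hands to space->Free().
    static void SweepGaps(const Live* live, int count, int area_end) {
      int free_start = 0;
      for (int i = 0; i < count; i++) {
        if (live[i].start != free_start)
          std::printf("free [%d, %d)\n", free_start, live[i].start);
        free_start = live[i].start + live[i].size;
      }
      if (free_start != area_end)
        std::printf("free [%d, %d)\n", free_start, area_end);
    }

    int main() {
      Live live[] = { {0, 2}, {5, 3}, {12, 4} };
      SweepGaps(live, 3, 20);  // free [2, 5), free [8, 12), free [16, 20)
      return 0;
    }
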
+
+static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
+  Page* p = Page::FromAddress(code->address());
+
+  if (p->IsEvacuationCandidate() ||
+      p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+    return false;
+  }
+
+  Address code_start = code->address();
+  Address code_end = code_start + code->Size();
+
+  uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
+  uint32_t end_index =
+      MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
+
+  Bitmap* b = p->markbits();
+
+  MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
+  MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
+
+  MarkBit::CellType* start_cell = start_mark_bit.cell();
+  MarkBit::CellType* end_cell = end_mark_bit.cell();
+
+  if (value) {
+    MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
+    MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
+
+    if (start_cell == end_cell) {
+      *start_cell |= start_mask & end_mask;
+    } else {
+      *start_cell |= start_mask;
+      for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
+        *cell = ~0;
+      }
+      *end_cell |= end_mask;
+    }
+  } else {
+    for (MarkBit::CellType* cell = start_cell ; cell <= end_cell; cell++) {
+      *cell = 0;
+    }
+  }
+
+  return true;
+}
+
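
The start/end mask arithmetic above can be checked in isolation. The following standalone sketch uses example bit positions (an object covered by bits 3 through 5 of one mark-bit cell) rather than real heap addresses:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Suppose the code object's first word is covered by bit 3 and its
      // last word by bit 5 of the same 32-bit mark-bit cell.
      uint32_t start_bit_mask = 1u << 3;
      uint32_t end_bit_mask = 1u << 5;

      uint32_t start_mask = ~(start_bit_mask - 1);  // bits 3..31 set
      uint32_t end_mask = (end_bit_mask << 1) - 1;  // bits 0..5 set

      // Intersection covers exactly the words 3..5 occupied by the object.
      std::printf("0x%x\n", static_cast<unsigned>(start_mask & end_mask));  // 0x38
      return 0;
    }
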
+
+static bool IsOnInvalidatedCodeObject(Address addr) {
+  // We did not record any slots in large objects thus
+  // we can safely go to the page from the slot address.
+  Page* p = Page::FromAddress(addr);
+
+  // First check owner's identity because old pointer and old data spaces
+  // are swept lazily and might still have non-zero mark-bits on some
+  // pages.
+  if (p->owner()->identity() != CODE_SPACE) return false;
+
+  // In code space the only non-zero mark bits are those on evacuation
+  // candidates (where we do not record any slots) and those under
+  // invalidated code objects.
+  MarkBit mark_bit =
+      p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
+
+  return mark_bit.Get();
+}
+
+
+void MarkCompactCollector::InvalidateCode(Code* code) {
+  if (heap_->incremental_marking()->IsCompacting() &&
+      !ShouldSkipEvacuationSlotRecording(code)) {
+    ASSERT(compacting_);
+
+    // If the object is white then no slots were recorded on it yet.
+    MarkBit mark_bit = Marking::MarkBitFrom(code);
+    if (Marking::IsWhite(mark_bit)) return;
+
+    invalidated_code_.Add(code);
+  }
+}
+
+
+bool MarkCompactCollector::MarkInvalidatedCode() {
+  bool code_marked = false;
+
+  int length = invalidated_code_.length();
+  for (int i = 0; i < length; i++) {
+    Code* code = invalidated_code_[i];
+
+    if (SetMarkBitsUnderInvalidatedCode(code, true)) {
+      code_marked = true;
+    }
+  }
+
+  return code_marked;
+}
+
+
+void MarkCompactCollector::RemoveDeadInvalidatedCode() {
+  int length = invalidated_code_.length();
+  for (int i = 0; i < length; i++) {
+    if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
+  }
+}
+
+
+void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
+  int length = invalidated_code_.length();
+  for (int i = 0; i < length; i++) {
+    Code* code = invalidated_code_[i];
+    if (code != NULL) {
+      code->Iterate(visitor);
+      SetMarkBitsUnderInvalidatedCode(code, false);
+    }
+  }
+  invalidated_code_.Rewind(0);
+}
+
+
+void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
+  bool code_slots_filtering_required = MarkInvalidatedCode();
+
+  EvacuateNewSpace();
+  EvacuatePages();
+
   // Second pass: find pointers to new space and update them.
-  PointersToNewGenUpdatingVisitor updating_visitor(heap);
+  PointersUpdatingVisitor updating_visitor(heap());
 
   // Update pointers in to space.
-  Address current = space->bottom();
-  while (current < space->top()) {
-    HeapObject* object = HeapObject::FromAddress(current);
-    current +=
-        StaticPointersToNewGenUpdatingVisitor::IterateBody(object->map(),
-                                                           object);
+  SemiSpaceIterator to_it(heap()->new_space()->bottom(),
+                          heap()->new_space()->top());
+  for (HeapObject* object = to_it.Next();
+       object != NULL;
+       object = to_it.Next()) {
+    Map* map = object->map();
+    object->IterateBody(map->instance_type(),
+                        object->SizeFromMap(map),
+                        &updating_visitor);
   }
 
   // Update roots.
-  heap->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
+  heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
   LiveObjectList::IterateElements(&updating_visitor);
 
-  // Update pointers in old spaces.
-  heap->IterateDirtyRegions(heap->old_pointer_space(),
-                            &Heap::IteratePointersInDirtyRegion,
-                            &UpdatePointerToNewGen,
-                            heap->WATERMARK_SHOULD_BE_VALID);
+  {
+    StoreBufferRebuildScope scope(heap_,
+                                  heap_->store_buffer(),
+                                  &Heap::ScavengeStoreBufferCallback);
+    heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
+  }
 
-  heap->lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);
+  SlotsBuffer::UpdateSlotsRecordedIn(heap_,
+                                     migration_slots_buffer_,
+                                     code_slots_filtering_required);
+  if (FLAG_trace_fragmentation) {
+    PrintF("  migration slots buffer: %d\n",
+           SlotsBuffer::SizeOfChain(migration_slots_buffer_));
+  }
+
+  if (compacting_ && was_marked_incrementally_) {
+    // It's difficult to filter out slots recorded for large objects.
+    LargeObjectIterator it(heap_->lo_space());
+    for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+      // LargeObjectSpace is not swept yet thus we have to skip
+      // dead objects explicitly.
+      if (!IsMarked(obj)) continue;
+
+      Page* p = Page::FromAddress(obj->address());
+      if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+        obj->Iterate(&updating_visitor);
+        p->ClearFlag(Page::RESCAN_ON_EVACUATION);
+      }
+    }
+  }
+
+  int npages = evacuation_candidates_.length();
+  for (int i = 0; i < npages; i++) {
+    Page* p = evacuation_candidates_[i];
+    ASSERT(p->IsEvacuationCandidate() ||
+           p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
+
+    if (p->IsEvacuationCandidate()) {
+      SlotsBuffer::UpdateSlotsRecordedIn(heap_,
+                                         p->slots_buffer(),
+                                         code_slots_filtering_required);
+      if (FLAG_trace_fragmentation) {
+        PrintF("  page %p slots buffer: %d\n",
+               reinterpret_cast<void*>(p),
+               SlotsBuffer::SizeOfChain(p->slots_buffer()));
+      }
+
+      // Important: the skip list should be cleared only after the roots were
+      // updated, because root iteration traverses the stack and might have to
+      // find code objects from a non-updated pc pointing into an evacuation
+      // candidate.
+      SkipList* list = p->skip_list();
+      if (list != NULL) list->Clear();
+    } else {
+      if (FLAG_gc_verbose) {
+        PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
+               reinterpret_cast<intptr_t>(p));
+      }
+      PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+      p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
+
+      switch (space->identity()) {
+        case OLD_DATA_SPACE:
+          SweepConservatively(space, p);
+          break;
+        case OLD_POINTER_SPACE:
+          SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
+              space, p, &updating_visitor);
+          break;
+        case CODE_SPACE:
+          SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>(
+              space, p, &updating_visitor);
+          break;
+        default:
+          UNREACHABLE();
+          break;
+      }
+    }
+  }
 
   // Update pointers from cells.
-  HeapObjectIterator cell_iterator(heap->cell_space());
-  for (HeapObject* cell = cell_iterator.next();
+  HeapObjectIterator cell_iterator(heap_->cell_space());
+  for (HeapObject* cell = cell_iterator.Next();
        cell != NULL;
-       cell = cell_iterator.next()) {
+       cell = cell_iterator.Next()) {
     if (cell->IsJSGlobalPropertyCell()) {
       Address value_address =
           reinterpret_cast<Address>(cell) +
@@ -2305,1020 +3096,542 @@
   }
 
   // Update pointer from the global contexts list.
-  updating_visitor.VisitPointer(heap->global_contexts_list_address());
+  updating_visitor.VisitPointer(heap_->global_contexts_list_address());
+
+  heap_->symbol_table()->Iterate(&updating_visitor);
 
   // Update pointers from external string table.
-  heap->UpdateNewSpaceReferencesInExternalStringTable(
-      &UpdateNewSpaceReferenceInExternalStringTableEntry);
-
-  // All pointers were updated. Update auxiliary allocation info.
-  heap->IncrementYoungSurvivorsCounter(survivors_size);
-  space->set_age_mark(space->top());
+  heap_->UpdateReferencesInExternalStringTable(
+      &UpdateReferenceInExternalStringTableEntry);
 
   // Update JSFunction pointers from the runtime profiler.
-  heap->isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
+  heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
+      &updating_visitor);
+
+  EvacuationWeakObjectRetainer evacuation_object_retainer;
+  heap()->ProcessWeakReferences(&evacuation_object_retainer);
+
+  // Visit invalidated code (we ignored all slots on it) and clear mark-bits
+  // under it.
+  ProcessInvalidatedCode(&updating_visitor);
+
+#ifdef DEBUG
+  if (FLAG_verify_heap) {
+    VerifyEvacuation(heap_);
+  }
+#endif
+
+  slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
+  ASSERT(migration_slots_buffer_ == NULL);
+  for (int i = 0; i < npages; i++) {
+    Page* p = evacuation_candidates_[i];
+    if (!p->IsEvacuationCandidate()) continue;
+    PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+    space->Free(p->ObjectAreaStart(), Page::kObjectAreaSize);
+    p->set_scan_on_scavenge(false);
+    slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
+    p->ClearEvacuationCandidate();
+  }
+  evacuation_candidates_.Rewind(0);
+  compacting_ = false;
 }
 
 
-static void SweepSpace(Heap* heap, PagedSpace* space) {
-  PageIterator it(space, PageIterator::PAGES_IN_USE);
+static const int kStartTableEntriesPerLine = 5;
+static const int kStartTableLines = 171;
+static const int kStartTableInvalidLine = 127;
+static const int kStartTableUnusedEntry = 126;
 
-  // During sweeping of paged space we are trying to find longest sequences
-  // of pages without live objects and free them (instead of putting them on
-  // the free list).
+#define _ kStartTableUnusedEntry
+#define X kStartTableInvalidLine
+// Mark-bit to object start offset table.
+//
+// A line is indexed by the mark bits of one byte.  The first number on
+// the line gives the number of live object starts for that byte and the
+// remaining numbers give the offsets (in words) of those object starts.
+//
+// Since objects are at least 2 words in size we don't have entries for two
+// consecutive 1 bits.  All byte values above 170 have at least 2 consecutive
+// set bits, so the table stops there.
+char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
+  0, _, _, _, _,  // 0
+  1, 0, _, _, _,  // 1
+  1, 1, _, _, _,  // 2
+  X, _, _, _, _,  // 3
+  1, 2, _, _, _,  // 4
+  2, 0, 2, _, _,  // 5
+  X, _, _, _, _,  // 6
+  X, _, _, _, _,  // 7
+  1, 3, _, _, _,  // 8
+  2, 0, 3, _, _,  // 9
+  2, 1, 3, _, _,  // 10
+  X, _, _, _, _,  // 11
+  X, _, _, _, _,  // 12
+  X, _, _, _, _,  // 13
+  X, _, _, _, _,  // 14
+  X, _, _, _, _,  // 15
+  1, 4, _, _, _,  // 16
+  2, 0, 4, _, _,  // 17
+  2, 1, 4, _, _,  // 18
+  X, _, _, _, _,  // 19
+  2, 2, 4, _, _,  // 20
+  3, 0, 2, 4, _,  // 21
+  X, _, _, _, _,  // 22
+  X, _, _, _, _,  // 23
+  X, _, _, _, _,  // 24
+  X, _, _, _, _,  // 25
+  X, _, _, _, _,  // 26
+  X, _, _, _, _,  // 27
+  X, _, _, _, _,  // 28
+  X, _, _, _, _,  // 29
+  X, _, _, _, _,  // 30
+  X, _, _, _, _,  // 31
+  1, 5, _, _, _,  // 32
+  2, 0, 5, _, _,  // 33
+  2, 1, 5, _, _,  // 34
+  X, _, _, _, _,  // 35
+  2, 2, 5, _, _,  // 36
+  3, 0, 2, 5, _,  // 37
+  X, _, _, _, _,  // 38
+  X, _, _, _, _,  // 39
+  2, 3, 5, _, _,  // 40
+  3, 0, 3, 5, _,  // 41
+  3, 1, 3, 5, _,  // 42
+  X, _, _, _, _,  // 43
+  X, _, _, _, _,  // 44
+  X, _, _, _, _,  // 45
+  X, _, _, _, _,  // 46
+  X, _, _, _, _,  // 47
+  X, _, _, _, _,  // 48
+  X, _, _, _, _,  // 49
+  X, _, _, _, _,  // 50
+  X, _, _, _, _,  // 51
+  X, _, _, _, _,  // 52
+  X, _, _, _, _,  // 53
+  X, _, _, _, _,  // 54
+  X, _, _, _, _,  // 55
+  X, _, _, _, _,  // 56
+  X, _, _, _, _,  // 57
+  X, _, _, _, _,  // 58
+  X, _, _, _, _,  // 59
+  X, _, _, _, _,  // 60
+  X, _, _, _, _,  // 61
+  X, _, _, _, _,  // 62
+  X, _, _, _, _,  // 63
+  1, 6, _, _, _,  // 64
+  2, 0, 6, _, _,  // 65
+  2, 1, 6, _, _,  // 66
+  X, _, _, _, _,  // 67
+  2, 2, 6, _, _,  // 68
+  3, 0, 2, 6, _,  // 69
+  X, _, _, _, _,  // 70
+  X, _, _, _, _,  // 71
+  2, 3, 6, _, _,  // 72
+  3, 0, 3, 6, _,  // 73
+  3, 1, 3, 6, _,  // 74
+  X, _, _, _, _,  // 75
+  X, _, _, _, _,  // 76
+  X, _, _, _, _,  // 77
+  X, _, _, _, _,  // 78
+  X, _, _, _, _,  // 79
+  2, 4, 6, _, _,  // 80
+  3, 0, 4, 6, _,  // 81
+  3, 1, 4, 6, _,  // 82
+  X, _, _, _, _,  // 83
+  3, 2, 4, 6, _,  // 84
+  4, 0, 2, 4, 6,  // 85
+  X, _, _, _, _,  // 86
+  X, _, _, _, _,  // 87
+  X, _, _, _, _,  // 88
+  X, _, _, _, _,  // 89
+  X, _, _, _, _,  // 90
+  X, _, _, _, _,  // 91
+  X, _, _, _, _,  // 92
+  X, _, _, _, _,  // 93
+  X, _, _, _, _,  // 94
+  X, _, _, _, _,  // 95
+  X, _, _, _, _,  // 96
+  X, _, _, _, _,  // 97
+  X, _, _, _, _,  // 98
+  X, _, _, _, _,  // 99
+  X, _, _, _, _,  // 100
+  X, _, _, _, _,  // 101
+  X, _, _, _, _,  // 102
+  X, _, _, _, _,  // 103
+  X, _, _, _, _,  // 104
+  X, _, _, _, _,  // 105
+  X, _, _, _, _,  // 106
+  X, _, _, _, _,  // 107
+  X, _, _, _, _,  // 108
+  X, _, _, _, _,  // 109
+  X, _, _, _, _,  // 110
+  X, _, _, _, _,  // 111
+  X, _, _, _, _,  // 112
+  X, _, _, _, _,  // 113
+  X, _, _, _, _,  // 114
+  X, _, _, _, _,  // 115
+  X, _, _, _, _,  // 116
+  X, _, _, _, _,  // 117
+  X, _, _, _, _,  // 118
+  X, _, _, _, _,  // 119
+  X, _, _, _, _,  // 120
+  X, _, _, _, _,  // 121
+  X, _, _, _, _,  // 122
+  X, _, _, _, _,  // 123
+  X, _, _, _, _,  // 124
+  X, _, _, _, _,  // 125
+  X, _, _, _, _,  // 126
+  X, _, _, _, _,  // 127
+  1, 7, _, _, _,  // 128
+  2, 0, 7, _, _,  // 129
+  2, 1, 7, _, _,  // 130
+  X, _, _, _, _,  // 131
+  2, 2, 7, _, _,  // 132
+  3, 0, 2, 7, _,  // 133
+  X, _, _, _, _,  // 134
+  X, _, _, _, _,  // 135
+  2, 3, 7, _, _,  // 136
+  3, 0, 3, 7, _,  // 137
+  3, 1, 3, 7, _,  // 138
+  X, _, _, _, _,  // 139
+  X, _, _, _, _,  // 140
+  X, _, _, _, _,  // 141
+  X, _, _, _, _,  // 142
+  X, _, _, _, _,  // 143
+  2, 4, 7, _, _,  // 144
+  3, 0, 4, 7, _,  // 145
+  3, 1, 4, 7, _,  // 146
+  X, _, _, _, _,  // 147
+  3, 2, 4, 7, _,  // 148
+  4, 0, 2, 4, 7,  // 149
+  X, _, _, _, _,  // 150
+  X, _, _, _, _,  // 151
+  X, _, _, _, _,  // 152
+  X, _, _, _, _,  // 153
+  X, _, _, _, _,  // 154
+  X, _, _, _, _,  // 155
+  X, _, _, _, _,  // 156
+  X, _, _, _, _,  // 157
+  X, _, _, _, _,  // 158
+  X, _, _, _, _,  // 159
+  2, 5, 7, _, _,  // 160
+  3, 0, 5, 7, _,  // 161
+  3, 1, 5, 7, _,  // 162
+  X, _, _, _, _,  // 163
+  3, 2, 5, 7, _,  // 164
+  4, 0, 2, 5, 7,  // 165
+  X, _, _, _, _,  // 166
+  X, _, _, _, _,  // 167
+  3, 3, 5, 7, _,  // 168
+  4, 0, 3, 5, 7,  // 169
+  4, 1, 3, 5, 7   // 170
+};
+#undef _
+#undef X
 
-  // Page preceding current.
-  Page* prev = Page::FromAddress(NULL);
 
-  // First empty page in a sequence.
-  Page* first_empty_page = Page::FromAddress(NULL);
+// Takes a word of mark bits.  Returns the number of objects that start in the
+// range.  Puts the offsets (in words) of those object starts into the supplied
+// array.
+static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
+  int objects = 0;
+  int offset = 0;
 
-  // Page preceding first empty page.
-  Page* prec_first_empty_page = Page::FromAddress(NULL);
+  // No consecutive 1 bits straddling a byte boundary (pairs within a byte are
+  // rejected by the start table lookup below).
+  ASSERT((mark_bits & 0x180) != 0x180);
+  ASSERT((mark_bits & 0x18000) != 0x18000);
+  ASSERT((mark_bits & 0x1800000) != 0x1800000);
 
-  // If last used page of space ends with a sequence of dead objects
-  // we can adjust allocation top instead of puting this free area into
-  // the free list. Thus during sweeping we keep track of such areas
-  // and defer their deallocation until the sweeping of the next page
-  // is done: if one of the next pages contains live objects we have
-  // to put such area into the free list.
-  Address last_free_start = NULL;
-  int last_free_size = 0;
+  while (mark_bits != 0) {
+    int byte = (mark_bits & 0xff);
+    mark_bits >>= 8;
+    if (byte != 0) {
+      ASSERT(byte < kStartTableLines);  // No consecutive 1 bits.
+      char* table = kStartTable + byte * kStartTableEntriesPerLine;
+      int objects_in_these_8_words = table[0];
+      ASSERT(objects_in_these_8_words != kStartTableInvalidLine);
+      ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine);
+      for (int i = 0; i < objects_in_these_8_words; i++) {
+        starts[objects++] = offset + table[1 + i];
+      }
+    }
+    offset += 8;
+  }
+  return objects;
+}
+
+
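+// Takes the address of the last 32-word block that contained a live object
+// and the mark bits of that block, and returns the exact address at which the
+// free region starts, i.e. the end of the last live object in the block.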
+static inline Address DigestFreeStart(Address approximate_free_start,
+                                      uint32_t free_start_cell) {
+  ASSERT(free_start_cell != 0);
+
+  // No consecutive 1 bits.
+  ASSERT((free_start_cell & (free_start_cell << 1)) == 0);
+
+  int offsets[16];
+  uint32_t cell = free_start_cell;
+  int offset_of_last_live;
+  if ((cell & 0x80000000u) != 0) {
+    // This case would overflow below.
+    offset_of_last_live = 31;
+  } else {
+    // Remove all but one bit, the most significant.  This is an optimization
+    // that may or may not be worthwhile.
+    cell |= cell >> 16;
+    cell |= cell >> 8;
+    cell |= cell >> 4;
+    cell |= cell >> 2;
+    cell |= cell >> 1;
+    cell = (cell + 1) >> 1;
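+    // E.g. 0x2c (0b101100) smears to 0x3f, and (0x3f + 1) >> 1 == 0x20, the
+    // most significant set bit of the original cell.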
+    int live_objects = MarkWordToObjectStarts(cell, offsets);
+    ASSERT(live_objects == 1);
+    offset_of_last_live = offsets[live_objects - 1];
+  }
+  Address last_live_start =
+      approximate_free_start + offset_of_last_live * kPointerSize;
+  HeapObject* last_live = HeapObject::FromAddress(last_live_start);
+  Address free_start = last_live_start + last_live->Size();
+  return free_start;
+}
+
+
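+// Takes the address of a 32-word block and its mark bits, and returns the
+// address of the first live object that starts in that block.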
+static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
+  ASSERT(cell != 0);
+
+  // No consecutive 1 bits.
+  ASSERT((cell & (cell << 1)) == 0);
+
+  int offsets[16];
+  if (cell == 0x80000000u) {  // Avoid overflow below.
+    return block_address + 31 * kPointerSize;
+  }
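+  // cell ^ (cell - 1) sets all bits up to and including the lowest set bit;
+  // adding one and shifting right then leaves exactly that lowest bit.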
+  uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
+  ASSERT((first_set_bit & cell) == first_set_bit);
+  int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
+  ASSERT(live_objects == 1);
+  USE(live_objects);
+  return block_address + offsets[0] * kPointerSize;
+}
+
+
+// Sweeps a space conservatively.  After this has been done the larger free
+// spaces have been put on the free list and the smaller ones have been
+// ignored and left untouched.  A free space is always either ignored or put
+// on the free list, never split up into two parts.  This is important
+// because it means that any FreeSpace maps left actually describe a region of
+// memory that can be ignored when scanning.  Dead objects other than free
+// spaces will not contain the free space map.
+intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
+  ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
+  MarkBit::CellType* cells = p->markbits()->cells();
+  p->MarkSweptConservatively();
+
+  int last_cell_index =
+      Bitmap::IndexToCell(
+          Bitmap::CellAlignIndex(
+              p->AddressToMarkbitIndex(p->ObjectAreaEnd())));
+
+  int cell_index = Page::kFirstUsedCell;
+  intptr_t freed_bytes = 0;
+
+  // This is the start of the 32 word block that we are currently looking at.
+  Address block_address = p->ObjectAreaStart();
+
+  // Skip over all the dead objects at the start of the page and mark them free.
+  for (cell_index = Page::kFirstUsedCell;
+       cell_index < last_cell_index;
+       cell_index++, block_address += 32 * kPointerSize) {
+    if (cells[cell_index] != 0) break;
+  }
+  size_t size = block_address - p->ObjectAreaStart();
+  if (cell_index == last_cell_index) {
+    freed_bytes += static_cast<int>(space->Free(p->ObjectAreaStart(),
+                                                static_cast<int>(size)));
+    ASSERT_EQ(0, p->LiveBytes());
+    return freed_bytes;
+  }
+  // Grow the size of the start-of-page free space a little to get up to the
+  // first live object.
+  Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
+  // Free the first free space.
+  size = free_end - p->ObjectAreaStart();
+  freed_bytes += space->Free(p->ObjectAreaStart(),
+                             static_cast<int>(size));
+  // The start of the current free area is represented in undigested form by
+  // the address of the last 32-word section that contained a live object and
+  // the marking bitmap for that cell, which describes where the live object
+  // started.  Unless we find a large free space in the bitmap we will not
+  // digest this pair into a real address.  We start the iteration here at the
+  // first word in the marking bit map that indicates a live object.
+  Address free_start = block_address;
+  uint32_t free_start_cell = cells[cell_index];
+
+  for ( ;
+       cell_index < last_cell_index;
+       cell_index++, block_address += 32 * kPointerSize) {
+    ASSERT((unsigned)cell_index ==
+        Bitmap::IndexToCell(
+            Bitmap::CellAlignIndex(
+                p->AddressToMarkbitIndex(block_address))));
+    uint32_t cell = cells[cell_index];
+    if (cell != 0) {
+      // We have a live object.  Check approximately whether more than 32
+      // words have passed since the last live object.
+      if (block_address - free_start > 32 * kPointerSize) {
+        free_start = DigestFreeStart(free_start, free_start_cell);
+        if (block_address - free_start > 32 * kPointerSize) {
+          // Now that we know the exact start of the free space, it still
+          // looks large enough to be worth bothering with, so find the start
+          // of the first live object at the end of the free space.
+          free_end = StartOfLiveObject(block_address, cell);
+          freed_bytes += space->Free(free_start,
+                                     static_cast<int>(free_end - free_start));
+        }
+      }
+      // Update our undigested record of where the current free area started.
+      free_start = block_address;
+      free_start_cell = cell;
+      // Clear marking bits for current cell.
+      cells[cell_index] = 0;
+    }
+  }
+
+  // Handle the free space at the end of the page.
+  if (block_address - free_start > 32 * kPointerSize) {
+    free_start = DigestFreeStart(free_start, free_start_cell);
+    freed_bytes += space->Free(free_start,
+                               static_cast<int>(block_address - free_start));
+  }
+
+  p->ResetLiveBytes();
+  return freed_bytes;
+}
+
+
+void MarkCompactCollector::SweepSpace(PagedSpace* space,
+                                      SweeperType sweeper) {
+  space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
+                                      sweeper == LAZY_CONSERVATIVE);
+
+  space->ClearStats();
+
+  PageIterator it(space);
+
+  intptr_t freed_bytes = 0;
+  intptr_t newspace_size = space->heap()->new_space()->Size();
+  bool lazy_sweeping_active = false;
+  bool unused_page_present = false;
 
   while (it.has_next()) {
     Page* p = it.next();
 
-    bool is_previous_alive = true;
-    Address free_start = NULL;
-    HeapObject* object;
+    // Clear sweeping flags indicating that marking bits are still intact.
+    p->ClearSweptPrecisely();
+    p->ClearSweptConservatively();
 
-    for (Address current = p->ObjectAreaStart();
-         current < p->AllocationTop();
-         current += object->Size()) {
-      object = HeapObject::FromAddress(current);
-      if (object->IsMarked()) {
-        object->ClearMark();
-        heap->mark_compact_collector()->tracer()->decrement_marked_count();
-
-        if (!is_previous_alive) {  // Transition from free to live.
-          space->DeallocateBlock(free_start,
-                                 static_cast<int>(current - free_start),
-                                 true);
-          is_previous_alive = true;
-        }
-      } else {
-        heap->mark_compact_collector()->ReportDeleteIfNeeded(
-            object, heap->isolate());
-        if (is_previous_alive) {  // Transition from live to free.
-          free_start = current;
-          is_previous_alive = false;
-        }
-        LiveObjectList::ProcessNonLive(object);
-      }
-      // The object is now unmarked for the call to Size() at the top of the
-      // loop.
+    if (p->IsEvacuationCandidate()) {
+      ASSERT(evacuation_candidates_.length() > 0);
+      continue;
     }
 
-    bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop())
-        || (!is_previous_alive && free_start == p->ObjectAreaStart());
-
-    if (page_is_empty) {
-      // This page is empty. Check whether we are in the middle of
-      // sequence of empty pages and start one if not.
-      if (!first_empty_page->is_valid()) {
-        first_empty_page = p;
-        prec_first_empty_page = prev;
-      }
-
-      if (!is_previous_alive) {
-        // There are dead objects on this page. Update space accounting stats
-        // without putting anything into free list.
-        int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
-        if (size_in_bytes > 0) {
-          space->DeallocateBlock(free_start, size_in_bytes, false);
-        }
-      }
-    } else {
-      // This page is not empty. Sequence of empty pages ended on the previous
-      // one.
-      if (first_empty_page->is_valid()) {
-        space->FreePages(prec_first_empty_page, prev);
-        prec_first_empty_page = first_empty_page = Page::FromAddress(NULL);
-      }
-
-      // If there is a free ending area on one of the previous pages we have
-      // deallocate that area and put it on the free list.
-      if (last_free_size > 0) {
-        Page::FromAddress(last_free_start)->
-            SetAllocationWatermark(last_free_start);
-        space->DeallocateBlock(last_free_start, last_free_size, true);
-        last_free_start = NULL;
-        last_free_size  = 0;
-      }
-
-      // If the last region of this page was not live we remember it.
-      if (!is_previous_alive) {
-        ASSERT(last_free_size == 0);
-        last_free_size = static_cast<int>(p->AllocationTop() - free_start);
-        last_free_start = free_start;
-      }
+    if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+      // Will be processed in EvacuateNewSpaceAndCandidates.
+      continue;
     }
 
-    prev = p;
-  }
-
-  // We reached end of space. See if we need to adjust allocation top.
-  Address new_allocation_top = NULL;
-
-  if (first_empty_page->is_valid()) {
-    // Last used pages in space are empty. We can move allocation top backwards
-    // to the beginning of first empty page.
-    ASSERT(prev == space->AllocationTopPage());
-
-    new_allocation_top = first_empty_page->ObjectAreaStart();
-  }
-
-  if (last_free_size > 0) {
-    // There was a free ending area on the previous page.
-    // Deallocate it without putting it into freelist and move allocation
-    // top to the beginning of this free area.
-    space->DeallocateBlock(last_free_start, last_free_size, false);
-    new_allocation_top = last_free_start;
-  }
-
-  if (new_allocation_top != NULL) {
-#ifdef DEBUG
-    Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top);
-    if (!first_empty_page->is_valid()) {
-      ASSERT(new_allocation_top_page == space->AllocationTopPage());
-    } else if (last_free_size > 0) {
-      ASSERT(new_allocation_top_page == prec_first_empty_page);
-    } else {
-      ASSERT(new_allocation_top_page == first_empty_page);
-    }
-#endif
-
-    space->SetTop(new_allocation_top);
-  }
-}
-
-
-void MarkCompactCollector::EncodeForwardingAddresses() {
-  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
-  // Objects in the active semispace of the young generation may be
-  // relocated to the inactive semispace (if not promoted).  Set the
-  // relocation info to the beginning of the inactive semispace.
-  heap()->new_space()->MCResetRelocationInfo();
-
-  // Compute the forwarding pointers in each space.
-  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
-                                        ReportDeleteIfNeeded>(
-      heap()->old_pointer_space());
-
-  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
-                                        IgnoreNonLiveObject>(
-      heap()->old_data_space());
-
-  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
-                                        ReportDeleteIfNeeded>(
-      heap()->code_space());
-
-  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
-                                        IgnoreNonLiveObject>(
-      heap()->cell_space());
-
-
-  // Compute new space next to last after the old and code spaces have been
-  // compacted.  Objects in new space can be promoted to old or code space.
-  EncodeForwardingAddressesInNewSpace();
-
-  // Compute map space last because computing forwarding addresses
-  // overwrites non-live objects.  Objects in the other spaces rely on
-  // non-live map pointers to get the sizes of non-live objects.
-  EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
-                                        IgnoreNonLiveObject>(
-      heap()->map_space());
-
-  // Write relocation info to the top page, so we can use it later.  This is
-  // done after promoting objects from the new space so we get the correct
-  // allocation top.
-  heap()->old_pointer_space()->MCWriteRelocationInfoToPage();
-  heap()->old_data_space()->MCWriteRelocationInfoToPage();
-  heap()->code_space()->MCWriteRelocationInfoToPage();
-  heap()->map_space()->MCWriteRelocationInfoToPage();
-  heap()->cell_space()->MCWriteRelocationInfoToPage();
-}
-
-
-class MapIterator : public HeapObjectIterator {
- public:
-  explicit MapIterator(Heap* heap)
-    : HeapObjectIterator(heap->map_space(), &SizeCallback) { }
-
-  MapIterator(Heap* heap, Address start)
-      : HeapObjectIterator(heap->map_space(), start, &SizeCallback) { }
-
- private:
-  static int SizeCallback(HeapObject* unused) {
-    USE(unused);
-    return Map::kSize;
-  }
-};
-
-
-class MapCompact {
- public:
-  explicit MapCompact(Heap* heap, int live_maps)
-    : heap_(heap),
-      live_maps_(live_maps),
-      to_evacuate_start_(heap->map_space()->TopAfterCompaction(live_maps)),
-      vacant_map_it_(heap),
-      map_to_evacuate_it_(heap, to_evacuate_start_),
-      first_map_to_evacuate_(
-          reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) {
-  }
-
-  void CompactMaps() {
-    // As we know the number of maps to evacuate beforehand,
-    // we stop then there is no more vacant maps.
-    for (Map* next_vacant_map = NextVacantMap();
-         next_vacant_map;
-         next_vacant_map = NextVacantMap()) {
-      EvacuateMap(next_vacant_map, NextMapToEvacuate());
-    }
-
-#ifdef DEBUG
-    CheckNoMapsToEvacuate();
-#endif
-  }
-
-  void UpdateMapPointersInRoots() {
-    MapUpdatingVisitor map_updating_visitor;
-    heap()->IterateRoots(&map_updating_visitor, VISIT_ONLY_STRONG);
-    heap()->isolate()->global_handles()->IterateWeakRoots(
-        &map_updating_visitor);
-    LiveObjectList::IterateElements(&map_updating_visitor);
-  }
-
-  void UpdateMapPointersInPagedSpace(PagedSpace* space) {
-    ASSERT(space != heap()->map_space());
-
-    PageIterator it(space, PageIterator::PAGES_IN_USE);
-    while (it.has_next()) {
-      Page* p = it.next();
-      UpdateMapPointersInRange(heap(),
-                               p->ObjectAreaStart(),
-                               p->AllocationTop());
-    }
-  }
-
-  void UpdateMapPointersInNewSpace() {
-    NewSpace* space = heap()->new_space();
-    UpdateMapPointersInRange(heap(), space->bottom(), space->top());
-  }
-
-  void UpdateMapPointersInLargeObjectSpace() {
-    LargeObjectIterator it(heap()->lo_space());
-    for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
-      UpdateMapPointersInObject(heap(), obj);
-  }
-
-  void Finish() {
-    heap()->map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
-  }
-
-  inline Heap* heap() const { return heap_; }
-
- private:
-  Heap* heap_;
-  int live_maps_;
-  Address to_evacuate_start_;
-  MapIterator vacant_map_it_;
-  MapIterator map_to_evacuate_it_;
-  Map* first_map_to_evacuate_;
-
-  // Helper class for updating map pointers in HeapObjects.
-  class MapUpdatingVisitor: public ObjectVisitor {
-  public:
-    MapUpdatingVisitor() {}
-
-    void VisitPointer(Object** p) {
-      UpdateMapPointer(p);
-    }
-
-    void VisitPointers(Object** start, Object** end) {
-      for (Object** p = start; p < end; p++) UpdateMapPointer(p);
-    }
-
-  private:
-    void UpdateMapPointer(Object** p) {
-      if (!(*p)->IsHeapObject()) return;
-      HeapObject* old_map = reinterpret_cast<HeapObject*>(*p);
-
-      // Moved maps are tagged with overflowed map word.  They are the only
-      // objects those map word is overflowed as marking is already complete.
-      MapWord map_word = old_map->map_word();
-      if (!map_word.IsOverflowed()) return;
-
-      *p = GetForwardedMap(map_word);
-    }
-  };
-
-  static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
-    while (true) {
-      HeapObject* next = it->next();
-      ASSERT(next != NULL);
-      if (next == last)
-        return NULL;
-      ASSERT(!next->IsOverflowed());
-      ASSERT(!next->IsMarked());
-      ASSERT(next->IsMap() || FreeListNode::IsFreeListNode(next));
-      if (next->IsMap() == live)
-        return reinterpret_cast<Map*>(next);
-    }
-  }
-
-  Map* NextVacantMap() {
-    Map* map = NextMap(&vacant_map_it_, first_map_to_evacuate_, false);
-    ASSERT(map == NULL || FreeListNode::IsFreeListNode(map));
-    return map;
-  }
-
-  Map* NextMapToEvacuate() {
-    Map* map = NextMap(&map_to_evacuate_it_, NULL, true);
-    ASSERT(map != NULL);
-    ASSERT(map->IsMap());
-    return map;
-  }
-
-  static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) {
-    ASSERT(FreeListNode::IsFreeListNode(vacant_map));
-    ASSERT(map_to_evacuate->IsMap());
-
-    ASSERT(Map::kSize % 4 == 0);
-
-    map_to_evacuate->heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(
-        vacant_map->address(), map_to_evacuate->address(), Map::kSize);
-
-    ASSERT(vacant_map->IsMap());  // Due to memcpy above.
-
-    MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
-    forwarding_map_word.SetOverflow();
-    map_to_evacuate->set_map_word(forwarding_map_word);
-
-    ASSERT(map_to_evacuate->map_word().IsOverflowed());
-    ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map);
-  }
-
-  static Map* GetForwardedMap(MapWord map_word) {
-    ASSERT(map_word.IsOverflowed());
-    map_word.ClearOverflow();
-    Map* new_map = map_word.ToMap();
-    ASSERT_MAP_ALIGNED(new_map->address());
-    return new_map;
-  }
-
-  static int UpdateMapPointersInObject(Heap* heap, HeapObject* obj) {
-    ASSERT(!obj->IsMarked());
-    Map* map = obj->map();
-    ASSERT(heap->map_space()->Contains(map));
-    MapWord map_word = map->map_word();
-    ASSERT(!map_word.IsMarked());
-    if (map_word.IsOverflowed()) {
-      Map* new_map = GetForwardedMap(map_word);
-      ASSERT(heap->map_space()->Contains(new_map));
-      obj->set_map(new_map);
-
-#ifdef DEBUG
+    if (lazy_sweeping_active) {
       if (FLAG_gc_verbose) {
-        PrintF("update %p : %p -> %p\n",
-               obj->address(),
-               reinterpret_cast<void*>(map),
-               reinterpret_cast<void*>(new_map));
+        PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
+               reinterpret_cast<intptr_t>(p));
       }
-#endif
+      continue;
     }
 
-    int size = obj->SizeFromMap(map);
-    MapUpdatingVisitor map_updating_visitor;
-    obj->IterateBody(map->instance_type(), size, &map_updating_visitor);
-    return size;
-  }
+    // Keep one unused page around; release any further unused pages instead
+    // of sweeping them.
+    if (p->LiveBytes() == 0) {
+      if (unused_page_present) {
+        if (FLAG_gc_verbose) {
+          PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
+                 reinterpret_cast<intptr_t>(p));
+        }
+        space->ReleasePage(p);
+        continue;
+      }
+      unused_page_present = true;
+    }
 
-  static void UpdateMapPointersInRange(Heap* heap, Address start, Address end) {
-    HeapObject* object;
-    int size;
-    for (Address current = start; current < end; current += size) {
-      object = HeapObject::FromAddress(current);
-      size = UpdateMapPointersInObject(heap, object);
-      ASSERT(size > 0);
+    if (FLAG_gc_verbose) {
+      PrintF("Sweeping 0x%" V8PRIxPTR " with sweeper %d.\n",
+             reinterpret_cast<intptr_t>(p),
+             sweeper);
+    }
+
+    switch (sweeper) {
+      case CONSERVATIVE: {
+        SweepConservatively(space, p);
+        break;
+      }
+      case LAZY_CONSERVATIVE: {
+        freed_bytes += SweepConservatively(space, p);
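+        // Once at least a new-space-worth of memory has been freed, queue
+        // the remaining pages for lazy sweeping instead of sweeping them now.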
+        if (freed_bytes >= newspace_size && p != space->LastPage()) {
+          space->SetPagesToSweep(p->next_page(), space->anchor());
+          lazy_sweeping_active = true;
+        }
+        break;
+      }
+      case PRECISE: {
+        if (space->identity() == CODE_SPACE) {
+          SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL);
+        } else {
+          SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL);
+        }
+        break;
+      }
+      default: {
+        UNREACHABLE();
+      }
     }
   }
 
-#ifdef DEBUG
-  void CheckNoMapsToEvacuate() {
-    if (!FLAG_enable_slow_asserts)
-      return;
-
-    for (HeapObject* obj = map_to_evacuate_it_.next();
-         obj != NULL; obj = map_to_evacuate_it_.next())
-      ASSERT(FreeListNode::IsFreeListNode(obj));
-  }
-#endif
-};
+  // Give pages that are queued to be freed back to the OS.
+  heap()->FreeQueuedChunks();
+}
 
 
 void MarkCompactCollector::SweepSpaces() {
   GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
-
-  ASSERT(state_ == SWEEP_SPACES);
-  ASSERT(!IsCompacting());
+#ifdef DEBUG
+  state_ = SWEEP_SPACES;
+#endif
+  SweeperType how_to_sweep =
+      FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
+  if (sweep_precisely_) how_to_sweep = PRECISE;
   // Noncompacting collections simply sweep the spaces to clear the mark
   // bits and free the nonlive blocks (for old and map spaces).  We sweep
   // the map space last because freeing non-live maps overwrites them and
   // the other spaces rely on possibly non-live maps to get the sizes for
   // non-live objects.
-  SweepSpace(heap(), heap()->old_pointer_space());
-  SweepSpace(heap(), heap()->old_data_space());
-  SweepSpace(heap(), heap()->code_space());
-  SweepSpace(heap(), heap()->cell_space());
+  SweepSpace(heap()->old_pointer_space(), how_to_sweep);
+  SweepSpace(heap()->old_data_space(), how_to_sweep);
+
+  RemoveDeadInvalidatedCode();
+  SweepSpace(heap()->code_space(), PRECISE);
+
+  SweepSpace(heap()->cell_space(), PRECISE);
+
   { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
-    SweepNewSpace(heap(), heap()->new_space());
-  }
-  SweepSpace(heap(), heap()->map_space());
-
-  heap()->IterateDirtyRegions(heap()->map_space(),
-                             &heap()->IteratePointersInDirtyMapsRegion,
-                             &UpdatePointerToNewGen,
-                             heap()->WATERMARK_SHOULD_BE_VALID);
-
-  intptr_t live_maps_size = heap()->map_space()->Size();
-  int live_maps = static_cast<int>(live_maps_size / Map::kSize);
-  ASSERT(live_map_objects_size_ == live_maps_size);
-
-  if (heap()->map_space()->NeedsCompaction(live_maps)) {
-    MapCompact map_compact(heap(), live_maps);
-
-    map_compact.CompactMaps();
-    map_compact.UpdateMapPointersInRoots();
-
-    PagedSpaces spaces;
-    for (PagedSpace* space = spaces.next();
-         space != NULL; space = spaces.next()) {
-      if (space == heap()->map_space()) continue;
-      map_compact.UpdateMapPointersInPagedSpace(space);
-    }
-    map_compact.UpdateMapPointersInNewSpace();
-    map_compact.UpdateMapPointersInLargeObjectSpace();
-
-    map_compact.Finish();
-  }
-}
-
-
-// Iterate the live objects in a range of addresses (eg, a page or a
-// semispace).  The live regions of the range have been linked into a list.
-// The first live region is [first_live_start, first_live_end), and the last
-// address in the range is top.  The callback function is used to get the
-// size of each live object.
-int MarkCompactCollector::IterateLiveObjectsInRange(
-    Address start,
-    Address end,
-    LiveObjectCallback size_func) {
-  int live_objects_size = 0;
-  Address current = start;
-  while (current < end) {
-    uint32_t encoded_map = Memory::uint32_at(current);
-    if (encoded_map == kSingleFreeEncoding) {
-      current += kPointerSize;
-    } else if (encoded_map == kMultiFreeEncoding) {
-      current += Memory::int_at(current + kIntSize);
-    } else {
-      int size = (this->*size_func)(HeapObject::FromAddress(current));
-      current += size;
-      live_objects_size += size;
-    }
-  }
-  return live_objects_size;
-}
-
-
-int MarkCompactCollector::IterateLiveObjects(
-    NewSpace* space, LiveObjectCallback size_f) {
-  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
-  return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
-}
-
-
-int MarkCompactCollector::IterateLiveObjects(
-    PagedSpace* space, LiveObjectCallback size_f) {
-  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
-  int total = 0;
-  PageIterator it(space, PageIterator::PAGES_IN_USE);
-  while (it.has_next()) {
-    Page* p = it.next();
-    total += IterateLiveObjectsInRange(p->ObjectAreaStart(),
-                                       p->AllocationTop(),
-                                       size_f);
-  }
-  return total;
-}
-
-
-// -------------------------------------------------------------------------
-// Phase 3: Update pointers
-
-// Helper class for updating pointers in HeapObjects.
-class UpdatingVisitor: public ObjectVisitor {
- public:
-  explicit UpdatingVisitor(Heap* heap) : heap_(heap) {}
-
-  void VisitPointer(Object** p) {
-    UpdatePointer(p);
+    EvacuateNewSpaceAndCandidates();
   }
 
-  void VisitPointers(Object** start, Object** end) {
-    // Mark all HeapObject pointers in [start, end)
-    for (Object** p = start; p < end; p++) UpdatePointer(p);
-  }
+  // ClearNonLiveTransitions depends on precise sweeping of the map space to
+  // detect whether an unmarked map became dead in this collection or in one
+  // of the previous ones.
+  SweepSpace(heap()->map_space(), PRECISE);
 
-  void VisitCodeTarget(RelocInfo* rinfo) {
-    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
-    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
-    VisitPointer(&target);
-    rinfo->set_target_address(
-        reinterpret_cast<Code*>(target)->instruction_start());
-  }
+  ASSERT(live_map_objects_size_ <= heap()->map_space()->Size());
 
-  void VisitDebugTarget(RelocInfo* rinfo) {
-    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
-            rinfo->IsPatchedReturnSequence()) ||
-           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
-            rinfo->IsPatchedDebugBreakSlotSequence()));
-    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
-    VisitPointer(&target);
-    rinfo->set_call_address(
-        reinterpret_cast<Code*>(target)->instruction_start());
-  }
-
-  inline Heap* heap() const { return heap_; }
-
- private:
-  void UpdatePointer(Object** p) {
-    if (!(*p)->IsHeapObject()) return;
-
-    HeapObject* obj = HeapObject::cast(*p);
-    Address old_addr = obj->address();
-    Address new_addr;
-    ASSERT(!heap()->InFromSpace(obj));
-
-    if (heap()->new_space()->Contains(obj)) {
-      Address forwarding_pointer_addr =
-          heap()->new_space()->FromSpaceLow() +
-          heap()->new_space()->ToSpaceOffsetForAddress(old_addr);
-      new_addr = Memory::Address_at(forwarding_pointer_addr);
-
-#ifdef DEBUG
-      ASSERT(heap()->old_pointer_space()->Contains(new_addr) ||
-             heap()->old_data_space()->Contains(new_addr) ||
-             heap()->new_space()->FromSpaceContains(new_addr) ||
-             heap()->lo_space()->Contains(HeapObject::FromAddress(new_addr)));
-
-      if (heap()->new_space()->FromSpaceContains(new_addr)) {
-        ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
-               heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
-      }
-#endif
-
-    } else if (heap()->lo_space()->Contains(obj)) {
-      // Don't move objects in the large object space.
-      return;
-
-    } else {
-#ifdef DEBUG
-      PagedSpaces spaces;
-      PagedSpace* original_space = spaces.next();
-      while (original_space != NULL) {
-        if (original_space->Contains(obj)) break;
-        original_space = spaces.next();
-      }
-      ASSERT(original_space != NULL);
-#endif
-      new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
-      ASSERT(original_space->Contains(new_addr));
-      ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <=
-             original_space->MCSpaceOffsetForAddress(old_addr));
-    }
-
-    *p = HeapObject::FromAddress(new_addr);
-
-#ifdef DEBUG
-    if (FLAG_gc_verbose) {
-      PrintF("update %p : %p -> %p\n",
-             reinterpret_cast<Address>(p), old_addr, new_addr);
-    }
-#endif
-  }
-
-  Heap* heap_;
-};
-
-
-void MarkCompactCollector::UpdatePointers() {
-#ifdef DEBUG
-  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
-  state_ = UPDATE_POINTERS;
-#endif
-  UpdatingVisitor updating_visitor(heap());
-  heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
-      &updating_visitor);
-  heap()->IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
-  heap()->isolate()->global_handles()->IterateWeakRoots(&updating_visitor);
-
-  // Update the pointer to the head of the weak list of global contexts.
-  updating_visitor.VisitPointer(&heap()->global_contexts_list_);
-
-  LiveObjectList::IterateElements(&updating_visitor);
-
-  int live_maps_size = IterateLiveObjects(
-      heap()->map_space(), &MarkCompactCollector::UpdatePointersInOldObject);
-  int live_pointer_olds_size = IterateLiveObjects(
-      heap()->old_pointer_space(),
-      &MarkCompactCollector::UpdatePointersInOldObject);
-  int live_data_olds_size = IterateLiveObjects(
-      heap()->old_data_space(),
-      &MarkCompactCollector::UpdatePointersInOldObject);
-  int live_codes_size = IterateLiveObjects(
-      heap()->code_space(), &MarkCompactCollector::UpdatePointersInOldObject);
-  int live_cells_size = IterateLiveObjects(
-      heap()->cell_space(), &MarkCompactCollector::UpdatePointersInOldObject);
-  int live_news_size = IterateLiveObjects(
-      heap()->new_space(), &MarkCompactCollector::UpdatePointersInNewObject);
-
-  // Large objects do not move, the map word can be updated directly.
-  LargeObjectIterator it(heap()->lo_space());
-  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
-    UpdatePointersInNewObject(obj);
-  }
-
-  USE(live_maps_size);
-  USE(live_pointer_olds_size);
-  USE(live_data_olds_size);
-  USE(live_codes_size);
-  USE(live_cells_size);
-  USE(live_news_size);
-  ASSERT(live_maps_size == live_map_objects_size_);
-  ASSERT(live_data_olds_size == live_old_data_objects_size_);
-  ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
-  ASSERT(live_codes_size == live_code_objects_size_);
-  ASSERT(live_cells_size == live_cell_objects_size_);
-  ASSERT(live_news_size == live_young_objects_size_);
-}
-
-
-int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) {
-  // Keep old map pointers
-  Map* old_map = obj->map();
-  ASSERT(old_map->IsHeapObject());
-
-  Address forwarded = GetForwardingAddressInOldSpace(old_map);
-
-  ASSERT(heap()->map_space()->Contains(old_map));
-  ASSERT(heap()->map_space()->Contains(forwarded));
-#ifdef DEBUG
-  if (FLAG_gc_verbose) {
-    PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
-           forwarded);
-  }
-#endif
-  // Update the map pointer.
-  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(forwarded)));
-
-  // We have to compute the object size relying on the old map because
-  // map objects are not relocated yet.
-  int obj_size = obj->SizeFromMap(old_map);
-
-  // Update pointers in the object body.
-  UpdatingVisitor updating_visitor(heap());
-  obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
-  return obj_size;
-}
-
-
-int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
-  // Decode the map pointer.
-  MapWord encoding = obj->map_word();
-  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
-  ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
-
-  // At this point, the first word of map_addr is also encoded, cannot
-  // cast it to Map* using Map::cast.
-  Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr));
-  int obj_size = obj->SizeFromMap(map);
-  InstanceType type = map->instance_type();
-
-  // Update map pointer.
-  Address new_map_addr = GetForwardingAddressInOldSpace(map);
-  int offset = encoding.DecodeOffset();
-  obj->set_map_word(MapWord::EncodeAddress(new_map_addr, offset));
-
-#ifdef DEBUG
-  if (FLAG_gc_verbose) {
-    PrintF("update %p : %p -> %p\n", obj->address(),
-           map_addr, new_map_addr);
-  }
-#endif
-
-  // Update pointers in the object body.
-  UpdatingVisitor updating_visitor(heap());
-  obj->IterateBody(type, obj_size, &updating_visitor);
-  return obj_size;
-}
-
-
-Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
-  // Object should either in old or map space.
-  MapWord encoding = obj->map_word();
-
-  // Offset to the first live object's forwarding address.
-  int offset = encoding.DecodeOffset();
-  Address obj_addr = obj->address();
-
-  // Find the first live object's forwarding address.
-  Page* p = Page::FromAddress(obj_addr);
-  Address first_forwarded = p->mc_first_forwarded;
-
-  // Page start address of forwarded address.
-  Page* forwarded_page = Page::FromAddress(first_forwarded);
-  int forwarded_offset = forwarded_page->Offset(first_forwarded);
-
-  // Find end of allocation in the page of first_forwarded.
-  int mc_top_offset = forwarded_page->AllocationWatermarkOffset();
-
-  // Check if current object's forward pointer is in the same page
-  // as the first live object's forwarding pointer
-  if (forwarded_offset + offset < mc_top_offset) {
-    // In the same page.
-    return first_forwarded + offset;
-  }
-
-  // Must be in the next page, NOTE: this may cross chunks.
-  Page* next_page = forwarded_page->next_page();
-  ASSERT(next_page->is_valid());
-
-  offset -= (mc_top_offset - forwarded_offset);
-  offset += Page::kObjectStartOffset;
-
-  ASSERT_PAGE_OFFSET(offset);
-  ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop());
-
-  return next_page->OffsetToAddress(offset);
-}
-
-
-// -------------------------------------------------------------------------
-// Phase 4: Relocate objects
-
-void MarkCompactCollector::RelocateObjects() {
-#ifdef DEBUG
-  ASSERT(state_ == UPDATE_POINTERS);
-  state_ = RELOCATE_OBJECTS;
-#endif
-  // Relocates objects, always relocate map objects first. Relocating
-  // objects in other space relies on map objects to get object size.
-  int live_maps_size = IterateLiveObjects(
-      heap()->map_space(), &MarkCompactCollector::RelocateMapObject);
-  int live_pointer_olds_size = IterateLiveObjects(
-      heap()->old_pointer_space(),
-      &MarkCompactCollector::RelocateOldPointerObject);
-  int live_data_olds_size = IterateLiveObjects(
-      heap()->old_data_space(), &MarkCompactCollector::RelocateOldDataObject);
-  int live_codes_size = IterateLiveObjects(
-      heap()->code_space(), &MarkCompactCollector::RelocateCodeObject);
-  int live_cells_size = IterateLiveObjects(
-      heap()->cell_space(), &MarkCompactCollector::RelocateCellObject);
-  int live_news_size = IterateLiveObjects(
-      heap()->new_space(), &MarkCompactCollector::RelocateNewObject);
-
-  USE(live_maps_size);
-  USE(live_pointer_olds_size);
-  USE(live_data_olds_size);
-  USE(live_codes_size);
-  USE(live_cells_size);
-  USE(live_news_size);
-  ASSERT(live_maps_size == live_map_objects_size_);
-  ASSERT(live_data_olds_size == live_old_data_objects_size_);
-  ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
-  ASSERT(live_codes_size == live_code_objects_size_);
-  ASSERT(live_cells_size == live_cell_objects_size_);
-  ASSERT(live_news_size == live_young_objects_size_);
-
-  // Flip from and to spaces
-  heap()->new_space()->Flip();
-
-  heap()->new_space()->MCCommitRelocationInfo();
-
-  // Set age_mark to bottom in to space
-  Address mark = heap()->new_space()->bottom();
-  heap()->new_space()->set_age_mark(mark);
-
-  PagedSpaces spaces;
-  for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
-    space->MCCommitRelocationInfo();
-
-  heap()->CheckNewSpaceExpansionCriteria();
-  heap()->IncrementYoungSurvivorsCounter(live_news_size);
-}
-
-
-int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
-  // Recover map pointer.
-  MapWord encoding = obj->map_word();
-  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
-  ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
-
-  // Get forwarding address before resetting map pointer
-  Address new_addr = GetForwardingAddressInOldSpace(obj);
-
-  // Reset map pointer.  The meta map object may not be copied yet so
-  // Map::cast does not yet work.
-  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));
-
-  Address old_addr = obj->address();
-
-  if (new_addr != old_addr) {
-    // Move contents.
-    heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
-                                                   old_addr,
-                                                   Map::kSize);
-  }
-
-#ifdef DEBUG
-  if (FLAG_gc_verbose) {
-    PrintF("relocate %p -> %p\n", old_addr, new_addr);
-  }
-#endif
-
-  return Map::kSize;
-}
-
-
-static inline int RestoreMap(HeapObject* obj,
-                             PagedSpace* space,
-                             Address new_addr,
-                             Address map_addr) {
-  // This must be a non-map object, and the function relies on the
-  // assumption that the Map space is compacted before the other paged
-  // spaces (see RelocateObjects).
-
-  // Reset map pointer.
-  obj->set_map(Map::cast(HeapObject::FromAddress(map_addr)));
-
-  int obj_size = obj->Size();
-  ASSERT_OBJECT_SIZE(obj_size);
-
-  ASSERT(space->MCSpaceOffsetForAddress(new_addr) <=
-         space->MCSpaceOffsetForAddress(obj->address()));
-
-#ifdef DEBUG
-  if (FLAG_gc_verbose) {
-    PrintF("relocate %p -> %p\n", obj->address(), new_addr);
-  }
-#endif
-
-  return obj_size;
-}
-
-
-int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
-                                                   PagedSpace* space) {
-  // Recover map pointer.
-  MapWord encoding = obj->map_word();
-  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
-  ASSERT(heap()->map_space()->Contains(map_addr));
-
-  // Get forwarding address before resetting map pointer.
-  Address new_addr = GetForwardingAddressInOldSpace(obj);
-
-  // Reset the map pointer.
-  int obj_size = RestoreMap(obj, space, new_addr, map_addr);
-
-  Address old_addr = obj->address();
-
-  if (new_addr != old_addr) {
-    // Move contents.
-    if (space == heap()->old_data_space()) {
-      heap()->MoveBlock(new_addr, old_addr, obj_size);
-    } else {
-      heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
-                                                     old_addr,
-                                                     obj_size);
-    }
-  }
-
-  ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
-
-  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
-  if (copied_to->IsSharedFunctionInfo()) {
-    PROFILE(heap()->isolate(),
-            SharedFunctionInfoMoveEvent(old_addr, new_addr));
-  }
-  HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
-
-  return obj_size;
-}
-
-
-int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
-  return RelocateOldNonCodeObject(obj, heap()->old_pointer_space());
-}
-
-
-int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
-  return RelocateOldNonCodeObject(obj, heap()->old_data_space());
-}
-
-
-int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
-  return RelocateOldNonCodeObject(obj, heap()->cell_space());
-}
-
-
-int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
-  // Recover map pointer.
-  MapWord encoding = obj->map_word();
-  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
-  ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
-
-  // Get forwarding address before resetting map pointer
-  Address new_addr = GetForwardingAddressInOldSpace(obj);
-
-  // Reset the map pointer.
-  int obj_size = RestoreMap(obj, heap()->code_space(), new_addr, map_addr);
-
-  Address old_addr = obj->address();
-
-  if (new_addr != old_addr) {
-    // Move contents.
-    heap()->MoveBlock(new_addr, old_addr, obj_size);
-  }
-
-  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
-  if (copied_to->IsCode()) {
-    // May also update inline cache target.
-    Code::cast(copied_to)->Relocate(new_addr - old_addr);
-    // Notify the logger that compiled code has moved.
-    PROFILE(heap()->isolate(), CodeMoveEvent(old_addr, new_addr));
-  }
-  HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
-
-  return obj_size;
-}
-
-
-int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
-  int obj_size = obj->Size();
-
-  // Get forwarding address
-  Address old_addr = obj->address();
-  int offset = heap()->new_space()->ToSpaceOffsetForAddress(old_addr);
-
-  Address new_addr =
-    Memory::Address_at(heap()->new_space()->FromSpaceLow() + offset);
-
-#ifdef DEBUG
-  if (heap()->new_space()->FromSpaceContains(new_addr)) {
-    ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
-           heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
-  } else {
-    ASSERT(heap()->TargetSpace(obj) == heap()->old_pointer_space() ||
-           heap()->TargetSpace(obj) == heap()->old_data_space());
-  }
-#endif
-
-  // New and old addresses cannot overlap.
-  if (heap()->InNewSpace(HeapObject::FromAddress(new_addr))) {
-    heap()->CopyBlock(new_addr, old_addr, obj_size);
-  } else {
-    heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
-                                                   old_addr,
-                                                   obj_size);
-  }
-
-#ifdef DEBUG
-  if (FLAG_gc_verbose) {
-    PrintF("relocate %p -> %p\n", old_addr, new_addr);
-  }
-#endif
-
-  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
-  if (copied_to->IsSharedFunctionInfo()) {
-    PROFILE(heap()->isolate(),
-            SharedFunctionInfoMoveEvent(old_addr, new_addr));
-  }
-  HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
-
-  return obj_size;
+  // Deallocate unmarked objects and clear marked bits for marked objects.
+  heap_->lo_space()->FreeUnmarkedObjects();
 }
 
 
@@ -3334,6 +3647,9 @@
 }
 
 
+// TODO(1466) ReportDeleteIfNeeded is not called currently.
+// Our profiling tools do not expect intersections between
+// code objects. We should either reenable it or change our tools.
 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
                                                 Isolate* isolate) {
 #ifdef ENABLE_GDB_JIT_INTERFACE
@@ -3347,16 +3663,148 @@
 }
 
 
-int MarkCompactCollector::SizeOfMarkedObject(HeapObject* obj) {
-  MapWord map_word = obj->map_word();
-  map_word.ClearMark();
-  return obj->SizeFromMap(map_word.ToMap());
+void MarkCompactCollector::Initialize() {
+  StaticMarkingVisitor::Initialize();
 }
 
 
-void MarkCompactCollector::Initialize() {
-  StaticPointersToNewGenUpdatingVisitor::Initialize();
-  StaticMarkingVisitor::Initialize();
+bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
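+  // Typed slots are recorded as a (type, address) pair of entries; the type
+  // marker is a small integer and therefore distinguishable from a real slot
+  // pointer.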
+  return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
+}
+
+
+bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
+                        SlotsBuffer** buffer_address,
+                        SlotType type,
+                        Address addr,
+                        AdditionMode mode) {
+  SlotsBuffer* buffer = *buffer_address;
+  if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
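+    // The current buffer is missing or full: either give up (in
+    // FAIL_ON_OVERFLOW mode, once the chain has grown too long) or link in a
+    // fresh buffer.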
+    if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
+      allocator->DeallocateChain(buffer_address);
+      return false;
+    }
+    buffer = allocator->AllocateBuffer(buffer);
+    *buffer_address = buffer;
+  }
+  ASSERT(buffer->HasSpaceForTypedSlot());
+  buffer->Add(reinterpret_cast<ObjectSlot>(type));
+  buffer->Add(reinterpret_cast<ObjectSlot>(addr));
+  return true;
+}
+
+
+static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
+  if (RelocInfo::IsCodeTarget(rmode)) {
+    return SlotsBuffer::CODE_TARGET_SLOT;
+  } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
+    return SlotsBuffer::DEBUG_TARGET_SLOT;
+  } else if (RelocInfo::IsJSReturn(rmode)) {
+    return SlotsBuffer::JS_RETURN_SLOT;
+  }
+  UNREACHABLE();
+  return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
+}
+
+
+void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Code* target) {
+  Page* target_page = Page::FromAddress(
+      reinterpret_cast<Address>(target));
+  if (target_page->IsEvacuationCandidate() &&
+      (rinfo->host() == NULL ||
+       !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
+    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
+                            target_page->slots_buffer_address(),
+                            SlotTypeForRMode(rinfo->rmode()),
+                            rinfo->pc(),
+                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
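+      // The slots buffer chain has overflowed; stop treating this page as an
+      // evacuation candidate instead of buffering more slots for it.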
+      EvictEvacuationCandidate(target_page);
+    }
+  }
+}
+
+
+void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
+  Page* target_page = Page::FromAddress(
+      reinterpret_cast<Address>(target));
+  if (target_page->IsEvacuationCandidate() &&
+      !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
+    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
+                            target_page->slots_buffer_address(),
+                            SlotsBuffer::CODE_ENTRY_SLOT,
+                            slot,
+                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
+      EvictEvacuationCandidate(target_page);
+    }
+  }
+}
+
+
+static inline SlotsBuffer::SlotType DecodeSlotType(
+    SlotsBuffer::ObjectSlot slot) {
+  return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
+}
+
+
+void SlotsBuffer::UpdateSlots(Heap* heap) {
+  PointersUpdatingVisitor v(heap);
+
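+  // Untyped entries are plain object slots.  A typed entry is followed by the
+  // address it describes, so it occupies two consecutive entries.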
+  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
+    ObjectSlot slot = slots_[slot_idx];
+    if (!IsTypedSlot(slot)) {
+      PointersUpdatingVisitor::UpdateSlot(heap, slot);
+    } else {
+      ++slot_idx;
+      ASSERT(slot_idx < idx_);
+      UpdateSlot(&v,
+                 DecodeSlotType(slot),
+                 reinterpret_cast<Address>(slots_[slot_idx]));
+    }
+  }
+}
+
+
+void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
+  PointersUpdatingVisitor v(heap);
+
+  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
+    ObjectSlot slot = slots_[slot_idx];
+    if (!IsTypedSlot(slot)) {
+      if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
+        PointersUpdatingVisitor::UpdateSlot(heap, slot);
+      }
+    } else {
+      ++slot_idx;
+      ASSERT(slot_idx < idx_);
+      Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
+      if (!IsOnInvalidatedCodeObject(pc)) {
+        UpdateSlot(&v,
+                   DecodeSlotType(slot),
+                   reinterpret_cast<Address>(slots_[slot_idx]));
+      }
+    }
+  }
+}
+
+
+SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
+  return new SlotsBuffer(next_buffer);
+}
+
+
+void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
+  delete buffer;
+}
+
+
+void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
+  SlotsBuffer* buffer = *buffer_address;
+  while (buffer != NULL) {
+    SlotsBuffer* next_buffer = buffer->next();
+    DeallocateBuffer(buffer);
+    buffer = next_buffer;
+  }
+  *buffer_address = NULL;
 }
 
 
diff --git a/src/mark-compact.h b/src/mark-compact.h
index 9b67c8a..8685036 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,6 +28,7 @@
 #ifndef V8_MARK_COMPACT_H_
 #define V8_MARK_COMPACT_H_
 
+#include "compiler-intrinsics.h"
 #include "spaces.h"
 
 namespace v8 {
@@ -45,62 +46,364 @@
 class RootMarkingVisitor;
 
 
-// ----------------------------------------------------------------------------
-// Marking stack for tracing live objects.
-
-class MarkingStack {
+class Marking {
  public:
-  MarkingStack() : low_(NULL), top_(NULL), high_(NULL), overflowed_(false) { }
+  explicit Marking(Heap* heap)
+      : heap_(heap) {
+  }
+
+  static inline MarkBit MarkBitFrom(Address addr);
+
+  static inline MarkBit MarkBitFrom(HeapObject* obj) {
+    return MarkBitFrom(reinterpret_cast<Address>(obj));
+  }
+
+  // Impossible markbits: 01
+  static const char* kImpossibleBitPattern;
+  static inline bool IsImpossible(MarkBit mark_bit) {
+    ASSERT(strcmp(kImpossibleBitPattern, "01") == 0);
+    return !mark_bit.Get() && mark_bit.Next().Get();
+  }
+
+  // Black markbits: 10 - this is required by the sweeper.
+  static const char* kBlackBitPattern;
+  static inline bool IsBlack(MarkBit mark_bit) {
+    ASSERT(strcmp(kBlackBitPattern, "10") == 0);
+    ASSERT(!IsImpossible(mark_bit));
+    return mark_bit.Get() && !mark_bit.Next().Get();
+  }
+
+  // White markbits: 00 - this is required by the mark bit clearer.
+  static const char* kWhiteBitPattern;
+  static inline bool IsWhite(MarkBit mark_bit) {
+    ASSERT(strcmp(kWhiteBitPattern, "00") == 0);
+    ASSERT(!IsImpossible(mark_bit));
+    return !mark_bit.Get();
+  }
+
+  // Grey markbits: 11
+  static const char* kGreyBitPattern;
+  static inline bool IsGrey(MarkBit mark_bit) {
+    ASSERT(strcmp(kGreyBitPattern, "11") == 0);
+    ASSERT(!IsImpossible(mark_bit));
+    return mark_bit.Get() && mark_bit.Next().Get();
+  }
+
+  static inline void MarkBlack(MarkBit mark_bit) {
+    mark_bit.Set();
+    mark_bit.Next().Clear();
+    ASSERT(Marking::IsBlack(mark_bit));
+  }
+
+  static inline void BlackToGrey(MarkBit markbit) {
+    ASSERT(IsBlack(markbit));
+    markbit.Next().Set();
+    ASSERT(IsGrey(markbit));
+  }
+
+  static inline void WhiteToGrey(MarkBit markbit) {
+    ASSERT(IsWhite(markbit));
+    markbit.Set();
+    markbit.Next().Set();
+    ASSERT(IsGrey(markbit));
+  }
+
+  static inline void GreyToBlack(MarkBit markbit) {
+    ASSERT(IsGrey(markbit));
+    markbit.Next().Clear();
+    ASSERT(IsBlack(markbit));
+  }
+
+  static inline void BlackToGrey(HeapObject* obj) {
+    ASSERT(obj->Size() >= 2 * kPointerSize);
+    BlackToGrey(MarkBitFrom(obj));
+  }
+
+  static inline void AnyToGrey(MarkBit markbit) {
+    markbit.Set();
+    markbit.Next().Set();
+    ASSERT(IsGrey(markbit));
+  }
+
+  // Returns true if the object whose mark is transferred is marked black.
+  bool TransferMark(Address old_start, Address new_start);
+
+#ifdef DEBUG
+  enum ObjectColor {
+    BLACK_OBJECT,
+    WHITE_OBJECT,
+    GREY_OBJECT,
+    IMPOSSIBLE_COLOR
+  };
+
+  static const char* ColorName(ObjectColor color) {
+    switch (color) {
+      case BLACK_OBJECT: return "black";
+      case WHITE_OBJECT: return "white";
+      case GREY_OBJECT: return "grey";
+      case IMPOSSIBLE_COLOR: return "impossible";
+    }
+    return "error";
+  }
+
+  static ObjectColor Color(HeapObject* obj) {
+    return Color(Marking::MarkBitFrom(obj));
+  }
+
+  static ObjectColor Color(MarkBit mark_bit) {
+    if (IsBlack(mark_bit)) return BLACK_OBJECT;
+    if (IsWhite(mark_bit)) return WHITE_OBJECT;
+    if (IsGrey(mark_bit)) return GREY_OBJECT;
+    UNREACHABLE();
+    return IMPOSSIBLE_COLOR;
+  }
+#endif
+
+  // Returns true if the transferred color is black.
+  INLINE(static bool TransferColor(HeapObject* from,
+                                   HeapObject* to)) {
+    MarkBit from_mark_bit = MarkBitFrom(from);
+    MarkBit to_mark_bit = MarkBitFrom(to);
+    bool is_black = false;
+    if (from_mark_bit.Get()) {
+      to_mark_bit.Set();
+      is_black = true;  // Looks black so far.
+    }
+    if (from_mark_bit.Next().Get()) {
+      to_mark_bit.Next().Set();
+      is_black = false;  // Was actually gray.
+    }
+    ASSERT(Color(from) == Color(to));
+    ASSERT(is_black == (Color(to) == BLACK_OBJECT));
+    return is_black;
+  }
+
+ private:
+  Heap* heap_;
+};
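
The Marking class above encodes each object's tri-color state in two adjacent
mark bits: white is 00, grey is 11, black is 10, and 01 is the impossible
pattern. A minimal standalone sketch of that encoding and its transitions
(not V8 code; plain bools stand in for the real bitmap cells):

#include <cassert>

// bit 0 models mark_bit.Get(), bit 1 models mark_bit.Next().Get().
struct TwoBitMark {
  bool mark;
  bool next;

  bool IsWhite() const { return !mark; }               // 00 (assumes not 01)
  bool IsGrey() const { return mark && next; }         // 11
  bool IsBlack() const { return mark && !next; }       // 10
  bool IsImpossible() const { return !mark && next; }  // 01

  void WhiteToGrey() { assert(IsWhite()); mark = true; next = true; }
  void GreyToBlack() { assert(IsGrey()); next = false; }
  void BlackToGrey() { assert(IsBlack()); next = true; }
};

int main() {
  TwoBitMark m = {false, false};  // white: not yet reached
  m.WhiteToGrey();                // discovered, queued for scanning
  m.GreyToBlack();                // body scanned, children queued
  assert(m.IsBlack() && !m.IsImpossible());
  return 0;
}

BlackToGrey is the demotion used on marking-deque overflow, so an already
scanned object can be rediscovered and rescanned later.
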
+
+// ----------------------------------------------------------------------------
+// Marking deque for tracing live objects.
+
+class MarkingDeque {
+ public:
+  MarkingDeque()
+      : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) { }
 
   void Initialize(Address low, Address high) {
-    top_ = low_ = reinterpret_cast<HeapObject**>(low);
-    high_ = reinterpret_cast<HeapObject**>(high);
+    HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
+    HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
+    array_ = obj_low;
+    mask_ = RoundDownToPowerOf2(static_cast<int>(obj_high - obj_low)) - 1;
+    top_ = bottom_ = 0;
     overflowed_ = false;
   }
 
-  bool is_full() const { return top_ >= high_; }
+  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
 
-  bool is_empty() const { return top_ <= low_; }
+  inline bool IsEmpty() { return top_ == bottom_; }
 
   bool overflowed() const { return overflowed_; }
 
-  void clear_overflowed() { overflowed_ = false; }
+  void ClearOverflowed() { overflowed_ = false; }
+
+  void SetOverflowed() { overflowed_ = true; }
 
   // Push the (marked) object on the marking stack if there is room,
   // otherwise mark the object as overflowed and wait for a rescan of the
   // heap.
-  void Push(HeapObject* object) {
-    CHECK(object->IsHeapObject());
-    if (is_full()) {
-      object->SetOverflow();
-      overflowed_ = true;
+  inline void PushBlack(HeapObject* object) {
+    ASSERT(object->IsHeapObject());
+    if (IsFull()) {
+      Marking::BlackToGrey(object);
+      MemoryChunk::IncrementLiveBytes(object->address(), -object->Size());
+      SetOverflowed();
     } else {
-      *(top_++) = object;
+      array_[top_] = object;
+      top_ = ((top_ + 1) & mask_);
     }
   }
 
-  HeapObject* Pop() {
-    ASSERT(!is_empty());
-    HeapObject* object = *(--top_);
-    CHECK(object->IsHeapObject());
+  inline void PushGrey(HeapObject* object) {
+    ASSERT(object->IsHeapObject());
+    if (IsFull()) {
+      ASSERT(Marking::IsGrey(Marking::MarkBitFrom(object)));
+      SetOverflowed();
+    } else {
+      array_[top_] = object;
+      top_ = ((top_ + 1) & mask_);
+    }
+  }
+
+  inline HeapObject* Pop() {
+    ASSERT(!IsEmpty());
+    top_ = ((top_ - 1) & mask_);
+    HeapObject* object = array_[top_];
+    ASSERT(object->IsHeapObject());
     return object;
   }
 
+  inline void UnshiftGrey(HeapObject* object) {
+    ASSERT(object->IsHeapObject());
+    if (IsFull()) {
+      ASSERT(Marking::IsGrey(Marking::MarkBitFrom(object)));
+      SetOverflowed();
+    } else {
+      bottom_ = ((bottom_ - 1) & mask_);
+      array_[bottom_] = object;
+    }
+  }
+
+  HeapObject** array() { return array_; }
+  int bottom() { return bottom_; }
+  int top() { return top_; }
+  int mask() { return mask_; }
+  void set_top(int top) { top_ = top; }
+
  private:
-  HeapObject** low_;
-  HeapObject** top_;
-  HeapObject** high_;
+  HeapObject** array_;
+  // array_[(top_ - 1) & mask_] is the top element in the deque.  The deque is
+  // empty when top_ == bottom_.  It is full when top_ + 1 == bottom_
+  // (mod mask_ + 1).
+  int top_;
+  int bottom_;
+  int mask_;
   bool overflowed_;
 
-  DISALLOW_COPY_AND_ASSIGN(MarkingStack);
+  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
+};
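
MarkingDeque stores HeapObject pointers in a ring buffer whose capacity is
rounded down to a power of two, so wrap-around is a single mask operation:
the deque is empty when top_ == bottom_ and full when advancing top_ would
meet bottom_. A minimal sketch of that index arithmetic (plain ints as
elements; not V8 code):

#include <cassert>
#include <vector>

class RingDeque {
 public:
  explicit RingDeque(int capacity_pow2)
      : array_(capacity_pow2),
        mask_(capacity_pow2 - 1),
        top_(0),
        bottom_(0) {
    assert(capacity_pow2 > 0 && (capacity_pow2 & mask_) == 0);  // power of two
  }

  bool IsEmpty() const { return top_ == bottom_; }
  bool IsFull() const { return ((top_ + 1) & mask_) == bottom_; }

  bool PushBack(int value) {  // like PushBlack/PushGrey
    if (IsFull()) return false;  // the real deque sets overflowed_ here
    array_[top_] = value;
    top_ = (top_ + 1) & mask_;
    return true;
  }

  int PopBack() {  // like Pop()
    assert(!IsEmpty());
    top_ = (top_ - 1) & mask_;  // the mask makes the decrement wrap around
    return array_[top_];
  }

  bool PushFront(int value) {  // like UnshiftGrey
    if (IsFull()) return false;
    bottom_ = (bottom_ - 1) & mask_;
    array_[bottom_] = value;
    return true;
  }

 private:
  std::vector<int> array_;
  int mask_;
  int top_;
  int bottom_;
};

On overflow the real deque does not drop work: PushBlack reverts the object to
grey (and subtracts its live bytes) and sets overflowed_, so that
RefillMarkingDeque can later rediscover overflowed objects by rescanning the
heap.
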
+
+
+class SlotsBufferAllocator {
+ public:
+  SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
+  void DeallocateBuffer(SlotsBuffer* buffer);
+
+  void DeallocateChain(SlotsBuffer** buffer_address);
+};
+
+
+// SlotsBuffer records a sequence of slots that has to be updated
+// after live objects were relocated from evacuation candidates.
+// All slots are either untyped or typed:
+//    - Untyped slots are expected to contain a tagged object pointer.
+//      They are recorded by an address.
+//    - Typed slots are expected to contain an encoded pointer to a heap
+//      object, where the encoding depends on the type of the slot.
+//      They are recorded as a pair (SlotType, slot address).
+// We assume that the zero page is never mapped; this allows us to distinguish
+// untyped slots from typed slots during iteration by a simple comparison:
+// if an element of the slots buffer is less than NUMBER_OF_SLOT_TYPES, it
+// is the first element of a typed slot's pair.
+class SlotsBuffer {
+ public:
+  typedef Object** ObjectSlot;
+
+  explicit SlotsBuffer(SlotsBuffer* next_buffer)
+      : idx_(0), chain_length_(1), next_(next_buffer) {
+    if (next_ != NULL) {
+      chain_length_ = next_->chain_length_ + 1;
+    }
+  }
+
+  ~SlotsBuffer() {
+  }
+
+  void Add(ObjectSlot slot) {
+    ASSERT(0 <= idx_ && idx_ < kNumberOfElements);
+    slots_[idx_++] = slot;
+  }
+
+  enum SlotType {
+    RELOCATED_CODE_OBJECT,
+    CODE_TARGET_SLOT,
+    CODE_ENTRY_SLOT,
+    DEBUG_TARGET_SLOT,
+    JS_RETURN_SLOT,
+    NUMBER_OF_SLOT_TYPES
+  };
+
+  void UpdateSlots(Heap* heap);
+
+  void UpdateSlotsWithFilter(Heap* heap);
+
+  SlotsBuffer* next() { return next_; }
+
+  static int SizeOfChain(SlotsBuffer* buffer) {
+    if (buffer == NULL) return 0;
+    return static_cast<int>(buffer->idx_ +
+                            (buffer->chain_length_ - 1) * kNumberOfElements);
+  }
+
+  inline bool IsFull() {
+    return idx_ == kNumberOfElements;
+  }
+
+  inline bool HasSpaceForTypedSlot() {
+    return idx_ < kNumberOfElements - 1;
+  }
+
+  static void UpdateSlotsRecordedIn(Heap* heap,
+                                    SlotsBuffer* buffer,
+                                    bool code_slots_filtering_required) {
+    while (buffer != NULL) {
+      if (code_slots_filtering_required) {
+        buffer->UpdateSlotsWithFilter(heap);
+      } else {
+        buffer->UpdateSlots(heap);
+      }
+      buffer = buffer->next();
+    }
+  }
+
+  enum AdditionMode {
+    FAIL_ON_OVERFLOW,
+    IGNORE_OVERFLOW
+  };
+
+  static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
+    return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
+  }
+
+  static bool AddTo(SlotsBufferAllocator* allocator,
+                    SlotsBuffer** buffer_address,
+                    ObjectSlot slot,
+                    AdditionMode mode) {
+    SlotsBuffer* buffer = *buffer_address;
+    if (buffer == NULL || buffer->IsFull()) {
+      if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
+        allocator->DeallocateChain(buffer_address);
+        return false;
+      }
+      buffer = allocator->AllocateBuffer(buffer);
+      *buffer_address = buffer;
+    }
+    buffer->Add(slot);
+    return true;
+  }
+
+  static bool IsTypedSlot(ObjectSlot slot);
+
+  static bool AddTo(SlotsBufferAllocator* allocator,
+                    SlotsBuffer** buffer_address,
+                    SlotType type,
+                    Address addr,
+                    AdditionMode mode);
+
+  static const int kNumberOfElements = 1021;
+
+ private:
+  static const int kChainLengthThreshold = 6;
+
+  intptr_t idx_;
+  intptr_t chain_length_;
+  SlotsBuffer* next_;
+  ObjectSlot slots_[kNumberOfElements];
 };
 
 
 // -------------------------------------------------------------------------
 // Mark-Compact collector
-
-class OverflowedObjectsScanner;
-
 class MarkCompactCollector {
  public:
   // Type of functions to compute forwarding addresses of objects in
@@ -134,13 +437,18 @@
 
   // Set the global force_compaction flag, it must be called before Prepare
   // to take effect.
-  void SetForceCompaction(bool value) {
-    force_compaction_ = value;
+  inline void SetFlags(int flags);
+
+  inline bool PreciseSweepingRequired() {
+    return sweep_precisely_;
   }
 
-
   static void Initialize();
 
+  void CollectEvacuationCandidates(PagedSpace* space);
+
+  void AddEvacuationCandidate(Page* p);
+
   // Prepares for GC by resetting relocation info in old and map spaces and
   // choosing spaces to compact.
   void Prepare(GCTracer* tracer);
@@ -148,23 +456,9 @@
   // Performs a global garbage collection.
   void CollectGarbage();
 
-  // True if the last full GC performed heap compaction.
-  bool HasCompacted() { return compacting_collection_; }
+  bool StartCompaction();
 
-  // True after the Prepare phase if the compaction is taking place.
-  bool IsCompacting() {
-#ifdef DEBUG
-    // For the purposes of asserts we don't want this to keep returning true
-    // after the collection is completed.
-    return state_ != IDLE && compacting_collection_;
-#else
-    return compacting_collection_;
-#endif
-  }
-
-  // The count of the number of objects left marked at the end of the last
-  // completed full GC (expected to be zero).
-  int previous_marked_count() { return previous_marked_count_; }
+  void AbortCompaction();
 
   // During a full GC, there is a stack-allocated GCTracer that is used for
   // bookkeeping information.  Return a pointer to that tracer.
@@ -179,29 +473,99 @@
   // Determine type of object and emit deletion log event.
   static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);
 
-  // Returns size of a possibly marked object.
-  static int SizeOfMarkedObject(HeapObject* obj);
-
   // Distinguishable invalid map encodings (for single word and multiple words)
   // that indicate free regions.
   static const uint32_t kSingleFreeEncoding = 0;
   static const uint32_t kMultiFreeEncoding = 1;
 
+  static inline bool IsMarked(Object* obj);
+
   inline Heap* heap() const { return heap_; }
 
   CodeFlusher* code_flusher() { return code_flusher_; }
   inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
   void EnableCodeFlushing(bool enable);
 
+  enum SweeperType {
+    CONSERVATIVE,
+    LAZY_CONSERVATIVE,
+    PRECISE
+  };
+
+#ifdef DEBUG
+  void VerifyMarkbitsAreClean();
+  static void VerifyMarkbitsAreClean(PagedSpace* space);
+  static void VerifyMarkbitsAreClean(NewSpace* space);
+#endif
+
+  // Sweep a single page from the given space conservatively.
+  // Returns the number of reclaimed bytes.
+  static intptr_t SweepConservatively(PagedSpace* space, Page* p);
+
+  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
+    return Page::FromAddress(reinterpret_cast<Address>(anchor))->
+        ShouldSkipEvacuationSlotRecording();
+  }
+
+  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
+    return Page::FromAddress(reinterpret_cast<Address>(host))->
+        ShouldSkipEvacuationSlotRecording();
+  }
+
+  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
+    return Page::FromAddress(reinterpret_cast<Address>(obj))->
+        IsEvacuationCandidate();
+  }
+
+  void EvictEvacuationCandidate(Page* page) {
+    if (FLAG_trace_fragmentation) {
+      PrintF("Page %p is too popular. Disabling evacuation.\n",
+             reinterpret_cast<void*>(page));
+    }
+
+    // TODO(gc) If all evacuation candidates are too popular we
+    // should stop slots recording entirely.
+    page->ClearEvacuationCandidate();
+
+    // We were not collecting slots on this page that point
+    // to other evacuation candidates; thus we have to
+    // rescan the page after evacuation to discover and update all
+    // pointers to evacuated objects.
+    if (page->owner()->identity() == OLD_DATA_SPACE) {
+      evacuation_candidates_.RemoveElement(page);
+    } else {
+      page->SetFlag(Page::RESCAN_ON_EVACUATION);
+    }
+  }
+
+  void RecordRelocSlot(RelocInfo* rinfo, Code* target);
+  void RecordCodeEntrySlot(Address slot, Code* target);
+
+  INLINE(void RecordSlot(Object** anchor_slot, Object** slot, Object* object));
+
+  void MigrateObject(Address dst,
+                     Address src,
+                     int size,
+                     AllocationSpace to_old_space);
+
+  bool TryPromoteObject(HeapObject* object, int object_size);
+
   inline Object* encountered_weak_maps() { return encountered_weak_maps_; }
   inline void set_encountered_weak_maps(Object* weak_map) {
     encountered_weak_maps_ = weak_map;
   }
 
+  void InvalidateCode(Code* code);
+
  private:
   MarkCompactCollector();
   ~MarkCompactCollector();
 
+  bool MarkInvalidatedCode();
+  void RemoveDeadInvalidatedCode();
+  void ProcessInvalidatedCode(ObjectVisitor* visitor);
+
+
 #ifdef DEBUG
   enum CollectorState {
     IDLE,
@@ -217,23 +581,26 @@
   CollectorState state_;
 #endif
 
-  // Global flag that forces a compaction.
-  bool force_compaction_;
+  // Global flag that forces sweeping to be precise, so we can traverse the
+  // heap.
+  bool sweep_precisely_;
 
-  // Global flag indicating whether spaces were compacted on the last GC.
-  bool compacting_collection_;
+  // True if we are collecting slots to perform evacuation from evacuation
+  // candidates.
+  bool compacting_;
 
-  // Global flag indicating whether spaces will be compacted on the next GC.
-  bool compact_on_next_gc_;
+  bool was_marked_incrementally_;
 
-  // The number of objects left marked at the end of the last completed full
-  // GC (expected to be zero).
-  int previous_marked_count_;
+  bool collect_maps_;
 
   // A pointer to the current stack-allocated GC tracer object during a full
   // collection (NULL before and after).
   GCTracer* tracer_;
 
+  SlotsBufferAllocator slots_buffer_allocator_;
+
+  SlotsBuffer* migration_slots_buffer_;
+
   // Finishes GC, performs heap verification if enabled.
   void Finish();
 
@@ -258,13 +625,13 @@
   // Marking operations for objects reachable from roots.
   void MarkLiveObjects();
 
-  void MarkUnmarkedObject(HeapObject* obj);
+  void AfterMarking();
 
-  inline void MarkObject(HeapObject* obj) {
-    if (!obj->IsMarked()) MarkUnmarkedObject(obj);
-  }
+  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));
 
-  inline void SetMark(HeapObject* obj);
+  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
+
+  void ProcessNewlyMarkedObject(HeapObject* obj);
 
   // Creates back pointers for all map transitions, stores them in
   // the prototype field.  The original prototype pointers are restored
@@ -298,18 +665,18 @@
 
   // Mark objects reachable (transitively) from objects in the marking stack
   // or overflowed in the heap.
-  void ProcessMarkingStack();
+  void ProcessMarkingDeque();
 
   // Mark objects reachable (transitively) from objects in the marking
   // stack.  This function empties the marking stack, but may leave
   // overflowed objects in the heap, in which case the marking stack's
   // overflow flag will be set.
-  void EmptyMarkingStack();
+  void EmptyMarkingDeque();
 
   // Refill the marking stack with overflowed objects from the heap.  This
   // function either leaves the marking stack full or clears the overflow
   // flag on the marking stack.
-  void RefillMarkingStack();
+  void RefillMarkingDeque();
 
   // After reachable maps have been marked process per context object
   // literal map caches removing unmarked entries.
@@ -323,17 +690,16 @@
   void UpdateLiveObjectCount(HeapObject* obj);
 #endif
 
-  // We sweep the large object space in the same way whether we are
-  // compacting or not, because the large object space is never compacted.
-  void SweepLargeObjectSpace();
-
-  // Test whether a (possibly marked) object is a Map.
-  static inline bool SafeIsMap(HeapObject* object);
-
   // Map transitions from a live map to a dead map must be killed.
   // We replace them with a null descriptor, with the same key.
   void ClearNonLiveTransitions();
 
+  // Marking detaches initial maps from SharedFunctionInfo objects
+  // to make this reference weak. We need to reattach initial maps
+  // after collection. This is done either during the
+  // ClearNonLiveTransitions pass or by calling this function.
+  void ReattachInitialMaps();
+
   // Mark all values associated with reachable keys in weak maps encountered
   // so far.  This might push new object or even new weak maps onto the
   // marking stack.
@@ -346,133 +712,30 @@
 
   // -----------------------------------------------------------------------
   // Phase 2: Sweeping to clear mark bits and free non-live objects for
-  // a non-compacting collection, or else computing and encoding
-  // forwarding addresses for a compacting collection.
+  // a non-compacting collection.
   //
   //  Before: Live objects are marked and non-live objects are unmarked.
   //
-  //   After: (Non-compacting collection.)  Live objects are unmarked,
-  //          non-live regions have been added to their space's free
-  //          list.
+  //   After: Live objects are unmarked, non-live regions have been added to
+  //          their space's free list. The active eden semispace is compacted
+  //          by evacuation.
   //
-  //   After: (Compacting collection.)  The forwarding address of live
-  //          objects in the paged spaces is encoded in their map word
-  //          along with their (non-forwarded) map pointer.
-  //
-  //          The forwarding address of live objects in the new space is
-  //          written to their map word's offset in the inactive
-  //          semispace.
-  //
-  //          Bookkeeping data is written to the page header of
-  //          eached paged-space page that contains live objects after
-  //          compaction:
-  //
-  //          The allocation watermark field is used to track the
-  //          relocation top address, the address of the first word
-  //          after the end of the last live object in the page after
-  //          compaction.
-  //
-  //          The Page::mc_page_index field contains the zero-based index of the
-  //          page in its space.  This word is only used for map space pages, in
-  //          order to encode the map addresses in 21 bits to free 11
-  //          bits per map word for the forwarding address.
-  //
-  //          The Page::mc_first_forwarded field contains the (nonencoded)
-  //          forwarding address of the first live object in the page.
-  //
-  //          In both the new space and the paged spaces, a linked list
-  //          of live regions is constructructed (linked through
-  //          pointers in the non-live region immediately following each
-  //          live region) to speed further passes of the collector.
-
-  // Encodes forwarding addresses of objects in compactable parts of the
-  // heap.
-  void EncodeForwardingAddresses();
-
-  // Encodes the forwarding addresses of objects in new space.
-  void EncodeForwardingAddressesInNewSpace();
-
-  // Function template to encode the forwarding addresses of objects in
-  // paged spaces, parameterized by allocation and non-live processing
-  // functions.
-  template<AllocationFunction Alloc, ProcessNonLiveFunction ProcessNonLive>
-  void EncodeForwardingAddressesInPagedSpace(PagedSpace* space);
-
-  // Iterates live objects in a space, passes live objects
-  // to a callback function which returns the heap size of the object.
-  // Returns the number of live objects iterated.
-  int IterateLiveObjects(NewSpace* space, LiveObjectCallback size_f);
-  int IterateLiveObjects(PagedSpace* space, LiveObjectCallback size_f);
-
-  // Iterates the live objects between a range of addresses, returning the
-  // number of live objects.
-  int IterateLiveObjectsInRange(Address start, Address end,
-                                LiveObjectCallback size_func);
 
   // If we are not compacting the heap, we simply sweep the spaces except
   // for the large object space, clearing mark bits and adding unmarked
   // regions to each space's free list.
   void SweepSpaces();
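
A rough sketch of the sweeping idea described above (hypothetical types, not
the V8 API): walk a page object by object, clear the mark of each live object,
and add every run of unmarked memory to the free list.

#include <cstddef>
#include <vector>

struct FakeObject { std::size_t size; bool marked; };
struct FreeRegion { std::size_t offset; std::size_t size; };

std::vector<FreeRegion> Sweep(std::vector<FakeObject>* page) {
  std::vector<FreeRegion> free_list;
  std::size_t offset = 0;
  std::size_t free_start = 0;
  bool in_free_run = false;
  for (FakeObject& obj : *page) {
    if (obj.marked) {
      if (in_free_run) {  // close the current non-live gap
        free_list.push_back({free_start, offset - free_start});
        in_free_run = false;
      }
      obj.marked = false;  // live object survives; just clear its mark
    } else if (!in_free_run) {
      free_start = offset;  // start of a new non-live region
      in_free_run = true;
    }
    offset += obj.size;
  }
  if (in_free_run) free_list.push_back({free_start, offset - free_start});
  return free_list;
}
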
 
-  // -----------------------------------------------------------------------
-  // Phase 3: Updating pointers in live objects.
-  //
-  //  Before: Same as after phase 2 (compacting collection).
-  //
-  //   After: All pointers in live objects, including encoded map
-  //          pointers, are updated to point to their target's new
-  //          location.
+  void EvacuateNewSpace();
 
-  friend class UpdatingVisitor;  // helper for updating visited objects
+  void EvacuateLiveObjectsFromPage(Page* p);
 
-  // Updates pointers in all spaces.
-  void UpdatePointers();
+  void EvacuatePages();
 
-  // Updates pointers in an object in new space.
-  // Returns the heap size of the object.
-  int UpdatePointersInNewObject(HeapObject* obj);
+  void EvacuateNewSpaceAndCandidates();
 
-  // Updates pointers in an object in old spaces.
-  // Returns the heap size of the object.
-  int UpdatePointersInOldObject(HeapObject* obj);
+  void SweepSpace(PagedSpace* space, SweeperType sweeper);
 
-  // Calculates the forwarding address of an object in an old space.
-  static Address GetForwardingAddressInOldSpace(HeapObject* obj);
-
-  // -----------------------------------------------------------------------
-  // Phase 4: Relocating objects.
-  //
-  //  Before: Pointers to live objects are updated to point to their
-  //          target's new location.
-  //
-  //   After: Objects have been moved to their new addresses.
-
-  // Relocates objects in all spaces.
-  void RelocateObjects();
-
-  // Converts a code object's inline target to addresses, convention from
-  // address to target happens in the marking phase.
-  int ConvertCodeICTargetToAddress(HeapObject* obj);
-
-  // Relocate a map object.
-  int RelocateMapObject(HeapObject* obj);
-
-  // Relocates an old object.
-  int RelocateOldPointerObject(HeapObject* obj);
-  int RelocateOldDataObject(HeapObject* obj);
-
-  // Relocate a property cell object.
-  int RelocateCellObject(HeapObject* obj);
-
-  // Helper function.
-  inline int RelocateOldNonCodeObject(HeapObject* obj,
-                                      PagedSpace* space);
-
-  // Relocates an object in the code space.
-  int RelocateCodeObject(HeapObject* obj);
-
-  // Copy a new object.
-  int RelocateNewObject(HeapObject* obj);
 
 #ifdef DEBUG
   // -----------------------------------------------------------------------
@@ -512,15 +775,19 @@
 #endif
 
   Heap* heap_;
-  MarkingStack marking_stack_;
+  MarkingDeque marking_deque_;
   CodeFlusher* code_flusher_;
   Object* encountered_weak_maps_;
 
+  List<Page*> evacuation_candidates_;
+  List<Code*> invalidated_code_;
+
   friend class Heap;
-  friend class OverflowedObjectsScanner;
 };
 
 
+const char* AllocationSpaceName(AllocationSpace space);
+
 } }  // namespace v8::internal
 
 #endif  // V8_MARK_COMPACT_H_
diff --git a/src/mips/assembler-mips-inl.h b/src/mips/assembler-mips-inl.h
index c4c4fd2..0cbe533 100644
--- a/src/mips/assembler-mips-inl.h
+++ b/src/mips/assembler-mips-inl.h
@@ -244,7 +244,7 @@
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
     Object** p = target_object_address();
     Object* orig = *p;
-    visitor->VisitPointer(p);
+    visitor->VisitEmbeddedPointer(host(), p);
     if (*p != orig) {
       set_target_object(*p);
     }
@@ -273,7 +273,7 @@
 void RelocInfo::Visit(Heap* heap) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    StaticVisitor::VisitPointer(heap, target_object_address());
+    StaticVisitor::VisitEmbeddedPointer(heap, host(), target_object_address());
   } else if (RelocInfo::IsCodeTarget(mode)) {
     StaticVisitor::VisitCodeTarget(heap, this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index d772304..f89dca9 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -587,10 +587,11 @@
   __ bind(&convert_argument);
   __ push(function);  // Preserve the function.
   __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0);
-  __ EnterInternalFrame();
-  __ push(v0);
-  __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(v0);
+    __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+  }
   __ pop(function);
   __ mov(argument, v0);
   __ Branch(&argument_is_string);
@@ -606,10 +607,11 @@
   // create a string wrapper.
   __ bind(&gc_required);
   __ IncrementCounter(counters->string_ctor_gc_required(), 1, a3, t0);
-  __ EnterInternalFrame();
-  __ push(argument);
-  __ CallRuntime(Runtime::kNewStringWrapper, 1);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(argument);
+    __ CallRuntime(Runtime::kNewStringWrapper, 1);
+  }
   __ Ret();
 }
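
The change above (and the similar ones below) replaces explicit
EnterInternalFrame()/LeaveInternalFrame() pairs with a braced block holding a
FrameScope, so leaving the frame is tied to the closing brace rather than a
separate call that could be forgotten. A generic sketch of that RAII shape,
with hypothetical Emitter/ScopedFrame names (not the actual FrameScope API):

struct Emitter {
  void EnterFrame() { /* emit frame-entry code */ }
  void LeaveFrame() { /* emit frame-exit code */ }
};

class ScopedFrame {
 public:
  explicit ScopedFrame(Emitter* e) : e_(e) { e_->EnterFrame(); }
  ~ScopedFrame() { e_->LeaveFrame(); }  // runs when the scope closes
 private:
  Emitter* e_;
};

void GenerateSomething(Emitter* e) {
  {
    ScopedFrame frame(e);  // frame entered here
    // ... emit the body that needs the frame ...
  }  // frame left here, at the end of the block
}
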
 
@@ -622,13 +624,13 @@
   //  -- sp[...]: constructor arguments
   // -----------------------------------
 
-  Label non_function_call;
+  Label slow, non_function_call;
   // Check that the function is not a smi.
   __ And(t0, a1, Operand(kSmiTagMask));
   __ Branch(&non_function_call, eq, t0, Operand(zero_reg));
   // Check that the function is a JSFunction.
   __ GetObjectType(a1, a2, a2);
-  __ Branch(&non_function_call, ne, a2, Operand(JS_FUNCTION_TYPE));
+  __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
 
   // Jump to the function-specific construct stub.
   __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
@@ -638,13 +640,21 @@
 
   // a0: number of arguments
   // a1: called object
+  // a2: object type
+  Label do_call;
+  __ bind(&slow);
+  __ Branch(&non_function_call, ne, a2, Operand(JS_FUNCTION_PROXY_TYPE));
+  __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+  __ jmp(&do_call);
+
   __ bind(&non_function_call);
+  __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+  __ bind(&do_call);
   // CALL_NON_FUNCTION expects the non-function constructor as receiver
   // (instead of the original receiver from the call site). The receiver is
   // stack element argc.
   // Set expected number of arguments to zero (not changing a0).
   __ mov(a2, zero_reg);
-  __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
   __ SetCallKind(t1, CALL_AS_METHOD);
   __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
           RelocInfo::CODE_TARGET);
@@ -667,331 +677,335 @@
   // -----------------------------------
 
   // Enter a construct frame.
-  __ EnterConstructFrame();
+  {
+    FrameScope scope(masm, StackFrame::CONSTRUCT);
 
-  // Preserve the two incoming parameters on the stack.
-  __ sll(a0, a0, kSmiTagSize);  // Tag arguments count.
-  __ MultiPushReversed(a0.bit() | a1.bit());
+    // Preserve the two incoming parameters on the stack.
+    __ sll(a0, a0, kSmiTagSize);  // Tag arguments count.
+    __ MultiPushReversed(a0.bit() | a1.bit());
 
-  // Use t7 to hold undefined, which is used in several places below.
-  __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+    // Use t7 to hold undefined, which is used in several places below.
+    __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
 
-  Label rt_call, allocated;
-  // Try to allocate the object without transitioning into C code. If any of the
-  // preconditions is not met, the code bails out to the runtime call.
-  if (FLAG_inline_new) {
-    Label undo_allocation;
+    Label rt_call, allocated;
+    // Try to allocate the object without transitioning into C code. If any of
+    // the preconditions is not met, the code bails out to the runtime call.
+    if (FLAG_inline_new) {
+      Label undo_allocation;
 #ifdef ENABLE_DEBUGGER_SUPPORT
-    ExternalReference debug_step_in_fp =
-        ExternalReference::debug_step_in_fp_address(isolate);
-    __ li(a2, Operand(debug_step_in_fp));
-    __ lw(a2, MemOperand(a2));
-    __ Branch(&rt_call, ne, a2, Operand(zero_reg));
+      ExternalReference debug_step_in_fp =
+          ExternalReference::debug_step_in_fp_address(isolate);
+      __ li(a2, Operand(debug_step_in_fp));
+      __ lw(a2, MemOperand(a2));
+      __ Branch(&rt_call, ne, a2, Operand(zero_reg));
 #endif
 
-    // Load the initial map and verify that it is in fact a map.
-    // a1: constructor function
-    __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
-    __ And(t0, a2, Operand(kSmiTagMask));
-    __ Branch(&rt_call, eq, t0, Operand(zero_reg));
-    __ GetObjectType(a2, a3, t4);
-    __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
+      // Load the initial map and verify that it is in fact a map.
+      // a1: constructor function
+      __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+      __ And(t0, a2, Operand(kSmiTagMask));
+      __ Branch(&rt_call, eq, t0, Operand(zero_reg));
+      __ GetObjectType(a2, a3, t4);
+      __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
 
-    // Check that the constructor is not constructing a JSFunction (see comments
-    // in Runtime_NewObject in runtime.cc). In which case the initial map's
-    // instance type would be JS_FUNCTION_TYPE.
-    // a1: constructor function
-    // a2: initial map
-    __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
-    __ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE));
+      // Check that the constructor is not constructing a JSFunction (see
+      // comments in Runtime_NewObject in runtime.cc). In which case the
+      // initial map's instance type would be JS_FUNCTION_TYPE.
+      // a1: constructor function
+      // a2: initial map
+      __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+      __ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE));
 
-    if (count_constructions) {
-      Label allocate;
-      // Decrease generous allocation count.
-      __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-      MemOperand constructor_count =
-         FieldMemOperand(a3, SharedFunctionInfo::kConstructionCountOffset);
-      __ lbu(t0, constructor_count);
-      __ Subu(t0, t0, Operand(1));
-      __ sb(t0, constructor_count);
-      __ Branch(&allocate, ne, t0, Operand(zero_reg));
-
-      __ Push(a1, a2);
-
-      __ push(a1);  // Constructor.
-      // The call will replace the stub, so the countdown is only done once.
-      __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
-      __ pop(a2);
-      __ pop(a1);
-
-      __ bind(&allocate);
-    }
-
-    // Now allocate the JSObject on the heap.
-    // a1: constructor function
-    // a2: initial map
-    __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
-    __ AllocateInNewSpace(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS);
-
-    // Allocated the JSObject, now initialize the fields. Map is set to initial
-    // map and properties and elements are set to empty fixed array.
-    // a1: constructor function
-    // a2: initial map
-    // a3: object size
-    // t4: JSObject (not tagged)
-    __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
-    __ mov(t5, t4);
-    __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
-    __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
-    __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
-    __ Addu(t5, t5, Operand(3*kPointerSize));
-    ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
-    ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
-    ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
-
-    // Fill all the in-object properties with appropriate filler.
-    // a1: constructor function
-    // a2: initial map
-    // a3: object size (in words)
-    // t4: JSObject (not tagged)
-    // t5: First in-object property of JSObject (not tagged)
-    __ sll(t0, a3, kPointerSizeLog2);
-    __ addu(t6, t4, t0);   // End of object.
-    ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
-    { Label loop, entry;
       if (count_constructions) {
-        // To allow for truncation.
-        __ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
-      } else {
-        __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+        Label allocate;
+        // Decrease generous allocation count.
+        __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+        MemOperand constructor_count =
+           FieldMemOperand(a3, SharedFunctionInfo::kConstructionCountOffset);
+        __ lbu(t0, constructor_count);
+        __ Subu(t0, t0, Operand(1));
+        __ sb(t0, constructor_count);
+        __ Branch(&allocate, ne, t0, Operand(zero_reg));
+
+        __ Push(a1, a2);
+
+        __ push(a1);  // Constructor.
+        // The call will replace the stub, so the countdown is only done once.
+        __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+        __ pop(a2);
+        __ pop(a1);
+
+        __ bind(&allocate);
       }
-      __ jmp(&entry);
-      __ bind(&loop);
-      __ sw(t7, MemOperand(t5, 0));
-      __ addiu(t5, t5, kPointerSize);
-      __ bind(&entry);
-      __ Branch(&loop, Uless, t5, Operand(t6));
+
+      // Now allocate the JSObject on the heap.
+      // a1: constructor function
+      // a2: initial map
+      __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+      __ AllocateInNewSpace(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS);
+
+      // Allocated the JSObject, now initialize the fields. Map is set to
+      // initial map and properties and elements are set to empty fixed array.
+      // a1: constructor function
+      // a2: initial map
+      // a3: object size
+      // t4: JSObject (not tagged)
+      __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
+      __ mov(t5, t4);
+      __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
+      __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
+      __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
+      __ Addu(t5, t5, Operand(3*kPointerSize));
+      ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+      ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+      ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
+
+      // Fill all the in-object properties with appropriate filler.
+      // a1: constructor function
+      // a2: initial map
+      // a3: object size (in words)
+      // t4: JSObject (not tagged)
+      // t5: First in-object property of JSObject (not tagged)
+      __ sll(t0, a3, kPointerSizeLog2);
+      __ addu(t6, t4, t0);   // End of object.
+      ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
+      { Label loop, entry;
+        if (count_constructions) {
+          // To allow for truncation.
+          __ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
+        } else {
+          __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+        }
+        __ jmp(&entry);
+        __ bind(&loop);
+        __ sw(t7, MemOperand(t5, 0));
+        __ addiu(t5, t5, kPointerSize);
+        __ bind(&entry);
+        __ Branch(&loop, Uless, t5, Operand(t6));
+      }
+
+      // Add the object tag to make the JSObject real, so that we can continue
+      // and jump into the continuation code at any time from now on. Any
+      // failures need to undo the allocation, so that the heap is in a
+      // consistent state and verifiable.
+      __ Addu(t4, t4, Operand(kHeapObjectTag));
+
+      // Check if a non-empty properties array is needed. Continue with the
+      // allocated object if not; fall through to the runtime call if it is.
+      // a1: constructor function
+      // t4: JSObject
+      // t5: start of next object (not tagged)
+      __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
+      // The instance sizes field contains both pre-allocated property fields
+      // and in-object properties.
+      __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
+      __ And(t6,
+             a0,
+             Operand(0x000000FF << Map::kPreAllocatedPropertyFieldsByte * 8));
+      __ srl(t0, t6, Map::kPreAllocatedPropertyFieldsByte * 8);
+      __ Addu(a3, a3, Operand(t0));
+      __ And(t6, a0, Operand(0x000000FF << Map::kInObjectPropertiesByte * 8));
+      __ srl(t0, t6, Map::kInObjectPropertiesByte * 8);
+      __ subu(a3, a3, t0);
+
+      // Done if no extra properties are to be allocated.
+      __ Branch(&allocated, eq, a3, Operand(zero_reg));
+      __ Assert(greater_equal, "Property allocation count failed.",
+          a3, Operand(zero_reg));
+
+      // Scale the number of elements by pointer size and add the header for
+      // FixedArrays to the start of the next object calculation from above.
+      // a1: constructor
+      // a3: number of elements in properties array
+      // t4: JSObject
+      // t5: start of next object
+      __ Addu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize));
+      __ AllocateInNewSpace(
+          a0,
+          t5,
+          t6,
+          a2,
+          &undo_allocation,
+          static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
+
+      // Initialize the FixedArray.
+      // a1: constructor
+      // a3: number of elements in properties array (un-tagged)
+      // t4: JSObject
+      // t5: start of next object
+      __ LoadRoot(t6, Heap::kFixedArrayMapRootIndex);
+      __ mov(a2, t5);
+      __ sw(t6, MemOperand(a2, JSObject::kMapOffset));
+      __ sll(a0, a3, kSmiTagSize);
+      __ sw(a0, MemOperand(a2, FixedArray::kLengthOffset));
+      __ Addu(a2, a2, Operand(2 * kPointerSize));
+
+      ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+      ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+
+      // Initialize the fields to undefined.
+      // a1: constructor
+      // a2: First element of FixedArray (not tagged)
+      // a3: number of elements in properties array
+      // t4: JSObject
+      // t5: FixedArray (not tagged)
+      __ sll(t3, a3, kPointerSizeLog2);
+      __ addu(t6, a2, t3);  // End of object.
+      ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+      { Label loop, entry;
+        if (count_constructions) {
+          __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+        } else if (FLAG_debug_code) {
+          __ LoadRoot(t8, Heap::kUndefinedValueRootIndex);
+          __ Assert(eq, "Undefined value not loaded.", t7, Operand(t8));
+        }
+        __ jmp(&entry);
+        __ bind(&loop);
+        __ sw(t7, MemOperand(a2));
+        __ addiu(a2, a2, kPointerSize);
+        __ bind(&entry);
+        __ Branch(&loop, less, a2, Operand(t6));
+      }
+
+      // Store the initialized FixedArray into the properties field of
+      // the JSObject.
+      // a1: constructor function
+      // t4: JSObject
+      // t5: FixedArray (not tagged)
+      __ Addu(t5, t5, Operand(kHeapObjectTag));  // Add the heap tag.
+      __ sw(t5, FieldMemOperand(t4, JSObject::kPropertiesOffset));
+
+      // Continue with JSObject being successfully allocated.
+      // a1: constructor function
+      // t4: JSObject
+      __ jmp(&allocated);
+
+      // Undo the setting of the new top so that the heap is verifiable. For
+      // example, the map's unused properties potentially do not match the
+      // allocated object's unused properties.
+      // t4: JSObject (previous new top)
+      __ bind(&undo_allocation);
+      __ UndoAllocationInNewSpace(t4, t5);
     }
 
-    // Add the object tag to make the JSObject real, so that we can continue and
-    // jump into the continuation code at any time from now on. Any failures
-    // need to undo the allocation, so that the heap is in a consistent state
-    // and verifiable.
-    __ Addu(t4, t4, Operand(kHeapObjectTag));
-
-    // Check if a non-empty properties array is needed. Continue with allocated
-    // object if not fall through to runtime call if it is.
+    __ bind(&rt_call);
+    // Allocate the new receiver object using the runtime call.
     // a1: constructor function
+    __ push(a1);  // Argument for Runtime_NewObject.
+    __ CallRuntime(Runtime::kNewObject, 1);
+    __ mov(t4, v0);
+
+    // Receiver for constructor call allocated.
     // t4: JSObject
-    // t5: start of next object (not tagged)
-    __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
-    // The field instance sizes contains both pre-allocated property fields and
-    // in-object properties.
-    __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
-    __ And(t6,
-           a0,
-           Operand(0x000000FF << Map::kPreAllocatedPropertyFieldsByte * 8));
-    __ srl(t0, t6, Map::kPreAllocatedPropertyFieldsByte * 8);
-    __ Addu(a3, a3, Operand(t0));
-    __ And(t6, a0, Operand(0x000000FF << Map::kInObjectPropertiesByte * 8));
-    __ srl(t0, t6, Map::kInObjectPropertiesByte * 8);
-    __ subu(a3, a3, t0);
+    __ bind(&allocated);
+    __ push(t4);
 
-    // Done if no extra properties are to be allocated.
-    __ Branch(&allocated, eq, a3, Operand(zero_reg));
-    __ Assert(greater_equal, "Property allocation count failed.",
-        a3, Operand(zero_reg));
+    // Push the function and the allocated receiver from the stack.
+    // sp[0]: receiver (newly allocated object)
+    // sp[1]: constructor function
+    // sp[2]: number of arguments (smi-tagged)
+    __ lw(a1, MemOperand(sp, kPointerSize));
+    __ MultiPushReversed(a1.bit() | t4.bit());
 
-    // Scale the number of elements by pointer size and add the header for
-    // FixedArrays to the start of the next object calculation from above.
-    // a1: constructor
-    // a3: number of elements in properties array
-    // t4: JSObject
-    // t5: start of next object
-    __ Addu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize));
-    __ AllocateInNewSpace(
-        a0,
-        t5,
-        t6,
-        a2,
-        &undo_allocation,
-        static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
+    // Reload the number of arguments from the stack.
+    // a1: constructor function
+    // sp[0]: receiver
+    // sp[1]: constructor function
+    // sp[2]: receiver
+    // sp[3]: constructor function
+    // sp[4]: number of arguments (smi-tagged)
+    __ lw(a3, MemOperand(sp, 4 * kPointerSize));
 
-    // Initialize the FixedArray.
-    // a1: constructor
-    // a3: number of elements in properties array (un-tagged)
-    // t4: JSObject
-    // t5: start of next object
-    __ LoadRoot(t6, Heap::kFixedArrayMapRootIndex);
-    __ mov(a2, t5);
-    __ sw(t6, MemOperand(a2, JSObject::kMapOffset));
-    __ sll(a0, a3, kSmiTagSize);
-    __ sw(a0, MemOperand(a2, FixedArray::kLengthOffset));
-    __ Addu(a2, a2, Operand(2 * kPointerSize));
+    // Set up the pointer to the last argument.
+    __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
 
-    ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
-    ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+    // Set up the number of arguments for the function call below.
+    __ srl(a0, a3, kSmiTagSize);
 
-    // Initialize the fields to undefined.
-    // a1: constructor
-    // a2: First element of FixedArray (not tagged)
-    // a3: number of elements in properties array
-    // t4: JSObject
-    // t5: FixedArray (not tagged)
-    __ sll(t3, a3, kPointerSizeLog2);
-    __ addu(t6, a2, t3);  // End of object.
-    ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
-    { Label loop, entry;
-      if (count_constructions) {
-        __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
-      } else if (FLAG_debug_code) {
-        __ LoadRoot(t8, Heap::kUndefinedValueRootIndex);
-        __ Assert(eq, "Undefined value not loaded.", t7, Operand(t8));
-      }
-      __ jmp(&entry);
-      __ bind(&loop);
-      __ sw(t7, MemOperand(a2));
-      __ addiu(a2, a2, kPointerSize);
-      __ bind(&entry);
-      __ Branch(&loop, less, a2, Operand(t6));
+    // Copy arguments and receiver to the expression stack.
+    // a0: number of arguments
+    // a1: constructor function
+    // a2: address of last argument (caller sp)
+    // a3: number of arguments (smi-tagged)
+    // sp[0]: receiver
+    // sp[1]: constructor function
+    // sp[2]: receiver
+    // sp[3]: constructor function
+    // sp[4]: number of arguments (smi-tagged)
+    Label loop, entry;
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+    __ Addu(t0, a2, Operand(t0));
+    __ lw(t1, MemOperand(t0));
+    __ push(t1);
+    __ bind(&entry);
+    __ Addu(a3, a3, Operand(-2));
+    __ Branch(&loop, greater_equal, a3, Operand(zero_reg));
+
+    // Call the function.
+    // a0: number of arguments
+    // a1: constructor function
+    if (is_api_function) {
+      __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+      Handle<Code> code =
+          masm->isolate()->builtins()->HandleApiCallConstruct();
+      ParameterCount expected(0);
+      __ InvokeCode(code, expected, expected,
+                    RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
+    } else {
+      ParameterCount actual(a0);
+      __ InvokeFunction(a1, actual, CALL_FUNCTION,
+                        NullCallWrapper(), CALL_AS_METHOD);
     }
 
-    // Store the initialized FixedArray into the properties field of
-    // the JSObject.
-    // a1: constructor function
-    // t4: JSObject
-    // t5: FixedArray (not tagged)
-    __ Addu(t5, t5, Operand(kHeapObjectTag));  // Add the heap tag.
-    __ sw(t5, FieldMemOperand(t4, JSObject::kPropertiesOffset));
+    // Pop the function from the stack.
+    // v0: result
+    // sp[0]: constructor function
+    // sp[2]: receiver
+    // sp[3]: constructor function
+    // sp[4]: number of arguments (smi-tagged)
+    __ Pop();
 
-    // Continue with JSObject being successfully allocated.
-    // a1: constructor function
-    // a4: JSObject
-    __ jmp(&allocated);
+    // Restore context from the frame.
+    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 
-    // Undo the setting of the new top so that the heap is verifiable. For
-    // example, the map's unused properties potentially do not match the
-    // allocated objects unused properties.
-    // t4: JSObject (previous new top)
-    __ bind(&undo_allocation);
-    __ UndoAllocationInNewSpace(t4, t5);
+    // If the result is an object (in the ECMA sense), we should get rid
+    // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+    // on page 74.
+    Label use_receiver, exit;
+
+    // If the result is a smi, it is *not* an object in the ECMA sense.
+    // v0: result
+    // sp[0]: receiver (newly allocated object)
+    // sp[1]: constructor function
+    // sp[2]: number of arguments (smi-tagged)
+    __ And(t0, v0, Operand(kSmiTagMask));
+    __ Branch(&use_receiver, eq, t0, Operand(zero_reg));
+
+    // If the type of the result (stored in its map) is less than
+    // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+    __ GetObjectType(v0, a3, a3);
+    __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+    // Throw away the result of the constructor invocation and use the
+    // on-stack receiver as the result.
+    __ bind(&use_receiver);
+    __ lw(v0, MemOperand(sp));
+
+    // Remove receiver from the stack, remove caller arguments, and
+    // return.
+    __ bind(&exit);
+    // v0: result
+    // sp[0]: receiver (newly allocated object)
+    // sp[1]: constructor function
+    // sp[2]: number of arguments (smi-tagged)
+    __ lw(a1, MemOperand(sp, 2 * kPointerSize));
+
+    // Leave construct frame.
   }
 
-  __ bind(&rt_call);
-  // Allocate the new receiver object using the runtime call.
-  // a1: constructor function
-  __ push(a1);  // Argument for Runtime_NewObject.
-  __ CallRuntime(Runtime::kNewObject, 1);
-  __ mov(t4, v0);
-
-  // Receiver for constructor call allocated.
-  // t4: JSObject
-  __ bind(&allocated);
-  __ push(t4);
-
-  // Push the function and the allocated receiver from the stack.
-  // sp[0]: receiver (newly allocated object)
-  // sp[1]: constructor function
-  // sp[2]: number of arguments (smi-tagged)
-  __ lw(a1, MemOperand(sp, kPointerSize));
-  __ MultiPushReversed(a1.bit() | t4.bit());
-
-  // Reload the number of arguments from the stack.
-  // a1: constructor function
-  // sp[0]: receiver
-  // sp[1]: constructor function
-  // sp[2]: receiver
-  // sp[3]: constructor function
-  // sp[4]: number of arguments (smi-tagged)
-  __ lw(a3, MemOperand(sp, 4 * kPointerSize));
-
-  // Setup pointer to last argument.
-  __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
-
-  // Setup number of arguments for function call below.
-  __ srl(a0, a3, kSmiTagSize);
-
-  // Copy arguments and receiver to the expression stack.
-  // a0: number of arguments
-  // a1: constructor function
-  // a2: address of last argument (caller sp)
-  // a3: number of arguments (smi-tagged)
-  // sp[0]: receiver
-  // sp[1]: constructor function
-  // sp[2]: receiver
-  // sp[3]: constructor function
-  // sp[4]: number of arguments (smi-tagged)
-  Label loop, entry;
-  __ jmp(&entry);
-  __ bind(&loop);
-  __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(t0, a2, Operand(t0));
-  __ lw(t1, MemOperand(t0));
-  __ push(t1);
-  __ bind(&entry);
-  __ Addu(a3, a3, Operand(-2));
-  __ Branch(&loop, greater_equal, a3, Operand(zero_reg));
-
-  // Call the function.
-  // a0: number of arguments
-  // a1: constructor function
-  if (is_api_function) {
-    __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-    Handle<Code> code =
-        masm->isolate()->builtins()->HandleApiCallConstruct();
-    ParameterCount expected(0);
-    __ InvokeCode(code, expected, expected,
-                  RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
-  } else {
-    ParameterCount actual(a0);
-    __ InvokeFunction(a1, actual, CALL_FUNCTION,
-                      NullCallWrapper(), CALL_AS_METHOD);
-  }
-
-  // Pop the function from the stack.
-  // v0: result
-  // sp[0]: constructor function
-  // sp[2]: receiver
-  // sp[3]: constructor function
-  // sp[4]: number of arguments (smi-tagged)
-  __ Pop();
-
-  // Restore context from the frame.
-  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-
-  // If the result is an object (in the ECMA sense), we should get rid
-  // of the receiver and use the result; see ECMA-262 section 13.2.2-7
-  // on page 74.
-  Label use_receiver, exit;
-
-  // If the result is a smi, it is *not* an object in the ECMA sense.
-  // v0: result
-  // sp[0]: receiver (newly allocated object)
-  // sp[1]: constructor function
-  // sp[2]: number of arguments (smi-tagged)
-  __ And(t0, v0, Operand(kSmiTagMask));
-  __ Branch(&use_receiver, eq, t0, Operand(zero_reg));
-
-  // If the type of the result (stored in its map) is less than
-  // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
-  __ GetObjectType(v0, a3, a3);
-  __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
-
-  // Throw away the result of the constructor invocation and use the
-  // on-stack receiver as the result.
-  __ bind(&use_receiver);
-  __ lw(v0, MemOperand(sp));
-
-  // Remove receiver from the stack, remove caller arguments, and
-  // return.
-  __ bind(&exit);
-  // v0: result
-  // sp[0]: receiver (newly allocated object)
-  // sp[1]: constructor function
-  // sp[2]: number of arguments (smi-tagged)
-  __ lw(a1, MemOperand(sp, 2 * kPointerSize));
-  __ LeaveConstructFrame();
   __ sll(t0, a1, kPointerSizeLog2 - 1);
   __ Addu(sp, sp, t0);
   __ Addu(sp, sp, kPointerSize);
@@ -1031,59 +1045,61 @@
   __ mov(cp, zero_reg);
 
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Set up the context from the function argument.
-  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+    // Set up the context from the function argument.
+    __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
 
-  // Set up the roots register.
-  ExternalReference roots_address =
-      ExternalReference::roots_address(masm->isolate());
-  __ li(s6, Operand(roots_address));
+    // Set up the roots register.
+    ExternalReference roots_address =
+        ExternalReference::roots_address(masm->isolate());
+    __ li(s6, Operand(roots_address));
 
-  // Push the function and the receiver onto the stack.
-  __ Push(a1, a2);
+    // Push the function and the receiver onto the stack.
+    __ Push(a1, a2);
 
-  // Copy arguments to the stack in a loop.
-  // a3: argc
-  // s0: argv, ie points to first arg
-  Label loop, entry;
-  __ sll(t0, a3, kPointerSizeLog2);
-  __ addu(t2, s0, t0);
-  __ b(&entry);
-  __ nop();   // Branch delay slot nop.
-  // t2 points past last arg.
-  __ bind(&loop);
-  __ lw(t0, MemOperand(s0));  // Read next parameter.
-  __ addiu(s0, s0, kPointerSize);
-  __ lw(t0, MemOperand(t0));  // Dereference handle.
-  __ push(t0);  // Push parameter.
-  __ bind(&entry);
-  __ Branch(&loop, ne, s0, Operand(t2));
+    // Copy arguments to the stack in a loop.
+    // a3: argc
+    // s0: argv, i.e. points to the first arg
+    Label loop, entry;
+    __ sll(t0, a3, kPointerSizeLog2);
+    __ addu(t2, s0, t0);
+    __ b(&entry);
+    __ nop();   // Branch delay slot nop.
+    // t2 points past last arg.
+    __ bind(&loop);
+    __ lw(t0, MemOperand(s0));  // Read next parameter.
+    __ addiu(s0, s0, kPointerSize);
+    __ lw(t0, MemOperand(t0));  // Dereference handle.
+    __ push(t0);  // Push parameter.
+    __ bind(&entry);
+    __ Branch(&loop, ne, s0, Operand(t2));
 
-  // Initialize all JavaScript callee-saved registers, since they will be seen
-  // by the garbage collector as part of handlers.
-  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
-  __ mov(s1, t0);
-  __ mov(s2, t0);
-  __ mov(s3, t0);
-  __ mov(s4, t0);
-  __ mov(s5, t0);
-  // s6 holds the root address. Do not clobber.
-  // s7 is cp. Do not init.
+    // Initialize all JavaScript callee-saved registers, since they will be
+    // seen by the garbage collector as part of handlers.
+    __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+    __ mov(s1, t0);
+    __ mov(s2, t0);
+    __ mov(s3, t0);
+    __ mov(s4, t0);
+    __ mov(s5, t0);
+    // s6 holds the root address. Do not clobber.
+    // s7 is cp. Do not init.
 
-  // Invoke the code and pass argc as a0.
-  __ mov(a0, a3);
-  if (is_construct) {
-    __ Call(masm->isolate()->builtins()->JSConstructCall());
-  } else {
-    ParameterCount actual(a0);
-    __ InvokeFunction(a1, actual, CALL_FUNCTION,
-                      NullCallWrapper(), CALL_AS_METHOD);
+    // Invoke the code and pass argc as a0.
+    __ mov(a0, a3);
+    if (is_construct) {
+      __ Call(masm->isolate()->builtins()->JSConstructCall());
+    } else {
+      ParameterCount actual(a0);
+      __ InvokeFunction(a1, actual, CALL_FUNCTION,
+                        NullCallWrapper(), CALL_AS_METHOD);
+    }
+
+    // Leave internal frame.
   }
 
-  __ LeaveInternalFrame();
-
   __ Jump(ra);
 }
 
@@ -1100,27 +1116,28 @@
 
 void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Preserve the function.
-  __ push(a1);
-  // Push call kind information.
-  __ push(t1);
+    // Preserve the function.
+    __ push(a1);
+    // Push call kind information.
+    __ push(t1);
 
-  // Push the function on the stack as the argument to the runtime function.
-  __ push(a1);
-  // Call the runtime function.
-  __ CallRuntime(Runtime::kLazyCompile, 1);
-  // Calculate the entry point.
-  __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
+    // Push the function on the stack as the argument to the runtime function.
+    __ push(a1);
+    // Call the runtime function.
+    __ CallRuntime(Runtime::kLazyCompile, 1);
+    // Calculate the entry point.
+    __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
 
-  // Restore call kind information.
-  __ pop(t1);
-  // Restore saved function.
-  __ pop(a1);
+    // Restore call kind information.
+    __ pop(t1);
+    // Restore saved function.
+    __ pop(a1);
 
-  // Tear down temporary frame.
-  __ LeaveInternalFrame();
+    // Tear down temporary frame.
+  }
 
   // Do a tail-call of the compiled function.
   __ Jump(t9);
@@ -1129,26 +1146,27 @@
 
 void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Preserve the function.
-  __ push(a1);
-  // Push call kind information.
-  __ push(t1);
+    // Preserve the function.
+    __ push(a1);
+    // Push call kind information.
+    __ push(t1);
 
-  // Push the function on the stack as the argument to the runtime function.
-  __ push(a1);
-  __ CallRuntime(Runtime::kLazyRecompile, 1);
-  // Calculate the entry point.
-  __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+    // Push the function on the stack as the argument to the runtime function.
+    __ push(a1);
+    __ CallRuntime(Runtime::kLazyRecompile, 1);
+    // Calculate the entry point.
+    __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
 
-  // Restore call kind information.
-  __ pop(t1);
-  // Restore saved function.
-  __ pop(a1);
+    // Restore call kind information.
+    __ pop(t1);
+    // Restore saved function.
+    __ pop(a1);
 
-  // Tear down temporary frame.
-  __ LeaveInternalFrame();
+    // Tear down temporary frame.
+  }
 
   // Do a tail-call of the compiled function.
   __ Jump(t9);
@@ -1190,19 +1208,20 @@
   // 2. Get the function to call (passed as receiver) from the stack, check
   //    if it is a function.
   // a0: actual number of arguments
-  Label non_function;
+  Label slow, non_function;
   __ sll(at, a0, kPointerSizeLog2);
   __ addu(at, sp, at);
   __ lw(a1, MemOperand(at));
   __ And(at, a1, Operand(kSmiTagMask));
   __ Branch(&non_function, eq, at, Operand(zero_reg));
   __ GetObjectType(a1, a2, a2);
-  __ Branch(&non_function, ne, a2, Operand(JS_FUNCTION_TYPE));
+  __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
 
   // 3a. Patch the first argument if necessary when calling a function.
   // a0: actual number of arguments
   // a1: function
   Label shift_arguments;
+  __ li(t0, Operand(0, RelocInfo::NONE));  // Indicate regular JS_FUNCTION.
   { Label convert_to_object, use_global_receiver, patch_receiver;
     // Change context eagerly in case we need the global receiver.
     __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
@@ -1210,13 +1229,13 @@
     // Do not transform the receiver for strict mode functions.
     __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
     __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
-    __ And(t0, a3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+    __ And(t3, a3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
                                  kSmiTagSize)));
-    __ Branch(&shift_arguments, ne, t0, Operand(zero_reg));
+    __ Branch(&shift_arguments, ne, t3, Operand(zero_reg));
 
     // Do not transform the receiver for native (Compilerhints already in a3).
-    __ And(t0, a3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
-    __ Branch(&shift_arguments, ne, t0, Operand(zero_reg));
+    __ And(t3, a3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+    __ Branch(&shift_arguments, ne, t3, Operand(zero_reg));
 
     // Compute the receiver in non-strict mode.
     // Load first argument in a2. a2 = -kPointerSize(sp + n_args << 2).
@@ -1238,21 +1257,25 @@
     __ Branch(&shift_arguments, ge, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
 
     __ bind(&convert_to_object);
-    __ EnterInternalFrame();  // In order to preserve argument count.
-    __ sll(a0, a0, kSmiTagSize);  // Smi tagged.
-    __ push(a0);
+    // Enter an internal frame in order to preserve argument count.
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ sll(a0, a0, kSmiTagSize);  // Smi tagged.
+      __ push(a0);
 
-    __ push(a2);
-    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-    __ mov(a2, v0);
+      __ push(a2);
+      __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+      __ mov(a2, v0);
 
-    __ pop(a0);
-    __ sra(a0, a0, kSmiTagSize);  // Un-tag.
-    __ LeaveInternalFrame();
-    // Restore the function to a1.
+      __ pop(a0);
+      __ sra(a0, a0, kSmiTagSize);  // Un-tag.
+      // Leave internal frame.
+    }
+    // Restore the function to a1, and the flag to t0.
     __ sll(at, a0, kPointerSizeLog2);
     __ addu(at, sp, at);
     __ lw(a1, MemOperand(at));
+    __ li(t0, Operand(0, RelocInfo::NONE));
     __ Branch(&patch_receiver);
 
     // Use the global receiver object from the called function as the
@@ -1273,25 +1296,31 @@
     __ Branch(&shift_arguments);
   }
 
-  // 3b. Patch the first argument when calling a non-function.  The
+  // 3b. Check for function proxy.
+  __ bind(&slow);
+  __ li(t0, Operand(1, RelocInfo::NONE));  // Indicate function proxy.
+  __ Branch(&shift_arguments, eq, a2, Operand(JS_FUNCTION_PROXY_TYPE));
+
+  __ bind(&non_function);
+  __ li(t0, Operand(2, RelocInfo::NONE));  // Indicate non-function.
+
+  // 3c. Patch the first argument when calling a non-function.  The
   //     CALL_NON_FUNCTION builtin expects the non-function callee as
   //     receiver, so overwrite the first argument which will ultimately
   //     become the receiver.
   // a0: actual number of arguments
   // a1: function
-  __ bind(&non_function);
-  // Restore the function in case it has been modified.
+  // t0: call type (0: JS function, 1: function proxy, 2: non-function)
   __ sll(at, a0, kPointerSizeLog2);
   __ addu(a2, sp, at);
   __ sw(a1, MemOperand(a2, -kPointerSize));
-  // Clear a1 to indicate a non-function being called.
-  __ mov(a1, zero_reg);
 
   // 4. Shift arguments and return address one slot down on the stack
   //    (overwriting the original receiver).  Adjust argument count to make
   //    the original first argument the new receiver.
   // a0: actual number of arguments
   // a1: function
+  // t0: call type (0: JS function, 1: function proxy, 2: non-function)
   __ bind(&shift_arguments);
   { Label loop;
     // Calculate the copy start address (destination). Copy end address is sp.
@@ -1309,14 +1338,26 @@
     __ Pop();
   }
 
-  // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin.
+  // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
+  //     or a function proxy via CALL_FUNCTION_PROXY.
   // a0: actual number of arguments
   // a1: function
-  { Label function;
-    __ Branch(&function, ne, a1, Operand(zero_reg));
-    __ mov(a2, zero_reg);  // expected arguments is 0 for CALL_NON_FUNCTION
-    __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
+  // t0: call type (0: JS function, 1: function proxy, 2: non-function)
+  { Label function, non_proxy;
+    __ Branch(&function, eq, t0, Operand(zero_reg));
+    // Expected number of arguments is 0 for CALL_NON_FUNCTION.
+    __ mov(a2, zero_reg);
     __ SetCallKind(t1, CALL_AS_METHOD);
+    __ Branch(&non_proxy, ne, t0, Operand(1));
+
+    __ push(a1);  // Re-add proxy object as additional argument.
+    __ Addu(a0, a0, Operand(1));
+    __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
+    __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+            RelocInfo::CODE_TARGET);
+
+    __ bind(&non_proxy);
+    __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
     __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
             RelocInfo::CODE_TARGET);
     __ bind(&function);
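For reference, the t0 value threaded through the hunks above selects among three dispatch targets. A compact restatement of that encoding is below; the enum and constant names are illustrative only, not identifiers from the source.

// Restates the t0 call-type encoding documented in the comments above.
enum CallType {
  kCallJSFunction    = 0,  // Plain JS function: invoked directly.
  kCallFunctionProxy = 1,  // Re-push the proxy, tail-call CALL_FUNCTION_PROXY.
  kCallNonFunction   = 2   // Patch the receiver, tail-call CALL_NON_FUNCTION.
};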
@@ -1350,134 +1391,160 @@
   const int kRecvOffset     =  3 * kPointerSize;
   const int kFunctionOffset =  4 * kPointerSize;
 
-  __ EnterInternalFrame();
+  {
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
+    __ lw(a0, MemOperand(fp, kFunctionOffset));  // Get the function.
+    __ push(a0);
+    __ lw(a0, MemOperand(fp, kArgsOffset));  // Get the args array.
+    __ push(a0);
+    // Returns (in v0) number of arguments to copy to stack as Smi.
+    __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
 
-  __ lw(a0, MemOperand(fp, kFunctionOffset));  // Get the function.
-  __ push(a0);
-  __ lw(a0, MemOperand(fp, kArgsOffset));  // Get the args array.
-  __ push(a0);
-  // Returns (in v0) number of arguments to copy to stack as Smi.
-  __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+    // Check the stack for overflow. We are not trying to catch
+    // interruptions (e.g. debug break and preemption) here, so the "real stack
+    // limit" is checked.
+    Label okay;
+    __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+    // Make a2 the space we have left. The stack might already be overflowed
+    // here which will cause a2 to become negative.
+    __ subu(a2, sp, a2);
+    // Check if the arguments will overflow the stack.
+    __ sll(t3, v0, kPointerSizeLog2 - kSmiTagSize);
+    __ Branch(&okay, gt, a2, Operand(t3));  // Signed comparison.
 
-  // Check the stack for overflow. We are not trying need to catch
-  // interruptions (e.g. debug break and preemption) here, so the "real stack
-  // limit" is checked.
-  Label okay;
-  __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
-  // Make a2 the space we have left. The stack might already be overflowed
-  // here which will cause a2 to become negative.
-  __ subu(a2, sp, a2);
-  // Check if the arguments will overflow the stack.
-  __ sll(t0, v0, kPointerSizeLog2 - kSmiTagSize);
-  __ Branch(&okay, gt, a2, Operand(t0));  // Signed comparison.
+    // Out of stack space.
+    __ lw(a1, MemOperand(fp, kFunctionOffset));
+    __ push(a1);
+    __ push(v0);
+    __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+    // End of stack check.
 
-  // Out of stack space.
-  __ lw(a1, MemOperand(fp, kFunctionOffset));
-  __ push(a1);
-  __ push(v0);
-  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
-  // End of stack check.
+    // Push current limit and index.
+    __ bind(&okay);
+    __ push(v0);  // Limit.
+    __ mov(a1, zero_reg);  // Initial index.
+    __ push(a1);
 
-  // Push current limit and index.
-  __ bind(&okay);
-  __ push(v0);  // Limit.
-  __ mov(a1, zero_reg);  // Initial index.
-  __ push(a1);
+    // Get the receiver.
+    __ lw(a0, MemOperand(fp, kRecvOffset));
 
-  // Change context eagerly to get the right global object if necessary.
-  __ lw(a0, MemOperand(fp, kFunctionOffset));
-  __ lw(cp, FieldMemOperand(a0, JSFunction::kContextOffset));
-  // Load the shared function info while the function is still in a0.
-  __ lw(a1, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset));
+    // Check that the function is a JS function (otherwise it must be a proxy).
+    Label push_receiver;
+    __ lw(a1, MemOperand(fp, kFunctionOffset));
+    __ GetObjectType(a1, a2, a2);
+    __ Branch(&push_receiver, ne, a2, Operand(JS_FUNCTION_TYPE));
 
-  // Compute the receiver.
-  Label call_to_object, use_global_receiver, push_receiver;
-  __ lw(a0, MemOperand(fp, kRecvOffset));
+    // Change context eagerly to get the right global object if necessary.
+    __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+    // Load the shared function info while the function is still in a1.
+    __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
 
-  // Do not transform the receiver for strict mode functions.
-  __ lw(a2, FieldMemOperand(a1, SharedFunctionInfo::kCompilerHintsOffset));
-  __ And(t0, a2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
-                               kSmiTagSize)));
-  __ Branch(&push_receiver, ne, t0, Operand(zero_reg));
+    // Compute the receiver.
+    // Do not transform the receiver for strict mode functions.
+    Label call_to_object, use_global_receiver;
+    __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
+    __ And(t3, a2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+                                 kSmiTagSize)));
+    __ Branch(&push_receiver, ne, t3, Operand(zero_reg));
 
-  // Do not transform the receiver for native (Compilerhints already in a2).
-  __ And(t0, a2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
-  __ Branch(&push_receiver, ne, t0, Operand(zero_reg));
+    // Do not transform the receiver for native (Compilerhints already in a2).
+    __ And(t3, a2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+    __ Branch(&push_receiver, ne, t3, Operand(zero_reg));
 
-  // Compute the receiver in non-strict mode.
-  __ And(t0, a0, Operand(kSmiTagMask));
-  __ Branch(&call_to_object, eq, t0, Operand(zero_reg));
-  __ LoadRoot(a1, Heap::kNullValueRootIndex);
-  __ Branch(&use_global_receiver, eq, a0, Operand(a1));
-  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
-  __ Branch(&use_global_receiver, eq, a0, Operand(a2));
+    // Compute the receiver in non-strict mode.
+    __ And(t3, a0, Operand(kSmiTagMask));
+    __ Branch(&call_to_object, eq, t3, Operand(zero_reg));
+    __ LoadRoot(a1, Heap::kNullValueRootIndex);
+    __ Branch(&use_global_receiver, eq, a0, Operand(a1));
+    __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+    __ Branch(&use_global_receiver, eq, a0, Operand(a2));
 
-  // Check if the receiver is already a JavaScript object.
-  // a0: receiver
-  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-  __ GetObjectType(a0, a1, a1);
-  __ Branch(&push_receiver, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+    // Check if the receiver is already a JavaScript object.
+    // a0: receiver
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ GetObjectType(a0, a1, a1);
+    __ Branch(&push_receiver, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
 
-  // Convert the receiver to a regular object.
-  // a0: receiver
-  __ bind(&call_to_object);
-  __ push(a0);
-  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-  __ mov(a0, v0);  // Put object in a0 to match other paths to push_receiver.
-  __ Branch(&push_receiver);
+    // Convert the receiver to a regular object.
+    // a0: receiver
+    __ bind(&call_to_object);
+    __ push(a0);
+    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+    __ mov(a0, v0);  // Put object in a0 to match other paths to push_receiver.
+    __ Branch(&push_receiver);
 
-  // Use the current global receiver object as the receiver.
-  __ bind(&use_global_receiver);
-  const int kGlobalOffset =
-      Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
-  __ lw(a0, FieldMemOperand(cp, kGlobalOffset));
-  __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
-  __ lw(a0, FieldMemOperand(a0, kGlobalOffset));
-  __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
+    // Use the current global receiver object as the receiver.
+    __ bind(&use_global_receiver);
+    const int kGlobalOffset =
+        Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+    __ lw(a0, FieldMemOperand(cp, kGlobalOffset));
+    __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
+    __ lw(a0, FieldMemOperand(a0, kGlobalOffset));
+    __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
 
-  // Push the receiver.
-  // a0: receiver
-  __ bind(&push_receiver);
-  __ push(a0);
+    // Push the receiver.
+    // a0: receiver
+    __ bind(&push_receiver);
+    __ push(a0);
 
-  // Copy all arguments from the array to the stack.
-  Label entry, loop;
-  __ lw(a0, MemOperand(fp, kIndexOffset));
-  __ Branch(&entry);
+    // Copy all arguments from the array to the stack.
+    Label entry, loop;
+    __ lw(a0, MemOperand(fp, kIndexOffset));
+    __ Branch(&entry);
 
-  // Load the current argument from the arguments array and push it to the
-  // stack.
-  // a0: current argument index
-  __ bind(&loop);
-  __ lw(a1, MemOperand(fp, kArgsOffset));
-  __ push(a1);
-  __ push(a0);
+    // Load the current argument from the arguments array and push it to the
+    // stack.
+    // a0: current argument index
+    __ bind(&loop);
+    __ lw(a1, MemOperand(fp, kArgsOffset));
+    __ push(a1);
+    __ push(a0);
 
-  // Call the runtime to access the property in the arguments array.
-  __ CallRuntime(Runtime::kGetProperty, 2);
-  __ push(v0);
+    // Call the runtime to access the property in the arguments array.
+    __ CallRuntime(Runtime::kGetProperty, 2);
+    __ push(v0);
 
-  // Use inline caching to access the arguments.
-  __ lw(a0, MemOperand(fp, kIndexOffset));
-  __ Addu(a0, a0, Operand(1 << kSmiTagSize));
-  __ sw(a0, MemOperand(fp, kIndexOffset));
+    // Use inline caching to access the arguments.
+    __ lw(a0, MemOperand(fp, kIndexOffset));
+    __ Addu(a0, a0, Operand(1 << kSmiTagSize));
+    __ sw(a0, MemOperand(fp, kIndexOffset));
 
-  // Test if the copy loop has finished copying all the elements from the
-  // arguments object.
-  __ bind(&entry);
-  __ lw(a1, MemOperand(fp, kLimitOffset));
-  __ Branch(&loop, ne, a0, Operand(a1));
-  // Invoke the function.
-  ParameterCount actual(a0);
-  __ sra(a0, a0, kSmiTagSize);
-  __ lw(a1, MemOperand(fp, kFunctionOffset));
-  __ InvokeFunction(a1, actual, CALL_FUNCTION,
-                    NullCallWrapper(), CALL_AS_METHOD);
+    // Test if the copy loop has finished copying all the elements from the
+    // arguments object.
+    __ bind(&entry);
+    __ lw(a1, MemOperand(fp, kLimitOffset));
+    __ Branch(&loop, ne, a0, Operand(a1));
 
-  // Tear down the internal frame and remove function, receiver and args.
-  __ LeaveInternalFrame();
-  __ Addu(sp, sp, Operand(3 * kPointerSize));
-  __ Ret();
+    // Invoke the function.
+    Label call_proxy;
+    ParameterCount actual(a0);
+    __ sra(a0, a0, kSmiTagSize);
+    __ lw(a1, MemOperand(fp, kFunctionOffset));
+    __ GetObjectType(a1, a2, a2);
+    __ Branch(&call_proxy, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+    __ InvokeFunction(a1, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
+
+    frame_scope.GenerateLeaveFrame();
+    __ Ret(USE_DELAY_SLOT);
+    __ Addu(sp, sp, Operand(3 * kPointerSize));  // In delay slot.
+
+    // Invoke the function proxy.
+    __ bind(&call_proxy);
+    __ push(a1);  // Add function proxy as last argument.
+    __ Addu(a0, a0, Operand(1));
+    __ li(a2, Operand(0, RelocInfo::NONE));
+    __ SetCallKind(t1, CALL_AS_METHOD);
+    __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
+    __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+            RelocInfo::CODE_TARGET);
+
+    // Tear down the internal frame and remove function, receiver and args.
+  }
+
+  __ Ret(USE_DELAY_SLOT);
+  __ Addu(sp, sp, Operand(3 * kPointerSize));  // In delay slot.
 }
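The builtins above are all converted with the same mechanical pattern: the paired EnterInternalFrame()/LeaveInternalFrame() calls become a block-scoped FrameScope(masm, StackFrame::INTERNAL), so frame teardown is tied to the closing brace rather than to an explicit call. A minimal, self-contained sketch of that RAII shape follows; ToyAssembler and ToyFrameScope are illustrative stand-ins, not the V8 MacroAssembler or FrameScope classes.

// Illustrative sketch of the enter/leave -> scoped-frame rewrite above.
// ToyAssembler and ToyFrameScope only mirror the enter-on-construct /
// leave-on-destruct shape that the diff introduces.
#include <cstdio>

struct ToyAssembler {
  void EnterInternalFrame() { std::puts("enter internal frame"); }
  void LeaveInternalFrame() { std::puts("leave internal frame"); }
  void Emit(const char* what) { std::printf("  %s\n", what); }
};

class ToyFrameScope {
 public:
  explicit ToyFrameScope(ToyAssembler* masm) : masm_(masm) {
    masm_->EnterInternalFrame();
  }
  ~ToyFrameScope() { masm_->LeaveInternalFrame(); }
 private:
  ToyAssembler* masm_;
};

int main() {
  ToyAssembler masm;
  {
    ToyFrameScope scope(&masm);  // Frame is set up here...
    masm.Emit("push a1");
    masm.Emit("CallRuntime(...)");
    masm.Emit("pop a1");
  }                              // ...and torn down when the block closes.
  masm.Emit("Jump t9");          // The tail call happens outside the frame.
  return 0;
}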
 
 
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 521b8e5..b5ecc68 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -615,7 +615,7 @@
 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
                                                   Register object,
                                                   Destination destination,
-                                                  FPURegister double_dst,
+                                                  DoubleRegister double_dst,
                                                   Register dst1,
                                                   Register dst2,
                                                   Register heap_number_map,
@@ -651,25 +651,16 @@
     // Load the double value.
     __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
 
-    // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
-    // On MIPS a lot of things cannot be implemented the same way so right
-    // now it makes a lot more sense to just do things manually.
-
-    // Save FCSR.
-    __ cfc1(scratch1, FCSR);
-    // Disable FPU exceptions.
-    __ ctc1(zero_reg, FCSR);
-    __ trunc_w_d(single_scratch, double_dst);
-    // Retrieve FCSR.
-    __ cfc1(scratch2, FCSR);
-    // Restore FCSR.
-    __ ctc1(scratch1, FCSR);
-
-    // Check for inexact conversion or exception.
-    __ And(scratch2, scratch2, kFCSRFlagMask);
+    Register except_flag = scratch2;
+    __ EmitFPUTruncate(kRoundToZero,
+                       single_scratch,
+                       double_dst,
+                       scratch1,
+                       except_flag,
+                       kCheckForInexactConversion);
 
     // Jump to not_int32 if the operation did not succeed.
-    __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
+    __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
 
     if (destination == kCoreRegisters) {
       __ Move(dst1, dst2, double_dst);
@@ -706,7 +697,7 @@
                                             Register scratch1,
                                             Register scratch2,
                                             Register scratch3,
-                                            FPURegister double_scratch,
+                                            DoubleRegister double_scratch,
                                             Label* not_int32) {
   ASSERT(!dst.is(object));
   ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
@@ -735,27 +726,19 @@
     // Load the double value.
     __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
 
-    // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
-    // On MIPS a lot of things cannot be implemented the same way so right
-    // now it makes a lot more sense to just do things manually.
-
-    // Save FCSR.
-    __ cfc1(scratch1, FCSR);
-    // Disable FPU exceptions.
-    __ ctc1(zero_reg, FCSR);
-    __ trunc_w_d(double_scratch, double_scratch);
-    // Retrieve FCSR.
-    __ cfc1(scratch2, FCSR);
-    // Restore FCSR.
-    __ ctc1(scratch1, FCSR);
-
-    // Check for inexact conversion or exception.
-    __ And(scratch2, scratch2, kFCSRFlagMask);
+    FPURegister single_scratch = double_scratch.low();
+    Register except_flag = scratch2;
+    __ EmitFPUTruncate(kRoundToZero,
+                       single_scratch,
+                       double_scratch,
+                       scratch1,
+                       except_flag,
+                       kCheckForInexactConversion);
 
     // Jump to not_int32 if the operation did not succeed.
-    __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
+    __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
     // Get the result in the destination register.
-    __ mfc1(dst, double_scratch);
+    __ mfc1(dst, single_scratch);
 
   } else {
     // Load the double value in the destination registers.
@@ -881,9 +864,11 @@
     __ Move(f12, a0, a1);
     __ Move(f14, a2, a3);
   }
-  // Call C routine that may not cause GC or other trouble.
-  __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
-                   4);
+  {
+    AllowExternalCallThatCantCauseGC scope(masm);
+    __ CallCFunction(
+        ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
+  }
   // Store answer in the overwritable heap number.
   if (!IsMipsSoftFloatABI) {
     CpuFeatures::Scope scope(FPU);
@@ -1258,7 +1243,7 @@
 
   if (!CpuFeatures::IsSupported(FPU)) {
     __ push(ra);
-    __ PrepareCallCFunction(4, t4);  // Two doubles count as 4 arguments.
+    __ PrepareCallCFunction(0, 2, t4);
     if (!IsMipsSoftFloatABI) {
       // We are not using MIPS FPU instructions, and parameters for the runtime
       // function call are prepaired in a0-a3 registers, but function we are
@@ -1268,19 +1253,15 @@
       __ Move(f12, a0, a1);
       __ Move(f14, a2, a3);
     }
-    __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 4);
+    __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
+       0, 2);
     __ pop(ra);  // Because this function returns int, result is in v0.
     __ Ret();
   } else {
     CpuFeatures::Scope scope(FPU);
     Label equal, less_than;
-    __ c(EQ, D, f12, f14);
-    __ bc1t(&equal);
-    __ nop();
-
-    __ c(OLT, D, f12, f14);
-    __ bc1t(&less_than);
-    __ nop();
+    __ BranchF(&equal, NULL, eq, f12, f14);
+    __ BranchF(&less_than, NULL, lt, f12, f14);
 
     // Not equal, not less, not NaN, must be greater.
     __ li(v0, Operand(GREATER));
@@ -1473,9 +1454,7 @@
       __ JumpIfSmi(probe, not_found);
       __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
       __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
-      __ c(EQ, D, f12, f14);
-      __ bc1t(&load_result_from_cache);
-      __ nop();   // bc1t() requires explicit fill of branch delay slot.
+      __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
       __ Branch(not_found);
     } else {
       // Note that there is no cache check for non-FPU case, even though
@@ -1591,9 +1570,7 @@
     __ li(t2, Operand(EQUAL));
 
     // Check if either rhs or lhs is NaN.
-    __ c(UN, D, f12, f14);
-    __ bc1t(&nan);
-    __ nop();
+    __ BranchF(NULL, &nan, eq, f12, f14);
 
     // Check if LESS condition is satisfied. If true, move conditionally
     // result to v0.
@@ -1711,89 +1688,116 @@
 }
 
 
-// The stub returns zero for false, and a non-zero value for true.
+// The stub expects its argument in the tos_ register and returns its result in
+// it, too: zero for false, and a non-zero value for true.
 void ToBooleanStub::Generate(MacroAssembler* masm) {
   // This stub uses FPU instructions.
   CpuFeatures::Scope scope(FPU);
 
-  Label false_result;
-  Label not_heap_number;
-  Register scratch0 = t5.is(tos_) ? t3 : t5;
+  Label patch;
+  const Register map = t5.is(tos_) ? t3 : t5;
 
-  // undefined -> false
-  __ LoadRoot(scratch0, Heap::kUndefinedValueRootIndex);
-  __ Branch(&false_result, eq, tos_, Operand(scratch0));
+  // undefined -> false.
+  CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
 
-  // Boolean -> its value
-  __ LoadRoot(scratch0, Heap::kFalseValueRootIndex);
-  __ Branch(&false_result, eq, tos_, Operand(scratch0));
-  __ LoadRoot(scratch0, Heap::kTrueValueRootIndex);
-  // "tos_" is a register and contains a non-zero value.  Hence we implicitly
-  // return true if the equal condition is satisfied.
-  __ Ret(eq, tos_, Operand(scratch0));
+  // Boolean -> its value.
+  CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
+  CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
 
-  // Smis: 0 -> false, all other -> true
-  __ And(scratch0, tos_, tos_);
-  __ Branch(&false_result, eq, scratch0, Operand(zero_reg));
-  __ And(scratch0, tos_, Operand(kSmiTagMask));
-  // "tos_" is a register and contains a non-zero value.  Hence we implicitly
-  // return true if the not equal condition is satisfied.
-  __ Ret(eq, scratch0, Operand(zero_reg));
+  // 'null' -> false.
+  CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
 
-  // 'null' -> false
-  __ LoadRoot(scratch0, Heap::kNullValueRootIndex);
-  __ Branch(&false_result, eq, tos_, Operand(scratch0));
+  if (types_.Contains(SMI)) {
+    // Smis: 0 -> false, all others -> true.
+    __ And(at, tos_, kSmiTagMask);
+    // tos_ contains the correct return value already
+    __ Ret(eq, at, Operand(zero_reg));
+  } else if (types_.NeedsMap()) {
+    // If we need a map later and have a Smi -> patch.
+    __ JumpIfSmi(tos_, &patch);
+  }
 
-  // HeapNumber => false if +0, -0, or NaN.
-  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-  __ Branch(&not_heap_number, ne, scratch0, Operand(at));
+  if (types_.NeedsMap()) {
+    __ lw(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
 
-  __ ldc1(f12, FieldMemOperand(tos_, HeapNumber::kValueOffset));
-  __ fcmp(f12, 0.0, UEQ);
+    if (types_.CanBeUndetectable()) {
+      __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
+      __ And(at, at, Operand(1 << Map::kIsUndetectable));
+      // Undetectable -> false.
+      __ movn(tos_, zero_reg, at);
+      __ Ret(ne, at, Operand(zero_reg));
+    }
+  }
 
-  // "tos_" is a register, and contains a non zero value by default.
-  // Hence we only need to overwrite "tos_" with zero to return false for
-  // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
-  __ movt(tos_, zero_reg);
-  __ Ret();
+  if (types_.Contains(SPEC_OBJECT)) {
+    // Spec object -> true.
+    __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+    // tos_ contains the correct non-zero return value already.
+    __ Ret(ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
+  }
 
-  __ bind(&not_heap_number);
+  if (types_.Contains(STRING)) {
+    // String value -> false iff empty.
+    __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+    Label skip;
+    __ Branch(&skip, ge, at, Operand(FIRST_NONSTRING_TYPE));
+    __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
+    __ Ret();  // the string length is OK as the return value
+    __ bind(&skip);
+  }
 
-  // It can be an undetectable object.
-  // Undetectable => false.
-  __ lw(at, FieldMemOperand(tos_, HeapObject::kMapOffset));
-  __ lbu(scratch0, FieldMemOperand(at, Map::kBitFieldOffset));
-  __ And(scratch0, scratch0, Operand(1 << Map::kIsUndetectable));
-  __ Branch(&false_result, eq, scratch0, Operand(1 << Map::kIsUndetectable));
+  if (types_.Contains(HEAP_NUMBER)) {
+    // Heap number -> false iff +0, -0, or NaN.
+    Label not_heap_number;
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+    __ Branch(&not_heap_number, ne, map, Operand(at));
+    Label zero_or_nan, number;
+    __ ldc1(f2, FieldMemOperand(tos_, HeapNumber::kValueOffset));
+    __ BranchF(&number, &zero_or_nan, ne, f2, kDoubleRegZero);
+    // "tos_" is a register, and contains a non zero value by default.
+    // Hence we only need to overwrite "tos_" with zero to return false for
+    // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
+    __ bind(&zero_or_nan);
+    __ mov(tos_, zero_reg);
+    __ bind(&number);
+    __ Ret();
+    __ bind(&not_heap_number);
+  }
 
-  // JavaScript object => true.
-  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
-  __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
+  __ bind(&patch);
+  GenerateTypeTransition(masm);
+}
 
-  // "tos_" is a register and contains a non-zero value.
-  // Hence we implicitly return true if the greater than
-  // condition is satisfied.
-  __ Ret(ge, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
 
-  // Check for string.
-  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
-  __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
-  // "tos_" is a register and contains a non-zero value.
-  // Hence we implicitly return true if the greater than
-  // condition is satisfied.
-  __ Ret(ge, scratch0, Operand(FIRST_NONSTRING_TYPE));
+void ToBooleanStub::CheckOddball(MacroAssembler* masm,
+                                 Type type,
+                                 Heap::RootListIndex value,
+                                 bool result) {
+  if (types_.Contains(type)) {
+    // If we see an expected oddball, return its ToBoolean value in tos_.
+    __ LoadRoot(at, value);
+    __ Subu(at, at, tos_);  // Equality check for the movz below.
+    // The value of a root is never NULL, so we can avoid loading a non-null
+    // value into tos_ when we want to return 'true'.
+    if (!result) {
+      __ movz(tos_, zero_reg, at);
+    }
+    __ Ret(eq, at, Operand(zero_reg));
+  }
+}
 
-  // String value => false iff empty, i.e., length is zero.
-  __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
-  // If length is zero, "tos_" contains zero ==> false.
-  // If length is not zero, "tos_" contains a non-zero value ==> true.
-  __ Ret();
 
-  // Return 0 in "tos_" for false.
-  __ bind(&false_result);
-  __ mov(tos_, zero_reg);
-  __ Ret();
+void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
+  __ Move(a3, tos_);
+  __ li(a2, Operand(Smi::FromInt(tos_.code())));
+  __ li(a1, Operand(Smi::FromInt(types_.ToByte())));
+  __ Push(a3, a2, a1);
+  // Patch the caller to an appropriate specialized stub and return the
+  // operation result to the caller of the stub.
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
+      3,
+      1);
 }
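The rewritten ToBooleanStub above only emits the checks for types recorded in types_, and anything unexpected falls through to the patch/type-transition path. A hedged sketch of that record in plain C++ follows; ToyToBooleanTypes and its enumerators are illustrative, not the V8 classes, and only the Contains()/NeedsMap() shape mirrors what the stub relies on.

// Hedged sketch of the type record that drives the checks above.
#include <bitset>

enum ToyToBooleanType {
  kUndefined, kBoolean, kNullType, kSmi, kSpecObject, kString, kHeapNumber,
  kNumToBooleanTypes
};

class ToyToBooleanTypes {
 public:
  void Add(ToyToBooleanType type) { seen_.set(type); }
  bool Contains(ToyToBooleanType type) const { return seen_.test(type); }
  // A map load is only emitted when one of the map-based checks is needed.
  bool NeedsMap() const {
    return Contains(kSpecObject) || Contains(kString) || Contains(kHeapNumber);
  }
 private:
  std::bitset<kNumToBooleanTypes> seen_;
};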
 
 
@@ -1951,12 +1955,13 @@
     __ jmp(&heapnumber_allocated);
 
     __ bind(&slow_allocate_heapnumber);
-    __ EnterInternalFrame();
-    __ push(a0);
-    __ CallRuntime(Runtime::kNumberAlloc, 0);
-    __ mov(a1, v0);
-    __ pop(a0);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(a0);
+      __ CallRuntime(Runtime::kNumberAlloc, 0);
+      __ mov(a1, v0);
+      __ pop(a0);
+    }
 
     __ bind(&heapnumber_allocated);
     __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
@@ -1998,13 +2003,14 @@
     __ jmp(&heapnumber_allocated);
 
     __ bind(&slow_allocate_heapnumber);
-    __ EnterInternalFrame();
-    __ push(v0);  // Push the heap number, not the untagged int32.
-    __ CallRuntime(Runtime::kNumberAlloc, 0);
-    __ mov(a2, v0);  // Move the new heap number into a2.
-    // Get the heap number into v0, now that the new heap number is in a2.
-    __ pop(v0);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(v0);  // Push the heap number, not the untagged int32.
+      __ CallRuntime(Runtime::kNumberAlloc, 0);
+      __ mov(a2, v0);  // Move the new heap number into a2.
+      // Get the heap number into v0, now that the new heap number is in a2.
+      __ pop(v0);
+    }
 
     // Convert the heap number in v0 to an untagged integer in a1.
     // This can't go slow-case because it's the same number we already
@@ -2717,26 +2723,16 @@
           // Otherwise return a heap number if allowed, or jump to type
           // transition.
 
-          // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
-          // On MIPS a lot of things cannot be implemented the same way so right
-          // now it makes a lot more sense to just do things manually.
-
-          // Save FCSR.
-          __ cfc1(scratch1, FCSR);
-          // Disable FPU exceptions.
-          __ ctc1(zero_reg, FCSR);
-          __ trunc_w_d(single_scratch, f10);
-          // Retrieve FCSR.
-          __ cfc1(scratch2, FCSR);
-          // Restore FCSR.
-          __ ctc1(scratch1, FCSR);
-
-          // Check for inexact conversion or exception.
-          __ And(scratch2, scratch2, kFCSRFlagMask);
+          Register except_flag = scratch2;
+          __ EmitFPUTruncate(kRoundToZero,
+                             single_scratch,
+                             f10,
+                             scratch1,
+                             except_flag);
 
           if (result_type_ <= BinaryOpIC::INT32) {
-            // If scratch2 != 0, result does not fit in a 32-bit integer.
-            __ Branch(&transition, ne, scratch2, Operand(zero_reg));
+            // If except_flag != 0, result does not fit in a 32-bit integer.
+            __ Branch(&transition, ne, except_flag, Operand(zero_reg));
           }
 
           // Check if the result fits in a smi.
@@ -3225,7 +3221,6 @@
     __ lw(t0, MemOperand(cache_entry, 0));
     __ lw(t1, MemOperand(cache_entry, 4));
     __ lw(t2, MemOperand(cache_entry, 8));
-    __ Addu(cache_entry, cache_entry, 12);
     __ Branch(&calculate, ne, a2, Operand(t0));
     __ Branch(&calculate, ne, a3, Operand(t1));
     // Cache hit. Load result, cleanup and return.
@@ -3259,13 +3254,13 @@
     // Register a0 holds precalculated cache entry address; preserve
     // it on the stack and pop it into register cache_entry after the
     // call.
-    __ push(cache_entry);
+    __ Push(cache_entry, a2, a3);
     GenerateCallCFunction(masm, scratch0);
     __ GetCFunctionDoubleResult(f4);
 
     // Try to update the cache. If we cannot allocate a
     // heap number, we return the result without updating.
-    __ pop(cache_entry);
+    __ Pop(cache_entry, a2, a3);
     __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
     __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
     __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
@@ -3283,10 +3278,11 @@
     __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
     __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
     __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
-    __ EnterInternalFrame();
-    __ push(a0);
-    __ CallRuntime(RuntimeFunction(), 1);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(a0);
+      __ CallRuntime(RuntimeFunction(), 1);
+    }
     __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
     __ Ret();
 
@@ -3299,14 +3295,15 @@
 
     // We return the value in f4 without adding it to the cache, but
     // we cause a scavenging GC so that future allocations will succeed.
-    __ EnterInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
 
-    // Allocate an aligned object larger than a HeapNumber.
-    ASSERT(4 * kPointerSize >= HeapNumber::kSize);
-    __ li(scratch0, Operand(4 * kPointerSize));
-    __ push(scratch0);
-    __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
-    __ LeaveInternalFrame();
+      // Allocate an aligned object larger than a HeapNumber.
+      ASSERT(4 * kPointerSize >= HeapNumber::kSize);
+      __ li(scratch0, Operand(4 * kPointerSize));
+      __ push(scratch0);
+      __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+    }
     __ Ret();
   }
 }
@@ -3317,22 +3314,26 @@
   __ push(ra);
   __ PrepareCallCFunction(2, scratch);
   if (IsMipsSoftFloatABI) {
-    __ Move(v0, v1, f4);
+    __ Move(a0, a1, f4);
   } else {
     __ mov_d(f12, f4);
   }
+  AllowExternalCallThatCantCauseGC scope(masm);
   switch (type_) {
     case TranscendentalCache::SIN:
       __ CallCFunction(
-          ExternalReference::math_sin_double_function(masm->isolate()), 2);
+          ExternalReference::math_sin_double_function(masm->isolate()),
+          0, 1);
       break;
     case TranscendentalCache::COS:
       __ CallCFunction(
-          ExternalReference::math_cos_double_function(masm->isolate()), 2);
+          ExternalReference::math_cos_double_function(masm->isolate()),
+          0, 1);
       break;
     case TranscendentalCache::LOG:
       __ CallCFunction(
-          ExternalReference::math_log_double_function(masm->isolate()), 2);
+          ExternalReference::math_log_double_function(masm->isolate()),
+          0, 1);
       break;
     default:
       UNIMPLEMENTED();
@@ -3415,12 +3416,15 @@
                           heapnumbermap,
                           &call_runtime);
     __ push(ra);
-    __ PrepareCallCFunction(3, scratch);
+    __ PrepareCallCFunction(1, 1, scratch);
     __ SetCallCDoubleArguments(double_base, exponent);
-    __ CallCFunction(
-        ExternalReference::power_double_int_function(masm->isolate()), 3);
-    __ pop(ra);
-    __ GetCFunctionDoubleResult(double_result);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm);
+      __ CallCFunction(
+          ExternalReference::power_double_int_function(masm->isolate()), 1, 1);
+      __ pop(ra);
+      __ GetCFunctionDoubleResult(double_result);
+    }
     __ sdc1(double_result,
             FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
     __ mov(v0, heapnumber);
@@ -3443,15 +3447,20 @@
                           heapnumbermap,
                           &call_runtime);
     __ push(ra);
-    __ PrepareCallCFunction(4, scratch);
+    __ PrepareCallCFunction(0, 2, scratch);
     // ABI (o32) for func(double a, double b): a in f12, b in f14.
     ASSERT(double_base.is(f12));
     ASSERT(double_exponent.is(f14));
     __ SetCallCDoubleArguments(double_base, double_exponent);
-    __ CallCFunction(
-        ExternalReference::power_double_double_function(masm->isolate()), 4);
-    __ pop(ra);
-    __ GetCFunctionDoubleResult(double_result);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm);
+      __ CallCFunction(
+          ExternalReference::power_double_double_function(masm->isolate()),
+          0,
+          2);
+      __ pop(ra);
+      __ GetCFunctionDoubleResult(double_result);
+    }
     __ sdc1(double_result,
             FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
     __ mov(v0, heapnumber);
@@ -3468,6 +3477,24 @@
 }
 
 
+bool CEntryStub::IsPregenerated() {
+  return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
+          result_size_ == 1;
+}
+
+
+void CodeStub::GenerateStubsAheadOfTime() {
+}
+
+
+void CodeStub::GenerateFPStubs() {
+  CEntryStub save_doubles(1);
+  save_doubles.SaveDoubles();
+  Handle<Code> code = save_doubles.GetCode();
+  code->GetIsolate()->set_fp_stubs_generated(true);
+}
+
+
 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
   __ Throw(v0);
 }
@@ -3493,9 +3520,10 @@
   if (do_gc) {
     // Move result passed in v0 into a0 to call PerformGC.
     __ mov(a0, v0);
-    __ PrepareCallCFunction(1, a1);
+    __ PrepareCallCFunction(1, 0, a1);
     __ CallCFunction(
-        ExternalReference::perform_gc_function(masm->isolate()), 1);
+        ExternalReference::perform_gc_function(masm->isolate()),
+        1, 0);
   }
 
   ExternalReference scope_depth =
@@ -3628,6 +3656,7 @@
   __ Subu(s1, s1, Operand(kPointerSize));
 
   // Enter the exit frame that transitions from JavaScript to C++.
+  FrameScope scope(masm, StackFrame::MANUAL);
   __ EnterExitFrame(save_doubles_);
 
   // Setup argc and the builtin function in callee-saved registers.
@@ -3699,8 +3728,11 @@
     CpuFeatures::Scope scope(FPU);
     // Save callee-saved FPU registers.
     __ MultiPushFPU(kCalleeSavedFPU);
+    // Set up the reserved register for 0.0.
+    __ Move(kDoubleRegZero, 0.0);
   }
 
+
   // Load argv in s0 register.
   int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
   if (CpuFeatures::IsSupported(FPU)) {
@@ -3857,11 +3889,10 @@
 // * object: a0 or at sp + 1 * kPointerSize.
 // * function: a1 or at sp.
 //
-// Inlined call site patching is a crankshaft-specific feature that is not
-// implemented on MIPS.
+// An inlined call site may have been generated before calling this stub.
+// In this case the offset to the inline site to patch is passed on the stack,
+// in the safepoint slot for register t0.
 void InstanceofStub::Generate(MacroAssembler* masm) {
-  // This is a crankshaft-specific feature that has not been implemented yet.
-  ASSERT(!HasCallSiteInlineCheck());
   // Call site inlining and patching implies arguments in registers.
   ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
   // ReturnTrueFalse is only implemented for inlined call sites.
@@ -3875,6 +3906,8 @@
   const Register inline_site = t5;
   const Register scratch = a2;
 
+  const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize;
+
   Label slow, loop, is_instance, is_not_instance, not_js_object;
 
   if (!HasArgsInRegisters()) {
@@ -3890,10 +3923,10 @@
   // real lookup and update the call site cache.
   if (!HasCallSiteInlineCheck()) {
     Label miss;
-    __ LoadRoot(t1, Heap::kInstanceofCacheFunctionRootIndex);
-    __ Branch(&miss, ne, function, Operand(t1));
-    __ LoadRoot(t1, Heap::kInstanceofCacheMapRootIndex);
-    __ Branch(&miss, ne, map, Operand(t1));
+    __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
+    __ Branch(&miss, ne, function, Operand(at));
+    __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
+    __ Branch(&miss, ne, map, Operand(at));
     __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
     __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
 
@@ -3913,7 +3946,15 @@
     __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
     __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
   } else {
-    UNIMPLEMENTED_MIPS();
+    ASSERT(HasArgsInRegisters());
+    // Patch the (relocated) inlined map check.
+
+    // The offset was stored in t0 safepoint slot.
+    // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
+    __ LoadFromSafepointRegisterSlot(scratch, t0);
+    __ Subu(inline_site, ra, scratch);
+    // Patch the relocated value to map.
+    __ PatchRelocatedValue(inline_site, scratch, map);
   }
 
   // Register mapping: a3 is object map and t0 is function prototype.
@@ -3939,7 +3980,16 @@
     __ mov(v0, zero_reg);
     __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
   } else {
-    UNIMPLEMENTED_MIPS();
+    // Patch the call site to return true.
+    __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+    __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+    // Get the boolean result location in scratch and patch it.
+    __ PatchRelocatedValue(inline_site, scratch, v0);
+
+    if (!ReturnTrueFalseObject()) {
+      ASSERT_EQ(Smi::FromInt(0), 0);
+      __ mov(v0, zero_reg);
+    }
   }
   __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
 
@@ -3948,8 +3998,17 @@
     __ li(v0, Operand(Smi::FromInt(1)));
     __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
   } else {
-    UNIMPLEMENTED_MIPS();
+    // Patch the call site to return false.
+    __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+    __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+    // Get the boolean result location in scratch and patch it.
+    __ PatchRelocatedValue(inline_site, scratch, v0);
+
+    if (!ReturnTrueFalseObject()) {
+      __ li(v0, Operand(Smi::FromInt(1)));
+    }
   }
+
   __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
 
   Label object_not_null, object_not_null_or_smi;
@@ -3986,10 +4045,11 @@
     }
   __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
   } else {
-    __ EnterInternalFrame();
-    __ Push(a0, a1);
-    __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(a0, a1);
+      __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+    }
     __ mov(a0, v0);
     __ LoadRoot(v0, Heap::kTrueValueRootIndex);
     __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
@@ -4661,8 +4721,7 @@
 
   // For arguments 4 and 3 get string length, calculate start of string data
   // and calculate the shift of the index (0 for ASCII and 1 for two byte).
-  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
-  __ Addu(t2, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
   __ Xor(a3, a3, Operand(1));  // 1 for 2-byte str, 0 for 1-byte.
   // Load the length from the original subject string from the previous stack
   // frame. Therefore we have to use fp, which points exactly to two pointer
@@ -4896,7 +4955,7 @@
 
 
 void CallFunctionStub::Generate(MacroAssembler* masm) {
-  Label slow;
+  Label slow, non_function;
 
   // The receiver might implicitly be the global object. This is
   // indicated by passing the hole as the receiver to the call
@@ -4922,7 +4981,7 @@
 
   // Check that the function is really a JavaScript function.
   // a1: pushed function (to be verified)
-  __ JumpIfSmi(a1, &slow);
+  __ JumpIfSmi(a1, &non_function);
   // Get the map of the function object.
   __ GetObjectType(a1, a2, a2);
   __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
@@ -4950,8 +5009,22 @@
 
   // Slow-case: Non-function called.
   __ bind(&slow);
+  // Check for function proxy.
+  __ Branch(&non_function, ne, a2, Operand(JS_FUNCTION_PROXY_TYPE));
+  __ push(a1);  // Put proxy as additional argument.
+  __ li(a0, Operand(argc_ + 1, RelocInfo::NONE));
+  __ li(a2, Operand(0, RelocInfo::NONE));
+  __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
+  __ SetCallKind(t1, CALL_AS_FUNCTION);
+  {
+    Handle<Code> adaptor =
+      masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+    __ Jump(adaptor, RelocInfo::CODE_TARGET);
+  }
+
   // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
   // of the original receiver from the call site).
+  __ bind(&non_function);
   __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
   __ li(a0, Operand(argc_));  // Setup the number of arguments.
   __ mov(a2, zero_reg);
@@ -6463,39 +6536,25 @@
     __ Subu(a2, a0, Operand(kHeapObjectTag));
     __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
 
-    Label fpu_eq, fpu_lt, fpu_gt;
-    // Compare operands (test if unordered).
-    __ c(UN, D, f0, f2);
-    // Don't base result on status bits when a NaN is involved.
-    __ bc1t(&unordered);
-    __ nop();
+    // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
+    Label fpu_eq, fpu_lt;
+    // Test if equal, and also handle the unordered/NaN case.
+    __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
 
-    // Test if equal.
-    __ c(EQ, D, f0, f2);
-    __ bc1t(&fpu_eq);
-    __ nop();
+    // Test if less (unordered case is already handled).
+    __ BranchF(&fpu_lt, NULL, lt, f0, f2);
 
-    // Test if unordered or less (unordered case is already handled).
-    __ c(ULT, D, f0, f2);
-    __ bc1t(&fpu_lt);
-    __ nop();
+    // Otherwise it's greater, so just fall through and return.
+    __ Ret(USE_DELAY_SLOT);
+    __ li(v0, Operand(GREATER));  // In delay slot.
 
-    // Otherwise it's greater.
-    __ bc1f(&fpu_gt);
-    __ nop();
-
-    // Return a result of -1, 0, or 1.
     __ bind(&fpu_eq);
-    __ li(v0, Operand(EQUAL));
-    __ Ret();
+    __ Ret(USE_DELAY_SLOT);
+    __ li(v0, Operand(EQUAL));  // In delay slot.
 
     __ bind(&fpu_lt);
-    __ li(v0, Operand(LESS));
-    __ Ret();
-
-    __ bind(&fpu_gt);
-    __ li(v0, Operand(GREATER));
-    __ Ret();
+    __ Ret(USE_DELAY_SLOT);
+    __ li(v0, Operand(LESS));  // In delay slot.
 
     __ bind(&unordered);
   }
@@ -6646,12 +6705,13 @@
   // Call the runtime system in a fresh internal frame.
   ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
                                              masm->isolate());
-  __ EnterInternalFrame();
-  __ Push(a1, a0);
-  __ li(t0, Operand(Smi::FromInt(op_)));
-  __ push(t0);
-  __ CallExternalReference(miss, 3);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(a1, a0);
+    __ li(t0, Operand(Smi::FromInt(op_)));
+    __ push(t0);
+    __ CallExternalReference(miss, 3);
+  }
   // Compute the entry point of the rewritten stub.
   __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
   // Restore registers.
@@ -6867,6 +6927,8 @@
 
 
 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
+  // we cannot call anything that could cause a GC from this stub.
   // Registers:
   //  result: StringDictionary to probe
   //  a1: key
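Several hunks in this file replace the hand-written FCSR dance with a single EmitFPUTruncate() call. Reconstructed purely from the removed lines, the helper is presumed to cover roughly the sequence below; this is a hedged restatement, not the macro-assembler's actual body.

// Presumed shape of what EmitFPUTruncate(kRoundToZero, dst, src, scratch,
// except_flag, kCheckForInexactConversion) folds together, based only on the
// removed lines above:
//
//   cfc1      scratch, FCSR            // 1. Save the FPU control/status register.
//   ctc1      zero_reg, FCSR           // 2. Clear it to disable FPU exceptions.
//   trunc_w_d dst, src                 // 3. Truncate the double toward zero.
//   cfc1      except_flag, FCSR        // 4. Read back the accrued flag bits.
//   ctc1      scratch, FCSR            // 5. Restore the original FCSR.
//   andi      except_flag, except_flag, kFCSRFlagMask   // 6. Keep only the flags.
//
// Callers then branch on except_flag != 0 instead of inspecting FCSR themselves,
// which is exactly what the rewritten call sites above do.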
diff --git a/src/mips/code-stubs-mips.h b/src/mips/code-stubs-mips.h
index aa224bc..c04421a 100644
--- a/src/mips/code-stubs-mips.h
+++ b/src/mips/code-stubs-mips.h
@@ -578,6 +578,8 @@
                                      Register r0,
                                      Register r1);
 
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
  private:
   static const int kInlinedProbes = 4;
   static const int kTotalProbes = 20;
@@ -590,7 +592,7 @@
       StringDictionary::kHeaderSize +
       StringDictionary::kElementsStartIndex * kPointerSize;
 
-  Major MajorKey() { return StringDictionaryNegativeLookup; }
+  Major MajorKey() { return StringDictionaryLookup; }
 
   int MinorKey() {
     return LookupModeBits::encode(mode_);
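The SometimesSetsUpAFrame() override added here, together with the has_frame bookkeeping introduced for codegen-mips.cc below, gives the assembler enough information to police frameless calls. A hedged sketch of the kind of check this enables follows; the Toy names are illustrative, and the corresponding V8 logic, if present, lives in MacroAssembler code that is not part of this diff.

// Hedged sketch of the frame-bookkeeping check these two changes make possible.
#include <cassert>

struct ToyStub {
  // StringDictionaryLookupStub overrides this to return false because, per the
  // comment added in code-stubs-mips.cc, it must never trigger a GC.
  virtual bool SometimesSetsUpAFrame() { return true; }
  virtual ~ToyStub() {}
};

struct ToyMasm {
  bool has_frame;
  ToyMasm() : has_frame(false) {}
  void CallStub(ToyStub* stub) {
    // A stub that may build a frame must only be called while a frame exists;
    // frameless stubs (like the dictionary lookup) may be called anywhere.
    assert(has_frame || !stub->SometimesSetsUpAFrame());
    // ... emit the actual call ...
  }
};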
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index 4400b64..ff146dd 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -38,12 +38,16 @@
 // Platform-specific RuntimeCallHelper functions.
 
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
-  masm->EnterInternalFrame();
+  masm->EnterFrame(StackFrame::INTERNAL);
+  ASSERT(!masm->has_frame());
+  masm->set_has_frame(true);
 }
 
 
 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-  masm->LeaveInternalFrame();
+  masm->LeaveFrame(StackFrame::INTERNAL);
+  ASSERT(masm->has_frame());
+  masm->set_has_frame(false);
 }
 
 
diff --git a/src/mips/debug-mips.cc b/src/mips/debug-mips.cc
index e323c50..5b3ae89 100644
--- a/src/mips/debug-mips.cc
+++ b/src/mips/debug-mips.cc
@@ -124,55 +124,58 @@
 static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
                                           RegList object_regs,
                                           RegList non_object_regs) {
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Store the registers containing live values on the expression stack to
-  // make sure that these are correctly updated during GC. Non object values
-  // are stored as a smi causing it to be untouched by GC.
-  ASSERT((object_regs & ~kJSCallerSaved) == 0);
-  ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
-  ASSERT((object_regs & non_object_regs) == 0);
-  if ((object_regs | non_object_regs) != 0) {
-    for (int i = 0; i < kNumJSCallerSaved; i++) {
-      int r = JSCallerSavedCode(i);
-      Register reg = { r };
-      if ((non_object_regs & (1 << r)) != 0) {
-        if (FLAG_debug_code) {
-          __ And(at, reg, 0xc0000000);
-          __ Assert(eq, "Unable to encode value as smi", at, Operand(zero_reg));
+    // Store the registers containing live values on the expression stack to
+    // make sure that these are correctly updated during GC. Non-object values
+    // are stored as smis, which keeps them from being touched by the GC.
+    ASSERT((object_regs & ~kJSCallerSaved) == 0);
+    ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+    ASSERT((object_regs & non_object_regs) == 0);
+    if ((object_regs | non_object_regs) != 0) {
+      for (int i = 0; i < kNumJSCallerSaved; i++) {
+        int r = JSCallerSavedCode(i);
+        Register reg = { r };
+        if ((non_object_regs & (1 << r)) != 0) {
+          if (FLAG_debug_code) {
+            __ And(at, reg, 0xc0000000);
+            __ Assert(
+                eq, "Unable to encode value as smi", at, Operand(zero_reg));
+          }
+          __ sll(reg, reg, kSmiTagSize);
         }
-        __ sll(reg, reg, kSmiTagSize);
       }
+      __ MultiPush(object_regs | non_object_regs);
     }
-    __ MultiPush(object_regs | non_object_regs);
-  }
 
 #ifdef DEBUG
-  __ RecordComment("// Calling from debug break to runtime - come in - over");
+    __ RecordComment("// Calling from debug break to runtime - come in - over");
 #endif
-  __ mov(a0, zero_reg);  // No arguments.
-  __ li(a1, Operand(ExternalReference::debug_break(masm->isolate())));
+    __ mov(a0, zero_reg);  // No arguments.
+    __ li(a1, Operand(ExternalReference::debug_break(masm->isolate())));
 
-  CEntryStub ceb(1);
-  __ CallStub(&ceb);
+    CEntryStub ceb(1);
+    __ CallStub(&ceb);
 
-  // Restore the register values from the expression stack.
-  if ((object_regs | non_object_regs) != 0) {
-    __ MultiPop(object_regs | non_object_regs);
-    for (int i = 0; i < kNumJSCallerSaved; i++) {
-      int r = JSCallerSavedCode(i);
-      Register reg = { r };
-      if ((non_object_regs & (1 << r)) != 0) {
-        __ srl(reg, reg, kSmiTagSize);
-      }
-      if (FLAG_debug_code &&
-          (((object_regs |non_object_regs) & (1 << r)) == 0)) {
-        __ li(reg, kDebugZapValue);
+    // Restore the register values from the expression stack.
+    if ((object_regs | non_object_regs) != 0) {
+      __ MultiPop(object_regs | non_object_regs);
+      for (int i = 0; i < kNumJSCallerSaved; i++) {
+        int r = JSCallerSavedCode(i);
+        Register reg = { r };
+        if ((non_object_regs & (1 << r)) != 0) {
+          __ srl(reg, reg, kSmiTagSize);
+        }
+        if (FLAG_debug_code &&
+            (((object_regs | non_object_regs) & (1 << r)) == 0)) {
+          __ li(reg, kDebugZapValue);
+        }
       }
     }
-  }
 
-  __ LeaveInternalFrame();
+    // Leave the internal frame.
+  }
 
   // Now that the break point has been handled, resume normal execution by
   // jumping to the target address intended by the caller and that was
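The debug-break helper above protects live caller-saved values across the runtime call by smi-tagging the non-object registers before pushing them, so the GC sees immediates rather than pointers. A self-contained sketch of that tag/untag round trip is below; kSmiTagSize mirrors the one-bit V8 smi tag, and the function names are illustrative.

// Sketch of the smi tag/untag round trip used above to hide raw integer
// values from the GC while they sit on the stack.
#include <cassert>
#include <cstdint>

static const int kSmiTagSize = 1;

static uint32_t SmiTag(uint32_t value) {
  // Only values with the top two bits clear can be encoded; the stub asserts
  // (value & 0xc0000000) == 0 before tagging for the same reason.
  assert((value & 0xc0000000u) == 0);
  return value << kSmiTagSize;        // Matches: sll reg, reg, kSmiTagSize.
}

static uint32_t SmiUntag(uint32_t tagged) {
  return tagged >> kSmiTagSize;       // Matches: srl reg, reg, kSmiTagSize.
}

int main() {
  uint32_t raw_value = 0x00400123;
  uint32_t on_stack = SmiTag(raw_value);     // Pushed across the runtime call.
  assert(SmiUntag(on_stack) == raw_value);   // Restored afterwards.
  return 0;
}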
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index b042a3e..57da2e7 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -62,9 +62,11 @@
 // A patch site is a location in the code which it is possible to patch. This
 // class has a number of methods to emit the code which is patchable and the
 // method EmitPatchInfo to record a marker back to the patchable code. This
-// marker is a andi at, rx, #yyy instruction, and x * 0x0000ffff + yyy (raw 16
-// bit immediate value is used) is the delta from the pc to the first
+// marker is a andi zero_reg, rx, #yyyy instruction, and rx * 0x0000ffff + yyyy
+// (raw 16 bit immediate value is used) is the delta from the pc to the first
 // instruction of the patchable code.
+// The marker instruction is effectively a NOP (dest is zero_reg) and will
+// never be emitted by normal code.
 class JumpPatchSite BASE_EMBEDDED {
  public:
   explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
@@ -103,7 +105,7 @@
     if (patch_site_.is_bound()) {
       int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
       Register reg = Register::from_code(delta_to_patch_site / kImm16Mask);
-      __ andi(at, reg, delta_to_patch_site % kImm16Mask);
+      __ andi(zero_reg, reg, delta_to_patch_site % kImm16Mask);
 #ifdef DEBUG
       info_emitted_ = true;
 #endif
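
The marker emitted by EmitPatchInfo folds both the register code and the pc
delta into a single andi whose destination is zero_reg, so it executes as a
NOP. A minimal sketch of how a patcher could recover the delta from such a
marker, assuming the helpers used elsewhere in this patch
(Assembler::IsAndImmediate, Assembler::GetRt, kImm16Mask) and decoding the rs
field by hand from the standard MIPS I-type layout (bits 25..21):

    static int GetPatchSiteDelta(Instr marker) {
      // The marker must be an "andi zero_reg, rx, #yyyy" instruction.
      ASSERT(Assembler::IsAndImmediate(marker));
      ASSERT(Assembler::GetRt(marker) == static_cast<uint32_t>(zero_reg.code()));
      int rx = (marker >> 21) & 0x1f;   // rs field: the encoded register code.
      int yyyy = marker & kImm16Mask;   // Raw 16-bit immediate.
      return rx * kImm16Mask + yyyy;    // Delta back to the patchable code.
    }
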
@@ -162,6 +164,11 @@
     __ bind(&ok);
   }
 
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done below).
+  FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
   int locals_count = info->scope()->num_stack_slots();
 
   __ Push(ra, fp, cp, a1);
@@ -310,17 +317,25 @@
 
 
 void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
+  // The generated code is used in Deoptimizer::PatchStackCheckCodeAt so we need
+  // to make sure its size is constant. Branch may emit a skip-or-jump sequence
+  // instead of the normal Branch; the "skip" part of that sequence is about as
+  // long as the normal Branch would be, so it is safe to ignore the difference
+  // here.
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
   Comment cmnt(masm_, "[ Stack check");
   Label ok;
   __ LoadRoot(t0, Heap::kStackLimitRootIndex);
-  __ Branch(&ok, hs, sp, Operand(t0));
+  __ sltu(at, sp, t0);
+  __ beq(at, zero_reg, &ok);
+  // CallStub will emit a li t9, ... first, so it is safe to use the delay slot.
   StackCheckStub stub;
+  __ CallStub(&stub);
   // Record a mapping of this PC offset to the OSR id.  This is used to find
   // the AST id from the unoptimized code in order to use it as a key into
   // the deoptimization input data found in the optimized code.
   RecordStackCheck(stmt->OsrEntryId());
 
-  __ CallStub(&stub);
   __ bind(&ok);
   PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
   // Record a mapping of the OSR id to this PC.  This is used if the OSR
@@ -3921,10 +3936,14 @@
 }
 
 void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
-                                                 Handle<String> check,
-                                                 Label* if_true,
-                                                 Label* if_false,
-                                                 Label* fall_through) {
+                                                 Handle<String> check) {
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
   { AccumulatorValueContext context(this);
     VisitForTypeofValue(expr);
   }
@@ -3986,18 +4005,7 @@
   } else {
     if (if_false != fall_through) __ jmp(if_false);
   }
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
-                                                    Label* if_true,
-                                                    Label* if_false,
-                                                    Label* fall_through) {
-  VisitForAccumulatorValue(expr);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-
-  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-  Split(eq, v0, Operand(at), if_true, if_false, fall_through);
+  context()->Plug(if_true, if_false);
 }
 
 
@@ -4005,9 +4013,12 @@
   Comment cmnt(masm_, "[ CompareOperation");
   SetSourcePosition(expr->position());
 
+  // First we try a fast inlined version of the compare when one of
+  // the operands is a literal.
+  if (TryLiteralCompare(expr)) return;
+
   // Always perform the comparison for its control flow.  Pack the result
   // into the expression's context after the comparison is performed.
-
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
@@ -4015,13 +4026,6 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  // First we try a fast inlined version of the compare when one of
-  // the operands is a literal.
-  if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
-    context()->Plug(if_true, if_false);
-    return;
-  }
-
   Token::Value op = expr->op();
   VisitForStackValue(expr->left());
   switch (op) {
@@ -4046,11 +4050,8 @@
     default: {
       VisitForAccumulatorValue(expr->right());
       Condition cc = eq;
-      bool strict = false;
       switch (op) {
         case Token::EQ_STRICT:
-          strict = true;
-          // Fall through.
         case Token::EQ:
           cc = eq;
           __ mov(a0, result_register());
@@ -4109,8 +4110,9 @@
 }
 
 
-void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
-  Comment cmnt(masm_, "[ CompareToNull");
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+                                              Expression* sub_expr,
+                                              NilValue nil) {
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
@@ -4118,15 +4120,21 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  VisitForAccumulatorValue(expr->expression());
+  VisitForAccumulatorValue(sub_expr);
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+  Heap::RootListIndex nil_value = nil == kNullValue ?
+      Heap::kNullValueRootIndex :
+      Heap::kUndefinedValueRootIndex;
   __ mov(a0, result_register());
-  __ LoadRoot(a1, Heap::kNullValueRootIndex);
-  if (expr->is_strict()) {
+  __ LoadRoot(a1, nil_value);
+  if (expr->op() == Token::EQ_STRICT) {
     Split(eq, a0, Operand(a1), if_true, if_false, fall_through);
   } else {
+    Heap::RootListIndex other_nil_value = nil == kNullValue ?
+        Heap::kUndefinedValueRootIndex :
+        Heap::kNullValueRootIndex;
     __ Branch(if_true, eq, a0, Operand(a1));
-    __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+    __ LoadRoot(a1, other_nil_value);
     __ Branch(if_true, eq, a0, Operand(a1));
     __ And(at, a0, Operand(kSmiTagMask));
     __ Branch(if_false, eq, at, Operand(zero_reg));
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
index a76c215..a68bb1d 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips/ic-mips.cc
@@ -504,21 +504,22 @@
   // Get the receiver of the function from the stack.
   __ lw(a3, MemOperand(sp, argc*kPointerSize));
 
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Push the receiver and the name of the function.
-  __ Push(a3, a2);
+    // Push the receiver and the name of the function.
+    __ Push(a3, a2);
 
-  // Call the entry.
-  __ li(a0, Operand(2));
-  __ li(a1, Operand(ExternalReference(IC_Utility(id), isolate)));
+    // Call the entry.
+    __ li(a0, Operand(2));
+    __ li(a1, Operand(ExternalReference(IC_Utility(id), isolate)));
 
-  CEntryStub stub(1);
-  __ CallStub(&stub);
+    CEntryStub stub(1);
+    __ CallStub(&stub);
 
-  // Move result to a1 and leave the internal frame.
-  __ mov(a1, v0);
-  __ LeaveInternalFrame();
+    // Move result to a1 and leave the internal frame.
+    __ mov(a1, v0);
+  }
 
   // Check if the receiver is a global object of some sort.
   // This can happen only for regular CallIC but not KeyedCallIC.
@@ -649,12 +650,13 @@
   // This branch is taken when calling KeyedCallIC_Miss is neither required
   // nor beneficial.
   __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, a0, a3);
-  __ EnterInternalFrame();
-  __ push(a2);  // Save the key.
-  __ Push(a1, a2);  // Pass the receiver and the key.
-  __ CallRuntime(Runtime::kKeyedGetProperty, 2);
-  __ pop(a2);  // Restore the key.
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(a2);  // Save the key.
+    __ Push(a1, a2);  // Pass the receiver and the key.
+    __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+    __ pop(a2);  // Restore the key.
+  }
   __ mov(a1, v0);
   __ jmp(&do_call);
 
@@ -1572,7 +1574,8 @@
   // If the instruction following the call is not an andi at, rx, #yyy, nothing
   // was inlined.
   Instr instr = Assembler::instr_at(andi_instruction_address);
-  if (!Assembler::IsAndImmediate(instr)) {
+  if (!(Assembler::IsAndImmediate(instr) &&
+        Assembler::GetRt(instr) == (uint32_t)zero_reg.code())) {
     return;
   }
 
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index 4c48ef1..d7732c6 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -42,7 +42,8 @@
 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
     : Assembler(arg_isolate, buffer, size),
       generating_stub_(false),
-      allow_stub_calls_(true) {
+      allow_stub_calls_(true),
+      has_frame_(false) {
   if (isolate() != NULL) {
     code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                   isolate());
@@ -119,7 +120,9 @@
   // stack, so adjust the stack for unsaved registers.
   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
   ASSERT(num_unsaved >= 0);
-  Subu(sp, sp, Operand(num_unsaved * kPointerSize));
+  if (num_unsaved > 0) {
+    Subu(sp, sp, Operand(num_unsaved * kPointerSize));
+  }
   MultiPush(kSafepointSavedRegisters);
 }
 
@@ -127,7 +130,9 @@
 void MacroAssembler::PopSafepointRegisters() {
   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
   MultiPop(kSafepointSavedRegisters);
-  Addu(sp, sp, Operand(num_unsaved * kPointerSize));
+  if (num_unsaved > 0) {
+    Addu(sp, sp, Operand(num_unsaved * kPointerSize));
+  }
 }
 
 
@@ -180,6 +185,7 @@
 
 
 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
+  UNIMPLEMENTED_MIPS();
   // General purpose registers are pushed last on the stack.
   int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize;
   int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
@@ -187,8 +193,6 @@
 }
 
 
-
-
 void MacroAssembler::InNewSpace(Register object,
                                 Register scratch,
                                 Condition cc,
@@ -707,7 +711,7 @@
   int16_t stack_offset = num_to_push * kPointerSize;
 
   Subu(sp, sp, Operand(stack_offset));
-  for (int16_t i = kNumRegisters; i > 0; i--) {
+  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
     if ((regs & (1 << i)) != 0) {
       stack_offset -= kPointerSize;
       sw(ToRegister(i), MemOperand(sp, stack_offset));
@@ -746,7 +750,7 @@
 void MacroAssembler::MultiPopReversed(RegList regs) {
   int16_t stack_offset = 0;
 
-  for (int16_t i = kNumRegisters; i > 0; i--) {
+  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
     if ((regs & (1 << i)) != 0) {
       lw(ToRegister(i), MemOperand(sp, stack_offset));
       stack_offset += kPointerSize;
@@ -814,6 +818,21 @@
 }
 
 
+void MacroAssembler::FlushICache(Register address, unsigned instructions) {
+  RegList saved_regs = kJSCallerSaved | ra.bit();
+  MultiPush(saved_regs);
+  AllowExternalCallThatCantCauseGC scope(this);
+
+  // Save to a0 in case address == t0.
+  Move(a0, address);
+  PrepareCallCFunction(2, t0);
+
+  li(a1, instructions * kInstrSize);
+  CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
+  MultiPop(saved_regs);
+}
+
+
 void MacroAssembler::Ext(Register rt,
                          Register rs,
                          uint16_t pos,
@@ -940,11 +959,9 @@
   mtc1(at, FPURegister::from_code(scratch.code() + 1));
   mtc1(zero_reg, scratch);
   // Test if scratch > fd.
-  c(OLT, D, fd, scratch);
-
-  Label simple_convert;
   // If fd < 2^31 we can convert it normally.
-  bc1t(&simple_convert);
+  Label simple_convert;
+  BranchF(&simple_convert, NULL, lt, fd, scratch);
 
   // First we subtract 2^31 from fd, then trunc it to rs
   // and add 2^31 to rs.
@@ -964,6 +981,102 @@
 }
 
 
+void MacroAssembler::BranchF(Label* target,
+                             Label* nan,
+                             Condition cc,
+                             FPURegister cmp1,
+                             FPURegister cmp2,
+                             BranchDelaySlot bd) {
+  if (cc == al) {
+    Branch(bd, target);
+    return;
+  }
+
+  ASSERT(nan || target);
+  // Check for unordered (NaN) cases.
+  if (nan) {
+    c(UN, D, cmp1, cmp2);
+    bc1t(nan);
+  }
+
+  if (target) {
+    // Here NaN cases were either handled by this function or are assumed to
+    // have been handled by the caller.
+    // Unsigned conditions are treated as their signed counterpart.
+    switch (cc) {
+      case Uless:
+      case less:
+        c(OLT, D, cmp1, cmp2);
+        bc1t(target);
+        break;
+      case Ugreater:
+      case greater:
+        c(ULE, D, cmp1, cmp2);
+        bc1f(target);
+        break;
+      case Ugreater_equal:
+      case greater_equal:
+        c(ULT, D, cmp1, cmp2);
+        bc1f(target);
+        break;
+      case Uless_equal:
+      case less_equal:
+        c(OLE, D, cmp1, cmp2);
+        bc1t(target);
+        break;
+      case eq:
+        c(EQ, D, cmp1, cmp2);
+        bc1t(target);
+        break;
+      case ne:
+        c(EQ, D, cmp1, cmp2);
+        bc1f(target);
+        break;
+      default:
+        CHECK(0);
+    };
+  }
+
+  if (bd == PROTECT) {
+    nop();
+  }
+}
+
+
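
BranchF wraps the coprocessor-1 compare-and-branch pattern so that callers no
longer have to pair c(...) with bc1t/bc1f by hand. A usage sketch (labels and
FPU registers here are illustrative):

    Label lhs_smaller, unordered;
    // For cc == lt this emits roughly:
    //   c(UN, D, f12, f14);  bc1t(&unordered);     // NaN check.
    //   c(OLT, D, f12, f14); bc1t(&lhs_smaller);
    //   nop();                                     // PROTECT delay slot.
    __ BranchF(&lhs_smaller, &unordered, lt, f12, f14);
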
+void MacroAssembler::Move(FPURegister dst, double imm) {
+  ASSERT(CpuFeatures::IsEnabled(FPU));
+  static const DoubleRepresentation minus_zero(-0.0);
+  static const DoubleRepresentation zero(0.0);
+  DoubleRepresentation value(imm);
+  // Handle special values first.
+  bool force_load = dst.is(kDoubleRegZero);
+  if (value.bits == zero.bits && !force_load) {
+    mov_d(dst, kDoubleRegZero);
+  } else if (value.bits == minus_zero.bits && !force_load) {
+    neg_d(dst, kDoubleRegZero);
+  } else {
+    uint32_t lo, hi;
+    DoubleAsTwoUInt32(imm, &lo, &hi);
+    // Move the low part of the double into the lower register of the
+    // corresponding FPU register pair.
+    if (lo != 0) {
+      li(at, Operand(lo));
+      mtc1(at, dst);
+    } else {
+      mtc1(zero_reg, dst);
+    }
+    // Move the high part of the double into the higher register of the
+    // corresponding FPU register pair.
+    if (hi != 0) {
+      li(at, Operand(hi));
+      mtc1(at, dst.high());
+    } else {
+      mtc1(zero_reg, dst.high());
+    }
+  }
+}
+
+
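
Move(FPURegister, double) materializes a double immediate by writing its raw
low and high 32-bit words into the two halves of the FPU register pair, with
fast paths for +0.0 and -0.0 through kDoubleRegZero. A usage sketch (register
choices are illustrative; FPU support is assumed):

    __ Move(f4, 0.0);    // mov_d(f4, kDoubleRegZero).
    __ Move(f6, -0.0);   // neg_d(f6, kDoubleRegZero).
    __ Move(f8, 1.5);    // li/mtc1 of the two raw words; an all-zero word is
                         // written from zero_reg without the li.
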
 // Tries to get a signed int32 out of a double precision floating point heap
 // number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
 // 32bits signed integer range.
@@ -1062,6 +1175,53 @@
 }
 
 
+void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
+                                     FPURegister result,
+                                     DoubleRegister double_input,
+                                     Register scratch1,
+                                     Register except_flag,
+                                     CheckForInexactConversion check_inexact) {
+  ASSERT(CpuFeatures::IsSupported(FPU));
+  CpuFeatures::Scope scope(FPU);
+
+  int32_t except_mask = kFCSRFlagMask;  // Assume interested in all exceptions.
+
+  if (check_inexact == kDontCheckForInexactConversion) {
+    // Ignore inexact exceptions.
+    except_mask &= ~kFCSRInexactFlagMask;
+  }
+
+  // Save FCSR.
+  cfc1(scratch1, FCSR);
+  // Disable FPU exceptions.
+  ctc1(zero_reg, FCSR);
+
+  // Do operation based on rounding mode.
+  switch (rounding_mode) {
+    case kRoundToNearest:
+      round_w_d(result, double_input);
+      break;
+    case kRoundToZero:
+      trunc_w_d(result, double_input);
+      break;
+    case kRoundToPlusInf:
+      ceil_w_d(result, double_input);
+      break;
+    case kRoundToMinusInf:
+      floor_w_d(result, double_input);
+      break;
+  }  // End of switch-statement.
+
+  // Retrieve FCSR.
+  cfc1(except_flag, FCSR);
+  // Restore FCSR.
+  ctc1(scratch1, FCSR);
+
+  // Check for fpu exceptions.
+  And(except_flag, except_flag, Operand(except_mask));
+}
+
+
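
EmitFPUTruncate performs the conversion with FPU exceptions disabled and hands
the masked FCSR flags back in except_flag, so the caller can branch on a
non-zero value to detect NaN or out-of-range input (inexact results are
ignored under the default kDontCheckForInexactConversion). A sketch of the
intended calling pattern, with illustrative register choices:

    Label not_int32;
    // Truncate double_input (f12) towards zero into the single-width f10.
    __ EmitFPUTruncate(kRoundToZero,
                       f10,   // result
                       f12,   // double_input
                       t3,    // scratch1, preserves the caller's FCSR
                       t4);   // except_flag
    // Any surviving flag means f12 did not convert cleanly to an int32.
    __ Branch(&not_int32, ne, t4, Operand(zero_reg));
    __ mfc1(v0, f10);  // Fetch the converted integer.
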
 void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
                                                  Register input_high,
                                                  Register input_low,
@@ -1148,22 +1308,21 @@
                                       FPURegister double_input,
                                       FPURegister single_scratch,
                                       Register scratch,
-                                      Register input_high,
-                                      Register input_low) {
+                                      Register scratch2,
+                                      Register scratch3) {
   CpuFeatures::Scope scope(FPU);
-  ASSERT(!input_high.is(result));
-  ASSERT(!input_low.is(result));
-  ASSERT(!input_low.is(input_high));
+  ASSERT(!scratch2.is(result));
+  ASSERT(!scratch3.is(result));
+  ASSERT(!scratch3.is(scratch2));
   ASSERT(!scratch.is(result) &&
-         !scratch.is(input_high) &&
-         !scratch.is(input_low));
+         !scratch.is(scratch2) &&
+         !scratch.is(scratch3));
   ASSERT(!single_scratch.is(double_input));
 
   Label done;
   Label manual;
 
   // Clear cumulative exception flags and save the FCSR.
-  Register scratch2 = input_high;
   cfc1(scratch2, FCSR);
   ctc1(zero_reg, FCSR);
   // Try a conversion to a signed integer.
@@ -1180,6 +1339,8 @@
   Branch(&done, eq, scratch, Operand(zero_reg));
 
   // Load the double value and perform a manual truncation.
+  Register input_high = scratch2;
+  Register input_low = scratch3;
   Move(input_low, input_high, double_input);
   EmitOutOfInt32RangeTruncate(result,
                               input_high,
@@ -1211,15 +1372,6 @@
     (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
 
 
-bool MacroAssembler::UseAbsoluteCodePointers() {
-  if (is_trampoline_emitted()) {
-    return true;
-  } else {
-    return false;
-  }
-}
-
-
 void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
   BranchShort(offset, bdslot);
 }
@@ -1233,11 +1385,18 @@
 
 
 void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
-  bool is_label_near = is_near(L);
-  if (UseAbsoluteCodePointers() && !is_label_near) {
-    Jr(L, bdslot);
+  if (L->is_bound()) {
+    if (is_near(L)) {
+      BranchShort(L, bdslot);
+    } else {
+      Jr(L, bdslot);
+    }
   } else {
-    BranchShort(L, bdslot);
+    if (is_trampoline_emitted()) {
+      Jr(L, bdslot);
+    } else {
+      BranchShort(L, bdslot);
+    }
   }
 }
 
@@ -1245,15 +1404,26 @@
 void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
                             const Operand& rt,
                             BranchDelaySlot bdslot) {
-  bool is_label_near = is_near(L);
-  if (UseAbsoluteCodePointers() && !is_label_near) {
-    Label skip;
-    Condition neg_cond = NegateCondition(cond);
-    BranchShort(&skip, neg_cond, rs, rt);
-    Jr(L, bdslot);
-    bind(&skip);
+  if (L->is_bound()) {
+    if (is_near(L)) {
+      BranchShort(L, cond, rs, rt, bdslot);
+    } else {
+      Label skip;
+      Condition neg_cond = NegateCondition(cond);
+      BranchShort(&skip, neg_cond, rs, rt);
+      Jr(L, bdslot);
+      bind(&skip);
+    }
   } else {
-    BranchShort(L, cond, rs, rt, bdslot);
+    if (is_trampoline_emitted()) {
+      Label skip;
+      Condition neg_cond = NegateCondition(cond);
+      BranchShort(&skip, neg_cond, rs, rt);
+      Jr(L, bdslot);
+      bind(&skip);
+    } else {
+      BranchShort(L, cond, rs, rt, bdslot);
+    }
   }
 }
 
@@ -1276,8 +1446,8 @@
   Register scratch = at;
 
   if (rt.is_reg()) {
-    // We don't want any other register but scratch clobbered.
-    ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
+    // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
+    // rt.
     r2 = rt.rm_;
     switch (cond) {
       case cc_always:
@@ -1779,11 +1949,18 @@
 
 
 void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
-  bool is_label_near = is_near(L);
-  if (UseAbsoluteCodePointers() && !is_label_near) {
-    Jalr(L, bdslot);
+  if (L->is_bound()) {
+    if (is_near(L)) {
+      BranchAndLinkShort(L, bdslot);
+    } else {
+      Jalr(L, bdslot);
+    }
   } else {
-    BranchAndLinkShort(L, bdslot);
+    if (is_trampoline_emitted()) {
+      Jalr(L, bdslot);
+    } else {
+      BranchAndLinkShort(L, bdslot);
+    }
   }
 }
 
@@ -1791,15 +1968,26 @@
 void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
                                    const Operand& rt,
                                    BranchDelaySlot bdslot) {
-  bool is_label_near = is_near(L);
-  if (UseAbsoluteCodePointers() && !is_label_near) {
-    Label skip;
-    Condition neg_cond = NegateCondition(cond);
-    BranchShort(&skip, neg_cond, rs, rt);
-    Jalr(L, bdslot);
-    bind(&skip);
+  if (L->is_bound()) {
+    if (is_near(L)) {
+      BranchAndLinkShort(L, cond, rs, rt, bdslot);
+    } else {
+      Label skip;
+      Condition neg_cond = NegateCondition(cond);
+      BranchShort(&skip, neg_cond, rs, rt);
+      Jalr(L, bdslot);
+      bind(&skip);
+    }
   } else {
-    BranchAndLinkShort(L, cond, rs, rt, bdslot);
+    if (is_trampoline_emitted()) {
+      Label skip;
+      Condition neg_cond = NegateCondition(cond);
+      BranchShort(&skip, neg_cond, rs, rt);
+      Jalr(L, bdslot);
+      bind(&skip);
+    } else {
+      BranchAndLinkShort(L, cond, rs, rt, bdslot);
+    }
   }
 }
 
@@ -2306,10 +2494,10 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
 
 void MacroAssembler::DebugBreak() {
-  ASSERT(allow_stub_calls());
   mov(a0, zero_reg);
   li(a1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
   CEntryStub ces(1);
+  ASSERT(AllowThisStubCall(&ces));
   Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
 }
 
@@ -2975,7 +3163,8 @@
 void MacroAssembler::CheckFastElements(Register map,
                                        Register scratch,
                                        Label* fail) {
-  STATIC_ASSERT(FAST_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
   lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
   Branch(fail, hi, scratch, Operand(Map::kMaximumBitField2FastElementValue));
 }
@@ -3171,13 +3360,18 @@
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper,
                                 CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   Label done;
 
   InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
                  call_wrapper, call_kind);
   if (flag == CALL_FUNCTION) {
+    call_wrapper.BeforeCall(CallSize(code));
     SetCallKind(t1, call_kind);
     Call(code);
+    call_wrapper.AfterCall();
   } else {
     ASSERT(flag == JUMP_FUNCTION);
     SetCallKind(t1, call_kind);
@@ -3195,6 +3389,9 @@
                                 RelocInfo::Mode rmode,
                                 InvokeFlag flag,
                                 CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   Label done;
 
   InvokePrologue(expected, actual, code, no_reg, &done, flag,
@@ -3217,6 +3414,9 @@
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   // Contract with called JS functions requires that function is passed in a1.
   ASSERT(function.is(a1));
   Register expected_reg = a2;
@@ -3239,6 +3439,9 @@
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
                                     CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   ASSERT(function->is_compiled());
 
   // Get the function and setup the context.
@@ -3249,7 +3452,11 @@
   Handle<Code> code(function->code());
   ParameterCount expected(function->shared()->formal_parameter_count());
   if (V8::UseCrankshaft()) {
-    UNIMPLEMENTED_MIPS();
+    // TODO(kasperl): For now, we always call indirectly through the
+    // code field in the function to allow recompilation to take effect
+    // without changing any of the call sites.
+    lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+    InvokeCode(a3, expected, actual, flag, NullCallWrapper(), call_kind);
   } else {
     InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag, call_kind);
   }
@@ -3349,14 +3556,14 @@
 
 void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
                               Register r1, const Operand& r2) {
-  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
+  ASSERT(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
   Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2);
 }
 
 
 MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond,
                                          Register r1, const Operand& r2) {
-  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
+  ASSERT(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
   Object* result;
   { MaybeObject* maybe_result = stub->TryGetCode();
     if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -3368,7 +3575,7 @@
 
 
 void MacroAssembler::TailCallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
+  ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
   Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
 }
 
@@ -3377,7 +3584,6 @@
                                              Condition cond,
                                              Register r1,
                                              const Operand& r2) {
-  ASSERT(allow_stub_calls());  // Stub calls are not allowed in some stubs.
   Object* result;
   { MaybeObject* maybe_result = stub->TryGetCode();
     if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -3486,6 +3692,12 @@
 }
 
 
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
+  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
+}
+
+
 void MacroAssembler::IllegalOperation(int num_arguments) {
   if (num_arguments > 0) {
     addiu(sp, sp, num_arguments * kPointerSize);
@@ -3566,7 +3778,16 @@
   ASSERT(!overflow_dst.is(scratch));
   ASSERT(!overflow_dst.is(left));
   ASSERT(!overflow_dst.is(right));
-  ASSERT(!left.is(right));
+
+  if (left.is(right) && dst.is(left)) {
+    ASSERT(!dst.is(t9));
+    ASSERT(!scratch.is(t9));
+    ASSERT(!left.is(t9));
+    ASSERT(!right.is(t9));
+    ASSERT(!overflow_dst.is(t9));
+    mov(t9, right);
+    right = t9;
+  }
 
   if (dst.is(left)) {
     mov(scratch, left);  // Preserve left.
@@ -3599,10 +3820,17 @@
   ASSERT(!overflow_dst.is(scratch));
   ASSERT(!overflow_dst.is(left));
   ASSERT(!overflow_dst.is(right));
-  ASSERT(!left.is(right));
   ASSERT(!scratch.is(left));
   ASSERT(!scratch.is(right));
 
+  // This happens with some crankshaft code. Since Subu works fine if
+  // left == right, let's not make that restriction here.
+  if (left.is(right)) {
+    mov(dst, zero_reg);
+    mov(overflow_dst, zero_reg);
+    return;
+  }
+
   if (dst.is(left)) {
     mov(scratch, left);  // Preserve left.
     subu(dst, left, right);  // Left is overwritten.
@@ -3722,6 +3950,9 @@
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
+  // You can't call a builtin without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   GetBuiltinEntry(t9, id);
   if (flag == CALL_FUNCTION) {
     call_wrapper.BeforeCall(CallSize(t9));
@@ -3854,14 +4085,20 @@
     RecordComment(msg);
   }
 #endif
-  // Disable stub call restrictions to always allow calls to abort.
-  AllowStubCallsScope allow_scope(this, true);
 
   li(a0, Operand(p0));
   push(a0);
   li(a0, Operand(Smi::FromInt(p1 - p0)));
   push(a0);
-  CallRuntime(Runtime::kAbort, 2);
+  // Disable stub call restrictions to always allow calls to abort.
+  if (!has_frame_) {
+    // We don't actually want to generate a pile of code for this, so just
+    // claim there is a stack frame, without generating one.
+    FrameScope scope(this, StackFrame::NONE);
+    CallRuntime(Runtime::kAbort, 2);
+  } else {
+    CallRuntime(Runtime::kAbort, 2);
+  }
   // Will not return here.
   if (is_trampoline_pool_blocked()) {
     // If the calling code cares about the exact number of
@@ -4245,7 +4482,23 @@
 
 static const int kRegisterPassedArguments = 4;
 
-void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
+int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
+                                              int num_double_arguments) {
+  int stack_passed_words = 0;
+  num_reg_arguments += 2 * num_double_arguments;
+
+  // Up to four simple arguments are passed in registers a0..a3.
+  if (num_reg_arguments > kRegisterPassedArguments) {
+    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
+  }
+  stack_passed_words += kCArgSlotCount;
+  return stack_passed_words;
+}
+
+
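
As a worked example of the accounting above (assuming the O32 value
kCArgSlotCount == 4 used in this file): a call with one integer argument and
two double arguments counts as 1 + 2 * 2 = 5 register words, so one word
spills to the stack and CalculateStackPassedWords returns (5 - 4) + 4 = 5.
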
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+                                          int num_double_arguments,
+                                          Register scratch) {
   int frame_alignment = ActivationFrameAlignment();
 
   // Up to four simple arguments are passed in registers a0..a3.
@@ -4253,9 +4506,8 @@
   // mips, even though those argument slots are not normally used.
   // Remaining arguments are pushed on the stack, above (higher address than)
   // the argument slots.
-  int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
-                                 0 : num_arguments - kRegisterPassedArguments) +
-                                kCArgSlotCount;
+  int stack_passed_arguments = CalculateStackPassedWords(
+      num_reg_arguments, num_double_arguments);
   if (frame_alignment > kPointerSize) {
     // Make stack end at alignment and make room for num_arguments - 4 words
     // and the original value of sp.
@@ -4270,26 +4522,54 @@
 }
 
 
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+                                          Register scratch) {
+  PrepareCallCFunction(num_reg_arguments, 0, scratch);
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+                                   int num_reg_arguments,
+                                   int num_double_arguments) {
+  CallCFunctionHelper(no_reg,
+                      function,
+                      t8,
+                      num_reg_arguments,
+                      num_double_arguments);
+}
+
+
+void MacroAssembler::CallCFunction(Register function,
+                                   Register scratch,
+                                   int num_reg_arguments,
+                                   int num_double_arguments) {
+  CallCFunctionHelper(function,
+                      ExternalReference::the_hole_value_location(isolate()),
+                      scratch,
+                      num_reg_arguments,
+                      num_double_arguments);
+}
+
+
 void MacroAssembler::CallCFunction(ExternalReference function,
                                    int num_arguments) {
-  CallCFunctionHelper(no_reg, function, t8, num_arguments);
+  CallCFunction(function, num_arguments, 0);
 }
 
 
 void MacroAssembler::CallCFunction(Register function,
                                    Register scratch,
                                    int num_arguments) {
-  CallCFunctionHelper(function,
-                      ExternalReference::the_hole_value_location(isolate()),
-                      scratch,
-                      num_arguments);
+  CallCFunction(function, scratch, num_arguments, 0);
 }
 
 
 void MacroAssembler::CallCFunctionHelper(Register function,
                                          ExternalReference function_reference,
                                          Register scratch,
-                                         int num_arguments) {
+                                         int num_reg_arguments,
+                                         int num_double_arguments) {
+  ASSERT(has_frame());
   // Make sure that the stack is aligned before calling a C function unless
   // running in the simulator. The simulator has its own alignment check which
   // provides more information.
@@ -4327,9 +4607,8 @@
 
   Call(function);
 
-  int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
-                                0 : num_arguments - kRegisterPassedArguments) +
-                               kCArgSlotCount;
+  int stack_passed_arguments = CalculateStackPassedWords(
+      num_reg_arguments, num_double_arguments);
 
   if (OS::ActivationFrameAlignment() > kPointerSize) {
     lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
@@ -4342,6 +4621,37 @@
 #undef BRANCH_ARGS_CHECK
 
 
+void MacroAssembler::PatchRelocatedValue(Register li_location,
+                                         Register scratch,
+                                         Register new_value) {
+  lw(scratch, MemOperand(li_location));
+  // At this point scratch is a lui(at, ...) instruction.
+  if (emit_debug_code()) {
+    And(scratch, scratch, kOpcodeMask);
+    Check(eq, "The instruction to patch should be a lui.",
+        scratch, Operand(LUI));
+    lw(scratch, MemOperand(li_location));
+  }
+  srl(t9, new_value, kImm16Bits);
+  Ins(scratch, t9, 0, kImm16Bits);
+  sw(scratch, MemOperand(li_location));
+
+  lw(scratch, MemOperand(li_location, kInstrSize));
+  // scratch is now ori(at, ...).
+  if (emit_debug_code()) {
+    And(scratch, scratch, kOpcodeMask);
+    Check(eq, "The instruction to patch should be an ori.",
+        scratch, Operand(ORI));
+    lw(scratch, MemOperand(li_location, kInstrSize));
+  }
+  Ins(scratch, new_value, 0, kImm16Bits);
+  sw(scratch, MemOperand(li_location, kInstrSize));
+
+  // Update the I-cache so the new lui and ori can be executed.
+  FlushICache(li_location, 2);
+}
+
+
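
PatchRelocatedValue assumes the constant was emitted as the usual
two-instruction li sequence and rewrites only the two 16-bit immediates before
flushing the I-cache. A sketch of the pair it expects to find at li_location
(value stands in for the 32-bit constant being loaded):

    __ lui(at, (value >> kImm16Bits) & kImm16Mask);  // Upper half of value.
    __ ori(at, at, value & kImm16Mask);              // Lower half of value.
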
 void MacroAssembler::LoadInstanceDescriptors(Register map,
                                              Register descriptors) {
   lw(descriptors,
@@ -4353,6 +4663,49 @@
 }
 
 
+void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
+  ASSERT(!output_reg.is(input_reg));
+  Label done;
+  li(output_reg, Operand(255));
+  // Normal branch: nop in delay slot.
+  Branch(&done, gt, input_reg, Operand(output_reg));
+  // Use delay slot in this branch.
+  Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
+  mov(output_reg, zero_reg);  // In delay slot.
+  mov(output_reg, input_reg);  // Value is in range 0..255.
+  bind(&done);
+}
+
+
+void MacroAssembler::ClampDoubleToUint8(Register result_reg,
+                                        DoubleRegister input_reg,
+                                        DoubleRegister temp_double_reg) {
+  Label above_zero;
+  Label done;
+  Label in_bounds;
+
+  Move(temp_double_reg, 0.0);
+  BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
+
+  // Double value is not above zero (i.e. <= 0, or NaN), return 0.
+  mov(result_reg, zero_reg);
+  Branch(&done);
+
+  // Double value is >= 255, return 255.
+  bind(&above_zero);
+  Move(temp_double_reg, 255.0);
+  BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
+  li(result_reg, Operand(255));
+  Branch(&done);
+
+  // In 0-255 range, round and truncate.
+  bind(&in_bounds);
+  round_w_d(temp_double_reg, input_reg);
+  mfc1(result_reg, temp_double_reg);
+  bind(&done);
+}
+
+
 CodePatcher::CodePatcher(byte* address, int instructions)
     : address_(address),
       instructions_(instructions),
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index 5dd012e..5f60aa1 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -50,15 +50,16 @@
 // trying to update gp register for position-independent-code. Whenever
 // MIPS generated code calls C code, it must be via t9 register.
 
-// Registers aliases
+
+// Register aliases.
 // cp is assumed to be a callee saved register.
+const Register lithiumScratchReg = s3;  // Scratch register.
+const Register lithiumScratchReg2 = s4;  // Scratch register.
+const Register condReg = s5;  // Simulated (partial) condition code for mips.
 const Register roots = s6;  // Roots array pointer.
 const Register cp = s7;     // JavaScript context pointer.
 const Register fp = s8_fp;  // Alias for fp.
-// Registers used for condition evaluation.
-const Register condReg1 = s4;
-const Register condReg2 = s5;
-
+const DoubleRegister lithiumScratchDouble = f30;  // Double scratch register.
 
 // Flags used for the AllocateInNewSpace functions.
 enum AllocationFlags {
@@ -90,6 +91,36 @@
   PROTECT
 };
 
+
+// -----------------------------------------------------------------------------
+// Static helper functions.
+
+static MemOperand ContextOperand(Register context, int index) {
+  return MemOperand(context, Context::SlotOffset(index));
+}
+
+
+static inline MemOperand GlobalObjectOperand()  {
+  return ContextOperand(cp, Context::GLOBAL_INDEX);
+}
+
+
+// Generate a MemOperand for loading a field from an object.
+static inline MemOperand FieldMemOperand(Register object, int offset) {
+  return MemOperand(object, offset - kHeapObjectTag);
+}
+
+
+// Generate a MemOperand for storing arguments 5..N on the stack
+// when calling CallCFunction().
+static inline MemOperand CFunctionArgumentOperand(int index) {
+  ASSERT(index > kCArgSlotCount);
+  // Argument 5 takes the slot just past the four Arg-slots.
+  int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
+  return MemOperand(sp, offset);
+}
+
+
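
As a worked example of CFunctionArgumentOperand (assuming the O32 values
kPointerSize == 4 and kCArgsSlotsSize == 16): stack arguments land just past
the four reserved argument slots, so a caller stores them as:

    __ sw(t0, CFunctionArgumentOperand(5));  // MemOperand(sp, 16).
    __ sw(t1, CFunctionArgumentOperand(6));  // MemOperand(sp, 20).
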
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
  public:
@@ -138,21 +169,22 @@
   void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
   void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
   void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
-  int CallSize(Register target, COND_ARGS);
+  static int CallSize(Register target, COND_ARGS);
   void Call(Register target, COND_ARGS);
-  int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
+  static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
   void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
-  int CallSize(Handle<Code> code,
-               RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
-               unsigned ast_id = kNoASTId,
-               COND_ARGS);
+  static int CallSize(Handle<Code> code,
+                      RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+                      unsigned ast_id = kNoASTId,
+                      COND_ARGS);
   void Call(Handle<Code> code,
             RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
             unsigned ast_id = kNoASTId,
             COND_ARGS);
   void Ret(COND_ARGS);
-  inline void Ret(BranchDelaySlot bd) {
-    Ret(al, zero_reg, Operand(zero_reg), bd);
+  inline void Ret(BranchDelaySlot bd, Condition cond = al,
+    Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) {
+    Ret(cond, rs, rt, bd);
   }
 
 #undef COND_ARGS
@@ -197,6 +229,8 @@
     mtc1(src_high, FPURegister::from_code(dst.code() + 1));
   }
 
+  void Move(FPURegister dst, double imm);
+
   // Jump unconditionally to given label.
   // We NEED a nop in the branch delay slot, as it is used by v8, for example in
   // CodeGenerator::ProcessDeferred().
@@ -517,6 +551,14 @@
     Addu(sp, sp, 2 * kPointerSize);
   }
 
+  // Pop three registers. Pops rightmost register first (from lower address).
+  void Pop(Register src1, Register src2, Register src3) {
+    lw(src3, MemOperand(sp, 0 * kPointerSize));
+    lw(src2, MemOperand(sp, 1 * kPointerSize));
+    lw(src1, MemOperand(sp, 2 * kPointerSize));
+    Addu(sp, sp, 3 * kPointerSize);
+  }
+
   void Pop(uint32_t count = 1) {
     Addu(sp, sp, Operand(count * kPointerSize));
   }
@@ -535,10 +577,17 @@
   // into register dst.
   void LoadFromSafepointRegisterSlot(Register dst, Register src);
 
+  // Flush the I-cache from asm code. You should use CPU::FlushICache from C.
+  // Does not handle errors.
+  void FlushICache(Register address, unsigned instructions);
+
   // MIPS32 R2 instruction macro.
   void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
   void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
 
+  // ---------------------------------------------------------------------------
+  // FPU macros. These do not handle special cases like NaN or +- inf.
+
   // Convert unsigned word to double.
   void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch);
   void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
@@ -547,6 +596,24 @@
   void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
   void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
 
+  // Wrapper function for the different cmp/branch types.
+  void BranchF(Label* target,
+               Label* nan,
+               Condition cc,
+               FPURegister cmp1,
+               FPURegister cmp2,
+               BranchDelaySlot bd = PROTECT);
+
+  // Alternate (inline) version for better readability with USE_DELAY_SLOT.
+  inline void BranchF(BranchDelaySlot bd,
+                      Label* target,
+                      Label* nan,
+                      Condition cc,
+                      FPURegister cmp1,
+                      FPURegister cmp2) {
+    BranchF(target, nan, cc, cmp1, cmp2, bd);
+  };
+
   // Convert the HeapNumber pointed to by source to a 32bits signed integer
   // dest. If the HeapNumber does not fit into a 32bits signed integer branch
   // to not_int32 label. If FPU is available double_scratch is used but not
@@ -558,6 +625,18 @@
                       FPURegister double_scratch,
                       Label *not_int32);
 
+  // Truncates a double using a specific rounding mode.
+  // The except_flag will contain any exceptions caused by the instruction.
+  // If check_inexact is kDontCheckForInexactConversion, then the inexact
+  // exception is masked.
+  void EmitFPUTruncate(FPURoundingMode rounding_mode,
+                       FPURegister result,
+                       DoubleRegister double_input,
+                       Register scratch1,
+                       Register except_flag,
+                       CheckForInexactConversion check_inexact
+                           = kDontCheckForInexactConversion);
+
   // Helper for EmitECMATruncate.
   // This will truncate a floating-point value outside of the signed 32bit
   // integer range to a 32bit signed integer.
@@ -579,15 +658,6 @@
                         Register scratch2,
                         Register scratch3);
 
-  // -------------------------------------------------------------------------
-  // Activation frames.
-
-  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
-  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
-
-  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
-  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
-
   // Enter exit frame.
   // argc - argument count to be dropped by LeaveExitFrame.
   // save_doubles - saves FPU registers on stack, currently disabled.
@@ -614,6 +684,7 @@
                                     Register map,
                                     Register scratch);
 
+
   // -------------------------------------------------------------------------
   // JavaScript invokes.
 
@@ -754,6 +825,21 @@
   // occurred.
   void IllegalOperation(int num_arguments);
 
+
+  // Load and check the instance type of an object for being a string.
+  // Loads the type into the second argument register.
+  // Returns a condition that will be enabled if the object was a string.
+  Condition IsObjectStringType(Register obj,
+                               Register type,
+                               Register result) {
+    lw(type, FieldMemOperand(obj, HeapObject::kMapOffset));
+    lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
+    And(type, type, Operand(kIsNotStringMask));
+    ASSERT_EQ(0, kStringTag);
+    return eq;
+  }
+
+
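
IsObjectStringType leaves the masked instance-type bits in |type| and returns
the condition under which the object is a string; the caller passes those
operands to its own branch. A usage sketch (registers illustrative, with __
being the usual ACCESS_MASM shorthand):

    Label is_string;
    Condition is_string_cond = __ IsObjectStringType(a0, a2, a3);
    __ Branch(&is_string, is_string_cond, a2, Operand(zero_reg));
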
   // Picks out an array index from the hash field.
   // Register use:
   //   hash - holds the index's hash. Clobbered.
@@ -879,6 +965,9 @@
                        int num_arguments,
                        int result_size);
 
+  int CalculateStackPassedWords(int num_reg_arguments,
+                                int num_double_arguments);
+
   // Before calling a C-function from generated code, align arguments on stack
   // and add space for the four mips argument slots.
   // After aligning the frame, non-register arguments must be stored on the
@@ -888,7 +977,11 @@
   // C++ code.
   // Needs a scratch register to do some arithmetic. This register will be
   // trashed.
-  void PrepareCallCFunction(int num_arguments, Register scratch);
+  void PrepareCallCFunction(int num_reg_arguments,
+                            int num_double_registers,
+                            Register scratch);
+  void PrepareCallCFunction(int num_reg_arguments,
+                            Register scratch);
 
   // Arguments 1-4 are placed in registers a0 thru a3 respectively.
   // Arguments 5..n are stored to stack using following:
@@ -901,6 +994,12 @@
   // function).
   void CallCFunction(ExternalReference function, int num_arguments);
   void CallCFunction(Register function, Register scratch, int num_arguments);
+  void CallCFunction(ExternalReference function,
+                     int num_reg_arguments,
+                     int num_double_arguments);
+  void CallCFunction(Register function, Register scratch,
+                     int num_reg_arguments,
+                     int num_double_arguments);
   void GetCFunctionDoubleResult(const DoubleRegister dst);
 
   // There are two ways of passing double arguments on MIPS, depending on
@@ -976,6 +1075,9 @@
   bool generating_stub() { return generating_stub_; }
   void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
   bool allow_stub_calls() { return allow_stub_calls_; }
+  void set_has_frame(bool value) { has_frame_ = value; }
+  bool has_frame() { return has_frame_; }
+  inline bool AllowThisStubCall(CodeStub* stub);
 
   // ---------------------------------------------------------------------------
   // Number utilities.
@@ -1003,6 +1105,13 @@
     Addu(reg, reg, reg);
   }
 
+  // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
+  void SmiTagCheckOverflow(Register reg, Register overflow) {
+    mov(overflow, reg);  // Save original value.
+    addu(reg, reg, reg);
+    xor_(overflow, overflow, reg);  // Overflow if (value ^ 2 * value) < 0.
+  }
+
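
The xor trick works because doubling a value outside the Smi range flips the
sign bit: for 0x40000000 the addu yields 0x80000000 and value ^ (2 * value) ==
0xC0000000 is negative, while for 0x3FFFFFFF it yields 0x7FFFFFFE and the xor
stays non-negative. A caller-side sketch (registers illustrative; the
BranchOnOverflow helper referenced in the comment above is assumed to take the
label and the overflow register):

    Label not_a_smi;
    __ SmiTagCheckOverflow(a0, t9);       // a0 is now tagged; t9 < 0 on overflow.
    __ BranchOnOverflow(&not_a_smi, t9);  // Bail out if a0 did not fit in a Smi.
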
   void SmiTag(Register dst, Register src) {
     Addu(dst, src, src);
   }
@@ -1017,10 +1126,11 @@
 
   // Jump if the register contains a smi.
   inline void JumpIfSmi(Register value, Label* smi_label,
-                        Register scratch = at) {
+                        Register scratch = at,
+                        BranchDelaySlot bd = PROTECT) {
     ASSERT_EQ(0, kSmiTag);
     andi(scratch, value, kSmiTagMask);
-    Branch(smi_label, eq, scratch, Operand(zero_reg));
+    Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
   }
 
   // Jump if the register contains a non-smi.
@@ -1090,13 +1200,31 @@
                                            Register scratch2,
                                            Label* failure);
 
+  void ClampUint8(Register output_reg, Register input_reg);
+
+  void ClampDoubleToUint8(Register result_reg,
+                          DoubleRegister input_reg,
+                          DoubleRegister temp_double_reg);
+
+
   void LoadInstanceDescriptors(Register map, Register descriptors);
 
+
+  // Activation support.
+  void EnterFrame(StackFrame::Type type);
+  void LeaveFrame(StackFrame::Type type);
+
+  // Patch the relocated value (lui/ori pair).
+  void PatchRelocatedValue(Register li_location,
+                           Register scratch,
+                           Register new_value);
+
  private:
   void CallCFunctionHelper(Register function,
                            ExternalReference function_reference,
                            Register scratch,
-                           int num_arguments);
+                           int num_reg_arguments,
+                           int num_double_arguments);
 
   void BranchShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
   void BranchShort(int16_t offset, Condition cond, Register rs,
@@ -1132,10 +1260,6 @@
   // the function in the 'resolved' flag.
   Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
 
-  // Activation support.
-  void EnterFrame(StackFrame::Type type);
-  void LeaveFrame(StackFrame::Type type);
-
   void InitializeNewString(Register string,
                            Register length,
                            Heap::RootListIndex map_index,
@@ -1147,10 +1271,9 @@
   MemOperand SafepointRegisterSlot(Register reg);
   MemOperand SafepointRegistersAndDoublesSlot(Register reg);
 
-  bool UseAbsoluteCodePointers();
-
   bool generating_stub_;
   bool allow_stub_calls_;
+  bool has_frame_;
   // This handle will be patched with the code object on installation.
   Handle<Object> code_object_;
 
@@ -1191,34 +1314,6 @@
 };
 
 
-// -----------------------------------------------------------------------------
-// Static helper functions.
-
-static MemOperand ContextOperand(Register context, int index) {
-  return MemOperand(context, Context::SlotOffset(index));
-}
-
-
-static inline MemOperand GlobalObjectOperand()  {
-  return ContextOperand(cp, Context::GLOBAL_INDEX);
-}
-
-
-// Generate a MemOperand for loading a field from an object.
-static inline MemOperand FieldMemOperand(Register object, int offset) {
-  return MemOperand(object, offset - kHeapObjectTag);
-}
-
-
-// Generate a MemOperand for storing arguments 5..N on the stack
-// when calling CallCFunction().
-static inline MemOperand CFunctionArgumentOperand(int index) {
-  ASSERT(index > kCArgSlotCount);
-  // Argument 5 takes the slot just past the four Arg-slots.
-  int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
-  return MemOperand(sp, offset);
-}
-
 
 #ifdef GENERATED_CODE_COVERAGE
 #define CODE_COVERAGE_STRINGIFY(x) #x
diff --git a/src/mips/regexp-macro-assembler-mips.cc b/src/mips/regexp-macro-assembler-mips.cc
index 63e836f..9db5c5b 100644
--- a/src/mips/regexp-macro-assembler-mips.cc
+++ b/src/mips/regexp-macro-assembler-mips.cc
@@ -377,9 +377,12 @@
     // Isolate.
     __ li(a3, Operand(ExternalReference::isolate_address()));
 
-    ExternalReference function =
-        ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
-    __ CallCFunction(function, argument_count);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm_);
+      ExternalReference function =
+          ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+      __ CallCFunction(function, argument_count);
+    }
 
     // Restore regexp engine registers.
     __ MultiPop(regexp_registers_to_retain);
@@ -607,6 +610,12 @@
 
     // Entry code:
     __ bind(&entry_label_);
+
+    // Tell the system that we have a stack frame.  Because the type is MANUAL,
+    // no code is generated.
+    FrameScope scope(masm_, StackFrame::MANUAL);
+
+    // Actually emit code to start a new stack frame.
     // Push arguments
     // Save callee-save registers.
     // Start new stack frame.
@@ -1244,13 +1253,14 @@
   if (stack_alignment < kPointerSize) stack_alignment = kPointerSize;
   // Stack is already aligned for call, so decrement by alignment
   // to make room for storing the return address.
-  __ Subu(sp, sp, Operand(stack_alignment));
-  __ sw(ra, MemOperand(sp, 0));
-  __ mov(a0, sp);
+  __ Subu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize));
+  const int return_address_offset = kCArgsSlotsSize;
+  __ Addu(a0, sp, return_address_offset);
+  __ sw(ra, MemOperand(a0, 0));
   __ mov(t9, t1);
   __ Call(t9);
-  __ lw(ra, MemOperand(sp, 0));
-  __ Addu(sp, sp, Operand(stack_alignment));
+  __ lw(ra, MemOperand(sp, return_address_offset));
+  __ Addu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize));
   __ Jump(ra);
 }
 
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 5b94973..c17a8e8 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -554,9 +554,10 @@
 }
 
 
-static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
-                                      const CallOptimization& optimization,
-                                      int argc) {
+static MaybeObject* GenerateFastApiDirectCall(
+    MacroAssembler* masm,
+    const CallOptimization& optimization,
+    int argc) {
   // ----------- S t a t e -------------
   //  -- sp[0]              : holder (set by CheckPrototypes)
   //  -- sp[4]              : callee js function
@@ -595,6 +596,7 @@
 
   const int kApiStackSpace = 4;
 
+  FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ EnterExitFrame(false, kApiStackSpace);
 
   // NOTE: the O32 abi requires a0 to hold a special pointer when returning a
@@ -626,6 +628,7 @@
       ExternalReference(&fun,
                         ExternalReference::DIRECT_API_CALL,
                         masm->isolate());
+  AllowExternalCallThatCantCauseGC scope(masm);
   return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
 }
 
@@ -804,7 +807,7 @@
                                         miss_label);
 
     // Call a runtime function to load the interceptor property.
-    __ EnterInternalFrame();
+    FrameScope scope(masm, StackFrame::INTERNAL);
     // Save the name_ register across the call.
     __ push(name_);
 
@@ -822,7 +825,8 @@
 
     // Restore the name_ register.
     __ pop(name_);
-    __ LeaveInternalFrame();
+
+    // Leave the internal frame.
   }
 
   void LoadWithInterceptor(MacroAssembler* masm,
@@ -831,19 +835,20 @@
                            JSObject* holder_obj,
                            Register scratch,
                            Label* interceptor_succeeded) {
-    __ EnterInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
 
-    __ Push(holder, name_);
+      __ Push(holder, name_);
 
-    CompileCallLoadPropertyWithInterceptor(masm,
-                                           receiver,
-                                           holder,
-                                           name_,
-                                           holder_obj);
+      CompileCallLoadPropertyWithInterceptor(masm,
+                                             receiver,
+                                             holder,
+                                             name_,
+                                             holder_obj);
 
-    __ pop(name_);  // Restore the name.
-    __ pop(receiver);  // Restore the holder.
-    __ LeaveInternalFrame();
+      __ pop(name_);  // Restore the name.
+      __ pop(receiver);  // Restore the holder.
+    }
 
     // If interceptor returns no-result sentinel, call the constant function.
     __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
@@ -1256,7 +1261,9 @@
 
   const int kApiStackSpace = 1;
 
+  FrameScope frame_scope(masm(), StackFrame::MANUAL);
   __ EnterExitFrame(false, kApiStackSpace);
+
   // Create AccessorInfo instance on the stack above the exit frame with
   // scratch2 (internal::Object **args_) as the data.
   __ sw(a2, MemOperand(sp, kPointerSize));
@@ -1317,41 +1324,43 @@
 
     // Save necessary data before invoking an interceptor.
     // Requires a frame to make GC aware of pushed pointers.
-    __ EnterInternalFrame();
+    {
+      FrameScope frame_scope(masm(), StackFrame::INTERNAL);
 
-    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-      // CALLBACKS case needs a receiver to be passed into C++ callback.
-      __ Push(receiver, holder_reg, name_reg);
-    } else {
-      __ Push(holder_reg, name_reg);
+      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+        // CALLBACKS case needs a receiver to be passed into C++ callback.
+        __ Push(receiver, holder_reg, name_reg);
+      } else {
+        __ Push(holder_reg, name_reg);
+      }
+
+      // Invoke an interceptor.  Note: map checks from receiver to
+      // interceptor's holder have been compiled before (see a caller
+      // of this method).
+      CompileCallLoadPropertyWithInterceptor(masm(),
+                                             receiver,
+                                             holder_reg,
+                                             name_reg,
+                                             interceptor_holder);
+
+      // Check if interceptor provided a value for property.  If it's
+      // the case, return immediately.
+      Label interceptor_failed;
+      __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
+      __ Branch(&interceptor_failed, eq, v0, Operand(scratch1));
+      frame_scope.GenerateLeaveFrame();
+      __ Ret();
+
+      __ bind(&interceptor_failed);
+      __ pop(name_reg);
+      __ pop(holder_reg);
+      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+        __ pop(receiver);
+      }
+
+      // Leave the internal frame.
     }
 
-    // Invoke an interceptor.  Note: map checks from receiver to
-    // interceptor's holder has been compiled before (see a caller
-    // of this method).
-    CompileCallLoadPropertyWithInterceptor(masm(),
-                                           receiver,
-                                           holder_reg,
-                                           name_reg,
-                                           interceptor_holder);
-
-    // Check if interceptor provided a value for property.  If it's
-    // the case, return immediately.
-    Label interceptor_failed;
-    __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
-    __ Branch(&interceptor_failed, eq, v0, Operand(scratch1));
-    __ LeaveInternalFrame();
-    __ Ret();
-
-    __ bind(&interceptor_failed);
-    __ pop(name_reg);
-    __ pop(holder_reg);
-    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-      __ pop(receiver);
-    }
-
-    __ LeaveInternalFrame();
-
     // Check that the maps from interceptor's holder to lookup's holder
     // haven't changed.  And load lookup's holder into |holder| register.
     if (interceptor_holder != lookup->holder()) {
@@ -1605,8 +1614,8 @@
       __ Addu(end_elements, elements, end_elements);
       const int kEndElementsOffset =
           FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
-      __ sw(t0, MemOperand(end_elements, kEndElementsOffset));
-      __ Addu(end_elements, end_elements, kPointerSize);
+      __ Addu(end_elements, end_elements, kEndElementsOffset);
+      __ sw(t0, MemOperand(end_elements));
 
       // Check for a smi.
       __ JumpIfNotSmi(t0, &with_write_barrier);
@@ -2551,7 +2560,12 @@
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   if (V8::UseCrankshaft()) {
-    UNIMPLEMENTED_MIPS();
+    // TODO(kasperl): For now, we always call indirectly through the
+    // code field in the function to allow recompilation to take effect
+    // without changing any of the call sites.
+    __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+    __ InvokeCode(a3, expected, arguments(), JUMP_FUNCTION,
+                  NullCallWrapper(), call_kind);
   } else {
     __ InvokeCode(code, expected, arguments(), RelocInfo::CODE_TARGET,
                   JUMP_FUNCTION, call_kind);
@@ -3457,6 +3471,7 @@
 
     case EXTERNAL_FLOAT_ELEMENTS:
     case EXTERNAL_DOUBLE_ELEMENTS:
+    case FAST_SMI_ELEMENTS:
     case FAST_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
@@ -3828,7 +3843,6 @@
   __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
 
   // Check that the index is in range.
-  __ SmiUntag(t0, key);
   __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
   // Unsigned comparison catches both negative and too-large values.
   __ Branch(&miss_force_generic, Ugreater_equal, key, Operand(t1));
@@ -3836,7 +3850,6 @@
   // Handle both smis and HeapNumbers in the fast path. Go to the
   // runtime for all other kinds of values.
   // a3: external array.
-  // t0: key (integer).
 
   if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
     // Double to pixel conversion is only implemented in the runtime for now.
@@ -3848,7 +3861,6 @@
   __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
 
   // a3: base pointer of external storage.
-  // t0: key (integer).
   // t1: value (integer).
 
   switch (elements_kind) {
@@ -3865,33 +3877,36 @@
       __ mov(v0, t1);  // Value is in range 0..255.
       __ bind(&done);
       __ mov(t1, v0);
-      __ addu(t8, a3, t0);
+
+      __ srl(t8, key, 1);
+      __ addu(t8, a3, t8);
       __ sb(t1, MemOperand(t8, 0));
       }
       break;
     case EXTERNAL_BYTE_ELEMENTS:
     case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-      __ addu(t8, a3, t0);
+      __ srl(t8, key, 1);
+      __ addu(t8, a3, t8);
       __ sb(t1, MemOperand(t8, 0));
       break;
     case EXTERNAL_SHORT_ELEMENTS:
     case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-      __ sll(t8, t0, 1);
-      __ addu(t8, a3, t8);
+      __ addu(t8, a3, key);
       __ sh(t1, MemOperand(t8, 0));
       break;
     case EXTERNAL_INT_ELEMENTS:
     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-      __ sll(t8, t0, 2);
+      __ sll(t8, key, 1);
       __ addu(t8, a3, t8);
       __ sw(t1, MemOperand(t8, 0));
       break;
     case EXTERNAL_FLOAT_ELEMENTS:
       // Perform int-to-float conversion and store to memory.
+      __ SmiUntag(t0, key);
       StoreIntAsFloat(masm, a3, t0, t1, t2, t3, t4);
       break;
     case EXTERNAL_DOUBLE_ELEMENTS:
-      __ sll(t8, t0, 3);
+      __ sll(t8, key, 2);
       __ addu(a3, a3, t8);
       // a3: effective address of the double element
       FloatingPointHelper::Destination destination;
@@ -3921,12 +3936,11 @@
   }
 
   // Entry registers are intact, a0 holds the value which is the return value.
-  __ mov(v0, value);
+  __ mov(v0, a0);
   __ Ret();
 
   if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
     // a3: external array.
-    // t0: index (integer).
     __ bind(&check_heap_number);
     __ GetObjectType(value, t1, t2);
     __ Branch(&slow, ne, t2, Operand(HEAP_NUMBER_TYPE));
@@ -3934,7 +3948,6 @@
     __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
 
     // a3: base pointer of external storage.
-    // t0: key (integer).
 
     // The WebGL specification leaves the behavior of storing NaN and
     // +/-Infinity into integer arrays basically undefined. For more
@@ -3947,11 +3960,11 @@
 
       if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
         __ cvt_s_d(f0, f0);
-        __ sll(t8, t0, 2);
+        __ sll(t8, key, 1);
         __ addu(t8, a3, t8);
         __ swc1(f0, MemOperand(t8, 0));
       } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
-        __ sll(t8, t0, 3);
+        __ sll(t8, key, 2);
         __ addu(t8, a3, t8);
         __ sdc1(f0, MemOperand(t8, 0));
       } else {
@@ -3960,18 +3973,18 @@
         switch (elements_kind) {
           case EXTERNAL_BYTE_ELEMENTS:
           case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-            __ addu(t8, a3, t0);
+            __ srl(t8, key, 1);
+            __ addu(t8, a3, t8);
             __ sb(t3, MemOperand(t8, 0));
             break;
           case EXTERNAL_SHORT_ELEMENTS:
           case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-            __ sll(t8, t0, 1);
-            __ addu(t8, a3, t8);
+            __ addu(t8, a3, key);
             __ sh(t3, MemOperand(t8, 0));
             break;
           case EXTERNAL_INT_ELEMENTS:
           case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-            __ sll(t8, t0, 2);
+            __ sll(t8, key, 1);
             __ addu(t8, a3, t8);
             __ sw(t3, MemOperand(t8, 0));
             break;
@@ -3989,7 +4002,7 @@
 
       // Entry registers are intact, a0 holds the value
       // which is the return value.
-      __ mov(v0, value);
+      __ mov(v0, a0);
       __ Ret();
     } else {
       // FPU is not available, do manual conversions.
@@ -4044,13 +4057,13 @@
         __ or_(t3, t7, t6);
 
         __ bind(&done);
-        __ sll(t9, a1, 2);
+        __ sll(t9, key, 1);
         __ addu(t9, a2, t9);
         __ sw(t3, MemOperand(t9, 0));
 
         // Entry registers are intact, a0 holds the value which is the return
         // value.
-        __ mov(v0, value);
+        __ mov(v0, a0);
         __ Ret();
 
         __ bind(&nan_or_infinity_or_zero);
@@ -4068,6 +4081,7 @@
         // t8: effective address of destination element.
         __ sw(t4, MemOperand(t8, 0));
         __ sw(t3, MemOperand(t8, Register::kSizeInBytes));
+        __ mov(v0, a0);
         __ Ret();
       } else {
         bool is_signed_type = IsElementTypeSigned(elements_kind);
@@ -4130,18 +4144,18 @@
         switch (elements_kind) {
           case EXTERNAL_BYTE_ELEMENTS:
           case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-            __ addu(t8, a3, t0);
+            __ srl(t8, key, 1);
+            __ addu(t8, a3, t8);
             __ sb(t3, MemOperand(t8, 0));
             break;
           case EXTERNAL_SHORT_ELEMENTS:
           case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-            __ sll(t8, t0, 1);
-            __ addu(t8, a3, t8);
+            __ addu(t8, a3, key);
             __ sh(t3, MemOperand(t8, 0));
             break;
           case EXTERNAL_INT_ELEMENTS:
           case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-            __ sll(t8, t0, 2);
+            __ sll(t8, key, 1);
             __ addu(t8, a3, t8);
             __ sw(t3, MemOperand(t8, 0));
             break;
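A note on the MIPS key handling in the hunks above: the external-array store stubs no longer untag the key into t0 up front; the key stays a smi and the untagging is folded into the per-element address scaling (srl for byte elements, addu for shorts, sll for words and doubles). A minimal C++ sketch of the equivalent offset arithmetic, assuming the one-bit smi tag used on 32-bit targets (the helper name is illustrative and not part of the patch):

    #include <cstdint>

    // Illustrative only: byte offset into external-array backing storage,
    // computed directly from a smi-tagged key (smi_key == index << 1 here).
    static inline uint32_t OffsetFromSmiKey(uint32_t smi_key, int element_size) {
      uint32_t index = smi_key >> 1;    // untag the smi
      return index * element_size;      // scale by the element size
    }

    // 1-byte elements: offset == smi_key >> 1             (srl t8, key, 1)
    // 2-byte elements: the smi tag already equals the scale (addu t8, base, key)
    // 4-byte elements: offset == smi_key << 1             (sll t8, key, 1)
    // 8-byte elements: offset == smi_key << 2             (sll t8, key, 2)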
diff --git a/src/mksnapshot.cc b/src/mksnapshot.cc
index a791dbb..7a3fd09 100644
--- a/src/mksnapshot.cc
+++ b/src/mksnapshot.cc
@@ -312,7 +312,8 @@
   }
   // If we don't do this then we end up with a stray root pointing at the
   // context even after we have disposed of the context.
-  HEAP->CollectAllGarbage(true);
+  // TODO(gc): request full compaction?
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   i::Object* raw_context = *(v8::Utils::OpenHandle(*context));
   context.Dispose();
   CppByteSink sink(argv[1]);
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 8de7162..6d2cf5f 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -94,6 +94,9 @@
     case BYTE_ARRAY_TYPE:
       ByteArray::cast(this)->ByteArrayVerify();
       break;
+    case FREE_SPACE_TYPE:
+      FreeSpace::cast(this)->FreeSpaceVerify();
+      break;
     case EXTERNAL_PIXEL_ARRAY_TYPE:
       ExternalPixelArray::cast(this)->ExternalPixelArrayVerify();
       break;
@@ -207,6 +210,11 @@
 }
 
 
+void FreeSpace::FreeSpaceVerify() {
+  ASSERT(IsFreeSpace());
+}
+
+
 void ExternalPixelArray::ExternalPixelArrayVerify() {
   ASSERT(IsExternalPixelArray());
 }
@@ -260,7 +268,7 @@
              (map()->inobject_properties() + properties()->length() -
               map()->NextFreePropertyIndex()));
   }
-  ASSERT_EQ(map()->has_fast_elements(),
+  ASSERT_EQ((map()->has_fast_elements() || map()->has_fast_smi_only_elements()),
             (elements()->map() == GetHeap()->fixed_array_map() ||
              elements()->map() == GetHeap()->fixed_cow_array_map()));
   ASSERT(map()->has_fast_elements() == HasFastElements());
@@ -322,7 +330,8 @@
       double value = get_scalar(i);
       ASSERT(!isnan(value) ||
              (BitCast<uint64_t>(value) ==
-              BitCast<uint64_t>(canonical_not_the_hole_nan_as_double())));
+              BitCast<uint64_t>(canonical_not_the_hole_nan_as_double())) ||
+             ((BitCast<uint64_t>(value) & Double::kSignMask) != 0));
     }
   }
 }
@@ -387,6 +396,7 @@
   CHECK(IsJSFunction());
   VerifyObjectField(kPrototypeOrInitialMapOffset);
   VerifyObjectField(kNextFunctionLinkOffset);
+  CHECK(code()->IsCode());
   CHECK(next_function_link()->IsUndefined() ||
         next_function_link()->IsJSFunction());
 }
@@ -446,9 +456,8 @@
   } else {
     ASSERT(number->IsSmi());
     int value = Smi::cast(number)->value();
-    // Hidden oddballs have negative smis.
-    const int kLeastHiddenOddballNumber = -4;
     ASSERT(value <= 1);
+    // Hidden oddballs have negative smis.
     ASSERT(value >= kLeastHiddenOddballNumber);
   }
 }
@@ -463,6 +472,7 @@
 void Code::CodeVerify() {
   CHECK(IsAligned(reinterpret_cast<intptr_t>(instruction_start()),
                   kCodeAlignment));
+  relocation_info()->Verify();
   Address last_gc_pc = NULL;
   for (RelocIterator it(this); !it.done(); it.next()) {
     it.rinfo()->Verify();
@@ -488,7 +498,7 @@
   CHECK(IsJSWeakMap());
   JSObjectVerify();
   VerifyHeapPointer(table());
-  ASSERT(table()->IsHashTable());
+  ASSERT(table()->IsHashTable() || table()->IsUndefined());
 }
 
 
@@ -535,13 +545,14 @@
 
 
 void JSProxy::JSProxyVerify() {
-  ASSERT(IsJSProxy());
+  CHECK(IsJSProxy());
   VerifyPointer(handler());
+  ASSERT(hash()->IsSmi() || hash()->IsUndefined());
 }
 
 
 void JSFunctionProxy::JSFunctionProxyVerify() {
-  ASSERT(IsJSFunctionProxy());
+  CHECK(IsJSFunctionProxy());
   JSProxyVerify();
   VerifyPointer(call_trap());
   VerifyPointer(construct_trap());
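The verifier hunks above also promote several entry checks (JSProxyVerify, JSFunctionProxyVerify) from ASSERT to CHECK. In V8's convention CHECK is evaluated unconditionally, while ASSERT compiles away outside debug builds. A rough sketch of that relationship, using stand-in definitions rather than V8's actual checks.h:

    #include <cstdio>
    #include <cstdlib>

    // Stand-ins only: CHECK is unconditional, ASSERT exists only in debug builds.
    #define CHECK(condition)                                          \
      do {                                                            \
        if (!(condition)) {                                           \
          std::fprintf(stderr, "CHECK failed: %s\n", #condition);     \
          std::abort();                                               \
        }                                                             \
      } while (false)

    #ifdef DEBUG
    #define ASSERT(condition) CHECK(condition)
    #else
    #define ASSERT(condition) ((void) 0)
    #endif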
diff --git a/src/objects-inl.h b/src/objects-inl.h
index bb24a2f..c579d37 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -43,8 +43,11 @@
 #include "isolate.h"
 #include "property.h"
 #include "spaces.h"
+#include "store-buffer.h"
 #include "v8memory.h"
 
+#include "incremental-marking.h"
+
 namespace v8 {
 namespace internal {
 
@@ -80,16 +83,7 @@
   type* holder::name() { return type::cast(READ_FIELD(this, offset)); } \
   void holder::set_##name(type* value, WriteBarrierMode mode) {         \
     WRITE_FIELD(this, offset, value);                                   \
-    CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, mode);           \
-  }
-
-
-// GC-safe accessors do not use HeapObject::GetHeap(), but access TLS instead.
-#define ACCESSORS_GCSAFE(holder, name, type, offset)                    \
-  type* holder::name() { return type::cast(READ_FIELD(this, offset)); } \
-  void holder::set_##name(type* value, WriteBarrierMode mode) {         \
-    WRITE_FIELD(this, offset, value);                                   \
-    CONDITIONAL_WRITE_BARRIER(HEAP, this, offset, mode);                \
+    CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);    \
   }
 
 
@@ -147,6 +141,12 @@
 }
 
 
+bool Object::NonFailureIsHeapObject() {
+  ASSERT(!this->IsFailure());
+  return (reinterpret_cast<intptr_t>(this) & kSmiTagMask) != 0;
+}
+
+
 bool Object::IsHeapNumber() {
   return Object::IsHeapObject()
     && HeapObject::cast(this)->map()->instance_type() == HEAP_NUMBER_TYPE;
@@ -165,6 +165,13 @@
 }
 
 
+bool Object::IsSpecFunction() {
+  if (!Object::IsHeapObject()) return false;
+  InstanceType type = HeapObject::cast(this)->map()->instance_type();
+  return type == JS_FUNCTION_TYPE || type == JS_FUNCTION_PROXY_TYPE;
+}
+
+
 bool Object::IsSymbol() {
   if (!this->IsHeapObject()) return false;
   uint32_t type = HeapObject::cast(this)->map()->instance_type();
@@ -402,6 +409,19 @@
 }
 
 
+bool Object::IsFreeSpace() {
+  return Object::IsHeapObject()
+    && HeapObject::cast(this)->map()->instance_type() == FREE_SPACE_TYPE;
+}
+
+
+bool Object::IsFiller() {
+  if (!Object::IsHeapObject()) return false;
+  InstanceType instance_type = HeapObject::cast(this)->map()->instance_type();
+  return instance_type == FREE_SPACE_TYPE || instance_type == FILLER_TYPE;
+}
+
+
 bool Object::IsExternalPixelArray() {
   return Object::IsHeapObject() &&
       HeapObject::cast(this)->map()->instance_type() ==
@@ -509,20 +529,23 @@
 
 
 bool Object::IsJSReceiver() {
+  STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
   return IsHeapObject() &&
       HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_RECEIVER_TYPE;
 }
 
 
 bool Object::IsJSObject() {
-  return IsJSReceiver() && !IsJSProxy();
+  STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
+  return IsHeapObject() &&
+      HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_OBJECT_TYPE;
 }
 
 
 bool Object::IsJSProxy() {
-  return Object::IsHeapObject() &&
-     (HeapObject::cast(this)->map()->instance_type() == JS_PROXY_TYPE ||
-      HeapObject::cast(this)->map()->instance_type() == JS_FUNCTION_PROXY_TYPE);
+  if (!Object::IsHeapObject()) return false;
+  InstanceType type = HeapObject::cast(this)->map()->instance_type();
+  return FIRST_JS_PROXY_TYPE <= type && type <= LAST_JS_PROXY_TYPE;
 }
 
 
@@ -642,7 +665,6 @@
 
 
 bool Object::IsOddball() {
-  ASSERT(HEAP->is_safe_to_read_maps());
   return Object::IsHeapObject()
     && HeapObject::cast(this)->map()->instance_type() == ODDBALL_TYPE;
 }
@@ -939,21 +961,20 @@
 #define WRITE_FIELD(p, offset, value) \
   (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
 
-// TODO(isolates): Pass heap in to these macros.
-#define WRITE_BARRIER(object, offset) \
-  object->GetHeap()->RecordWrite(object->address(), offset);
+#define WRITE_BARRIER(heap, object, offset, value)                      \
+  heap->incremental_marking()->RecordWrite(                             \
+      object, HeapObject::RawField(object, offset), value);             \
+  if (heap->InNewSpace(value)) {                                        \
+    heap->RecordWrite(object->address(), offset);                       \
+  }
 
-// CONDITIONAL_WRITE_BARRIER must be issued after the actual
-// write due to the assert validating the written value.
-#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, mode) \
-  if (mode == UPDATE_WRITE_BARRIER) { \
-    heap->RecordWrite(object->address(), offset); \
-  } else { \
-    ASSERT(mode == SKIP_WRITE_BARRIER); \
-    ASSERT(heap->InNewSpace(object) || \
-           !heap->InNewSpace(READ_FIELD(object, offset)) || \
-           Page::FromAddress(object->address())->           \
-               IsRegionDirty(object->address() + offset));  \
+#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode)    \
+  if (mode == UPDATE_WRITE_BARRIER) {                                   \
+    heap->incremental_marking()->RecordWrite(                           \
+      object, HeapObject::RawField(object, offset), value);             \
+    if (heap->InNewSpace(value)) {                                      \
+      heap->RecordWrite(object->address(), offset);                     \
+    }                                                                   \
   }
 
 #ifndef V8_TARGET_ARCH_MIPS
@@ -974,7 +995,6 @@
   #define READ_DOUBLE_FIELD(p, offset) read_double_field(p, offset)
 #endif  // V8_TARGET_ARCH_MIPS
 
-
 #ifndef V8_TARGET_ARCH_MIPS
   #define WRITE_DOUBLE_FIELD(p, offset, value) \
     (*reinterpret_cast<double*>(FIELD_ADDR(p, offset)) = value)
@@ -1169,91 +1189,6 @@
 }
 
 
-bool MapWord::IsMarked() {
-  return (value_ & kMarkingMask) == 0;
-}
-
-
-void MapWord::SetMark() {
-  value_ &= ~kMarkingMask;
-}
-
-
-void MapWord::ClearMark() {
-  value_ |= kMarkingMask;
-}
-
-
-bool MapWord::IsOverflowed() {
-  return (value_ & kOverflowMask) != 0;
-}
-
-
-void MapWord::SetOverflow() {
-  value_ |= kOverflowMask;
-}
-
-
-void MapWord::ClearOverflow() {
-  value_ &= ~kOverflowMask;
-}
-
-
-MapWord MapWord::EncodeAddress(Address map_address, int offset) {
-  // Offset is the distance in live bytes from the first live object in the
-  // same page. The offset between two objects in the same page should not
-  // exceed the object area size of a page.
-  ASSERT(0 <= offset && offset < Page::kObjectAreaSize);
-
-  uintptr_t compact_offset = offset >> kObjectAlignmentBits;
-  ASSERT(compact_offset < (1 << kForwardingOffsetBits));
-
-  Page* map_page = Page::FromAddress(map_address);
-  ASSERT_MAP_PAGE_INDEX(map_page->mc_page_index);
-
-  uintptr_t map_page_offset =
-      map_page->Offset(map_address) >> kMapAlignmentBits;
-
-  uintptr_t encoding =
-      (compact_offset << kForwardingOffsetShift) |
-      (map_page_offset << kMapPageOffsetShift) |
-      (map_page->mc_page_index << kMapPageIndexShift);
-  return MapWord(encoding);
-}
-
-
-Address MapWord::DecodeMapAddress(MapSpace* map_space) {
-  int map_page_index =
-      static_cast<int>((value_ & kMapPageIndexMask) >> kMapPageIndexShift);
-  ASSERT_MAP_PAGE_INDEX(map_page_index);
-
-  int map_page_offset = static_cast<int>(
-      ((value_ & kMapPageOffsetMask) >> kMapPageOffsetShift) <<
-      kMapAlignmentBits);
-
-  return (map_space->PageAddress(map_page_index) + map_page_offset);
-}
-
-
-int MapWord::DecodeOffset() {
-  // The offset field is represented in the kForwardingOffsetBits
-  // most-significant bits.
-  uintptr_t offset = (value_ >> kForwardingOffsetShift) << kObjectAlignmentBits;
-  ASSERT(offset < static_cast<uintptr_t>(Page::kObjectAreaSize));
-  return static_cast<int>(offset);
-}
-
-
-MapWord MapWord::FromEncodedAddress(Address address) {
-  return MapWord(reinterpret_cast<uintptr_t>(address));
-}
-
-
-Address MapWord::ToEncodedAddress() {
-  return reinterpret_cast<Address>(value_);
-}
-
-
 #ifdef DEBUG
 void HeapObject::VerifyObjectField(int offset) {
   VerifyPointer(READ_FIELD(this, offset));
@@ -1266,12 +1201,11 @@
 
 
 Heap* HeapObject::GetHeap() {
-  // During GC, the map pointer in HeapObject is used in various ways that
-  // prevent us from retrieving Heap from the map.
-  // Assert that we are not in GC, implement GC code in a way that it doesn't
-  // pull heap from the map.
-  ASSERT(HEAP->is_safe_to_read_maps());
-  return map()->heap();
+  Heap* heap =
+      MemoryChunk::FromAddress(reinterpret_cast<Address>(this))->heap();
+  ASSERT(heap != NULL);
+  ASSERT(heap->isolate() == Isolate::Current());
+  return heap;
 }
 
 
@@ -1287,6 +1221,17 @@
 
 void HeapObject::set_map(Map* value) {
   set_map_word(MapWord::FromMap(value));
+  if (value != NULL) {
+    // TODO(1600) We are passing NULL as a slot because maps can never be on
+    // an evacuation candidate.
+    value->GetHeap()->incremental_marking()->RecordWrite(this, NULL, value);
+  }
+}
+
+
+// Unsafe accessor omitting write barrier.
+void HeapObject::set_map_unsafe(Map* value) {
+  set_map_word(MapWord::FromMap(value));
 }
 
 
@@ -1329,47 +1274,6 @@
 }
 
 
-bool HeapObject::IsMarked() {
-  return map_word().IsMarked();
-}
-
-
-void HeapObject::SetMark() {
-  ASSERT(!IsMarked());
-  MapWord first_word = map_word();
-  first_word.SetMark();
-  set_map_word(first_word);
-}
-
-
-void HeapObject::ClearMark() {
-  ASSERT(IsMarked());
-  MapWord first_word = map_word();
-  first_word.ClearMark();
-  set_map_word(first_word);
-}
-
-
-bool HeapObject::IsOverflowed() {
-  return map_word().IsOverflowed();
-}
-
-
-void HeapObject::SetOverflow() {
-  MapWord first_word = map_word();
-  first_word.SetOverflow();
-  set_map_word(first_word);
-}
-
-
-void HeapObject::ClearOverflow() {
-  ASSERT(IsOverflowed());
-  MapWord first_word = map_word();
-  first_word.ClearOverflow();
-  set_map_word(first_word);
-}
-
-
 double HeapNumber::value() {
   return READ_DOUBLE_FIELD(this, kValueOffset);
 }
@@ -1400,16 +1304,84 @@
   return static_cast<FixedArrayBase*>(array);
 }
 
+void JSObject::ValidateSmiOnlyElements() {
+#if DEBUG
+  if (FLAG_smi_only_arrays &&
+      map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS) {
+    Heap* heap = GetHeap();
+    // Don't use elements, since integrity checks will fail if there
+    // are filler pointers in the array.
+    FixedArray* fixed_array =
+        reinterpret_cast<FixedArray*>(READ_FIELD(this, kElementsOffset));
+    Map* map = fixed_array->map();
+    // Arrays that have been shifted in place can't be verified.
+    if (map != heap->raw_unchecked_one_pointer_filler_map() &&
+        map != heap->raw_unchecked_two_pointer_filler_map() &&
+        map != heap->free_space_map()) {
+      for (int i = 0; i < fixed_array->length(); i++) {
+        Object* current = fixed_array->get(i);
+        ASSERT(current->IsSmi() || current == heap->the_hole_value());
+      }
+    }
+  }
+#endif
+}
+
+
+MaybeObject* JSObject::EnsureCanContainNonSmiElements() {
+#if DEBUG
+  ValidateSmiOnlyElements();
+#endif
+  if (FLAG_smi_only_arrays &&
+      (map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS)) {
+    Object* obj;
+    MaybeObject* maybe_obj = GetElementsTransitionMap(FAST_ELEMENTS);
+    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+    set_map(Map::cast(obj));
+  }
+  return this;
+}
+
+
+MaybeObject* JSObject::EnsureCanContainElements(Object** objects,
+                                                uint32_t count) {
+  if (FLAG_smi_only_arrays &&
+      map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS) {
+    for (uint32_t i = 0; i < count; ++i) {
+      Object* current = *objects++;
+      if (!current->IsSmi() && current != GetHeap()->the_hole_value()) {
+        return EnsureCanContainNonSmiElements();
+      }
+    }
+  }
+  return this;
+}
+
+
+MaybeObject* JSObject::EnsureCanContainElements(FixedArray* elements) {
+  if (FLAG_smi_only_arrays) {
+    Object** objects = reinterpret_cast<Object**>(
+        FIELD_ADDR(elements, elements->OffsetOfElementAt(0)));
+    return EnsureCanContainElements(objects, elements->length());
+  } else {
+    return this;
+  }
+}
+
 
 void JSObject::set_elements(FixedArrayBase* value, WriteBarrierMode mode) {
-  ASSERT(map()->has_fast_elements() ==
+  ASSERT((map()->has_fast_elements() ||
+          map()->has_fast_smi_only_elements()) ==
          (value->map() == GetHeap()->fixed_array_map() ||
           value->map() == GetHeap()->fixed_cow_array_map()));
   ASSERT(map()->has_fast_double_elements() ==
          value->IsFixedDoubleArray());
   ASSERT(value->HasValidElements());
+#ifdef DEBUG
+  ValidateSmiOnlyElements();
+#endif
   WRITE_FIELD(this, kElementsOffset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, mode);
+  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, value, mode);
 }
 
 
@@ -1420,7 +1392,7 @@
 
 
 void JSObject::initialize_elements() {
-  ASSERT(map()->has_fast_elements());
+  ASSERT(map()->has_fast_elements() || map()->has_fast_smi_only_elements());
   ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
   WRITE_FIELD(this, kElementsOffset, GetHeap()->empty_fixed_array());
 }
@@ -1428,9 +1400,11 @@
 
 MaybeObject* JSObject::ResetElements() {
   Object* obj;
-  { MaybeObject* maybe_obj = map()->GetFastElementsMap();
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  }
+  ElementsKind elements_kind = FLAG_smi_only_arrays
+      ? FAST_SMI_ONLY_ELEMENTS
+      : FAST_ELEMENTS;
+  MaybeObject* maybe_obj = GetElementsTransitionMap(elements_kind);
+  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   set_map(Map::cast(obj));
   initialize_elements();
   return this;
@@ -1442,12 +1416,12 @@
 
 
 byte Oddball::kind() {
-  return READ_BYTE_FIELD(this, kKindOffset);
+  return Smi::cast(READ_FIELD(this, kKindOffset))->value();
 }
 
 
 void Oddball::set_kind(byte value) {
-  WRITE_BYTE_FIELD(this, kKindOffset, value);
+  WRITE_FIELD(this, kKindOffset, Smi::FromInt(value));
 }
 
 
@@ -1460,6 +1434,8 @@
   // The write barrier is not used for global property cells.
   ASSERT(!val->IsJSGlobalPropertyCell());
   WRITE_FIELD(this, kValueOffset, val);
+  GetHeap()->incremental_marking()->RecordWrite(
+      this, HeapObject::RawField(this, kValueOffset), val);
 }
 
 
@@ -1528,7 +1504,7 @@
   // to adjust the index here.
   int offset = GetHeaderSize() + (kPointerSize * index);
   WRITE_FIELD(this, offset, value);
-  WRITE_BARRIER(this, offset);
+  WRITE_BARRIER(GetHeap(), this, offset, value);
 }
 
 
@@ -1554,7 +1530,7 @@
   if (index < 0) {
     int offset = map()->instance_size() + (index * kPointerSize);
     WRITE_FIELD(this, offset, value);
-    WRITE_BARRIER(this, offset);
+    WRITE_BARRIER(GetHeap(), this, offset, value);
   } else {
     ASSERT(index < properties()->length());
     properties()->set(index, value);
@@ -1588,16 +1564,32 @@
   ASSERT(index < 0);
   int offset = map()->instance_size() + (index * kPointerSize);
   WRITE_FIELD(this, offset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, mode);
+  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
   return value;
 }
 
 
 
-void JSObject::InitializeBody(int object_size, Object* value) {
-  ASSERT(!value->IsHeapObject() || !GetHeap()->InNewSpace(value));
-  for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
-    WRITE_FIELD(this, offset, value);
+void JSObject::InitializeBody(Map* map,
+                              Object* pre_allocated_value,
+                              Object* filler_value) {
+  ASSERT(!filler_value->IsHeapObject() ||
+         !GetHeap()->InNewSpace(filler_value));
+  ASSERT(!pre_allocated_value->IsHeapObject() ||
+         !GetHeap()->InNewSpace(pre_allocated_value));
+  int size = map->instance_size();
+  int offset = kHeaderSize;
+  if (filler_value != pre_allocated_value) {
+    int pre_allocated = map->pre_allocated_property_fields();
+    ASSERT(pre_allocated * kPointerSize + kHeaderSize <= size);
+    for (int i = 0; i < pre_allocated; i++) {
+      WRITE_FIELD(this, offset, pre_allocated_value);
+      offset += kPointerSize;
+    }
+  }
+  while (offset < size) {
+    WRITE_FIELD(this, offset, filler_value);
+    offset += kPointerSize;
   }
 }
 
@@ -1683,7 +1675,7 @@
   ASSERT(index >= 0 && index < this->length());
   int offset = kHeaderSize + index * kPointerSize;
   WRITE_FIELD(this, offset, value);
-  WRITE_BARRIER(this, offset);
+  WRITE_BARRIER(GetHeap(), this, offset, value);
 }
 
 
@@ -1768,7 +1760,7 @@
 
 void FixedDoubleArray::Initialize(FixedArray* from) {
   int old_length = from->length();
-  ASSERT(old_length < length());
+  ASSERT(old_length <= length());
   for (int i = 0; i < old_length; i++) {
     Object* hole_or_object = from->get(i);
     if (hole_or_object->IsTheHole()) {
@@ -1802,7 +1794,9 @@
 
 
 WriteBarrierMode HeapObject::GetWriteBarrierMode(const AssertNoAllocation&) {
-  if (GetHeap()->InNewSpace(this)) return SKIP_WRITE_BARRIER;
+  Heap* heap = GetHeap();
+  if (heap->incremental_marking()->IsMarking()) return UPDATE_WRITE_BARRIER;
+  if (heap->InNewSpace(this)) return SKIP_WRITE_BARRIER;
   return UPDATE_WRITE_BARRIER;
 }
 
@@ -1814,7 +1808,7 @@
   ASSERT(index >= 0 && index < this->length());
   int offset = kHeaderSize + index * kPointerSize;
   WRITE_FIELD(this, offset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, mode);
+  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
 }
 
 
@@ -1823,6 +1817,10 @@
   ASSERT(index >= 0 && index < array->length());
   ASSERT(!HEAP->InNewSpace(value));
   WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
+  array->GetHeap()->incremental_marking()->RecordWrite(
+      array,
+      HeapObject::RawField(array, kHeaderSize + index * kPointerSize),
+      value);
 }
 
 
@@ -1875,7 +1873,7 @@
                                WriteBarrierMode mode) {
   int offset = kHeaderSize + index * kPointerSize;
   WRITE_FIELD(this, offset, value);
-  CONDITIONAL_WRITE_BARRIER(heap, this, offset, mode);
+  CONDITIONAL_WRITE_BARRIER(heap, this, offset, value, mode);
 }
 
 
@@ -2154,6 +2152,7 @@
 CAST_ACCESSOR(JSWeakMap)
 CAST_ACCESSOR(Foreign)
 CAST_ACCESSOR(ByteArray)
+CAST_ACCESSOR(FreeSpace)
 CAST_ACCESSOR(ExternalArray)
 CAST_ACCESSOR(ExternalByteArray)
 CAST_ACCESSOR(ExternalUnsignedByteArray)
@@ -2180,6 +2179,7 @@
 
 
 SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
+SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
 
 SMI_ACCESSORS(String, length, kLengthOffset)
 
@@ -2336,7 +2336,7 @@
 
 
 void SlicedString::set_parent(String* parent) {
-  ASSERT(parent->IsSeqString());
+  ASSERT(parent->IsSeqString() || parent->IsExternalString());
   WRITE_FIELD(this, kParentOffset, parent);
 }
 
@@ -2356,7 +2356,7 @@
 
 void ConsString::set_first(String* value, WriteBarrierMode mode) {
   WRITE_FIELD(this, kFirstOffset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kFirstOffset, mode);
+  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kFirstOffset, value, mode);
 }
 
 
@@ -2372,29 +2372,31 @@
 
 void ConsString::set_second(String* value, WriteBarrierMode mode) {
   WRITE_FIELD(this, kSecondOffset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kSecondOffset, mode);
+  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kSecondOffset, value, mode);
 }
 
 
-ExternalAsciiString::Resource* ExternalAsciiString::resource() {
+const ExternalAsciiString::Resource* ExternalAsciiString::resource() {
   return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
 }
 
 
 void ExternalAsciiString::set_resource(
-    ExternalAsciiString::Resource* resource) {
-  *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)) = resource;
+    const ExternalAsciiString::Resource* resource) {
+  *reinterpret_cast<const Resource**>(
+      FIELD_ADDR(this, kResourceOffset)) = resource;
 }
 
 
-ExternalTwoByteString::Resource* ExternalTwoByteString::resource() {
+const ExternalTwoByteString::Resource* ExternalTwoByteString::resource() {
   return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
 }
 
 
 void ExternalTwoByteString::set_resource(
-    ExternalTwoByteString::Resource* resource) {
-  *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)) = resource;
+    const ExternalTwoByteString::Resource* resource) {
+  *reinterpret_cast<const Resource**>(
+      FIELD_ADDR(this, kResourceOffset)) = resource;
 }
 
 
@@ -2694,6 +2696,9 @@
   if (instance_type == BYTE_ARRAY_TYPE) {
     return reinterpret_cast<ByteArray*>(this)->ByteArraySize();
   }
+  if (instance_type == FREE_SPACE_TYPE) {
+    return reinterpret_cast<FreeSpace*>(this)->size();
+  }
   if (instance_type == STRING_TYPE) {
     return SeqTwoByteString::SizeFor(
         reinterpret_cast<SeqTwoByteString*>(this)->length());
@@ -2855,12 +2860,6 @@
 }
 
 
-FixedArray* Map::unchecked_prototype_transitions() {
-  return reinterpret_cast<FixedArray*>(
-      READ_FIELD(this, kPrototypeTransitionsOffset));
-}
-
-
 Code::Flags Code::flags() {
   return static_cast<Flags>(READ_INT_FIELD(this, kFlagsOffset));
 }
@@ -2932,6 +2931,19 @@
 }
 
 
+bool Code::is_pregenerated() {
+  return kind() == STUB && IsPregeneratedField::decode(flags());
+}
+
+
+void Code::set_is_pregenerated(bool value) {
+  ASSERT(kind() == STUB);
+  Flags f = flags();
+  f = static_cast<Flags>(IsPregeneratedField::update(f, value));
+  set_flags(f);
+}
+
+
 bool Code::optimizable() {
   ASSERT(kind() == FUNCTION);
   return READ_BYTE_FIELD(this, kOptimizableOffset) == 1;
@@ -3097,6 +3109,19 @@
   WRITE_BYTE_FIELD(this, kToBooleanTypeOffset, value);
 }
 
+
+bool Code::has_function_cache() {
+  ASSERT(kind() == STUB);
+  return READ_BYTE_FIELD(this, kHasFunctionCacheOffset) != 0;
+}
+
+
+void Code::set_has_function_cache(bool flag) {
+  ASSERT(kind() == STUB);
+  WRITE_BYTE_FIELD(this, kHasFunctionCacheOffset, flag);
+}
+
+
 bool Code::is_inline_cache_stub() {
   Kind kind = this->kind();
   return kind >= FIRST_IC_KIND && kind <= LAST_IC_KIND;
@@ -3182,48 +3207,6 @@
 }
 
 
-Isolate* Map::isolate() {
-  return heap()->isolate();
-}
-
-
-Heap* Map::heap() {
-  // NOTE: address() helper is not used to save one instruction.
-  Heap* heap = Page::FromAddress(reinterpret_cast<Address>(this))->heap_;
-  ASSERT(heap != NULL);
-  ASSERT(heap->isolate() == Isolate::Current());
-  return heap;
-}
-
-
-Heap* Code::heap() {
-  // NOTE: address() helper is not used to save one instruction.
-  Heap* heap = Page::FromAddress(reinterpret_cast<Address>(this))->heap_;
-  ASSERT(heap != NULL);
-  ASSERT(heap->isolate() == Isolate::Current());
-  return heap;
-}
-
-
-Isolate* Code::isolate() {
-  return heap()->isolate();
-}
-
-
-Heap* JSGlobalPropertyCell::heap() {
-  // NOTE: address() helper is not used to save one instruction.
-  Heap* heap = Page::FromAddress(reinterpret_cast<Address>(this))->heap_;
-  ASSERT(heap != NULL);
-  ASSERT(heap->isolate() == Isolate::Current());
-  return heap;
-}
-
-
-Isolate* JSGlobalPropertyCell::isolate() {
-  return heap()->isolate();
-}
-
-
 Object* Code::GetObjectFromEntryAddress(Address location_of_address) {
   return HeapObject::
       FromAddress(Memory::Address_at(location_of_address) - Code::kHeaderSize);
@@ -3238,46 +3221,7 @@
 void Map::set_prototype(Object* value, WriteBarrierMode mode) {
   ASSERT(value->IsNull() || value->IsJSReceiver());
   WRITE_FIELD(this, kPrototypeOffset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, mode);
-}
-
-
-MaybeObject* Map::GetFastElementsMap() {
-  if (has_fast_elements()) return this;
-  Object* obj;
-  { MaybeObject* maybe_obj = CopyDropTransitions();
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  }
-  Map* new_map = Map::cast(obj);
-  new_map->set_elements_kind(FAST_ELEMENTS);
-  isolate()->counters()->map_to_fast_elements()->Increment();
-  return new_map;
-}
-
-
-MaybeObject* Map::GetFastDoubleElementsMap() {
-  if (has_fast_double_elements()) return this;
-  Object* obj;
-  { MaybeObject* maybe_obj = CopyDropTransitions();
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  }
-  Map* new_map = Map::cast(obj);
-  new_map->set_elements_kind(FAST_DOUBLE_ELEMENTS);
-  isolate()->counters()->map_to_fast_double_elements()->Increment();
-  return new_map;
-}
-
-
-MaybeObject* Map::GetSlowElementsMap() {
-  if (!has_fast_elements() && !has_fast_double_elements()) return this;
-  Object* obj;
-  { MaybeObject* maybe_obj = CopyDropTransitions();
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  }
-  Map* new_map = Map::cast(obj);
-  new_map->set_elements_kind(DICTIONARY_ELEMENTS);
-  isolate()->counters()->map_to_slow_elements()->Increment();
-  return new_map;
+  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, value, mode);
 }
 
 
@@ -3312,7 +3256,8 @@
                                    WriteBarrierMode mode) {
   Object* object = READ_FIELD(this,
                               kInstanceDescriptorsOrBitField3Offset);
-  if (value == isolate()->heap()->empty_descriptor_array()) {
+  Heap* heap = GetHeap();
+  if (value == heap->empty_descriptor_array()) {
     clear_instance_descriptors();
     return;
   } else {
@@ -3325,10 +3270,8 @@
   }
   ASSERT(!is_shared());
   WRITE_FIELD(this, kInstanceDescriptorsOrBitField3Offset, value);
-  CONDITIONAL_WRITE_BARRIER(GetHeap(),
-                            this,
-                            kInstanceDescriptorsOrBitField3Offset,
-                            mode);
+  CONDITIONAL_WRITE_BARRIER(
+      heap, this, kInstanceDescriptorsOrBitField3Offset, value, mode);
 }
 
 
@@ -3357,14 +3300,22 @@
 }
 
 
+FixedArray* Map::unchecked_prototype_transitions() {
+  return reinterpret_cast<FixedArray*>(
+      READ_FIELD(this, kPrototypeTransitionsOffset));
+}
+
+
 ACCESSORS(Map, code_cache, Object, kCodeCacheOffset)
 ACCESSORS(Map, prototype_transitions, FixedArray, kPrototypeTransitionsOffset)
 ACCESSORS(Map, constructor, Object, kConstructorOffset)
 
 ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
 ACCESSORS(JSFunction, literals, FixedArray, kLiteralsOffset)
-ACCESSORS_GCSAFE(JSFunction, next_function_link, Object,
-                 kNextFunctionLinkOffset)
+ACCESSORS(JSFunction,
+          next_function_link,
+          Object,
+          kNextFunctionLinkOffset)
 
 ACCESSORS(GlobalObject, builtins, JSBuiltinsObject, kBuiltinsOffset)
 ACCESSORS(GlobalObject, global_context, Context, kGlobalContextOffset)
@@ -3453,8 +3404,8 @@
 #endif
 
 ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
-ACCESSORS_GCSAFE(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
-ACCESSORS_GCSAFE(SharedFunctionInfo, initial_map, Object, kInitialMapOffset)
+ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
+ACCESSORS(SharedFunctionInfo, initial_map, Object, kInitialMapOffset)
 ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
           kInstanceClassNameOffset)
 ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
@@ -3660,7 +3611,7 @@
 
 void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
   WRITE_FIELD(this, kCodeOffset, value);
-  ASSERT(!Isolate::Current()->heap()->InNewSpace(value));
+  CONDITIONAL_WRITE_BARRIER(value->GetHeap(), this, kCodeOffset, value, mode);
 }
 
 
@@ -3673,7 +3624,11 @@
 void SharedFunctionInfo::set_scope_info(SerializedScopeInfo* value,
                                         WriteBarrierMode mode) {
   WRITE_FIELD(this, kScopeInfoOffset, reinterpret_cast<Object*>(value));
-  CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kScopeInfoOffset, mode);
+  CONDITIONAL_WRITE_BARRIER(GetHeap(),
+                            this,
+                            kScopeInfoOffset,
+                            reinterpret_cast<Object*>(value),
+                            mode);
 }
 
 
@@ -3770,10 +3725,13 @@
 
 
 void JSFunction::set_code(Code* value) {
-  // Skip the write barrier because code is never in new space.
   ASSERT(!HEAP->InNewSpace(value));
   Address entry = value->entry();
   WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry));
+  GetHeap()->incremental_marking()->RecordWriteOfCodeEntry(
+      this,
+      HeapObject::RawField(this, kCodeEntryOffset),
+      value);
 }
 
 
@@ -3813,7 +3771,7 @@
 void JSFunction::set_context(Object* value) {
   ASSERT(value->IsUndefined() || value->IsContext());
   WRITE_FIELD(this, kContextOffset, value);
-  WRITE_BARRIER(this, kContextOffset);
+  WRITE_BARRIER(GetHeap(), this, kContextOffset, value);
 }
 
 ACCESSORS(JSFunction, prototype_or_initial_map, Object,
@@ -3887,7 +3845,7 @@
                                               Object* value) {
   ASSERT(id < kJSBuiltinsCount);  // id is unsigned.
   WRITE_FIELD(this, OffsetOfFunctionWithId(id), value);
-  WRITE_BARRIER(this, OffsetOfFunctionWithId(id));
+  WRITE_BARRIER(GetHeap(), this, OffsetOfFunctionWithId(id), value);
 }
 
 
@@ -3906,6 +3864,7 @@
 
 
 ACCESSORS(JSProxy, handler, Object, kHandlerOffset)
+ACCESSORS(JSProxy, hash, Object, kHashOffset)
 ACCESSORS(JSFunctionProxy, call_trap, Object, kCallTrapOffset)
 ACCESSORS(JSFunctionProxy, construct_trap, Object, kConstructTrapOffset)
 
@@ -3918,8 +3877,8 @@
 }
 
 
-ACCESSORS(JSWeakMap, table, ObjectHashTable, kTableOffset)
-ACCESSORS_GCSAFE(JSWeakMap, next, Object, kNextOffset)
+ACCESSORS(JSWeakMap, table, Object, kTableOffset)
+ACCESSORS(JSWeakMap, next, Object, kNextOffset)
 
 
 ObjectHashTable* JSWeakMap::unchecked_table() {
@@ -4011,9 +3970,8 @@
 }
 
 
-bool Code::contains(byte* pc) {
-  return (instruction_start() <= pc) &&
-      (pc <= instruction_start() + instruction_size());
+bool Code::contains(byte* inner_pointer) {
+  return (address() <= inner_pointer) && (inner_pointer <= address() + Size());
 }
 
 
@@ -4092,6 +4050,7 @@
   if (value->IsSmi()) {
     fa->set_unchecked(index, Smi::cast(value));
   } else {
+    // We only do this during GC, so we don't need to notify the write barrier.
     fa->set_unchecked(heap, index, value, SKIP_WRITE_BARRIER);
   }
 }
@@ -4099,15 +4058,20 @@
 
 ElementsKind JSObject::GetElementsKind() {
   ElementsKind kind = map()->elements_kind();
-  ASSERT((kind == FAST_ELEMENTS &&
-          (elements()->map() == GetHeap()->fixed_array_map() ||
-           elements()->map() == GetHeap()->fixed_cow_array_map())) ||
+#if DEBUG
+  FixedArrayBase* fixed_array =
+      reinterpret_cast<FixedArrayBase*>(READ_FIELD(this, kElementsOffset));
+  Map* map = fixed_array->map();
+    ASSERT(((kind == FAST_ELEMENTS || kind == FAST_SMI_ONLY_ELEMENTS) &&
+          (map == GetHeap()->fixed_array_map() ||
+           map == GetHeap()->fixed_cow_array_map())) ||
          (kind == FAST_DOUBLE_ELEMENTS &&
-          elements()->IsFixedDoubleArray()) ||
+          fixed_array->IsFixedDoubleArray()) ||
          (kind == DICTIONARY_ELEMENTS &&
-          elements()->IsFixedArray() &&
-          elements()->IsDictionary()) ||
+          fixed_array->IsFixedArray() &&
+          fixed_array->IsDictionary()) ||
          (kind > DICTIONARY_ELEMENTS));
+#endif
   return kind;
 }
 
@@ -4122,6 +4086,18 @@
 }
 
 
+bool JSObject::HasFastSmiOnlyElements() {
+  return GetElementsKind() == FAST_SMI_ONLY_ELEMENTS;
+}
+
+
+bool JSObject::HasFastTypeElements() {
+  ElementsKind elements_kind = GetElementsKind();
+  return elements_kind == FAST_SMI_ONLY_ELEMENTS ||
+      elements_kind == FAST_ELEMENTS;
+}
+
+
 bool JSObject::HasFastDoubleElements() {
   return GetElementsKind() == FAST_DOUBLE_ELEMENTS;
 }
@@ -4132,6 +4108,11 @@
 }
 
 
+bool JSObject::HasNonStrictArgumentsElements() {
+  return GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS;
+}
+
+
 bool JSObject::HasExternalArrayElements() {
   HeapObject* array = elements();
   ASSERT(array != NULL);
@@ -4183,7 +4164,7 @@
 
 
 MaybeObject* JSObject::EnsureWritableFastElements() {
-  ASSERT(HasFastElements());
+  ASSERT(HasFastTypeElements());
   FixedArray* elems = FixedArray::cast(elements());
   Isolate* isolate = GetIsolate();
   if (elems->map() != isolate->heap()->fixed_cow_array_map()) return elems;
@@ -4359,44 +4340,18 @@
 }
 
 
-bool JSObject::HasHiddenPropertiesObject() {
-  ASSERT(!IsJSGlobalProxy());
-  return GetPropertyAttributePostInterceptor(this,
-                                             GetHeap()->hidden_symbol(),
-                                             false) != ABSENT;
+MaybeObject* JSReceiver::GetIdentityHash(CreationFlag flag) {
+  return IsJSProxy()
+      ? JSProxy::cast(this)->GetIdentityHash(flag)
+      : JSObject::cast(this)->GetIdentityHash(flag);
 }
 
 
-Object* JSObject::GetHiddenPropertiesObject() {
-  ASSERT(!IsJSGlobalProxy());
-  PropertyAttributes attributes;
-  // You can't install a getter on a property indexed by the hidden symbol,
-  // so we can be sure that GetLocalPropertyPostInterceptor returns a real
-  // object.
-  Object* result =
-      GetLocalPropertyPostInterceptor(this,
-                                      GetHeap()->hidden_symbol(),
-                                      &attributes)->ToObjectUnchecked();
-  return result;
-}
-
-
-MaybeObject* JSObject::SetHiddenPropertiesObject(Object* hidden_obj) {
-  ASSERT(!IsJSGlobalProxy());
-  return SetPropertyPostInterceptor(GetHeap()->hidden_symbol(),
-                                    hidden_obj,
-                                    DONT_ENUM,
-                                    kNonStrictMode);
-}
-
-
-bool JSObject::HasHiddenProperties() {
-  return !GetHiddenProperties(OMIT_CREATION)->ToObjectChecked()->IsUndefined();
-}
-
-
-bool JSObject::HasElement(uint32_t index) {
-  return HasElementWithReceiver(this, index);
+bool JSReceiver::HasElement(uint32_t index) {
+  if (IsJSProxy()) {
+    return JSProxy::cast(this)->HasElementWithHandler(index);
+  }
+  return JSObject::cast(this)->HasElementWithReceiver(this, index);
 }
 
 
@@ -4508,27 +4463,27 @@
 }
 
 
-bool ObjectHashTableShape::IsMatch(JSObject* key, Object* other) {
-  return key == JSObject::cast(other);
+bool ObjectHashTableShape::IsMatch(JSReceiver* key, Object* other) {
+  return key == JSReceiver::cast(other);
 }
 
 
-uint32_t ObjectHashTableShape::Hash(JSObject* key) {
-  MaybeObject* maybe_hash = key->GetIdentityHash(JSObject::OMIT_CREATION);
+uint32_t ObjectHashTableShape::Hash(JSReceiver* key) {
+  MaybeObject* maybe_hash = key->GetIdentityHash(OMIT_CREATION);
   ASSERT(!maybe_hash->IsFailure());
   return Smi::cast(maybe_hash->ToObjectUnchecked())->value();
 }
 
 
-uint32_t ObjectHashTableShape::HashForObject(JSObject* key, Object* other) {
-  MaybeObject* maybe_hash = JSObject::cast(other)->GetIdentityHash(
-      JSObject::OMIT_CREATION);
+uint32_t ObjectHashTableShape::HashForObject(JSReceiver* key, Object* other) {
+  MaybeObject* maybe_hash =
+      JSReceiver::cast(other)->GetIdentityHash(OMIT_CREATION);
   ASSERT(!maybe_hash->IsFailure());
   return Smi::cast(maybe_hash->ToObjectUnchecked())->value();
 }
 
 
-MaybeObject* ObjectHashTableShape::AsObject(JSObject* key) {
+MaybeObject* ObjectHashTableShape::AsObject(JSReceiver* key) {
   return key;
 }
 
@@ -4548,7 +4503,7 @@
 
 
 void JSArray::EnsureSize(int required_size) {
-  ASSERT(HasFastElements());
+  ASSERT(HasFastTypeElements());
   FixedArray* elts = FixedArray::cast(elements());
   const int kArraySizeThatFitsComfortablyInNewSpace = 128;
   if (elts->length() < required_size) {
@@ -4566,13 +4521,17 @@
 
 
 void JSArray::set_length(Smi* length) {
+  // Don't need a write barrier for a Smi.
   set_length(static_cast<Object*>(length), SKIP_WRITE_BARRIER);
 }
 
 
-void JSArray::SetContent(FixedArray* storage) {
+MaybeObject* JSArray::SetContent(FixedArray* storage) {
+  MaybeObject* maybe_object = EnsureCanContainElements(storage);
+  if (maybe_object->IsFailure()) return maybe_object;
   set_length(Smi::FromInt(storage->length()));
   set_elements(storage);
+  return this;
 }
 
 
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index 0398572..fc75732 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -82,12 +82,18 @@
     case HEAP_NUMBER_TYPE:
       HeapNumber::cast(this)->HeapNumberPrint(out);
       break;
+    case FIXED_DOUBLE_ARRAY_TYPE:
+      FixedDoubleArray::cast(this)->FixedDoubleArrayPrint(out);
+      break;
     case FIXED_ARRAY_TYPE:
       FixedArray::cast(this)->FixedArrayPrint(out);
       break;
     case BYTE_ARRAY_TYPE:
       ByteArray::cast(this)->ByteArrayPrint(out);
       break;
+    case FREE_SPACE_TYPE:
+      FreeSpace::cast(this)->FreeSpacePrint(out);
+      break;
     case EXTERNAL_PIXEL_ARRAY_TYPE:
       ExternalPixelArray::cast(this)->ExternalPixelArrayPrint(out);
       break;
@@ -189,6 +195,11 @@
 }
 
 
+void FreeSpace::FreeSpacePrint(FILE* out) {
+  PrintF(out, "free space, size %d", Size());
+}
+
+
 void ExternalPixelArray::ExternalPixelArrayPrint(FILE* out) {
   PrintF(out, "external pixel array");
 }
@@ -234,6 +245,54 @@
 }
 
 
+static void PrintElementsKind(FILE* out, ElementsKind kind) {
+  switch (kind) {
+    case FAST_SMI_ONLY_ELEMENTS:
+      PrintF(out, "FAST_SMI_ONLY_ELEMENTS");
+      break;
+    case FAST_ELEMENTS:
+      PrintF(out, "FAST_ELEMENTS");
+      break;
+    case FAST_DOUBLE_ELEMENTS:
+      PrintF(out, "FAST_DOUBLE_ELEMENTS");
+      break;
+    case DICTIONARY_ELEMENTS:
+      PrintF(out, "DICTIONARY_ELEMENTS");
+      break;
+    case NON_STRICT_ARGUMENTS_ELEMENTS:
+      PrintF(out, "NON_STRICT_ARGUMENTS_ELEMENTS");
+      break;
+    case EXTERNAL_BYTE_ELEMENTS:
+      PrintF(out, "EXTERNAL_BYTE_ELEMENTS");
+      break;
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+      PrintF(out, "EXTERNAL_UNSIGNED_BYTE_ELEMENTS");
+      break;
+    case EXTERNAL_SHORT_ELEMENTS:
+      PrintF(out, "EXTERNAL_SHORT_ELEMENTS");
+      break;
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+      PrintF(out, "EXTERNAL_UNSIGNED_SHORT_ELEMENTS");
+      break;
+    case EXTERNAL_INT_ELEMENTS:
+      PrintF(out, "EXTERNAL_INT_ELEMENTS");
+      break;
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+      PrintF(out, "EXTERNAL_UNSIGNED_INT_ELEMENTS");
+      break;
+    case EXTERNAL_FLOAT_ELEMENTS:
+      PrintF(out, "EXTERNAL_FLOAT_ELEMENTS");
+      break;
+    case EXTERNAL_DOUBLE_ELEMENTS:
+      PrintF(out, "EXTERNAL_DOUBLE_ELEMENTS");
+      break;
+    case EXTERNAL_PIXEL_ELEMENTS:
+      PrintF(out, "EXTERNAL_DOUBLE_ELEMENTS");
+      break;
+  }
+}
+
+
 void JSObject::PrintProperties(FILE* out) {
   if (HasFastProperties()) {
     DescriptorArray* descs = map()->instance_descriptors();
@@ -256,14 +315,33 @@
           descs->GetCallbacksObject(i)->ShortPrint(out);
           PrintF(out, " (callback)\n");
           break;
+        case ELEMENTS_TRANSITION: {
+          PrintF(out, "(elements transition to ");
+          Object* descriptor_contents = descs->GetValue(i);
+          if (descriptor_contents->IsMap()) {
+            Map* map = Map::cast(descriptor_contents);
+            PrintElementsKind(out, map->elements_kind());
+          } else {
+            FixedArray* map_array = FixedArray::cast(descriptor_contents);
+            for (int i = 0; i < map_array->length(); ++i) {
+              Map* map = Map::cast(map_array->get(i));
+              if (i != 0) {
+                PrintF(out, ", ");
+              }
+              PrintElementsKind(out, map->elements_kind());
+            }
+          }
+          PrintF(out, ")\n");
+          break;
+        }
         case MAP_TRANSITION:
-          PrintF(out, " (map transition)\n");
+          PrintF(out, "(map transition)\n");
           break;
         case CONSTANT_TRANSITION:
-          PrintF(out, " (constant transition)\n");
+          PrintF(out, "(constant transition)\n");
           break;
         case NULL_DESCRIPTOR:
-          PrintF(out, " (null descriptor)\n");
+          PrintF(out, "(null descriptor)\n");
           break;
         default:
           UNREACHABLE();
@@ -277,7 +355,10 @@
 
 
 void JSObject::PrintElements(FILE* out) {
-  switch (GetElementsKind()) {
+  // Don't call GetElementsKind; its validation code can cause the printer to
+  // fail when debugging.
+  switch (map()->elements_kind()) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       // Print in array notation for non-sparse arrays.
       FixedArray* p = FixedArray::cast(elements());
@@ -385,8 +466,13 @@
 
 void JSObject::JSObjectPrint(FILE* out) {
   PrintF(out, "%p: [JSObject]\n", reinterpret_cast<void*>(this));
-  PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
-  PrintF(out, " - prototype = %p\n", reinterpret_cast<void*>(GetPrototype()));
+  PrintF(out, " - map = %p [", reinterpret_cast<void*>(map()));
+  // Don't call GetElementsKind; its validation code can cause the printer to
+  // fail when debugging.
+  PrintElementsKind(out, this->map()->elements_kind());
+  PrintF(out,
+         "]\n - prototype = %p\n",
+         reinterpret_cast<void*>(GetPrototype()));
   PrintF(out, " {\n");
   PrintProperties(out);
   PrintElements(out);
@@ -415,6 +501,7 @@
     case EXTERNAL_STRING_TYPE: return "EXTERNAL_STRING";
     case FIXED_ARRAY_TYPE: return "FIXED_ARRAY";
     case BYTE_ARRAY_TYPE: return "BYTE_ARRAY";
+    case FREE_SPACE_TYPE: return "FREE_SPACE";
     case EXTERNAL_PIXEL_ARRAY_TYPE: return "EXTERNAL_PIXEL_ARRAY";
     case EXTERNAL_BYTE_ARRAY_TYPE: return "EXTERNAL_BYTE_ARRAY";
     case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
@@ -458,7 +545,9 @@
   PrintF(out, " - type: %s\n", TypeToString(instance_type()));
   PrintF(out, " - instance size: %d\n", instance_size());
   PrintF(out, " - inobject properties: %d\n", inobject_properties());
-  PrintF(out, " - pre-allocated property fields: %d\n",
+  PrintF(out, " - elements kind: ");
+  PrintElementsKind(out, elements_kind());
+  PrintF(out, "\n - pre-allocated property fields: %d\n",
       pre_allocated_property_fields());
   PrintF(out, " - unused property fields: %d\n", unused_property_fields());
   if (is_hidden_prototype()) {
@@ -516,6 +605,16 @@
 }
 
 
+void FixedDoubleArray::FixedDoubleArrayPrint(FILE* out) {
+  HeapObject::PrintHeader(out, "FixedDoubleArray");
+  PrintF(out, " - length: %d", length());
+  for (int i = 0; i < length(); i++) {
+    PrintF(out, "\n  [%d]: %g", i, get_scalar(i));
+  }
+  PrintF(out, "\n");
+}
+
+
 void JSValue::JSValuePrint(FILE* out) {
   HeapObject::PrintHeader(out, "ValueObject");
   value()->Print(out);
@@ -587,6 +686,8 @@
   PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
   PrintF(out, " - handler = ");
   handler()->Print(out);
+  PrintF(out, " - hash = ");
+  hash()->Print(out);
   PrintF(out, "\n");
 }
 
@@ -607,7 +708,6 @@
 void JSWeakMap::JSWeakMapPrint(FILE* out) {
   HeapObject::PrintHeader(out, "JSWeakMap");
   PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
-  PrintF(out, " - number of elements = %d\n", table()->NumberOfElements());
   PrintF(out, " - table = ");
   table()->ShortPrint(out);
   PrintF(out, "\n");
@@ -802,10 +902,15 @@
 
 void ObjectTemplateInfo::ObjectTemplateInfoPrint(FILE* out) {
   HeapObject::PrintHeader(out, "ObjectTemplateInfo");
+  PrintF(out, " - tag: ");
+  tag()->ShortPrint(out);
+  PrintF(out, "\n - property_list: ");
+  property_list()->ShortPrint(out);
   PrintF(out, "\n - constructor: ");
   constructor()->ShortPrint(out);
   PrintF(out, "\n - internal_field_count: ");
   internal_field_count()->ShortPrint(out);
+  PrintF(out, "\n");
 }
 
 
diff --git a/src/objects-visiting-inl.h b/src/objects-visiting-inl.h
new file mode 100644
index 0000000..6f0f61d
--- /dev/null
+++ b/src/objects-visiting-inl.h
@@ -0,0 +1,143 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_OBJECTS_VISITING_INL_H_
+#define V8_OBJECTS_VISITING_INL_H_
+
+
+namespace v8 {
+namespace internal {
+
+template<typename StaticVisitor>
+void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
+  table_.Register(kVisitShortcutCandidate,
+                  &FixedBodyVisitor<StaticVisitor,
+                  ConsString::BodyDescriptor,
+                  int>::Visit);
+
+  table_.Register(kVisitConsString,
+                  &FixedBodyVisitor<StaticVisitor,
+                  ConsString::BodyDescriptor,
+                  int>::Visit);
+
+  table_.Register(kVisitSlicedString,
+                  &FixedBodyVisitor<StaticVisitor,
+                  SlicedString::BodyDescriptor,
+                  int>::Visit);
+
+  table_.Register(kVisitFixedArray,
+                  &FlexibleBodyVisitor<StaticVisitor,
+                  FixedArray::BodyDescriptor,
+                  int>::Visit);
+
+  table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray);
+
+  table_.Register(kVisitGlobalContext,
+                  &FixedBodyVisitor<StaticVisitor,
+                  Context::ScavengeBodyDescriptor,
+                  int>::Visit);
+
+  table_.Register(kVisitByteArray, &VisitByteArray);
+
+  table_.Register(kVisitSharedFunctionInfo,
+                  &FixedBodyVisitor<StaticVisitor,
+                  SharedFunctionInfo::BodyDescriptor,
+                  int>::Visit);
+
+  table_.Register(kVisitSeqAsciiString, &VisitSeqAsciiString);
+
+  table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString);
+
+  table_.Register(kVisitJSFunction,
+                  &JSObjectVisitor::
+                      template VisitSpecialized<JSFunction::kSize>);
+
+  table_.Register(kVisitFreeSpace, &VisitFreeSpace);
+
+  table_.Register(kVisitJSWeakMap, &JSObjectVisitor::Visit);
+
+  table_.Register(kVisitJSRegExp, &JSObjectVisitor::Visit);
+
+  table_.template RegisterSpecializations<DataObjectVisitor,
+                                          kVisitDataObject,
+                                          kVisitDataObjectGeneric>();
+
+  table_.template RegisterSpecializations<JSObjectVisitor,
+                                          kVisitJSObject,
+                                          kVisitJSObjectGeneric>();
+  table_.template RegisterSpecializations<StructVisitor,
+                                          kVisitStruct,
+                                          kVisitStructGeneric>();
+}
+
+
+void Code::CodeIterateBody(ObjectVisitor* v) {
+  int mode_mask = RelocInfo::kCodeTargetMask |
+                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+                  RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
+                  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+                  RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
+                  RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
+                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+
+  IteratePointer(v, kRelocationInfoOffset);
+  IteratePointer(v, kDeoptimizationDataOffset);
+
+  RelocIterator it(this, mode_mask);
+  for (; !it.done(); it.next()) {
+    it.rinfo()->Visit(v);
+  }
+}
+
+
+template<typename StaticVisitor>
+void Code::CodeIterateBody(Heap* heap) {
+  int mode_mask = RelocInfo::kCodeTargetMask |
+                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+                  RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
+                  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+                  RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
+                  RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
+                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+
+  StaticVisitor::VisitPointer(
+      heap,
+      reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset));
+  StaticVisitor::VisitPointer(
+      heap,
+      reinterpret_cast<Object**>(this->address() + kDeoptimizationDataOffset));
+
+  RelocIterator it(this, mode_mask);
+  for (; !it.done(); it.next()) {
+    it.rinfo()->template Visit<StaticVisitor>(heap);
+  }
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_OBJECTS_VISITING_INL_H_
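The new header above routes every visit through a table of function pointers indexed by
visitor id (table_.Register(kVisit..., &...::Visit)), so the scavenger dispatches with a
single indirect call per object. The following standalone sketch shows that dispatch-table
shape in isolation; VisitorId, VisitorTable and FakeObject are invented stand-ins for
illustration, not V8 types.

// Minimal sketch of the visitor dispatch-table pattern (illustrative only).
#include <cstdio>

enum VisitorId {
  kVisitByteArray,   // data-only object: nothing to scan, just report size
  kVisitFixedArray,  // pointer-carrying object: would scan its body
  kVisitFreeSpace,   // filler left behind by the collector
  kVisitorIdCount
};

struct FakeObject {
  VisitorId id;
  int size;
};

typedef int (*Callback)(FakeObject* object);

class VisitorTable {
 public:
  void Register(VisitorId id, Callback callback) { callbacks_[id] = callback; }
  // Dispatch is one array index plus one indirect call.
  int Visit(FakeObject* object) { return callbacks_[object->id](object); }
 private:
  Callback callbacks_[kVisitorIdCount];
};

// Data objects and free space only report their size, mirroring the way
// VisitFreeSpace above returns FreeSpace::cast(object)->Size().
static int VisitDataObject(FakeObject* object) { return object->size; }

static int VisitPointerObject(FakeObject* object) {
  std::printf("scanning body of %d-byte object\n", object->size);
  return object->size;
}

int main() {
  VisitorTable table;
  table.Register(kVisitByteArray, &VisitDataObject);
  table.Register(kVisitFreeSpace, &VisitDataObject);
  table.Register(kVisitFixedArray, &VisitPointerObject);

  FakeObject free_space = { kVisitFreeSpace, 64 };
  FakeObject fixed_array = { kVisitFixedArray, 128 };
  std::printf("%d\n", table.Visit(&free_space));   // prints 64
  std::printf("%d\n", table.Visit(&fixed_array));  // scans, then prints 128
  return 0;
}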
diff --git a/src/objects-visiting.cc b/src/objects-visiting.cc
index 0aa21dd..20a7b31 100644
--- a/src/objects-visiting.cc
+++ b/src/objects-visiting.cc
@@ -73,6 +73,9 @@
     case BYTE_ARRAY_TYPE:
       return kVisitByteArray;
 
+    case FREE_SPACE_TYPE:
+      return kVisitFreeSpace;
+
     case FIXED_ARRAY_TYPE:
       return kVisitFixedArray;
 
diff --git a/src/objects-visiting.h b/src/objects-visiting.h
index 4ce1bd0..b8b1a25 100644
--- a/src/objects-visiting.h
+++ b/src/objects-visiting.h
@@ -30,22 +30,6 @@
 
 #include "allocation.h"
 
-#if V8_TARGET_ARCH_IA32
-#include "ia32/assembler-ia32.h"
-#include "ia32/assembler-ia32-inl.h"
-#elif V8_TARGET_ARCH_X64
-#include "x64/assembler-x64.h"
-#include "x64/assembler-x64-inl.h"
-#elif V8_TARGET_ARCH_ARM
-#include "arm/assembler-arm.h"
-#include "arm/assembler-arm-inl.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/assembler-mips.h"
-#include "mips/assembler-mips-inl.h"
-#else
-#error Unsupported target architecture.
-#endif
-
 // This file provides base classes and auxiliary methods for defining
 // static object visitors used during GC.
 // Visiting HeapObject body with a normal ObjectVisitor requires performing
@@ -67,6 +51,7 @@
     kVisitSeqTwoByteString,
     kVisitShortcutCandidate,
     kVisitByteArray,
+    kVisitFreeSpace,
     kVisitFixedArray,
     kVisitFixedDoubleArray,
     kVisitGlobalContext,
@@ -236,7 +221,7 @@
   static inline ReturnType Visit(Map* map, HeapObject* object) {
     int object_size = BodyDescriptor::SizeOf(map, object);
     BodyVisitorBase<StaticVisitor>::IteratePointers(
-        map->heap(),
+        map->GetHeap(),
         object,
         BodyDescriptor::kStartOffset,
         object_size);
@@ -247,7 +232,7 @@
   static inline ReturnType VisitSpecialized(Map* map, HeapObject* object) {
     ASSERT(BodyDescriptor::SizeOf(map, object) == object_size);
     BodyVisitorBase<StaticVisitor>::IteratePointers(
-        map->heap(),
+        map->GetHeap(),
         object,
         BodyDescriptor::kStartOffset,
         object_size);
@@ -261,7 +246,7 @@
  public:
   static inline ReturnType Visit(Map* map, HeapObject* object) {
     BodyVisitorBase<StaticVisitor>::IteratePointers(
-        map->heap(),
+        map->GetHeap(),
         object,
         BodyDescriptor::kStartOffset,
         BodyDescriptor::kEndOffset);
@@ -289,63 +274,7 @@
 template<typename StaticVisitor>
 class StaticNewSpaceVisitor : public StaticVisitorBase {
  public:
-  static void Initialize() {
-    table_.Register(kVisitShortcutCandidate,
-                    &FixedBodyVisitor<StaticVisitor,
-                                      ConsString::BodyDescriptor,
-                                      int>::Visit);
-
-    table_.Register(kVisitConsString,
-                    &FixedBodyVisitor<StaticVisitor,
-                                      ConsString::BodyDescriptor,
-                                      int>::Visit);
-
-    table_.Register(kVisitSlicedString,
-                    &FixedBodyVisitor<StaticVisitor,
-                                      SlicedString::BodyDescriptor,
-                                      int>::Visit);
-
-    table_.Register(kVisitFixedArray,
-                    &FlexibleBodyVisitor<StaticVisitor,
-                                         FixedArray::BodyDescriptor,
-                                         int>::Visit);
-
-    table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray);
-
-    table_.Register(kVisitGlobalContext,
-                    &FixedBodyVisitor<StaticVisitor,
-                                      Context::ScavengeBodyDescriptor,
-                                      int>::Visit);
-
-    table_.Register(kVisitByteArray, &VisitByteArray);
-
-    table_.Register(kVisitSharedFunctionInfo,
-                    &FixedBodyVisitor<StaticVisitor,
-                                      SharedFunctionInfo::BodyDescriptor,
-                                      int>::Visit);
-
-    table_.Register(kVisitJSWeakMap, &VisitJSObject);
-
-    table_.Register(kVisitJSRegExp, &VisitJSObject);
-
-    table_.Register(kVisitSeqAsciiString, &VisitSeqAsciiString);
-
-    table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString);
-
-    table_.Register(kVisitJSFunction,
-                    &JSObjectVisitor::
-                        template VisitSpecialized<JSFunction::kSize>);
-
-    table_.RegisterSpecializations<DataObjectVisitor,
-                                   kVisitDataObject,
-                                   kVisitDataObjectGeneric>();
-    table_.RegisterSpecializations<JSObjectVisitor,
-                                   kVisitJSObject,
-                                   kVisitJSObjectGeneric>();
-    table_.RegisterSpecializations<StructVisitor,
-                                   kVisitStruct,
-                                   kVisitStructGeneric>();
-  }
+  static void Initialize();
 
   static inline int IterateBody(Map* map, HeapObject* obj) {
     return table_.GetVisitor(map)(map, obj);
@@ -379,6 +308,10 @@
         SeqTwoByteStringSize(map->instance_type());
   }
 
+  static inline int VisitFreeSpace(Map* map, HeapObject* object) {
+    return FreeSpace::cast(object)->Size();
+  }
+
   class DataObjectVisitor {
    public:
     template<int object_size>
@@ -410,55 +343,6 @@
   StaticNewSpaceVisitor<StaticVisitor>::table_;
 
 
-void Code::CodeIterateBody(ObjectVisitor* v) {
-  int mode_mask = RelocInfo::kCodeTargetMask |
-                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
-                  RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
-                  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
-                  RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
-                  RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
-                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
-
-  // Use the relocation info pointer before it is visited by
-  // the heap compaction in the next statement.
-  RelocIterator it(this, mode_mask);
-
-  IteratePointer(v, kRelocationInfoOffset);
-  IteratePointer(v, kDeoptimizationDataOffset);
-
-  for (; !it.done(); it.next()) {
-    it.rinfo()->Visit(v);
-  }
-}
-
-
-template<typename StaticVisitor>
-void Code::CodeIterateBody(Heap* heap) {
-  int mode_mask = RelocInfo::kCodeTargetMask |
-                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
-                  RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
-                  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
-                  RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
-                  RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
-                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
-
-  // Use the relocation info pointer before it is visited by
-  // the heap compaction in the next statement.
-  RelocIterator it(this, mode_mask);
-
-  StaticVisitor::VisitPointer(
-      heap,
-      reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset));
-  StaticVisitor::VisitPointer(
-      heap,
-      reinterpret_cast<Object**>(this->address() + kDeoptimizationDataOffset));
-
-  for (; !it.done(); it.next()) {
-    it.rinfo()->template Visit<StaticVisitor>(heap);
-  }
-}
-
-
 } }  // namespace v8::internal
 
 #endif  // V8_OBJECTS_VISITING_H_
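The FixedBodyVisitor and FlexibleBodyVisitor templates referenced above walk every
pointer-sized slot between a BodyDescriptor's start offset and either a fixed end offset or
the object's computed size. A minimal sketch of that idea follows; FakeHeapObject and
FakeBodyDescriptor are invented for illustration and do not correspond to real V8 layouts.

// Illustrative-only sketch of body iteration driven by a BodyDescriptor.
#include <cstdio>
#include <stdint.h>

typedef void* Object;  // stand-in for a tagged pointer slot

struct FakeHeapObject {
  uintptr_t header;  // non-pointer word, skipped by the descriptor
  Object field_a;
  Object field_b;
  Object field_c;
};

struct FakeBodyDescriptor {
  enum {
    kStartOffset = sizeof(uintptr_t),    // first pointer field
    kEndOffset = sizeof(FakeHeapObject)  // fixed-size body ends here
  };
};

// Visits each pointer slot in [kStartOffset, kEndOffset), the same range the
// FixedBodyVisitor above hands to BodyVisitorBase::IteratePointers.
template <typename BodyDescriptor>
void IteratePointers(FakeHeapObject* object, void (*visit)(Object* slot)) {
  char* base = reinterpret_cast<char*>(object);
  for (int offset = BodyDescriptor::kStartOffset;
       offset < static_cast<int>(BodyDescriptor::kEndOffset);
       offset += static_cast<int>(sizeof(Object))) {
    visit(reinterpret_cast<Object*>(base + offset));
  }
}

static void PrintSlot(Object* slot) {
  std::printf("slot %p holds %p\n", static_cast<void*>(slot), *slot);
}

int main() {
  FakeHeapObject obj = { 0, &obj, 0, &obj };
  IteratePointers<FakeBodyDescriptor>(&obj, &PrintSlot);  // visits 3 slots
  return 0;
}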
diff --git a/src/objects.cc b/src/objects.cc
index 6085b4e..b77dd1b 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -39,7 +39,9 @@
 #include "hydrogen.h"
 #include "objects-inl.h"
 #include "objects-visiting.h"
+#include "objects-visiting-inl.h"
 #include "macro-assembler.h"
+#include "mark-compact.h"
 #include "safepoint-table.h"
 #include "string-stream.h"
 #include "utils.h"
@@ -132,27 +134,20 @@
 
 void Object::Lookup(String* name, LookupResult* result) {
   Object* holder = NULL;
-  if (IsSmi()) {
-    Context* global_context = Isolate::Current()->context()->global_context();
-    holder = global_context->number_function()->instance_prototype();
+  if (IsJSReceiver()) {
+    holder = this;
   } else {
-    HeapObject* heap_object = HeapObject::cast(this);
-    if (heap_object->IsJSObject()) {
-      return JSObject::cast(this)->Lookup(name, result);
-    } else if (heap_object->IsJSProxy()) {
-      return result->HandlerResult();
-    }
     Context* global_context = Isolate::Current()->context()->global_context();
-    if (heap_object->IsString()) {
-      holder = global_context->string_function()->instance_prototype();
-    } else if (heap_object->IsHeapNumber()) {
+    if (IsNumber()) {
       holder = global_context->number_function()->instance_prototype();
-    } else if (heap_object->IsBoolean()) {
+    } else if (IsString()) {
+      holder = global_context->string_function()->instance_prototype();
+    } else if (IsBoolean()) {
       holder = global_context->boolean_function()->instance_prototype();
     }
   }
   ASSERT(holder != NULL);  // Cannot handle null or undefined.
-  JSObject::cast(holder)->Lookup(name, result);
+  JSReceiver::cast(holder)->Lookup(name, result);
 }
 
 
@@ -167,10 +162,9 @@
 }
 
 
-MaybeObject* Object::GetPropertyWithCallback(Object* receiver,
-                                             Object* structure,
-                                             String* name,
-                                             Object* holder) {
+MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver,
+                                               Object* structure,
+                                               String* name) {
   Isolate* isolate = name->GetIsolate();
   // To accommodate both the old and the new api we switch on the
   // data structure used to store the callbacks.  Eventually foreign
@@ -191,10 +185,9 @@
     v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
     HandleScope scope(isolate);
     JSObject* self = JSObject::cast(receiver);
-    JSObject* holder_handle = JSObject::cast(holder);
     Handle<String> key(name);
     LOG(isolate, ApiNamedPropertyAccess("load", self, name));
-    CustomArguments args(isolate, data->data(), self, holder_handle);
+    CustomArguments args(isolate, data->data(), self, this);
     v8::AccessorInfo info(args.end());
     v8::Handle<v8::Value> result;
     {
@@ -212,9 +205,9 @@
   // __defineGetter__ callback
   if (structure->IsFixedArray()) {
     Object* getter = FixedArray::cast(structure)->get(kGetterIndex);
-    if (getter->IsJSFunction()) {
-      return Object::GetPropertyWithDefinedGetter(receiver,
-                                                  JSFunction::cast(getter));
+    if (getter->IsSpecFunction()) {
+      // TODO(rossberg): nicer would be to cast to some JSCallable here...
+      return GetPropertyWithDefinedGetter(receiver, JSReceiver::cast(getter));
     }
     // Getter is not a function.
     return isolate->heap()->undefined_value();
@@ -225,47 +218,64 @@
 }
 
 
-MaybeObject* Object::GetPropertyWithHandler(Object* receiver_raw,
-                                            String* name_raw,
-                                            Object* handler_raw) {
-  Isolate* isolate = name_raw->GetIsolate();
+MaybeObject* JSProxy::GetPropertyWithHandler(Object* receiver_raw,
+                                             String* name_raw) {
+  Isolate* isolate = GetIsolate();
   HandleScope scope(isolate);
   Handle<Object> receiver(receiver_raw);
   Handle<Object> name(name_raw);
-  Handle<Object> handler(handler_raw);
 
-  // Extract trap function.
-  Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("get");
-  Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+  Handle<Object> args[] = { receiver, name };
+  Handle<Object> result = CallTrap(
+    "get", isolate->derived_get_trap(), ARRAY_SIZE(args), args);
   if (isolate->has_pending_exception()) return Failure::Exception();
-  if (trap->IsUndefined()) {
-    // Get the derived `get' property.
-    trap = isolate->derived_get_trap();
-  }
-
-  // Call trap function.
-  Object** args[] = { receiver.location(), name.location() };
-  bool has_exception;
-  Handle<Object> result =
-      Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
-  if (has_exception) return Failure::Exception();
 
   return *result;
 }
 
 
+MaybeObject* JSProxy::GetElementWithHandler(Object* receiver,
+                                            uint32_t index) {
+  String* name;
+  MaybeObject* maybe = GetHeap()->Uint32ToString(index);
+  if (!maybe->To<String>(&name)) return maybe;
+  return GetPropertyWithHandler(receiver, name);
+}
+
+
+MaybeObject* JSProxy::SetElementWithHandler(uint32_t index,
+                                            Object* value,
+                                            StrictModeFlag strict_mode) {
+  String* name;
+  MaybeObject* maybe = GetHeap()->Uint32ToString(index);
+  if (!maybe->To<String>(&name)) return maybe;
+  return SetPropertyWithHandler(name, value, NONE, strict_mode);
+}
+
+
+bool JSProxy::HasElementWithHandler(uint32_t index) {
+  String* name;
+  MaybeObject* maybe = GetHeap()->Uint32ToString(index);
+  if (!maybe->To<String>(&name)) return maybe;
+  return HasPropertyWithHandler(name);
+}
+
+
 MaybeObject* Object::GetPropertyWithDefinedGetter(Object* receiver,
-                                                  JSFunction* getter) {
+                                                  JSReceiver* getter) {
   HandleScope scope;
-  Handle<JSFunction> fun(JSFunction::cast(getter));
+  Handle<JSReceiver> fun(getter);
   Handle<Object> self(receiver);
 #ifdef ENABLE_DEBUGGER_SUPPORT
   Debug* debug = fun->GetHeap()->isolate()->debug();
   // Handle stepping into a getter if step into is active.
-  if (debug->StepInActive()) {
-    debug->HandleStepIn(fun, Handle<Object>::null(), 0, false);
+  // TODO(rossberg): should this apply to getters that are function proxies?
+  if (debug->StepInActive() && fun->IsJSFunction()) {
+    debug->HandleStepIn(
+        Handle<JSFunction>::cast(fun), Handle<Object>::null(), 0, false);
   }
 #endif
+
   bool has_pending_exception;
   Handle<Object> result =
       Execution::Call(fun, self, 0, NULL, &has_pending_exception);
@@ -290,10 +300,8 @@
           AccessorInfo* info = AccessorInfo::cast(obj);
           if (info->all_can_read()) {
             *attributes = result->GetAttributes();
-            return GetPropertyWithCallback(receiver,
-                                           result->GetCallbackObject(),
-                                           name,
-                                           result->holder());
+            return result->holder()->GetPropertyWithCallback(
+                receiver, result->GetCallbackObject(), name);
           }
         }
         break;
@@ -486,7 +494,7 @@
       }
       JSGlobalPropertyCell* cell =
           JSGlobalPropertyCell::cast(dictionary->ValueAt(entry));
-      cell->set_value(cell->heap()->the_hole_value());
+      cell->set_value(cell->GetHeap()->the_hole_value());
       dictionary->DetailsAtPut(entry, details.AsDeleted());
     } else {
       Object* deleted = dictionary->DeleteProperty(entry, mode);
@@ -566,30 +574,26 @@
   }
   *attributes = result->GetAttributes();
   Object* value;
-  JSObject* holder = result->holder();
   switch (result->type()) {
     case NORMAL:
-      value = holder->GetNormalizedProperty(result);
+      value = result->holder()->GetNormalizedProperty(result);
       ASSERT(!value->IsTheHole() || result->IsReadOnly());
       return value->IsTheHole() ? heap->undefined_value() : value;
     case FIELD:
-      value = holder->FastPropertyAt(result->GetFieldIndex());
+      value = result->holder()->FastPropertyAt(result->GetFieldIndex());
       ASSERT(!value->IsTheHole() || result->IsReadOnly());
       return value->IsTheHole() ? heap->undefined_value() : value;
     case CONSTANT_FUNCTION:
       return result->GetConstantFunction();
     case CALLBACKS:
-      return GetPropertyWithCallback(receiver,
-                                     result->GetCallbackObject(),
-                                     name,
-                                     holder);
-    case HANDLER: {
-      JSProxy* proxy = JSProxy::cast(this);
-      return GetPropertyWithHandler(receiver, name, proxy->handler());
-    }
+      return result->holder()->GetPropertyWithCallback(
+          receiver, result->GetCallbackObject(), name);
+    case HANDLER:
+      return result->proxy()->GetPropertyWithHandler(receiver, name);
     case INTERCEPTOR: {
       JSObject* recvr = JSObject::cast(receiver);
-      return holder->GetPropertyWithInterceptor(recvr, name, attributes);
+      return result->holder()->GetPropertyWithInterceptor(
+          recvr, name, attributes);
     }
     case MAP_TRANSITION:
     case ELEMENTS_TRANSITION:
@@ -613,28 +617,21 @@
   for (holder = this;
        holder != heap->null_value();
        holder = holder->GetPrototype()) {
-    if (holder->IsSmi()) {
-      Context* global_context = Isolate::Current()->context()->global_context();
-      holder = global_context->number_function()->instance_prototype();
-    } else {
-      HeapObject* heap_object = HeapObject::cast(holder);
-      if (!heap_object->IsJSObject()) {
-        Isolate* isolate = heap->isolate();
-        Context* global_context = isolate->context()->global_context();
-        if (heap_object->IsString()) {
-          holder = global_context->string_function()->instance_prototype();
-        } else if (heap_object->IsHeapNumber()) {
-          holder = global_context->number_function()->instance_prototype();
-        } else if (heap_object->IsBoolean()) {
-          holder = global_context->boolean_function()->instance_prototype();
-        } else if (heap_object->IsJSProxy()) {
-          // TODO(rossberg): do something
-          return heap->undefined_value();  // For now...
-        } else {
-          // Undefined and null have no indexed properties.
-          ASSERT(heap_object->IsUndefined() || heap_object->IsNull());
-          return heap->undefined_value();
-        }
+    if (!holder->IsJSObject()) {
+      Isolate* isolate = heap->isolate();
+      Context* global_context = isolate->context()->global_context();
+      if (holder->IsNumber()) {
+        holder = global_context->number_function()->instance_prototype();
+      } else if (holder->IsString()) {
+        holder = global_context->string_function()->instance_prototype();
+      } else if (holder->IsBoolean()) {
+        holder = global_context->boolean_function()->instance_prototype();
+      } else if (holder->IsJSProxy()) {
+        return JSProxy::cast(holder)->GetElementWithHandler(receiver, index);
+      } else {
+        // Undefined and null have no indexed properties.
+        ASSERT(holder->IsUndefined() || holder->IsNull());
+        return heap->undefined_value();
       }
     }
 
@@ -877,6 +874,9 @@
   // Fill the remainder of the string with dead wood.
   int new_size = this->Size();  // Byte size of the external String object.
   heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
+  if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
+    MemoryChunk::IncrementLiveBytes(this->address(), new_size - size);
+  }
   return true;
 }
 
@@ -923,6 +923,10 @@
   // Fill the remainder of the string with dead wood.
   int new_size = this->Size();  // Byte size of the external String object.
   heap->CreateFillerObjectAt(this->address() + new_size, size - new_size);
+  if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
+    MemoryChunk::IncrementLiveBytes(this->address(), new_size - size);
+  }
+
   return true;
 }
 
@@ -998,8 +1002,7 @@
       break;
     }
     case JS_WEAK_MAP_TYPE: {
-      int elements = JSWeakMap::cast(this)->table()->NumberOfElements();
-      accumulator->Add("<JS WeakMap[%d]>", elements);
+      accumulator->Add("<JS WeakMap>");
       break;
     }
     case JS_REGEXP_TYPE: {
@@ -1027,7 +1030,7 @@
     // JSGlobalProxy, JSGlobalObject, JSUndetectableObject, JSValue).
     default: {
       Map* map_of_this = map();
-      Heap* heap = map_of_this->heap();
+      Heap* heap = GetHeap();
       Object* constructor = map_of_this->constructor();
       bool printed = false;
       if (constructor->IsHeapObject() &&
@@ -1049,7 +1052,6 @@
                        global_object ? "Global Object: " : "",
                        vowel ? "n" : "");
                 accumulator->Put(str);
-                accumulator->Put('>');
                 printed = true;
               }
             }
@@ -1071,7 +1073,6 @@
 
 
 void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
-  // if (!HEAP->InNewSpace(this)) PrintF("*", this);
   Heap* heap = GetHeap();
   if (!heap->Contains(this)) {
     accumulator->Add("!!!INVALID POINTER!!!");
@@ -1102,6 +1103,9 @@
     case BYTE_ARRAY_TYPE:
       accumulator->Add("<ByteArray[%u]>", ByteArray::cast(this)->length());
       break;
+    case FREE_SPACE_TYPE:
+      accumulator->Add("<FreeSpace[%u]>", FreeSpace::cast(this)->Size());
+      break;
     case EXTERNAL_PIXEL_ARRAY_TYPE:
       accumulator->Add("<ExternalPixelArray[%u]>",
                        ExternalPixelArray::cast(this)->length());
@@ -1277,6 +1281,7 @@
     case HEAP_NUMBER_TYPE:
     case FILLER_TYPE:
     case BYTE_ARRAY_TYPE:
+    case FREE_SPACE_TYPE:
     case EXTERNAL_PIXEL_ARRAY_TYPE:
     case EXTERNAL_BYTE_ARRAY_TYPE:
     case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
@@ -1533,7 +1538,7 @@
 
   // If the old map is the global object map (from new Object()),
   // then transitions are not added to it, so we are done.
-  Heap* heap = old_map->heap();
+  Heap* heap = GetHeap();
   if (old_map == heap->isolate()->context()->global_context()->
       object_function()->map()) {
     return function;
@@ -1609,7 +1614,7 @@
                                    StrictModeFlag strict_mode) {
   ASSERT(!IsJSGlobalProxy());
   Map* map_of_this = map();
-  Heap* heap = map_of_this->heap();
+  Heap* heap = GetHeap();
   if (!map_of_this->is_extensible()) {
     if (strict_mode == kNonStrictMode) {
       return heap->undefined_value();
@@ -1658,6 +1663,14 @@
     // found.  Use set property to handle all these cases.
     return SetProperty(&result, name, value, attributes, strict_mode);
   }
+  bool found = false;
+  MaybeObject* result_object;
+  result_object = SetPropertyWithCallbackSetterInPrototypes(name,
+                                                            value,
+                                                            attributes,
+                                                            &found,
+                                                            strict_mode);
+  if (found) return result_object;
   // Add a new real property.
   return AddProperty(name, value, attributes, strict_mode);
 }
@@ -1696,7 +1709,7 @@
     return result;
   }
   // Do not add transitions to the map of "new Object()".
-  if (map() == old_map->heap()->isolate()->context()->global_context()->
+  if (map() == GetIsolate()->context()->global_context()->
       object_function()->map()) {
     return result;
   }
@@ -1880,8 +1893,9 @@
 
   if (structure->IsFixedArray()) {
     Object* setter = FixedArray::cast(structure)->get(kSetterIndex);
-    if (setter->IsJSFunction()) {
-     return SetPropertyWithDefinedSetter(JSFunction::cast(setter), value);
+    if (setter->IsSpecFunction()) {
+      // TODO(rossberg): nicer would be to cast to some JSCallable here...
+     return SetPropertyWithDefinedSetter(JSReceiver::cast(setter), value);
     } else {
       if (strict_mode == kNonStrictMode) {
         return value;
@@ -1900,17 +1914,19 @@
 }
 
 
-MaybeObject* JSObject::SetPropertyWithDefinedSetter(JSFunction* setter,
-                                                    Object* value) {
+MaybeObject* JSReceiver::SetPropertyWithDefinedSetter(JSReceiver* setter,
+                                                      Object* value) {
   Isolate* isolate = GetIsolate();
   Handle<Object> value_handle(value, isolate);
-  Handle<JSFunction> fun(JSFunction::cast(setter), isolate);
-  Handle<JSObject> self(this, isolate);
+  Handle<JSReceiver> fun(setter, isolate);
+  Handle<JSReceiver> self(this, isolate);
 #ifdef ENABLE_DEBUGGER_SUPPORT
   Debug* debug = isolate->debug();
   // Handle stepping into a setter if step into is active.
-  if (debug->StepInActive()) {
-    debug->HandleStepIn(fun, Handle<Object>::null(), 0, false);
+  // TODO(rossberg): should this apply to setters that are function proxies?
+  if (debug->StepInActive() && fun->IsJSFunction()) {
+    debug->HandleStepIn(
+        Handle<JSFunction>::cast(fun), Handle<Object>::null(), 0, false);
   }
 #endif
   bool has_pending_exception;
@@ -1928,6 +1944,9 @@
   for (Object* pt = GetPrototype();
        pt != heap->null_value();
        pt = pt->GetPrototype()) {
+    if (pt->IsJSProxy()) {
+      return result->HandlerResult(JSProxy::cast(pt));
+    }
     JSObject::cast(pt)->LocalLookupRealNamedProperty(name, result);
     if (result->IsProperty()) {
       if (result->type() == CALLBACKS && !result->IsReadOnly()) return;
@@ -1948,6 +1967,16 @@
   for (Object* pt = GetPrototype();
        pt != heap->null_value();
        pt = pt->GetPrototype()) {
+    if (pt->IsJSProxy()) {
+      String* name;
+      MaybeObject* maybe = GetHeap()->Uint32ToString(index);
+      if (!maybe->To<String>(&name)) {
+        *found = true;  // Force abort
+        return maybe;
+      }
+      return JSProxy::cast(pt)->SetPropertyWithHandlerIfDefiningSetter(
+          name, value, NONE, strict_mode, found);
+    }
     if (!JSObject::cast(pt)->HasDictionaryElements()) {
       continue;
     }
@@ -1969,6 +1998,60 @@
   return heap->the_hole_value();
 }
 
+MaybeObject* JSObject::SetPropertyWithCallbackSetterInPrototypes(
+    String* name,
+    Object* value,
+    PropertyAttributes attributes,
+    bool* found,
+    StrictModeFlag strict_mode) {
+  LookupResult result;
+  LookupCallbackSetterInPrototypes(name, &result);
+  Heap* heap = GetHeap();
+  if (result.IsFound()) {
+    *found = true;
+    if (result.type() == CALLBACKS) {
+      return SetPropertyWithCallback(result.GetCallbackObject(),
+                                     name,
+                                     value,
+                                     result.holder(),
+                                     strict_mode);
+    } else if (result.type() == HANDLER) {
+      // We could not find a local property so let's check whether there is an
+      // accessor that wants to handle the property.
+      LookupResult accessor_result;
+      LookupCallbackSetterInPrototypes(name, &accessor_result);
+      if (accessor_result.IsFound()) {
+        if (accessor_result.type() == CALLBACKS) {
+          return SetPropertyWithCallback(accessor_result.GetCallbackObject(),
+                                         name,
+                                         value,
+                                         accessor_result.holder(),
+                                         strict_mode);
+        } else if (accessor_result.type() == HANDLER) {
+          // There is a proxy in the prototype chain. Invoke its
+          // getOwnPropertyDescriptor trap.
+          bool found = false;
+          // SetPropertyWithHandlerIfDefiningSetter can cause GC,
+          // so make sure to use the handlified references after calling
+          // the function.
+          Handle<JSObject> self(this);
+          Handle<String> hname(name);
+          Handle<Object> hvalue(value);
+          MaybeObject* result =
+              accessor_result.proxy()->SetPropertyWithHandlerIfDefiningSetter(
+                  name, value, attributes, strict_mode, &found);
+          if (found) return result;
+          // The proxy does not define the property as an accessor.
+          // Consequently, it has no effect on setting the receiver.
+          return self->AddProperty(*hname, *hvalue, attributes, strict_mode);
+        }
+      }
+    }
+  }
+  *found = false;
+  return heap->the_hole_value();
+}
+
 
 void JSObject::LookupInDescriptor(String* name, LookupResult* result) {
   DescriptorArray* descriptors = map()->instance_descriptors();
@@ -1985,7 +2068,8 @@
                               String* name,
                               LookupResult* result) {
   DescriptorArray* descriptors = instance_descriptors();
-  DescriptorLookupCache* cache = heap()->isolate()->descriptor_lookup_cache();
+  DescriptorLookupCache* cache =
+      GetHeap()->isolate()->descriptor_lookup_cache();
   int number = cache->Lookup(descriptors, name);
   if (number == DescriptorLookupCache::kAbsent) {
     number = descriptors->Search(name);
@@ -1999,17 +2083,111 @@
 }
 
 
-MaybeObject* Map::GetElementsTransitionMap(ElementsKind elements_kind,
-                                           bool safe_to_add_transition) {
-  Heap* current_heap = heap();
-  DescriptorArray* descriptors = instance_descriptors();
+static Map* GetElementsTransitionMapFromDescriptor(Object* descriptor_contents,
+                                                   ElementsKind elements_kind) {
+  if (descriptor_contents->IsMap()) {
+    Map* map = Map::cast(descriptor_contents);
+    if (map->elements_kind() == elements_kind) {
+      return map;
+    }
+    return NULL;
+  }
+
+  FixedArray* map_array = FixedArray::cast(descriptor_contents);
+  for (int i = 0; i < map_array->length(); ++i) {
+    Object* current = map_array->get(i);
+    // Skip undefined slots; they are sentinels for reclaimed maps.
+    if (!current->IsUndefined()) {
+      Map* current_map = Map::cast(map_array->get(i));
+      if (current_map->elements_kind() == elements_kind) {
+        return current_map;
+      }
+    }
+  }
+
+  return NULL;
+}
+
+
+static MaybeObject* AddElementsTransitionMapToDescriptor(
+    Object* descriptor_contents,
+    Map* new_map) {
+  // Nothing was in the descriptor for an ELEMENTS_TRANSITION,
+  // simply add the map.
+  if (descriptor_contents == NULL) {
+    return new_map;
+  }
+
+  // There was already a map in the descriptor, create a 2-element FixedArray
+  // to contain the existing map plus the new one.
+  FixedArray* new_array;
+  Heap* heap = new_map->GetHeap();
+  if (descriptor_contents->IsMap()) {
+    // Must tenure, DescriptorArray expects no new-space objects.
+    MaybeObject* maybe_new_array = heap->AllocateFixedArray(2, TENURED);
+    if (!maybe_new_array->To<FixedArray>(&new_array)) {
+      return maybe_new_array;
+    }
+    new_array->set(0, descriptor_contents);
+    new_array->set(1, new_map);
+    return new_array;
+  }
+
+  // The descriptor already contained a list of maps for different
+  // ElementsKinds of ELEMENTS_TRANSITION. First check the existing array for
+  // an undefined slot; if none is available, create a FixedArray to hold the
+  // existing maps plus the new one and fill it in.
+  FixedArray* array = FixedArray::cast(descriptor_contents);
+  for (int i = 0; i < array->length(); ++i) {
+    if (array->get(i)->IsUndefined()) {
+      array->set(i, new_map);
+      return array;
+    }
+  }
+
+  // Must tenure, DescriptorArray expects no new-space objects.
+  MaybeObject* maybe_new_array =
+      heap->AllocateFixedArray(array->length() + 1, TENURED);
+  if (!maybe_new_array->To<FixedArray>(&new_array)) {
+    return maybe_new_array;
+  }
+  int i = 0;
+  while (i < array->length()) {
+    new_array->set(i, array->get(i));
+    ++i;
+  }
+  new_array->set(i, new_map);
+  return new_array;
+}
+
+
+MaybeObject* JSObject::GetElementsTransitionMap(ElementsKind elements_kind) {
+  Heap* current_heap = GetHeap();
+  Map* current_map = map();
+  DescriptorArray* descriptors = current_map->instance_descriptors();
   String* elements_transition_sentinel_name = current_heap->empty_symbol();
 
+  if (current_map->elements_kind() == elements_kind) return current_map;
+
+  // Only objects with FastProperties can have DescriptorArrays and can track
+  // element-related maps. Also don't add descriptors to maps that are shared.
+  bool safe_to_add_transition = HasFastProperties() &&
+      !current_map->IsUndefined() &&
+      !current_map->is_shared();
+
+  // Prevent long chains of DICTIONARY -> FAST_ELEMENTS maps caused by objects
+  // with elements that switch back and forth between dictionary and fast
+  // element mode.
+  if ((current_map->elements_kind() == DICTIONARY_ELEMENTS &&
+       elements_kind == FAST_ELEMENTS)) {
+    safe_to_add_transition = false;
+  }
+
+  Object* descriptor_contents = NULL;
   if (safe_to_add_transition) {
     // It's only safe to manipulate the descriptor array if it would be
     // safe to add a transition.
 
-    ASSERT(!is_shared());  // no transitions can be added to shared maps.
     // Check if the elements transition already exists.
     DescriptorLookupCache* cache =
         current_heap->isolate()->descriptor_lookup_cache();
@@ -2025,9 +2203,15 @@
     // return it.
     if (index != DescriptorArray::kNotFound) {
       PropertyDetails details(PropertyDetails(descriptors->GetDetails(index)));
-      if (details.type() == ELEMENTS_TRANSITION &&
-          details.elements_kind() == elements_kind) {
-        return descriptors->GetValue(index);
+      if (details.type() == ELEMENTS_TRANSITION) {
+        descriptor_contents = descriptors->GetValue(index);
+        Map* maybe_transition_map =
+            GetElementsTransitionMapFromDescriptor(descriptor_contents,
+                                                   elements_kind);
+        if (maybe_transition_map != NULL) {
+          ASSERT(maybe_transition_map->IsMap());
+          return maybe_transition_map;
+        }
       } else {
         safe_to_add_transition = false;
       }
@@ -2037,26 +2221,29 @@
   // No transition to an existing map for the given ElementsKind. Make a new
   // one.
   Object* obj;
-  { MaybeObject* maybe_map = CopyDropTransitions();
+  { MaybeObject* maybe_map = current_map->CopyDropTransitions();
     if (!maybe_map->ToObject(&obj)) return maybe_map;
   }
   Map* new_map = Map::cast(obj);
 
   new_map->set_elements_kind(elements_kind);
-  GetIsolate()->counters()->map_to_external_array_elements()->Increment();
 
   // Only remember the map transition if the object's map is NOT equal to the
   // global object_function's map and there is not an already existing
   // non-matching element transition.
-  bool allow_map_transition =
-      safe_to_add_transition &&
+  bool allow_map_transition = safe_to_add_transition &&
       (GetIsolate()->context()->global_context()->object_function()->map() !=
        map());
   if (allow_map_transition) {
-    // Allocate new instance descriptors for the old map with map transition.
+    MaybeObject* maybe_new_contents =
+        AddElementsTransitionMapToDescriptor(descriptor_contents, new_map);
+    Object* new_contents;
+    if (!maybe_new_contents->ToObject(&new_contents)) {
+      return maybe_new_contents;
+    }
+
     ElementsTransitionDescriptor desc(elements_transition_sentinel_name,
-                                      Map::cast(new_map),
-                                      elements_kind);
+                                      new_contents);
     Object* new_descriptors;
     MaybeObject* maybe_new_descriptors = descriptors->CopyInsert(
         &desc,
@@ -2065,7 +2252,7 @@
       return maybe_new_descriptors;
     }
     descriptors = DescriptorArray::cast(new_descriptors);
-    set_instance_descriptors(descriptors);
+    current_map->set_instance_descriptors(descriptors);
   }
 
   return new_map;
@@ -2078,6 +2265,7 @@
     Object* proto = GetPrototype();
     if (proto->IsNull()) return result->NotFound();
     ASSERT(proto->IsJSGlobalObject());
+    // A GlobalProxy's prototype should always be a proper JSObject.
     return JSObject::cast(proto)->LocalLookupRealNamedProperty(name, result);
   }
 
@@ -2204,7 +2392,7 @@
                                      PropertyAttributes attributes,
                                      StrictModeFlag strict_mode) {
   if (result->IsFound() && result->type() == HANDLER) {
-    return JSProxy::cast(this)->SetPropertyWithHandler(
+    return result->proxy()->SetPropertyWithHandler(
         key, value, attributes, strict_mode);
   } else {
     return JSObject::cast(this)->SetPropertyForResult(
@@ -2218,22 +2406,11 @@
   HandleScope scope(isolate);
   Handle<Object> receiver(this);
   Handle<Object> name(name_raw);
-  Handle<Object> handler(this->handler());
 
-  // Extract trap function.
-  Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("has");
-  Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+  Handle<Object> args[] = { name };
+  Handle<Object> result = CallTrap(
+    "has", isolate->derived_has_trap(), ARRAY_SIZE(args), args);
   if (isolate->has_pending_exception()) return Failure::Exception();
-  if (trap->IsUndefined()) {
-    trap = isolate->derived_has_trap();
-  }
-
-  // Call trap function.
-  Object** args[] = { name.location() };
-  bool has_exception;
-  Handle<Object> result =
-      Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
-  if (has_exception) return Failure::Exception();
 
   return result->ToBoolean()->IsTrue();
 }
@@ -2249,24 +2426,82 @@
   Handle<Object> receiver(this);
   Handle<Object> name(name_raw);
   Handle<Object> value(value_raw);
-  Handle<Object> handler(this->handler());
 
-  // Extract trap function.
-  Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("set");
-  Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+  Handle<Object> args[] = { receiver, name, value };
+  CallTrap("set", isolate->derived_set_trap(), ARRAY_SIZE(args), args);
   if (isolate->has_pending_exception()) return Failure::Exception();
-  if (trap->IsUndefined()) {
-    trap = isolate->derived_set_trap();
+
+  return *value;
+}
+
+
+MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyWithHandlerIfDefiningSetter(
+    String* name_raw,
+    Object* value_raw,
+    PropertyAttributes attributes,
+    StrictModeFlag strict_mode,
+    bool* found) {
+  *found = true;  // except where defined otherwise...
+  Isolate* isolate = GetHeap()->isolate();
+  Handle<JSProxy> proxy(this);
+  Handle<String> name(name_raw);
+  Handle<Object> value(value_raw);
+  Handle<Object> args[] = { name };
+  Handle<Object> result = proxy->CallTrap(
+      "getOwnPropertyDescriptor", Handle<Object>(), ARRAY_SIZE(args), args);
+  if (isolate->has_pending_exception()) return Failure::Exception();
+
+  if (!result->IsUndefined()) {
+    // The proxy handler cares about this property.
+    // Check whether it is virtualized as an accessor.
+    // Emulate [[GetProperty]] semantics for proxies.
+    bool has_pending_exception;
+    Object** argv[] = { result.location() };
+    Handle<Object> desc =
+        Execution::Call(isolate->to_complete_property_descriptor(), result,
+                        ARRAY_SIZE(argv), argv, &has_pending_exception);
+    if (has_pending_exception) return Failure::Exception();
+
+    Handle<String> conf_name =
+        isolate->factory()->LookupAsciiSymbol("configurable_");
+    Handle<Object> configurable(v8::internal::GetProperty(desc, conf_name));
+    ASSERT(!isolate->has_pending_exception());
+    if (configurable->IsFalse()) {
+      Handle<Object> args[] = { Handle<Object>(proxy->handler()), proxy, name };
+      Handle<Object> error = isolate->factory()->NewTypeError(
+          "proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args)));
+      return isolate->Throw(*error);
+    }
+    ASSERT(configurable->IsTrue());
+
+    // Check for AccessorDescriptor.
+    Handle<String> set_name = isolate->factory()->LookupAsciiSymbol("set_");
+    Handle<Object> setter(v8::internal::GetProperty(desc, set_name));
+    ASSERT(!isolate->has_pending_exception());
+    if (!setter->IsUndefined()) {
+      // We have a setter -- invoke it.
+      // TODO(rossberg): nicer would be to cast to some JSCallable here...
+      return proxy->SetPropertyWithDefinedSetter(
+          JSReceiver::cast(*setter), *value);
+    } else {
+      Handle<String> get_name = isolate->factory()->LookupAsciiSymbol("get_");
+      Handle<Object> getter(v8::internal::GetProperty(desc, get_name));
+      ASSERT(!isolate->has_pending_exception());
+      if (!getter->IsUndefined()) {
+        // We have a getter but no setter -- the property may not be
+        // written. In strict mode, throw an error.
+        if (strict_mode == kNonStrictMode) return *value;
+        Handle<Object> args[] = { name, proxy };
+        Handle<Object> error = isolate->factory()->NewTypeError(
+            "no_setter_in_callback", HandleVector(args, ARRAY_SIZE(args)));
+        return isolate->Throw(*error);
+      }
+    }
+    // Fall-through.
   }
 
-  // Call trap function.
-  Object** args[] = {
-      receiver.location(), name.location(), value.location()
-  };
-  bool has_exception;
-  Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
-  if (has_exception) return Failure::Exception();
-
+  // The proxy does not define the property as an accessor.
+  *found = false;
   return *value;
 }
 
@@ -2277,31 +2512,16 @@
   HandleScope scope(isolate);
   Handle<Object> receiver(this);
   Handle<Object> name(name_raw);
-  Handle<Object> handler(this->handler());
 
-  // Extract trap function.
-  Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("delete");
-  Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+  Handle<Object> args[] = { name };
+  Handle<Object> result = CallTrap(
+    "delete", Handle<Object>(), ARRAY_SIZE(args), args);
   if (isolate->has_pending_exception()) return Failure::Exception();
-  if (trap->IsUndefined()) {
-    Handle<Object> args[] = { handler, trap_name };
-    Handle<Object> error = isolate->factory()->NewTypeError(
-        "handler_trap_missing", HandleVector(args, ARRAY_SIZE(args)));
-    isolate->Throw(*error);
-    return Failure::Exception();
-  }
-
-  // Call trap function.
-  Object** args[] = { name.location() };
-  bool has_exception;
-  Handle<Object> result =
-      Execution::Call(trap, handler, ARRAY_SIZE(args), args, &has_exception);
-  if (has_exception) return Failure::Exception();
 
   Object* bool_result = result->ToBoolean();
-  if (mode == STRICT_DELETION &&
-      bool_result == isolate->heap()->false_value()) {
-    Handle<Object> args[] = { handler, trap_name };
+  if (mode == STRICT_DELETION && bool_result == GetHeap()->false_value()) {
+    Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("delete");
+    Handle<Object> args[] = { Handle<Object>(handler()), trap_name };
     Handle<Object> error = isolate->factory()->NewTypeError(
         "handler_failed", HandleVector(args, ARRAY_SIZE(args)));
     isolate->Throw(*error);
@@ -2311,39 +2531,73 @@
 }
 
 
-MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
-    JSReceiver* receiver_raw,
-    String* name_raw,
-    bool* has_exception) {
+MUST_USE_RESULT MaybeObject* JSProxy::DeleteElementWithHandler(
+    uint32_t index,
+    DeleteMode mode) {
   Isolate* isolate = GetIsolate();
   HandleScope scope(isolate);
+  Handle<String> name = isolate->factory()->Uint32ToString(index);
+  return JSProxy::DeletePropertyWithHandler(*name, mode);
+}
+
+
+MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
+    JSReceiver* receiver_raw,
+    String* name_raw) {
+  Isolate* isolate = GetIsolate();
+  HandleScope scope(isolate);
+  Handle<JSProxy> proxy(this);
   Handle<JSReceiver> receiver(receiver_raw);
   Handle<Object> name(name_raw);
-  Handle<Object> handler(this->handler());
 
-  // Extract trap function.
-  Handle<String> trap_name =
-      isolate->factory()->LookupAsciiSymbol("getPropertyDescriptor");
-  Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+  Handle<Object> args[] = { name };
+  Handle<Object> result = CallTrap(
+    "getPropertyDescriptor", Handle<Object>(), ARRAY_SIZE(args), args);
   if (isolate->has_pending_exception()) return NONE;
-  if (trap->IsUndefined()) {
-    Handle<Object> args[] = { handler, trap_name };
+
+  if (result->IsUndefined()) return ABSENT;
+
+  bool has_pending_exception;
+  Object** argv[] = { result.location() };
+  Handle<Object> desc =
+      Execution::Call(isolate->to_complete_property_descriptor(), result,
+                      ARRAY_SIZE(argv), argv, &has_pending_exception);
+  if (has_pending_exception) return NONE;
+
+  // Convert result to PropertyAttributes.
+  Handle<String> enum_n = isolate->factory()->LookupAsciiSymbol("enumerable");
+  Handle<Object> enumerable(v8::internal::GetProperty(desc, enum_n));
+  if (isolate->has_pending_exception()) return NONE;
+  Handle<String> conf_n = isolate->factory()->LookupAsciiSymbol("configurable");
+  Handle<Object> configurable(v8::internal::GetProperty(desc, conf_n));
+  if (isolate->has_pending_exception()) return NONE;
+  Handle<String> writ_n = isolate->factory()->LookupAsciiSymbol("writable");
+  Handle<Object> writable(v8::internal::GetProperty(desc, writ_n));
+  if (isolate->has_pending_exception()) return NONE;
+
+  if (configurable->IsFalse()) {
+    Handle<Object> args[] = { Handle<Object>(proxy->handler()), proxy, name };
     Handle<Object> error = isolate->factory()->NewTypeError(
-        "handler_trap_missing", HandleVector(args, ARRAY_SIZE(args)));
+        "proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args)));
     isolate->Throw(*error);
-    *has_exception = true;
     return NONE;
   }
 
-  // Call trap function.
-  Object** args[] = { name.location() };
-  Handle<Object> result =
-      Execution::Call(trap, handler, ARRAY_SIZE(args), args, has_exception);
-  if (has_exception) return NONE;
+  int attributes = NONE;
+  if (enumerable->ToBoolean()->IsFalse()) attributes |= DONT_ENUM;
+  if (configurable->ToBoolean()->IsFalse()) attributes |= DONT_DELETE;
+  if (writable->ToBoolean()->IsFalse()) attributes |= READ_ONLY;
+  return static_cast<PropertyAttributes>(attributes);
+}
 
-  // TODO(rossberg): convert result to PropertyAttributes
-  USE(result);
-  return NONE;
+
+MUST_USE_RESULT PropertyAttributes JSProxy::GetElementAttributeWithHandler(
+    JSReceiver* receiver,
+    uint32_t index) {
+  Isolate* isolate = GetIsolate();
+  HandleScope scope(isolate);
+  Handle<String> name = isolate->factory()->Uint32ToString(index);
+  return GetPropertyAttributeWithHandler(receiver, *name);
 }
 
 
@@ -2352,6 +2606,9 @@
   HandleScope scope(isolate);
   Handle<JSProxy> self(this);
 
+  // Save identity hash.
+  MaybeObject* maybe_hash = GetIdentityHash(OMIT_CREATION);
+
   if (IsJSFunctionProxy()) {
     isolate->factory()->BecomeJSFunction(self);
     // Code will be set on the JavaScript side.
@@ -2359,9 +2616,44 @@
     isolate->factory()->BecomeJSObject(self);
   }
   ASSERT(self->IsJSObject());
+
+  // Inherit identity, if it was present.
+  Object* hash;
+  if (maybe_hash->To<Object>(&hash) && hash->IsSmi()) {
+    Handle<JSObject> new_self(JSObject::cast(*self));
+    isolate->factory()->SetIdentityHash(new_self, hash);
+  }
 }
 
 
+MUST_USE_RESULT Handle<Object> JSProxy::CallTrap(
+    const char* name,
+    Handle<Object> derived,
+    int argc,
+    Handle<Object> args[]) {
+  Isolate* isolate = GetIsolate();
+  Handle<Object> handler(this->handler());
+
+  Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol(name);
+  Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+  if (isolate->has_pending_exception()) return trap;
+
+  if (trap->IsUndefined()) {
+    if (derived.is_null()) {
+      Handle<Object> args[] = { handler, trap_name };
+      Handle<Object> error = isolate->factory()->NewTypeError(
+        "handler_trap_missing", HandleVector(args, ARRAY_SIZE(args)));
+      isolate->Throw(*error);
+      return Handle<Object>();
+    }
+    trap = Handle<Object>(derived);
+  }
+
+  Object*** argv = reinterpret_cast<Object***>(args);
+  bool threw;
+  return Execution::Call(trap, handler, argc, argv, &threw);
+}
+
 
 MaybeObject* JSObject::SetPropertyForResult(LookupResult* result,
                                             String* name,
@@ -2386,48 +2678,46 @@
   }
 
   // Check access rights if needed.
-  if (IsAccessCheckNeeded()
-      && !heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) {
-    return SetPropertyWithFailedAccessCheck(result,
-                                            name,
-                                            value,
-                                            true,
-                                            strict_mode);
+  if (IsAccessCheckNeeded()) {
+    if (!heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_SET)) {
+      return SetPropertyWithFailedAccessCheck(
+          result, name, value, true, strict_mode);
+    }
   }
 
   if (IsJSGlobalProxy()) {
     Object* proto = GetPrototype();
     if (proto->IsNull()) return value;
     ASSERT(proto->IsJSGlobalObject());
-    return JSObject::cast(proto)->SetProperty(
+    return JSObject::cast(proto)->SetPropertyForResult(
         result, name, value, attributes, strict_mode);
   }
 
   if (!result->IsProperty() && !IsJSContextExtensionObject()) {
-    // We could not find a local property so let's check whether there is an
-    // accessor that wants to handle the property.
-    LookupResult accessor_result;
-    LookupCallbackSetterInPrototypes(name, &accessor_result);
-    if (accessor_result.IsProperty()) {
-      return SetPropertyWithCallback(accessor_result.GetCallbackObject(),
-                                     name,
-                                     value,
-                                     accessor_result.holder(),
-                                     strict_mode);
-    }
+    bool found = false;
+    MaybeObject* result_object;
+    result_object = SetPropertyWithCallbackSetterInPrototypes(name,
+                                                              value,
+                                                              attributes,
+                                                              &found,
+                                                              strict_mode);
+    if (found) return result_object;
   }
+
+  // At this point, no GC should have happened, as this would invalidate
+  // 'result', which we cannot handlify!
+
   if (!result->IsFound()) {
     // Neither properties nor transitions found.
     return AddProperty(name, value, attributes, strict_mode);
   }
   if (result->IsReadOnly() && result->IsProperty()) {
     if (strict_mode == kStrictMode) {
-      HandleScope scope(heap->isolate());
-      Handle<String> key(name);
-      Handle<Object> holder(this);
-      Handle<Object> args[2] = { key, holder };
+      Handle<JSObject> self(this);
+      Handle<String> hname(name);
+      Handle<Object> args[] = { hname, self };
       return heap->isolate()->Throw(*heap->isolate()->factory()->NewTypeError(
-          "strict_read_only_property", HandleVector(args, 2)));
+          "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args))));
     } else {
       return value;
     }
@@ -2656,9 +2946,8 @@
       String* key) {
   uint32_t index = 0;
   if (IsJSObject() && key->AsArrayIndex(&index)) {
-    if (JSObject::cast(this)->HasElementWithReceiver(receiver, index))
-      return NONE;
-    return ABSENT;
+    return JSObject::cast(this)->HasElementWithReceiver(receiver, index)
+        ? NONE : ABSENT;
   }
   // Named property.
   LookupResult result;
@@ -2688,10 +2977,8 @@
       case CALLBACKS:
         return result->GetAttributes();
       case HANDLER: {
-        // TODO(rossberg): propagate exceptions properly.
-        bool has_exception = false;
-        return JSProxy::cast(this)->GetPropertyAttributeWithHandler(
-            receiver, name, &has_exception);
+        return JSProxy::cast(result->proxy())->GetPropertyAttributeWithHandler(
+            receiver, name);
       }
       case INTERCEPTOR:
         return result->holder()->GetPropertyAttributeWithInterceptor(
@@ -2857,7 +3144,7 @@
     }
   }
 
-  Heap* current_heap = map_of_this->heap();
+  Heap* current_heap = GetHeap();
 
   // Copy the next enumeration index from instance descriptor.
   int index = map_of_this->instance_descriptors()->NextEnumerationIndex();
@@ -2879,6 +3166,10 @@
   ASSERT(instance_size_delta >= 0);
   current_heap->CreateFillerObjectAt(this->address() + new_instance_size,
                                      instance_size_delta);
+  if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
+    MemoryChunk::IncrementLiveBytes(this->address(), -instance_size_delta);
+  }
+
 
   set_map(new_map);
   new_map->clear_instance_descriptors();
@@ -2912,13 +3203,14 @@
   FixedArrayBase* array = FixedArrayBase::cast(elements());
   Map* old_map = array->map();
   bool is_arguments =
-      (old_map == old_map->heap()->non_strict_arguments_elements_map());
+      (old_map == old_map->GetHeap()->non_strict_arguments_elements_map());
   if (is_arguments) {
     array = FixedArrayBase::cast(FixedArray::cast(array)->get(1));
   }
   if (array->IsDictionary()) return array;
 
   ASSERT(HasFastElements() ||
+         HasFastSmiOnlyElements() ||
          HasFastDoubleElements() ||
          HasFastArgumentsElements());
   // Compute the effective length and allocate a new backing store.
@@ -2953,7 +3245,8 @@
         if (!maybe_value_object->ToObject(&value)) return maybe_value_object;
       }
     } else {
-      ASSERT(old_map->has_fast_elements());
+      ASSERT(old_map->has_fast_elements() ||
+             old_map->has_fast_smi_only_elements());
       value = FixedArray::cast(array)->get(i);
     }
     PropertyDetails details = PropertyDetails(NONE, NORMAL);
@@ -2973,13 +3266,14 @@
     // Set the new map first to satisfy the elements type assert in
     // set_elements().
     Object* new_map;
-    MaybeObject* maybe = map()->GetSlowElementsMap();
+    MaybeObject* maybe = GetElementsTransitionMap(DICTIONARY_ELEMENTS);
     if (!maybe->ToObject(&new_map)) return maybe;
     set_map(Map::cast(new_map));
     set_elements(dictionary);
   }
 
-  old_map->isolate()->counters()->elements_to_dictionary()->Increment();
+  old_map->GetHeap()->isolate()->counters()->elements_to_dictionary()->
+      Increment();
 
 #ifdef DEBUG
   if (FLAG_trace_normalization) {
@@ -2993,76 +3287,8 @@
 }
 
 
-MaybeObject* JSObject::GetHiddenProperties(HiddenPropertiesFlag flag) {
+Smi* JSReceiver::GenerateIdentityHash() {
   Isolate* isolate = GetIsolate();
-  Heap* heap = isolate->heap();
-  Object* holder = BypassGlobalProxy();
-  if (holder->IsUndefined()) return heap->undefined_value();
-  JSObject* obj = JSObject::cast(holder);
-  if (obj->HasFastProperties()) {
-    // If the object has fast properties, check whether the first slot
-    // in the descriptor array matches the hidden symbol. Since the
-    // hidden symbols hash code is zero (and no other string has hash
-    // code zero) it will always occupy the first entry if present.
-    DescriptorArray* descriptors = obj->map()->instance_descriptors();
-    if ((descriptors->number_of_descriptors() > 0) &&
-        (descriptors->GetKey(0) == heap->hidden_symbol()) &&
-        descriptors->IsProperty(0)) {
-      ASSERT(descriptors->GetType(0) == FIELD);
-      return obj->FastPropertyAt(descriptors->GetFieldIndex(0));
-    }
-  }
-
-  // Only attempt to find the hidden properties in the local object and not
-  // in the prototype chain.
-  if (!obj->HasHiddenPropertiesObject()) {
-    // Hidden properties object not found. Allocate a new hidden properties
-    // object if requested. Otherwise return the undefined value.
-    if (flag == ALLOW_CREATION) {
-      Object* hidden_obj;
-      { MaybeObject* maybe_obj = heap->AllocateJSObject(
-            isolate->context()->global_context()->object_function());
-        if (!maybe_obj->ToObject(&hidden_obj)) return maybe_obj;
-      }
-      // Don't allow leakage of the hidden object through accessors
-      // on Object.prototype.
-      {
-        MaybeObject* maybe_obj =
-            JSObject::cast(hidden_obj)->SetPrototype(heap->null_value(), false);
-        if (maybe_obj->IsFailure()) return maybe_obj;
-      }
-      return obj->SetHiddenPropertiesObject(hidden_obj);
-    } else {
-      return heap->undefined_value();
-    }
-  }
-  return obj->GetHiddenPropertiesObject();
-}
-
-
-MaybeObject* JSObject::GetIdentityHash(HiddenPropertiesFlag flag) {
-  Isolate* isolate = GetIsolate();
-  Object* hidden_props_obj;
-  { MaybeObject* maybe_obj = GetHiddenProperties(flag);
-    if (!maybe_obj->ToObject(&hidden_props_obj)) return maybe_obj;
-  }
-  if (!hidden_props_obj->IsJSObject()) {
-    // We failed to create hidden properties.  That's a detached
-    // global proxy.
-    ASSERT(hidden_props_obj->IsUndefined());
-    return Smi::FromInt(0);
-  }
-  JSObject* hidden_props = JSObject::cast(hidden_props_obj);
-  String* hash_symbol = isolate->heap()->identity_hash_symbol();
-  {
-    // Note that HasLocalProperty() can cause a GC in the general case in the
-    // presence of interceptors.
-    AssertNoAllocation no_alloc;
-    if (hidden_props->HasLocalProperty(hash_symbol)) {
-      MaybeObject* hash = hidden_props->GetProperty(hash_symbol);
-      return Smi::cast(hash->ToObjectChecked());
-    }
-  }
 
   int hash_value;
   int attempts = 0;
@@ -3074,17 +3300,209 @@
   } while (hash_value == 0 && attempts < 30);
   hash_value = hash_value != 0 ? hash_value : 1;  // never return 0
 
-  Smi* hash = Smi::FromInt(hash_value);
-  { MaybeObject* result = hidden_props->SetLocalPropertyIgnoreAttributes(
-        hash_symbol,
-        hash,
-        static_cast<PropertyAttributes>(None));
-    if (result->IsFailure()) return result;
+  return Smi::FromInt(hash_value);
+}
+
+
+MaybeObject* JSObject::SetIdentityHash(Object* hash, CreationFlag flag) {
+  MaybeObject* maybe = SetHiddenProperty(GetHeap()->identity_hash_symbol(),
+                                         hash);
+  if (maybe->IsFailure()) return maybe;
+  return this;
+}
+
+
+MaybeObject* JSObject::GetIdentityHash(CreationFlag flag) {
+  Object* stored_value = GetHiddenProperty(GetHeap()->identity_hash_symbol());
+  if (stored_value->IsSmi()) return stored_value;
+
+  Smi* hash = GenerateIdentityHash();
+  MaybeObject* result = SetHiddenProperty(GetHeap()->identity_hash_symbol(),
+                                          hash);
+  if (result->IsFailure()) return result;
+  if (result->ToObjectUnchecked()->IsUndefined()) {
+    // Trying to get hash of detached proxy.
+    return Smi::FromInt(0);
   }
   return hash;
 }
 
 
+MaybeObject* JSProxy::GetIdentityHash(CreationFlag flag) {
+  Object* hash = this->hash();
+  if (!hash->IsSmi() && flag == ALLOW_CREATION) {
+    hash = GenerateIdentityHash();
+    set_hash(hash);
+  }
+  return hash;
+}
+
+
+Object* JSObject::GetHiddenProperty(String* key) {
+  if (IsJSGlobalProxy()) {
+    // For a proxy, use the prototype as target object.
+    Object* proxy_parent = GetPrototype();
+    // If the proxy is detached, return undefined.
+    if (proxy_parent->IsNull()) return GetHeap()->undefined_value();
+    ASSERT(proxy_parent->IsJSGlobalObject());
+    return JSObject::cast(proxy_parent)->GetHiddenProperty(key);
+  }
+  ASSERT(!IsJSGlobalProxy());
+  MaybeObject* hidden_lookup = GetHiddenPropertiesDictionary(false);
+  ASSERT(!hidden_lookup->IsFailure());  // No failure when passing false as arg.
+  if (hidden_lookup->ToObjectUnchecked()->IsUndefined()) {
+    return GetHeap()->undefined_value();
+  }
+  StringDictionary* dictionary =
+      StringDictionary::cast(hidden_lookup->ToObjectUnchecked());
+  int entry = dictionary->FindEntry(key);
+  if (entry == StringDictionary::kNotFound) return GetHeap()->undefined_value();
+  return dictionary->ValueAt(entry);
+}
+
+
+MaybeObject* JSObject::SetHiddenProperty(String* key, Object* value) {
+  if (IsJSGlobalProxy()) {
+    // For a proxy, use the prototype as target object.
+    Object* proxy_parent = GetPrototype();
+    // If the proxy is detached, return undefined.
+    if (proxy_parent->IsNull()) return GetHeap()->undefined_value();
+    ASSERT(proxy_parent->IsJSGlobalObject());
+    return JSObject::cast(proxy_parent)->SetHiddenProperty(key, value);
+  }
+  ASSERT(!IsJSGlobalProxy());
+  MaybeObject* hidden_lookup = GetHiddenPropertiesDictionary(true);
+  StringDictionary* dictionary;
+  if (!hidden_lookup->To<StringDictionary>(&dictionary)) return hidden_lookup;
+
+  // If it was found, check if the key is already in the dictionary.
+  int entry = dictionary->FindEntry(key);
+  if (entry != StringDictionary::kNotFound) {
+    // If key was found, just update the value.
+    dictionary->ValueAtPut(entry, value);
+    return this;
+  }
+  // Key was not already in the dictionary, so add the entry.
+  MaybeObject* insert_result = dictionary->Add(key,
+                                               value,
+                                               PropertyDetails(NONE, NORMAL));
+  StringDictionary* new_dict;
+  if (!insert_result->To<StringDictionary>(&new_dict)) return insert_result;
+  if (new_dict != dictionary) {
+    // If adding the key expanded the dictionary (i.e., Add returned a new
+    // dictionary), store it back to the object.
+    MaybeObject* store_result = SetHiddenPropertiesDictionary(new_dict);
+    if (store_result->IsFailure()) return store_result;
+  }
+  // Return this to mark success.
+  return this;
+}
+
+
+void JSObject::DeleteHiddenProperty(String* key) {
+  if (IsJSGlobalProxy()) {
+    // For a proxy, use the prototype as target object.
+    Object* proxy_parent = GetPrototype();
+    // If the proxy is detached, return immediately.
+    if (proxy_parent->IsNull()) return;
+    ASSERT(proxy_parent->IsJSGlobalObject());
+    JSObject::cast(proxy_parent)->DeleteHiddenProperty(key);
+    return;
+  }
+  MaybeObject* hidden_lookup = GetHiddenPropertiesDictionary(false);
+  ASSERT(!hidden_lookup->IsFailure());  // No failure when passing false as arg.
+  if (hidden_lookup->ToObjectUnchecked()->IsUndefined()) return;
+  StringDictionary* dictionary =
+      StringDictionary::cast(hidden_lookup->ToObjectUnchecked());
+  int entry = dictionary->FindEntry(key);
+  if (entry == StringDictionary::kNotFound) {
+    // Key wasn't in dictionary. Deletion is a success.
+    return;
+  }
+  // Key was in the dictionary. Remove it.
+  dictionary->DeleteProperty(entry, JSReceiver::FORCE_DELETION);
+}
+
+
+bool JSObject::HasHiddenProperties() {
+  LookupResult lookup;
+  LocalLookupRealNamedProperty(GetHeap()->hidden_symbol(), &lookup);
+  return lookup.IsFound();
+}
+
+
+MaybeObject* JSObject::GetHiddenPropertiesDictionary(bool create_if_absent) {
+  ASSERT(!IsJSGlobalProxy());
+  if (HasFastProperties()) {
+    // If the object has fast properties, check whether the first slot
+    // in the descriptor array matches the hidden symbol. Since the
+    // hidden symbol's hash code is zero (and no other string has hash
+    // code zero) it will always occupy the first entry if present.
+    DescriptorArray* descriptors = this->map()->instance_descriptors();
+    if ((descriptors->number_of_descriptors() > 0) &&
+        (descriptors->GetKey(0) == GetHeap()->hidden_symbol()) &&
+        descriptors->IsProperty(0)) {
+      ASSERT(descriptors->GetType(0) == FIELD);
+      Object* hidden_store =
+          this->FastPropertyAt(descriptors->GetFieldIndex(0));
+      return StringDictionary::cast(hidden_store);
+    }
+  } else {
+    PropertyAttributes attributes;
+    // You can't install a getter on a property indexed by the hidden symbol,
+    // so we can be sure that GetLocalPropertyPostInterceptor returns a real
+    // object.
+    Object* lookup =
+        GetLocalPropertyPostInterceptor(this,
+                                        GetHeap()->hidden_symbol(),
+                                        &attributes)->ToObjectUnchecked();
+    if (!lookup->IsUndefined()) {
+      return StringDictionary::cast(lookup);
+    }
+  }
+  if (!create_if_absent) return GetHeap()->undefined_value();
+  const int kInitialSize = 5;
+  MaybeObject* dict_alloc = StringDictionary::Allocate(kInitialSize);
+  StringDictionary* dictionary;
+  if (!dict_alloc->To<StringDictionary>(&dictionary)) return dict_alloc;
+  MaybeObject* store_result =
+      SetPropertyPostInterceptor(GetHeap()->hidden_symbol(),
+                                 dictionary,
+                                 DONT_ENUM,
+                                 kNonStrictMode);
+  if (store_result->IsFailure()) return store_result;
+  return dictionary;
+}
+
+
+MaybeObject* JSObject::SetHiddenPropertiesDictionary(
+    StringDictionary* dictionary) {
+  ASSERT(!IsJSGlobalProxy());
+  ASSERT(HasHiddenProperties());
+  if (HasFastProperties()) {
+    // If the object has fast properties, check whether the first slot
+    // in the descriptor array matches the hidden symbol. Since the
+    // hidden symbol's hash code is zero (and no other string has hash
+    // code zero) it will always occupy the first entry if present.
+    DescriptorArray* descriptors = this->map()->instance_descriptors();
+    if ((descriptors->number_of_descriptors() > 0) &&
+        (descriptors->GetKey(0) == GetHeap()->hidden_symbol()) &&
+        descriptors->IsProperty(0)) {
+      ASSERT(descriptors->GetType(0) == FIELD);
+      this->FastPropertyAtPut(descriptors->GetFieldIndex(0), dictionary);
+      return this;
+    }
+  }
+  MaybeObject* store_result =
+      SetPropertyPostInterceptor(GetHeap()->hidden_symbol(),
+                                 dictionary,
+                                 DONT_ENUM,
+                                 kNonStrictMode);
+  if (store_result->IsFailure()) return store_result;
+  return this;
+}
+
+
 MaybeObject* JSObject::DeletePropertyPostInterceptor(String* name,
                                                      DeleteMode mode) {
   // Check local property, ignore interceptor.
@@ -3201,9 +3619,16 @@
 MaybeObject* JSReceiver::DeleteProperty(String* name, DeleteMode mode) {
   if (IsJSProxy()) {
     return JSProxy::cast(this)->DeletePropertyWithHandler(name, mode);
-  } else {
-    return JSObject::cast(this)->DeleteProperty(name, mode);
   }
+  return JSObject::cast(this)->DeleteProperty(name, mode);
+}
+
+
+MaybeObject* JSReceiver::DeleteElement(uint32_t index, DeleteMode mode) {
+  if (IsJSProxy()) {
+    return JSProxy::cast(this)->DeleteElementWithHandler(index, mode);
+  }
+  return JSObject::cast(this)->DeleteElement(index, mode);
 }
 
 
@@ -3267,7 +3692,8 @@
 bool JSObject::ReferencesObjectFromElements(FixedArray* elements,
                                             ElementsKind kind,
                                             Object* object) {
-  ASSERT(kind == FAST_ELEMENTS || kind == DICTIONARY_ELEMENTS);
+  ASSERT(kind == FAST_ELEMENTS ||
+         kind == DICTIONARY_ELEMENTS);
   if (kind == FAST_ELEMENTS) {
     int length = IsJSArray()
         ? Smi::cast(JSArray::cast(this)->length())->value()
@@ -3287,7 +3713,7 @@
 // Check whether this object references another object.
 bool JSObject::ReferencesObject(Object* obj) {
   Map* map_of_this = map();
-  Heap* heap = map_of_this->heap();
+  Heap* heap = GetHeap();
   AssertNoAllocation no_alloc;
 
   // Is the object the constructor for this object?
@@ -3322,6 +3748,8 @@
       // Raw pixels and external arrays do not reference other
       // objects.
       break;
+    case FAST_SMI_ONLY_ELEMENTS:
+      break;
     case FAST_ELEMENTS:
     case DICTIONARY_ELEMENTS: {
       FixedArray* elements = FixedArray::cast(this->elements());
@@ -3509,15 +3937,6 @@
 
 
 void JSReceiver::LocalLookup(String* name, LookupResult* result) {
-  if (IsJSProxy()) {
-    result->HandlerResult();
-  } else {
-    JSObject::cast(this)->LocalLookup(name, result);
-  }
-}
-
-
-void JSObject::LocalLookup(String* name, LookupResult* result) {
   ASSERT(name->IsString());
 
   Heap* heap = GetHeap();
@@ -3526,28 +3945,36 @@
     Object* proto = GetPrototype();
     if (proto->IsNull()) return result->NotFound();
     ASSERT(proto->IsJSGlobalObject());
-    return JSObject::cast(proto)->LocalLookup(name, result);
+    return JSReceiver::cast(proto)->LocalLookup(name, result);
+  }
+
+  if (IsJSProxy()) {
+    result->HandlerResult(JSProxy::cast(this));
+    return;
   }
 
   // Do not use inline caching if the object is a non-global object
   // that requires access checks.
-  if (!IsJSGlobalProxy() && IsAccessCheckNeeded()) {
+  if (IsAccessCheckNeeded()) {
     result->DisallowCaching();
   }
 
+  JSObject* js_object = JSObject::cast(this);
+
   // Check __proto__ before interceptor.
   if (name->Equals(heap->Proto_symbol()) && !IsJSContextExtensionObject()) {
-    result->ConstantResult(this);
+    result->ConstantResult(js_object);
     return;
   }
 
   // Check for lookup interceptor except when bootstrapping.
-  if (HasNamedInterceptor() && !heap->isolate()->bootstrapper()->IsActive()) {
-    result->InterceptorResult(this);
+  if (js_object->HasNamedInterceptor() &&
+      !heap->isolate()->bootstrapper()->IsActive()) {
+    result->InterceptorResult(js_object);
     return;
   }
 
-  LocalLookupRealNamedProperty(name, result);
+  js_object->LocalLookupRealNamedProperty(name, result);
 }
 
 
@@ -3557,7 +3984,7 @@
   for (Object* current = this;
        current != heap->null_value();
        current = JSObject::cast(current)->GetPrototype()) {
-    JSObject::cast(current)->LocalLookup(name, result);
+    JSReceiver::cast(current)->LocalLookup(name, result);
     if (result->IsProperty()) return;
   }
   result->NotFound();
@@ -3614,6 +4041,7 @@
 
   if (is_element) {
     switch (GetElementsKind()) {
+      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
         break;
@@ -3800,7 +4228,7 @@
                                       bool is_getter,
                                       Object* fun,
                                       PropertyAttributes attributes) {
-  ASSERT(fun->IsJSFunction() || fun->IsUndefined());
+  ASSERT(fun->IsSpecFunction() || fun->IsUndefined());
   Isolate* isolate = GetIsolate();
   // Check access rights if needed.
   if (IsAccessCheckNeeded() &&
@@ -3863,6 +4291,7 @@
 
     // Accessors overwrite previous callbacks (cf. with getters/setters).
     switch (GetElementsKind()) {
+      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
         break;
@@ -4086,7 +4515,7 @@
   // Allocate the code cache if not present.
   if (code_cache()->IsFixedArray()) {
     Object* result;
-    { MaybeObject* maybe_result = code->heap()->AllocateCodeCache();
+    { MaybeObject* maybe_result = GetHeap()->AllocateCodeCache();
       if (!maybe_result->ToObject(&result)) return maybe_result;
     }
     set_code_cache(result);
@@ -4128,7 +4557,7 @@
   // Traverse the transition tree without using a stack.  We do this by
   // reversing the pointers in the maps and descriptor arrays.
   Map* current = this;
-  Map* meta_map = heap()->meta_map();
+  Map* meta_map = GetHeap()->meta_map();
   Object** map_or_index_field = NULL;
   while (current != meta_map) {
     DescriptorArray* d = reinterpret_cast<DescriptorArray*>(
@@ -4149,7 +4578,7 @@
           // of the next map and recording the index in the transition array in
           // the map field of the array.
           Map* next = Map::cast(contents->get(i));
-          next->set_map(current);
+          next->set_map_unsafe(current);
           *map_or_index_field = Smi::FromInt(i + 2);
           current = next;
           map_done = false;
@@ -4174,23 +4603,23 @@
       Object* perhaps_map = prototype_transitions->get(i);
       if (perhaps_map->IsMap()) {
         Map* next = Map::cast(perhaps_map);
-        next->set_map(current);
+        next->set_map_unsafe(current);
         *proto_map_or_index_field =
             Smi::FromInt(i + kProtoTransitionElementsPerEntry);
         current = next;
         continue;
       }
     }
-    *proto_map_or_index_field = heap()->fixed_array_map();
+    *proto_map_or_index_field = GetHeap()->fixed_array_map();
     if (map_or_index_field != NULL) {
-      *map_or_index_field = heap()->fixed_array_map();
+      *map_or_index_field = GetHeap()->fixed_array_map();
     }
 
     // The callback expects a map to have a real map as its map, so we save
     // the map field, which is being used to track the traversal and put the
     // correct map (the meta_map) in place while we do the callback.
     Map* prev = current->map();
-    current->set_map(meta_map);
+    current->set_map_unsafe(meta_map);
     callback(current, data);
     current = prev;
   }
@@ -4406,7 +4835,7 @@
   MUST_USE_RESULT MaybeObject* AsObject() {
     ASSERT(code_ != NULL);
     Object* obj;
-    { MaybeObject* maybe_obj = code_->heap()->AllocateFixedArray(2);
+    { MaybeObject* maybe_obj = code_->GetHeap()->AllocateFixedArray(2);
       if (!maybe_obj->ToObject(&obj)) return maybe_obj;
     }
     FixedArray* pair = FixedArray::cast(obj);
@@ -5995,7 +6424,7 @@
   if (StringShape(this).IsSymbol()) return false;
 
   Map* map = this->map();
-  Heap* heap = map->heap();
+  Heap* heap = GetHeap();
   if (map == heap->string_map()) {
     this->set_map(heap->undetectable_string_map());
     return true;
@@ -6198,29 +6627,45 @@
 }
 
 
+void Map::CreateOneBackPointer(Map* target) {
+#ifdef DEBUG
+  // Verify target.
+  Object* source_prototype = prototype();
+  Object* target_prototype = target->prototype();
+  ASSERT(source_prototype->IsJSReceiver() ||
+         source_prototype->IsMap() ||
+         source_prototype->IsNull());
+  ASSERT(target_prototype->IsJSReceiver() ||
+         target_prototype->IsNull());
+  ASSERT(source_prototype->IsMap() ||
+         source_prototype == target_prototype);
+#endif
+  // Point target back to source.  set_prototype() will not let us set
+  // the prototype to a map, as we do here.
+  *RawField(target, kPrototypeOffset) = this;
+}
+
+
 void Map::CreateBackPointers() {
   DescriptorArray* descriptors = instance_descriptors();
   for (int i = 0; i < descriptors->number_of_descriptors(); i++) {
     if (descriptors->GetType(i) == MAP_TRANSITION ||
         descriptors->GetType(i) == ELEMENTS_TRANSITION ||
         descriptors->GetType(i) == CONSTANT_TRANSITION) {
-      // Get target.
-      Map* target = Map::cast(descriptors->GetValue(i));
-#ifdef DEBUG
-      // Verify target.
-      Object* source_prototype = prototype();
-      Object* target_prototype = target->prototype();
-      ASSERT(source_prototype->IsJSObject() ||
-             source_prototype->IsMap() ||
-             source_prototype->IsNull());
-      ASSERT(target_prototype->IsJSObject() ||
-             target_prototype->IsNull());
-      ASSERT(source_prototype->IsMap() ||
-             source_prototype == target_prototype);
-#endif
-      // Point target back to source.  set_prototype() will not let us set
-      // the prototype to a map, as we do here.
-      *RawField(target, kPrototypeOffset) = this;
+      Object* object = reinterpret_cast<Object*>(descriptors->GetValue(i));
+      if (object->IsMap()) {
+        CreateOneBackPointer(reinterpret_cast<Map*>(object));
+      } else {
+        ASSERT(object->IsFixedArray());
+        ASSERT(descriptors->GetType(i) == ELEMENTS_TRANSITION);
+        FixedArray* array = reinterpret_cast<FixedArray*>(object);
+        for (int i = 0; i < array->length(); ++i) {
+          Map* target = reinterpret_cast<Map*>(array->get(i));
+          if (!target->IsUndefined()) {
+            CreateOneBackPointer(target);
+          }
+        }
+      }
     }
   }
 }
@@ -6247,16 +6692,46 @@
     if (details.type() == MAP_TRANSITION ||
         details.type() == ELEMENTS_TRANSITION ||
         details.type() == CONSTANT_TRANSITION) {
-      Map* target = reinterpret_cast<Map*>(contents->get(i));
-      ASSERT(target->IsHeapObject());
-      if (!target->IsMarked()) {
-        ASSERT(target->IsMap());
-        contents->set_unchecked(i + 1, NullDescriptorDetails);
-        contents->set_null_unchecked(heap, i);
-        ASSERT(target->prototype() == this ||
-               target->prototype() == real_prototype);
-        // Getter prototype() is read-only, set_prototype() has side effects.
-        *RawField(target, Map::kPrototypeOffset) = real_prototype;
+      Object* object = reinterpret_cast<Object*>(contents->get(i));
+      if (object->IsMap()) {
+        Map* target = reinterpret_cast<Map*>(object);
+        ASSERT(target->IsHeapObject());
+        MarkBit map_mark = Marking::MarkBitFrom(target);
+        if (!map_mark.Get()) {
+          ASSERT(target->IsMap());
+          contents->set_unchecked(i + 1, NullDescriptorDetails);
+          contents->set_null_unchecked(heap, i);
+          ASSERT(target->prototype() == this ||
+                 target->prototype() == real_prototype);
+          // Getter prototype() is read-only, set_prototype() has side effects.
+          *RawField(target, Map::kPrototypeOffset) = real_prototype;
+        }
+      } else {
+        ASSERT(object->IsFixedArray());
+        ASSERT(details.type() == ELEMENTS_TRANSITION);
+        FixedArray* array = reinterpret_cast<FixedArray*>(contents->get(i));
+        bool reachable_map_found = false;
+        for (int j = 0; j < array->length(); ++j) {
+          Map* target = reinterpret_cast<Map*>(array->get(j));
+          ASSERT(target->IsHeapObject());
+          MarkBit map_mark = Marking::MarkBitFrom(target);
+          if (!map_mark.Get()) {
+            ASSERT(target->IsMap());
+            array->set_undefined(j);
+            ASSERT(target->prototype() == this ||
+                   target->prototype() == real_prototype);
+            // Getter prototype() is read-only, set_prototype() has side
+            // effects.
+            *RawField(target, Map::kPrototypeOffset) = real_prototype;
+          } else {
+            reachable_map_found = true;
+          }
+        }
+        // If no map was found, make sure the FixedArray also gets collected.
+        if (!reachable_map_found) {
+          contents->set_unchecked(i + 1, NullDescriptorDetails);
+          contents->set_null_unchecked(heap, i);
+        }
       }
     }
   }
@@ -6362,7 +6837,7 @@
       if (!maybe_new_map->ToObject(&new_object)) return maybe_new_map;
     }
     Map* new_map = Map::cast(new_object);
-    Heap* heap = new_map->heap();
+    Heap* heap = new_map->GetHeap();
     set_map(new_map);
     new_map->set_constructor(value);
     new_map->set_non_instance_prototype(true);
@@ -6393,7 +6868,7 @@
   ASSERT(shared()->strict_mode() || map() == global_context->function_map());
 
   set_map(no_prototype_map);
-  set_prototype_or_initial_map(no_prototype_map->heap()->the_hole_value());
+  set_prototype_or_initial_map(no_prototype_map->GetHeap()->the_hole_value());
   return this;
 }
 
@@ -6701,7 +7176,7 @@
     set_construction_count(kGenerousAllocationCount);
   }
   set_initial_map(map);
-  Builtins* builtins = map->heap()->isolate()->builtins();
+  Builtins* builtins = map->GetHeap()->isolate()->builtins();
   ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubGeneric),
             construct_stub());
   set_construct_stub(builtins->builtin(Builtins::kJSConstructStubCountdown));
@@ -6721,8 +7196,9 @@
   // then StartInobjectTracking will be called again the next time the
   // constructor is called. The countdown will continue and (possibly after
   // several more GCs) CompleteInobjectSlackTracking will eventually be called.
-  set_initial_map(map->heap()->raw_unchecked_undefined_value());
-  Builtins* builtins = map->heap()->isolate()->builtins();
+  Heap* heap = map->GetHeap();
+  set_initial_map(heap->raw_unchecked_undefined_value());
+  Builtins* builtins = heap->isolate()->builtins();
   ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubCountdown),
             *RawField(this, kConstructStubOffset));
   set_construct_stub(builtins->builtin(Builtins::kJSConstructStubGeneric));
@@ -6738,7 +7214,7 @@
 
   // Resume inobject slack tracking.
   set_initial_map(map);
-  Builtins* builtins = map->heap()->isolate()->builtins();
+  Builtins* builtins = map->GetHeap()->isolate()->builtins();
   ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubGeneric),
             *RawField(this, kConstructStubOffset));
   set_construct_stub(builtins->builtin(Builtins::kJSConstructStubCountdown));
@@ -6770,7 +7246,7 @@
   ASSERT(live_objects_may_exist() && IsInobjectSlackTrackingInProgress());
   Map* map = Map::cast(initial_map());
 
-  Heap* heap = map->heap();
+  Heap* heap = map->GetHeap();
   set_initial_map(heap->undefined_value());
   Builtins* builtins = heap->isolate()->builtins();
   ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubCountdown),
@@ -6833,7 +7309,7 @@
 
 
 void Code::InvalidateRelocation() {
-  set_relocation_info(heap()->empty_byte_array());
+  set_relocation_info(GetHeap()->empty_byte_array());
 }
 
 
@@ -6867,7 +7343,7 @@
       Handle<Object> p = it.rinfo()->target_object_handle(origin);
       it.rinfo()->set_target_object(*p);
     } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
-      Handle<JSGlobalPropertyCell> cell = it.rinfo()->target_cell_handle();
+      Handle<JSGlobalPropertyCell> cell  = it.rinfo()->target_cell_handle();
       it.rinfo()->set_target_cell(*cell);
     } else if (RelocInfo::IsCodeTarget(mode)) {
       // rewrite code handles in inline cache targets to direct
@@ -7270,8 +7746,10 @@
 }
 
 
-MaybeObject* JSObject::SetFastElementsCapacityAndLength(int capacity,
-                                                        int length) {
+MaybeObject* JSObject::SetFastElementsCapacityAndLength(
+    int capacity,
+    int length,
+    SetFastElementsCapacityMode set_capacity_mode) {
   Heap* heap = GetHeap();
   // We should never end in here with a pixel or external array.
   ASSERT(!HasExternalArrayElements());
@@ -7288,15 +7766,25 @@
   Map* new_map = NULL;
   if (elements()->map() != heap->non_strict_arguments_elements_map()) {
     Object* object;
-    MaybeObject* maybe = map()->GetFastElementsMap();
+    bool has_fast_smi_only_elements =
+        FLAG_smi_only_arrays &&
+        (set_capacity_mode == kAllowSmiOnlyElements) &&
+        (elements()->map()->has_fast_smi_only_elements() ||
+         elements() == heap->empty_fixed_array());
+    ElementsKind elements_kind = has_fast_smi_only_elements
+        ? FAST_SMI_ONLY_ELEMENTS
+        : FAST_ELEMENTS;
+    MaybeObject* maybe = GetElementsTransitionMap(elements_kind);
     if (!maybe->ToObject(&object)) return maybe;
     new_map = Map::cast(object);
   }
 
-  switch (GetElementsKind()) {
+  ElementsKind elements_kind = GetElementsKind();
+  switch (elements_kind) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       AssertNoAllocation no_gc;
-      WriteBarrierMode mode = new_elements->GetWriteBarrierMode(no_gc);
+      WriteBarrierMode mode(new_elements->GetWriteBarrierMode(no_gc));
       CopyFastElementsToFast(FixedArray::cast(elements()), new_elements, mode);
       set_map(new_map);
       set_elements(new_elements);
@@ -7391,13 +7879,15 @@
   }
   FixedDoubleArray* elems = FixedDoubleArray::cast(obj);
 
-  { MaybeObject* maybe_obj = map()->GetFastDoubleElementsMap();
+  { MaybeObject* maybe_obj =
+        GetElementsTransitionMap(FAST_DOUBLE_ELEMENTS);
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
   Map* new_map = Map::cast(obj);
 
   AssertNoAllocation no_gc;
   switch (GetElementsKind()) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       elems->Initialize(FixedArray::cast(elements()));
       break;
@@ -7435,8 +7925,9 @@
   uint32_t new_length = static_cast<uint32_t>(len->Number());
 
   switch (GetElementsKind()) {
-    case FAST_ELEMENTS: {
-    case FAST_DOUBLE_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
+    case FAST_ELEMENTS:
+    case FAST_DOUBLE_ELEMENTS: {
       // Make sure we never try to shrink dense arrays into sparse arrays.
       ASSERT(static_cast<uint32_t>(
           FixedArrayBase::cast(elements())->length()) <= new_length);
@@ -7502,7 +7993,7 @@
   Handle<FixedArray> new_backing = FACTORY->NewFixedArray(new_size);
   // Can't use this any more now because we may have had a GC!
   for (int i = 0; i < old_size; i++) new_backing->set(i, old_backing->get(i));
-  self->SetContent(*new_backing);
+  GetIsolate()->factory()->SetContent(self, new_backing);
 }
 
 
@@ -7525,13 +8016,15 @@
     if (value < 0) return ArrayLengthRangeError(GetHeap());
     ElementsKind elements_kind = GetElementsKind();
     switch (elements_kind) {
+      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS: {
         int old_capacity = FixedArrayBase::cast(elements())->length();
         if (value <= old_capacity) {
           if (IsJSArray()) {
             Object* obj;
-            if (elements_kind == FAST_ELEMENTS) {
+            if (elements_kind == FAST_ELEMENTS ||
+                elements_kind == FAST_SMI_ONLY_ELEMENTS) {
               MaybeObject* maybe_obj = EnsureWritableFastElements();
               if (!maybe_obj->ToObject(&obj)) return maybe_obj;
             }
@@ -7542,7 +8035,8 @@
               } else {
                 Address filler_start;
                 int filler_size;
-                if (GetElementsKind() == FAST_ELEMENTS) {
+                if (elements_kind == FAST_ELEMENTS ||
+                    elements_kind == FAST_SMI_ONLY_ELEMENTS) {
                   FixedArray* fast_elements = FixedArray::cast(elements());
                   fast_elements->set_length(value);
                   filler_start = fast_elements->address() +
@@ -7562,13 +8056,14 @@
             } else {
               // Otherwise, fill the unused tail with holes.
               int old_length = FastD2I(JSArray::cast(this)->length()->Number());
-              if (GetElementsKind() == FAST_ELEMENTS) {
+              if (elements_kind == FAST_ELEMENTS ||
+                  elements_kind == FAST_SMI_ONLY_ELEMENTS) {
                 FixedArray* fast_elements = FixedArray::cast(elements());
                 for (int i = value; i < old_length; i++) {
                   fast_elements->set_the_hole(i);
                 }
               } else {
-                ASSERT(GetElementsKind() == FAST_DOUBLE_ELEMENTS);
+                ASSERT(elements_kind == FAST_DOUBLE_ELEMENTS);
                 FixedDoubleArray* fast_double_elements =
                     FixedDoubleArray::cast(elements());
                 for (int i = value; i < old_length; i++) {
@@ -7584,10 +8079,17 @@
         int new_capacity = value > min ? value : min;
         if (!ShouldConvertToSlowElements(new_capacity)) {
           MaybeObject* result;
-          if (GetElementsKind() == FAST_ELEMENTS) {
-            result = SetFastElementsCapacityAndLength(new_capacity, value);
+          if (elements_kind == FAST_ELEMENTS ||
+              elements_kind == FAST_SMI_ONLY_ELEMENTS) {
+            SetFastElementsCapacityMode set_capacity_mode =
+                elements_kind == FAST_SMI_ONLY_ELEMENTS
+                    ? kAllowSmiOnlyElements
+                    : kDontAllowSmiOnlyElements;
+            result = SetFastElementsCapacityAndLength(new_capacity,
+                                                      value,
+                                                      set_capacity_mode);
           }  else {
-            ASSERT(GetElementsKind() == FAST_DOUBLE_ELEMENTS);
+            ASSERT(elements_kind == FAST_DOUBLE_ELEMENTS);
             result = SetFastDoubleElementsCapacityAndLength(new_capacity,
                                                             value);
           }
@@ -7644,10 +8146,13 @@
   // len is not a number so make the array size one and
   // set only element to len.
   Object* obj;
-  { MaybeObject* maybe_obj = GetHeap()->AllocateFixedArray(1);
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  }
+  MaybeObject* maybe_obj = GetHeap()->AllocateFixedArray(1);
+  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   FixedArray::cast(obj)->set(0, len);
+
+  maybe_obj = EnsureCanContainElements(&len, 1);
+  if (maybe_obj->IsFailure()) return maybe_obj;
+
   if (IsJSArray()) JSArray::cast(this)->set_length(Smi::FromInt(1));
   set_elements(FixedArray::cast(obj));
   return this;
@@ -7693,7 +8198,7 @@
     FixedArray* new_cache;
     // Grow array by factor 2 over and above what we need.
     { MaybeObject* maybe_cache =
-          heap()->AllocateFixedArray(transitions * 2 * step + header);
+          GetHeap()->AllocateFixedArray(transitions * 2 * step + header);
       if (!maybe_cache->To<FixedArray>(&new_cache)) return maybe_cache;
     }
 
@@ -7746,7 +8251,7 @@
   // It is sufficient to validate that the receiver is not in the new prototype
   // chain.
   for (Object* pt = value; pt != heap->null_value(); pt = pt->GetPrototype()) {
-    if (JSObject::cast(pt) == this) {
+    if (JSReceiver::cast(pt) == this) {
       // Cycle detected.
       HandleScope scope(heap->isolate());
       return heap->isolate()->Throw(
@@ -7761,8 +8266,8 @@
     // hidden and set the new prototype on that object.
     Object* current_proto = real_receiver->GetPrototype();
     while (current_proto->IsJSObject() &&
-          JSObject::cast(current_proto)->map()->is_hidden_prototype()) {
-      real_receiver = JSObject::cast(current_proto);
+          JSReceiver::cast(current_proto)->map()->is_hidden_prototype()) {
+      real_receiver = JSReceiver::cast(current_proto);
       current_proto = current_proto->GetPrototype();
     }
   }
@@ -7795,8 +8300,16 @@
 }
 
 
+MaybeObject* JSObject::EnsureCanContainElements(Arguments* args,
+                                                uint32_t first_arg,
+                                                uint32_t arg_count) {
+  return EnsureCanContainElements(args->arguments() - first_arg, arg_count);
+}
+
+
 bool JSObject::HasElementPostInterceptor(JSReceiver* receiver, uint32_t index) {
   switch (GetElementsKind()) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       uint32_t length = IsJSArray() ?
           static_cast<uint32_t>
@@ -7857,6 +8370,11 @@
 
   Object* pt = GetPrototype();
   if (pt->IsNull()) return false;
+  if (pt->IsJSProxy()) {
+    // We need to follow the spec and simulate a call to [[GetOwnProperty]].
+    return JSProxy::cast(pt)->GetElementAttributeWithHandler(
+        receiver, index) != ABSENT;
+  }
   return JSObject::cast(pt)->HasElementWithReceiver(receiver, index);
 }
 
@@ -7933,6 +8451,7 @@
   }
 
   switch (GetElementsKind()) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       uint32_t length = IsJSArray() ?
           static_cast<uint32_t>
@@ -8047,6 +8566,7 @@
 
   ElementsKind kind = GetElementsKind();
   switch (kind) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       uint32_t length = IsJSArray() ?
           static_cast<uint32_t>
@@ -8113,6 +8633,11 @@
 
   Object* pt = GetPrototype();
   if (pt->IsNull()) return false;
+  if (pt->IsJSProxy()) {
+    // We need to follow the spec and simulate a call to [[GetOwnProperty]].
+    return JSProxy::cast(pt)->GetElementAttributeWithHandler(
+        receiver, index) != ABSENT;
+  }
   return JSObject::cast(pt)->HasElementWithReceiver(receiver, index);
 }
 
@@ -8189,9 +8714,9 @@
   // __defineGetter__ callback
   if (structure->IsFixedArray()) {
     Object* getter = FixedArray::cast(structure)->get(kGetterIndex);
-    if (getter->IsJSFunction()) {
-      return Object::GetPropertyWithDefinedGetter(receiver,
-                                                  JSFunction::cast(getter));
+    if (getter->IsSpecFunction()) {
+      // TODO(rossberg): nicer would be to cast to some JSCallable here...
+      return GetPropertyWithDefinedGetter(receiver, JSReceiver::cast(getter));
     }
     // Getter is not a function.
     return isolate->heap()->undefined_value();
@@ -8246,8 +8771,9 @@
 
   if (structure->IsFixedArray()) {
     Handle<Object> setter(FixedArray::cast(structure)->get(kSetterIndex));
-    if (setter->IsJSFunction()) {
-     return SetPropertyWithDefinedSetter(JSFunction::cast(*setter), value);
+    if (setter->IsSpecFunction()) {
+      // TODO(rossberg): nicer would be to cast to some JSCallable here...
+      return SetPropertyWithDefinedSetter(JSReceiver::cast(*setter), value);
     } else {
       if (strict_mode == kNonStrictMode) {
         return value;
@@ -8297,7 +8823,8 @@
                                       Object* value,
                                       StrictModeFlag strict_mode,
                                       bool check_prototype) {
-  ASSERT(HasFastElements() || HasFastArgumentsElements());
+  ASSERT(HasFastTypeElements() ||
+         HasFastArgumentsElements());
 
   FixedArray* backing_store = FixedArray::cast(elements());
   if (backing_store->map() == GetHeap()->non_strict_arguments_elements_map()) {
@@ -8322,6 +8849,24 @@
 
   // Check whether there is extra space in fixed array.
   if (index < length) {
+    if (HasFastSmiOnlyElements()) {
+      if (!value->IsSmi()) {
+        // If the value is a number, transition from smi-only to
+        // FastDoubleElements.
+        if (value->IsNumber()) {
+          MaybeObject* maybe =
+              SetFastDoubleElementsCapacityAndLength(length, length);
+          if (maybe->IsFailure()) return maybe;
+          FixedDoubleArray::cast(elements())->set(index, value->Number());
+          return value;
+        }
+        // Value is not a number, transition to generic fast elements.
+        MaybeObject* maybe_new_map = GetElementsTransitionMap(FAST_ELEMENTS);
+        Map* new_map;
+        if (!maybe_new_map->To<Map>(&new_map)) return maybe_new_map;
+        set_map(new_map);
+      }
+    }
     backing_store->set(index, value);
     if (IsJSArray()) {
       // Update the length of the array if needed.
@@ -8341,8 +8886,14 @@
     if (!ShouldConvertToSlowElements(new_capacity)) {
       ASSERT(static_cast<uint32_t>(new_capacity) > index);
       Object* new_elements;
+      SetFastElementsCapacityMode set_capacity_mode =
+          value->IsSmi() && HasFastSmiOnlyElements()
+              ? kAllowSmiOnlyElements
+              : kDontAllowSmiOnlyElements;
       MaybeObject* maybe =
-          SetFastElementsCapacityAndLength(new_capacity, index + 1);
+          SetFastElementsCapacityAndLength(new_capacity,
+                                           index + 1,
+                                           set_capacity_mode);
       if (!maybe->ToObject(&new_elements)) return maybe;
       FixedArray::cast(new_elements)->set(index, value);
       return value;
@@ -8448,7 +8999,9 @@
     }
     MaybeObject* result = CanConvertToFastDoubleElements()
         ? SetFastDoubleElementsCapacityAndLength(new_length, new_length)
-        : SetFastElementsCapacityAndLength(new_length, new_length);
+        : SetFastElementsCapacityAndLength(new_length,
+                                           new_length,
+                                           kDontAllowSmiOnlyElements);
     if (result->IsFailure()) return result;
 #ifdef DEBUG
     if (FLAG_trace_normalization) {
@@ -8492,10 +9045,15 @@
     if (IsJSArray()) {
       CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
     }
-    MaybeObject* maybe_obj =
-        SetFastElementsCapacityAndLength(elms_length, length);
+    MaybeObject* maybe_obj = SetFastElementsCapacityAndLength(
+        elms_length,
+        length,
+        kDontAllowSmiOnlyElements);
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-    return SetFastElement(index, value, strict_mode, check_prototype);
+    return SetFastElement(index,
+                          value,
+                          strict_mode,
+                          check_prototype);
   }
 
   double double_value = value_is_smi
@@ -8546,6 +9104,17 @@
 }
 
 
+MaybeObject* JSReceiver::SetElement(uint32_t index,
+                                    Object* value,
+                                    StrictModeFlag strict_mode,
+                                    bool check_proto) {
+  return IsJSProxy()
+      ? JSProxy::cast(this)->SetElementWithHandler(index, value, strict_mode)
+      : JSObject::cast(this)->SetElement(index, value, strict_mode, check_proto)
+  ;
+}
+
+
 MaybeObject* JSObject::SetElement(uint32_t index,
                                   Object* value,
                                   StrictModeFlag strict_mode,
@@ -8592,6 +9161,7 @@
                                                     bool check_prototype) {
   Isolate* isolate = GetIsolate();
   switch (GetElementsKind()) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS:
       return SetFastElement(index, value, strict_mode, check_prototype);
     case FAST_DOUBLE_ELEMENTS:
@@ -8754,6 +9324,7 @@
         break;
       }
       // Fall through.
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS:
       backing_store = FixedArray::cast(backing_store_base);
       *capacity = backing_store->length();
@@ -9029,6 +9600,7 @@
   if (this->IsStringObjectWithCharacterAt(index)) return true;
 
   switch (GetElementsKind()) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       uint32_t length = IsJSArray() ?
           static_cast<uint32_t>(
@@ -9268,6 +9840,7 @@
                                   PropertyAttributes filter) {
   int counter = 0;
   switch (GetElementsKind()) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       int length = IsJSArray() ?
           Smi::cast(JSArray::cast(this)->length())->value() :
@@ -10133,8 +10706,6 @@
 // If the object is in dictionary mode, it is converted to fast elements
 // mode.
 MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
-  ASSERT(!HasExternalArrayElements());
-
   Heap* heap = GetHeap();
 
   if (HasDictionaryElements()) {
@@ -10148,7 +10719,7 @@
     // Convert to fast elements.
 
     Object* obj;
-    { MaybeObject* maybe_obj = map()->GetFastElementsMap();
+    { MaybeObject* maybe_obj = GetElementsTransitionMap(FAST_ELEMENTS);
       if (!maybe_obj->ToObject(&obj)) return maybe_obj;
     }
     Map* new_map = Map::cast(obj);
@@ -10164,13 +10735,16 @@
 
     set_map(new_map);
     set_elements(fast_elements);
+  } else if (HasExternalArrayElements()) {
+    // External arrays cannot have holes or undefined elements.
+    return Smi::FromInt(ExternalArray::cast(elements())->length());
   } else if (!HasFastDoubleElements()) {
     Object* obj;
     { MaybeObject* maybe_obj = EnsureWritableFastElements();
       if (!maybe_obj->ToObject(&obj)) return maybe_obj;
     }
   }
-  ASSERT(HasFastElements() || HasFastDoubleElements());
+  ASSERT(HasFastTypeElements() || HasFastDoubleElements());
 
   // Collect holes at the end, undefined before that and the rest at the
   // start, and return the number of non-hole, non-undefined values.
@@ -11294,9 +11868,9 @@
 }
 
 
-Object* ObjectHashTable::Lookup(JSObject* key) {
+Object* ObjectHashTable::Lookup(JSReceiver* key) {
   // If the object does not have an identity hash, it was never used as a key.
-  MaybeObject* maybe_hash = key->GetIdentityHash(JSObject::OMIT_CREATION);
+  MaybeObject* maybe_hash = key->GetIdentityHash(OMIT_CREATION);
   if (maybe_hash->IsFailure()) return GetHeap()->undefined_value();
   int entry = FindEntry(key);
   if (entry == kNotFound) return GetHeap()->undefined_value();
@@ -11304,10 +11878,10 @@
 }
 
 
-MaybeObject* ObjectHashTable::Put(JSObject* key, Object* value) {
+MaybeObject* ObjectHashTable::Put(JSReceiver* key, Object* value) {
   // Make sure the key object has an identity hash code.
   int hash;
-  { MaybeObject* maybe_hash = key->GetIdentityHash(JSObject::ALLOW_CREATION);
+  { MaybeObject* maybe_hash = key->GetIdentityHash(ALLOW_CREATION);
     if (maybe_hash->IsFailure()) return maybe_hash;
     hash = Smi::cast(maybe_hash->ToObjectUnchecked())->value();
   }
@@ -11337,7 +11911,7 @@
 }
 
 
-void ObjectHashTable::AddEntry(int entry, JSObject* key, Object* value) {
+void ObjectHashTable::AddEntry(int entry, JSReceiver* key, Object* value) {
   set(EntryToIndex(entry), key);
   set(EntryToIndex(entry) + 1, value);
   ElementAdded();
diff --git a/src/objects.h b/src/objects.h
index d9c7a82..7e531b2 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -38,6 +38,7 @@
 #elif V8_TARGET_ARCH_MIPS
 #include "mips/constants-mips.h"
 #endif
+#include "v8checks.h"
 
 //
 // Most object types in the V8 JavaScript are described in this file.
@@ -136,8 +137,13 @@
 namespace internal {
 
 enum ElementsKind {
-  // The "fast" kind for tagged values. Must be first to make it possible
-  // to efficiently check maps if they have fast elements.
+  // The "fast" kind for elements that only contain SMI values. Must be first
+  // to make it possible to efficiently check maps for this kind.
+  FAST_SMI_ONLY_ELEMENTS,
+
+  // The "fast" kind for tagged values. Must be second to make it possible to
+  // efficiently check maps for this and the FAST_SMI_ONLY_ELEMENTS kind
+  // together at once.
   FAST_ELEMENTS,
 
   // The "fast" kind for unwrapped, non-tagged double values.
@@ -160,7 +166,7 @@
   // Derived constants from ElementsKind
   FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_BYTE_ELEMENTS,
   LAST_EXTERNAL_ARRAY_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS,
-  FIRST_ELEMENTS_KIND = FAST_ELEMENTS,
+  FIRST_ELEMENTS_KIND = FAST_SMI_ONLY_ELEMENTS,
   LAST_ELEMENTS_KIND = EXTERNAL_PIXEL_ELEMENTS
 };
 
@@ -174,7 +180,6 @@
   PropertyDetails(PropertyAttributes attributes,
                   PropertyType type,
                   int index = 0) {
-    ASSERT(type != ELEMENTS_TRANSITION);
     ASSERT(TypeField::is_valid(type));
     ASSERT(AttributesField::is_valid(attributes));
     ASSERT(StorageField::is_valid(index));
@@ -188,23 +193,6 @@
     ASSERT(index == this->index());
   }
 
-  PropertyDetails(PropertyAttributes attributes,
-                  PropertyType type,
-                  ElementsKind elements_kind) {
-    ASSERT(type == ELEMENTS_TRANSITION);
-    ASSERT(TypeField::is_valid(type));
-    ASSERT(AttributesField::is_valid(attributes));
-    ASSERT(StorageField::is_valid(static_cast<int>(elements_kind)));
-
-    value_ = TypeField::encode(type)
-        | AttributesField::encode(attributes)
-        | StorageField::encode(static_cast<int>(elements_kind));
-
-    ASSERT(type == this->type());
-    ASSERT(attributes == this->attributes());
-    ASSERT(elements_kind == this->elements_kind());
-  }
-
   // Conversion for storing details as Object*.
   explicit inline PropertyDetails(Smi* smi);
   inline Smi* AsSmi();
@@ -226,11 +214,6 @@
 
   int index() { return StorageField::decode(value_); }
 
-  ElementsKind elements_kind() {
-    ASSERT(type() == ELEMENTS_TRANSITION);
-    return static_cast<ElementsKind>(StorageField::decode(value_));
-  }
-
   inline PropertyDetails AsDeleted();
 
   static bool IsValidIndex(int index) {
@@ -276,6 +259,13 @@
 };
 
 
+// Indicates whether a get method should implicitly create the object looked up.
+enum CreationFlag {
+  ALLOW_CREATION,
+  OMIT_CREATION
+};
+
+
 // Instance size sentinel for objects of variable size.
 static const int kVariableSizeSentinel = 0;
 
@@ -329,6 +319,7 @@
   V(HEAP_NUMBER_TYPE)                                                          \
   V(FOREIGN_TYPE)                                                              \
   V(BYTE_ARRAY_TYPE)                                                           \
+  V(FREE_SPACE_TYPE)                                                           \
   /* Note: the order of these external array */                                \
   /* types is relied upon in */                                                \
   /* Object::IsExternalArray(). */                                             \
@@ -585,6 +576,7 @@
   HEAP_NUMBER_TYPE,
   FOREIGN_TYPE,
   BYTE_ARRAY_TYPE,
+  FREE_SPACE_TYPE,
   EXTERNAL_BYTE_ARRAY_TYPE,  // FIRST_EXTERNAL_ARRAY_TYPE
   EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
   EXTERNAL_SHORT_ARRAY_TYPE,
@@ -621,24 +613,30 @@
 
   JS_MESSAGE_OBJECT_TYPE,
 
-  JS_VALUE_TYPE,  // FIRST_NON_CALLABLE_OBJECT_TYPE, FIRST_JS_RECEIVER_TYPE
+  // All the following types are subtypes of JSReceiver, which corresponds to
+  // objects in the JS sense. The first and the last type in this range are
+  // the two forms of function. This organization enables using the same
+  // compares for checking the JS_RECEIVER/SPEC_OBJECT range and the
+  // NONCALLABLE_JS_OBJECT range.
+  JS_FUNCTION_PROXY_TYPE,  // FIRST_JS_RECEIVER_TYPE, FIRST_JS_PROXY_TYPE
+  JS_PROXY_TYPE,  // LAST_JS_PROXY_TYPE
+
+  JS_VALUE_TYPE,  // FIRST_JS_OBJECT_TYPE
   JS_OBJECT_TYPE,
   JS_CONTEXT_EXTENSION_OBJECT_TYPE,
   JS_GLOBAL_OBJECT_TYPE,
   JS_BUILTINS_OBJECT_TYPE,
   JS_GLOBAL_PROXY_TYPE,
   JS_ARRAY_TYPE,
-  JS_PROXY_TYPE,
   JS_WEAK_MAP_TYPE,
 
-  JS_REGEXP_TYPE,  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE
+  JS_REGEXP_TYPE,
 
-  JS_FUNCTION_TYPE,  // FIRST_CALLABLE_SPEC_OBJECT_TYPE
-  JS_FUNCTION_PROXY_TYPE,  // LAST_CALLABLE_SPEC_OBJECT_TYPE
+  JS_FUNCTION_TYPE,  // LAST_JS_OBJECT_TYPE, LAST_JS_RECEIVER_TYPE
 
   // Pseudo-types
   FIRST_TYPE = 0x0,
-  LAST_TYPE = JS_FUNCTION_PROXY_TYPE,
+  LAST_TYPE = JS_FUNCTION_TYPE,
   INVALID_TYPE = FIRST_TYPE - 1,
   FIRST_NONSTRING_TYPE = MAP_TYPE,
   // Boundaries for testing for an external array.
@@ -651,17 +649,23 @@
   // are not continuous in this enum! The enum ranges instead reflect the
   // external class names, where proxies are treated as either ordinary objects,
   // or functions.
-  FIRST_JS_RECEIVER_TYPE = JS_VALUE_TYPE,
+  FIRST_JS_RECEIVER_TYPE = JS_FUNCTION_PROXY_TYPE,
   LAST_JS_RECEIVER_TYPE = LAST_TYPE,
-  // Boundaries for testing the types for which typeof is "object".
-  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_VALUE_TYPE,
-  LAST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_REGEXP_TYPE,
-  // Boundaries for testing the types for which typeof is "function".
-  FIRST_CALLABLE_SPEC_OBJECT_TYPE = JS_FUNCTION_TYPE,
-  LAST_CALLABLE_SPEC_OBJECT_TYPE = JS_FUNCTION_PROXY_TYPE,
+  // Boundaries for testing the types represented as JSObject
+  FIRST_JS_OBJECT_TYPE = JS_VALUE_TYPE,
+  LAST_JS_OBJECT_TYPE = LAST_TYPE,
+  // Boundaries for testing the types represented as JSProxy
+  FIRST_JS_PROXY_TYPE = JS_FUNCTION_PROXY_TYPE,
+  LAST_JS_PROXY_TYPE = JS_PROXY_TYPE,
   // Boundaries for testing whether the type is a JavaScript object.
-  FIRST_SPEC_OBJECT_TYPE = FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
-  LAST_SPEC_OBJECT_TYPE = LAST_CALLABLE_SPEC_OBJECT_TYPE
+  FIRST_SPEC_OBJECT_TYPE = FIRST_JS_RECEIVER_TYPE,
+  LAST_SPEC_OBJECT_TYPE = LAST_JS_RECEIVER_TYPE,
+  // Boundaries for testing the types for which typeof is "object".
+  FIRST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_PROXY_TYPE,
+  LAST_NONCALLABLE_SPEC_OBJECT_TYPE = JS_REGEXP_TYPE,
+  // Note that the types for which typeof is "function" are not continuous.
+  // Define this so that we can put assertions on discrete checks.
+  NUM_OF_CALLABLE_SPEC_OBJECT_TYPES = 2
 };
 
 static const int kExternalArrayTypeCount = LAST_EXTERNAL_ARRAY_TYPE -
@@ -697,6 +701,7 @@
 class FixedArrayBase;
 class ObjectVisitor;
 class StringStream;
+class Failure;
 
 struct ValueInfo : public Malloced {
   ValueInfo() : type(FIRST_TYPE), ptr(NULL), str(NULL), number(0) { }
@@ -710,7 +715,6 @@
 // A template-ized version of the IsXXX functions.
 template <class C> static inline bool Is(Object* obj);
 
-class Failure;
 
 class MaybeObject BASE_EMBEDDED {
  public:
@@ -748,7 +752,7 @@
   // Prints this object with details.
   inline void Print() {
     Print(stdout);
-  };
+  }
   inline void PrintLn() {
     PrintLn(stdout);
   }
@@ -791,6 +795,7 @@
   V(ExternalDoubleArray)                       \
   V(ExternalPixelArray)                        \
   V(ByteArray)                                 \
+  V(FreeSpace)                                 \
   V(JSReceiver)                                \
   V(JSObject)                                  \
   V(JSContextExtensionObject)                  \
@@ -835,6 +840,9 @@
   V(AccessCheckNeeded)                         \
   V(JSGlobalPropertyCell)                      \
 
+
+class JSReceiver;
+
 // Object is the abstract superclass for all classes in the
 // object hierarchy.
 // Object does not use any virtual functions to avoid the
@@ -859,6 +867,7 @@
 #undef DECLARE_STRUCT_PREDICATE
 
   INLINE(bool IsSpecObject());
+  INLINE(bool IsSpecFunction());
 
   // Oddball testing.
   INLINE(bool IsUndefined());
@@ -867,6 +876,10 @@
   INLINE(bool IsTrue());
   INLINE(bool IsFalse());
   inline bool IsArgumentsMarker();
+  inline bool NonFailureIsHeapObject();
+
+  // Filler objects (fillers and free space objects).
+  inline bool IsFiller();
 
   // Extract the number.
   inline double Number();
@@ -903,15 +916,8 @@
                                            LookupResult* result,
                                            String* key,
                                            PropertyAttributes* attributes);
-  MUST_USE_RESULT MaybeObject* GetPropertyWithCallback(Object* receiver,
-                                                       Object* structure,
-                                                       String* name,
-                                                       Object* holder);
-  MUST_USE_RESULT MaybeObject* GetPropertyWithHandler(Object* receiver,
-                                                      String* name,
-                                                      Object* handler);
   MUST_USE_RESULT MaybeObject* GetPropertyWithDefinedGetter(Object* receiver,
-                                                            JSFunction* getter);
+                                                            JSReceiver* getter);
 
   inline MaybeObject* GetElement(uint32_t index);
   // For use when we know that no exception can be thrown.
@@ -1095,101 +1101,13 @@
   // View this map word as a forwarding address.
   inline HeapObject* ToForwardingAddress();
 
-  // Marking phase of full collection: the map word of live objects is
-  // marked, and may be marked as overflowed (eg, the object is live, its
-  // children have not been visited, and it does not fit in the marking
-  // stack).
+  static inline MapWord FromRawValue(uintptr_t value) {
+    return MapWord(value);
+  }
 
-  // True if this map word's mark bit is set.
-  inline bool IsMarked();
-
-  // Return this map word but with its mark bit set.
-  inline void SetMark();
-
-  // Return this map word but with its mark bit cleared.
-  inline void ClearMark();
-
-  // True if this map word's overflow bit is set.
-  inline bool IsOverflowed();
-
-  // Return this map word but with its overflow bit set.
-  inline void SetOverflow();
-
-  // Return this map word but with its overflow bit cleared.
-  inline void ClearOverflow();
-
-
-  // Compacting phase of a full compacting collection: the map word of live
-  // objects contains an encoding of the original map address along with the
-  // forwarding address (represented as an offset from the first live object
-  // in the same page as the (old) object address).
-
-  // Create a map word from a map address and a forwarding address offset.
-  static inline MapWord EncodeAddress(Address map_address, int offset);
-
-  // Return the map address encoded in this map word.
-  inline Address DecodeMapAddress(MapSpace* map_space);
-
-  // Return the forwarding offset encoded in this map word.
-  inline int DecodeOffset();
-
-
-  // During serialization: the map word is used to hold an encoded
-  // address, and possibly a mark bit (set and cleared with SetMark
-  // and ClearMark).
-
-  // Create a map word from an encoded address.
-  static inline MapWord FromEncodedAddress(Address address);
-
-  inline Address ToEncodedAddress();
-
-  // Bits used by the marking phase of the garbage collector.
-  //
-  // The first word of a heap object is normally a map pointer. The last two
-  // bits are tagged as '01' (kHeapObjectTag). We reuse the last two bits to
-  // mark an object as live and/or overflowed:
-  //   last bit = 0, marked as alive
-  //   second bit = 1, overflowed
-  // An object is only marked as overflowed when it is marked as live while
-  // the marking stack is overflowed.
-  static const int kMarkingBit = 0;  // marking bit
-  static const int kMarkingMask = (1 << kMarkingBit);  // marking mask
-  static const int kOverflowBit = 1;  // overflow bit
-  static const int kOverflowMask = (1 << kOverflowBit);  // overflow mask
-
-  // Forwarding pointers and map pointer encoding. On 32 bit all the bits are
-  // used.
-  // +-----------------+------------------+-----------------+
-  // |forwarding offset|page offset of map|page index of map|
-  // +-----------------+------------------+-----------------+
-  //          ^                 ^                  ^
-  //          |                 |                  |
-  //          |                 |          kMapPageIndexBits
-  //          |         kMapPageOffsetBits
-  // kForwardingOffsetBits
-  static const int kMapPageOffsetBits = kPageSizeBits - kMapAlignmentBits;
-  static const int kForwardingOffsetBits = kPageSizeBits - kObjectAlignmentBits;
-#ifdef V8_HOST_ARCH_64_BIT
-  static const int kMapPageIndexBits = 16;
-#else
-  // Use all the 32-bits to encode on a 32-bit platform.
-  static const int kMapPageIndexBits =
-      32 - (kMapPageOffsetBits + kForwardingOffsetBits);
-#endif
-
-  static const int kMapPageIndexShift = 0;
-  static const int kMapPageOffsetShift =
-      kMapPageIndexShift + kMapPageIndexBits;
-  static const int kForwardingOffsetShift =
-      kMapPageOffsetShift + kMapPageOffsetBits;
-
-  // Bit masks covering the different parts the encoding.
-  static const uintptr_t kMapPageIndexMask =
-      (1 << kMapPageOffsetShift) - 1;
-  static const uintptr_t kMapPageOffsetMask =
-      ((1 << kForwardingOffsetShift) - 1) & ~kMapPageIndexMask;
-  static const uintptr_t kForwardingOffsetMask =
-      ~(kMapPageIndexMask | kMapPageOffsetMask);
+  inline uintptr_t ToRawValue() {
+    return value_;
+  }
 
  private:
   // HeapObject calls the private constructor and directly reads the value.
@@ -1209,6 +1127,7 @@
   // information.
   inline Map* map();
   inline void set_map(Map* value);
+  inline void set_map_unsafe(Map* value);
 
   // During garbage collection, the map word of a heap object does not
   // necessarily contain a map pointer.
@@ -1216,8 +1135,8 @@
   inline void set_map_word(MapWord map_word);
 
   // The Heap the object was allocated in. Used also to access Isolate.
-  // This method can not be used during GC, it ASSERTs this.
   inline Heap* GetHeap();
+
   // Convenience method to get current isolate. This method can be
   // accessed only when its result is the same as
   // Isolate::Current(), it ASSERTs this. See also comment for GetHeap.
@@ -1246,31 +1165,6 @@
   // GC internal.
   inline int SizeFromMap(Map* map);
 
-  // Support for the marking heap objects during the marking phase of GC.
-  // True if the object is marked live.
-  inline bool IsMarked();
-
-  // Mutate this object's map pointer to indicate that the object is live.
-  inline void SetMark();
-
-  // Mutate this object's map pointer to remove the indication that the
-  // object is live (ie, partially restore the map pointer).
-  inline void ClearMark();
-
-  // True if this object is marked as overflowed.  Overflowed objects have
-  // been reached and marked during marking of the heap, but their children
-  // have not necessarily been marked and they have not been pushed on the
-  // marking stack.
-  inline bool IsOverflowed();
-
-  // Mutate this object's map pointer to indicate that the object is
-  // overflowed.
-  inline void SetOverflow();
-
-  // Mutate this object's map pointer to remove the indication that the
-  // object is overflowed (ie, partially restore the map pointer).
-  inline void ClearOverflow();
-
   // Returns the field at offset in obj, as a read/write Object* reference.
   // Does no checking, and is safe to use during GC, while maps are invalid.
   // Does not invoke write barrier, so should only be assigned to
@@ -1294,18 +1188,14 @@
     HeapObjectPrint(stdout);
   }
   void HeapObjectPrint(FILE* out);
-#endif
-#ifdef DEBUG
-  void HeapObjectVerify();
-  inline void VerifyObjectField(int offset);
-  inline void VerifySmiField(int offset);
-#endif
-
-#ifdef OBJECT_PRINT
   void PrintHeader(FILE* out, const char* id);
 #endif
 
 #ifdef DEBUG
+  void HeapObjectVerify();
+  inline void VerifyObjectField(int offset);
+  inline void VerifySmiField(int offset);
+
   // Verify a pointer is a valid HeapObject pointer that points to object
   // areas in the heap.
   static void VerifyHeapPointer(Object* p);
@@ -1448,8 +1338,18 @@
                                            Object* value,
                                            PropertyAttributes attributes,
                                            StrictModeFlag strict_mode);
+  MUST_USE_RESULT MaybeObject* SetPropertyWithDefinedSetter(JSReceiver* setter,
+                                                            Object* value);
 
   MUST_USE_RESULT MaybeObject* DeleteProperty(String* name, DeleteMode mode);
+  MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode);
+
+  // Set the index'th array element.
+  // Can cause GC, or return failure if GC is required.
+  MUST_USE_RESULT MaybeObject* SetElement(uint32_t index,
+                                          Object* value,
+                                          StrictModeFlag strict_mode,
+                                          bool check_prototype);
 
   // Returns the class name ([[Class]] property in the specification).
   String* class_name();
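
// Illustrative sketch, not part of the patch: with SetElement hoisted onto
// JSReceiver, proxies and ordinary objects can share one store path. The
// helper name is invented for illustration.
static MaybeObject* StoreElementSketch(JSReceiver* receiver,
                                       uint32_t index,
                                       Object* value) {
  // check_prototype == true gives the normal JavaScript store semantics.
  return receiver->SetElement(index, value, kNonStrictMode, true);
}
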
@@ -1466,6 +1366,7 @@
   // Can cause a GC.
   inline bool HasProperty(String* name);
   inline bool HasLocalProperty(String* name);
+  inline bool HasElement(uint32_t index);
 
   // Return the object's prototype (might be Heap::null_value()).
   inline Object* GetPrototype();
@@ -1474,11 +1375,18 @@
   MUST_USE_RESULT MaybeObject* SetPrototype(Object* value,
                                             bool skip_hidden_prototypes);
 
+  // Retrieves a permanent object identity hash code. The undefined value might
+  // be returned in case none has been created yet and OMIT_CREATION was used.
+  inline MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
+
   // Lookup a property.  If found, the result is valid and has
   // detailed information.
   void LocalLookup(String* name, LookupResult* result);
   void Lookup(String* name, LookupResult* result);
 
+ protected:
+  Smi* GenerateIdentityHash();
+
  private:
   PropertyAttributes GetPropertyAttribute(JSReceiver* receiver,
                                           LookupResult* result,
@@ -1525,8 +1433,14 @@
   MUST_USE_RESULT inline MaybeObject* ResetElements();
   inline ElementsKind GetElementsKind();
   inline ElementsAccessor* GetElementsAccessor();
+  inline bool HasFastSmiOnlyElements();
   inline bool HasFastElements();
+  // Returns true if an object has either FAST_ELEMENT or FAST_SMI_ONLY_ELEMENT
+  // elements.  TODO(danno): Rename HasFastTypeElements to HasFastElements() and
+  // HasFastElements to HasFastObjectElements.
+  inline bool HasFastTypeElements();
   inline bool HasFastDoubleElements();
+  inline bool HasNonStrictArgumentsElements();
   inline bool HasDictionaryElements();
   inline bool HasExternalPixelElements();
   inline bool HasExternalArrayElements();
@@ -1554,6 +1468,11 @@
   // a dictionary, and it will stay a dictionary.
   MUST_USE_RESULT MaybeObject* PrepareSlowElementsForSort(uint32_t limit);
 
+  MUST_USE_RESULT MaybeObject* GetPropertyWithCallback(Object* receiver,
+                                                       Object* structure,
+                                                       String* name);
+
+  // Can cause GC.
   MUST_USE_RESULT MaybeObject* SetPropertyForResult(LookupResult* result,
                                            String* key,
                                            Object* value,
@@ -1571,8 +1490,6 @@
       Object* value,
       JSObject* holder,
       StrictModeFlag strict_mode);
-  MUST_USE_RESULT MaybeObject* SetPropertyWithDefinedSetter(JSFunction* setter,
-                                                            Object* value);
   MUST_USE_RESULT MaybeObject* SetPropertyWithInterceptor(
       String* name,
       Object* value,
@@ -1660,37 +1577,28 @@
   // Accessors for hidden properties object.
   //
   // Hidden properties are not local properties of the object itself.
-  // Instead they are stored on an auxiliary JSObject stored as a local
+  // Instead they are stored in an auxiliary structure kept as a local
   // property with a special name Heap::hidden_symbol(). But if the
   // receiver is a JSGlobalProxy then the auxiliary object is a property
-  // of its prototype.
-  //
-  // Has/Get/SetHiddenPropertiesObject methods don't allow the holder to be
-  // a JSGlobalProxy. Use BypassGlobalProxy method above to get to the real
-  // holder.
-  //
-  // These accessors do not touch interceptors or accessors.
-  inline bool HasHiddenPropertiesObject();
-  inline Object* GetHiddenPropertiesObject();
-  MUST_USE_RESULT inline MaybeObject* SetHiddenPropertiesObject(
-      Object* hidden_obj);
+  // of its prototype, and if the receiver is a detached proxy, hidden
+  // properties cannot be stored at all.
 
-  // Indicates whether the hidden properties object should be created.
-  enum HiddenPropertiesFlag { ALLOW_CREATION, OMIT_CREATION };
+  // Sets a hidden property on this object. Returns this object if successful,
+  // undefined if called on a detached proxy, and a failure if a GC
+  // is required.
+  MaybeObject* SetHiddenProperty(String* key, Object* value);
+  // Gets the value of a hidden property with the given key. Returns undefined
+  // if the property doesn't exist (or if called on a detached proxy),
+  // otherwise returns the value set for the key.
+  Object* GetHiddenProperty(String* key);
+  // Deletes a hidden property. Deleting a non-existing property is
+  // considered successful.
+  void DeleteHiddenProperty(String* key);
+  // Returns true if the object has a property with the hidden symbol as name.
+  bool HasHiddenProperties();
 
-  // Retrieves the hidden properties object.
-  //
-  // The undefined value might be returned in case no hidden properties object
-  // is present and creation was omitted.
-  inline bool HasHiddenProperties();
-  MUST_USE_RESULT MaybeObject* GetHiddenProperties(HiddenPropertiesFlag flag);
-
-  // Retrieves a permanent object identity hash code.
-  //
-  // The identity hash is stored as a hidden property. The undefined value might
-  // be returned in case no hidden properties object is present and creation was
-  // omitted.
-  MUST_USE_RESULT MaybeObject* GetIdentityHash(HiddenPropertiesFlag flag);
+  MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
+  MUST_USE_RESULT MaybeObject* SetIdentityHash(Object* hash, CreationFlag flag);
 
   MUST_USE_RESULT MaybeObject* DeleteProperty(String* name, DeleteMode mode);
   MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode);
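
// Illustrative sketch, not part of the patch: the new per-key hidden property
// API. 'key' is assumed to be a symbol String* owned by the embedder/runtime;
// SetHiddenProperty returns a failure when a GC is needed first.
static MaybeObject* HiddenPropertySketch(JSObject* obj,
                                         String* key,
                                         Object* value) {
  MaybeObject* stored = obj->SetHiddenProperty(key, value);
  if (stored->IsFailure()) return stored;           // caller retries after GC
  Object* read_back = obj->GetHiddenProperty(key);  // undefined if never set
  USE(read_back);
  obj->DeleteHiddenProperty(key);  // deleting a missing key also succeeds
  return obj;
}
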
@@ -1698,6 +1606,19 @@
   // Tests for the fast common case for property enumeration.
   bool IsSimpleEnum();
 
+  inline void ValidateSmiOnlyElements();
+
+  // Makes sure that this object can contain non-smi Object as elements.
+  inline MaybeObject* EnsureCanContainNonSmiElements();
+
+  // Makes sure that this object can contain the specified elements.
+  inline MaybeObject* EnsureCanContainElements(Object** elements,
+                                               uint32_t count);
+  inline MaybeObject* EnsureCanContainElements(FixedArray* elements);
+  MaybeObject* EnsureCanContainElements(Arguments* arguments,
+                                        uint32_t first_arg,
+                                        uint32_t arg_count);
+
   // Do we want to keep the elements in fast case when increasing the
   // capacity?
   bool ShouldConvertToSlowElements(int new_capacity);
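
// Illustrative sketch, not part of the patch: a store site that may write a
// non-smi into an object that could still have FAST_SMI_ONLY_ELEMENTS widens
// the elements kind first; the transition itself may require a GC.
static MaybeObject* PrepareElementStoreSketch(JSObject* obj, Object* value) {
  if (!value->IsSmi()) {
    MaybeObject* transitioned = obj->EnsureCanContainNonSmiElements();
    if (transitioned->IsFailure()) return transitioned;
  }
  return obj;  // the backing store can now hold 'value'
}
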
@@ -1711,7 +1632,6 @@
   bool CanConvertToFastDoubleElements();
 
   // Tells whether the index'th element is present.
-  inline bool HasElement(uint32_t index);
   bool HasElementWithReceiver(JSReceiver* receiver, uint32_t index);
 
   // Computes the new capacity when expanding the elements of a JSObject.
@@ -1747,6 +1667,7 @@
                                               Object* value,
                                               StrictModeFlag strict_mode,
                                               bool check_prototype);
+
   MUST_USE_RESULT MaybeObject* SetDictionaryElement(uint32_t index,
                                                     Object* value,
                                                     StrictModeFlag strict_mode,
@@ -1769,11 +1690,18 @@
   // The undefined object if index is out of bounds.
   MaybeObject* GetElementWithInterceptor(Object* receiver, uint32_t index);
 
+  enum SetFastElementsCapacityMode {
+    kAllowSmiOnlyElements,
+    kDontAllowSmiOnlyElements
+  };
+
   // Replace the elements' backing store with fast elements of the given
   // capacity.  Update the length for JSArrays.  Returns the new backing
   // store.
-  MUST_USE_RESULT MaybeObject* SetFastElementsCapacityAndLength(int capacity,
-                                                                int length);
+  MUST_USE_RESULT MaybeObject* SetFastElementsCapacityAndLength(
+      int capacity,
+      int length,
+      SetFastElementsCapacityMode set_capacity_mode);
   MUST_USE_RESULT MaybeObject* SetFastDoubleElementsCapacityAndLength(
       int capacity,
       int length);
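
// Illustrative sketch, not part of the patch: growing the fast backing store
// now takes an explicit mode so a FAST_SMI_ONLY_ELEMENTS array is not silently
// promoted to FAST_ELEMENTS. The capacity and length values are arbitrary.
static MaybeObject* GrowBackingStoreSketch(JSObject* obj) {
  return obj->SetFastElementsCapacityAndLength(
      16, 8, JSObject::kAllowSmiOnlyElements);
}
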
@@ -1801,10 +1729,6 @@
   inline Object* GetInternalField(int index);
   inline void SetInternalField(int index, Object* value);
 
-  // Lookup a property.  If found, the result is valid and has
-  // detailed information.
-  void LocalLookup(String* name, LookupResult* result);
-
   // The following lookup functions skip interceptors.
   void LocalLookupRealNamedProperty(String* name, LookupResult* result);
   void LookupRealNamedProperty(String* name, LookupResult* result);
@@ -1860,6 +1784,11 @@
       Object* value,
       PropertyAttributes attributes);
 
+  // Returns a new map with all transitions dropped from the object's current
+  // map and the ElementsKind set.
+  MUST_USE_RESULT MaybeObject* GetElementsTransitionMap(
+      ElementsKind elements_kind);
+
   // Converts a descriptor of any other type to a real field,
   // backed by the properties array.  Descriptors of visible
   // types, such as CONSTANT_FUNCTION, keep their enumeration order.
@@ -1925,11 +1854,14 @@
                                        WriteBarrierMode mode
                                        = UPDATE_WRITE_BARRIER);
 
-  // initializes the body after properties slot, properties slot is
-  // initialized by set_properties
-  // Note: this call does not update write barrier, it is caller's
-  // reponsibility to ensure that *v* can be collected without WB here.
-  inline void InitializeBody(int object_size, Object* value);
+  // Initializes the body starting after the properties slot; the properties
+  // slot itself is initialized by set_properties.  Fills the pre-allocated
+  // fields with pre_allocated_value and the rest with filler_value.
+  // Note: this call does not update the write barrier; the caller is
+  // responsible for ensuring that |filler_value| can be collected without it.
+  inline void InitializeBody(Map* map,
+                             Object* pre_allocated_value,
+                             Object* filler_value);
 
   // Check whether this object references another object
   bool ReferencesObject(Object* obj);
@@ -2054,6 +1986,18 @@
       StrictModeFlag strict_mode,
       bool check_prototype);
 
+  // Searches the prototype chain for a callback setter and sets the property
+  // with the setter if it finds one. The '*found' flag indicates whether
+  // a setter was found or not.
+  // This function can cause GC and can return a failure result with
+  // '*found==true'.
+  MUST_USE_RESULT MaybeObject* SetPropertyWithCallbackSetterInPrototypes(
+      String* name,
+      Object* value,
+      PropertyAttributes attributes,
+      bool* found,
+      StrictModeFlag strict_mode);
+
   MUST_USE_RESULT MaybeObject* DeletePropertyPostInterceptor(String* name,
                                                              DeleteMode mode);
   MUST_USE_RESULT MaybeObject* DeletePropertyWithInterceptor(String* name);
@@ -2092,6 +2036,15 @@
 
   void LookupInDescriptor(String* name, LookupResult* result);
 
+  // Returns the hidden properties backing store object, currently
+  // a StringDictionary, stored on this object.
+  // If no hidden properties object has been put on this object,
+  // returns undefined, unless create_if_absent is true, in which case
+  // a new dictionary is created, added to this object, and returned.
+  MaybeObject* GetHiddenPropertiesDictionary(bool create_if_absent);
+  // Updates the existing hidden properties dictionary.
+  MaybeObject* SetHiddenPropertiesDictionary(StringDictionary* dictionary);
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject);
 };
 
@@ -2917,7 +2870,7 @@
       JSObject* obj,
       int unused_property_fields);
 
-  // Find entry for key otherwise return kNotFound. Optimzed version of
+  // Find entry for key, otherwise return kNotFound. Optimized version of
   // HashTable::FindEntry.
   int FindEntry(String* key);
 };
@@ -2980,10 +2933,10 @@
 
 class ObjectHashTableShape {
  public:
-  static inline bool IsMatch(JSObject* key, Object* other);
-  static inline uint32_t Hash(JSObject* key);
-  static inline uint32_t HashForObject(JSObject* key, Object* object);
-  MUST_USE_RESULT static inline MaybeObject* AsObject(JSObject* key);
+  static inline bool IsMatch(JSReceiver* key, Object* other);
+  static inline uint32_t Hash(JSReceiver* key);
+  static inline uint32_t HashForObject(JSReceiver* key, Object* object);
+  MUST_USE_RESULT static inline MaybeObject* AsObject(JSReceiver* key);
   static const int kPrefixSize = 0;
   static const int kEntrySize = 2;
 };
@@ -2991,7 +2944,7 @@
 
 // ObjectHashTable maps keys that are JavaScript objects to object values by
 // using the identity hash of the key for hashing purposes.
-class ObjectHashTable: public HashTable<ObjectHashTableShape, JSObject*> {
+class ObjectHashTable: public HashTable<ObjectHashTableShape, JSReceiver*> {
  public:
   static inline ObjectHashTable* cast(Object* obj) {
     ASSERT(obj->IsHashTable());
@@ -3000,16 +2953,16 @@
 
   // Looks up the value associated with the given key. The undefined value is
   // returned in case the key is not present.
-  Object* Lookup(JSObject* key);
+  Object* Lookup(JSReceiver* key);
 
   // Adds (or overwrites) the value associated with the given key. Mapping a
   // key to the undefined value causes removal of the whole entry.
-  MUST_USE_RESULT MaybeObject* Put(JSObject* key, Object* value);
+  MUST_USE_RESULT MaybeObject* Put(JSReceiver* key, Object* value);
 
  private:
   friend class MarkCompactCollector;
 
-  void AddEntry(int entry, JSObject* key, Object* value);
+  void AddEntry(int entry, JSReceiver* key, Object* value);
   void RemoveEntry(int entry, Heap* heap);
   inline void RemoveEntry(int entry);
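
// Illustrative sketch, not part of the patch: with JSReceiver* keys the table
// (used, for example, as the JSWeakMap backing store) also accepts proxies.
// Put() may grow the table, so the possibly new table is returned.
static MaybeObject* WeakMapPutSketch(ObjectHashTable* table,
                                     JSReceiver* key,
                                     Object* value) {
  MaybeObject* maybe = table->Put(key, value);
  Object* raw;
  if (!maybe->ToObject(&raw)) return maybe;  // allocation failed, needs GC
  ObjectHashTable* new_table = ObjectHashTable::cast(raw);
  ASSERT(value->IsUndefined() || new_table->Lookup(key) == value);
  return new_table;
}
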
 
@@ -3079,11 +3032,12 @@
 };
 
 
-// ByteArray represents fixed sized byte arrays.  Used by the outside world,
-// such as PCRE, and also by the memory allocator and garbage collector to
-// fill in free blocks in the heap.
+// ByteArray represents fixed-size byte arrays.  Used for the relocation info
+// that is attached to code objects.
 class ByteArray: public FixedArrayBase {
  public:
+  inline int Size() { return RoundUp(length() + kHeaderSize, kPointerSize); }
+
   // Setter and getter.
   inline byte get(int index);
   inline void set(int index, byte value);
@@ -3140,6 +3094,44 @@
 };
 
 
+// FreeSpace represents fixed-size areas of the heap that are not currently in
+// use.  Used by the heap and GC.
+class FreeSpace: public HeapObject {
+ public:
+  // [size]: size of the free space including the header.
+  inline int size();
+  inline void set_size(int value);
+
+  inline int Size() { return size(); }
+
+  // Casting.
+  static inline FreeSpace* cast(Object* obj);
+
+#ifdef OBJECT_PRINT
+  inline void FreeSpacePrint() {
+    FreeSpacePrint(stdout);
+  }
+  void FreeSpacePrint(FILE* out);
+#endif
+#ifdef DEBUG
+  void FreeSpaceVerify();
+#endif
+
+  // Layout description.
+  // Size is smi tagged when it is stored.
+  static const int kSizeOffset = HeapObject::kHeaderSize;
+  static const int kHeaderSize = kSizeOffset + kPointerSize;
+
+  static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
+
+  // Maximal size of a single FreeSpace.
+  static const int kMaxSize = 512 * MB;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeSpace);
+};
+
+
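// Illustrative sketch, not part of the patch: the sweeper turns an unused gap
// into a FreeSpace object so heap iteration and the free lists can treat the
// gap like any other sized heap object. 'gap' and its map are assumed to have
// been set up by the allocator already.
static void RecordFreeGapSketch(FreeSpace* gap, int size_in_bytes) {
  ASSERT(size_in_bytes >= FreeSpace::kHeaderSize);
  ASSERT(size_in_bytes <= FreeSpace::kMaxSize);
  gap->set_size(size_in_bytes);  // stored smi-tagged in the header
  ASSERT(gap->Size() == size_in_bytes);
}
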
 // An ExternalArray represents a fixed-size array of primitive values
 // which live outside the JavaScript heap. Its subclasses are used to
 // implement the CanvasArray types being defined in the WebGL
@@ -3673,6 +3665,11 @@
   inline int major_key();
   inline void set_major_key(int value);
 
+  // For stubs, tells whether they should always exist, so that they can be
+  // called from other stubs.
+  inline bool is_pregenerated();
+  inline void set_is_pregenerated(bool value);
+
   // [optimizable]: For FUNCTION kind, tells if it is optimizable.
   inline bool optimizable();
   inline void set_optimizable(bool value);
@@ -3732,6 +3729,11 @@
   inline byte to_boolean_state();
   inline void set_to_boolean_state(byte value);
 
+  // For kind STUB with major_key == CallFunction, tells whether there is
+  // a function cache in the instruction stream.
+  inline bool has_function_cache();
+  inline void set_has_function_cache(bool flag);
+
   // Get the safepoint entry for the given pc.
   SafepointEntry GetSafepointEntry(Address pc);
 
@@ -3836,10 +3838,6 @@
   void CodeVerify();
 #endif
 
-  // Returns the isolate/heap this code object belongs to.
-  inline Isolate* isolate();
-  inline Heap* heap();
-
   // Max loop nesting marker used to postpose OSR. We don't take loop
   // nesting that is deeper than 5 levels into account.
   static const int kMaxLoopNestingMarker = 6;
@@ -3875,6 +3873,7 @@
   static const int kBinaryOpTypeOffset = kStubMajorKeyOffset + 1;
   static const int kCompareStateOffset = kStubMajorKeyOffset + 1;
   static const int kToBooleanTypeOffset = kStubMajorKeyOffset + 1;
+  static const int kHasFunctionCacheOffset = kStubMajorKeyOffset + 1;
 
   static const int kFullCodeFlags = kOptimizableOffset + 1;
   class FullCodeFlagsHasDeoptimizationSupportField:
@@ -3894,9 +3893,10 @@
   class KindField: public BitField<Kind, 7, 4> {};
   class CacheHolderField: public BitField<InlineCacheHolderFlag, 11, 1> {};
   class ExtraICStateField: public BitField<ExtraICState, 12, 2> {};
+  class IsPregeneratedField: public BitField<bool, 14, 1> {};
 
   // Signed field cannot be encoded using the BitField class.
-  static const int kArgumentsCountShift = 14;
+  static const int kArgumentsCountShift = 15;
   static const int kArgumentsCountMask = ~((1 << kArgumentsCountShift) - 1);
 
   static const int kFlagsNotUsedInLookup =
@@ -4032,8 +4032,12 @@
         (bit_field2() & kElementsKindMask) >> kElementsKindShift);
   }
 
+  // Tells whether the instance has fast elements that are only Smis.
+  inline bool has_fast_smi_only_elements() {
+    return elements_kind() == FAST_SMI_ONLY_ELEMENTS;
+  }
+
   // Tells whether the instance has fast elements.
-  // Equivalent to instance->GetElementsKind() == FAST_ELEMENTS.
   inline bool has_fast_elements() {
     return elements_kind() == FAST_ELEMENTS;
   }
@@ -4042,6 +4046,10 @@
     return elements_kind() == FAST_DOUBLE_ELEMENTS;
   }
 
+  inline bool has_non_strict_arguments_elements() {
+    return elements_kind() == NON_STRICT_ARGUMENTS_ELEMENTS;
+  }
+
   inline bool has_external_array_elements() {
     ElementsKind kind(elements_kind());
     return kind >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
@@ -4100,6 +4108,7 @@
   //    1 + 2 * i: prototype
   //    2 + 2 * i: target map
   DECL_ACCESSORS(prototype_transitions, FixedArray)
+
   inline FixedArray* unchecked_prototype_transitions();
 
   static const int kProtoTransitionHeaderSize = 1;
@@ -4109,14 +4118,14 @@
   static const int kProtoTransitionMapOffset = 1;
 
   inline int NumberOfProtoTransitions() {
-    FixedArray* cache = unchecked_prototype_transitions();
+    FixedArray* cache = prototype_transitions();
     if (cache->length() == 0) return 0;
     return
         Smi::cast(cache->get(kProtoTransitionNumberOfEntriesOffset))->value();
   }
 
   inline void SetNumberOfProtoTransitions(int value) {
-    FixedArray* cache = unchecked_prototype_transitions();
+    FixedArray* cache = prototype_transitions();
     ASSERT(cache->length() != 0);
     cache->set_unchecked(kProtoTransitionNumberOfEntriesOffset,
                          Smi::FromInt(value));
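
// Illustrative sketch, not part of the patch: following the layout comment
// above, entry i of the prototype transition cache keeps its prototype at
// slot 1 + 2 * i and the map it transitions to at slot 2 + 2 * i.
static Map* ProtoTransitionTargetSketch(Map* map, int i) {
  ASSERT(i < map->NumberOfProtoTransitions());
  FixedArray* cache = map->prototype_transitions();
  return Map::cast(cache->get(2 + 2 * i));
}
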
@@ -4138,27 +4147,6 @@
   // instance descriptors.
   MUST_USE_RESULT MaybeObject* CopyDropTransitions();
 
-  // Returns this map if it already has elements that are fast, otherwise
-  // returns a copy of the map, with all transitions dropped from the
-  // descriptors and the ElementsKind set to FAST_ELEMENTS.
-  MUST_USE_RESULT inline MaybeObject* GetFastElementsMap();
-
-  // Returns this map if it already has fast elements that are doubles,
-  // otherwise returns a copy of the map, with all transitions dropped from the
-  // descriptors and the ElementsKind set to FAST_DOUBLE_ELEMENTS.
-  MUST_USE_RESULT inline MaybeObject* GetFastDoubleElementsMap();
-
-  // Returns this map if already has dictionary elements, otherwise returns a
-  // copy of the map, with all transitions dropped from the descriptors and the
-  // ElementsKind set to DICTIONARY_ELEMENTS.
-  MUST_USE_RESULT inline MaybeObject* GetSlowElementsMap();
-
-  // Returns a new map with all transitions dropped from the descriptors and the
-  // ElementsKind set.
-  MUST_USE_RESULT MaybeObject* GetElementsTransitionMap(
-      ElementsKind elements_kind,
-      bool safe_to_add_transition);
-
   // Returns the property index for name (only valid for FAST MODE).
   int PropertyIndexFor(String* name);
 
@@ -4197,6 +4185,8 @@
   // This is undone in MarkCompactCollector::ClearNonLiveTransitions().
   void CreateBackPointers();
 
+  void CreateOneBackPointer(Map* transition_target);
+
   // Set all map transitions from this map to dead maps to null.
   // Also, restore the original prototype on the targets of these
   // transitions, so that we do not process this map again while
@@ -4233,10 +4223,6 @@
   inline int visitor_id();
   inline void set_visitor_id(int visitor_id);
 
-  // Returns the isolate/heap this map belongs to.
-  inline Isolate* isolate();
-  inline Heap* heap();
-
   typedef void (*TraverseCallback)(Map* map, void* data);
 
   void TraverseTransitionTree(TraverseCallback callback, void* data);
@@ -4273,7 +4259,7 @@
   static const int kSize = MAP_POINTER_ALIGN(kPadStart);
 
   // Layout of pointer fields. Heap iteration code relies on them
-  // being continiously allocated.
+  // being continuously allocated.
   static const int kPointerFieldsBeginOffset = Map::kPrototypeOffset;
   static const int kPointerFieldsEndOffset =
       Map::kPrototypeTransitionsOffset + kPointerSize;
@@ -4313,7 +4299,7 @@
   static const int kStringWrapperSafeForDefaultValueOf = 2;
   static const int kAttachedToSharedFunctionInfo = 3;
   // No bits can be used after kElementsKindFirstBit, they are all reserved for
-  // storing ElementKind.  for anything other than storing the ElementKind.
+  // storing ElementKind.
   static const int kElementsKindShift = 4;
   static const int kElementsKindBitCount = 4;
 
@@ -4322,6 +4308,9 @@
       ((1 << (kElementsKindShift + kElementsKindBitCount)) - 1);
   static const int8_t kMaximumBitField2FastElementValue = static_cast<int8_t>(
       (FAST_ELEMENTS + 1) << Map::kElementsKindShift) - 1;
+  static const int8_t kMaximumBitField2FastSmiOnlyElementValue =
+      static_cast<int8_t>((FAST_SMI_ONLY_ELEMENTS + 1) <<
+                          Map::kElementsKindShift) - 1;
 
   // Bit positions for bit field 3
   static const int kIsShared = 0;
@@ -6226,6 +6215,9 @@
   // Casting.
   static inline SeqString* cast(Object* obj);
 
+  // Layout description.
+  static const int kHeaderSize = String::kSize;
+
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(SeqString);
 };
@@ -6259,12 +6251,8 @@
     return OBJECT_POINTER_ALIGN(kHeaderSize + length * kCharSize);
   }
 
-  // Layout description.
-  static const int kHeaderSize = String::kSize;
-  static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
-
   // Maximal memory usage for a single sequential ASCII string.
-  static const int kMaxSize = 512 * MB;
+  static const int kMaxSize = 512 * MB - 1;
   // Maximal length of a single sequential ASCII string.
   // Q.v. String::kMaxLength which is the maximal size of concatenated strings.
   static const int kMaxLength = (kMaxSize - kHeaderSize);
@@ -6313,12 +6301,8 @@
     return OBJECT_POINTER_ALIGN(kHeaderSize + length * kShortSize);
   }
 
-  // Layout description.
-  static const int kHeaderSize = String::kSize;
-  static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
-
   // Maximal memory usage for a single sequential two-byte string.
-  static const int kMaxSize = 512 * MB;
+  static const int kMaxSize = 512 * MB - 1;
   // Maximal length of a single sequential two-byte string.
   // Q.v. String::kMaxLength which is the maximal size of concatenated strings.
   static const int kMaxLength = (kMaxSize - kHeaderSize) / sizeof(uint16_t);
@@ -6480,8 +6464,8 @@
   typedef v8::String::ExternalAsciiStringResource Resource;
 
   // The underlying resource.
-  inline Resource* resource();
-  inline void set_resource(Resource* buffer);
+  inline const Resource* resource();
+  inline void set_resource(const Resource* buffer);
 
   // Dispatched behavior.
   uint16_t ExternalAsciiStringGet(int index);
@@ -6517,8 +6501,8 @@
   typedef v8::String::ExternalStringResource Resource;
 
   // The underlying string resource.
-  inline Resource* resource();
-  inline void set_resource(Resource* buffer);
+  inline const Resource* resource();
+  inline void set_resource(const Resource* buffer);
 
   // Dispatched behavior.
   uint16_t ExternalTwoByteStringGet(int index);
@@ -6669,6 +6653,9 @@
   static const byte kUndefined = 5;
   static const byte kOther = 6;
 
+  // The ToNumber value of a hidden oddball is a negative smi.
+  static const int kLeastHiddenOddballNumber = -5;
+
   typedef FixedBodyDescriptor<kToStringOffset,
                               kToNumberOffset + kPointerSize,
                               kSize> BodyDescriptor;
@@ -6704,10 +6691,6 @@
                               kValueOffset + kPointerSize,
                               kSize> BodyDescriptor;
 
-  // Returns the isolate/heap this cell object belongs to.
-  inline Isolate* isolate();
-  inline Heap* heap();
-
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSGlobalPropertyCell);
 };
@@ -6719,25 +6702,56 @@
   // [handler]: The handler property.
   DECL_ACCESSORS(handler, Object)
 
+  // [hash]: The hash code property (undefined if not initialized yet).
+  DECL_ACCESSORS(hash, Object)
+
   // Casting.
   static inline JSProxy* cast(Object* obj);
 
   bool HasPropertyWithHandler(String* name);
+  bool HasElementWithHandler(uint32_t index);
+
+  MUST_USE_RESULT MaybeObject* GetPropertyWithHandler(
+      Object* receiver,
+      String* name);
+  MUST_USE_RESULT MaybeObject* GetElementWithHandler(
+      Object* receiver,
+      uint32_t index);
 
   MUST_USE_RESULT MaybeObject* SetPropertyWithHandler(
       String* name,
       Object* value,
       PropertyAttributes attributes,
       StrictModeFlag strict_mode);
+  MUST_USE_RESULT MaybeObject* SetElementWithHandler(
+      uint32_t index,
+      Object* value,
+      StrictModeFlag strict_mode);
+
+  // If the handler defines an accessor property, invoke its setter (or throw
+  // if only a getter exists) and set *found to true; otherwise *found is false.
+  MUST_USE_RESULT MaybeObject* SetPropertyWithHandlerIfDefiningSetter(
+      String* name,
+      Object* value,
+      PropertyAttributes attributes,
+      StrictModeFlag strict_mode,
+      bool* found);
 
   MUST_USE_RESULT MaybeObject* DeletePropertyWithHandler(
       String* name,
       DeleteMode mode);
+  MUST_USE_RESULT MaybeObject* DeleteElementWithHandler(
+      uint32_t index,
+      DeleteMode mode);
 
   MUST_USE_RESULT PropertyAttributes GetPropertyAttributeWithHandler(
       JSReceiver* receiver,
-      String* name,
-      bool* has_exception);
+      String* name);
+  MUST_USE_RESULT PropertyAttributes GetElementAttributeWithHandler(
+      JSReceiver* receiver,
+      uint32_t index);
+
+  MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
 
   // Turn this into an (empty) JSObject.
   void Fix();
@@ -6745,6 +6759,13 @@
   // Initializes the body after the handler slot.
   inline void InitializeBody(int object_size, Object* value);
 
+  // Invoke a trap by name. If the trap does not exist on this's handler,
+  // but derived_trap is non-NULL, invoke that instead.  May cause GC.
+  Handle<Object> CallTrap(const char* name,
+                          Handle<Object> derived_trap,
+                          int argc,
+                          Handle<Object> args[]);
+
   // Dispatched behavior.
 #ifdef OBJECT_PRINT
   inline void JSProxyPrint() {
@@ -6760,7 +6781,8 @@
   // size as a virgin JSObject. This is essential for becoming a JSObject
   // upon freeze.
   static const int kHandlerOffset = HeapObject::kHeaderSize;
-  static const int kPaddingOffset = kHandlerOffset + kPointerSize;
+  static const int kHashOffset = kHandlerOffset + kPointerSize;
+  static const int kPaddingOffset = kHashOffset + kPointerSize;
   static const int kSize = JSObject::kHeaderSize;
   static const int kHeaderSize = kPaddingOffset;
   static const int kPaddingSize = kSize - kPaddingOffset;
@@ -6768,7 +6790,7 @@
   STATIC_CHECK(kPaddingSize >= 0);
 
   typedef FixedBodyDescriptor<kHandlerOffset,
-                              kHandlerOffset + kPointerSize,
+                              kPaddingOffset,
                               kSize> BodyDescriptor;
 
  private:
@@ -6799,7 +6821,7 @@
 #endif
 
   // Layout description.
-  static const int kCallTrapOffset = kHandlerOffset + kPointerSize;
+  static const int kCallTrapOffset = JSProxy::kPaddingOffset;
   static const int kConstructTrapOffset = kCallTrapOffset + kPointerSize;
   static const int kPaddingOffset = kConstructTrapOffset + kPointerSize;
   static const int kSize = JSFunction::kSize;
@@ -6820,7 +6842,7 @@
 class JSWeakMap: public JSObject {
  public:
   // [table]: the backing hash table mapping keys to values.
-  DECL_ACCESSORS(table, ObjectHashTable)
+  DECL_ACCESSORS(table, Object)
 
   // [next]: linked list of encountered weak maps during GC.
   DECL_ACCESSORS(next, Object)
@@ -6913,7 +6935,7 @@
   MUST_USE_RESULT MaybeObject* Initialize(int capacity);
 
   // Set the content of the array to the content of storage.
-  inline void SetContent(FixedArray* storage);
+  inline MaybeObject* SetContent(FixedArray* storage);
 
   // Casting.
   static inline JSArray* cast(Object* obj);
@@ -7433,6 +7455,13 @@
   // Handy shorthand for visiting a single pointer.
   virtual void VisitPointer(Object** p) { VisitPointers(p, p + 1); }
 
+  // Visit pointer embedded into a code object.
+  virtual void VisitEmbeddedPointer(Code* host, Object** p) {
+    // Default implementation for the convenience of users that do
+    // not care about the host object.
+    VisitPointer(p);
+  }
+
   // Visits a contiguous arrays of external references (references to the C++
   // heap) in the half-open range [start, end). Any or all of the values
   // may be modified on return.
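
// Illustrative sketch, not part of the patch: a visitor that wants to know
// which Code object a pointer is embedded in overrides the new hook; the
// default above simply forwards to VisitPointer.
class HostAwareVisitorSketch : public ObjectVisitor {
 public:
  virtual void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) {
      // ... process *p ...
    }
  }
  virtual void VisitEmbeddedPointer(Code* host, Object** p) {
    USE(host);  // e.g. remember which code object referenced *p
    VisitPointer(p);
  }
};
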
diff --git a/src/parser.cc b/src/parser.cc
index e8d1810..ad8850e 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -1103,14 +1103,16 @@
 
 Statement* Parser::ParseSourceElement(ZoneStringList* labels,
                                       bool* ok) {
+  // (Ecma 262 5th Edition, clause 14):
+  // SourceElement:
+  //    Statement
+  //    FunctionDeclaration
+  //
+  // In harmony mode we additionally allow the following production:
+  // SourceElement:
+  //    LetDeclaration
+
   if (peek() == Token::FUNCTION) {
-    // FunctionDeclaration is only allowed in the context of SourceElements
-    // (Ecma 262 5th Edition, clause 14):
-    // SourceElement:
-    //    Statement
-    //    FunctionDeclaration
-    // Common language extension is to allow function declaration in place
-    // of any statement. This language extension is disabled in strict mode.
     return ParseFunctionDeclaration(ok);
   } else if (peek() == Token::LET) {
     return ParseVariableStatement(kSourceElement, ok);
@@ -1124,7 +1126,7 @@
                                   int end_token,
                                   bool* ok) {
   // SourceElements ::
-  //   (Statement)* <end_token>
+  //   (SourceElement)* <end_token>
 
   // Allocate a target stack to use for this set of source
   // elements. This way, all scripts and functions get their own
@@ -1295,8 +1297,13 @@
     }
 
     case Token::FUNCTION: {
-      // In strict mode, FunctionDeclaration is only allowed in the context
-      // of SourceElements.
+      // FunctionDeclaration is only allowed in the context of SourceElements
+      // (Ecma 262 5th Edition, clause 14):
+      // SourceElement:
+      //    Statement
+      //    FunctionDeclaration
+      // Common language extension is to allow function declaration in place
+      // of any statement. This language extension is disabled in strict mode.
       if (top_scope_->is_strict_mode()) {
         ReportMessageAt(scanner().peek_location(), "strict_function",
                         Vector<const char*>::empty());
@@ -1555,6 +1562,11 @@
 
 
 Block* Parser::ParseScopedBlock(ZoneStringList* labels, bool* ok) {
+  // The harmony mode uses source elements instead of statements.
+  //
+  // Block ::
+  //   '{' SourceElement* '}'
+
   // Construct block expecting 16 statements.
   Block* body = new(zone()) Block(isolate(), labels, 16, false);
   Scope* saved_scope = top_scope_;
@@ -1753,6 +1765,8 @@
           value->AsCall() == NULL &&
           value->AsCallNew() == NULL) {
         fni_->Infer();
+      } else {
+        fni_->RemoveLastFunction();
       }
     }
 
@@ -2503,6 +2517,8 @@
          || op == Token::ASSIGN)
         && (right->AsCall() == NULL && right->AsCallNew() == NULL)) {
       fni_->Infer();
+    } else {
+      fni_->RemoveLastFunction();
     }
     fni_->Leave();
   }
@@ -2614,7 +2630,7 @@
           case Token::NE_STRICT: cmp = Token::EQ_STRICT; break;
           default: break;
         }
-        x = NewCompareNode(cmp, x, y, position);
+        x = new(zone()) CompareOperation(isolate(), cmp, x, y, position);
         if (cmp != op) {
           // The comparison was negated - add a NOT.
           x = new(zone()) UnaryOperation(isolate(), Token::NOT, x, position);
@@ -2630,27 +2646,6 @@
 }
 
 
-Expression* Parser::NewCompareNode(Token::Value op,
-                                   Expression* x,
-                                   Expression* y,
-                                   int position) {
-  ASSERT(op != Token::NE && op != Token::NE_STRICT);
-  if (op == Token::EQ || op == Token::EQ_STRICT) {
-    bool is_strict = (op == Token::EQ_STRICT);
-    Literal* x_literal = x->AsLiteral();
-    if (x_literal != NULL && x_literal->IsNull()) {
-      return new(zone()) CompareToNull(isolate(), is_strict, y);
-    }
-
-    Literal* y_literal = y->AsLiteral();
-    if (y_literal != NULL && y_literal->IsNull()) {
-      return new(zone()) CompareToNull(isolate(), is_strict, x);
-    }
-  }
-  return new(zone()) CompareOperation(isolate(), op, x, y, position);
-}
-
-
 Expression* Parser::ParseUnaryExpression(bool* ok) {
   // UnaryExpression ::
   //   PostfixExpression
diff --git a/src/parser.h b/src/parser.h
index 3312f2f..d834ed1 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -533,11 +533,6 @@
   ObjectLiteral::Property* ParseObjectLiteralGetSet(bool is_getter, bool* ok);
   Expression* ParseRegExpLiteral(bool seen_equal, bool* ok);
 
-  Expression* NewCompareNode(Token::Value op,
-                             Expression* x,
-                             Expression* y,
-                             int position);
-
   // Populate the constant properties fixed array for a materialized object
   // literal.
   void BuildObjectLiteralConstantProperties(
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index b152dae..855ebf7 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -88,6 +88,9 @@
     uint64_t rnd1 = V8::RandomPrivate(isolate);
     uint64_t rnd2 = V8::RandomPrivate(isolate);
     uint64_t raw_addr = (rnd1 << 32) ^ rnd2;
+    // Currently available CPUs have 48 bits of virtual addressing.  Truncate
+    // the hint address to 46 bits to give the kernel a fighting chance of
+    // fulfilling our placement request.
     raw_addr &= V8_UINT64_C(0x3ffffffff000);
 #else
     uint32_t raw_addr = V8::RandomPrivate(isolate);
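
// Illustrative standalone sketch, not part of the patch: the hint is truncated
// to a page-aligned value below 2^46 so it stays well inside the 48-bit
// virtual address space implemented by current x64 CPUs.
#include <stdint.h>
static inline uint64_t TruncateMmapHintSketch(uint64_t raw_addr) {
  return raw_addr & 0x3ffffffff000ULL;  // keep bits 12..45, clear the rest
}
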
@@ -381,7 +384,7 @@
 void* OS::Allocate(const size_t requested,
                    size_t* allocated,
                    bool is_executable) {
-  const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
+  const size_t msize = RoundUp(requested, AllocateAlignment());
   int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
   void* addr = GetRandomMmapAddr();
   void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
@@ -474,7 +477,7 @@
 
 
 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
-  if (memory_) munmap(memory_, size_);
+  if (memory_) OS::Free(memory_, size_);
   fclose(file_);
 }
 
@@ -556,7 +559,7 @@
   void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
                     fileno(f), 0);
   ASSERT(addr != MAP_FAILED);
-  munmap(addr, size);
+  OS::Free(addr, size);
   fclose(f);
 }
 
@@ -598,44 +601,126 @@
 static const int kMmapFd = -1;
 static const int kMmapFdOffset = 0;
 
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
 
 VirtualMemory::VirtualMemory(size_t size) {
-  address_ = mmap(GetRandomMmapAddr(), size, PROT_NONE,
-                  MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
-                  kMmapFd, kMmapFdOffset);
+  address_ = ReserveRegion(size);
   size_ = size;
 }
 
 
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* reservation = mmap(GetRandomMmapAddr(),
+                           request_size,
+                           PROT_NONE,
+                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+                           kMmapFd,
+                           kMmapFdOffset);
+  if (reservation == MAP_FAILED) return;
+
+  Address base = static_cast<Address>(reservation);
+  Address aligned_base = RoundUp(base, alignment);
+  ASSERT_LE(base, aligned_base);
+
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  ASSERT_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  ASSERT(aligned_size == request_size);
+
+  address_ = static_cast<void*>(aligned_base);
+  size_ = aligned_size;
+}
+
+
 VirtualMemory::~VirtualMemory() {
   if (IsReserved()) {
-    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+    bool result = ReleaseRegion(address(), size());
+    ASSERT(result);
+    USE(result);
   }
 }
 
 
 bool VirtualMemory::IsReserved() {
-  return address_ != MAP_FAILED;
+  return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
 }
 
 
 bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(address, size, prot,
-                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
-                         kMmapFd, kMmapFdOffset)) {
-    return false;
-  }
-
-  UpdateAllocatedSpaceLimits(address, size);
-  return true;
+  return CommitRegion(address, size, is_executable);
 }
 
 
 bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return mmap(address, size, PROT_NONE,
+  return UncommitRegion(address, size);
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  void* result = mmap(GetRandomMmapAddr(),
+                      size,
+                      PROT_NONE,
+                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+                      kMmapFd,
+                      kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+  return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(base,
+                         size,
+                         prot,
+                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+                         kMmapFd,
+                         kMmapFdOffset)) {
+    return false;
+  }
+
+  UpdateAllocatedSpaceLimits(base, size);
+  return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return mmap(base,
+              size,
+              PROT_NONE,
               MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
-              kMmapFd, kMmapFdOffset) != MAP_FAILED;
+              kMmapFd,
+              kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+  return munmap(base, size) == 0;
 }
 
 
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index 6be941a..a70b43c 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -228,7 +228,7 @@
 
 
 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
-  if (memory_) munmap(memory_, size_);
+  if (memory_) OS::Free(memory_, size_);
   fclose(file_);
 }
 
@@ -334,33 +334,102 @@
 }
 
 
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
 
 
-VirtualMemory::VirtualMemory(size_t size) {
-  address_ = mmap(NULL, size, PROT_NONE,
-                  MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-                  kMmapFd, kMmapFdOffset);
-  size_ = size;
+VirtualMemory::VirtualMemory(size_t size)
+    : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* reservation = mmap(NULL,
+                           request_size,
+                           PROT_NONE,
+                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                           kMmapFd,
+                           kMmapFdOffset);
+  if (reservation == MAP_FAILED) return;
+
+  Address base = static_cast<Address>(reservation);
+  Address aligned_base = RoundUp(base, alignment);
+  ASSERT_LE(base, aligned_base);
+
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  ASSERT_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  ASSERT(aligned_size == request_size);
+
+  address_ = static_cast<void*>(aligned_base);
+  size_ = aligned_size;
 }
 
 
 VirtualMemory::~VirtualMemory() {
   if (IsReserved()) {
-    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+    bool result = ReleaseRegion(address(), size());
+    ASSERT(result);
+    USE(result);
   }
 }
 
 
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  void* result = mmap(NULL,
+                      size,
+                      PROT_NONE,
+                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                      kMmapFd,
+                      kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+  return result;
+}
+
+
 bool VirtualMemory::IsReserved() {
-  return address_ != MAP_FAILED;
+  return address_ != NULL;
 }
 
 
 bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::CommitRegion(void* address,
+                                 size_t size,
+                                 bool is_executable) {
   int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(address, size, prot,
+  if (MAP_FAILED == mmap(address,
+                         size,
+                         prot,
                          MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-                         kMmapFd, kMmapFdOffset)) {
+                         kMmapFd,
+                         kMmapFdOffset)) {
     return false;
   }
 
@@ -370,9 +439,22 @@
 
 
 bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return mmap(address, size, PROT_NONE,
+  return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::UncommitRegion(void* address, size_t size) {
+  return mmap(address,
+              size,
+              PROT_NONE,
               MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
-              kMmapFd, kMmapFdOffset) != MAP_FAILED;
+              kMmapFd,
+              kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
+  return munmap(address, size) == 0;
 }
 
 
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index 973329b..3151d18 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -245,7 +245,7 @@
 
 
 PosixMemoryMappedFile::~PosixMemoryMappedFile() {
-  if (memory_) munmap(memory_, size_);
+  if (memory_) OS::Free(memory_, size_);
   fclose(file_);
 }
 
@@ -342,7 +342,8 @@
 
 VirtualMemory::~VirtualMemory() {
   if (IsReserved()) {
-    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+    OS::Free(address(), size());
+    address_ = MAP_FAILED;
   }
 }
 
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 97788e2..8771c43 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -1397,41 +1397,101 @@
 }
 
 
-bool VirtualMemory::IsReserved() {
-  return address_ != NULL;
-}
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
 
 
-VirtualMemory::VirtualMemory(size_t size) {
-  address_ = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
-  size_ = size;
+VirtualMemory::VirtualMemory(size_t size)
+    : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* address = ReserveRegion(request_size);
+  if (address == NULL) return;
+  Address base = RoundUp(static_cast<Address>(address), alignment);
+  // Try reducing the size by freeing and then reallocating a specific area.
+  bool result = ReleaseRegion(address, request_size);
+  USE(result);
+  ASSERT(result);
+  address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
+  if (address != NULL) {
+    request_size = size;
+    ASSERT(base == static_cast<Address>(address));
+  } else {
+    // Resizing failed; just go with a bigger area.
+    address = ReserveRegion(request_size);
+    if (address == NULL) return;
+  }
+  address_ = address;
+  size_ = request_size;
 }
 
 
 VirtualMemory::~VirtualMemory() {
   if (IsReserved()) {
-    if (0 == VirtualFree(address(), 0, MEM_RELEASE)) address_ = NULL;
+    bool result = ReleaseRegion(address_, size_);
+    ASSERT(result);
+    USE(result);
   }
 }
 
 
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
-  if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
-    return false;
-  }
+bool VirtualMemory::IsReserved() {
+  return address_ != NULL;
+}
 
-  UpdateAllocatedSpaceLimits(address, static_cast<int>(size));
-  return true;
+
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  if (CommitRegion(address, size, is_executable)) {
+    UpdateAllocatedSpaceLimits(address, static_cast<int>(size));
+    return true;
+  }
+  return false;
 }
 
 
 bool VirtualMemory::Uncommit(void* address, size_t size) {
   ASSERT(IsReserved());
-  return VirtualFree(address, size, MEM_DECOMMIT) != false;
+  return UncommitRegion(address, size);
 }
 
 
+void* VirtualMemory::ReserveRegion(size_t size) {
+  return VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+  if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
+    return false;
+  }
+
+  UpdateAllocatedSpaceLimits(base, static_cast<int>(size));
+  return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return VirtualFree(base, size, MEM_DECOMMIT) != 0;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+  return VirtualFree(base, 0, MEM_RELEASE) != 0;
+}
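+
The aligned constructor above reserves size plus alignment, computes an aligned base inside that range, releases the oversized reservation, and then tries to re-reserve exactly at the aligned base, falling back to the bigger region if another thread claimed the range in between. The following is a simplified standalone sketch of that dance using raw VirtualAlloc/VirtualFree, not V8's implementation; the 64 KB alignment is an assumption chosen to match the usual Windows allocation granularity.

// Standalone sketch of aligned reservation on Win32.
#include <windows.h>
#include <cstdint>
#include <cstdio>

int main() {
  const size_t size = 1 << 20;       // requested size: 1 MB
  const size_t alignment = 1 << 16;  // assumed multiple of the OS granularity
  size_t request_size = size + alignment;

  void* address = VirtualAlloc(NULL, request_size, MEM_RESERVE, PAGE_NOACCESS);
  if (address == NULL) return 1;

  // Round the returned address up to the requested power-of-two alignment.
  uintptr_t base = (reinterpret_cast<uintptr_t>(address) + alignment - 1) &
                   ~(static_cast<uintptr_t>(alignment) - 1);

  // Release the oversized reservation and try to re-reserve the aligned slice.
  VirtualFree(address, 0, MEM_RELEASE);
  void* aligned = VirtualAlloc(reinterpret_cast<void*>(base), size,
                               MEM_RESERVE, PAGE_NOACCESS);
  if (aligned == NULL) {
    // Re-reservation lost a race; just keep a bigger, unaligned region.
    aligned = VirtualAlloc(NULL, request_size, MEM_RESERVE, PAGE_NOACCESS);
    if (aligned == NULL) return 1;
  } else {
    request_size = size;
  }

  printf("reserved %zu bytes at %p\n", request_size, aligned);
  VirtualFree(aligned, 0, MEM_RELEASE);
  return 0;
}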
+
+
+
 // ----------------------------------------------------------------------------
 // Win32 thread support.
 
@@ -1453,6 +1513,7 @@
  public:
   explicit PlatformData(HANDLE thread) : thread_(thread) {}
   HANDLE thread_;
+  unsigned thread_id_;
 };
 
 
@@ -1496,13 +1557,15 @@
                      ThreadEntry,
                      this,
                      0,
-                     NULL));
+                     &data_->thread_id_));
 }
 
 
 // Wait for thread to terminate.
 void Thread::Join() {
-  WaitForSingleObject(data_->thread_, INFINITE);
+  if (data_->thread_id_ != GetCurrentThreadId()) {
+    WaitForSingleObject(data_->thread_, INFINITE);
+  }
 }
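+
The Join() change above records the thread id at creation time and skips the WaitForSingleObject call when a thread asks to join itself, which would otherwise wait forever. A portable sketch of the same guard, expressed with std::thread rather than V8's Thread/PlatformData (std::thread would instead throw resource_deadlock_would_occur on a self-join, so the guard detaches to satisfy its join-or-detach requirement):

// Sketch of the self-join guard, mirroring the GetCurrentThreadId() check above.
#include <thread>
#include <cstdio>

static void JoinUnlessSelf(std::thread& t) {
  if (t.get_id() == std::this_thread::get_id()) {
    // The current thread is asking to join itself: skip the wait.
    t.detach();
    return;
  }
  t.join();
}

int main() {
  std::thread worker([] { std::puts("worker done"); });
  JoinUnlessSelf(worker);  // Called from main, so this really joins.
  return 0;
}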
 
 
diff --git a/src/platform.h b/src/platform.h
index 034fe34..99deb1b 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -301,23 +301,46 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
 };
 
-
+// Represents and controls an area of reserved memory.
+// Control of the reserved memory can be assigned to another VirtualMemory
+// object by assignment or copy construction. This removes the reserved memory
+// from the original object.
 class VirtualMemory {
  public:
+  // Empty VirtualMemory object, controlling no reserved memory.
+  VirtualMemory();
+
   // Reserves virtual memory with size.
   explicit VirtualMemory(size_t size);
+
+  // Reserves virtual memory containing an area of the given size that
+  // is aligned per alignment. This may not be at the position returned
+  // by address().
+  VirtualMemory(size_t size, size_t alignment);
+
+  // Releases the reserved memory, if any, controlled by this VirtualMemory
+  // object.
   ~VirtualMemory();
 
   // Returns whether the memory has been reserved.
   bool IsReserved();
 
+  // Initializes or resets an embedded VirtualMemory object.
+  void Reset();
+
   // Returns the start address of the reserved memory.
+  // If the memory was reserved with an alignment, this address is not
+  // necessarily aligned. The user might need to round it up to a multiple of
+  // the alignment to get the start of the aligned block.
   void* address() {
     ASSERT(IsReserved());
     return address_;
   }
 
-  // Returns the size of the reserved memory.
+  // Returns the size of the reserved memory. The returned value is only
+  // meaningful when IsReserved() returns true.
+  // If the memory was reserved with an alignment, this size may be larger
+  // than the requested size.
   size_t size() { return size_; }
 
   // Commits real memory. Returns whether the operation succeeded.
@@ -326,11 +349,43 @@
   // Uncommit real memory.  Returns whether the operation succeeded.
   bool Uncommit(void* address, size_t size);
 
+  void Release() {
+    ASSERT(IsReserved());
+    // Notice: Order is important here. The VirtualMemory object might live
+    // inside the allocated region.
+    void* address = address_;
+    size_t size = size_;
+    Reset();
+    bool result = ReleaseRegion(address, size);
+    USE(result);
+    ASSERT(result);
+  }
+
+  // Assign control of the reserved region to a different VirtualMemory object.
+  // The old object is no longer functional (IsReserved() returns false).
+  void TakeControl(VirtualMemory* from) {
+    ASSERT(!IsReserved());
+    address_ = from->address_;
+    size_ = from->size_;
+    from->Reset();
+  }
+
+  static void* ReserveRegion(size_t size);
+
+  static bool CommitRegion(void* base, size_t size, bool is_executable);
+
+  static bool UncommitRegion(void* base, size_t size);
+
+  // Must be called with a base pointer that has been returned by ReserveRegion
+  // and the same size it was reserved with.
+  static bool ReleaseRegion(void* base, size_t size);
+
  private:
   void* address_;  // Start address of the virtual memory.
   size_t size_;  // Size of the virtual memory.
 };
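+
Release() above deliberately copies address_ and size_ into locals and Reset()s the object before releasing the region, because the VirtualMemory object itself may live inside the memory it controls; TakeControl() moves ownership between objects the same way. A simplified standalone sketch of that ownership discipline follows; malloc/free stand in for the real reserve/release primitives, and the class is otherwise stripped down, so this is illustration only, not V8's class.

// Sketch of the Release()/TakeControl() ownership discipline.
#include <cassert>
#include <cstddef>
#include <cstdlib>

class Region {
 public:
  Region() : address_(NULL), size_(0) {}
  explicit Region(size_t size) : address_(std::malloc(size)), size_(size) {}
  ~Region() { if (IsReserved()) std::free(address_); }

  bool IsReserved() const { return address_ != NULL; }
  void Reset() { address_ = NULL; size_ = 0; }

  void Release() {
    assert(IsReserved());
    // Copy the field and clear the object *before* freeing: if the object
    // itself lived inside the region, touching it afterwards would be unsafe.
    void* address = address_;
    Reset();
    std::free(address);
  }

  // Move ownership from 'from' to this object; 'from' becomes empty.
  void TakeControl(Region* from) {
    assert(!IsReserved());
    address_ = from->address_;
    size_ = from->size_;
    from->Reset();
  }

 private:
  void* address_;
  size_t size_;
};

int main() {
  Region a(4096);
  Region b;
  b.TakeControl(&a);      // a no longer owns the memory.
  assert(!a.IsReserved());
  assert(b.IsReserved());
  b.Release();            // Explicit release; the destructor sees an empty object.
  return 0;
}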
 
+
 // ----------------------------------------------------------------------------
 // Thread
 //
diff --git a/src/preparser.cc b/src/preparser.cc
index 47d21ba..c4d2cfe 100644
--- a/src/preparser.cc
+++ b/src/preparser.cc
@@ -117,7 +117,18 @@
 
 
 PreParser::Statement PreParser::ParseSourceElement(bool* ok) {
+  // (Ecma 262 5th Edition, clause 14):
+  // SourceElement:
+  //    Statement
+  //    FunctionDeclaration
+  //
+  // In harmony mode we additionally allow the following production:
+  // SourceElement:
+  //    LetDeclaration
+
   switch (peek()) {
+    case i::Token::FUNCTION:
+      return ParseFunctionDeclaration(ok);
     case i::Token::LET:
       return ParseVariableStatement(kSourceElement, ok);
     default:
@@ -225,8 +236,19 @@
     case i::Token::TRY:
       return ParseTryStatement(ok);
 
-    case i::Token::FUNCTION:
-      return ParseFunctionDeclaration(ok);
+    case i::Token::FUNCTION: {
+      i::Scanner::Location start_location = scanner_->peek_location();
+      Statement statement = ParseFunctionDeclaration(CHECK_OK);
+      i::Scanner::Location end_location = scanner_->location();
+      if (strict_mode()) {
+        ReportMessageAt(start_location.beg_pos, end_location.end_pos,
+                        "strict_function", NULL);
+        *ok = false;
+        return Statement::Default();
+      } else {
+        return statement;
+      }
+    }
 
     case i::Token::DEBUGGER:
       return ParseDebuggerStatement(ok);
@@ -271,14 +293,10 @@
   //
   Expect(i::Token::LBRACE, CHECK_OK);
   while (peek() != i::Token::RBRACE) {
-    i::Scanner::Location start_location = scanner_->peek_location();
-    Statement statement = ParseSourceElement(CHECK_OK);
-    i::Scanner::Location end_location = scanner_->location();
-    if (strict_mode() && statement.IsFunctionDeclaration()) {
-      ReportMessageAt(start_location.beg_pos, end_location.end_pos,
-                      "strict_function", NULL);
-      *ok = false;
-      return Statement::Default();
+    if (harmony_block_scoping_) {
+      ParseSourceElement(CHECK_OK);
+    } else {
+      ParseStatement(CHECK_OK);
     }
   }
   Expect(i::Token::RBRACE, ok);
@@ -372,18 +390,11 @@
 
   Expression expr = ParseExpression(true, CHECK_OK);
   if (expr.IsRawIdentifier()) {
-    if (peek() == i::Token::COLON &&
-        (!strict_mode() || !expr.AsIdentifier().IsFutureReserved())) {
+    ASSERT(!expr.AsIdentifier().IsFutureReserved());
+    ASSERT(!strict_mode() || !expr.AsIdentifier().IsFutureStrictReserved());
+    if (peek() == i::Token::COLON) {
       Consume(i::Token::COLON);
-      i::Scanner::Location start_location = scanner_->peek_location();
-      Statement statement = ParseStatement(CHECK_OK);
-      if (strict_mode() && statement.IsFunctionDeclaration()) {
-        i::Scanner::Location end_location = scanner_->location();
-        ReportMessageAt(start_location.beg_pos, end_location.end_pos,
-                        "strict_function", NULL);
-        *ok = false;
-      }
-      return Statement::Default();
+      return ParseStatement(ok);
     }
     // Preparsing is disabled for extensions (because the extension details
     // aren't passed to lazily compiled functions), so we don't
@@ -513,15 +524,7 @@
       Expect(i::Token::DEFAULT, CHECK_OK);
       Expect(i::Token::COLON, CHECK_OK);
     } else {
-      i::Scanner::Location start_location = scanner_->peek_location();
-      Statement statement = ParseStatement(CHECK_OK);
-      if (strict_mode() && statement.IsFunctionDeclaration()) {
-        i::Scanner::Location end_location = scanner_->location();
-        ReportMessageAt(start_location.beg_pos, end_location.end_pos,
-                        "strict_function", NULL);
-        *ok = false;
-        return Statement::Default();
-      }
+      ParseStatement(CHECK_OK);
     }
     token = peek();
   }
@@ -1434,9 +1437,16 @@
       ReportMessageAt(location.beg_pos, location.end_pos,
                       "reserved_word", NULL);
       *ok = false;
+      return GetIdentifierSymbol();
     }
-      // FALLTHROUGH
     case i::Token::FUTURE_STRICT_RESERVED_WORD:
+      if (strict_mode()) {
+        i::Scanner::Location location = scanner_->location();
+        ReportMessageAt(location.beg_pos, location.end_pos,
+                        "strict_reserved_word", NULL);
+        *ok = false;
+      }
+      // FALLTHROUGH
     case i::Token::IDENTIFIER:
       return GetIdentifierSymbol();
     default:
diff --git a/src/prettyprinter.cc b/src/prettyprinter.cc
index 663af28..37c76ce 100644
--- a/src/prettyprinter.cc
+++ b/src/prettyprinter.cc
@@ -372,13 +372,6 @@
 }
 
 
-void PrettyPrinter::VisitCompareToNull(CompareToNull* node) {
-  Print("(");
-  Visit(node->expression());
-  Print("%s null)", Token::String(node->op()));
-}
-
-
 void PrettyPrinter::VisitThisFunction(ThisFunction* node) {
   Print("<this-function>");
 }
@@ -1020,15 +1013,6 @@
 }
 
 
-void AstPrinter::VisitCompareToNull(CompareToNull* node) {
-  const char* name = node->is_strict()
-      ? "COMPARE-TO-NULL-STRICT"
-      : "COMPARE-TO-NULL";
-  IndentedScope indent(this, name, node);
-  Visit(node->expression());
-}
-
-
 void AstPrinter::VisitThisFunction(ThisFunction* node) {
   IndentedScope indent(this, "THIS-FUNCTION");
 }
@@ -1404,16 +1388,6 @@
 }
 
 
-void JsonAstBuilder::VisitCompareToNull(CompareToNull* expr) {
-  TagScope tag(this, "CompareToNull");
-  {
-    AttributesScope attributes(this);
-    AddAttribute("is_strict", expr->is_strict());
-  }
-  Visit(expr->expression());
-}
-
-
 void JsonAstBuilder::VisitThisFunction(ThisFunction* expr) {
   TagScope tag(this, "ThisFunction");
 }
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index a7384a6..c1052e6 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -1015,6 +1015,11 @@
 }
 
 
+Handle<HeapObject> HeapEntry::GetHeapObject() {
+  return snapshot_->collection()->FindHeapObjectById(id());
+}
+
+
 template<class Visitor>
 void HeapEntry::ApplyAndPaintAllReachable(Visitor* visitor) {
   List<HeapEntry*> list(10);
@@ -1375,8 +1380,8 @@
 
 
 void HeapObjectsMap::SnapshotGenerationFinished() {
-    initial_fill_mode_ = false;
-    RemoveDeadEntries();
+  initial_fill_mode_ = false;
+  RemoveDeadEntries();
 }
 
 
@@ -1398,10 +1403,12 @@
   if (entry != NULL) {
     void* value = entry->value;
     entries_map_.Remove(from, AddressHash(from));
-    entry = entries_map_.Lookup(to, AddressHash(to), true);
-    // We can have an entry at the new location, it is OK, as GC can overwrite
-    // dead objects with alive objects being moved.
-    entry->value = value;
+    if (to != NULL) {
+      entry = entries_map_.Lookup(to, AddressHash(to), true);
+      // An entry can already exist at the new location; that is OK, as GC can
+      // dead objects with alive objects being moved.
+      entry->value = value;
+    }
   }
 }
 
@@ -1522,6 +1529,26 @@
 }
 
 
+Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById(uint64_t id) {
+  // First perform a full GC in order to avoid dead objects.
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+  AssertNoAllocation no_allocation;
+  HeapObject* object = NULL;
+  HeapIterator iterator(HeapIterator::kFilterUnreachable);
+  // Make sure that object with the given id is still reachable.
+  for (HeapObject* obj = iterator.next();
+       obj != NULL;
+       obj = iterator.next()) {
+    if (ids_.FindObject(obj->address()) == id) {
+      ASSERT(object == NULL);
+      object = obj;
+      // Can't break -- kFilterUnreachable requires full heap traversal.
+    }
+  }
+  return object != NULL ? Handle<HeapObject>(object) : Handle<HeapObject>();
+}
+
+
 HeapEntry *const HeapEntriesMap::kHeapEntryPlaceholder =
     reinterpret_cast<HeapEntry*>(1);
 
@@ -1812,12 +1839,13 @@
 }
 
 
-int V8HeapExplorer::EstimateObjectsCount() {
-  HeapIterator iterator(HeapIterator::kFilterUnreachable);
+int V8HeapExplorer::EstimateObjectsCount(HeapIterator* iterator) {
   int objects_count = 0;
-  for (HeapObject* obj = iterator.next();
+  for (HeapObject* obj = iterator->next();
        obj != NULL;
-       obj = iterator.next(), ++objects_count) {}
+       obj = iterator->next()) {
+    objects_count++;
+  }
   return objects_count;
 }
 
@@ -1945,6 +1973,14 @@
                            "descriptors", map->instance_descriptors(),
                            Map::kInstanceDescriptorsOrBitField3Offset);
     }
+    if (map->prototype_transitions() != heap_->empty_fixed_array()) {
+      TagObject(map->prototype_transitions(), "(prototype transitions)");
+      SetInternalReference(obj,
+                           entry,
+                           "prototype_transitions",
+                           map->prototype_transitions(),
+                           Map::kPrototypeTransitionsOffset);
+    }
     SetInternalReference(obj, entry,
                          "code_cache", map->code_cache(),
                          Map::kCodeCacheOffset);
@@ -2175,9 +2211,11 @@
 
 bool V8HeapExplorer::IterateAndExtractReferences(
     SnapshotFillerInterface* filler) {
-  filler_ = filler;
   HeapIterator iterator(HeapIterator::kFilterUnreachable);
+
+  filler_ = filler;
   bool interrupted = false;
+
   // Heap iteration with filtering must be finished in any case.
   for (HeapObject* obj = iterator.next();
        obj != NULL;
@@ -2743,13 +2781,43 @@
 bool HeapSnapshotGenerator::GenerateSnapshot() {
   v8_heap_explorer_.TagGlobalObjects();
 
+  // TODO(1562) Profiler assumes that any object that is in the heap after
+  // full GC is reachable from the root when computing dominators.
+  // This is not true for weakly reachable objects.
+  // As a temporary solution we call GC twice.
+  Isolate::Current()->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+  Isolate::Current()->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+
+#ifdef DEBUG
+  Heap* debug_heap = Isolate::Current()->heap();
+  ASSERT(!debug_heap->old_data_space()->was_swept_conservatively());
+  ASSERT(!debug_heap->old_pointer_space()->was_swept_conservatively());
+  ASSERT(!debug_heap->code_space()->was_swept_conservatively());
+  ASSERT(!debug_heap->cell_space()->was_swept_conservatively());
+  ASSERT(!debug_heap->map_space()->was_swept_conservatively());
+#endif
+
+  // The following code uses heap iterators, so we want the heap to be
+  // stable. It should follow TagGlobalObjects as that can allocate.
   AssertNoAllocation no_alloc;
 
+#ifdef DEBUG
+  debug_heap->Verify();
+#endif
+
   SetProgressTotal(4);  // 2 passes + dominators + sizes.
 
+#ifdef DEBUG
+  debug_heap->Verify();
+#endif
+
   // Pass 1. Iterate heap contents to count entries and references.
   if (!CountEntriesAndReferences()) return false;
 
+#ifdef DEBUG
+  debug_heap->Verify();
+#endif
+
   // Allocate and fill entries in the snapshot, allocate references.
   snapshot_->AllocateEntries(entries_.entries_count(),
                              entries_.total_children_count(),
@@ -2787,8 +2855,9 @@
 
 void HeapSnapshotGenerator::SetProgressTotal(int iterations_count) {
   if (control_ == NULL) return;
+  HeapIterator iterator(HeapIterator::kFilterUnreachable);
   progress_total_ = (
-      v8_heap_explorer_.EstimateObjectsCount() +
+      v8_heap_explorer_.EstimateObjectsCount(&iterator) +
       dom_explorer_.EstimateObjectsCount()) * iterations_count;
   progress_counter_ = 0;
 }
@@ -2838,7 +2907,7 @@
       nodes_to_visit.RemoveLast();
     }
   }
-  entries->Truncate(current_entry);
+  ASSERT_EQ(current_entry, entries->length());
 }
 
 
diff --git a/src/profile-generator.h b/src/profile-generator.h
index f3737ea..a3b0f1a 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -550,7 +550,10 @@
   Vector<HeapGraphEdge*> retainers() {
     return Vector<HeapGraphEdge*>(retainers_arr(), retainers_count_); }
   HeapEntry* dominator() { return dominator_; }
-  void set_dominator(HeapEntry* entry) { dominator_ = entry; }
+  void set_dominator(HeapEntry* entry) {
+    ASSERT(entry != NULL);
+    dominator_ = entry;
+  }
 
   void clear_paint() { painted_ = kUnpainted; }
   bool painted_reachable() { return painted_ == kPainted; }
@@ -585,6 +588,8 @@
 
   void Print(int max_depth, int indent);
 
+  Handle<HeapObject> GetHeapObject();
+
   static int EntriesSize(int entries_count,
                          int children_count,
                          int retainers_count);
@@ -763,6 +768,7 @@
   TokenEnumerator* token_enumerator() { return token_enumerator_; }
 
   uint64_t GetObjectId(Address addr) { return ids_.FindObject(addr); }
+  Handle<HeapObject> FindHeapObjectById(uint64_t id);
   void ObjectMoveEvent(Address from, Address to) { ids_.MoveObject(from, to); }
 
  private:
@@ -917,7 +923,7 @@
   virtual HeapEntry* AllocateEntry(
       HeapThing ptr, int children_count, int retainers_count);
   void AddRootEntries(SnapshotFillerInterface* filler);
-  int EstimateObjectsCount();
+  int EstimateObjectsCount(HeapIterator* iterator);
   bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
   void TagGlobalObjects();
 
diff --git a/src/property.h b/src/property.h
index e7d9fc5..ee2e8c8 100644
--- a/src/property.h
+++ b/src/property.h
@@ -115,11 +115,9 @@
 class ElementsTransitionDescriptor: public Descriptor {
  public:
   ElementsTransitionDescriptor(String* key,
-                               Map* map,
-                               ElementsKind elements_kind)
-      : Descriptor(key, map, PropertyDetails(NONE,
-                                             ELEMENTS_TRANSITION,
-                                             elements_kind)) { }
+                               Object* map_or_array)
+      : Descriptor(key, map_or_array, PropertyDetails(NONE,
+                                                      ELEMENTS_TRANSITION)) { }
 };
 
 // Marks a field name in a map so that adding the field is guaranteed
@@ -202,9 +200,9 @@
     number_ = entry;
   }
 
-  void HandlerResult() {
+  void HandlerResult(JSProxy* proxy) {
     lookup_type_ = HANDLER_TYPE;
-    holder_ = NULL;
+    holder_ = proxy;
     details_ = PropertyDetails(NONE, HANDLER);
     cacheable_ = false;
   }
@@ -221,7 +219,12 @@
 
   JSObject* holder() {
     ASSERT(IsFound());
-    return holder_;
+    return JSObject::cast(holder_);
+  }
+
+  JSProxy* proxy() {
+    ASSERT(IsFound());
+    return JSProxy::cast(holder_);
   }
 
   PropertyType type() {
@@ -354,7 +357,7 @@
     CONSTANT_TYPE
   } lookup_type_;
 
-  JSObject* holder_;
+  JSReceiver* holder_;
   int number_;
   bool cacheable_;
   PropertyDetails details_;
diff --git a/src/proxy.js b/src/proxy.js
index 4e44cd4..a51f09a 100644
--- a/src/proxy.js
+++ b/src/proxy.js
@@ -41,14 +41,20 @@
     throw MakeTypeError("handler_non_object", ["create"])
   if (!IS_SPEC_FUNCTION(callTrap))
     throw MakeTypeError("trap_function_expected", ["createFunction", "call"])
+  var construct
   if (IS_UNDEFINED(constructTrap)) {
-    constructTrap = callTrap
-  } else if (!IS_SPEC_FUNCTION(constructTrap)) {
+    construct = DerivedConstructTrap(callTrap)
+  } else if (IS_SPEC_FUNCTION(constructTrap)) {
+    construct = function() {
+      // Make sure the trap receives 'undefined' as this.
+      return %Apply(constructTrap, void 0, arguments, 0, %_ArgumentsLength());
+    }
+  } else {
     throw MakeTypeError("trap_function_expected",
                         ["createFunction", "construct"])
   }
   return %CreateJSFunctionProxy(
-    handler, callTrap, constructTrap, $Function.prototype)
+    handler, callTrap, construct, $Function.prototype)
 }
 
 
@@ -57,6 +63,17 @@
 // Builtins
 ////////////////////////////////////////////////////////////////////////////////
 
+function DerivedConstructTrap(callTrap) {
+  return function() {
+    var proto = this.prototype
+    if (!IS_SPEC_OBJECT(proto)) proto = $Object.prototype
+    var obj = new $Object()
+    obj.__proto__ = proto
+    var result = %Apply(callTrap, obj, arguments, 0, %_ArgumentsLength());
+    return IS_SPEC_OBJECT(result) ? result : obj
+  }
+}
+
 function DelegateCallAndConstruct(callTrap, constructTrap) {
   return function() {
     return %Apply(%_IsConstructCall() ? constructTrap : callTrap,
diff --git a/src/regexp-macro-assembler-tracer.cc b/src/regexp-macro-assembler-tracer.cc
index b32d71d..f843278 100644
--- a/src/regexp-macro-assembler-tracer.cc
+++ b/src/regexp-macro-assembler-tracer.cc
@@ -37,8 +37,8 @@
     RegExpMacroAssembler* assembler) :
   assembler_(assembler) {
   unsigned int type = assembler->Implementation();
-  ASSERT(type < 4);
-  const char* impl_names[4] = {"IA32", "ARM", "X64", "Bytecode"};
+  ASSERT(type < 5);
+  const char* impl_names[] = {"IA32", "ARM", "MIPS", "X64", "Bytecode"};
   PrintF("RegExpMacroAssembler%s();\n", impl_names[type]);
 }
 
diff --git a/src/regexp.js b/src/regexp.js
index 38d4496..0ab86f3 100644
--- a/src/regexp.js
+++ b/src/regexp.js
@@ -95,12 +95,11 @@
   }
 }
 
-
 // Deprecated RegExp.prototype.compile method.  We behave like the constructor
 // were called again.  In SpiderMonkey, this method returns the regexp object.
 // In JSC, it returns undefined.  For compatibility with JSC, we match their
 // behavior.
-function CompileRegExp(pattern, flags) {
+function RegExpCompile(pattern, flags) {
   // Both JSC and SpiderMonkey treat a missing pattern argument as the
   // empty subject string, and an actual undefined value passed as the
   // pattern as the string 'undefined'.  Note that JSC is inconsistent
@@ -108,6 +107,11 @@
   // RegExp.prototype.compile and in the constructor, where they are
   // the empty string.  For compatibility with JSC, we match their
   // behavior.
+  if (this == $RegExp.prototype) {
+    // We don't allow recompiling RegExp.prototype.
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['RegExp.prototype.compile', this]);
+  }
   if (IS_UNDEFINED(pattern) && %_ArgumentsLength() != 0) {
     DoConstructRegExp(this, 'undefined', flags);
   } else {
@@ -408,7 +412,6 @@
 function SetUpRegExp() {
   %CheckIsBootstrapping();
   %FunctionSetInstanceClassName($RegExp, 'RegExp');
-  %FunctionSetPrototype($RegExp, new $Object());
   %SetProperty($RegExp.prototype, 'constructor', $RegExp, DONT_ENUM);
   %SetCode($RegExp, RegExpConstructor);
 
@@ -416,7 +419,7 @@
     "exec", RegExpExec,
     "test", RegExpTest,
     "toString", RegExpToString,
-    "compile", CompileRegExp
+    "compile", RegExpCompile
   ));
 
   // The length of compile is 1 in SpiderMonkey.
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index 26d8846..520dd39 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -35,6 +35,7 @@
 #include "deoptimizer.h"
 #include "execution.h"
 #include "global-handles.h"
+#include "isolate-inl.h"
 #include "mark-compact.h"
 #include "platform.h"
 #include "scopeinfo.h"
@@ -338,7 +339,8 @@
 void RuntimeProfiler::RemoveDeadSamples() {
   for (int i = 0; i < kSamplerWindowSize; i++) {
     Object* function = sampler_window_[i];
-    if (function != NULL && !HeapObject::cast(function)->IsMarked()) {
+    if (function != NULL &&
+        !Marking::MarkBitFrom(HeapObject::cast(function)).Get()) {
       sampler_window_[i] = NULL;
     }
   }
diff --git a/src/runtime.cc b/src/runtime.cc
index 3ea9304..1f52a22 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -42,6 +42,7 @@
 #include "deoptimizer.h"
 #include "execution.h"
 #include "global-handles.h"
+#include "isolate-inl.h"
 #include "jsregexp.h"
 #include "json-parser.h"
 #include "liveedit.h"
@@ -177,6 +178,7 @@
   // Pixel elements cannot be created using an object literal.
   ASSERT(!copy->HasExternalArrayElements());
   switch (copy->GetElementsKind()) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       FixedArray* elements = FixedArray::cast(copy->elements());
       if (elements->map() == heap->fixed_cow_array_map()) {
@@ -189,6 +191,9 @@
       } else {
         for (int i = 0; i < elements->length(); i++) {
           Object* value = elements->get(i);
+          ASSERT(value->IsSmi() ||
+                 value->IsTheHole() ||
+                 (copy->GetElementsKind() == FAST_ELEMENTS));
           if (value->IsJSObject()) {
             JSObject* js_object = JSObject::cast(value);
             { MaybeObject* maybe_result = DeepCopyBoilerplate(isolate,
@@ -432,16 +437,28 @@
       is_cow ? elements : isolate->factory()->CopyFixedArray(elements);
 
   Handle<FixedArray> content = Handle<FixedArray>::cast(copied_elements);
+  bool has_non_smi = false;
   if (is_cow) {
-#ifdef DEBUG
     // Copy-on-write arrays must be shallow (and simple).
-    for (int i = 0; i < content->length(); i++) {
-      ASSERT(!content->get(i)->IsFixedArray());
-    }
+    if (FLAG_smi_only_arrays) {
+      for (int i = 0; i < content->length(); i++) {
+        Object* current = content->get(i);
+        ASSERT(!current->IsFixedArray());
+        if (!current->IsSmi() && !current->IsTheHole()) {
+          has_non_smi = true;
+        }
+      }
+    } else {
+#ifdef DEBUG
+      for (int i = 0; i < content->length(); i++) {
+        ASSERT(!content->get(i)->IsFixedArray());
+      }
 #endif
+    }
   } else {
     for (int i = 0; i < content->length(); i++) {
-      if (content->get(i)->IsFixedArray()) {
+      Object* current = content->get(i);
+      if (current->IsFixedArray()) {
         // The value contains the constant_properties of a
         // simple object or array literal.
         Handle<FixedArray> fa(FixedArray::cast(content->get(i)));
@@ -449,12 +466,25 @@
             CreateLiteralBoilerplate(isolate, literals, fa);
         if (result.is_null()) return result;
         content->set(i, *result);
+        has_non_smi = true;
+      } else {
+        if (!current->IsSmi() && !current->IsTheHole()) {
+          has_non_smi = true;
+        }
       }
     }
   }
 
   // Set the elements.
-  Handle<JSArray>::cast(object)->SetContent(*content);
+  Handle<JSArray> js_object(Handle<JSArray>::cast(object));
+  isolate->factory()->SetContent(js_object, content);
+
+  if (FLAG_smi_only_arrays) {
+    if (has_non_smi && js_object->HasFastSmiOnlyElements()) {
+      isolate->factory()->EnsureCanContainNonSmiElements(js_object);
+    }
+  }
+
   return object;
 }
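+
The literal-boilerplate code above scans the copied elements and, when FLAG_smi_only_arrays is on, calls EnsureCanContainNonSmiElements as soon as it sees a value that is neither a Smi nor the hole, promoting the array out of FAST_SMI_ONLY_ELEMENTS. Below is a standalone sketch of that detection step only: values are modeled as tagged words where a clear low bit marks a small integer (V8's smi tag convention), the hole check is omitted for brevity, and nothing here is V8 code.

// Sketch of the "does this literal need more than smi-only elements?" scan.
#include <cstdint>
#include <cstdio>
#include <vector>

enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS };

static bool IsSmi(intptr_t tagged) { return (tagged & 1) == 0; }

static ElementsKind RequiredElementsKind(const std::vector<intptr_t>& elements) {
  for (size_t i = 0; i < elements.size(); i++) {
    if (!IsSmi(elements[i])) {
      // A heap object was stored: the backing store must be able to hold
      // arbitrary tagged pointers, so transition away from smi-only.
      return FAST_ELEMENTS;
    }
  }
  return FAST_SMI_ONLY_ELEMENTS;
}

int main() {
  std::vector<intptr_t> smis  = {2, 4, 6};       // even words == smi-tagged
  std::vector<intptr_t> mixed = {2, 4, 0x1001};  // odd word == heap object pointer
  printf("smis:  %s\n", RequiredElementsKind(smis) == FAST_SMI_ONLY_ELEMENTS
                            ? "smi-only" : "fast");
  printf("mixed: %s\n", RequiredElementsKind(mixed) == FAST_SMI_ONLY_ELEMENTS
                            ? "smi-only" : "fast");
  return 0;
}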
 
@@ -685,10 +715,8 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
   CONVERT_ARG_CHECKED(JSWeakMap, weakmap, 0);
-  // TODO(mstarzinger): Currently we cannot use JSProxy objects as keys
-  // because they cannot be cast to JSObject to get an identity hash code.
-  CONVERT_ARG_CHECKED(JSObject, key, 1);
-  return weakmap->table()->Lookup(*key);
+  CONVERT_ARG_CHECKED(JSReceiver, key, 1);
+  return ObjectHashTable::cast(weakmap->table())->Lookup(*key);
 }
 
 
@@ -696,10 +724,9 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 3);
   CONVERT_ARG_CHECKED(JSWeakMap, weakmap, 0);
-  // TODO(mstarzinger): See Runtime_WeakMapGet above.
-  CONVERT_ARG_CHECKED(JSObject, key, 1);
+  CONVERT_ARG_CHECKED(JSReceiver, key, 1);
   Handle<Object> value(args[2]);
-  Handle<ObjectHashTable> table(weakmap->table());
+  Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table()));
   Handle<ObjectHashTable> new_table = PutIntoObjectHashTable(table, key, value);
   weakmap->set_table(*new_table);
   return *value;
@@ -1211,46 +1238,17 @@
       LookupResult lookup;
       global->Lookup(*name, &lookup);
       if (lookup.IsProperty()) {
-        // Determine if the property is local by comparing the holder
-        // against the global object. The information will be used to
-        // avoid throwing re-declaration errors when declaring
-        // variables or constants that exist in the prototype chain.
-        bool is_local = (*global == lookup.holder());
-        // Get the property attributes and determine if the property is
-        // read-only.
-        PropertyAttributes attributes = global->GetPropertyAttribute(*name);
-        bool is_read_only = (attributes & READ_ONLY) != 0;
-        if (lookup.type() == INTERCEPTOR) {
-          // If the interceptor says the property is there, we
-          // just return undefined without overwriting the property.
-          // Otherwise, we continue to setting the property.
-          if (attributes != ABSENT) {
-            // Check if the existing property conflicts with regards to const.
-            if (is_local && (is_read_only || is_const_property)) {
-              const char* type = (is_read_only) ? "const" : "var";
-              return ThrowRedeclarationError(isolate, type, name);
-            };
-            // The property already exists without conflicting: Go to
-            // the next declaration.
-            continue;
-          }
-          // Fall-through and introduce the absent property by using
-          // SetProperty.
-        } else {
-          // For const properties, we treat a callback with this name
-          // even in the prototype as a conflicting declaration.
-          if (is_const_property && (lookup.type() == CALLBACKS)) {
-            return ThrowRedeclarationError(isolate, "const", name);
-          }
-          // Otherwise, we check for locally conflicting declarations.
-          if (is_local && (is_read_only || is_const_property)) {
-            const char* type = (is_read_only) ? "const" : "var";
-            return ThrowRedeclarationError(isolate, type, name);
-          }
-          // The property already exists without conflicting: Go to
-          // the next declaration.
+        // We found an existing property. Unless it was an interceptor
+        // that claims the property is absent, skip this declaration.
+        if (lookup.type() != INTERCEPTOR) {
           continue;
         }
+        PropertyAttributes attributes = global->GetPropertyAttribute(*name);
+        if (attributes != ABSENT) {
+          continue;
+        }
+        // Fall-through and introduce the absent property by using
+        // SetProperty.
       }
     } else {
       is_function_declaration = true;
@@ -1267,20 +1265,6 @@
     LookupResult lookup;
     global->LocalLookup(*name, &lookup);
 
-    // There's a local property that we need to overwrite because
-    // we're either declaring a function or there's an interceptor
-    // that claims the property is absent.
-    //
-    // Check for conflicting re-declarations. We cannot have
-    // conflicting types in case of intercepted properties because
-    // they are absent.
-    if (lookup.IsProperty() &&
-        (lookup.type() != INTERCEPTOR) &&
-        (lookup.IsReadOnly() || is_const_property)) {
-      const char* type = (lookup.IsReadOnly()) ? "const" : "var";
-      return ThrowRedeclarationError(isolate, type, name);
-    }
-
     // Compute the property attributes. According to ECMA-262, section
     // 13, page 71, the property must be read-only and
     // non-deletable. However, neither SpiderMonkey nor KJS creates the
@@ -1335,15 +1319,17 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
 
-  CONVERT_ARG_CHECKED(Context, context, 0);
+  // Declarations are always made in a function or global context.  In the
+  // case of eval code, the context passed is the context of the caller,
+  // which may be some nested context and not the declaration context.
+  RUNTIME_ASSERT(args[0]->IsContext());
+  Handle<Context> context(Context::cast(args[0])->declaration_context());
+
   Handle<String> name(String::cast(args[1]));
   PropertyAttributes mode = static_cast<PropertyAttributes>(args.smi_at(2));
   RUNTIME_ASSERT(mode == READ_ONLY || mode == NONE);
   Handle<Object> initial_value(args[3], isolate);
 
-  // Declarations are always done in a function or global context.
-  context = Handle<Context>(context->declaration_context());
-
   int index;
   PropertyAttributes attributes;
   ContextLookupFlags flags = DONT_FOLLOW_CHAINS;
@@ -1352,9 +1338,7 @@
       context->Lookup(name, flags, &index, &attributes, &binding_flags);
 
   if (attributes != ABSENT) {
-    // The name was declared before; check for conflicting
-    // re-declarations: This is similar to the code in parser.cc in
-    // the AstBuildingParser::Declare function.
+    // The name was declared before; check for conflicting re-declarations.
     if (((attributes & READ_ONLY) != 0) || (mode == READ_ONLY)) {
       // Functions are not read-only.
       ASSERT(mode != READ_ONLY || initial_value->IsTheHole());
@@ -1365,53 +1349,41 @@
     // Initialize it if necessary.
     if (*initial_value != NULL) {
       if (index >= 0) {
-        // The variable or constant context slot should always be in
-        // the function context or the arguments object.
-        if (holder->IsContext()) {
-          ASSERT(holder.is_identical_to(context));
-          if (((attributes & READ_ONLY) == 0) ||
-              context->get(index)->IsTheHole()) {
-            context->set(index, *initial_value);
-          }
-        } else {
-          // The holder is an arguments object.
-          Handle<JSObject> arguments(Handle<JSObject>::cast(holder));
-          Handle<Object> result = SetElement(arguments, index, initial_value,
-                                             kNonStrictMode);
-          if (result.is_null()) return Failure::Exception();
+        ASSERT(holder.is_identical_to(context));
+        if (((attributes & READ_ONLY) == 0) ||
+            context->get(index)->IsTheHole()) {
+          context->set(index, *initial_value);
         }
       } else {
-        // Slow case: The property is not in the FixedArray part of the context.
-        Handle<JSObject> context_ext = Handle<JSObject>::cast(holder);
+        // Slow case: The property is in the context extension object of a
+        // function context or the global object of a global context.
+        Handle<JSObject> object = Handle<JSObject>::cast(holder);
         RETURN_IF_EMPTY_HANDLE(
             isolate,
-            SetProperty(context_ext, name, initial_value,
-                        mode, kNonStrictMode));
+            SetProperty(object, name, initial_value, mode, kNonStrictMode));
       }
     }
 
   } else {
     // The property is not in the function context. It needs to be
-    // "declared" in the function context's extension context, or in the
-    // global context.
-    Handle<JSObject> context_ext;
+    // "declared" in the function context's extension context or as a
+    // property of the global object.
+    Handle<JSObject> object;
     if (context->has_extension()) {
-      // The function context's extension context exists - use it.
-      context_ext = Handle<JSObject>(JSObject::cast(context->extension()));
+      object = Handle<JSObject>(JSObject::cast(context->extension()));
     } else {
-      // The function context's extension context does not exists - allocate
-      // it.
-      context_ext = isolate->factory()->NewJSObject(
+      // Context extension objects are allocated lazily.
+      ASSERT(context->IsFunctionContext());
+      object = isolate->factory()->NewJSObject(
           isolate->context_extension_function());
-      // And store it in the extension slot.
-      context->set_extension(*context_ext);
+      context->set_extension(*object);
     }
-    ASSERT(*context_ext != NULL);
+    ASSERT(*object != NULL);
 
     // Declare the property by setting it to the initial value if provided,
     // or undefined, and use the correct mode (e.g. READ_ONLY attribute for
     // constant declarations).
-    ASSERT(!context_ext->HasLocalProperty(*name));
+    ASSERT(!object->HasLocalProperty(*name));
     Handle<Object> value(isolate->heap()->undefined_value(), isolate);
     if (*initial_value != NULL) value = initial_value;
     // Declaring a const context slot is a conflicting declaration if
@@ -1421,15 +1393,15 @@
     // SetProperty and no setters are invoked for those since they are
     // not real JSObjects.
     if (initial_value->IsTheHole() &&
-        !context_ext->IsJSContextExtensionObject()) {
+        !object->IsJSContextExtensionObject()) {
       LookupResult lookup;
-      context_ext->Lookup(*name, &lookup);
+      object->Lookup(*name, &lookup);
       if (lookup.IsProperty() && (lookup.type() == CALLBACKS)) {
         return ThrowRedeclarationError(isolate, "const", name);
       }
     }
     RETURN_IF_EMPTY_HANDLE(isolate,
-                           SetProperty(context_ext, name, value, mode,
+                           SetProperty(object, name, value, mode,
                                        kNonStrictMode));
   }
 
@@ -1465,64 +1437,32 @@
   // to assign to the property.
   // Note that objects can have hidden prototypes, so we need to traverse
   // the whole chain of hidden prototypes to do a 'local' lookup.
-  JSObject* real_holder = global;
+  Object* object = global;
   LookupResult lookup;
-  while (true) {
-    real_holder->LocalLookup(*name, &lookup);
-    if (lookup.IsProperty()) {
-      // Determine if this is a redeclaration of something read-only.
-      if (lookup.IsReadOnly()) {
-        // If we found readonly property on one of hidden prototypes,
-        // just shadow it.
-        if (real_holder != isolate->context()->global()) break;
-        return ThrowRedeclarationError(isolate, "const", name);
-      }
-
-      // Determine if this is a redeclaration of an intercepted read-only
-      // property and figure out if the property exists at all.
-      bool found = true;
-      PropertyType type = lookup.type();
-      if (type == INTERCEPTOR) {
-        HandleScope handle_scope(isolate);
-        Handle<JSObject> holder(real_holder);
-        PropertyAttributes intercepted = holder->GetPropertyAttribute(*name);
-        real_holder = *holder;
-        if (intercepted == ABSENT) {
-          // The interceptor claims the property isn't there. We need to
-          // make sure to introduce it.
-          found = false;
-        } else if ((intercepted & READ_ONLY) != 0) {
-          // The property is present, but read-only. Since we're trying to
-          // overwrite it with a variable declaration we must throw a
-          // re-declaration error.  However if we found readonly property
-          // on one of hidden prototypes, just shadow it.
-          if (real_holder != isolate->context()->global()) break;
-          return ThrowRedeclarationError(isolate, "const", name);
+  while (object->IsJSObject() &&
+         JSObject::cast(object)->map()->is_hidden_prototype()) {
+    JSObject* raw_holder = JSObject::cast(object);
+    raw_holder->LocalLookup(*name, &lookup);
+    if (lookup.IsProperty() && lookup.type() == INTERCEPTOR) {
+      HandleScope handle_scope(isolate);
+      Handle<JSObject> holder(raw_holder);
+      PropertyAttributes intercepted = holder->GetPropertyAttribute(*name);
+      // Update the raw pointer in case it's changed due to GC.
+      raw_holder = *holder;
+      if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) {
+        // Found an interceptor that's not read only.
+        if (assign) {
+          return raw_holder->SetProperty(
+              &lookup, *name, args[2], attributes, strict_mode);
+        } else {
+          return isolate->heap()->undefined_value();
         }
       }
-
-      if (found && !assign) {
-        // The global property is there and we're not assigning any value
-        // to it. Just return.
-        return isolate->heap()->undefined_value();
-      }
-
-      // Assign the value (or undefined) to the property.
-      Object* value = (assign) ? args[2] : isolate->heap()->undefined_value();
-      return real_holder->SetProperty(
-          &lookup, *name, value, attributes, strict_mode);
     }
-
-    Object* proto = real_holder->GetPrototype();
-    if (!proto->IsJSObject())
-      break;
-
-    if (!JSObject::cast(proto)->map()->is_hidden_prototype())
-      break;
-
-    real_holder = JSObject::cast(proto);
+    object = raw_holder->GetPrototype();
   }
 
+  // Reload global in case the loop above performed a GC.
   global = isolate->context()->global();
   if (assign) {
     return global->SetProperty(*name, args[2], attributes, strict_mode);
@@ -1560,25 +1500,9 @@
                                                     attributes);
   }
 
-  // Determine if this is a redeclaration of something not
-  // read-only. In case the result is hidden behind an interceptor we
-  // need to ask it for the property attributes.
   if (!lookup.IsReadOnly()) {
-    if (lookup.type() != INTERCEPTOR) {
-      return ThrowRedeclarationError(isolate, "var", name);
-    }
-
-    PropertyAttributes intercepted = global->GetPropertyAttribute(*name);
-
-    // Throw re-declaration error if the intercepted property is present
-    // but not read-only.
-    if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) {
-      return ThrowRedeclarationError(isolate, "var", name);
-    }
-
     // Restore global object from context (in case of GC) and continue
-    // with setting the value because the property is either absent or
-    // read-only. We also have to do redo the lookup.
+    // with setting the value.
     HandleScope handle_scope(isolate);
     Handle<GlobalObject> global(isolate->context()->global());
 
@@ -1595,19 +1519,20 @@
     return *value;
   }
 
-  // Set the value, but only we're assigning the initial value to a
+  // Set the value, but only if we're assigning the initial value to a
   // constant. For now, we determine this by checking if the
   // current value is the hole.
-  // Strict mode handling not needed (const disallowed in strict mode).
+  // Strict mode handling not needed (const is disallowed in strict mode).
   PropertyType type = lookup.type();
   if (type == FIELD) {
     FixedArray* properties = global->properties();
     int index = lookup.GetFieldIndex();
-    if (properties->get(index)->IsTheHole()) {
+    if (properties->get(index)->IsTheHole() || !lookup.IsReadOnly()) {
       properties->set(index, *value);
     }
   } else if (type == NORMAL) {
-    if (global->GetNormalizedProperty(&lookup)->IsTheHole()) {
+    if (global->GetNormalizedProperty(&lookup)->IsTheHole() ||
+        !lookup.IsReadOnly()) {
       global->SetNormalizedProperty(&lookup, *value);
     }
   } else {
@@ -1627,11 +1552,12 @@
 
   Handle<Object> value(args[0], isolate);
   ASSERT(!value->IsTheHole());
-  CONVERT_ARG_CHECKED(Context, context, 1);
-  Handle<String> name(String::cast(args[2]));
 
   // Initializations are always done in a function or global context.
-  context = Handle<Context>(context->declaration_context());
+  RUNTIME_ASSERT(args[1]->IsContext());
+  Handle<Context> context(Context::cast(args[1])->declaration_context());
+
+  Handle<String> name(String::cast(args[2]));
 
   int index;
   PropertyAttributes attributes;
@@ -1640,39 +1566,19 @@
   Handle<Object> holder =
       context->Lookup(name, flags, &index, &attributes, &binding_flags);
 
-  // In most situations, the property introduced by the const
-  // declaration should be present in the context extension object.
-  // However, because declaration and initialization are separate, the
-  // property might have been deleted (if it was introduced by eval)
-  // before we reach the initialization point.
-  //
-  // Example:
-  //
-  //    function f() { eval("delete x; const x;"); }
-  //
-  // In that case, the initialization behaves like a normal assignment
-  // to property 'x'.
   if (index >= 0) {
-    if (holder->IsContext()) {
-      // Property was found in a context.  Perform the assignment if we
-      // found some non-constant or an uninitialized constant.
-      Handle<Context> context = Handle<Context>::cast(holder);
-      if ((attributes & READ_ONLY) == 0 || context->get(index)->IsTheHole()) {
-        context->set(index, *value);
-      }
-    } else {
-      // The holder is an arguments object.
-      ASSERT((attributes & READ_ONLY) == 0);
-      Handle<JSObject> arguments(Handle<JSObject>::cast(holder));
-      RETURN_IF_EMPTY_HANDLE(
-          isolate,
-          SetElement(arguments, index, value, kNonStrictMode));
+    ASSERT(holder->IsContext());
+    // Property was found in a context.  Perform the assignment if we
+    // found some non-constant or an uninitialized constant.
+    Handle<Context> context = Handle<Context>::cast(holder);
+    if ((attributes & READ_ONLY) == 0 || context->get(index)->IsTheHole()) {
+      context->set(index, *value);
     }
     return *value;
   }
 
-  // The property could not be found, we introduce it in the global
-  // context.
+  // The property could not be found, we introduce it as a property of the
+  // global object.
   if (attributes == ABSENT) {
     Handle<JSObject> global = Handle<JSObject>(
         isolate->context()->global());
@@ -1683,29 +1589,41 @@
     return *value;
   }
 
-  // The property was present in a context extension object.
-  Handle<JSObject> context_ext = Handle<JSObject>::cast(holder);
+  // The property was present in some function's context extension object,
+  // as a property on the subject of a with, or as a property of the global
+  // object.
+  //
+  // In most situations, eval-introduced consts should still be present in
+  // the context extension object.  However, because declaration and
+  // initialization are separate, the property might have been deleted
+  // before we reach the initialization point.
+  //
+  // Example:
+  //
+  //    function f() { eval("delete x; const x;"); }
+  //
+  // In that case, the initialization behaves like a normal assignment.
+  Handle<JSObject> object = Handle<JSObject>::cast(holder);
 
-  if (*context_ext == context->extension()) {
-    // This is the property that was introduced by the const
-    // declaration.  Set it if it hasn't been set before.  NOTE: We
-    // cannot use GetProperty() to get the current value as it
-    // 'unholes' the value.
+  if (*object == context->extension()) {
+    // This is the property that was introduced by the const declaration.
+    // Set it if it hasn't been set before.  NOTE: We cannot use
+    // GetProperty() to get the current value as it 'unholes' the value.
     LookupResult lookup;
-    context_ext->LocalLookupRealNamedProperty(*name, &lookup);
+    object->LocalLookupRealNamedProperty(*name, &lookup);
     ASSERT(lookup.IsProperty());  // the property was declared
     ASSERT(lookup.IsReadOnly());  // and it was declared as read-only
 
     PropertyType type = lookup.type();
     if (type == FIELD) {
-      FixedArray* properties = context_ext->properties();
+      FixedArray* properties = object->properties();
       int index = lookup.GetFieldIndex();
       if (properties->get(index)->IsTheHole()) {
         properties->set(index, *value);
       }
     } else if (type == NORMAL) {
-      if (context_ext->GetNormalizedProperty(&lookup)->IsTheHole()) {
-        context_ext->SetNormalizedProperty(&lookup, *value);
+      if (object->GetNormalizedProperty(&lookup)->IsTheHole()) {
+        object->SetNormalizedProperty(&lookup, *value);
       }
     } else {
       // We should not reach here. Any real, named property should be
@@ -1713,13 +1631,13 @@
       UNREACHABLE();
     }
   } else {
-    // The property was found in a different context extension object.
-    // Set it if it is not a read-only property.
+    // The property was found on some other object.  Set it if it is not a
+    // read-only property.
     if ((attributes & READ_ONLY) == 0) {
       // Strict mode not needed (const disallowed in strict mode).
       RETURN_IF_EMPTY_HANDLE(
           isolate,
-          SetProperty(context_ext, name, value, attributes, kNonStrictMode));
+          SetProperty(object, name, value, attributes, kNonStrictMode));
     }
   }
 
@@ -1740,6 +1658,19 @@
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NonSmiElementStored) {
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_CHECKED(JSObject, object, 0);
+  if (FLAG_smi_only_arrays && object->HasFastSmiOnlyElements()) {
+    MaybeObject* maybe_map = object->GetElementsTransitionMap(FAST_ELEMENTS);
+    Map* map;
+    if (!maybe_map->To<Map>(&map)) return maybe_map;
+    object->set_map(Map::cast(map));
+  }
+  return *object;
+}
+
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExec) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
@@ -1825,7 +1756,7 @@
     regexp->InObjectPropertyAtPut(JSRegExp::kMultilineFieldIndex, multiline);
     regexp->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
                                   Smi::FromInt(0),
-                                  SKIP_WRITE_BARRIER);
+                                  SKIP_WRITE_BARRIER);  // It's a Smi.
     return regexp;
   }
 
@@ -2239,9 +2170,7 @@
       literals->set(JSFunction::kLiteralGlobalContextIndex,
                     context->global_context());
     }
-    // It's okay to skip the write barrier here because the literals
-    // are guaranteed to be in old space.
-    target->set_literals(*literals, SKIP_WRITE_BARRIER);
+    target->set_literals(*literals);
     target->set_next_function_link(isolate->heap()->undefined_value());
 
     if (isolate->logger()->is_logging() || CpuProfiler::is_profiling(isolate)) {
@@ -2325,7 +2254,8 @@
  public:
   explicit FixedArrayBuilder(Isolate* isolate, int initial_capacity)
       : array_(isolate->factory()->NewFixedArrayWithHoles(initial_capacity)),
-        length_(0) {
+        length_(0),
+        has_non_smi_elements_(false) {
     // Require a non-zero initial size. Ensures that doubling the size to
     // extend the array will work.
     ASSERT(initial_capacity > 0);
@@ -2333,7 +2263,8 @@
 
   explicit FixedArrayBuilder(Handle<FixedArray> backing_store)
       : array_(backing_store),
-        length_(0) {
+        length_(0),
+        has_non_smi_elements_(false) {
     // Require a non-zero initial size. Ensures that doubling the size to
     // extend the array will work.
     ASSERT(backing_store->length() > 0);
@@ -2361,12 +2292,15 @@
   }
 
   void Add(Object* value) {
+    ASSERT(!value->IsSmi());
     ASSERT(length_ < capacity());
     array_->set(length_, value);
     length_++;
+    has_non_smi_elements_ = true;
   }
 
   void Add(Smi* value) {
+    ASSERT(value->IsSmi());
     ASSERT(length_ < capacity());
     array_->set(length_, value);
     length_++;
@@ -2391,7 +2325,7 @@
   }
 
   Handle<JSArray> ToJSArray(Handle<JSArray> target_array) {
-    target_array->set_elements(*array_);
+    FACTORY->SetContent(target_array, array_);
     target_array->set_length(Smi::FromInt(length_));
     return target_array;
   }
@@ -2399,6 +2333,7 @@
  private:
   Handle<FixedArray> array_;
   int length_;
+  bool has_non_smi_elements_;
 };
 
 
@@ -2893,7 +2828,7 @@
       }
     } else {
       Vector<const uc16> subject_vector = subject_content.ToUC16Vector();
-      if (pattern->IsAsciiRepresentation()) {
+      if (pattern_content.IsAscii()) {
         FindStringIndices(isolate,
                           subject_vector,
                           pattern_content.ToAsciiVector(),
@@ -3019,7 +2954,7 @@
 
   // Shortcut for simple non-regexp global replacements
   if (is_global &&
-      regexp->TypeTag() == JSRegExp::ATOM &&
+      regexp_handle->TypeTag() == JSRegExp::ATOM &&
       compiled_replacement.simple_hint()) {
     if (subject_handle->HasOnlyAsciiChars() &&
         replacement_handle->HasOnlyAsciiChars()) {
@@ -3242,6 +3177,9 @@
 
   Address end_of_string = answer->address() + string_size;
   isolate->heap()->CreateFillerObjectAt(end_of_string, delta);
+  if (Marking::IsBlack(Marking::MarkBitFrom(*answer))) {
+    MemoryChunk::IncrementLiveBytes(answer->address(), -delta);
+  }
 
   return *answer;
 }
@@ -4001,13 +3939,13 @@
   // Slow case.
   CONVERT_DOUBLE_ARG_CHECKED(value, 0);
   if (isnan(value)) {
-    return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
+    return *isolate->factory()->nan_symbol();
   }
   if (isinf(value)) {
     if (value < 0) {
-      return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
+      return *isolate->factory()->minus_infinity_symbol();
     }
-    return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
+    return *isolate->factory()->infinity_symbol();
   }
   char* str = DoubleToRadixCString(value, radix);
   MaybeObject* result =
@@ -4023,13 +3961,13 @@
 
   CONVERT_DOUBLE_ARG_CHECKED(value, 0);
   if (isnan(value)) {
-    return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
+    return *isolate->factory()->nan_symbol();
   }
   if (isinf(value)) {
     if (value < 0) {
-      return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
+      return *isolate->factory()->minus_infinity_symbol();
     }
-    return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
+    return *isolate->factory()->infinity_symbol();
   }
   CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
   int f = FastD2I(f_number);
@@ -4048,13 +3986,13 @@
 
   CONVERT_DOUBLE_ARG_CHECKED(value, 0);
   if (isnan(value)) {
-    return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
+    return *isolate->factory()->nan_symbol();
   }
   if (isinf(value)) {
     if (value < 0) {
-      return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
+      return *isolate->factory()->minus_infinity_symbol();
     }
-    return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
+    return *isolate->factory()->infinity_symbol();
   }
   CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
   int f = FastD2I(f_number);
@@ -4073,13 +4011,13 @@
 
   CONVERT_DOUBLE_ARG_CHECKED(value, 0);
   if (isnan(value)) {
-    return isolate->heap()->AllocateStringFromAscii(CStrVector("NaN"));
+    return *isolate->factory()->nan_symbol();
   }
   if (isinf(value)) {
     if (value < 0) {
-      return isolate->heap()->AllocateStringFromAscii(CStrVector("-Infinity"));
+      return *isolate->factory()->minus_infinity_symbol();
     }
-    return isolate->heap()->AllocateStringFromAscii(CStrVector("Infinity"));
+    return *isolate->factory()->infinity_symbol();
   }
   CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
   int f = FastD2I(f_number);
@@ -4269,7 +4207,7 @@
   CONVERT_CHECKED(String, name, args[1]);
   CONVERT_CHECKED(Smi, flag_setter, args[2]);
   Object* fun = args[3];
-  RUNTIME_ASSERT(fun->IsJSFunction() || fun->IsUndefined());
+  RUNTIME_ASSERT(fun->IsSpecFunction() || fun->IsUndefined());
   CONVERT_CHECKED(Smi, flag_attr, args[4]);
   int unchecked = flag_attr->value();
   RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
@@ -4437,6 +4375,14 @@
     return isolate->Throw(*error);
   }
 
+  if (object->IsJSProxy()) {
+    bool has_pending_exception = false;
+    Handle<Object> name = Execution::ToString(key, &has_pending_exception);
+    if (has_pending_exception) return Failure::Exception();
+    return JSProxy::cast(*object)->SetProperty(
+        String::cast(*name), *value, attr, strict_mode);
+  }
+
   // If the object isn't a JavaScript object, we ignore the store.
   if (!object->IsJSObject()) return *value;
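
The new JSProxy branch above stringifies the key before handing the store to
the proxy.  A rough sketch of what that looks like from script, assuming d8
with --harmony-proxies and the Proxy.create API of this period; the handler
shown here (a set trap plus a getPropertyDescriptor fallback) is an
illustrative assumption, not taken from this patch:

    // Hypothetical handler; the point is only that the numeric key arrives
    // as a string.
    var p = Proxy.create({
      set: function(receiver, name, value) {
        print(typeof name + " " + name);  // "string 42"
        return true;
      },
      getPropertyDescriptor: function(name) { return undefined; }
    });
    p[42] = "x";
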
 
@@ -4556,7 +4502,7 @@
 
   // Check if the given key is an array index.
   uint32_t index;
-  if (receiver->IsJSObject() && key->ToArrayIndex(&index)) {
+  if (key->ToArrayIndex(&index)) {
     // In Firefox/SpiderMonkey, Safari and Opera you can access the
     // characters of a string using [] notation.  In the case of a
     // String object we just need to redirect the deletion to the
@@ -4567,8 +4513,7 @@
       return isolate->heap()->true_value();
     }
 
-    return JSObject::cast(*receiver)->DeleteElement(
-        index, JSReceiver::FORCE_DELETION);
+    return receiver->DeleteElement(index, JSReceiver::FORCE_DELETION);
   }
 
   Handle<String> key_string;
@@ -4730,29 +4675,24 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_HasProperty) {
   NoHandleAllocation na;
   ASSERT(args.length() == 2);
+  CONVERT_CHECKED(JSReceiver, receiver, args[0]);
+  CONVERT_CHECKED(String, key, args[1]);
 
-  // Only JS receivers can have properties.
-  if (args[0]->IsJSReceiver()) {
-    JSReceiver* receiver = JSReceiver::cast(args[0]);
-    CONVERT_CHECKED(String, key, args[1]);
-    if (receiver->HasProperty(key)) return isolate->heap()->true_value();
-  }
-  return isolate->heap()->false_value();
+  bool result = receiver->HasProperty(key);
+  if (isolate->has_pending_exception()) return Failure::Exception();
+  return isolate->heap()->ToBoolean(result);
 }
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_HasElement) {
   NoHandleAllocation na;
   ASSERT(args.length() == 2);
+  CONVERT_CHECKED(JSReceiver, receiver, args[0]);
+  CONVERT_CHECKED(Smi, index, args[1]);
 
-  // Only JS objects can have elements.
-  if (args[0]->IsJSObject()) {
-    JSObject* object = JSObject::cast(args[0]);
-    CONVERT_CHECKED(Smi, index_obj, args[1]);
-    uint32_t index = index_obj->value();
-    if (object->HasElement(index)) return isolate->heap()->true_value();
-  }
-  return isolate->heap()->false_value();
+  bool result = receiver->HasElement(index->value());
+  if (isolate->has_pending_exception()) return Failure::Exception();
+  return isolate->heap()->ToBoolean(result);
 }
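
Both runtimes above now insist on a JSReceiver and re-throw any exception a
proxy trap raises rather than quietly answering false.  A sketch of the
observable difference, again assuming d8 with --harmony-proxies and the
Proxy.create API of this period:

    // A proxy whose fundamental trap throws; 'in' now surfaces the error
    // instead of returning false.
    var p = Proxy.create({
      getPropertyDescriptor: function(name) { throw new Error("trap " + name); }
    });
    try {
      "answer" in p;
    } catch (e) {
      print(e.message);  // "trap answer"
    }
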
 
 
@@ -4765,7 +4705,37 @@
 
   uint32_t index;
   if (key->AsArrayIndex(&index)) {
-    return isolate->heap()->ToBoolean(object->HasElement(index));
+    JSObject::LocalElementType type = object->HasLocalElement(index);
+    switch (type) {
+      case JSObject::UNDEFINED_ELEMENT:
+      case JSObject::STRING_CHARACTER_ELEMENT:
+        return isolate->heap()->false_value();
+      case JSObject::INTERCEPTED_ELEMENT:
+      case JSObject::FAST_ELEMENT:
+        return isolate->heap()->true_value();
+      case JSObject::DICTIONARY_ELEMENT: {
+        if (object->IsJSGlobalProxy()) {
+          Object* proto = object->GetPrototype();
+          if (proto->IsNull()) {
+            return isolate->heap()->false_value();
+          }
+          ASSERT(proto->IsJSGlobalObject());
+          object = JSObject::cast(proto);
+        }
+        FixedArray* elements = FixedArray::cast(object->elements());
+        NumberDictionary* dictionary = NULL;
+        if (elements->map() ==
+            isolate->heap()->non_strict_arguments_elements_map()) {
+          dictionary = NumberDictionary::cast(elements->get(1));
+        } else {
+          dictionary = NumberDictionary::cast(elements);
+        }
+        int entry = dictionary->FindEntry(index);
+        ASSERT(entry != NumberDictionary::kNotFound);
+        PropertyDetails details = dictionary->DetailsAt(entry);
+        return isolate->heap()->ToBoolean(!details.IsDontEnum());
+      }
+    }
   }
 
   PropertyAttributes att = object->GetLocalPropertyAttribute(key);
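
The dictionary branch above consults the element's DONT_ENUM attribute, so
non-enumerable array indices are reported correctly.  Assuming this hunk
belongs to the enumerability runtime (its header lies above the hunk), a
plain-JavaScript check that needs no flags:

    // Defining a non-enumerable index forces dictionary elements, the case
    // handled by the DICTIONARY_ELEMENT branch above.
    var a = [];
    Object.defineProperty(a, "0", { value: 1, enumerable: false });
    a[1] = 2;
    print(a.propertyIsEnumerable(0));  // false
    print(a.propertyIsEnumerable(1));  // true
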
@@ -5579,7 +5549,7 @@
   StringType* new_string = StringType::cast(new_object);
 
   Char* write_cursor = reinterpret_cast<Char*>(
-      new_string->address() + SeqAsciiString::kHeaderSize);
+      new_string->address() + SeqString::kHeaderSize);
   if (comma) *(write_cursor++) = ',';
   *(write_cursor++) = '"';
 
@@ -5667,16 +5637,15 @@
   StringType* new_string = StringType::cast(new_object);
   ASSERT(isolate->heap()->new_space()->Contains(new_string));
 
-  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
   Char* write_cursor = reinterpret_cast<Char*>(
-      new_string->address() + SeqAsciiString::kHeaderSize);
+      new_string->address() + SeqString::kHeaderSize);
   if (comma) *(write_cursor++) = ',';
   write_cursor = WriteQuoteJsonString<Char, Char>(isolate,
                                                   write_cursor,
                                                   characters);
   int final_length = static_cast<int>(
       write_cursor - reinterpret_cast<Char*>(
-          new_string->address() + SeqAsciiString::kHeaderSize));
+          new_string->address() + SeqString::kHeaderSize));
   isolate->heap()->new_space()->
       template ShrinkStringAtAllocationBoundary<StringType>(
           new_string, final_length);
@@ -5754,9 +5723,8 @@
   StringType* new_string = StringType::cast(new_object);
   ASSERT(isolate->heap()->new_space()->Contains(new_string));
 
-  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
   Char* write_cursor = reinterpret_cast<Char*>(
-      new_string->address() + SeqAsciiString::kHeaderSize);
+      new_string->address() + SeqString::kHeaderSize);
   *(write_cursor++) = '[';
   for (int i = 0; i < length; i++) {
     if (i != 0) *(write_cursor++) = ',';
@@ -5777,7 +5745,7 @@
 
   int final_length = static_cast<int>(
       write_cursor - reinterpret_cast<Char*>(
-          new_string->address() + SeqAsciiString::kHeaderSize));
+          new_string->address() + SeqString::kHeaderSize));
   isolate->heap()->new_space()->
       template ShrinkStringAtAllocationBoundary<StringType>(
           new_string, final_length);
@@ -6229,6 +6197,8 @@
   int part_count = indices.length();
 
   Handle<JSArray> result = isolate->factory()->NewJSArray(part_count);
+  MaybeObject* maybe_result = result->EnsureCanContainNonSmiElements();
+  if (maybe_result->IsFailure()) return maybe_result;
   result->set_length(Smi::FromInt(part_count));
 
   ASSERT(result->HasFastElements());
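
The transition added above is needed because the parts stored into the result
are strings, never smis, so a smi-only backing store must be widened before
the first write.  Assuming this is the string-split helper (its header lies
above the hunk), the script-level effect is simply:

    // Every element of a split result is a string, so the result array can
    // never stay in smi-only mode.
    var parts = "a,b,c".split(",");
    print(parts.length);     // 3
    print(typeof parts[0]);  // "string"
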
@@ -6275,11 +6245,11 @@
   FixedArray* ascii_cache = heap->single_character_string_cache();
   Object* undefined = heap->undefined_value();
   int i;
+  WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc);
   for (i = 0; i < length; ++i) {
     Object* value = ascii_cache->get(chars[i]);
     if (value == undefined) break;
-    ASSERT(!heap->InNewSpace(value));
-    elements->set(i, value, SKIP_WRITE_BARRIER);
+    elements->set(i, value, mode);
   }
   if (i < length) {
     ASSERT(Smi::FromInt(0) == 0);
@@ -6603,6 +6573,9 @@
   // This assumption is used by the slice encoding in one or two smis.
   ASSERT(Smi::kMaxValue >= String::kMaxLength);
 
+  MaybeObject* maybe_result = array->EnsureCanContainNonSmiElements();
+  if (maybe_result->IsFailure()) return maybe_result;
+
   int special_length = special->length();
   if (!array->HasFastElements()) {
     return isolate->Throw(isolate->heap()->illegal_argument_symbol());
@@ -6830,7 +6803,8 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 3);
   CONVERT_CHECKED(JSArray, elements_array, args[0]);
-  RUNTIME_ASSERT(elements_array->HasFastElements());
+  RUNTIME_ASSERT(elements_array->HasFastElements() ||
+                 elements_array->HasFastSmiOnlyElements());
   CONVERT_NUMBER_CHECKED(uint32_t, array_length, Uint32, args[1]);
   CONVERT_CHECKED(String, separator, args[2]);
   // elements_array is fast-mode JSarray of alternating positions
@@ -7997,7 +7971,7 @@
   int bound_argc = 0;
   if (!args[1]->IsNull()) {
     CONVERT_ARG_CHECKED(JSArray, params, 1);
-    RUNTIME_ASSERT(params->HasFastElements());
+    RUNTIME_ASSERT(params->HasFastTypeElements());
     bound_args = Handle<FixedArray>(FixedArray::cast(params->elements()));
     bound_argc = Smi::cast(params->length())->value();
   }
@@ -8307,6 +8281,8 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
+  // The least significant bit (after untagging) indicates whether the
+  // function is currently optimized, regardless of reason.
   if (!V8::UseCrankshaft()) {
     return Smi::FromInt(4);  // 4 == "never".
   }
@@ -8479,7 +8455,7 @@
      argv[i] = Handle<Object>(object);
   }
 
-  bool threw = false;
+  bool threw;
   Handle<JSReceiver> hfun(fun);
   Handle<Object> hreceiver(receiver);
   Handle<Object> result = Execution::Call(
@@ -8646,18 +8622,10 @@
   }
 
   // The slot was found in a JSObject, either a context extension object,
-  // the global object, or an arguments object.  Try to delete it
-  // (respecting DONT_DELETE).  For consistency with V8's usual behavior,
-  // which allows deleting all parameters in functions that mention
-  // 'arguments', we do this even for the case of slots found on an
-  // arguments object.  The slot was found on an arguments object if the
-  // index is non-negative.
+  // the global object, or the subject of a with.  Try to delete it
+  // (respecting DONT_DELETE).
   Handle<JSObject> object = Handle<JSObject>::cast(holder);
-  if (index >= 0) {
-    return object->DeleteElement(index, JSReceiver::NORMAL_DELETION);
-  } else {
-    return object->DeleteProperty(*name, JSReceiver::NORMAL_DELETION);
-  }
+  return object->DeleteProperty(*name, JSReceiver::NORMAL_DELETION);
 }
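
With the arguments-object special case gone, the holders left are context
extension objects, the global object and with subjects, as the updated
comment says.  The with-subject case in plain, non-strict JavaScript:

    // Deleting an unqualified name that resolves to a property of a with
    // subject removes it from that object and yields true.
    var subject = { x: 1 };
    with (subject) {
      print(delete x);      // true
    }
    print("x" in subject);  // false
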
 
 
@@ -8742,24 +8710,19 @@
                                           &attributes,
                                           &binding_flags);
 
-  // If the index is non-negative, the slot has been found in a local
-  // variable or a parameter. Read it from the context object or the
-  // arguments object.
+  // If the index is non-negative, the slot has been found in a context.
   if (index >= 0) {
-    // If the "property" we were looking for is a local variable or an
-    // argument in a context, the receiver is the global object; see
-    // ECMA-262, 3rd., 10.1.6 and 10.2.3.
+    ASSERT(holder->IsContext());
+    // If the "property" we were looking for is a local variable, the
+    // receiver is the global object; see ECMA-262, 3rd., 10.1.6 and 10.2.3.
     //
-    // Use the hole as the receiver to signal that the receiver is
-    // implicit and that the global receiver should be used.
+    // Use the hole as the receiver to signal that the receiver is implicit
+    // and that the global receiver should be used (as distinguished from an
+    // explicit receiver that happens to be a global object).
     Handle<Object> receiver = isolate->factory()->the_hole_value();
-    MaybeObject* value = (holder->IsContext())
-        ? Context::cast(*holder)->get(index)
-        : JSObject::cast(*holder)->GetElement(index);
+    Object* value = Context::cast(*holder)->get(index);
     // Check for uninitialized bindings.
-    if (holder->IsContext() &&
-        binding_flags == MUTABLE_CHECK_INITIALIZED &&
-        value->IsTheHole()) {
+    if (binding_flags == MUTABLE_CHECK_INITIALIZED && value->IsTheHole()) {
       Handle<Object> reference_error =
           isolate->factory()->NewReferenceError("not_defined",
                                                 HandleVector(&name, 1));
@@ -8769,25 +8732,18 @@
     }
   }
 
-  // If the holder is found, we read the property from it.
-  if (!holder.is_null() && holder->IsJSObject()) {
-    ASSERT(Handle<JSObject>::cast(holder)->HasProperty(*name));
-    JSObject* object = JSObject::cast(*holder);
-    Object* receiver;
-    if (object->IsGlobalObject()) {
-      receiver = GlobalObject::cast(object)->global_receiver();
-    } else if (context->is_exception_holder(*holder)) {
-      // Use the hole as the receiver to signal that the receiver is
-      // implicit and that the global receiver should be used.
-      receiver = isolate->heap()->the_hole_value();
-    } else {
-      receiver = ComputeReceiverForNonGlobal(isolate, object);
-    }
-
+  // Otherwise, if the slot was found, the holder is a context extension
+  // object, the subject of a with, or a global object.  We read the named
+  // property from it.

+  if (!holder.is_null()) {
+    Handle<JSObject> object = Handle<JSObject>::cast(holder);
+    ASSERT(object->HasProperty(*name));
     // GetProperty below can cause GC.
-    Handle<Object> receiver_handle(receiver);
+    Handle<Object> receiver_handle(object->IsGlobalObject()
+        ? GlobalObject::cast(*object)->global_receiver()
+        : ComputeReceiverForNonGlobal(isolate, *object));
 
-    // No need to unhole the value here. This is taken care of by the
+    // No need to unhole the value here.  This is taken care of by the
     // GetProperty function.
     MaybeObject* value = object->GetProperty(*name);
     return MakePair(value, *receiver_handle);
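
The receiver logic above boils down to: context slots get the implicit (hole,
later global) receiver, while names found on a with subject or the global
object use that object as the receiver.  A small plain-JavaScript
illustration of the second case:

    // Inside 'with', the unqualified call sees the with subject as its
    // receiver, matching the ComputeReceiverForNonGlobal path above.
    var obj = { who: function() { return this === obj; } };
    with (obj) {
      print(who());  // true
    }
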
@@ -8840,45 +8796,37 @@
                                           &binding_flags);
 
   if (index >= 0) {
-    if (holder->IsContext()) {
-      Handle<Context> context = Handle<Context>::cast(holder);
-      if (binding_flags == MUTABLE_CHECK_INITIALIZED &&
-          context->get(index)->IsTheHole()) {
-        Handle<Object> error =
-            isolate->factory()->NewReferenceError("not_defined",
-                                                  HandleVector(&name, 1));
-        return isolate->Throw(*error);
-      }
-      // Ignore if read_only variable.
-      if ((attributes & READ_ONLY) == 0) {
-        // Context is a fixed array and set cannot fail.
-        context->set(index, *value);
-      } else if (strict_mode == kStrictMode) {
-        // Setting read only property in strict mode.
-        Handle<Object> error =
-            isolate->factory()->NewTypeError("strict_cannot_assign",
-                                             HandleVector(&name, 1));
-        return isolate->Throw(*error);
-      }
-    } else {
-      ASSERT((attributes & READ_ONLY) == 0);
-      Handle<Object> result =
-          SetElement(Handle<JSObject>::cast(holder), index, value, strict_mode);
-      if (result.is_null()) {
-        ASSERT(isolate->has_pending_exception());
-        return Failure::Exception();
-      }
+    // The property was found in a context slot.
+    Handle<Context> context = Handle<Context>::cast(holder);
+    if (binding_flags == MUTABLE_CHECK_INITIALIZED &&
+        context->get(index)->IsTheHole()) {
+      Handle<Object> error =
+          isolate->factory()->NewReferenceError("not_defined",
+                                                HandleVector(&name, 1));
+      return isolate->Throw(*error);
+    }
+    // Ignore the store if the variable is read-only.
+    if ((attributes & READ_ONLY) == 0) {
+      // Context is a fixed array and set cannot fail.
+      context->set(index, *value);
+    } else if (strict_mode == kStrictMode) {
+      // Setting read only property in strict mode.
+      Handle<Object> error =
+          isolate->factory()->NewTypeError("strict_cannot_assign",
+                                           HandleVector(&name, 1));
+      return isolate->Throw(*error);
     }
     return *value;
   }
 
-  // Slow case: The property is not in a FixedArray context.
-  // It is either in an JSObject extension context or it was not found.
-  Handle<JSObject> context_ext;
+  // Slow case: The property is not in a context slot.  It is either a
+  // property of a context extension object, of the subject of a with, or
+  // of the global object.
+  Handle<JSObject> object;
 
   if (!holder.is_null()) {
-    // The property exists in the extension context.
-    context_ext = Handle<JSObject>::cast(holder);
+    // The property exists on the holder.
+    object = Handle<JSObject>::cast(holder);
   } else {
     // The property was not found.
     ASSERT(attributes == ABSENT);
@@ -8886,22 +8834,21 @@
     if (strict_mode == kStrictMode) {
       // Throw in strict mode (assignment to undefined variable).
       Handle<Object> error =
-        isolate->factory()->NewReferenceError(
-            "not_defined", HandleVector(&name, 1));
+          isolate->factory()->NewReferenceError(
+              "not_defined", HandleVector(&name, 1));
       return isolate->Throw(*error);
     }
-    // In non-strict mode, the property is stored in the global context.
+    // In non-strict mode, the property is added to the global object.
     attributes = NONE;
-    context_ext = Handle<JSObject>(isolate->context()->global());
+    object = Handle<JSObject>(isolate->context()->global());
   }
 
-  // Set the property, but ignore if read_only variable on the context
-  // extension object itself.
+  // Set the property if it's not read only or doesn't yet exist.
   if ((attributes & READ_ONLY) == 0 ||
-      (context_ext->GetLocalPropertyAttribute(*name) == ABSENT)) {
+      (object->GetLocalPropertyAttribute(*name) == ABSENT)) {
     RETURN_IF_EMPTY_HANDLE(
         isolate,
-        SetProperty(context_ext, name, value, NONE, strict_mode));
+        SetProperty(object, name, value, NONE, strict_mode));
   } else if (strict_mode == kStrictMode && (attributes & READ_ONLY) != 0) {
     // Setting read only property in strict mode.
     Handle<Object> error =
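
Taken together, the store paths above mean: assignment to a declared binding
respects READ_ONLY, and assignment to a name that is found nowhere either
lands on the global object (classic mode) or throws a ReferenceError (strict
mode).  Exercised through a lookup slot via eval, plain JavaScript:

    // Classic mode: the undeclared name is added to the global object.
    (function() { eval("classic_leak = 1"); })();
    print(this.classic_leak);  // 1
    // Strict mode: the same store throws a ReferenceError instead.
    try {
      (function() { "use strict"; eval("strict_leak = 1"); })();
    } catch (e) {
      print(e instanceof ReferenceError);  // true
    }
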
@@ -9121,6 +9068,10 @@
   FlattenString(str);
 
   CONVERT_ARG_CHECKED(JSArray, output, 1);
+
+  MaybeObject* maybe_result_array =
+      output->EnsureCanContainNonSmiElements();
+  if (maybe_result_array->IsFailure()) return maybe_result_array;
   RUNTIME_ASSERT(output->HasFastElements());
 
   AssertNoAllocation no_allocation;
@@ -9306,6 +9257,9 @@
   PropertyAttributes attributes = ABSENT;
   BindingFlags binding_flags;
   while (true) {
+    // Don't follow the context chain in Context::Lookup; instead, loop up
+    // the context chain here so that we know the context in which 'eval'
+    // was found.
     receiver = context->Lookup(isolate->factory()->eval_symbol(),
                                FOLLOW_PROTOTYPE_CHAIN,
                                &index,
@@ -9421,7 +9375,7 @@
   ASSERT(args.length() == 2);
   CONVERT_CHECKED(JSArray, array, args[0]);
   CONVERT_CHECKED(JSObject, element, args[1]);
-  RUNTIME_ASSERT(array->HasFastElements());
+  RUNTIME_ASSERT(array->HasFastElements() || array->HasFastSmiOnlyElements());
   int length = Smi::cast(array->length())->value();
   FixedArray* elements = FixedArray::cast(array->elements());
   for (int i = 0; i < length; i++) {
@@ -9504,9 +9458,11 @@
         isolate_->factory()->NewNumber(static_cast<double>(index_offset_));
     Handle<Map> map;
     if (fast_elements_) {
-      map = isolate_->factory()->GetFastElementsMap(Handle<Map>(array->map()));
+      map = isolate_->factory()->GetElementsTransitionMap(array,
+                                                          FAST_ELEMENTS);
     } else {
-      map = isolate_->factory()->GetSlowElementsMap(Handle<Map>(array->map()));
+      map = isolate_->factory()->GetElementsTransitionMap(array,
+                                                          DICTIONARY_ELEMENTS);
     }
     array->set_map(*map);
     array->set_length(*length);
@@ -9650,6 +9606,7 @@
                                   List<uint32_t>* indices) {
   ElementsKind kind = object->GetElementsKind();
   switch (kind) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       Handle<FixedArray> elements(FixedArray::cast(object->elements()));
       uint32_t length = static_cast<uint32_t>(elements->length());
@@ -9769,6 +9726,7 @@
                             ArrayConcatVisitor* visitor) {
   uint32_t length = static_cast<uint32_t>(receiver->length()->Number());
   switch (receiver->GetElementsKind()) {
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_ELEMENTS: {
       // Run through the elements FixedArray and use HasElement and GetElement
       // to check the prototype for missing elements.
@@ -9997,15 +9955,17 @@
   CONVERT_CHECKED(JSArray, to, args[1]);
   FixedArrayBase* new_elements = from->elements();
   MaybeObject* maybe_new_map;
+  ElementsKind elements_kind;
   if (new_elements->map() == isolate->heap()->fixed_array_map() ||
       new_elements->map() == isolate->heap()->fixed_cow_array_map()) {
-    maybe_new_map = to->map()->GetFastElementsMap();
+    elements_kind = FAST_ELEMENTS;
   } else if (new_elements->map() ==
              isolate->heap()->fixed_double_array_map()) {
-    maybe_new_map = to->map()->GetFastDoubleElementsMap();
+    elements_kind = FAST_DOUBLE_ELEMENTS;
   } else {
-    maybe_new_map = to->map()->GetSlowElementsMap();
+    elements_kind = DICTIONARY_ELEMENTS;
   }
+  maybe_new_map = to->GetElementsTransitionMap(elements_kind);
   Object* new_map;
   if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
   to->set_map(Map::cast(new_map));
@@ -10090,7 +10050,9 @@
     }
     return *isolate->factory()->NewJSArrayWithElements(keys);
   } else {
-    ASSERT(array->HasFastElements() || array->HasFastDoubleElements());
+    ASSERT(array->HasFastElements() ||
+           array->HasFastSmiOnlyElements() ||
+           array->HasFastDoubleElements());
     Handle<FixedArray> single_interval = isolate->factory()->NewFixedArray(2);
     // -1 means start of array.
     single_interval->set(0, Smi::FromInt(-1));
@@ -10209,8 +10171,8 @@
     case CALLBACKS: {
       Object* structure = result->GetCallbackObject();
       if (structure->IsForeign() || structure->IsAccessorInfo()) {
-        MaybeObject* maybe_value = receiver->GetPropertyWithCallback(
-            receiver, structure, name, result->holder());
+        MaybeObject* maybe_value = result->holder()->GetPropertyWithCallback(
+            receiver, structure, name);
         if (!maybe_value->ToObject(&value)) {
           if (maybe_value->IsRetryAfterGC()) return maybe_value;
           ASSERT(maybe_value->IsException());
@@ -11460,48 +11422,53 @@
   int target_start_position = RelocInfo::kNoPosition;
   Handle<SharedFunctionInfo> target;
   while (!done) {
-    HeapIterator iterator;
-    for (HeapObject* obj = iterator.next();
-         obj != NULL; obj = iterator.next()) {
-      if (obj->IsSharedFunctionInfo()) {
-        Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(obj));
-        if (shared->script() == *script) {
-          // If the SharedFunctionInfo found has the requested script data and
-          // contains the source position it is a candidate.
-          int start_position = shared->function_token_position();
-          if (start_position == RelocInfo::kNoPosition) {
-            start_position = shared->start_position();
-          }
-          if (start_position <= position &&
-              position <= shared->end_position()) {
-            // If there is no candidate or this function is within the current
-            // candidate this is the new candidate.
-            if (target.is_null()) {
-              target_start_position = start_position;
-              target = shared;
-            } else {
-              if (target_start_position == start_position &&
-                  shared->end_position() == target->end_position()) {
-                  // If a top-level function contain only one function
-                  // declartion the source for the top-level and the function is
-                  // the same. In that case prefer the non top-level function.
-                if (!shared->is_toplevel()) {
+    { // Extra scope for iterator and no-allocation.
+      isolate->heap()->EnsureHeapIsIterable();
+      AssertNoAllocation no_alloc_during_heap_iteration;
+      HeapIterator iterator;
+      for (HeapObject* obj = iterator.next();
+           obj != NULL; obj = iterator.next()) {
+        if (obj->IsSharedFunctionInfo()) {
+          Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(obj));
+          if (shared->script() == *script) {
+            // If the SharedFunctionInfo found has the requested script data and
+            // contains the source position it is a candidate.
+            int start_position = shared->function_token_position();
+            if (start_position == RelocInfo::kNoPosition) {
+              start_position = shared->start_position();
+            }
+            if (start_position <= position &&
+                position <= shared->end_position()) {
+              // If there is no candidate or this function is within the current
+              // candidate this is the new candidate.
+              if (target.is_null()) {
+                target_start_position = start_position;
+                target = shared;
+              } else {
+                if (target_start_position == start_position &&
+                    shared->end_position() == target->end_position()) {
+                    // If a top-level function contains only one function
+                    // declaration, the source for the top-level and the
+                    // function is the same. In that case prefer the non
+                    // top-level function.
+                  if (!shared->is_toplevel()) {
+                    target_start_position = start_position;
+                    target = shared;
+                  }
+                } else if (target_start_position <= start_position &&
+                           shared->end_position() <= target->end_position()) {
+                  // This containment check includes equality as a function
+                  // inside a top-level function can share either start or end
+                  // position with the top-level function.
                   target_start_position = start_position;
                   target = shared;
                 }
-              } else if (target_start_position <= start_position &&
-                         shared->end_position() <= target->end_position()) {
-                // This containment check includes equality as a function inside
-                // a top-level function can share either start or end position
-                // with the top-level function.
-                target_start_position = start_position;
-                target = shared;
               }
             }
           }
         }
-      }
-    }
+      }  // End for loop.
+    }  // End No allocation scope.
 
     if (target.is_null()) {
       return isolate->heap()->undefined_value();
@@ -11516,7 +11483,7 @@
       // functions which might contain the requested source position.
       CompileLazyShared(target, KEEP_EXCEPTION);
     }
-  }
+  }  // End while loop.
 
   return *target;
 }
@@ -11966,6 +11933,8 @@
   Handle<Object> result =
     Execution::Call(compiled_function, receiver, 0, NULL,
                     &has_pending_exception);
+  // Clear the oneshot breakpoints so that the debugger does not step further.
+  isolate->debug()->ClearStepping();
   if (has_pending_exception) return Failure::Exception();
   return *result;
 }
@@ -11993,13 +11962,14 @@
   // Return result as a JS array.
   Handle<JSObject> result =
       isolate->factory()->NewJSObject(isolate->array_function());
-  Handle<JSArray>::cast(result)->SetContent(*instances);
+  isolate->factory()->SetContent(Handle<JSArray>::cast(result), instances);
   return *result;
 }
 
 
 // Helper function used by Runtime_DebugReferencedBy below.
-static int DebugReferencedBy(JSObject* target,
+static int DebugReferencedBy(HeapIterator* iterator,
+                             JSObject* target,
                              Object* instance_filter, int max_references,
                              FixedArray* instances, int instances_size,
                              JSFunction* arguments_function) {
@@ -12009,9 +11979,8 @@
   // Iterate the heap.
   int count = 0;
   JSObject* last = NULL;
-  HeapIterator iterator;
   HeapObject* heap_obj = NULL;
-  while (((heap_obj = iterator.next()) != NULL) &&
+  while (((heap_obj = iterator->next()) != NULL) &&
          (max_references == 0 || count < max_references)) {
     // Only look at all JSObjects.
     if (heap_obj->IsJSObject()) {
@@ -12076,7 +12045,11 @@
   ASSERT(args.length() == 3);
 
   // First perform a full GC in order to avoid references from dead objects.
-  isolate->heap()->CollectAllGarbage(false);
+  isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+  // The heap iterator reserves the right to do a GC to make the heap iterable.
+  // Due to the GC above we know it won't need to do that, but it seems cleaner
+  // to get the heap iterator constructed before we start having raw
+  // Object* locals that are not protected by handles.
 
   // Check parameters.
   CONVERT_CHECKED(JSObject, target, args[0]);
@@ -12086,6 +12059,7 @@
   CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[2]);
   RUNTIME_ASSERT(max_references >= 0);
 
+
   // Get the constructor function for context extension and arguments array.
   JSObject* arguments_boilerplate =
       isolate->context()->global_context()->arguments_boilerplate();
@@ -12094,7 +12068,9 @@
 
   // Get the number of referencing objects.
   int count;
-  count = DebugReferencedBy(target, instance_filter, max_references,
+  HeapIterator heap_iterator;
+  count = DebugReferencedBy(&heap_iterator,
+                            target, instance_filter, max_references,
                             NULL, 0, arguments_function);
 
   // Allocate an array to hold the result.
@@ -12105,30 +12081,34 @@
   FixedArray* instances = FixedArray::cast(object);
 
   // Fill the referencing objects.
-  count = DebugReferencedBy(target, instance_filter, max_references,
+  // AllocateFixedArray above does not make the heap non-iterable.
+  ASSERT(HEAP->IsHeapIterable());
+  HeapIterator heap_iterator2;
+  count = DebugReferencedBy(&heap_iterator2,
+                            target, instance_filter, max_references,
                             instances, count, arguments_function);
 
   // Return result as JS array.
   Object* result;
-  { MaybeObject* maybe_result = isolate->heap()->AllocateJSObject(
+  MaybeObject* maybe_result = isolate->heap()->AllocateJSObject(
       isolate->context()->global_context()->array_function());
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-  JSArray::cast(result)->SetContent(instances);
-  return result;
+  if (!maybe_result->ToObject(&result)) return maybe_result;
+  return JSArray::cast(result)->SetContent(instances);
 }
 
 
 // Helper function used by Runtime_DebugConstructedBy below.
-static int DebugConstructedBy(JSFunction* constructor, int max_references,
-                              FixedArray* instances, int instances_size) {
+static int DebugConstructedBy(HeapIterator* iterator,
+                              JSFunction* constructor,
+                              int max_references,
+                              FixedArray* instances,
+                              int instances_size) {
   AssertNoAllocation no_alloc;
 
   // Iterate the heap.
   int count = 0;
-  HeapIterator iterator;
   HeapObject* heap_obj = NULL;
-  while (((heap_obj = iterator.next()) != NULL) &&
+  while (((heap_obj = iterator->next()) != NULL) &&
          (max_references == 0 || count < max_references)) {
     // Only look at all JSObjects.
     if (heap_obj->IsJSObject()) {
@@ -12156,7 +12136,7 @@
   ASSERT(args.length() == 2);
 
   // First perform a full GC in order to avoid dead objects.
-  isolate->heap()->CollectAllGarbage(false);
+  isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
 
   // Check parameters.
   CONVERT_CHECKED(JSFunction, constructor, args[0]);
@@ -12165,7 +12145,12 @@
 
   // Get the number of referencing objects.
   int count;
-  count = DebugConstructedBy(constructor, max_references, NULL, 0);
+  HeapIterator heap_iterator;
+  count = DebugConstructedBy(&heap_iterator,
+                             constructor,
+                             max_references,
+                             NULL,
+                             0);
 
   // Allocate an array to hold the result.
   Object* object;
@@ -12174,8 +12159,14 @@
   }
   FixedArray* instances = FixedArray::cast(object);
 
+  ASSERT(HEAP->IsHeapIterable());
   // Fill the referencing objects.
-  count = DebugConstructedBy(constructor, max_references, instances, count);
+  HeapIterator heap_iterator2;
+  count = DebugConstructedBy(&heap_iterator2,
+                             constructor,
+                             max_references,
+                             instances,
+                             count);
 
   // Return result as JS array.
   Object* result;
@@ -12183,8 +12174,7 @@
           isolate->context()->global_context()->array_function());
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
-  JSArray::cast(result)->SetContent(instances);
-  return result;
+  return JSArray::cast(result)->SetContent(instances);
 }
 
 
@@ -12248,14 +12238,15 @@
 }
 
 
-static int FindSharedFunctionInfosForScript(Script* script,
+static int FindSharedFunctionInfosForScript(HeapIterator* iterator,
+                                            Script* script,
                                             FixedArray* buffer) {
   AssertNoAllocation no_allocations;
-
   int counter = 0;
   int buffer_size = buffer->length();
-  HeapIterator iterator;
-  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+  for (HeapObject* obj = iterator->next();
+       obj != NULL;
+       obj = iterator->next()) {
     ASSERT(obj != NULL);
     if (!obj->IsSharedFunctionInfo()) {
       continue;
@@ -12281,16 +12272,30 @@
   HandleScope scope(isolate);
   CONVERT_CHECKED(JSValue, script_value, args[0]);
 
+
   Handle<Script> script = Handle<Script>(Script::cast(script_value->value()));
 
   const int kBufferSize = 32;
 
   Handle<FixedArray> array;
   array = isolate->factory()->NewFixedArray(kBufferSize);
-  int number = FindSharedFunctionInfosForScript(*script, *array);
+  int number;
+  {
+    isolate->heap()->EnsureHeapIsIterable();
+    AssertNoAllocation no_allocations;
+    HeapIterator heap_iterator;
+    Script* scr = *script;
+    FixedArray* arr = *array;
+    number = FindSharedFunctionInfosForScript(&heap_iterator, scr, arr);
+  }
   if (number > kBufferSize) {
     array = isolate->factory()->NewFixedArray(number);
-    FindSharedFunctionInfosForScript(*script, *array);
+    isolate->heap()->EnsureHeapIsIterable();
+    AssertNoAllocation no_allocations;
+    HeapIterator heap_iterator;
+    Script* scr = *script;
+    FixedArray* arr = *array;
+    FindSharedFunctionInfosForScript(&heap_iterator, scr, arr);
   }
 
   Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(array);
@@ -12771,6 +12776,8 @@
   // Scan the heap for Script objects to find the script with the requested
   // script data.
   Handle<Script> script;
+  script_name->GetHeap()->EnsureHeapIsIterable();
+  AssertNoAllocation no_allocation_during_heap_iteration;
   HeapIterator iterator;
   HeapObject* obj = NULL;
   while (script.is_null() && ((obj = iterator.next()) != NULL)) {
@@ -12983,7 +12990,7 @@
     Handle<Object> receiver(isolate->global_context()->global());
    // This handle is neither shared nor used later, so it's safe.
     Object** argv[] = { key_handle.location() };
-    bool pending_exception = false;
+    bool pending_exception;
     value = Execution::Call(factory,
                             receiver,
                             1,
@@ -13139,6 +13146,7 @@
     return isolate->heap()->ToBoolean(obj->Has##Name());  \
   }
 
+ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastSmiOnlyElements)
 ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastElements)
 ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastDoubleElements)
 ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(DictionaryElements)
@@ -13222,6 +13230,9 @@
   Isolate* isolate = Isolate::Current();
   Failure* failure = Failure::cast(result);
   if (failure->IsRetryAfterGC()) {
+    if (isolate->heap()->new_space()->AddFreshPage()) {
+      return;
+    }
     // Try to do a garbage collection; ignore it if it fails. The C
     // entry stub will throw an out-of-memory exception in that case.
     isolate->heap()->CollectGarbage(failure->allocation_space());
@@ -13229,7 +13240,7 @@
     // Handle last resort GC and make sure to allow future allocations
     // to grow the heap without causing GCs (if possible).
     isolate->counters()->gc_last_resort_from_js()->Increment();
-    isolate->heap()->CollectAllGarbage(false);
+    isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags);
   }
 }
 
diff --git a/src/runtime.h b/src/runtime.h
index 1538b7d..c6f7773 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -330,6 +330,8 @@
   F(InitializeConstContextSlot, 3, 1) \
   F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
   \
+  /* Arrays */ \
+  F(NonSmiElementStored, 1, 1) \
   /* Debugging */ \
   F(DebugPrint, 1, 1) \
   F(DebugTrace, 0, 1) \
@@ -354,6 +356,7 @@
   F(IS_VAR, 1, 1) \
   \
   /* expose boolean functions from objects-inl.h */ \
+  F(HasFastSmiOnlyElements, 1, 1) \
   F(HasFastElements, 1, 1) \
   F(HasFastDoubleElements, 1, 1) \
   F(HasDictionaryElements, 1, 1) \
diff --git a/src/runtime.js b/src/runtime.js
index 14ff1b6..a12f6c7 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -355,7 +355,7 @@
   if (!IS_SPEC_OBJECT(x)) {
     throw %MakeTypeError('invalid_in_operator_use', [this, x]);
   }
-  return %_IsNonNegativeSmi(this) && !%IsJSProxy(x) ?
+  return %_IsNonNegativeSmi(this) ?
     %HasElement(x, this) : %HasProperty(x, %ToString(this));
 }
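
With %HasElement now accepting any JSReceiver (see the runtime change earlier
in this diff), the extra %IsJSProxy check on the fast smi path is no longer
needed.  Ordinary usage, no flags required:

    // Smi keys take the %HasElement branch; everything else is stringified
    // and goes through %HasProperty.
    var arr = [10, 20, 30];
    print(0 in arr);    // true
    print(3 in arr);    // false
    print("1" in arr);  // true
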
 
@@ -429,20 +429,10 @@
 }
 
 
-function CALL_FUNCTION_PROXY_AS_CONSTRUCTOR(proxy) {
-  var arity = %_ArgumentsLength() - 1;
+function CALL_FUNCTION_PROXY_AS_CONSTRUCTOR() {
+  var proxy = this;
   var trap = %GetConstructTrap(proxy);
-  var receiver = void 0;
-  if (!IS_UNDEFINED(trap)) {
-    trap = %GetCallTrap(proxy);
-    var proto = proxy.prototype;
-    if (!IS_SPEC_OBJECT(proto) && proto !== null) {
-      throw MakeTypeError("proto_object_or_null", [proto]);
-    }
-    receiver = new global.Object();
-    receiver.__proto__ = proto;
-  }
-  return %Apply(trap, this, arguments, 1, arity);
+  return %Apply(trap, this, arguments, 0, %_ArgumentsLength());
 }
 
 
diff --git a/src/serialize.cc b/src/serialize.cc
index ecb480a..84ab94a 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -300,12 +300,24 @@
       RUNTIME_ENTRY,
       4,
       "HandleScope::DeleteExtensions");
+  Add(ExternalReference::
+          incremental_marking_record_write_function(isolate).address(),
+      RUNTIME_ENTRY,
+      5,
+      "IncrementalMarking::RecordWrite");
+  Add(ExternalReference::store_buffer_overflow_function(isolate).address(),
+      RUNTIME_ENTRY,
+      6,
+      "StoreBuffer::StoreBufferOverflow");
+  Add(ExternalReference::
+          incremental_evacuation_record_write_function(isolate).address(),
+      RUNTIME_ENTRY,
+      7,
+      "IncrementalMarking::RecordWrite");
+
+
 
   // Miscellaneous
-  Add(ExternalReference::the_hole_value_location(isolate).address(),
-      UNCLASSIFIED,
-      2,
-      "Factory::the_hole_value().location()");
   Add(ExternalReference::roots_address(isolate).address(),
       UNCLASSIFIED,
       3,
@@ -351,129 +363,133 @@
       "Heap::always_allocate_scope_depth()");
   Add(ExternalReference::new_space_allocation_limit_address(isolate).address(),
       UNCLASSIFIED,
-      13,
+      14,
       "Heap::NewSpaceAllocationLimitAddress()");
   Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
       UNCLASSIFIED,
-      14,
+      15,
       "Heap::NewSpaceAllocationTopAddress()");
 #ifdef ENABLE_DEBUGGER_SUPPORT
   Add(ExternalReference::debug_break(isolate).address(),
       UNCLASSIFIED,
-      15,
+      16,
       "Debug::Break()");
   Add(ExternalReference::debug_step_in_fp_address(isolate).address(),
       UNCLASSIFIED,
-      16,
+      17,
       "Debug::step_in_fp_addr()");
 #endif
   Add(ExternalReference::double_fp_operation(Token::ADD, isolate).address(),
       UNCLASSIFIED,
-      17,
+      18,
       "add_two_doubles");
   Add(ExternalReference::double_fp_operation(Token::SUB, isolate).address(),
       UNCLASSIFIED,
-      18,
+      19,
       "sub_two_doubles");
   Add(ExternalReference::double_fp_operation(Token::MUL, isolate).address(),
       UNCLASSIFIED,
-      19,
+      20,
       "mul_two_doubles");
   Add(ExternalReference::double_fp_operation(Token::DIV, isolate).address(),
       UNCLASSIFIED,
-      20,
+      21,
       "div_two_doubles");
   Add(ExternalReference::double_fp_operation(Token::MOD, isolate).address(),
       UNCLASSIFIED,
-      21,
+      22,
       "mod_two_doubles");
   Add(ExternalReference::compare_doubles(isolate).address(),
       UNCLASSIFIED,
-      22,
+      23,
       "compare_doubles");
 #ifndef V8_INTERPRETED_REGEXP
   Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(),
       UNCLASSIFIED,
-      23,
+      24,
       "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
   Add(ExternalReference::re_check_stack_guard_state(isolate).address(),
       UNCLASSIFIED,
-      24,
+      25,
       "RegExpMacroAssembler*::CheckStackGuardState()");
   Add(ExternalReference::re_grow_stack(isolate).address(),
       UNCLASSIFIED,
-      25,
+      26,
       "NativeRegExpMacroAssembler::GrowStack()");
   Add(ExternalReference::re_word_character_map().address(),
       UNCLASSIFIED,
-      26,
+      27,
       "NativeRegExpMacroAssembler::word_character_map");
 #endif  // V8_INTERPRETED_REGEXP
   // Keyed lookup cache.
   Add(ExternalReference::keyed_lookup_cache_keys(isolate).address(),
       UNCLASSIFIED,
-      27,
+      28,
       "KeyedLookupCache::keys()");
   Add(ExternalReference::keyed_lookup_cache_field_offsets(isolate).address(),
       UNCLASSIFIED,
-      28,
+      29,
       "KeyedLookupCache::field_offsets()");
   Add(ExternalReference::transcendental_cache_array_address(isolate).address(),
       UNCLASSIFIED,
-      29,
+      30,
       "TranscendentalCache::caches()");
   Add(ExternalReference::handle_scope_next_address().address(),
       UNCLASSIFIED,
-      30,
+      31,
       "HandleScope::next");
   Add(ExternalReference::handle_scope_limit_address().address(),
       UNCLASSIFIED,
-      31,
+      32,
       "HandleScope::limit");
   Add(ExternalReference::handle_scope_level_address().address(),
       UNCLASSIFIED,
-      32,
+      33,
       "HandleScope::level");
   Add(ExternalReference::new_deoptimizer_function(isolate).address(),
       UNCLASSIFIED,
-      33,
+      34,
       "Deoptimizer::New()");
   Add(ExternalReference::compute_output_frames_function(isolate).address(),
       UNCLASSIFIED,
-      34,
+      35,
       "Deoptimizer::ComputeOutputFrames()");
   Add(ExternalReference::address_of_min_int().address(),
       UNCLASSIFIED,
-      35,
+      36,
       "LDoubleConstant::min_int");
   Add(ExternalReference::address_of_one_half().address(),
       UNCLASSIFIED,
-      36,
+      37,
       "LDoubleConstant::one_half");
   Add(ExternalReference::isolate_address().address(),
       UNCLASSIFIED,
-      37,
+      38,
       "isolate");
   Add(ExternalReference::address_of_minus_zero().address(),
       UNCLASSIFIED,
-      38,
+      39,
       "LDoubleConstant::minus_zero");
   Add(ExternalReference::address_of_negative_infinity().address(),
       UNCLASSIFIED,
-      39,
+      40,
       "LDoubleConstant::negative_infinity");
   Add(ExternalReference::power_double_double_function(isolate).address(),
       UNCLASSIFIED,
-      40,
+      41,
       "power_double_double_function");
   Add(ExternalReference::power_double_int_function(isolate).address(),
       UNCLASSIFIED,
-      41,
-      "power_double_int_function");
-  Add(ExternalReference::arguments_marker_location(isolate).address(),
-      UNCLASSIFIED,
       42,
-      "Factory::arguments_marker().location()");
+      "power_double_int_function");
+  Add(ExternalReference::store_buffer_top(isolate).address(),
+      UNCLASSIFIED,
+      43,
+      "store_buffer_top");
+  Add(ExternalReference::address_of_canonical_non_hole_nan().address(),
+      UNCLASSIFIED,
+      44,
+      "canonical_nan");
 }
 
 
@@ -569,6 +585,7 @@
       maybe_new_allocation =
           reinterpret_cast<PagedSpace*>(space)->AllocateRaw(size);
     }
+    ASSERT(!maybe_new_allocation->IsFailure());
     Object* new_allocation = maybe_new_allocation->ToObjectUnchecked();
     HeapObject* new_object = HeapObject::cast(new_allocation);
     address = new_object->address();
@@ -577,14 +594,13 @@
     ASSERT(SpaceIsLarge(space_index));
     LargeObjectSpace* lo_space = reinterpret_cast<LargeObjectSpace*>(space);
     Object* new_allocation;
-    if (space_index == kLargeData) {
-      new_allocation = lo_space->AllocateRaw(size)->ToObjectUnchecked();
-    } else if (space_index == kLargeFixedArray) {
+    if (space_index == kLargeData || space_index == kLargeFixedArray) {
       new_allocation =
-          lo_space->AllocateRawFixedArray(size)->ToObjectUnchecked();
+          lo_space->AllocateRaw(size, NOT_EXECUTABLE)->ToObjectUnchecked();
     } else {
       ASSERT_EQ(kLargeCode, space_index);
-      new_allocation = lo_space->AllocateRawCode(size)->ToObjectUnchecked();
+      new_allocation =
+          lo_space->AllocateRaw(size, EXECUTABLE)->ToObjectUnchecked();
     }
     HeapObject* new_object = HeapObject::cast(new_allocation);
     // Record all large objects in the same space.
@@ -629,6 +645,7 @@
 
 void Deserializer::Deserialize() {
   isolate_ = Isolate::Current();
+  ASSERT(isolate_ != NULL);
   // Don't GC while deserializing - just expand the heap.
   AlwaysAllocateScope always_allocate;
   // Don't use the free lists while deserializing.
@@ -685,9 +702,8 @@
 // This routine writes the new object into the pointer provided and then
 // returns true if the new object was in young space and false otherwise.
 // The reason for this strange interface is that otherwise the object is
-// written very late, which means the ByteArray map is not set up by the
-// time we need to use it to mark the space at the end of a page free (by
-// making it into a byte array).
+// written very late, which means the FreeSpace map is not set up by the
+// time we need to use it to mark the space at the end of a page free.
 void Deserializer::ReadObject(int space_number,
                               Space* space,
                               Object** write_back) {
@@ -758,8 +774,9 @@
         if (where == kNewObject && how == kPlain && within == kStartOfObject) {\
           ASSIGN_DEST_SPACE(space_number)                                      \
           ReadObject(space_number, dest_space, current);                       \
-          emit_write_barrier =                                                 \
-            (space_number == NEW_SPACE && source_space != NEW_SPACE);          \
+          emit_write_barrier = (space_number == NEW_SPACE &&                   \
+                                source_space != NEW_SPACE &&                   \
+                                source_space != CELL_SPACE);                   \
         } else {                                                               \
           Object* new_object = NULL;  /* May not be a real Object pointer. */  \
           if (where == kNewObject) {                                           \
@@ -778,14 +795,16 @@
                 Decode(reference_id);                                          \
             new_object = reinterpret_cast<Object*>(address);                   \
           } else if (where == kBackref) {                                      \
-            emit_write_barrier =                                               \
-              (space_number == NEW_SPACE && source_space != NEW_SPACE);        \
+            emit_write_barrier = (space_number == NEW_SPACE &&                 \
+                                  source_space != NEW_SPACE &&                 \
+                                  source_space != CELL_SPACE);                 \
             new_object = GetAddressFromEnd(data & kSpaceMask);                 \
           } else {                                                             \
             ASSERT(where == kFromStart);                                       \
             if (offset_from_start == kUnknownOffsetFromStart) {                \
-              emit_write_barrier =                                             \
-                (space_number == NEW_SPACE && source_space != NEW_SPACE);      \
+              emit_write_barrier = (space_number == NEW_SPACE &&               \
+                                    source_space != NEW_SPACE &&               \
+                                    source_space != CELL_SPACE);               \
               new_object = GetAddressFromStart(data & kSpaceMask);             \
             } else {                                                           \
               Address object_address = pages_[space_number][0] +               \
@@ -973,6 +992,11 @@
         break;
       }
 
+      case kSkip: {
+        current++;
+        break;
+      }
+
       case kNativesStringResource: {
         int index = source_->Get();
         Vector<const char> source_vector = Natives::GetRawScriptSource(index);
@@ -1097,8 +1121,13 @@
 
 
 void Serializer::VisitPointers(Object** start, Object** end) {
+  Isolate* isolate = Isolate::Current();
+
   for (Object** current = start; current < end; current++) {
-    if ((*current)->IsSmi()) {
+    if (reinterpret_cast<Address>(current) ==
+        isolate->heap()->store_buffer()->TopAddress()) {
+      sink_->Put(kSkip, "Skip");
+    } else if ((*current)->IsSmi()) {
       sink_->Put(kRawData, "RawData");
       sink_->PutInt(kPointerSize, "length");
       for (int i = 0; i < kPointerSize; i++) {
@@ -1420,7 +1449,7 @@
     if (!source->IsUndefined()) {
       ExternalAsciiString* string = ExternalAsciiString::cast(source);
       typedef v8::String::ExternalAsciiStringResource Resource;
-      Resource* resource = string->resource();
+      const Resource* resource = string->resource();
       if (resource == *resource_pointer) {
         sink_->Put(kNativesStringResource, "NativesStringResource");
         sink_->PutSection(i, "NativesStringResourceEnd");
diff --git a/src/serialize.h b/src/serialize.h
index 66d6fb5..c070923 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -238,7 +238,8 @@
     kRootArray = 0x9,               // Object is found in root array.
     kPartialSnapshotCache = 0xa,    // Object is in the cache.
     kExternalReference = 0xb,       // Pointer to an external reference.
-    // 0xc-0xf                         Free.
+    kSkip = 0xc,                    // Skip a pointer sized cell.
+    // 0xd-0xf                         Free.
     kBackref = 0x10,                 // Object is described relative to end.
     // 0x11-0x18                       One per space.
     // 0x19-0x1f                       Common backref offsets.
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index 35d7224..d9e6053 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -37,355 +37,213 @@
 
 
 // -----------------------------------------------------------------------------
+// Bitmap
+
+void Bitmap::Clear(MemoryChunk* chunk) {
+  Bitmap* bitmap = chunk->markbits();
+  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
+  chunk->ResetLiveBytes();
+}
+
+
+// -----------------------------------------------------------------------------
 // PageIterator
 
+
+PageIterator::PageIterator(PagedSpace* space)
+    : space_(space),
+      prev_page_(&space->anchor_),
+      next_page_(prev_page_->next_page()) { }
+
+
 bool PageIterator::has_next() {
-  return prev_page_ != stop_page_;
+  return next_page_ != &space_->anchor_;
 }
 
 
 Page* PageIterator::next() {
   ASSERT(has_next());
-  prev_page_ = (prev_page_ == NULL)
-               ? space_->first_page_
-               : prev_page_->next_page();
+  prev_page_ = next_page_;
+  next_page_ = next_page_->next_page();
   return prev_page_;
 }
 
 
 // -----------------------------------------------------------------------------
-// Page
+// NewSpacePageIterator
 
-Page* Page::next_page() {
-  return heap_->isolate()->memory_allocator()->GetNextPage(this);
+
+NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
+    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
+      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
+      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) { }
+
+NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
+    : prev_page_(space->anchor()),
+      next_page_(prev_page_->next_page()),
+      last_page_(prev_page_->prev_page()) { }
+
+NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
+    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
+      next_page_(NewSpacePage::FromAddress(start)),
+      last_page_(NewSpacePage::FromLimit(limit)) {
+  SemiSpace::AssertValidRange(start, limit);
 }
 
 
-Address Page::AllocationTop() {
-  PagedSpace* owner = heap_->isolate()->memory_allocator()->PageOwner(this);
-  return owner->PageAllocationTop(this);
+bool NewSpacePageIterator::has_next() {
+  return prev_page_ != last_page_;
 }
 
 
-Address Page::AllocationWatermark() {
-  PagedSpace* owner = heap_->isolate()->memory_allocator()->PageOwner(this);
-  if (this == owner->AllocationTopPage()) {
-    return owner->top();
-  }
-  return address() + AllocationWatermarkOffset();
+NewSpacePage* NewSpacePageIterator::next() {
+  ASSERT(has_next());
+  prev_page_ = next_page_;
+  next_page_ = next_page_->next_page();
+  return prev_page_;
 }
 
 
-uint32_t Page::AllocationWatermarkOffset() {
-  return static_cast<uint32_t>((flags_ & kAllocationWatermarkOffsetMask) >>
-                               kAllocationWatermarkOffsetShift);
-}
-
-
-void Page::SetAllocationWatermark(Address allocation_watermark) {
-  if ((heap_->gc_state() == Heap::SCAVENGE) && IsWatermarkValid()) {
-    // When iterating intergenerational references during scavenge
-    // we might decide to promote an encountered young object.
-    // We will allocate a space for such an object and put it
-    // into the promotion queue to process it later.
-    // If space for object was allocated somewhere beyond allocation
-    // watermark this might cause garbage pointers to appear under allocation
-    // watermark. To avoid visiting them during dirty regions iteration
-    // which might be still in progress we store a valid allocation watermark
-    // value and mark this page as having an invalid watermark.
-    SetCachedAllocationWatermark(AllocationWatermark());
-    InvalidateWatermark(true);
-  }
-
-  flags_ = (flags_ & kFlagsMask) |
-           Offset(allocation_watermark) << kAllocationWatermarkOffsetShift;
-  ASSERT(AllocationWatermarkOffset()
-         == static_cast<uint32_t>(Offset(allocation_watermark)));
-}
-
-
-void Page::SetCachedAllocationWatermark(Address allocation_watermark) {
-  mc_first_forwarded = allocation_watermark;
-}
-
-
-Address Page::CachedAllocationWatermark() {
-  return mc_first_forwarded;
-}
-
-
-uint32_t Page::GetRegionMarks() {
-  return dirty_regions_;
-}
-
-
-void Page::SetRegionMarks(uint32_t marks) {
-  dirty_regions_ = marks;
-}
-
-
-int Page::GetRegionNumberForAddress(Address addr) {
-  // Each page is divided into 256 byte regions. Each region has a corresponding
-  // dirty mark bit in the page header. Region can contain intergenerational
-  // references iff its dirty mark is set.
-  // A normal 8K page contains exactly 32 regions so all region marks fit
-  // into 32-bit integer field. To calculate a region number we just divide
-  // offset inside page by region size.
-  // A large page can contain more then 32 regions. But we want to avoid
-  // additional write barrier code for distinguishing between large and normal
-  // pages so we just ignore the fact that addr points into a large page and
-  // calculate region number as if addr pointed into a normal 8K page. This way
-  // we get a region number modulo 32 so for large pages several regions might
-  // be mapped to a single dirty mark.
-  ASSERT_PAGE_ALIGNED(this->address());
-  STATIC_ASSERT((kPageAlignmentMask >> kRegionSizeLog2) < kBitsPerInt);
-
-  // We are using masking with kPageAlignmentMask instead of Page::Offset()
-  // to get an offset to the beginning of 8K page containing addr not to the
-  // beginning of actual page which can be bigger then 8K.
-  intptr_t offset_inside_normal_page = OffsetFrom(addr) & kPageAlignmentMask;
-  return static_cast<int>(offset_inside_normal_page >> kRegionSizeLog2);
-}
-
-
-uint32_t Page::GetRegionMaskForAddress(Address addr) {
-  return 1 << GetRegionNumberForAddress(addr);
-}
-
-
-uint32_t Page::GetRegionMaskForSpan(Address start, int length_in_bytes) {
-  uint32_t result = 0;
-  static const intptr_t kRegionMask = (1 << kRegionSizeLog2) - 1;
-  if (length_in_bytes + (OffsetFrom(start) & kRegionMask) >= kPageSize) {
-    result = kAllRegionsDirtyMarks;
-  } else if (length_in_bytes > 0) {
-    int start_region = GetRegionNumberForAddress(start);
-    int end_region =
-        GetRegionNumberForAddress(start + length_in_bytes - kPointerSize);
-    uint32_t start_mask = (~0) << start_region;
-    uint32_t end_mask = ~((~1) << end_region);
-    result = start_mask & end_mask;
-    // if end_region < start_region, the mask is ored.
-    if (result == 0) result = start_mask | end_mask;
-  }
-#ifdef DEBUG
-  if (FLAG_enable_slow_asserts) {
-    uint32_t expected = 0;
-    for (Address a = start; a < start + length_in_bytes; a += kPointerSize) {
-      expected |= GetRegionMaskForAddress(a);
+// -----------------------------------------------------------------------------
+// HeapObjectIterator
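+// Scans forward from cur_addr_, jumping over the unused part of the linear
+// allocation area (between the space's top and limit) and skipping filler
+// objects, and returns the next real object on the current page, or NULL
+// once the end of the page is reached.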
+HeapObject* HeapObjectIterator::FromCurrentPage() {
+  while (cur_addr_ != cur_end_) {
+    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
+      cur_addr_ = space_->limit();
+      continue;
     }
-    ASSERT(expected == result);
+    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
+    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
+    cur_addr_ += obj_size;
+    ASSERT(cur_addr_ <= cur_end_);
+    if (!obj->IsFiller()) {
+      ASSERT_OBJECT_SIZE(obj_size);
+      return obj;
+    }
   }
-#endif
-  return result;
-}
-
-
-void Page::MarkRegionDirty(Address address) {
-  SetRegionMarks(GetRegionMarks() | GetRegionMaskForAddress(address));
-}
-
-
-bool Page::IsRegionDirty(Address address) {
-  return GetRegionMarks() & GetRegionMaskForAddress(address);
-}
-
-
-void Page::ClearRegionMarks(Address start, Address end, bool reaches_limit) {
-  int rstart = GetRegionNumberForAddress(start);
-  int rend = GetRegionNumberForAddress(end);
-
-  if (reaches_limit) {
-    end += 1;
-  }
-
-  if ((rend - rstart) == 0) {
-    return;
-  }
-
-  uint32_t bitmask = 0;
-
-  if ((OffsetFrom(start) & kRegionAlignmentMask) == 0
-      || (start == ObjectAreaStart())) {
-    // First region is fully covered
-    bitmask = 1 << rstart;
-  }
-
-  while (++rstart < rend) {
-    bitmask |= 1 << rstart;
-  }
-
-  if (bitmask) {
-    SetRegionMarks(GetRegionMarks() & ~bitmask);
-  }
-}
-
-
-void Page::FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap) {
-  heap->page_watermark_invalidated_mark_ ^= 1 << WATERMARK_INVALIDATED;
-}
-
-
-bool Page::IsWatermarkValid() {
-  return (flags_ & (1 << WATERMARK_INVALIDATED)) !=
-      heap_->page_watermark_invalidated_mark_;
-}
-
-
-void Page::InvalidateWatermark(bool value) {
-  if (value) {
-    flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
-             heap_->page_watermark_invalidated_mark_;
-  } else {
-    flags_ =
-        (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
-        (heap_->page_watermark_invalidated_mark_ ^
-         (1 << WATERMARK_INVALIDATED));
-  }
-
-  ASSERT(IsWatermarkValid() == !value);
-}
-
-
-bool Page::GetPageFlag(PageFlag flag) {
-  return (flags_ & static_cast<intptr_t>(1 << flag)) != 0;
-}
-
-
-void Page::SetPageFlag(PageFlag flag, bool value) {
-  if (value) {
-    flags_ |= static_cast<intptr_t>(1 << flag);
-  } else {
-    flags_ &= ~static_cast<intptr_t>(1 << flag);
-  }
-}
-
-
-void Page::ClearPageFlags() {
-  flags_ = 0;
-}
-
-
-void Page::ClearGCFields() {
-  InvalidateWatermark(true);
-  SetAllocationWatermark(ObjectAreaStart());
-  if (heap_->gc_state() == Heap::SCAVENGE) {
-    SetCachedAllocationWatermark(ObjectAreaStart());
-  }
-  SetRegionMarks(kAllRegionsCleanMarks);
-}
-
-
-bool Page::WasInUseBeforeMC() {
-  return GetPageFlag(WAS_IN_USE_BEFORE_MC);
-}
-
-
-void Page::SetWasInUseBeforeMC(bool was_in_use) {
-  SetPageFlag(WAS_IN_USE_BEFORE_MC, was_in_use);
-}
-
-
-bool Page::IsLargeObjectPage() {
-  return !GetPageFlag(IS_NORMAL_PAGE);
-}
-
-
-void Page::SetIsLargeObjectPage(bool is_large_object_page) {
-  SetPageFlag(IS_NORMAL_PAGE, !is_large_object_page);
-}
-
-Executability Page::PageExecutability() {
-  return GetPageFlag(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
-}
-
-
-void Page::SetPageExecutability(Executability executable) {
-  SetPageFlag(IS_EXECUTABLE, executable == EXECUTABLE);
+  return NULL;
 }
 
 
 // -----------------------------------------------------------------------------
 // MemoryAllocator
 
-void MemoryAllocator::ChunkInfo::init(Address a, size_t s, PagedSpace* o) {
-  address_ = a;
-  size_ = s;
-  owner_ = o;
-  executable_ = (o == NULL) ? NOT_EXECUTABLE : o->executable();
-  owner_identity_ = (o == NULL) ? FIRST_SPACE : o->identity();
+#ifdef ENABLE_HEAP_PROTECTION
+
+void MemoryAllocator::Protect(Address start, size_t size) {
+  OS::Protect(start, size);
 }
 
 
-bool MemoryAllocator::IsValidChunk(int chunk_id) {
-  if (!IsValidChunkId(chunk_id)) return false;
-
-  ChunkInfo& c = chunks_[chunk_id];
-  return (c.address() != NULL) && (c.size() != 0) && (c.owner() != NULL);
+void MemoryAllocator::Unprotect(Address start,
+                                size_t size,
+                                Executability executable) {
+  OS::Unprotect(start, size, executable);
 }
 
 
-bool MemoryAllocator::IsValidChunkId(int chunk_id) {
-  return (0 <= chunk_id) && (chunk_id < max_nof_chunks_);
+void MemoryAllocator::ProtectChunkFromPage(Page* page) {
+  int id = GetChunkId(page);
+  OS::Protect(chunks_[id].address(), chunks_[id].size());
 }
 
 
-bool MemoryAllocator::IsPageInSpace(Page* p, PagedSpace* space) {
-  ASSERT(p->is_valid());
-
-  int chunk_id = GetChunkId(p);
-  if (!IsValidChunkId(chunk_id)) return false;
-
-  ChunkInfo& c = chunks_[chunk_id];
-  return (c.address() <= p->address()) &&
-         (p->address() < c.address() + c.size()) &&
-         (space == c.owner());
+void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
+  int id = GetChunkId(page);
+  OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
+                chunks_[id].owner()->executable() == EXECUTABLE);
 }
 
-
-Page* MemoryAllocator::GetNextPage(Page* p) {
-  ASSERT(p->is_valid());
-  intptr_t raw_addr = p->opaque_header & ~Page::kPageAlignmentMask;
-  return Page::FromAddress(AddressFrom<Address>(raw_addr));
-}
-
-
-int MemoryAllocator::GetChunkId(Page* p) {
-  ASSERT(p->is_valid());
-  return static_cast<int>(p->opaque_header & Page::kPageAlignmentMask);
-}
-
-
-void MemoryAllocator::SetNextPage(Page* prev, Page* next) {
-  ASSERT(prev->is_valid());
-  int chunk_id = GetChunkId(prev);
-  ASSERT_PAGE_ALIGNED(next->address());
-  prev->opaque_header = OffsetFrom(next->address()) | chunk_id;
-}
-
-
-PagedSpace* MemoryAllocator::PageOwner(Page* page) {
-  int chunk_id = GetChunkId(page);
-  ASSERT(IsValidChunk(chunk_id));
-  return chunks_[chunk_id].owner();
-}
-
-
-bool MemoryAllocator::InInitialChunk(Address address) {
-  if (initial_chunk_ == NULL) return false;
-
-  Address start = static_cast<Address>(initial_chunk_->address());
-  return (start <= address) && (address < start + initial_chunk_->size());
-}
+#endif
 
 
 // --------------------------------------------------------------------------
 // PagedSpace
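+// Turns a freshly allocated MemoryChunk into a Page: the owning space's
+// capacity is increased by the object area size, the whole object area is
+// given to the space's free list, and the incremental marking page flags are
+// set up.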
+Page* Page::Initialize(Heap* heap,
+                       MemoryChunk* chunk,
+                       Executability executable,
+                       PagedSpace* owner) {
+  Page* page = reinterpret_cast<Page*>(chunk);
+  ASSERT(chunk->size() == static_cast<size_t>(kPageSize));
+  ASSERT(chunk->owner() == owner);
+  owner->IncreaseCapacity(Page::kObjectAreaSize);
+  owner->Free(page->ObjectAreaStart(),
+              static_cast<int>(page->ObjectAreaEnd() -
+                               page->ObjectAreaStart()));
+
+  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
+
+  return page;
+}
+
 
 bool PagedSpace::Contains(Address addr) {
   Page* p = Page::FromAddress(addr);
   if (!p->is_valid()) return false;
-  return heap()->isolate()->memory_allocator()->IsPageInSpace(p, this);
+  return p->owner() == this;
+}
+
+
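+// Setting or clearing SCAN_ON_SCAVENGE also keeps the heap's count of
+// scan-on-scavenge pages in sync and refreshes the incremental marking page
+// flags.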
+void MemoryChunk::set_scan_on_scavenge(bool scan) {
+  if (scan) {
+    if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
+    SetFlag(SCAN_ON_SCAVENGE);
+  } else {
+    if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
+    ClearFlag(SCAN_ON_SCAVENGE);
+  }
+  heap_->incremental_marking()->SetOldSpacePageFlags(this);
+}
+
+
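+// Maps an arbitrary address into the heap to its MemoryChunk.  For normal
+// pages, masking with the page alignment mask finds the chunk header
+// directly; for an address inside a large object the masked address need not
+// be a chunk header (its owner is NULL), so we fall back to searching the
+// large object space.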
+MemoryChunk* MemoryChunk::FromAnyPointerAddress(Address addr) {
+  MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
+      OffsetFrom(addr) & ~Page::kPageAlignmentMask);
+  if (maybe->owner() != NULL) return maybe;
+  LargeObjectIterator iterator(HEAP->lo_space());
+  for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
+    // Fixed arrays are the only pointer-containing objects in large object
+    // space.
+    if (o->IsFixedArray()) {
+      MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
+      if (chunk->Contains(addr)) {
+        return chunk;
+      }
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+PointerChunkIterator::PointerChunkIterator(Heap* heap)
+    : state_(kOldPointerState),
+      old_pointer_iterator_(heap->old_pointer_space()),
+      map_iterator_(heap->map_space()),
+      lo_iterator_(heap->lo_space()) { }
+
+
+Page* Page::next_page() {
+  ASSERT(next_chunk()->owner() == owner());
+  return static_cast<Page*>(next_chunk());
+}
+
+
+Page* Page::prev_page() {
+  ASSERT(prev_chunk()->owner() == owner());
+  return static_cast<Page*>(prev_chunk());
+}
+
+
+void Page::set_next_page(Page* page) {
+  ASSERT(page->owner() == owner());
+  set_next_chunk(page);
+}
+
+
+void Page::set_prev_page(Page* page) {
+  ASSERT(page->owner() == owner());
+  set_prev_chunk(page);
 }
 
 
@@ -393,15 +251,14 @@
 // not contain slow case logic (eg, move to the next page or try free list
 // allocation) so it can be used by all the allocation functions and for all
 // the paged spaces.
-HeapObject* PagedSpace::AllocateLinearly(AllocationInfo* alloc_info,
-                                         int size_in_bytes) {
-  Address current_top = alloc_info->top;
+HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
+  Address current_top = allocation_info_.top;
   Address new_top = current_top + size_in_bytes;
-  if (new_top > alloc_info->limit) return NULL;
+  if (new_top > allocation_info_.limit) return NULL;
 
-  alloc_info->top = new_top;
-  ASSERT(alloc_info->VerifyPagedAllocation());
-  accounting_stats_.AllocateBytes(size_in_bytes);
+  allocation_info_.top = new_top;
+  ASSERT(allocation_info_.VerifyPagedAllocation());
+  ASSERT(current_top != NULL);
   return HeapObject::FromAddress(current_top);
 }
 
@@ -410,25 +267,29 @@
 MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
   ASSERT(HasBeenSetup());
   ASSERT_OBJECT_SIZE(size_in_bytes);
-  HeapObject* object = AllocateLinearly(&allocation_info_, size_in_bytes);
-  if (object != NULL) return object;
+  HeapObject* object = AllocateLinearly(size_in_bytes);
+  if (object != NULL) {
+    if (identity() == CODE_SPACE) {
+      SkipList::Update(object->address(), size_in_bytes);
+    }
+    return object;
+  }
+
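+  // The linear allocation area is exhausted; try the free list before
+  // falling back to the slow path.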
+  object = free_list_.Allocate(size_in_bytes);
+  if (object != NULL) {
+    if (identity() == CODE_SPACE) {
+      SkipList::Update(object->address(), size_in_bytes);
+    }
+    return object;
+  }
 
   object = SlowAllocateRaw(size_in_bytes);
-  if (object != NULL) return object;
-
-  return Failure::RetryAfterGC(identity());
-}
-
-
-// Reallocating (and promoting) objects during a compacting collection.
-MaybeObject* PagedSpace::MCAllocateRaw(int size_in_bytes) {
-  ASSERT(HasBeenSetup());
-  ASSERT_OBJECT_SIZE(size_in_bytes);
-  HeapObject* object = AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
-  if (object != NULL) return object;
-
-  object = SlowMCAllocateRaw(size_in_bytes);
-  if (object != NULL) return object;
+  if (object != NULL) {
+    if (identity() == CODE_SPACE) {
+      SkipList::Update(object->address(), size_in_bytes);
+    }
+    return object;
+  }
 
   return Failure::RetryAfterGC(identity());
 }
@@ -436,28 +297,48 @@
 
 // -----------------------------------------------------------------------------
 // NewSpace
+MaybeObject* NewSpace::AllocateRawInternal(int size_in_bytes) {
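+  // Bump-pointer allocation.  If the (possibly artificially lowered) limit
+  // is hit, either raise the limit and take an incremental marking step,
+  // move on to a fresh page, or fail so the caller retries after GC.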
+  Address old_top = allocation_info_.top;
+  if (allocation_info_.limit - old_top < size_in_bytes) {
+    Address new_top = old_top + size_in_bytes;
+    Address high = to_space_.page_high();
+    if (allocation_info_.limit < high) {
+      // Incremental marking has lowered the limit to get a
+      // chance to do a step.
+      allocation_info_.limit = Min(
+          allocation_info_.limit + inline_allocation_limit_step_,
+          high);
+      int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
+      heap()->incremental_marking()->Step(bytes_allocated);
+      top_on_previous_step_ = new_top;
+      return AllocateRawInternal(size_in_bytes);
+    } else if (AddFreshPage()) {
+      // Switched to new page. Try allocating again.
+      int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
+      heap()->incremental_marking()->Step(bytes_allocated);
+      top_on_previous_step_ = to_space_.page_low();
+      return AllocateRawInternal(size_in_bytes);
+    } else {
+      return Failure::RetryAfterGC();
+    }
+  }
 
-MaybeObject* NewSpace::AllocateRawInternal(int size_in_bytes,
-                                           AllocationInfo* alloc_info) {
-  Address new_top = alloc_info->top + size_in_bytes;
-  if (new_top > alloc_info->limit) return Failure::RetryAfterGC();
+  Object* obj = HeapObject::FromAddress(allocation_info_.top);
+  allocation_info_.top += size_in_bytes;
+  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 
-  Object* obj = HeapObject::FromAddress(alloc_info->top);
-  alloc_info->top = new_top;
-#ifdef DEBUG
-  SemiSpace* space =
-      (alloc_info == &allocation_info_) ? &to_space_ : &from_space_;
-  ASSERT(space->low() <= alloc_info->top
-         && alloc_info->top <= space->high()
-         && alloc_info->limit == space->high());
-#endif
   return obj;
 }
 
 
+LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
+  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
+  return static_cast<LargePage*>(chunk);
+}
+
+
 intptr_t LargeObjectSpace::Available() {
-  return LargeObjectChunk::ObjectSizeFor(
-      heap()->isolate()->memory_allocator()->Available());
+  return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
 }
 
 
@@ -467,16 +348,23 @@
   ASSERT(string->IsSeqString());
   ASSERT(string->address() + StringType::SizeFor(string->length()) ==
          allocation_info_.top);
+  Address old_top = allocation_info_.top;
   allocation_info_.top =
       string->address() + StringType::SizeFor(length);
   string->set_length(length);
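+  // If the string was already marked black, the trimmed tail no longer
+  // counts as live data, so decrease the page's live-byte count accordingly.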
+  if (Marking::IsBlack(Marking::MarkBitFrom(string))) {
+    int delta = static_cast<int>(old_top - allocation_info_.top);
+    MemoryChunk::IncrementLiveBytes(string->address(), -delta);
+  }
 }
 
 
 bool FreeListNode::IsFreeListNode(HeapObject* object) {
-  return object->map() == HEAP->raw_unchecked_byte_array_map()
-      || object->map() == HEAP->raw_unchecked_one_pointer_filler_map()
-      || object->map() == HEAP->raw_unchecked_two_pointer_filler_map();
+  Map* map = object->map();
+  Heap* heap = object->GetHeap();
+  return map == heap->raw_unchecked_free_space_map()
+      || map == heap->raw_unchecked_one_pointer_filler_map()
+      || map == heap->raw_unchecked_two_pointer_filler_map();
 }
 
 } }  // namespace v8::internal
diff --git a/src/spaces.cc b/src/spaces.cc
index 97c6d2a..2aaca5b 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -35,52 +35,66 @@
 namespace v8 {
 namespace internal {
 
-// For contiguous spaces, top should be in the space (or at the end) and limit
-// should be the end of the space.
-#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
-  ASSERT((space).low() <= (info).top                  \
-         && (info).top <= (space).high()              \
-         && (info).limit == (space).high())
 
 // ----------------------------------------------------------------------------
 // HeapObjectIterator
 
 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
-  Initialize(space->bottom(), space->top(), NULL);
+  // You can't actually iterate over the anchor page.  It is not a real page,
+  // just an anchor for the doubly linked page list.  Initialize as if we
+  // have reached the end of the anchor page, so that the first iteration
+  // will move on to the first page.
+  Initialize(space,
+             NULL,
+             NULL,
+             kAllPagesInSpace,
+             NULL);
 }
 
 
 HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
                                        HeapObjectCallback size_func) {
-  Initialize(space->bottom(), space->top(), size_func);
-}
-
-
-HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) {
-  Initialize(start, space->top(), NULL);
-}
-
-
-HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
-                                       HeapObjectCallback size_func) {
-  Initialize(start, space->top(), size_func);
+  // You can't actually iterate over the anchor page.  It is not a real page,
+  // just an anchor for the doubly linked page list.  Initialize the current
+  // address and end as NULL, so that the first iteration will move on to the
+  // first page.
+  Initialize(space,
+             NULL,
+             NULL,
+             kAllPagesInSpace,
+             size_func);
 }
 
 
 HeapObjectIterator::HeapObjectIterator(Page* page,
                                        HeapObjectCallback size_func) {
-  Initialize(page->ObjectAreaStart(), page->AllocationTop(), size_func);
+  Space* owner = page->owner();
+  ASSERT(owner == HEAP->old_pointer_space() ||
+         owner == HEAP->old_data_space() ||
+         owner == HEAP->map_space() ||
+         owner == HEAP->cell_space() ||
+         owner == HEAP->code_space());
+  Initialize(reinterpret_cast<PagedSpace*>(owner),
+             page->ObjectAreaStart(),
+             page->ObjectAreaEnd(),
+             kOnePageOnly,
+             size_func);
+  ASSERT(page->WasSweptPrecisely());
 }
 
 
-void HeapObjectIterator::Initialize(Address cur, Address end,
+void HeapObjectIterator::Initialize(PagedSpace* space,
+                                    Address cur, Address end,
+                                    HeapObjectIterator::PageMode mode,
                                     HeapObjectCallback size_f) {
+  // Check that we can actually iterate over this space.
+  ASSERT(!space->was_swept_conservatively());
+
+  space_ = space;
   cur_addr_ = cur;
-  end_addr_ = end;
-  end_page_ = Page::FromAllocationTop(end);
+  cur_end_ = end;
+  page_mode_ = mode;
   size_func_ = size_f;
-  Page* p = Page::FromAllocationTop(cur_addr_);
-  cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();
 
 #ifdef DEBUG
   Verify();
@@ -88,63 +102,35 @@
 }
 
 
-HeapObject* HeapObjectIterator::FromNextPage() {
-  if (cur_addr_ == end_addr_) return NULL;
-
-  Page* cur_page = Page::FromAllocationTop(cur_addr_);
+// We have hit the end of the current page's object area and should advance
+// to the objects on the next page, if there is one.
+bool HeapObjectIterator::AdvanceToNextPage() {
+  ASSERT(cur_addr_ == cur_end_);
+  if (page_mode_ == kOnePageOnly) return false;
+  Page* cur_page;
+  if (cur_addr_ == NULL) {
+    cur_page = space_->anchor();
+  } else {
+    cur_page = Page::FromAddress(cur_addr_ - 1);
+    ASSERT(cur_addr_ == cur_page->ObjectAreaEnd());
+  }
   cur_page = cur_page->next_page();
-  ASSERT(cur_page->is_valid());
-
+  if (cur_page == space_->anchor()) return false;
   cur_addr_ = cur_page->ObjectAreaStart();
-  cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();
-
-  if (cur_addr_ == end_addr_) return NULL;
-  ASSERT(cur_addr_ < cur_limit_);
-#ifdef DEBUG
-  Verify();
-#endif
-  return FromCurrentPage();
+  cur_end_ = cur_page->ObjectAreaEnd();
+  ASSERT(cur_page->WasSweptPrecisely());
+  return true;
 }
 
 
 #ifdef DEBUG
 void HeapObjectIterator::Verify() {
-  Page* p = Page::FromAllocationTop(cur_addr_);
-  ASSERT(p == Page::FromAllocationTop(cur_limit_));
-  ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
+  // TODO(gc): We should do something here.
 }
 #endif
 
 
 // -----------------------------------------------------------------------------
-// PageIterator
-
-PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
-  prev_page_ = NULL;
-  switch (mode) {
-    case PAGES_IN_USE:
-      stop_page_ = space->AllocationTopPage();
-      break;
-    case PAGES_USED_BY_MC:
-      stop_page_ = space->MCRelocationTopPage();
-      break;
-    case ALL_PAGES:
-#ifdef DEBUG
-      // Verify that the cached last page in the space is actually the
-      // last page.
-      for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) {
-        if (!p->next_page()->is_valid()) {
-          ASSERT(space->last_page_ == p);
-        }
-      }
-#endif
-      stop_page_ = space->last_page_;
-      break;
-  }
-}
-
-
-// -----------------------------------------------------------------------------
 // CodeRange
 
 
@@ -171,7 +157,12 @@
   // We are sure that we have mapped a block of requested addresses.
   ASSERT(code_range_->size() == requested);
   LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
-  allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size()));
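+  // MemoryChunks must start at addresses aligned to MemoryChunk::kAlignment,
+  // so round the base of the code range up and shrink the usable size by the
+  // same amount.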
+  Address base = reinterpret_cast<Address>(code_range_->address());
+  Address aligned_base =
+      RoundUp(reinterpret_cast<Address>(code_range_->address()),
+              MemoryChunk::kAlignment);
+  size_t size = code_range_->size() - (aligned_base - base);
+  allocation_list_.Add(FreeBlock(aligned_base, size));
   current_allocation_block_index_ = 0;
   return true;
 }
@@ -228,7 +219,8 @@
 
 
 
-void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) {
+Address CodeRange::AllocateRawMemory(const size_t requested,
+                                     size_t* allocated) {
   ASSERT(current_allocation_block_index_ < allocation_list_.length());
   if (requested > allocation_list_[current_allocation_block_index_].size) {
     // Find an allocation block large enough.  This function call may
@@ -236,13 +228,16 @@
     GetNextAllocationBlock(requested);
   }
   // Commit the requested memory at the start of the current allocation block.
-  *allocated = RoundUp(requested, Page::kPageSize);
+  size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment);
   FreeBlock current = allocation_list_[current_allocation_block_index_];
-  if (*allocated >= current.size - Page::kPageSize) {
+  if (aligned_requested >= (current.size - Page::kPageSize)) {
     // Don't leave a small free block, useless for a large object or chunk.
     *allocated = current.size;
+  } else {
+    *allocated = aligned_requested;
   }
   ASSERT(*allocated <= current.size);
+  ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
   if (!code_range_->Commit(current.start, *allocated, true)) {
     *allocated = 0;
     return NULL;
@@ -256,7 +251,8 @@
 }
 
 
-void CodeRange::FreeRawMemory(void* address, size_t length) {
+void CodeRange::FreeRawMemory(Address address, size_t length) {
+  ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
   free_list_.Add(FreeBlock(address, length));
   code_range_->Uncommit(address, length);
 }
@@ -274,35 +270,12 @@
 // MemoryAllocator
 //
 
-// 270 is an estimate based on the static default heap size of a pair of 256K
-// semispaces and a 64M old generation.
-const int kEstimatedNumberOfChunks = 270;
-
-
 MemoryAllocator::MemoryAllocator(Isolate* isolate)
     : isolate_(isolate),
       capacity_(0),
       capacity_executable_(0),
       size_(0),
-      size_executable_(0),
-      initial_chunk_(NULL),
-      chunks_(kEstimatedNumberOfChunks),
-      free_chunk_ids_(kEstimatedNumberOfChunks),
-      max_nof_chunks_(0),
-      top_(0) {
-}
-
-
-void MemoryAllocator::Push(int free_chunk_id) {
-  ASSERT(max_nof_chunks_ > 0);
-  ASSERT(top_ < max_nof_chunks_);
-  free_chunk_ids_[top_++] = free_chunk_id;
-}
-
-
-int MemoryAllocator::Pop() {
-  ASSERT(top_ > 0);
-  return free_chunk_ids_[--top_];
+      size_executable_(0) {
 }
 
 
@@ -311,112 +284,322 @@
   capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
   ASSERT_GE(capacity_, capacity_executable_);
 
-  // Over-estimate the size of chunks_ array.  It assumes the expansion of old
-  // space is always in the unit of a chunk (kChunkSize) except the last
-  // expansion.
-  //
-  // Due to alignment, allocated space might be one page less than required
-  // number (kPagesPerChunk) of pages for old spaces.
-  //
-  // Reserve two chunk ids for semispaces, one for map space, one for old
-  // space, and one for code space.
-  max_nof_chunks_ =
-      static_cast<int>((capacity_ / (kChunkSize - Page::kPageSize))) + 5;
-  if (max_nof_chunks_ > kMaxNofChunks) return false;
-
   size_ = 0;
   size_executable_ = 0;
-  ChunkInfo info;  // uninitialized element.
-  for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
-    chunks_.Add(info);
-    free_chunk_ids_.Add(i);
-  }
-  top_ = max_nof_chunks_;
+
   return true;
 }
 
 
 void MemoryAllocator::TearDown() {
-  for (int i = 0; i < max_nof_chunks_; i++) {
-    if (chunks_[i].address() != NULL) DeleteChunk(i);
-  }
-  chunks_.Clear();
-  free_chunk_ids_.Clear();
-
-  if (initial_chunk_ != NULL) {
-    LOG(isolate_, DeleteEvent("InitialChunk", initial_chunk_->address()));
-    delete initial_chunk_;
-    initial_chunk_ = NULL;
-  }
-
-  ASSERT(top_ == max_nof_chunks_);  // all chunks are free
-  top_ = 0;
+  // Check that spaces were torn down before MemoryAllocator.
+  ASSERT(size_ == 0);
+  // TODO(gc) this will be true again when we fix FreeMemory.
+  // ASSERT(size_executable_ == 0);
   capacity_ = 0;
   capacity_executable_ = 0;
-  size_ = 0;
-  max_nof_chunks_ = 0;
 }
 
 
-void* MemoryAllocator::AllocateRawMemory(const size_t requested,
-                                         size_t* allocated,
-                                         Executability executable) {
-  if (size_ + static_cast<size_t>(requested) > static_cast<size_t>(capacity_)) {
+void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
+                                 Executability executable) {
+  // TODO(gc) make code_range part of memory allocator?
+  ASSERT(reservation->IsReserved());
+  size_t size = reservation->size();
+  ASSERT(size_ >= size);
+  size_ -= size;
+
+  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
+
+  if (executable == EXECUTABLE) {
+    ASSERT(size_executable_ >= size);
+    size_executable_ -= size;
+  }
+  // Code which is part of the code-range does not have its own VirtualMemory.
+  ASSERT(!isolate_->code_range()->contains(
+      static_cast<Address>(reservation->address())));
+  ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
+  reservation->Release();
+}
+
+
+void MemoryAllocator::FreeMemory(Address base,
+                                 size_t size,
+                                 Executability executable) {
+  // TODO(gc) make code_range part of memory allocator?
+  ASSERT(size_ >= size);
+  size_ -= size;
+
+  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
+
+  if (executable == EXECUTABLE) {
+    ASSERT(size_executable_ >= size);
+    size_executable_ -= size;
+  }
+  if (isolate_->code_range()->contains(static_cast<Address>(base))) {
+    ASSERT(executable == EXECUTABLE);
+    isolate_->code_range()->FreeRawMemory(base, size);
+  } else {
+    ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
+    bool result = VirtualMemory::ReleaseRegion(base, size);
+    USE(result);
+    ASSERT(result);
+  }
+}
+
+
+Address MemoryAllocator::ReserveAlignedMemory(size_t size,
+                                              size_t alignment,
+                                              VirtualMemory* controller) {
+  VirtualMemory reservation(size, alignment);
+
+  if (!reservation.IsReserved()) return NULL;
+  size_ += reservation.size();
+  Address base = RoundUp(static_cast<Address>(reservation.address()),
+                         alignment);
+  controller->TakeControl(&reservation);
+  return base;
+}
+
+
+Address MemoryAllocator::AllocateAlignedMemory(size_t size,
+                                               size_t alignment,
+                                               Executability executable,
+                                               VirtualMemory* controller) {
+  VirtualMemory reservation;
+  Address base = ReserveAlignedMemory(size, alignment, &reservation);
+  if (base == NULL) return NULL;
+  if (!reservation.Commit(base,
+                          size,
+                          executable == EXECUTABLE)) {
     return NULL;
   }
+  controller->TakeControl(&reservation);
+  return base;
+}
 
-  void* mem;
+
+void Page::InitializeAsAnchor(PagedSpace* owner) {
+  set_owner(owner);
+  set_prev_page(this);
+  set_next_page(this);
+}
+
+
+NewSpacePage* NewSpacePage::Initialize(Heap* heap,
+                                       Address start,
+                                       SemiSpace* semi_space) {
+  MemoryChunk* chunk = MemoryChunk::Initialize(heap,
+                                               start,
+                                               Page::kPageSize,
+                                               NOT_EXECUTABLE,
+                                               semi_space);
+  chunk->set_next_chunk(NULL);
+  chunk->set_prev_chunk(NULL);
+  chunk->initialize_scan_on_scavenge(true);
+  bool in_to_space = (semi_space->id() != kFromSpace);
+  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
+                             : MemoryChunk::IN_FROM_SPACE);
+  ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
+                                       : MemoryChunk::IN_TO_SPACE));
+  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
+  heap->incremental_marking()->SetNewSpacePageFlags(page);
+  return page;
+}
+
+
+void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
+  set_owner(semi_space);
+  set_next_chunk(this);
+  set_prev_chunk(this);
+  // Clearing all flags marks this invalid page as not being in new-space.
+  // All real new-space pages will be in new-space.
+  SetFlags(0, ~0);
+}
+
+
+MemoryChunk* MemoryChunk::Initialize(Heap* heap,
+                                     Address base,
+                                     size_t size,
+                                     Executability executable,
+                                     Space* owner) {
+  MemoryChunk* chunk = FromAddress(base);
+
+  ASSERT(base == chunk->address());
+
+  chunk->heap_ = heap;
+  chunk->size_ = size;
+  chunk->flags_ = 0;
+  chunk->set_owner(owner);
+  chunk->InitializeReservedMemory();
+  chunk->slots_buffer_ = NULL;
+  chunk->skip_list_ = NULL;
+  chunk->ResetLiveBytes();
+  Bitmap::Clear(chunk);
+  chunk->initialize_scan_on_scavenge(false);
+  chunk->SetFlag(WAS_SWEPT_PRECISELY);
+
+  ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
+  ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
+
+  if (executable == EXECUTABLE) chunk->SetFlag(IS_EXECUTABLE);
+
+  if (owner == heap->old_data_space()) chunk->SetFlag(CONTAINS_ONLY_DATA);
+
+  return chunk;
+}
+
+
+void MemoryChunk::InsertAfter(MemoryChunk* other) {
+  next_chunk_ = other->next_chunk_;
+  prev_chunk_ = other;
+  other->next_chunk_->prev_chunk_ = this;
+  other->next_chunk_ = this;
+}
+
+
+void MemoryChunk::Unlink() {
+  if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
+    heap_->decrement_scan_on_scavenge_pages();
+    ClearFlag(SCAN_ON_SCAVENGE);
+  }
+  next_chunk_->prev_chunk_ = prev_chunk_;
+  prev_chunk_->next_chunk_ = next_chunk_;
+  prev_chunk_ = NULL;
+  next_chunk_ = NULL;
+}
+
+
+MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
+                                            Executability executable,
+                                            Space* owner) {
+  size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size;
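+  // chunk_size covers the MemoryChunk header as well as the requested body.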
+  Heap* heap = isolate_->heap();
+  Address base = NULL;
+  VirtualMemory reservation;
   if (executable == EXECUTABLE) {
     // Check executable memory limit.
-    if (size_executable_ + requested >
-        static_cast<size_t>(capacity_executable_)) {
+    if (size_executable_ + chunk_size > capacity_executable_) {
       LOG(isolate_,
           StringEvent("MemoryAllocator::AllocateRawMemory",
                       "V8 Executable Allocation capacity exceeded"));
       return NULL;
     }
+
     // Allocate executable memory either from code range or from the
     // OS.
     if (isolate_->code_range()->exists()) {
-      mem = isolate_->code_range()->AllocateRawMemory(requested, allocated);
+      base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
+      ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
+                       MemoryChunk::kAlignment));
+      if (base == NULL) return NULL;
+      size_ += chunk_size;
+      // Update executable memory size.
+      size_executable_ += chunk_size;
     } else {
-      mem = OS::Allocate(requested, allocated, true);
+      base = AllocateAlignedMemory(chunk_size,
+                                   MemoryChunk::kAlignment,
+                                   executable,
+                                   &reservation);
+      if (base == NULL) return NULL;
+      // Update executable memory size.
+      size_executable_ += reservation.size();
     }
-    // Update executable memory size.
-    size_executable_ += static_cast<int>(*allocated);
   } else {
-    mem = OS::Allocate(requested, allocated, false);
+    base = AllocateAlignedMemory(chunk_size,
+                                 MemoryChunk::kAlignment,
+                                 executable,
+                                 &reservation);
+
+    if (base == NULL) return NULL;
   }
-  int alloced = static_cast<int>(*allocated);
-  size_ += alloced;
 
 #ifdef DEBUG
-  ZapBlock(reinterpret_cast<Address>(mem), alloced);
+  ZapBlock(base, chunk_size);
 #endif
-  isolate_->counters()->memory_allocated()->Increment(alloced);
-  return mem;
+  isolate_->counters()->memory_allocated()->
+      Increment(static_cast<int>(chunk_size));
+
+  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
+  if (owner != NULL) {
+    ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
+    PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
+  }
+
+  MemoryChunk* result = MemoryChunk::Initialize(heap,
+                                                base,
+                                                chunk_size,
+                                                executable,
+                                                owner);
+  result->set_reserved_memory(&reservation);
+  return result;
 }
 
 
-void MemoryAllocator::FreeRawMemory(void* mem,
-                                    size_t length,
+Page* MemoryAllocator::AllocatePage(PagedSpace* owner,
                                     Executability executable) {
-#ifdef DEBUG
-  // Do not try to zap the guard page.
-  size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
-  ZapBlock(reinterpret_cast<Address>(mem) + guard_size, length - guard_size);
-#endif
-  if (isolate_->code_range()->contains(static_cast<Address>(mem))) {
-    isolate_->code_range()->FreeRawMemory(mem, length);
-  } else {
-    OS::Free(mem, length);
-  }
-  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(length));
-  size_ -= static_cast<int>(length);
-  if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length);
+  MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner);
 
-  ASSERT(size_ >= 0);
-  ASSERT(size_executable_ >= 0);
+  if (chunk == NULL) return NULL;
+
+  return Page::Initialize(isolate_->heap(), chunk, executable, owner);
+}
+
+
+LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
+                                              Executability executable,
+                                              Space* owner) {
+  MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
+  if (chunk == NULL) return NULL;
+  return LargePage::Initialize(isolate_->heap(), chunk);
+}
+
+
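+// Releases a chunk: notifies allocation callbacks, deletes the chunk's slots
+// buffer and skip list, and returns the memory either through the chunk's
+// own VirtualMemory reservation or, if it has none (e.g. chunks carved out
+// of the code range), by address and size.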
+void MemoryAllocator::Free(MemoryChunk* chunk) {
+  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
+  if (chunk->owner() != NULL) {
+    ObjectSpace space =
+        static_cast<ObjectSpace>(1 << chunk->owner()->identity());
+    PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
+  }
+
+  delete chunk->slots_buffer();
+  delete chunk->skip_list();
+
+  VirtualMemory* reservation = chunk->reserved_memory();
+  if (reservation->IsReserved()) {
+    FreeMemory(reservation, chunk->executable());
+  } else {
+    FreeMemory(chunk->address(),
+               chunk->size(),
+               chunk->executable());
+  }
+}
+
+
+bool MemoryAllocator::CommitBlock(Address start,
+                                  size_t size,
+                                  Executability executable) {
+  if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
+#ifdef DEBUG
+  ZapBlock(start, size);
+#endif
+  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
+  return true;
+}
+
+
+bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
+  if (!VirtualMemory::UncommitRegion(start, size)) return false;
+  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
+  return true;
+}
+
+
+void MemoryAllocator::ZapBlock(Address start, size_t size) {
+  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
+    Memory::Address_at(start + s) = kZapValue;
+  }
 }
 
 
@@ -465,269 +648,6 @@
   UNREACHABLE();
 }
 
-void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
-  ASSERT(initial_chunk_ == NULL);
-
-  initial_chunk_ = new VirtualMemory(requested);
-  CHECK(initial_chunk_ != NULL);
-  if (!initial_chunk_->IsReserved()) {
-    delete initial_chunk_;
-    initial_chunk_ = NULL;
-    return NULL;
-  }
-
-  // We are sure that we have mapped a block of requested addresses.
-  ASSERT(initial_chunk_->size() == requested);
-  LOG(isolate_,
-      NewEvent("InitialChunk", initial_chunk_->address(), requested));
-  size_ += static_cast<int>(requested);
-  return initial_chunk_->address();
-}
-
-
-static int PagesInChunk(Address start, size_t size) {
-  // The first page starts on the first page-aligned address from start onward
-  // and the last page ends on the last page-aligned address before
-  // start+size.  Page::kPageSize is a power of two so we can divide by
-  // shifting.
-  return static_cast<int>((RoundDown(start + size, Page::kPageSize)
-      - RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
-}
-
-
-Page* MemoryAllocator::AllocatePages(int requested_pages,
-                                     int* allocated_pages,
-                                     PagedSpace* owner) {
-  if (requested_pages <= 0) return Page::FromAddress(NULL);
-  size_t chunk_size = requested_pages * Page::kPageSize;
-
-  void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
-  if (chunk == NULL) return Page::FromAddress(NULL);
-  LOG(isolate_, NewEvent("PagedChunk", chunk, chunk_size));
-
-  *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
-
-  // We may 'lose' a page due to alignment.
-  ASSERT(*allocated_pages >= kPagesPerChunk - 1);
-
-  size_t guard_size = (owner->executable() == EXECUTABLE) ? Page::kPageSize : 0;
-
-  // Check that we got at least one page that we can use.
-  if (*allocated_pages <= ((guard_size != 0) ? 1 : 0)) {
-    FreeRawMemory(chunk,
-                  chunk_size,
-                  owner->executable());
-    LOG(isolate_, DeleteEvent("PagedChunk", chunk));
-    return Page::FromAddress(NULL);
-  }
-
-  if (guard_size != 0) {
-    OS::Guard(chunk, guard_size);
-    chunk_size -= guard_size;
-    chunk = static_cast<Address>(chunk) + guard_size;
-    --*allocated_pages;
-  }
-
-  int chunk_id = Pop();
-  chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);
-
-  ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
-  PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
-  Page* new_pages = InitializePagesInChunk(chunk_id, *allocated_pages, owner);
-
-  return new_pages;
-}
-
-
-Page* MemoryAllocator::CommitPages(Address start, size_t size,
-                                   PagedSpace* owner, int* num_pages) {
-  ASSERT(start != NULL);
-  *num_pages = PagesInChunk(start, size);
-  ASSERT(*num_pages > 0);
-  ASSERT(initial_chunk_ != NULL);
-  ASSERT(InInitialChunk(start));
-  ASSERT(InInitialChunk(start + size - 1));
-  if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
-    return Page::FromAddress(NULL);
-  }
-#ifdef DEBUG
-  ZapBlock(start, size);
-#endif
-  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
-
-  // So long as we correctly overestimated the number of chunks we should not
-  // run out of chunk ids.
-  CHECK(!OutOfChunkIds());
-  int chunk_id = Pop();
-  chunks_[chunk_id].init(start, size, owner);
-  return InitializePagesInChunk(chunk_id, *num_pages, owner);
-}
-
-
-bool MemoryAllocator::CommitBlock(Address start,
-                                  size_t size,
-                                  Executability executable) {
-  ASSERT(start != NULL);
-  ASSERT(size > 0);
-  ASSERT(initial_chunk_ != NULL);
-  ASSERT(InInitialChunk(start));
-  ASSERT(InInitialChunk(start + size - 1));
-
-  if (!initial_chunk_->Commit(start, size, executable)) return false;
-#ifdef DEBUG
-  ZapBlock(start, size);
-#endif
-  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
-  return true;
-}
-
-
-bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
-  ASSERT(start != NULL);
-  ASSERT(size > 0);
-  ASSERT(initial_chunk_ != NULL);
-  ASSERT(InInitialChunk(start));
-  ASSERT(InInitialChunk(start + size - 1));
-
-  if (!initial_chunk_->Uncommit(start, size)) return false;
-  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
-  return true;
-}
-
-
-void MemoryAllocator::ZapBlock(Address start, size_t size) {
-  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
-    Memory::Address_at(start + s) = kZapValue;
-  }
-}
-
-
-Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
-                                              PagedSpace* owner) {
-  ASSERT(IsValidChunk(chunk_id));
-  ASSERT(pages_in_chunk > 0);
-
-  Address chunk_start = chunks_[chunk_id].address();
-
-  Address low = RoundUp(chunk_start, Page::kPageSize);
-
-#ifdef DEBUG
-  size_t chunk_size = chunks_[chunk_id].size();
-  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
-  ASSERT(pages_in_chunk <=
-        ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
-#endif
-
-  Address page_addr = low;
-  for (int i = 0; i < pages_in_chunk; i++) {
-    Page* p = Page::FromAddress(page_addr);
-    p->heap_ = owner->heap();
-    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
-    p->InvalidateWatermark(true);
-    p->SetIsLargeObjectPage(false);
-    p->SetAllocationWatermark(p->ObjectAreaStart());
-    p->SetCachedAllocationWatermark(p->ObjectAreaStart());
-    page_addr += Page::kPageSize;
-  }
-
-  // Set the next page of the last page to 0.
-  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
-  last_page->opaque_header = OffsetFrom(0) | chunk_id;
-
-  return Page::FromAddress(low);
-}
-
-
-Page* MemoryAllocator::FreePages(Page* p) {
-  if (!p->is_valid()) return p;
-
-  // Find the first page in the same chunk as 'p'
-  Page* first_page = FindFirstPageInSameChunk(p);
-  Page* page_to_return = Page::FromAddress(NULL);
-
-  if (p != first_page) {
-    // Find the last page in the same chunk as 'prev'.
-    Page* last_page = FindLastPageInSameChunk(p);
-    first_page = GetNextPage(last_page);  // first page in next chunk
-
-    // set the next_page of last_page to NULL
-    SetNextPage(last_page, Page::FromAddress(NULL));
-    page_to_return = p;  // return 'p' when exiting
-  }
-
-  while (first_page->is_valid()) {
-    int chunk_id = GetChunkId(first_page);
-    ASSERT(IsValidChunk(chunk_id));
-
-    // Find the first page of the next chunk before deleting this chunk.
-    first_page = GetNextPage(FindLastPageInSameChunk(first_page));
-
-    // Free the current chunk.
-    DeleteChunk(chunk_id);
-  }
-
-  return page_to_return;
-}
-
-
-void MemoryAllocator::FreeAllPages(PagedSpace* space) {
-  for (int i = 0, length = chunks_.length(); i < length; i++) {
-    if (chunks_[i].owner() == space) {
-      DeleteChunk(i);
-    }
-  }
-}
-
-
-void MemoryAllocator::DeleteChunk(int chunk_id) {
-  ASSERT(IsValidChunk(chunk_id));
-
-  ChunkInfo& c = chunks_[chunk_id];
-
-  // We cannot free a chunk contained in the initial chunk because it was not
-  // allocated with AllocateRawMemory.  Instead we uncommit the virtual
-  // memory.
-  if (InInitialChunk(c.address())) {
-    // TODO(1240712): VirtualMemory::Uncommit has a return value which
-    // is ignored here.
-    initial_chunk_->Uncommit(c.address(), c.size());
-    Counters* counters = isolate_->counters();
-    counters->memory_allocated()->Decrement(static_cast<int>(c.size()));
-  } else {
-    LOG(isolate_, DeleteEvent("PagedChunk", c.address()));
-    ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner_identity());
-    size_t size = c.size();
-    size_t guard_size = (c.executable() == EXECUTABLE) ? Page::kPageSize : 0;
-    FreeRawMemory(c.address() - guard_size, size + guard_size, c.executable());
-    PerformAllocationCallback(space, kAllocationActionFree, size);
-  }
-  c.init(NULL, 0, NULL);
-  Push(chunk_id);
-}
-
-
-Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
-  int chunk_id = GetChunkId(p);
-  ASSERT(IsValidChunk(chunk_id));
-
-  Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize);
-  return Page::FromAddress(low);
-}
-
-
-Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) {
-  int chunk_id = GetChunkId(p);
-  ASSERT(IsValidChunk(chunk_id));
-
-  Address chunk_start = chunks_[chunk_id].address();
-  size_t chunk_size = chunks_[chunk_id].size();
-
-  Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
-  ASSERT(chunk_start <= p->address() && p->address() < high);
-
-  return Page::FromAddress(high - Page::kPageSize);
-}
-
 
 #ifdef DEBUG
 void MemoryAllocator::ReportStatistics() {
@@ -739,75 +659,6 @@
 }
 #endif
 
-
-void MemoryAllocator::RelinkPageListInChunkOrder(PagedSpace* space,
-                                                 Page** first_page,
-                                                 Page** last_page,
-                                                 Page** last_page_in_use) {
-  Page* first = NULL;
-  Page* last = NULL;
-
-  for (int i = 0, length = chunks_.length(); i < length; i++) {
-    ChunkInfo& chunk = chunks_[i];
-
-    if (chunk.owner() == space) {
-      if (first == NULL) {
-        Address low = RoundUp(chunk.address(), Page::kPageSize);
-        first = Page::FromAddress(low);
-      }
-      last = RelinkPagesInChunk(i,
-                                chunk.address(),
-                                chunk.size(),
-                                last,
-                                last_page_in_use);
-    }
-  }
-
-  if (first_page != NULL) {
-    *first_page = first;
-  }
-
-  if (last_page != NULL) {
-    *last_page = last;
-  }
-}
-
-
-Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
-                                          Address chunk_start,
-                                          size_t chunk_size,
-                                          Page* prev,
-                                          Page** last_page_in_use) {
-  Address page_addr = RoundUp(chunk_start, Page::kPageSize);
-  int pages_in_chunk = PagesInChunk(chunk_start, chunk_size);
-
-  if (prev->is_valid()) {
-    SetNextPage(prev, Page::FromAddress(page_addr));
-  }
-
-  for (int i = 0; i < pages_in_chunk; i++) {
-    Page* p = Page::FromAddress(page_addr);
-    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
-    page_addr += Page::kPageSize;
-
-    p->InvalidateWatermark(true);
-    if (p->WasInUseBeforeMC()) {
-      *last_page_in_use = p;
-    }
-  }
-
-  // Set the next page of the last page to 0.
-  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
-  last_page->opaque_header = OffsetFrom(0) | chunk_id;
-
-  if (last_page->WasInUseBeforeMC()) {
-    *last_page_in_use = last_page;
-  }
-
-  return last_page;
-}
-
-
 // -----------------------------------------------------------------------------
 // PagedSpace implementation
 
@@ -815,7 +666,11 @@
                        intptr_t max_capacity,
                        AllocationSpace id,
                        Executability executable)
-    : Space(heap, id, executable) {
+    : Space(heap, id, executable),
+      free_list_(this),
+      was_swept_conservatively_(false),
+      first_unswept_page_(Page::FromAddress(NULL)),
+      last_unswept_page_(Page::FromAddress(NULL)) {
   max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
                   * Page::kObjectAreaSize;
   accounting_stats_.Clear();
@@ -823,215 +678,73 @@
   allocation_info_.top = NULL;
   allocation_info_.limit = NULL;
 
-  mc_forwarding_info_.top = NULL;
-  mc_forwarding_info_.limit = NULL;
+  anchor_.InitializeAsAnchor(this);
 }
 
 
-bool PagedSpace::Setup(Address start, size_t size) {
-  if (HasBeenSetup()) return false;
-
-  int num_pages = 0;
-  // Try to use the virtual memory range passed to us.  If it is too small to
-  // contain at least one page, ignore it and allocate instead.
-  int pages_in_chunk = PagesInChunk(start, size);
-  if (pages_in_chunk > 0) {
-    first_page_ = Isolate::Current()->memory_allocator()->CommitPages(
-        RoundUp(start, Page::kPageSize),
-        Page::kPageSize * pages_in_chunk,
-        this, &num_pages);
-  } else {
-    int requested_pages =
-        Min(MemoryAllocator::kPagesPerChunk,
-            static_cast<int>(max_capacity_ / Page::kObjectAreaSize));
-    first_page_ =
-        Isolate::Current()->memory_allocator()->AllocatePages(
-            requested_pages, &num_pages, this);
-    if (!first_page_->is_valid()) return false;
-  }
-
-  // We are sure that the first page is valid and that we have at least one
-  // page.
-  ASSERT(first_page_->is_valid());
-  ASSERT(num_pages > 0);
-  accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
-  ASSERT(Capacity() <= max_capacity_);
-
-  // Sequentially clear region marks in the newly allocated
-  // pages and cache the current last page in the space.
-  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
-    p->SetRegionMarks(Page::kAllRegionsCleanMarks);
-    last_page_ = p;
-  }
-
-  // Use first_page_ for allocation.
-  SetAllocationInfo(&allocation_info_, first_page_);
-
-  page_list_is_chunk_ordered_ = true;
-
+bool PagedSpace::Setup() {
   return true;
 }
 
 
 bool PagedSpace::HasBeenSetup() {
-  return (Capacity() > 0);
+  return true;
 }
 
 
 void PagedSpace::TearDown() {
-  Isolate::Current()->memory_allocator()->FreeAllPages(this);
-  first_page_ = NULL;
+  PageIterator iterator(this);
+  while (iterator.has_next()) {
+    heap()->isolate()->memory_allocator()->Free(iterator.next());
+  }
+  anchor_.set_next_page(&anchor_);
+  anchor_.set_prev_page(&anchor_);
   accounting_stats_.Clear();
 }
 
 
-void PagedSpace::MarkAllPagesClean() {
-  PageIterator it(this, PageIterator::ALL_PAGES);
-  while (it.has_next()) {
-    it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
-  }
-}
-
-
 MaybeObject* PagedSpace::FindObject(Address addr) {
-  // Note: this function can only be called before or after mark-compact GC
-  // because it accesses map pointers.
+  // Note: this function can only be called on precisely swept spaces.
   ASSERT(!heap()->mark_compact_collector()->in_use());
 
   if (!Contains(addr)) return Failure::Exception();
 
   Page* p = Page::FromAddress(addr);
-  ASSERT(IsUsed(p));
-  Address cur = p->ObjectAreaStart();
-  Address end = p->AllocationTop();
-  while (cur < end) {
-    HeapObject* obj = HeapObject::FromAddress(cur);
+  HeapObjectIterator it(p, NULL);
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+    Address cur = obj->address();
     Address next = cur + obj->Size();
     if ((cur <= addr) && (addr < next)) return obj;
-    cur = next;
   }
 
   UNREACHABLE();
   return Failure::Exception();
 }
 
-
-bool PagedSpace::IsUsed(Page* page) {
-  PageIterator it(this, PageIterator::PAGES_IN_USE);
-  while (it.has_next()) {
-    if (page == it.next()) return true;
-  }
-  return false;
-}
-
-
-void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
-  alloc_info->top = p->ObjectAreaStart();
-  alloc_info->limit = p->ObjectAreaEnd();
-  ASSERT(alloc_info->VerifyPagedAllocation());
-}
-
-
-void PagedSpace::MCResetRelocationInfo() {
-  // Set page indexes.
-  int i = 0;
-  PageIterator it(this, PageIterator::ALL_PAGES);
-  while (it.has_next()) {
-    Page* p = it.next();
-    p->mc_page_index = i++;
-  }
-
-  // Set mc_forwarding_info_ to the first page in the space.
-  SetAllocationInfo(&mc_forwarding_info_, first_page_);
-  // All the bytes in the space are 'available'.  We will rediscover
-  // allocated and wasted bytes during GC.
-  accounting_stats_.Reset();
-}
-
-
-int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
-#ifdef DEBUG
-  // The Contains function considers the address at the beginning of a
-  // page in the page, MCSpaceOffsetForAddress considers it is in the
-  // previous page.
-  if (Page::IsAlignedToPageSize(addr)) {
-    ASSERT(Contains(addr - kPointerSize));
-  } else {
-    ASSERT(Contains(addr));
-  }
-#endif
-
-  // If addr is at the end of a page, it belongs to previous page
-  Page* p = Page::IsAlignedToPageSize(addr)
-            ? Page::FromAllocationTop(addr)
-            : Page::FromAddress(addr);
-  int index = p->mc_page_index;
-  return (index * Page::kPageSize) + p->Offset(addr);
-}
-
-
-// Slow case for reallocating and promoting objects during a compacting
-// collection.  This function is not space-specific.
-HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
-  Page* current_page = TopPageOf(mc_forwarding_info_);
-  if (!current_page->next_page()->is_valid()) {
-    if (!Expand(current_page)) {
-      return NULL;
-    }
-  }
-
-  // There are surely more pages in the space now.
-  ASSERT(current_page->next_page()->is_valid());
-  // We do not add the top of page block for current page to the space's
-  // free list---the block may contain live objects so we cannot write
-  // bookkeeping information to it.  Instead, we will recover top of page
-  // blocks when we move objects to their new locations.
-  //
-  // We do however write the allocation pointer to the page.  The encoding
-  // of forwarding addresses is as an offset in terms of live bytes, so we
-  // need quick access to the allocation top of each page to decode
-  // forwarding addresses.
-  current_page->SetAllocationWatermark(mc_forwarding_info_.top);
-  current_page->next_page()->InvalidateWatermark(true);
-  SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
-  return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
-}
-
-
-bool PagedSpace::Expand(Page* last_page) {
+bool PagedSpace::CanExpand() {
   ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
   ASSERT(Capacity() % Page::kObjectAreaSize == 0);
 
   if (Capacity() == max_capacity_) return false;
 
   ASSERT(Capacity() < max_capacity_);
-  // Last page must be valid and its next page is invalid.
-  ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());
 
-  int available_pages =
-      static_cast<int>((max_capacity_ - Capacity()) / Page::kObjectAreaSize);
-  // We don't want to have to handle small chunks near the end so if there are
-  // not kPagesPerChunk pages available without exceeding the max capacity then
-  // act as if memory has run out.
-  if (available_pages < MemoryAllocator::kPagesPerChunk) return false;
+  // Are we going to exceed capacity for this space?
+  if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
 
-  int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
-  Page* p = heap()->isolate()->memory_allocator()->AllocatePages(
-      desired_pages, &desired_pages, this);
-  if (!p->is_valid()) return false;
+  return true;
+}
 
-  accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
+bool PagedSpace::Expand() {
+  if (!CanExpand()) return false;
+
+  Page* p = heap()->isolate()->memory_allocator()->
+      AllocatePage(this, executable());
+  if (p == NULL) return false;
+
   ASSERT(Capacity() <= max_capacity_);
 
-  heap()->isolate()->memory_allocator()->SetNextPage(last_page, p);
-
-  // Sequentially clear region marks of new pages and and cache the
-  // new last page in the space.
-  while (p->is_valid()) {
-    p->SetRegionMarks(Page::kAllRegionsCleanMarks);
-    last_page_ = p;
-    p = p->next_page();
-  }
+  p->InsertAfter(anchor_.prev_page());
 
   return true;
 }
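
With chunk-based growth gone, expansion now happens one page at a time. A minimal sketch of how a slow allocation path could lean on CanExpand()/Expand(); it is written as if it were a PagedSpace member and is illustrative only, not code from this patch:

    HeapObject* PagedSpace::SlowAllocateRawSketch(int size_in_bytes) {
      // Let the free list try first.
      HeapObject* object = free_list_.Allocate(size_in_bytes);
      if (object != NULL) return object;
      // Add exactly one page; fails once Capacity() would pass max_capacity_.
      if (!Expand()) return NULL;  // Caller is expected to trigger a GC.
      // Assumes the fresh page's object area was handed to the free list
      // when the page was initialized.
      return free_list_.Allocate(size_in_bytes);
    }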
@@ -1039,8 +752,10 @@
 
 #ifdef DEBUG
 int PagedSpace::CountTotalPages() {
+  PageIterator it(this);
   int count = 0;
-  for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
+  while (it.has_next()) {
+    it.next();
     count++;
   }
   return count;
@@ -1048,63 +763,30 @@
 #endif
 
 
-void PagedSpace::Shrink() {
-  if (!page_list_is_chunk_ordered_) {
-    // We can't shrink space if pages is not chunk-ordered
-    // (see comment for class MemoryAllocator for definition).
-    return;
+void PagedSpace::ReleasePage(Page* page) {
+  ASSERT(page->LiveBytes() == 0);
+  page->Unlink();
+  if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
+    heap()->isolate()->memory_allocator()->Free(page);
+  } else {
+    heap()->QueueMemoryChunkForFree(page);
   }
 
-  // Release half of free pages.
-  Page* top_page = AllocationTopPage();
-  ASSERT(top_page->is_valid());
-
-  // Count the number of pages we would like to free.
-  int pages_to_free = 0;
-  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
-    pages_to_free++;
-  }
-
-  // Free pages after top_page.
-  Page* p = heap()->isolate()->memory_allocator()->
-      FreePages(top_page->next_page());
-  heap()->isolate()->memory_allocator()->SetNextPage(top_page, p);
-
-  // Find out how many pages we failed to free and update last_page_.
-  // Please note pages can only be freed in whole chunks.
-  last_page_ = top_page;
-  for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
-    pages_to_free--;
-    last_page_ = p;
-  }
-
-  accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize);
-  ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
+  ASSERT(Capacity() > 0);
+  ASSERT(Capacity() % Page::kObjectAreaSize == 0);
+  accounting_stats_.ShrinkSpace(Page::kObjectAreaSize);
 }
 
 
-bool PagedSpace::EnsureCapacity(int capacity) {
-  if (Capacity() >= capacity) return true;
-
-  // Start from the allocation top and loop to the last page in the space.
-  Page* last_page = AllocationTopPage();
-  Page* next_page = last_page->next_page();
-  while (next_page->is_valid()) {
-    last_page = heap()->isolate()->memory_allocator()->
-        FindLastPageInSameChunk(next_page);
-    next_page = last_page->next_page();
+void PagedSpace::ReleaseAllUnusedPages() {
+  PageIterator it(this);
+  while (it.has_next()) {
+    Page* page = it.next();
+    if (page->LiveBytes() == 0) {
+      ReleasePage(page);
+    }
   }
-
-  // Expand the space until it has the required capacity or expansion fails.
-  do {
-    if (!Expand(last_page)) return false;
-    ASSERT(last_page->next_page()->is_valid());
-    last_page =
-        heap()->isolate()->memory_allocator()->FindLastPageInSameChunk(
-            last_page->next_page());
-  } while (Capacity() < capacity);
-
-  return true;
+  heap()->FreeQueuedChunks();
 }
 
 
@@ -1114,61 +796,52 @@
 
 
 #ifdef DEBUG
-// We do not assume that the PageIterator works, because it depends on the
-// invariants we are checking during verification.
 void PagedSpace::Verify(ObjectVisitor* visitor) {
-  // The allocation pointer should be valid, and it should be in a page in the
-  // space.
-  ASSERT(allocation_info_.VerifyPagedAllocation());
-  Page* top_page = Page::FromAllocationTop(allocation_info_.top);
-  ASSERT(heap()->isolate()->memory_allocator()->IsPageInSpace(top_page, this));
+  // We can only iterate over the pages if they were swept precisely.
+  if (was_swept_conservatively_) return;
 
-  // Loop over all the pages.
-  bool above_allocation_top = false;
-  Page* current_page = first_page_;
-  while (current_page->is_valid()) {
-    if (above_allocation_top) {
-      // We don't care what's above the allocation top.
-    } else {
-      Address top = current_page->AllocationTop();
-      if (current_page == top_page) {
-        ASSERT(top == allocation_info_.top);
-        // The next page will be above the allocation top.
-        above_allocation_top = true;
-      }
-
-      // It should be packed with objects from the bottom to the top.
-      Address current = current_page->ObjectAreaStart();
-      while (current < top) {
-        HeapObject* object = HeapObject::FromAddress(current);
-
-        // The first word should be a map, and we expect all map pointers to
-        // be in map space.
-        Map* map = object->map();
-        ASSERT(map->IsMap());
-        ASSERT(heap()->map_space()->Contains(map));
-
-        // Perform space-specific object verification.
-        VerifyObject(object);
-
-        // The object itself should look OK.
-        object->Verify();
-
-        // All the interior pointers should be contained in the heap and
-        // have page regions covering intergenerational references should be
-        // marked dirty.
-        int size = object->Size();
-        object->IterateBody(map->instance_type(), size, visitor);
-
-        current += size;
-      }
-
-      // The allocation pointer should not be in the middle of an object.
-      ASSERT(current == top);
+  bool allocation_pointer_found_in_space =
+      (allocation_info_.top == allocation_info_.limit);
+  PageIterator page_iterator(this);
+  while (page_iterator.has_next()) {
+    Page* page = page_iterator.next();
+    ASSERT(page->owner() == this);
+    if (page == Page::FromAllocationTop(allocation_info_.top)) {
+      allocation_pointer_found_in_space = true;
     }
+    ASSERT(page->WasSweptPrecisely());
+    HeapObjectIterator it(page, NULL);
+    Address end_of_previous_object = page->ObjectAreaStart();
+    Address top = page->ObjectAreaEnd();
+    int black_size = 0;
+    for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+      ASSERT(end_of_previous_object <= object->address());
 
-    current_page = current_page->next_page();
+      // The first word should be a map, and we expect all map pointers to
+      // be in map space.
+      Map* map = object->map();
+      ASSERT(map->IsMap());
+      ASSERT(heap()->map_space()->Contains(map));
+
+      // Perform space-specific object verification.
+      VerifyObject(object);
+
+      // The object itself should look OK.
+      object->Verify();
+
+      // All the interior pointers should be contained in the heap.
+      int size = object->Size();
+      object->IterateBody(map->instance_type(), size, visitor);
+      if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
+        black_size += size;
+      }
+
+      ASSERT(object->address() + size <= top);
+      end_of_previous_object = object->address() + size;
+    }
+    ASSERT_LE(black_size, page->LiveBytes());
   }
+  ASSERT(allocation_pointer_found_in_space);
 }
 #endif
 
@@ -1177,13 +850,23 @@
 // NewSpace implementation
 
 
-bool NewSpace::Setup(Address start, int size) {
+bool NewSpace::Setup(int reserved_semispace_capacity,
+                     int maximum_semispace_capacity) {
   // Setup new space based on the preallocated memory block defined by
   // start and size. The provided space is divided into two semi-spaces.
   // To support fast containment testing in the new space, the size of
   // this chunk must be a power of two and it must be aligned to its size.
   int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
-  int maximum_semispace_capacity = heap()->MaxSemiSpaceSize();
+
+  size_t size = 2 * reserved_semispace_capacity;
+  Address base =
+      heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
+          size, size, &reservation_);
+  if (base == NULL) return false;
+
+  chunk_base_ = base;
+  chunk_size_ = static_cast<uintptr_t>(size);
+  LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
 
   ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
   ASSERT(IsPowerOf2(maximum_semispace_capacity));
@@ -1197,31 +880,29 @@
   INSTANCE_TYPE_LIST(SET_NAME)
 #undef SET_NAME
 
-  ASSERT(size == 2 * heap()->ReservedSemiSpaceSize());
-  ASSERT(IsAddressAligned(start, size, 0));
+  ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
+  ASSERT(static_cast<intptr_t>(chunk_size_) >=
+         2 * heap()->ReservedSemiSpaceSize());
+  ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
 
-  if (!to_space_.Setup(start,
+  if (!to_space_.Setup(chunk_base_,
                        initial_semispace_capacity,
                        maximum_semispace_capacity)) {
     return false;
   }
-  if (!from_space_.Setup(start + maximum_semispace_capacity,
+  if (!from_space_.Setup(chunk_base_ + reserved_semispace_capacity,
                          initial_semispace_capacity,
                          maximum_semispace_capacity)) {
     return false;
   }
 
-  start_ = start;
-  address_mask_ = ~(size - 1);
+  start_ = chunk_base_;
+  address_mask_ = ~(2 * reserved_semispace_capacity - 1);
   object_mask_ = address_mask_ | kHeapObjectTagMask;
-  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
+  object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
 
-  allocation_info_.top = to_space_.low();
-  allocation_info_.limit = to_space_.high();
-  mc_forwarding_info_.top = NULL;
-  mc_forwarding_info_.limit = NULL;
+  ResetAllocationInfo();
 
-  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
   return true;
 }
 
@@ -1239,28 +920,34 @@
   start_ = NULL;
   allocation_info_.top = NULL;
   allocation_info_.limit = NULL;
-  mc_forwarding_info_.top = NULL;
-  mc_forwarding_info_.limit = NULL;
 
   to_space_.TearDown();
   from_space_.TearDown();
+
+  LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
+
+  ASSERT(reservation_.IsReserved());
+  heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
+                                                    NOT_EXECUTABLE);
+  chunk_base_ = NULL;
+  chunk_size_ = 0;
 }
 
 
 void NewSpace::Flip() {
-  SemiSpace tmp = from_space_;
-  from_space_ = to_space_;
-  to_space_ = tmp;
+  SemiSpace::Swap(&from_space_, &to_space_);
 }
 
 
 void NewSpace::Grow() {
+  // Double the semispace size but only up to maximum capacity.
   ASSERT(Capacity() < MaximumCapacity());
-  if (to_space_.Grow()) {
-    // Only grow from space if we managed to grow to space.
-    if (!from_space_.Grow()) {
-      // If we managed to grow to space but couldn't grow from space,
-      // attempt to shrink to space.
+  int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity()));
+  if (to_space_.GrowTo(new_capacity)) {
+    // Only grow from space if we managed to grow to-space.
+    if (!from_space_.GrowTo(new_capacity)) {
+      // If we managed to grow to-space but couldn't grow from-space,
+      // attempt to shrink to-space.
       if (!to_space_.ShrinkTo(from_space_.Capacity())) {
         // We are in an inconsistent state because we could not
         // commit/uncommit memory from new space.
@@ -1268,21 +955,20 @@
       }
     }
   }
-  allocation_info_.limit = to_space_.high();
   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }
 
 
 void NewSpace::Shrink() {
   int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
-  int rounded_new_capacity =
-      RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment()));
+  int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
   if (rounded_new_capacity < Capacity() &&
       to_space_.ShrinkTo(rounded_new_capacity))  {
-    // Only shrink from space if we managed to shrink to space.
+    // Only shrink from-space if we managed to shrink to-space.
+    from_space_.Reset();
     if (!from_space_.ShrinkTo(rounded_new_capacity)) {
-      // If we managed to shrink to space but couldn't shrink from
-      // space, attempt to grow to space again.
+      // If we managed to shrink to-space but couldn't shrink from
+      // space, attempt to grow to-space again.
       if (!to_space_.GrowTo(from_space_.Capacity())) {
         // We are in an inconsistent state because we could not
         // commit/uncommit memory from new space.
@@ -1290,36 +976,65 @@
       }
     }
   }
-  allocation_info_.limit = to_space_.high();
+  allocation_info_.limit = to_space_.page_high();
+  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+}
+
+
+void NewSpace::UpdateAllocationInfo() {
+  allocation_info_.top = to_space_.page_low();
+  allocation_info_.limit = to_space_.page_high();
+
+  // Lower limit during incremental marking.
+  if (heap()->incremental_marking()->IsMarking() &&
+      inline_allocation_limit_step() != 0) {
+    Address new_limit =
+        allocation_info_.top + inline_allocation_limit_step();
+    allocation_info_.limit = Min(new_limit, allocation_info_.limit);
+  }
   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }
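
Lowering the limit does not shrink the space; it only makes the inlined bump-pointer path bail out sooner so that the slow path can run an incremental marking step. A rough sketch of that fast path, with an assumed helper name:

    // Illustrative fast path: when top would pass the (possibly lowered)
    // limit, return NULL and let the slow path do a marking step and then
    // call UpdateAllocationInfo() again.
    Address BumpPointerAllocateSketch(AllocationInfo* info, int size_in_bytes) {
      Address top = info->top;
      if (top + size_in_bytes > info->limit) return NULL;
      info->top = top + size_in_bytes;
      return top;
    }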
 
 
 void NewSpace::ResetAllocationInfo() {
-  allocation_info_.top = to_space_.low();
-  allocation_info_.limit = to_space_.high();
-  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+  to_space_.Reset();
+  UpdateAllocationInfo();
+  pages_used_ = 0;
+  // Clear all mark-bits in the to-space.
+  NewSpacePageIterator it(&to_space_);
+  while (it.has_next()) {
+    Bitmap::Clear(it.next());
+  }
 }
 
 
-void NewSpace::MCResetRelocationInfo() {
-  mc_forwarding_info_.top = from_space_.low();
-  mc_forwarding_info_.limit = from_space_.high();
-  ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_);
-}
+bool NewSpace::AddFreshPage() {
+  Address top = allocation_info_.top;
+  if (NewSpacePage::IsAtStart(top)) {
+    // The current page is already empty. Don't try to make another.
 
-
-void NewSpace::MCCommitRelocationInfo() {
-  // Assumes that the spaces have been flipped so that mc_forwarding_info_ is
-  // valid allocation info for the to space.
-  allocation_info_.top = mc_forwarding_info_.top;
-  allocation_info_.limit = to_space_.high();
-  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+    // We should only get here if someone asks to allocate more
+    // than what can be stored in a single page.
+    // TODO(gc): Change the limit on new-space allocation to prevent this
+    // from happening (all such allocations should go directly to LOSpace).
+    return false;
+  }
+  if (!to_space_.AdvancePage()) {
+    // Failed to get a new page in to-space.
+    return false;
+  }
+  // Clear remainder of current page.
+  int remaining_in_page =
+    static_cast<int>(NewSpacePage::FromLimit(top)->body_limit() - top);
+  heap()->CreateFillerObjectAt(top, remaining_in_page);
+  pages_used_++;
+  UpdateAllocationInfo();
+  return true;
 }
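
A sketch of the retry that AddFreshPage() enables for new-space allocation; the wrapper name is assumed, and the real retry logic in the tree lives in NewSpace's own slow path and may differ:

    // Illustrative only: if the bump pointer overflowed the current page,
    // move on to a fresh page and retry once.  Requests larger than a page
    // still fail here and are expected to go to the large object space.
    MaybeObject* AllocateInNewSpaceSketch(NewSpace* space, int size_in_bytes) {
      MaybeObject* result = space->AllocateRaw(size_in_bytes);
      if (!result->IsFailure()) return result;
      if (!space->AddFreshPage()) return result;  // assumes public visibility
      return space->AllocateRaw(size_in_bytes);
    }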
 
 
 #ifdef DEBUG
-// We do not use the SemispaceIterator because verification doesn't assume
+// We do not use the SemiSpaceIterator because verification doesn't assume
 // that it works (it depends on the invariants we are checking).
 void NewSpace::Verify() {
   // The allocation pointer should be in the space or at the very end.
@@ -1327,59 +1042,53 @@
 
   // There should be objects packed in from the low address up to the
   // allocation pointer.
-  Address current = to_space_.low();
-  while (current < top()) {
-    HeapObject* object = HeapObject::FromAddress(current);
+  Address current = to_space_.first_page()->body();
+  CHECK_EQ(current, to_space_.space_start());
 
-    // The first word should be a map, and we expect all map pointers to
-    // be in map space.
-    Map* map = object->map();
-    ASSERT(map->IsMap());
-    ASSERT(heap()->map_space()->Contains(map));
+  while (current != top()) {
+    if (!NewSpacePage::IsAtEnd(current)) {
+      // The allocation pointer should not be in the middle of an object.
+      CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
+            current < top());
 
-    // The object should not be code or a map.
-    ASSERT(!object->IsMap());
-    ASSERT(!object->IsCode());
+      HeapObject* object = HeapObject::FromAddress(current);
 
-    // The object itself should look OK.
-    object->Verify();
+      // The first word should be a map, and we expect all map pointers to
+      // be in map space.
+      Map* map = object->map();
+      CHECK(map->IsMap());
+      CHECK(heap()->map_space()->Contains(map));
 
-    // All the interior pointers should be contained in the heap.
-    VerifyPointersVisitor visitor;
-    int size = object->Size();
-    object->IterateBody(map->instance_type(), size, &visitor);
+      // The object should not be code or a map.
+      CHECK(!object->IsMap());
+      CHECK(!object->IsCode());
 
-    current += size;
+      // The object itself should look OK.
+      object->Verify();
+
+      // All the interior pointers should be contained in the heap.
+      VerifyPointersVisitor visitor;
+      int size = object->Size();
+      object->IterateBody(map->instance_type(), size, &visitor);
+
+      current += size;
+    } else {
+      // At end of page, switch to next page.
+      NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
+      // Next page should be valid.
+      CHECK(!page->is_anchor());
+      current = page->body();
+    }
   }
 
-  // The allocation pointer should not be in the middle of an object.
-  ASSERT(current == top());
+  // Check semi-spaces.
+  ASSERT_EQ(from_space_.id(), kFromSpace);
+  ASSERT_EQ(to_space_.id(), kToSpace);
+  from_space_.Verify();
+  to_space_.Verify();
 }
 #endif
 
-
-bool SemiSpace::Commit() {
-  ASSERT(!is_committed());
-  if (!heap()->isolate()->memory_allocator()->CommitBlock(
-      start_, capacity_, executable())) {
-    return false;
-  }
-  committed_ = true;
-  return true;
-}
-
-
-bool SemiSpace::Uncommit() {
-  ASSERT(is_committed());
-  if (!heap()->isolate()->memory_allocator()->UncommitBlock(
-      start_, capacity_)) {
-    return false;
-  }
-  committed_ = false;
-  return true;
-}
-
-
 // -----------------------------------------------------------------------------
 // SemiSpace implementation
 
@@ -1392,11 +1101,11 @@
   // otherwise.  In the mark-compact collector, the memory region of the from
   // space is used as the marking stack. It requires contiguous memory
   // addresses.
-  initial_capacity_ = initial_capacity;
+  ASSERT(maximum_capacity >= Page::kPageSize);
+  initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
   capacity_ = initial_capacity;
-  maximum_capacity_ = maximum_capacity;
+  maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
   committed_ = false;
-
   start_ = start;
   address_mask_ = ~(maximum_capacity - 1);
   object_mask_ = address_mask_ | kHeapObjectTagMask;
@@ -1413,81 +1122,258 @@
 }
 
 
-bool SemiSpace::Grow() {
-  // Double the semispace size but only up to maximum capacity.
-  int maximum_extra = maximum_capacity_ - capacity_;
-  int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())),
-                  maximum_extra);
-  if (!heap()->isolate()->memory_allocator()->CommitBlock(
-      high(), extra, executable())) {
+bool SemiSpace::Commit() {
+  ASSERT(!is_committed());
+  int pages = capacity_ / Page::kPageSize;
+  Address end = start_ + maximum_capacity_;
+  Address start = end - pages * Page::kPageSize;
+  if (!heap()->isolate()->memory_allocator()->CommitBlock(start,
+                                                          capacity_,
+                                                          executable())) {
     return false;
   }
-  capacity_ += extra;
+
+  NewSpacePage* page = anchor();
+  for (int i = 1; i <= pages; i++) {
+    NewSpacePage* new_page =
+      NewSpacePage::Initialize(heap(), end - i * Page::kPageSize, this);
+    new_page->InsertAfter(page);
+    page = new_page;
+  }
+
+  committed_ = true;
+  Reset();
+  return true;
+}
+
+
+bool SemiSpace::Uncommit() {
+  ASSERT(is_committed());
+  Address start = start_ + maximum_capacity_ - capacity_;
+  if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {
+    return false;
+  }
+  anchor()->set_next_page(anchor());
+  anchor()->set_prev_page(anchor());
+
+  committed_ = false;
   return true;
 }
 
 
 bool SemiSpace::GrowTo(int new_capacity) {
+  ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
   ASSERT(new_capacity <= maximum_capacity_);
   ASSERT(new_capacity > capacity_);
+  int pages_before = capacity_ / Page::kPageSize;
+  int pages_after = new_capacity / Page::kPageSize;
+
+  Address end = start_ + maximum_capacity_;
+  Address start = end - new_capacity;
   size_t delta = new_capacity - capacity_;
+
   ASSERT(IsAligned(delta, OS::AllocateAlignment()));
   if (!heap()->isolate()->memory_allocator()->CommitBlock(
-      high(), delta, executable())) {
+      start, delta, executable())) {
     return false;
   }
   capacity_ = new_capacity;
+  NewSpacePage* last_page = anchor()->prev_page();
+  ASSERT(last_page != anchor());
+  for (int i = pages_before + 1; i <= pages_after; i++) {
+    Address page_address = end - i * Page::kPageSize;
+    NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
+                                                      page_address,
+                                                      this);
+    new_page->InsertAfter(last_page);
+    Bitmap::Clear(new_page);
+    // Duplicate the flags that were set on the old page.
+    // Duplicate the flags that were set on the old page.
+    new_page->SetFlags(last_page->GetFlags(),
+                       NewSpacePage::kCopyOnFlipFlagsMask);
+    last_page = new_page;
+  }
   return true;
 }
 
 
 bool SemiSpace::ShrinkTo(int new_capacity) {
+  ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
   ASSERT(new_capacity >= initial_capacity_);
   ASSERT(new_capacity < capacity_);
+  // Semispaces grow backwards from the end of their allocated capacity,
+  // so we find the before and after start addresses relative to the
+  // end of the space.
+  Address space_end = start_ + maximum_capacity_;
+  Address old_start = space_end - capacity_;
   size_t delta = capacity_ - new_capacity;
   ASSERT(IsAligned(delta, OS::AllocateAlignment()));
-  if (!heap()->isolate()->memory_allocator()->UncommitBlock(
-      high() - delta, delta)) {
+  if (!heap()->isolate()->memory_allocator()->UncommitBlock(old_start, delta)) {
     return false;
   }
   capacity_ = new_capacity;
+
+  int pages_after = capacity_ / Page::kPageSize;
+  NewSpacePage* new_last_page =
+      NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
+  new_last_page->set_next_page(anchor());
+  anchor()->set_prev_page(new_last_page);
+  ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page));
+
   return true;
 }
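
Commit(), GrowTo() and ShrinkTo() all work from the high end of the reservation downward. A worked example with assumed sizes (1 MB pages, maximum_capacity_ of 8 MB; the numbers are not from this patch):

    // Committed capacity 2 MB: pages occupy [start_ + 6 MB, start_ + 8 MB).
    // GrowTo(4 MB):   commits [start_ + 4 MB, start_ + 6 MB), i.e. the
    //                 committed block grows downward.
    // ShrinkTo(2 MB): uncommits [start_ + 4 MB, start_ + 6 MB) again, the
    //                 lowest part of the committed block, and relinks the
    //                 page list so the last page is the one at start_ + 6 MB.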
 
 
+void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
+  anchor_.set_owner(this);
+  // Fixup back-pointers to anchor. Address of anchor changes
+  // when we swap.
+  anchor_.prev_page()->set_next_page(&anchor_);
+  anchor_.next_page()->set_prev_page(&anchor_);
+
+  bool becomes_to_space = (id_ == kFromSpace);
+  id_ = becomes_to_space ? kToSpace : kFromSpace;
+  NewSpacePage* page = anchor_.next_page();
+  while (page != &anchor_) {
+    page->set_owner(this);
+    page->SetFlags(flags, mask);
+    if (becomes_to_space) {
+      page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
+      page->SetFlag(MemoryChunk::IN_TO_SPACE);
+      page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
+      page->ResetLiveBytes();
+    } else {
+      page->SetFlag(MemoryChunk::IN_FROM_SPACE);
+      page->ClearFlag(MemoryChunk::IN_TO_SPACE);
+    }
+    ASSERT(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
+    ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
+           page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
+    page = page->next_page();
+  }
+}
+
+
+void SemiSpace::Reset() {
+  ASSERT(anchor_.next_page() != &anchor_);
+  current_page_ = anchor_.next_page();
+}
+
+
+void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
+  // We won't be swapping semispaces without data in them.
+  ASSERT(from->anchor_.next_page() != &from->anchor_);
+  ASSERT(to->anchor_.next_page() != &to->anchor_);
+
+  // Swap bits.
+  SemiSpace tmp = *from;
+  *from = *to;
+  *to = tmp;
+
+  // Fixup back-pointers to the page list anchor now that its address
+  // has changed.
+  // Swap to/from-space bits on pages.
+  // Copy GC flags from old active space (from-space) to new (to-space).
+  intptr_t flags = from->current_page()->GetFlags();
+  to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
+
+  from->FlipPages(0, 0);
+}
+
+
+void SemiSpace::set_age_mark(Address mark) {
+  ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this);
+  age_mark_ = mark;
+  // Mark all pages up to the one containing mark.
+  NewSpacePageIterator it(space_start(), mark);
+  while (it.has_next()) {
+    it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
+  }
+}
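
The flag set here is what the scavenger later consults when deciding whether to promote an object. A sketch of such a check, in the spirit of Heap::ShouldBePromoted (the real predicate lives in heap-inl.h and may differ in detail):

    // Illustrative: an object is below the age mark if its page carries the
    // flag and, on the page that contains the mark itself, the object's
    // address is below the mark.
    bool BelowAgeMarkSketch(Address addr, Address age_mark) {
      NewSpacePage* page = NewSpacePage::FromAddress(addr);
      return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
             (!page->ContainsLimit(age_mark) || addr < age_mark);
    }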
+
+
 #ifdef DEBUG
 void SemiSpace::Print() { }
 
 
-void SemiSpace::Verify() { }
+void SemiSpace::Verify() {
+  bool is_from_space = (id_ == kFromSpace);
+  NewSpacePage* page = anchor_.next_page();
+  CHECK(anchor_.semi_space() == this);
+  while (page != &anchor_) {
+    CHECK(page->semi_space() == this);
+    CHECK(page->InNewSpace());
+    CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
+                                        : MemoryChunk::IN_TO_SPACE));
+    CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
+                                         : MemoryChunk::IN_FROM_SPACE));
+    CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
+    if (!is_from_space) {
+      // The pointers-from-here-are-interesting flag isn't updated dynamically
+      // on from-space pages, so it might be out of sync with the marking state.
+      if (page->heap()->incremental_marking()->IsMarking()) {
+        CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
+      } else {
+        CHECK(!page->IsFlagSet(
+            MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
+      }
+      // TODO(gc): Check that the live_bytes_count_ field matches the
+      // black marking on the page (if we make it match in new-space).
+    }
+    CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
+    CHECK(page->prev_page()->next_page() == page);
+    page = page->next_page();
+  }
+}
+
+
+void SemiSpace::AssertValidRange(Address start, Address end) {
+  // Addresses must belong to the same semi-space.
+  NewSpacePage* page = NewSpacePage::FromLimit(start);
+  NewSpacePage* end_page = NewSpacePage::FromLimit(end);
+  SemiSpace* space = page->semi_space();
+  CHECK_EQ(space, end_page->semi_space());
+  // Start address is before end address, either on same page,
+  // or end address is on a later page in the linked list of
+  // semi-space pages.
+  if (page == end_page) {
+    CHECK(start <= end);
+  } else {
+    while (page != end_page) {
+      page = page->next_page();
+      CHECK_NE(page, space->anchor());
+    }
+  }
+}
 #endif
 
 
 // -----------------------------------------------------------------------------
 // SemiSpaceIterator implementation.
 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
-  Initialize(space, space->bottom(), space->top(), NULL);
+  Initialize(space->bottom(), space->top(), NULL);
 }
 
 
 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
                                      HeapObjectCallback size_func) {
-  Initialize(space, space->bottom(), space->top(), size_func);
+  Initialize(space->bottom(), space->top(), size_func);
 }
 
 
 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
-  Initialize(space, start, space->top(), NULL);
+  Initialize(start, space->top(), NULL);
 }
 
 
-void SemiSpaceIterator::Initialize(NewSpace* space, Address start,
+SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
+  Initialize(from, to, NULL);
+}
+
+
+void SemiSpaceIterator::Initialize(Address start,
                                    Address end,
                                    HeapObjectCallback size_func) {
-  ASSERT(space->ToSpaceContains(start));
-  ASSERT(space->ToSpaceLow() <= end
-         && end <= space->ToSpaceHigh());
-  space_ = &space->to_space_;
+  SemiSpace::AssertValidRange(start, end);
   current_ = start;
   limit_ = end;
   size_func_ = size_func;
@@ -1623,7 +1509,7 @@
 void NewSpace::CollectStatistics() {
   ClearHistograms();
   SemiSpaceIterator it(this);
-  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
     RecordAllocation(obj);
 }
 
@@ -1699,7 +1585,6 @@
   promoted_histogram_[type].increment_bytes(obj->Size());
 }
 
-
 // -----------------------------------------------------------------------------
 // Free lists for old object spaces implementation
 
@@ -1708,17 +1593,17 @@
   ASSERT(IsAligned(size_in_bytes, kPointerSize));
 
   // We write a map and possibly size information to the block.  If the block
-  // is big enough to be a ByteArray with at least one extra word (the next
-  // pointer), we set its map to be the byte array map and its size to an
+  // is big enough to be a FreeSpace with at least one extra word (the next
+  // pointer), we set its map to be the free space map and its size to an
   // appropriate array length for the desired size from HeapObject::Size().
   // If the block is too small (eg, one or two words), to hold both a size
   // field and a next pointer, we give it a filler map that gives it the
   // correct size.
-  if (size_in_bytes > ByteArray::kHeaderSize) {
-    set_map(heap->raw_unchecked_byte_array_map());
-    // Can't use ByteArray::cast because it fails during deserialization.
-    ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
-    this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
+  if (size_in_bytes > FreeSpace::kHeaderSize) {
+    set_map(heap->raw_unchecked_free_space_map());
+    // Can't use FreeSpace::cast because it fails during deserialization.
+    FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
+    this_as_free_space->set_size(size_in_bytes);
   } else if (size_in_bytes == kPointerSize) {
     set_map(heap->raw_unchecked_one_pointer_filler_map());
   } else if (size_in_bytes == 2 * kPointerSize) {
@@ -1727,319 +1612,296 @@
     UNREACHABLE();
   }
   // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
-  // deserialization because the byte array map is not done yet.
+  // deserialization because the free space map is not done yet.
 }
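
To summarize the encoding chosen above, here is an illustrative helper; it assumes the FreeSpace header is two words (map plus length), which is what the size thresholds in set_size() imply:

    // Which map a freed block of a given (pointer-aligned) size receives.
    const char* FreeBlockEncodingSketch(int size_in_bytes) {
      if (size_in_bytes == kPointerSize) return "one_pointer_filler_map";
      if (size_in_bytes == 2 * kPointerSize) return "two_pointer_filler_map";
      if (size_in_bytes > 2 * kPointerSize) return "free_space_map plus length";
      return "unreachable: sizes are pointer-aligned and non-zero";
    }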
 
 
-Address FreeListNode::next(Heap* heap) {
+FreeListNode* FreeListNode::next() {
   ASSERT(IsFreeListNode(this));
-  if (map() == heap->raw_unchecked_byte_array_map()) {
-    ASSERT(Size() >= kNextOffset + kPointerSize);
-    return Memory::Address_at(address() + kNextOffset);
+  if (map() == HEAP->raw_unchecked_free_space_map()) {
+    ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
+    return reinterpret_cast<FreeListNode*>(
+        Memory::Address_at(address() + kNextOffset));
   } else {
-    return Memory::Address_at(address() + kPointerSize);
+    return reinterpret_cast<FreeListNode*>(
+        Memory::Address_at(address() + kPointerSize));
   }
 }
 
 
-void FreeListNode::set_next(Heap* heap, Address next) {
+FreeListNode** FreeListNode::next_address() {
   ASSERT(IsFreeListNode(this));
-  if (map() == heap->raw_unchecked_byte_array_map()) {
+  if (map() == HEAP->raw_unchecked_free_space_map()) {
     ASSERT(Size() >= kNextOffset + kPointerSize);
-    Memory::Address_at(address() + kNextOffset) = next;
+    return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
   } else {
-    Memory::Address_at(address() + kPointerSize) = next;
+    return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
   }
 }
 
 
-OldSpaceFreeList::OldSpaceFreeList(Heap* heap, AllocationSpace owner)
-  : heap_(heap),
-    owner_(owner) {
+void FreeListNode::set_next(FreeListNode* next) {
+  ASSERT(IsFreeListNode(this));
+  // While we are booting the VM the free space map will actually be null.  So
+  // we have to make sure that we don't try to use it for anything at that
+  // stage.
+  if (map() == HEAP->raw_unchecked_free_space_map()) {
+    ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
+    Memory::Address_at(address() + kNextOffset) =
+        reinterpret_cast<Address>(next);
+  } else {
+    Memory::Address_at(address() + kPointerSize) =
+        reinterpret_cast<Address>(next);
+  }
+}
+
+
+FreeList::FreeList(PagedSpace* owner)
+    : owner_(owner), heap_(owner->heap()) {
   Reset();
 }
 
 
-void OldSpaceFreeList::Reset() {
+void FreeList::Reset() {
   available_ = 0;
-  for (int i = 0; i < kFreeListsLength; i++) {
-    free_[i].head_node_ = NULL;
-  }
-  needs_rebuild_ = false;
-  finger_ = kHead;
-  free_[kHead].next_size_ = kEnd;
+  small_list_ = NULL;
+  medium_list_ = NULL;
+  large_list_ = NULL;
+  huge_list_ = NULL;
 }
 
 
-void OldSpaceFreeList::RebuildSizeList() {
-  ASSERT(needs_rebuild_);
-  int cur = kHead;
-  for (int i = cur + 1; i < kFreeListsLength; i++) {
-    if (free_[i].head_node_ != NULL) {
-      free_[cur].next_size_ = i;
-      cur = i;
-    }
-  }
-  free_[cur].next_size_ = kEnd;
-  needs_rebuild_ = false;
-}
-
-
-int OldSpaceFreeList::Free(Address start, int size_in_bytes) {
-#ifdef DEBUG
-  Isolate::Current()->memory_allocator()->ZapBlock(start, size_in_bytes);
-#endif
+int FreeList::Free(Address start, int size_in_bytes) {
+  if (size_in_bytes == 0) return 0;
   FreeListNode* node = FreeListNode::FromAddress(start);
   node->set_size(heap_, size_in_bytes);
 
-  // We don't use the freelists in compacting mode.  This makes it more like a
-  // GC that only has mark-sweep-compact and doesn't have a mark-sweep
-  // collector.
-  if (FLAG_always_compact) {
-    return size_in_bytes;
-  }
+  // Early return to drop too-small blocks on the floor.
+  if (size_in_bytes < kSmallListMin) return size_in_bytes;
 
-  // Early return to drop too-small blocks on the floor (one or two word
-  // blocks cannot hold a map pointer, a size field, and a pointer to the
-  // next block in the free list).
-  if (size_in_bytes < kMinBlockSize) {
-    return size_in_bytes;
+  // Insert other blocks at the head of a free list of the appropriate
+  // magnitude.
+  if (size_in_bytes <= kSmallListMax) {
+    node->set_next(small_list_);
+    small_list_ = node;
+  } else if (size_in_bytes <= kMediumListMax) {
+    node->set_next(medium_list_);
+    medium_list_ = node;
+  } else if (size_in_bytes <= kLargeListMax) {
+    node->set_next(large_list_);
+    large_list_ = node;
+  } else {
+    node->set_next(huge_list_);
+    huge_list_ = node;
   }
-
-  // Insert other blocks at the head of an exact free list.
-  int index = size_in_bytes >> kPointerSizeLog2;
-  node->set_next(heap_, free_[index].head_node_);
-  free_[index].head_node_ = node->address();
   available_ += size_in_bytes;
-  needs_rebuild_ = true;
+  ASSERT(IsVeryLong() || available_ == SumFreeLists());
   return 0;
 }
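
The list boundaries (kSmallListMin, kSmallListMax, kMediumListMax, kLargeListMax) are declared alongside FreeList and are not visible in this hunk. A sketch of the typical producer of these blocks, a sweeper handing a dead region back to its space; the function name is assumed:

    // Illustrative only: PagedSpace::Free() is expected to account the bytes
    // and forward the block to this free list.
    intptr_t ReclaimRegionSketch(PagedSpace* space, Address start, Address end) {
      int size_in_bytes = static_cast<int>(end - start);
      space->Free(start, size_in_bytes);
      return size_in_bytes;
    }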
 
 
-MaybeObject* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) {
-  ASSERT(0 < size_in_bytes);
-  ASSERT(size_in_bytes <= kMaxBlockSize);
-  ASSERT(IsAligned(size_in_bytes, kPointerSize));
+FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) {
+  FreeListNode* node = *list;
 
-  if (needs_rebuild_) RebuildSizeList();
-  int index = size_in_bytes >> kPointerSizeLog2;
-  // Check for a perfect fit.
-  if (free_[index].head_node_ != NULL) {
-    FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_);
-    // If this was the last block of its size, remove the size.
-    if ((free_[index].head_node_ = node->next(heap_)) == NULL)
-      RemoveSize(index);
-    available_ -= size_in_bytes;
-    *wasted_bytes = 0;
-    ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
-    return node;
+  if (node == NULL) return NULL;
+
+  while (node != NULL &&
+         Page::FromAddress(node->address())->IsEvacuationCandidate()) {
+    available_ -= node->Size();
+    node = node->next();
   }
-  // Search the size list for the best fit.
-  int prev = finger_ < index ? finger_ : kHead;
-  int cur = FindSize(index, &prev);
-  ASSERT(index < cur);
-  if (cur == kEnd) {
-    // No large enough size in list.
-    *wasted_bytes = 0;
-    return Failure::RetryAfterGC(owner_);
-  }
-  ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
-  int rem = cur - index;
-  int rem_bytes = rem << kPointerSizeLog2;
-  FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_);
-  ASSERT(cur_node->Size() == (cur << kPointerSizeLog2));
-  FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ +
-                                                     size_in_bytes);
-  // Distinguish the cases prev < rem < cur and rem <= prev < cur
-  // to avoid many redundant tests and calls to Insert/RemoveSize.
-  if (prev < rem) {
-    // Simple case: insert rem between prev and cur.
-    finger_ = prev;
-    free_[prev].next_size_ = rem;
-    // If this was the last block of size cur, remove the size.
-    if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) {
-      free_[rem].next_size_ = free_[cur].next_size_;
-    } else {
-      free_[rem].next_size_ = cur;
-    }
-    // Add the remainder block.
-    rem_node->set_size(heap_, rem_bytes);
-    rem_node->set_next(heap_, free_[rem].head_node_);
-    free_[rem].head_node_ = rem_node->address();
+
+  if (node != NULL) {
+    *node_size = node->Size();
+    *list = node->next();
   } else {
-    // If this was the last block of size cur, remove the size.
-    if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) {
-      finger_ = prev;
-      free_[prev].next_size_ = free_[cur].next_size_;
-    }
-    if (rem_bytes < kMinBlockSize) {
-      // Too-small remainder is wasted.
-      rem_node->set_size(heap_, rem_bytes);
-      available_ -= size_in_bytes + rem_bytes;
-      *wasted_bytes = rem_bytes;
-      return cur_node;
-    }
-    // Add the remainder block and, if needed, insert its size.
-    rem_node->set_size(heap_, rem_bytes);
-    rem_node->set_next(heap_, free_[rem].head_node_);
-    free_[rem].head_node_ = rem_node->address();
-    if (rem_node->next(heap_) == NULL) InsertSize(rem);
-  }
-  available_ -= size_in_bytes;
-  *wasted_bytes = 0;
-  return cur_node;
-}
-
-
-void OldSpaceFreeList::MarkNodes() {
-  for (int i = 0; i < kFreeListsLength; i++) {
-    Address cur_addr = free_[i].head_node_;
-    while (cur_addr != NULL) {
-      FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
-      cur_addr = cur_node->next(heap_);
-      cur_node->SetMark();
-    }
-  }
-}
-
-
-#ifdef DEBUG
-bool OldSpaceFreeList::Contains(FreeListNode* node) {
-  for (int i = 0; i < kFreeListsLength; i++) {
-    Address cur_addr = free_[i].head_node_;
-    while (cur_addr != NULL) {
-      FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
-      if (cur_node == node) return true;
-      cur_addr = cur_node->next(heap_);
-    }
-  }
-  return false;
-}
-#endif
-
-
-FixedSizeFreeList::FixedSizeFreeList(Heap* heap,
-                                     AllocationSpace owner,
-                                     int object_size)
-    : heap_(heap), owner_(owner), object_size_(object_size) {
-  Reset();
-}
-
-
-void FixedSizeFreeList::Reset() {
-  available_ = 0;
-  head_ = tail_ = NULL;
-}
-
-
-void FixedSizeFreeList::Free(Address start) {
-#ifdef DEBUG
-  Isolate::Current()->memory_allocator()->ZapBlock(start, object_size_);
-#endif
-  // We only use the freelists with mark-sweep.
-  ASSERT(!HEAP->mark_compact_collector()->IsCompacting());
-  FreeListNode* node = FreeListNode::FromAddress(start);
-  node->set_size(heap_, object_size_);
-  node->set_next(heap_, NULL);
-  if (head_ == NULL) {
-    tail_ = head_ = node->address();
-  } else {
-    FreeListNode::FromAddress(tail_)->set_next(heap_, node->address());
-    tail_ = node->address();
-  }
-  available_ += object_size_;
-}
-
-
-MaybeObject* FixedSizeFreeList::Allocate() {
-  if (head_ == NULL) {
-    return Failure::RetryAfterGC(owner_);
+    *list = NULL;
   }
 
-  ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
-  FreeListNode* node = FreeListNode::FromAddress(head_);
-  head_ = node->next(heap_);
-  available_ -= object_size_;
   return node;
 }
 
 
-void FixedSizeFreeList::MarkNodes() {
-  Address cur_addr = head_;
-  while (cur_addr != NULL && cur_addr != tail_) {
-    FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
-    cur_addr = cur_node->next(heap_);
-    cur_node->SetMark();
+FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
+  FreeListNode* node = NULL;
+
+  if (size_in_bytes <= kSmallAllocationMax) {
+    node = PickNodeFromList(&small_list_, node_size);
+    if (node != NULL) return node;
   }
+
+  if (size_in_bytes <= kMediumAllocationMax) {
+    node = PickNodeFromList(&medium_list_, node_size);
+    if (node != NULL) return node;
+  }
+
+  if (size_in_bytes <= kLargeAllocationMax) {
+    node = PickNodeFromList(&large_list_, node_size);
+    if (node != NULL) return node;
+  }
+
+  for (FreeListNode** cur = &huge_list_;
+       *cur != NULL;
+       cur = (*cur)->next_address()) {
+    FreeListNode* cur_node = *cur;
+    while (cur_node != NULL &&
+           Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
+      available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
+      cur_node = cur_node->next();
+    }
+
+    *cur = cur_node;
+    if (cur_node == NULL) break;
+
+    ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
+    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
+    int size = cur_as_free_space->Size();
+    if (size >= size_in_bytes) {
+      // Large enough node found.  Unlink it from the list.
+      node = *cur;
+      *node_size = size;
+      *cur = node->next();
+      break;
+    }
+  }
+
+  return node;
 }
 
 
+// Allocation on the old space free list.  If it succeeds then a new linear
+// allocation space has been set up with the top and limit of the space.  If
+// the allocation fails then NULL is returned, and the caller can perform a GC
+// or allocate a new page before retrying.
+HeapObject* FreeList::Allocate(int size_in_bytes) {
+  ASSERT(0 < size_in_bytes);
+  ASSERT(size_in_bytes <= kMaxBlockSize);
+  ASSERT(IsAligned(size_in_bytes, kPointerSize));
+  // Don't free list allocate if there is linear space available.
+  ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
+
+  int new_node_size = 0;
+  FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
+  if (new_node == NULL) return NULL;
+
+  available_ -= new_node_size;
+  ASSERT(IsVeryLong() || available_ == SumFreeLists());
+
+  int bytes_left = new_node_size - size_in_bytes;
+  ASSERT(bytes_left >= 0);
+
+  int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
+  // Mark the old linear allocation area with a free space map so it can be
+  // skipped when scanning the heap.  This also puts it back in the free list
+  // if it is big enough.
+  owner_->Free(owner_->top(), old_linear_size);
+  owner_->heap()->incremental_marking()->OldSpaceStep(
+      size_in_bytes - old_linear_size);
+
+  const int kThreshold = IncrementalMarking::kAllocatedThreshold;
+
+  // Memory in the linear allocation area is counted as allocated.  We may free
+  // a little of this again immediately - see below.
+  owner_->Allocate(new_node_size);
+
+  if (bytes_left > kThreshold &&
+      owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
+      FLAG_incremental_marking_steps) {
+    int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
+    // We don't want to give too large linear areas to the allocator while
+    // incremental marking is going on, because we won't check again whether
+    // we want to do another increment until the linear area is used up.
+    owner_->Free(new_node->address() + size_in_bytes + linear_size,
+                 new_node_size - size_in_bytes - linear_size);
+    owner_->SetTop(new_node->address() + size_in_bytes,
+                   new_node->address() + size_in_bytes + linear_size);
+  } else if (bytes_left > 0) {
+    // Normally we give the rest of the node to the allocator as its new
+    // linear allocation area.
+    owner_->SetTop(new_node->address() + size_in_bytes,
+                   new_node->address() + new_node_size);
+  } else {
+    // TODO(gc) Try not freeing linear allocation region when bytes_left
+    // are zero.
+    owner_->SetTop(NULL, NULL);
+  }
+
+  return new_node;
+}
+
+
+static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) {
+  intptr_t sum = 0;
+  while (n != NULL) {
+    if (Page::FromAddress(n->address()) == p) {
+      FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n);
+      sum += free_space->Size();
+    }
+    n = n->next();
+  }
+  return sum;
+}
+
+
+void FreeList::CountFreeListItems(Page* p, intptr_t* sizes) {
+  sizes[0] = CountFreeListItemsInList(small_list_, p);
+  sizes[1] = CountFreeListItemsInList(medium_list_, p);
+  sizes[2] = CountFreeListItemsInList(large_list_, p);
+  sizes[3] = CountFreeListItemsInList(huge_list_, p);
+}
+
+#ifdef DEBUG
+intptr_t FreeList::SumFreeList(FreeListNode* cur) {
+  intptr_t sum = 0;
+  while (cur != NULL) {
+    ASSERT(cur->map() == HEAP->raw_unchecked_free_space_map());
+    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
+    sum += cur_as_free_space->Size();
+    cur = cur->next();
+  }
+  return sum;
+}
+
+
+static const int kVeryLongFreeList = 500;
+
+
+int FreeList::FreeListLength(FreeListNode* cur) {
+  int length = 0;
+  while (cur != NULL) {
+    length++;
+    cur = cur->next();
+    if (length == kVeryLongFreeList) return length;
+  }
+  return length;
+}
+
+
+bool FreeList::IsVeryLong() {
+  if (FreeListLength(small_list_) == kVeryLongFreeList) return true;
+  if (FreeListLength(medium_list_) == kVeryLongFreeList) return true;
+  if (FreeListLength(large_list_) == kVeryLongFreeList) return true;
+  if (FreeListLength(huge_list_) == kVeryLongFreeList) return true;
+  return false;
+}
+
+
+// This can take a very long time because it is linear in the number of entries
+// on the free list, so it should not be called if FreeListLength returns
+// kVeryLongFreeList.
+intptr_t FreeList::SumFreeLists() {
+  intptr_t sum = SumFreeList(small_list_);
+  sum += SumFreeList(medium_list_);
+  sum += SumFreeList(large_list_);
+  sum += SumFreeList(huge_list_);
+  return sum;
+}
+#endif
+
+
 // -----------------------------------------------------------------------------
 // OldSpace implementation
 
-void OldSpace::PrepareForMarkCompact(bool will_compact) {
-  // Call prepare of the super class.
-  PagedSpace::PrepareForMarkCompact(will_compact);
-
-  if (will_compact) {
-    // Reset relocation info.  During a compacting collection, everything in
-    // the space is considered 'available' and we will rediscover live data
-    // and waste during the collection.
-    MCResetRelocationInfo();
-    ASSERT(Available() == Capacity());
-  } else {
-    // During a non-compacting collection, everything below the linear
-    // allocation pointer is considered allocated (everything above is
-    // available) and we will rediscover available and wasted bytes during
-    // the collection.
-    accounting_stats_.AllocateBytes(free_list_.available());
-    accounting_stats_.FillWastedBytes(Waste());
-  }
-
-  // Clear the free list before a full GC---it will be rebuilt afterward.
-  free_list_.Reset();
-}
-
-
-void OldSpace::MCCommitRelocationInfo() {
-  // Update fast allocation info.
-  allocation_info_.top = mc_forwarding_info_.top;
-  allocation_info_.limit = mc_forwarding_info_.limit;
-  ASSERT(allocation_info_.VerifyPagedAllocation());
-
-  // The space is compacted and we haven't yet built free lists or
-  // wasted any space.
-  ASSERT(Waste() == 0);
-  ASSERT(AvailableFree() == 0);
-
-  // Build the free list for the space.
-  int computed_size = 0;
-  PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
-  while (it.has_next()) {
-    Page* p = it.next();
-    // Space below the relocation pointer is allocated.
-    computed_size +=
-        static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart());
-    if (it.has_next()) {
-      // Free the space at the top of the page.
-      int extra_size =
-          static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark());
-      if (extra_size > 0) {
-        int wasted_bytes = free_list_.Free(p->AllocationWatermark(),
-                                           extra_size);
-        // The bytes we have just "freed" to add to the free list were
-        // already accounted as available.
-        accounting_stats_.WasteBytes(wasted_bytes);
-      }
-    }
-  }
-
-  // Make sure the computed size - based on the used portion of the pages in
-  // use - matches the size obtained while computing forwarding addresses.
-  ASSERT(computed_size == Size());
-}
-
-
 bool NewSpace::ReserveSpace(int bytes) {
   // We can't reliably unpack a partial snapshot that needs more new space
   // space than the minimum NewSpace size.
@@ -2050,151 +1912,59 @@
 }
 
 
-void PagedSpace::FreePages(Page* prev, Page* last) {
-  if (last == AllocationTopPage()) {
-    // Pages are already at the end of used pages.
-    return;
-  }
+void PagedSpace::PrepareForMarkCompact() {
+  // We don't have a linear allocation area while sweeping.  It will be restored
+  // on the first allocation after the sweep.
+  // Mark the old linear allocation area with a free space map so it can be
+  // skipped when scanning the heap.
+  int old_linear_size = static_cast<int>(limit() - top());
+  Free(top(), old_linear_size);
+  SetTop(NULL, NULL);
 
-  Page* first = NULL;
-
-  // Remove pages from the list.
-  if (prev == NULL) {
-    first = first_page_;
-    first_page_ = last->next_page();
-  } else {
-    first = prev->next_page();
-    heap()->isolate()->memory_allocator()->SetNextPage(
-        prev, last->next_page());
-  }
-
-  // Attach it after the last page.
-  heap()->isolate()->memory_allocator()->SetNextPage(last_page_, first);
-  last_page_ = last;
-  heap()->isolate()->memory_allocator()->SetNextPage(last, NULL);
-
-  // Clean them up.
-  do {
-    first->InvalidateWatermark(true);
-    first->SetAllocationWatermark(first->ObjectAreaStart());
-    first->SetCachedAllocationWatermark(first->ObjectAreaStart());
-    first->SetRegionMarks(Page::kAllRegionsCleanMarks);
-    first = first->next_page();
-  } while (first != NULL);
-
-  // Order of pages in this space might no longer be consistent with
-  // order of pages in chunks.
-  page_list_is_chunk_ordered_ = false;
-}
-
-
-void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) {
-  const bool add_to_freelist = true;
-
-  // Mark used and unused pages to properly fill unused pages
-  // after reordering.
-  PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
-  Page* last_in_use = AllocationTopPage();
-  bool in_use = true;
-
-  while (all_pages_iterator.has_next()) {
-    Page* p = all_pages_iterator.next();
-    p->SetWasInUseBeforeMC(in_use);
-    if (p == last_in_use) {
-      // We passed a page containing allocation top. All consequent
-      // pages are not used.
-      in_use = false;
-    }
-  }
-
-  if (page_list_is_chunk_ordered_) return;
-
-  Page* new_last_in_use = Page::FromAddress(NULL);
-  heap()->isolate()->memory_allocator()->RelinkPageListInChunkOrder(
-      this, &first_page_, &last_page_, &new_last_in_use);
-  ASSERT(new_last_in_use->is_valid());
-
-  if (new_last_in_use != last_in_use) {
-    // Current allocation top points to a page which is now in the middle
-    // of page list. We should move allocation top forward to the new last
-    // used page so various object iterators will continue to work properly.
-    int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
-                                         last_in_use->AllocationTop());
-
-    last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
-    if (size_in_bytes > 0) {
-      Address start = last_in_use->AllocationTop();
-      if (deallocate_blocks) {
-        accounting_stats_.AllocateBytes(size_in_bytes);
-        DeallocateBlock(start, size_in_bytes, add_to_freelist);
-      } else {
-        heap()->CreateFillerObjectAt(start, size_in_bytes);
+  // Stop lazy sweeping and clear marking bits for unswept pages.
+  if (first_unswept_page_ != NULL) {
+    Page* last = last_unswept_page_;
+    Page* p = first_unswept_page_;
+    do {
+      // Do not use ShouldBeSweptLazily predicate here.
+      // New evacuation candidates were selected but they still have
+      // to be swept before collection starts.
+      if (!p->WasSwept()) {
+        Bitmap::Clear(p);
+        if (FLAG_gc_verbose) {
+          PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
+                 reinterpret_cast<intptr_t>(p));
+        }
       }
-    }
-
-    // New last in use page was in the middle of the list before
-    // sorting so it full.
-    SetTop(new_last_in_use->AllocationTop());
-
-    ASSERT(AllocationTopPage() == new_last_in_use);
-    ASSERT(AllocationTopPage()->WasInUseBeforeMC());
+      p = p->next_page();
+    } while (p != last);
   }
+  first_unswept_page_ = last_unswept_page_ = Page::FromAddress(NULL);
 
-  PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
-  while (pages_in_use_iterator.has_next()) {
-    Page* p = pages_in_use_iterator.next();
-    if (!p->WasInUseBeforeMC()) {
-      // Empty page is in the middle of a sequence of used pages.
-      // Allocate it as a whole and deallocate immediately.
-      int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
-                                           p->ObjectAreaStart());
-
-      p->SetAllocationWatermark(p->ObjectAreaStart());
-      Address start = p->ObjectAreaStart();
-      if (deallocate_blocks) {
-        accounting_stats_.AllocateBytes(size_in_bytes);
-        DeallocateBlock(start, size_in_bytes, add_to_freelist);
-      } else {
-        heap()->CreateFillerObjectAt(start, size_in_bytes);
-      }
-    }
-  }
-
-  page_list_is_chunk_ordered_ = true;
+  // Clear the free list before a full GC---it will be rebuilt afterward.
+  free_list_.Reset();
 }
 
 
-void PagedSpace::PrepareForMarkCompact(bool will_compact) {
-  if (will_compact) {
-    RelinkPageListInChunkOrder(false);
-  }
-}
+bool PagedSpace::ReserveSpace(int size_in_bytes) {
+  ASSERT(size_in_bytes <= Page::kMaxHeapObjectSize);
+  ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
+  Address current_top = allocation_info_.top;
+  Address new_top = current_top + size_in_bytes;
+  if (new_top <= allocation_info_.limit) return true;
 
+  HeapObject* new_area = free_list_.Allocate(size_in_bytes);
+  if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
+  if (new_area == NULL) return false;
 
-bool PagedSpace::ReserveSpace(int bytes) {
-  Address limit = allocation_info_.limit;
-  Address top = allocation_info_.top;
-  if (limit - top >= bytes) return true;
+  int old_linear_size = static_cast<int>(limit() - top());
+  // Mark the old linear allocation area with a free space map so it can be
+  // skipped when scanning the heap.  This also puts it back in the free list
+  // if it is big enough.
+  Free(top(), old_linear_size);
 
-  // There wasn't enough space in the current page.  Lets put the rest
-  // of the page on the free list and start a fresh page.
-  PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_));
-
-  Page* reserved_page = TopPageOf(allocation_info_);
-  int bytes_left_to_reserve = bytes;
-  while (bytes_left_to_reserve > 0) {
-    if (!reserved_page->next_page()->is_valid()) {
-      if (heap()->OldGenerationAllocationLimitReached()) return false;
-      Expand(reserved_page);
-    }
-    bytes_left_to_reserve -= Page::kPageSize;
-    reserved_page = reserved_page->next_page();
-    if (!reserved_page->is_valid()) return false;
-  }
-  ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
-  TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true);
-  SetAllocationInfo(&allocation_info_,
-                    TopPageOf(allocation_info_)->next_page());
+  SetTop(new_area->address(), new_area->address() + size_in_bytes);
+  Allocate(size_in_bytes);
   return true;
 }
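
The rewritten ReserveSpace above keeps linear (bump-pointer) allocation fast by retiring the unused tail of the current area into the free list before switching to a fresh area. A minimal standalone sketch of that pattern, with illustrative names that are not the V8 classes:

    // Standalone sketch of the "retire the linear area, then refill from the
    // free list" pattern.  All names here are illustrative, not V8 classes.
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct FreeBlock { uint8_t* start; std::size_t size; };

    struct SimpleFreeList {
      std::vector<FreeBlock> blocks;
      void Free(uint8_t* start, std::size_t size) {
        if (size > 0) blocks.push_back(FreeBlock{start, size});
      }
      uint8_t* Allocate(std::size_t size) {
        for (FreeBlock& b : blocks) {
          if (b.size >= size) {
            uint8_t* result = b.start;
            b.start += size;
            b.size -= size;
            return result;
          }
        }
        return nullptr;  // a real space would try to expand here
      }
    };

    struct LinearArea {
      uint8_t* top = nullptr;
      uint8_t* limit = nullptr;

      // Mirrors the shape of ReserveSpace above: succeed if the current bump
      // area is big enough, otherwise retire its tail into the free list and
      // switch to a fresh area taken from the free list.
      bool Reserve(SimpleFreeList* free_list, std::size_t size) {
        if (top != nullptr && top + size <= limit) return true;
        if (top != nullptr) {
          free_list->Free(top, static_cast<std::size_t>(limit - top));
        }
        uint8_t* fresh = free_list->Allocate(size);
        if (fresh == nullptr) return false;
        top = fresh;
        limit = fresh + size;
        return true;
      }
    };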
 
@@ -2206,45 +1976,56 @@
 }
 
 
-// Slow case for normal allocation.  Try in order: (1) allocate in the next
-// page in the space, (2) allocate off the space's free list, (3) expand the
-// space, (4) fail.
-HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
-  // Linear allocation in this space has failed.  If there is another page
-  // in the space, move to that page and allocate there.  This allocation
-  // should succeed (size_in_bytes should not be greater than a page's
-  // object area size).
-  Page* current_page = TopPageOf(allocation_info_);
-  if (current_page->next_page()->is_valid()) {
-    return AllocateInNextPage(current_page, size_in_bytes);
-  }
+bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
+  if (IsSweepingComplete()) return true;
 
-  // There is no next page in this space.  Try free list allocation unless that
-  // is currently forbidden.
-  if (!heap()->linear_allocation()) {
-    int wasted_bytes;
-    Object* result;
-    MaybeObject* maybe = free_list_.Allocate(size_in_bytes, &wasted_bytes);
-    accounting_stats_.WasteBytes(wasted_bytes);
-    if (maybe->ToObject(&result)) {
-      accounting_stats_.AllocateBytes(size_in_bytes);
-
-      HeapObject* obj = HeapObject::cast(result);
-      Page* p = Page::FromAddress(obj->address());
-
-      if (obj->address() >= p->AllocationWatermark()) {
-        // There should be no hole between the allocation watermark
-        // and allocated object address.
-        // Memory above the allocation watermark was not swept and
-        // might contain garbage pointers to new space.
-        ASSERT(obj->address() == p->AllocationWatermark());
-        p->SetAllocationWatermark(obj->address() + size_in_bytes);
+  intptr_t freed_bytes = 0;
+  Page* last = last_unswept_page_;
+  Page* p = first_unswept_page_;
+  do {
+    Page* next_page = p->next_page();
+    if (ShouldBeSweptLazily(p)) {
+      if (FLAG_gc_verbose) {
+        PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
+               reinterpret_cast<intptr_t>(p));
       }
-
-      return obj;
+      freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
     }
+    p = next_page;
+  } while (p != last && freed_bytes < bytes_to_sweep);
+
+  if (p == last) {
+    last_unswept_page_ = first_unswept_page_ = Page::FromAddress(NULL);
+  } else {
+    first_unswept_page_ = p;
   }
 
+  heap()->LowerOldGenLimits(freed_bytes);
+
+  heap()->FreeQueuedChunks();
+
+  return IsSweepingComplete();
+}
+
+
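A note on the shape of AdvanceSweeper below: it walks the unswept page list until it has freed at least bytes_to_sweep, so sweeping work is spread across allocations instead of being done in one pause. A compact sketch of the same budgeted walk over a generic singly linked list (all names are placeholders, not V8 API):

    // Sketch of a budgeted sweep over a singly linked page list.
    // Node, SweepOne and the byte counts are illustrative placeholders.
    #include <cstdint>

    struct Node {
      Node* next;
      bool swept;
      int64_t reclaimable_bytes;
    };

    // Pretend sweeper: marks the node swept and reports how much it freed.
    static int64_t SweepOne(Node* n) {
      n->swept = true;
      return n->reclaimable_bytes;
    }

    // Advance until either the list is exhausted or the budget is met.
    Node* AdvanceWithBudget(Node* first_unswept, int64_t budget) {
      int64_t freed = 0;
      Node* p = first_unswept;
      while (p != nullptr && freed < budget) {
        Node* next = p->next;
        if (!p->swept) freed += SweepOne(p);
        p = next;
      }
      return p;  // resume point for the next call (nullptr when done)
    }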
+void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
+  if (allocation_info_.top >= allocation_info_.limit) return;
+
+  if (Page::FromAddress(allocation_info_.top)->IsEvacuationCandidate()) {
+    // Create filler object to keep page iterable if it was iterable.
+    int remaining =
+        static_cast<int>(allocation_info_.limit - allocation_info_.top);
+    heap()->CreateFillerObjectAt(allocation_info_.top, remaining);
+
+    allocation_info_.top = NULL;
+    allocation_info_.limit = NULL;
+  }
+}
+
+
+HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
+  // Allocation in this space has failed.
+
   // Free list allocation failed and there is no next page.  Fail if we have
   // hit the old generation size limit that should cause a garbage
   // collection.
@@ -2253,10 +2034,26 @@
     return NULL;
   }
 
+  // If there are unswept pages advance lazy sweeper.
+  if (first_unswept_page_->is_valid()) {
+    AdvanceSweeper(size_in_bytes);
+
+    // Retry the free list allocation.
+    HeapObject* object = free_list_.Allocate(size_in_bytes);
+    if (object != NULL) return object;
+
+    if (!IsSweepingComplete()) {
+      AdvanceSweeper(kMaxInt);
+
+      // Retry the free list allocation.
+      object = free_list_.Allocate(size_in_bytes);
+      if (object != NULL) return object;
+    }
+  }
+
   // Try to expand the space and allocate in the new next page.
-  ASSERT(!current_page->next_page()->is_valid());
-  if (Expand(current_page)) {
-    return AllocateInNextPage(current_page, size_in_bytes);
+  if (Expand()) {
+    return free_list_.Allocate(size_in_bytes);
   }
 
   // Finally, fail.
@@ -2264,53 +2061,6 @@
 }
 
 
-void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
-  current_page->SetAllocationWatermark(allocation_info_.top);
-  int free_size =
-      static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
-  if (free_size > 0) {
-    int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
-    accounting_stats_.WasteBytes(wasted_bytes);
-  }
-}
-
-
-void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
-  current_page->SetAllocationWatermark(allocation_info_.top);
-  int free_size =
-      static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
-  // In the fixed space free list all the free list items have the right size.
-  // We use up the rest of the page while preserving this invariant.
-  while (free_size >= object_size_in_bytes_) {
-    free_list_.Free(allocation_info_.top);
-    allocation_info_.top += object_size_in_bytes_;
-    free_size -= object_size_in_bytes_;
-    accounting_stats_.WasteBytes(object_size_in_bytes_);
-  }
-}
-
-
-// Add the block at the top of the page to the space's free list, set the
-// allocation info to the next page (assumed to be one), and allocate
-// linearly there.
-HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
-                                         int size_in_bytes) {
-  ASSERT(current_page->next_page()->is_valid());
-  Page* next_page = current_page->next_page();
-  next_page->ClearGCFields();
-  PutRestOfCurrentPageOnFreeList(current_page);
-  SetAllocationInfo(&allocation_info_, next_page);
-  return AllocateLinearly(&allocation_info_, size_in_bytes);
-}
-
-
-void OldSpace::DeallocateBlock(Address start,
-                                 int size_in_bytes,
-                                 bool add_to_freelist) {
-  Free(start, size_in_bytes, add_to_freelist);
-}
-
-
 #ifdef DEBUG
 void PagedSpace::ReportCodeStatistics() {
   Isolate* isolate = Isolate::Current();
@@ -2413,7 +2163,7 @@
 void PagedSpace::CollectCodeStatistics() {
   Isolate* isolate = heap()->isolate();
   HeapObjectIterator obj_it(this);
-  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
+  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
     if (obj->IsCode()) {
       Code* code = Code::cast(obj);
       isolate->code_kind_statistics()[code->kind()] += code->Size();
@@ -2438,16 +2188,17 @@
 }
 
 
-void OldSpace::ReportStatistics() {
+void PagedSpace::ReportStatistics() {
   int pct = static_cast<int>(Available() * 100 / Capacity());
   PrintF("  capacity: %" V8_PTR_PREFIX "d"
              ", waste: %" V8_PTR_PREFIX "d"
              ", available: %" V8_PTR_PREFIX "d, %%%d\n",
          Capacity(), Waste(), Available(), pct);
 
+  if (was_swept_conservatively_) return;
   ClearHistograms();
   HeapObjectIterator obj_it(this);
-  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
+  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
     CollectHistogramInfo(obj);
   ReportHistogram(true);
 }
@@ -2456,192 +2207,28 @@
 // -----------------------------------------------------------------------------
 // FixedSpace implementation
 
-void FixedSpace::PrepareForMarkCompact(bool will_compact) {
+void FixedSpace::PrepareForMarkCompact() {
   // Call prepare of the super class.
-  PagedSpace::PrepareForMarkCompact(will_compact);
+  PagedSpace::PrepareForMarkCompact();
 
-  if (will_compact) {
-    // Reset relocation info.
-    MCResetRelocationInfo();
-
-    // During a compacting collection, everything in the space is considered
-    // 'available' (set by the call to MCResetRelocationInfo) and we will
-    // rediscover live and wasted bytes during the collection.
-    ASSERT(Available() == Capacity());
-  } else {
-    // During a non-compacting collection, everything below the linear
-    // allocation pointer except wasted top-of-page blocks is considered
-    // allocated and we will rediscover available bytes during the
-    // collection.
-    accounting_stats_.AllocateBytes(free_list_.available());
-  }
+  // During a non-compacting collection, everything below the linear
+  // allocation pointer except wasted top-of-page blocks is considered
+  // allocated and we will rediscover available bytes during the
+  // collection.
+  accounting_stats_.AllocateBytes(free_list_.available());
 
   // Clear the free list before a full GC---it will be rebuilt afterward.
   free_list_.Reset();
 }
 
 
-void FixedSpace::MCCommitRelocationInfo() {
-  // Update fast allocation info.
-  allocation_info_.top = mc_forwarding_info_.top;
-  allocation_info_.limit = mc_forwarding_info_.limit;
-  ASSERT(allocation_info_.VerifyPagedAllocation());
-
-  // The space is compacted and we haven't yet wasted any space.
-  ASSERT(Waste() == 0);
-
-  // Update allocation_top of each page in use and compute waste.
-  int computed_size = 0;
-  PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
-  while (it.has_next()) {
-    Page* page = it.next();
-    Address page_top = page->AllocationTop();
-    computed_size += static_cast<int>(page_top - page->ObjectAreaStart());
-    if (it.has_next()) {
-      accounting_stats_.WasteBytes(
-          static_cast<int>(page->ObjectAreaEnd() - page_top));
-      page->SetAllocationWatermark(page_top);
-    }
-  }
-
-  // Make sure the computed size - based on the used portion of the
-  // pages in use - matches the size we adjust during allocation.
-  ASSERT(computed_size == Size());
-}
-
-
-// Slow case for normal allocation. Try in order: (1) allocate in the next
-// page in the space, (2) allocate off the space's free list, (3) expand the
-// space, (4) fail.
-HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
-  ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
-  // Linear allocation in this space has failed.  If there is another page
-  // in the space, move to that page and allocate there.  This allocation
-  // should succeed.
-  Page* current_page = TopPageOf(allocation_info_);
-  if (current_page->next_page()->is_valid()) {
-    return AllocateInNextPage(current_page, size_in_bytes);
-  }
-
-  // There is no next page in this space.  Try free list allocation unless
-  // that is currently forbidden.  The fixed space free list implicitly assumes
-  // that all free blocks are of the fixed size.
-  if (!heap()->linear_allocation()) {
-    Object* result;
-    MaybeObject* maybe = free_list_.Allocate();
-    if (maybe->ToObject(&result)) {
-      accounting_stats_.AllocateBytes(size_in_bytes);
-      HeapObject* obj = HeapObject::cast(result);
-      Page* p = Page::FromAddress(obj->address());
-
-      if (obj->address() >= p->AllocationWatermark()) {
-        // There should be no hole between the allocation watermark
-        // and allocated object address.
-        // Memory above the allocation watermark was not swept and
-        // might contain garbage pointers to new space.
-        ASSERT(obj->address() == p->AllocationWatermark());
-        p->SetAllocationWatermark(obj->address() + size_in_bytes);
-      }
-
-      return obj;
-    }
-  }
-
-  // Free list allocation failed and there is no next page.  Fail if we have
-  // hit the old generation size limit that should cause a garbage
-  // collection.
-  if (!heap()->always_allocate() &&
-      heap()->OldGenerationAllocationLimitReached()) {
-    return NULL;
-  }
-
-  // Try to expand the space and allocate in the new next page.
-  ASSERT(!current_page->next_page()->is_valid());
-  if (Expand(current_page)) {
-    return AllocateInNextPage(current_page, size_in_bytes);
-  }
-
-  // Finally, fail.
-  return NULL;
-}
-
-
-// Move to the next page (there is assumed to be one) and allocate there.
-// The top of page block is always wasted, because it is too small to hold a
-// map.
-HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
-                                           int size_in_bytes) {
-  ASSERT(current_page->next_page()->is_valid());
-  ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
-  ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
-  Page* next_page = current_page->next_page();
-  next_page->ClearGCFields();
-  current_page->SetAllocationWatermark(allocation_info_.top);
-  accounting_stats_.WasteBytes(page_extra_);
-  SetAllocationInfo(&allocation_info_, next_page);
-  return AllocateLinearly(&allocation_info_, size_in_bytes);
-}
-
-
-void FixedSpace::DeallocateBlock(Address start,
-                                 int size_in_bytes,
-                                 bool add_to_freelist) {
-  // Free-list elements in fixed space are assumed to have a fixed size.
-  // We break the free block into chunks and add them to the free list
-  // individually.
-  int size = object_size_in_bytes();
-  ASSERT(size_in_bytes % size == 0);
-  Address end = start + size_in_bytes;
-  for (Address a = start; a < end; a += size) {
-    Free(a, add_to_freelist);
-  }
-}
-
-
-#ifdef DEBUG
-void FixedSpace::ReportStatistics() {
-  int pct = static_cast<int>(Available() * 100 / Capacity());
-  PrintF("  capacity: %" V8_PTR_PREFIX "d"
-             ", waste: %" V8_PTR_PREFIX "d"
-             ", available: %" V8_PTR_PREFIX "d, %%%d\n",
-         Capacity(), Waste(), Available(), pct);
-
-  ClearHistograms();
-  HeapObjectIterator obj_it(this);
-  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
-    CollectHistogramInfo(obj);
-  ReportHistogram(false);
-}
-#endif
-
-
 // -----------------------------------------------------------------------------
 // MapSpace implementation
 
-void MapSpace::PrepareForMarkCompact(bool will_compact) {
-  // Call prepare of the super class.
-  FixedSpace::PrepareForMarkCompact(will_compact);
-
-  if (will_compact) {
-    // Initialize map index entry.
-    int page_count = 0;
-    PageIterator it(this, PageIterator::ALL_PAGES);
-    while (it.has_next()) {
-      ASSERT_MAP_PAGE_INDEX(page_count);
-
-      Page* p = it.next();
-      ASSERT(p->mc_page_index == page_count);
-
-      page_addresses_[page_count++] = p->address();
-    }
-  }
-}
-
-
 #ifdef DEBUG
 void MapSpace::VerifyObject(HeapObject* object) {
   // The object should be a map or a free-list node.
-  ASSERT(object->IsMap() || object->IsByteArray());
+  ASSERT(object->IsMap() || object->IsFreeSpace());
 }
 #endif
 
@@ -2662,107 +2249,40 @@
 // LargeObjectIterator
 
 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
-  current_ = space->first_chunk_;
+  current_ = space->first_page_;
   size_func_ = NULL;
 }
 
 
 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
                                          HeapObjectCallback size_func) {
-  current_ = space->first_chunk_;
+  current_ = space->first_page_;
   size_func_ = size_func;
 }
 
 
-HeapObject* LargeObjectIterator::next() {
+HeapObject* LargeObjectIterator::Next() {
   if (current_ == NULL) return NULL;
 
   HeapObject* object = current_->GetObject();
-  current_ = current_->next();
+  current_ = current_->next_page();
   return object;
 }
 
 
 // -----------------------------------------------------------------------------
-// LargeObjectChunk
-
-LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
-                                        Executability executable) {
-  size_t requested = ChunkSizeFor(size_in_bytes);
-  size_t size;
-  size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
-  Isolate* isolate = Isolate::Current();
-  void* mem = isolate->memory_allocator()->AllocateRawMemory(
-      requested + guard_size, &size, executable);
-  if (mem == NULL) return NULL;
-
-  // The start of the chunk may be overlayed with a page so we have to
-  // make sure that the page flags fit in the size field.
-  ASSERT((size & Page::kPageFlagMask) == 0);
-
-  LOG(isolate, NewEvent("LargeObjectChunk", mem, size));
-  if (size < requested + guard_size) {
-    isolate->memory_allocator()->FreeRawMemory(
-        mem, size, executable);
-    LOG(isolate, DeleteEvent("LargeObjectChunk", mem));
-    return NULL;
-  }
-
-  if (guard_size != 0) {
-    OS::Guard(mem, guard_size);
-    size -= guard_size;
-    mem = static_cast<Address>(mem) + guard_size;
-  }
-
-  ObjectSpace space = (executable == EXECUTABLE)
-      ? kObjectSpaceCodeSpace
-      : kObjectSpaceLoSpace;
-  isolate->memory_allocator()->PerformAllocationCallback(
-      space, kAllocationActionAllocate, size);
-
-  LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem);
-  chunk->size_ = size;
-  chunk->GetPage()->heap_ = isolate->heap();
-  return chunk;
-}
-
-
-void LargeObjectChunk::Free(Executability executable) {
-  size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
-  ObjectSpace space =
-      (executable == EXECUTABLE) ? kObjectSpaceCodeSpace : kObjectSpaceLoSpace;
-  // Do not access instance fields after FreeRawMemory!
-  Address my_address = address();
-  size_t my_size = size();
-  Isolate* isolate = GetPage()->heap_->isolate();
-  MemoryAllocator* a = isolate->memory_allocator();
-  a->FreeRawMemory(my_address - guard_size, my_size + guard_size, executable);
-  a->PerformAllocationCallback(space, kAllocationActionFree, my_size);
-  LOG(isolate, DeleteEvent("LargeObjectChunk", my_address));
-}
-
-
-int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
-  int os_alignment = static_cast<int>(OS::AllocateAlignment());
-  if (os_alignment < Page::kPageSize) {
-    size_in_bytes += (Page::kPageSize - os_alignment);
-  }
-  return size_in_bytes + Page::kObjectStartOffset;
-}
-
-// -----------------------------------------------------------------------------
 // LargeObjectSpace
 
 LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
     : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
-      first_chunk_(NULL),
+      first_page_(NULL),
       size_(0),
       page_count_(0),
       objects_size_(0) {}
 
 
 bool LargeObjectSpace::Setup() {
-  first_chunk_ = NULL;
+  first_page_ = NULL;
   size_ = 0;
   page_count_ = 0;
   objects_size_ = 0;
@@ -2771,20 +2291,22 @@
 
 
 void LargeObjectSpace::TearDown() {
-  while (first_chunk_ != NULL) {
-    LargeObjectChunk* chunk = first_chunk_;
-    first_chunk_ = first_chunk_->next();
-    chunk->Free(chunk->GetPage()->PageExecutability());
+  while (first_page_ != NULL) {
+    LargePage* page = first_page_;
+    first_page_ = first_page_->next_page();
+    LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
+
+    ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
+    heap()->isolate()->memory_allocator()->PerformAllocationCallback(
+        space, kAllocationActionFree, page->size());
+    heap()->isolate()->memory_allocator()->Free(page);
   }
   Setup();
 }
 
 
-MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size,
-                                                   int object_size,
-                                                   Executability executable) {
-  ASSERT(0 < object_size && object_size <= requested_size);
-
+MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
+                                           Executability executable) {
   // Check if we want to force a GC before growing the old space further.
   // If so, fail the allocation.
   if (!heap()->always_allocate() &&
@@ -2792,75 +2314,42 @@
     return Failure::RetryAfterGC(identity());
   }
 
-  LargeObjectChunk* chunk = LargeObjectChunk::New(requested_size, executable);
-  if (chunk == NULL) {
-    return Failure::RetryAfterGC(identity());
-  }
+  LargePage* page = heap()->isolate()->memory_allocator()->
+      AllocateLargePage(object_size, executable, this);
+  if (page == NULL) return Failure::RetryAfterGC(identity());
+  ASSERT(page->body_size() >= object_size);
 
-  size_ += static_cast<int>(chunk->size());
-  objects_size_ += requested_size;
+  size_ += static_cast<int>(page->size());
+  objects_size_ += object_size;
   page_count_++;
-  chunk->set_next(first_chunk_);
-  first_chunk_ = chunk;
+  page->set_next_page(first_page_);
+  first_page_ = page;
 
-  // Initialize page header.
-  Page* page = chunk->GetPage();
-  Address object_address = page->ObjectAreaStart();
-
-  // Clear the low order bit of the second word in the page to flag it as a
-  // large object page.  If the chunk_size happened to be written there, its
-  // low order bit should already be clear.
-  page->SetIsLargeObjectPage(true);
-  page->SetPageExecutability(executable);
-  page->SetRegionMarks(Page::kAllRegionsCleanMarks);
-  return HeapObject::FromAddress(object_address);
-}
-
-
-MaybeObject* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
-  ASSERT(0 < size_in_bytes);
-  return AllocateRawInternal(size_in_bytes,
-                             size_in_bytes,
-                             EXECUTABLE);
-}
-
-
-MaybeObject* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
-  ASSERT(0 < size_in_bytes);
-  return AllocateRawInternal(size_in_bytes,
-                             size_in_bytes,
-                             NOT_EXECUTABLE);
-}
-
-
-MaybeObject* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
-  ASSERT(0 < size_in_bytes);
-  return AllocateRawInternal(size_in_bytes,
-                             size_in_bytes,
-                             NOT_EXECUTABLE);
+  heap()->incremental_marking()->OldSpaceStep(object_size);
+  return page->GetObject();
 }
 
 
 // GC support
 MaybeObject* LargeObjectSpace::FindObject(Address a) {
-  for (LargeObjectChunk* chunk = first_chunk_;
-       chunk != NULL;
-       chunk = chunk->next()) {
-    Address chunk_address = chunk->address();
-    if (chunk_address <= a && a < chunk_address + chunk->size()) {
-      return chunk->GetObject();
+  for (LargePage* page = first_page_;
+       page != NULL;
+       page = page->next_page()) {
+    Address page_address = page->address();
+    if (page_address <= a && a < page_address + page->size()) {
+      return page->GetObject();
     }
   }
   return Failure::Exception();
 }
 
 
-LargeObjectChunk* LargeObjectSpace::FindChunkContainingPc(Address pc) {
+LargePage* LargeObjectSpace::FindPageContainingPc(Address pc) {
   // TODO(853): Change this implementation to only find executable
   // chunks and use some kind of hash-based approach to speed it up.
-  for (LargeObjectChunk* chunk = first_chunk_;
+  for (LargePage* chunk = first_page_;
        chunk != NULL;
-       chunk = chunk->next()) {
+       chunk = chunk->next_page()) {
     Address chunk_address = chunk->address();
     if (chunk_address <= pc && pc < chunk_address + chunk->size()) {
       return chunk;
@@ -2870,112 +2359,57 @@
 }
 
 
-void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
-  LargeObjectIterator it(this);
-  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
-    // We only have code, sequential strings, or fixed arrays in large
-    // object space, and only fixed arrays can possibly contain pointers to
-    // the young generation.
-    if (object->IsFixedArray()) {
-      Page* page = Page::FromAddress(object->address());
-      uint32_t marks = page->GetRegionMarks();
-      uint32_t newmarks = Page::kAllRegionsCleanMarks;
-
-      if (marks != Page::kAllRegionsCleanMarks) {
-        // For a large page a single dirty mark corresponds to several
-        // regions (modulo 32). So we treat a large page as a sequence of
-        // normal pages of size Page::kPageSize having same dirty marks
-        // and subsequently iterate dirty regions on each of these pages.
-        Address start = object->address();
-        Address end = page->ObjectAreaEnd();
-        Address object_end = start + object->Size();
-
-        // Iterate regions of the first normal page covering object.
-        uint32_t first_region_number = page->GetRegionNumberForAddress(start);
-        newmarks |=
-            heap()->IterateDirtyRegions(marks >> first_region_number,
-                                        start,
-                                        end,
-                                        &Heap::IteratePointersInDirtyRegion,
-                                        copy_object) << first_region_number;
-
-        start = end;
-        end = start + Page::kPageSize;
-        while (end <= object_end) {
-          // Iterate next 32 regions.
-          newmarks |=
-              heap()->IterateDirtyRegions(marks,
-                                          start,
-                                          end,
-                                          &Heap::IteratePointersInDirtyRegion,
-                                          copy_object);
-          start = end;
-          end = start + Page::kPageSize;
-        }
-
-        if (start != object_end) {
-          // Iterate the last piece of an object which is less than
-          // Page::kPageSize.
-          newmarks |=
-              heap()->IterateDirtyRegions(marks,
-                                          start,
-                                          object_end,
-                                          &Heap::IteratePointersInDirtyRegion,
-                                          copy_object);
-        }
-
-        page->SetRegionMarks(newmarks);
-      }
-    }
-  }
-}
-
-
 void LargeObjectSpace::FreeUnmarkedObjects() {
-  LargeObjectChunk* previous = NULL;
-  LargeObjectChunk* current = first_chunk_;
+  LargePage* previous = NULL;
+  LargePage* current = first_page_;
   while (current != NULL) {
     HeapObject* object = current->GetObject();
-    if (object->IsMarked()) {
-      object->ClearMark();
-      heap()->mark_compact_collector()->tracer()->decrement_marked_count();
+    // Can this large page contain pointers to non-trivial objects?  No other
+    // pointer object is this big.
+    bool is_pointer_object = object->IsFixedArray();
+    MarkBit mark_bit = Marking::MarkBitFrom(object);
+    if (mark_bit.Get()) {
+      mark_bit.Clear();
+      MemoryChunk::IncrementLiveBytes(object->address(), -object->Size());
       previous = current;
-      current = current->next();
+      current = current->next_page();
     } else {
+      LargePage* page = current;
       // Cut the chunk out from the chunk list.
-      LargeObjectChunk* current_chunk = current;
-      current = current->next();
+      current = current->next_page();
       if (previous == NULL) {
-        first_chunk_ = current;
+        first_page_ = current;
       } else {
-        previous->set_next(current);
+        previous->set_next_page(current);
       }
 
       // Free the chunk.
       heap()->mark_compact_collector()->ReportDeleteIfNeeded(
           object, heap()->isolate());
-      LiveObjectList::ProcessNonLive(object);
-
-      size_ -= static_cast<int>(current_chunk->size());
+      size_ -= static_cast<int>(page->size());
       objects_size_ -= object->Size();
       page_count_--;
-      current_chunk->Free(current_chunk->GetPage()->PageExecutability());
+
+      if (is_pointer_object) {
+        heap()->QueueMemoryChunkForFree(page);
+      } else {
+        heap()->isolate()->memory_allocator()->Free(page);
+      }
     }
   }
+  heap()->FreeQueuedChunks();
 }
 
 
 bool LargeObjectSpace::Contains(HeapObject* object) {
   Address address = object->address();
-  if (heap()->new_space()->Contains(address)) {
-    return false;
-  }
-  Page* page = Page::FromAddress(address);
+  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
 
-  SLOW_ASSERT(!page->IsLargeObjectPage()
-              || !FindObject(address)->IsFailure());
+  bool owned = (chunk->owner() == this);
 
-  return page->IsLargeObjectPage();
+  SLOW_ASSERT(!owned || !FindObject(address)->IsFailure());
+
+  return owned;
 }
 
 
@@ -2983,9 +2417,9 @@
 // We do not assume that the large object iterator works, because it depends
 // on the invariants we are checking during verification.
 void LargeObjectSpace::Verify() {
-  for (LargeObjectChunk* chunk = first_chunk_;
+  for (LargePage* chunk = first_page_;
        chunk != NULL;
-       chunk = chunk->next()) {
+       chunk = chunk->next_page()) {
     // Each chunk contains an object that starts at the large object page's
     // object area start.
     HeapObject* object = chunk->GetObject();
@@ -3015,9 +2449,6 @@
                           object->Size(),
                           &code_visitor);
     } else if (object->IsFixedArray()) {
-      // We loop over fixed arrays ourselves, rather then using the visitor,
-      // because the visitor doesn't support the start/offset iteration
-      // needed for IsRegionDirty.
       FixedArray* array = FixedArray::cast(object);
       for (int j = 0; j < array->length(); j++) {
         Object* element = array->get(j);
@@ -3025,13 +2456,6 @@
           HeapObject* element_object = HeapObject::cast(element);
           ASSERT(heap()->Contains(element_object));
           ASSERT(element_object->map()->IsMap());
-          if (heap()->InNewSpace(element_object)) {
-            Address array_addr = object->address();
-            Address element_addr = array_addr + FixedArray::kHeaderSize +
-                j * kPointerSize;
-
-            ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr));
-          }
         }
       }
     }
@@ -3041,7 +2465,7 @@
 
 void LargeObjectSpace::Print() {
   LargeObjectIterator it(this);
-  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
     obj->Print();
   }
 }
@@ -3052,7 +2476,7 @@
   int num_objects = 0;
   ClearHistograms();
   LargeObjectIterator it(this);
-  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
     num_objects++;
     CollectHistogramInfo(obj);
   }
@@ -3066,13 +2490,38 @@
 void LargeObjectSpace::CollectCodeStatistics() {
   Isolate* isolate = heap()->isolate();
   LargeObjectIterator obj_it(this);
-  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
+  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
     if (obj->IsCode()) {
       Code* code = Code::cast(obj);
       isolate->code_kind_statistics()[code->kind()] += code->Size();
     }
   }
 }
+
+
+void Page::Print() {
+  // Make a best-effort to print the objects in the page.
+  PrintF("Page@%p in %s\n",
+         this->address(),
+         AllocationSpaceName(this->owner()->identity()));
+  printf(" --------------------------------------\n");
+  HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
+  unsigned mark_size = 0;
+  for (HeapObject* object = objects.Next();
+       object != NULL;
+       object = objects.Next()) {
+    bool is_marked = Marking::MarkBitFrom(object).Get();
+    PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
+    if (is_marked) {
+      mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
+    }
+    object->ShortPrint();
+    PrintF("\n");
+  }
+  printf(" --------------------------------------\n");
+  printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
+}
+
 #endif  // DEBUG
 
 } }  // namespace v8::internal
diff --git a/src/spaces.h b/src/spaces.h
index f156496..6149334 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -49,45 +49,47 @@
 //
 // The semispaces of the young generation are contiguous.  The old and map
 // spaces consists of a list of pages. A page has a page header and an object
-// area. A page size is deliberately chosen as 8K bytes.
-// The first word of a page is an opaque page header that has the
-// address of the next page and its ownership information. The second word may
-// have the allocation top address of this page. Heap objects are aligned to the
-// pointer size.
+// area.
 //
 // There is a separate large object space for objects larger than
 // Page::kMaxHeapObjectSize, so that they do not have to move during
 // collection. The large object space is paged. Pages in large object space
-// may be larger than 8K.
+// may be larger than the page size.
 //
-// A card marking write barrier is used to keep track of intergenerational
-// references. Old space pages are divided into regions of Page::kRegionSize
-// size. Each region has a corresponding dirty bit in the page header which is
-// set if the region might contain pointers to new space. For details about
-// dirty bits encoding see comments in the Page::GetRegionNumberForAddress()
-// method body.
+// A store-buffer based write barrier is used to keep track of intergenerational
+// references.  See store-buffer.h.
 //
-// During scavenges and mark-sweep collections we iterate intergenerational
-// pointers without decoding heap object maps so if the page belongs to old
-// pointer space or large object space it is essential to guarantee that
-// the page does not contain any garbage pointers to new space: every pointer
-// aligned word which satisfies the Heap::InNewSpace() predicate must be a
-// pointer to a live heap object in new space. Thus objects in old pointer
-// and large object spaces should have a special layout (e.g. no bare integer
-// fields). This requirement does not apply to map space which is iterated in
-// a special fashion. However we still require pointer fields of dead maps to
-// be cleaned.
+// During scavenges and mark-sweep collections we sometimes (after a store
+// buffer overflow) iterate intergenerational pointers without decoding heap
+// object maps so if the page belongs to old pointer space or large object
+// space it is essential to guarantee that the page does not contain any
+// garbage pointers to new space: every pointer aligned word which satisfies
+// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
+// new space. Thus objects in old pointer and large object spaces should have a
+// special layout (e.g. no bare integer fields). This requirement does not
+// apply to map space which is iterated in a special fashion. However we still
+// require pointer fields of dead maps to be cleaned.
 //
-// To enable lazy cleaning of old space pages we use a notion of allocation
-// watermark. Every pointer under watermark is considered to be well formed.
-// Page allocation watermark is not necessarily equal to page allocation top but
-// all alive objects on page should reside under allocation watermark.
-// During scavenge allocation watermark might be bumped and invalid pointers
-// might appear below it. To avoid following them we store a valid watermark
-// into special field in the page header and set a page WATERMARK_INVALIDATED
-// flag. For details see comments in the Page::SetAllocationWatermark() method
-// body.
+// To enable lazy cleaning of old space pages we can mark chunks of the page
+// as being garbage.  Garbage sections are marked with a special map.  These
+// sections are skipped when scanning the page, even if we are otherwise
+// scanning without regard for object boundaries.  Garbage sections are chained
+// together to form a free list after a GC.  Garbage sections created outside
+// of GCs by object truncation etc. may not be in the free list chain.  Very
+// small free spaces are ignored; they need only be cleaned of bogus pointers
+// into new space.
 //
+// Each page may have up to one special garbage section.  The start of this
+// section is denoted by the top field in the space.  The end of the section
+// is denoted by the limit field in the space.  This special garbage section
+// is not marked with a free space map in the data.  The point of this section
+// is to enable linear allocation without having to constantly update the byte
+// array every time the top field is updated and a new object is created.  The
+// special garbage section is not in the chain of garbage sections.
+//
+// Since the top and limit fields are in the space, not the page, only one page
+// has a special garbage section, and if the top and limit are equal then there
+// is no special garbage section.
 
 // Some assertion macros used in the debugging mode.
 
@@ -114,30 +116,522 @@
 class PagedSpace;
 class MemoryAllocator;
 class AllocationInfo;
+class Space;
+class FreeList;
+class MemoryChunk;
+
+class MarkBit {
+ public:
+  typedef uint32_t CellType;
+
+  inline MarkBit(CellType* cell, CellType mask, bool data_only)
+      : cell_(cell), mask_(mask), data_only_(data_only) { }
+
+  inline CellType* cell() { return cell_; }
+  inline CellType mask() { return mask_; }
+
+#ifdef DEBUG
+  bool operator==(const MarkBit& other) {
+    return cell_ == other.cell_ && mask_ == other.mask_;
+  }
+#endif
+
+  inline void Set() { *cell_ |= mask_; }
+  inline bool Get() { return (*cell_ & mask_) != 0; }
+  inline void Clear() { *cell_ &= ~mask_; }
+
+  inline bool data_only() { return data_only_; }
+
+  inline MarkBit Next() {
+    CellType new_mask = mask_ << 1;
+    if (new_mask == 0) {
+      return MarkBit(cell_ + 1, 1, data_only_);
+    } else {
+      return MarkBit(cell_, new_mask, data_only_);
+    }
+  }
+
+ private:
+  CellType* cell_;
+  CellType mask_;
+  // This boolean indicates that the object is in a data-only space with no
+  // pointers.  This enables some optimizations when marking.
+  // It is expected that this field is inlined and turned into control flow
+  // at the place where the MarkBit object is created.
+  bool data_only_;
+};
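
MarkBit bundles a pointer to a 32-bit cell with a single-bit mask, and Next() shifts the mask left, rolling over into the following cell when it overflows. A small hedged usage sketch of that (cell, mask) walk against a local uint32_t array, not a real heap bitmap:

    // Illustrative walk of consecutive mark bits over a plain array.
    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t cells[2] = {0, 0};

      // Start at bit 30 of the first cell, as MarkBitFromIndex would for index 30.
      uint32_t* cell = &cells[0];
      uint32_t mask = 1u << 30;

      // Set three consecutive bits; the third one rolls over into cells[1],
      // just like Next() returning a MarkBit on cell_ + 1 with mask 1.
      for (int i = 0; i < 3; i++) {
        *cell |= mask;            // Set()
        uint32_t new_mask = mask << 1;
        if (new_mask == 0) {      // overflowed the 32-bit cell
          cell += 1;
          mask = 1;
        } else {
          mask = new_mask;
        }
      }

      std::printf("cells[0]=%08x cells[1]=%08x\n", cells[0], cells[1]);
      // Expected: cells[0]=c0000000 cells[1]=00000001
      return 0;
    }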
+
+
+// Bitmap is a sequence of cells each containing fixed number of bits.
+class Bitmap {
+ public:
+  static const uint32_t kBitsPerCell = 32;
+  static const uint32_t kBitsPerCellLog2 = 5;
+  static const uint32_t kBitIndexMask = kBitsPerCell - 1;
+  static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
+  static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;
+
+  static const size_t kLength =
+    (1 << kPageSizeBits) >> (kPointerSizeLog2);
+
+  static const size_t kSize =
+    (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);
+
+
+  static int CellsForLength(int length) {
+    return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
+  }
+
+  int CellsCount() {
+    return CellsForLength(kLength);
+  }
+
+  static int SizeFor(int cells_count) {
+    return sizeof(MarkBit::CellType) * cells_count;
+  }
+
+  INLINE(static uint32_t IndexToCell(uint32_t index)) {
+    return index >> kBitsPerCellLog2;
+  }
+
+  INLINE(static uint32_t CellToIndex(uint32_t index)) {
+    return index << kBitsPerCellLog2;
+  }
+
+  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
+    return (index + kBitIndexMask) & ~kBitIndexMask;
+  }
+
+  INLINE(MarkBit::CellType* cells()) {
+    return reinterpret_cast<MarkBit::CellType*>(this);
+  }
+
+  INLINE(Address address()) {
+    return reinterpret_cast<Address>(this);
+  }
+
+  INLINE(static Bitmap* FromAddress(Address addr)) {
+    return reinterpret_cast<Bitmap*>(addr);
+  }
+
+  inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) {
+    MarkBit::CellType mask = 1 << (index & kBitIndexMask);
+    MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
+    return MarkBit(cell, mask, data_only);
+  }
+
+  static inline void Clear(MemoryChunk* chunk);
+
+  static void PrintWord(uint32_t word, uint32_t himask = 0) {
+    for (uint32_t mask = 1; mask != 0; mask <<= 1) {
+      if ((mask & himask) != 0) PrintF("[");
+      PrintF((mask & word) ? "1" : "0");
+      if ((mask & himask) != 0) PrintF("]");
+    }
+  }
+
+  class CellPrinter {
+   public:
+    CellPrinter() : seq_start(0), seq_type(0), seq_length(0) { }
+
+    void Print(uint32_t pos, uint32_t cell) {
+      if (cell == seq_type) {
+        seq_length++;
+        return;
+      }
+
+      Flush();
+
+      if (IsSeq(cell)) {
+        seq_start = pos;
+        seq_length = 0;
+        seq_type = cell;
+        return;
+      }
+
+      PrintF("%d: ", pos);
+      PrintWord(cell);
+      PrintF("\n");
+    }
+
+    void Flush() {
+      if (seq_length > 0) {
+        PrintF("%d: %dx%d\n",
+               seq_start,
+               seq_type == 0 ? 0 : 1,
+               seq_length * kBitsPerCell);
+        seq_length = 0;
+      }
+    }
+
+    static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }
+
+   private:
+    uint32_t seq_start;
+    uint32_t seq_type;
+    uint32_t seq_length;
+  };
+
+  void Print() {
+    CellPrinter printer;
+    for (int i = 0; i < CellsCount(); i++) {
+      printer.Print(i, cells()[i]);
+    }
+    printer.Flush();
+    PrintF("\n");
+  }
+
+  bool IsClean() {
+    for (int i = 0; i < CellsCount(); i++) {
+      if (cells()[i] != 0) return false;
+    }
+    return true;
+  }
+};
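
The bitmap assigns one bit per pointer-sized word of the chunk: an address offset is shifted right by kPointerSizeLog2 to get a bit index, which IndexToCell and the low kBitIndexMask bits then split into a cell and a mask. A hedged arithmetic sketch with concrete numbers, assuming 8-byte pointers (kPointerSizeLog2 == 3):

    // Sketch of the offset -> (cell, mask) arithmetic used by the mark bitmap.
    // The constants mirror the header above for a 64-bit build; illustrative only.
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t kPointerSizeLog2 = 3;   // 8-byte words (assumption)
      const uint32_t kBitsPerCellLog2 = 5;   // 32 bits per cell
      const uint32_t kBitIndexMask = 31;

      uintptr_t chunk_start = 0x100000;      // 1MB-aligned chunk (hypothetical)
      uintptr_t addr = chunk_start + 0x428;  // some object address in the chunk

      uint32_t index = static_cast<uint32_t>(addr - chunk_start) >> kPointerSizeLog2;
      uint32_t cell = index >> kBitsPerCellLog2;      // which 32-bit cell
      uint32_t mask = 1u << (index & kBitIndexMask);  // which bit in that cell

      std::printf("index=%u cell=%u mask=%08x\n", index, cell, mask);
      // 0x428 / 8 = 133 -> cell 4, bit 5 -> mask 00000020
      return 0;
    }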
+
+
+class SkipList;
+class SlotsBuffer;
+
+// MemoryChunk represents a memory region owned by a specific space.
+// It is divided into the header and the body. Chunk start is always
+// 1MB aligned. Start of the body is aligned so it can accommodate
+// any heap object.
+class MemoryChunk {
+ public:
+  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
+  static MemoryChunk* FromAddress(Address a) {
+    return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
+  }
+
+  // Only works for addresses in pointer spaces, not data or code spaces.
+  static inline MemoryChunk* FromAnyPointerAddress(Address addr);
+
+  Address address() { return reinterpret_cast<Address>(this); }
+
+  bool is_valid() { return address() != NULL; }
+
+  MemoryChunk* next_chunk() const { return next_chunk_; }
+  MemoryChunk* prev_chunk() const { return prev_chunk_; }
+
+  void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; }
+  void set_prev_chunk(MemoryChunk* prev) { prev_chunk_ = prev; }
+
+  Space* owner() const {
+    if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
+        kFailureTag) {
+      return reinterpret_cast<Space*>(owner_ - kFailureTag);
+    } else {
+      return NULL;
+    }
+  }
+
+  void set_owner(Space* space) {
+    ASSERT((reinterpret_cast<intptr_t>(space) & kFailureTagMask) == 0);
+    owner_ = reinterpret_cast<Address>(space) + kFailureTag;
+    ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
+           kFailureTag);
+  }
+
+  VirtualMemory* reserved_memory() {
+    return &reservation_;
+  }
+
+  void InitializeReservedMemory() {
+    reservation_.Reset();
+  }
+
+  void set_reserved_memory(VirtualMemory* reservation) {
+    ASSERT_NOT_NULL(reservation);
+    reservation_.TakeControl(reservation);
+  }
+
+  bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
+  void initialize_scan_on_scavenge(bool scan) {
+    if (scan) {
+      SetFlag(SCAN_ON_SCAVENGE);
+    } else {
+      ClearFlag(SCAN_ON_SCAVENGE);
+    }
+  }
+  inline void set_scan_on_scavenge(bool scan);
+
+  int store_buffer_counter() { return store_buffer_counter_; }
+  void set_store_buffer_counter(int counter) {
+    store_buffer_counter_ = counter;
+  }
+
+  Address body() { return address() + kObjectStartOffset; }
+
+  Address body_limit() { return address() + size(); }
+
+  int body_size() { return static_cast<int>(size() - kObjectStartOffset); }
+
+  bool Contains(Address addr) {
+    return addr >= body() && addr < address() + size();
+  }
+
+  // Checks whether addr can be a limit of addresses in this page.
+  // It's a limit if it's in the page, or if it's just after the
+  // last byte of the page.
+  bool ContainsLimit(Address addr) {
+    return addr >= body() && addr <= address() + size();
+  }
+
+  enum MemoryChunkFlags {
+    IS_EXECUTABLE,
+    ABOUT_TO_BE_FREED,
+    POINTERS_TO_HERE_ARE_INTERESTING,
+    POINTERS_FROM_HERE_ARE_INTERESTING,
+    SCAN_ON_SCAVENGE,
+    IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
+    IN_TO_SPACE,    // All pages in new space have one of these two set.
+    NEW_SPACE_BELOW_AGE_MARK,
+    CONTAINS_ONLY_DATA,
+    EVACUATION_CANDIDATE,
+    RESCAN_ON_EVACUATION,
+
+    // Pages swept precisely can be iterated, hitting only the live objects,
+    // whereas those swept conservatively cannot be iterated over.  Both flags
+    // indicate that marking bits have been cleared by the sweeper; otherwise
+    // the marking bits are still intact.
+    WAS_SWEPT_PRECISELY,
+    WAS_SWEPT_CONSERVATIVELY,
+
+    // Last flag, keep at bottom.
+    NUM_MEMORY_CHUNK_FLAGS
+  };
+
+
+  static const int kPointersToHereAreInterestingMask =
+      1 << POINTERS_TO_HERE_ARE_INTERESTING;
+
+  static const int kPointersFromHereAreInterestingMask =
+      1 << POINTERS_FROM_HERE_ARE_INTERESTING;
+
+  static const int kEvacuationCandidateMask =
+      1 << EVACUATION_CANDIDATE;
+
+  static const int kSkipEvacuationSlotsRecordingMask =
+      (1 << EVACUATION_CANDIDATE) |
+      (1 << RESCAN_ON_EVACUATION) |
+      (1 << IN_FROM_SPACE) |
+      (1 << IN_TO_SPACE);
+
+
+  void SetFlag(int flag) {
+    flags_ |= static_cast<uintptr_t>(1) << flag;
+  }
+
+  void ClearFlag(int flag) {
+    flags_ &= ~(static_cast<uintptr_t>(1) << flag);
+  }
+
+  void SetFlagTo(int flag, bool value) {
+    if (value) {
+      SetFlag(flag);
+    } else {
+      ClearFlag(flag);
+    }
+  }
+
+  bool IsFlagSet(int flag) {
+    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
+  }
+
+  // Set or clear multiple flags at a time. The flags in the mask
+  // are set to the value in "flags", the rest retain the current value
+  // in flags_.
+  void SetFlags(intptr_t flags, intptr_t mask) {
+    flags_ = (flags_ & ~mask) | (flags & mask);
+  }
+
+  // Return all current flags.
+  intptr_t GetFlags() { return flags_; }
+
+  // Manage live byte count (count of bytes known to be live,
+  // because they are marked black).
+  void ResetLiveBytes() {
+    if (FLAG_trace_live_byte_count) {
+      PrintF("ResetLiveBytes:%p:%x->0\n",
+             static_cast<void*>(this), live_byte_count_);
+    }
+    live_byte_count_ = 0;
+  }
+  void IncrementLiveBytes(int by) {
+    ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_);
+    if (FLAG_trace_live_byte_count) {
+      printf("UpdateLiveBytes:%p:%x%c=%x->%x\n",
+             static_cast<void*>(this), live_byte_count_,
+             ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
+             live_byte_count_ + by);
+    }
+    live_byte_count_ += by;
+    ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_);
+  }
+  int LiveBytes() {
+    ASSERT(static_cast<unsigned>(live_byte_count_) <= size_);
+    return live_byte_count_;
+  }
+  static void IncrementLiveBytes(Address address, int by) {
+    MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
+  }
+
+  static const intptr_t kAlignment =
+      (static_cast<uintptr_t>(1) << kPageSizeBits);
+
+  static const intptr_t kAlignmentMask = kAlignment - 1;
+
+  static const intptr_t kSizeOffset = kPointerSize + kPointerSize;
+
+  static const intptr_t kLiveBytesOffset =
+      kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
+      kPointerSize + kPointerSize + kPointerSize + kIntSize;
+
+  static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
+
+  static const size_t kHeaderSize =
+      kSlotsBufferOffset + kPointerSize + kPointerSize;
+
+  static const int kBodyOffset =
+    CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize));
+
+  // The start offset of the object area in a page. Aligned to both maps and
+  // code alignment to be suitable for both.  Also aligned to 32 words because
+  // the marking bitmap is arranged in 32 bit chunks.
+  static const int kObjectStartAlignment = 32 * kPointerSize;
+  static const int kObjectStartOffset = kBodyOffset - 1 +
+      (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
+
+  size_t size() const { return size_; }
+
+  Executability executable() {
+    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
+  }
+
+  bool ContainsOnlyData() {
+    return IsFlagSet(CONTAINS_ONLY_DATA);
+  }
+
+  bool InNewSpace() {
+    return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
+  }
+
+  bool InToSpace() {
+    return IsFlagSet(IN_TO_SPACE);
+  }
+
+  bool InFromSpace() {
+    return IsFlagSet(IN_FROM_SPACE);
+  }
+
+  // ---------------------------------------------------------------------
+  // Markbits support
+
+  inline Bitmap* markbits() {
+    return Bitmap::FromAddress(address() + kHeaderSize);
+  }
+
+  void PrintMarkbits() { markbits()->Print(); }
+
+  inline uint32_t AddressToMarkbitIndex(Address addr) {
+    return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
+  }
+
+  inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
+    const intptr_t offset =
+        reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
+
+    return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
+  }
+
+  inline Address MarkbitIndexToAddress(uint32_t index) {
+    return this->address() + (index << kPointerSizeLog2);
+  }
+
+  void InsertAfter(MemoryChunk* other);
+  void Unlink();
+
+  inline Heap* heap() { return heap_; }
+
+  static const int kFlagsOffset = kPointerSize * 3;
+
+  bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }
+
+  bool ShouldSkipEvacuationSlotRecording() {
+    return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
+  }
+
+  inline SkipList* skip_list() {
+    return skip_list_;
+  }
+
+  inline void set_skip_list(SkipList* skip_list) {
+    skip_list_ = skip_list;
+  }
+
+  inline SlotsBuffer* slots_buffer() {
+    return slots_buffer_;
+  }
+
+  inline SlotsBuffer** slots_buffer_address() {
+    return &slots_buffer_;
+  }
+
+  void MarkEvacuationCandidate() {
+    ASSERT(slots_buffer_ == NULL);
+    SetFlag(EVACUATION_CANDIDATE);
+  }
+
+  void ClearEvacuationCandidate() {
+    ASSERT(slots_buffer_ == NULL);
+    ClearFlag(EVACUATION_CANDIDATE);
+  }
+
+
+ protected:
+  MemoryChunk* next_chunk_;
+  MemoryChunk* prev_chunk_;
+  size_t size_;
+  intptr_t flags_;
+  // If the chunk needs to remember its memory reservation, it is stored here.
+  VirtualMemory reservation_;
+  // The identity of the owning space.  This is tagged as a failure pointer, but
+  // no failure can be in an object, so this can be distinguished from any entry
+  // in a fixed array.
+  Address owner_;
+  Heap* heap_;
+  // Used by the store buffer to keep track of which pages to mark scan-on-
+  // scavenge.
+  int store_buffer_counter_;
+  // Count of bytes marked black on page.
+  int live_byte_count_;
+  SlotsBuffer* slots_buffer_;
+  SkipList* skip_list_;
+
+  static MemoryChunk* Initialize(Heap* heap,
+                                 Address base,
+                                 size_t size,
+                                 Executability executable,
+                                 Space* owner);
+
+  friend class MemoryAllocator;
+};
+
+STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
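
Two pieces of arithmetic above deserve a spelled-out check: MemoryChunk::FromAddress recovers the chunk start by masking off the low bits of an interior pointer (chunks are 1MB aligned), and kObjectStartOffset rounds kBodyOffset up to the next multiple of kObjectStartAlignment. A small sketch that verifies both formulas with made-up values (the concrete numbers are assumptions, not the real build constants):

    // Sanity-check sketch for the alignment arithmetic in MemoryChunk/Page.
    #include <cassert>
    #include <cstdint>

    int main() {
      // FromAddress: 1MB-aligned chunks mean any interior pointer maps back
      // to the chunk start by clearing the low 20 bits.
      const uintptr_t kAlignment = uintptr_t(1) << 20;   // 1MB
      const uintptr_t kAlignmentMask = kAlignment - 1;
      uintptr_t chunk = uintptr_t(0x12300000) & ~kAlignmentMask;  // aligned base
      uintptr_t interior = chunk + 0x1234;                        // pointer into body
      assert((interior & ~kAlignmentMask) == chunk);

      // kObjectStartOffset: round an offset up to a multiple of 32 words,
      // using the same (x - 1) + (a - (x - 1) % a) idiom as the header above.
      const uintptr_t kObjectStartAlignment = 32 * 8;   // 32 words, 8-byte ptrs
      uintptr_t body_offset = 0x148;                    // hypothetical kBodyOffset
      uintptr_t object_start = body_offset - 1 +
          (kObjectStartAlignment - (body_offset - 1) % kObjectStartAlignment);
      assert(object_start % kObjectStartAlignment == 0);
      assert(object_start >= body_offset);
      return 0;
    }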
 
 // -----------------------------------------------------------------------------
-// A page normally has 8K bytes. Large object pages may be larger.  A page
-// address is always aligned to the 8K page size.
-//
-// Each page starts with a header of Page::kPageHeaderSize size which contains
-// bookkeeping data.
-//
-// The mark-compact collector transforms a map pointer into a page index and a
-// page offset. The exact encoding is described in the comments for
-// class MapWord in objects.h.
+// A page is a memory chunk of size 1MB. Large object pages may be larger.
 //
 // The only way to get a page pointer is by calling factory methods:
 //   Page* p = Page::FromAddress(addr); or
 //   Page* p = Page::FromAllocationTop(top);
-class Page {
+class Page : public MemoryChunk {
  public:
   // Returns the page containing a given address. The address ranges
   // from [page_addr .. page_addr + kPageSize[
-  //
-  // Note that this function only works for addresses in normal paged
-  // spaces and addresses in the first 8K of large object pages (i.e.,
-  // the start of large objects but not necessarily derived pointers
-  // within them).
+  // This only works if the object is in fact in a page.  See also MemoryChunk::
+  // FromAddress() and FromAnyPointerAddress().
   INLINE(static Page* FromAddress(Address a)) {
     return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
   }
@@ -152,30 +646,11 @@
     return p;
   }
 
-  // Returns the start address of this page.
-  Address address() { return reinterpret_cast<Address>(this); }
-
-  // Checks whether this is a valid page address.
-  bool is_valid() { return address() != NULL; }
-
-  // Returns the next page of this page.
+  // Returns the next page in the chain of pages owned by a space.
   inline Page* next_page();
-
-  // Return the end of allocation in this page. Undefined for unused pages.
-  inline Address AllocationTop();
-
-  // Return the allocation watermark for the page.
-  // For old space pages it is guaranteed that the area under the watermark
-  // does not contain any garbage pointers to new space.
-  inline Address AllocationWatermark();
-
-  // Return the allocation watermark offset from the beginning of the page.
-  inline uint32_t AllocationWatermarkOffset();
-
-  inline void SetAllocationWatermark(Address allocation_watermark);
-
-  inline void SetCachedAllocationWatermark(Address allocation_watermark);
-  inline Address CachedAllocationWatermark();
+  inline Page* prev_page();
+  inline void set_next_page(Page* page);
+  inline void set_prev_page(Page* page);
 
   // Returns the start address of the object area in this page.
   Address ObjectAreaStart() { return address() + kObjectStartOffset; }
@@ -188,22 +663,6 @@
     return 0 == (OffsetFrom(a) & kPageAlignmentMask);
   }
 
-  // True if this page was in use before current compaction started.
-  // Result is valid only for pages owned by paged spaces and
-  // only after PagedSpace::PrepareForMarkCompact was called.
-  inline bool WasInUseBeforeMC();
-
-  inline void SetWasInUseBeforeMC(bool was_in_use);
-
-  // True if this page is a large object page.
-  inline bool IsLargeObjectPage();
-
-  inline void SetIsLargeObjectPage(bool is_large_object_page);
-
-  inline Executability PageExecutability();
-
-  inline void SetPageExecutability(Executability executable);
-
   // Returns the offset of a given address to this page.
   INLINE(int Offset(Address a)) {
     int offset = static_cast<int>(a - address());
@@ -218,24 +677,6 @@
   }
 
   // ---------------------------------------------------------------------
-  // Card marking support
-
-  static const uint32_t kAllRegionsCleanMarks = 0x0;
-  static const uint32_t kAllRegionsDirtyMarks = 0xFFFFFFFF;
-
-  inline uint32_t GetRegionMarks();
-  inline void SetRegionMarks(uint32_t dirty);
-
-  inline uint32_t GetRegionMaskForAddress(Address addr);
-  inline uint32_t GetRegionMaskForSpan(Address start, int length_in_bytes);
-  inline int GetRegionNumberForAddress(Address addr);
-
-  inline void MarkRegionDirty(Address addr);
-  inline bool IsRegionDirty(Address addr);
-
-  inline void ClearRegionMarks(Address start,
-                               Address end,
-                               bool reaches_limit);
 
   // Page size in bytes.  This must be a multiple of the OS page size.
   static const int kPageSize = 1 << kPageSizeBits;
@@ -243,119 +684,70 @@
   // Page size mask.
   static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
 
-  static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
-    kIntSize + kPointerSize + kPointerSize;
-
-  // The start offset of the object area in a page. Aligned to both maps and
-  // code alignment to be suitable for both.
-  static const int kObjectStartOffset =
-      CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kPageHeaderSize));
-
   // Object area size in bytes.
   static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
 
   // Maximum object size that fits in a page.
   static const int kMaxHeapObjectSize = kObjectAreaSize;
 
-  static const int kDirtyFlagOffset = 2 * kPointerSize;
-  static const int kRegionSizeLog2 = 8;
-  static const int kRegionSize = 1 << kRegionSizeLog2;
-  static const intptr_t kRegionAlignmentMask = (kRegionSize - 1);
+  static const int kFirstUsedCell =
+    (kObjectStartOffset/kPointerSize) >> Bitmap::kBitsPerCellLog2;
 
-  STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);
-
-  enum PageFlag {
-    IS_NORMAL_PAGE = 0,
-    WAS_IN_USE_BEFORE_MC,
-
-    // Page allocation watermark was bumped by preallocation during scavenge.
-    // Correct watermark can be retrieved by CachedAllocationWatermark() method
-    WATERMARK_INVALIDATED,
-    IS_EXECUTABLE,
-    NUM_PAGE_FLAGS  // Must be last
-  };
-  static const int kPageFlagMask = (1 << NUM_PAGE_FLAGS) - 1;
-
-  // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during
-  // scavenge we just invalidate the watermark on each old space page after
-  // processing it. And then we flip the meaning of the WATERMARK_INVALIDATED
-  // flag at the beginning of the next scavenge and each page becomes marked as
-  // having a valid watermark.
-  //
-  // The following invariant must hold for pages in old pointer and map spaces:
-  //     If page is in use then page is marked as having invalid watermark at
-  //     the beginning and at the end of any GC.
-  //
-  // This invariant guarantees that after flipping flag meaning at the
-  // beginning of scavenge all pages in use will be marked as having valid
-  // watermark.
-  static inline void FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap);
-
-  // Returns true if the page allocation watermark was not altered during
-  // scavenge.
-  inline bool IsWatermarkValid();
-
-  inline void InvalidateWatermark(bool value);
-
-  inline bool GetPageFlag(PageFlag flag);
-  inline void SetPageFlag(PageFlag flag, bool value);
-  inline void ClearPageFlags();
+  static const int kLastUsedCell =
+    ((kPageSize - kPointerSize)/kPointerSize) >>
+      Bitmap::kBitsPerCellLog2;
 
   inline void ClearGCFields();
 
-  static const int kAllocationWatermarkOffsetShift = WATERMARK_INVALIDATED + 1;
-  static const int kAllocationWatermarkOffsetBits  = kPageSizeBits + 1;
-  static const uint32_t kAllocationWatermarkOffsetMask =
-      ((1 << kAllocationWatermarkOffsetBits) - 1) <<
-      kAllocationWatermarkOffsetShift;
+  static inline Page* Initialize(Heap* heap,
+                                 MemoryChunk* chunk,
+                                 Executability executable,
+                                 PagedSpace* owner);
 
-  static const uint32_t kFlagsMask =
-    ((1 << kAllocationWatermarkOffsetShift) - 1);
+  void InitializeAsAnchor(PagedSpace* owner);
 
-  STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >=
-               kAllocationWatermarkOffsetBits);
+  bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); }
+  bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); }
+  bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); }
 
-  //---------------------------------------------------------------------------
-  // Page header description.
-  //
-  // If a page is not in the large object space, the first word,
-  // opaque_header, encodes the next page address (aligned to kPageSize 8K)
-  // and the chunk number (0 ~ 8K-1).  Only MemoryAllocator should use
-  // opaque_header. The value range of the opaque_header is [0..kPageSize[,
-  // or [next_page_start, next_page_end[. It cannot point to a valid address
-  // in the current page.  If a page is in the large object space, the first
-  // word *may* (if the page start and large object chunk start are the
-  // same) contain the address of the next large object chunk.
-  intptr_t opaque_header;
+  void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); }
+  void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); }
 
-  // If the page is not in the large object space, the low-order bit of the
-  // second word is set. If the page is in the large object space, the
-  // second word *may* (if the page start and large object chunk start are
-  // the same) contain the large object chunk size.  In either case, the
-  // low-order bit for large object pages will be cleared.
-  // For normal pages this word is used to store page flags and
-  // offset of allocation top.
-  intptr_t flags_;
+  void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); }
+  void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); }
 
-  // This field contains dirty marks for regions covering the page. Only dirty
-  // regions might contain intergenerational references.
-  // Only 32 dirty marks are supported so for large object pages several regions
-  // might be mapped to a single dirty mark.
-  uint32_t dirty_regions_;
+#ifdef DEBUG
+  void Print();
+#endif  // DEBUG
 
-  // The index of the page in its owner space.
-  int mc_page_index;
-
-  // During mark-compact collections this field contains the forwarding address
-  // of the first live object in this page.
-  // During scavenge collection this field is used to store allocation watermark
-  // if it is altered during scavenge.
-  Address mc_first_forwarded;
-
-  Heap* heap_;
+  friend class MemoryAllocator;
 };
 
 
+STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize);
+
+
+class LargePage : public MemoryChunk {
+ public:
+  HeapObject* GetObject() {
+    return HeapObject::FromAddress(body());
+  }
+
+  inline LargePage* next_page() const {
+    return static_cast<LargePage*>(next_chunk());
+  }
+
+  inline void set_next_page(LargePage* page) {
+    set_next_chunk(page);
+  }
+ private:
+  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);
+
+  friend class MemoryAllocator;
+};
+
+STATIC_CHECK(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
+
 // ----------------------------------------------------------------------------
 // Space is the abstract superclass for all allocation spaces.
 class Space : public Malloced {
@@ -380,6 +772,14 @@
   // (e.g. see LargeObjectSpace).
   virtual intptr_t SizeOfObjects() { return Size(); }
 
+  virtual int RoundSizeDownToObjectAlignment(int size) {
+    if (id_ == CODE_SPACE) {
+      return RoundDown(size, kCodeAlignment);
+    } else {
+      return RoundDown(size, kPointerSize);
+    }
+  }
+
 #ifdef DEBUG
   virtual void Print() = 0;
 #endif
@@ -430,9 +830,9 @@
   // Allocates a chunk of memory from the large-object portion of
   // the code range.  On platforms with no separate code range, should
   // not be called.
-  MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
-                                          size_t* allocated);
-  void FreeRawMemory(void* buf, size_t length);
+  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested,
+                                            size_t* allocated);
+  void FreeRawMemory(Address buf, size_t length);
 
  private:
   Isolate* isolate_;
@@ -443,9 +843,15 @@
   class FreeBlock {
    public:
     FreeBlock(Address start_arg, size_t size_arg)
-        : start(start_arg), size(size_arg) {}
+        : start(start_arg), size(size_arg) {
+      ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
+      ASSERT(size >= static_cast<size_t>(Page::kPageSize));
+    }
     FreeBlock(void* start_arg, size_t size_arg)
-        : start(static_cast<Address>(start_arg)), size(size_arg) {}
+        : start(static_cast<Address>(start_arg)), size(size_arg) {
+      ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
+      ASSERT(size >= static_cast<size_t>(Page::kPageSize));
+    }
 
     Address start;
     size_t size;
@@ -473,30 +879,63 @@
 };
 
 
+class SkipList {
+ public:
+  SkipList() {
+    Clear();
+  }
+
+  void Clear() {
+    for (int idx = 0; idx < kSize; idx++) {
+      starts_[idx] = reinterpret_cast<Address>(-1);
+    }
+  }
+
+  Address StartFor(Address addr) {
+    return starts_[RegionNumber(addr)];
+  }
+
+  void AddObject(Address addr, int size) {
+    int start_region = RegionNumber(addr);
+    int end_region = RegionNumber(addr + size - kPointerSize);
+    for (int idx = start_region; idx <= end_region; idx++) {
+      if (starts_[idx] > addr) starts_[idx] = addr;
+    }
+  }
+
+  static inline int RegionNumber(Address addr) {
+    return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
+  }
+
+  static void Update(Address addr, int size) {
+    Page* page = Page::FromAddress(addr);
+    SkipList* list = page->skip_list();
+    if (list == NULL) {
+      list = new SkipList();
+      page->set_skip_list(list);
+    }
+
+    list->AddObject(addr, size);
+  }
+
+ private:
+  static const int kRegionSizeLog2 = 13;
+  static const int kRegionSize = 1 << kRegionSizeLog2;
+  static const int kSize = Page::kPageSize / kRegionSize;
+
+  STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
+
+  Address starts_[kSize];
+};
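+
+// A minimal usage sketch (illustrative only; the real call sites live in the
+// sweeper and scanning code, not in this header, and 'obj'/'addr' are just
+// placeholder names):
+//
+//   // After finding that 'obj', of 'size' bytes, is live on its page:
+//   SkipList::Update(obj->address(), size);
+//
+//   // Later, when scanning the page containing 'addr' (assuming a skip list
+//   // has already been attached to that page), start at the first object
+//   // recorded for its 8KB region instead of at the page start:
+//   Address start = Page::FromAddress(addr)->skip_list()->StartFor(addr);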
+
+
 // ----------------------------------------------------------------------------
 // A space acquires chunks of memory from the operating system. The memory
-// allocator manages chunks for the paged heap spaces (old space and map
-// space).  A paged chunk consists of pages. Pages in a chunk have contiguous
-// addresses and are linked as a list.
+// allocator allocates and deallocates pages for the paged heap spaces and
+// large pages for the large object space.
 //
-// The allocator keeps an initial chunk which is used for the new space.  The
-// leftover regions of the initial chunk are used for the initial chunks of
-// old space and map space if they are big enough to hold at least one page.
-// The allocator assumes that there is one old space and one map space, each
-// expands the space by allocating kPagesPerChunk pages except the last
-// expansion (before running out of space).  The first chunk may contain fewer
-// than kPagesPerChunk pages as well.
+// Each space has to manage its own pages.
 //
-// The memory allocator also allocates chunks for the large object space, but
-// they are managed by the space itself.  The new space does not expand.
-//
-// The fact that pages for paged spaces are allocated and deallocated in chunks
-// induces a constraint on the order of pages in a linked lists. We say that
-// pages are linked in the chunk-order if and only if every two consecutive
-// pages from the same chunk are consecutive in the linked list.
-//
-
-
 class MemoryAllocator {
  public:
   explicit MemoryAllocator(Isolate* isolate);
@@ -505,91 +944,15 @@
   // Max capacity of the total space and executable memory limit.
   bool Setup(intptr_t max_capacity, intptr_t capacity_executable);
 
-  // Deletes valid chunks.
   void TearDown();
 
-  // Reserves an initial address range of virtual memory to be split between
-  // the two new space semispaces, the old space, and the map space.  The
-  // memory is not yet committed or assigned to spaces and split into pages.
-  // The initial chunk is unmapped when the memory allocator is torn down.
-  // This function should only be called when there is not already a reserved
-  // initial chunk (initial_chunk_ should be NULL).  It returns the start
-  // address of the initial chunk if successful, with the side effect of
-  // setting the initial chunk, or else NULL if unsuccessful and leaves the
-  // initial chunk NULL.
-  void* ReserveInitialChunk(const size_t requested);
+  Page* AllocatePage(PagedSpace* owner, Executability executable);
 
-  // Commits pages from an as-yet-unmanaged block of virtual memory into a
-  // paged space.  The block should be part of the initial chunk reserved via
-  // a call to ReserveInitialChunk.  The number of pages is always returned in
-  // the output parameter num_pages.  This function assumes that the start
-  // address is non-null and that it is big enough to hold at least one
-  // page-aligned page.  The call always succeeds, and num_pages is always
-  // greater than zero.
-  Page* CommitPages(Address start, size_t size, PagedSpace* owner,
-                    int* num_pages);
+  LargePage* AllocateLargePage(intptr_t object_size,
+                               Executability executable,
+                               Space* owner);
 
-  // Commit a contiguous block of memory from the initial chunk.  Assumes that
-  // the address is not NULL, the size is greater than zero, and that the
-  // block is contained in the initial chunk.  Returns true if it succeeded
-  // and false otherwise.
-  bool CommitBlock(Address start, size_t size, Executability executable);
-
-  // Uncommit a contiguous block of memory [start..(start+size)[.
-  // start is not NULL, the size is greater than zero, and the
-  // block is contained in the initial chunk.  Returns true if it succeeded
-  // and false otherwise.
-  bool UncommitBlock(Address start, size_t size);
-
-  // Zaps a contiguous block of memory [start..(start+size)[ thus
-  // filling it up with a recognizable non-NULL bit pattern.
-  void ZapBlock(Address start, size_t size);
-
-  // Attempts to allocate the requested (non-zero) number of pages from the
-  // OS.  Fewer pages might be allocated than requested. If it fails to
-  // allocate memory for the OS or cannot allocate a single page, this
-  // function returns an invalid page pointer (NULL). The caller must check
-  // whether the returned page is valid (by calling Page::is_valid()).  It is
-  // guaranteed that allocated pages have contiguous addresses.  The actual
-  // number of allocated pages is returned in the output parameter
-  // allocated_pages.  If the PagedSpace owner is executable and there is
-  // a code range, the pages are allocated from the code range.
-  Page* AllocatePages(int requested_pages, int* allocated_pages,
-                      PagedSpace* owner);
-
-  // Frees pages from a given page and after. Requires pages to be
-  // linked in chunk-order (see comment for class).
-  // If 'p' is the first page of a chunk, pages from 'p' are freed
-  // and this function returns an invalid page pointer.
-  // Otherwise, the function searches a page after 'p' that is
-  // the first page of a chunk. Pages after the found page
-  // are freed and the function returns 'p'.
-  Page* FreePages(Page* p);
-
-  // Frees all pages owned by given space.
-  void FreeAllPages(PagedSpace* space);
-
-  // Allocates and frees raw memory of certain size.
-  // These are just thin wrappers around OS::Allocate and OS::Free,
-  // but keep track of allocated bytes as part of heap.
-  // If the flag is EXECUTABLE and a code range exists, the requested
-  // memory is allocated from the code range.  If a code range exists
-  // and the freed memory is in it, the code range manages the freed memory.
-  MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
-                                          size_t* allocated,
-                                          Executability executable);
-  void FreeRawMemory(void* buf,
-                     size_t length,
-                     Executability executable);
-  void PerformAllocationCallback(ObjectSpace space,
-                                 AllocationAction action,
-                                 size_t size);
-
-  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
-                                   ObjectSpace space,
-                                   AllocationAction action);
-  void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
-  bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);
+  void Free(MemoryChunk* chunk);
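+
+  // Illustrative lifecycle sketch ('memory_allocator' and 'owner_space' are
+  // placeholder names, and the NULL-on-failure convention is an assumption,
+  // not something this interface promises):
+  //
+  //   // A paged space grows one page at a time:
+  //   Page* p = memory_allocator->AllocatePage(owner_space, NOT_EXECUTABLE);
+  //   if (p == NULL) { /* assumed failure signal: no memory available */ }
+  //   ...
+  //   // When the space no longer needs the page, the whole chunk goes back:
+  //   memory_allocator->Free(p);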
 
   // Returns the maximum available bytes of heaps.
   intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
@@ -611,67 +974,68 @@
     return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
   }
 
-  // Links two pages.
-  inline void SetNextPage(Page* prev, Page* next);
-
-  // Returns the next page of a given page.
-  inline Page* GetNextPage(Page* p);
-
-  // Checks whether a page belongs to a space.
-  inline bool IsPageInSpace(Page* p, PagedSpace* space);
-
-  // Returns the space that owns the given page.
-  inline PagedSpace* PageOwner(Page* page);
-
-  // Finds the first/last page in the same chunk as a given page.
-  Page* FindFirstPageInSameChunk(Page* p);
-  Page* FindLastPageInSameChunk(Page* p);
-
-  // Relinks list of pages owned by space to make it chunk-ordered.
-  // Returns new first and last pages of space.
-  // Also returns last page in relinked list which has WasInUsedBeforeMC
-  // flag set.
-  void RelinkPageListInChunkOrder(PagedSpace* space,
-                                  Page** first_page,
-                                  Page** last_page,
-                                  Page** last_page_in_use);
-
 #ifdef DEBUG
   // Reports statistic info of the space.
   void ReportStatistics();
 #endif
 
-  // Due to encoding limitation, we can only have 8K chunks.
-  static const int kMaxNofChunks = 1 << kPageSizeBits;
-  // If a chunk has at least 16 pages, the maximum heap size is about
-  // 8K * 8K * 16 = 1G bytes.
-#ifdef V8_TARGET_ARCH_X64
-  static const int kPagesPerChunk = 32;
-  // On 64 bit the chunk table consists of 4 levels of 4096-entry tables.
-  static const int kChunkTableLevels = 4;
-  static const int kChunkTableBitsPerLevel = 12;
-#else
-  static const int kPagesPerChunk = 16;
-  // On 32 bit the chunk table consists of 2 levels of 256-entry tables.
-  static const int kChunkTableLevels = 2;
-  static const int kChunkTableBitsPerLevel = 8;
-#endif
+  MemoryChunk* AllocateChunk(intptr_t body_size,
+                             Executability executable,
+                             Space* space);
+
+  Address ReserveAlignedMemory(size_t requested,
+                               size_t alignment,
+                               VirtualMemory* controller);
+  Address AllocateAlignedMemory(size_t requested,
+                                size_t alignment,
+                                Executability executable,
+                                VirtualMemory* controller);
+
+  void FreeMemory(VirtualMemory* reservation, Executability executable);
+  void FreeMemory(Address addr, size_t size, Executability executable);
+
+  // Commit a contiguous block of memory from the initial chunk.  Assumes that
+  // the address is not NULL, the size is greater than zero, and that the
+  // block is contained in the initial chunk.  Returns true if it succeeded
+  // and false otherwise.
+  bool CommitBlock(Address start, size_t size, Executability executable);
+
+  // Uncommit a contiguous block of memory [start..(start+size)[.
+  // start is not NULL, the size is greater than zero, and the
+  // block is contained in the initial chunk.  Returns true if it succeeded
+  // and false otherwise.
+  bool UncommitBlock(Address start, size_t size);
+
+  // Zaps a contiguous block of memory [start..(start+size)[ thus
+  // filling it up with a recognizable non-NULL bit pattern.
+  void ZapBlock(Address start, size_t size);
+
+  void PerformAllocationCallback(ObjectSpace space,
+                                 AllocationAction action,
+                                 size_t size);
+
+  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
+                                   ObjectSpace space,
+                                   AllocationAction action);
+
+  void RemoveMemoryAllocationCallback(
+      MemoryAllocationCallback callback);
+
+  bool MemoryAllocationCallbackRegistered(
+      MemoryAllocationCallback callback);
 
  private:
-  static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
-
   Isolate* isolate_;
 
   // Maximum space size in bytes.
-  intptr_t capacity_;
+  size_t capacity_;
   // Maximum subset of capacity_ that can be executable
-  intptr_t capacity_executable_;
+  size_t capacity_executable_;
 
   // Allocated space size in bytes.
-  intptr_t size_;
-
+  size_t size_;
   // Allocated executable space size in bytes.
-  intptr_t size_executable_;
+  size_t size_executable_;
 
   struct MemoryAllocationCallbackRegistration {
     MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
@@ -683,64 +1047,11 @@
     ObjectSpace space;
     AllocationAction action;
   };
+
   // A List of callback that are triggered when memory is allocated or free'd
   List<MemoryAllocationCallbackRegistration>
       memory_allocation_callbacks_;
 
-  // The initial chunk of virtual memory.
-  VirtualMemory* initial_chunk_;
-
-  // Allocated chunk info: chunk start address, chunk size, and owning space.
-  class ChunkInfo BASE_EMBEDDED {
-   public:
-    ChunkInfo() : address_(NULL),
-                  size_(0),
-                  owner_(NULL),
-                  executable_(NOT_EXECUTABLE),
-                  owner_identity_(FIRST_SPACE) {}
-    inline void init(Address a, size_t s, PagedSpace* o);
-    Address address() { return address_; }
-    size_t size() { return size_; }
-    PagedSpace* owner() { return owner_; }
-    // We save executability of the owner to allow using it
-    // when collecting stats after the owner has been destroyed.
-    Executability executable() const { return executable_; }
-    AllocationSpace owner_identity() const { return owner_identity_; }
-
-   private:
-    Address address_;
-    size_t size_;
-    PagedSpace* owner_;
-    Executability executable_;
-    AllocationSpace owner_identity_;
-  };
-
-  // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
-  List<ChunkInfo> chunks_;
-  List<int> free_chunk_ids_;
-  int max_nof_chunks_;
-  int top_;
-
-  // Push/pop a free chunk id onto/from the stack.
-  void Push(int free_chunk_id);
-  int Pop();
-  bool OutOfChunkIds() { return top_ == 0; }
-
-  // Frees a chunk.
-  void DeleteChunk(int chunk_id);
-
-  // Basic check whether a chunk id is in the valid range.
-  inline bool IsValidChunkId(int chunk_id);
-
-  // Checks whether a chunk id identifies an allocated chunk.
-  inline bool IsValidChunk(int chunk_id);
-
-  // Returns the chunk id that a page belongs to.
-  inline int GetChunkId(Page* p);
-
-  // True if the address lies in the initial chunk.
-  inline bool InInitialChunk(Address address);
-
   // Initializes pages in a chunk. Returns the first page address.
   // This function and GetChunkId() are provided for the mark-compact
   // collector to rebuild page headers in the from space, which is
@@ -748,13 +1059,7 @@
   Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                PagedSpace* owner);
 
-  Page* RelinkPagesInChunk(int chunk_id,
-                           Address chunk_start,
-                           size_t chunk_size,
-                           Page* prev,
-                           Page** last_page_in_use);
-
-  DISALLOW_COPY_AND_ASSIGN(MemoryAllocator);
+  DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
 };
 
 
@@ -777,71 +1082,58 @@
 // -----------------------------------------------------------------------------
 // Heap object iterator in new/old/map spaces.
 //
-// A HeapObjectIterator iterates objects from a given address to the
-// top of a space. The given address must be below the current
-// allocation pointer (space top). There are some caveats.
+// A HeapObjectIterator iterates objects from the bottom of the given space
+// to its top or from the bottom of the given page to its top.
 //
-// (1) If the space top changes upward during iteration (because of
-//     allocating new objects), the iterator does not iterate objects
-//     above the original space top. The caller must create a new
-//     iterator starting from the old top in order to visit these new
-//     objects.
-//
-// (2) If new objects are allocated below the original allocation top
-//     (e.g., free-list allocation in paged spaces), the new objects
-//     may or may not be iterated depending on their position with
-//     respect to the current point of iteration.
-//
-// (3) The space top should not change downward during iteration,
-//     otherwise the iterator will return not-necessarily-valid
-//     objects.
-
+// If objects are allocated in the page during iteration, the iterator may or
+// may not iterate over those objects.  The caller must create a new iterator
+// to be sure to visit these new objects.
 class HeapObjectIterator: public ObjectIterator {
  public:
-  // Creates a new object iterator in a given space. If a start
-  // address is not given, the iterator starts from the space bottom.
+  // Creates a new object iterator in a given space.
   // If the size function is not given, the iterator calls the default
   // Object::Size().
   explicit HeapObjectIterator(PagedSpace* space);
   HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
-  HeapObjectIterator(PagedSpace* space, Address start);
-  HeapObjectIterator(PagedSpace* space,
-                     Address start,
-                     HeapObjectCallback size_func);
   HeapObjectIterator(Page* page, HeapObjectCallback size_func);
 
-  inline HeapObject* next() {
-    return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
+  // Advance to the next object, skipping free spaces and other fillers and
+  // skipping the special garbage section of which there is one per space.
+  // Returns NULL when the iteration has ended.
+  inline HeapObject* Next() {
+    do {
+      HeapObject* next_obj = FromCurrentPage();
+      if (next_obj != NULL) return next_obj;
+    } while (AdvanceToNextPage());
+    return NULL;
   }
 
-  // implementation of ObjectIterator.
-  virtual HeapObject* next_object() { return next(); }
+  virtual HeapObject* next_object() {
+    return Next();
+  }
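+
+  // Typical use (an illustrative sketch; 'space' stands for any PagedSpace):
+  //
+  //   HeapObjectIterator it(space);
+  //   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+  //     // ... visit obj ...
+  //   }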
 
  private:
-  Address cur_addr_;  // current iteration point
-  Address end_addr_;  // end iteration point
-  Address cur_limit_;  // current page limit
-  HeapObjectCallback size_func_;  // size function
-  Page* end_page_;  // caches the page of the end address
+  enum PageMode { kOnePageOnly, kAllPagesInSpace };
 
-  HeapObject* FromCurrentPage() {
-    ASSERT(cur_addr_ < cur_limit_);
+  Address cur_addr_;  // Current iteration point.
+  Address cur_end_;   // End iteration point.
+  HeapObjectCallback size_func_;  // Size function or NULL.
+  PagedSpace* space_;
+  PageMode page_mode_;
 
-    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
-    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
-    ASSERT_OBJECT_SIZE(obj_size);
+  // Fast (inlined) path of Next().
+  inline HeapObject* FromCurrentPage();
 
-    cur_addr_ += obj_size;
-    ASSERT(cur_addr_ <= cur_limit_);
-
-    return obj;
-  }
-
-  // Slow path of next, goes into the next page.
-  HeapObject* FromNextPage();
+  // Slow path of Next(), goes into the next page.  Returns false if the
+  // iteration has ended.
+  bool AdvanceToNextPage();
 
   // Initializes fields.
-  void Initialize(Address start, Address end, HeapObjectCallback size_func);
+  inline void Initialize(PagedSpace* owner,
+                         Address start,
+                         Address end,
+                         PageMode mode,
+                         HeapObjectCallback size_func);
 
 #ifdef DEBUG
   // Verifies whether fields have valid values.
@@ -852,36 +1144,10 @@
 
 // -----------------------------------------------------------------------------
 // A PageIterator iterates the pages in a paged space.
-//
-// The PageIterator class provides three modes for iterating pages in a space:
-//   PAGES_IN_USE iterates pages containing allocated objects.
-//   PAGES_USED_BY_MC iterates pages that hold relocated objects during a
-//                    mark-compact collection.
-//   ALL_PAGES iterates all pages in the space.
-//
-// There are some caveats.
-//
-// (1) If the space expands during iteration, new pages will not be
-//     returned by the iterator in any mode.
-//
-// (2) If new objects are allocated during iteration, they will appear
-//     in pages returned by the iterator.  Allocation may cause the
-//     allocation pointer or MC allocation pointer in the last page to
-//     change between constructing the iterator and iterating the last
-//     page.
-//
-// (3) The space should not shrink during iteration, otherwise the
-//     iterator will return deallocated pages.
 
 class PageIterator BASE_EMBEDDED {
  public:
-  enum Mode {
-    PAGES_IN_USE,
-    PAGES_USED_BY_MC,
-    ALL_PAGES
-  };
-
-  PageIterator(PagedSpace* space, Mode mode);
+  explicit inline PageIterator(PagedSpace* space);
 
   inline bool has_next();
   inline Page* next();
@@ -889,21 +1155,25 @@
  private:
   PagedSpace* space_;
   Page* prev_page_;  // Previous page returned.
-  Page* stop_page_;  // Page to stop at (last page returned by the iterator).
+  // Next page that will be returned.  Cached here so that we can use this
+  // iterator for operations that deallocate pages.
+  Page* next_page_;
 };
 
 
 // -----------------------------------------------------------------------------
-// A space has a list of pages. The next page can be accessed via
-// Page::next_page() call. The next page of the last page is an
-// invalid page pointer. A space can expand and shrink dynamically.
+// A space has a circular list of pages. The next page can be accessed via
+// Page::next_page() call.
 
 // An abstraction of allocation and relocation pointers in a page-structured
 // space.
 class AllocationInfo {
  public:
-  Address top;  // current allocation top
-  Address limit;  // current allocation limit
+  AllocationInfo() : top(NULL), limit(NULL) {
+  }
+
+  Address top;  // Current allocation top.
+  Address limit;  // Current allocation limit.
 
 #ifdef DEBUG
   bool VerifyPagedAllocation() {
@@ -935,70 +1205,199 @@
   // Zero out all the allocation statistics (ie, no capacity).
   void Clear() {
     capacity_ = 0;
-    available_ = 0;
     size_ = 0;
     waste_ = 0;
   }
 
+  void ClearSizeWaste() {
+    size_ = capacity_;
+    waste_ = 0;
+  }
+
   // Reset the allocation statistics (ie, available = capacity with no
   // wasted or allocated bytes).
   void Reset() {
-    available_ = capacity_;
     size_ = 0;
     waste_ = 0;
   }
 
   // Accessors for the allocation statistics.
   intptr_t Capacity() { return capacity_; }
-  intptr_t Available() { return available_; }
   intptr_t Size() { return size_; }
   intptr_t Waste() { return waste_; }
 
-  // Grow the space by adding available bytes.
+  // Grow the space by adding available bytes.  They are initially marked as
+  // being in use (part of the size), but will normally be immediately freed,
+  // putting them on the free list and removing them from size_.
   void ExpandSpace(int size_in_bytes) {
     capacity_ += size_in_bytes;
-    available_ += size_in_bytes;
+    size_ += size_in_bytes;
+    ASSERT(size_ >= 0);
   }
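+
+  // Worked example (figures illustrative): adding a fresh page grows both
+  // capacity_ and size_ by the page's usable area via ExpandSpace().  When
+  // the sweeper then gives most of that area to the owning space's free list,
+  // DeallocateBytes() removes those bytes from size_ again, so size_ ends up
+  // tracking the bytes actually in use (plus any garbage that lazy sweeping
+  // has not yet discovered).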
 
-  // Shrink the space by removing available bytes.
+  // Shrink the space by removing available bytes.  Since shrinking is done
+  // during sweeping, bytes have been marked as being in use (part of the size)
+  // and are hereby freed.
   void ShrinkSpace(int size_in_bytes) {
     capacity_ -= size_in_bytes;
-    available_ -= size_in_bytes;
+    size_ -= size_in_bytes;
+    ASSERT(size_ >= 0);
   }
 
   // Allocate from available bytes (available -> size).
   void AllocateBytes(intptr_t size_in_bytes) {
-    available_ -= size_in_bytes;
     size_ += size_in_bytes;
+    ASSERT(size_ >= 0);
   }
 
   // Free allocated bytes, making them available (size -> available).
   void DeallocateBytes(intptr_t size_in_bytes) {
     size_ -= size_in_bytes;
-    available_ += size_in_bytes;
+    ASSERT(size_ >= 0);
   }
 
   // Waste free bytes (available -> waste).
   void WasteBytes(int size_in_bytes) {
-    available_ -= size_in_bytes;
+    size_ -= size_in_bytes;
     waste_ += size_in_bytes;
-  }
-
-  // Consider the wasted bytes to be allocated, as they contain filler
-  // objects (waste -> size).
-  void FillWastedBytes(intptr_t size_in_bytes) {
-    waste_ -= size_in_bytes;
-    size_ += size_in_bytes;
+    ASSERT(size_ >= 0);
   }
 
  private:
   intptr_t capacity_;
-  intptr_t available_;
   intptr_t size_;
   intptr_t waste_;
 };
 
 
+// -----------------------------------------------------------------------------
+// Free lists for old object spaces
+//
+// Free-list nodes are free blocks in the heap.  They look like heap objects
+// (free-list node pointers have the heap object tag, and they have a map like
+// a heap object).  They have a size and a next pointer.  The next pointer is
+// the raw address of the next free list node (or NULL).
+class FreeListNode: public HeapObject {
+ public:
+  // Obtain a free-list node from a raw address.  This is not a cast because
+  // it does not check nor require that the first word at the address is a map
+  // pointer.
+  static FreeListNode* FromAddress(Address address) {
+    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
+  }
+
+  static inline bool IsFreeListNode(HeapObject* object);
+
+  // Set the size in bytes, which can be read with HeapObject::Size().  This
+  // function also writes a map to the first word of the block so that it
+  // looks like a heap object to the garbage collector and heap iteration
+  // functions.
+  void set_size(Heap* heap, int size_in_bytes);
+
+  // Accessors for the next field.
+  inline FreeListNode* next();
+  inline FreeListNode** next_address();
+  inline void set_next(FreeListNode* next);
+
+  inline void Zap();
+
+ private:
+  static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
+};
+
+
+// The free list for the old space.  The free list is organized in such a way
+// as to encourage objects allocated around the same time to be near each
+// other.  The normal way to allocate is intended to be by bumping a 'top'
+// pointer until it hits a 'limit' pointer.  When the limit is hit we need to
+// find a new space to allocate from.  This is done with the free list, which
+// is divided up into rough categories to cut down on waste.  Having finer
+// categories would scatter allocation more.
+
+// The old space free list is organized in categories.
+// 1-31 words:  Such small free areas are discarded for efficiency reasons.
+//     They can be reclaimed by the compactor.  However the distance between top
+//     and limit may be this small.
+// 32-255 words: There is a list of spaces this large.  It is used for top and
+//     limit when the object we need to allocate is 1-31 words in size.  These
+//     spaces are called small.
+// 256-2047 words: There is a list of spaces this large.  It is used for top and
+//     limit when the object we need to allocate is 32-255 words in size.  These
+//     spaces are called medium.
+// 2048-16383 words: There is a list of spaces this large.  It is used for top
+//     and limit when the object we need to allocate is 256-2047 words in size.
+//     These spaces are called large.
+// At least 16384 words: This list is for objects of 2048 words or larger.
+//     Empty pages are added to this list.  These spaces are called huge.
+class FreeList BASE_EMBEDDED {
+ public:
+  explicit FreeList(PagedSpace* owner);
+
+  // Clear the free list.
+  void Reset();
+
+  // Return the number of bytes available on the free list.
+  intptr_t available() { return available_; }
+
+  // Place a node on the free list.  The block of size 'size_in_bytes'
+  // starting at 'start' is placed on the free list.  The return value is the
+  // number of bytes that have been lost due to internal fragmentation by
+  // freeing the block.  Bookkeeping information will be written to the block,
+  // ie, its contents will be destroyed.  The start address should be word
+  // aligned, and the size should be a non-zero multiple of the word size.
+  int Free(Address start, int size_in_bytes);
+
+  // Allocate a block of size 'size_in_bytes' from the free list.  The block
+  // is uninitialized.  A failure is returned if no block is available.  The
+  // size should be a non-zero multiple of the word size.
+  MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
+
+  void MarkNodes();
+
+#ifdef DEBUG
+  void Zap();
+  static intptr_t SumFreeList(FreeListNode* node);
+  static int FreeListLength(FreeListNode* cur);
+  intptr_t SumFreeLists();
+  bool IsVeryLong();
+#endif
+
+  void CountFreeListItems(Page* p, intptr_t* sizes);
+
+ private:
+  // The size range of blocks, in bytes.
+  static const int kMinBlockSize = 3 * kPointerSize;
+  static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
+
+  FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size);
+
+  FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
+
+  PagedSpace* owner_;
+  Heap* heap_;
+
+  // Total available bytes in all blocks on this free list.
+  int available_;
+
+  static const int kSmallListMin = 0x20 * kPointerSize;
+  static const int kSmallListMax = 0xff * kPointerSize;
+  static const int kMediumListMax = 0x7ff * kPointerSize;
+  static const int kLargeListMax = 0x3fff * kPointerSize;
+  static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
+  static const int kMediumAllocationMax = kSmallListMax;
+  static const int kLargeAllocationMax = kMediumListMax;
+  FreeListNode* small_list_;
+  FreeListNode* medium_list_;
+  FreeListNode* large_list_;
+  FreeListNode* huge_list_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
+};
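+
+// Worked example of the categories above (figures illustrative): freeing a
+// 300-word block puts it on the medium list (blocks of 256-2047 words); a
+// later request for a 100-word object is served from that block, since
+// allocations of 32-255 words draw from the medium list.  A 20-word remainder
+// is below kSmallListMin and is simply dropped as waste.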
+
+
 class PagedSpace : public Space {
  public:
   // Creates a space with a maximum capacity, and an id.
@@ -1013,7 +1412,7 @@
   // the memory allocator's initial chunk) if possible.  If the block of
   // addresses is not big enough to contain a single page-aligned page, a
   // fresh chunk will be allocated.
-  bool Setup(Address start, size_t size);
+  bool Setup();
 
   // Returns true if the space has been successfully set up and not
   // subsequently torn down.
@@ -1026,8 +1425,6 @@
   // Checks whether an object/address is in this space.
   inline bool Contains(Address a);
   bool Contains(HeapObject* o) { return Contains(o->address()); }
-  // Never crashes even if a is not a valid pointer.
-  inline bool SafeContains(Address a);
 
   // Given an address occupied by a live object, return that object if it is
   // in this space, or Failure::Exception() if it is not. The implementation
@@ -1035,104 +1432,91 @@
   // linear in the number of objects in the page. It may be slow.
   MUST_USE_RESULT MaybeObject* FindObject(Address addr);
 
-  // Checks whether page is currently in use by this space.
-  bool IsUsed(Page* page);
-
-  void MarkAllPagesClean();
-
   // Prepares for a mark-compact GC.
-  virtual void PrepareForMarkCompact(bool will_compact);
+  virtual void PrepareForMarkCompact();
 
-  // The top of allocation in a page in this space. Undefined if page is unused.
-  Address PageAllocationTop(Page* page) {
-    return page == TopPageOf(allocation_info_) ? top()
-        : PageAllocationLimit(page);
-  }
-
-  // The limit of allocation for a page in this space.
-  virtual Address PageAllocationLimit(Page* page) = 0;
-
-  void FlushTopPageWatermark() {
-    AllocationTopPage()->SetCachedAllocationWatermark(top());
-    AllocationTopPage()->InvalidateWatermark(true);
-  }
-
-  // Current capacity without growing (Size() + Available() + Waste()).
+  // Current capacity without growing (Size() + Available()).
   intptr_t Capacity() { return accounting_stats_.Capacity(); }
 
   // Total amount of memory committed for this space.  For paged
   // spaces this equals the capacity.
   intptr_t CommittedMemory() { return Capacity(); }
 
-  // Available bytes without growing.
-  intptr_t Available() { return accounting_stats_.Available(); }
+  // Sets the size to the capacity (everything is counted as being in use)
+  // and the wasted space to zero.  As free spaces are discovered during
+  // sweeping they are subtracted from the size and added to the available
+  // and wasted totals.
+  void ClearStats() {
+    accounting_stats_.ClearSizeWaste();
+  }
 
-  // Allocated bytes in this space.
+  // Available bytes without growing.  These are the bytes on the free list.
+  // The bytes in the linear allocation area are not included in this total
+  // because updating the stats would slow down allocation.  New pages are
+  // immediately added to the free list so they show up here.
+  intptr_t Available() { return free_list_.available(); }
+
+  // Allocated bytes in this space.  Garbage bytes that were not found due to
+  // lazy sweeping are counted as being allocated!  The bytes in the current
+  // linear allocation area (between top and limit) are also counted here.
   virtual intptr_t Size() { return accounting_stats_.Size(); }
 
-  // Wasted bytes due to fragmentation and not recoverable until the
-  // next GC of this space.
-  intptr_t Waste() { return accounting_stats_.Waste(); }
+  // As Size(), but the bytes in the current linear allocation area are not
+  // included.
+  virtual intptr_t SizeOfObjects() { return Size() - (limit() - top()); }
 
-  // Returns the address of the first object in this space.
-  Address bottom() { return first_page_->ObjectAreaStart(); }
+  // Wasted bytes in this space.  These are just the bytes that were thrown away
+  // due to being too small to use for allocation.  They do not include the
+  // free bytes that were not found at all due to lazy sweeping.
+  virtual intptr_t Waste() { return accounting_stats_.Waste(); }
 
   // Returns the allocation pointer in this space.
-  Address top() { return allocation_info_.top; }
+  Address top() {
+    return allocation_info_.top;
+  }
+  Address limit() { return allocation_info_.limit; }
 
   // Allocate the requested number of bytes in the space if possible, return a
   // failure object if not.
   MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
 
-  // Allocate the requested number of bytes for relocation during mark-compact
-  // collection.
-  MUST_USE_RESULT inline MaybeObject* MCAllocateRaw(int size_in_bytes);
-
   virtual bool ReserveSpace(int bytes);
 
-  // Used by ReserveSpace.
-  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;
-
-  // Free all pages in range from prev (exclusive) to last (inclusive).
-  // Freed pages are moved to the end of page list.
-  void FreePages(Page* prev, Page* last);
-
-  // Deallocates a block.
-  virtual void DeallocateBlock(Address start,
-                               int size_in_bytes,
-                               bool add_to_freelist) = 0;
+  // Give a block of memory to the space's free list.  It might be added to
+  // the free list or accounted as waste.  Returns the number of bytes added
+  // to the free list, i.e. size_in_bytes minus the bytes lost as waste.
+  int Free(Address start, int size_in_bytes) {
+    int wasted = free_list_.Free(start, size_in_bytes);
+    accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
+    return size_in_bytes - wasted;
+  }
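+
+  // For example (illustrative): when the sweeper encounters a dead region it
+  // calls Free(region_start, region_size).  A region of at least
+  // kSmallListMin bytes lands on free_list_ and is subtracted from the
+  // accounting size; a smaller region counts entirely as waste and the call
+  // returns 0.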
 
   // Set space allocation info.
-  void SetTop(Address top) {
+  void SetTop(Address top, Address limit) {
+    ASSERT(top == limit ||
+           Page::FromAddress(top) == Page::FromAddress(limit - 1));
     allocation_info_.top = top;
-    allocation_info_.limit = PageAllocationLimit(Page::FromAllocationTop(top));
+    allocation_info_.limit = limit;
   }
 
-  // ---------------------------------------------------------------------------
-  // Mark-compact collection support functions
-
-  // Set the relocation point to the beginning of the space.
-  void MCResetRelocationInfo();
-
-  // Writes relocation info to the top page.
-  void MCWriteRelocationInfoToPage() {
-    TopPageOf(mc_forwarding_info_)->
-        SetAllocationWatermark(mc_forwarding_info_.top);
+  void Allocate(int bytes) {
+    accounting_stats_.AllocateBytes(bytes);
   }
 
-  // Computes the offset of a given address in this space to the beginning
-  // of the space.
-  int MCSpaceOffsetForAddress(Address addr);
+  void IncreaseCapacity(int size) {
+    accounting_stats_.ExpandSpace(size);
+  }
 
-  // Updates the allocation pointer to the relocation top after a mark-compact
-  // collection.
-  virtual void MCCommitRelocationInfo() = 0;
+  // Releases an unused page and shrinks the space.
+  void ReleasePage(Page* page);
 
-  // Releases half of unused pages.
-  void Shrink();
+  // Releases all of the unused pages.
+  void ReleaseAllUnusedPages();
 
-  // Ensures that the capacity is at least 'capacity'. Returns false on failure.
-  bool EnsureCapacity(int capacity);
+  // The dummy page that anchors the linked list of pages.
+  Page* anchor() { return &anchor_; }
 
 #ifdef DEBUG
   // Print meta info and objects in this space.
@@ -1141,6 +1525,9 @@
   // Verify integrity of this space.
   virtual void Verify(ObjectVisitor* visitor);
 
+  // Reports statistics for the space
+  void ReportStatistics();
+
   // Overridden by subclasses to verify space-specific object
   // properties (e.g., only maps or free-list nodes are in map space).
   virtual void VerifyObject(HeapObject* obj) {}
@@ -1151,10 +1538,67 @@
   static void ResetCodeStatistics();
 #endif
 
-  // Returns the page of the allocation pointer.
-  Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
+  bool was_swept_conservatively() { return was_swept_conservatively_; }
+  void set_was_swept_conservatively(bool b) { was_swept_conservatively_ = b; }
 
-  void RelinkPageListInChunkOrder(bool deallocate_blocks);
+  // Evacuation candidates are swept by evacuator.  Needs to return a valid
+  // result before _and_ after evacuation has finished.
+  static bool ShouldBeSweptLazily(Page* p) {
+    return !p->IsEvacuationCandidate() &&
+           !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
+           !p->WasSweptPrecisely();
+  }
+
+  void SetPagesToSweep(Page* first, Page* last) {
+    first_unswept_page_ = first;
+    last_unswept_page_ = last;
+  }
+
+  bool AdvanceSweeper(intptr_t bytes_to_sweep);
+
+  bool IsSweepingComplete() {
+    return !first_unswept_page_->is_valid();
+  }
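+
+  // Sketch of the intended lazy sweeping flow (illustrative; the collector
+  // side is not shown in this header): after marking, the collector records
+  // the unswept range with SetPagesToSweep(first, last), and the space later
+  // calls AdvanceSweeper(bytes_budget) in increments until
+  // IsSweepingComplete() returns true.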
+
+  Page* FirstPage() { return anchor_.next_page(); }
+  Page* LastPage() { return anchor_.prev_page(); }
+
+  bool IsFragmented(Page* p) {
+    intptr_t sizes[4];
+    free_list_.CountFreeListItems(p, sizes);
+
+    intptr_t ratio;
+    intptr_t ratio_threshold;
+    if (identity() == CODE_SPACE) {
+      ratio = (sizes[1] * 10 + sizes[2] * 2) * 100 / Page::kObjectAreaSize;
+      ratio_threshold = 10;
+    } else {
+      ratio = (sizes[0] * 5 + sizes[1]) * 100 / Page::kObjectAreaSize;
+      ratio_threshold = 15;
+    }
+
+    if (FLAG_trace_fragmentation) {
+      PrintF("%p [%d]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
+             reinterpret_cast<void*>(p),
+             identity(),
+             static_cast<int>(sizes[0]),
+             static_cast<double>(sizes[0] * 100) / Page::kObjectAreaSize,
+             static_cast<int>(sizes[1]),
+             static_cast<double>(sizes[1] * 100) / Page::kObjectAreaSize,
+             static_cast<int>(sizes[2]),
+             static_cast<double>(sizes[2] * 100) / Page::kObjectAreaSize,
+             static_cast<int>(sizes[3]),
+             static_cast<double>(sizes[3] * 100) / Page::kObjectAreaSize,
+             (ratio > ratio_threshold) ? "[fragmented]" : "");
+    }
+
+    return (ratio > ratio_threshold) ||
+        (FLAG_always_compact && sizes[3] != Page::kObjectAreaSize);
+  }
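+
+  // Worked example (illustrative, non-code space): if CountFreeListItems
+  // reports sizes[0] at roughly 2% of Page::kObjectAreaSize and sizes[1] at
+  // roughly 6%, then ratio is about 2 * 5 + 6 = 16, which exceeds the
+  // threshold of 15, so IsFragmented() returns true for the page.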
+
+  void EvictEvacuationCandidatesFromFreeLists();
+
+  bool CanExpand();
 
  protected:
   // Maximum capacity of this space.
@@ -1163,80 +1607,42 @@
   // Accounting information for this space.
   AllocationStats accounting_stats_;
 
-  // The first page in this space.
-  Page* first_page_;
+  // The dummy page that anchors the double linked list of pages.
+  Page anchor_;
 
-  // The last page in this space.  Initially set in Setup, updated in
-  // Expand and Shrink.
-  Page* last_page_;
-
-  // True if pages owned by this space are linked in chunk-order.
-  // See comment for class MemoryAllocator for definition of chunk-order.
-  bool page_list_is_chunk_ordered_;
+  // The space's free list.
+  FreeList free_list_;
 
   // Normal allocation information.
   AllocationInfo allocation_info_;
 
-  // Relocation information during mark-compact collections.
-  AllocationInfo mc_forwarding_info_;
-
   // Bytes of each page that cannot be allocated.  Possibly non-zero
   // for pages in spaces with only fixed-size objects.  Always zero
   // for pages in spaces with variable sized objects (those pages are
   // padded with free-list nodes).
   int page_extra_;
 
-  // Sets allocation pointer to a page bottom.
-  static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);
+  bool was_swept_conservatively_;
 
-  // Returns the top page specified by an allocation info structure.
-  static Page* TopPageOf(AllocationInfo alloc_info) {
-    return Page::FromAllocationTop(alloc_info.limit);
-  }
-
-  int CountPagesToTop() {
-    Page* p = Page::FromAllocationTop(allocation_info_.top);
-    PageIterator it(this, PageIterator::ALL_PAGES);
-    int counter = 1;
-    while (it.has_next()) {
-      if (it.next() == p) return counter;
-      counter++;
-    }
-    UNREACHABLE();
-    return -1;
-  }
+  Page* first_unswept_page_;
+  Page* last_unswept_page_;
 
   // Expands the space by allocating a fixed number of pages. Returns false if
-  // it cannot allocate requested number of pages from OS. Newly allocated
-  // pages are append to the last_page;
-  bool Expand(Page* last_page);
+  // it cannot allocate requested number of pages from OS.
+  bool Expand();
 
-  // Generic fast case allocation function that tries linear allocation in
-  // the top page of 'alloc_info'.  Returns NULL on failure.
-  inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
-                                      int size_in_bytes);
-
-  // During normal allocation or deserialization, roll to the next page in
-  // the space (there is assumed to be one) and allocate there.  This
-  // function is space-dependent.
-  virtual HeapObject* AllocateInNextPage(Page* current_page,
-                                         int size_in_bytes) = 0;
+  // Generic fast case allocation function that tries linear allocation at the
+  // address denoted by top in allocation_info_.
+  inline HeapObject* AllocateLinearly(int size_in_bytes);
 
   // Slow path of AllocateRaw.  This function is space-dependent.
-  MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;
-
-  // Slow path of MCAllocateRaw.
-  MUST_USE_RESULT HeapObject* SlowMCAllocateRaw(int size_in_bytes);
+  MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
 
 #ifdef DEBUG
   // Returns the number of total pages in this space.
   int CountTotalPages();
 #endif
 
- private:
-  // Returns a pointer to the page of the relocation pointer.
-  Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
-
   friend class PageIterator;
 };
 
@@ -1276,20 +1682,113 @@
 };
 
 
+enum SemiSpaceId {
+  kFromSpace = 0,
+  kToSpace = 1
+};
+
+
+class SemiSpace;
+
+
+class NewSpacePage : public MemoryChunk {
+ public:
+  // GC related flags copied from from-space to to-space when
+  // flipping semispaces.
+  static const intptr_t kCopyOnFlipFlagsMask =
+    (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
+    (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
+    (1 << MemoryChunk::SCAN_ON_SCAVENGE);
+
+  inline NewSpacePage* next_page() const {
+    return static_cast<NewSpacePage*>(next_chunk());
+  }
+
+  inline void set_next_page(NewSpacePage* page) {
+    set_next_chunk(page);
+  }
+
+  inline NewSpacePage* prev_page() const {
+    return static_cast<NewSpacePage*>(prev_chunk());
+  }
+
+  inline void set_prev_page(NewSpacePage* page) {
+    set_prev_chunk(page);
+  }
+
+  SemiSpace* semi_space() {
+    return reinterpret_cast<SemiSpace*>(owner());
+  }
+
+  bool is_anchor() { return !this->InNewSpace(); }
+
+  static bool IsAtStart(Address addr) {
+    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask)
+        == kObjectStartOffset;
+  }
+
+  static bool IsAtEnd(Address addr) {
+    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
+  }
+
+  Address address() {
+    return reinterpret_cast<Address>(this);
+  }
+
+  // Finds the NewSpacePage containing the given address.
+  static inline NewSpacePage* FromAddress(Address address_in_page) {
+    Address page_start =
+        reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
+                                  ~Page::kPageAlignmentMask);
+    NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
+    ASSERT(page->InNewSpace());
+    return page;
+  }
+
+  // Find the page for a limit address. A limit address is either an address
+  // inside a page, or the address right after the last byte of a page.
+  static inline NewSpacePage* FromLimit(Address address_limit) {
+    return NewSpacePage::FromAddress(address_limit - 1);
+  }
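+
+  // Example (illustrative): an address exactly one past the last byte of a
+  // page has all of its Page::kPageAlignmentMask bits clear, so IsAtEnd() is
+  // true for it and FromAddress() would resolve it to the *following* page.
+  // FromLimit() therefore steps back one byte so that a limit address maps to
+  // the page it terminates.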
+
+ private:
+  // Create a NewSpacePage object that is only used as anchor
+  // for the doubly-linked list of real pages.
+  explicit NewSpacePage(SemiSpace* owner) {
+    InitializeAsAnchor(owner);
+  }
+
+  static NewSpacePage* Initialize(Heap* heap,
+                                  Address start,
+                                  SemiSpace* semi_space);
+
+  // Initialize a fake NewSpacePage used as a sentinel at the ends
+  // of a doubly-linked list of real NewSpacePages.
+  // Only uses the prev/next links, and sets flags to not be in new-space.
+  void InitializeAsAnchor(SemiSpace* owner);
+
+  friend class SemiSpace;
+  friend class SemiSpaceIterator;
+};
+
+
 // -----------------------------------------------------------------------------
 // SemiSpace in young generation
 //
-// A semispace is a contiguous chunk of memory. The mark-compact collector
-// uses the memory in the from space as a marking stack when tracing live
-// objects.
+// A semispace is a contiguous chunk of memory holding page-like memory
+// chunks. The mark-compact collector uses the memory of the first page in
+// the from space as a marking stack when tracing live objects.
 
 class SemiSpace : public Space {
  public:
   // Constructor.
-  explicit SemiSpace(Heap* heap) : Space(heap, NEW_SPACE, NOT_EXECUTABLE) {
-    start_ = NULL;
-    age_mark_ = NULL;
-  }
+  SemiSpace(Heap* heap, SemiSpaceId semispace)
+    : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
+      start_(NULL),
+      age_mark_(NULL),
+      id_(semispace),
+      anchor_(this),
+      current_page_(NULL) { }
 
   // Sets up the semispace using the given chunk.
   bool Setup(Address start, int initial_capacity, int maximum_capacity);
@@ -1301,14 +1800,9 @@
   // True if the space has been set up but not torn down.
   bool HasBeenSetup() { return start_ != NULL; }
 
-  // Grow the size of the semispace by committing extra virtual memory.
-  // Assumes that the caller has checked that the semispace has not reached
-  // its maximum capacity (and thus there is space available in the reserved
-  // address range to grow).
-  bool Grow();
-
   // Grow the semispace to the new capacity.  The new capacity
-  // requested must be larger than the current capacity.
+  // requested must be larger than the current capacity and less than
+  // the maximum capacity.
   bool GrowTo(int new_capacity);
 
   // Shrinks the semispace to the new capacity.  The new capacity
@@ -1316,14 +1810,41 @@
   // semispace and less than the current capacity.
   bool ShrinkTo(int new_capacity);
 
-  // Returns the start address of the space.
-  Address low() { return start_; }
+  // Returns the start address of the first page of the space.
+  Address space_start() {
+    ASSERT(anchor_.next_page() != &anchor_);
+    return anchor_.next_page()->body();
+  }
+
+  // Returns the start address of the current page of the space.
+  Address page_low() {
+    ASSERT(anchor_.next_page() != &anchor_);
+    return current_page_->body();
+  }
+
   // Returns one past the end address of the space.
-  Address high() { return low() + capacity_; }
+  Address space_end() {
+    return anchor_.prev_page()->body_limit();
+  }
+
+  // Returns one past the end address of the current page of the space.
+  Address page_high() {
+    return current_page_->body_limit();
+  }
+
+  bool AdvancePage() {
+    NewSpacePage* next_page = current_page_->next_page();
+    if (next_page == anchor()) return false;
+    current_page_ = next_page;
+    return true;
+  }
+
+  // Resets the space to using the first page.
+  void Reset();
 
   // Age mark accessors.
   Address age_mark() { return age_mark_; }
-  void set_age_mark(Address mark) { age_mark_ = mark; }
+  void set_age_mark(Address mark);
 
   // True if the address is in the address range of this semispace (not
   // necessarily below the allocation pointer).
@@ -1338,11 +1859,6 @@
     return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
   }
 
-  // The offset of an address from the beginning of the space.
-  int SpaceOffsetForAddress(Address addr) {
-    return static_cast<int>(addr - low());
-  }
-
   // If we don't have these here then SemiSpace will be abstract.  However
   // they should never be called.
   virtual intptr_t Size() {
@@ -1359,9 +1875,19 @@
   bool Commit();
   bool Uncommit();
 
+  NewSpacePage* first_page() { return anchor_.next_page(); }
+  NewSpacePage* current_page() { return current_page_; }
+
 #ifdef DEBUG
   virtual void Print();
   virtual void Verify();
+  // Validate a range of addresses in a SemiSpace.
+  // The "from" address must be on a page prior to the "to" address,
+  // in the linked page order, or it must be earlier on the same page.
+  static void AssertValidRange(Address from, Address to);
+#else
+  // Do nothing.
+  inline static void AssertValidRange(Address from, Address to) {}
 #endif
 
   // Returns the current capacity of the semi space.
@@ -1373,7 +1899,17 @@
   // Returns the initial capacity of the semi space.
   int InitialCapacity() { return initial_capacity_; }
 
+  SemiSpaceId id() { return id_; }
+
+  static void Swap(SemiSpace* from, SemiSpace* to);
+
  private:
+  // Flips the semispace between being from-space and to-space.
+  // Copies the flags into the masked positions on all pages in the space.
+  void FlipPages(intptr_t flags, intptr_t flag_mask);
+
+  NewSpacePage* anchor() { return &anchor_; }
+
   // The current and maximum capacity of the space.
   int capacity_;
   int maximum_capacity_;
@@ -1390,7 +1926,13 @@
   uintptr_t object_expected_;
 
   bool committed_;
+  SemiSpaceId id_;
 
+  NewSpacePage anchor_;
+  NewSpacePage* current_page_;
+
+  friend class SemiSpaceIterator;
+  friend class NewSpacePageIterator;
  public:
   TRACK_MEMORY("SemiSpace")
 };
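A rough sketch, not taken from the patch, of how a bump-pointer allocator could walk the paged semispace using only the accessors declared above; the helper name and the NULL-on-failure convention are illustrative:

Address BumpAllocate(SemiSpace* space, int size_in_bytes) {
  Address top = space->page_low();            // start of the current page
  for (;;) {
    Address limit = space->page_high();       // end of the current page
    if (top + size_in_bytes <= limit) {
      Address result = top;
      top += size_in_bytes;                   // bump the pointer
      return result;
    }
    // Current page exhausted: advance to the next page, if there is one.
    if (!space->AdvancePage()) return NULL;   // semispace is full
    top = space->page_low();
  }
}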
@@ -1406,12 +1948,26 @@
   // Create an iterator over the objects in the given space.  If no start
   // address is given, the iterator starts from the bottom of the space.  If
   // no size function is given, the iterator calls Object::Size().
-  explicit SemiSpaceIterator(NewSpace* space);
-  SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
-  SemiSpaceIterator(NewSpace* space, Address start);
 
-  HeapObject* next() {
+  // Iterate over all of allocated to-space.
+  explicit SemiSpaceIterator(NewSpace* space);
+  // Iterate over all of allocated to-space, with a custom size function.
+  SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
+  // Iterate over part of allocated to-space, from start to the end
+  // of allocation.
+  SemiSpaceIterator(NewSpace* space, Address start);
+  // Iterate from one address to another in the same semi-space.
+  SemiSpaceIterator(Address from, Address to);
+
+  HeapObject* Next() {
     if (current_ == limit_) return NULL;
+    if (NewSpacePage::IsAtEnd(current_)) {
+      NewSpacePage* page = NewSpacePage::FromLimit(current_);
+      page = page->next_page();
+      ASSERT(!page->is_anchor());
+      current_ = page->body();
+      if (current_ == limit_) return NULL;
+    }
 
     HeapObject* object = HeapObject::FromAddress(current_);
     int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
@@ -1421,14 +1977,13 @@
   }
 
   // Implementation of the ObjectIterator functions.
-  virtual HeapObject* next_object() { return next(); }
+  virtual HeapObject* next_object() { return Next(); }
 
  private:
-  void Initialize(NewSpace* space, Address start, Address end,
+  void Initialize(Address start,
+                  Address end,
                   HeapObjectCallback size_func);
 
-  // The semispace.
-  SemiSpace* space_;
   // The current iteration point.
   Address current_;
   // The end of iteration.
@@ -1439,6 +1994,34 @@
 
 
 // -----------------------------------------------------------------------------
+// A PageIterator iterates the pages in a semi-space.
+class NewSpacePageIterator BASE_EMBEDDED {
+ public:
+  // Make an iterator that runs over all pages in to-space.
+  explicit inline NewSpacePageIterator(NewSpace* space);
+
+  // Make an iterator that runs over all pages in the given semispace,
+  // even those not used in allocation.
+  explicit inline NewSpacePageIterator(SemiSpace* space);
+
+  // Make an iterator that iterates from the page containing start
+  // to the page containing limit, in the same semispace.
+  inline NewSpacePageIterator(Address start, Address limit);
+
+  inline bool has_next();
+  inline NewSpacePage* next();
+
+ private:
+  NewSpacePage* prev_page_;  // Previous page returned.
+  // Next page that will be returned.  Cached here so that we can use this
+  // iterator for operations that deallocate pages.
+  NewSpacePage* next_page_;
+  // Last page to be returned by the iterator.
+  NewSpacePage* last_page_;
+};
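A small usage sketch for the iterator declared above (assumes the V8 internal headers; USE() is the existing no-op macro):

int CountPages(SemiSpace* space) {
  int pages = 0;
  NewSpacePageIterator it(space);
  while (it.has_next()) {
    NewSpacePage* page = it.next();
    USE(page);   // a real caller would inspect or process the page here
    pages++;
  }
  return pages;
}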
+
+
+// -----------------------------------------------------------------------------
 // The young generation space.
 //
 // The new space consists of a contiguous pair of semispaces.  It simply
@@ -1449,11 +2032,13 @@
   // Constructor.
   explicit NewSpace(Heap* heap)
     : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
-      to_space_(heap),
-      from_space_(heap) {}
+      to_space_(heap, kToSpace),
+      from_space_(heap, kFromSpace),
+      reservation_(),
+      inline_allocation_limit_step_(0) {}
 
   // Sets up the new space using the given chunk.
-  bool Setup(Address start, int size);
+  bool Setup(int reserved_semispace_size_, int max_semispace_size);
 
   // Tears down the space.  Heap memory was not allocated by the space, so it
   // is not deallocated here.
@@ -1480,18 +2065,30 @@
     return (reinterpret_cast<uintptr_t>(a) & address_mask_)
         == reinterpret_cast<uintptr_t>(start_);
   }
+
   bool Contains(Object* o) {
-    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
+    Address a = reinterpret_cast<Address>(o);
+    return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
   }
 
   // Return the allocated bytes in the active semispace.
-  virtual intptr_t Size() { return static_cast<int>(top() - bottom()); }
+  virtual intptr_t Size() {
+    return pages_used_ * Page::kObjectAreaSize +
+        static_cast<int>(top() - to_space_.page_low());
+  }
+
   // The same, but returning an int.  We have to have the one that returns
   // intptr_t because it is inherited, but if we know we are dealing with the
   // new space, which can't get as big as the other spaces then this is useful:
   int SizeAsInt() { return static_cast<int>(Size()); }
 
   // Return the current capacity of a semispace.
+  intptr_t EffectiveCapacity() {
+    ASSERT(to_space_.Capacity() == from_space_.Capacity());
+    return (to_space_.Capacity() / Page::kPageSize) * Page::kObjectAreaSize;
+  }
+
+  // Return the current capacity of a semispace.
   intptr_t Capacity() {
     ASSERT(to_space_.Capacity() == from_space_.Capacity());
     return to_space_.Capacity();
@@ -1503,8 +2100,11 @@
     return Capacity();
   }
 
-  // Return the available bytes without growing in the active semispace.
-  intptr_t Available() { return Capacity() - Size(); }
+  // Return the available bytes without growing or switching page in the
+  // active semispace.
+  intptr_t Available() {
+    return allocation_info_.limit - allocation_info_.top;
+  }
 
   // Return the maximum capacity of a semispace.
   int MaximumCapacity() {
@@ -1519,9 +2119,12 @@
   }
 
   // Return the address of the allocation pointer in the active semispace.
-  Address top() { return allocation_info_.top; }
+  Address top() {
+    ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top));
+    return allocation_info_.top;
+  }
   // Return the address of the first object in the active semispace.
-  Address bottom() { return to_space_.low(); }
+  Address bottom() { return to_space_.space_start(); }
 
   // Get the age mark of the inactive semispace.
   Address age_mark() { return from_space_.age_mark(); }
@@ -1533,54 +2136,70 @@
   Address start() { return start_; }
   uintptr_t mask() { return address_mask_; }
 
+  INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
+    ASSERT(Contains(addr));
+    ASSERT(IsAligned(OffsetFrom(addr), kPointerSize) ||
+           IsAligned(OffsetFrom(addr) - 1, kPointerSize));
+    return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
+  }
+
+  INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
+    return reinterpret_cast<Address>(index << kPointerSizeLog2);
+  }
+
   // The allocation top and limit addresses.
   Address* allocation_top_address() { return &allocation_info_.top; }
   Address* allocation_limit_address() { return &allocation_info_.limit; }
 
   MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes) {
-    return AllocateRawInternal(size_in_bytes, &allocation_info_);
-  }
-
-  // Allocate the requested number of bytes for relocation during mark-compact
-  // collection.
-  MUST_USE_RESULT MaybeObject* MCAllocateRaw(int size_in_bytes) {
-    return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
+    return AllocateRawInternal(size_in_bytes);
   }
 
   // Reset the allocation pointer to the beginning of the active semispace.
   void ResetAllocationInfo();
-  // Reset the reloction pointer to the bottom of the inactive semispace in
-  // preparation for mark-compact collection.
-  void MCResetRelocationInfo();
-  // Update the allocation pointer in the active semispace after a
-  // mark-compact collection.
-  void MCCommitRelocationInfo();
 
-  // Get the extent of the inactive semispace (for use as a marking stack).
-  Address FromSpaceLow() { return from_space_.low(); }
-  Address FromSpaceHigh() { return from_space_.high(); }
-
-  // Get the extent of the active semispace (to sweep newly copied objects
-  // during a scavenge collection).
-  Address ToSpaceLow() { return to_space_.low(); }
-  Address ToSpaceHigh() { return to_space_.high(); }
-
-  // Offsets from the beginning of the semispaces.
-  int ToSpaceOffsetForAddress(Address a) {
-    return to_space_.SpaceOffsetForAddress(a);
+  void LowerInlineAllocationLimit(intptr_t step) {
+    inline_allocation_limit_step_ = step;
+    if (step == 0) {
+      allocation_info_.limit = to_space_.page_high();
+    } else {
+      allocation_info_.limit = Min(
+          allocation_info_.top + inline_allocation_limit_step_,
+          allocation_info_.limit);
+    }
+    top_on_previous_step_ = allocation_info_.top;
   }
-  int FromSpaceOffsetForAddress(Address a) {
-    return from_space_.SpaceOffsetForAddress(a);
+
+  // Get the extent of the inactive semispace (for use as a marking stack,
+  // or to zap it). Notice: space-addresses are not necessarily on the
+  // same page, so FromSpaceStart() might be above FromSpaceEnd().
+  Address FromSpacePageLow() { return from_space_.page_low(); }
+  Address FromSpacePageHigh() { return from_space_.page_high(); }
+  Address FromSpaceStart() { return from_space_.space_start(); }
+  Address FromSpaceEnd() { return from_space_.space_end(); }
+
+  // Get the extent of the active semispace's pages' memory.
+  Address ToSpaceStart() { return to_space_.space_start(); }
+  Address ToSpaceEnd() { return to_space_.space_end(); }
+
+  inline bool ToSpaceContains(Address address) {
+    return to_space_.Contains(address);
+  }
+  inline bool FromSpaceContains(Address address) {
+    return from_space_.Contains(address);
   }
 
   // True if the object is a heap object in the address range of the
   // respective semispace (not necessarily below the allocation pointer of the
   // semispace).
-  bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
-  bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
+  inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
+  inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
 
-  bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
-  bool FromSpaceContains(Address a) { return from_space_.Contains(a); }
+  // Try to switch the active semispace to a new, empty, page.
+  // Returns false if this isn't possible or reasonable (i.e., there
+  // are no pages, or the current page is already empty), or true
+  // if successful.
+  bool AddFreshPage();
 
   virtual bool ReserveSpace(int bytes);
 
@@ -1620,10 +2239,24 @@
     return from_space_.Uncommit();
   }
 
+  inline intptr_t inline_allocation_limit_step() {
+    return inline_allocation_limit_step_;
+  }
+
+  SemiSpace* active_space() { return &to_space_; }
+
  private:
+  // Update allocation info to match the current to-space page.
+  void UpdateAllocationInfo();
+
+  Address chunk_base_;
+  uintptr_t chunk_size_;
+
   // The semispaces.
   SemiSpace to_space_;
   SemiSpace from_space_;
+  VirtualMemory reservation_;
+  int pages_used_;
 
   // Start address and bit mask for containment testing.
   Address start_;
@@ -1634,15 +2267,20 @@
   // Allocation pointer and limit for normal allocation and allocation during
   // mark-compact collection.
   AllocationInfo allocation_info_;
-  AllocationInfo mc_forwarding_info_;
+
+  // When incremental marking is active we will set allocation_info_.limit
+  // to be lower than actual limit and then will gradually increase it
+  // in steps to guarantee that we do incremental marking steps even
+  // when all allocation is performed from inlined generated code.
+  intptr_t inline_allocation_limit_step_;
+
+  Address top_on_previous_step_;
 
   HistogramInfo* allocated_histogram_;
   HistogramInfo* promoted_histogram_;
 
-  // Implementation of AllocateRaw and MCAllocateRaw.
-  MUST_USE_RESULT inline MaybeObject* AllocateRawInternal(
-      int size_in_bytes,
-      AllocationInfo* alloc_info);
+  // Implementation of AllocateRaw.
+  MUST_USE_RESULT inline MaybeObject* AllocateRawInternal(int size_in_bytes);
 
   friend class SemiSpaceIterator;
 
@@ -1652,193 +2290,6 @@
 
 
 // -----------------------------------------------------------------------------
-// Free lists for old object spaces
-//
-// Free-list nodes are free blocks in the heap.  They look like heap objects
-// (free-list node pointers have the heap object tag, and they have a map like
-// a heap object).  They have a size and a next pointer.  The next pointer is
-// the raw address of the next free list node (or NULL).
-class FreeListNode: public HeapObject {
- public:
-  // Obtain a free-list node from a raw address.  This is not a cast because
-  // it does not check nor require that the first word at the address is a map
-  // pointer.
-  static FreeListNode* FromAddress(Address address) {
-    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
-  }
-
-  static inline bool IsFreeListNode(HeapObject* object);
-
-  // Set the size in bytes, which can be read with HeapObject::Size().  This
-  // function also writes a map to the first word of the block so that it
-  // looks like a heap object to the garbage collector and heap iteration
-  // functions.
-  void set_size(Heap* heap, int size_in_bytes);
-
-  // Accessors for the next field.
-  inline Address next(Heap* heap);
-  inline void set_next(Heap* heap, Address next);
-
- private:
-  static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
-};
-
-
-// The free list for the old space.
-class OldSpaceFreeList BASE_EMBEDDED {
- public:
-  OldSpaceFreeList(Heap* heap, AllocationSpace owner);
-
-  // Clear the free list.
-  void Reset();
-
-  // Return the number of bytes available on the free list.
-  intptr_t available() { return available_; }
-
-  // Place a node on the free list.  The block of size 'size_in_bytes'
-  // starting at 'start' is placed on the free list.  The return value is the
-  // number of bytes that have been lost due to internal fragmentation by
-  // freeing the block.  Bookkeeping information will be written to the block,
-  // ie, its contents will be destroyed.  The start address should be word
-  // aligned, and the size should be a non-zero multiple of the word size.
-  int Free(Address start, int size_in_bytes);
-
-  // Allocate a block of size 'size_in_bytes' from the free list.  The block
-  // is unitialized.  A failure is returned if no block is available.  The
-  // number of bytes lost to fragmentation is returned in the output parameter
-  // 'wasted_bytes'.  The size should be a non-zero multiple of the word size.
-  MUST_USE_RESULT MaybeObject* Allocate(int size_in_bytes, int* wasted_bytes);
-
-  void MarkNodes();
-
- private:
-  // The size range of blocks, in bytes. (Smaller allocations are allowed, but
-  // will always result in waste.)
-  static const int kMinBlockSize = 2 * kPointerSize;
-  static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
-
-  Heap* heap_;
-
-  // The identity of the owning space, for building allocation Failure
-  // objects.
-  AllocationSpace owner_;
-
-  // Total available bytes in all blocks on this free list.
-  int available_;
-
-  // Blocks are put on exact free lists in an array, indexed by size in words.
-  // The available sizes are kept in an increasingly ordered list. Entries
-  // corresponding to sizes < kMinBlockSize always have an empty free list
-  // (but index kHead is used for the head of the size list).
-  struct SizeNode {
-    // Address of the head FreeListNode of the implied block size or NULL.
-    Address head_node_;
-    // Size (words) of the next larger available size if head_node_ != NULL.
-    int next_size_;
-  };
-  static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1;
-  SizeNode free_[kFreeListsLength];
-
-  // Sentinel elements for the size list. Real elements are in ]kHead..kEnd[.
-  static const int kHead = kMinBlockSize / kPointerSize - 1;
-  static const int kEnd = kMaxInt;
-
-  // We keep a "finger" in the size list to speed up a common pattern:
-  // repeated requests for the same or increasing sizes.
-  int finger_;
-
-  // Starting from *prev, find and return the smallest size >= index (words),
-  // or kEnd. Update *prev to be the largest size < index, or kHead.
-  int FindSize(int index, int* prev) {
-    int cur = free_[*prev].next_size_;
-    while (cur < index) {
-      *prev = cur;
-      cur = free_[cur].next_size_;
-    }
-    return cur;
-  }
-
-  // Remove an existing element from the size list.
-  void RemoveSize(int index) {
-    int prev = kHead;
-    int cur = FindSize(index, &prev);
-    ASSERT(cur == index);
-    free_[prev].next_size_ = free_[cur].next_size_;
-    finger_ = prev;
-  }
-
-  // Insert a new element into the size list.
-  void InsertSize(int index) {
-    int prev = kHead;
-    int cur = FindSize(index, &prev);
-    ASSERT(cur != index);
-    free_[prev].next_size_ = index;
-    free_[index].next_size_ = cur;
-  }
-
-  // The size list is not updated during a sequence of calls to Free, but is
-  // rebuilt before the next allocation.
-  void RebuildSizeList();
-  bool needs_rebuild_;
-
-#ifdef DEBUG
-  // Does this free list contain a free block located at the address of 'node'?
-  bool Contains(FreeListNode* node);
-#endif
-
-  DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList);
-};
-
-
-// The free list for the map space.
-class FixedSizeFreeList BASE_EMBEDDED {
- public:
-  FixedSizeFreeList(Heap* heap, AllocationSpace owner, int object_size);
-
-  // Clear the free list.
-  void Reset();
-
-  // Return the number of bytes available on the free list.
-  intptr_t available() { return available_; }
-
-  // Place a node on the free list.  The block starting at 'start' (assumed to
-  // have size object_size_) is placed on the free list.  Bookkeeping
-  // information will be written to the block, ie, its contents will be
-  // destroyed.  The start address should be word aligned.
-  void Free(Address start);
-
-  // Allocate a fixed sized block from the free list.  The block is unitialized.
-  // A failure is returned if no block is available.
-  MUST_USE_RESULT MaybeObject* Allocate();
-
-  void MarkNodes();
-
- private:
-  Heap* heap_;
-
-  // Available bytes on the free list.
-  intptr_t available_;
-
-  // The head of the free list.
-  Address head_;
-
-  // The tail of the free list.
-  Address tail_;
-
-  // The identity of the owning space, for building allocation Failure
-  // objects.
-  AllocationSpace owner_;
-
-  // The size of the objects in this space.
-  int object_size_;
-
-  DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList);
-};
-
-
-// -----------------------------------------------------------------------------
 // Old object space (excluding map objects)
 
 class OldSpace : public PagedSpace {
@@ -1849,71 +2300,28 @@
            intptr_t max_capacity,
            AllocationSpace id,
            Executability executable)
-      : PagedSpace(heap, max_capacity, id, executable),
-        free_list_(heap, id) {
+      : PagedSpace(heap, max_capacity, id, executable) {
     page_extra_ = 0;
   }
 
-  // The bytes available on the free list (ie, not above the linear allocation
-  // pointer).
-  intptr_t AvailableFree() { return free_list_.available(); }
-
   // The limit of allocation for a page in this space.
   virtual Address PageAllocationLimit(Page* page) {
     return page->ObjectAreaEnd();
   }
 
-  // Give a block of memory to the space's free list.  It might be added to
-  // the free list or accounted as waste.
-  // If add_to_freelist is false then just accounting stats are updated and
-  // no attempt to add area to free list is made.
-  void Free(Address start, int size_in_bytes, bool add_to_freelist) {
-    accounting_stats_.DeallocateBytes(size_in_bytes);
-
-    if (add_to_freelist) {
-      int wasted_bytes = free_list_.Free(start, size_in_bytes);
-      accounting_stats_.WasteBytes(wasted_bytes);
-    }
-  }
-
-  virtual void DeallocateBlock(Address start,
-                               int size_in_bytes,
-                               bool add_to_freelist);
-
-  // Prepare for full garbage collection.  Resets the relocation pointer and
-  // clears the free list.
-  virtual void PrepareForMarkCompact(bool will_compact);
-
-  // Updates the allocation pointer to the relocation top after a mark-compact
-  // collection.
-  virtual void MCCommitRelocationInfo();
-
-  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
-
-  void MarkFreeListNodes() { free_list_.MarkNodes(); }
-
-#ifdef DEBUG
-  // Reports statistics for the space
-  void ReportStatistics();
-#endif
-
- protected:
-  // Virtual function in the superclass.  Slow path of AllocateRaw.
-  MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
-
-  // Virtual function in the superclass.  Allocate linearly at the start of
-  // the page after current_page (there is assumed to be one).
-  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
-
- private:
-  // The space's free list.
-  OldSpaceFreeList free_list_;
-
  public:
   TRACK_MEMORY("OldSpace")
 };
 
 
+// For paged semispaces, the allocation top must lie on the current page (or
+// at its end) and the limit must not extend past the end of that page.
+#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
+  ASSERT((space).page_low() <= (info).top             \
+         && (info).top <= (space).page_high()         \
+         && (info).limit <= (space).page_high())
+
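The macro checks the invariant for bump-pointer allocation over a single page; a call site inside NewSpace, where allocation_info_ tracks the bump pointer for to_space_, might look like the following (this exact invocation is an assumption, not taken from the patch):

ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);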
+
 // -----------------------------------------------------------------------------
 // Old space for objects of a fixed size
 
@@ -1926,8 +2334,7 @@
              const char* name)
       : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
         object_size_in_bytes_(object_size_in_bytes),
-        name_(name),
-        free_list_(heap, id, object_size_in_bytes) {
+        name_(name) {
     page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
   }
 
@@ -1938,44 +2345,12 @@
 
   int object_size_in_bytes() { return object_size_in_bytes_; }
 
-  // Give a fixed sized block of memory to the space's free list.
-  // If add_to_freelist is false then just accounting stats are updated and
-  // no attempt to add area to free list is made.
-  void Free(Address start, bool add_to_freelist) {
-    if (add_to_freelist) {
-      free_list_.Free(start);
-    }
-    accounting_stats_.DeallocateBytes(object_size_in_bytes_);
-  }
-
   // Prepares for a mark-compact GC.
-  virtual void PrepareForMarkCompact(bool will_compact);
-
-  // Updates the allocation pointer to the relocation top after a mark-compact
-  // collection.
-  virtual void MCCommitRelocationInfo();
-
-  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
-
-  virtual void DeallocateBlock(Address start,
-                               int size_in_bytes,
-                               bool add_to_freelist);
+  virtual void PrepareForMarkCompact();
 
   void MarkFreeListNodes() { free_list_.MarkNodes(); }
 
-#ifdef DEBUG
-  // Reports statistic info of the space
-  void ReportStatistics();
-#endif
-
  protected:
-  // Virtual function in the superclass.  Slow path of AllocateRaw.
-  MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
-
-  // Virtual function in the superclass.  Allocate linearly at the start of
-  // the page after current_page (there is assumed to be one).
-  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
-
   void ResetFreeList() {
     free_list_.Reset();
   }
@@ -1986,9 +2361,6 @@
 
   // The name of this space.
   const char* name_;
-
-  // The space's free list.
-  FixedSizeFreeList free_list_;
 };
 
 
@@ -2004,83 +2376,18 @@
            AllocationSpace id)
       : FixedSpace(heap, max_capacity, id, Map::kSize, "map"),
         max_map_space_pages_(max_map_space_pages) {
-    ASSERT(max_map_space_pages < kMaxMapPageIndex);
   }
 
-  // Prepares for a mark-compact GC.
-  virtual void PrepareForMarkCompact(bool will_compact);
-
   // Given an index, returns the page address.
-  Address PageAddress(int page_index) { return page_addresses_[page_index]; }
+  // TODO(1600): this limit is artificial just to keep code compilable
+  static const int kMaxMapPageIndex = 1 << 16;
 
-  static const int kMaxMapPageIndex = 1 << MapWord::kMapPageIndexBits;
-
-  // Are map pointers encodable into map word?
-  bool MapPointersEncodable() {
-    if (!FLAG_use_big_map_space) {
-      ASSERT(CountPagesToTop() <= kMaxMapPageIndex);
-      return true;
+  virtual int RoundSizeDownToObjectAlignment(int size) {
+    if (IsPowerOf2(Map::kSize)) {
+      return RoundDown(size, Map::kSize);
+    } else {
+      return (size / Map::kSize) * Map::kSize;
     }
-    return CountPagesToTop() <= max_map_space_pages_;
-  }
-
-  // Should be called after forced sweep to find out if map space needs
-  // compaction.
-  bool NeedsCompaction(int live_maps) {
-    return !MapPointersEncodable() && live_maps <= CompactionThreshold();
-  }
-
-  Address TopAfterCompaction(int live_maps) {
-    ASSERT(NeedsCompaction(live_maps));
-
-    int pages_left = live_maps / kMapsPerPage;
-    PageIterator it(this, PageIterator::ALL_PAGES);
-    while (pages_left-- > 0) {
-      ASSERT(it.has_next());
-      it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
-    }
-    ASSERT(it.has_next());
-    Page* top_page = it.next();
-    top_page->SetRegionMarks(Page::kAllRegionsCleanMarks);
-    ASSERT(top_page->is_valid());
-
-    int offset = live_maps % kMapsPerPage * Map::kSize;
-    Address top = top_page->ObjectAreaStart() + offset;
-    ASSERT(top < top_page->ObjectAreaEnd());
-    ASSERT(Contains(top));
-
-    return top;
-  }
-
-  void FinishCompaction(Address new_top, int live_maps) {
-    Page* top_page = Page::FromAddress(new_top);
-    ASSERT(top_page->is_valid());
-
-    SetAllocationInfo(&allocation_info_, top_page);
-    allocation_info_.top = new_top;
-
-    int new_size = live_maps * Map::kSize;
-    accounting_stats_.DeallocateBytes(accounting_stats_.Size());
-    accounting_stats_.AllocateBytes(new_size);
-
-    // Flush allocation watermarks.
-    for (Page* p = first_page_; p != top_page; p = p->next_page()) {
-      p->SetAllocationWatermark(p->AllocationTop());
-    }
-    top_page->SetAllocationWatermark(new_top);
-
-#ifdef DEBUG
-    if (FLAG_enable_slow_asserts) {
-      intptr_t actual_size = 0;
-      for (Page* p = first_page_; p != top_page; p = p->next_page())
-        actual_size += kMapsPerPage * Map::kSize;
-      actual_size += (new_top - top_page->ObjectAreaStart());
-      ASSERT(accounting_stats_.Size() == actual_size);
-    }
-#endif
-
-    Shrink();
-    ResetFreeList();
   }
 
  protected:
@@ -2098,9 +2405,6 @@
 
   const int max_map_space_pages_;
 
-  // An array of page start address in a map space.
-  Address page_addresses_[kMaxMapPageIndex];
-
  public:
   TRACK_MEMORY("MapSpace")
 };
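A standalone sketch of the rounding performed by RoundSizeDownToObjectAlignment, with made-up object sizes standing in for Map::kSize (and, below, JSGlobalPropertyCell::kSize):

#include <cstdio>

static int RoundSizeDownToObjectAlignment(int size, int object_size) {
  bool is_power_of_2 = (object_size & (object_size - 1)) == 0;
  if (is_power_of_2) {
    return size & ~(object_size - 1);             // RoundDown for power-of-two sizes
  } else {
    return (size / object_size) * object_size;    // general case
  }
}

int main() {
  printf("%d\n", RoundSizeDownToObjectAlignment(1000, 32));   // prints 992
  printf("%d\n", RoundSizeDownToObjectAlignment(1000, 88));   // prints 968
  return 0;
}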
@@ -2116,6 +2420,14 @@
       : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell")
   {}
 
+  virtual int RoundSizeDownToObjectAlignment(int size) {
+    if (IsPowerOf2(JSGlobalPropertyCell::kSize)) {
+      return RoundDown(size, JSGlobalPropertyCell::kSize);
+    } else {
+      return (size / JSGlobalPropertyCell::kSize) * JSGlobalPropertyCell::kSize;
+    }
+  }
+
  protected:
 #ifdef DEBUG
   virtual void VerifyObject(HeapObject* obj);
@@ -2133,64 +2445,6 @@
 // A large object always starts at Page::kObjectStartOffset to a page.
 // Large objects do not move during garbage collections.
 
-// A LargeObjectChunk holds exactly one large object page with exactly one
-// large object.
-class LargeObjectChunk {
- public:
-  // Allocates a new LargeObjectChunk that contains a large object page
-  // (Page::kPageSize aligned) that has at least size_in_bytes (for a large
-  // object) bytes after the object area start of that page.
-  static LargeObjectChunk* New(int size_in_bytes, Executability executable);
-
-  // Free the memory associated with the chunk.
-  void Free(Executability executable);
-
-  // Interpret a raw address as a large object chunk.
-  static LargeObjectChunk* FromAddress(Address address) {
-    return reinterpret_cast<LargeObjectChunk*>(address);
-  }
-
-  // Returns the address of this chunk.
-  Address address() { return reinterpret_cast<Address>(this); }
-
-  Page* GetPage() {
-    return Page::FromAddress(RoundUp(address(), Page::kPageSize));
-  }
-
-  // Accessors for the fields of the chunk.
-  LargeObjectChunk* next() { return next_; }
-  void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
-  size_t size() { return size_ & ~Page::kPageFlagMask; }
-
-  // Compute the start address in the chunk.
-  Address GetStartAddress() { return GetPage()->ObjectAreaStart(); }
-
-  // Returns the object in this chunk.
-  HeapObject* GetObject() { return HeapObject::FromAddress(GetStartAddress()); }
-
-  // Given a requested size returns the physical size of a chunk to be
-  // allocated.
-  static int ChunkSizeFor(int size_in_bytes);
-
-  // Given a chunk size, returns the object size it can accommodate.  Used by
-  // LargeObjectSpace::Available.
-  static intptr_t ObjectSizeFor(intptr_t chunk_size) {
-    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
-    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
-  }
-
- private:
-  // A pointer to the next large object chunk in the space or NULL.
-  LargeObjectChunk* next_;
-
-  // The total size of this chunk.
-  size_t size_;
-
- public:
-  TRACK_MEMORY("LargeObjectChunk")
-};
-
-
 class LargeObjectSpace : public Space {
  public:
   LargeObjectSpace(Heap* heap, AllocationSpace id);
@@ -2202,12 +2456,15 @@
   // Releases internal resources, frees objects in this space.
   void TearDown();
 
-  // Allocates a (non-FixedArray, non-Code) large object.
-  MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes);
-  // Allocates a large Code object.
-  MUST_USE_RESULT MaybeObject* AllocateRawCode(int size_in_bytes);
-  // Allocates a large FixedArray.
-  MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int size_in_bytes);
+  static intptr_t ObjectSizeFor(intptr_t chunk_size) {
+    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
+    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
+  }
+
+  // Allocates a large object.  The Executability argument replaces the
+  // former AllocateRawCode and AllocateRawFixedArray entry points.
+  MUST_USE_RESULT MaybeObject* AllocateRaw(int object_size,
+                                           Executability executable);
 
   // Available bytes for objects in this space.
   inline intptr_t Available();
@@ -2231,10 +2488,7 @@
 
   // Finds a large object page containing the given pc, returns NULL
   // if such a page doesn't exist.
-  LargeObjectChunk* FindChunkContainingPc(Address pc);
-
-  // Iterates objects covered by dirty regions.
-  void IterateDirtyRegions(ObjectSlotCallback func);
+  LargePage* FindPageContainingPc(Address pc);
 
   // Frees unmarked objects.
   void FreeUnmarkedObjects();
@@ -2243,13 +2497,15 @@
   bool Contains(HeapObject* obj);
 
   // Checks whether the space is empty.
-  bool IsEmpty() { return first_chunk_ == NULL; }
+  bool IsEmpty() { return first_page_ == NULL; }
 
   // See the comments for ReserveSpace in the Space class.  This has to be
   // called after ReserveSpace has been called on the paged spaces, since they
   // may use some memory, leaving less for large objects.
   virtual bool ReserveSpace(int bytes);
 
+  LargePage* first_page() { return first_page_; }
+
 #ifdef DEBUG
   virtual void Verify();
   virtual void Print();
@@ -2262,17 +2518,11 @@
 
  private:
   // The head of the linked list of large object chunks.
-  LargeObjectChunk* first_chunk_;
+  LargePage* first_page_;
   intptr_t size_;  // allocated bytes
   int page_count_;  // number of chunks
   intptr_t objects_size_;  // size of objects
 
-  // Shared implementation of AllocateRaw, AllocateRawCode and
-  // AllocateRawFixedArray.
-  MUST_USE_RESULT MaybeObject* AllocateRawInternal(int requested_size,
-                                                   int object_size,
-                                                   Executability executable);
-
   friend class LargeObjectIterator;
 
  public:
@@ -2285,17 +2535,78 @@
   explicit LargeObjectIterator(LargeObjectSpace* space);
   LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
 
-  HeapObject* next();
+  HeapObject* Next();
 
   // implementation of ObjectIterator.
-  virtual HeapObject* next_object() { return next(); }
+  virtual HeapObject* next_object() { return Next(); }
 
  private:
-  LargeObjectChunk* current_;
+  LargePage* current_;
   HeapObjectCallback size_func_;
 };
 
 
+// Iterates over the chunks (pages and large object pages) that can contain
+// pointers to new space.
+class PointerChunkIterator BASE_EMBEDDED {
+ public:
+  inline explicit PointerChunkIterator(Heap* heap);
+
+  // Return NULL when the iterator is done.
+  MemoryChunk* next() {
+    switch (state_) {
+      case kOldPointerState: {
+        if (old_pointer_iterator_.has_next()) {
+          return old_pointer_iterator_.next();
+        }
+        state_ = kMapState;
+        // Fall through.
+      }
+      case kMapState: {
+        if (map_iterator_.has_next()) {
+          return map_iterator_.next();
+        }
+        state_ = kLargeObjectState;
+        // Fall through.
+      }
+      case kLargeObjectState: {
+        HeapObject* heap_object;
+        do {
+          heap_object = lo_iterator_.Next();
+          if (heap_object == NULL) {
+            state_ = kFinishedState;
+            return NULL;
+          }
+          // Fixed arrays are the only pointer-containing objects in large
+          // object space.
+        } while (!heap_object->IsFixedArray());
+        MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
+        return answer;
+      }
+      case kFinishedState:
+        return NULL;
+      default:
+        break;
+    }
+    UNREACHABLE();
+    return NULL;
+  }
+
+
+ private:
+  enum State {
+    kOldPointerState,
+    kMapState,
+    kLargeObjectState,
+    kFinishedState
+  };
+  State state_;
+  PageIterator old_pointer_iterator_;
+  PageIterator map_iterator_;
+  LargeObjectIterator lo_iterator_;
+};
+
+
 #ifdef DEBUG
 struct CommentStatistic {
   const char* comment;
diff --git a/src/store-buffer-inl.h b/src/store-buffer-inl.h
new file mode 100644
index 0000000..34f35a4
--- /dev/null
+++ b/src/store-buffer-inl.h
@@ -0,0 +1,79 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_STORE_BUFFER_INL_H_
+#define V8_STORE_BUFFER_INL_H_
+
+#include "store-buffer.h"
+
+namespace v8 {
+namespace internal {
+
+Address StoreBuffer::TopAddress() {
+  return reinterpret_cast<Address>(heap_->store_buffer_top_address());
+}
+
+
+void StoreBuffer::Mark(Address addr) {
+  ASSERT(!heap_->cell_space()->Contains(addr));
+  ASSERT(!heap_->code_space()->Contains(addr));
+  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
+  *top++ = addr;
+  heap_->public_set_store_buffer_top(top);
+  if ((reinterpret_cast<uintptr_t>(top) & kStoreBufferOverflowBit) != 0) {
+    ASSERT(top == limit_);
+    Compact();
+  } else {
+    ASSERT(top < limit_);
+  }
+}
+
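The full-buffer check above relies on the buffer layout established in StoreBuffer::Setup() (see store-buffer.cc below): the address one past the last slot has kStoreBufferOverflowBit set, while every interior top value has it clear, so a single bit test detects overflow. A standalone sketch with made-up constants (the real kStoreBufferSize and kStoreBufferOverflowBit are defined in store-buffer.h):

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t kStoreBufferSize = 1 << 17;          // assumed 128 KB
  const uintptr_t kStoreBufferOverflowBit = kStoreBufferSize;

  uintptr_t start = 4 * kStoreBufferSize;              // hypothetical base, aligned to 2 * size
  uintptr_t limit = start + kStoreBufferSize;
  uintptr_t interior_top = start + 64 * sizeof(void*);

  printf("interior overflows: %d\n",
         (int)((interior_top & kStoreBufferOverflowBit) != 0));   // 0
  printf("limit overflows:    %d\n",
         (int)((limit & kStoreBufferOverflowBit) != 0));          // 1
  return 0;
}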
+
+void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) {
+  if (store_buffer_rebuilding_enabled_) {
+    ASSERT(!heap_->cell_space()->Contains(addr));
+    ASSERT(!heap_->code_space()->Contains(addr));
+    ASSERT(!heap_->old_data_space()->Contains(addr));
+    ASSERT(!heap_->new_space()->Contains(addr));
+    Address* top = old_top_;
+    *top++ = addr;
+    old_top_ = top;
+    old_buffer_is_sorted_ = false;
+    old_buffer_is_filtered_ = false;
+    if (top >= old_limit_) {
+      ASSERT(callback_ != NULL);
+      (*callback_)(heap_,
+                   MemoryChunk::FromAnyPointerAddress(addr),
+                   kStoreBufferFullEvent);
+    }
+  }
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_STORE_BUFFER_INL_H_
diff --git a/src/store-buffer.cc b/src/store-buffer.cc
new file mode 100644
index 0000000..ab810e4
--- /dev/null
+++ b/src/store-buffer.cc
@@ -0,0 +1,694 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "store-buffer.h"
+#include "store-buffer-inl.h"
+#include "v8-counters.h"
+
+namespace v8 {
+namespace internal {
+
+StoreBuffer::StoreBuffer(Heap* heap)
+    : heap_(heap),
+      start_(NULL),
+      limit_(NULL),
+      old_start_(NULL),
+      old_limit_(NULL),
+      old_top_(NULL),
+      old_buffer_is_sorted_(false),
+      old_buffer_is_filtered_(false),
+      during_gc_(false),
+      store_buffer_rebuilding_enabled_(false),
+      callback_(NULL),
+      may_move_store_buffer_entries_(true),
+      virtual_memory_(NULL),
+      hash_map_1_(NULL),
+      hash_map_2_(NULL) {
+}
+
+
+void StoreBuffer::Setup() {
+  virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3);
+  uintptr_t start_as_int =
+      reinterpret_cast<uintptr_t>(virtual_memory_->address());
+  start_ =
+      reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
+  limit_ = start_ + (kStoreBufferSize / sizeof(*start_));
+
+  old_top_ = old_start_ = new Address[kOldStoreBufferLength];
+  old_limit_ = old_start_ + kOldStoreBufferLength;
+
+  ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
+  ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
+  Address* vm_limit = reinterpret_cast<Address*>(
+      reinterpret_cast<char*>(virtual_memory_->address()) +
+          virtual_memory_->size());
+  ASSERT(start_ <= vm_limit);
+  ASSERT(limit_ <= vm_limit);
+  USE(vm_limit);
+  ASSERT((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
+  ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
+         0);
+
+  virtual_memory_->Commit(reinterpret_cast<Address>(start_),
+                          kStoreBufferSize,
+                          false);  // Not executable.
+  heap_->public_set_store_buffer_top(start_);
+
+  hash_map_1_ = new uintptr_t[kHashMapLength];
+  hash_map_2_ = new uintptr_t[kHashMapLength];
+
+  ZapHashTables();
+}
+
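A hedged arithmetic check of why reserving 3 * kStoreBufferSize is enough: any reservation of that size contains a kStoreBufferSize-byte block aligned to 2 * kStoreBufferSize, which is the alignment the overflow-bit test in store-buffer-inl.h depends on. The concrete numbers below are illustrative:

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t kStoreBufferSize = 1 << 17;             // assumed
  const uintptr_t alignment = 2 * kStoreBufferSize;

  uintptr_t base = (8 << 17) + 1;                         // hypothetical, maximally misaligned
  uintptr_t end  = base + 3 * kStoreBufferSize;

  // Mirrors start_ = RoundUp(base, 2 * kStoreBufferSize) in Setup().
  uintptr_t start = (base + alignment - 1) & ~(alignment - 1);
  uintptr_t limit = start + kStoreBufferSize;

  printf("buffer fits in reservation: %d\n", (int)(limit <= end));   // prints 1
  return 0;
}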
+
+void StoreBuffer::TearDown() {
+  delete virtual_memory_;
+  delete[] hash_map_1_;
+  delete[] hash_map_2_;
+  delete[] old_start_;
+  old_start_ = old_top_ = old_limit_ = NULL;
+  start_ = limit_ = NULL;
+  heap_->public_set_store_buffer_top(start_);
+}
+
+
+void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
+  isolate->heap()->store_buffer()->Compact();
+}
+
+
+#if V8_TARGET_ARCH_X64
+static int CompareAddresses(const void* void_a, const void* void_b) {
+  intptr_t a =
+      reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a));
+  intptr_t b =
+      reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b));
+  // Unfortunately if int is smaller than intptr_t there is no branch-free
+  // way to return a number with the same sign as the difference between the
+  // pointers.
+  if (a == b) return 0;
+  if (a < b) return -1;
+  ASSERT(a > b);
+  return 1;
+}
+#else
+static int CompareAddresses(const void* void_a, const void* void_b) {
+  intptr_t a =
+      reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a));
+  intptr_t b =
+      reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b));
+  ASSERT(sizeof(1) == sizeof(a));
+  // Shift down to avoid wraparound.
+  return (a >> kPointerSizeLog2) - (b >> kPointerSizeLog2);
+}
+#endif
+
+
+void StoreBuffer::Uniq() {
+  ASSERT(HashTablesAreZapped());
+  // Remove adjacent duplicates and cells that do not point at new space.
+  Address previous = NULL;
+  Address* write = old_start_;
+  ASSERT(may_move_store_buffer_entries_);
+  for (Address* read = old_start_; read < old_top_; read++) {
+    Address current = *read;
+    if (current != previous) {
+      if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) {
+        *write++ = current;
+      }
+    }
+    previous = current;
+  }
+  old_top_ = write;
+}
+
+
+void StoreBuffer::HandleFullness() {
+  if (old_buffer_is_filtered_) return;
+  ASSERT(may_move_store_buffer_entries_);
+  Compact();
+
+  old_buffer_is_filtered_ = true;
+  bool page_has_scan_on_scavenge_flag = false;
+
+  PointerChunkIterator it(heap_);
+  MemoryChunk* chunk;
+  while ((chunk = it.next()) != NULL) {
+    if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
+  }
+
+  if (page_has_scan_on_scavenge_flag) {
+    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
+  }
+
+  // If filtering out the entries from scan_on_scavenge pages got us down to
+  // less than half full, then we are satisfied with that.
+  if (old_limit_ - old_top_ > old_top_ - old_start_) return;
+
+  // Sample 1 entry in 97 and filter out the pages where we estimate that more
+  // than 1 in 8 pointers are to new space.
+  static const int kSampleFinenesses = 5;
+  static const struct Samples {
+    int prime_sample_step;
+    int threshold;
+  } samples[kSampleFinenesses] =  {
+    { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
+    { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
+    { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
+    { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
+    { 1, 0}
+  };
+  for (int i = kSampleFinenesses - 1; i >= 0; i--) {
+    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
+    // As a last resort we mark all pages as being exempt from the store buffer.
+    ASSERT(i != 0 || old_top_ == old_start_);
+    if (old_limit_ - old_top_ > old_top_ - old_start_) return;
+  }
+  UNREACHABLE();
+}
+
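The samples[] table trades sampling cost against precision. A standalone sketch of the threshold arithmetic, using assumed values for Page::kPageSize (1 MB) and kPointerSize (8 bytes) since the real constants come from the V8 headers:

#include <cstdio>

int main() {
  const int kPageSize = 1 << 20;      // assumed
  const int kPointerSize = 8;         // assumed (x64)
  const int steps[]    = { 97, 23, 7, 3, 1 };
  const int divisors[] = { 8, 16, 32, 256, 0 };
  for (int i = 0; i < 5; i++) {
    int threshold = (divisors[i] == 0)
        ? 0
        : ((kPageSize / kPointerSize) / steps[i]) / divisors[i];
    printf("sample step %2d -> threshold %d sampled pointers per page\n",
           steps[i], threshold);
  }
  return 0;
}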
+
+// Sample the store buffer to see if some pages are taking up a lot of the
+// space in it.
+void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
+  PointerChunkIterator it(heap_);
+  MemoryChunk* chunk;
+  while ((chunk = it.next()) != NULL) {
+    chunk->set_store_buffer_counter(0);
+  }
+  bool created_new_scan_on_scavenge_pages = false;
+  MemoryChunk* previous_chunk = NULL;
+  for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
+    Address addr = *p;
+    MemoryChunk* containing_chunk = NULL;
+    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
+      containing_chunk = previous_chunk;
+    } else {
+      containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
+    }
+    int old_counter = containing_chunk->store_buffer_counter();
+    if (old_counter == threshold) {
+      containing_chunk->set_scan_on_scavenge(true);
+      created_new_scan_on_scavenge_pages = true;
+    }
+    containing_chunk->set_store_buffer_counter(old_counter + 1);
+    previous_chunk = containing_chunk;
+  }
+  if (created_new_scan_on_scavenge_pages) {
+    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
+  }
+  old_buffer_is_filtered_ = true;
+}
+
+
+void StoreBuffer::Filter(int flag) {
+  Address* new_top = old_start_;
+  MemoryChunk* previous_chunk = NULL;
+  for (Address* p = old_start_; p < old_top_; p++) {
+    Address addr = *p;
+    MemoryChunk* containing_chunk = NULL;
+    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
+      containing_chunk = previous_chunk;
+    } else {
+      containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
+      previous_chunk = containing_chunk;
+    }
+    if (!containing_chunk->IsFlagSet(flag)) {
+      *new_top++ = addr;
+    }
+  }
+  old_top_ = new_top;
+}
+
+
+void StoreBuffer::SortUniq() {
+  Compact();
+  if (old_buffer_is_sorted_) return;
+  ZapHashTables();
+  qsort(reinterpret_cast<void*>(old_start_),
+        old_top_ - old_start_,
+        sizeof(*old_top_),
+        &CompareAddresses);
+  Uniq();
+
+  old_buffer_is_sorted_ = true;
+}
+
+
+bool StoreBuffer::PrepareForIteration() {
+  Compact();
+  PointerChunkIterator it(heap_);
+  MemoryChunk* chunk;
+  bool page_has_scan_on_scavenge_flag = false;
+  while ((chunk = it.next()) != NULL) {
+    if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
+  }
+
+  if (page_has_scan_on_scavenge_flag) {
+    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
+  }
+  ZapHashTables();
+  return page_has_scan_on_scavenge_flag;
+}
+
+
+#ifdef DEBUG
+void StoreBuffer::Clean() {
+  ZapHashTables();
+  Uniq();  // Also removes things that no longer point to new space.
+  CheckForFullBuffer();
+}
+
+
+static bool Zapped(char* start, int size) {
+  for (int i = 0; i < size; i++) {
+    if (start[i] != 0) return false;
+  }
+  return true;
+}
+
+
+bool StoreBuffer::HashTablesAreZapped() {
+  return Zapped(reinterpret_cast<char*>(hash_map_1_),
+                sizeof(uintptr_t) * kHashMapLength) &&
+      Zapped(reinterpret_cast<char*>(hash_map_2_),
+             sizeof(uintptr_t) * kHashMapLength);
+}
+
+
+static Address* in_store_buffer_1_element_cache = NULL;
+
+
+bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
+  if (!FLAG_enable_slow_asserts) return true;
+  if (in_store_buffer_1_element_cache != NULL &&
+      *in_store_buffer_1_element_cache == cell_address) {
+    return true;
+  }
+  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
+  for (Address* current = top - 1; current >= start_; current--) {
+    if (*current == cell_address) {
+      in_store_buffer_1_element_cache = current;
+      return true;
+    }
+  }
+  for (Address* current = old_top_ - 1; current >= old_start_; current--) {
+    if (*current == cell_address) {
+      in_store_buffer_1_element_cache = current;
+      return true;
+    }
+  }
+  return false;
+}
+#endif
+
+
+void StoreBuffer::ZapHashTables() {
+  memset(reinterpret_cast<void*>(hash_map_1_),
+         0,
+         sizeof(uintptr_t) * kHashMapLength);
+  memset(reinterpret_cast<void*>(hash_map_2_),
+         0,
+         sizeof(uintptr_t) * kHashMapLength);
+}
+
+
+void StoreBuffer::GCPrologue() {
+  ZapHashTables();
+  during_gc_ = true;
+}
+
+
+#ifdef DEBUG
+static void DummyScavengePointer(HeapObject** p, HeapObject* o) {
+  // Do nothing.
+}
+
+
+void StoreBuffer::VerifyPointers(PagedSpace* space,
+                                 RegionCallback region_callback) {
+  PageIterator it(space);
+
+  while (it.has_next()) {
+    Page* page = it.next();
+    FindPointersToNewSpaceOnPage(
+        reinterpret_cast<PagedSpace*>(page->owner()),
+        page,
+        region_callback,
+        &DummyScavengePointer);
+  }
+}
+
+
+void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
+  LargeObjectIterator it(space);
+  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+    if (object->IsFixedArray()) {
+      Address slot_address = object->address();
+      Address end = object->address() + object->Size();
+
+      while (slot_address < end) {
+        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
+        // When we are not in GC the Heap::InNewSpace() predicate
+        // checks that pointers which satisfy the predicate point into
+        // the active semispace.
+        heap_->InNewSpace(*slot);
+        slot_address += kPointerSize;
+      }
+    }
+  }
+}
+#endif
+
+
+void StoreBuffer::Verify() {
+#ifdef DEBUG
+  VerifyPointers(heap_->old_pointer_space(),
+                 &StoreBuffer::FindPointersToNewSpaceInRegion);
+  VerifyPointers(heap_->map_space(),
+                 &StoreBuffer::FindPointersToNewSpaceInMapsRegion);
+  VerifyPointers(heap_->lo_space());
+#endif
+}
+
+
+void StoreBuffer::GCEpilogue() {
+  during_gc_ = false;
+  Verify();
+}
+
+
+void StoreBuffer::FindPointersToNewSpaceInRegion(
+    Address start, Address end, ObjectSlotCallback slot_callback) {
+  for (Address slot_address = start;
+       slot_address < end;
+       slot_address += kPointerSize) {
+    Object** slot = reinterpret_cast<Object**>(slot_address);
+    if (heap_->InNewSpace(*slot)) {
+      HeapObject* object = reinterpret_cast<HeapObject*>(*slot);
+      ASSERT(object->IsHeapObject());
+      slot_callback(reinterpret_cast<HeapObject**>(slot), object);
+      if (heap_->InNewSpace(*slot)) {
+        EnterDirectlyIntoStoreBuffer(slot_address);
+      }
+    }
+  }
+}
+
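A hedged sketch of a callback matching the ObjectSlotCallback shape used above; the name and body are illustrative (a real callback, such as the scavenger's, would copy or promote the object and update the slot):

static void ExampleSlotCallback(HeapObject** slot, HeapObject* object) {
  // Illustration only: just report the slot; *slot would normally be updated
  // to the object's new location by the real callback.
  PrintF("pointer to new space found at %p\n", reinterpret_cast<void*>(slot));
  USE(object);
}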
+
+// Compute start address of the first map following given addr.
+static inline Address MapStartAlign(Address addr) {
+  Address page = Page::FromAddress(addr)->ObjectAreaStart();
+  return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
+}
+
+
+// Compute end address of the first map preceding given addr.
+static inline Address MapEndAlign(Address addr) {
+  Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
+  return page + ((addr - page) / Map::kSize * Map::kSize);
+}
+
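A standalone sketch of the rounding done by MapStartAlign and MapEndAlign, using a made-up map size and page-relative offsets (the real Map::kSize comes from the V8 headers):

#include <cstdio>

int main() {
  const int kMapSize = 88;              // assumed, illustrative only
  int start_offset = 100;               // addr - ObjectAreaStart() for some addr
  int end_offset = 1000;

  // MapStartAlign: round the offset up to the next map boundary.
  int aligned_start = ((start_offset + kMapSize - 1) / kMapSize) * kMapSize;  // 176
  // MapEndAlign: round the offset down to a map boundary.
  int aligned_end = (end_offset / kMapSize) * kMapSize;                       // 968

  printf("start %d -> %d, end %d -> %d\n",
         start_offset, aligned_start, end_offset, aligned_end);
  return 0;
}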
+
+void StoreBuffer::FindPointersToNewSpaceInMaps(
+    Address start,
+    Address end,
+    ObjectSlotCallback slot_callback) {
+  ASSERT(MapStartAlign(start) == start);
+  ASSERT(MapEndAlign(end) == end);
+
+  Address map_address = start;
+  while (map_address < end) {
+    ASSERT(!heap_->InNewSpace(Memory::Object_at(map_address)));
+    ASSERT(Memory::Object_at(map_address)->IsMap());
+
+    Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
+    Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
+
+    FindPointersToNewSpaceInRegion(pointer_fields_start,
+                                   pointer_fields_end,
+                                   slot_callback);
+    map_address += Map::kSize;
+  }
+}
+
+
+void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
+    Address start,
+    Address end,
+    ObjectSlotCallback slot_callback) {
+  Address map_aligned_start = MapStartAlign(start);
+  Address map_aligned_end   = MapEndAlign(end);
+
+  ASSERT(map_aligned_start == start);
+  ASSERT(map_aligned_end == end);
+
+  FindPointersToNewSpaceInMaps(map_aligned_start,
+                               map_aligned_end,
+                               slot_callback);
+}
+
+
+// This function iterates over all the pointers in a paged space in the heap,
+// looking for pointers into new space.  Within the pages there may be dead
+// objects that have not been overwritten by free spaces or fillers because of
+// lazy sweeping.  These dead objects may not contain pointers to new space.
+// The garbage areas that have been swept properly (these will normally be the
+// large ones) will be marked with free space and filler map words.  In
+// addition any area that has never been used at all for object allocation must
+// be marked with a free space or filler.  Because the free space and filler
+// maps do not move we can always recognize these even after a compaction.
+// Normal objects like FixedArrays and JSObjects should not contain references
+// to these maps.  The special garbage section (see comment in spaces.h) is
+// skipped since it can contain absolutely anything.  Any objects that are
+// allocated during iteration may or may not be visited by the iteration, but
+// they will not be partially visited.
+void StoreBuffer::FindPointersToNewSpaceOnPage(
+    PagedSpace* space,
+    Page* page,
+    RegionCallback region_callback,
+    ObjectSlotCallback slot_callback) {
+  Address visitable_start = page->ObjectAreaStart();
+  Address end_of_page = page->ObjectAreaEnd();
+
+  Address visitable_end = visitable_start;
+
+  Object* free_space_map = heap_->free_space_map();
+  Object* two_pointer_filler_map = heap_->two_pointer_filler_map();
+
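+  // [visitable_start, visitable_end) is the current run of addresses to scan;
+  // it is flushed to the region callback whenever a free space, a filler or
+  // the special garbage section at space->top() is reached.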
+  while (visitable_end < end_of_page) {
+    Object* o = *reinterpret_cast<Object**>(visitable_end);
+    // Skip fillers but not things that look like fillers in the special
+    // garbage section which can contain anything.
+    if (o == free_space_map ||
+        o == two_pointer_filler_map ||
+        (visitable_end == space->top() && visitable_end != space->limit())) {
+      if (visitable_start != visitable_end) {
+        // After calling this the special garbage section may have moved.
+        (this->*region_callback)(visitable_start,
+                                 visitable_end,
+                                 slot_callback);
+        if (visitable_end >= space->top() && visitable_end < space->limit()) {
+          visitable_end = space->limit();
+          visitable_start = visitable_end;
+          continue;
+        }
+      }
+      if (visitable_end == space->top() && visitable_end != space->limit()) {
+        visitable_start = visitable_end = space->limit();
+      } else {
+        // At this point we are either at the start of a filler or we are at
+        // the point where the space->top() used to be before the
+        // visit_pointer_region call above.  Either way we can skip the
+        // object at the current spot:  We don't promise to visit objects
+        // allocated during heap traversal, and if space->top() moved then it
+        // must be because an object was allocated at this point.
+        visitable_start =
+            visitable_end + HeapObject::FromAddress(visitable_end)->Size();
+        visitable_end = visitable_start;
+      }
+    } else {
+      ASSERT(o != free_space_map);
+      ASSERT(o != two_pointer_filler_map);
+      ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
+      visitable_end += kPointerSize;
+    }
+  }
+  ASSERT(visitable_end == end_of_page);
+  if (visitable_start != visitable_end) {
+    (this->*region_callback)(visitable_start,
+                             visitable_end,
+                             slot_callback);
+  }
+}
+
+
+void StoreBuffer::IteratePointersInStoreBuffer(
+    ObjectSlotCallback slot_callback) {
+  Address* limit = old_top_;
+  old_top_ = old_start_;
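+  // Resetting old_top_ lets surviving entries be re-entered in place as we
+  // go; at most one entry is added per slot processed (see the ASSERT below),
+  // so the write cursor never overtakes the read cursor.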
+  {
+    DontMoveStoreBufferEntriesScope scope(this);
+    for (Address* current = old_start_; current < limit; current++) {
+#ifdef DEBUG
+      Address* saved_top = old_top_;
+#endif
+      Object** slot = reinterpret_cast<Object**>(*current);
+      Object* object = *slot;
+      if (heap_->InFromSpace(object)) {
+        HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
+        slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
+        if (heap_->InNewSpace(*slot)) {
+          EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
+        }
+      }
+      ASSERT(old_top_ == saved_top + 1 || old_top_ == saved_top);
+    }
+  }
+}
+
+
+void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
+  // We do not sort or remove duplicated entries from the store buffer because
+  // we expect that the callback will rebuild the store buffer, thus removing
+  // all duplicates and pointers to old space.
+  bool some_pages_to_scan = PrepareForIteration();
+
+  // TODO(gc): we want to skip slots on evacuation candidates
+  // but we can't simply figure that out from slot address
+  // because slot can belong to a large object.
+  IteratePointersInStoreBuffer(slot_callback);
+
+  // We are done scanning all the pointers that were in the store buffer, but
+  // there may be some pages marked scan_on_scavenge that have pointers to new
+  // space that are not in the store buffer.  We must scan them now.  As we
+  // scan, the surviving pointers to new space will be added to the store
+  // buffer.  If there are still a lot of pointers to new space then we will
+  // keep the scan_on_scavenge flag on the page and discard the pointers that
+  // were added to the store buffer.  If there are not many pointers to new
+  // space left on the page we will keep the pointers in the store buffer and
+  // remove the flag from the page.
+  if (some_pages_to_scan) {
+    if (callback_ != NULL) {
+      (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
+    }
+    PointerChunkIterator it(heap_);
+    MemoryChunk* chunk;
+    while ((chunk = it.next()) != NULL) {
+      if (chunk->scan_on_scavenge()) {
+        chunk->set_scan_on_scavenge(false);
+        if (callback_ != NULL) {
+          (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
+        }
+        if (chunk->owner() == heap_->lo_space()) {
+          LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
+          HeapObject* array = large_page->GetObject();
+          ASSERT(array->IsFixedArray());
+          Address start = array->address();
+          Address end = start + array->Size();
+          FindPointersToNewSpaceInRegion(start, end, slot_callback);
+        } else {
+          Page* page = reinterpret_cast<Page*>(chunk);
+          PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
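+          // Map space pages use the map-aware region callback so that only
+          // the pointer fields of each Map are scanned.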
+          FindPointersToNewSpaceOnPage(
+              owner,
+              page,
+              (owner == heap_->map_space() ?
+                 &StoreBuffer::FindPointersToNewSpaceInMapsRegion :
+                 &StoreBuffer::FindPointersToNewSpaceInRegion),
+              slot_callback);
+        }
+      }
+    }
+    if (callback_ != NULL) {
+      (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
+    }
+  }
+}
+
+
+void StoreBuffer::Compact() {
+  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
+
+  if (top == start_) return;
+
+  // There's no check of the limit in the loop below so we check here for
+  // the worst case (compaction doesn't eliminate any pointers).
+  ASSERT(top <= limit_);
+  heap_->public_set_store_buffer_top(start_);
+  if (top - start_ > old_limit_ - old_top_) {
+    HandleFullness();
+  }
+  ASSERT(may_move_store_buffer_entries_);
+  // Goes through the addresses in the store buffer attempting to remove
+  // duplicates.  In the interest of speed this is a lossy operation.  Some
+  // duplicates will remain.  We have two hash tables with different hash
+  // functions to reduce the number of unnecessary clashes.
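+  // Each table has kHashMapLength (1 << 12 == 4096) buckets; a match means
+  // this exact address was seen earlier and can be dropped, while a miss
+  // guarantees nothing, so some duplicates survive.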
+  for (Address* current = start_; current < top; current++) {
+    ASSERT(!heap_->cell_space()->Contains(*current));
+    ASSERT(!heap_->code_space()->Contains(*current));
+    ASSERT(!heap_->old_data_space()->Contains(*current));
+    uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
+    // Shift out the low-order bits, including any tag bits.
+    int_addr >>= kPointerSizeLog2;
+    int hash1 =
+        ((int_addr ^ (int_addr >> kHashMapLengthLog2)) & (kHashMapLength - 1));
+    if (hash_map_1_[hash1] == int_addr) continue;
+    int hash2 =
+        ((int_addr - (int_addr >> kHashMapLengthLog2)) & (kHashMapLength - 1));
+    hash2 ^= hash2 >> (kHashMapLengthLog2 * 2);
+    if (hash_map_2_[hash2] == int_addr) continue;
+    if (hash_map_1_[hash1] == 0) {
+      hash_map_1_[hash1] = int_addr;
+    } else if (hash_map_2_[hash2] == 0) {
+      hash_map_2_[hash2] = int_addr;
+    } else {
+      // Rather than slowing down we just throw away some entries.  This will
+      // cause some duplicates to remain undetected.
+      hash_map_1_[hash1] = int_addr;
+      hash_map_2_[hash2] = 0;
+    }
+    old_buffer_is_sorted_ = false;
+    old_buffer_is_filtered_ = false;
+    *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
+    ASSERT(old_top_ <= old_limit_);
+  }
+  heap_->isolate()->counters()->store_buffer_compactions()->Increment();
+  CheckForFullBuffer();
+}
+
+
+void StoreBuffer::CheckForFullBuffer() {
+  if (old_limit_ - old_top_ < kStoreBufferSize * 2) {
+    HandleFullness();
+  }
+}
+
+} }  // namespace v8::internal
diff --git a/src/store-buffer.h b/src/store-buffer.h
new file mode 100644
index 0000000..61b97d9
--- /dev/null
+++ b/src/store-buffer.h
@@ -0,0 +1,248 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_STORE_BUFFER_H_
+#define V8_STORE_BUFFER_H_
+
+#include "allocation.h"
+#include "checks.h"
+#include "globals.h"
+#include "platform.h"
+#include "v8globals.h"
+
+namespace v8 {
+namespace internal {
+
+class StoreBuffer;
+
+typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
+
+typedef void (StoreBuffer::*RegionCallback)(
+    Address start, Address end, ObjectSlotCallback slot_callback);
+
+// Used to implement the write barrier by collecting addresses of pointers
+// between spaces.
+class StoreBuffer {
+ public:
+  explicit StoreBuffer(Heap* heap);
+
+  static void StoreBufferOverflow(Isolate* isolate);
+
+  inline Address TopAddress();
+
+  void Setup();
+  void TearDown();
+
+  // This is used by the mutator to enter addresses into the store buffer.
+  inline void Mark(Address addr);
+
+  // This is used by the heap traversal to enter the addresses into the store
+  // buffer that should still be in the store buffer after GC.  It enters
+  // addresses directly into the old buffer because the GC starts by wiping the
+  // old buffer and thereafter only visits each cell once so there is no need
+  // to attempt to remove any dupes.  During the first part of a GC we
+  // are using the store buffer to access the old spaces and at the same time
+  // we are rebuilding the store buffer using this function.  There is, however,
+  // no issue of overwriting the buffer we are iterating over, because this
+  // stage of the scavenge can only reduce the number of addresses in the store
+  // buffer (some objects are promoted so pointers to them do not need to be in
+  // the store buffer).  The later parts of the GC scan the pages that are
+  // exempt from the store buffer and process the promotion queue.  These steps
+  // can overflow this buffer.  We check for this and on overflow we call the
+  // callback set up with the StoreBufferRebuildScope object.
+  inline void EnterDirectlyIntoStoreBuffer(Address addr);
+
+  // Iterates over all pointers that go from old space to new space.  It will
+  // empty the store buffer as it starts, so the callback should re-enter
+  // surviving old-to-new pointers into the store buffer to rebuild it.
+  void IteratePointersToNewSpace(ObjectSlotCallback callback);
+
+  static const int kStoreBufferOverflowBit = 1 << 16;
+  static const int kStoreBufferSize = kStoreBufferOverflowBit;
+  static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
+  static const int kOldStoreBufferLength = kStoreBufferLength * 16;
+  static const int kHashMapLengthLog2 = 12;
+  static const int kHashMapLength = 1 << kHashMapLengthLog2;
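+  // On a 64-bit target (sizeof(Address) == 8) this gives a 64KB new buffer
+  // holding 8192 addresses and an old buffer of 128K entries; a 32-bit target
+  // fits twice as many addresses in the same 64KB.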
+
+  void Compact();
+
+  void GCPrologue();
+  void GCEpilogue();
+
+  Object*** Limit() { return reinterpret_cast<Object***>(old_limit_); }
+  Object*** Start() { return reinterpret_cast<Object***>(old_start_); }
+  Object*** Top() { return reinterpret_cast<Object***>(old_top_); }
+  void SetTop(Object*** top) {
+    ASSERT(top >= Start());
+    ASSERT(top <= Limit());
+    old_top_ = reinterpret_cast<Address*>(top);
+  }
+
+  bool old_buffer_is_sorted() { return old_buffer_is_sorted_; }
+  bool old_buffer_is_filtered() { return old_buffer_is_filtered_; }
+
+  // Goes through the store buffer removing pointers to things that have
+  // been promoted.  Rebuilds the store buffer completely if it overflowed.
+  void SortUniq();
+
+  void HandleFullness();
+  void Verify();
+
+  bool PrepareForIteration();
+
+#ifdef DEBUG
+  void Clean();
+  // Slow, for asserts only.
+  bool CellIsInStoreBuffer(Address cell);
+#endif
+
+  void Filter(int flag);
+
+ private:
+  Heap* heap_;
+
+  // The store buffer is divided up into a new buffer that is constantly being
+  // filled by mutator activity and an old buffer that is filled with the data
+  // from the new buffer after compression.
+  Address* start_;
+  Address* limit_;
+
+  Address* old_start_;
+  Address* old_limit_;
+  Address* old_top_;
+
+  bool old_buffer_is_sorted_;
+  bool old_buffer_is_filtered_;
+  bool during_gc_;
+  // The garbage collector iterates over many pointers to new space that are not
+  // handled by the store buffer.  This flag indicates whether the pointers
+  // found by the callbacks should be added to the store buffer or not.
+  bool store_buffer_rebuilding_enabled_;
+  StoreBufferCallback callback_;
+  bool may_move_store_buffer_entries_;
+
+  VirtualMemory* virtual_memory_;
+  uintptr_t* hash_map_1_;
+  uintptr_t* hash_map_2_;
+
+  void CheckForFullBuffer();
+  void Uniq();
+  void ZapHashTables();
+  bool HashTablesAreZapped();
+  void ExemptPopularPages(int prime_sample_step, int threshold);
+
+  void FindPointersToNewSpaceInRegion(Address start,
+                                      Address end,
+                                      ObjectSlotCallback slot_callback);
+
+  // For each region of pointers on a page in use from an old space call
+  // visit_pointer_region callback.
+  // If either visit_pointer_region or callback can cause an allocation
+  // in old space and changes in allocation watermark then
+  // can_preallocate_during_iteration should be set to true.
+  void IteratePointersOnPage(
+      PagedSpace* space,
+      Page* page,
+      RegionCallback region_callback,
+      ObjectSlotCallback slot_callback);
+
+  void FindPointersToNewSpaceInMaps(
+    Address start,
+    Address end,
+    ObjectSlotCallback slot_callback);
+
+  void FindPointersToNewSpaceInMapsRegion(
+    Address start,
+    Address end,
+    ObjectSlotCallback slot_callback);
+
+  void FindPointersToNewSpaceOnPage(
+    PagedSpace* space,
+    Page* page,
+    RegionCallback region_callback,
+    ObjectSlotCallback slot_callback);
+
+  void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback);
+
+#ifdef DEBUG
+  void VerifyPointers(PagedSpace* space, RegionCallback region_callback);
+  void VerifyPointers(LargeObjectSpace* space);
+#endif
+
+  friend class StoreBufferRebuildScope;
+  friend class DontMoveStoreBufferEntriesScope;
+};
+
+
+class StoreBufferRebuildScope {
+ public:
+  explicit StoreBufferRebuildScope(Heap* heap,
+                                   StoreBuffer* store_buffer,
+                                   StoreBufferCallback callback)
+      : heap_(heap),
+        store_buffer_(store_buffer),
+        stored_state_(store_buffer->store_buffer_rebuilding_enabled_),
+        stored_callback_(store_buffer->callback_) {
+    store_buffer_->store_buffer_rebuilding_enabled_ = true;
+    store_buffer_->callback_ = callback;
+    (*callback)(heap, NULL, kStoreBufferStartScanningPagesEvent);
+  }
+
+  ~StoreBufferRebuildScope() {
+    store_buffer_->callback_ = stored_callback_;
+    store_buffer_->store_buffer_rebuilding_enabled_ = stored_state_;
+    store_buffer_->CheckForFullBuffer();
+  }
+
+ private:
+  Heap* heap_;
+  StoreBuffer* store_buffer_;
+  bool stored_state_;
+  StoreBufferCallback stored_callback_;
+};
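+
+// A minimal usage sketch, for illustration only (MyPageEvent and MySlotVisitor
+// are made-up names, and Heap is assumed to expose its StoreBuffer via a
+// store_buffer() accessor):
+//
+//   {
+//     StoreBufferRebuildScope scope(heap, heap->store_buffer(), &MyPageEvent);
+//     heap->store_buffer()->IteratePointersToNewSpace(&MySlotVisitor);
+//   }  // The previous callback and rebuilding flag are restored here.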
+
+
+class DontMoveStoreBufferEntriesScope {
+ public:
+  explicit DontMoveStoreBufferEntriesScope(StoreBuffer* store_buffer)
+      : store_buffer_(store_buffer),
+        stored_state_(store_buffer->may_move_store_buffer_entries_) {
+    store_buffer_->may_move_store_buffer_entries_ = false;
+  }
+
+  ~DontMoveStoreBufferEntriesScope() {
+    store_buffer_->may_move_store_buffer_entries_ = stored_state_;
+  }
+
+ private:
+  StoreBuffer* store_buffer_;
+  bool stored_state_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_STORE_BUFFER_H_
diff --git a/src/string.js b/src/string.js
index 297105d..be955c8 100644
--- a/src/string.js
+++ b/src/string.js
@@ -568,7 +568,6 @@
   }
   var subject = TO_STRING_INLINE(this);
   limit = (IS_UNDEFINED(limit)) ? 0xffffffff : TO_UINT32(limit);
-  if (limit === 0) return [];
 
   // ECMA-262 says that if separator is undefined, the result should
   // be an array of size 1 containing the entire string.  SpiderMonkey
@@ -582,6 +581,9 @@
   var length = subject.length;
   if (!IS_REGEXP(separator)) {
     separator = TO_STRING_INLINE(separator);
+
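+    // The separator must be coerced to a string (above) even when the limit
+    // is zero; the spec performs that coercion before the limit check, so the
+    // early return happens only afterwards.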
+    if (limit === 0) return [];
+
     var separator_length = separator.length;
 
     // If the separator string is empty then return the elements in the subject.
@@ -592,6 +594,8 @@
     return result;
   }
 
+  if (limit === 0) return [];
+
   %_Log('regexp', 'regexp-split,%0S,%1r', [subject, separator]);
 
   if (length === 0) {
diff --git a/src/strtod.cc b/src/strtod.cc
index c89c8f3..be79c80 100644
--- a/src/strtod.cc
+++ b/src/strtod.cc
@@ -27,7 +27,6 @@
 
 #include <stdarg.h>
 #include <math.h>
-#include <limits>
 
 #include "globals.h"
 #include "utils.h"
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 5596330..09ee793 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -55,7 +55,15 @@
   ASSERT(IsPowerOf2(kSecondaryTableSize));
   if (create_heap_objects) {
     HandleScope scope;
-    Clear();
+    Code* empty = isolate_->builtins()->builtin(Builtins::kIllegal);
+    for (int i = 0; i < kPrimaryTableSize; i++) {
+      primary_[i].key = heap()->empty_string();
+      primary_[i].value = empty;
+    }
+    for (int j = 0; j < kSecondaryTableSize; j++) {
+      secondary_[j].key = heap()->empty_string();
+      secondary_[j].value = empty;
+    }
   }
 }
 
@@ -1099,15 +1107,14 @@
 
 
 void StubCache::Clear() {
+  Code* empty = isolate_->builtins()->builtin(Builtins::kIllegal);
   for (int i = 0; i < kPrimaryTableSize; i++) {
     primary_[i].key = heap()->empty_string();
-    primary_[i].value = isolate_->builtins()->builtin(
-        Builtins::kIllegal);
+    primary_[i].value = empty;
   }
   for (int j = 0; j < kSecondaryTableSize; j++) {
     secondary_[j].key = heap()->empty_string();
-    secondary_[j].value = isolate_->builtins()->builtin(
-        Builtins::kIllegal);
+    secondary_[j].value = empty;
   }
 }
 
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 18c157b..fcb58e1 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -704,7 +704,8 @@
       CodeList* handler_ics);
 
   static void GenerateStoreFastElement(MacroAssembler* masm,
-                                       bool is_js_array);
+                                       bool is_js_array,
+                                       ElementsKind element_kind);
 
   static void GenerateStoreFastDoubleElement(MacroAssembler* masm,
                                              bool is_js_array);
diff --git a/src/token.h b/src/token.h
index eb825c1..de4972d 100644
--- a/src/token.h
+++ b/src/token.h
@@ -216,6 +216,10 @@
     return op == LT || op == LTE || op == GT || op == GTE;
   }
 
+  static bool IsEqualityOp(Value op) {
+    return op == EQ || op == EQ_STRICT;
+  }
+
   static Value NegateCompareOp(Value op) {
     ASSERT(IsCompareOp(op));
     switch (op) {
diff --git a/src/type-info.cc b/src/type-info.cc
index c64368e..293632b 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -76,7 +76,7 @@
 
 
 bool TypeFeedbackOracle::LoadIsMonomorphicNormal(Property* expr) {
-  Handle<Object> map_or_code(GetInfo(expr->id()));
+  Handle<Object> map_or_code = GetInfo(expr->id());
   if (map_or_code->IsMap()) return true;
   if (map_or_code->IsCode()) {
     Handle<Code> code = Handle<Code>::cast(map_or_code);
@@ -90,7 +90,7 @@
 
 
 bool TypeFeedbackOracle::LoadIsMegamorphicWithTypeInfo(Property* expr) {
-  Handle<Object> map_or_code(GetInfo(expr->id()));
+  Handle<Object> map_or_code = GetInfo(expr->id());
   if (map_or_code->IsCode()) {
     Handle<Code> code = Handle<Code>::cast(map_or_code);
     Builtins* builtins = Isolate::Current()->builtins();
@@ -103,7 +103,7 @@
 
 
 bool TypeFeedbackOracle::StoreIsMonomorphicNormal(Expression* expr) {
-  Handle<Object> map_or_code(GetInfo(expr->id()));
+  Handle<Object> map_or_code = GetInfo(expr->id());
   if (map_or_code->IsMap()) return true;
   if (map_or_code->IsCode()) {
     Handle<Code> code = Handle<Code>::cast(map_or_code);
@@ -116,7 +116,7 @@
 
 
 bool TypeFeedbackOracle::StoreIsMegamorphicWithTypeInfo(Expression* expr) {
-  Handle<Object> map_or_code(GetInfo(expr->id()));
+  Handle<Object> map_or_code = GetInfo(expr->id());
   if (map_or_code->IsCode()) {
     Handle<Code> code = Handle<Code>::cast(map_or_code);
     Builtins* builtins = Isolate::Current()->builtins();
@@ -131,13 +131,13 @@
 
 bool TypeFeedbackOracle::CallIsMonomorphic(Call* expr) {
   Handle<Object> value = GetInfo(expr->id());
-  return value->IsMap() || value->IsSmi();
+  return value->IsMap() || value->IsSmi() || value->IsJSFunction();
 }
 
 
 Handle<Map> TypeFeedbackOracle::LoadMonomorphicReceiverType(Property* expr) {
   ASSERT(LoadIsMonomorphicNormal(expr));
-  Handle<Object> map_or_code(GetInfo(expr->id()));
+  Handle<Object> map_or_code = GetInfo(expr->id());
   if (map_or_code->IsCode()) {
     Handle<Code> code = Handle<Code>::cast(map_or_code);
     Map* first_map = code->FindFirstMap();
@@ -150,7 +150,7 @@
 
 Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(Expression* expr) {
   ASSERT(StoreIsMonomorphicNormal(expr));
-  Handle<Object> map_or_code(GetInfo(expr->id()));
+  Handle<Object> map_or_code = GetInfo(expr->id());
   if (map_or_code->IsCode()) {
     Handle<Code> code = Handle<Code>::cast(map_or_code);
     return Handle<Map>(code->FindFirstMap());
@@ -203,6 +203,7 @@
   return check;
 }
 
+
 Handle<JSObject> TypeFeedbackOracle::GetPrototypeForPrimitiveCheck(
     CheckType check) {
   JSFunction* function = NULL;
@@ -225,6 +226,11 @@
 }
 
 
+Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(Call* expr) {
+  return Handle<JSFunction>::cast(GetInfo(expr->id()));
+}
+
+
 bool TypeFeedbackOracle::LoadIsBuiltin(Property* expr, Builtins::Name id) {
   return *GetInfo(expr->id()) ==
       Isolate::Current()->builtins()->builtin(id);
@@ -488,14 +494,16 @@
 
 void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
   for (int i = 0; i < infos->length(); i++) {
+    Address target_address = (*infos)[i].target_address();
     unsigned ast_id = static_cast<unsigned>((*infos)[i].data());
-    Code* target = Code::GetCodeFromTargetAddress((*infos)[i].target_address());
-    ProcessTarget(ast_id, target);
+    ProcessTargetAt(target_address, ast_id);
   }
 }
 
 
-void TypeFeedbackOracle::ProcessTarget(unsigned ast_id, Code* target) {
+void TypeFeedbackOracle::ProcessTargetAt(Address target_address,
+                                         unsigned ast_id) {
+  Code* target = Code::GetCodeFromTargetAddress(target_address);
   switch (target->kind()) {
     case Code::LOAD_IC:
     case Code::STORE_IC:
@@ -504,7 +512,7 @@
       if (target->ic_state() == MONOMORPHIC) {
         if (target->kind() == Code::CALL_IC &&
             target->check_type() != RECEIVER_MAP_CHECK) {
-          SetInfo(ast_id,  Smi::FromInt(target->check_type()));
+          SetInfo(ast_id, Smi::FromInt(target->check_type()));
         } else {
           Object* map = target->FindFirstMap();
           SetInfo(ast_id, map == NULL ? static_cast<Object*>(target) : map);
@@ -529,6 +537,16 @@
       SetInfo(ast_id, target);
       break;
 
+    case Code::STUB:
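+      // CallFunction stubs built with RECORD_CALL_TARGET cache the most
+      // recent call target in the instruction stream; treat it as monomorphic
+      // call feedback when it is a JSFunction.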
+      if (target->major_key() == CodeStub::CallFunction &&
+          target->has_function_cache()) {
+        Object* value = CallFunctionStub::GetCachedValue(target_address);
+        if (value->IsJSFunction()) {
+          SetInfo(ast_id, value);
+        }
+      }
+      break;
+
     default:
       break;
   }
diff --git a/src/type-info.h b/src/type-info.h
index 448e4c9..e1ccc28 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -243,6 +243,8 @@
   CheckType GetCallCheckType(Call* expr);
   Handle<JSObject> GetPrototypeForPrimitiveCheck(CheckType check);
 
+  Handle<JSFunction> GetCallTarget(Call* expr);
+
   bool LoadIsBuiltin(Property* expr, Builtins::Name id);
 
   // TODO(1571) We can't use ToBooleanStub::Types as the return value because
@@ -273,7 +275,7 @@
                           byte* old_start,
                           byte* new_start);
   void ProcessRelocInfos(ZoneList<RelocInfo>* infos);
-  void ProcessTarget(unsigned ast_id, Code* target);
+  void ProcessTargetAt(Address target_address, unsigned ast_id);
 
   // Returns an element from the backing store. Returns undefined if
   // there is no information.
diff --git a/src/uri.js b/src/uri.js
index c910d75..1656664 100644
--- a/src/uri.js
+++ b/src/uri.js
@@ -111,47 +111,59 @@
     var o1 = octets[1];
     if (o0 < 0xe0) {
       var a = o0 & 0x1f;
-      if ((o1 < 0x80) || (o1 > 0xbf))
+      if ((o1 < 0x80) || (o1 > 0xbf)) {
         throw new $URIError("URI malformed");
+      }
       var b = o1 & 0x3f;
       value = (a << 6) + b;
-      if (value < 0x80 || value > 0x7ff)
+      if (value < 0x80 || value > 0x7ff) {
         throw new $URIError("URI malformed");
+      }
     } else {
       var o2 = octets[2];
       if (o0 < 0xf0) {
         var a = o0 & 0x0f;
-        if ((o1 < 0x80) || (o1 > 0xbf))
+        if ((o1 < 0x80) || (o1 > 0xbf)) {
           throw new $URIError("URI malformed");
+        }
         var b = o1 & 0x3f;
-        if ((o2 < 0x80) || (o2 > 0xbf))
+        if ((o2 < 0x80) || (o2 > 0xbf)) {
           throw new $URIError("URI malformed");
+        }
         var c = o2 & 0x3f;
         value = (a << 12) + (b << 6) + c;
-        if ((value < 0x800) || (value > 0xffff))
+        if ((value < 0x800) || (value > 0xffff)) {
           throw new $URIError("URI malformed");
+        }
       } else {
         var o3 = octets[3];
         if (o0 < 0xf8) {
           var a = (o0 & 0x07);
-          if ((o1 < 0x80) || (o1 > 0xbf))
+          if ((o1 < 0x80) || (o1 > 0xbf)) {
             throw new $URIError("URI malformed");
+          }
           var b = (o1 & 0x3f);
-          if ((o2 < 0x80) || (o2 > 0xbf))
+          if ((o2 < 0x80) || (o2 > 0xbf)) {
             throw new $URIError("URI malformed");
+          }
           var c = (o2 & 0x3f);
-          if ((o3 < 0x80) || (o3 > 0xbf))
+          if ((o3 < 0x80) || (o3 > 0xbf)) {
             throw new $URIError("URI malformed");
+          }
           var d = (o3 & 0x3f);
           value = (a << 18) + (b << 12) + (c << 6) + d;
-          if ((value < 0x10000) || (value > 0x10ffff))
+          if ((value < 0x10000) || (value > 0x10ffff)) {
             throw new $URIError("URI malformed");
+          }
         } else {
           throw new $URIError("URI malformed");
         }
       }
     }
   }
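+  // Surrogate code points (0xD800..0xDFFF) must not appear in well-formed
+  // UTF-8, so a decoded value in that range means the input was malformed.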
+  if (0xD800 <= value && value <= 0xDFFF) {
+    throw new $URIError("URI malformed");
+  }
   if (value < 0x10000) {
     result[index++] = value;
     return index;
@@ -214,7 +226,8 @@
         if (k + 3 * (n - 1) >= uriLength) throw new $URIError("URI malformed");
         for (var i = 1; i < n; i++) {
           if (uri.charAt(++k) != '%') throw new $URIError("URI malformed");
-          octets[i] = URIHexCharsToCharCode(uri.charCodeAt(++k), uri.charCodeAt(++k));
+          octets[i] = URIHexCharsToCharCode(uri.charCodeAt(++k),
+                                            uri.charCodeAt(++k));
         }
         index = URIDecodeOctets(octets, result, index);
       } else {
@@ -366,7 +379,9 @@
 function IsValidHex(s) {
   for (var i = 0; i < s.length; ++i) {
     var cc = s.charCodeAt(i);
-    if ((48 <= cc && cc <= 57) || (65 <= cc && cc <= 70) || (97 <= cc && cc <= 102)) {
+    if ((48 <= cc && cc <= 57) ||
+        (65 <= cc && cc <= 70) ||
+        (97 <= cc && cc <= 102)) {
       // '0'..'9', 'A'..'F' and 'a' .. 'f'.
     } else {
       return false;
diff --git a/src/utils.h b/src/utils.h
index 26c522b..a523118 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -113,7 +113,7 @@
 
 // Return the largest multiple of m which is <= x.
 template <typename T>
-static inline T RoundDown(T x, int m) {
+static inline T RoundDown(T x, intptr_t m) {
   ASSERT(IsPowerOf2(m));
   return AddressFrom<T>(OffsetFrom(x) & -m);
 }
@@ -121,8 +121,8 @@
 
 // Return the smallest multiple of m which is >= x.
 template <typename T>
-static inline T RoundUp(T x, int m) {
-  return RoundDown(x + m - 1, m);
+static inline T RoundUp(T x, intptr_t m) {
+  return RoundDown<T>(static_cast<T>(x + m - 1), m);
 }
 
 
@@ -159,9 +159,15 @@
 }
 
 
+static inline uint32_t RoundDownToPowerOf2(uint32_t x) {
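+  // E.g. x == 9: RoundUpToPowerOf2(9) == 16 > 9, so the result is
+  // 16 >> 1 == 8; for x already a power of 2 the value is returned unchanged.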
+  uint32_t rounded_up = RoundUpToPowerOf2(x);
+  if (rounded_up > x) return rounded_up >> 1;
+  return rounded_up;
+}
 
-template <typename T>
-static inline bool IsAligned(T value, T alignment) {
+
+template <typename T, typename U>
+static inline bool IsAligned(T value, U alignment) {
   ASSERT(IsPowerOf2(alignment));
   return (value & (alignment - 1)) == 0;
 }
@@ -170,7 +176,7 @@
 // Returns true if (addr + offset) is aligned.
 static inline bool IsAddressAligned(Address addr,
                                     intptr_t alignment,
-                                    int offset) {
+                                    int offset = 0) {
   intptr_t offs = OffsetFrom(addr + offset);
   return IsAligned(offs, alignment);
 }
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 2de8303..47341e7 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -107,7 +107,10 @@
   SC(contexts_created_by_snapshot, V8.ContextsCreatedBySnapshot)      \
   /* Number of code objects found from pc. */                         \
   SC(pc_to_code, V8.PcToCode)                                         \
-  SC(pc_to_code_cached, V8.PcToCodeCached)
+  SC(pc_to_code_cached, V8.PcToCodeCached)                            \
+  /* The store-buffer implementation of the write barrier. */         \
+  SC(store_buffer_compactions, V8.StoreBufferCompactions)             \
+  SC(store_buffer_overflows, V8.StoreBufferOverflows)
 
 
 #define STATS_COUNTER_LIST_2(SC)                                      \
@@ -126,10 +129,6 @@
      V8.GCCompactorCausedByWeakHandles)                               \
   SC(gc_last_resort_from_js, V8.GCLastResortFromJS)                   \
   SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles)         \
-  SC(map_to_fast_elements, V8.MapToFastElements)                      \
-  SC(map_to_fast_double_elements, V8.MapToFastDoubleElements)         \
-  SC(map_to_slow_elements, V8.MapToSlowElements)                      \
-  SC(map_to_external_array_elements, V8.MapToExternalArrayElements)   \
   /* How is the generic keyed-load stub used? */                      \
   SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi)                  \
   SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol)            \
diff --git a/src/v8.cc b/src/v8.cc
index 1e9b5dc..aebcc2f 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -38,6 +38,7 @@
 #include "log.h"
 #include "runtime-profiler.h"
 #include "serialize.h"
+#include "store-buffer.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/v8.h b/src/v8.h
index e565ca5..2e039d4 100644
--- a/src/v8.h
+++ b/src/v8.h
@@ -60,10 +60,11 @@
 #include "objects-inl.h"
 #include "spaces-inl.h"
 #include "heap-inl.h"
+#include "incremental-marking-inl.h"
+#include "mark-compact-inl.h"
 #include "log-inl.h"
 #include "cpu-profiler-inl.h"
 #include "handles-inl.h"
-#include "isolate-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -124,6 +125,15 @@
   static bool use_crankshaft_;
 };
 
+
+// JavaScript defines two kinds of 'nil'.
+enum NilValue { kNullValue, kUndefinedValue };
+
+
+// JavaScript defines two kinds of equality.
+enum EqualityKind { kStrictEquality, kNonStrictEquality };
+
+
 } }  // namespace v8::internal
 
 namespace i = v8::internal;
diff --git a/src/v8globals.h b/src/v8globals.h
index eb5c49d..bd3d02b 100644
--- a/src/v8globals.h
+++ b/src/v8globals.h
@@ -79,18 +79,20 @@
     reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdaf));
 const uint64_t kDebugZapValue = V8_UINT64_C(0xbadbaddbbadbaddb);
 const uint64_t kSlotsZapValue = V8_UINT64_C(0xbeefdeadbeefdeef);
+const uint64_t kFreeListZapValue = 0xfeed1eaffeed1eaf;
 #else
 const Address kZapValue = reinterpret_cast<Address>(0xdeadbeef);
 const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddeaf);
 const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdaf);
 const uint32_t kSlotsZapValue = 0xbeefdeef;
 const uint32_t kDebugZapValue = 0xbadbaddb;
+const uint32_t kFreeListZapValue = 0xfeed1eaf;
 #endif
 
 
-// Number of bits to represent the page size for paged spaces. The value of 13
-// gives 8K bytes per page.
-const int kPageSizeBits = 13;
+// Number of bits to represent the page size for paged spaces. The value of 20
+// gives 1MB per page.
+const int kPageSizeBits = 20;
 
 // On Intel architecture, cache line size is 64 bytes.
 // On ARM it may be less (32 bytes), but as far this constant is
@@ -98,10 +100,6 @@
 const int kProcessorCacheLineSize = 64;
 
 // Constants relevant to double precision floating point numbers.
-
-// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
-// other bits set.
-const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
 // If looking only at the top 32 bits, the QNaN mask is bits 19 to 30.
 const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
 
@@ -131,6 +129,7 @@
 class FunctionEntry;
 class FunctionLiteral;
 class FunctionTemplateInfo;
+class MemoryChunk;
 class NumberDictionary;
 class StringDictionary;
 template <typename T> class Handle;
@@ -254,12 +253,6 @@
 };
 
 
-// Callback function on object slots, used for iterating heap object slots in
-// HeapObjects, global pointers to heap objects, etc. The callback allows the
-// callback function to change the value of the slot.
-typedef void (*ObjectSlotCallback)(HeapObject** pointer);
-
-
 // Callback function used for iterating objects in heap spaces,
 // for example, scanning heap objects.
 typedef int (*HeapObjectCallback)(HeapObject* obj);
@@ -306,7 +299,9 @@
   NO_CALL_FUNCTION_FLAGS = 0,
   // Receiver might implicitly be the global objects. If it is, the
   // hole is passed to the call function stub.
-  RECEIVER_MIGHT_BE_IMPLICIT = 1 << 0
+  RECEIVER_MIGHT_BE_IMPLICIT = 1 << 0,
+  // The call target is cached in the instruction stream.
+  RECORD_CALL_TARGET = 1 << 1
 };
 
 
@@ -316,6 +311,19 @@
 };
 
 
+// The Store Buffer (GC).
+typedef enum {
+  kStoreBufferFullEvent,
+  kStoreBufferStartScanningPagesEvent,
+  kStoreBufferScanningPageEvent
+} StoreBufferEvent;
+
+
+typedef void (*StoreBufferCallback)(Heap* heap,
+                                    MemoryChunk* page,
+                                    StoreBufferEvent event);
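+// Invoked by the store buffer to notify the GC of its state, for example
+// around the scan-on-scavenge page walk in
+// StoreBuffer::IteratePointersToNewSpace.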
+
+
 // Type of properties.
 // Order of properties is significant.
 // Must fit in the BitField PropertyDetails::TypeField.
diff --git a/src/v8natives.js b/src/v8natives.js
index 588bdb2..3c48264 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -193,13 +193,14 @@
 function SetUpGlobal() {
   %CheckIsBootstrapping();
   // ECMA 262 - 15.1.1.1.
-  %SetProperty(global, "NaN", $NaN, DONT_ENUM | DONT_DELETE);
+  %SetProperty(global, "NaN", $NaN, DONT_ENUM | DONT_DELETE | READ_ONLY);
 
   // ECMA-262 - 15.1.1.2.
-  %SetProperty(global, "Infinity", 1/0, DONT_ENUM | DONT_DELETE);
+  %SetProperty(global, "Infinity", 1/0, DONT_ENUM | DONT_DELETE | READ_ONLY);
 
   // ECMA-262 - 15.1.1.3.
-  %SetProperty(global, "undefined", void 0, DONT_ENUM | DONT_DELETE);
+  %SetProperty(global, "undefined", void 0,
+               DONT_ENUM | DONT_DELETE | READ_ONLY);
 
   // Set up non-enumerable function on the global object.
   InstallFunctions(global, DONT_ENUM, $Array(
@@ -1042,12 +1043,21 @@
     throw MakeTypeError("handler_returned_undefined", [handler, "fix"]);
   }
 
-  if (IS_SPEC_FUNCTION(obj)) {
+  if (%IsJSFunctionProxy(obj)) {
     var callTrap = %GetCallTrap(obj);
     var constructTrap = %GetConstructTrap(obj);
     var code = DelegateCallAndConstruct(callTrap, constructTrap);
     %Fix(obj);  // becomes a regular function
     %SetCode(obj, code);
+    // TODO(rossberg): What about length and other properties? Not specified.
+    // We just put in some half-reasonable defaults for now.
+    var prototype = new $Object();
+    $Object.defineProperty(prototype, "constructor",
+      {value: obj, writable: true, enumerable: false, configurable: true});
+    $Object.defineProperty(obj, "prototype",
+      {value: prototype, writable: true, enumerable: false, configurable: false});
+    $Object.defineProperty(obj, "length",
+      {value: 0, writable: true, enumerable: false, configurable: false});
   } else {
     %Fix(obj);
   }
diff --git a/src/version.cc b/src/version.cc
index efd9790..ecaf951 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     6
-#define BUILD_NUMBER      4
-#define PATCH_LEVEL       1
+#define BUILD_NUMBER      5
+#define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0
diff --git a/src/win32-headers.h b/src/win32-headers.h
index fca5c13..0ee3306 100644
--- a/src/win32-headers.h
+++ b/src/win32-headers.h
@@ -75,6 +75,7 @@
 // makes it impossible to have them elsewhere.
 #include <winsock2.h>
 #include <ws2tcpip.h>
+#include <wspiapi.h>
 #include <process.h>  // for _beginthreadex()
 #include <stdlib.h>
 #endif  // V8_WIN32_HEADERS_FULL
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index 8db54f0..fabaf25 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -242,6 +242,11 @@
   ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
   if (IsCodeTarget(rmode_)) {
     Assembler::set_target_address_at(pc_, target);
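+    // Record the embedded code target with the incremental marker so the
+    // write is not missed while marking is in progress.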
+    Object* target_code = Code::GetCodeFromTargetAddress(target);
+    if (host() != NULL) {
+      host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+          host(), this, HeapObject::cast(target_code));
+    }
   } else {
     Memory::Address_at(pc_) = target;
     CPU::FlushICache(pc_, sizeof(Address));
@@ -279,8 +284,12 @@
 
 void RelocInfo::set_target_object(Object* target) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  *reinterpret_cast<Object**>(pc_) = target;
+  Memory::Object_at(pc_) = target;
   CPU::FlushICache(pc_, sizeof(Address));
+  if (host() != NULL && target->IsHeapObject()) {
+    host()->GetHeap()->incremental_marking()->RecordWrite(
+        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+  }
 }
 
 
@@ -306,6 +315,12 @@
   Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
   Memory::Address_at(pc_) = address;
   CPU::FlushICache(pc_, sizeof(Address));
+  if (host() != NULL) {
+    // TODO(1550) We are passing NULL as a slot because the cell can never be
+    // on an evacuation candidate.
+    host()->GetHeap()->incremental_marking()->RecordWrite(
+        host(), NULL, cell);
+  }
 }
 
 
@@ -344,6 +359,11 @@
       target;
   CPU::FlushICache(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset,
                    sizeof(Address));
+  if (host() != NULL) {
+    Object* target_code = Code::GetCodeFromTargetAddress(target);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target_code));
+  }
 }
 
 
@@ -368,7 +388,7 @@
 void RelocInfo::Visit(ObjectVisitor* visitor) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    visitor->VisitPointer(target_object_address());
+    visitor->VisitEmbeddedPointer(host(), target_object_address());
     CPU::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
     visitor->VisitCodeTarget(this);
@@ -396,7 +416,7 @@
 void RelocInfo::Visit(Heap* heap) {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    StaticVisitor::VisitPointer(heap, target_object_address());
+    StaticVisitor::VisitEmbeddedPointer(heap, host(), target_object_address());
     CPU::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
     StaticVisitor::VisitCodeTarget(heap, this);
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 745fdae..1c4980e 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -47,7 +47,7 @@
 
 
 void CpuFeatures::Probe() {
-  ASSERT(!initialized_);
+  ASSERT(supported_ == CpuFeatures::kDefaultCpuFeatures);
 #ifdef DEBUG
   initialized_ = true;
 #endif
@@ -2983,7 +2983,7 @@
       return;
     }
   }
-  RelocInfo rinfo(pc_, rmode, data);
+  RelocInfo rinfo(pc_, rmode, data, NULL);
   reloc_info_writer.Write(&rinfo);
 }
 
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 2e373fa..0d87053 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -215,6 +215,12 @@
     return names[index];
   }
 
+  static XMMRegister from_code(int code) {
+    ASSERT(code >= 0);
+    ASSERT(code < kNumRegisters);
+    XMMRegister r = { code };
+    return r;
+  }
   bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
   bool is(XMMRegister reg) const { return code_ == reg.code_; }
   int code() const {
@@ -735,6 +741,10 @@
     immediate_arithmetic_op_32(0x0, dst, src);
   }
 
+  void addl(const Operand& dst, Register src) {
+    arithmetic_op_32(0x01, src, dst);
+  }
+
   void addq(Register dst, Register src) {
     arithmetic_op(0x03, dst, src);
   }
@@ -1394,13 +1404,14 @@
   static const int kMaximalBufferSize = 512*MB;
   static const int kMinimalBufferSize = 4*KB;
 
+  byte byte_at(int pos)  { return buffer_[pos]; }
+  void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
+
  protected:
   bool emit_debug_code() const { return emit_debug_code_; }
 
  private:
   byte* addr_at(int pos)  { return buffer_ + pos; }
-  byte byte_at(int pos)  { return buffer_[pos]; }
-  void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
   uint32_t long_at(int pos)  {
     return *reinterpret_cast<uint32_t*>(addr_at(pos));
   }
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index db06909..79ddb13 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -79,12 +79,12 @@
   //  -- rdi: constructor function
   // -----------------------------------
 
-  Label non_function_call;
+  Label slow, non_function_call;
   // Check that function is not a smi.
   __ JumpIfSmi(rdi, &non_function_call);
   // Check that function is a JSFunction.
   __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
-  __ j(not_equal, &non_function_call);
+  __ j(not_equal, &slow);
 
   // Jump to the function-specific construct stub.
   __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
@@ -94,10 +94,19 @@
 
   // rdi: called object
   // rax: number of arguments
+  // rcx: object map
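+  // The callee is not a JSFunction.  Function proxies get the proxy construct
+  // builtin and everything else gets the non-function builtin; both are
+  // invoked through the arguments adaptor below.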
+  Label do_call;
+  __ bind(&slow);
+  __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
+  __ j(not_equal, &non_function_call);
+  __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+  __ jmp(&do_call);
+
   __ bind(&non_function_call);
+  __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+  __ bind(&do_call);
   // Set expected number of arguments to zero (not changing rax).
   __ Set(rbx, 0);
-  __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
   __ SetCallKind(rcx, CALL_AS_METHOD);
   __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
           RelocInfo::CODE_TARGET);
@@ -110,273 +119,279 @@
   // Should never count constructions for api objects.
   ASSERT(!is_api_function || !count_constructions);
 
-    // Enter a construct frame.
-  __ EnterConstructFrame();
+  // Enter a construct frame.
+  {
+    FrameScope scope(masm, StackFrame::CONSTRUCT);
 
-  // Store a smi-tagged arguments count on the stack.
-  __ Integer32ToSmi(rax, rax);
-  __ push(rax);
+    // Store a smi-tagged arguments count on the stack.
+    __ Integer32ToSmi(rax, rax);
+    __ push(rax);
 
-  // Push the function to invoke on the stack.
-  __ push(rdi);
+    // Push the function to invoke on the stack.
+    __ push(rdi);
 
-  // Try to allocate the object without transitioning into C code. If any of the
-  // preconditions is not met, the code bails out to the runtime call.
-  Label rt_call, allocated;
-  if (FLAG_inline_new) {
-    Label undo_allocation;
+    // Try to allocate the object without transitioning into C code. If any of
+    // the preconditions is not met, the code bails out to the runtime call.
+    Label rt_call, allocated;
+    if (FLAG_inline_new) {
+      Label undo_allocation;
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-    ExternalReference debug_step_in_fp =
-        ExternalReference::debug_step_in_fp_address(masm->isolate());
-    __ movq(kScratchRegister, debug_step_in_fp);
-    __ cmpq(Operand(kScratchRegister, 0), Immediate(0));
-    __ j(not_equal, &rt_call);
+      ExternalReference debug_step_in_fp =
+          ExternalReference::debug_step_in_fp_address(masm->isolate());
+      __ movq(kScratchRegister, debug_step_in_fp);
+      __ cmpq(Operand(kScratchRegister, 0), Immediate(0));
+      __ j(not_equal, &rt_call);
 #endif
 
-    // Verified that the constructor is a JSFunction.
-    // Load the initial map and verify that it is in fact a map.
-    // rdi: constructor
-    __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
-    // Will both indicate a NULL and a Smi
-    STATIC_ASSERT(kSmiTag == 0);
-    __ JumpIfSmi(rax, &rt_call);
-    // rdi: constructor
-    // rax: initial map (if proven valid below)
-    __ CmpObjectType(rax, MAP_TYPE, rbx);
-    __ j(not_equal, &rt_call);
+      // Verified that the constructor is a JSFunction.
+      // Load the initial map and verify that it is in fact a map.
+      // rdi: constructor
+      __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+      // Will both indicate a NULL and a Smi
+      ASSERT(kSmiTag == 0);
+      __ JumpIfSmi(rax, &rt_call);
+      // rdi: constructor
+      // rax: initial map (if proven valid below)
+      __ CmpObjectType(rax, MAP_TYPE, rbx);
+      __ j(not_equal, &rt_call);
 
-    // Check that the constructor is not constructing a JSFunction (see comments
-    // in Runtime_NewObject in runtime.cc). In which case the initial map's
-    // instance type would be JS_FUNCTION_TYPE.
-    // rdi: constructor
-    // rax: initial map
-    __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
-    __ j(equal, &rt_call);
+      // Check that the constructor is not constructing a JSFunction (see
+      // comments in Runtime_NewObject in runtime.cc). In which case the
+      // initial map's instance type would be JS_FUNCTION_TYPE.
+      // rdi: constructor
+      // rax: initial map
+      __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
+      __ j(equal, &rt_call);
 
-    if (count_constructions) {
-      Label allocate;
-      // Decrease generous allocation count.
-      __ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-      __ decb(FieldOperand(rcx, SharedFunctionInfo::kConstructionCountOffset));
-      __ j(not_zero, &allocate);
-
-      __ push(rax);
-      __ push(rdi);
-
-      __ push(rdi);  // constructor
-      // The call will replace the stub, so the countdown is only done once.
-      __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
-
-      __ pop(rdi);
-      __ pop(rax);
-
-      __ bind(&allocate);
-    }
-
-    // Now allocate the JSObject on the heap.
-    __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
-    __ shl(rdi, Immediate(kPointerSizeLog2));
-    // rdi: size of new object
-    __ AllocateInNewSpace(rdi,
-                          rbx,
-                          rdi,
-                          no_reg,
-                          &rt_call,
-                          NO_ALLOCATION_FLAGS);
-    // Allocated the JSObject, now initialize the fields.
-    // rax: initial map
-    // rbx: JSObject (not HeapObject tagged - the actual address).
-    // rdi: start of next object
-    __ movq(Operand(rbx, JSObject::kMapOffset), rax);
-    __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
-    __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx);
-    __ movq(Operand(rbx, JSObject::kElementsOffset), rcx);
-    // Set extra fields in the newly allocated object.
-    // rax: initial map
-    // rbx: JSObject
-    // rdi: start of next object
-    { Label loop, entry;
-      // To allow for truncation.
       if (count_constructions) {
-        __ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex);
-      } else {
-        __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+        Label allocate;
+        // Decrease generous allocation count.
+        __ movq(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+        __ decb(FieldOperand(rcx,
+                             SharedFunctionInfo::kConstructionCountOffset));
+        __ j(not_zero, &allocate);
+
+        __ push(rax);
+        __ push(rdi);
+
+        __ push(rdi);  // constructor
+        // The call will replace the stub, so the countdown is only done once.
+        __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+        __ pop(rdi);
+        __ pop(rax);
+
+        __ bind(&allocate);
       }
+
+      // Now allocate the JSObject on the heap.
+      __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
+      __ shl(rdi, Immediate(kPointerSizeLog2));
+      // rdi: size of new object
+      __ AllocateInNewSpace(rdi,
+                            rbx,
+                            rdi,
+                            no_reg,
+                            &rt_call,
+                            NO_ALLOCATION_FLAGS);
+      // Allocated the JSObject, now initialize the fields.
+      // rax: initial map
+      // rbx: JSObject (not HeapObject tagged - the actual address).
+      // rdi: start of next object
+      __ movq(Operand(rbx, JSObject::kMapOffset), rax);
+      __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
+      __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx);
+      __ movq(Operand(rbx, JSObject::kElementsOffset), rcx);
+      // Set extra fields in the newly allocated object.
+      // rax: initial map
+      // rbx: JSObject
+      // rdi: start of next object
       __ lea(rcx, Operand(rbx, JSObject::kHeaderSize));
-      __ jmp(&entry);
-      __ bind(&loop);
-      __ movq(Operand(rcx, 0), rdx);
-      __ addq(rcx, Immediate(kPointerSize));
-      __ bind(&entry);
-      __ cmpq(rcx, rdi);
-      __ j(less, &loop);
-    }
-
-    // Add the object tag to make the JSObject real, so that we can continue and
-    // jump into the continuation code at any time from now on. Any failures
-    // need to undo the allocation, so that the heap is in a consistent state
-    // and verifiable.
-    // rax: initial map
-    // rbx: JSObject
-    // rdi: start of next object
-    __ or_(rbx, Immediate(kHeapObjectTag));
-
-    // Check if a non-empty properties array is needed.
-    // Allocate and initialize a FixedArray if it is.
-    // rax: initial map
-    // rbx: JSObject
-    // rdi: start of next object
-    // Calculate total properties described map.
-    __ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
-    __ movzxbq(rcx, FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
-    __ addq(rdx, rcx);
-    // Calculate unused properties past the end of the in-object properties.
-    __ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
-    __ subq(rdx, rcx);
-    // Done if no extra properties are to be allocated.
-    __ j(zero, &allocated);
-    __ Assert(positive, "Property allocation count failed.");
-
-    // Scale the number of elements by pointer size and add the header for
-    // FixedArrays to the start of the next object calculation from above.
-    // rbx: JSObject
-    // rdi: start of next object (will be start of FixedArray)
-    // rdx: number of elements in properties array
-    __ AllocateInNewSpace(FixedArray::kHeaderSize,
-                          times_pointer_size,
-                          rdx,
-                          rdi,
-                          rax,
-                          no_reg,
-                          &undo_allocation,
-                          RESULT_CONTAINS_TOP);
-
-    // Initialize the FixedArray.
-    // rbx: JSObject
-    // rdi: FixedArray
-    // rdx: number of elements
-    // rax: start of next object
-    __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
-    __ movq(Operand(rdi, HeapObject::kMapOffset), rcx);  // setup the map
-    __ Integer32ToSmi(rdx, rdx);
-    __ movq(Operand(rdi, FixedArray::kLengthOffset), rdx);  // and length
-
-    // Initialize the fields to undefined.
-    // rbx: JSObject
-    // rdi: FixedArray
-    // rax: start of next object
-    // rdx: number of elements
-    { Label loop, entry;
       __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
-      __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize));
-      __ jmp(&entry);
-      __ bind(&loop);
-      __ movq(Operand(rcx, 0), rdx);
-      __ addq(rcx, Immediate(kPointerSize));
-      __ bind(&entry);
-      __ cmpq(rcx, rax);
-      __ j(below, &loop);
+      if (count_constructions) {
+        __ movzxbq(rsi,
+                   FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
+        __ lea(rsi,
+               Operand(rbx, rsi, times_pointer_size, JSObject::kHeaderSize));
+        // rsi: offset of first field after pre-allocated fields
+        if (FLAG_debug_code) {
+          __ cmpq(rsi, rdi);
+          __ Assert(less_equal,
+                    "Unexpected number of pre-allocated property fields.");
+        }
+        __ InitializeFieldsWithFiller(rcx, rsi, rdx);
+        __ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex);
+      }
+      __ InitializeFieldsWithFiller(rcx, rdi, rdx);
+
+      // Add the object tag to make the JSObject real, so that we can continue
+      // and jump into the continuation code at any time from now on. Any
+      // failures need to undo the allocation, so that the heap is in a
+      // consistent state and verifiable.
+      // rax: initial map
+      // rbx: JSObject
+      // rdi: start of next object
+      __ or_(rbx, Immediate(kHeapObjectTag));
+
+      // Check if a non-empty properties array is needed.
+      // Allocate and initialize a FixedArray if it is.
+      // rax: initial map
+      // rbx: JSObject
+      // rdi: start of next object
+      // Calculate total properties described map.
+      __ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
+      __ movzxbq(rcx,
+                 FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
+      __ addq(rdx, rcx);
+      // Calculate unused properties past the end of the in-object properties.
+      __ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
+      __ subq(rdx, rcx);
+      // Done if no extra properties are to be allocated.
+      __ j(zero, &allocated);
+      __ Assert(positive, "Property allocation count failed.");
+
+      // Scale the number of elements by pointer size and add the header for
+      // FixedArrays to the start of the next object calculation from above.
+      // rbx: JSObject
+      // rdi: start of next object (will be start of FixedArray)
+      // rdx: number of elements in properties array
+      __ AllocateInNewSpace(FixedArray::kHeaderSize,
+                            times_pointer_size,
+                            rdx,
+                            rdi,
+                            rax,
+                            no_reg,
+                            &undo_allocation,
+                            RESULT_CONTAINS_TOP);
+
+      // Initialize the FixedArray.
+      // rbx: JSObject
+      // rdi: FixedArray
+      // rdx: number of elements
+      // rax: start of next object
+      __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
+      __ movq(Operand(rdi, HeapObject::kMapOffset), rcx);  // setup the map
+      __ Integer32ToSmi(rdx, rdx);
+      __ movq(Operand(rdi, FixedArray::kLengthOffset), rdx);  // and length
+
+      // Initialize the fields to undefined.
+      // rbx: JSObject
+      // rdi: FixedArray
+      // rax: start of next object
+      // rdx: number of elements
+      { Label loop, entry;
+        __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+        __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize));
+        __ jmp(&entry);
+        __ bind(&loop);
+        __ movq(Operand(rcx, 0), rdx);
+        __ addq(rcx, Immediate(kPointerSize));
+        __ bind(&entry);
+        __ cmpq(rcx, rax);
+        __ j(below, &loop);
+      }
+
+      // Store the initialized FixedArray into the properties field of
+      // the JSObject
+      // rbx: JSObject
+      // rdi: FixedArray
+      __ or_(rdi, Immediate(kHeapObjectTag));  // add the heap tag
+      __ movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
+
+
+      // Continue with JSObject being successfully allocated
+      // rbx: JSObject
+      __ jmp(&allocated);
+
+      // Undo the setting of the new top so that the heap is verifiable. For
+      // example, the map's unused properties potentially do not match the
+      // allocated object's unused properties.
+      // rbx: JSObject (previous new top)
+      __ bind(&undo_allocation);
+      __ UndoAllocationInNewSpace(rbx);
     }
 
-    // Store the initialized FixedArray into the properties field of
-    // the JSObject
-    // rbx: JSObject
-    // rdi: FixedArray
-    __ or_(rdi, Immediate(kHeapObjectTag));  // add the heap tag
-    __ movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
+    // Allocate the new receiver object using the runtime call.
+    // rdi: function (constructor)
+    __ bind(&rt_call);
+    // Must restore rdi (constructor) before calling runtime.
+    __ movq(rdi, Operand(rsp, 0));
+    __ push(rdi);
+    __ CallRuntime(Runtime::kNewObject, 1);
+    __ movq(rbx, rax);  // store result in rbx
 
+    // New object allocated.
+    // rbx: newly allocated object
+    __ bind(&allocated);
+    // Retrieve the function from the stack.
+    __ pop(rdi);
 
-    // Continue with JSObject being successfully allocated
-    // rbx: JSObject
-    __ jmp(&allocated);
+    // Retrieve smi-tagged arguments count from the stack.
+    __ movq(rax, Operand(rsp, 0));
+    __ SmiToInteger32(rax, rax);
 
-    // Undo the setting of the new top so that the heap is verifiable. For
-    // example, the map's unused properties potentially do not match the
-    // allocated objects unused properties.
-    // rbx: JSObject (previous new top)
-    __ bind(&undo_allocation);
-    __ UndoAllocationInNewSpace(rbx);
+    // Push the allocated receiver to the stack. We need two copies
+    // because we may have to return the original one and the calling
+    // conventions dictate that the called function pops the receiver.
+    __ push(rbx);
+    __ push(rbx);
+
+    // Set up a pointer to the last argument.
+    __ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
+
+    // Copy arguments and receiver to the expression stack.
+    Label loop, entry;
+    __ movq(rcx, rax);
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ push(Operand(rbx, rcx, times_pointer_size, 0));
+    __ bind(&entry);
+    __ decq(rcx);
+    __ j(greater_equal, &loop);
+
+    // Call the function.
+    if (is_api_function) {
+      __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+      Handle<Code> code =
+          masm->isolate()->builtins()->HandleApiCallConstruct();
+      ParameterCount expected(0);
+      __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
+                    CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+    } else {
+      ParameterCount actual(rax);
+      __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+                        NullCallWrapper(), CALL_AS_METHOD);
+    }
+
+    // Restore context from the frame.
+    __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+
+    // If the result is an object (in the ECMA sense), we should get rid
+    // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+    // on page 74.
+    Label use_receiver, exit;
+    // If the result is a smi, it is *not* an object in the ECMA sense.
+    __ JumpIfSmi(rax, &use_receiver);
+
+    // If the type of the result (stored in its map) is less than
+    // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
+    __ j(above_equal, &exit);
+
+    // Throw away the result of the constructor invocation and use the
+    // on-stack receiver as the result.
+    __ bind(&use_receiver);
+    __ movq(rax, Operand(rsp, 0));
+
+    // Restore the arguments count and leave the construct frame.
+    __ bind(&exit);
+    __ movq(rbx, Operand(rsp, kPointerSize));  // Get arguments count.
+
+    // Leave construct frame.
   }
 
-  // Allocate the new receiver object using the runtime call.
-  // rdi: function (constructor)
-  __ bind(&rt_call);
-  // Must restore rdi (constructor) before calling runtime.
-  __ movq(rdi, Operand(rsp, 0));
-  __ push(rdi);
-  __ CallRuntime(Runtime::kNewObject, 1);
-  __ movq(rbx, rax);  // store result in rbx
-
-  // New object allocated.
-  // rbx: newly allocated object
-  __ bind(&allocated);
-  // Retrieve the function from the stack.
-  __ pop(rdi);
-
-  // Retrieve smi-tagged arguments count from the stack.
-  __ movq(rax, Operand(rsp, 0));
-  __ SmiToInteger32(rax, rax);
-
-  // Push the allocated receiver to the stack. We need two copies
-  // because we may have to return the original one and the calling
-  // conventions dictate that the called function pops the receiver.
-  __ push(rbx);
-  __ push(rbx);
-
-  // Setup pointer to last argument.
-  __ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
-
-  // Copy arguments and receiver to the expression stack.
-  Label loop, entry;
-  __ movq(rcx, rax);
-  __ jmp(&entry);
-  __ bind(&loop);
-  __ push(Operand(rbx, rcx, times_pointer_size, 0));
-  __ bind(&entry);
-  __ decq(rcx);
-  __ j(greater_equal, &loop);
-
-  // Call the function.
-  if (is_api_function) {
-    __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-    Handle<Code> code =
-        masm->isolate()->builtins()->HandleApiCallConstruct();
-    ParameterCount expected(0);
-    __ InvokeCode(code, expected, expected, RelocInfo::CODE_TARGET,
-                  CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
-  } else {
-    ParameterCount actual(rax);
-    __ InvokeFunction(rdi, actual, CALL_FUNCTION,
-                      NullCallWrapper(), CALL_AS_METHOD);
-  }
-
-  // Restore context from the frame.
-  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-
-  // If the result is an object (in the ECMA sense), we should get rid
-  // of the receiver and use the result; see ECMA-262 section 13.2.2-7
-  // on page 74.
-  Label use_receiver, exit;
-  // If the result is a smi, it is *not* an object in the ECMA sense.
-  __ JumpIfSmi(rax, &use_receiver);
-
-  // If the type of the result (stored in its map) is less than
-  // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
-  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-  __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
-  __ j(above_equal, &exit);
-
-  // Throw away the result of the constructor invocation and use the
-  // on-stack receiver as the result.
-  __ bind(&use_receiver);
-  __ movq(rax, Operand(rsp, 0));
-
-  // Restore the arguments count and leave the construct frame.
-  __ bind(&exit);
-  __ movq(rbx, Operand(rsp, kPointerSize));  // get arguments count
-  __ LeaveConstructFrame();
-
   // Remove caller arguments from the stack and return.
   __ pop(rcx);
   SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
@@ -413,104 +428,108 @@
   // - Object*** argv
   // (see Handle::Invoke in execution.cc).
 
-  // Platform specific argument handling. After this, the stack contains
-  // an internal frame and the pushed function and receiver, and
-  // register rax and rbx holds the argument count and argument array,
-  // while rdi holds the function pointer and rsi the context.
+  // Open a C++ scope for the FrameScope.
+  {
+    // Platform specific argument handling. After this, the stack contains
+    // an internal frame and the pushed function and receiver, and
+    // registers rax and rbx hold the argument count and argument array,
+    // while rdi holds the function pointer and rsi the context.
+
 #ifdef _WIN64
-  // MSVC parameters in:
-  // rcx : entry (ignored)
-  // rdx : function
-  // r8 : receiver
-  // r9 : argc
-  // [rsp+0x20] : argv
+    // MSVC parameters in:
+    // rcx : entry (ignored)
+    // rdx : function
+    // r8 : receiver
+    // r9 : argc
+    // [rsp+0x20] : argv
 
-  // Clear the context before we push it when entering the JS frame.
-  __ Set(rsi, 0);
-  __ EnterInternalFrame();
+    // Clear the context before we push it when entering the internal frame.
+    __ Set(rsi, 0);
+    // Enter an internal frame.
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Load the function context into rsi.
-  __ movq(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
+    // Load the function context into rsi.
+    __ movq(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
 
-  // Push the function and the receiver onto the stack.
-  __ push(rdx);
-  __ push(r8);
+    // Push the function and the receiver onto the stack.
+    __ push(rdx);
+    __ push(r8);
 
-  // Load the number of arguments and setup pointer to the arguments.
-  __ movq(rax, r9);
-  // Load the previous frame pointer to access C argument on stack
-  __ movq(kScratchRegister, Operand(rbp, 0));
-  __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
-  // Load the function pointer into rdi.
-  __ movq(rdi, rdx);
+    // Load the number of arguments and set up a pointer to the arguments.
+    __ movq(rax, r9);
+    // Load the previous frame pointer to access C arguments on the stack.
+    __ movq(kScratchRegister, Operand(rbp, 0));
+    __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
+    // Load the function pointer into rdi.
+    __ movq(rdi, rdx);
 #else  // _WIN64
-  // GCC parameters in:
-  // rdi : entry (ignored)
-  // rsi : function
-  // rdx : receiver
-  // rcx : argc
-  // r8  : argv
+    // GCC parameters in:
+    // rdi : entry (ignored)
+    // rsi : function
+    // rdx : receiver
+    // rcx : argc
+    // r8  : argv
 
-  __ movq(rdi, rsi);
-  // rdi : function
+    __ movq(rdi, rsi);
+    // rdi : function
 
-  // Clear the context before we push it when entering the JS frame.
-  __ Set(rsi, 0);
-  // Enter an internal frame.
-  __ EnterInternalFrame();
+    // Clear the context before we push it when entering the internal frame.
+    __ Set(rsi, 0);
+    // Enter an internal frame.
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Push the function and receiver and setup the context.
-  __ push(rdi);
-  __ push(rdx);
-  __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+    // Push the function and receiver and setup the context.
+    __ push(rdi);
+    __ push(rdx);
+    __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
 
-  // Load the number of arguments and setup pointer to the arguments.
-  __ movq(rax, rcx);
-  __ movq(rbx, r8);
+    // Load the number of arguments and set up a pointer to the arguments.
+    __ movq(rax, rcx);
+    __ movq(rbx, r8);
 #endif  // _WIN64
 
-  // Current stack contents:
-  // [rsp + 2 * kPointerSize ... ]: Internal frame
-  // [rsp + kPointerSize]         : function
-  // [rsp]                        : receiver
-  // Current register contents:
-  // rax : argc
-  // rbx : argv
-  // rsi : context
-  // rdi : function
+    // Current stack contents:
+    // [rsp + 2 * kPointerSize ... ]: Internal frame
+    // [rsp + kPointerSize]         : function
+    // [rsp]                        : receiver
+    // Current register contents:
+    // rax : argc
+    // rbx : argv
+    // rsi : context
+    // rdi : function
 
-  // Copy arguments to the stack in a loop.
-  // Register rbx points to array of pointers to handle locations.
-  // Push the values of these handles.
-  Label loop, entry;
-  __ Set(rcx, 0);  // Set loop variable to 0.
-  __ jmp(&entry);
-  __ bind(&loop);
-  __ movq(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
-  __ push(Operand(kScratchRegister, 0));  // dereference handle
-  __ addq(rcx, Immediate(1));
-  __ bind(&entry);
-  __ cmpq(rcx, rax);
-  __ j(not_equal, &loop);
+    // Copy arguments to the stack in a loop.
+    // Register rbx points to array of pointers to handle locations.
+    // Push the values of these handles.
+    Label loop, entry;
+    __ Set(rcx, 0);  // Set loop variable to 0.
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ movq(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
+    __ push(Operand(kScratchRegister, 0));  // dereference handle
+    __ addq(rcx, Immediate(1));
+    __ bind(&entry);
+    __ cmpq(rcx, rax);
+    __ j(not_equal, &loop);
 
-  // Invoke the code.
-  if (is_construct) {
-    // Expects rdi to hold function pointer.
-    __ Call(masm->isolate()->builtins()->JSConstructCall(),
-            RelocInfo::CODE_TARGET);
-  } else {
-    ParameterCount actual(rax);
-    // Function must be in rdi.
-    __ InvokeFunction(rdi, actual, CALL_FUNCTION,
-                      NullCallWrapper(), CALL_AS_METHOD);
+    // Invoke the code.
+    if (is_construct) {
+      // Expects rdi to hold function pointer.
+      __ Call(masm->isolate()->builtins()->JSConstructCall(),
+              RelocInfo::CODE_TARGET);
+    } else {
+      ParameterCount actual(rax);
+      // Function must be in rdi.
+      __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+                        NullCallWrapper(), CALL_AS_METHOD);
+    }
+    // Exit the internal frame. Notice that this also removes the empty
+    // context and the function left on the stack by the code
+    // invocation.
   }
 
-  // Exit the JS frame. Notice that this also removes the empty
-  // context and the function left on the stack by the code
-  // invocation.
-  __ LeaveInternalFrame();
   // TODO(X64): Is argument correct? Is there a receiver to remove?
-  __ ret(1 * kPointerSize);  // remove receiver
+  __ ret(1 * kPointerSize);  // Remove receiver.
 }
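
The recurring change in this file swaps explicit EnterInternalFrame()/LeaveInternalFrame() bracketing for a block-scoped FrameScope whose destructor emits the matching frame teardown. A minimal, self-contained sketch of that RAII idea follows; the Asm and ScopedInternalFrame names are illustrative stand-ins for this sketch only, not V8's actual API.

    #include <cstdio>

    // Stand-in for the macro assembler; in V8 this role is played by MacroAssembler.
    struct Asm {
      void EnterInternalFrame() { std::puts("emit: enter internal frame"); }
      void LeaveInternalFrame() { std::puts("emit: leave internal frame"); }
    };

    // Illustrative scope guard: entering in the constructor and leaving in the
    // destructor keeps the enter/leave pair balanced on every exit from the block.
    class ScopedInternalFrame {
     public:
      explicit ScopedInternalFrame(Asm* masm) : masm_(masm) {
        masm_->EnterInternalFrame();
      }
      ~ScopedInternalFrame() { masm_->LeaveInternalFrame(); }
     private:
      Asm* masm_;
    };

    int main() {
      Asm masm;
      {
        ScopedInternalFrame scope(&masm);  // Analogous to FrameScope(masm, StackFrame::INTERNAL).
        std::puts("emit: calls that may GC");
      }  // Teardown is emitted here, where the old code called LeaveInternalFrame().
      return 0;
    }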
 
 
@@ -526,23 +545,24 @@
 
 void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Push a copy of the function onto the stack.
-  __ push(rdi);
-  // Push call kind information.
-  __ push(rcx);
+    // Push a copy of the function onto the stack.
+    __ push(rdi);
+    // Push call kind information.
+    __ push(rcx);
 
-  __ push(rdi);  // Function is also the parameter to the runtime call.
-  __ CallRuntime(Runtime::kLazyCompile, 1);
+    __ push(rdi);  // Function is also the parameter to the runtime call.
+    __ CallRuntime(Runtime::kLazyCompile, 1);
 
-  // Restore call kind information.
-  __ pop(rcx);
-  // Restore receiver.
-  __ pop(rdi);
+    // Restore call kind information.
+    __ pop(rcx);
+    // Restore function.
+    __ pop(rdi);
 
-  // Tear down temporary frame.
-  __ LeaveInternalFrame();
+    // Tear down internal frame.
+  }
 
   // Do a tail-call of the compiled function.
   __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
@@ -552,23 +572,24 @@
 
 void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Push a copy of the function onto the stack.
-  __ push(rdi);
-  // Push call kind information.
-  __ push(rcx);
+    // Push a copy of the function onto the stack.
+    __ push(rdi);
+    // Push call kind information.
+    __ push(rcx);
 
-  __ push(rdi);  // Function is also the parameter to the runtime call.
-  __ CallRuntime(Runtime::kLazyRecompile, 1);
+    __ push(rdi);  // Function is also the parameter to the runtime call.
+    __ CallRuntime(Runtime::kLazyRecompile, 1);
 
-  // Restore call kind information.
-  __ pop(rcx);
-  // Restore function.
-  __ pop(rdi);
+    // Restore call kind information.
+    __ pop(rcx);
+    // Restore function.
+    __ pop(rdi);
 
-  // Tear down temporary frame.
-  __ LeaveInternalFrame();
+    // Tear down internal frame.
+  }
 
   // Do a tail-call of the compiled function.
   __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
@@ -579,14 +600,15 @@
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Pass the deoptimization type to the runtime system.
-  __ Push(Smi::FromInt(static_cast<int>(type)));
+    // Pass the deoptimization type to the runtime system.
+    __ Push(Smi::FromInt(static_cast<int>(type)));
 
-  __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
-  // Tear down temporary frame.
-  __ LeaveInternalFrame();
+    __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+    // Tear down internal frame.
+  }
 
   // Get the full codegen state from the stack and untag it.
   __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
@@ -623,9 +645,10 @@
   // the registers without worrying about which of them contain
   // pointers. This seems a bit fragile.
   __ Pushad();
-  __ EnterInternalFrame();
-  __ CallRuntime(Runtime::kNotifyOSR, 0);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ CallRuntime(Runtime::kNotifyOSR, 0);
+  }
   __ Popad();
   __ ret(0);
 }
@@ -695,18 +718,21 @@
     __ j(above_equal, &shift_arguments);
 
     __ bind(&convert_to_object);
-    __ EnterInternalFrame();  // In order to preserve argument count.
-    __ Integer32ToSmi(rax, rax);
-    __ push(rax);
+    {
+      // Enter an internal frame in order to preserve argument count.
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Integer32ToSmi(rax, rax);
+      __ push(rax);
 
-    __ push(rbx);
-    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-    __ movq(rbx, rax);
-    __ Set(rdx, 0);  // indicate regular JS_FUNCTION
+      __ push(rbx);
+      __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+      __ movq(rbx, rax);
+      __ Set(rdx, 0);  // indicate regular JS_FUNCTION
 
-    __ pop(rax);
-    __ SmiToInteger32(rax, rax);
-    __ LeaveInternalFrame();
+      __ pop(rax);
+      __ SmiToInteger32(rax, rax);
+    }
+
     // Restore the function to rdi.
     __ movq(rdi, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
     __ jmp(&patch_receiver, Label::kNear);
@@ -807,160 +833,162 @@
   //  rsp+8: arguments
   // rsp+16: receiver ("this")
   // rsp+24: function
-  __ EnterInternalFrame();
-  // Stack frame:
-  //    rbp: Old base pointer
-  // rbp[1]: return address
-  // rbp[2]: function arguments
-  // rbp[3]: receiver
-  // rbp[4]: function
-  static const int kArgumentsOffset = 2 * kPointerSize;
-  static const int kReceiverOffset = 3 * kPointerSize;
-  static const int kFunctionOffset = 4 * kPointerSize;
+  {
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
+    // Stack frame:
+    //    rbp: Old base pointer
+    // rbp[1]: return address
+    // rbp[2]: function arguments
+    // rbp[3]: receiver
+    // rbp[4]: function
+    static const int kArgumentsOffset = 2 * kPointerSize;
+    static const int kReceiverOffset = 3 * kPointerSize;
+    static const int kFunctionOffset = 4 * kPointerSize;
 
-  __ push(Operand(rbp, kFunctionOffset));
-  __ push(Operand(rbp, kArgumentsOffset));
-  __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+    __ push(Operand(rbp, kFunctionOffset));
+    __ push(Operand(rbp, kArgumentsOffset));
+    __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
 
-  // Check the stack for overflow. We are not trying to catch
-  // interruptions (e.g. debug break and preemption) here, so the "real stack
-  // limit" is checked.
-  Label okay;
-  __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
-  __ movq(rcx, rsp);
-  // Make rcx the space we have left. The stack might already be overflowed
-  // here which will cause rcx to become negative.
-  __ subq(rcx, kScratchRegister);
-  // Make rdx the space we need for the array when it is unrolled onto the
-  // stack.
-  __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
-  // Check if the arguments will overflow the stack.
-  __ cmpq(rcx, rdx);
-  __ j(greater, &okay);  // Signed comparison.
+    // Check the stack for overflow. We are not trying to catch
+    // interruptions (e.g. debug break and preemption) here, so the "real stack
+    // limit" is checked.
+    Label okay;
+    __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
+    __ movq(rcx, rsp);
+    // Make rcx the space we have left. The stack might already be overflowed
+    // here, which will cause rcx to become negative.
+    __ subq(rcx, kScratchRegister);
+    // Make rdx the space we need for the array when it is unrolled onto the
+    // stack.
+    __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
+    // Check if the arguments will overflow the stack.
+    __ cmpq(rcx, rdx);
+    __ j(greater, &okay);  // Signed comparison.
 
-  // Out of stack space.
-  __ push(Operand(rbp, kFunctionOffset));
-  __ push(rax);
-  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
-  __ bind(&okay);
-  // End of stack check.
+    // Out of stack space.
+    __ push(Operand(rbp, kFunctionOffset));
+    __ push(rax);
+    __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+    __ bind(&okay);
+    // End of stack check.
 
-  // Push current index and limit.
-  const int kLimitOffset =
-      StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
-  const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
-  __ push(rax);  // limit
-  __ push(Immediate(0));  // index
+    // Push current index and limit.
+    const int kLimitOffset =
+        StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+    const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
+    __ push(rax);  // limit
+    __ push(Immediate(0));  // index
 
-  // Get the receiver.
-  __ movq(rbx, Operand(rbp, kReceiverOffset));
+    // Get the receiver.
+    __ movq(rbx, Operand(rbp, kReceiverOffset));
 
-  // Check that the function is a JS function (otherwise it must be a proxy).
-  Label push_receiver;
-  __ movq(rdi, Operand(rbp, kFunctionOffset));
-  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
-  __ j(not_equal, &push_receiver);
+    // Check that the function is a JS function (otherwise it must be a proxy).
+    Label push_receiver;
+    __ movq(rdi, Operand(rbp, kFunctionOffset));
+    __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+    __ j(not_equal, &push_receiver);
 
-  // Change context eagerly to get the right global object if necessary.
-  __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+    // Change context eagerly to get the right global object if necessary.
+    __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
 
-  // Do not transform the receiver for strict mode functions.
-  Label call_to_object, use_global_receiver;
-  __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-  __ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset),
-           Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
-  __ j(not_equal, &push_receiver);
+    // Do not transform the receiver for strict mode functions.
+    Label call_to_object, use_global_receiver;
+    __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+    __ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset),
+             Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+    __ j(not_equal, &push_receiver);
 
-  // Do not transform the receiver for natives.
-  __ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset),
-           Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
-  __ j(not_equal, &push_receiver);
+    // Do not transform the receiver for natives.
+    __ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset),
+             Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
+    __ j(not_equal, &push_receiver);
 
-  // Compute the receiver in non-strict mode.
-  __ JumpIfSmi(rbx, &call_to_object, Label::kNear);
-  __ CompareRoot(rbx, Heap::kNullValueRootIndex);
-  __ j(equal, &use_global_receiver);
-  __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
-  __ j(equal, &use_global_receiver);
+    // Compute the receiver in non-strict mode.
+    __ JumpIfSmi(rbx, &call_to_object, Label::kNear);
+    __ CompareRoot(rbx, Heap::kNullValueRootIndex);
+    __ j(equal, &use_global_receiver);
+    __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
+    __ j(equal, &use_global_receiver);
 
-  // If given receiver is already a JavaScript object then there's no
-  // reason for converting it.
-  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-  __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx);
-  __ j(above_equal, &push_receiver);
+    // If the given receiver is already a JavaScript object, there's no
+    // reason to convert it.
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx);
+    __ j(above_equal, &push_receiver);
 
-  // Convert the receiver to an object.
-  __ bind(&call_to_object);
-  __ push(rbx);
-  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
-  __ movq(rbx, rax);
-  __ jmp(&push_receiver, Label::kNear);
+    // Convert the receiver to an object.
+    __ bind(&call_to_object);
+    __ push(rbx);
+    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+    __ movq(rbx, rax);
+    __ jmp(&push_receiver, Label::kNear);
 
-  // Use the current global receiver object as the receiver.
-  __ bind(&use_global_receiver);
-  const int kGlobalOffset =
-      Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
-  __ movq(rbx, FieldOperand(rsi, kGlobalOffset));
-  __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
-  __ movq(rbx, FieldOperand(rbx, kGlobalOffset));
-  __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+    // Use the current global receiver object as the receiver.
+    __ bind(&use_global_receiver);
+    const int kGlobalOffset =
+        Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+    __ movq(rbx, FieldOperand(rsi, kGlobalOffset));
+    __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
+    __ movq(rbx, FieldOperand(rbx, kGlobalOffset));
+    __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
 
-  // Push the receiver.
-  __ bind(&push_receiver);
-  __ push(rbx);
+    // Push the receiver.
+    __ bind(&push_receiver);
+    __ push(rbx);
 
-  // Copy all arguments from the array to the stack.
-  Label entry, loop;
-  __ movq(rax, Operand(rbp, kIndexOffset));
-  __ jmp(&entry);
-  __ bind(&loop);
-  __ movq(rdx, Operand(rbp, kArgumentsOffset));  // load arguments
+    // Copy all arguments from the array to the stack.
+    Label entry, loop;
+    __ movq(rax, Operand(rbp, kIndexOffset));
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ movq(rdx, Operand(rbp, kArgumentsOffset));  // load arguments
 
-  // Use inline caching to speed up access to arguments.
-  Handle<Code> ic =
-      masm->isolate()->builtins()->KeyedLoadIC_Initialize();
-  __ Call(ic, RelocInfo::CODE_TARGET);
-  // It is important that we do not have a test instruction after the
-  // call.  A test instruction after the call is used to indicate that
-  // we have generated an inline version of the keyed load.  In this
-  // case, we know that we are not generating a test instruction next.
+    // Use inline caching to speed up access to arguments.
+    Handle<Code> ic =
+        masm->isolate()->builtins()->KeyedLoadIC_Initialize();
+    __ Call(ic, RelocInfo::CODE_TARGET);
+    // It is important that we do not have a test instruction after the
+    // call.  A test instruction after the call is used to indicate that
+    // we have generated an inline version of the keyed load.  In this
+    // case, we know that we are not generating a test instruction next.
 
-  // Push the nth argument.
-  __ push(rax);
+    // Push the nth argument.
+    __ push(rax);
 
-  // Update the index on the stack and in register rax.
-  __ movq(rax, Operand(rbp, kIndexOffset));
-  __ SmiAddConstant(rax, rax, Smi::FromInt(1));
-  __ movq(Operand(rbp, kIndexOffset), rax);
+    // Update the index on the stack and in register rax.
+    __ movq(rax, Operand(rbp, kIndexOffset));
+    __ SmiAddConstant(rax, rax, Smi::FromInt(1));
+    __ movq(Operand(rbp, kIndexOffset), rax);
 
-  __ bind(&entry);
-  __ cmpq(rax, Operand(rbp, kLimitOffset));
-  __ j(not_equal, &loop);
+    __ bind(&entry);
+    __ cmpq(rax, Operand(rbp, kLimitOffset));
+    __ j(not_equal, &loop);
 
-  // Invoke the function.
-  Label call_proxy;
-  ParameterCount actual(rax);
-  __ SmiToInteger32(rax, rax);
-  __ movq(rdi, Operand(rbp, kFunctionOffset));
-  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
-  __ j(not_equal, &call_proxy);
-  __ InvokeFunction(rdi, actual, CALL_FUNCTION,
-                    NullCallWrapper(), CALL_AS_METHOD);
+    // Invoke the function.
+    Label call_proxy;
+    ParameterCount actual(rax);
+    __ SmiToInteger32(rax, rax);
+    __ movq(rdi, Operand(rbp, kFunctionOffset));
+    __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+    __ j(not_equal, &call_proxy);
+    __ InvokeFunction(rdi, actual, CALL_FUNCTION,
+                      NullCallWrapper(), CALL_AS_METHOD);
 
-  __ LeaveInternalFrame();
-  __ ret(3 * kPointerSize);  // remove this, receiver, and arguments
+    frame_scope.GenerateLeaveFrame();
+    __ ret(3 * kPointerSize);  // remove this, receiver, and arguments
 
-  // Invoke the function proxy.
-  __ bind(&call_proxy);
-  __ push(rdi);  // add function proxy as last argument
-  __ incq(rax);
-  __ Set(rbx, 0);
-  __ SetCallKind(rcx, CALL_AS_METHOD);
-  __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
-  __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
-          RelocInfo::CODE_TARGET);
+    // Invoke the function proxy.
+    __ bind(&call_proxy);
+    __ push(rdi);  // add function proxy as last argument
+    __ incq(rax);
+    __ Set(rbx, 0);
+    __ SetCallKind(rcx, CALL_AS_METHOD);
+    __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
+    __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+            RelocInfo::CODE_TARGET);
 
-  __ LeaveInternalFrame();
+    // Leave internal frame.
+  }
   __ ret(3 * kPointerSize);  // remove this, receiver, and arguments
 }
 
@@ -1520,10 +1548,11 @@
 
   // Pass the function to optimize as the argument to the on-stack
   // replacement runtime function.
-  __ EnterInternalFrame();
-  __ push(rax);
-  __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(rax);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+  }
 
   // If the result was -1 it means that we couldn't optimize the
   // function. Just return and continue in the unoptimized version.
@@ -1541,7 +1570,9 @@
 
   StackCheckStub stub;
   __ TailCallStub(&stub);
-  __ Abort("Unreachable code: returned from tail call.");
+  if (FLAG_debug_code) {
+    __ Abort("Unreachable code: returned from tail call.");
+  }
   __ bind(&ok);
   __ ret(0);
 
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index df4438b..8d9dba7 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -233,6 +233,8 @@
 // The stub expects its argument on the stack and returns its result in tos_:
 // zero for false, and a non-zero value for true.
 void ToBooleanStub::Generate(MacroAssembler* masm) {
+  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
+  // we cannot call anything that could cause a GC from this stub.
   Label patch;
   const Register argument = rax;
   const Register map = rdx;
@@ -328,6 +330,25 @@
 }
 
 
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+  __ PushCallerSaved(save_doubles_);
+  const int argument_count = 1;
+  __ PrepareCallCFunction(argument_count);
+#ifdef _WIN64
+  __ LoadAddress(rcx, ExternalReference::isolate_address());
+#else
+  __ LoadAddress(rdi, ExternalReference::isolate_address());
+#endif
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  __ CallCFunction(
+      ExternalReference::store_buffer_overflow_function(masm->isolate()),
+      argument_count);
+  __ PopCallerSaved(save_doubles_);
+  __ ret(0);
+}
+
+
 void ToBooleanStub::CheckOddball(MacroAssembler* masm,
                                  Type type,
                                  Heap::RootListIndex value,
@@ -622,12 +643,13 @@
     __ jmp(&heapnumber_allocated);
 
     __ bind(&slow_allocate_heapnumber);
-    __ EnterInternalFrame();
-    __ push(rax);
-    __ CallRuntime(Runtime::kNumberAlloc, 0);
-    __ movq(rcx, rax);
-    __ pop(rax);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(rax);
+      __ CallRuntime(Runtime::kNumberAlloc, 0);
+      __ movq(rcx, rax);
+      __ pop(rax);
+    }
     __ bind(&heapnumber_allocated);
     // rcx: allocated 'empty' number
 
@@ -751,6 +773,10 @@
 
 
 void BinaryOpStub::Generate(MacroAssembler* masm) {
+  // Explicitly allow generation of nested stubs. It is safe here because
+  // generation code does not use any raw pointers.
+  AllowStubCallsScope allow_stub_calls(masm, true);
+
   switch (operands_type_) {
     case BinaryOpIC::UNINITIALIZED:
       GenerateTypeTransition(masm);
@@ -1453,11 +1479,12 @@
     __ addq(rsp, Immediate(kDoubleSize));
     // We return the value in xmm1 without adding it to the cache, but
     // we cause a scavenging GC so that future allocations will succeed.
-    __ EnterInternalFrame();
-    // Allocate an unused object bigger than a HeapNumber.
-    __ Push(Smi::FromInt(2 * kDoubleSize));
-    __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      // Allocate an unused object bigger than a HeapNumber.
+      __ Push(Smi::FromInt(2 * kDoubleSize));
+      __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+    }
     __ Ret();
   }
 
@@ -1473,10 +1500,11 @@
     __ bind(&runtime_call);
     __ AllocateHeapNumber(rax, rdi, &skip_cache);
     __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
-    __ EnterInternalFrame();
-    __ push(rax);
-    __ CallRuntime(RuntimeFunction(), 1);
-    __ LeaveInternalFrame();
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(rax);
+      __ CallRuntime(RuntimeFunction(), 1);
+    }
     __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
     __ Ret();
   }
@@ -2670,12 +2698,18 @@
   // Store last subject and last input.
   __ movq(rax, Operand(rsp, kSubjectOffset));
   __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
-  __ movq(rcx, rbx);
-  __ RecordWrite(rcx, RegExpImpl::kLastSubjectOffset, rax, rdi);
+  __ RecordWriteField(rbx,
+                      RegExpImpl::kLastSubjectOffset,
+                      rax,
+                      rdi,
+                      kDontSaveFPRegs);
   __ movq(rax, Operand(rsp, kSubjectOffset));
   __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
-  __ movq(rcx, rbx);
-  __ RecordWrite(rcx, RegExpImpl::kLastInputOffset, rax, rdi);
+  __ RecordWriteField(rbx,
+                      RegExpImpl::kLastInputOffset,
+                      rax,
+                      rdi,
+                      kDontSaveFPRegs);
 
   // Get the static offsets vector filled by the native regexp code.
   __ LoadAddress(rcx,
@@ -3231,6 +3265,22 @@
 }
 
 
+void CallFunctionStub::FinishCode(Code* code) {
+  code->set_has_function_cache(false);
+}
+
+
+void CallFunctionStub::Clear(Heap* heap, Address address) {
+  UNREACHABLE();
+}
+
+
+Object* CallFunctionStub::GetCachedValue(Address address) {
+  UNREACHABLE();
+  return NULL;
+}
+
+
 void CallFunctionStub::Generate(MacroAssembler* masm) {
   Label slow, non_function;
 
@@ -3319,6 +3369,35 @@
 }
 
 
+bool CEntryStub::IsPregenerated() {
+#ifdef _WIN64
+  return result_size_ == 1;
+#else
+  return true;
+#endif
+}
+
+
+void CodeStub::GenerateStubsAheadOfTime() {
+  CEntryStub::GenerateAheadOfTime();
+  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
+  // It is important that the store buffer overflow stubs are generated first.
+  RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+}
+
+
+void CodeStub::GenerateFPStubs() {
+}
+
+
+void CEntryStub::GenerateAheadOfTime() {
+  CEntryStub stub(1, kDontSaveFPRegs);
+  stub.GetCode()->set_is_pregenerated(true);
+  CEntryStub save_doubles(1, kSaveFPRegs);
+  save_doubles.GetCode()->set_is_pregenerated(true);
+}
+
+
 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
   // Throw exception in eax.
   __ Throw(rax);
@@ -3757,6 +3836,7 @@
     __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
     __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
   } else {
+    // Get return address and delta to inlined map check.
     __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
     __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
     __ movq(Operand(kScratchRegister, kOffsetToMapCheckValue), rax);
@@ -3791,9 +3871,11 @@
     __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
   } else {
     // Store offset of true in the root array at the inline check site.
-    ASSERT((Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias
-        == 0xB0 - 0x100);
-    __ movl(rax, Immediate(0xB0));  // TrueValue is at -10 * kPointerSize.
+    int true_offset = 0x100 +
+        (Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
+    // Assert it is a 1-byte signed value.
+    ASSERT(true_offset >= 0 && true_offset < 0x100);
+    __ movl(rax, Immediate(true_offset));
     __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
     __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
     __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
@@ -3812,9 +3894,11 @@
     __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
   } else {
     // Store offset of false in the root array at the inline check site.
-    ASSERT((Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias
-        == 0xB8 - 0x100);
-    __ movl(rax, Immediate(0xB8));  // FalseValue is at -9 * kPointerSize.
+    int false_offset = 0x100 +
+        (Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
+    // Assert it is a 1-byte signed value.
+    ASSERT(false_offset >= 0 && false_offset < 0x100);
+    __ movl(rax, Immediate(false_offset));
     __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
     __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
     __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
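
The two hunks above replace the hard-coded immediates 0xB0 and 0xB8 with offsets computed from the root-array layout; the deleted asserts pin down that arithmetic, so the emitted bytes are unchanged. A small compile-time check of the computation, using deltas taken from those deleted asserts rather than V8's real constants (the kTrueRootDelta/kFalseRootDelta names are local to this sketch):

    // Deltas implied by the deleted asserts:
    //   (Heap::kTrueValueRootIndex  << kPointerSizeLog2) - kRootRegisterBias == 0xB0 - 0x100
    //   (Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias == 0xB8 - 0x100
    constexpr int kTrueRootDelta  = 0xB0 - 0x100;
    constexpr int kFalseRootDelta = 0xB8 - 0x100;

    // Mirrors the new true_offset / false_offset computation.
    constexpr int true_offset  = 0x100 + kTrueRootDelta;
    constexpr int false_offset = 0x100 + kFalseRootDelta;

    static_assert(true_offset == 0xB0 && false_offset == 0xB8,
                  "computed offsets match the old hard-coded immediates");
    static_assert(true_offset >= 0 && true_offset < 0x100 &&
                  false_offset >= 0 && false_offset < 0x100,
                  "offsets fit the one-byte movb immediates, as the new ASSERTs require");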
@@ -5271,12 +5355,13 @@
   // Call the runtime system in a fresh internal frame.
   ExternalReference miss =
       ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
-  __ EnterInternalFrame();
-  __ push(rdx);
-  __ push(rax);
-  __ Push(Smi::FromInt(op_));
-  __ CallExternalReference(miss, 3);
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(rdx);
+    __ push(rax);
+    __ Push(Smi::FromInt(op_));
+    __ CallExternalReference(miss, 3);
+  }
 
   // Compute the entry point of the rewritten stub.
   __ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
@@ -5407,6 +5492,8 @@
 
 
 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
+  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
+  // we cannot call anything that could cause a GC from this stub.
   // Stack frame on entry:
   //  esp[0 * kPointerSize]: return address.
   //  esp[1 * kPointerSize]: key's hash.
@@ -5492,6 +5579,279 @@
 }
 
 
+struct AheadOfTimeWriteBarrierStubList {
+  Register object, value, address;
+  RememberedSetAction action;
+};
+
+
+struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
+  // Used in RegExpExecStub.
+  { rbx, rax, rdi, EMIT_REMEMBERED_SET },
+  // Used in CompileArrayPushCall.
+  { rbx, rcx, rdx, EMIT_REMEMBERED_SET },
+  // Used in CompileStoreGlobal.
+  { rbx, rcx, rdx, OMIT_REMEMBERED_SET },
+  // Used in StoreStubCompiler::CompileStoreField and
+  // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
+  { rdx, rcx, rbx, EMIT_REMEMBERED_SET },
+  // GenerateStoreField calls the stub with two different permutations of
+  // registers.  This is the second.
+  { rbx, rcx, rdx, EMIT_REMEMBERED_SET },
+  // StoreIC::GenerateNormal via GenerateDictionaryStore.
+  { rbx, r8, r9, EMIT_REMEMBERED_SET },
+  // KeyedStoreIC::GenerateGeneric.
+  { rbx, rdx, rcx, EMIT_REMEMBERED_SET},
+  // KeyedStoreStubCompiler::GenerateStoreFastElement.
+  { rdi, rdx, rcx, EMIT_REMEMBERED_SET},
+  // Null termination.
+  { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
+};
+
+
+bool RecordWriteStub::IsPregenerated() {
+  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+       !entry->object.is(no_reg);
+       entry++) {
+    if (object_.is(entry->object) &&
+        value_.is(entry->value) &&
+        address_.is(entry->address) &&
+        remembered_set_action_ == entry->action &&
+        save_fp_regs_mode_ == kDontSaveFPRegs) {
+      return true;
+    }
+  }
+  return false;
+}
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
+  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
+  stub1.GetCode()->set_is_pregenerated(true);
+  StoreBufferOverflowStub stub2(kSaveFPRegs);
+  stub2.GetCode()->set_is_pregenerated(true);
+}
+
+
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+       !entry->object.is(no_reg);
+       entry++) {
+    RecordWriteStub stub(entry->object,
+                         entry->value,
+                         entry->address,
+                         entry->action,
+                         kDontSaveFPRegs);
+    stub.GetCode()->set_is_pregenerated(true);
+  }
+}
+
+
+// Takes its input in three registers: address_, value_, and object_.  A pointer
+// to the value has just been written into the object; this stub now makes sure
+// we keep the GC informed.  The word in the object where the value has been
+// written is in the address register.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+  Label skip_to_incremental_noncompacting;
+  Label skip_to_incremental_compacting;
+
+  // The first two instructions are generated with labels so as to get the
+  // offset fixed up correctly by the bind(Label*) call.  We patch it back and
+  // forth between a compare instruction (a nop in this position) and the
+  // real branch when we start and stop incremental heap marking.
+  // See RecordWriteStub::Patch for details.
+  __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
+  __ jmp(&skip_to_incremental_compacting, Label::kFar);
+
+  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ ret(0);
+  }
+
+  __ bind(&skip_to_incremental_noncompacting);
+  GenerateIncremental(masm, INCREMENTAL);
+
+  __ bind(&skip_to_incremental_compacting);
+  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
+  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
+  masm->set_byte_at(0, kTwoByteNopInstruction);
+  masm->set_byte_at(2, kFiveByteNopInstruction);
+}
+
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+  regs_.Save(masm);
+
+  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+    Label dont_need_remembered_set;
+
+    __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
+    __ JumpIfNotInNewSpace(regs_.scratch0(),
+                           regs_.scratch0(),
+                           &dont_need_remembered_set);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch0(),
+                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
+                     not_zero,
+                     &dont_need_remembered_set);
+
+    // First notify the incremental marker if necessary, then update the
+    // remembered set.
+    CheckNeedsToInformIncrementalMarker(
+        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
+    InformIncrementalMarker(masm, mode);
+    regs_.Restore(masm);
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+
+    __ bind(&dont_need_remembered_set);
+  }
+
+  CheckNeedsToInformIncrementalMarker(
+      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
+  InformIncrementalMarker(masm, mode);
+  regs_.Restore(masm);
+  __ ret(0);
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+#ifdef _WIN64
+  Register arg3 = r8;
+  Register arg2 = rdx;
+  Register arg1 = rcx;
+#else
+  Register arg3 = rdx;
+  Register arg2 = rsi;
+  Register arg1 = rdi;
+#endif
+  Register address =
+      arg1.is(regs_.address()) ? kScratchRegister : regs_.address();
+  ASSERT(!address.is(regs_.object()));
+  ASSERT(!address.is(arg1));
+  __ Move(address, regs_.address());
+  __ Move(arg1, regs_.object());
+  if (mode == INCREMENTAL_COMPACTION) {
+    // TODO(gc): Can we just set arg2 to the address at the beginning?
+    __ Move(arg2, address);
+  } else {
+    ASSERT(mode == INCREMENTAL);
+    __ movq(arg2, Operand(address, 0));
+  }
+  __ LoadAddress(arg3, ExternalReference::isolate_address());
+  int argument_count = 3;
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  __ PrepareCallCFunction(argument_count);
+  if (mode == INCREMENTAL_COMPACTION) {
+    __ CallCFunction(
+        ExternalReference::incremental_evacuation_record_write_function(
+            masm->isolate()),
+        argument_count);
+  } else {
+    ASSERT(mode == INCREMENTAL);
+    __ CallCFunction(
+        ExternalReference::incremental_marking_record_write_function(
+            masm->isolate()),
+        argument_count);
+  }
+  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+}
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+    MacroAssembler* masm,
+    OnNoNeedToInformIncrementalMarker on_no_need,
+    Mode mode) {
+  Label on_black;
+  Label need_incremental;
+  Label need_incremental_pop_object;
+
+  // Let's look at the color of the object: if it is not black, we don't have
+  // to inform the incremental marker.
+  __ JumpIfBlack(regs_.object(),
+                 regs_.scratch0(),
+                 regs_.scratch1(),
+                 &on_black,
+                 Label::kNear);
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ ret(0);
+  }
+
+  __ bind(&on_black);
+
+  // Get the value from the slot.
+  __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
+
+  if (mode == INCREMENTAL_COMPACTION) {
+    Label ensure_not_white;
+
+    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kEvacuationCandidateMask,
+                     zero,
+                     &ensure_not_white,
+                     Label::kNear);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
+                     zero,
+                     &need_incremental);
+
+    __ bind(&ensure_not_white);
+  }
+
+  // We need an extra register for this, so we push the object register
+  // temporarily.
+  __ push(regs_.object());
+  __ EnsureNotWhite(regs_.scratch0(),  // The value.
+                    regs_.scratch1(),  // Scratch.
+                    regs_.object(),  // Scratch.
+                    &need_incremental_pop_object,
+                    Label::kNear);
+  __ pop(regs_.object());
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(object_,
+                           address_,
+                           value_,
+                           save_fp_regs_mode_,
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ ret(0);
+  }
+
+  __ bind(&need_incremental_pop_object);
+  __ pop(regs_.object());
+
+  __ bind(&need_incremental);
+
+  // Fall through when we need to inform the incremental marker.
+}
+
+
 #undef __
 
 } }  // namespace v8::internal
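
The new RecordWriteStub (declared in the header diff that follows) records whether incremental marking is active in the stub's first bytes: two forward jumps that are patched to same-length cmp-immediate "nops" while only the store buffer is needed. Below is a self-contained sketch of the decoding, mirroring RecordWriteStub::GetMode(); the DecodeMode name and the raw uint8_t signature are illustrative for this sketch, not V8's API.

    #include <cstdint>

    enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

    // code points at the stub's first instruction (the stub emits a 2-byte
    // jump/nop at offset 0 and a 5-byte jump/nop at offset 2).
    Mode DecodeMode(const uint8_t* code) {
      const uint8_t kTwoByteJump  = 0xeb;  // jmp rel8
      const uint8_t kFiveByteJump = 0xe9;  // jmp rel32
      if (code[0] == kTwoByteJump) return INCREMENTAL;
      // Otherwise code[0] is the 2-byte nop (0x3c, cmpb al, #imm8) and the
      // 5-byte slot starts at offset 2.
      if (code[2] == kFiveByteJump) return INCREMENTAL_COMPACTION;
      // code[2] is the 5-byte nop (0x3d, cmpl eax, #imm32): both jumps disabled.
      return STORE_BUFFER_ONLY;
    }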
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
index 4058118..698ba40 100644
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -59,6 +59,32 @@
 };
 
 
+class StoreBufferOverflowStub: public CodeStub {
+ public:
+  explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
+      : save_doubles_(save_fp) { }
+
+  void Generate(MacroAssembler* masm);
+
+  virtual bool IsPregenerated() { return true; }
+  static void GenerateFixedRegStubsAheadOfTime();
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+  SaveFPRegsMode save_doubles_;
+
+  Major MajorKey() { return StoreBufferOverflow; }
+  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+};
+
+
+// Flags that indicate how to generate code for GenericBinaryOpStub.
+enum GenericBinaryFlags {
+  NO_GENERIC_BINARY_FLAGS = 0,
+  NO_SMI_CODE_IN_STUB = 1 << 0  // Omit smi code in stub.
+};
+
+
 class UnaryOpStub: public CodeStub {
  public:
   UnaryOpStub(Token::Value op,
@@ -413,6 +439,8 @@
                                      Register r0,
                                      Register r1);
 
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
  private:
   static const int kInlinedProbes = 4;
   static const int kTotalProbes = 20;
@@ -425,7 +453,7 @@
       StringDictionary::kHeaderSize +
       StringDictionary::kElementsStartIndex * kPointerSize;
 
-  Major MajorKey() { return StringDictionaryNegativeLookup; }
+  Major MajorKey() { return StringDictionaryLookup; }
 
   int MinorKey() {
     return DictionaryBits::encode(dictionary_.code()) |
@@ -446,6 +474,253 @@
 };
 
 
+class RecordWriteStub: public CodeStub {
+ public:
+  RecordWriteStub(Register object,
+                  Register value,
+                  Register address,
+                  RememberedSetAction remembered_set_action,
+                  SaveFPRegsMode fp_mode)
+      : object_(object),
+        value_(value),
+        address_(address),
+        remembered_set_action_(remembered_set_action),
+        save_fp_regs_mode_(fp_mode),
+        regs_(object,   // An input reg.
+              address,  // An input reg.
+              value) {  // One scratch reg.
+  }
+
+  enum Mode {
+    STORE_BUFFER_ONLY,
+    INCREMENTAL,
+    INCREMENTAL_COMPACTION
+  };
+
+  virtual bool IsPregenerated();
+  static void GenerateFixedRegStubsAheadOfTime();
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
+  static const byte kTwoByteNopInstruction = 0x3c;  // Cmpb al, #imm8.
+  static const byte kTwoByteJumpInstruction = 0xeb;  // Jmp #imm8.
+
+  static const byte kFiveByteNopInstruction = 0x3d;  // Cmpl eax, #imm32.
+  static const byte kFiveByteJumpInstruction = 0xe9;  // Jmp #imm32.
+
+  static Mode GetMode(Code* stub) {
+    byte first_instruction = stub->instruction_start()[0];
+    byte second_instruction = stub->instruction_start()[2];
+
+    if (first_instruction == kTwoByteJumpInstruction) {
+      return INCREMENTAL;
+    }
+
+    ASSERT(first_instruction == kTwoByteNopInstruction);
+
+    if (second_instruction == kFiveByteJumpInstruction) {
+      return INCREMENTAL_COMPACTION;
+    }
+
+    ASSERT(second_instruction == kFiveByteNopInstruction);
+
+    return STORE_BUFFER_ONLY;
+  }
+
+  static void Patch(Code* stub, Mode mode) {
+    switch (mode) {
+      case STORE_BUFFER_ONLY:
+        ASSERT(GetMode(stub) == INCREMENTAL ||
+               GetMode(stub) == INCREMENTAL_COMPACTION);
+        stub->instruction_start()[0] = kTwoByteNopInstruction;
+        stub->instruction_start()[2] = kFiveByteNopInstruction;
+        break;
+      case INCREMENTAL:
+        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        stub->instruction_start()[0] = kTwoByteJumpInstruction;
+        break;
+      case INCREMENTAL_COMPACTION:
+        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        stub->instruction_start()[0] = kTwoByteNopInstruction;
+        stub->instruction_start()[2] = kFiveByteJumpInstruction;
+        break;
+    }
+    ASSERT(GetMode(stub) == mode);
+    CPU::FlushICache(stub->instruction_start(), 7);
+  }
+
+ private:
+  // This is a helper class for freeing up three scratch registers, where the
+  // third is always rcx (needed for shift operations).  The inputs are two
+  // registers that must be preserved and one scratch register provided by the
+  // caller.
+  class RegisterAllocation {
+   public:
+    RegisterAllocation(Register object,
+                       Register address,
+                       Register scratch0)
+        : object_orig_(object),
+          address_orig_(address),
+          scratch0_orig_(scratch0),
+          object_(object),
+          address_(address),
+          scratch0_(scratch0) {
+      ASSERT(!AreAliased(scratch0, object, address, no_reg));
+      scratch1_ = GetRegThatIsNotRcxOr(object_, address_, scratch0_);
+      if (scratch0.is(rcx)) {
+        scratch0_ = GetRegThatIsNotRcxOr(object_, address_, scratch1_);
+      }
+      if (object.is(rcx)) {
+        object_ = GetRegThatIsNotRcxOr(address_, scratch0_, scratch1_);
+      }
+      if (address.is(rcx)) {
+        address_ = GetRegThatIsNotRcxOr(object_, scratch0_, scratch1_);
+      }
+      ASSERT(!AreAliased(scratch0_, object_, address_, rcx));
+    }
+
+    void Save(MacroAssembler* masm) {
+      ASSERT(!address_orig_.is(object_));
+      ASSERT(object_.is(object_orig_) || address_.is(address_orig_));
+      ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
+      ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
+      ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
+      // We don't have to save scratch0_orig_ because it was given to us as
+      // a scratch register.  But if we had to switch to a different reg then
+      // we should save the new scratch0_.
+      if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
+      if (!rcx.is(scratch0_orig_) &&
+          !rcx.is(object_orig_) &&
+          !rcx.is(address_orig_)) {
+        masm->push(rcx);
+      }
+      masm->push(scratch1_);
+      if (!address_.is(address_orig_)) {
+        masm->push(address_);
+        masm->movq(address_, address_orig_);
+      }
+      if (!object_.is(object_orig_)) {
+        masm->push(object_);
+        masm->movq(object_, object_orig_);
+      }
+    }
+
+    void Restore(MacroAssembler* masm) {
+      // These will have been preserved the entire time, so we just need to move
+      // them back.  Only in one case is the orig_ reg different from the plain
+      // one, since only one of them can alias with rcx.
+      if (!object_.is(object_orig_)) {
+        masm->movq(object_orig_, object_);
+        masm->pop(object_);
+      }
+      if (!address_.is(address_orig_)) {
+        masm->movq(address_orig_, address_);
+        masm->pop(address_);
+      }
+      masm->pop(scratch1_);
+      if (!rcx.is(scratch0_orig_) &&
+          !rcx.is(object_orig_) &&
+          !rcx.is(address_orig_)) {
+        masm->pop(rcx);
+      }
+      if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
+    }
+
+    // If we have to call into C then we need to save and restore all caller-
+    // saved registers that were not already preserved.
+
+    // The three scratch registers (incl. rcx) will be restored by other means
+    // so we don't bother pushing them here.  Rbx, rbp and r12-r15 are
+    // callee-saved and don't need to be preserved.
+    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+      masm->PushCallerSaved(mode, scratch0_, scratch1_, rcx);
+    }
+
+    inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
+                                           SaveFPRegsMode mode) {
+      masm->PopCallerSaved(mode, scratch0_, scratch1_, rcx);
+    }
+
+    inline Register object() { return object_; }
+    inline Register address() { return address_; }
+    inline Register scratch0() { return scratch0_; }
+    inline Register scratch1() { return scratch1_; }
+
+   private:
+    Register object_orig_;
+    Register address_orig_;
+    Register scratch0_orig_;
+    Register object_;
+    Register address_;
+    Register scratch0_;
+    Register scratch1_;
+    // Third scratch register is always rcx.
+
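+    // Returns an allocatable register that is neither rcx nor any of the
+    // three given registers.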
+    Register GetRegThatIsNotRcxOr(Register r1,
+                                  Register r2,
+                                  Register r3) {
+      for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+        Register candidate = Register::FromAllocationIndex(i);
+        if (candidate.is(rcx)) continue;
+        if (candidate.is(r1)) continue;
+        if (candidate.is(r2)) continue;
+        if (candidate.is(r3)) continue;
+        return candidate;
+      }
+      UNREACHABLE();
+      return no_reg;
+    }
+    friend class RecordWriteStub;
+  };
+
+  enum OnNoNeedToInformIncrementalMarker {
+    kReturnOnNoNeedToInformIncrementalMarker,
+    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+  };
+
+  void Generate(MacroAssembler* masm);
+  void GenerateIncremental(MacroAssembler* masm, Mode mode);
+  void CheckNeedsToInformIncrementalMarker(
+      MacroAssembler* masm,
+      OnNoNeedToInformIncrementalMarker on_no_need,
+      Mode mode);
+  void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+
+  Major MajorKey() { return RecordWrite; }
+
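+  // Encode the register assignment and the flags so that each distinct
+  // combination is keyed to its own stub.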
+  int MinorKey() {
+    return ObjectBits::encode(object_.code()) |
+        ValueBits::encode(value_.code()) |
+        AddressBits::encode(address_.code()) |
+        RememberedSetActionBits::encode(remembered_set_action_) |
+        SaveFPRegsModeBits::encode(save_fp_regs_mode_);
+  }
+
+  bool MustBeInStubCache() {
+    // All stubs must be registered in the stub cache; otherwise the
+    // incremental marker would not be able to find and patch them.
+    return true;
+  }
+
+  void Activate(Code* code) {
+    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+  }
+
+  class ObjectBits: public BitField<int, 0, 4> {};
+  class ValueBits: public BitField<int, 4, 4> {};
+  class AddressBits: public BitField<int, 8, 4> {};
+  class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
+  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
+
+  Register object_;
+  Register value_;
+  Register address_;
+  RememberedSetAction remembered_set_action_;
+  SaveFPRegsMode save_fp_regs_mode_;
+  Label slow_;
+  RegisterAllocation regs_;
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_X64_CODE_STUBS_X64_H_
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 507bbd4..f6102c7 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -38,12 +38,16 @@
 // Platform-specific RuntimeCallHelper functions.
 
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
-  masm->EnterInternalFrame();
+  masm->EnterFrame(StackFrame::INTERNAL);
+  ASSERT(!masm->has_frame());
+  masm->set_has_frame(true);
 }
 
 
 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-  masm->LeaveInternalFrame();
+  masm->LeaveFrame(StackFrame::INTERNAL);
+  ASSERT(masm->has_frame());
+  masm->set_has_frame(false);
 }
 
 
diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc
index 423e6f2..2149fc2 100644
--- a/src/x64/debug-x64.cc
+++ b/src/x64/debug-x64.cc
@@ -100,65 +100,66 @@
                                           RegList non_object_regs,
                                           bool convert_call_to_jmp) {
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Store the registers containing live values on the expression stack to
-  // make sure that these are correctly updated during GC. Non object values
-  // are stored as as two smis causing it to be untouched by GC.
-  ASSERT((object_regs & ~kJSCallerSaved) == 0);
-  ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
-  ASSERT((object_regs & non_object_regs) == 0);
-  for (int i = 0; i < kNumJSCallerSaved; i++) {
-    int r = JSCallerSavedCode(i);
-    Register reg = { r };
-    ASSERT(!reg.is(kScratchRegister));
-    if ((object_regs & (1 << r)) != 0) {
-      __ push(reg);
+    // Store the registers containing live values on the expression stack to
+    // make sure that these are correctly updated during GC. Non object values
+    // are stored as two smis, causing them to be untouched by GC.
+    ASSERT((object_regs & ~kJSCallerSaved) == 0);
+    ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
+    ASSERT((object_regs & non_object_regs) == 0);
+    for (int i = 0; i < kNumJSCallerSaved; i++) {
+      int r = JSCallerSavedCode(i);
+      Register reg = { r };
+      ASSERT(!reg.is(kScratchRegister));
+      if ((object_regs & (1 << r)) != 0) {
+        __ push(reg);
+      }
+      // Store the 64-bit value as two smis.
+      if ((non_object_regs & (1 << r)) != 0) {
+        __ movq(kScratchRegister, reg);
+        __ Integer32ToSmi(reg, reg);
+        __ push(reg);
+        __ sar(kScratchRegister, Immediate(32));
+        __ Integer32ToSmi(kScratchRegister, kScratchRegister);
+        __ push(kScratchRegister);
+      }
     }
-    // Store the 64-bit value as two smis.
-    if ((non_object_regs & (1 << r)) != 0) {
-      __ movq(kScratchRegister, reg);
-      __ Integer32ToSmi(reg, reg);
-      __ push(reg);
-      __ sar(kScratchRegister, Immediate(32));
-      __ Integer32ToSmi(kScratchRegister, kScratchRegister);
-      __ push(kScratchRegister);
-    }
-  }
 
 #ifdef DEBUG
-  __ RecordComment("// Calling from debug break to runtime - come in - over");
+    __ RecordComment("// Calling from debug break to runtime - come in - over");
 #endif
-  __ Set(rax, 0);  // No arguments (argc == 0).
-  __ movq(rbx, ExternalReference::debug_break(masm->isolate()));
+    __ Set(rax, 0);  // No arguments (argc == 0).
+    __ movq(rbx, ExternalReference::debug_break(masm->isolate()));
 
-  CEntryStub ceb(1);
-  __ CallStub(&ceb);
+    CEntryStub ceb(1);
+    __ CallStub(&ceb);
 
-  // Restore the register values from the expression stack.
-  for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
-    int r = JSCallerSavedCode(i);
-    Register reg = { r };
-    if (FLAG_debug_code) {
-      __ Set(reg, kDebugZapValue);
+    // Restore the register values from the expression stack.
+    for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
+      int r = JSCallerSavedCode(i);
+      Register reg = { r };
+      if (FLAG_debug_code) {
+        __ Set(reg, kDebugZapValue);
+      }
+      if ((object_regs & (1 << r)) != 0) {
+        __ pop(reg);
+      }
+      // Reconstruct the 64-bit value from two smis.
+      if ((non_object_regs & (1 << r)) != 0) {
+        __ pop(kScratchRegister);
+        __ SmiToInteger32(kScratchRegister, kScratchRegister);
+        __ shl(kScratchRegister, Immediate(32));
+        __ pop(reg);
+        __ SmiToInteger32(reg, reg);
+        __ or_(reg, kScratchRegister);
+      }
     }
-    if ((object_regs & (1 << r)) != 0) {
-      __ pop(reg);
-    }
-    // Reconstruct the 64-bit value from two smis.
-    if ((non_object_regs & (1 << r)) != 0) {
-      __ pop(kScratchRegister);
-      __ SmiToInteger32(kScratchRegister, kScratchRegister);
-      __ shl(kScratchRegister, Immediate(32));
-      __ pop(reg);
-      __ SmiToInteger32(reg, reg);
-      __ or_(reg, kScratchRegister);
-    }
+
+    // Get rid of the internal frame.
   }
 
-  // Get rid of the internal frame.
-  __ LeaveInternalFrame();
-
   // If this call did not replace a call but patched other code then there will
   // be an unwanted return address left on the stack. Here we get rid of that.
   if (convert_call_to_jmp) {
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index b52e659..b7e334e 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -197,13 +197,19 @@
   // Destroy the code which is not supposed to run again.
   ZapCodeRange(previous_pc, jump_table_address);
 #endif
+  Isolate* isolate = code->GetIsolate();
 
   // Add the deoptimizing code to the list.
   DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
-  DeoptimizerData* data = code->GetIsolate()->deoptimizer_data();
+  DeoptimizerData* data = isolate->deoptimizer_data();
   node->set_next(data->deoptimizing_code_list_);
   data->deoptimizing_code_list_ = node;
 
+  // We might be in the middle of incremental marking with compaction.
+  // Tell the collector to treat this code object in a special way and
+  // ignore all slots that might have been recorded on it.
+  isolate->heap()->mark_compact_collector()->InvalidateCode(code);
+
   // Set the code for the function to non-optimized version.
   function->ReplaceCode(function->shared()->code());
 
@@ -220,7 +226,8 @@
 }
 
 
-void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
+void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
+                                        Address pc_after,
                                         Code* check_code,
                                         Code* replacement_code) {
   Address call_target_address = pc_after - kIntSize;
@@ -250,6 +257,13 @@
   *(call_target_address - 2) = 0x90;  // nop
   Assembler::set_target_address_at(call_target_address,
                                    replacement_code->entry());
+
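+  // Record the write of the new code target so that incremental marking
+  // sees the patched call site in the unoptimized code.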
+  RelocInfo rinfo(call_target_address,
+                  RelocInfo::CODE_TARGET,
+                  0,
+                  unoptimized_code);
+  unoptimized_code->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+      unoptimized_code, &rinfo, replacement_code);
 }
 
 
@@ -268,6 +282,8 @@
   *(call_target_address - 2) = 0x07;  // offset
   Assembler::set_target_address_at(call_target_address,
                                    check_code->entry());
+  check_code->GetHeap()->incremental_marking()->
+      RecordCodeTargetPatch(call_target_address, check_code);
 }
 
 
@@ -713,7 +729,10 @@
 
   Isolate* isolate = masm()->isolate();
 
-  __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+  {
+    AllowExternalCallThatCantCauseGC scope(masm());
+    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+  }
   // Preserve deoptimizer object in register rax and get the input
   // frame descriptor pointer.
   __ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
@@ -759,8 +778,11 @@
   __ PrepareCallCFunction(2);
   __ movq(arg1, rax);
   __ LoadAddress(arg2, ExternalReference::isolate_address());
-  __ CallCFunction(
-      ExternalReference::compute_output_frames_function(isolate), 2);
+  {
+    AllowExternalCallThatCantCauseGC scope(masm());
+    __ CallCFunction(
+        ExternalReference::compute_output_frames_function(isolate), 2);
+  }
   __ pop(rax);
 
   // Replace the current frame with the output frames.
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index bd3e769..1d9b8ff 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -147,6 +147,11 @@
     __ bind(&ok);
   }
 
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done below).
+  FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
   __ push(rbp);  // Caller's frame pointer.
   __ movq(rbp, rsp);
   __ push(rsi);  // Callee's context.
@@ -195,11 +200,9 @@
         // Store it in the context.
         int context_offset = Context::SlotOffset(var->index());
         __ movq(Operand(rsi, context_offset), rax);
-        // Update the write barrier. This clobbers all involved
-        // registers, so we have use a third register to avoid
-        // clobbering rsi.
-        __ movq(rcx, rsi);
-        __ RecordWrite(rcx, context_offset, rax, rbx);
+        // Update the write barrier.  This clobbers rax and rbx.
+        __ RecordWriteContextSlot(
+            rsi, context_offset, rax, rbx, kDontSaveFPRegs);
       }
     }
   }
@@ -638,10 +641,11 @@
   ASSERT(!scratch1.is(src));
   MemOperand location = VarOperand(var, scratch0);
   __ movq(location, src);
+
   // Emit the write barrier code if the location is in the heap.
   if (var->IsContextSlot()) {
     int offset = Context::SlotOffset(var->index());
-    __ RecordWrite(scratch0, offset, src, scratch1);
+    __ RecordWriteContextSlot(scratch0, offset, src, scratch1, kDontSaveFPRegs);
   }
 }
 
@@ -715,8 +719,14 @@
         VisitForAccumulatorValue(function);
         __ movq(ContextOperand(rsi, variable->index()), result_register());
         int offset = Context::SlotOffset(variable->index());
-        __ movq(rbx, rsi);
-        __ RecordWrite(rbx, offset, result_register(), rcx);
+        // We know that we have written a function, which is not a smi.
+        __ RecordWriteContextSlot(rsi,
+                                  offset,
+                                  result_register(),
+                                  rcx,
+                                  kDontSaveFPRegs,
+                                  EMIT_REMEMBERED_SET,
+                                  OMIT_SMI_CHECK);
         PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
       } else if (mode == Variable::CONST || mode == Variable::LET) {
         Comment cmnt(masm_, "[ Declaration");
@@ -1445,13 +1455,25 @@
     VisitForAccumulatorValue(subexpr);
 
     // Store the subexpression value in the array's elements.
-    __ movq(rbx, Operand(rsp, 0));  // Copy of array literal.
-    __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
+    __ movq(r8, Operand(rsp, 0));  // Copy of array literal.
+    __ movq(rbx, FieldOperand(r8, JSObject::kElementsOffset));
     int offset = FixedArray::kHeaderSize + (i * kPointerSize);
     __ movq(FieldOperand(rbx, offset), result_register());
 
+    Label no_map_change;
+    __ JumpIfSmi(result_register(), &no_map_change);
     // Update the write barrier for the array store.
-    __ RecordWrite(rbx, offset, result_register(), rcx);
+    __ RecordWriteField(rbx, offset, result_register(), rcx,
+                        kDontSaveFPRegs,
+                        EMIT_REMEMBERED_SET,
+                        OMIT_SMI_CHECK);
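+    // The stored value is a non-smi; if the array still has smi-only
+    // elements, call the runtime to transition its elements kind.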
+    if (FLAG_smi_only_arrays) {
+      __ movq(rdi, FieldOperand(rbx, JSObject::kMapOffset));
+      __ CheckFastSmiOnlyElements(rdi, &no_map_change, Label::kNear);
+      __ push(r8);
+      __ CallRuntime(Runtime::kNonSmiElementStored, 1);
+    }
+    __ bind(&no_map_change);
 
     PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
   }
@@ -1777,7 +1799,8 @@
       __ movq(location, rax);
       if (var->IsContextSlot()) {
         __ movq(rdx, rax);
-        __ RecordWrite(rcx, Context::SlotOffset(var->index()), rdx, rbx);
+        __ RecordWriteContextSlot(
+            rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
       }
     }
 
@@ -1795,7 +1818,8 @@
       __ movq(location, rax);
       if (var->IsContextSlot()) {
         __ movq(rdx, rax);
-        __ RecordWrite(rcx, Context::SlotOffset(var->index()), rdx, rbx);
+        __ RecordWriteContextSlot(
+            rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
       }
     } else {
       ASSERT(var->IsLookupSlot());
@@ -2545,20 +2569,24 @@
 
   // Check that the object is a JS object but take special care of JS
   // functions to make sure they have 'Function' as their class.
+  // Assume that there are only two callable types, and one of them is at
+  // either end of the type range for JS object types. Saves extra comparisons.
+  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
   __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax);
   // Map is now in rax.
   __ j(below, &null);
+  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                FIRST_SPEC_OBJECT_TYPE + 1);
+  __ j(equal, &function);
 
-  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
-  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
-  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
-  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
-  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
-                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
-  __ CmpInstanceType(rax, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
-  __ j(above_equal, &function);
+  __ CmpInstanceType(rax, LAST_SPEC_OBJECT_TYPE);
+  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                LAST_SPEC_OBJECT_TYPE - 1);
+  __ j(equal, &function);
+  // Assume that there is no larger type.
+  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
 
-  // Check if the constructor in the map is a function.
+  // Check if the constructor in the map is a JS function.
   __ movq(rax, FieldOperand(rax, Map::kConstructorOffset));
   __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
   __ j(not_equal, &non_function_constructor);
@@ -2726,7 +2754,7 @@
   // Update the write barrier.  Save the value as it will be
   // overwritten by the write barrier code and is needed afterward.
   __ movq(rdx, rax);
-  __ RecordWrite(rbx, JSValue::kValueOffset, rdx, rcx);
+  __ RecordWriteField(rbx, JSValue::kValueOffset, rdx, rcx, kDontSaveFPRegs);
 
   __ bind(&done);
   context()->Plug(rax);
@@ -3010,14 +3038,33 @@
   __ movq(Operand(index_2, 0), object);
   __ movq(Operand(index_1, 0), temp);
 
-  Label new_space;
-  __ InNewSpace(elements, temp, equal, &new_space);
+  Label no_remembered_set;
+  __ CheckPageFlag(elements,
+                   temp,
+                   1 << MemoryChunk::SCAN_ON_SCAVENGE,
+                   not_zero,
+                   &no_remembered_set,
+                   Label::kNear);
+  // Possible optimization: do a check that both values are Smis
+  // (OR them together and test against the Smi mask.)
 
-  __ movq(object, elements);
-  __ RecordWriteHelper(object, index_1, temp);
-  __ RecordWriteHelper(elements, index_2, temp);
+  // We are swapping two objects in an array and the incremental marker never
+  // pauses in the middle of scanning a single object.  Therefore the
+  // incremental marker is not disturbed, so we don't need to call the
+  // RecordWrite stub that notifies the incremental marker.
+  __ RememberedSetHelper(elements,
+                         index_1,
+                         temp,
+                         kDontSaveFPRegs,
+                         MacroAssembler::kFallThroughAtEnd);
+  __ RememberedSetHelper(elements,
+                         index_2,
+                         temp,
+                         kDontSaveFPRegs,
+                         MacroAssembler::kFallThroughAtEnd);
 
-  __ bind(&new_space);
+  __ bind(&no_remembered_set);
+
   // We are done. Drop elements from the stack, and return undefined.
   __ addq(rsp, Immediate(3 * kPointerSize));
   __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
@@ -3833,10 +3880,14 @@
 
 
 void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
-                                                 Handle<String> check,
-                                                 Label* if_true,
-                                                 Label* if_false,
-                                                 Label* fall_through) {
+                                                 Handle<String> check) {
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
   { AccumulatorValueContext context(this);
     VisitForTypeofValue(expr);
   }
@@ -3875,9 +3926,11 @@
     Split(not_zero, if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->function_symbol())) {
     __ JumpIfSmi(rax, if_false);
-    STATIC_ASSERT(LAST_CALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE);
-    __ CmpObjectType(rax, FIRST_CALLABLE_SPEC_OBJECT_TYPE, rdx);
-    Split(above_equal, if_true, if_false, fall_through);
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
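+    // typeof evaluates to "function" for both JS functions and function
+    // proxies.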
+    __ CmpObjectType(rax, JS_FUNCTION_TYPE, rdx);
+    __ j(equal, if_true);
+    __ CmpInstanceType(rdx, JS_FUNCTION_PROXY_TYPE);
+    Split(equal, if_true, if_false, fall_through);
   } else if (check->Equals(isolate()->heap()->object_symbol())) {
     __ JumpIfSmi(rax, if_false);
     if (!FLAG_harmony_typeof) {
@@ -3895,18 +3948,7 @@
   } else {
     if (if_false != fall_through) __ jmp(if_false);
   }
-}
-
-
-void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
-                                                    Label* if_true,
-                                                    Label* if_false,
-                                                    Label* fall_through) {
-  VisitForAccumulatorValue(expr);
-  PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-
-  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
-  Split(equal, if_true, if_false, fall_through);
+  context()->Plug(if_true, if_false);
 }
 
 
@@ -3914,6 +3956,10 @@
   Comment cmnt(masm_, "[ CompareOperation");
   SetSourcePosition(expr->position());
 
+  // First we try a fast inlined version of the compare when one of
+  // the operands is a literal.
+  if (TryLiteralCompare(expr)) return;
+
   // Always perform the comparison for its control flow.  Pack the result
   // into the expression's context after the comparison is performed.
   Label materialize_true, materialize_false;
@@ -3923,13 +3969,6 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  // First we try a fast inlined version of the compare when one of
-  // the operands is a literal.
-  if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
-    context()->Plug(if_true, if_false);
-    return;
-  }
-
   Token::Value op = expr->op();
   VisitForStackValue(expr->left());
   switch (op) {
@@ -3957,7 +3996,6 @@
       Condition cc = no_condition;
       switch (op) {
         case Token::EQ_STRICT:
-          // Fall through.
         case Token::EQ:
           cc = equal;
           __ pop(rdx);
@@ -4018,8 +4056,9 @@
 }
 
 
-void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
-  Comment cmnt(masm_, "[ CompareToNull");
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+                                              Expression* sub_expr,
+                                              NilValue nil) {
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
   Label* if_false = NULL;
@@ -4027,14 +4066,20 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
-  VisitForAccumulatorValue(expr->expression());
+  VisitForAccumulatorValue(sub_expr);
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
-  __ CompareRoot(rax, Heap::kNullValueRootIndex);
-  if (expr->is_strict()) {
+  Heap::RootListIndex nil_value = nil == kNullValue ?
+      Heap::kNullValueRootIndex :
+      Heap::kUndefinedValueRootIndex;
+  __ CompareRoot(rax, nil_value);
+  if (expr->op() == Token::EQ_STRICT) {
     Split(equal, if_true, if_false, fall_through);
   } else {
+    Heap::RootListIndex other_nil_value = nil == kNullValue ?
+        Heap::kUndefinedValueRootIndex :
+        Heap::kNullValueRootIndex;
     __ j(equal, if_true);
-    __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+    __ CompareRoot(rax, other_nil_value);
     __ j(equal, if_true);
     __ JumpIfSmi(rax, if_false);
     // It can be an undetectable object.
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 9d55594..4ae1785 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -221,7 +221,7 @@
 
   // Update write barrier. Make sure not to clobber the value.
   __ movq(scratch0, value);
-  __ RecordWrite(elements, scratch1, scratch0);
+  __ RecordWrite(elements, scratch1, scratch0, kDontSaveFPRegs);
 }
 
 
@@ -606,45 +606,40 @@
   //  -- rdx     : receiver
   //  -- rsp[0]  : return address
   // -----------------------------------
-  Label slow, slow_with_tagged_index, fast, array, extra;
+  Label slow, slow_with_tagged_index, fast, array, extra, check_extra_double;
+  Label fast_object_with_map_check, fast_object_without_map_check;
+  Label fast_double_with_map_check, fast_double_without_map_check;
 
   // Check that the object isn't a smi.
   __ JumpIfSmi(rdx, &slow_with_tagged_index);
   // Get the map from the receiver.
-  __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+  __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
   // Check that the receiver does not require access checks.  We need
   // to do this because this generic stub does not perform map checks.
-  __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
+  __ testb(FieldOperand(r9, Map::kBitFieldOffset),
            Immediate(1 << Map::kIsAccessCheckNeeded));
   __ j(not_zero, &slow_with_tagged_index);
   // Check that the key is a smi.
   __ JumpIfNotSmi(rcx, &slow_with_tagged_index);
   __ SmiToInteger32(rcx, rcx);
 
-  __ CmpInstanceType(rbx, JS_ARRAY_TYPE);
+  __ CmpInstanceType(r9, JS_ARRAY_TYPE);
   __ j(equal, &array);
   // Check that the object is some kind of JSObject.
-  __ CmpInstanceType(rbx, FIRST_JS_RECEIVER_TYPE);
+  __ CmpInstanceType(r9, FIRST_JS_OBJECT_TYPE);
   __ j(below, &slow);
-  __ CmpInstanceType(rbx, JS_PROXY_TYPE);
-  __ j(equal, &slow);
-  __ CmpInstanceType(rbx, JS_FUNCTION_PROXY_TYPE);
-  __ j(equal, &slow);
 
   // Object case: Check key against length in the elements array.
   // rax: value
   // rdx: JSObject
   // rcx: index
   __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
-  // Check that the object is in fast mode and writable.
-  __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
-                 Heap::kFixedArrayMapRootIndex);
-  __ j(not_equal, &slow);
+  // Check array bounds.
   __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
   // rax: value
   // rbx: FixedArray
   // rcx: index
-  __ j(above, &fast);
+  __ j(above, &fast_object_with_map_check);
 
   // Slow case: call runtime.
   __ bind(&slow);
@@ -666,9 +661,20 @@
   __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
   __ j(below_equal, &slow);
   // Increment index to get new length.
+  __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
+  __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
+  __ j(not_equal, &check_extra_double);
   __ leal(rdi, Operand(rcx, 1));
   __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
-  __ jmp(&fast);
+  __ jmp(&fast_object_without_map_check);
+
+  __ bind(&check_extra_double);
+  // rdi: elements array's map
+  __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
+  __ j(not_equal, &slow);
+  __ leal(rdi, Operand(rcx, 1));
+  __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
+  __ jmp(&fast_double_without_map_check);
 
   // Array case: Get the length and the elements array from the JS
   // array. Check that the array is in fast mode (and writable); if it
@@ -678,9 +684,6 @@
   // rdx: receiver (a JSArray)
   // rcx: index
   __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
-  __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
-                 Heap::kFixedArrayMapRootIndex);
-  __ j(not_equal, &slow);
 
   // Check the key against the length in the array, compute the
   // address to store into and fall through to fast case.
@@ -688,20 +691,47 @@
   __ j(below_equal, &extra);
 
   // Fast case: Do the store.
-  __ bind(&fast);
+  __ bind(&fast_object_with_map_check);
   // rax: value
   // rbx: receiver's elements array (a FixedArray)
   // rcx: index
+  // rdx: receiver (a JSArray)
+  __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
+  __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
+  __ j(not_equal, &fast_double_with_map_check);
+  __ bind(&fast_object_without_map_check);
+  // Smi stores don't require further checks.
   Label non_smi_value;
+  __ JumpIfNotSmi(rax, &non_smi_value);
+  // It's irrelevant whether the array is smi-only or not when writing a smi.
   __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
           rax);
-  __ JumpIfNotSmi(rax, &non_smi_value, Label::kNear);
   __ ret(0);
+
   __ bind(&non_smi_value);
-  // Slow case that needs to retain rcx for use by RecordWrite.
-  // Update write barrier for the elements array address.
+  if (FLAG_smi_only_arrays) {
+    // Writing a non-smi, check whether the array allows non-smi elements.
+    // r9: receiver's map
+    __ CheckFastObjectElements(r9, &slow, Label::kNear);
+  }
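+  // Store the value at the computed element address and update the write
+  // barrier.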
+  __ lea(rcx,
+         FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize));
+  __ movq(Operand(rcx, 0), rax);
   __ movq(rdx, rax);
-  __ RecordWriteNonSmi(rbx, 0, rdx, rcx);
+  __ RecordWrite(
+      rbx, rcx, rdx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ ret(0);
+
+  __ bind(&fast_double_with_map_check);
+  // Check for fast double array case. If this fails, call through to the
+  // runtime.
+  // rdi: elements array's map
+  __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
+  __ j(not_equal, &slow);
+  __ bind(&fast_double_without_map_check);
+  // If the value is a number, store it as a double in the FastDoubleElements
+  // array.
+  __ StoreNumberToDoubleElements(rax, rbx, rcx, xmm0, &slow);
   __ ret(0);
 }
 
@@ -846,21 +876,22 @@
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
 
   // Enter an internal frame.
-  __ EnterInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
 
-  // Push the receiver and the name of the function.
-  __ push(rdx);
-  __ push(rcx);
+    // Push the receiver and the name of the function.
+    __ push(rdx);
+    __ push(rcx);
 
-  // Call the entry.
-  CEntryStub stub(1);
-  __ Set(rax, 2);
-  __ LoadAddress(rbx, ExternalReference(IC_Utility(id), masm->isolate()));
-  __ CallStub(&stub);
+    // Call the entry.
+    CEntryStub stub(1);
+    __ Set(rax, 2);
+    __ LoadAddress(rbx, ExternalReference(IC_Utility(id), masm->isolate()));
+    __ CallStub(&stub);
 
-  // Move result to rdi and exit the internal frame.
-  __ movq(rdi, rax);
-  __ LeaveInternalFrame();
+    // Move result to rdi and exit the internal frame.
+    __ movq(rdi, rax);
+  }
 
   // Check if the receiver is a global object of some sort.
   // This can happen only for regular CallIC but not KeyedCallIC.
@@ -1002,13 +1033,14 @@
   // This branch is taken when calling KeyedCallIC_Miss is neither required
   // nor beneficial.
   __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1);
-  __ EnterInternalFrame();
-  __ push(rcx);  // save the key
-  __ push(rdx);  // pass the receiver
-  __ push(rcx);  // pass the key
-  __ CallRuntime(Runtime::kKeyedGetProperty, 2);
-  __ pop(rcx);  // restore the key
-  __ LeaveInternalFrame();
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(rcx);  // save the key
+    __ push(rdx);  // pass the receiver
+    __ push(rcx);  // pass the key
+    __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+    __ pop(rcx);  // restore the key
+  }
   __ movq(rdi, rax);
   __ jmp(&do_call);
 
@@ -1212,7 +1244,12 @@
   __ movq(mapped_location, rax);
   __ lea(r9, mapped_location);
   __ movq(r8, rax);
-  __ RecordWrite(rbx, r9, r8);
+  __ RecordWrite(rbx,
+                 r9,
+                 r8,
+                 kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET,
+                 INLINE_SMI_CHECK);
   __ Ret();
   __ bind(&notin);
   // The unmapped lookup expects that the parameter map is in rbx.
@@ -1221,7 +1258,12 @@
   __ movq(unmapped_location, rax);
   __ lea(r9, unmapped_location);
   __ movq(r8, rax);
-  __ RecordWrite(rbx, r9, r8);
+  __ RecordWrite(rbx,
+                 r9,
+                 r8,
+                 kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET,
+                 INLINE_SMI_CHECK);
   __ Ret();
   __ bind(&slow);
   GenerateMiss(masm, false);
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 9064a26..45aaad7 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -81,6 +81,12 @@
   HPhase phase("Code generation", chunk());
   ASSERT(is_unused());
   status_ = GENERATING;
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done in GeneratePrologue).
+  FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
   return GeneratePrologue() &&
       GenerateBody() &&
       GenerateDeferredCode() &&
@@ -217,11 +223,8 @@
         // Store it in the context.
         int context_offset = Context::SlotOffset(var->index());
         __ movq(Operand(rsi, context_offset), rax);
-        // Update the write barrier. This clobbers all involved
-        // registers, so we have use a third register to avoid
-        // clobbering rsi.
-        __ movq(rcx, rsi);
-        __ RecordWrite(rcx, context_offset, rax, rbx);
+        // Update the write barrier. This clobbers rax and rbx.
+        __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
       }
     }
     Comment(";;; End allocate local context");
@@ -280,6 +283,9 @@
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
       __ bind(code->entry());
+      Comment(";;; Deferred code @%d: %s.",
+              code->instruction_index(),
+              code->instr()->Mnemonic());
       code->Generate();
       __ jmp(code->exit());
     }
@@ -667,7 +673,7 @@
     int deoptimization_index) {
   ASSERT(kind == expected_safepoint_kind_);
 
-  const ZoneList<LOperand*>* operands = pointers->operands();
+  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
 
   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
       kind, arguments, deoptimization_index);
@@ -1577,30 +1583,33 @@
 }
 
 
-void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
+void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
   Register reg = ToRegister(instr->InputAt(0));
-
   int false_block = chunk_->LookupDestination(instr->false_block_id());
 
+  // If the expression is known to be untagged or a smi, then it's definitely
+  // not null, and it can't be an undetectable object.
   if (instr->hydrogen()->representation().IsSpecialization() ||
       instr->hydrogen()->type().IsSmi()) {
-    // If the expression is known to untagged or smi, then it's definitely
-    // not null, and it can't be a an undetectable object.
-    // Jump directly to the false block.
     EmitGoto(false_block);
     return;
   }
 
   int true_block = chunk_->LookupDestination(instr->true_block_id());
-
-  __ CompareRoot(reg, Heap::kNullValueRootIndex);
-  if (instr->is_strict()) {
+  Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
+      Heap::kNullValueRootIndex :
+      Heap::kUndefinedValueRootIndex;
+  __ CompareRoot(reg, nil_value);
+  if (instr->kind() == kStrictEquality) {
     EmitBranch(true_block, false_block, equal);
   } else {
+    Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
+        Heap::kUndefinedValueRootIndex :
+        Heap::kNullValueRootIndex;
     Label* true_label = chunk_->GetAssemblyLabel(true_block);
     Label* false_label = chunk_->GetAssemblyLabel(false_block);
     __ j(equal, true_label);
-    __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
+    __ CompareRoot(reg, other_nil_value);
     __ j(equal, true_label);
     __ JumpIfSmi(reg, false_label);
     // Check for undetectable objects by looking in the bit field in
@@ -1752,30 +1761,40 @@
                                Label* is_false,
                                Handle<String> class_name,
                                Register input,
-                               Register temp) {
+                               Register temp,
+                               Register scratch) {
   __ JumpIfSmi(input, is_false);
-  __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
-  __ j(below, is_false);
 
-  // Map is now in temp.
-  // Functions have class 'Function'.
-  __ CmpInstanceType(temp, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
   if (class_name->IsEqualTo(CStrVector("Function"))) {
-    __ j(above_equal, is_true);
+    // Assuming the following assertions, we can use the same compares to test
+    // for both being a function type and being in the object type range.
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                  FIRST_SPEC_OBJECT_TYPE + 1);
+    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                  LAST_SPEC_OBJECT_TYPE - 1);
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
+    __ j(below, is_false);
+    __ j(equal, is_true);
+    __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
+    __ j(equal, is_true);
   } else {
-    __ j(above_equal, is_false);
+    // Faster code path to avoid two compares: subtract lower bound from the
+    // actual type and do a signed compare with the width of the type range.
+    __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
+    __ movq(scratch, FieldOperand(temp, Map::kInstanceTypeOffset));
+    __ subb(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    __ cmpb(scratch,
+            Immediate(static_cast<int8_t>(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+                                          FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)));
+    __ j(above, is_false);
   }
 
+  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
   // Check if the constructor in the map is a function.
   __ movq(temp, FieldOperand(temp, Map::kConstructorOffset));
 
-  // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last type and
-  // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
-  // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
-  STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
-  STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
-                LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
-
   // Objects with a non-function constructor have class 'Object'.
   __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
   if (class_name->IsEqualTo(CStrVector("Object"))) {
@@ -1804,6 +1823,7 @@
 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
   Register input = ToRegister(instr->InputAt(0));
   Register temp = ToRegister(instr->TempAt(0));
+  Register temp2 = ToRegister(instr->TempAt(1));
   Handle<String> class_name = instr->hydrogen()->class_name();
 
   int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -1812,7 +1832,7 @@
   Label* true_label = chunk_->GetAssemblyLabel(true_block);
   Label* false_label = chunk_->GetAssemblyLabel(false_block);
 
-  EmitClassOfTest(true_label, false_label, class_name, input, temp);
+  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
 
   EmitBranch(true_block, false_block, equal);
 }
@@ -1853,9 +1873,8 @@
     virtual void Generate() {
       codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
     }
-
+    virtual LInstruction* instr() { return instr_; }
     Label* map_check() { return &map_check_; }
-
    private:
     LInstanceOfKnownGlobal* instr_;
     Label map_check_;
@@ -1996,7 +2015,7 @@
     __ movq(result, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL);
     __ movq(result, Operand(result, 0));
   }
-  if (instr->hydrogen()->check_hole_value()) {
+  if (instr->hydrogen()->RequiresHoleCheck()) {
     __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
     DeoptimizeIf(equal, instr->environment());
   }
@@ -2016,25 +2035,39 @@
 
 
 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
+  Register object = ToRegister(instr->TempAt(0));
+  Register address = ToRegister(instr->TempAt(1));
   Register value = ToRegister(instr->InputAt(0));
-  Register temp = ToRegister(instr->TempAt(0));
-  ASSERT(!value.is(temp));
-  bool check_hole = instr->hydrogen()->check_hole_value();
-  if (!check_hole && value.is(rax)) {
-    __ store_rax(instr->hydrogen()->cell().location(),
-                 RelocInfo::GLOBAL_PROPERTY_CELL);
-    return;
-  }
+  ASSERT(!value.is(object));
+  Handle<JSGlobalPropertyCell> cell_handle(instr->hydrogen()->cell());
+
+  __ movq(address, cell_handle, RelocInfo::GLOBAL_PROPERTY_CELL);
+
   // If the cell we are storing to contains the hole it could have
   // been deleted from the property dictionary. In that case, we need
   // to update the property details in the property dictionary to mark
   // it as no longer deleted. We deoptimize in that case.
-  __ movq(temp, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL);
-  if (check_hole) {
-    __ CompareRoot(Operand(temp, 0), Heap::kTheHoleValueRootIndex);
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ CompareRoot(Operand(address, 0), Heap::kTheHoleValueRootIndex);
     DeoptimizeIf(equal, instr->environment());
   }
-  __ movq(Operand(temp, 0), value);
+
+  // Store the value.
+  __ movq(Operand(address, 0), value);
+
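+  // Smis are immediate values and never need a write barrier.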
+  Label smi_store;
+  __ JumpIfSmi(value, &smi_store, Label::kNear);
+
+  int offset = JSGlobalPropertyCell::kValueOffset - kHeapObjectTag;
+  __ lea(object, Operand(address, -offset));
+  // Cells are always in the remembered set.
+  __ RecordWrite(object,
+                 address,
+                 value,
+                 kSaveFPRegs,
+                 OMIT_REMEMBERED_SET,
+                 OMIT_SMI_CHECK);
+  __ bind(&smi_store);
 }
 
 
@@ -2064,7 +2097,7 @@
   if (instr->needs_write_barrier()) {
     int offset = Context::SlotOffset(instr->slot_index());
     Register scratch = ToRegister(instr->TempAt(0));
-    __ RecordWrite(context, offset, value, scratch);
+    __ RecordWriteContextSlot(context, offset, value, scratch, kSaveFPRegs);
   }
 }
 
@@ -2283,17 +2316,15 @@
     LLoadKeyedFastDoubleElement* instr) {
   XMMRegister result(ToDoubleRegister(instr->result()));
 
-  if (instr->hydrogen()->RequiresHoleCheck()) {
-    int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
-        sizeof(kHoleNanLower32);
-    Operand hole_check_operand = BuildFastArrayOperand(
-        instr->elements(),
-        instr->key(),
-        FAST_DOUBLE_ELEMENTS,
-        offset);
-    __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
-    DeoptimizeIf(equal, instr->environment());
-  }
+  int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
+      sizeof(kHoleNanLower32);
+  Operand hole_check_operand = BuildFastArrayOperand(
+      instr->elements(),
+      instr->key(),
+      FAST_DOUBLE_ELEMENTS,
+      offset);
+  __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
+  DeoptimizeIf(equal, instr->environment());
 
   Operand double_load_operand = BuildFastArrayOperand(
       instr->elements(), instr->key(), FAST_DOUBLE_ELEMENTS,
@@ -2365,6 +2396,7 @@
       case EXTERNAL_FLOAT_ELEMENTS:
       case EXTERNAL_DOUBLE_ELEMENTS:
       case FAST_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case DICTIONARY_ELEMENTS:
       case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -2681,6 +2713,7 @@
     virtual void Generate() {
       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
     }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LUnaryMathOperation* instr_;
   };
@@ -2977,7 +3010,7 @@
   ASSERT(ToRegister(instr->result()).is(rax));
 
   int arity = instr->arity();
-  CallFunctionStub stub(arity, RECEIVER_MIGHT_BE_IMPLICIT);
+  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
   __ Drop(1);
@@ -3033,7 +3066,7 @@
     if (instr->needs_write_barrier()) {
       Register temp = ToRegister(instr->TempAt(0));
       // Update the write barrier for the object for in-object properties.
-      __ RecordWrite(object, offset, value, temp);
+      __ RecordWriteField(object, offset, value, temp, kSaveFPRegs);
     }
   } else {
     Register temp = ToRegister(instr->TempAt(0));
@@ -3042,7 +3075,7 @@
     if (instr->needs_write_barrier()) {
       // Update the write barrier for the properties array.
       // object is used as a scratch register.
-      __ RecordWrite(temp, offset, value, object);
+      __ RecordWriteField(temp, offset, value, object, kSaveFPRegs);
     }
   }
 }
@@ -3090,6 +3123,7 @@
       case EXTERNAL_FLOAT_ELEMENTS:
       case EXTERNAL_DOUBLE_ELEMENTS:
       case FAST_ELEMENTS:
+      case FAST_SMI_ONLY_ELEMENTS:
       case FAST_DOUBLE_ELEMENTS:
       case DICTIONARY_ELEMENTS:
       case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3125,6 +3159,13 @@
   Register elements = ToRegister(instr->object());
   Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
 
+  // This instruction cannot handle the FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
+  // conversion, so it deopts in that case.
+  if (instr->hydrogen()->ValueNeedsSmiCheck()) {
+    Condition cc = masm()->CheckSmi(value);
+    DeoptimizeIf(NegateCondition(cc), instr->environment());
+  }
+
   // Do the store.
   if (instr->key()->IsConstantOperand()) {
     ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
@@ -3146,7 +3187,7 @@
                              key,
                              times_pointer_size,
                              FixedArray::kHeaderSize));
-    __ RecordWrite(elements, key, value);
+    __ RecordWrite(elements, key, value, kSaveFPRegs);
   }
 }
 
@@ -3196,6 +3237,7 @@
     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LStringCharCodeAt* instr_;
   };
@@ -3316,6 +3358,7 @@
     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LStringCharFromCode* instr_;
   };
@@ -3392,6 +3435,7 @@
     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LNumberTagD* instr_;
   };
@@ -3487,16 +3531,6 @@
 }
 
 
-class DeferredTaggedToI: public LDeferredCode {
- public:
-  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
-      : LDeferredCode(codegen), instr_(instr) { }
-  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
- private:
-  LTaggedToI* instr_;
-};
-
-
 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
   Label done, heap_number;
   Register input_reg = ToRegister(instr->InputAt(0));
@@ -3545,6 +3579,16 @@
 
 
 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+  class DeferredTaggedToI: public LDeferredCode {
+   public:
+    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LTaggedToI* instr_;
+  };
+
   LOperand* input = instr->InputAt(0);
   ASSERT(input->IsRegister());
   ASSERT(input->Equals(instr->result()));
@@ -3981,9 +4025,12 @@
     final_branch_condition = not_zero;
 
   } else if (type_name->Equals(heap()->function_symbol())) {
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
     __ JumpIfSmi(input, false_label);
-    __ CmpObjectType(input, FIRST_CALLABLE_SPEC_OBJECT_TYPE, input);
-    final_branch_condition = above_equal;
+    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
+    __ j(equal, true_label);
+    __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
+    final_branch_condition = equal;
 
   } else if (type_name->Equals(heap()->object_symbol())) {
     __ JumpIfSmi(input, false_label);
@@ -4109,6 +4156,7 @@
     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+    virtual LInstruction* instr() { return instr_; }
    private:
     LStackCheck* instr_;
   };
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index 8cb4cec..106d7bb 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -140,7 +140,8 @@
                        Label* if_false,
                        Handle<String> class_name,
                        Register input,
-                       Register temporary);
+                       Register temporary,
+                       Register scratch);
 
   int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
   int GetParameterCount() const { return scope()->num_parameters(); }
@@ -345,16 +346,20 @@
 class LDeferredCode: public ZoneObject {
  public:
   explicit LDeferredCode(LCodeGen* codegen)
-      : codegen_(codegen), external_exit_(NULL) {
+      : codegen_(codegen),
+        external_exit_(NULL),
+        instruction_index_(codegen->current_instruction_) {
     codegen->AddDeferredCode(this);
   }
 
   virtual ~LDeferredCode() { }
   virtual void Generate() = 0;
+  virtual LInstruction* instr() = 0;
 
   void SetExit(Label *exit) { external_exit_ = exit; }
   Label* entry() { return &entry_; }
   Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+  int instruction_index() const { return instruction_index_; }
 
  protected:
   LCodeGen* codegen() const { return codegen_; }
@@ -365,6 +370,7 @@
   Label entry_;
   Label exit_;
   Label* external_exit_;
+  int instruction_index_;
 };
 
 } }  // namespace v8::internal
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 5fc5646..a67a593 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -214,10 +214,11 @@
 }
 
 
-void LIsNullAndBranch::PrintDataTo(StringStream* stream) {
+void LIsNilAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if ");
   InputAt(0)->PrintTo(stream);
-  stream->Add(is_strict() ? " === null" : " == null");
+  stream->Add(kind() == kStrictEquality ? " === " : " == ");
+  stream->Add(nil() == kNullValue ? "null" : "undefined");
   stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
 }
 
@@ -706,7 +707,9 @@
 
 LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
   HEnvironment* hydrogen_env = current_block_->last_environment();
-  instr->set_environment(CreateEnvironment(hydrogen_env));
+  int argument_index_accumulator = 0;
+  instr->set_environment(CreateEnvironment(hydrogen_env,
+                                           &argument_index_accumulator));
   return instr;
 }
 
@@ -989,10 +992,13 @@
 }
 
 
-LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
+LEnvironment* LChunkBuilder::CreateEnvironment(
+    HEnvironment* hydrogen_env,
+    int* argument_index_accumulator) {
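+  // The accumulator is threaded through the recursive calls so that argument
+  // indices stay unique across the chain of outer environments.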
   if (hydrogen_env == NULL) return NULL;
 
-  LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
+  LEnvironment* outer =
+      CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
   int ast_id = hydrogen_env->ast_id();
   ASSERT(ast_id != AstNode::kNoNumber);
   int value_count = hydrogen_env->length();
@@ -1002,7 +1008,6 @@
                                           argument_count_,
                                           value_count,
                                           outer);
-  int argument_index = 0;
   for (int i = 0; i < value_count; ++i) {
     if (hydrogen_env->is_special_index(i)) continue;
 
@@ -1011,7 +1016,7 @@
     if (value->IsArgumentsObject()) {
       op = NULL;
     } else if (value->IsPushArgument()) {
-      op = new LArgument(argument_index++);
+      op = new LArgument((*argument_index_accumulator)++);
     } else {
       op = UseAny(value);
     }
@@ -1436,10 +1441,10 @@
 }
 
 
-LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
+LInstruction* LChunkBuilder::DoIsNilAndBranch(HIsNilAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
-  LOperand* temp = instr->is_strict() ? NULL : TempRegister();
-  return new LIsNullAndBranch(UseRegisterAtStart(instr->value()), temp);
+  LOperand* temp = instr->kind() == kStrictEquality ? NULL : TempRegister();
+  return new LIsNilAndBranch(UseRegisterAtStart(instr->value()), temp);
 }
 
 
@@ -1489,6 +1494,7 @@
 LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
     HClassOfTestAndBranch* instr) {
   return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
+                                   TempRegister(),
                                    TempRegister());
 }
 
@@ -1716,7 +1722,7 @@
 
 LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
   LLoadGlobalCell* result = new LLoadGlobalCell;
-  return instr->check_hole_value()
+  return instr->RequiresHoleCheck()
       ? AssignEnvironment(DefineAsRegister(result))
       : DefineAsRegister(result);
 }
@@ -1731,8 +1737,10 @@
 
 LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
   LStoreGlobalCell* result =
-      new LStoreGlobalCell(UseRegister(instr->value()), TempRegister());
-  return instr->check_hole_value() ? AssignEnvironment(result) : result;
+      new LStoreGlobalCell(UseTempRegister(instr->value()),
+                           TempRegister(),
+                           TempRegister());
+  return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
 }
 
 
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index d169bf6..d43a86a 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -107,7 +107,7 @@
   V(Integer32ToDouble)                          \
   V(InvokeFunction)                             \
   V(IsConstructCallAndBranch)                   \
-  V(IsNullAndBranch)                            \
+  V(IsNilAndBranch)                             \
   V(IsObjectAndBranch)                          \
   V(IsSmiAndBranch)                             \
   V(IsUndetectableAndBranch)                    \
@@ -609,17 +609,18 @@
 };
 
 
-class LIsNullAndBranch: public LControlInstruction<1, 1> {
+class LIsNilAndBranch: public LControlInstruction<1, 1> {
  public:
-  LIsNullAndBranch(LOperand* value, LOperand* temp) {
+  LIsNilAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
     temps_[0] = temp;
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
-  DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
+  DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch, "is-nil-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsNilAndBranch)
 
-  bool is_strict() const { return hydrogen()->is_strict(); }
+  EqualityKind kind() const { return hydrogen()->kind(); }
+  NilValue nil() const { return hydrogen()->nil(); }
 
   virtual void PrintDataTo(StringStream* stream);
 };
@@ -705,11 +706,12 @@
 };
 
 
-class LClassOfTestAndBranch: public LControlInstruction<1, 1> {
+class LClassOfTestAndBranch: public LControlInstruction<1, 2> {
  public:
-  LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
+  LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
     inputs_[0] = value;
     temps_[0] = temp;
+    temps_[1] = temp2;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
@@ -1197,11 +1199,12 @@
 };
 
 
-class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
+class LStoreGlobalCell: public LTemplateInstruction<0, 1, 2> {
  public:
-  explicit LStoreGlobalCell(LOperand* value, LOperand* temp) {
+  explicit LStoreGlobalCell(LOperand* value, LOperand* temp1, LOperand* temp2) {
     inputs_[0] = value;
-    temps_[0] = temp;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
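
For readers who are not familiar with the lithium classes: the numeric template arguments are assumed here to mean <results, inputs, temps>, so widening LStoreGlobalCell from <0, 1, 1> to <0, 1, 2> simply reserves room for the second temp register that the new constructor fills in. A stripped-down, hypothetical illustration (these classes are stand-ins, not the real ones):

// Hedged sketch: fixed-size operand arrays parameterized by the template
// arguments, mirroring the assumed <results, inputs, temps> convention.
struct LOperandSketch {};

template <int R, int I, int T>
class LTemplateInstructionSketch {
 public:
  static const int kNumResults = R;
  static const int kNumInputs = I;
  static const int kNumTemps = T;
  LOperandSketch* inputs_[I];
  LOperandSketch* temps_[T];
};

// <0, 1, 2>: no result, one input (the value), two temp registers.
class LStoreGlobalCellSketch : public LTemplateInstructionSketch<0, 1, 2> {
 public:
  LStoreGlobalCellSketch(LOperandSketch* value,
                         LOperandSketch* temp1,
                         LOperandSketch* temp2) {
    inputs_[0] = value;
    temps_[0] = temp1;
    temps_[1] = temp2;
  }
};

int main() {
  LOperandSketch value, temp1, temp2;
  LStoreGlobalCellSketch instr(&value, &temp1, &temp2);
  return LStoreGlobalCellSketch::kNumTemps == 2 ? 0 : 1;
}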
@@ -2146,7 +2149,8 @@
       LInstruction* instr, int ast_id);
   void ClearInstructionPendingDeoptimizationEnvironment();
 
-  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
+  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
+                                  int* argument_index_accumulator);
 
   void VisitInstruction(HInstruction* current);
 
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 9cfc9b6..7fe6d58 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -44,6 +44,7 @@
     : Assembler(arg_isolate, buffer, size),
       generating_stub_(false),
       allow_stub_calls_(true),
+      has_frame_(false),
       root_array_available_(true) {
   if (isolate() != NULL) {
     code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
@@ -196,28 +197,47 @@
 }
 
 
-void MacroAssembler::RecordWriteHelper(Register object,
-                                       Register addr,
-                                       Register scratch) {
-  if (emit_debug_code()) {
-    // Check that the object is not in new space.
-    Label not_in_new_space;
-    InNewSpace(object, scratch, not_equal, &not_in_new_space, Label::kNear);
-    Abort("new-space object passed to RecordWriteHelper");
-    bind(&not_in_new_space);
+void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
+                                         Register addr,
+                                         Register scratch,
+                                         SaveFPRegsMode save_fp,
+                                         RememberedSetFinalAction and_then) {
+  if (FLAG_debug_code) {
+    Label ok;
+    JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
+    int3();
+    bind(&ok);
   }
-
-  // Compute the page start address from the heap object pointer, and reuse
-  // the 'object' register for it.
-  and_(object, Immediate(~Page::kPageAlignmentMask));
-
-  // Compute number of region covering addr. See Page::GetRegionNumberForAddress
-  // method for more details.
-  shrl(addr, Immediate(Page::kRegionSizeLog2));
-  andl(addr, Immediate(Page::kPageAlignmentMask >> Page::kRegionSizeLog2));
-
-  // Set dirty mark for region.
-  bts(Operand(object, Page::kDirtyFlagOffset), addr);
+  // Load store buffer top.
+  LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
+  // Store pointer to buffer.
+  movq(Operand(scratch, 0), addr);
+  // Increment buffer top.
+  addq(scratch, Immediate(kPointerSize));
+  // Write back new top of buffer.
+  StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
+  // Call stub on end of buffer.
+  Label done;
+  // Check for end of buffer.
+  testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
+  if (and_then == kReturnAtEnd) {
+    Label buffer_overflowed;
+    j(not_equal, &buffer_overflowed, Label::kNear);
+    ret(0);
+    bind(&buffer_overflowed);
+  } else {
+    ASSERT(and_then == kFallThroughAtEnd);
+    j(equal, &done, Label::kNear);
+  }
+  StoreBufferOverflowStub store_buffer_overflow =
+      StoreBufferOverflowStub(save_fp);
+  CallStub(&store_buffer_overflow);
+  if (and_then == kReturnAtEnd) {
+    ret(0);
+  } else {
+    ASSERT(and_then == kFallThroughAtEnd);
+    bind(&done);
+  }
 }
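
RememberedSetHelper is the slow-path tail of the new write barrier: it appends the written slot's address to the store buffer with a bump pointer and only calls the overflow stub when the buffer fills up. A conceptual C++ sketch of that shape (names and capacity are assumptions; V8 aligns the real buffer so the fullness test is the single bit test against StoreBuffer::kStoreBufferOverflowBit seen above, whereas the sketch just compares against the end):

// Sketch only: bump-pointer append with an overflow handler.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

class StoreBufferSketch {
 public:
  explicit StoreBufferSketch(std::size_t capacity)
      : buffer_(capacity),
        top_(buffer_.data()),
        end_(buffer_.data() + capacity) {}

  // Fast path: record the slot address and bump the top pointer; only the
  // rare full-buffer case leaves this function.
  void RememberSlot(void* slot) {
    *top_++ = reinterpret_cast<uintptr_t>(slot);
    if (top_ == end_) HandleOverflow();
  }

 private:
  void HandleOverflow() {
    // Stand-in for StoreBufferOverflowStub: drain or compact the buffer.
    std::cout << "store buffer full: processing " << buffer_.size()
              << " entries\n";
    top_ = buffer_.data();
  }

  std::vector<uintptr_t> buffer_;
  uintptr_t* top_;
  uintptr_t* end_;
};

int main() {
  StoreBufferSketch buffer(4);
  int heap_slot = 0;
  for (int i = 0; i < 10; ++i) buffer.RememberSlot(&heap_slot);
  return 0;
}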
 
 
@@ -225,7 +245,7 @@
                                 Register scratch,
                                 Condition cc,
                                 Label* branch,
-                                Label::Distance near_jump) {
+                                Label::Distance distance) {
   if (Serializer::enabled()) {
     // Can't do arithmetic on external references if it might get serialized.
     // The mask isn't really an address.  We load it as an external reference in
@@ -240,7 +260,7 @@
     }
     movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
     cmpq(scratch, kScratchRegister);
-    j(cc, branch, near_jump);
+    j(cc, branch, distance);
   } else {
     ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
     intptr_t new_space_start =
@@ -252,127 +272,128 @@
       lea(scratch, Operand(object, kScratchRegister, times_1, 0));
     }
     and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
-    j(cc, branch, near_jump);
+    j(cc, branch, distance);
   }
 }
 
 
-void MacroAssembler::RecordWrite(Register object,
-                                 int offset,
-                                 Register value,
-                                 Register index) {
+void MacroAssembler::RecordWriteField(
+    Register object,
+    int offset,
+    Register value,
+    Register dst,
+    SaveFPRegsMode save_fp,
+    RememberedSetAction remembered_set_action,
+    SmiCheck smi_check) {
   // The compiled code assumes that record write doesn't change the
   // context register, so we check that none of the clobbered
   // registers are rsi.
-  ASSERT(!object.is(rsi) && !value.is(rsi) && !index.is(rsi));
+  ASSERT(!value.is(rsi) && !dst.is(rsi));
 
   // First, check if a write barrier is even needed. The tests below
-  // catch stores of smis and stores into the young generation.
+  // catch stores of Smis.
   Label done;
-  JumpIfSmi(value, &done);
 
-  RecordWriteNonSmi(object, offset, value, index);
+  // Skip barrier if writing a smi.
+  if (smi_check == INLINE_SMI_CHECK) {
+    JumpIfSmi(value, &done);
+  }
+
+  // Although the object register is tagged, the offset is relative to the start
+  // of the object, so the offset must be a multiple of kPointerSize.
+  ASSERT(IsAligned(offset, kPointerSize));
+
+  lea(dst, FieldOperand(object, offset));
+  if (emit_debug_code()) {
+    Label ok;
+    testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
+    j(zero, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+  }
+
+  RecordWrite(
+      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
+
   bind(&done);
 
-  // Clobber all input registers when running with the debug-code flag
-  // turned on to provoke errors. This clobbering repeats the
-  // clobbering done inside RecordWriteNonSmi but it's necessary to
-  // avoid having the fast case for smis leave the registers
-  // unchanged.
+  // Clobber clobbered input registers when running with the debug-code flag
+  // turned on to provoke errors.
   if (emit_debug_code()) {
-    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+    movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
   }
 }
 
 
 void MacroAssembler::RecordWrite(Register object,
                                  Register address,
-                                 Register value) {
+                                 Register value,
+                                 SaveFPRegsMode fp_mode,
+                                 RememberedSetAction remembered_set_action,
+                                 SmiCheck smi_check) {
   // The compiled code assumes that record write doesn't change the
   // context register, so we check that none of the clobbered
   // registers are rsi.
-  ASSERT(!object.is(rsi) && !value.is(rsi) && !address.is(rsi));
+  ASSERT(!value.is(rsi) && !address.is(rsi));
+
+  ASSERT(!object.is(value));
+  ASSERT(!object.is(address));
+  ASSERT(!value.is(address));
+  if (emit_debug_code()) {
+    AbortIfSmi(object);
+  }
+
+  if (remembered_set_action == OMIT_REMEMBERED_SET &&
+      !FLAG_incremental_marking) {
+    return;
+  }
+
+  if (FLAG_debug_code) {
+    Label ok;
+    cmpq(value, Operand(address, 0));
+    j(equal, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+  }
 
   // First, check if a write barrier is even needed. The tests below
   // catch stores of smis and stores into the young generation.
   Label done;
-  JumpIfSmi(value, &done);
 
-  InNewSpace(object, value, equal, &done);
+  if (smi_check == INLINE_SMI_CHECK) {
+    // Skip barrier if writing a smi.
+    JumpIfSmi(value, &done);
+  }
 
-  RecordWriteHelper(object, address, value);
+  CheckPageFlag(value,
+                value,  // Used as scratch.
+                MemoryChunk::kPointersToHereAreInterestingMask,
+                zero,
+                &done,
+                Label::kNear);
+
+  CheckPageFlag(object,
+                value,  // Used as scratch.
+                MemoryChunk::kPointersFromHereAreInterestingMask,
+                zero,
+                &done,
+                Label::kNear);
+
+  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+  CallStub(&stub);
 
   bind(&done);
 
-  // Clobber all input registers when running with the debug-code flag
+  // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
   }
 }
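
The rewritten RecordWrite replaces the old dirty-region marking with a chain of cheap filters: an optional smi check, then the two page-flag tests, and only then the RecordWriteStub call. A hedged sketch of that filter order (flag names and types below are stand-ins, not V8's):

// Sketch only: bail out of the write barrier as early as possible.
#include <cstdint>
#include <iostream>

struct PageSketch {
  enum Flag : uint32_t {
    kPointersToHereAreInteresting = 1u << 0,
    kPointersFromHereAreInteresting = 1u << 1
  };
  uint32_t flags;
};

struct ObjectSketch {
  PageSketch* page;
  bool is_smi;
};

void RecordWriteSlowPath(ObjectSketch* object, ObjectSketch** slot,
                         ObjectSketch* value) {
  // Stand-in for RecordWriteStub: update the remembered set and/or tell the
  // incremental marker about the new edge.
  (void)object; (void)slot; (void)value;
  std::cout << "slow path taken\n";
}

void RecordWriteSketch(ObjectSketch* object, ObjectSketch** slot,
                       ObjectSketch* value, bool inline_smi_check) {
  // 1. Smis are not heap pointers; nothing to record.
  if (inline_smi_check && value->is_smi) return;
  // 2. kPointersToHereAreInterestingMask: skip values on pages nobody needs
  //    incoming pointers tracked for.
  if ((value->page->flags & PageSketch::kPointersToHereAreInteresting) == 0) {
    return;
  }
  // 3. kPointersFromHereAreInterestingMask: skip stores out of pages that are
  //    not being tracked (e.g. new-space objects outside incremental marking).
  if ((object->page->flags & PageSketch::kPointersFromHereAreInteresting) == 0) {
    return;
  }
  RecordWriteSlowPath(object, slot, value);
}

int main() {
  PageSketch old_page = {PageSketch::kPointersFromHereAreInteresting};
  PageSketch new_page = {PageSketch::kPointersToHereAreInteresting};
  ObjectSketch old_object = {&old_page, false};
  ObjectSketch young_value = {&new_page, false};
  ObjectSketch* slot = nullptr;
  RecordWriteSketch(&old_object, &slot, &young_value, true);  // Slow path.
  return 0;
}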
 
 
-void MacroAssembler::RecordWriteNonSmi(Register object,
-                                       int offset,
-                                       Register scratch,
-                                       Register index) {
-  Label done;
-
-  if (emit_debug_code()) {
-    Label okay;
-    JumpIfNotSmi(object, &okay, Label::kNear);
-    Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
-    bind(&okay);
-
-    if (offset == 0) {
-      // index must be int32.
-      Register tmp = index.is(rax) ? rbx : rax;
-      push(tmp);
-      movl(tmp, index);
-      cmpq(tmp, index);
-      Check(equal, "Index register for RecordWrite must be untagged int32.");
-      pop(tmp);
-    }
-  }
-
-  // Test that the object address is not in the new space. We cannot
-  // update page dirty marks for new space pages.
-  InNewSpace(object, scratch, equal, &done);
-
-  // The offset is relative to a tagged or untagged HeapObject pointer,
-  // so either offset or offset + kHeapObjectTag must be a
-  // multiple of kPointerSize.
-  ASSERT(IsAligned(offset, kPointerSize) ||
-         IsAligned(offset + kHeapObjectTag, kPointerSize));
-
-  Register dst = index;
-  if (offset != 0) {
-    lea(dst, Operand(object, offset));
-  } else {
-    // array access: calculate the destination address in the same manner as
-    // KeyedStoreIC::GenerateGeneric.
-    lea(dst, FieldOperand(object,
-                          index,
-                          times_pointer_size,
-                          FixedArray::kHeaderSize));
-  }
-  RecordWriteHelper(object, dst, scratch);
-
-  bind(&done);
-
-  // Clobber all input registers when running with the debug-code flag
-  // turned on to provoke errors.
-  if (emit_debug_code()) {
-    movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-    movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-  }
-}
-
 void MacroAssembler::Assert(Condition cc, const char* msg) {
   if (emit_debug_code()) Check(cc, msg);
 }
@@ -400,7 +421,7 @@
   Label L;
   j(cc, &L, Label::kNear);
   Abort(msg);
-  // will not return here
+  // Control will not return here.
   bind(&L);
 }
 
@@ -448,9 +469,6 @@
     RecordComment(msg);
   }
 #endif
-  // Disable stub call restrictions to always allow calls to abort.
-  AllowStubCallsScope allow_scope(this, true);
-
   push(rax);
   movq(kScratchRegister, p0, RelocInfo::NONE);
   push(kScratchRegister);
@@ -458,20 +476,28 @@
        reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
        RelocInfo::NONE);
   push(kScratchRegister);
-  CallRuntime(Runtime::kAbort, 2);
-  // will not return here
+
+  if (!has_frame_) {
+    // We don't actually want to generate a pile of code for this, so just
+    // claim there is a stack frame, without generating one.
+    FrameScope scope(this, StackFrame::NONE);
+    CallRuntime(Runtime::kAbort, 2);
+  } else {
+    CallRuntime(Runtime::kAbort, 2);
+  }
+  // Control will not return here.
   int3();
 }
 
 
 void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
-  ASSERT(allow_stub_calls());  // calls are not allowed in some stubs
+  ASSERT(AllowThisStubCall(stub));  // Calls are not allowed in some stubs
   Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
 }
 
 
 MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
+  ASSERT(AllowThisStubCall(stub));  // Calls are not allowed in some stubs.
   MaybeObject* result = stub->TryGetCode();
   if (!result->IsFailure()) {
     call(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
@@ -482,13 +508,12 @@
 
 
 void MacroAssembler::TailCallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
+  ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
   Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
 }
 
 
 MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub) {
-  ASSERT(allow_stub_calls());  // Calls are not allowed in some stubs.
   MaybeObject* result = stub->TryGetCode();
   if (!result->IsFailure()) {
     jmp(Handle<Code>(Code::cast(result->ToObjectUnchecked())),
@@ -504,6 +529,12 @@
 }
 
 
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
+  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
+}
+
+
 void MacroAssembler::IllegalOperation(int num_arguments) {
   if (num_arguments > 0) {
     addq(rsp, Immediate(num_arguments * kPointerSize));
@@ -540,8 +571,7 @@
   const Runtime::Function* function = Runtime::FunctionForId(id);
   Set(rax, function->nargs);
   LoadAddress(rbx, ExternalReference(function, isolate()));
-  CEntryStub ces(1);
-  ces.SaveDoubles();
+  CEntryStub ces(1, kSaveFPRegs);
   CallStub(&ces);
 }
 
@@ -795,8 +825,8 @@
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
-  // Calls are not allowed in some stubs.
-  ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
+  // You can't call a builtin without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
 
   // Rely on the assertion to check that the number of provided
   // arguments match the expected number of arguments. Fake a
@@ -825,6 +855,57 @@
 }
 
 
+static const Register saved_regs[] =
+    { rax, rcx, rdx, rbx, rbp, rsi, rdi, r8, r9, r10, r11 };
+static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
+
+
+void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
+                                     Register exclusion1,
+                                     Register exclusion2,
+                                     Register exclusion3) {
+  // We don't allow a GC during a store buffer overflow so there is no need to
+  // store the registers in any particular way, but we do have to store and
+  // restore them.
+  for (int i = 0; i < kNumberOfSavedRegs; i++) {
+    Register reg = saved_regs[i];
+    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
+      push(reg);
+    }
+  }
+  // r12 to r15 are callee-saved on all platforms.
+  if (fp_mode == kSaveFPRegs) {
+    CpuFeatures::Scope scope(SSE2);
+    subq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+      XMMRegister reg = XMMRegister::from_code(i);
+      movsd(Operand(rsp, i * kDoubleSize), reg);
+    }
+  }
+}
+
+
+void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
+                                    Register exclusion1,
+                                    Register exclusion2,
+                                    Register exclusion3) {
+  if (fp_mode == kSaveFPRegs) {
+    CpuFeatures::Scope scope(SSE2);
+    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+      XMMRegister reg = XMMRegister::from_code(i);
+      movsd(reg, Operand(rsp, i * kDoubleSize));
+    }
+    addq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+  }
+  for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
+    Register reg = saved_regs[i];
+    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
+      pop(reg);
+    }
+  }
+}
+
+
 void MacroAssembler::Set(Register dst, int64_t x) {
   if (x == 0) {
     xorl(dst, dst);
@@ -2567,13 +2648,91 @@
 void MacroAssembler::CheckFastElements(Register map,
                                        Label* fail,
                                        Label::Distance distance) {
-  STATIC_ASSERT(FAST_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
   cmpb(FieldOperand(map, Map::kBitField2Offset),
        Immediate(Map::kMaximumBitField2FastElementValue));
   j(above, fail, distance);
 }
 
 
+void MacroAssembler::CheckFastObjectElements(Register map,
+                                             Label* fail,
+                                             Label::Distance distance) {
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_ELEMENTS == 1);
+  cmpb(FieldOperand(map, Map::kBitField2Offset),
+       Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue));
+  j(below_equal, fail, distance);
+  cmpb(FieldOperand(map, Map::kBitField2Offset),
+       Immediate(Map::kMaximumBitField2FastElementValue));
+  j(above, fail, distance);
+}
+
+
+void MacroAssembler::CheckFastSmiOnlyElements(Register map,
+                                              Label* fail,
+                                              Label::Distance distance) {
+  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
+  cmpb(FieldOperand(map, Map::kBitField2Offset),
+       Immediate(Map::kMaximumBitField2FastSmiOnlyElementValue));
+  j(above, fail, distance);
+}
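
These predicates only work because the elements-kind enum starts with FAST_SMI_ONLY_ELEMENTS == 0 and FAST_ELEMENTS == 1 (the STATIC_ASSERTs above), which turns each check into one or two comparisons against a maximum bit-field value. A small C++ sketch of the same range logic (only the first two enumerators are pinned down by the asserts; the rest, and the limit constants, are illustrative):

// Sketch of the range checks behind CheckFastElements and friends.
#include <cassert>

enum ElementsKindSketch {
  FAST_SMI_ONLY_ELEMENTS_SKETCH = 0,
  FAST_ELEMENTS_SKETCH = 1,
  FAST_DOUBLE_ELEMENTS_SKETCH = 2,
  DICTIONARY_ELEMENTS_SKETCH = 3
};

const int kMaxFastSmiOnly = FAST_SMI_ONLY_ELEMENTS_SKETCH;
const int kMaxFast = FAST_ELEMENTS_SKETCH;

// CheckFastElements: smi-only and fast object elements both pass.
bool HasFastElements(ElementsKindSketch kind) { return kind <= kMaxFast; }

// CheckFastObjectElements: above the smi-only limit but not above the fast
// limit, i.e. exactly FAST_ELEMENTS.
bool HasFastObjectElements(ElementsKindSketch kind) {
  return kind > kMaxFastSmiOnly && kind <= kMaxFast;
}

// CheckFastSmiOnlyElements: only the first kind passes.
bool HasFastSmiOnlyElements(ElementsKindSketch kind) {
  return kind <= kMaxFastSmiOnly;
}

int main() {
  assert(HasFastElements(FAST_SMI_ONLY_ELEMENTS_SKETCH));
  assert(HasFastElements(FAST_ELEMENTS_SKETCH));
  assert(!HasFastElements(FAST_DOUBLE_ELEMENTS_SKETCH));
  assert(HasFastObjectElements(FAST_ELEMENTS_SKETCH));
  assert(!HasFastObjectElements(FAST_SMI_ONLY_ELEMENTS_SKETCH));
  assert(!HasFastSmiOnlyElements(FAST_ELEMENTS_SKETCH));
  return 0;
}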
+
+
+void MacroAssembler::StoreNumberToDoubleElements(
+    Register maybe_number,
+    Register elements,
+    Register key,
+    XMMRegister xmm_scratch,
+    Label* fail) {
+  Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
+
+  JumpIfSmi(maybe_number, &smi_value, Label::kNear);
+
+  CheckMap(maybe_number,
+           isolate()->factory()->heap_number_map(),
+           fail,
+           DONT_DO_SMI_CHECK);
+
+  // Double value, canonicalize NaN.
+  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
+  cmpl(FieldOperand(maybe_number, offset),
+       Immediate(kNaNOrInfinityLowerBoundUpper32));
+  j(greater_equal, &maybe_nan, Label::kNear);
+
+  bind(&not_nan);
+  movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
+  bind(&have_double_value);
+  movsd(FieldOperand(elements, key, times_8, FixedDoubleArray::kHeaderSize),
+        xmm_scratch);
+  jmp(&done);
+
+  bind(&maybe_nan);
+  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
+  // it's an Infinity, and the non-NaN code path applies.
+  j(greater, &is_nan, Label::kNear);
+  cmpl(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
+  j(zero, &not_nan);
+  bind(&is_nan);
+  // Convert all NaNs to the same canonical NaN value when they are stored in
+  // the double array.
+  Set(kScratchRegister, BitCast<uint64_t>(
+      FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+  movq(xmm_scratch, kScratchRegister);
+  jmp(&have_double_value, Label::kNear);
+
+  bind(&smi_value);
+  // Value is a smi.  Convert it to a double and store.
+  // Preserve original value.
+  SmiToInteger32(kScratchRegister, maybe_number);
+  cvtlsi2sd(xmm_scratch, kScratchRegister);
+  movsd(FieldOperand(elements, key, times_8, FixedDoubleArray::kHeaderSize),
+        xmm_scratch);
+  bind(&done);
+}
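
StoreNumberToDoubleElements canonicalizes every NaN before it lands in a FixedDoubleArray, so that a single reserved NaN bit pattern can safely stand for the hole. A hedged sketch of the idea in plain C++ (the canonical pattern below is an assumption; the assembly performs an equivalent classification split across the upper and lower 32-bit halves of the value):

// Sketch only: detect NaN by its bits and replace it with one fixed pattern.
#include <cstdint>
#include <cstring>
#include <iostream>

uint64_t DoubleBits(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return bits;
}

bool BitsAreNaN(uint64_t bits) {
  // Exponent all ones with a non-zero fraction is NaN; a zero fraction would
  // be an Infinity, which is stored unchanged.
  const uint64_t kExponentMask = 0x7FF0000000000000ULL;
  const uint64_t kFractionMask = 0x000FFFFFFFFFFFFFULL;
  return (bits & kExponentMask) == kExponentMask && (bits & kFractionMask) != 0;
}

double CanonicalizeForDoubleArraySketch(double value) {
  // Assumed canonical quiet NaN; the real array keeps it distinct from the
  // pattern reserved for the hole.
  const uint64_t kCanonicalNaNBits = 0x7FF8000000000000ULL;
  if (!BitsAreNaN(DoubleBits(value))) return value;
  double canonical;
  std::memcpy(&canonical, &kCanonicalNaNBits, sizeof(canonical));
  return canonical;
}

int main() {
  double payload_nan;
  const uint64_t kPayloadNaNBits = 0x7FF8000000DEAD01ULL;  // NaN with payload.
  std::memcpy(&payload_nan, &kPayloadNaNBits, sizeof(payload_nan));
  std::cout << std::hex
            << DoubleBits(CanonicalizeForDoubleArraySketch(payload_nan))
            << '\n';  // 7ff8000000000000: payload discarded.
  std::cout << DoubleBits(CanonicalizeForDoubleArraySketch(1.5))
            << '\n';  // 3ff8000000000000: non-NaN values pass through.
  return 0;
}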
+
+
 void MacroAssembler::CheckMap(Register obj,
                               Handle<Map> map,
                               Label* fail,
@@ -2787,10 +2946,10 @@
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 void MacroAssembler::DebugBreak() {
-  ASSERT(allow_stub_calls());
   Set(rax, 0);  // No arguments.
   LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
   CEntryStub ces(1);
+  ASSERT(AllowThisStubCall(&ces));
   Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
 }
 #endif  // ENABLE_DEBUGGER_SUPPORT
@@ -2816,6 +2975,9 @@
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper,
                                 CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   Label done;
   InvokePrologue(expected,
                  actual,
@@ -2847,6 +3009,9 @@
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper,
                                 CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   Label done;
   Register dummy = rax;
   InvokePrologue(expected,
@@ -2877,6 +3042,9 @@
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   ASSERT(function.is(rdi));
   movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
   movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
@@ -2896,6 +3064,9 @@
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper,
                                     CallKind call_kind) {
+  // You can't call a function without a valid frame.
+  ASSERT(flag == JUMP_FUNCTION || has_frame());
+
   ASSERT(function->is_compiled());
   // Get the function and setup the context.
   Move(rdi, Handle<JSFunction>(function));
@@ -3759,6 +3930,20 @@
 }
 
 
+void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
+                                                Register end_offset,
+                                                Register filler) {
+  Label loop, entry;
+  jmp(&entry);
+  bind(&loop);
+  movq(Operand(start_offset, 0), filler);
+  addq(start_offset, Immediate(kPointerSize));
+  bind(&entry);
+  cmpq(start_offset, end_offset);
+  j(less, &loop);
+}
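
InitializeFieldsWithFiller is just a bump-and-compare loop over pointer-sized slots; a direct C++ analog (the filler constant is made up):

// Analog of the loop above: write |filler| into every slot in
// [start_offset, end_offset); the cursor ends up equal to end_offset,
// matching what happens to the start_offset register.
#include <cassert>
#include <cstdint>

void FillFieldsSketch(uintptr_t* start_offset, uintptr_t* end_offset,
                      uintptr_t filler) {
  while (start_offset < end_offset) {
    *start_offset = filler;
    ++start_offset;
  }
}

int main() {
  uintptr_t object_fields[4] = {1, 2, 3, 4};
  const uintptr_t kFillerSketch = 0xdeadbeef;  // Assumed filler value.
  FillFieldsSketch(object_fields, object_fields + 4, kFillerSketch);
  for (uintptr_t field : object_fields) assert(field == kFillerSketch);
  return 0;
}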
+
+
 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
   if (context_chain_length > 0) {
     // Move up the chain of contexts to the context containing the slot.
@@ -3858,6 +4043,7 @@
 
 
 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
+  ASSERT(has_frame());
   // Check stack alignment.
   if (emit_debug_code()) {
     CheckStackAlignment();
@@ -3872,6 +4058,17 @@
 }
 
 
+bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
+  if (r1.is(r2)) return true;
+  if (r1.is(r3)) return true;
+  if (r1.is(r4)) return true;
+  if (r2.is(r3)) return true;
+  if (r2.is(r4)) return true;
+  if (r3.is(r4)) return true;
+  return false;
+}
+
+
 CodePatcher::CodePatcher(byte* address, int size)
     : address_(address),
       size_(size),
@@ -3892,6 +4089,195 @@
   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }
 
+
+void MacroAssembler::CheckPageFlag(
+    Register object,
+    Register scratch,
+    int mask,
+    Condition cc,
+    Label* condition_met,
+    Label::Distance condition_met_distance) {
+  ASSERT(cc == zero || cc == not_zero);
+  if (scratch.is(object)) {
+    and_(scratch, Immediate(~Page::kPageAlignmentMask));
+  } else {
+    movq(scratch, Immediate(~Page::kPageAlignmentMask));
+    and_(scratch, object);
+  }
+  if (mask < (1 << kBitsPerByte)) {
+    testb(Operand(scratch, MemoryChunk::kFlagsOffset),
+          Immediate(static_cast<uint8_t>(mask)));
+  } else {
+    testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
+  }
+  j(cc, condition_met, condition_met_distance);
+}
+
+
+void MacroAssembler::JumpIfBlack(Register object,
+                                 Register bitmap_scratch,
+                                 Register mask_scratch,
+                                 Label* on_black,
+                                 Label::Distance on_black_distance) {
+  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
+  GetMarkBits(object, bitmap_scratch, mask_scratch);
+
+  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  // The mask_scratch register contains a 1 at the position of the first bit
+  // and a 0 at all other positions, including the position of the second bit.
+  movq(rcx, mask_scratch);
+  // Make rcx into a mask that covers both marking bits using the operation
+  // rcx = mask | (mask << 1).
+  lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
+  // Note that we are using a 4-byte aligned 8-byte load.
+  and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  cmpq(mask_scratch, rcx);
+  j(equal, on_black, on_black_distance);
+}
+
+
+// Detect some, but not all, common pointer-free objects.  This is used by the
+// incremental write barrier which doesn't care about oddballs (they are always
+// marked black immediately so this code is not hit).
+void MacroAssembler::JumpIfDataObject(
+    Register value,
+    Register scratch,
+    Label* not_data_object,
+    Label::Distance not_data_object_distance) {
+  Label is_data_object;
+  movq(scratch, FieldOperand(value, HeapObject::kMapOffset));
+  CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
+  j(equal, &is_data_object, Label::kNear);
+  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  // If it's a string and it's not a cons string then it's an object containing
+  // no GC pointers.
+  testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
+        Immediate(kIsIndirectStringMask | kIsNotStringMask));
+  j(not_zero, not_data_object, not_data_object_distance);
+  bind(&is_data_object);
+}
+
+
+void MacroAssembler::GetMarkBits(Register addr_reg,
+                                 Register bitmap_reg,
+                                 Register mask_reg) {
+  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
+  movq(bitmap_reg, addr_reg);
+  // Sign extended 32 bit immediate.
+  and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
+  movq(rcx, addr_reg);
+  int shift =
+      Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
+  shrl(rcx, Immediate(shift));
+  and_(rcx,
+       Immediate((Page::kPageAlignmentMask >> shift) &
+                 ~(Bitmap::kBytesPerCell - 1)));
+
+  addq(bitmap_reg, rcx);
+  movq(rcx, addr_reg);
+  shrl(rcx, Immediate(kPointerSizeLog2));
+  and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
+  movl(mask_reg, Immediate(1));
+  shl_cl(mask_reg);
+}
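
GetMarkBits packs a fair amount of bit arithmetic into a few instructions: mask the address down to the page (memory chunk) start, compute the byte offset of the 32-bit bitmap cell that covers the word, and build a single-bit mask for the word's position inside that cell. A standalone C++ sketch with assumed constants (1MB pages, 8-byte words, 32-bit cells), keeping the chunk start separate from the header offset that the callers add at load time:

// Sketch of the mark-bit addressing; constants are assumptions chosen to
// mirror 64-bit V8, not values taken from the headers.
#include <cassert>
#include <cstdint>
#include <iostream>

const uintptr_t kPageAlignmentMaskSketch = (1u << 20) - 1;  // Assumed 1MB pages.
const int kPointerSizeLog2Sketch = 3;   // 8-byte words, one mark bit each.
const int kBitsPerCellLog2Sketch = 5;   // 32 mark bits per bitmap cell.
const int kBytesPerCellLog2Sketch = 2;  // 4 bytes per bitmap cell.

struct MarkBitLocationSketch {
  uintptr_t chunk_start;       // Page (memory chunk) base address.
  uintptr_t cell_byte_offset;  // Offset of the covering cell in the bitmap.
  uint32_t mask;               // Single bit selecting the word inside the cell.
};

MarkBitLocationSketch GetMarkBitsSketch(uintptr_t addr) {
  MarkBitLocationSketch loc;
  loc.chunk_start = addr & ~kPageAlignmentMaskSketch;
  const int shift =
      kBitsPerCellLog2Sketch + kPointerSizeLog2Sketch - kBytesPerCellLog2Sketch;
  loc.cell_byte_offset = (addr >> shift) &
                         (kPageAlignmentMaskSketch >> shift) &
                         ~((1u << kBytesPerCellLog2Sketch) - 1);
  loc.mask = 1u << ((addr >> kPointerSizeLog2Sketch) &
                    ((1 << kBitsPerCellLog2Sketch) - 1));
  return loc;
}

int main() {
  // Two adjacent words land in the same bitmap cell with adjacent mask bits,
  // which is what lets JumpIfBlack cover both marking bits with one load.
  MarkBitLocationSketch a = GetMarkBitsSketch(0x100000 + 0x40);
  MarkBitLocationSketch b = GetMarkBitsSketch(0x100000 + 0x48);
  assert(a.chunk_start == b.chunk_start);
  assert(a.cell_byte_offset == b.cell_byte_offset);
  assert(b.mask == a.mask << 1);
  std::cout << "cell offset " << a.cell_byte_offset << ", masks " << a.mask
            << " and " << b.mask << '\n';
  return 0;
}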
+
+
+void MacroAssembler::EnsureNotWhite(
+    Register value,
+    Register bitmap_scratch,
+    Register mask_scratch,
+    Label* value_is_white_and_not_data,
+    Label::Distance distance) {
+  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
+  GetMarkBits(value, bitmap_scratch, mask_scratch);
+
+  // If the value is black or grey we don't need to do anything.
+  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+  Label done;
+
+  // Since both black and grey have a 1 in the first position and white does
+  // not have a 1 there we only need to check one bit.
+  testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+  j(not_zero, &done, Label::kNear);
+
+  if (FLAG_debug_code) {
+    // Check for impossible bit pattern.
+    Label ok;
+    push(mask_scratch);
+    // shl.  May overflow making the check conservative.
+    addq(mask_scratch, mask_scratch);
+    testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+    j(zero, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+    pop(mask_scratch);
+  }
+
+  // Value is white.  We check whether it is data that doesn't need scanning.
+  // Currently only checks for HeapNumber and non-cons strings.
+  Register map = rcx;  // Holds map while checking type.
+  Register length = rcx;  // Holds length of object after checking type.
+  Label not_heap_number;
+  Label is_data_object;
+
+  // Check for heap-number
+  movq(map, FieldOperand(value, HeapObject::kMapOffset));
+  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+  j(not_equal, &not_heap_number, Label::kNear);
+  movq(length, Immediate(HeapNumber::kSize));
+  jmp(&is_data_object, Label::kNear);
+
+  bind(&not_heap_number);
+  // Check for strings.
+  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  // If it's a string and it's not a cons string then it's an object containing
+  // no GC pointers.
+  Register instance_type = rcx;
+  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
+  testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
+  j(not_zero, value_is_white_and_not_data);
+  // It's a non-indirect (non-cons and non-slice) string.
+  // If it's external, the length is just ExternalString::kSize.
+  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+  Label not_external;
+  // External strings are the only ones with the kExternalStringTag bit
+  // set.
+  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
+  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+  testb(instance_type, Immediate(kExternalStringTag));
+  j(zero, &not_external, Label::kNear);
+  movq(length, Immediate(ExternalString::kSize));
+  jmp(&is_data_object, Label::kNear);
+
+  bind(&not_external);
+  // Sequential string, either ASCII or UC16.
+  ASSERT(kAsciiStringTag == 0x04);
+  and_(length, Immediate(kStringEncodingMask));
+  xor_(length, Immediate(kStringEncodingMask));
+  addq(length, Immediate(0x04));
+  // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
+  imul(length, FieldOperand(value, String::kLengthOffset));
+  shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
+  addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
+  and_(length, Immediate(~kObjectAlignmentMask));
+
+  bind(&is_data_object);
+  // Value is a data object, and it is white.  Mark it black.  Since we know
+  // that the object is white we can make it black by flipping one bit.
+  or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+
+  and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
+  addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
+
+  bind(&done);
+}
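
The tail of EnsureNotWhite computes the size of the data object so the page's live-bytes counter can be bumped when the object is marked black; for sequential strings that is the header plus one or two bytes per character, rounded up to the allocation alignment. A hedged C++ sketch of that size computation (the header size and alignment below are assumptions):

// Sketch of the sequential-string size that feeds the live-bytes update.
#include <cassert>
#include <cstdint>

const uintptr_t kObjectAlignmentMaskSketch = 0x7;  // 8-byte alignment.
const uintptr_t kSeqStringHeaderSizeSketch = 24;   // Assumed header size.

uintptr_t SeqStringSizeSketch(uintptr_t length, bool is_ascii) {
  const uintptr_t char_size = is_ascii ? 1 : 2;
  const uintptr_t unaligned = kSeqStringHeaderSizeSketch + length * char_size;
  // Round up to the next object-aligned boundary, as the addq/and_ pair does.
  return (unaligned + kObjectAlignmentMaskSketch) & ~kObjectAlignmentMaskSketch;
}

int main() {
  assert(SeqStringSizeSketch(5, true) == 32);   // 24 + 5, rounded up to 32.
  assert(SeqStringSizeSketch(5, false) == 40);  // 24 + 10, rounded up to 40.
  return 0;
}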
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index e7eb104..7e0ba00 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -29,6 +29,7 @@
 #define V8_X64_MACRO_ASSEMBLER_X64_H_
 
 #include "assembler.h"
+#include "frames.h"
 #include "v8globals.h"
 
 namespace v8 {
@@ -61,6 +62,11 @@
 // Convenience for platform-independent signatures.
 typedef Operand MemOperand;
 
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+
+bool AreAliased(Register r1, Register r2, Register r3, Register r4);
+
 // Forward declaration.
 class JumpTarget;
 
@@ -72,6 +78,7 @@
   ScaleFactor scale;
 };
 
+
 // MacroAssembler implements a collection of frequently used macros.
 class MacroAssembler: public Assembler {
  public:
@@ -134,56 +141,145 @@
   void CompareRoot(const Operand& with, Heap::RootListIndex index);
   void PushRoot(Heap::RootListIndex index);
 
-  // ---------------------------------------------------------------------------
-  // GC Support
+  // These functions do not arrange the registers in any particular order so
+  // they are not useful for calls that can cause a GC.  The caller can
+  // exclude up to 3 registers that do not need to be saved and restored.
+  void PushCallerSaved(SaveFPRegsMode fp_mode,
+                       Register exclusion1 = no_reg,
+                       Register exclusion2 = no_reg,
+                       Register exclusion3 = no_reg);
+  void PopCallerSaved(SaveFPRegsMode fp_mode,
+                      Register exclusion1 = no_reg,
+                      Register exclusion2 = no_reg,
+                      Register exclusion3 = no_reg);
 
-  // For page containing |object| mark region covering |addr| dirty.
-  // RecordWriteHelper only works if the object is not in new
-  // space.
-  void RecordWriteHelper(Register object,
-                         Register addr,
-                         Register scratch);
+// ---------------------------------------------------------------------------
+// GC Support
 
-  // Check if object is in new space. The condition cc can be equal or
-  // not_equal. If it is equal a jump will be done if the object is on new
-  // space. The register scratch can be object itself, but it will be clobbered.
-  void InNewSpace(Register object,
-                  Register scratch,
-                  Condition cc,
-                  Label* branch,
-                  Label::Distance near_jump = Label::kFar);
 
-  // For page containing |object| mark region covering [object+offset]
+  enum RememberedSetFinalAction {
+    kReturnAtEnd,
+    kFallThroughAtEnd
+  };
+
+  // Record in the remembered set the fact that we have a pointer to new space
+  // at the address pointed to by the addr register.  Only works if addr is not
+  // in new space.
+  void RememberedSetHelper(Register object,  // Used for debug code.
+                           Register addr,
+                           Register scratch,
+                           SaveFPRegsMode save_fp,
+                           RememberedSetFinalAction and_then);
+
+  void CheckPageFlag(Register object,
+                     Register scratch,
+                     int mask,
+                     Condition cc,
+                     Label* condition_met,
+                     Label::Distance condition_met_distance = Label::kFar);
+
+  // Check if object is in new space.  Jumps if the object is not in new space.
+  // The register scratch can be object itself, but scratch will be clobbered.
+  void JumpIfNotInNewSpace(Register object,
+                           Register scratch,
+                           Label* branch,
+                           Label::Distance distance = Label::kFar) {
+    InNewSpace(object, scratch, not_equal, branch, distance);
+  }
+
+  // Check if object is in new space.  Jumps if the object is in new space.
+  // The register scratch can be object itself, but it will be clobbered.
+  void JumpIfInNewSpace(Register object,
+                        Register scratch,
+                        Label* branch,
+                        Label::Distance distance = Label::kFar) {
+    InNewSpace(object, scratch, equal, branch, distance);
+  }
+
+  // Check if an object has the black incremental marking color.  Also uses rcx!
+  void JumpIfBlack(Register object,
+                   Register scratch0,
+                   Register scratch1,
+                   Label* on_black,
+                   Label::Distance on_black_distance = Label::kFar);
+
+  // Detects conservatively whether an object is data-only, i.e. it does not
+  // need to be scanned by the garbage collector.
+  void JumpIfDataObject(Register value,
+                        Register scratch,
+                        Label* not_data_object,
+                        Label::Distance not_data_object_distance);
+
+  // Checks the color of an object.  If the object is already grey or black
+  // then we just fall through, since it is already live.  If it is white and
+  // we can determine that it doesn't need to be scanned, then we just mark it
+  // black and fall through.  For the rest we jump to the label so the
+  // incremental marker can fix its assumptions.
+  void EnsureNotWhite(Register object,
+                      Register scratch1,
+                      Register scratch2,
+                      Label* object_is_white_and_not_data,
+                      Label::Distance distance);
+
+  // Notify the garbage collector that we wrote a pointer into an object.
+  // |object| is the object being stored into, |value| is the object being
+  // stored.  value and scratch registers are clobbered by the operation.
+  // The offset is the offset from the start of the object, not the offset from
+  // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
+  void RecordWriteField(
+      Register object,
+      int offset,
+      Register value,
+      Register scratch,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK);
+
+  // As above, but the offset has the tag presubtracted.  For use with
+  // Operand(reg, off).
+  void RecordWriteContextSlot(
+      Register context,
+      int offset,
+      Register value,
+      Register scratch,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK) {
+    RecordWriteField(context,
+                     offset + kHeapObjectTag,
+                     value,
+                     scratch,
+                     save_fp,
+                     remembered_set_action,
+                     smi_check);
+  }
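
The only difference between RecordWriteField and RecordWriteContextSlot is tag bookkeeping: a FieldOperand-style offset is measured from the object start (the tag is subtracted when the address is formed), while an Operand-style offset already has the tag folded in, so the wrapper adds kHeapObjectTag back before delegating. A tiny sketch of that arithmetic (kHeapObjectTag is taken to be 1; the helper names are made up):

// Sketch of the offset conversion behind offset + kHeapObjectTag above.
#include <cassert>
#include <cstdint>

const uintptr_t kHeapObjectTagSketch = 1;

// FieldOperand(obj, off): off is measured from the untagged object start.
uintptr_t FieldOperandAddressSketch(uintptr_t tagged_object, uintptr_t offset) {
  return tagged_object + offset - kHeapObjectTagSketch;
}

// Operand(obj, off): off is used as-is, so it must already absorb the tag.
uintptr_t OperandAddressSketch(uintptr_t base, uintptr_t offset) {
  return base + offset;
}

int main() {
  const uintptr_t context = 0x1001;     // Tagged pointer (low bit set).
  const uintptr_t field_offset = 0x20;  // Offset from the object start.
  const uintptr_t operand_offset = field_offset - kHeapObjectTagSketch;
  // Adding the tag back, as RecordWriteContextSlot does, makes both
  // addressing styles hit the same slot.
  assert(OperandAddressSketch(context, operand_offset) ==
         FieldOperandAddressSketch(context,
                                   operand_offset + kHeapObjectTagSketch));
  return 0;
}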
+
+  // Notify the garbage collector that we wrote a pointer into a fixed array.
+  // |array| is the array being stored into, |value| is the
+  // object being stored.  |index| is the array index represented as a
+  // Smi.  All registers are clobbered by the operation.  RecordWriteArray
+  // filters out smis, so it does not update the write barrier if the
+  // value is a smi.
+  void RecordWriteArray(
+      Register array,
+      Register value,
+      Register index,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK);
+
+  // For page containing |object| mark region covering |address|
   // dirty. |object| is the object being stored into, |value| is the
-  // object being stored. If |offset| is zero, then the |scratch|
-  // register contains the array index into the elements array
-  // represented as an untagged 32-bit integer. All registers are
-  // clobbered by the operation. RecordWrite filters out smis so it
-  // does not update the write barrier if the value is a smi.
-  void RecordWrite(Register object,
-                   int offset,
-                   Register value,
-                   Register scratch);
-
-  // For page containing |object| mark region covering [address]
-  // dirty. |object| is the object being stored into, |value| is the
-  // object being stored. All registers are clobbered by the
+  // object being stored. The address and value registers are clobbered by the
   // operation.  RecordWrite filters out smis so it does not update
   // the write barrier if the value is a smi.
-  void RecordWrite(Register object,
-                   Register address,
-                   Register value);
-
-  // For page containing |object| mark region covering [object+offset] dirty.
-  // The value is known to not be a smi.
-  // object is the object being stored into, value is the object being stored.
-  // If offset is zero, then the scratch register contains the array index into
-  // the elements array represented as an untagged 32-bit integer.
-  // All registers are clobbered by the operation.
-  void RecordWriteNonSmi(Register object,
-                         int offset,
-                         Register value,
-                         Register scratch);
+  void RecordWrite(
+      Register object,
+      Register address,
+      Register value,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK);
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // ---------------------------------------------------------------------------
@@ -192,15 +288,6 @@
   void DebugBreak();
 #endif
 
-  // ---------------------------------------------------------------------------
-  // Activation frames
-
-  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
-  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
-
-  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
-  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
-
   // Enter specific kind of exit frame; either in normal or
   // debug mode. Expects the number of arguments in register rax and
   // sets up the number of arguments in register rdi and the pointer
@@ -760,6 +847,28 @@
                          Label* fail,
                          Label::Distance distance = Label::kFar);
 
+  // Check if a map for a JSObject indicates that the object can have both smi
+  // and HeapObject elements.  Jump to the specified label if it does not.
+  void CheckFastObjectElements(Register map,
+                               Label* fail,
+                               Label::Distance distance = Label::kFar);
+
+  // Check if a map for a JSObject indicates that the object has fast smi only
+  // elements.  Jump to the specified label if it does not.
+  void CheckFastSmiOnlyElements(Register map,
+                                Label* fail,
+                                Label::Distance distance = Label::kFar);
+
+  // Check to see if maybe_number can be stored as a double in
+  // FastDoubleElements. If it can, store it at the index specified by key in
+  // the FastDoubleElements array elements, otherwise jump to fail.
+  // Note that key must not be smi-tagged.
+  void StoreNumberToDoubleElements(Register maybe_number,
+                                   Register elements,
+                                   Register key,
+                                   XMMRegister xmm_scratch,
+                                   Label* fail);
+
   // Check if the map of an object is equal to a specified map and
   // branch to label if not. Skip the smi check if not required
   // (object is known to be a heap object)
@@ -1119,6 +1228,13 @@
                  int min_length = 0,
                  Register scratch = kScratchRegister);
 
+  // Initialize fields with filler values.  Fields from |start_offset| up to
+  // but not including |end_offset| are overwritten with the value in
+  // |filler|.  At the end of the loop, |start_offset| takes the value of
+  // |end_offset|.
+  void InitializeFieldsWithFiller(Register start_offset,
+                                  Register end_offset,
+                                  Register filler);
+
 
   // ---------------------------------------------------------------------------
   // StatsCounter support
@@ -1151,11 +1267,18 @@
   bool generating_stub() { return generating_stub_; }
   void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
   bool allow_stub_calls() { return allow_stub_calls_; }
+  void set_has_frame(bool value) { has_frame_ = value; }
+  bool has_frame() { return has_frame_; }
+  inline bool AllowThisStubCall(CodeStub* stub);
 
   static int SafepointRegisterStackIndex(Register reg) {
     return SafepointRegisterStackIndex(reg.code());
   }
 
+  // Activation support.
+  void EnterFrame(StackFrame::Type type);
+  void LeaveFrame(StackFrame::Type type);
+
  private:
   // Order general registers are pushed by Pushad.
   // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
@@ -1165,6 +1288,7 @@
 
   bool generating_stub_;
   bool allow_stub_calls_;
+  bool has_frame_;
   bool root_array_available_;
 
   // Returns a register holding the smi value. The register MUST NOT be
@@ -1188,10 +1312,6 @@
                       const CallWrapper& call_wrapper = NullCallWrapper(),
                       CallKind call_kind = CALL_AS_METHOD);
 
-  // Activation support.
-  void EnterFrame(StackFrame::Type type);
-  void LeaveFrame(StackFrame::Type type);
-
   void EnterExitFramePrologue(bool save_rax);
 
   // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
@@ -1218,6 +1338,20 @@
                                Register scratch,
                                bool gc_allowed);
 
+  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+  void InNewSpace(Register object,
+                  Register scratch,
+                  Condition cc,
+                  Label* branch,
+                  Label::Distance distance = Label::kFar);
+
+  // Helper for finding the mark bits for an address.  Afterwards, the
+  // bitmap register points at the word with the mark bits and the mask
+  // register holds a mask with a 1 at the position of the first mark bit.
+  // Uses rcx as scratch and leaves addr_reg unchanged.
+  inline void GetMarkBits(Register addr_reg,
+                          Register bitmap_reg,
+                          Register mask_reg);
 
   // Compute memory operands for safepoint stack slots.
   Operand SafepointRegisterSlot(Register reg);
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index a782bd7..55fabc0 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -193,7 +193,7 @@
 void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) {
   Label not_at_start;
   // Did we start the match at the start of the string at all?
-  __ cmpb(Operand(rbp, kStartIndex), Immediate(0));
+  __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
   BranchOrBacktrack(not_equal, &not_at_start);
   // If we did, are we still at the start of the input?
   __ lea(rax, Operand(rsi, rdi, times_1, 0));
@@ -205,7 +205,7 @@
 
 void RegExpMacroAssemblerX64::CheckNotAtStart(Label* on_not_at_start) {
   // Did we start the match at the start of the string at all?
-  __ cmpb(Operand(rbp, kStartIndex), Immediate(0));
+  __ cmpl(Operand(rbp, kStartIndex), Immediate(0));
   BranchOrBacktrack(not_equal, on_not_at_start);
   // If we did, are we still at the start of the input?
   __ lea(rax, Operand(rsi, rdi, times_1, 0));
@@ -431,9 +431,14 @@
     // Isolate.
     __ LoadAddress(rcx, ExternalReference::isolate_address());
 #endif
-    ExternalReference compare =
-        ExternalReference::re_case_insensitive_compare_uc16(masm_.isolate());
-    __ CallCFunction(compare, num_arguments);
+
+    { // NOLINT: Can't find a way to open this scope without confusing the
+      // linter.
+      AllowExternalCallThatCantCauseGC scope(&masm_);
+      ExternalReference compare =
+          ExternalReference::re_case_insensitive_compare_uc16(masm_.isolate());
+      __ CallCFunction(compare, num_arguments);
+    }
 
     // Restore original values before reacting on result value.
     __ Move(code_object_pointer(), masm_.CodeObject());
@@ -706,7 +711,12 @@
   // registers we need.
   // Entry code:
   __ bind(&entry_label_);
-  // Start new stack frame.
+
+  // Tell the system that we have a stack frame.  Because the type is MANUAL, no
+  // code is generated.
+  FrameScope scope(&masm_, StackFrame::MANUAL);
+
+  // Actually emit code to start a new stack frame.
   __ push(rbp);
   __ movq(rbp, rsp);
   // Save parameters and callee-save registers. Order here should correspond
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 76d2555..14ac003 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -645,7 +645,7 @@
                                         scratch1, scratch2, scratch3, name,
                                         miss_label);
 
-    __ EnterInternalFrame();
+    FrameScope scope(masm, StackFrame::INTERNAL);
     // Save the name_ register across the call.
     __ push(name_);
 
@@ -662,7 +662,8 @@
 
     // Restore the name_ register.
     __ pop(name_);
-    __ LeaveInternalFrame();
+
+    // Leave the internal frame.
   }
 
   void LoadWithInterceptor(MacroAssembler* masm,
@@ -670,19 +671,21 @@
                            Register holder,
                            JSObject* holder_obj,
                            Label* interceptor_succeeded) {
-    __ EnterInternalFrame();
-    __ push(holder);  // Save the holder.
-    __ push(name_);  // Save the name.
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ push(holder);  // Save the holder.
+      __ push(name_);  // Save the name.
 
-    CompileCallLoadPropertyWithInterceptor(masm,
-                                           receiver,
-                                           holder,
-                                           name_,
-                                           holder_obj);
+      CompileCallLoadPropertyWithInterceptor(masm,
+                                             receiver,
+                                             holder,
+                                             name_,
+                                             holder_obj);
 
-    __ pop(name_);  // Restore the name.
-    __ pop(receiver);  // Restore the holder.
-    __ LeaveInternalFrame();
+      __ pop(name_);  // Restore the name.
+      __ pop(receiver);  // Restore the holder.
+      // Leave the internal frame.
+    }
 
     __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
     __ j(not_equal, interceptor_succeeded);
@@ -781,7 +784,8 @@
     // Update the write barrier for the array address.
     // Pass the value being stored in the now unused name_reg.
     __ movq(name_reg, rax);
-    __ RecordWrite(receiver_reg, offset, name_reg, scratch);
+    __ RecordWriteField(
+        receiver_reg, offset, name_reg, scratch, kDontSaveFPRegs);
   } else {
     // Write to the properties array.
     int offset = index * kPointerSize + FixedArray::kHeaderSize;
@@ -792,7 +796,8 @@
     // Update the write barrier for the array address.
     // Pass the value being stored in the now unused name_reg.
     __ movq(name_reg, rax);
-    __ RecordWrite(scratch, offset, name_reg, receiver_reg);
+    __ RecordWriteField(
+        scratch, offset, name_reg, receiver_reg, kDontSaveFPRegs);
   }
 
   // Return the value (register rax).
@@ -1139,40 +1144,42 @@
 
     // Save necessary data before invoking an interceptor.
     // Requires a frame to make GC aware of pushed pointers.
-    __ EnterInternalFrame();
+    {
+      FrameScope frame_scope(masm(), StackFrame::INTERNAL);
 
-    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-      // CALLBACKS case needs a receiver to be passed into C++ callback.
-      __ push(receiver);
+      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+        // CALLBACKS case needs a receiver to be passed into C++ callback.
+        __ push(receiver);
+      }
+      __ push(holder_reg);
+      __ push(name_reg);
+
+      // Invoke an interceptor.  Note: map checks from receiver to
+      // interceptor's holder have been compiled before (see a caller
+      // of this method).
+      CompileCallLoadPropertyWithInterceptor(masm(),
+                                             receiver,
+                                             holder_reg,
+                                             name_reg,
+                                             interceptor_holder);
+
+      // Check if interceptor provided a value for property.  If it's
+      // the case, return immediately.
+      Label interceptor_failed;
+      __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
+      __ j(equal, &interceptor_failed);
+      frame_scope.GenerateLeaveFrame();
+      __ ret(0);
+
+      __ bind(&interceptor_failed);
+      __ pop(name_reg);
+      __ pop(holder_reg);
+      if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+        __ pop(receiver);
+      }
+
+      // Leave the internal frame.
     }
-    __ push(holder_reg);
-    __ push(name_reg);
-
-    // Invoke an interceptor.  Note: map checks from receiver to
-    // interceptor's holder has been compiled before (see a caller
-    // of this method.)
-    CompileCallLoadPropertyWithInterceptor(masm(),
-                                           receiver,
-                                           holder_reg,
-                                           name_reg,
-                                           interceptor_holder);
-
-    // Check if interceptor provided a value for property.  If it's
-    // the case, return immediately.
-    Label interceptor_failed;
-    __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
-    __ j(equal, &interceptor_failed);
-    __ LeaveInternalFrame();
-    __ ret(0);
-
-    __ bind(&interceptor_failed);
-    __ pop(name_reg);
-    __ pop(holder_reg);
-    if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
-      __ pop(receiver);
-    }
-
-    __ LeaveInternalFrame();
 
     // Check that the maps from interceptor's holder to lookup's holder
     // haven't changed.  And load lookup's holder into |holder| register.
@@ -1421,7 +1428,7 @@
     __ j(not_equal, &call_builtin);
 
     if (argc == 1) {  // Otherwise fall through to call builtin.
-      Label exit, with_write_barrier, attempt_to_grow_elements;
+      Label attempt_to_grow_elements, with_write_barrier;
 
       // Get the array's length into rax and calculate new length.
       __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
@@ -1435,30 +1442,42 @@
       __ cmpl(rax, rcx);
       __ j(greater, &attempt_to_grow_elements);
 
+      // Check if value is a smi.
+      __ movq(rcx, Operand(rsp, argc * kPointerSize));
+      __ JumpIfNotSmi(rcx, &with_write_barrier);
+
       // Save new length.
       __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
 
       // Push the element.
-      __ movq(rcx, Operand(rsp, argc * kPointerSize));
       __ lea(rdx, FieldOperand(rbx,
                                rax, times_pointer_size,
                                FixedArray::kHeaderSize - argc * kPointerSize));
       __ movq(Operand(rdx, 0), rcx);
 
-      // Check if value is a smi.
       __ Integer32ToSmi(rax, rax);  // Return new length as smi.
-
-      __ JumpIfNotSmi(rcx, &with_write_barrier);
-
-      __ bind(&exit);
       __ ret((argc + 1) * kPointerSize);
 
       __ bind(&with_write_barrier);
 
-      __ InNewSpace(rbx, rcx, equal, &exit);
+      if (FLAG_smi_only_arrays) {
+        __ movq(rdi, FieldOperand(rdx, HeapObject::kMapOffset));
+        __ CheckFastObjectElements(rdi, &call_builtin);
+      }
 
-      __ RecordWriteHelper(rbx, rdx, rcx);
+      // Save new length.
+      __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
 
+      // Push the element.
+      __ lea(rdx, FieldOperand(rbx,
+                               rax, times_pointer_size,
+                               FixedArray::kHeaderSize - argc * kPointerSize));
+      __ movq(Operand(rdx, 0), rcx);
+
+      __ RecordWrite(
+          rbx, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+      __ Integer32ToSmi(rax, rax);  // Return new length as smi.
       __ ret((argc + 1) * kPointerSize);
 
       __ bind(&attempt_to_grow_elements);
@@ -1466,6 +1485,17 @@
         __ jmp(&call_builtin);
       }
 
+      __ movq(rdi, Operand(rsp, argc * kPointerSize));
+      if (FLAG_smi_only_arrays) {
+        // Growing elements that are SMI-only requires special handling in case
+        // the new element is non-Smi. For now, delegate to the builtin.
+        Label no_fast_elements_check;
+        __ JumpIfSmi(rdi, &no_fast_elements_check);
+        __ movq(rsi, FieldOperand(rdx, HeapObject::kMapOffset));
+        __ CheckFastObjectElements(rsi, &call_builtin, Label::kFar);
+        __ bind(&no_fast_elements_check);
+      }
+
       ExternalReference new_space_allocation_top =
           ExternalReference::new_space_allocation_top_address(isolate());
       ExternalReference new_space_allocation_limit =
@@ -1489,16 +1519,22 @@
 
       // We fit and could grow elements.
       __ Store(new_space_allocation_top, rcx);
-      __ movq(rcx, Operand(rsp, argc * kPointerSize));
 
       // Push the argument...
-      __ movq(Operand(rdx, 0), rcx);
+      __ movq(Operand(rdx, 0), rdi);
       // ... and fill the rest with holes.
       __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
       for (int i = 1; i < kAllocationDelta; i++) {
         __ movq(Operand(rdx, i * kPointerSize), kScratchRegister);
       }
 
+      // We know the elements array is in new space so we don't need the
+      // remembered set, but we just pushed a value onto it so we may have to
+      // tell the incremental marker to rescan the object that we just grew.  We
+      // don't need to worry about the holes because they are in old space and
+      // already marked black.
+      __ RecordWrite(rbx, rdx, rdi, kDontSaveFPRegs, OMIT_REMEMBERED_SET);
+
       // Restore receiver to rdx as finish sequence assumes it's here.
       __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
 
@@ -1510,7 +1546,6 @@
       __ Integer32ToSmi(rax, rax);
       __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
 
-      // Elements are in new space, so write barrier is not required.
       __ ret((argc + 1) * kPointerSize);
     }
 
@@ -2463,19 +2498,36 @@
          Handle<Map>(object->map()));
   __ j(not_equal, &miss);
 
+  // Compute the cell operand to use.
+  __ Move(rbx, Handle<JSGlobalPropertyCell>(cell));
+  Operand cell_operand = FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset);
+
   // Check that the value in the cell is not the hole. If it is, this
   // cell could have been deleted and reintroducing the global needs
   // to update the property details in the property dictionary of the
   // global object. We bail out to the runtime system to do that.
-  __ Move(rbx, Handle<JSGlobalPropertyCell>(cell));
-  __ CompareRoot(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
-                 Heap::kTheHoleValueRootIndex);
+  __ CompareRoot(cell_operand, Heap::kTheHoleValueRootIndex);
   __ j(equal, &miss);
 
   // Store the value in the cell.
-  __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rax);
+  __ movq(cell_operand, rax);
+  Label done;
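+  // A smi store needs no write barrier, so skip the RecordWrite below.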
+  __ JumpIfSmi(rax, &done);
+
+  __ movq(rcx, rax);
+  __ lea(rdx, cell_operand);
+  // Cells are always in the remembered set.
+  __ RecordWrite(rbx,  // Object.
+                 rdx,  // Address.
+                 rcx,  // Value.
+                 kDontSaveFPRegs,
+                 OMIT_REMEMBERED_SET,
+                 OMIT_SMI_CHECK);
+
 
   // Return the value (register rax).
+  __ bind(&done);
+
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->named_store_global_inline(), 1);
   __ ret(0);
@@ -3436,6 +3488,7 @@
       __ movsd(Operand(rbx, rdi, times_8, 0), xmm0);
       break;
     case FAST_ELEMENTS:
+    case FAST_SMI_ONLY_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS:
     case DICTIONARY_ELEMENTS:
     case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3503,6 +3556,7 @@
         case EXTERNAL_FLOAT_ELEMENTS:
         case EXTERNAL_DOUBLE_ELEMENTS:
         case FAST_ELEMENTS:
+        case FAST_SMI_ONLY_ELEMENTS:
         case FAST_DOUBLE_ELEMENTS:
         case DICTIONARY_ELEMENTS:
         case NON_STRICT_ARGUMENTS_ELEMENTS:
@@ -3634,8 +3688,10 @@
 }
 
 
-void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
-                                                      bool is_js_array) {
+void KeyedStoreStubCompiler::GenerateStoreFastElement(
+    MacroAssembler* masm,
+    bool is_js_array,
+    ElementsKind elements_kind) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : key
@@ -3665,13 +3721,22 @@
     __ j(above_equal, &miss_force_generic);
   }
 
-  // Do the store and update the write barrier. Make sure to preserve
-  // the value in register eax.
-  __ movq(rdx, rax);
-  __ SmiToInteger32(rcx, rcx);
-  __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
-          rax);
-  __ RecordWrite(rdi, 0, rdx, rcx);
+  // Do the store and update the write barrier.
+  if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
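+    // Storing a non-smi would violate the smi-only invariant, so miss to the
+    // generic stub instead.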
+    __ JumpIfNotSmi(rax, &miss_force_generic);
+    __ SmiToInteger32(rcx, rcx);
+    __ movq(FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize),
+            rax);
+  } else {
+    ASSERT(elements_kind == FAST_ELEMENTS);
+    __ SmiToInteger32(rcx, rcx);
+    __ lea(rcx,
+           FieldOperand(rdi, rcx, times_pointer_size, FixedArray::kHeaderSize));
+    __ movq(Operand(rcx, 0), rax);
+    // Make sure to preserve the value in register rax.
+    __ movq(rdx, rax);
+    __ RecordWrite(rdi, rcx, rdx, kDontSaveFPRegs);
+  }
 
   // Done.
   __ ret(0);
@@ -3693,8 +3758,7 @@
   //  -- rdx    : receiver
   //  -- rsp[0] : return address
   // -----------------------------------
-  Label miss_force_generic, smi_value, is_nan, maybe_nan;
-  Label have_double_value, not_nan;
+  Label miss_force_generic;
 
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
@@ -3715,50 +3779,8 @@
   __ j(above_equal, &miss_force_generic);
 
   // Handle smi values specially
-  __ JumpIfSmi(rax, &smi_value, Label::kNear);
-
-  __ CheckMap(rax,
-              masm->isolate()->factory()->heap_number_map(),
-              &miss_force_generic,
-              DONT_DO_SMI_CHECK);
-
-  // Double value, canonicalize NaN.
-  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
-  __ cmpl(FieldOperand(rax, offset),
-          Immediate(kNaNOrInfinityLowerBoundUpper32));
-  __ j(greater_equal, &maybe_nan, Label::kNear);
-
-  __ bind(&not_nan);
-  __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
-  __ bind(&have_double_value);
   __ SmiToInteger32(rcx, rcx);
-  __ movsd(FieldOperand(rdi, rcx, times_8, FixedDoubleArray::kHeaderSize),
-           xmm0);
-  __ ret(0);
-
-  __ bind(&maybe_nan);
-  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
-  // it's an Infinity, and the non-NaN code path applies.
-  __ j(greater, &is_nan, Label::kNear);
-  __ cmpl(FieldOperand(rax, HeapNumber::kValueOffset), Immediate(0));
-  __ j(zero, &not_nan);
-  __ bind(&is_nan);
-  // Convert all NaNs to the same canonical NaN value when they are stored in
-  // the double array.
-  __ Set(kScratchRegister, BitCast<uint64_t>(
-      FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
-  __ movq(xmm0, kScratchRegister);
-  __ jmp(&have_double_value, Label::kNear);
-
-  __ bind(&smi_value);
-  // Value is a smi. convert to a double and store.
-  // Preserve original value.
-  __ SmiToInteger32(rdx, rax);
-  __ push(rdx);
-  __ fild_s(Operand(rsp, 0));
-  __ pop(rdx);
-  __ SmiToInteger32(rcx, rcx);
-  __ fstp_d(FieldOperand(rdi, rcx, times_8, FixedDoubleArray::kHeaderSize));
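+  // The macro-assembler helper stores smis and heap numbers (canonicalizing
+  // NaNs) and jumps to miss_force_generic for any other value.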
+  __ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0, &miss_force_generic);
   __ ret(0);
 
   // Handle store cache miss, replacing the ic with the generic stub.
diff --git a/test/cctest/cctest.gyp b/test/cctest/cctest.gyp
index 5d0cab3..efcbad7 100644
--- a/test/cctest/cctest.gyp
+++ b/test/cctest/cctest.gyp
@@ -91,7 +91,8 @@
         'test-threads.cc',
         'test-unbound-queue.cc',
         'test-utils.cc',
-        'test-version.cc'
+        'test-version.cc',
+        'test-weakmaps.cc'
       ],
       'conditions': [
         ['v8_target_arch=="ia32"', {
@@ -134,6 +135,12 @@
           'sources': [
             'test-platform-win32.cc',
           ],
+          'msvs_settings': {
+            'VCCLCompilerTool': {
+              # MSVS wants this for gay-{precision,shortest}.cc.
+              'AdditionalOptions': ['/bigobj'],
+            },
+          },
         }],
         ['component=="shared_library"', {
           # cctest can't be built against a shared library, so we need to
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index 5122da5..759f69f 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -33,14 +33,28 @@
 # BUG(382): Weird test. Can't guarantee that it never times out.
 test-api/ApplyInterruption: PASS || TIMEOUT
 
+# BUG(484): This test, which we thought was corrected in r5236, is
+# re-appearing. Disabled until the bug in the test is fixed. It only fails
+# when the snapshot is on, so it is marked PASS || FAIL.
+test-heap-profiler/HeapSnapshotsDiff: PASS || FAIL
+
 # These tests always fail.  They are here to test test.py.  If
 # they don't fail then test.py has failed.
 test-serialize/TestThatAlwaysFails: FAIL
 test-serialize/DependentTestThatAlwaysFails: FAIL
 
+# TODO(gc): Temporarily disabled in the GC branch.
+test-log/EquivalenceOfLoggingAndTraversal: PASS || FAIL
+
+# BUG(1261): Flaky test.
+test-profile-generator/RecordStackTraceAtStartProfiling: PASS || FAIL
+
 # We do not yet shrink weak maps after they have been emptied by the GC
 test-weakmaps/Shrinking: FAIL
 
+# NewGC: BUG(1717)
+test-api/OutOfMemoryNested: PASS || TIMEOUT
+
 ##############################################################################
 [ $arch == arm ]
 
diff --git a/test/cctest/test-accessors.cc b/test/cctest/test-accessors.cc
index d95536d..b1900f9 100644
--- a/test/cctest/test-accessors.cc
+++ b/test/cctest/test-accessors.cc
@@ -241,7 +241,7 @@
   ApiTestFuzzer::Fuzz();
   CHECK(info.This() == info.Holder());
   CHECK(info.Data()->Equals(v8::String::New("data")));
-  HEAP->CollectAllGarbage(true);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK(info.This() == info.Holder());
   CHECK(info.Data()->Equals(v8::String::New("data")));
   return v8::Integer::New(17);
diff --git a/test/cctest/test-alloc.cc b/test/cctest/test-alloc.cc
index 9767192..899c902 100644
--- a/test/cctest/test-alloc.cc
+++ b/test/cctest/test-alloc.cc
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -72,11 +72,29 @@
   }
   CHECK(!heap->AllocateRawAsciiString(100, TENURED)->IsFailure());
 
-  // Large object space.
-  while (!heap->OldGenerationAllocationLimitReached()) {
-    CHECK(!heap->AllocateFixedArray(10000, TENURED)->IsFailure());
+  // Old pointer space.
+  OldSpace* old_pointer_space = heap->old_pointer_space();
+  static const int kOldPointerSpaceFillerLength = 10000;
+  static const int kOldPointerSpaceFillerSize = FixedArray::SizeFor(
+      kOldPointerSpaceFillerLength);
+  while (old_pointer_space->Available() > kOldPointerSpaceFillerSize) {
+    CHECK(!heap->AllocateFixedArray(kOldPointerSpaceFillerLength, TENURED)->
+          IsFailure());
   }
-  CHECK(!heap->AllocateFixedArray(10000, TENURED)->IsFailure());
+  CHECK(!heap->AllocateFixedArray(kOldPointerSpaceFillerLength, TENURED)->
+        IsFailure());
+
+  // Large object space.
+  static const int kLargeObjectSpaceFillerLength = 300000;
+  static const int kLargeObjectSpaceFillerSize = FixedArray::SizeFor(
+      kLargeObjectSpaceFillerLength);
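+  // The filler is too large for a paged space, so it must go into large
+  // object space (see the assert below).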
+  ASSERT(kLargeObjectSpaceFillerSize > heap->MaxObjectSizeInPagedSpace());
+  while (heap->OldGenerationSpaceAvailable() > kLargeObjectSpaceFillerSize) {
+    CHECK(!heap->AllocateFixedArray(kLargeObjectSpaceFillerLength, TENURED)->
+          IsFailure());
+  }
+  CHECK(!heap->AllocateFixedArray(kLargeObjectSpaceFillerLength, TENURED)->
+        IsFailure());
 
   // Map space.
   MapSpace* map_space = heap->map_space();
@@ -175,16 +193,16 @@
 // Plain old data class.  Represents a block of allocated memory.
 class Block {
  public:
-  Block(void* base_arg, int size_arg)
+  Block(Address base_arg, int size_arg)
       : base(base_arg), size(size_arg) {}
 
-  void *base;
+  Address base;
   int size;
 };
 
 
 TEST(CodeRange) {
-  const int code_range_size = 16*MB;
+  const int code_range_size = 32*MB;
   OS::Setup();
   Isolate::Current()->InitializeLoggingAndCounters();
   CodeRange* code_range = new CodeRange(Isolate::Current());
@@ -196,11 +214,13 @@
   while (total_allocated < 5 * code_range_size) {
     if (current_allocated < code_range_size / 10) {
       // Allocate a block.
-      // Geometrically distributed sizes, greater than Page::kPageSize.
-      size_t requested = (Page::kPageSize << (Pseudorandom() % 6)) +
+      // Geometrically distributed sizes, greater than Page::kMaxHeapObjectSize.
+      // TODO(gc): instead of using 3, use a constant based on code_range_size
+      // and kMaxHeapObjectSize.
+      size_t requested = (Page::kMaxHeapObjectSize << (Pseudorandom() % 3)) +
            Pseudorandom() % 5000 + 1;
       size_t allocated = 0;
-      void* base = code_range->AllocateRawMemory(requested, &allocated);
+      Address base = code_range->AllocateRawMemory(requested, &allocated);
       CHECK(base != NULL);
       blocks.Add(Block(base, static_cast<int>(allocated)));
       current_allocated += static_cast<int>(allocated);
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index c1c8aae..167c4cd 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -80,6 +80,11 @@
   CHECK_EQ(expected, *ascii);
 }
 
+static void ExpectInt32(const char* code, int expected) {
+  Local<Value> result = CompileRun(code);
+  CHECK(result->IsInt32());
+  CHECK_EQ(expected, result->Int32Value());
+}
 
 static void ExpectBoolean(const char* code, bool expected) {
   Local<Value> result = CompileRun(code);
@@ -393,11 +398,11 @@
     CHECK(source->IsExternal());
     CHECK_EQ(resource,
              static_cast<TestResource*>(source->GetExternalStringResource()));
-    HEAP->CollectAllGarbage(false);
+    HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
     CHECK_EQ(0, dispose_count);
   }
   v8::internal::Isolate::Current()->compilation_cache()->Clear();
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllAvailableGarbage();
   CHECK_EQ(1, dispose_count);
 }
 
@@ -415,11 +420,11 @@
     Local<Value> value = script->Run();
     CHECK(value->IsNumber());
     CHECK_EQ(7, value->Int32Value());
-    HEAP->CollectAllGarbage(false);
+    HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
     CHECK_EQ(0, dispose_count);
   }
   i::Isolate::Current()->compilation_cache()->Clear();
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllAvailableGarbage();
   CHECK_EQ(1, dispose_count);
 }
 
@@ -441,11 +446,12 @@
     Local<Value> value = script->Run();
     CHECK(value->IsNumber());
     CHECK_EQ(7, value->Int32Value());
-    HEAP->CollectAllGarbage(false);
+    HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
     CHECK_EQ(0, dispose_count);
   }
   i::Isolate::Current()->compilation_cache()->Clear();
-  HEAP->CollectAllGarbage(false);
+  // TODO(1608): This should use kAbortIncrementalMarking.
+  HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
   CHECK_EQ(1, dispose_count);
 }
 
@@ -467,11 +473,12 @@
     Local<Value> value = script->Run();
     CHECK(value->IsNumber());
     CHECK_EQ(7, value->Int32Value());
-    HEAP->CollectAllGarbage(false);
+    HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
     CHECK_EQ(0, dispose_count);
   }
   i::Isolate::Current()->compilation_cache()->Clear();
-  HEAP->CollectAllGarbage(false);
+  // TODO(1608): This should use kAbortIncrementalMarking.
+  HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
   CHECK_EQ(1, dispose_count);
 }
 
@@ -572,8 +579,8 @@
     i::Handle<i::String> isymbol = FACTORY->SymbolFromString(istring);
     CHECK(isymbol->IsSymbol());
   }
-  HEAP->CollectAllGarbage(false);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 }
 
 
@@ -590,8 +597,8 @@
     i::Handle<i::String> isymbol = FACTORY->SymbolFromString(istring);
     CHECK(isymbol->IsSymbol());
   }
-  HEAP->CollectAllGarbage(false);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 }
 
 
@@ -672,11 +679,11 @@
     Local<Value> value = script->Run();
     CHECK(value->IsNumber());
     CHECK_EQ(7, value->Int32Value());
-    HEAP->CollectAllGarbage(false);
+    HEAP->CollectAllAvailableGarbage();
     CHECK_EQ(0, TestAsciiResourceWithDisposeControl::dispose_count);
   }
   i::Isolate::Current()->compilation_cache()->Clear();
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllAvailableGarbage();
   CHECK_EQ(1, TestAsciiResourceWithDisposeControl::dispose_calls);
   CHECK_EQ(0, TestAsciiResourceWithDisposeControl::dispose_count);
 
@@ -693,11 +700,11 @@
     Local<Value> value = script->Run();
     CHECK(value->IsNumber());
     CHECK_EQ(7, value->Int32Value());
-    HEAP->CollectAllGarbage(false);
+    HEAP->CollectAllAvailableGarbage();
     CHECK_EQ(0, TestAsciiResourceWithDisposeControl::dispose_count);
   }
   i::Isolate::Current()->compilation_cache()->Clear();
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllAvailableGarbage();
   CHECK_EQ(1, TestAsciiResourceWithDisposeControl::dispose_calls);
   CHECK_EQ(1, TestAsciiResourceWithDisposeControl::dispose_count);
 }
@@ -744,8 +751,8 @@
     CHECK_EQ(68, value->Int32Value());
   }
   i::Isolate::Current()->compilation_cache()->Clear();
-  HEAP->CollectAllGarbage(false);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 }
 
 
@@ -1294,6 +1301,197 @@
   return name;
 }
 
+// Helper functions for Interceptor/Accessor interaction tests
+
+Handle<Value> SimpleAccessorGetter(Local<String> name,
+                                   const AccessorInfo& info) {
+  Handle<Object> self = info.This();
+  return self->Get(String::Concat(v8_str("accessor_"), name));
+}
+
+void SimpleAccessorSetter(Local<String> name, Local<Value> value,
+                          const AccessorInfo& info) {
+  Handle<Object> self = info.This();
+  self->Set(String::Concat(v8_str("accessor_"), name), value);
+}
+
+Handle<Value> EmptyInterceptorGetter(Local<String> name,
+                                     const AccessorInfo& info) {
+  return Handle<Value>();
+}
+
+Handle<Value> EmptyInterceptorSetter(Local<String> name,
+                                     Local<Value> value,
+                                     const AccessorInfo& info) {
+  return Handle<Value>();
+}
+
+Handle<Value> InterceptorGetter(Local<String> name,
+                                const AccessorInfo& info) {
+  // Intercept names that start with 'interceptor_'.
+  String::AsciiValue ascii(name);
+  char* name_str = *ascii;
+  char prefix[] = "interceptor_";
+  int i;
+  for (i = 0; name_str[i] && prefix[i]; ++i) {
+    if (name_str[i] != prefix[i]) return Handle<Value>();
+  }
+  Handle<Object> self = info.This();
+  return self->GetHiddenValue(v8_str(name_str + i));
+}
+
+Handle<Value> InterceptorSetter(Local<String> name,
+                                Local<Value> value,
+                                const AccessorInfo& info) {
+  // Intercept accesses that set certain integer values.
+  if (value->IsInt32() && value->Int32Value() < 10000) {
+    Handle<Object> self = info.This();
+    self->SetHiddenValue(name, value);
+    return value;
+  }
+  return Handle<Value>();
+}
+
+void AddAccessor(Handle<FunctionTemplate> templ,
+                 Handle<String> name,
+                 v8::AccessorGetter getter,
+                 v8::AccessorSetter setter) {
+  templ->PrototypeTemplate()->SetAccessor(name, getter, setter);
+}
+
+void AddInterceptor(Handle<FunctionTemplate> templ,
+                    v8::NamedPropertyGetter getter,
+                    v8::NamedPropertySetter setter) {
+  templ->InstanceTemplate()->SetNamedPropertyHandler(getter, setter);
+}
+
+THREADED_TEST(EmptyInterceptorDoesNotShadowAccessors) {
+  v8::HandleScope scope;
+  Handle<FunctionTemplate> parent = FunctionTemplate::New();
+  Handle<FunctionTemplate> child = FunctionTemplate::New();
+  child->Inherit(parent);
+  AddAccessor(parent, v8_str("age"),
+              SimpleAccessorGetter, SimpleAccessorSetter);
+  AddInterceptor(child, EmptyInterceptorGetter, EmptyInterceptorSetter);
+  LocalContext env;
+  env->Global()->Set(v8_str("Child"), child->GetFunction());
+  CompileRun("var child = new Child;"
+             "child.age = 10;");
+  ExpectBoolean("child.hasOwnProperty('age')", false);
+  ExpectInt32("child.age", 10);
+  ExpectInt32("child.accessor_age", 10);
+}
+
+THREADED_TEST(EmptyInterceptorDoesNotShadowJSAccessors) {
+  v8::HandleScope scope;
+  Handle<FunctionTemplate> parent = FunctionTemplate::New();
+  Handle<FunctionTemplate> child = FunctionTemplate::New();
+  child->Inherit(parent);
+  AddInterceptor(child, EmptyInterceptorGetter, EmptyInterceptorSetter);
+  LocalContext env;
+  env->Global()->Set(v8_str("Child"), child->GetFunction());
+  CompileRun("var child = new Child;"
+             "var parent = child.__proto__;"
+             "Object.defineProperty(parent, 'age', "
+             "  {get: function(){ return this.accessor_age; }, "
+             "   set: function(v){ this.accessor_age = v; }, "
+             "   enumerable: true, configurable: true});"
+             "child.age = 10;");
+  ExpectBoolean("child.hasOwnProperty('age')", false);
+  ExpectInt32("child.age", 10);
+  ExpectInt32("child.accessor_age", 10);
+}
+
+THREADED_TEST(EmptyInterceptorDoesNotAffectJSProperties) {
+  v8::HandleScope scope;
+  Handle<FunctionTemplate> parent = FunctionTemplate::New();
+  Handle<FunctionTemplate> child = FunctionTemplate::New();
+  child->Inherit(parent);
+  AddInterceptor(child, EmptyInterceptorGetter, EmptyInterceptorSetter);
+  LocalContext env;
+  env->Global()->Set(v8_str("Child"), child->GetFunction());
+  CompileRun("var child = new Child;"
+             "var parent = child.__proto__;"
+             "parent.name = 'Alice';");
+  ExpectBoolean("child.hasOwnProperty('name')", false);
+  ExpectString("child.name", "Alice");
+  CompileRun("child.name = 'Bob';");
+  ExpectString("child.name", "Bob");
+  ExpectBoolean("child.hasOwnProperty('name')", true);
+  ExpectString("parent.name", "Alice");
+}
+
+THREADED_TEST(SwitchFromInterceptorToAccessor) {
+  v8::HandleScope scope;
+  Handle<FunctionTemplate> parent = FunctionTemplate::New();
+  Handle<FunctionTemplate> child = FunctionTemplate::New();
+  child->Inherit(parent);
+  AddAccessor(parent, v8_str("age"),
+              SimpleAccessorGetter, SimpleAccessorSetter);
+  AddInterceptor(child, InterceptorGetter, InterceptorSetter);
+  LocalContext env;
+  env->Global()->Set(v8_str("Child"), child->GetFunction());
+  CompileRun("var child = new Child;"
+             "function setAge(i){ child.age = i; };"
+             "for(var i = 0; i <= 10000; i++) setAge(i);");
+  // All i < 10000 go to the interceptor.
+  ExpectInt32("child.interceptor_age", 9999);
+  // The last i goes to the accessor.
+  ExpectInt32("child.accessor_age", 10000);
+}
+
+THREADED_TEST(SwitchFromAccessorToInterceptor) {
+  v8::HandleScope scope;
+  Handle<FunctionTemplate> parent = FunctionTemplate::New();
+  Handle<FunctionTemplate> child = FunctionTemplate::New();
+  child->Inherit(parent);
+  AddAccessor(parent, v8_str("age"),
+              SimpleAccessorGetter, SimpleAccessorSetter);
+  AddInterceptor(child, InterceptorGetter, InterceptorSetter);
+  LocalContext env;
+  env->Global()->Set(v8_str("Child"), child->GetFunction());
+  CompileRun("var child = new Child;"
+             "function setAge(i){ child.age = i; };"
+             "for(var i = 20000; i >= 9999; i--) setAge(i);");
+  // All i >= 10000 go to the accessor.
+  ExpectInt32("child.accessor_age", 10000);
+  // The last i goes to the interceptor.
+  ExpectInt32("child.interceptor_age", 9999);
+}
+
+THREADED_TEST(SwitchFromInterceptorToProperty) {
+  v8::HandleScope scope;
+  Handle<FunctionTemplate> parent = FunctionTemplate::New();
+  Handle<FunctionTemplate> child = FunctionTemplate::New();
+  child->Inherit(parent);
+  AddInterceptor(child, InterceptorGetter, InterceptorSetter);
+  LocalContext env;
+  env->Global()->Set(v8_str("Child"), child->GetFunction());
+  CompileRun("var child = new Child;"
+             "function setAge(i){ child.age = i; };"
+             "for(var i = 0; i <= 10000; i++) setAge(i);");
+  // All i < 10000 go to the interceptor.
+  ExpectInt32("child.interceptor_age", 9999);
+  // The last i goes to child's own property.
+  ExpectInt32("child.age", 10000);
+}
+
+THREADED_TEST(SwitchFromPropertyToInterceptor) {
+  v8::HandleScope scope;
+  Handle<FunctionTemplate> parent = FunctionTemplate::New();
+  Handle<FunctionTemplate> child = FunctionTemplate::New();
+  child->Inherit(parent);
+  AddInterceptor(child, InterceptorGetter, InterceptorSetter);
+  LocalContext env;
+  env->Global()->Set(v8_str("Child"), child->GetFunction());
+  CompileRun("var child = new Child;"
+             "function setAge(i){ child.age = i; };"
+             "for(var i = 20000; i >= 9999; i--) setAge(i);");
+  // All i >= 10000 go to child's own property.
+  ExpectInt32("child.age", 10000);
+  // The last i goes to the interceptor.
+  ExpectInt32("child.interceptor_age", 9999);
+}
 
 THREADED_TEST(NamedPropertyHandlerGetter) {
   echo_named_call_count = 0;
@@ -1666,12 +1864,12 @@
 
   // Check reading and writing aligned pointers.
   obj->SetPointerInInternalField(0, aligned);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(aligned, obj->GetPointerFromInternalField(0));
 
   // Check reading and writing unaligned pointers.
   obj->SetPointerInInternalField(0, unaligned);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(unaligned, obj->GetPointerFromInternalField(0));
 
   delete[] data;
@@ -1697,19 +1895,19 @@
   CHECK_EQ(1, static_cast<int>(reinterpret_cast<uintptr_t>(unaligned) & 0x1));
 
   obj->SetPointerInInternalField(0, aligned);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(aligned, v8::External::Unwrap(obj->GetInternalField(0)));
 
   obj->SetPointerInInternalField(0, unaligned);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(unaligned, v8::External::Unwrap(obj->GetInternalField(0)));
 
   obj->SetInternalField(0, v8::External::Wrap(aligned));
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(aligned, obj->GetPointerFromInternalField(0));
 
   obj->SetInternalField(0, v8::External::Wrap(unaligned));
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(unaligned, obj->GetPointerFromInternalField(0));
 
   delete[] data;
@@ -1722,7 +1920,7 @@
 
   // Ensure that the test starts with a fresh heap to test whether the hash
   // code is based on the address.
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   Local<v8::Object> obj = v8::Object::New();
   int hash = obj->GetIdentityHash();
   int hash1 = obj->GetIdentityHash();
@@ -1732,7 +1930,7 @@
   // objects should not be assigned the same hash code. If the test below fails
   // the random number generator should be evaluated.
   CHECK_NE(hash, hash2);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   int hash3 = v8::Object::New()->GetIdentityHash();
   // Make sure that the identity hash is not based on the initial address of
   // the object alone. If the test below fails the random number generator
@@ -1769,7 +1967,7 @@
   v8::Local<v8::String> empty = v8_str("");
   v8::Local<v8::String> prop_name = v8_str("prop_name");
 
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 
   // Make sure delete of a non-existent hidden value works
   CHECK(obj->DeleteHiddenValue(key));
@@ -1779,7 +1977,7 @@
   CHECK(obj->SetHiddenValue(key, v8::Integer::New(2002)));
   CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
 
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 
   // Make sure we do not find the hidden property.
   CHECK(!obj->Has(empty));
@@ -1790,7 +1988,7 @@
   CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
   CHECK_EQ(2003, obj->Get(empty)->Int32Value());
 
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 
   // Add another property and delete it afterwards to force the object in
   // slow case.
@@ -1801,7 +1999,7 @@
   CHECK(obj->Delete(prop_name));
   CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
 
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 
   CHECK(obj->DeleteHiddenValue(key));
   CHECK(obj->GetHiddenValue(key).IsEmpty());
@@ -1908,19 +2106,30 @@
 }
 
 
-static int NumberOfWeakCalls = 0;
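+// Counts weak-handle callbacks; the id lets the callback verify that it was
+// passed the expected parameter.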
+class WeakCallCounter {
+ public:
+  explicit WeakCallCounter(int id) : id_(id), number_of_weak_calls_(0) { }
+  int id() { return id_; }
+  void increment() { number_of_weak_calls_++; }
+  int NumberOfWeakCalls() { return number_of_weak_calls_; }
+ private:
+  int id_;
+  int number_of_weak_calls_;
+};
+
+
 static void WeakPointerCallback(Persistent<Value> handle, void* id) {
-  CHECK_EQ(reinterpret_cast<void*>(1234), id);
-  NumberOfWeakCalls++;
+  WeakCallCounter* counter = reinterpret_cast<WeakCallCounter*>(id);
+  CHECK_EQ(1234, counter->id());
+  counter->increment();
   handle.Dispose();
 }
 
+
 THREADED_TEST(ApiObjectGroups) {
   HandleScope scope;
   LocalContext env;
 
-  NumberOfWeakCalls = 0;
-
   Persistent<Object> g1s1;
   Persistent<Object> g1s2;
   Persistent<Object> g1c1;
@@ -1928,21 +2137,23 @@
   Persistent<Object> g2s2;
   Persistent<Object> g2c1;
 
+  WeakCallCounter counter(1234);
+
   {
     HandleScope scope;
     g1s1 = Persistent<Object>::New(Object::New());
     g1s2 = Persistent<Object>::New(Object::New());
     g1c1 = Persistent<Object>::New(Object::New());
-    g1s1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
-    g1s2.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
-    g1c1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
+    g1s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+    g1s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+    g1c1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
 
     g2s1 = Persistent<Object>::New(Object::New());
     g2s2 = Persistent<Object>::New(Object::New());
     g2c1 = Persistent<Object>::New(Object::New());
-    g2s1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
-    g2s2.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
-    g2c1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
+    g2s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+    g2s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+    g2c1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
   }
 
   Persistent<Object> root = Persistent<Object>::New(g1s1);  // make a root.
@@ -1961,14 +2172,15 @@
     V8::AddObjectGroup(g2_objects, 2);
     V8::AddImplicitReferences(g2s2, g2_children, 1);
   }
-  // Do a full GC
-  HEAP->CollectGarbage(i::OLD_POINTER_SPACE);
+  // Do a single full GC. Use kMakeHeapIterableMask to ensure that
+  // incremental garbage collection is stopped.
+  HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
 
   // All object should be alive.
-  CHECK_EQ(0, NumberOfWeakCalls);
+  CHECK_EQ(0, counter.NumberOfWeakCalls());
 
   // Weaken the root.
-  root.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
+  root.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
   // But make children strong roots---all the objects (except for children)
   // should be collectable now.
   g1c1.ClearWeak();
@@ -1986,17 +2198,17 @@
     V8::AddImplicitReferences(g2s2, g2_children, 1);
   }
 
-  HEAP->CollectGarbage(i::OLD_POINTER_SPACE);
+  HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
 
   // All objects should be gone. 5 global handles in total.
-  CHECK_EQ(5, NumberOfWeakCalls);
+  CHECK_EQ(5, counter.NumberOfWeakCalls());
 
   // And now make children weak again and collect them.
-  g1c1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
-  g2c1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
+  g1c1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+  g2c1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
 
-  HEAP->CollectGarbage(i::OLD_POINTER_SPACE);
-  CHECK_EQ(7, NumberOfWeakCalls);
+  HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
+  CHECK_EQ(7, counter.NumberOfWeakCalls());
 }
 
 
@@ -2004,7 +2216,7 @@
   HandleScope scope;
   LocalContext env;
 
-  NumberOfWeakCalls = 0;
+  WeakCallCounter counter(1234);
 
   Persistent<Object> g1s1;
   Persistent<Object> g1s2;
@@ -2017,18 +2229,18 @@
     HandleScope scope;
     g1s1 = Persistent<Object>::New(Object::New());
     g1s2 = Persistent<Object>::New(Object::New());
-    g1s1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
-    g1s2.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
+    g1s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+    g1s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
 
     g2s1 = Persistent<Object>::New(Object::New());
     g2s2 = Persistent<Object>::New(Object::New());
-    g2s1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
-    g2s2.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
+    g2s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+    g2s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
 
     g3s1 = Persistent<Object>::New(Object::New());
     g3s2 = Persistent<Object>::New(Object::New());
-    g3s1.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
-    g3s2.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
+    g3s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+    g3s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
   }
 
   Persistent<Object> root = Persistent<Object>::New(g1s1);  // make a root.
@@ -2050,14 +2262,14 @@
     V8::AddObjectGroup(g3_objects, 2);
     V8::AddImplicitReferences(g3s1, g3_children, 1);
   }
-  // Do a full GC
-  HEAP->CollectGarbage(i::OLD_POINTER_SPACE);
+  // Do a single full GC
+  HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
 
   // All object should be alive.
-  CHECK_EQ(0, NumberOfWeakCalls);
+  CHECK_EQ(0, counter.NumberOfWeakCalls());
 
   // Weaken the root.
-  root.MakeWeak(reinterpret_cast<void*>(1234), &WeakPointerCallback);
+  root.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
 
   // Groups are deleted, rebuild groups.
   {
@@ -2075,10 +2287,10 @@
     V8::AddImplicitReferences(g3s1, g3_children, 1);
   }
 
-  HEAP->CollectGarbage(i::OLD_POINTER_SPACE);
+  HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
 
   // All objects should be gone. 7 global handles in total.
-  CHECK_EQ(7, NumberOfWeakCalls);
+  CHECK_EQ(7, counter.NumberOfWeakCalls());
 }
 
 
@@ -4305,6 +4517,47 @@
 }
 
 
+static const char* kEmbeddedExtensionSource =
+    "function Ret54321(){return 54321;}~~@@$"
+    "$%% THIS IS A SERIES OF NON-NULL-TERMINATED STRINGS.";
+static const int kEmbeddedExtensionSourceValidLen = 34;
+
+
+THREADED_TEST(ExtensionMissingSourceLength) {
+  v8::HandleScope handle_scope;
+  v8::RegisterExtension(new Extension("srclentest_fail",
+                                      kEmbeddedExtensionSource));
+  const char* extension_names[] = { "srclentest_fail" };
+  v8::ExtensionConfiguration extensions(1, extension_names);
+  v8::Handle<Context> context = Context::New(&extensions);
+  CHECK_EQ(0, *context);
+}
+
+
+THREADED_TEST(ExtensionWithSourceLength) {
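+  // Try lengths one below, exactly at, and one above the valid prefix length;
+  // only the exact length should yield a usable context.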
+  for (int source_len = kEmbeddedExtensionSourceValidLen - 1;
+       source_len <= kEmbeddedExtensionSourceValidLen + 1; ++source_len) {
+    v8::HandleScope handle_scope;
+    i::ScopedVector<char> extension_name(32);
+    i::OS::SNPrintF(extension_name, "ext #%d", source_len);
+    v8::RegisterExtension(new Extension(extension_name.start(),
+                                        kEmbeddedExtensionSource, 0, 0,
+                                        source_len));
+    const char* extension_names[1] = { extension_name.start() };
+    v8::ExtensionConfiguration extensions(1, extension_names);
+    v8::Handle<Context> context = Context::New(&extensions);
+    if (source_len == kEmbeddedExtensionSourceValidLen) {
+      Context::Scope lock(context);
+      v8::Handle<Value> result = Script::Compile(v8_str("Ret54321()"))->Run();
+      CHECK_EQ(v8::Integer::New(54321), result);
+    } else {
+      // Anything but exactly the right length should fail to compile.
+      CHECK_EQ(0, *context);
+    }
+  }
+}
+
+
 static const char* kEvalExtensionSource1 =
   "function UseEval1() {"
   "  var x = 42;"
@@ -4805,7 +5058,7 @@
 
 
 static void InvokeMarkSweep() {
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 }
 
 
@@ -4898,7 +5151,7 @@
   CHECK_EQ(v8::Integer::New(3), args[2]);
   CHECK_EQ(v8::Undefined(), args[3]);
   v8::HandleScope scope;
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   return v8::Undefined();
 }
 
@@ -7883,7 +8136,7 @@
     Local<String> name,
     const AccessorInfo& info) {
   ApiTestFuzzer::Fuzz();
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   return v8::Handle<Value>();
 }
 
@@ -8613,7 +8866,7 @@
   int* call_count = reinterpret_cast<int*>(v8::External::Unwrap(info.Data()));
   ++(*call_count);
   if ((*call_count) % 20 == 0) {
-    HEAP->CollectAllGarbage(true);
+    HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   }
   return v8::Handle<Value>();
 }
@@ -9964,6 +10217,7 @@
 
 
 static int GetGlobalObjectsCount() {
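+  // HeapIterator requires an iterable heap, so force that first.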
+  i::Isolate::Current()->heap()->EnsureHeapIsIterable();
   int count = 0;
   i::HeapIterator it;
   for (i::HeapObject* object = it.next(); object != NULL; object = it.next())
@@ -9978,9 +10232,8 @@
   // the first garbage collection but some of the maps have already
   // been marked at that point.  Therefore some of the maps are not
   // collected until the second garbage collection.
-  HEAP->global_context_map();
-  HEAP->CollectAllGarbage(false);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+  HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
   int count = GetGlobalObjectsCount();
 #ifdef DEBUG
   if (count != expected) HEAP->TracePathToGlobal();
@@ -10049,7 +10302,7 @@
   // weak callback of the first handle would be able to 'reallocate' it.
   handle1.MakeWeak(NULL, NewPersistentHandleCallback);
   handle2.Dispose();
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 }
 
 
@@ -10057,7 +10310,7 @@
 
 void DisposeAndForceGcCallback(v8::Persistent<v8::Value> handle, void*) {
   to_be_disposed.Dispose();
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   handle.Dispose();
 }
 
@@ -10073,7 +10326,7 @@
   }
   handle1.MakeWeak(NULL, DisposeAndForceGcCallback);
   to_be_disposed = handle2;
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 }
 
 void DisposingCallback(v8::Persistent<v8::Value> handle, void*) {
@@ -10099,7 +10352,7 @@
   }
   handle2.MakeWeak(NULL, DisposingCallback);
   handle3.MakeWeak(NULL, HandleCreatingCallback);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 }
 
 
@@ -10915,7 +11168,7 @@
       {
         v8::Locker lock;
         // TODO(lrn): Perhaps create some garbage before collecting.
-        HEAP->CollectAllGarbage(false);
+        HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
         gc_count_++;
       }
       i::OS::Sleep(1);
@@ -11037,7 +11290,7 @@
     while (gc_during_apply_ < kRequiredGCs) {
       {
         v8::Locker lock;
-        HEAP->CollectAllGarbage(false);
+        HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
         gc_count_++;
       }
       i::OS::Sleep(1);
@@ -11753,13 +12006,15 @@
   i::Handle<i::ExternalPixelArray> pixels =
       i::Handle<i::ExternalPixelArray>::cast(
           FACTORY->NewExternalArray(kElementCount,
-                                       v8::kExternalPixelArray,
-                                       pixel_data));
-  HEAP->CollectAllGarbage(false);  // Force GC to trigger verification.
+                                    v8::kExternalPixelArray,
+                                    pixel_data));
+  // Force GC to trigger verification.
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   for (int i = 0; i < kElementCount; i++) {
     pixels->set(i, i % 256);
   }
-  HEAP->CollectAllGarbage(false);  // Force GC to trigger verification.
+  // Force GC to trigger verification.
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   for (int i = 0; i < kElementCount; i++) {
     CHECK_EQ(i % 256, pixels->get_scalar(i));
     CHECK_EQ(i % 256, pixel_data[i]);
@@ -12235,11 +12490,13 @@
   i::Handle<ExternalArrayClass> array =
       i::Handle<ExternalArrayClass>::cast(
           FACTORY->NewExternalArray(kElementCount, array_type, array_data));
-  HEAP->CollectAllGarbage(false);  // Force GC to trigger verification.
+  // Force GC to trigger verification.
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   for (int i = 0; i < kElementCount; i++) {
     array->set(i, static_cast<ElementType>(i));
   }
-  HEAP->CollectAllGarbage(false);  // Force GC to trigger verification.
+  // Force GC to trigger verification.
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   for (int i = 0; i < kElementCount; i++) {
     CHECK_EQ(static_cast<int64_t>(i),
              static_cast<int64_t>(array->get_scalar(i)));
@@ -12357,7 +12614,8 @@
                       "  }"
                       "}"
                       "sum;");
-  HEAP->CollectAllGarbage(false);  // Force GC to trigger verification.
+  // Force GC to trigger verification.
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(28, result->Int32Value());
 
   // Make sure out-of-range loads do not throw.
@@ -13337,7 +13595,7 @@
     other_context->Enter();
     CompileRun(source_simple);
     other_context->Exit();
-    HEAP->CollectAllGarbage(false);
+    HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
     if (GetGlobalObjectsCount() == 1) break;
   }
   CHECK_GE(2, gc_count);
@@ -13359,7 +13617,7 @@
     other_context->Enter();
     CompileRun(source_eval);
     other_context->Exit();
-    HEAP->CollectAllGarbage(false);
+    HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
     if (GetGlobalObjectsCount() == 1) break;
   }
   CHECK_GE(2, gc_count);
@@ -13386,7 +13644,7 @@
     other_context->Enter();
     CompileRun(source_exception);
     other_context->Exit();
-    HEAP->CollectAllGarbage(false);
+    HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
     if (GetGlobalObjectsCount() == 1) break;
   }
   CHECK_GE(2, gc_count);
@@ -13604,26 +13862,26 @@
   v8::V8::AddGCEpilogueCallback(EpilogueCallback);
   CHECK_EQ(0, prologue_call_count);
   CHECK_EQ(0, epilogue_call_count);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(1, prologue_call_count);
   CHECK_EQ(1, epilogue_call_count);
   v8::V8::AddGCPrologueCallback(PrologueCallbackSecond);
   v8::V8::AddGCEpilogueCallback(EpilogueCallbackSecond);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(2, prologue_call_count);
   CHECK_EQ(2, epilogue_call_count);
   CHECK_EQ(1, prologue_call_count_second);
   CHECK_EQ(1, epilogue_call_count_second);
   v8::V8::RemoveGCPrologueCallback(PrologueCallback);
   v8::V8::RemoveGCEpilogueCallback(EpilogueCallback);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(2, prologue_call_count);
   CHECK_EQ(2, epilogue_call_count);
   CHECK_EQ(2, prologue_call_count_second);
   CHECK_EQ(2, epilogue_call_count_second);
   v8::V8::RemoveGCPrologueCallback(PrologueCallbackSecond);
   v8::V8::RemoveGCEpilogueCallback(EpilogueCallbackSecond);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(2, prologue_call_count);
   CHECK_EQ(2, epilogue_call_count);
   CHECK_EQ(2, prologue_call_count_second);
@@ -13840,7 +14098,7 @@
 void FailedAccessCheckCallbackGC(Local<v8::Object> target,
                                  v8::AccessType type,
                                  Local<v8::Value> data) {
-  HEAP->CollectAllGarbage(true);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 }
 
 
@@ -14414,7 +14672,7 @@
                  "})()",
                  "ReferenceError: cell is not defined");
     CompileRun("cell = \"new_second\";");
-    HEAP->CollectAllGarbage(true);
+    HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
     ExpectString("readCell()", "new_second");
     ExpectString("readCell()", "new_second");
   }
diff --git a/test/cctest/test-assembler-ia32.cc b/test/cctest/test-assembler-ia32.cc
index 839b7f5..cdab8f7 100644
--- a/test/cctest/test-assembler-ia32.cc
+++ b/test/cctest/test-assembler-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -93,15 +93,15 @@
   Label L, C;
 
   __ mov(edx, Operand(esp, 4));
-  __ xor_(eax, Operand(eax));  // clear eax
+  __ xor_(eax, eax);  // clear eax
   __ jmp(&C);
 
   __ bind(&L);
-  __ add(eax, Operand(edx));
-  __ sub(Operand(edx), Immediate(1));
+  __ add(eax, edx);
+  __ sub(edx, Immediate(1));
 
   __ bind(&C);
-  __ test(edx, Operand(edx));
+  __ test(edx, edx);
   __ j(not_zero, &L);
   __ ret(0);
 
@@ -135,11 +135,11 @@
   __ jmp(&C);
 
   __ bind(&L);
-  __ imul(eax, Operand(edx));
-  __ sub(Operand(edx), Immediate(1));
+  __ imul(eax, edx);
+  __ sub(edx, Immediate(1));
 
   __ bind(&C);
-  __ test(edx, Operand(edx));
+  __ test(edx, edx);
   __ j(not_zero, &L);
   __ ret(0);
 
@@ -275,10 +275,10 @@
   __ subsd(xmm0, xmm1);
   __ divsd(xmm0, xmm1);
   // Copy xmm0 to st(0) using eight bytes of stack.
-  __ sub(Operand(esp), Immediate(8));
+  __ sub(esp, Immediate(8));
   __ movdbl(Operand(esp, 0), xmm0);
   __ fld_d(Operand(esp, 0));
-  __ add(Operand(esp), Immediate(8));
+  __ add(esp, Immediate(8));
   __ ret(0);
 
   CodeDesc desc;
@@ -314,12 +314,12 @@
   v8::internal::byte buffer[256];
   Assembler assm(Isolate::Current(), buffer, sizeof buffer);
   __ mov(eax, Operand(esp, 4));
-  __ cvtsi2sd(xmm0, Operand(eax));
+  __ cvtsi2sd(xmm0, eax);
   // Copy xmm0 to st(0) using eight bytes of stack.
-  __ sub(Operand(esp), Immediate(8));
+  __ sub(esp, Immediate(8));
   __ movdbl(Operand(esp, 0), xmm0);
   __ fld_d(Operand(esp, 0));
-  __ add(Operand(esp), Immediate(8));
+  __ add(esp, Immediate(8));
   __ ret(0);
   CodeDesc desc;
   assm.GetCode(&desc);
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index 45da6dc..de60d49 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -425,8 +425,8 @@
   CHECK_EQ(NULL, Isolate::Current()->debug()->debug_info_list_);
 
   // Collect garbage to ensure weak handles are cleared.
-  HEAP->CollectAllGarbage(false);
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+  HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
 
   // Iterate the head and check that there are no debugger related objects left.
   HeapIterator iterator;
@@ -944,7 +944,7 @@
       HEAP->CollectGarbage(v8::internal::NEW_SPACE);
     } else {
       // Mark sweep compact.
-      HEAP->CollectAllGarbage(true);
+      HEAP->CollectAllGarbage(Heap::kNoGCFlags);
     }
   }
 }
@@ -1417,8 +1417,7 @@
 // Call the function three times with different garbage collections in between
 // and make sure that the break point survives.
 static void CallAndGC(v8::Local<v8::Object> recv,
-                      v8::Local<v8::Function> f,
-                      bool force_compaction) {
+                      v8::Local<v8::Function> f) {
   break_point_hit_count = 0;
 
   for (int i = 0; i < 3; i++) {
@@ -1432,14 +1431,15 @@
     CHECK_EQ(2 + i * 3, break_point_hit_count);
 
     // Mark sweep (and perhaps compact) and call function.
-    HEAP->CollectAllGarbage(force_compaction);
+    HEAP->CollectAllGarbage(Heap::kNoGCFlags);
     f->Call(recv, 0, NULL);
     CHECK_EQ(3 + i * 3, break_point_hit_count);
   }
 }
 
 
-static void TestBreakPointSurviveGC(bool force_compaction) {
+// Test that a break point can be set at a return store location.
+TEST(BreakPointSurviveGC) {
   break_point_hit_count = 0;
   v8::HandleScope scope;
   DebugLocalContext env;
@@ -1455,7 +1455,7 @@
     foo = CompileFunction(&env, "function foo(){bar=0;}", "foo");
     SetBreakPoint(foo, 0);
   }
-  CallAndGC(env->Global(), foo, force_compaction);
+  CallAndGC(env->Global(), foo);
 
   // Test IC load break point with garbage collection.
   {
@@ -1464,7 +1464,7 @@
     foo = CompileFunction(&env, "bar=1;function foo(){var x=bar;}", "foo");
     SetBreakPoint(foo, 0);
   }
-  CallAndGC(env->Global(), foo, force_compaction);
+  CallAndGC(env->Global(), foo);
 
   // Test IC call break point with garbage collection.
   {
@@ -1475,7 +1475,7 @@
                           "foo");
     SetBreakPoint(foo, 0);
   }
-  CallAndGC(env->Global(), foo, force_compaction);
+  CallAndGC(env->Global(), foo);
 
   // Test return break point with garbage collection.
   {
@@ -1484,7 +1484,7 @@
     foo = CompileFunction(&env, "function foo(){}", "foo");
     SetBreakPoint(foo, 0);
   }
-  CallAndGC(env->Global(), foo, force_compaction);
+  CallAndGC(env->Global(), foo);
 
   // Test non IC break point with garbage collection.
   {
@@ -1493,7 +1493,7 @@
     foo = CompileFunction(&env, "function foo(){var bar=0;}", "foo");
     SetBreakPoint(foo, 0);
   }
-  CallAndGC(env->Global(), foo, force_compaction);
+  CallAndGC(env->Global(), foo);
 
 
   v8::Debug::SetDebugEventListener(NULL);
@@ -1501,13 +1501,6 @@
 }
 
 
-// Test that a break point can be set at a return store location.
-TEST(BreakPointSurviveGC) {
-  TestBreakPointSurviveGC(false);
-  TestBreakPointSurviveGC(true);
-}
-
-
 // Test that break points can be set using the global Debug object.
 TEST(BreakPointThroughJavaScript) {
   break_point_hit_count = 0;
@@ -2259,7 +2252,7 @@
   }
   f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
 
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
 
   SetScriptBreakPointByNameFromJS("test.html", 3, -1);
 
@@ -6472,7 +6465,7 @@
 
   // Do garbage collection to ensure that only the script in this test will be
   // collected afterwards.
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
 
   script_collected_count = 0;
   v8::Debug::SetDebugEventListener(DebugEventScriptCollectedEvent,
@@ -6484,7 +6477,7 @@
 
   // Do garbage collection to collect the script above which is no longer
   // referenced.
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
 
   CHECK_EQ(2, script_collected_count);
 
@@ -6520,7 +6513,7 @@
 
     // Do garbage collection to ensure that only the script in this test will be
     // collected afterwards.
-    HEAP->CollectAllGarbage(false);
+    HEAP->CollectAllGarbage(Heap::kNoGCFlags);
 
     v8::Debug::SetMessageHandler2(ScriptCollectedMessageHandler);
     {
@@ -6531,7 +6524,7 @@
 
   // Do garbage collection to collect the script above which is no longer
   // referenced.
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
 
   CHECK_EQ(2, script_collected_message_count);
 
diff --git a/test/cctest/test-decls.cc b/test/cctest/test-decls.cc
index 6198391..aa733c7 100644
--- a/test/cctest/test-decls.cc
+++ b/test/cctest/test-decls.cc
@@ -232,7 +232,7 @@
     context.Check("const x; x",
                   1,  // access
                   2,  // declaration + initialization
-                  2,  // declaration + initialization
+                  1,  // declaration
                   EXPECT_RESULT, Undefined());
   }
 
@@ -240,7 +240,7 @@
     context.Check("const x = 0; x",
                   1,  // access
                   2,  // declaration + initialization
-                  2,  // declaration + initialization
+                  1,  // declaration
                   EXPECT_RESULT, Undefined());  // SB 0 - BUG 1213579
   }
 }
@@ -285,18 +285,18 @@
 
   { PresentPropertyContext context;
     context.Check("const x; x",
-                  0,
-                  0,
+                  1,  // access
+                  1,  // initialization
                   1,  // (re-)declaration
-                  EXPECT_EXCEPTION);  // x has already been declared!
+                  EXPECT_RESULT, Undefined());
   }
 
   { PresentPropertyContext context;
     context.Check("const x = 0; x",
-                  0,
-                  0,
+                  1,  // access
+                  1,  // initialization
                   1,  // (re-)declaration
-                  EXPECT_EXCEPTION);  // x has already been declared!
+                  EXPECT_RESULT, Number::New(0));
   }
 }
 
@@ -341,7 +341,7 @@
     context.Check("const x; x",
                   1,  // access
                   2,  // declaration + initialization
-                  2,  // declaration + initializetion
+                  1,  // declaration
                   EXPECT_RESULT, Undefined());
   }
 
@@ -349,7 +349,7 @@
     context.Check("const x = 0; x",
                   1,  // access
                   2,  // declaration + initialization
-                  2,  // declaration + initialization
+                  1,  // declaration
                   EXPECT_RESULT, Undefined());  // SB 0 - BUG 1213579
   }
 
@@ -429,18 +429,20 @@
 
   { AppearingPropertyContext context;
     context.Check("const x; x",
-                  0,
-                  1,  // declaration
+                  1,  // access
                   2,  // declaration + initialization
-                  EXPECT_EXCEPTION);  // x has already been declared!
+                  1,  // declaration
+                  EXPECT_RESULT, Undefined());
   }
 
   { AppearingPropertyContext context;
     context.Check("const x = 0; x",
-                  0,
-                  1,  // declaration
+                  1,  // access
                   2,  // declaration + initialization
-                  EXPECT_EXCEPTION);  //  x has already been declared!
+                  1,  // declaration
+                  EXPECT_RESULT, Undefined());
+                  // Result is undefined because declaration succeeded but
+                  // initialization to 0 failed (due to context behavior).
   }
 }
 
@@ -496,9 +498,9 @@
   { ReappearingPropertyContext context;
     context.Check("const x; var x = 0",
                   0,
-                  2,  // var declaration + const initialization
-                  4,  // 2 x declaration + 2 x initialization
-                  EXPECT_EXCEPTION);  // x has already been declared!
+                  3,  // const declaration+initialization, var initialization
+                  3,  // 2 x declaration + var initialization
+                  EXPECT_RESULT, Undefined());
   }
 }
 
diff --git a/test/cctest/test-disasm-ia32.cc b/test/cctest/test-disasm-ia32.cc
index 9f7d0bb..1e38e4e 100644
--- a/test/cctest/test-disasm-ia32.cc
+++ b/test/cctest/test-disasm-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -63,9 +63,9 @@
 
   // Short immediate instructions
   __ adc(eax, 12345678);
-  __ add(Operand(eax), Immediate(12345678));
+  __ add(eax, Immediate(12345678));
   __ or_(eax, 12345678);
-  __ sub(Operand(eax), Immediate(12345678));
+  __ sub(eax, Immediate(12345678));
   __ xor_(eax, 12345678);
   __ and_(eax, 12345678);
   Handle<FixedArray> foo = FACTORY->NewFixedArray(10, TENURED);
@@ -75,7 +75,7 @@
   __ mov(ebx,  Operand(esp, ecx, times_2, 0));  // [esp+ecx*4]
 
   // ---- All instructions that I can think of
-  __ add(edx, Operand(ebx));
+  __ add(edx, ebx);
   __ add(edx, Operand(12, RelocInfo::NONE));
   __ add(edx, Operand(ebx, 0));
   __ add(edx, Operand(ebx, 16));
@@ -89,7 +89,7 @@
   __ add(Operand(ebp, ecx, times_4, 12), Immediate(12));
 
   __ nop();
-  __ add(Operand(ebx), Immediate(12));
+  __ add(ebx, Immediate(12));
   __ nop();
   __ adc(ecx, 12);
   __ adc(ecx, 1000);
@@ -116,16 +116,16 @@
     CpuFeatures::Scope fscope(RDTSC);
     __ rdtsc();
   }
-  __ movsx_b(edx, Operand(ecx));
-  __ movsx_w(edx, Operand(ecx));
-  __ movzx_b(edx, Operand(ecx));
-  __ movzx_w(edx, Operand(ecx));
+  __ movsx_b(edx, ecx);
+  __ movsx_w(edx, ecx);
+  __ movzx_b(edx, ecx);
+  __ movzx_w(edx, ecx);
 
   __ nop();
-  __ imul(edx, Operand(ecx));
-  __ shld(edx, Operand(ecx));
-  __ shrd(edx, Operand(ecx));
-  __ bts(Operand(edx), ecx);
+  __ imul(edx, ecx);
+  __ shld(edx, ecx);
+  __ shrd(edx, ecx);
+  __ bts(edx, ecx);
   __ bts(Operand(ebx, ecx, times_4, 0), ecx);
   __ nop();
   __ pushad();
@@ -146,9 +146,9 @@
   __ nop();
 
   __ add(edx, Operand(esp, 16));
-  __ add(edx, Operand(ecx));
-  __ mov_b(edx, Operand(ecx));
-  __ mov_b(Operand(ecx), 6);
+  __ add(edx, ecx);
+  __ mov_b(edx, ecx);
+  __ mov_b(ecx, 6);
   __ mov_b(Operand(ebx, ecx, times_4, 10000), 6);
   __ mov_b(Operand(esp, 16), edx);
   __ mov_w(edx, Operand(esp, 16));
@@ -216,22 +216,20 @@
 
   __ adc(edx, 12345);
 
-  __ add(Operand(ebx), Immediate(12));
+  __ add(ebx, Immediate(12));
   __ add(Operand(edx, ecx, times_4, 10000), Immediate(12));
 
   __ and_(ebx, 12345);
 
   __ cmp(ebx, 12345);
-  __ cmp(Operand(ebx), Immediate(12));
+  __ cmp(ebx, Immediate(12));
   __ cmp(Operand(edx, ecx, times_4, 10000), Immediate(12));
+  __ cmpb(eax, 100);
 
   __ or_(ebx, 12345);
 
-  __ sub(Operand(ebx), Immediate(12));
+  __ sub(ebx, Immediate(12));
   __ sub(Operand(edx, ecx, times_4, 10000), Immediate(12));
-  __ subb(Operand(edx, ecx, times_4, 10000), 100);
-  __ subb(Operand(eax), 100);
-  __ subb(eax, Operand(edx, ecx, times_4, 10000));
 
   __ xor_(ebx, 12345);
 
@@ -244,7 +242,7 @@
   __ stos();
 
   __ sub(edx, Operand(ebx, ecx, times_4, 10000));
-  __ sub(edx, Operand(ebx));
+  __ sub(edx, ebx);
 
   __ test(edx, Immediate(12345));
   __ test(edx, Operand(ebx, ecx, times_8, 10000));
@@ -446,8 +444,8 @@
   {
     if (CpuFeatures::IsSupported(SSE4_1)) {
       CpuFeatures::Scope scope(SSE4_1);
-      __ pextrd(Operand(eax), xmm0, 1);
-      __ pinsrd(xmm1, Operand(eax), 0);
+      __ pextrd(eax, xmm0, 1);
+      __ pinsrd(xmm1, eax, 0);
     }
   }
 
diff --git a/test/cctest/test-func-name-inference.cc b/test/cctest/test-func-name-inference.cc
index bfae4d1..8f405b7 100644
--- a/test/cctest/test-func-name-inference.cc
+++ b/test/cctest/test-func-name-inference.cc
@@ -361,3 +361,42 @@
   // Can't infer the function name statically.
   CheckFunctionName(script, "return 1", "obj.(anonymous function)");
 }
+
+
+TEST(GlobalAssignmentAndCall) {
+  InitializeVM();
+  v8::HandleScope scope;
+
+  v8::Handle<v8::Script> script = Compile(
+      "var Foo = function() {\n"
+      "  return 1;\n"
+      "}();\n"
+      "var Baz = Bar = function() {\n"
+      "  return 2;\n"
+      "}");
+  // The inferred name is empty, because this is an assignment of a result.
+  CheckFunctionName(script, "return 1", "");
+  // See MultipleAssignments test.
+  CheckFunctionName(script, "return 2", "Bar");
+}
+
+
+TEST(AssignmentAndCall) {
+  InitializeVM();
+  v8::HandleScope scope;
+
+  v8::Handle<v8::Script> script = Compile(
+      "(function Enclosing() {\n"
+      "  var Foo;\n"
+      "  Foo = function() {\n"
+      "    return 1;\n"
+      "  }();\n"
+      "  var Baz = Bar = function() {\n"
+      "    return 2;\n"
+      "  }\n"
+      "})();");
+  // The inferred name is empty, because this is an assignment of a result.
+  CheckFunctionName(script, "return 1", "");
+  // See MultipleAssignments test.
+  CheckFunctionName(script, "return 2", "Enclosing.Bar");
+}
diff --git a/test/cctest/test-heap-profiler.cc b/test/cctest/test-heap-profiler.cc
index 1769c1b..d695d74 100644
--- a/test/cctest/test-heap-profiler.cc
+++ b/test/cctest/test-heap-profiler.cc
@@ -85,7 +85,7 @@
       "var b2_1 = new B2(a2), b2_2 = new B2(a2);\n"
       "var c2 = new C2(a2);");
   const v8::HeapSnapshot* snapshot_env2 =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("env2"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("env2"));
   i::HeapSnapshot* i_snapshot_env2 =
       const_cast<i::HeapSnapshot*>(
           reinterpret_cast<const i::HeapSnapshot*>(snapshot_env2));
@@ -124,7 +124,7 @@
       "x = new X(new X(), new X());\n"
       "(function() { x.a.a = x.b; })();");
   const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("sizes"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("sizes"));
   const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
   const v8::HeapGraphNode* x =
       GetProperty(global, v8::HeapGraphEdge::kShortcut, "x");
@@ -155,7 +155,7 @@
       "function A() { }\n"
       "a = new A;");
   const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("children"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("children"));
   const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
   for (int i = 0, count = global->GetChildrenCount(); i < count; ++i) {
     const v8::HeapGraphEdge* prop = global->GetChild(i);
@@ -181,7 +181,7 @@
       "var anonymous = (function() { return function() { return 0; } })();\n"
       "compiled(1)");
   const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("code"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("code"));
 
   const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
   const v8::HeapGraphNode* compiled =
@@ -243,7 +243,7 @@
       "a = 1;    // a is Smi\n"
       "b = 2.5;  // b is HeapNumber");
   const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("numbers"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("numbers"));
   const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
   CHECK_EQ(NULL, GetProperty(global, v8::HeapGraphEdge::kShortcut, "a"));
   const v8::HeapGraphNode* b =
@@ -265,7 +265,7 @@
   global->SetInternalField(0, v8_num(17));
   global->SetInternalField(1, obj);
   const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("internals"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("internals"));
   const v8::HeapGraphNode* global_node = GetGlobalObject(snapshot);
   // The first reference will not present, because it's a Smi.
   CHECK_EQ(NULL, GetProperty(global_node, v8::HeapGraphEdge::kInternal, "0"));
@@ -292,12 +292,12 @@
       "var a = new A();\n"
       "var b = new B(a);");
   const v8::HeapSnapshot* snapshot1 =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("s1"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("s1"));
 
-  HEAP->CollectAllGarbage(true);  // Enforce compaction.
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 
   const v8::HeapSnapshot* snapshot2 =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("s2"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("s2"));
 
   const v8::HeapGraphNode* global1 = GetGlobalObject(snapshot1);
   const v8::HeapGraphNode* global2 = GetGlobalObject(snapshot2);
@@ -342,7 +342,7 @@
   v8::HandleScope scope;
   LocalContext env;
   const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("s"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("s"));
   const v8::HeapGraphNode* root1 = snapshot->GetRoot();
   const_cast<i::HeapSnapshot*>(reinterpret_cast<const i::HeapSnapshot*>(
       snapshot))->GetSortedEntriesList();
@@ -380,7 +380,7 @@
       "})();");
 
   const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("dominators"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("dominators"));
 
   const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
   CHECK_NE(NULL, global);
@@ -463,7 +463,7 @@
       "var a = new A(" STRING_LITERAL_FOR_TEST ");\n"
       "var b = new B(a);");
   const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("json"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("json"));
   TestJSONStream stream;
   snapshot->Serialize(&stream, v8::HeapSnapshot::kJSON);
   CHECK_GT(stream.size(), 0);
@@ -474,17 +474,17 @@
   // Verify that snapshot string is valid JSON.
   AsciiResource json_res(json);
   v8::Local<v8::String> json_string = v8::String::NewExternal(&json_res);
-  env->Global()->Set(v8::String::New("json_snapshot"), json_string);
+  env->Global()->Set(v8_str("json_snapshot"), json_string);
   v8::Local<v8::Value> snapshot_parse_result = CompileRun(
       "var parsed = JSON.parse(json_snapshot); true;");
   CHECK(!snapshot_parse_result.IsEmpty());
 
   // Verify that snapshot object has required fields.
   v8::Local<v8::Object> parsed_snapshot =
-      env->Global()->Get(v8::String::New("parsed"))->ToObject();
-  CHECK(parsed_snapshot->Has(v8::String::New("snapshot")));
-  CHECK(parsed_snapshot->Has(v8::String::New("nodes")));
-  CHECK(parsed_snapshot->Has(v8::String::New("strings")));
+      env->Global()->Get(v8_str("parsed"))->ToObject();
+  CHECK(parsed_snapshot->Has(v8_str("snapshot")));
+  CHECK(parsed_snapshot->Has(v8_str("nodes")));
+  CHECK(parsed_snapshot->Has(v8_str("strings")));
 
   // Get node and edge "member" offsets.
   v8::Local<v8::Value> meta_analysis_result = CompileRun(
@@ -536,12 +536,12 @@
   int string_obj_pos =
       static_cast<int>(string_obj_pos_val->ToNumber()->Value());
   v8::Local<v8::Object> nodes_array =
-      parsed_snapshot->Get(v8::String::New("nodes"))->ToObject();
+      parsed_snapshot->Get(v8_str("nodes"))->ToObject();
   int string_index = static_cast<int>(
       nodes_array->Get(string_obj_pos + 1)->ToNumber()->Value());
   CHECK_GT(string_index, 0);
   v8::Local<v8::Object> strings_array =
-      parsed_snapshot->Get(v8::String::New("strings"))->ToObject();
+      parsed_snapshot->Get(v8_str("strings"))->ToObject();
   v8::Local<v8::String> string = strings_array->Get(string_index)->ToString();
   v8::Local<v8::String> ref_string =
       CompileRun(STRING_LITERAL_FOR_TEST)->ToString();
@@ -555,7 +555,7 @@
   v8::HandleScope scope;
   LocalContext env;
   const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("abort"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("abort"));
   TestJSONStream stream(5);
   snapshot->Serialize(&stream, v8::HeapSnapshot::kJSON);
   CHECK_GT(stream.size(), 0);
@@ -568,7 +568,7 @@
   LocalContext env;
 
   const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("id"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("id"));
   const v8::HeapGraphNode* root = snapshot->GetRoot();
   CHECK_EQ(root, snapshot->GetNodeById(root->GetId()));
   for (int i = 0, count = root->GetChildrenCount(); i < count; ++i) {
@@ -609,7 +609,7 @@
   const int snapshots_count = v8::HeapProfiler::GetSnapshotsCount();
   TestActivityControl aborting_control(3);
   const v8::HeapSnapshot* no_snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("abort"),
+      v8::HeapProfiler::TakeSnapshot(v8_str("abort"),
                                      v8::HeapSnapshot::kFull,
                                      &aborting_control);
   CHECK_EQ(NULL, no_snapshot);
@@ -618,7 +618,7 @@
 
   TestActivityControl control(-1);  // Don't abort.
   const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("full"),
+      v8::HeapProfiler::TakeSnapshot(v8_str("full"),
                                      v8::HeapSnapshot::kFull,
                                      &control);
   CHECK_NE(NULL, snapshot);
@@ -728,7 +728,7 @@
   p_CCC.SetWrapperClassId(2);
   CHECK_EQ(0, TestRetainedObjectInfo::instances.length());
   const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("retained"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("retained"));
 
   CHECK_EQ(3, TestRetainedObjectInfo::instances.length());
   for (int i = 0; i < TestRetainedObjectInfo::instances.length(); ++i) {
@@ -772,12 +772,12 @@
   CHECK_EQ(0, v8::HeapProfiler::GetSnapshotsCount());
   v8::HeapProfiler::DeleteAllSnapshots();
   CHECK_EQ(0, v8::HeapProfiler::GetSnapshotsCount());
-  CHECK_NE(NULL, v8::HeapProfiler::TakeSnapshot(v8::String::New("1")));
+  CHECK_NE(NULL, v8::HeapProfiler::TakeSnapshot(v8_str("1")));
   CHECK_EQ(1, v8::HeapProfiler::GetSnapshotsCount());
   v8::HeapProfiler::DeleteAllSnapshots();
   CHECK_EQ(0, v8::HeapProfiler::GetSnapshotsCount());
-  CHECK_NE(NULL, v8::HeapProfiler::TakeSnapshot(v8::String::New("1")));
-  CHECK_NE(NULL, v8::HeapProfiler::TakeSnapshot(v8::String::New("2")));
+  CHECK_NE(NULL, v8::HeapProfiler::TakeSnapshot(v8_str("1")));
+  CHECK_NE(NULL, v8::HeapProfiler::TakeSnapshot(v8_str("2")));
   CHECK_EQ(2, v8::HeapProfiler::GetSnapshotsCount());
   v8::HeapProfiler::DeleteAllSnapshots();
   CHECK_EQ(0, v8::HeapProfiler::GetSnapshotsCount());
@@ -790,7 +790,7 @@
 
   CHECK_EQ(0, v8::HeapProfiler::GetSnapshotsCount());
   const v8::HeapSnapshot* s1 =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("1"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("1"));
   CHECK_NE(NULL, s1);
   CHECK_EQ(1, v8::HeapProfiler::GetSnapshotsCount());
   unsigned uid1 = s1->GetUid();
@@ -800,14 +800,14 @@
   CHECK_EQ(NULL, v8::HeapProfiler::FindSnapshot(uid1));
 
   const v8::HeapSnapshot* s2 =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("2"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("2"));
   CHECK_NE(NULL, s2);
   CHECK_EQ(1, v8::HeapProfiler::GetSnapshotsCount());
   unsigned uid2 = s2->GetUid();
   CHECK_NE(static_cast<int>(uid1), static_cast<int>(uid2));
   CHECK_EQ(s2, v8::HeapProfiler::FindSnapshot(uid2));
   const v8::HeapSnapshot* s3 =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("3"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("3"));
   CHECK_NE(NULL, s3);
   CHECK_EQ(2, v8::HeapProfiler::GetSnapshotsCount());
   unsigned uid3 = s3->GetUid();
@@ -830,7 +830,7 @@
   CompileRun("document = { URL:\"abcdefgh\" };");
 
   const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("document"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("document"));
   const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
   CHECK_NE(NULL, global);
   CHECK_EQ("Object / abcdefgh",
@@ -846,7 +846,7 @@
   CompileRun(
       "this.__defineGetter__(\"document\", function() { throw new Error(); })");
   const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("document"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("document"));
   const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
   CHECK_NE(NULL, global);
   CHECK_EQ("Object",
@@ -864,7 +864,7 @@
       "URLWithException.prototype = { get URL() { throw new Error(); } };\n"
       "document = { URL: new URLWithException() };");
   const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("document"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("document"));
   const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
   CHECK_NE(NULL, global);
   CHECK_EQ("Object",
@@ -877,7 +877,7 @@
   v8::HandleScope scope;
   LocalContext env;
   const v8::HeapSnapshot* snapshot =
-      v8::HeapProfiler::TakeSnapshot(v8::String::New("iteration"));
+      v8::HeapProfiler::TakeSnapshot(v8_str("iteration"));
   const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
   CHECK_NE(NULL, global);
   // Verify that we can find this object by iteration.
@@ -891,6 +891,62 @@
 }
 
 
+TEST(GetHeapValue) {
+  v8::HandleScope scope;
+  LocalContext env;
+
+  CompileRun("a = { s_prop: \'value\', n_prop: 0.1 };");
+  const v8::HeapSnapshot* snapshot =
+      v8::HeapProfiler::TakeSnapshot(v8_str("value"));
+  const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+  CHECK(global->GetHeapValue()->IsObject());
+  v8::Local<v8::Object> js_global =
+      env->Global()->GetPrototype().As<v8::Object>();
+  CHECK(js_global == global->GetHeapValue());
+  const v8::HeapGraphNode* obj = GetProperty(
+      global, v8::HeapGraphEdge::kShortcut, "a");
+  CHECK(obj->GetHeapValue()->IsObject());
+  v8::Local<v8::Object> js_obj = js_global->Get(v8_str("a")).As<v8::Object>();
+  CHECK(js_obj == obj->GetHeapValue());
+  const v8::HeapGraphNode* s_prop =
+      GetProperty(obj, v8::HeapGraphEdge::kProperty, "s_prop");
+  v8::Local<v8::String> js_s_prop =
+      js_obj->Get(v8_str("s_prop")).As<v8::String>();
+  CHECK(js_s_prop == s_prop->GetHeapValue());
+  const v8::HeapGraphNode* n_prop =
+      GetProperty(obj, v8::HeapGraphEdge::kProperty, "n_prop");
+  v8::Local<v8::Number> js_n_prop =
+      js_obj->Get(v8_str("n_prop")).As<v8::Number>();
+  CHECK(js_n_prop == n_prop->GetHeapValue());
+}
+
+
+TEST(GetHeapValueForDeletedObject) {
+  v8::HandleScope scope;
+  LocalContext env;
+
+  // It is impossible to delete a global property, so we are about to delete a
+  // property of the "a" object. Also, the "p" object can't be an empty one
+  // because the empty object is static and isn't actually deleted.
+  CompileRun("a = { p: { r: {} } };");
+  const v8::HeapSnapshot* snapshot =
+      v8::HeapProfiler::TakeSnapshot(v8_str("snapshot"));
+  const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+  const v8::HeapGraphNode* obj = GetProperty(
+      global, v8::HeapGraphEdge::kShortcut, "a");
+  const v8::HeapGraphNode* prop = GetProperty(
+      obj, v8::HeapGraphEdge::kProperty, "p");
+  {
+    // Perform the check inside a nested local scope to avoid creating a
+    // reference to the object we are deleting.
+    v8::HandleScope scope;
+    CHECK(prop->GetHeapValue()->IsObject());
+  }
+  CompileRun("delete a.p;");
+  CHECK(prop->GetHeapValue()->IsUndefined());
+}
+
+
 static int StringCmp(const char* ref, i::String* act) {
   i::SmartArrayPointer<char> s_act = act->ToCString();
   int result = strcmp(ref, *s_act);
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index 11b8813..8ed5bf7 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 
 #include <stdlib.h>
 
@@ -672,7 +672,8 @@
   // Set array length to 0.
   ok = array->SetElementsLength(Smi::FromInt(0))->ToObjectChecked();
   CHECK_EQ(Smi::FromInt(0), array->length());
-  CHECK(array->HasFastElements());  // Must be in fast mode.
+  // Must be in fast mode.
+  CHECK(array->HasFastTypeElements());
 
   // array[length] = name.
   ok = array->SetElement(0, *name, kNonStrictMode, true)->ToObjectChecked();
@@ -838,49 +839,6 @@
 }
 
 
-TEST(LargeObjectSpaceContains) {
-  InitializeVM();
-
-  HEAP->CollectGarbage(NEW_SPACE);
-
-  Address current_top = HEAP->new_space()->top();
-  Page* page = Page::FromAddress(current_top);
-  Address current_page = page->address();
-  Address next_page = current_page + Page::kPageSize;
-  int bytes_to_page = static_cast<int>(next_page - current_top);
-  if (bytes_to_page <= FixedArray::kHeaderSize) {
-    // Alas, need to cross another page to be able to
-    // put desired value.
-    next_page += Page::kPageSize;
-    bytes_to_page = static_cast<int>(next_page - current_top);
-  }
-  CHECK(bytes_to_page > FixedArray::kHeaderSize);
-
-  intptr_t* flags_ptr = &Page::FromAddress(next_page)->flags_;
-  Address flags_addr = reinterpret_cast<Address>(flags_ptr);
-
-  int bytes_to_allocate =
-      static_cast<int>(flags_addr - current_top) + kPointerSize;
-
-  int n_elements = (bytes_to_allocate - FixedArray::kHeaderSize) /
-      kPointerSize;
-  CHECK_EQ(bytes_to_allocate, FixedArray::SizeFor(n_elements));
-  FixedArray* array = FixedArray::cast(
-      HEAP->AllocateFixedArray(n_elements)->ToObjectChecked());
-
-  int index = n_elements - 1;
-  CHECK_EQ(flags_ptr,
-           HeapObject::RawField(array, FixedArray::OffsetOfElementAt(index)));
-  array->set(index, Smi::FromInt(0));
-  // This chould have turned next page into LargeObjectPage:
-  // CHECK(Page::FromAddress(next_page)->IsLargeObjectPage());
-
-  HeapObject* addr = HeapObject::FromAddress(next_page + 2 * kPointerSize);
-  CHECK(HEAP->new_space()->Contains(addr));
-  CHECK(!HEAP->lo_space()->Contains(addr));
-}
-
-
 TEST(EmptyHandleEscapeFrom) {
   InitializeVM();
 
@@ -907,8 +865,7 @@
   InitializeVM();
 
   // Increase the chance of 'bump-the-pointer' allocation in old space.
-  bool force_compaction = true;
-  HEAP->CollectAllGarbage(force_compaction);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
 
   v8::HandleScope scope;
 
@@ -975,12 +932,6 @@
     return;
   }
   CHECK(HEAP->old_pointer_space()->Contains(clone->address()));
-
-  // Step 5: verify validity of region dirty marks.
-  Address clone_addr = clone->address();
-  Page* page = Page::FromAddress(clone_addr);
-  // Check that region covering inobject property 1 is marked dirty.
-  CHECK(page->IsRegionDirty(clone_addr + (object_size - kPointerSize)));
 }
 
 
@@ -1010,17 +961,18 @@
   Handle<JSFunction> function(JSFunction::cast(func_value));
   CHECK(function->shared()->is_compiled());
 
-  HEAP->CollectAllGarbage(true);
-  HEAP->CollectAllGarbage(true);
+  // TODO(1609) Currently incremental marker does not support code flushing.
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
 
   CHECK(function->shared()->is_compiled());
 
-  HEAP->CollectAllGarbage(true);
-  HEAP->CollectAllGarbage(true);
-  HEAP->CollectAllGarbage(true);
-  HEAP->CollectAllGarbage(true);
-  HEAP->CollectAllGarbage(true);
-  HEAP->CollectAllGarbage(true);
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
 
   // foo should no longer be in the compilation cache
   CHECK(!function->shared()->is_compiled() || function->IsOptimized());
@@ -1109,7 +1061,7 @@
     }
 
     // Mark compact handles the weak references.
-    HEAP->CollectAllGarbage(true);
+    HEAP->CollectAllGarbage(Heap::kNoGCFlags);
     CHECK_EQ(opt ? 4 : 0, CountOptimizedUserFunctions(ctx[i]));
 
     // Get rid of f3 and f5 in the same way.
@@ -1118,21 +1070,21 @@
       HEAP->PerformScavenge();
       CHECK_EQ(opt ? 4 : 0, CountOptimizedUserFunctions(ctx[i]));
     }
-    HEAP->CollectAllGarbage(true);
+    HEAP->CollectAllGarbage(Heap::kNoGCFlags);
     CHECK_EQ(opt ? 3 : 0, CountOptimizedUserFunctions(ctx[i]));
     CompileRun("f5=null");
     for (int j = 0; j < 10; j++) {
       HEAP->PerformScavenge();
       CHECK_EQ(opt ? 3 : 0, CountOptimizedUserFunctions(ctx[i]));
     }
-    HEAP->CollectAllGarbage(true);
+    HEAP->CollectAllGarbage(Heap::kNoGCFlags);
     CHECK_EQ(opt ? 2 : 0, CountOptimizedUserFunctions(ctx[i]));
 
     ctx[i]->Exit();
   }
 
   // Force compilation cache cleanup.
-  HEAP->CollectAllGarbage(true);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
 
   // Dispose the global contexts one by one.
   for (int i = 0; i < kNumTestContexts; i++) {
@@ -1146,7 +1098,7 @@
     }
 
     // Mark compact handles the weak references.
-    HEAP->CollectAllGarbage(true);
+    HEAP->CollectAllGarbage(Heap::kNoGCFlags);
     CHECK_EQ(kNumTestContexts - i - 1, CountGlobalContexts());
   }
 
@@ -1161,7 +1113,7 @@
   Handle<Object> object(HEAP->global_contexts_list());
   while (!object->IsUndefined()) {
     count++;
-    if (count == n) HEAP->CollectAllGarbage(true);
+    if (count == n) HEAP->CollectAllGarbage(Heap::kNoGCFlags);
     object =
         Handle<Object>(Context::cast(*object)->get(Context::NEXT_CONTEXT_LINK));
   }
@@ -1180,7 +1132,7 @@
   while (object->IsJSFunction() &&
          !Handle<JSFunction>::cast(object)->IsBuiltin()) {
     count++;
-    if (count == n) HEAP->CollectAllGarbage(true);
+    if (count == n) HEAP->CollectAllGarbage(Heap::kNoGCFlags);
     object = Handle<Object>(
         Object::cast(JSFunction::cast(*object)->next_function_link()));
   }
@@ -1240,90 +1192,84 @@
 
 TEST(TestSizeOfObjectsVsHeapIteratorPrecision) {
   InitializeVM();
+  HEAP->EnsureHeapIsIterable();
   intptr_t size_of_objects_1 = HEAP->SizeOfObjects();
-  HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
+  HeapIterator iterator;
   intptr_t size_of_objects_2 = 0;
   for (HeapObject* obj = iterator.next();
        obj != NULL;
        obj = iterator.next()) {
     size_of_objects_2 += obj->Size();
   }
-  // Delta must be within 1% of the larger result.
+  // Delta must be within 5% of the larger result.
+  // TODO(gc): Tighten this up by distinguishing between byte
+  // arrays that are real and those that merely mark free space
+  // on the heap.
   if (size_of_objects_1 > size_of_objects_2) {
     intptr_t delta = size_of_objects_1 - size_of_objects_2;
     PrintF("Heap::SizeOfObjects: %" V8_PTR_PREFIX "d, "
            "Iterator: %" V8_PTR_PREFIX "d, "
            "delta: %" V8_PTR_PREFIX "d\n",
            size_of_objects_1, size_of_objects_2, delta);
-    CHECK_GT(size_of_objects_1 / 100, delta);
+    CHECK_GT(size_of_objects_1 / 20, delta);
   } else {
     intptr_t delta = size_of_objects_2 - size_of_objects_1;
     PrintF("Heap::SizeOfObjects: %" V8_PTR_PREFIX "d, "
            "Iterator: %" V8_PTR_PREFIX "d, "
            "delta: %" V8_PTR_PREFIX "d\n",
            size_of_objects_1, size_of_objects_2, delta);
-    CHECK_GT(size_of_objects_2 / 100, delta);
+    CHECK_GT(size_of_objects_2 / 20, delta);
   }
 }
 
 
-class HeapIteratorTestHelper {
- public:
-  HeapIteratorTestHelper(Object* a, Object* b)
-      : a_(a), b_(b), a_found_(false), b_found_(false) {}
-  bool a_found() { return a_found_; }
-  bool b_found() { return b_found_; }
-  void IterateHeap(HeapIterator::HeapObjectsFiltering mode) {
-    HeapIterator iterator(mode);
-    for (HeapObject* obj = iterator.next();
-         obj != NULL;
-         obj = iterator.next()) {
-      if (obj == a_)
-        a_found_ = true;
-      else if (obj == b_)
-        b_found_ = true;
+TEST(GrowAndShrinkNewSpace) {
+  InitializeVM();
+  NewSpace* new_space = HEAP->new_space();
+
+  // Explicitly growing should double the space capacity.
+  intptr_t old_capacity, new_capacity;
+  old_capacity = new_space->Capacity();
+  new_space->Grow();
+  new_capacity = new_space->Capacity();
+  CHECK(2 * old_capacity == new_capacity);
+
+  // Fill up the new space until it is completely full. Make sure
+  // that the scavenger does not undo the filling.
+  old_capacity = new_space->Capacity();
+  {
+    v8::HandleScope scope;
+    AlwaysAllocateScope always_allocate;
+    intptr_t available = new_space->EffectiveCapacity() - new_space->Size();
+    intptr_t number_of_fillers = (available / FixedArray::SizeFor(1000)) - 10;
+    for (intptr_t i = 0; i < number_of_fillers; i++) {
+      CHECK(HEAP->InNewSpace(*FACTORY->NewFixedArray(1000, NOT_TENURED)));
     }
   }
- private:
-  Object* a_;
-  Object* b_;
-  bool a_found_;
-  bool b_found_;
-};
+  new_capacity = new_space->Capacity();
+  CHECK(old_capacity == new_capacity);
 
-TEST(HeapIteratorFilterUnreachable) {
-  InitializeVM();
-  v8::HandleScope scope;
-  CompileRun("a = {}; b = {};");
-  v8::Handle<Object> a(ISOLATE->context()->global()->GetProperty(
-      *FACTORY->LookupAsciiSymbol("a"))->ToObjectChecked());
-  v8::Handle<Object> b(ISOLATE->context()->global()->GetProperty(
-      *FACTORY->LookupAsciiSymbol("b"))->ToObjectChecked());
-  CHECK_NE(*a, *b);
-  {
-    HeapIteratorTestHelper helper(*a, *b);
-    helper.IterateHeap(HeapIterator::kFilterUnreachable);
-    CHECK(helper.a_found());
-    CHECK(helper.b_found());
-  }
-  CHECK(ISOLATE->context()->global()->DeleteProperty(
-      *FACTORY->LookupAsciiSymbol("a"), JSObject::FORCE_DELETION));
-  // We ensure that GC will not happen, so our raw pointer stays valid.
-  AssertNoAllocation no_alloc;
-  Object* a_saved = *a;
-  a.Clear();
-  // Verify that "a" object still resides in the heap...
-  {
-    HeapIteratorTestHelper helper(a_saved, *b);
-    helper.IterateHeap(HeapIterator::kNoFiltering);
-    CHECK(helper.a_found());
-    CHECK(helper.b_found());
-  }
-  // ...but is now unreachable.
-  {
-    HeapIteratorTestHelper helper(a_saved, *b);
-    helper.IterateHeap(HeapIterator::kFilterUnreachable);
-    CHECK(!helper.a_found());
-    CHECK(helper.b_found());
-  }
+  // Explicitly shrinking should not affect space capacity.
+  old_capacity = new_space->Capacity();
+  new_space->Shrink();
+  new_capacity = new_space->Capacity();
+  CHECK(old_capacity == new_capacity);
+
+  // Let the scavenger empty the new space.
+  HEAP->CollectGarbage(NEW_SPACE);
+  CHECK_LE(new_space->Size(), old_capacity);
+
+  // Explicitly shrinking should halve the space capacity.
+  old_capacity = new_space->Capacity();
+  new_space->Shrink();
+  new_capacity = new_space->Capacity();
+  CHECK(old_capacity == 2 * new_capacity);
+
+  // Consecutive shrinking should not affect space capacity.
+  old_capacity = new_space->Capacity();
+  new_space->Shrink();
+  new_space->Shrink();
+  new_space->Shrink();
+  new_capacity = new_space->Capacity();
+  CHECK(old_capacity == new_capacity);
 }
diff --git a/test/cctest/test-log.cc b/test/cctest/test-log.cc
index 72e663c..6f2324d 100644
--- a/test/cctest/test-log.cc
+++ b/test/cctest/test-log.cc
@@ -494,7 +494,7 @@
       "    (function a(j) { return function b() { return j; } })(100);\n"
       "})(this);");
   v8::V8::PauseProfiler();
-  HEAP->CollectAllGarbage(true);
+  HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
   LOGGER->StringEvent("test-logging-done", "");
 
   // Iterate heap to find compiled functions, will write to log.
diff --git a/test/cctest/test-mark-compact.cc b/test/cctest/test-mark-compact.cc
index dcb51a0..e99e1e5 100644
--- a/test/cctest/test-mark-compact.cc
+++ b/test/cctest/test-mark-compact.cc
@@ -44,21 +44,21 @@
 }
 
 
-TEST(MarkingStack) {
+TEST(MarkingDeque) {
   int mem_size = 20 * kPointerSize;
   byte* mem = NewArray<byte>(20*kPointerSize);
   Address low = reinterpret_cast<Address>(mem);
   Address high = low + mem_size;
-  MarkingStack s;
+  MarkingDeque s;
   s.Initialize(low, high);
 
   Address address = NULL;
-  while (!s.is_full()) {
-    s.Push(HeapObject::FromAddress(address));
+  while (!s.IsFull()) {
+    s.PushBlack(HeapObject::FromAddress(address));
     address += kPointerSize;
   }
 
-  while (!s.is_empty()) {
+  while (!s.IsEmpty()) {
     Address value = s.Pop()->address();
     address -= kPointerSize;
     CHECK_EQ(address, value);
@@ -78,7 +78,7 @@
   // from new space.
   FLAG_gc_global = true;
   FLAG_always_compact = true;
-  HEAP->ConfigureHeap(2*256*KB, 4*MB, 4*MB);
+  HEAP->ConfigureHeap(2*256*KB, 8*MB, 8*MB);
 
   InitializeVM();
 
@@ -104,7 +104,7 @@
 
 
 TEST(NoPromotion) {
-  HEAP->ConfigureHeap(2*256*KB, 4*MB, 4*MB);
+  HEAP->ConfigureHeap(2*256*KB, 8*MB, 8*MB);
 
   // Test the situation that some objects in new space are promoted to
   // the old space
@@ -116,9 +116,12 @@
   HEAP->CollectGarbage(OLD_POINTER_SPACE);
 
   // Allocate a big Fixed array in the new space.
-  int size = (HEAP->MaxObjectSizeInPagedSpace() - FixedArray::kHeaderSize) /
-      kPointerSize;
-  Object* obj = HEAP->AllocateFixedArray(size)->ToObjectChecked();
+  int max_size =
+      Min(HEAP->MaxObjectSizeInPagedSpace(), HEAP->MaxObjectSizeInNewSpace());
+
+  int length = (max_size - FixedArray::kHeaderSize) / (2*kPointerSize);
+  Object* obj = i::Isolate::Current()->heap()->AllocateFixedArray(length)->
+      ToObjectChecked();
 
   Handle<FixedArray> array(FixedArray::cast(obj));
 
@@ -228,6 +231,8 @@
 }
 
 
+// TODO(1600): compaction of map space is temporarily removed from GC.
+#if 0
 static Handle<Map> CreateMap() {
   return FACTORY->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
 }
@@ -252,11 +257,11 @@
   // be able to trigger map compaction.
   // To give an additional chance to fail, try to force compaction which
   // should be impossible right now.
-  HEAP->CollectAllGarbage(true);
+  HEAP->CollectAllGarbage(Heap::kForceCompactionMask);
   // And now map pointers should be encodable again.
   CHECK(HEAP->map_space()->MapPointersEncodable());
 }
-
+#endif
 
 static int gc_starts = 0;
 static int gc_ends = 0;
diff --git a/test/cctest/test-profile-generator.cc b/test/cctest/test-profile-generator.cc
index 76fd244..def829c 100644
--- a/test/cctest/test-profile-generator.cc
+++ b/test/cctest/test-profile-generator.cc
@@ -52,7 +52,7 @@
     CHECK_EQ(0, te.GetTokenId(*v8::Utils::OpenHandle(*token1)));
   }
   CHECK(!i::TokenEnumeratorTester::token_removed(&te)->at(2));
-  HEAP->CollectAllGarbage(false);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK(i::TokenEnumeratorTester::token_removed(&te)->at(2));
   CHECK_EQ(1, te.GetTokenId(*v8::Utils::OpenHandle(*token2)));
   CHECK_EQ(0, te.GetTokenId(*v8::Utils::OpenHandle(*token1)));
diff --git a/test/cctest/test-reloc-info.cc b/test/cctest/test-reloc-info.cc
index 5bdc4c3..e638201 100644
--- a/test/cctest/test-reloc-info.cc
+++ b/test/cctest/test-reloc-info.cc
@@ -34,7 +34,7 @@
 
 static void WriteRinfo(RelocInfoWriter* writer,
                        byte* pc, RelocInfo::Mode mode, intptr_t data) {
-  RelocInfo rinfo(pc, mode, data);
+  RelocInfo rinfo(pc, mode, data, NULL);
   writer->Write(&rinfo);
 }
 
diff --git a/test/cctest/test-serialize.cc b/test/cctest/test-serialize.cc
index 8e85444..cccd2ee 100644
--- a/test/cctest/test-serialize.cc
+++ b/test/cctest/test-serialize.cc
@@ -114,10 +114,6 @@
       ExternalReference(isolate->counters()->keyed_load_function_prototype());
   CHECK_EQ(make_code(STATS_COUNTER, Counters::k_keyed_load_function_prototype),
            encoder.Encode(keyed_load_function_prototype.address()));
-  ExternalReference the_hole_value_location =
-      ExternalReference::the_hole_value_location(isolate);
-  CHECK_EQ(make_code(UNCLASSIFIED, 2),
-           encoder.Encode(the_hole_value_location.address()));
   ExternalReference stack_limit_address =
       ExternalReference::address_of_stack_limit(isolate);
   CHECK_EQ(make_code(UNCLASSIFIED, 4),
@@ -127,7 +123,7 @@
   CHECK_EQ(make_code(UNCLASSIFIED, 5),
            encoder.Encode(real_stack_limit_address.address()));
 #ifdef ENABLE_DEBUGGER_SUPPORT
-  CHECK_EQ(make_code(UNCLASSIFIED, 15),
+  CHECK_EQ(make_code(UNCLASSIFIED, 16),
            encoder.Encode(ExternalReference::debug_break(isolate).address()));
 #endif  // ENABLE_DEBUGGER_SUPPORT
   CHECK_EQ(make_code(UNCLASSIFIED, 10),
@@ -157,15 +153,13 @@
            decoder.Decode(
                make_code(STATS_COUNTER,
                          Counters::k_keyed_load_function_prototype)));
-  CHECK_EQ(ExternalReference::the_hole_value_location(isolate).address(),
-           decoder.Decode(make_code(UNCLASSIFIED, 2)));
   CHECK_EQ(ExternalReference::address_of_stack_limit(isolate).address(),
            decoder.Decode(make_code(UNCLASSIFIED, 4)));
   CHECK_EQ(ExternalReference::address_of_real_stack_limit(isolate).address(),
            decoder.Decode(make_code(UNCLASSIFIED, 5)));
 #ifdef ENABLE_DEBUGGER_SUPPORT
   CHECK_EQ(ExternalReference::debug_break(isolate).address(),
-           decoder.Decode(make_code(UNCLASSIFIED, 15)));
+           decoder.Decode(make_code(UNCLASSIFIED, 16)));
 #endif  // ENABLE_DEBUGGER_SUPPORT
   CHECK_EQ(ExternalReference::new_space_start(isolate).address(),
            decoder.Decode(make_code(UNCLASSIFIED, 10)));
@@ -365,8 +359,8 @@
       Isolate::Current()->bootstrapper()->NativesSourceLookup(i);
     }
   }
-  HEAP->CollectAllGarbage(true);
-  HEAP->CollectAllGarbage(true);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
 
   Object* raw_foo;
   {
@@ -490,7 +484,7 @@
   }
   // If we don't do this then we end up with a stray root pointing at the
   // context even after we have disposed of env.
-  HEAP->CollectAllGarbage(true);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
 
   int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
   Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
@@ -563,16 +557,19 @@
 TEST(LinearAllocation) {
   v8::V8::Initialize();
   int new_space_max = 512 * KB;
+  int paged_space_max = Page::kMaxHeapObjectSize;
 
   for (int size = 1000; size < 5 * MB; size += size >> 1) {
+    size &= ~8;  // Round.
     int new_space_size = (size < new_space_max) ? size : new_space_max;
+    int paged_space_size = (size < paged_space_max) ? size : paged_space_max;
     HEAP->ReserveSpace(
         new_space_size,
-        size,              // Old pointer space.
-        size,              // Old data space.
-        size,              // Code space.
-        size,              // Map space.
-        size,              // Cell space.
+        paged_space_size,  // Old pointer space.
+        paged_space_size,  // Old data space.
+        HEAP->code_space()->RoundSizeDownToObjectAlignment(paged_space_size),
+        HEAP->map_space()->RoundSizeDownToObjectAlignment(paged_space_size),
+        HEAP->cell_space()->RoundSizeDownToObjectAlignment(paged_space_size),
         size);             // Large object space.
     LinearAllocationScope linear_allocation_scope;
     const int kSmallFixedArrayLength = 4;
@@ -599,7 +596,7 @@
 
     Object* pointer_last = NULL;
     for (int i = 0;
-         i + kSmallFixedArraySize <= size;
+         i + kSmallFixedArraySize <= paged_space_size;
          i += kSmallFixedArraySize) {
       Object* obj = HEAP->AllocateFixedArray(kSmallFixedArrayLength,
                                              TENURED)->ToObjectChecked();
@@ -618,7 +615,9 @@
     }
 
     Object* data_last = NULL;
-    for (int i = 0; i + kSmallStringSize <= size; i += kSmallStringSize) {
+    for (int i = 0;
+         i + kSmallStringSize <= paged_space_size;
+         i += kSmallStringSize) {
       Object* obj = HEAP->AllocateRawAsciiString(kSmallStringLength,
                                                  TENURED)->ToObjectChecked();
       int old_page_fullness = i % Page::kPageSize;
@@ -636,7 +635,7 @@
     }
 
     Object* map_last = NULL;
-    for (int i = 0; i + kMapSize <= size; i += kMapSize) {
+    for (int i = 0; i + kMapSize <= paged_space_size; i += kMapSize) {
       Object* obj = HEAP->AllocateMap(JS_OBJECT_TYPE,
                                       42 * kPointerSize)->ToObjectChecked();
       int old_page_fullness = i % Page::kPageSize;
diff --git a/test/cctest/test-spaces.cc b/test/cctest/test-spaces.cc
index 0f22ce1..ee60086 100644
--- a/test/cctest/test-spaces.cc
+++ b/test/cctest/test-spaces.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -32,7 +32,9 @@
 
 using namespace v8::internal;
 
+#if 0
 static void VerifyRegionMarking(Address page_start) {
+#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
   Page* p = Page::FromAddress(page_start);
 
   p->SetRegionMarks(Page::kAllRegionsCleanMarks);
@@ -54,9 +56,13 @@
        addr += kPointerSize) {
     CHECK(Page::FromAddress(addr)->IsRegionDirty(addr));
   }
+#endif
 }
+#endif
 
 
+// TODO(gc) you can no longer allocate pages like this. Details are hidden.
+#if 0
 TEST(Page) {
   byte* mem = NewArray<byte>(2*Page::kPageSize);
   CHECK(mem != NULL);
@@ -89,6 +95,7 @@
 
   DeleteArray(mem);
 }
+#endif
 
 
 namespace v8 {
@@ -122,62 +129,46 @@
   Isolate* isolate = Isolate::Current();
   isolate->InitializeLoggingAndCounters();
   Heap* heap = isolate->heap();
-  CHECK(heap->ConfigureHeapDefault());
+  CHECK(isolate->heap()->ConfigureHeapDefault());
+
   MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
   CHECK(memory_allocator->Setup(heap->MaxReserved(),
                                 heap->MaxExecutableSize()));
-  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
 
+  int total_pages = 0;
   OldSpace faked_space(heap,
                        heap->MaxReserved(),
                        OLD_POINTER_SPACE,
                        NOT_EXECUTABLE);
-  int total_pages = 0;
-  int requested = MemoryAllocator::kPagesPerChunk;
-  int allocated;
-  // If we request n pages, we should get n or n - 1.
-  Page* first_page = memory_allocator->AllocatePages(
-      requested, &allocated, &faked_space);
-  CHECK(first_page->is_valid());
-  CHECK(allocated == requested || allocated == requested - 1);
-  total_pages += allocated;
+  Page* first_page =
+      memory_allocator->AllocatePage(&faked_space, NOT_EXECUTABLE);
 
-  Page* last_page = first_page;
-  for (Page* p = first_page; p->is_valid(); p = p->next_page()) {
-    CHECK(memory_allocator->IsPageInSpace(p, &faked_space));
-    last_page = p;
+  first_page->InsertAfter(faked_space.anchor()->prev_page());
+  CHECK(first_page->is_valid());
+  CHECK(first_page->next_page() == faked_space.anchor());
+  total_pages++;
+
+  for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
+    CHECK(p->owner() == &faked_space);
   }
 
   // Again, we should get n or n - 1 pages.
-  Page* others = memory_allocator->AllocatePages(
-      requested, &allocated, &faked_space);
-  CHECK(others->is_valid());
-  CHECK(allocated == requested || allocated == requested - 1);
-  total_pages += allocated;
-
-  memory_allocator->SetNextPage(last_page, others);
+  Page* other =
+      memory_allocator->AllocatePage(&faked_space, NOT_EXECUTABLE);
+  CHECK(other->is_valid());
+  total_pages++;
+  other->InsertAfter(first_page);
   int page_count = 0;
-  for (Page* p = first_page; p->is_valid(); p = p->next_page()) {
-    CHECK(memory_allocator->IsPageInSpace(p, &faked_space));
+  for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
+    CHECK(p->owner() == &faked_space);
     page_count++;
   }
   CHECK(total_pages == page_count);
 
   Page* second_page = first_page->next_page();
   CHECK(second_page->is_valid());
-
-  // Freeing pages at the first chunk starting at or after the second page
-  // should free the entire second chunk.  It will return the page it was passed
-  // (since the second page was in the first chunk).
-  Page* free_return = memory_allocator->FreePages(second_page);
-  CHECK(free_return == second_page);
-  memory_allocator->SetNextPage(first_page, free_return);
-
-  // Freeing pages in the first chunk starting at the first page should free
-  // the first chunk and return an invalid page.
-  Page* invalid_page = memory_allocator->FreePages(first_page);
-  CHECK(!invalid_page->is_valid());
-
+  memory_allocator->Free(first_page);
+  memory_allocator->Free(second_page);
   memory_allocator->TearDown();
   delete memory_allocator;
 }
@@ -196,12 +187,8 @@
 
   NewSpace new_space(heap);
 
-  void* chunk =
-      memory_allocator->ReserveInitialChunk(4 * heap->ReservedSemiSpaceSize());
-  CHECK(chunk != NULL);
-  Address start = RoundUp(static_cast<Address>(chunk),
-                          2 * heap->ReservedSemiSpaceSize());
-  CHECK(new_space.Setup(start, 2 * heap->ReservedSemiSpaceSize()));
+  CHECK(new_space.Setup(HEAP->ReservedSemiSpaceSize(),
+                        HEAP->ReservedSemiSpaceSize()));
   CHECK(new_space.HasBeenSetup());
 
   while (new_space.Available() >= Page::kMaxHeapObjectSize) {
@@ -233,13 +220,7 @@
                              NOT_EXECUTABLE);
   CHECK(s != NULL);
 
-  void* chunk = memory_allocator->ReserveInitialChunk(
-      4 * heap->ReservedSemiSpaceSize());
-  CHECK(chunk != NULL);
-  Address start = static_cast<Address>(chunk);
-  size_t size = RoundUp(start, 2 * heap->ReservedSemiSpaceSize()) - start;
-
-  CHECK(s->Setup(start, size));
+  CHECK(s->Setup());
 
   while (s->Available() > 0) {
     s->AllocateRaw(Page::kMaxHeapObjectSize)->ToObjectUnchecked();
@@ -258,14 +239,12 @@
   LargeObjectSpace* lo = HEAP->lo_space();
   CHECK(lo != NULL);
 
-  Map* faked_map = reinterpret_cast<Map*>(HeapObject::FromAddress(0));
   int lo_size = Page::kPageSize;
 
-  Object* obj = lo->AllocateRaw(lo_size)->ToObjectUnchecked();
+  Object* obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE)->ToObjectUnchecked();
   CHECK(obj->IsHeapObject());
 
   HeapObject* ho = HeapObject::cast(obj);
-  ho->set_map(faked_map);
 
   CHECK(lo->Contains(HeapObject::cast(obj)));
 
@@ -275,14 +254,13 @@
 
   while (true) {
     intptr_t available = lo->Available();
-    { MaybeObject* maybe_obj = lo->AllocateRaw(lo_size);
+    { MaybeObject* maybe_obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE);
       if (!maybe_obj->ToObject(&obj)) break;
     }
-    HeapObject::cast(obj)->set_map(faked_map);
     CHECK(lo->Available() < available);
   };
 
   CHECK(!lo->IsEmpty());
 
-  CHECK(lo->AllocateRaw(lo_size)->IsFailure());
+  CHECK(lo->AllocateRaw(lo_size, NOT_EXECUTABLE)->IsFailure());
 }
diff --git a/test/cctest/test-strings.cc b/test/cctest/test-strings.cc
index 55c2141..93f7588 100644
--- a/test/cctest/test-strings.cc
+++ b/test/cctest/test-strings.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 
 // Check that we can traverse very deep stacks of ConsStrings using
 // StringInputBuffer.  Check that Get(int) works on very deep stacks
@@ -502,6 +502,35 @@
 }
 
 
+class AsciiVectorResource : public v8::String::ExternalAsciiStringResource {
+ public:
+  explicit AsciiVectorResource(i::Vector<const char> vector)
+      : data_(vector) {}
+  virtual ~AsciiVectorResource() {}
+  virtual size_t length() const { return data_.length(); }
+  virtual const char* data() const { return data_.start(); }
+ private:
+  i::Vector<const char> data_;
+};
+
+
+TEST(SliceFromExternal) {
+  FLAG_string_slices = true;
+  InitializeVM();
+  v8::HandleScope scope;
+  AsciiVectorResource resource(
+      i::Vector<const char>("abcdefghijklmnopqrstuvwxyz", 26));
+  Handle<String> string = FACTORY->NewExternalStringFromAscii(&resource);
+  CHECK(string->IsExternalString());
+  Handle<String> slice = FACTORY->NewSubString(string, 1, 25);
+  CHECK(slice->IsSlicedString());
+  CHECK(string->IsExternalString());
+  CHECK_EQ(SlicedString::cast(*slice)->parent(), *string);
+  CHECK(SlicedString::cast(*slice)->parent()->IsExternalString());
+  CHECK(slice->IsFlat());
+}
+
+
 TEST(TrivialSlice) {
   // This tests whether a slice that contains the entire parent string
   // actually creates a new string (it should not).
diff --git a/test/cctest/test-threads.cc b/test/cctest/test-threads.cc
index 985b9e5..713d1e8 100644
--- a/test/cctest/test-threads.cc
+++ b/test/cctest/test-threads.cc
@@ -63,7 +63,7 @@
 static Turn turn = FILL_CACHE;
 
 
-class ThreadA: public v8::internal::Thread {
+class ThreadA : public v8::internal::Thread {
  public:
   ThreadA() : Thread("ThreadA") { }
   void Run() {
@@ -99,7 +99,7 @@
 };
 
 
-class ThreadB: public v8::internal::Thread {
+class ThreadB : public v8::internal::Thread {
  public:
   ThreadB() : Thread("ThreadB") { }
   void Run() {
@@ -111,7 +111,7 @@
           v8::Context::Scope context_scope(v8::Context::New());
 
           // Clear the caches by forcing major GC.
-          HEAP->CollectAllGarbage(false);
+          HEAP->CollectAllGarbage(v8::internal::Heap::kNoGCFlags);
           turn = SECOND_TIME_FILL_CACHE;
           break;
         }
@@ -190,3 +190,19 @@
     delete threads[i];
   }
 }
+
+
+class ThreadC : public v8::internal::Thread {
+ public:
+  ThreadC() : Thread("ThreadC") { }
+  void Run() {
+    Join();
+  }
+};
+
+
+TEST(ThreadJoinSelf) {
+  ThreadC thread;
+  thread.Start();
+  thread.Join();
+}
diff --git a/test/cctest/test-weakmaps.cc b/test/cctest/test-weakmaps.cc
index db4db25..56d5936 100644
--- a/test/cctest/test-weakmaps.cc
+++ b/test/cctest/test-weakmaps.cc
@@ -50,7 +50,7 @@
                            Handle<JSObject> key,
                            int value) {
   Handle<ObjectHashTable> table = PutIntoObjectHashTable(
-      Handle<ObjectHashTable>(weakmap->table()),
+      Handle<ObjectHashTable>(ObjectHashTable::cast(weakmap->table())),
       Handle<JSObject>(JSObject::cast(*key)),
       Handle<Smi>(Smi::FromInt(value)));
   weakmap->set_table(*table);
@@ -85,13 +85,14 @@
     v8::HandleScope scope;
     PutIntoWeakMap(weakmap, Handle<JSObject>(JSObject::cast(*key)), 23);
   }
-  CHECK_EQ(1, weakmap->table()->NumberOfElements());
+  CHECK_EQ(1, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
 
   // Force a full GC.
   HEAP->CollectAllGarbage(false);
   CHECK_EQ(0, NumberOfWeakCalls);
-  CHECK_EQ(1, weakmap->table()->NumberOfElements());
-  CHECK_EQ(0, weakmap->table()->NumberOfDeletedElements());
+  CHECK_EQ(1, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
+  CHECK_EQ(
+      0, ObjectHashTable::cast(weakmap->table())->NumberOfDeletedElements());
 
   // Make the global reference to the key weak.
   {
@@ -107,12 +108,14 @@
   // weak references whereas the second one will also clear weak maps.
   HEAP->CollectAllGarbage(false);
   CHECK_EQ(1, NumberOfWeakCalls);
-  CHECK_EQ(1, weakmap->table()->NumberOfElements());
-  CHECK_EQ(0, weakmap->table()->NumberOfDeletedElements());
+  CHECK_EQ(1, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
+  CHECK_EQ(
+      0, ObjectHashTable::cast(weakmap->table())->NumberOfDeletedElements());
   HEAP->CollectAllGarbage(false);
   CHECK_EQ(1, NumberOfWeakCalls);
-  CHECK_EQ(0, weakmap->table()->NumberOfElements());
-  CHECK_EQ(1, weakmap->table()->NumberOfDeletedElements());
+  CHECK_EQ(0, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
+  CHECK_EQ(
+      1, ObjectHashTable::cast(weakmap->table())->NumberOfDeletedElements());
 }
 
 
@@ -122,7 +125,7 @@
   Handle<JSWeakMap> weakmap = AllocateJSWeakMap();
 
   // Check initial capacity.
-  CHECK_EQ(32, weakmap->table()->Capacity());
+  CHECK_EQ(32, ObjectHashTable::cast(weakmap->table())->Capacity());
 
   // Fill up weak map to trigger capacity change.
   {
@@ -135,15 +138,17 @@
   }
 
   // Check increased capacity.
-  CHECK_EQ(128, weakmap->table()->Capacity());
+  CHECK_EQ(128, ObjectHashTable::cast(weakmap->table())->Capacity());
 
   // Force a full GC.
-  CHECK_EQ(32, weakmap->table()->NumberOfElements());
-  CHECK_EQ(0, weakmap->table()->NumberOfDeletedElements());
+  CHECK_EQ(32, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
+  CHECK_EQ(
+      0, ObjectHashTable::cast(weakmap->table())->NumberOfDeletedElements());
   HEAP->CollectAllGarbage(false);
-  CHECK_EQ(0, weakmap->table()->NumberOfElements());
-  CHECK_EQ(32, weakmap->table()->NumberOfDeletedElements());
+  CHECK_EQ(0, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
+  CHECK_EQ(
+      32, ObjectHashTable::cast(weakmap->table())->NumberOfDeletedElements());
 
   // Check shrunk capacity.
-  CHECK_EQ(32, weakmap->table()->Capacity());
+  CHECK_EQ(32, ObjectHashTable::cast(weakmap->table())->Capacity());
 }
diff --git a/test/es5conform/es5conform.status b/test/es5conform/es5conform.status
index d095a24..65bc50b 100644
--- a/test/es5conform/es5conform.status
+++ b/test/es5conform/es5conform.status
@@ -41,16 +41,6 @@
 # We are compatible with Safari and Firefox.
 chapter11/11.1/11.1.5: UNIMPLEMENTED
 
-# We do not have a global object called 'global' as required by tests.
-chapter15/15.1: FAIL_OK
-
-# NaN is writable. We are compatible with JSC.
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-178: FAIL_OK
-# Infinity is writable. We are compatible with JSC.
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-179: FAIL_OK
-# undefined is writable. We are compatible with JSC.
-chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-180: FAIL_OK
-
 # Our Function object has an "arguments" property which is used as a
 # non-property in the test.
 chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-183: FAIL_OK
@@ -106,9 +96,6 @@
 # SUBSETFAIL
 chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-11: FAIL_OK
 
-# We do not implement all methods on RegExp.
-chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-13: FAIL
-
 # SUBSETFAIL
 chapter15/15.2/15.2.3/15.2.3.4/15.2.3.4-4-14: FAIL_OK
 
@@ -201,22 +188,6 @@
 chapter15/15.5/15.5.4/15.5.4.20/15.5.4.20-4-18: FAIL
 chapter15/15.5/15.5.4/15.5.4.20/15.5.4.20-4-34: FAIL
 
-# RegExp.prototype is not of type RegExp - we are bug compatible with JSC.
-chapter15/15.10/15.10.6/15.10.6: FAIL_OK
-
-# We do not have the properties of a RegExp instance on RegExp.prototype.
-# The spec says we should - but we are currently bug compatible with JSC.
-chapter15/15.10/15.10.7/15.10.7.1/15.10.7.1-1: FAIL_OK
-chapter15/15.10/15.10.7/15.10.7.1/15.10.7.1-2: FAIL_OK
-chapter15/15.10/15.10.7/15.10.7.2/15.10.7.2-1: FAIL_OK
-chapter15/15.10/15.10.7/15.10.7.2/15.10.7.2-2: FAIL_OK
-chapter15/15.10/15.10.7/15.10.7.3/15.10.7.3-1: FAIL_OK
-chapter15/15.10/15.10.7/15.10.7.3/15.10.7.3-2: FAIL_OK
-chapter15/15.10/15.10.7/15.10.7.4/15.10.7.4-1: FAIL_OK
-chapter15/15.10/15.10.7/15.10.7.4/15.10.7.4-2: FAIL_OK
-chapter15/15.10/15.10.7/15.10.7.5/15.10.7.5-1: FAIL_OK
-chapter15/15.10/15.10.7/15.10.7.5/15.10.7.5-2: FAIL_OK
-
 ##############################################################################
 # Unimplemented parts of strict mode
 # Setting expectations to fail only so that the tests trigger as soon as
diff --git a/test/mjsunit/array-tostring.js b/test/mjsunit/array-tostring.js
new file mode 100644
index 0000000..6708657
--- /dev/null
+++ b/test/mjsunit/array-tostring.js
@@ -0,0 +1,159 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Array's toString should call the object's own join method, if one exists and
+// is callable. Otherwise, just use the default Object.prototype.toString.
+
+var success = "[test success]";
+var expectedThis;
+function testJoin() {
+  assertEquals(0, arguments.length);
+  assertSame(expectedThis, this);
+  return success;
+}
+
+
+// On an Array object.
+
+// Default case.
+var a1 = [1, 2, 3];
+assertEquals(a1.join(), a1.toString());
+
+// Non-standard "join" function is called correctly.
+var a2 = [1, 2, 3];
+a2.join = testJoin;
+expectedThis = a2;
+assertEquals(success, a2.toString());
+
+// Non-callable join function is ignored and Object.prototype.toString is
+// used instead.
+var a3 = [1, 2, 3];
+a3.join = "not callable";
+assertEquals("[object Array]", a3.toString());
+
+// Non-existing join function is treated the same as non-callable.
+var a4 = [1, 2, 3];
+a4.__proto__ = { toString: Array.prototype.toString };
+// No join on Array.
+assertEquals("[object Array]", a4.toString());
+
+
+// On a non-Array object.
+
+// Default looks-like-an-array case.
+var o1 = {length: 3, 0: 1, 1: 2, 2: 3,
+          toString: Array.prototype.toString,
+          join: Array.prototype.join};
+assertEquals(o1.join(), o1.toString());
+
+
+// Non-standard join is called correctly.
+// Check that we don't read, e.g., length before calling join.
+var o2 = {toString : Array.prototype.toString,
+          join: testJoin,
+          get length() { assertUnreachable(); },
+          get 0() { assertUnreachable(); }};
+expectedThis = o2;
+assertEquals(success, o2.toString());
+
+// Non-standard join is called even if it looks like an array.
+var o3 = {length: 3, 0: 1, 1: 2, 2: 3,
+          toString: Array.prototype.toString,
+          join: testJoin};
+expectedThis = o3;
+assertEquals(success, o3.toString());
+
+// A non-callable join works the same as it does for an Array.
+var o4 = {length: 3, 0: 1, 1: 2, 2: 3,
+          toString: Array.prototype.toString,
+          join: "not callable"};
+assertEquals("[object Object]", o4.toString());
+
+
+// A non-existent join works the same as it does for an Array.
+var o5 = {length: 3, 0: 1, 1: 2, 2: 3,
+          toString: Array.prototype.toString
+          /* no join */};
+assertEquals("[object Object]", o5.toString());
+
+
+// Test that ToObject is called before getting "join", so the instance
+// that "join" is read from is the same one passed as receiver later.
+var called_before = false;
+expectedThis = null;
+Object.defineProperty(Number.prototype, "join", {get: function() {
+            assertFalse(called_before);
+            called_before = true;
+            expectedThis = this;
+            return testJoin;
+        }});
+Number.prototype.arrayToString = Array.prototype.toString;
+assertEquals(success, (42).arrayToString());
+
+// ----------------------------------------------------------
+// Testing Array.prototype.toLocaleString
+
+// Ensure that it never uses Array.prototype.toString for anything.
+Array.prototype.toString = function() { assertUnreachable(); };
+
+// Default case.
+var la1 = [1, [2, 3], 4];
+assertEquals("1,2,3,4", la1.toLocaleString());
+
+// Used on a string (which looks like an array of characters).
+String.prototype.toLocaleString = Array.prototype.toLocaleString;
+assertEquals("1,2,3,4", "1234".toLocaleString());
+
+// If toLocaleString of element is not callable, throw a TypeError.
+var la2 = [1, {toLocaleString: "not callable"}, 3];
+assertThrows(function() { la2.toLocaleString(); }, TypeError);
+
+// If toLocaleString of element is callable, call it.
+var la3 = [1, {toLocaleString: function() { return "XX";}}, 3];
+assertEquals("1,XX,3", la3.toLocaleString());
+
+// Omitted elements, as well as undefined and null, become empty string.
+var la4 = [1, null, 3, undefined, 5,, 7];
+assertEquals("1,,3,,5,,7", la4.toLocaleString());
+
+
+// ToObject is called first and the same object is then used for the rest of
+// the operations.
+Object.defineProperty(Number.prototype, "length", {
+    get: function() {
+      expectedThis = this;
+      return 3;
+    }});
+for (var i = 0; i < 3; i++) {
+  Object.defineProperty(Number.prototype, i, {
+      get: function() {
+        assertEquals(expectedThis, this);
+        return +this;
+      }});
+}
+Number.prototype.arrayToLocaleString = Array.prototype.toLocaleString;
+assertEquals("42,42,42", (42).arrayToLocaleString());
\ No newline at end of file
diff --git a/test/mjsunit/assert-opt-and-deopt.js b/test/mjsunit/assert-opt-and-deopt.js
index c9adb5b..51cb99a 100644
--- a/test/mjsunit/assert-opt-and-deopt.js
+++ b/test/mjsunit/assert-opt-and-deopt.js
@@ -150,11 +150,6 @@
 
 f(1);
 
-tracker.AssertOptCount(f, 0);
-tracker.AssertIsOptimized(f, false);
-tracker.AssertDeoptHappened(f, false);
-tracker.AssertDeoptCount(f, 0);
-
 %OptimizeFunctionOnNextCall(f);
 f(1);
 
@@ -172,6 +167,7 @@
 
 // Let's trigger optimization for another type.
 for (var i = 0; i < 5; i++) f("a");
+
 %OptimizeFunctionOnNextCall(f);
 f("b");
 
diff --git a/test/mjsunit/compiler/inline-context-slots.js b/test/mjsunit/compiler/inline-context-slots.js
new file mode 100644
index 0000000..d0e907b
--- /dev/null
+++ b/test/mjsunit/compiler/inline-context-slots.js
@@ -0,0 +1,49 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test inlining of functions with context slots.
+
+// Flags: --allow-natives-syntax
+
+
+// Caller/callee without a local context.
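+// The values X and Y live in the enclosing closure's context rather than in
+// max() itself, so inlining max() into run() must still resolve those
+// context slots through the outer scope.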
+
+(function() {
+  var X = 5;
+  var Y = 10;
+  function F() {}
+  F.prototype.max = function() {
+    return X > Y ? X : Y;
+  }
+  F.prototype.run = function() {
+    return this.max();
+  }
+  var f = new F();
+  for (var i=0; i<5; i++) f.run();
+  %OptimizeFunctionOnNextCall(f.run);
+  assertEquals(10, f.run());
+})();
diff --git a/test/mjsunit/compiler/regress-96989.js b/test/mjsunit/compiler/regress-96989.js
new file mode 100644
index 0000000..aedeb24
--- /dev/null
+++ b/test/mjsunit/compiler/regress-96989.js
@@ -0,0 +1,43 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// Flags: --allow-natives-syntax
+
+// Test correct handling of uninitialized const.
+
+function test() {
+  for (var i = 41; i < 42; i++) {
+    var c = t ^ i;
+  }
+  const t;
+  return c;
+}
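+// Note on the expected value: when `c` is computed, the hoisted `t` is still
+// uninitialized (undefined); ToInt32(undefined) is 0, and 0 ^ 41 is 41.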
+
+for (var i=0; i<10; i++) test();
+%OptimizeFunctionOnNextCall(test);
+assertEquals(41, test());
diff --git a/test/mjsunit/const-redecl.js b/test/mjsunit/const-redecl.js
index 9459708..c0b97e6 100644
--- a/test/mjsunit/const-redecl.js
+++ b/test/mjsunit/const-redecl.js
@@ -98,7 +98,8 @@
   var msg = s;
   if (opt_e) { e = opt_e; msg += "; " + opt_e; }
   assertEquals(expected, TestLocal(s,e), "local:'" + msg + "'");
-  assertEquals(expected, TestGlobal(s,e), "global:'" + msg + "'");
+  // Redeclarations of global consts do not throw, they are silently ignored.
+  assertEquals(42, TestGlobal(s, 42), "global:'" + msg + "'");
   assertEquals(expected, TestContext(s,e), "context:'" + msg + "'");
 }
 
@@ -218,3 +219,62 @@
 // Test that const inside with behaves correctly.
 TestAll(87, "with ({x:42}) { const x = 87; }", "x");
 TestAll(undefined, "with ({x:42}) { const x; }", "x");
+
+
+// Additional tests for how various combinations of re-declarations affect
+// the values of the var/const in question.
+try {
+  eval("var undefined;");
+} catch (ex) {
+  assertUnreachable("undefined (1) has thrown");
+}
+
+var original_undef = undefined;
+var undefined = 1;  // Should be silently ignored.
+assertEquals(original_undef, undefined, "undefined got overwritten");
+undefined = original_undef;
+
+var a; const a; const a = 1;
+assertEquals(1, a, "a has wrong value");
+a = 2;
+assertEquals(2, a, "a should be writable");
+
+var b = 1; const b = 2;
+assertEquals(2, b, "b has wrong value");
+
+var c = 1; const c = 2; const c = 3;
+assertEquals(3, c, "c has wrong value");
+
+const d = 1; const d = 2;
+assertEquals(1, d, "d has wrong value");
+
+const e = 1; var e = 2;
+assertEquals(1, e, "e has wrong value");
+
+const f = 1; const f;
+assertEquals(1, f, "f has wrong value");
+
+var g; const g = 1;
+assertEquals(1, g, "g has wrong value");
+g = 2;
+assertEquals(2, g, "g should be writable");
+
+const h; var h = 1;
+assertEquals(undefined, h, "h has wrong value");
+
+eval("Object.defineProperty(this, 'i', { writable: true });"
+   + "const i = 7;"
+   + "assertEquals(7, i, \"i has wrong value\");");
+
+var global = this;
+assertThrows(function() {
+  Object.defineProperty(global, 'j', { writable: true })
+}, TypeError);
+// The const declaration of j below is hoisted and makes the global property
+// non-configurable, which is what makes the defineProperty call above throw.
+const j = 2;
+assertEquals(2, j, "j has wrong value");
+
+var k = 1; const k;
+// You could argue about the expected result here. For now, the winning
+// argument is that "const k;" is equivalent to "const k = undefined;".
+assertEquals(undefined, k, "k has wrong value");
diff --git a/test/mjsunit/element-kind.js b/test/mjsunit/element-kind.js
index 48a029f..d61e26a 100644
--- a/test/mjsunit/element-kind.js
+++ b/test/mjsunit/element-kind.js
@@ -25,10 +25,25 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax
-// Test element kind of objects
+// Flags: --allow-natives-syntax --smi-only-arrays
+// Test element kind of objects.
+// Since --smi-only-arrays affects builtins, the default it had at compile
+// time gets baked in when V8 is built with a snapshot.  So if
+// --smi-only-arrays is off by default, only a no-snapshot build actually has
+// smi-only arrays enabled for this test case.  The test therefore detects at
+// runtime whether smi-only arrays are enabled and takes the matching path.
+
+
+support_smi_only_arrays = %HasFastSmiOnlyElements([]);
+
+if (support_smi_only_arrays) {
+  print("Tests include smi-only arrays.");
+} else {
+  print("Tests do NOT include smi-only arrays.");
+}
 
 var element_kind = {
+  fast_smi_only_elements            :  0,
   fast_elements                     :  1,
   fast_double_elements              :  2,
   dictionary_elements               :  3,
@@ -44,9 +59,17 @@
 }
 
 // We expect an object to only be of one element kind.
-function assertKind(expected, obj){
-  assertEquals(expected == element_kind.fast_elements,
-               %HasFastElements(obj));
+function assertKind(expected, obj) {
+  if (support_smi_only_arrays) {
+    assertEquals(expected == element_kind.fast_smi_only_elements,
+                 %HasFastSmiOnlyElements(obj));
+    assertEquals(expected == element_kind.fast_elements,
+                 %HasFastElements(obj));
+  } else {
+    assertEquals(expected == element_kind.fast_elements ||
+                 expected == element_kind.fast_smi_only_elements,
+                 %HasFastElements(obj));
+  }
   assertEquals(expected == element_kind.fast_double_elements,
                %HasFastDoubleElements(obj));
   assertEquals(expected == element_kind.dictionary_elements,
@@ -80,16 +103,30 @@
 me.drink = 0xC0C0A;
 assertKind(element_kind.fast_elements, me);
 
+var too = [1,2,3];
+assertKind(element_kind.fast_smi_only_elements, too);
+too.dance = 0xD15C0;
+too.drink = 0xC0C0A;
+assertKind(element_kind.fast_smi_only_elements, too);
+
+// Make sure the element kind transitions away from smi-only when a non-smi
+// value is stored.
 var you = new Array();
-for(i = 0; i < 1337; i++) {
-  you[i] = i;
+assertKind(element_kind.fast_smi_only_elements, you);
+for (var i = 0; i < 1337; i++) {
+  var val = i;
+  if (i == 1336) {
+    assertKind(element_kind.fast_smi_only_elements, you);
+    val = new Object();
+  }
+  you[i] = val;
 }
 assertKind(element_kind.fast_elements, you);
 
-assertKind(element_kind.dictionary_elements, new Array(0xC0C0A));
+assertKind(element_kind.dictionary_elements, new Array(0xDECAF));
 
-// fast_double_elements not yet available
-
+var fast_double_array = new Array(0xDECAF);
+for (var i = 0; i < 0xDECAF; i++) fast_double_array[i] = i / 2;
+assertKind(element_kind.fast_double_elements, fast_double_array);
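+
+// Together these checks cover the element-kind transitions of interest here:
+// a smi-only array turns into a generic fast array once a non-smi object is
+// stored, and a large array densely filled with doubles ends up with fast
+// double elements instead of dictionary elements.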
 
 assertKind(element_kind.external_byte_elements,           new Int8Array(9001));
 assertKind(element_kind.external_unsigned_byte_elements,  new Uint8Array(007));
@@ -100,3 +137,70 @@
 assertKind(element_kind.external_float_elements,          new Float32Array(7));
 assertKind(element_kind.external_double_elements,         new Float64Array(0));
 assertKind(element_kind.external_pixel_elements,          new PixelArray(512));
+
+// Crankshaft support for smi-only array elements.
+function monomorphic(array) {
+  for (var i = 0; i < 3; i++) {
+    array[i] = i + 10;
+  }
+  assertKind(element_kind.fast_smi_only_elements, array);
+  for (var i = 0; i < 3; i++) {
+    var a = array[i];
+    assertEquals(i + 10, a);
+  }
+}
+var smi_only = [1, 2, 3];
+for (var i = 0; i < 3; i++) monomorphic(smi_only);
+%OptimizeFunctionOnNextCall(monomorphic);
+monomorphic(smi_only);
+function polymorphic(array, expected_kind) {
+  array[1] = 42;
+  assertKind(expected_kind, array);
+  var a = array[1];
+  assertEquals(42, a);
+}
+var smis = [1, 2, 3];
+var strings = ["one", "two", "three"];
+var doubles = [0, 0, 0]; doubles[0] = 1.5; doubles[1] = 2.5; doubles[2] = 3.5;
+assertKind(support_smi_only_arrays
+               ? element_kind.fast_double_elements
+               : element_kind.fast_elements,
+           doubles);
+for (var i = 0; i < 3; i++) {
+  polymorphic(smis, element_kind.fast_smi_only_elements);
+  polymorphic(strings, element_kind.fast_elements);
+  polymorphic(doubles, support_smi_only_arrays
+                           ? element_kind.fast_double_elements
+                           : element_kind.fast_elements);
+}
+%OptimizeFunctionOnNextCall(polymorphic);
+polymorphic(smis, element_kind.fast_smi_only_elements);
+polymorphic(strings, element_kind.fast_elements);
+polymorphic(doubles, support_smi_only_arrays
+    ? element_kind.fast_double_elements
+    : element_kind.fast_elements);
+
+// Crankshaft support for smi-only elements in dynamic array literals.
+function get(foo) { return foo; }  // Used to generate dynamic values.
+
+//function crankshaft_test(expected_kind) {
+function crankshaft_test() {
+  var a = [get(1), get(2), get(3)];
+  assertKind(element_kind.fast_smi_only_elements, a);
+  var b = [get(1), get(2), get("three")];
+  assertKind(element_kind.fast_elements, b);
+  var c = [get(1), get(2), get(3.5)];
+  // The full code generator doesn't support conversion to fast_double_elements
+  // yet. Crankshaft does, but only with --smi-only-arrays support.
+  if ((%GetOptimizationStatus(crankshaft_test) & 1) &&
+      support_smi_only_arrays) {
+    assertKind(element_kind.fast_double_elements, c);
+  } else {
+    assertKind(element_kind.fast_elements, c);
+  }
+}
+for (var i = 0; i < 3; i++) {
+  crankshaft_test();
+}
+%OptimizeFunctionOnNextCall(crankshaft_test);
+crankshaft_test();
diff --git a/test/mjsunit/global-const-var-conflicts.js b/test/mjsunit/global-const-var-conflicts.js
index d38d0ee..2fca96f 100644
--- a/test/mjsunit/global-const-var-conflicts.js
+++ b/test/mjsunit/global-const-var-conflicts.js
@@ -26,7 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // Check that dynamically introducing conflicting consts/vars
-// leads to exceptions.
+// is silently ignored (and does not lead to exceptions).
 
 var caught = 0;
 
@@ -46,12 +46,12 @@
 try { eval("const c"); } catch (e) { caught++; assertTrue(e instanceof TypeError); }
 assertTrue(typeof c == 'undefined');
 try { eval("const c = 1"); } catch (e) { caught++; assertTrue(e instanceof TypeError); }
-assertTrue(typeof c == 'undefined');
+assertEquals(1, c);
 
 eval("var d = 0");
 try { eval("const d"); } catch (e) { caught++; assertTrue(e instanceof TypeError); }
-assertEquals(0, d);
+assertEquals(undefined, d);
 try { eval("const d = 1"); } catch (e) { caught++; assertTrue(e instanceof TypeError); }
-assertEquals(0, d);
+assertEquals(1, d);
 
-assertEquals(8, caught);
+assertEquals(0, caught);
diff --git a/test/mjsunit/harmony/block-let-declaration.js b/test/mjsunit/harmony/block-let-declaration.js
index 49b6348..bddf0c2 100644
--- a/test/mjsunit/harmony/block-let-declaration.js
+++ b/test/mjsunit/harmony/block-let-declaration.js
@@ -47,19 +47,70 @@
   assertEquals(undefined, y);
 }
 
+// Invalid declarations are early errors in harmony mode and should therefore
+// trigger an exception while the eval code is being parsed, before it is ever
+// compiled or executed.  Hence the generated function is not called here.
 function TestLocalThrows(str, expect) {
-  assertThrows("(function(){" + str + "})()", expect);
+  assertThrows("(function(){" + str + "})", expect);
 }
 
 function TestLocalDoesNotThrow(str) {
   assertDoesNotThrow("(function(){" + str + "})()");
 }
 
-// Unprotected statement
+// Test let declarations in statement positions.
 TestLocalThrows("if (true) let x;", SyntaxError);
+TestLocalThrows("if (true) {} else let x;", SyntaxError);
 TestLocalThrows("do let x; while (false)", SyntaxError);
 TestLocalThrows("while (false) let x;", SyntaxError);
+TestLocalThrows("label: let x;", SyntaxError);
+TestLocalThrows("for (;false;) let x;", SyntaxError);
+TestLocalThrows("switch (true) { case true: let x; }", SyntaxError);
+TestLocalThrows("switch (true) { default: let x; }", SyntaxError);
 
+// Test var declarations in statement positions.
 TestLocalDoesNotThrow("if (true) var x;");
+TestLocalDoesNotThrow("if (true) {} else var x;");
 TestLocalDoesNotThrow("do var x; while (false)");
 TestLocalDoesNotThrow("while (false) var x;");
+TestLocalDoesNotThrow("label: var x;");
+TestLocalDoesNotThrow("for (;false;) var x;");
+TestLocalDoesNotThrow("switch (true) { case true: var x; }");
+TestLocalDoesNotThrow("switch (true) { default: var x; }");
+
+// Test function declarations in source element and
+// non-strict statement positions.
+function f() {
+  // Non-strict source element positions.
+  function g0() {
+    "use strict";
+    // Strict source element positions.
+    function h() { }
+    {
+      function h1() { }
+    }
+  }
+  {
+    function g1() { }
+  }
+  // Non-strict statement positions.
+  if (true) function g2() { }
+  if (true) {} else function g3() { }
+  do function g4() { } while (false)
+  while (false) function g5() { }
+  label: function g6() { }
+  for (;false;) function g7() { }
+  switch (true) { case true: function g8() { } }
+  switch (true) { default: function g9() { } }
+}
+f();
+
+// Test function declarations in statement position in strict mode.
+TestLocalThrows("function f() { 'use strict'; if (true) function g() {}", SyntaxError);
+TestLocalThrows("function f() { 'use strict'; if (true) {} else function g() {}", SyntaxError);
+TestLocalThrows("function f() { 'use strict'; do function g() {} while (false)", SyntaxError);
+TestLocalThrows("function f() { 'use strict'; while (false) function g() {}", SyntaxError);
+TestLocalThrows("function f() { 'use strict'; label: function g() {}", SyntaxError);
+TestLocalThrows("function f() { 'use strict'; for (;false;) function g() {}", SyntaxError);
+TestLocalThrows("function f() { 'use strict'; switch (true) { case true: function g() {} }", SyntaxError);
+TestLocalThrows("function f() { 'use strict'; switch (true) { default: function g() {} }", SyntaxError);
diff --git a/test/mjsunit/harmony/proxies-hash.js b/test/mjsunit/harmony/proxies-hash.js
new file mode 100644
index 0000000..2bf1830
--- /dev/null
+++ b/test/mjsunit/harmony/proxies-hash.js
@@ -0,0 +1,66 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-proxies --harmony-weakmaps
+
+
+// Helper.
+
+function TestWithProxies(test, handler) {
+  test(handler, Proxy.create)
+  test(handler, function(h) {return Proxy.createFunction(h, function() {})})
+}
+
+
+// Weak maps.
+
+function TestWeakMap(fix) {
+  TestWithProxies(TestWeakMap2, fix)
+}
+
+function TestWeakMap2(fix, create) {
+  var handler = {fix: function() { return {} }}
+  var p1 = create(handler)
+  var p2 = create(handler)
+  var p3 = create(handler)
+  fix(p3)
+
+  var m = new WeakMap
+  m.set(p1, 123);
+  m.set(p2, 321);
+  assertSame(123, m.get(p1));
+  assertSame(321, m.get(p2));
+
+  fix(p1)
+  fix(p2)
+  assertSame(123, m.get(p1));
+  assertSame(321, m.get(p2));
+}
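+
+// What this checks: fixing a proxy (via seal, freeze, or preventExtensions)
+// must not change its identity as a WeakMap key, so entries inserted before
+// the fix are still found afterwards.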
+
+TestWeakMap(Object.seal)
+TestWeakMap(Object.freeze)
+TestWeakMap(Object.preventExtensions)
diff --git a/test/mjsunit/harmony/proxies.js b/test/mjsunit/harmony/proxies.js
index 3c4e5f6..845dc20 100644
--- a/test/mjsunit/harmony/proxies.js
+++ b/test/mjsunit/harmony/proxies.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,9 +28,7 @@
 // Flags: --harmony-proxies
 
 
-// TODO(rossberg): for-in for proxies not implemented.
-// TODO(rossberg): inheritance from proxies not implemented.
-// TODO(rossberg): function proxies as constructors not implemented.
+// TODO(rossberg): for-in not implemented on proxies.
 
 
 // Helper.
@@ -41,7 +39,92 @@
 }
 
 
-// Getters.
+
+// Getting property descriptors (Object.getOwnPropertyDescriptor).
+
+var key
+
+function TestGetOwnProperty(handler) {
+  TestWithProxies(TestGetOwnProperty2, handler)
+}
+
+function TestGetOwnProperty2(handler, create) {
+  var p = create(handler)
+  assertEquals(42, Object.getOwnPropertyDescriptor(p, "a").value)
+  assertEquals("a", key)
+  assertEquals(42, Object.getOwnPropertyDescriptor(p, 99).value)
+  assertEquals("99", key)
+}
+
+TestGetOwnProperty({
+  getOwnPropertyDescriptor: function(k) {
+    key = k
+    return {value: 42, configurable: true}
+  }
+})
+
+TestGetOwnProperty({
+  getOwnPropertyDescriptor: function(k) {
+    return this.getOwnPropertyDescriptor2(k)
+  },
+  getOwnPropertyDescriptor2: function(k) {
+    key = k
+    return {value: 42, configurable: true}
+  }
+})
+
+TestGetOwnProperty({
+  getOwnPropertyDescriptor: function(k) {
+    key = k
+    return {get value() { return 42 }, get configurable() { return true }}
+  }
+})
+
+TestGetOwnProperty(Proxy.create({
+  get: function(pr, pk) {
+    return function(k) { key = k; return {value: 42, configurable: true} }
+  }
+}))
+
+
+function TestGetOwnPropertyThrow(handler) {
+  TestWithProxies(TestGetOwnPropertyThrow2, handler)
+}
+
+function TestGetOwnPropertyThrow2(handler, create) {
+  var p = create(handler)
+  assertThrows(function(){ Object.getOwnPropertyDescriptor(p, "a") }, "myexn")
+  assertThrows(function(){ Object.getOwnPropertyDescriptor(p, 77) }, "myexn")
+}
+
+TestGetOwnPropertyThrow({
+  getOwnPropertyDescriptor: function(k) { throw "myexn" }
+})
+
+TestGetOwnPropertyThrow({
+  getOwnPropertyDescriptor: function(k) {
+    return this.getOwnPropertyDescriptor2(k)
+  },
+  getOwnPropertyDescriptor2: function(k) { throw "myexn" }
+})
+
+TestGetOwnPropertyThrow({
+  getOwnPropertyDescriptor: function(k) {
+    return {get value() { throw "myexn" }}
+  }
+})
+
+TestGetOwnPropertyThrow(Proxy.create({
+  get: function(pr, pk) {
+    return function(k) { throw "myexn" }
+  }
+}))
+
+
+
+// Getters (dot, brackets).
+
+var key
 
 function TestGet(handler) {
   TestWithProxies(TestGet2, handler)
@@ -50,48 +133,56 @@
 function TestGet2(handler, create) {
   var p = create(handler)
   assertEquals(42, p.a)
+  assertEquals("a", key)
   assertEquals(42, p["b"])
+  assertEquals("b", key)
+  assertEquals(42, p[99])
+  assertEquals("99", key)
 
-  // TODO(rossberg): inheritance from proxies not yet implemented.
-  // var o = Object.create(p, {x: {value: 88}})
-  // assertEquals(42, o.a)
-  // assertEquals(42, o["b"])
-  // assertEquals(88, o.x)
-  // assertEquals(88, o["x"])
+  var o = Object.create(p, {x: {value: 88}})
+  assertEquals(42, o.a)
+  assertEquals("a", key)
+  assertEquals(42, o["b"])
+  assertEquals("b", key)
+  assertEquals(42, o[99])
+  assertEquals("99", key)
+  assertEquals(88, o.x)
+  assertEquals(88, o["x"])
 }
 
 TestGet({
-  get: function(r, k) { return 42 }
+  get: function(r, k) { key = k; return 42 }
 })
 
 TestGet({
   get: function(r, k) { return this.get2(r, k) },
-  get2: function(r, k) { return 42 }
+  get2: function(r, k) { key = k; return 42 }
 })
 
 TestGet({
-  getPropertyDescriptor: function(k) { return {value: 42} }
+  getPropertyDescriptor: function(k) { key = k; return {value: 42} }
 })
 
 TestGet({
   getPropertyDescriptor: function(k) { return this.getPropertyDescriptor2(k) },
-  getPropertyDescriptor2: function(k) { return {value: 42} }
+  getPropertyDescriptor2: function(k) { key = k; return {value: 42} }
 })
 
 TestGet({
   getPropertyDescriptor: function(k) {
+    key = k;
     return {get value() { return 42 }}
   }
 })
 
 TestGet({
   get: undefined,
-  getPropertyDescriptor: function(k) { return {value: 42} }
+  getPropertyDescriptor: function(k) { key = k; return {value: 42} }
 })
 
 TestGet(Proxy.create({
   get: function(pr, pk) {
-    return function(r, k) { return 42 }
+    return function(r, k) { key = k; return 42 }
   }
 }))
 
@@ -103,11 +194,29 @@
 function TestGetCall2(handler, create) {
   var p = create(handler)
   assertEquals(55, p.f())
+  assertEquals(55, p["f"]())
   assertEquals(55, p.f("unused", "arguments"))
   assertEquals(55, p.f.call(p))
+  assertEquals(55, p["f"].call(p))
+  assertEquals(55, p[101].call(p))
   assertEquals(55, p.withargs(45, 5))
   assertEquals(55, p.withargs.call(p, 11, 22))
   assertEquals("6655", "66" + p)  // calls p.toString
+
+  var o = Object.create(p, {g: {value: function(x) { return x + 88 }}})
+  assertEquals(55, o.f())
+  assertEquals(55, o["f"]())
+  assertEquals(55, o.f("unused", "arguments"))
+  assertEquals(55, o.f.call(o))
+  assertEquals(55, o.f.call(p))
+  assertEquals(55, o["f"].call(p))
+  assertEquals(55, o[101].call(p))
+  assertEquals(55, o.withargs(45, 5))
+  assertEquals(55, o.withargs.call(p, 11, 22))
+  assertEquals(90, o.g(2))
+  assertEquals(91, o.g.call(o, 3))
+  assertEquals(92, o.g.call(p, 4))
+  assertEquals("6655", "66" + o)  // calls o.toString
 }
 
 TestGetCall({
@@ -172,6 +281,15 @@
   var p = create(handler)
   assertThrows(function(){ p.a }, "myexn")
   assertThrows(function(){ p["b"] }, "myexn")
+  assertThrows(function(){ p[3] }, "myexn")
+
+  var o = Object.create(p, {x: {value: 88}, '4': {value: 89}})
+  assertThrows(function(){ o.a }, "myexn")
+  assertThrows(function(){ o["b"] }, "myexn")
+  assertThrows(function(){ o[3] }, "myexn")
+  assertEquals(88, o.x)
+  assertEquals(88, o["x"])
+  assertEquals(89, o[4])
 }
 
 TestGetThrow({
@@ -232,6 +350,9 @@
   assertEquals(43, p["b"] = 43)
   assertEquals("b", key)
   assertEquals(43, val)
+  assertEquals(44, p[77] = 44)
+  assertEquals("77", key)
+  assertEquals(44, val)
 }
 
 TestSet({
@@ -304,7 +425,6 @@
 }))
 
 
-
 function TestSetThrow(handler, create) {
   TestWithProxies(TestSetThrow2, handler)
 }
@@ -313,6 +433,7 @@
   var p = create(handler)
   assertThrows(function(){ p.a = 42 }, "myexn")
   assertThrows(function(){ p["b"] = 42 }, "myexn")
+  assertThrows(function(){ p[22] = 42 }, "myexn")
 }
 
 TestSetThrow({
@@ -424,6 +545,90 @@
 }))
 
 
+var key
+var val
+
+function TestSetForDerived(handler, create) {
+  TestWithProxies(TestSetForDerived2, handler)
+}
+
+function TestSetForDerived2(handler, create) {
+  var p = create(handler)
+  var o = Object.create(p, {x: {value: 88, writable: true},
+                            '1': {value: 89, writable: true}})
+
+  key = ""
+  assertEquals(48, o.x = 48)
+  assertEquals("", key)  // trap not invoked
+  assertEquals(48, o.x)
+
+  assertEquals(47, o[1] = 47)
+  assertEquals("", key)  // trap not invoked
+  assertEquals(47, o[1])
+
+  assertEquals(49, o.y = 49)
+  assertEquals("y", key)
+  assertEquals(49, o.y)
+
+  assertEquals(50, o[2] = 50)
+  assertEquals("2", key)
+  assertEquals(50, o[2])
+
+  assertEquals(44, o.p_writable = 44)
+  assertEquals("p_writable", key)
+  assertEquals(44, o.p_writable)
+
+  assertEquals(45, o.p_nonwritable = 45)
+  assertEquals("p_nonwritable", key)
+  assertEquals(45, o.p_nonwritable)
+
+  assertEquals(46, o.p_setter = 46)
+  assertEquals("p_setter", key)
+  assertEquals(46, val)  // written to parent
+  assertFalse(Object.prototype.hasOwnProperty.call(o, "p_setter"))
+
+  val = ""
+  assertEquals(47, o.p_nosetter = 47)
+  assertEquals("p_nosetter", key)
+  assertEquals("", val)  // not written at all
+  assertFalse(Object.prototype.hasOwnProperty.call(o, "p_nosetter"));
+
+  key = ""
+  assertThrows(function(){ "use strict"; o.p_nosetter = 50 }, TypeError)
+  assertEquals("p_nosetter", key)
+  assertEquals("", val)  // not written at all
+
+  assertThrows(function(){ o.p_nonconf = 53 }, TypeError)
+  assertEquals("p_nonconf", key)
+
+  assertThrows(function(){ o.p_throw = 51 }, "myexn")
+  assertEquals("p_throw", key)
+
+  assertThrows(function(){ o.p_setterthrow = 52 }, "myexn")
+  assertEquals("p_setterthrow", key)
+}
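+
+// What the derived-object cases above pin down: assignment on an object that
+// merely inherits from a proxy consults the proxy's getOwnPropertyDescriptor
+// trap only when the receiver has no own property, and the returned
+// descriptor then decides whether the value lands on the receiver, is routed
+// through an inherited setter, or makes the assignment throw.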
+
+TestSetForDerived({
+  getOwnPropertyDescriptor: function(k) {
+    key = k;
+    switch (k) {
+      case "p_writable": return {writable: true, configurable: true}
+      case "p_nonwritable": return {writable: false, configurable: true}
+      case "p_setter":return {set: function(x) { val = x }, configurable: true}
+      case "p_nosetter": return {get: function() { return 1 }, configurable: true}
+      case "p_nonconf":return {}
+      case "p_throw": throw "myexn"
+      case "p_setterthrow": return {set: function(x) { throw "myexn" }}
+      default: return undefined
+    }
+  }
+})
+
+
+// TODO(rossberg): TestSetReject, returning false
+// TODO(rossberg): TestGetProperty, TestSetProperty
+
+
 
 // Property definition (Object.defineProperty and Object.defineProperties).
 
@@ -453,6 +658,12 @@
   assertEquals(46, desc.value)
   assertEquals(false, desc.enumerable)
 
+  assertEquals(p, Object.defineProperty(p, 101, {value: 47, enumerable: false}))
+  assertEquals("101", key)
+  assertEquals(2, Object.getOwnPropertyNames(desc).length)
+  assertEquals(47, desc.value)
+  assertEquals(false, desc.enumerable)
+
   var attributes = {configurable: true, mine: 66, minetoo: 23}
   assertEquals(p, Object.defineProperty(p, "d", attributes))
   assertEquals("d", key)
@@ -487,7 +698,7 @@
 //  assertEquals(77, desc.value)
 
   var props = {
-    'bla': {},
+    '11': {},
     blub: {get: function() { return true }},
     '': {get value() { return 20 }},
     last: {value: 21, configurable: true, mine: "eyes"}
@@ -527,6 +738,7 @@
 function TestDefineThrow2(handler, create) {
   var p = create(handler)
   assertThrows(function(){ Object.defineProperty(p, "a", {value: 44})}, "myexn")
+  assertThrows(function(){ Object.defineProperty(p, 0, {value: 44})}, "myexn")
 
 // TODO(rossberg): These tests require for-in on proxies.
 //  var d1 = create({
@@ -579,6 +791,8 @@
   assertEquals("a", key)
   assertEquals(true, delete p["b"])
   assertEquals("b", key)
+  assertEquals(true, delete p[1])
+  assertEquals("1", key)
 
   assertEquals(false, delete p.z1)
   assertEquals("z1", key)
@@ -591,6 +805,8 @@
     assertEquals("c", key)
     assertEquals(true, delete p["d"])
     assertEquals("d", key)
+    assertEquals(true, delete p[2])
+    assertEquals("2", key)
 
     assertThrows(function(){ delete p.z3 }, TypeError)
     assertEquals("z3", key)
@@ -623,11 +839,13 @@
   var p = create(handler)
   assertThrows(function(){ delete p.a }, "myexn")
   assertThrows(function(){ delete p["b"] }, "myexn");
+  assertThrows(function(){ delete p[3] }, "myexn");
 
   (function() {
     "use strict"
     assertThrows(function(){ delete p.c }, "myexn")
     assertThrows(function(){ delete p["d"] }, "myexn")
+    assertThrows(function(){ delete p[4] }, "myexn");
   })()
 }
 
@@ -778,6 +996,7 @@
   assertEquals(0, ("zzz" in p) ? 2 : 0)
   assertEquals(2, !("zzz" in p) ? 2 : 0)
 
+  // Test compilation in conditionals.
   if ("b" in p) {
   } else {
     assertTrue(false)
@@ -830,7 +1049,7 @@
 })
 
 TestIn({
-  get: undefined,
+  has: undefined,
   getPropertyDescriptor: function(k) {
     key = k; return k < "z" ? {value: 42} : void 0
   }
@@ -850,6 +1069,7 @@
 function TestInThrow2(handler, create) {
   var p = create(handler)
   assertThrows(function(){ return "a" in o }, "myexn")
+  assertThrows(function(){ return 99 in o }, "myexn")
   assertThrows(function(){ return !("a" in o) }, "myexn")
   assertThrows(function(){ return ("a" in o) ? 2 : 3 }, "myexn")
   assertThrows(function(){ if ("b" in o) {} }, "myexn")
@@ -876,7 +1096,7 @@
 })
 
 TestInThrow({
-  get: undefined,
+  has: undefined,
   getPropertyDescriptor: function(k) { throw "myexn" }
 })
 
@@ -891,6 +1111,158 @@
 }))
 
 
+function TestInForDerived(handler) {
+  TestWithProxies(TestInForDerived2, handler)
+}
+
+function TestInForDerived2(handler, create) {
+  var p = create(handler)
+  var o = Object.create(p)
+
+  assertTrue("a" in o)
+  assertEquals("a", key)
+  assertTrue(99 in o)
+  assertEquals("99", key)
+  assertFalse("z" in o)
+  assertEquals("z", key)
+
+  assertEquals(2, ("a" in o) ? 2 : 0)
+  assertEquals(0, !("a" in o) ? 2 : 0)
+  assertEquals(0, ("zzz" in o) ? 2 : 0)
+  assertEquals(2, !("zzz" in o) ? 2 : 0)
+
+  if ("b" in o) {
+  } else {
+    assertTrue(false)
+  }
+  assertEquals("b", key)
+
+  if ("zz" in o) {
+    assertTrue(false)
+  }
+  assertEquals("zz", key)
+
+  if (!("c" in o)) {
+    assertTrue(false)
+  }
+  assertEquals("c", key)
+
+  if (!("zzz" in o)) {
+  } else {
+    assertTrue(false)
+  }
+  assertEquals("zzz", key)
+}
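+
+// As with assignment above: `in` on an object that inherits from a proxy
+// falls through to the proxy's getPropertyDescriptor trap for names the
+// receiver does not define itself.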
+
+TestInForDerived({
+  getPropertyDescriptor: function(k) {
+    key = k; return k < "z" ? {value: 42, configurable: true} : void 0
+  }
+})
+
+TestInForDerived({
+  getPropertyDescriptor: function(k) { return this.getPropertyDescriptor2(k) },
+  getPropertyDescriptor2: function(k) {
+    key = k; return k < "z" ? {value: 42, configurable: true} : void 0
+  }
+})
+
+TestInForDerived({
+  getPropertyDescriptor: function(k) {
+    key = k;
+    return k < "z" ? {get value() { return 42 }, configurable: true} : void 0
+  }
+})
+
+/* TODO(rossberg): this will work once we implement the newest proposal
+ * regarding default traps for getPropertyDescriptor.
+TestInForDerived({
+  getOwnPropertyDescriptor: function(k) {
+    key = k; return k < "z" ? {value: 42, configurable: true} : void 0
+  }
+})
+
+TestInForDerived({
+  getOwnPropertyDescriptor: function(k) {
+    return this.getOwnPropertyDescriptor2(k)
+  },
+  getOwnPropertyDescriptor2: function(k) {
+    key = k; return k < "z" ? {value: 42, configurable: true} : void 0
+  }
+})
+
+TestInForDerived({
+  getOwnPropertyDescriptor: function(k) {
+    key = k;
+    return k < "z" ? {get value() { return 42 }, configurable: true} : void 0
+  }
+})
+*/
+
+TestInForDerived(Proxy.create({
+  get: function(pr, pk) {
+    return function(k) {
+      key = k; return k < "z" ? {value: 42, configurable: true} : void 0
+    }
+  }
+}))
+
+
+
+// Property descriptor conversion.
+
+var descget
+
+function TestDescriptorGetOrder(handler) {
+  var p = Proxy.create(handler)
+  var o = Object.create(p, {b: {value: 0}})
+  TestDescriptorGetOrder2(function(n) { return p[n] }, "vV")
+  TestDescriptorGetOrder2(function(n) { return n in p }, "")
+  TestDescriptorGetOrder2(function(n) { return o[n] }, "vV")
+  TestDescriptorGetOrder2(function(n) { return n in o }, "eEcCvVwWgs")
+}
+
+function TestDescriptorGetOrder2(f, access) {
+  descget = ""
+  assertTrue(f("a"))
+  assertEquals(access, descget)
+  descget = ""
+  assertTrue(f(99))
+  assertEquals(access, descget)
+  descget = ""
+  assertFalse(!!f("z"))
+  assertEquals("", descget)
+}
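+
+// Reading the access strings: the descriptor returned by the handler below is
+// itself a proxy whose "has" trap logs the first letter of the queried
+// attribute in lower case and whose "get" trap logs it in upper case.  So
+// "eEcCvVwWgs" means has/get enumerable, has/get configurable, has/get value,
+// has/get writable, then has-only checks for get and set.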
+
+TestDescriptorGetOrder({
+  getPropertyDescriptor: function(k) {
+    if (k >= "z") return void 0
+    // Return a proxy as property descriptor, so that we can log accesses.
+    return Proxy.create({
+      get: function(r, attr) {
+        descget += attr[0].toUpperCase()
+        return true
+      },
+      has: function(attr) {
+        descget += attr[0]
+        switch (attr) {
+          case "writable":
+          case "enumerable":
+          case "configurable":
+          case "value":
+            return true
+          case "get":
+          case "set":
+            return false
+          default:
+            assertUnreachable()
+        }
+      }
+    })
+  }
+})
+
+
 
 // Own Properties (Object.prototype.hasOwnProperty).
 
@@ -1006,34 +1378,46 @@
 // Instanceof (instanceof)
 
 function TestInstanceof() {
-  var o = {}
+  var o1 = {}
   var p1 = Proxy.create({})
-  var p2 = Proxy.create({}, o)
+  var p2 = Proxy.create({}, o1)
   var p3 = Proxy.create({}, p2)
+  var o2 = Object.create(p2)
 
   var f0 = function() {}
-  f0.prototype = o
+  f0.prototype = o1
   var f1 = function() {}
   f1.prototype = p1
   var f2 = function() {}
   f2.prototype = p2
+  var f3 = function() {}
+  f3.prototype = o2
 
-  assertTrue(o instanceof Object)
-  assertFalse(o instanceof f0)
-  assertFalse(o instanceof f1)
-  assertFalse(o instanceof f2)
+  assertTrue(o1 instanceof Object)
+  assertFalse(o1 instanceof f0)
+  assertFalse(o1 instanceof f1)
+  assertFalse(o1 instanceof f2)
+  assertFalse(o1 instanceof f3)
   assertFalse(p1 instanceof Object)
   assertFalse(p1 instanceof f0)
   assertFalse(p1 instanceof f1)
   assertFalse(p1 instanceof f2)
+  assertFalse(p1 instanceof f3)
   assertTrue(p2 instanceof Object)
   assertTrue(p2 instanceof f0)
   assertFalse(p2 instanceof f1)
   assertFalse(p2 instanceof f2)
+  assertFalse(p2 instanceof f3)
   assertTrue(p3 instanceof Object)
   assertTrue(p3 instanceof f0)
   assertFalse(p3 instanceof f1)
   assertTrue(p3 instanceof f2)
+  assertFalse(p3 instanceof f3)
+  assertTrue(o2 instanceof Object)
+  assertTrue(o2 instanceof f0)
+  assertFalse(o2 instanceof f1)
+  assertTrue(o2 instanceof f2)
+  assertFalse(o2 instanceof f3)
 
   var f = Proxy.createFunction({}, function() {})
   assertTrue(f instanceof Function)
@@ -1046,43 +1430,57 @@
 // Prototype (Object.getPrototypeOf, Object.prototype.isPrototypeOf).
 
 function TestPrototype() {
-  var o = {}
+  var o1 = {}
   var p1 = Proxy.create({})
-  var p2 = Proxy.create({}, o)
+  var p2 = Proxy.create({}, o1)
   var p3 = Proxy.create({}, p2)
   var p4 = Proxy.create({}, 666)
+  var o2 = Object.create(p3)
 
-  assertSame(Object.getPrototypeOf(o), Object.prototype)
+  assertSame(Object.getPrototypeOf(o1), Object.prototype)
   assertSame(Object.getPrototypeOf(p1), null)
-  assertSame(Object.getPrototypeOf(p2), o)
+  assertSame(Object.getPrototypeOf(p2), o1)
   assertSame(Object.getPrototypeOf(p3), p2)
   assertSame(Object.getPrototypeOf(p4), null)
+  assertSame(Object.getPrototypeOf(o2), p3)
 
-  assertTrue(Object.prototype.isPrototypeOf(o))
+  assertTrue(Object.prototype.isPrototypeOf(o1))
   assertFalse(Object.prototype.isPrototypeOf(p1))
   assertTrue(Object.prototype.isPrototypeOf(p2))
   assertTrue(Object.prototype.isPrototypeOf(p3))
   assertFalse(Object.prototype.isPrototypeOf(p4))
-  assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, o))
+  assertTrue(Object.prototype.isPrototypeOf(o2))
+  assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, o1))
   assertFalse(Object.prototype.isPrototypeOf.call(Object.prototype, p1))
   assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, p2))
   assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, p3))
   assertFalse(Object.prototype.isPrototypeOf.call(Object.prototype, p4))
-  assertFalse(Object.prototype.isPrototypeOf.call(o, o))
-  assertFalse(Object.prototype.isPrototypeOf.call(o, p1))
-  assertTrue(Object.prototype.isPrototypeOf.call(o, p2))
-  assertTrue(Object.prototype.isPrototypeOf.call(o, p3))
-  assertFalse(Object.prototype.isPrototypeOf.call(o, p4))
+  assertTrue(Object.prototype.isPrototypeOf.call(Object.prototype, o2))
+  assertFalse(Object.prototype.isPrototypeOf.call(o1, o1))
+  assertFalse(Object.prototype.isPrototypeOf.call(o1, p1))
+  assertTrue(Object.prototype.isPrototypeOf.call(o1, p2))
+  assertTrue(Object.prototype.isPrototypeOf.call(o1, p3))
+  assertFalse(Object.prototype.isPrototypeOf.call(o1, p4))
+  assertTrue(Object.prototype.isPrototypeOf.call(o1, o2))
   assertFalse(Object.prototype.isPrototypeOf.call(p1, p1))
-  assertFalse(Object.prototype.isPrototypeOf.call(p1, o))
+  assertFalse(Object.prototype.isPrototypeOf.call(p1, o1))
   assertFalse(Object.prototype.isPrototypeOf.call(p1, p2))
   assertFalse(Object.prototype.isPrototypeOf.call(p1, p3))
   assertFalse(Object.prototype.isPrototypeOf.call(p1, p4))
+  assertFalse(Object.prototype.isPrototypeOf.call(p1, o2))
   assertFalse(Object.prototype.isPrototypeOf.call(p2, p1))
   assertFalse(Object.prototype.isPrototypeOf.call(p2, p2))
   assertTrue(Object.prototype.isPrototypeOf.call(p2, p3))
   assertFalse(Object.prototype.isPrototypeOf.call(p2, p4))
+  assertTrue(Object.prototype.isPrototypeOf.call(p2, o2))
   assertFalse(Object.prototype.isPrototypeOf.call(p3, p2))
+  assertTrue(Object.prototype.isPrototypeOf.call(p3, o2))
+  assertFalse(Object.prototype.isPrototypeOf.call(o2, o1))
+  assertFalse(Object.prototype.isPrototypeOf.call(o2, p1))
+  assertFalse(Object.prototype.isPrototypeOf.call(o2, p2))
+  assertFalse(Object.prototype.isPrototypeOf.call(o2, p3))
+  assertFalse(Object.prototype.isPrototypeOf.call(o2, p4))
+  assertFalse(Object.prototype.isPrototypeOf.call(o2, o2))
 
   var f = Proxy.createFunction({}, function() {})
   assertSame(Object.getPrototypeOf(f), Function.prototype)
@@ -1267,7 +1665,6 @@
 // Fixing (Object.freeze, Object.seal, Object.preventExtensions,
 //         Object.isFrozen, Object.isSealed, Object.isExtensible)
 
-// TODO(rossberg): use TestWithProxies to include funciton proxies
 function TestFix(names, handler) {
   var proto = {p: 77}
   var assertFixing = function(o, s, f, e) {
@@ -1314,19 +1711,27 @@
                     Object.keys(p3).sort())
   assertEquals(proto, Object.getPrototypeOf(p3))
   assertEquals(77, p3.p)
+
+  var p = Proxy.create(handler, proto)
+  var o = Object.create(p)
+  assertFixing(p, false, false, true)
+  assertFixing(o, false, false, true)
+  Object.freeze(o)
+  assertFixing(p, false, false, true)
+  assertFixing(o, true, true, false)
 }
 
 TestFix([], {
   fix: function() { return {} }
 })
 
-TestFix(["a", "b", "c", "d", "zz"], {
+TestFix(["a", "b", "c", "3", "zz"], {
   fix: function() {
     return {
       a: {value: "a", writable: true, configurable: false, enumerable: true},
       b: {value: 33, writable: false, configurable: false, enumerable: true},
       c: {value: 0, writable: true, configurable: true, enumerable: true},
-      d: {value: true, writable: false, configurable: true, enumerable: true},
+      '3': {value: true, writable: false, configurable: true, enumerable: true},
       zz: {value: 0, enumerable: false}
     }
   }
@@ -1426,6 +1831,13 @@
   assertEquals("my_proxy", Object.prototype.toLocaleString.call(f))
   assertEquals("toString", key)
   assertDoesNotThrow(function(){ Function.prototype.toString.call(f) })
+
+  var o = Object.create(p)
+  key = ""
+  assertEquals("[object Object]", Object.prototype.toString.call(o))
+  assertEquals("", key)
+  assertEquals("my_proxy", Object.prototype.toLocaleString.call(o))
+  assertEquals("toString", key)
 }
 
 TestToString({
@@ -1452,6 +1864,10 @@
   var f = Proxy.createFunction(handler, function() {})
   assertEquals("[object Function]", Object.prototype.toString.call(f))
   assertThrows(function(){ Object.prototype.toLocaleString.call(f) }, "myexn")
+
+  var o = Object.create(p)
+  assertEquals("[object Object]", Object.prototype.toString.call(o))
+  assertThrows(function(){ Object.prototype.toLocaleString.call(o) }, "myexn")
 }
 
 TestToStringThrow({
@@ -1510,6 +1926,11 @@
   assertEquals("2", key)
   assertFalse(Object.prototype.propertyIsEnumerable.call(p, "z"))
   assertEquals("z", key)
+
+  var o = Object.create(p)
+  key = ""
+  assertFalse(Object.prototype.propertyIsEnumerable.call(o, "a"))
+  assertEquals("", key)  // trap not invoked
 }
 
 TestIsEnumerable({
@@ -1586,23 +2007,30 @@
 // Calling (call, Function.prototype.call, Function.prototype.apply,
 //          Function.prototype.bind).
 
-var global = this
+var global_object = this
 var receiver
 
+function CreateFrozen(handler, callTrap, constructTrap) {
+  if (handler.fix === undefined) handler.fix = function() { return {} }
+  var f = Proxy.createFunction(handler, callTrap, constructTrap)
+  Object.freeze(f)
+  return f
+}
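+
+// Object.freeze on a proxy goes through its fix trap, so CreateFrozen
+// supplies a trivial one when the handler has none; this lets the same call
+// traps be exercised both before and after fixing.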
+
 function TestCall(isStrict, callTrap) {
   assertEquals(42, callTrap(5, 37))
-// TODO(rossberg): unrelated bug: this does not succeed for optimized code.
-// assertEquals(isStrict ? undefined : global, receiver)
+  // TODO(rossberg): unrelated bug: this does not succeed for optimized code:
+  // assertEquals(isStrict ? undefined : global_object, receiver)
 
-  var f = Proxy.createFunction({fix: function() { return {} }}, callTrap)
+  var f = Proxy.createFunction({}, callTrap)
   receiver = 333
   assertEquals(42, f(11, 31))
-  assertEquals(isStrict ? undefined : global, receiver)
+  assertEquals(isStrict ? undefined : global_object, receiver)
   var o = {}
   assertEquals(42, Function.prototype.call.call(f, o, 20, 22))
   assertEquals(o, receiver)
   assertEquals(43, Function.prototype.call.call(f, null, 20, 23))
-  assertEquals(isStrict ? null : global, receiver)
+  assertEquals(isStrict ? null : global_object, receiver)
   assertEquals(44, Function.prototype.call.call(f, 2, 21, 23))
   assertEquals(2, receiver.valueOf())
   receiver = 333
@@ -1616,11 +2044,11 @@
   assertEquals(32, Function.prototype.apply.call(ff, {}, [20]))
   assertEquals(o, receiver)
 
-  Object.freeze(f)
+  var f = CreateFrozen({}, callTrap)
   receiver = 333
   assertEquals(42, f(11, 31))
-// TODO(rossberg): unrelated bug: this does not succeed for optimized code.
-// assertEquals(isStrict ? undefined : global, receiver)
+  // TODO(rossberg): unrelated bug: this does not succeed for optimized code.
+  // assertEquals(isStrict ? undefined : global, receiver)
   receiver = 333
   assertEquals(42, Function.prototype.call.call(f, o, 20, 22))
   assertEquals(o, receiver)
@@ -1653,21 +2081,18 @@
   receiver = this; return x + y
 }))
 
-var p = Proxy.createFunction({fix: function() {return {}}}, function(x, y) {
+TestCall(false, CreateFrozen({}, function(x, y) {
   receiver = this; return x + y
-})
-TestCall(false, p)
-Object.freeze(p)
-TestCall(false, p)
+}))
 
 
 function TestCallThrow(callTrap) {
-  var f = Proxy.createFunction({fix: function() {return {}}}, callTrap)
+  var f = Proxy.createFunction({}, callTrap)
   assertThrows(function(){ f(11) }, "myexn")
   assertThrows(function(){ Function.prototype.call.call(f, {}, 2) }, "myexn")
   assertThrows(function(){ Function.prototype.apply.call(f, {}, [1]) }, "myexn")
 
-  Object.freeze(f)
+  var f = CreateFrozen({}, callTrap)
   assertThrows(function(){ f(11) }, "myexn")
   assertThrows(function(){ Function.prototype.call.call(f, {}, 2) }, "myexn")
   assertThrows(function(){ Function.prototype.apply.call(f, {}, [1]) }, "myexn")
@@ -1675,8 +2100,256 @@
 
 TestCallThrow(function() { throw "myexn" })
 TestCallThrow(Proxy.createFunction({}, function() { throw "myexn" }))
+TestCallThrow(CreateFrozen({}, function() { throw "myexn" }))
 
-var p = Proxy.createFunction(
-  {fix: function() {return {}}}, function() { throw "myexn" })
-Object.freeze(p)
-TestCallThrow(p)
+
+
+// Construction (new).
+
+var prototype = {}
+var receiver
+
+var handlerWithPrototype = {
+  fix: function() { return {prototype: prototype} },
+  get: function(r, n) { assertEquals("prototype", n); return prototype }
+}
+
+var handlerSansPrototype = {
+  fix: function() { return {} },
+  get: function(r, n) { assertEquals("prototype", n); return undefined }
+}
+
+function ReturnUndef(x, y) { "use strict"; receiver = this; this.sum = x + y }
+function ReturnThis(x, y) { "use strict"; receiver = this; this.sum = x + y; return this }
+function ReturnNew(x, y) { "use strict"; receiver = this; return {sum: x + y} }
+function ReturnNewWithProto(x, y) {
+  "use strict";
+  receiver = this;
+  var result = Object.create(prototype)
+  result.sum = x + y
+  return result
+}
+
+function TestConstruct(proto, constructTrap) {
+  TestConstruct2(proto, constructTrap, handlerWithPrototype)
+  TestConstruct2(proto, constructTrap, handlerSansPrototype)
+}
+
+function TestConstruct2(proto, constructTrap, handler) {
+  var f = Proxy.createFunction(handler, function() {}, constructTrap)
+  var o = new f(11, 31)
+  // TODO(rossberg): doesn't hold, due to unrelated bug.
+  // assertEquals(undefined, receiver)
+  assertEquals(42, o.sum)
+  assertSame(proto, Object.getPrototypeOf(o))
+
+  var f = CreateFrozen(handler, function() {}, constructTrap)
+  var o = new f(11, 32)
+  // TODO(rossberg): doesn't hold, due to unrelated bug.
+  // assertEquals(undefined, receiver)
+  assertEquals(43, o.sum)
+  assertSame(proto, Object.getPrototypeOf(o))
+}
+
+TestConstruct(Object.prototype, ReturnNew)
+TestConstruct(prototype, ReturnNewWithProto)
+
+TestConstruct(Object.prototype, Proxy.createFunction({}, ReturnNew))
+TestConstruct(prototype, Proxy.createFunction({}, ReturnNewWithProto))
+
+TestConstruct(Object.prototype, CreateFrozen({}, ReturnNew))
+TestConstruct(prototype, CreateFrozen({}, ReturnNewWithProto))
+
+
+function TestConstructFromCall(proto, returnsThis, callTrap) {
+  TestConstructFromCall2(proto, returnsThis, callTrap, handlerWithPrototype)
+  TestConstructFromCall2(proto, returnsThis, callTrap, handlerSansPrototype)
+}
+
+function TestConstructFromCall2(proto, returnsThis, callTrap, handler) {
+  var f = Proxy.createFunction(handler, callTrap)
+  var o = new f(11, 31)
+  if (returnsThis) assertEquals(o, receiver)
+  assertEquals(42, o.sum)
+  assertSame(proto, Object.getPrototypeOf(o))
+
+  var f = CreateFrozen(handler, callTrap)
+  var o = new f(11, 32)
+  if (returnsThis) assertEquals(o, receiver)
+  assertEquals(43, o.sum)
+  assertSame(proto, Object.getPrototypeOf(o))
+}
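+
+// `new` on a function proxy that has no construct trap falls back to the call
+// trap; the cases below vary where the resulting object's prototype comes
+// from and whether the trap's return value replaces the freshly allocated
+// receiver.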
+
+TestConstructFromCall(Object.prototype, true, ReturnUndef)
+TestConstructFromCall(Object.prototype, true, ReturnThis)
+TestConstructFromCall(Object.prototype, false, ReturnNew)
+TestConstructFromCall(prototype, false, ReturnNewWithProto)
+
+TestConstructFromCall(Object.prototype, true, Proxy.createFunction({}, ReturnUndef))
+TestConstructFromCall(Object.prototype, true, Proxy.createFunction({}, ReturnThis))
+TestConstructFromCall(Object.prototype, false, Proxy.createFunction({}, ReturnNew))
+TestConstructFromCall(prototype, false, Proxy.createFunction({}, ReturnNewWithProto))
+
+TestConstructFromCall(Object.prototype, true, CreateFrozen({}, ReturnUndef))
+TestConstructFromCall(Object.prototype, true, CreateFrozen({}, ReturnThis))
+TestConstructFromCall(Object.prototype, false, CreateFrozen({}, ReturnNew))
+TestConstructFromCall(prototype, false, CreateFrozen({}, ReturnNewWithProto))
+
+ReturnUndef.prototype = prototype
+ReturnThis.prototype = prototype
+ReturnNew.prototype = prototype
+ReturnNewWithProto.prototype = prototype
+
+TestConstructFromCall(prototype, true, ReturnUndef)
+TestConstructFromCall(prototype, true, ReturnThis)
+TestConstructFromCall(Object.prototype, false, ReturnNew)
+TestConstructFromCall(prototype, false, ReturnNewWithProto)
+
+TestConstructFromCall(Object.prototype, true, Proxy.createFunction({}, ReturnUndef))
+TestConstructFromCall(Object.prototype, true, Proxy.createFunction({}, ReturnThis))
+TestConstructFromCall(Object.prototype, false, Proxy.createFunction({}, ReturnNew))
+TestConstructFromCall(prototype, false, Proxy.createFunction({}, ReturnNewWithProto))
+
+TestConstructFromCall(prototype, true, Proxy.createFunction(handlerWithPrototype, ReturnUndef))
+TestConstructFromCall(prototype, true, Proxy.createFunction(handlerWithPrototype, ReturnThis))
+TestConstructFromCall(Object.prototype, false, Proxy.createFunction(handlerWithPrototype, ReturnNew))
+TestConstructFromCall(prototype, false, Proxy.createFunction(handlerWithPrototype, ReturnNewWithProto))
+
+TestConstructFromCall(prototype, true, CreateFrozen(handlerWithPrototype, ReturnUndef))
+TestConstructFromCall(prototype, true, CreateFrozen(handlerWithPrototype, ReturnThis))
+TestConstructFromCall(Object.prototype, false, CreateFrozen(handlerWithPrototype, ReturnNew))
+TestConstructFromCall(prototype, false, CreateFrozen(handlerWithPrototype, ReturnNewWithProto))
+
+
+function TestConstructThrow(trap) {
+  TestConstructThrow2(Proxy.createFunction({fix: function() {return {}}}, trap))
+  TestConstructThrow2(Proxy.createFunction({fix: function() {return {}}},
+    function() {}, trap))
+}
+
+function TestConstructThrow2(f) {
+  assertThrows(function(){ new f(11) }, "myexn")
+  Object.freeze(f)
+  assertThrows(function(){ new f(11) }, "myexn")
+}
+
+TestConstructThrow(function() { throw "myexn" })
+TestConstructThrow(Proxy.createFunction({}, function() { throw "myexn" }))
+TestConstructThrow(CreateFrozen({}, function() { throw "myexn" }))
+
+
+
+// Getters and setters.
+
+var value
+var receiver
+
+function TestAccessorCall(getterCallTrap, setterCallTrap) {
+  var handler = {fix: function() { return {} }}
+  var pgetter = Proxy.createFunction(handler, getterCallTrap)
+  var psetter = Proxy.createFunction(handler, setterCallTrap)
+
+  var o = {}
+  var oo = Object.create(o)
+  Object.defineProperty(o, "a", {get: pgetter, set: psetter})
+  Object.defineProperty(o, "b", {get: pgetter})
+  Object.defineProperty(o, "c", {set: psetter})
+  Object.defineProperty(o, "3", {get: pgetter, set: psetter})
+  Object.defineProperty(oo, "a", {value: 43})
+
+  receiver = ""
+  assertEquals(42, o.a)
+  assertSame(o, receiver)
+  receiver = ""
+  assertEquals(42, o.b)
+  assertSame(o, receiver)
+  receiver = ""
+  assertEquals(undefined, o.c)
+  assertEquals("", receiver)
+  receiver = ""
+  assertEquals(42, o["a"])
+  assertSame(o, receiver)
+  receiver = ""
+  assertEquals(42, o[3])
+  assertSame(o, receiver)
+
+  receiver = ""
+  assertEquals(43, oo.a)
+  assertEquals("", receiver)
+  receiver = ""
+  assertEquals(42, oo.b)
+  assertSame(o, receiver)
+  receiver = ""
+  assertEquals(undefined, oo.c)
+  assertEquals("", receiver)
+  receiver = ""
+  assertEquals(43, oo["a"])
+  assertEquals("", receiver)
+  receiver = ""
+  assertEquals(42, oo[3])
+  assertSame(o, receiver)
+
+  receiver = ""
+  assertEquals(50, o.a = 50)
+  assertSame(o, receiver)
+  assertEquals(50, value)
+  receiver = ""
+  assertEquals(51, o.b = 51)
+  assertEquals("", receiver)
+  assertEquals(50, value)  // no setter
+  assertThrows(function() { "use strict"; o.b = 51 }, TypeError)
+  receiver = ""
+  assertEquals(52, o.c = 52)
+  assertSame(o, receiver)
+  assertEquals(52, value)
+  receiver = ""
+  assertEquals(53, o["a"] = 53)
+  assertSame(o, receiver)
+  assertEquals(53, value)
+  receiver = ""
+  assertEquals(54, o[3] = 54)
+  assertSame(o, receiver)
+  assertEquals(54, value)
+
+  value = 0
+  receiver = ""
+  assertEquals(60, oo.a = 60)
+  assertEquals("", receiver)
+  assertEquals(0, value)  // oo has own 'a'
+  assertEquals(61, oo.b = 61)
+  assertSame("", receiver)
+  assertEquals(0, value)  // no setter
+  assertThrows(function() { "use strict"; oo.b = 61 }, TypeError)
+  receiver = ""
+  assertEquals(62, oo.c = 62)
+  assertSame(oo, receiver)
+  assertEquals(62, value)
+  receiver = ""
+  assertEquals(63, oo["c"] = 63)
+  assertSame(oo, receiver)
+  assertEquals(63, value)
+  receiver = ""
+  assertEquals(64, oo[3] = 64)
+  assertSame(oo, receiver)
+  assertEquals(64, value)
+}
+
+TestAccessorCall(
+  function() { receiver = this; return 42 },
+  function(x) { receiver = this; value = x }
+)
+
+TestAccessorCall(
+  function() { "use strict"; receiver = this; return 42 },
+  function(x) { "use strict"; receiver = this; value = x }
+)
+
+TestAccessorCall(
+  Proxy.createFunction({}, function() { receiver = this; return 42 }),
+  Proxy.createFunction({}, function(x) { receiver = this; value = x })
+)
+
+TestAccessorCall(
+  CreateFrozen({}, function() { receiver = this; return 42 }),
+  CreateFrozen({}, function(x) { receiver = this; value = x })
+)
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index 027da58..941e0e8 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -35,6 +35,11 @@
 regress/regress-1119: FAIL
 
 ##############################################################################
+
+# NewGC: BUG(1719) slow to collect arrays over several contexts.
+regress/regress-524: SKIP
+
+##############################################################################
 # Too slow in debug mode with --stress-opt
 compiler/regress-stacktrace-methods: PASS, SKIP if $mode == debug
 compiler/regress-funcaller: PASS, SKIP if $mode == debug
@@ -60,7 +65,6 @@
 debug-liveedit-check-stack: SKIP
 debug-liveedit-patch-positions-replace: SKIP
 
-
 ##############################################################################
 [ $arch == arm ]
 
diff --git a/test/mjsunit/regress/regress-1170.js b/test/mjsunit/regress/regress-1170.js
index 95684c5..66ed9f2 100644
--- a/test/mjsunit/regress/regress-1170.js
+++ b/test/mjsunit/regress/regress-1170.js
@@ -49,7 +49,7 @@
   exception = true;
   assertTrue(/TypeError/.test(e));
 }
-assertTrue(exception);
+assertFalse(exception);
 
 exception = false;
 try {
diff --git a/test/mjsunit/regress/regress-1213575.js b/test/mjsunit/regress/regress-1213575.js
index 9d82064..f3a11db 100644
--- a/test/mjsunit/regress/regress-1213575.js
+++ b/test/mjsunit/regress/regress-1213575.js
@@ -25,17 +25,16 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Make sure that a const definition always
-// conflicts with a defined setter. This avoid
-// trying to pass 'the hole' to the setter.
+// Make sure that a const definition does not try
+// to pass 'the hole' to a defined setter.
 
-this.__defineSetter__('x', function(value) { assertTrue(false); });
+this.__defineSetter__('x', function(value) { assertTrue(value === 1); });
 
 var caught = false;
 try {
-  eval('const x');
+  eval('const x = 1');
 } catch(e) {
   assertTrue(e instanceof TypeError);
   caught = true;
 }
-assertTrue(caught);
+assertFalse(caught);
diff --git a/test/mjsunit/regress/regress-1217.js b/test/mjsunit/regress/regress-1217.js
new file mode 100644
index 0000000..6530549
--- /dev/null
+++ b/test/mjsunit/regress/regress-1217.js
@@ -0,0 +1,50 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Check that RegExp.prototype is itself a RegExp object.
+
+var proto = RegExp.prototype;
+assertEquals("[object RegExp]", Object.prototype.toString.call(proto));
+
+assertEquals("", proto.source);
+assertEquals(false, proto.global);
+assertEquals(false, proto.multiline);
+assertEquals(false, proto.ignoreCase);
+assertEquals(0, proto.lastIndex);
+
+assertEquals("/(?:)/", proto.toString());
+
+var execResult = proto.exec("argle");
+assertEquals(1, execResult.length);
+assertEquals("", execResult[0]);
+assertEquals("argle", execResult.input);
+assertEquals(0, execResult.index);
+
+assertTrue(proto.test("argle"));
+
+// We disallow re-compiling the RegExp.prototype object.
+assertThrows(function(){ proto.compile("something"); }, TypeError);
diff --git a/test/mjsunit/regress/regress-1415.js b/test/mjsunit/regress/regress-1415.js
new file mode 100644
index 0000000..f993e9b
--- /dev/null
+++ b/test/mjsunit/regress/regress-1415.js
@@ -0,0 +1,42 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Surrogate pair range.
+// U+D800
+assertThrows(function(){ decodeURIComponent("%ED%A0%80"); }, URIError);
+// U+DBFF
+assertThrows(function(){ decodeURIComponent("%ED%AF%BF"); }, URIError);
+// U+DC00
+assertThrows(function(){ decodeURIComponent("%ED%B0%80"); }, URIError);
+// U+DFFF
+assertThrows(function(){ decodeURIComponent("%ED%BF%BF"); }, URIError);
+
+// Overlong encodings
+// U+007F in two bytes.
+assertThrows(function(){ decodeURIComponent("%C1%BF"); }, URIError);
+// U+07FF in three bytes.
+assertThrows(function(){ decodeURIComponent("%E0%9F%BF"); }, URIError);
diff --git a/test/mjsunit/regress/regress-1639-2.js b/test/mjsunit/regress/regress-1639-2.js
new file mode 100644
index 0000000..c439dd8
--- /dev/null
+++ b/test/mjsunit/regress/regress-1639-2.js
@@ -0,0 +1,93 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+function sendCommand(state, cmd) {
+  // Get the debug command processor in paused state.
+  var dcp = state.debugCommandProcessor(false);
+  var request = JSON.stringify(cmd);
+  var response = dcp.processDebugJSONRequest(request);
+}
+
+var state = 0;
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Break) {
+      var line = event_data.sourceLineText();
+      print('break: ' + line);
+      print('event data: ' + event_data.toJSONProtocol());
+      print();
+      assertEquals('// BREAK', line.substr(-8),
+                   "should not break outside evaluate");
+
+      switch (state) {
+      case 0:
+        state = 1;
+        // While in the debugger and stepping through a set of instructions
+        // executed in the evaluate command, the stepping must stop at the end
+        // of that set of instructions and not step further into native
+        // debugger code.
+        sendCommand(exec_state, {
+          seq : 0,
+          type : "request",
+          command : "evaluate",
+          arguments : {
+            'expression' : 'print("A"); debugger; print("B"); // BREAK',
+            'global' : true
+          }
+        });
+        break;
+      case 1:
+        sendCommand(exec_state, {
+          seq : 0,
+          type : "request",
+          command : "continue",
+          arguments : {
+            stepaction : "next"
+          }
+        });
+        break;
+      }
+    }
+  } catch (e) {
+    print(e);
+  }
+}
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+function a() {
+} // BREAK
+
+// Set a break point and call to invoke the debug event listener.
+Debug.setBreakPoint(a, 0, 0);
+a();
diff --git a/test/mjsunit/regress/regress-1692.js b/test/mjsunit/regress/regress-1692.js
new file mode 100644
index 0000000..06bd66c
--- /dev/null
+++ b/test/mjsunit/regress/regress-1692.js
@@ -0,0 +1,89 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that Object.prototype.propertyIsEnumerable handles array indices
+// correctly.
+
+var p = Object.create({}, {
+  a : { value : 42, enumerable : true },
+  b : { value : 42, enumerable : false },
+  1 : { value : 42, enumerable : true },
+  2 : { value : 42, enumerable : false },
+  f : { get: function(){}, enumerable: true },
+  g : { get: function(){}, enumerable: false },
+  11 : { get: function(){}, enumerable: true },
+  12 : { get: function(){}, enumerable: false }
+});
+var o = Object.create(p, {
+  c : { value : 42, enumerable : true },
+  d : { value : 42, enumerable : false },
+  3 : { value : 42, enumerable : true },
+  4 : { value : 42, enumerable : false },
+  h : { get: function(){}, enumerable: true },
+  k : { get: function(){}, enumerable: false },
+  13 : { get: function(){}, enumerable: true },
+  14 : { get: function(){}, enumerable: false }
+});
+
+// Inherited properties are ignored.
+assertFalse(o.propertyIsEnumerable("a"));
+assertFalse(o.propertyIsEnumerable("b"));
+assertFalse(o.propertyIsEnumerable("1"));
+assertFalse(o.propertyIsEnumerable("2"));
+
+// Own properties.
+assertTrue(o.propertyIsEnumerable("c"));
+assertFalse(o.propertyIsEnumerable("d"));
+assertTrue(o.propertyIsEnumerable("3"));
+assertFalse(o.propertyIsEnumerable("4"));
+
+// Inherited accessors.
+assertFalse(o.propertyIsEnumerable("f"));
+assertFalse(o.propertyIsEnumerable("g"));
+assertFalse(o.propertyIsEnumerable("11"));
+assertFalse(o.propertyIsEnumerable("12"));
+
+// Own accessors.
+assertTrue(o.propertyIsEnumerable("h"));
+assertFalse(o.propertyIsEnumerable("k"));
+assertTrue(o.propertyIsEnumerable("13"));
+assertFalse(o.propertyIsEnumerable("14"));
+
+// Nonexisting properties.
+assertFalse(o.propertyIsEnumerable("xxx"));
+assertFalse(o.propertyIsEnumerable("999"));
+
+// String object properties.
+var o = Object("string");
+// Non-string property on String object.
+o[10] = 42;
+assertTrue(o.propertyIsEnumerable(10));
+assertFalse(o.propertyIsEnumerable(0));
+
+// Fast elements.
+var o = [1,2,3,4,5];
+assertTrue(o.propertyIsEnumerable(3));
diff --git a/test/mjsunit/regress/regress-1708.js b/test/mjsunit/regress/regress-1708.js
new file mode 100644
index 0000000..ab50e07
--- /dev/null
+++ b/test/mjsunit/regress/regress-1708.js
@@ -0,0 +1,63 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Regression test of a very rare corner case where left-trimming an
+// array caused invalid marking bit patterns on lazily swept pages.
+
+// Flags: --expose-gc --noincremental-marking --max-new-space-size 1000
+
+(function() {
+  var head = new Array(1);
+  var tail = head;
+
+  // Fill heap to increase old-space size and trigger lazy sweeping on
+  // some of the old-space pages.
+  for (var i = 0; i < 200; i++) {
+    tail[1] = new Array(1000);
+    tail = tail[1];
+  }
+  array = new Array(100);
+  gc(); gc();
+
+  // At this point "array" should have been promoted to old-space and be
+  // located in a lazily swept page with intact marking bits. Now shift
+  // the array to trigger left-trimming operations.
+  assertEquals(100, array.length);
+  for (var i = 0; i < 50; i++) {
+    array.shift();
+  }
+  assertEquals(50, array.length);
+
+  // At this point "array" should have been trimmed from the left with
+  // marking bits being correctly transferred to the new object start.
+  // Scavenging operations cause lazy sweeping to advance and verify
+  // that marking bit patterns are still sane.
+  for (var i = 0; i < 200; i++) {
+    tail[1] = new Array(1000);
+    tail = tail[1];
+  }
+})();
diff --git a/test/mjsunit/regress/regress-1711.js b/test/mjsunit/regress/regress-1711.js
new file mode 100644
index 0000000..15591b1
--- /dev/null
+++ b/test/mjsunit/regress/regress-1711.js
@@ -0,0 +1,38 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// string.split needs to evaluate the separator's toString even if limit
+// is 0 because toString may have side effects.
+
+var side_effect = false;
+var separator = new Object();
+separator.toString = function() {
+  side_effect = true;
+  return undefined;
+}
+'subject'.split(separator, 0);
+assertTrue(side_effect);
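In ES5's String.prototype.split the separator is coerced with ToString before the
limit is checked, so the coercion is observable even when limit is 0 and the result
is empty. A small companion sketch (illustrative only; assumes mjsunit's assertThrows):

var sep = { toString: function() { throw "boom"; } };
// The separator's toString runs despite limit 0, so the call throws.
assertThrows(function() { "subject".split(sep, 0); });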
diff --git a/test/mjsunit/regress/regress-1713.js b/test/mjsunit/regress/regress-1713.js
new file mode 100644
index 0000000..0af1144
--- /dev/null
+++ b/test/mjsunit/regress/regress-1713.js
@@ -0,0 +1,127 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --always-compact --expose-gc
+
+var O = { get f() { return 0; } };
+
+var CODE = [];
+
+var R = [];
+
+function Allocate4Kb(N) {
+  var arr = [];
+  do {arr.push(new Array(1024));} while (--N > 0);
+  return arr;
+}
+
+function AllocateXMb(X) {
+  return Allocate4Kb((1024 * X) / 4);
+}
+
+function Node(v, next) { this.v = v; this.next = next; }
+
+Node.prototype.execute = function (O) {
+  var n = this;
+  while (n.next !== null) n = n.next;
+  n.v(O);
+};
+
+function LongList(N, x) {
+  if (N == 0) return new Node(x, null);
+  return new Node(new Array(1024), LongList(N - 1, x));
+}
+
+var L = LongList(1024, function (O) {
+  for (var i = 0; i < 5; i++) O.f;
+});
+
+
+
+function Incremental(O, x) {
+  if (!x) {
+    return;
+  }
+  function CreateCode(i) {
+    var f = new Function("return O.f_" + i);
+    CODE.push(f);
+    f(); // compile
+    f(); // compile
+    f(); // compile
+  }
+
+  for (var i = 0; i < 1e4; i++) CreateCode(i);
+  gc();
+  gc();
+  gc();
+
+  print(">>> 1 <<<");
+
+  L.execute(O);
+
+  try {} catch (e) {}
+
+  L = null;
+  print(">>> 2 <<<");
+  AllocateXMb(8);
+ //rint("1");
+ //llocateXMb(8);
+ //rint("1");
+ //llocateXMb(8);
+
+}
+
+function foo(O, x) {
+  Incremental(O, x);
+
+  print('f');
+
+  for (var i = 0; i < 5; i++) O.f;
+
+
+  print('g');
+
+  bar(x);
+}
+
+function bar(x) {
+  if (!x) return;
+  %DeoptimizeFunction(foo);
+  AllocateXMb(8);
+  AllocateXMb(8);
+}
+
+var O1 = {};
+var O2 = {};
+var O3 = {};
+var O4 = {f:0};
+
+foo(O1, false);
+foo(O2, false);
+foo(O3, false);
+%OptimizeFunctionOnNextCall(foo);
+foo(O4, true);
diff --git a/test/mjsunit/regress/regress-1748.js b/test/mjsunit/regress/regress-1748.js
new file mode 100644
index 0000000..e287e55
--- /dev/null
+++ b/test/mjsunit/regress/regress-1748.js
@@ -0,0 +1,35 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that /^/ only matches at beginning of string.
+// A bug in the x64 implementation caused it to match when executing the
+// RegExp on a part of a string that starts at a multiple of 256.
+
+var str = Array(10000).join("X");
+str.replace(/^|X/g, function(m, i, s) {
+  if (i > 0) assertEquals("X", m, "at position 0x" + i.toString(16));
+});
\ No newline at end of file
diff --git a/test/mjsunit/regress/regress-877615.js b/test/mjsunit/regress/regress-877615.js
index d35aba6..bec5a4d 100644
--- a/test/mjsunit/regress/regress-877615.js
+++ b/test/mjsunit/regress/regress-877615.js
@@ -25,13 +25,13 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-Number.prototype.toLocaleString = function() { return 'invalid'};
-assertEquals([1].toLocaleString(), 'invalid');  // invalid
+Number.prototype.toLocaleString = function() { return 'invalid'; };
+assertEquals('invalid', [1].toLocaleString());  // invalid
 
 Number.prototype.toLocaleString = 'invalid';
-assertEquals([1].toLocaleString(), '1');  // 1
+assertThrows(function() { [1].toLocaleString(); });  // Not callable.
 
+delete Number.prototype.toLocaleString;
 Number.prototype.toString = function() { return 'invalid' };
-assertEquals([1].toLocaleString(), '1');  // 1
-assertEquals([1].toString(), '1');        // 1
-
+assertEquals('invalid', [1].toLocaleString());  // Uses ToObject on elements.
+assertEquals('1', [1].toString());              // Uses ToString directly on elements.
diff --git a/test/mjsunit/regress/regress-94873.js b/test/mjsunit/regress/regress-94873.js
new file mode 100644
index 0000000..41ca992
--- /dev/null
+++ b/test/mjsunit/regress/regress-94873.js
@@ -0,0 +1,78 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug;
+
+function sendCommand(state, cmd) {
+  // Get the debug command processor in paused state.
+  var dcp = state.debugCommandProcessor(false);
+  var request = JSON.stringify(cmd);
+  var response = dcp.processDebugJSONRequest(request);
+  return JSON.parse(response);
+}
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Break) {
+      var line = event_data.sourceLineText();
+      print('break: ' + line);
+
+      var frame = sendCommand(exec_state, {
+        seq: 0,
+        type: "request",
+        command: "frame"
+      });
+
+      sendCommand(exec_state, {
+        seq: 0,
+        type: "request",
+        command: "evaluate",
+        arguments: {
+          expression: "obj.x.toString()",
+          additional_context: [{
+            name: "obj",
+            handle: frame.body.receiver.ref
+          }]
+        }
+      });
+    }
+  } catch (e) {
+    print(e);
+  }
+}
+
+Debug.setListener(listener);
+
+function a(x, y) {
+  this.x = x;
+  this.y = y;
+}
+
+Debug.setBreakPoint(a, 0, 0);
+new a(1, 2);
\ No newline at end of file
diff --git a/test/mjsunit/regress/regress-98773.js b/test/mjsunit/regress/regress-98773.js
new file mode 100644
index 0000000..eb24eb5
--- /dev/null
+++ b/test/mjsunit/regress/regress-98773.js
@@ -0,0 +1,39 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Calling Array.sort on an external array is not supposed to crash.
+
+var array = new Int16Array(23);
+array[7] = 7; array[9] = 9;
+assertEquals(23, array.length);
+assertEquals(7, array[7]);
+assertEquals(9, array[9]);
+
+Array.prototype.sort.call(array);
+assertEquals(23, array.length);
+assertEquals(7, array[21]);
+assertEquals(9, array[22]);
diff --git a/test/mjsunit/regress/regress-deopt-gc.js b/test/mjsunit/regress/regress-deopt-gc.js
index 7b7c29a..a74e2c5 100644
--- a/test/mjsunit/regress/regress-deopt-gc.js
+++ b/test/mjsunit/regress/regress-deopt-gc.js
@@ -42,7 +42,7 @@
   // Make sure we don't inline this function
   try { var a = 42; } catch(o) {};
   %DeoptimizeFunction(opt_me);
-  gc(true);
+  gc();
 }
 
 
diff --git a/test/mjsunit/regress/short-circuit.js b/test/mjsunit/regress/short-circuit.js
new file mode 100644
index 0000000..25363d6
--- /dev/null
+++ b/test/mjsunit/regress/short-circuit.js
@@ -0,0 +1,32 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var arr = [];
+
+for (var i = 0; i < 28000; i++) {
+  arr.push(new RegExp("prefix" + i.toString() + i.toString() + i.toString()));
+}
diff --git a/test/mjsunit/string-slices-regexp.js b/test/mjsunit/string-slices-regexp.js
index a8cadae..df01574 100644
--- a/test/mjsunit/string-slices-regexp.js
+++ b/test/mjsunit/string-slices-regexp.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
diff --git a/test/mjsunit/string-slices.js b/test/mjsunit/string-slices.js
index 8cc1f81..7c40229 100755
--- a/test/mjsunit/string-slices.js
+++ b/test/mjsunit/string-slices.js
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -189,11 +189,16 @@
 assertEquals("\u03B2\u03B3\u03B4\u03B5\u03B4\u03B5\u03B6\u03B7",
     utf.substring(5,1) + utf.substring(3,7));
 
-/*
 // Externalizing strings.
-var a = "123456789qwertyuiopasdfghjklzxcvbnm";
-var b = a.slice(1,-1);
+var a = "123456789" + "qwertyuiopasdfghjklzxcvbnm";
+var b = "23456789qwertyuiopasdfghjklzxcvbn"
 assertEquals(a.slice(1,-1), b);
-externalizeString(a);
+
+assertTrue(isAsciiString(a));
+externalizeString(a, true);
+assertFalse(isAsciiString(a));
+
 assertEquals(a.slice(1,-1), b);
-*/
+assertTrue(/3456789qwe/.test(a));
+assertEquals(5, a.indexOf("678"));
+assertEquals("12345", a.split("6")[0]);
diff --git a/test/mjsunit/undeletable-functions.js b/test/mjsunit/undeletable-functions.js
index 04fd060..bbb798f 100644
--- a/test/mjsunit/undeletable-functions.js
+++ b/test/mjsunit/undeletable-functions.js
@@ -76,6 +76,8 @@
   "execScript"];
 CheckEcmaSemantics(this, array, "Global");
 CheckReadOnlyAttr(this, "Infinity");
+CheckReadOnlyAttr(this, "NaN");
+CheckReadOnlyAttr(this, "undefined");
 
 array = ["exec", "test", "toString", "compile"];
 CheckEcmaSemantics(RegExp.prototype, array, "RegExp prototype");
@@ -189,7 +191,7 @@
   assertFalse(deleted, "delete operator returned true: " + prop);
   assertTrue(type.hasOwnProperty(prop), "not there after delete: " + prop);
   type[prop] = "foo";
-  assertEquals("foo", type[prop], "overwritable: " + prop);
+  assertEquals(old, type[prop], "overwritable: " + prop);
 }
 
 print("OK");
diff --git a/test/mozilla/mozilla.status b/test/mozilla/mozilla.status
index 3a27130..6a5c086 100644
--- a/test/mozilla/mozilla.status
+++ b/test/mozilla/mozilla.status
@@ -1,4 +1,4 @@
-# Copyright 2009 the V8 project authors. All rights reserved.
+# Copyright 2011 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -69,7 +69,6 @@
 ecma_3/Date/15.9.3.2-1: SKIP
 js1_2/function/Number: SKIP
 
-
 ##################### SLOW TESTS #####################
 
 # This takes a long time to run (~100 seconds). It should only be run
@@ -227,7 +226,7 @@
 ecma/String/15.5.4.12-5: FAIL_OK
 
 # Creates a linked list of arrays until we run out of memory or timeout.
-js1_5/Regress/regress-312588: FAIL || TIMEOUT
+js1_5/Regress/regress-312588: SKIP
 
 
 # Runs out of memory because it compiles huge functions.
@@ -619,6 +618,10 @@
 # We do not correctly handle assignments within "with"
 /ecma_3/Statements/12.10-01: FAIL
 
+# We do not throw an exception when a const is redeclared.
+# (We only fail section 1 of the test.)
+js1_5/Regress/regress-103602: FAIL
+
 ##################### MOZILLA EXTENSION TESTS #####################
 
 ecma/extensions/15.1.2.1-1: FAIL_OK
diff --git a/test/preparser/strict-identifiers.pyt b/test/preparser/strict-identifiers.pyt
index 72808e2..aa3d521 100644
--- a/test/preparser/strict-identifiers.pyt
+++ b/test/preparser/strict-identifiers.pyt
@@ -138,6 +138,38 @@
   var x = {set foo($id) { }};
 """)
 
+label_normal = Template("label-normal-$id", """
+  $id: '';
+""")
+
+label_strict = StrictTemplate("label-strict-$id", """
+  $id: '';
+""")
+
+break_normal = Template("break-normal-$id", """
+  for (;;) {
+    break $id;
+  }
+""")
+
+break_strict = StrictTemplate("break-strict-$id", """
+  for (;;) {
+    break $id;
+  }
+""")
+
+continue_normal = Template("continue-normal-$id", """
+  for (;;) {
+    continue $id;
+  }
+""")
+
+continue_strict = StrictTemplate("continue-strict-$id", """
+  for (;;) {
+    continue $id;
+  }
+""")
+
 non_strict_use = Template("nonstrict-$id", """
   var $id = 42;
   $id++;
@@ -162,6 +194,7 @@
   function $id($id) { }
   x = {$id: 42};
   x = {get $id() {}, set $id(value) {}};
+  $id: '';
 """)
 
 identifier_name_source = """
@@ -197,6 +230,12 @@
   prefix_var({"id": id, "op":"--", "opname":"dec"}, "strict_lhs_prefix")
   postfix_var({"id": id, "op":"++", "opname":"inc"}, "strict_lhs_postfix")
   postfix_var({"id": id, "op":"--", "opname":"dec"}, "strict_lhs_postfix")
+  label_normal({"id": id}, None)
+  label_strict({"id": id}, None)
+  break_normal({"id": id}, None)
+  break_strict({"id": id}, None)
+  continue_normal({"id": id}, None)
+  continue_strict({"id": id}, None)
   non_strict_use({"id": id}, None)
 
 
@@ -205,10 +244,13 @@
 for reserved_word in reserved_words + strict_reserved_words:
   if (reserved_word in strict_reserved_words):
     message = "strict_reserved_word"
+    label_message = None
   elif (reserved_word == "const"):
     message = "unexpected_token"
+    label_message = message
   else:
     message = "reserved_word"
+    label_message = message
   arg_name_own({"id":reserved_word}, message)
   arg_name_nested({"id":reserved_word}, message)
   setter_arg({"id": reserved_word}, message)
@@ -225,6 +267,19 @@
   read_var({"id": reserved_word}, message)
   identifier_name({"id": reserved_word}, None);
   identifier_name_strict({"id": reserved_word}, None);
+  label_normal({"id": reserved_word}, label_message)
+  break_normal({"id": reserved_word}, label_message)
+  continue_normal({"id": reserved_word}, label_message)
+  if (reserved_word == "const"):
+    # The error message for this case is different because
+    # ParseExpressionOrLabelledStatement will try to parse this as an expression
+    # first, effectively disallowing the use in ParseVariableDeclarations, i.e.
+    # the preparser never sees that 'const' was intended to be a label.
+    label_strict({"id": reserved_word}, "strict_const")
+  else:
+    label_strict({"id": reserved_word}, message)
+  break_strict({"id": reserved_word}, message)
+  continue_strict({"id": reserved_word}, message)
 
 
 # Future reserved words in strict mode behave like normal identifiers
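The label templates added above exercise the rule that future reserved words cannot be
used as labels in strict mode. A hedged JavaScript sketch of the intended behavior (the
identifier 'interface' is just one example; assumes mjsunit's assertThrows):

interface: 0;  // accepted as a label in sloppy code
assertThrows(function() { eval('"use strict"; interface: 0;'); }, SyntaxError);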
diff --git a/test/sputnik/sputnik.status b/test/sputnik/sputnik.status
index 868509d..99db598 100644
--- a/test/sputnik/sputnik.status
+++ b/test/sputnik/sputnik.status
@@ -52,6 +52,9 @@
 S15.10.6.2_A12: FAIL_OK
 S15.10.6.3_A1_T16: FAIL_OK
 
+# Sputnik tests (r97) assume RegExp.prototype is an Object, not a RegExp.
+S15.10.6_A2: FAIL_OK
+
 # We are silent in some regexp cases where the spec wants us to give
 # errors, for compatibility.
 S15.10.2.11_A1_T2: FAIL
@@ -176,6 +179,19 @@
 S15.5.4.14_A1_T3: FAIL_OK
 S15.5.4.15_A1_T3: FAIL_OK
 
+# NaN, Infinity and undefined are read-only according to ES5.
+S15.1.1.1_A2_T1: FAIL_OK  # NaN
+S15.1.1.1_A2_T2: FAIL_OK  # NaN
+S15.1.1.2_A2_T1: FAIL_OK  # Infinity
+# S15.1.1.2_A2_T2 would fail if it weren't bogus in r97. sputnik bug #45.
+S15.1.1.3_A2_T1: FAIL_OK  # undefined
+S15.1.1.3_A2_T2: FAIL_OK  # undefined
+
+# Array.prototype.to[Locale]String is generic in ES5.
+S15.4.4.2_A2_T1: FAIL_OK
+S15.4.4.3_A2_T1: FAIL_OK
+
+
 ##################### SKIPPED TESTS #####################
 
 # These tests take a looong time to run in debug mode.
diff --git a/tools/gc-nvp-trace-processor.py b/tools/gc-nvp-trace-processor.py
index 511ab2b..de3dc90 100755
--- a/tools/gc-nvp-trace-processor.py
+++ b/tools/gc-nvp-trace-processor.py
@@ -226,6 +226,10 @@
     return r['pause'] - r['external']
   return 0
 
+
+def real_mutator(r):
+  return r['mutator'] - r['stepstook']
+
 plots = [
   [
     Set('style fill solid 0.5 noborder'),
@@ -236,7 +240,24 @@
          Item('Sweep', 'sweep', lc = 'blue'),
          Item('Compaction', 'compact', lc = 'red'),
          Item('External', 'external', lc = '#489D43'),
-         Item('Other', other_scope, lc = 'grey'))
+         Item('Other', other_scope, lc = 'grey'),
+         Item('IGC Steps', 'stepstook', lc = '#FF6347'))
+  ],
+  [
+    Set('style fill solid 0.5 noborder'),
+    Set('style histogram rowstacked'),
+    Set('style data histograms'),
+    Plot(Item('Scavenge', scavenge_scope, lc = 'green'),
+         Item('Marking', 'mark', lc = 'purple'),
+         Item('Sweep', 'sweep', lc = 'blue'),
+         Item('Compaction', 'compact', lc = 'red'),
+         Item('External', 'external', lc = '#489D43'),
+         Item('Other', other_scope, lc = '#ADD8E6'),
+         Item('External', 'external', lc = '#D3D3D3'))
+  ],
+
+  [
+    Plot(Item('Mutator', real_mutator, lc = 'black', style = 'lines'))
   ],
   [
     Set('style histogram rowstacked'),
@@ -275,7 +296,7 @@
   return reduce(lambda t,r: f(t, r[field]), trace, init)
 
 def calc_total(trace, field):
-  return freduce(lambda t,v: t + v, field, trace, 0)
+  return freduce(lambda t,v: t + long(v), field, trace, long(0))
 
 def calc_max(trace, field):
   return freduce(lambda t,r: max(t, r), field, trace, 0)
@@ -290,6 +311,8 @@
   marksweeps = filter(lambda r: r['gc'] == 'ms', trace)
   markcompacts = filter(lambda r: r['gc'] == 'mc', trace)
   scavenges = filter(lambda r: r['gc'] == 's', trace)
+  globalgcs = filter(lambda r: r['gc'] != 's', trace)
+
 
   charts = plot_all(plots, trace, filename)
 
@@ -302,7 +325,7 @@
     else:
       avg = 0
     if n > 1:
-      dev = math.sqrt(freduce(lambda t,r: (r - avg) ** 2, field, trace, 0) /
+      dev = math.sqrt(freduce(lambda t,r: t + (r - avg) ** 2, field, trace, 0) /
                       (n - 1))
     else:
       dev = 0
@@ -311,6 +334,31 @@
               '<td>%d</td><td>%d [dev %f]</td></tr>' %
               (prefix, n, total, max, avg, dev))
 
+  def HumanReadable(size):
+    suffixes = ['bytes', 'kB', 'MB', 'GB']
+    power = 1
+    for i in range(len(suffixes)):
+      if size < power*1024:
+        return "%.1f" % (float(size) / power) + " " + suffixes[i]
+      power *= 1024
+
+  def throughput(name, trace):
+    total_live_after = calc_total(trace, 'total_size_after')
+    total_live_before = calc_total(trace, 'total_size_before')
+    total_gc = calc_total(trace, 'pause')
+    if total_gc == 0:
+      return
+    out.write('GC %s Throughput (after): %s / %s ms = %s/ms<br/>' %
+              (name,
+               HumanReadable(total_live_after),
+               total_gc,
+               HumanReadable(total_live_after / total_gc)))
+    out.write('GC %s Throughput (before): %s / %s ms = %s/ms<br/>' %
+              (name,
+               HumanReadable(total_live_before),
+               total_gc,
+               HumanReadable(total_live_before / total_gc)))
+
 
   with open(filename + '.html', 'w') as out:
     out.write('<html><body>')
@@ -329,6 +377,11 @@
           filter(lambda r: r['external'] != 0, trace),
           'external')
     out.write('</table>')
+    throughput('TOTAL', trace)
+    throughput('MS', marksweeps)
+    throughput('MC', markcompacts)
+    throughput('OLDSPACE', globalgcs)
+    out.write('<br/>')
     for chart in charts:
       out.write('<img src="%s">' % chart)
       out.write('</body></html>')
diff --git a/tools/gcmole/gccause.lua b/tools/gcmole/gccause.lua
index a6fe542..b989176 100644
--- a/tools/gcmole/gccause.lua
+++ b/tools/gcmole/gccause.lua
@@ -48,6 +48,8 @@
 	    T[f] = true
 	    TrackCause(f, (lvl or 0) + 1)
 	 end
+
+	 if f == '<GC>' then break end
       end
    end
 end
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index 5014417..f53b0e7 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -340,6 +340,8 @@
             '../../src/ic-inl.h',
             '../../src/ic.cc',
             '../../src/ic.h',
+            '../../src/incremental-marking.cc',
+            '../../src/incremental-marking.h',
             '../../src/inspector.cc',
             '../../src/inspector.h',
             '../../src/interpreter-irregexp.cc',
@@ -431,6 +433,9 @@
             '../../src/spaces-inl.h',
             '../../src/spaces.cc',
             '../../src/spaces.h',
+            '../../src/store-buffer-inl.h',
+            '../../src/store-buffer.cc',
+            '../../src/store-buffer.h',
             '../../src/string-search.cc',
             '../../src/string-search.h',
             '../../src/string-stream.cc',
@@ -641,6 +646,13 @@
                 ],
               }
             ],
+            ['OS=="solaris"', {
+                'sources': [
+                  '../../src/platform-solaris.cc',
+                  '../../src/platform-posix.cc',
+                ],
+              }
+            ],
             ['OS=="mac"', {
               'sources': [
                 '../../src/platform-macos.cc',
diff --git a/tools/linux-tick-processor b/tools/linux-tick-processor
index 0b0a1fb..7070ce6 100755
--- a/tools/linux-tick-processor
+++ b/tools/linux-tick-processor
@@ -1,20 +1,5 @@
 #!/bin/sh
 
-tools_path=`cd $(dirname "$0");pwd`
-if [ ! "$D8_PATH" ]; then
-  d8_public=`which d8`
-  if [ -x $d8_public ]; then D8_PATH=$(dirname "$d8_public"); fi
-fi
-[ "$D8_PATH" ] || D8_PATH=$tools_path/..
-d8_exec=$D8_PATH/d8
-
-if [ ! -x $d8_exec ]; then
-  echo "d8 shell not found in $D8_PATH"
-  echo "To build, execute 'scons <flags> d8' from the V8 directory"
-  exit 1
-fi
-
-
 # find the name of the log file to process, it must not start with a dash.
 log_file="v8.log"
 for arg in "$@"
@@ -24,6 +9,28 @@
   fi
 done
 
+tools_path=`cd $(dirname "$0");pwd`
+if [ ! "$D8_PATH" ]; then
+  d8_public=`which d8`
+  if [ -x $d8_public ]; then D8_PATH=$(dirname "$d8_public"); fi
+fi
+[ "$D8_PATH" ] || D8_PATH=$tools_path/..
+d8_exec=$D8_PATH/d8
+
+if [ ! -x $d8_exec ]; then
+  D8_PATH=`pwd`/out/native
+  d8_exec=$D8_PATH/d8
+fi
+
+if [ ! -x $d8_exec ]; then
+  d8_exec=`grep -m 1 -o '".*/d8"' $log_file | sed 's/"//g'`
+fi
+
+if [ ! -x $d8_exec ]; then
+  echo "d8 shell not found in $D8_PATH"
+  echo "To build, execute 'make native' from the V8 directory"
+  exit 1
+fi
 
 # nm spits out 'no symbols found' messages to stderr.
 cat $log_file | $d8_exec $tools_path/splaytree.js $tools_path/codemap.js \
diff --git a/tools/ll_prof.py b/tools/ll_prof.py
index 58cbb95..30d10c3 100755
--- a/tools/ll_prof.py
+++ b/tools/ll_prof.py
@@ -399,12 +399,16 @@
         code = Code(name, start_address, end_address, origin, origin_offset)
         conficting_code = self.code_map.Find(start_address)
         if conficting_code:
-          LogReader._HandleCodeConflict(conficting_code, code)
-          # TODO(vitalyr): this warning is too noisy because of our
-          # attempts to reconstruct code log from the snapshot.
-          # print >>sys.stderr, \
-          #     "Warning: Skipping duplicate code log entry %s" % code
-          continue
+          if not (conficting_code.start_address == code.start_address and
+            conficting_code.end_address == code.end_address):
+            self.code_map.Remove(conficting_code)
+          else:
+            LogReader._HandleCodeConflict(conficting_code, code)
+            # TODO(vitalyr): this warning is too noisy because of our
+            # attempts to reconstruct code log from the snapshot.
+            # print >>sys.stderr, \
+            #     "Warning: Skipping duplicate code log entry %s" % code
+            continue
         self.code_map.Add(code)
         continue
 
diff --git a/tools/logreader.js b/tools/logreader.js
index 315e721..a8141da 100644
--- a/tools/logreader.js
+++ b/tools/logreader.js
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -134,9 +134,8 @@
 LogReader.prototype.dispatchLogRow_ = function(fields) {
   // Obtain the dispatch.
   var command = fields[0];
-  if (!(command in this.dispatchTable_)) {
-    throw new Error('unknown command: ' + command);
-  }
+  if (!(command in this.dispatchTable_)) return;
+
   var dispatch = this.dispatchTable_[command];
 
   if (dispatch === null || this.skipDispatch(dispatch)) {
diff --git a/tools/push-to-trunk.sh b/tools/push-to-trunk.sh
index 761b733..bd5d003 100755
--- a/tools/push-to-trunk.sh
+++ b/tools/push-to-trunk.sh
@@ -202,10 +202,14 @@
   for commit in $COMMITS ; do
     # Get the commit's title line.
     git log -1 $commit --format="%w(80,8,8)%s" >> "$CHANGELOG_ENTRY_FILE"
-    # Grep for "BUG=xxxx" lines in the commit message.
-    git log -1 $commit --format="%b" | grep BUG= | grep -v "BUG=$" \
-                                     | sed -e 's/^/        /' \
-                                     >> "$CHANGELOG_ENTRY_FILE"
+    # Grep for "BUG=xxxx" lines in the commit message and convert them to
+    # "(issue xxxx)".
+    git log -1 $commit --format="%B" \
+        | grep "^BUG=" | grep -v "BUG=$" \
+        | sed -e 's/^/        /' \
+        | sed -e 's/BUG=v8:\(.*\)$/(issue \1)/' \
+        | sed -e 's/BUG=\(.*\)$/(Chromium issue \1)/' \
+        >> "$CHANGELOG_ENTRY_FILE"
     # Append the commit's author for reference.
     git log -1 $commit --format="%w(80,8,8)(%an)" >> "$CHANGELOG_ENTRY_FILE"
     echo "" >> "$CHANGELOG_ENTRY_FILE"
diff --git a/tools/test-wrapper-gypbuild.py b/tools/test-wrapper-gypbuild.py
index ad5449a..a990b7e 100755
--- a/tools/test-wrapper-gypbuild.py
+++ b/tools/test-wrapper-gypbuild.py
@@ -131,16 +131,20 @@
 
 
 def ProcessOptions(options):
-  if options.arch_and_mode != None and options.arch_and_mode != "":
-    tokens = options.arch_and_mode.split(".")
-    options.arch = tokens[0]
-    options.mode = tokens[1]
-  options.mode = options.mode.split(',')
+  if options.arch_and_mode == ".":
+    options.arch = []
+    options.mode = []
+  else:
+    if options.arch_and_mode != None and options.arch_and_mode != "":
+      tokens = options.arch_and_mode.split(".")
+      options.arch = tokens[0]
+      options.mode = tokens[1]
+    options.mode = options.mode.split(',')
+    options.arch = options.arch.split(',')
   for mode in options.mode:
     if not mode in ['debug', 'release']:
       print "Unknown mode %s" % mode
       return False
-  options.arch = options.arch.split(',')
   for arch in options.arch:
     if not arch in ['ia32', 'x64', 'arm']:
       print "Unknown architecture %s" % arch
@@ -165,7 +169,7 @@
   if options.snapshot:
     result += ['--snapshot']
   if options.special_command:
-    result += ['--special-command=' + options.special_command]
+    result += ['--special-command="%s"' % options.special_command]
   if options.valgrind:
     result += ['--valgrind']
   if options.cat:
@@ -232,6 +236,18 @@
                                env=env)
       returncodes += child.wait()
 
+  if len(options.mode) == 0 and len(options.arch) == 0:
+    print ">>> running tests"
+    shellpath = workspace + '/' + options.outdir
+    env['LD_LIBRARY_PATH'] = shellpath + '/lib.target'
+    shell = shellpath + '/d8'
+    child = subprocess.Popen(' '.join(args_for_children +
+                                      ['--shell=' + shell]),
+                             shell=True,
+                             cwd=workspace,
+                             env=env)
+    returncodes = child.wait()
+
   return returncodes