Version 3.9.10

Fixed V8 issues 1322, 1772 and 1969.

Conformance improvements.

Performance and stability improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@10829 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/SConscript b/src/SConscript
index 42de36b..94840dc 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -1,4 +1,4 @@
-# Copyright 2011 the V8 project authors. All rights reserved.
+# Copyright 2012 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -78,7 +78,6 @@
     fast-dtoa.cc
     fixed-dtoa.cc
     handles.cc
-    hashmap.cc
     heap-profiler.cc
     heap.cc
     hydrogen.cc
@@ -246,7 +245,6 @@
     dtoa.cc
     fast-dtoa.cc
     fixed-dtoa.cc
-    hashmap.cc
     preparse-data.cc
     preparser.cc
     preparser-api.cc
diff --git a/src/api.cc b/src/api.cc
index 67fded8..775c884 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -2760,6 +2760,7 @@
       self,
       index,
       value_obj,
+      NONE,
       i::kNonStrictMode);
   has_pending_exception = obj.is_null();
   EXCEPTION_BAILOUT_CHECK(isolate, false);
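
The hunk above tracks a widened internal API in which JSObject::SetElement
takes an explicit PropertyAttributes argument; callers with no attributes to
apply pass NONE (the ic.cc and liveedit.cc hunks below make the same
adjustment). A sketch of the call shape, with parameter names assumed rather
than copied from a header in this patch:

    // Assumed shape of the widened entry point (a sketch, not the actual
    // declaration from the V8 headers):
    static Handle<Object> SetElement(Handle<JSObject> object,
                                     uint32_t index,
                                     Handle<Object> value,
                                     PropertyAttributes attributes,  // new
                                     StrictModeFlag strict_mode);
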
diff --git a/src/ast.cc b/src/ast.cc
index c98aaa9..ca3ab78 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -237,8 +237,8 @@
 
 
 void ObjectLiteral::CalculateEmitStore() {
-  HashMap properties(&IsEqualString);
-  HashMap elements(&IsEqualNumber);
+  ZoneHashMap properties(&IsEqualString);
+  ZoneHashMap elements(&IsEqualNumber);
   for (int i = this->properties()->length() - 1; i >= 0; i--) {
     ObjectLiteral::Property* property = this->properties()->at(i);
     Literal* literal = property->key();
@@ -249,7 +249,7 @@
     }
 
     uint32_t hash;
-    HashMap* table;
+    ZoneHashMap* table;
     void* key;
     Factory* factory = Isolate::Current()->factory();
     if (handle->IsSymbol()) {
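
CalculateEmitStore now keeps its temporary lookup tables in ZoneHashMaps, so
they are zone-allocated and released wholesale with the compilation zone
instead of going through the heap allocator. The ZoneHashMap typedef is not
shown in this patch; presumably it instantiates the new template with a zone
allocation policy, along these lines:

    // Assumed definition (not part of this patch): the zone-backed
    // counterpart of the malloc-backed HashMap typedef in hashmap.h.
    typedef TemplateHashMap<ZoneListAllocationPolicy> ZoneHashMap;
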
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 0f5b9c8..19a6a15 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -214,13 +214,12 @@
   };
 
   class ExtensionStates {
-  public:
+   public:
     ExtensionStates();
     ExtensionTraversalState get_state(RegisteredExtension* extension);
     void set_state(RegisteredExtension* extension,
                    ExtensionTraversalState state);
-  private:
-    Allocator allocator_;
+   private:
     HashMap map_;
     DISALLOW_COPY_AND_ASSIGN(ExtensionStates);
   };
@@ -1961,9 +1960,7 @@
 }
 
 Genesis::ExtensionStates::ExtensionStates()
-  : allocator_(),
-    map_(MatchRegisteredExtensions, &allocator_, 8)
-  {}
+  : map_(MatchRegisteredExtensions, 8) { }
 
 Genesis::ExtensionTraversalState Genesis::ExtensionStates::get_state(
     RegisteredExtension* extension) {
diff --git a/src/compilation-cache.h b/src/compilation-cache.h
index 31f2909..2f2fbad 100644
--- a/src/compilation-cache.h
+++ b/src/compilation-cache.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,8 +31,6 @@
 namespace v8 {
 namespace internal {
 
-class HashMap;
-
 // The compilation cache consists of several generational sub-caches which use
 // this class as a base class. A sub-cache contains a compilation cache table
 // for each generation of the sub-cache. Since the same source code string has
diff --git a/src/cpu-profiler.h b/src/cpu-profiler.h
index 3f4fec5..6e2e771 100644
--- a/src/cpu-profiler.h
+++ b/src/cpu-profiler.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -41,7 +41,6 @@
 class CodeMap;
 class CpuProfile;
 class CpuProfilesCollection;
-class HashMap;
 class ProfileGenerator;
 class TokenEnumerator;
 
diff --git a/src/d8.js b/src/d8.js
index d136393..bf26923 100644
--- a/src/d8.js
+++ b/src/d8.js
@@ -122,13 +122,15 @@
 };
 var trace_compile = false;  // Tracing all compile events?
 var trace_debug_json = false; // Tracing all debug json packets?
-var last_cmd_line = '';
+var last_cmd = '';
 //var lol_is_enabled;  // Set to true in d8.cc if LIVE_OBJECT_LIST is defined.
 var lol_next_dump_index = 0;
 var kDefaultLolLinesToPrintAtATime = 10;
 var kMaxLolLinesToPrintAtATime = 1000;
 var repeat_cmd_line = '';
 var is_running = true;
+// Global variable used to store whether a handle was requested.
+var lookup_handle = null;
 
 // Copied from debug-delay.js.  This is needed below:
 function ScriptTypeFlag(type) {
@@ -155,7 +157,7 @@
 }
 
 function DebugEventDetails(response) {
-  details = {text:'', running:false};
+  var details = {text:'', running:false};
 
   // Get the running state.
   details.running = response.running();
@@ -588,7 +590,6 @@
 
 // Create a JSON request for the evaluation command.
 DebugRequest.prototype.makeEvaluateJSONRequest_ = function(expression) {
-  // Global varaible used to store whether a handle was requested.
   lookup_handle = null;
 
   if (lol_is_enabled) {
@@ -1948,7 +1949,7 @@
 
 // Convert a JSON response to text for display in a text based debugger.
 function DebugResponseDetails(response) {
-  details = { text: '', running: false };
+  var details = { text: '', running: false };
 
   try {
     if (!response.success()) {
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index 682eb53..55ecc71 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -451,7 +451,7 @@
     base = data->lazy_deoptimization_entry_code_;
   }
   return
-      static_cast<Address>(base->body()) + (id * table_entry_size_);
+      static_cast<Address>(base->area_start()) + (id * table_entry_size_);
 }
 
 
@@ -464,14 +464,14 @@
     base = data->lazy_deoptimization_entry_code_;
   }
   if (base == NULL ||
-      addr < base->body() ||
-      addr >= base->body() +
+      addr < base->area_start() ||
+      addr >= base->area_start() +
           (kNumberOfEntries * table_entry_size_)) {
     return kNotDeoptimizationEntry;
   }
   ASSERT_EQ(0,
-      static_cast<int>(addr - base->body()) % table_entry_size_);
-  return static_cast<int>(addr - base->body()) / table_entry_size_;
+      static_cast<int>(addr - base->area_start()) % table_entry_size_);
+  return static_cast<int>(addr - base->area_start()) / table_entry_size_;
 }
 
 
@@ -1152,11 +1152,12 @@
       Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size,
                                                             EXECUTABLE,
                                                             NULL);
   if (chunk == NULL) {
     V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table");
   }
+  ASSERT(chunk->area_size() >= desc.instr_size);
-  memcpy(chunk->body(), desc.buffer, desc.instr_size);
-  CPU::FlushICache(chunk->body(), desc.instr_size);
+  memcpy(chunk->area_start(), desc.buffer, desc.instr_size);
+  CPU::FlushICache(chunk->area_start(), desc.instr_size);
   return chunk;
 }
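
The body()/body_limit() accessors give way to area_start()/area_end() and
area_size() throughout this patch: a chunk's usable object area is now an
explicit range rather than a fixed offset from the chunk header. A minimal
sketch of the accessor shape the call sites rely on (the member layout here is
an assumption):

    typedef unsigned char* Address;

    // Sketch only: the real MemoryChunk records these bounds when the
    // chunk is initialized. The invariant the pointer arithmetic above
    // depends on is area_start_ <= area_end_.
    class MemoryChunkSketch {
     public:
      Address area_start() { return area_start_; }
      Address area_end() { return area_end_; }
      int area_size() { return static_cast<int>(area_end_ - area_start_); }
     private:
      Address area_start_;
      Address area_end_;
    };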
 
diff --git a/src/elements.cc b/src/elements.cc
index c15c44d..d951b0e 100644
--- a/src/elements.cc
+++ b/src/elements.cc
@@ -705,10 +705,20 @@
     } else {
       // Object is not mapped, defer to the arguments.
       FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
-      return ElementsAccessor::ForArray(arguments)->Get(arguments,
-                                                        key,
-                                                        obj,
-                                                        receiver);
+      MaybeObject* maybe_result = ElementsAccessor::ForArray(arguments)->Get(
+          arguments, key, obj, receiver);
+      Object* result;
+      if (!maybe_result->ToObject(&result)) return maybe_result;
+      // Elements of the arguments object in slow mode might be slow aliases.
+      if (result->IsAliasedArgumentsEntry()) {
+        AliasedArgumentsEntry* entry = AliasedArgumentsEntry::cast(result);
+        Context* context = Context::cast(parameter_map->get(0));
+        int context_index = entry->aliased_context_slot();
+        ASSERT(!context->get(context_index)->IsTheHole());
+        return context->get(context_index);
+      } else {
+        return result;
+      }
     }
   }
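
The new branch handles slow-mode arguments objects: their backing store can
hold an AliasedArgumentsEntry marker instead of a plain value, where the
marker names the context slot that owns the real value. A stand-alone toy
model of that indirection (the structs are illustrative stand-ins, not the V8
classes):

    #include <vector>

    struct Context { std::vector<int> slots; };

    struct Element {
      bool aliased;  // true: 'payload' is a context slot index
      int payload;   // the value itself, or the slot index when aliased
    };

    // Mirrors the unwrap step added above: aliased entries are resolved
    // through the context before being returned to the caller.
    int GetArgument(const std::vector<Element>& args,
                    const Context& context,
                    int index) {
      const Element& e = args[index];
      return e.aliased ? context.slots[e.payload] : e.payload;
    }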
 
diff --git a/src/hashmap.cc b/src/hashmap.cc
deleted file mode 100644
index 0b404a9..0000000
--- a/src/hashmap.cc
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "../include/v8stdint.h"
-#include "globals.h"
-#include "checks.h"
-#include "utils.h"
-#include "allocation.h"
-
-#include "hashmap.h"
-
-namespace v8 {
-namespace internal {
-
-Allocator* HashMap::DefaultAllocator = ::new Allocator();
-
-
-HashMap::HashMap(MatchFun match,
-                 Allocator* allocator,
-                 uint32_t initial_capacity) {
-  allocator_ = allocator;
-  match_ = match;
-  Initialize(initial_capacity);
-}
-
-
-HashMap::~HashMap() {
-  if (allocator_) {
-    allocator_->Delete(map_);
-  }
-}
-
-
-HashMap::Entry* HashMap::Lookup(void* key, uint32_t hash, bool insert) {
-  // Find a matching entry.
-  Entry* p = Probe(key, hash);
-  if (p->key != NULL) {
-    return p;
-  }
-
-  // No entry found; insert one if necessary.
-  if (insert) {
-    p->key = key;
-    p->value = NULL;
-    p->hash = hash;
-    occupancy_++;
-
-    // Grow the map if we reached >= 80% occupancy.
-    if (occupancy_ + occupancy_/4 >= capacity_) {
-      Resize();
-      p = Probe(key, hash);
-    }
-
-    return p;
-  }
-
-  // No entry found and none inserted.
-  return NULL;
-}
-
-
-void HashMap::Remove(void* key, uint32_t hash) {
-  // Lookup the entry for the key to remove.
-  Entry* p = Probe(key, hash);
-  if (p->key == NULL) {
-    // Key not found nothing to remove.
-    return;
-  }
-
-  // To remove an entry we need to ensure that it does not create an empty
-  // entry that will cause the search for another entry to stop too soon. If all
-  // the entries between the entry to remove and the next empty slot have their
-  // initial position inside this interval, clearing the entry to remove will
-  // not break the search. If, while searching for the next empty entry, an
-  // entry is encountered which does not have its initial position between the
-  // entry to remove and the position looked at, then this entry can be moved to
-  // the place of the entry to remove without breaking the search for it. The
-  // entry made vacant by this move is now the entry to remove and the process
-  // starts over.
-  // Algorithm from http://en.wikipedia.org/wiki/Open_addressing.
-
-  // This guarantees loop termination as there is at least one empty entry so
-  // eventually the removed entry will have an empty entry after it.
-  ASSERT(occupancy_ < capacity_);
-
-  // p is the candidate entry to clear. q is used to scan forwards.
-  Entry* q = p;  // Start at the entry to remove.
-  while (true) {
-    // Move q to the next entry.
-    q = q + 1;
-    if (q == map_end()) {
-      q = map_;
-    }
-
-    // All entries between p and q have their initial position between p and q
-    // and the entry p can be cleared without breaking the search for these
-    // entries.
-    if (q->key == NULL) {
-      break;
-    }
-
-    // Find the initial position for the entry at position q.
-    Entry* r = map_ + (q->hash & (capacity_ - 1));
-
-    // If the entry at position q has its initial position outside the range
-    // between p and q it can be moved forward to position p and will still be
-    // found. There is now a new candidate entry for clearing.
-    if ((q > p && (r <= p || r > q)) ||
-        (q < p && (r <= p && r > q))) {
-      *p = *q;
-      p = q;
-    }
-  }
-
-  // Clear the entry which is allowed to en emptied.
-  p->key = NULL;
-  occupancy_--;
-}
-
-
-void HashMap::Clear() {
-  // Mark all entries as empty.
-  const Entry* end = map_end();
-  for (Entry* p = map_; p < end; p++) {
-    p->key = NULL;
-  }
-  occupancy_ = 0;
-}
-
-
-HashMap::Entry* HashMap::Start() const {
-  return Next(map_ - 1);
-}
-
-
-HashMap::Entry* HashMap::Next(Entry* p) const {
-  const Entry* end = map_end();
-  ASSERT(map_ - 1 <= p && p < end);
-  for (p++; p < end; p++) {
-    if (p->key != NULL) {
-      return p;
-    }
-  }
-  return NULL;
-}
-
-
-HashMap::Entry* HashMap::Probe(void* key, uint32_t hash) {
-  ASSERT(key != NULL);
-
-  ASSERT(IsPowerOf2(capacity_));
-  Entry* p = map_ + (hash & (capacity_ - 1));
-  const Entry* end = map_end();
-  ASSERT(map_ <= p && p < end);
-
-  ASSERT(occupancy_ < capacity_);  // Guarantees loop termination.
-  while (p->key != NULL && (hash != p->hash || !match_(key, p->key))) {
-    p++;
-    if (p >= end) {
-      p = map_;
-    }
-  }
-
-  return p;
-}
-
-
-void HashMap::Initialize(uint32_t capacity) {
-  ASSERT(IsPowerOf2(capacity));
-  map_ = reinterpret_cast<Entry*>(allocator_->New(capacity * sizeof(Entry)));
-  if (map_ == NULL) {
-    v8::internal::FatalProcessOutOfMemory("HashMap::Initialize");
-    return;
-  }
-  capacity_ = capacity;
-  Clear();
-}
-
-
-void HashMap::Resize() {
-  Entry* map = map_;
-  uint32_t n = occupancy_;
-
-  // Allocate larger map.
-  Initialize(capacity_ * 2);
-
-  // Rehash all current entries.
-  for (Entry* p = map; n > 0; p++) {
-    if (p->key != NULL) {
-      Lookup(p->key, p->hash, true)->value = p->value;
-      n--;
-    }
-  }
-
-  // Delete old map.
-  allocator_->Delete(map);
-}
-
-
-} }  // namespace v8::internal
diff --git a/src/hashmap.h b/src/hashmap.h
index d2d1faf..ede098c 100644
--- a/src/hashmap.h
+++ b/src/hashmap.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,34 +29,22 @@
 #define V8_HASHMAP_H_
 
 #include "allocation.h"
+#include "checks.h"
+#include "utils.h"
 
 namespace v8 {
 namespace internal {
 
-
-// Allocator defines the memory allocator interface
-// used by HashMap and implements a default allocator.
-class Allocator BASE_EMBEDDED {
+template<class AllocationPolicy>
+class TemplateHashMap {
  public:
-  virtual ~Allocator()  {}
-  virtual void* New(size_t size)  { return Malloced::New(size); }
-  virtual void Delete(void* p)  { Malloced::Delete(p); }
-};
-
-
-class HashMap {
- public:
-  static Allocator* DefaultAllocator;
-
   typedef bool (*MatchFun) (void* key1, void* key2);
 
   // initial_capacity is the size of the initial hash map;
   // it must be a power of 2 (and thus must not be 0).
-  explicit HashMap(MatchFun match,
-                   Allocator* allocator = DefaultAllocator,
-                   uint32_t initial_capacity = 8);
+  TemplateHashMap(MatchFun match, uint32_t initial_capacity = 8);
 
-  ~HashMap();
+  ~TemplateHashMap();
 
   // HashMap entries are (key, value, hash) triplets.
   // Some clients may not need to use the value slot
@@ -100,7 +88,6 @@
   Entry* Next(Entry* p) const;
 
  private:
-  Allocator* allocator_;
   MatchFun match_;
   Entry* map_;
   uint32_t capacity_;
@@ -112,6 +99,196 @@
   void Resize();
 };
 
+typedef TemplateHashMap<FreeStoreAllocationPolicy> HashMap;
+
+template<class P>
+TemplateHashMap<P>::TemplateHashMap(MatchFun match,
+                                    uint32_t initial_capacity) {
+  match_ = match;
+  Initialize(initial_capacity);
+}
+
+
+template<class P>
+TemplateHashMap<P>::~TemplateHashMap() {
+  P::Delete(map_);
+}
+
+
+template<class P>
+typename TemplateHashMap<P>::Entry* TemplateHashMap<P>::Lookup(
+    void* key, uint32_t hash, bool insert) {
+  // Find a matching entry.
+  Entry* p = Probe(key, hash);
+  if (p->key != NULL) {
+    return p;
+  }
+
+  // No entry found; insert one if necessary.
+  if (insert) {
+    p->key = key;
+    p->value = NULL;
+    p->hash = hash;
+    occupancy_++;
+
+    // Grow the map if we reached >= 80% occupancy.
+    if (occupancy_ + occupancy_/4 >= capacity_) {
+      Resize();
+      p = Probe(key, hash);
+    }
+
+    return p;
+  }
+
+  // No entry found and none inserted.
+  return NULL;
+}
+
+
+template<class P>
+void TemplateHashMap<P>::Remove(void* key, uint32_t hash) {
+  // Lookup the entry for the key to remove.
+  Entry* p = Probe(key, hash);
+  if (p->key == NULL) {
+    // Key not found, nothing to remove.
+    return;
+  }
+
+  // To remove an entry we need to ensure that it does not create an empty
+  // entry that will cause the search for another entry to stop too soon. If all
+  // the entries between the entry to remove and the next empty slot have their
+  // initial position inside this interval, clearing the entry to remove will
+  // not break the search. If, while searching for the next empty entry, an
+  // entry is encountered which does not have its initial position between the
+  // entry to remove and the position looked at, then this entry can be moved to
+  // the place of the entry to remove without breaking the search for it. The
+  // entry made vacant by this move is now the entry to remove and the process
+  // starts over.
+  // Algorithm from http://en.wikipedia.org/wiki/Open_addressing.
+
+  // This guarantees loop termination as there is at least one empty entry so
+  // eventually the removed entry will have an empty entry after it.
+  ASSERT(occupancy_ < capacity_);
+
+  // p is the candidate entry to clear. q is used to scan forwards.
+  Entry* q = p;  // Start at the entry to remove.
+  while (true) {
+    // Move q to the next entry.
+    q = q + 1;
+    if (q == map_end()) {
+      q = map_;
+    }
+
+    // All entries between p and q have their initial position between p and q
+    // and the entry p can be cleared without breaking the search for these
+    // entries.
+    if (q->key == NULL) {
+      break;
+    }
+
+    // Find the initial position for the entry at position q.
+    Entry* r = map_ + (q->hash & (capacity_ - 1));
+
+    // If the entry at position q has its initial position outside the range
+    // between p and q it can be moved forward to position p and will still be
+    // found. There is now a new candidate entry for clearing.
+    if ((q > p && (r <= p || r > q)) ||
+        (q < p && (r <= p && r > q))) {
+      *p = *q;
+      p = q;
+    }
+  }
+
+  // Clear the entry which is allowed to be emptied.
+  p->key = NULL;
+  occupancy_--;
+}
+
+
+template<class P>
+void TemplateHashMap<P>::Clear() {
+  // Mark all entries as empty.
+  const Entry* end = map_end();
+  for (Entry* p = map_; p < end; p++) {
+    p->key = NULL;
+  }
+  occupancy_ = 0;
+}
+
+
+template<class P>
+typename TemplateHashMap<P>::Entry* TemplateHashMap<P>::Start() const {
+  return Next(map_ - 1);
+}
+
+
+template<class P>
+typename TemplateHashMap<P>::Entry* TemplateHashMap<P>::Next(Entry* p) const {
+  const Entry* end = map_end();
+  ASSERT(map_ - 1 <= p && p < end);
+  for (p++; p < end; p++) {
+    if (p->key != NULL) {
+      return p;
+    }
+  }
+  return NULL;
+}
+
+
+template<class P>
+typename TemplateHashMap<P>::Entry* TemplateHashMap<P>::Probe(void* key,
+                                                            uint32_t hash) {
+  ASSERT(key != NULL);
+
+  ASSERT(IsPowerOf2(capacity_));
+  Entry* p = map_ + (hash & (capacity_ - 1));
+  const Entry* end = map_end();
+  ASSERT(map_ <= p && p < end);
+
+  ASSERT(occupancy_ < capacity_);  // Guarantees loop termination.
+  while (p->key != NULL && (hash != p->hash || !match_(key, p->key))) {
+    p++;
+    if (p >= end) {
+      p = map_;
+    }
+  }
+
+  return p;
+}
+
+
+template<class P>
+void TemplateHashMap<P>::Initialize(uint32_t capacity) {
+  ASSERT(IsPowerOf2(capacity));
+  map_ = reinterpret_cast<Entry*>(P::New(capacity * sizeof(Entry)));
+  if (map_ == NULL) {
+    v8::internal::FatalProcessOutOfMemory("HashMap::Initialize");
+    return;
+  }
+  capacity_ = capacity;
+  Clear();
+}
+
+
+template<class P>
+void TemplateHashMap<P>::Resize() {
+  Entry* map = map_;
+  uint32_t n = occupancy_;
+
+  // Allocate larger map.
+  Initialize(capacity_ * 2);
+
+  // Rehash all current entries.
+  for (Entry* p = map; n > 0; p++) {
+    if (p->key != NULL) {
+      Lookup(p->key, p->hash, true)->value = p->value;
+      n--;
+    }
+  }
+
+  // Delete old map.
+  P::Delete(map);
+}
 
 } }  // namespace v8::internal
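
TemplateHashMap invokes P::New and P::Delete as static functions, so an
allocation policy only needs to supply that pair. The FreeStoreAllocationPolicy
behind the HashMap typedef is defined elsewhere (presumably routing through
Malloced, as the deleted default Allocator did). A minimal policy of the
required shape, for illustration only:

    #include <cstdlib>

    // Illustrative policy exposing the interface TemplateHashMap<P> expects.
    class MallocAllocationPolicy {
     public:
      static void* New(std::size_t size) { return std::malloc(size); }
      static void Delete(void* p) { std::free(p); }
    };

    // Construction mirrors the old HashMap, minus the allocator argument:
    //   TemplateHashMap<MallocAllocationPolicy> map(match_fun, 8);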
 
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 39cdf13..81ed448 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -49,7 +49,7 @@
     NewSpacePage* rear_page =
         NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_));
     ASSERT(!rear_page->prev_page()->is_anchor());
-    rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->body_limit());
+    rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->area_end());
     ActivateGuardIfOnTheSamePage();
   }
 
@@ -81,11 +81,6 @@
 }
 
 
-int Heap::MaxObjectSizeInPagedSpace() {
-  return Page::kMaxHeapObjectSize;
-}
-
-
 MaybeObject* Heap::AllocateStringFromUtf8(Vector<const char> str,
                                           PretenureFlag pretenure) {
   // Check for ASCII first since this is the common case.
@@ -119,7 +114,7 @@
 
   // Allocate string.
   Object* result;
-  { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
+  { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
                    ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
                    : old_data_space_->AllocateRaw(size);
     if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -153,7 +148,7 @@
 
   // Allocate string.
   Object* result;
-  { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
+  { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
                    ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
                    : old_data_space_->AllocateRaw(size);
     if (!maybe_result->ToObject(&result)) return maybe_result;
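
With Heap::MaxObjectSizeInPagedSpace() gone, these call sites compare against
the compile-time constant Page::kMaxNonCodeHeapObjectSize for data and pointer
spaces, while code allocation (in heap.cc below) consults the code space's own
AreaSize(), since code pages lay out their object area differently. The
recurring dispatch, written out once as a hypothetical helper:

    // Hypothetical helper naming the pattern used at these call sites:
    // anything too large for a regular page's object area goes to the
    // large-object space instead of the preferred paged space.
    static AllocationSpace ChooseDataSpace(int size,
                                           AllocationSpace preferred) {
      return (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : preferred;
    }
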
diff --git a/src/heap.cc b/src/heap.cc
index 4c54e84..e0b1e50 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -1092,7 +1092,7 @@
   Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
   intptr_t* head_start = rear_;
   intptr_t* head_end =
-      Min(front_, reinterpret_cast<intptr_t*>(p->body_limit()));
+      Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
 
   int entries_count =
       static_cast<int>(head_end - head_start) / kEntrySizeInWords;
@@ -1435,7 +1435,7 @@
           NewSpaceScavenger::IterateBody(object->map(), object);
       } else {
         new_space_front =
-            NewSpacePage::FromLimit(new_space_front)->next_page()->body();
+            NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
       }
     }
 
@@ -1597,7 +1597,7 @@
                                     HeapObject* object,
                                     int object_size) {
     SLOW_ASSERT((size_restriction != SMALL) ||
-                (object_size <= Page::kMaxHeapObjectSize));
+                (object_size <= Page::kMaxNonCodeHeapObjectSize));
     SLOW_ASSERT(object->Size() == object_size);
 
     Heap* heap = map->GetHeap();
@@ -1605,7 +1605,7 @@
       MaybeObject* maybe_result;
 
       if ((size_restriction != SMALL) &&
-          (object_size > Page::kMaxHeapObjectSize)) {
+          (object_size > Page::kMaxNonCodeHeapObjectSize)) {
         maybe_result = heap->lo_space()->AllocateRaw(object_size,
                                                      NOT_EXECUTABLE);
       } else {
@@ -1951,6 +1951,16 @@
 }
 
 
+MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
+  AliasedArgumentsEntry* entry;
+  { MaybeObject* maybe_result = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
+    if (!maybe_result->To(&entry)) return maybe_result;
+  }
+  entry->set_aliased_context_slot(aliased_context_slot);
+  return entry;
+}
+
+
 const Heap::StringTypeTable Heap::string_type_table[] = {
 #define STRING_TYPE_ELEMENT(type, size, name, camel_name)                      \
   {type, size, k##camel_name##MapRootIndex},
@@ -2264,7 +2274,7 @@
 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
   // Statically ensure that it is safe to allocate heap numbers in paged
   // spaces.
-  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
+  STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
 
   Object* result;
@@ -2285,7 +2295,7 @@
 
   // This version of AllocateHeapNumber is optimized for
   // allocation in new space.
-  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
+  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
   ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
   Object* result;
   { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
@@ -2856,7 +2866,7 @@
 
 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
   // Statically ensure that it is safe to allocate foreigns in paged spaces.
-  STATIC_ASSERT(Foreign::kSize <= Page::kMaxHeapObjectSize);
+  STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
   Foreign* result;
   MaybeObject* maybe_result = Allocate(foreign_map(), space);
@@ -3274,7 +3284,7 @@
   }
   int size = ByteArray::SizeFor(length);
   Object* result;
-  { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
+  { MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
                    ? old_data_space_->AllocateRaw(size)
                    : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
     if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -3293,7 +3303,7 @@
   }
   int size = ByteArray::SizeFor(length);
   AllocationSpace space =
-      (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
+      (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
   Object* result;
   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
     if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -3359,7 +3369,7 @@
   MaybeObject* maybe_result;
   // Large code objects and code objects which should stay at a fixed address
   // are allocated in large object space.
-  if (obj_size > MaxObjectSizeInPagedSpace() || immovable) {
+  if (obj_size > code_space()->AreaSize() || immovable) {
     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
   } else {
     maybe_result = code_space_->AllocateRaw(obj_size);
@@ -3408,7 +3418,7 @@
   // Allocate an object the same size as the code object.
   int obj_size = code->Size();
   MaybeObject* maybe_result;
-  if (obj_size > MaxObjectSizeInPagedSpace()) {
+  if (obj_size > code_space()->AreaSize()) {
     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
   } else {
     maybe_result = code_space_->AllocateRaw(obj_size);
@@ -3451,7 +3461,7 @@
       static_cast<size_t>(code->instruction_end() - old_addr);
 
   MaybeObject* maybe_result;
-  if (new_obj_size > MaxObjectSizeInPagedSpace()) {
+  if (new_obj_size > code_space()->AreaSize()) {
     maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
   } else {
     maybe_result = code_space_->AllocateRaw(new_obj_size);
@@ -3772,7 +3782,7 @@
   // Allocate the JSObject.
   AllocationSpace space =
       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
-  if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
+  if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
   Object* obj;
   { MaybeObject* maybe_obj = Allocate(map, space);
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
@@ -4280,7 +4290,7 @@
 
   // Allocate string.
   Object* result;
-  { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
+  { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
                    ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
                    : old_data_space_->AllocateRaw(size);
     if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -4317,11 +4327,12 @@
     if (size > kMaxObjectSizeInNewSpace) {
       // Allocate in large object space, retry space will be ignored.
       space = LO_SPACE;
-    } else if (size > MaxObjectSizeInPagedSpace()) {
+    } else if (size > Page::kMaxNonCodeHeapObjectSize) {
       // Allocate in new space, retry in large object space.
       retry_space = LO_SPACE;
     }
-  } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
+  } else if (space == OLD_DATA_SPACE &&
+             size > Page::kMaxNonCodeHeapObjectSize) {
     space = LO_SPACE;
   }
   Object* result;
@@ -4352,11 +4363,12 @@
     if (size > kMaxObjectSizeInNewSpace) {
       // Allocate in large object space, retry space will be ignored.
       space = LO_SPACE;
-    } else if (size > MaxObjectSizeInPagedSpace()) {
+    } else if (size > Page::kMaxNonCodeHeapObjectSize) {
       // Allocate in new space, retry in large object space.
       retry_space = LO_SPACE;
     }
-  } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
+  } else if (space == OLD_DATA_SPACE &&
+             size > Page::kMaxNonCodeHeapObjectSize) {
     space = LO_SPACE;
   }
   Object* result;
@@ -4495,13 +4507,13 @@
     // Too big for new space.
     space = LO_SPACE;
   } else if (space == OLD_POINTER_SPACE &&
-             size > MaxObjectSizeInPagedSpace()) {
+             size > Page::kMaxNonCodeHeapObjectSize) {
     // Too big for old pointer space.
     space = LO_SPACE;
   }
 
   AllocationSpace retry_space =
-      (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;
+      (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_POINTER_SPACE : LO_SPACE;
 
   return AllocateRaw(size, space, retry_space);
 }
@@ -4628,13 +4640,13 @@
     // Too big for new space.
     space = LO_SPACE;
   } else if (space == OLD_DATA_SPACE &&
-             size > MaxObjectSizeInPagedSpace()) {
+             size > Page::kMaxNonCodeHeapObjectSize) {
     // Too big for old data space.
     space = LO_SPACE;
   }
 
   AllocationSpace retry_space =
-      (size <= MaxObjectSizeInPagedSpace()) ? OLD_DATA_SPACE : LO_SPACE;
+      (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
 
   return AllocateRaw(size, space, retry_space);
 }
@@ -4763,7 +4775,7 @@
   }
   int size = map->instance_size();
   AllocationSpace space =
-      (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
+      (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
   Object* result;
   { MaybeObject* maybe_result = Allocate(map, space);
     if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -5210,7 +5222,7 @@
                           new_space_.FromSpaceEnd());
   while (it.has_next()) {
     NewSpacePage* page = it.next();
-    for (Address cursor = page->body(), limit = page->body_limit();
+    for (Address cursor = page->area_start(), limit = page->area_end();
          cursor < limit;
          cursor += kPointerSize) {
       Memory::Address_at(cursor) = kFromSpaceZapValue;
@@ -5349,9 +5361,9 @@
 
   while (pages.has_next()) {
     Page* page = pages.next();
-    Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart());
+    Object** current = reinterpret_cast<Object**>(page->area_start());
 
-    Address end = page->ObjectAreaEnd();
+    Address end = page->area_end();
 
     Object*** store_buffer_position = store_buffer()->Start();
     Object*** store_buffer_top = store_buffer()->Top();
@@ -5377,9 +5389,9 @@
 
   while (pages.has_next()) {
     Page* page = pages.next();
-    Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart());
+    Object** current = reinterpret_cast<Object**>(page->area_start());
 
-    Address end = page->ObjectAreaEnd();
+    Address end = page->area_end();
 
     Object*** store_buffer_position = store_buffer()->Start();
     Object*** store_buffer_top = store_buffer()->Top();
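
AllocateAliasedArgumentsEntry is the factory for the marker that the
elements.cc hunk above unwraps. A hypothetical call site, following the
MaybeObject convention used throughout this file:

    // Hypothetical caller (a sketch): allocate a marker for 'slot' and
    // propagate allocation failure, as other Allocate* callers do.
    AliasedArgumentsEntry* entry;
    { MaybeObject* maybe_entry = heap->AllocateAliasedArgumentsEntry(slot);
      if (!maybe_entry->To(&entry)) return maybe_entry;
    }
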
diff --git a/src/heap.h b/src/heap.h
index bb5c375..3a3c8f4 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -345,7 +345,7 @@
           NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
       ASSERT(!front_page->prev_page()->is_anchor());
       front_ =
-          reinterpret_cast<intptr_t*>(front_page->prev_page()->body_limit());
+          reinterpret_cast<intptr_t*>(front_page->prev_page()->area_end());
     }
     *target = reinterpret_cast<HeapObject*>(*(--front_));
     *size = static_cast<int>(*(--front_));
@@ -484,9 +484,6 @@
   // all available bytes. Check MaxHeapObjectSize() instead.
   intptr_t Available();
 
-  // Returns the maximum object size in paged space.
-  inline int MaxObjectSizeInPagedSpace();
-
   // Returns of size of all objects residing in the heap.
   intptr_t SizeOfObjects();
 
@@ -644,6 +641,9 @@
   // Allocates an empty TypeFeedbackInfo.
   MUST_USE_RESULT MaybeObject* AllocateTypeFeedbackInfo();
 
+  // Allocates an AliasedArgumentsEntry.
+  MUST_USE_RESULT MaybeObject* AllocateAliasedArgumentsEntry(int slot);
+
   // Clear the Instanceof cache (used when a prototype changes).
   inline void ClearInstanceofCache();
 
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index c59c9e4..6cd9998 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -276,6 +276,15 @@
 }
 
 
+HUseListNode* HUseListNode::tail() {
+  // Skip and remove dead items in the use list.
+  while (tail_ != NULL && tail_->value()->CheckFlag(HValue::kIsDead)) {
+    tail_ = tail_->tail_;
+  }
+  return tail_;
+}
+
+
 HUseIterator::HUseIterator(HUseListNode* head) : next_(head) {
   Advance();
 }
@@ -374,7 +383,7 @@
   // We replace all uses first, so Delete can assert that there are none.
   if (other != NULL) ReplaceAllUsesWith(other);
   ASSERT(HasNoUses());
-  ClearOperands();
+  Kill();
   DeleteFromGraph();
 }
 
@@ -392,9 +401,17 @@
 }
 
 
-void HValue::ClearOperands() {
+void HValue::Kill() {
+  // Instead of going through the entire use list of each operand, we only
+  // check the first item in each use list and rely on the tail() method to
+  // skip dead items, removing them lazily next time we traverse the list.
+  SetFlag(kIsDead);
   for (int i = 0; i < OperandCount(); ++i) {
-    SetOperandAt(i, NULL);
+    HValue* operand = OperandAt(i);
+    HUseListNode* first = operand->use_list_;
+    if (first != NULL && first->value() == this && first->index() == i) {
+      operand->use_list_ = first->tail();
+    }
   }
 }
 
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 4d7b7ba..92645a2 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -448,7 +448,7 @@
       : tail_(tail), value_(value), index_(index) {
   }
 
-  HUseListNode* tail() const { return tail_; }
+  HUseListNode* tail();
   HValue* value() const { return value_; }
   int index() const { return index_; }
 
@@ -530,7 +530,8 @@
     kDeoptimizeOnUndefined,
     kIsArguments,
     kTruncatingToInt32,
-    kLastFlag = kTruncatingToInt32
+    kIsDead,
+    kLastFlag = kIsDead
   };
 
   STATIC_ASSERT(kLastFlag < kBitsPerInt);
@@ -630,7 +631,9 @@
     return use_list_ != NULL && use_list_->tail() != NULL;
   }
   int UseCount() const;
-  void ClearOperands();
+
+  // Mark this HValue as dead and to be removed from other HValues' use lists.
+  void Kill();
 
   int flags() const { return flags_; }
   void SetFlag(Flag f) { flags_ |= (1 << f); }
@@ -2454,7 +2457,12 @@
 
   virtual intptr_t Hashcode() {
     ASSERT(!HEAP->allow_allocation(false));
-    return reinterpret_cast<intptr_t>(*handle());
+    intptr_t hash = reinterpret_cast<intptr_t>(*handle());
+    // Prevent smis from having fewer hash values when truncated to
+    // the least significant bits.
+    const int kShiftSize = kSmiShiftSize + kSmiTagSize;
+    STATIC_ASSERT(kShiftSize != 0);
+    return hash ^ (hash >> kShiftSize);
   }
 
 #ifdef DEBUG
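
The Hashcode() change matters because a smi keeps its payload in the upper
bits of the tagged word (kSmiShiftSize + kSmiTagSize is 32 on a 64-bit build
and 1 on a 32-bit build), so a table that truncates the raw pointer value to
its low bits would collapse the smis onto very few buckets. The fold in
isolation, with the 64-bit shift width assumed:

    #include <stdint.h>

    // Sketch for a 64-bit build, where a smi k is tagged as (k << 32):
    // without the xor, the low 32 bits of the hash are always zero.
    static intptr_t MixTaggedHash(intptr_t hash) {
      const int kShiftSize = 32;  // kSmiShiftSize (31) + kSmiTagSize (1)
      return hash ^ (hash >> kShiftSize);
    }
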
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 9918e85..6bc8af5 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -97,7 +97,7 @@
   ASSERT(phi->block() == this);
   ASSERT(phis_.Contains(phi));
   ASSERT(phi->HasNoUses() || !phi->is_live());
-  phi->ClearOperands();
+  phi->Kill();
   phis_.RemoveElement(phi);
   phi->SetBlock(NULL);
 }
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 120ab14..32704b0 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -1935,13 +1935,14 @@
 LInstruction* LChunkBuilder::DoLoadNamedFieldPolymorphic(
     HLoadNamedFieldPolymorphic* instr) {
   ASSERT(instr->representation().IsTagged());
-  LOperand* context = UseFixed(instr->context(), esi);
   if (instr->need_generic()) {
+    LOperand* context = UseFixed(instr->context(), esi);
     LOperand* obj = UseFixed(instr->object(), eax);
     LLoadNamedFieldPolymorphic* result =
         new(zone()) LLoadNamedFieldPolymorphic(context, obj);
     return MarkAsCall(DefineFixed(result, eax), instr);
   } else {
+    LOperand* context = UseAny(instr->context());  // Not actually used.
     LOperand* obj = UseRegisterAtStart(instr->object());
     LLoadNamedFieldPolymorphic* result =
         new(zone()) LLoadNamedFieldPolymorphic(context, obj);
diff --git a/src/ic.cc b/src/ic.cc
index 642a9e2..c4c204b 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -1329,7 +1329,7 @@
   uint32_t index;
   if (name->AsArrayIndex(&index)) {
     Handle<Object> result =
-        JSObject::SetElement(receiver, index, value, strict_mode);
+        JSObject::SetElement(receiver, index, value, NONE, strict_mode);
     RETURN_IF_EMPTY_HANDLE(isolate(), result);
     return *value;
   }
@@ -1786,7 +1786,7 @@
     uint32_t index;
     if (name->AsArrayIndex(&index)) {
       Handle<Object> result =
-          JSObject::SetElement(receiver, index, value, strict_mode);
+          JSObject::SetElement(receiver, index, value, NONE, strict_mode);
       RETURN_IF_EMPTY_HANDLE(isolate(), result);
       return *value;
     }
diff --git a/src/isolate.h b/src/isolate.h
index 80edee9..71fe883 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -38,6 +38,7 @@
 #include "frames.h"
 #include "global-handles.h"
 #include "handles.h"
+#include "hashmap.h"
 #include "heap.h"
 #include "regexp-stack.h"
 #include "runtime-profiler.h"
@@ -280,23 +281,6 @@
   Address try_catch_handler_address_;
 };
 
-#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
-
-#define ISOLATE_PLATFORM_INIT_LIST(V)                                          \
-  /* VirtualFrame::SpilledScope state */                                       \
-  V(bool, is_virtual_frame_in_spilled_scope, false)                            \
-  /* CodeGenerator::EmitNamedStore state */                                    \
-  V(int, inlined_write_barrier_size, -1)
-
-#if !defined(__arm__) && !defined(__mips__)
-class HashMap;
-#endif
-
-#else
-
-#define ISOLATE_PLATFORM_INIT_LIST(V)
-
-#endif
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 
@@ -367,7 +351,6 @@
   V(uint64_t, enabled_cpu_features, 0)                                         \
   V(CpuProfiler*, cpu_profiler, NULL)                                          \
   V(HeapProfiler*, heap_profiler, NULL)                                        \
-  ISOLATE_PLATFORM_INIT_LIST(V)                                                \
   ISOLATE_DEBUGGER_INIT_LIST(V)
 
 class Isolate {
diff --git a/src/liveedit.cc b/src/liveedit.cc
index 5ff8ff9..9c5294a 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -53,8 +53,8 @@
   // Ignore return value from SetElement. It can only be a failure if there
   // are element setters causing exceptions and the debugger context has none
   // of these.
-  Handle<Object> no_failure;
-  no_failure = JSObject::SetElement(object, index, value, kNonStrictMode);
+  Handle<Object> no_failure =
+      JSObject::SetElement(object, index, value, NONE, kNonStrictMode);
   ASSERT(!no_failure.is_null());
   USE(no_failure);
 }
diff --git a/src/log.h b/src/log.h
index 86bcad6..e54f041 100644
--- a/src/log.h
+++ b/src/log.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -71,7 +71,6 @@
 // tick profiler requires code events, so --prof implies --log-code.
 
 // Forward declarations.
-class HashMap;
 class LogMessageBuilder;
 class Profiler;
 class Semaphore;
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 9d83d90..7c59c04 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -107,14 +107,14 @@
   Address end = space->top();
   NewSpacePageIterator it(space->bottom(), end);
   // The bottom position is at the start of its page. Allows us to use
-  // page->body() as start of range on all pages.
+  // page->area_start() as start of range on all pages.
   ASSERT_EQ(space->bottom(),
-            NewSpacePage::FromAddress(space->bottom())->body());
+            NewSpacePage::FromAddress(space->bottom())->area_start());
   while (it.has_next()) {
     NewSpacePage* page = it.next();
-    Address limit = it.has_next() ? page->body_limit() : end;
+    Address limit = it.has_next() ? page->area_end() : end;
     ASSERT(limit == end || !page->Contains(end));
-    VerifyMarking(page->body(), limit);
+    VerifyMarking(page->area_start(), limit);
   }
 }
 
@@ -124,7 +124,7 @@
 
   while (it.has_next()) {
     Page* p = it.next();
-    VerifyMarking(p->ObjectAreaStart(), p->ObjectAreaEnd());
+    VerifyMarking(p->area_start(), p->area_end());
   }
 }
 
@@ -187,8 +187,8 @@
 
   while (it.has_next()) {
     NewSpacePage* page = it.next();
-    Address current = page->body();
-    Address limit = it.has_next() ? page->body_limit() : space->top();
+    Address current = page->area_start();
+    Address limit = it.has_next() ? page->area_end() : space->top();
     ASSERT(limit == space->top() || !page->Contains(space->top()));
     while (current < limit) {
       HeapObject* object = HeapObject::FromAddress(current);
@@ -205,7 +205,7 @@
   while (it.has_next()) {
     Page* p = it.next();
     if (p->IsEvacuationCandidate()) continue;
-    VerifyEvacuation(p->ObjectAreaStart(), p->ObjectAreaEnd());
+    VerifyEvacuation(p->area_start(), p->area_end());
   }
 }
 
@@ -232,7 +232,7 @@
 
 static void TraceFragmentation(PagedSpace* space) {
   int number_of_pages = space->CountTotalPages();
-  intptr_t reserved = (number_of_pages * Page::kObjectAreaSize);
+  intptr_t reserved = (number_of_pages * space->AreaSize());
   intptr_t free = reserved - space->SizeOfObjects();
   PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
          AllocationSpaceName(space->identity()),
@@ -453,13 +453,14 @@
 
   intptr_t ratio;
   intptr_t ratio_threshold;
+  intptr_t area_size = space->AreaSize();
   if (space->identity() == CODE_SPACE) {
     ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
-        Page::kObjectAreaSize;
+        area_size;
     ratio_threshold = 10;
   } else {
     ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
-        Page::kObjectAreaSize;
+        area_size;
     ratio_threshold = 15;
   }
 
@@ -469,20 +470,20 @@
            AllocationSpaceName(space->identity()),
            static_cast<int>(sizes.small_size_),
            static_cast<double>(sizes.small_size_ * 100) /
-           Page::kObjectAreaSize,
+           area_size,
            static_cast<int>(sizes.medium_size_),
            static_cast<double>(sizes.medium_size_ * 100) /
-           Page::kObjectAreaSize,
+           area_size,
            static_cast<int>(sizes.large_size_),
            static_cast<double>(sizes.large_size_ * 100) /
-           Page::kObjectAreaSize,
+           area_size,
            static_cast<int>(sizes.huge_size_),
            static_cast<double>(sizes.huge_size_ * 100) /
-           Page::kObjectAreaSize,
+           area_size,
            (ratio > ratio_threshold) ? "[fragmented]" : "");
   }
 
-  if (FLAG_always_compact && sizes.Total() != Page::kObjectAreaSize) {
+  if (FLAG_always_compact && sizes.Total() != area_size) {
     return 1;
   }
 
@@ -528,11 +529,11 @@
 
   CompactionMode mode = COMPACT_FREE_LISTS;
 
-  intptr_t reserved = number_of_pages * Page::kObjectAreaSize;
+  intptr_t reserved = number_of_pages * space->AreaSize();
   intptr_t over_reserved = reserved - space->SizeOfObjects();
   static const intptr_t kFreenessThreshold = 50;
 
-  if (over_reserved >= 2 * Page::kObjectAreaSize &&
+  if (over_reserved >= 2 * space->AreaSize() &&
       reduce_memory_footprint_) {
     mode = REDUCE_MEMORY_FOOTPRINT;
 
@@ -575,18 +576,17 @@
       intptr_t free_bytes = 0;
 
       if (!p->WasSwept()) {
-        free_bytes = (Page::kObjectAreaSize - p->LiveBytes());
+        free_bytes = (p->area_size() - p->LiveBytes());
       } else {
         FreeList::SizeStats sizes;
         space->CountFreeListItems(p, &sizes);
         free_bytes = sizes.Total();
       }
 
-      int free_pct = static_cast<int>(free_bytes * 100 / Page::kObjectAreaSize);
+      int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();
 
       if (free_pct >= kFreenessThreshold) {
-        estimated_release += Page::kObjectAreaSize +
-            (Page::kObjectAreaSize - free_bytes);
+        estimated_release += 2 * p->area_size() - free_bytes;
         fragmentation = free_pct;
       } else {
         fragmentation = 0;
@@ -597,7 +597,7 @@
                reinterpret_cast<void*>(p),
                AllocationSpaceName(space->identity()),
                static_cast<int>(free_bytes),
-               static_cast<double>(free_bytes * 100) / Page::kObjectAreaSize,
+               static_cast<double>(free_bytes * 100) / p->area_size(),
                (fragmentation > 0) ? "[fragmented]" : "");
       }
     } else {
@@ -1977,12 +1977,15 @@
   int last_cell_index =
       Bitmap::IndexToCell(
           Bitmap::CellAlignIndex(
-              p->AddressToMarkbitIndex(p->ObjectAreaEnd())));
+              p->AddressToMarkbitIndex(p->area_end())));
 
-  int cell_index = Page::kFirstUsedCell;
-  Address cell_base = p->ObjectAreaStart();
+  Address cell_base = p->area_start();
+  int cell_index = Bitmap::IndexToCell(
+          Bitmap::CellAlignIndex(
+              p->AddressToMarkbitIndex(cell_base)));
 
-  for (cell_index = Page::kFirstUsedCell;
+  for (;
        cell_index < last_cell_index;
        cell_index++, cell_base += 32 * kPointerSize) {
     ASSERT((unsigned)cell_index ==
@@ -2786,7 +2789,7 @@
                                             int object_size) {
   Object* result;
 
-  if (object_size > heap()->MaxObjectSizeInPagedSpace()) {
+  if (object_size > Page::kMaxNonCodeHeapObjectSize) {
     MaybeObject* maybe_result =
         heap()->lo_space()->AllocateRaw(object_size, NOT_EXECUTABLE);
     if (maybe_result->ToObject(&result)) {
@@ -2904,13 +2907,16 @@
   int last_cell_index =
       Bitmap::IndexToCell(
           Bitmap::CellAlignIndex(
-              p->AddressToMarkbitIndex(p->ObjectAreaEnd())));
+              p->AddressToMarkbitIndex(p->area_end())));
 
-  int cell_index = Page::kFirstUsedCell;
-  Address cell_base = p->ObjectAreaStart();
+  Address cell_base = p->area_start();
+  int cell_index = Bitmap::IndexToCell(
+          Bitmap::CellAlignIndex(
+              p->AddressToMarkbitIndex(cell_base)));
+
   int offsets[16];
 
-  for (cell_index = Page::kFirstUsedCell;
+  for (;
        cell_index < last_cell_index;
        cell_index++, cell_base += 32 * kPointerSize) {
     ASSERT((unsigned)cell_index ==
@@ -3065,12 +3071,16 @@
   int last_cell_index =
       Bitmap::IndexToCell(
           Bitmap::CellAlignIndex(
-              p->AddressToMarkbitIndex(p->ObjectAreaEnd())));
+              p->AddressToMarkbitIndex(p->area_end())));
 
-  int cell_index = Page::kFirstUsedCell;
-  Address free_start = p->ObjectAreaStart();
+  Address free_start = p->area_start();
+  int cell_index =
+      Bitmap::IndexToCell(
+          Bitmap::CellAlignIndex(
+              p->AddressToMarkbitIndex(free_start)));
+
   ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
-  Address object_address = p->ObjectAreaStart();
+  Address object_address = free_start;
   int offsets[16];
 
   SkipList* skip_list = p->skip_list();
@@ -3079,7 +3089,7 @@
     skip_list->Clear();
   }
 
-  for (cell_index = Page::kFirstUsedCell;
+  for (;
        cell_index < last_cell_index;
        cell_index++, object_address += 32 * kPointerSize) {
     ASSERT((unsigned)cell_index ==
@@ -3116,8 +3126,8 @@
     // Clear marking bits for current cell.
     cells[cell_index] = 0;
   }
-  if (free_start != p->ObjectAreaEnd()) {
-    space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start));
+  if (free_start != p->area_end()) {
+    space->Free(free_start, static_cast<int>(p->area_end() - free_start));
   }
   p->ResetLiveBytes();
 }
@@ -3412,7 +3422,7 @@
     Page* p = evacuation_candidates_[i];
     if (!p->IsEvacuationCandidate()) continue;
     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
-    space->Free(p->ObjectAreaStart(), Page::kObjectAreaSize);
+    space->Free(p->area_start(), p->area_size());
     p->set_scan_on_scavenge(false);
     slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
     p->ClearEvacuationCandidate();
@@ -3715,23 +3725,27 @@
   int last_cell_index =
       Bitmap::IndexToCell(
           Bitmap::CellAlignIndex(
-              p->AddressToMarkbitIndex(p->ObjectAreaEnd())));
+              p->AddressToMarkbitIndex(p->area_end())));
 
-  int cell_index = Page::kFirstUsedCell;
+  int cell_index =
+      Bitmap::IndexToCell(
+          Bitmap::CellAlignIndex(
+              p->AddressToMarkbitIndex(p->area_start())));
+
   intptr_t freed_bytes = 0;
 
   // This is the start of the 32 word block that we are currently looking at.
-  Address block_address = p->ObjectAreaStart();
+  Address block_address = p->area_start();
 
   // Skip over all the dead objects at the start of the page and mark them free.
-  for (cell_index = Page::kFirstUsedCell;
+  for (;
        cell_index < last_cell_index;
        cell_index++, block_address += 32 * kPointerSize) {
     if (cells[cell_index] != 0) break;
   }
-  size_t size = block_address - p->ObjectAreaStart();
+  size_t size = block_address - p->area_start();
   if (cell_index == last_cell_index) {
-    freed_bytes += static_cast<int>(space->Free(p->ObjectAreaStart(),
+    freed_bytes += static_cast<int>(space->Free(p->area_start(),
                                                 static_cast<int>(size)));
     ASSERT_EQ(0, p->LiveBytes());
     return freed_bytes;
@@ -3740,8 +3754,8 @@
   // first live object.
   Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
   // Free the first free space.
-  size = free_end - p->ObjectAreaStart();
-  freed_bytes += space->Free(p->ObjectAreaStart(),
+  size = free_end - p->area_start();
+  freed_bytes += space->Free(p->area_start(),
                              static_cast<int>(size));
   // The start of the current free area is represented in undigested form by
   // the address of the last 32-word section that contained a live object and
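
With variable-sized object areas, the first used markbit cell is no longer the
constant Page::kFirstUsedCell; each sweep loop above derives it from the
page's area_start(). Factored out as a hypothetical helper built from the same
three Bitmap calls the loop heads use:

    // Hypothetical helper; equivalent to the cell_index initialization
    // repeated at each loop head above.
    static int FirstUsedCellIndex(Page* p) {
      return Bitmap::IndexToCell(
          Bitmap::CellAlignIndex(
              p->AddressToMarkbitIndex(p->area_start())));
    }
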
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 6f00997..7aef912 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -333,6 +333,11 @@
 }
 
 
+void AliasedArgumentsEntry::AliasedArgumentsEntryVerify() {
+  VerifySmiField(kAliasedContextSlot);
+}
+
+
 void FixedArray::FixedArrayVerify() {
   for (int i = 0; i < length(); i++) {
     Object* e = get(i);
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 326c088..f4d8c42 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -3331,7 +3331,7 @@
 DescriptorArray* Map::instance_descriptors() {
   Object* object = READ_FIELD(this, kInstanceDescriptorsOrBitField3Offset);
   if (object->IsSmi()) {
-    return HEAP->empty_descriptor_array();
+    return GetHeap()->empty_descriptor_array();
   } else {
     return DescriptorArray::cast(object);
   }
@@ -3645,7 +3645,7 @@
 
 
 bool SharedFunctionInfo::IsInobjectSlackTrackingInProgress() {
-  return initial_map() != HEAP->undefined_value();
+  return initial_map() != GetHeap()->undefined_value();
 }
 
 
@@ -4806,6 +4806,9 @@
           kTypeFeedbackCellsOffset)
 
 
+SMI_ACCESSORS(AliasedArgumentsEntry, aliased_context_slot, kAliasedContextSlot)
+
+
 Relocatable::Relocatable(Isolate* isolate) {
   ASSERT(isolate == Isolate::Current());
   isolate_ = isolate;
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index d6e8920..d5c02f4 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -563,6 +563,12 @@
 }
 
 
+void AliasedArgumentsEntry::AliasedArgumentsEntryPrint(FILE* out) {
+  HeapObject::PrintHeader(out, "AliasedArgumentsEntry");
+  PrintF(out, "\n - aliased_context_slot: %d", aliased_context_slot());
+}
+
+
 void FixedArray::FixedArrayPrint(FILE* out) {
   HeapObject::PrintHeader(out, "FixedArray");
   PrintF(out, " - length: %d", length());
diff --git a/src/objects-visiting.h b/src/objects-visiting.h
index e6ddfed..26e79ae 100644
--- a/src/objects-visiting.h
+++ b/src/objects-visiting.h
@@ -135,7 +135,7 @@
            (base == kVisitJSObject));
     ASSERT(IsAligned(object_size, kPointerSize));
     ASSERT(kMinObjectSizeInWords * kPointerSize <= object_size);
-    ASSERT(object_size < Page::kMaxHeapObjectSize);
+    ASSERT(object_size < Page::kMaxNonCodeHeapObjectSize);
 
     const VisitorId specialization = static_cast<VisitorId>(
         base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords);
diff --git a/src/objects.cc b/src/objects.cc
index 85ba646..8941151 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -4450,10 +4450,7 @@
   }
   accessors->set(is_getter, fun);
 
-  { MaybeObject* maybe_ok = SetElementCallback(index, accessors, attributes);
-    if (maybe_ok->IsFailure()) return maybe_ok;
-  }
-  return GetHeap()->undefined_value();
+  return SetElementCallback(index, accessors, attributes);
 }
 
 
@@ -4471,12 +4468,14 @@
       Object* obj = result.GetCallbackObject();
       // Need to preserve old getters/setters.
       if (obj->IsAccessorPair()) {
-        AccessorPair::cast(obj)->set(is_getter, fun);
-        // Use set to update attributes.
-        { MaybeObject* maybe_ok = SetPropertyCallback(name, obj, attributes);
-          if (maybe_ok->IsFailure()) return maybe_ok;
+        AccessorPair* copy;
+        { MaybeObject* maybe_copy =
+              AccessorPair::cast(obj)->CopyWithoutTransitions();
+          if (!maybe_copy->To(&copy)) return maybe_copy;
         }
-        return GetHeap()->undefined_value();
+        copy->set(is_getter, fun);
+        // Use set to update attributes.
+        return SetPropertyCallback(name, copy, attributes);
       }
     }
   }
@@ -4487,10 +4486,7 @@
   }
   accessors->set(is_getter, fun);
 
-  { MaybeObject* maybe_ok = SetPropertyCallback(name, accessors, attributes);
-    if (maybe_ok->IsFailure()) return maybe_ok;
-  }
-  return GetHeap()->undefined_value();
+  return SetPropertyCallback(name, accessors, attributes);
 }
 
 
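The accessor hunk above stops mutating a found AccessorPair in place: it takes a copy via CopyWithoutTransitions, updates the copy, and installs that, so a pair presumably shared through map transitions is never changed behind another map's back. A simplified sketch of this copy-before-write pattern, using stand-in types rather than V8's:

// Sketch only: copy-before-write with simplified stand-in types
// (this AccessorPair is not V8's class).
#include <memory>

struct AccessorPair {
  void* getter = nullptr;
  void* setter = nullptr;
  // A shared pair must not be mutated in place; hand out a copy instead.
  std::unique_ptr<AccessorPair> CopyWithoutTransitions() const {
    return std::make_unique<AccessorPair>(*this);
  }
  void set(bool is_getter, void* fun) {
    (is_getter ? getter : setter) = fun;
  }
};

// Copy first, mutate the copy, and return it for installation; the
// original stays untouched for anyone else still referencing it.
std::unique_ptr<AccessorPair> UpdateAccessor(const AccessorPair& shared,
                                             bool is_getter, void* fun) {
  std::unique_ptr<AccessorPair> copy = shared.CopyWithoutTransitions();
  copy->set(is_getter, fun);
  return copy;
}
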
@@ -9248,8 +9244,10 @@
 
 MaybeObject* JSObject::SetElementWithInterceptor(uint32_t index,
                                                  Object* value,
+                                                 PropertyAttributes attributes,
                                                  StrictModeFlag strict_mode,
-                                                 bool check_prototype) {
+                                                 bool check_prototype,
+                                                 SetPropertyMode set_mode) {
   Isolate* isolate = GetIsolate();
   // Make sure that the top context does not change when doing
   // callbacks or interceptor calls.
@@ -9277,8 +9275,10 @@
   MaybeObject* raw_result =
       this_handle->SetElementWithoutInterceptor(index,
                                                 *value_handle,
+                                                attributes,
                                                 strict_mode,
-                                                check_prototype);
+                                                check_prototype,
+                                                set_mode);
   RETURN_IF_SCHEDULED_EXCEPTION(isolate);
   return raw_result;
 }
@@ -9476,7 +9476,8 @@
     if (convert_to_slow) {
       MaybeObject* result = NormalizeElements();
       if (result->IsFailure()) return result;
-      return SetDictionaryElement(index, value, strict_mode, check_prototype);
+      return SetDictionaryElement(index, value, NONE, strict_mode,
+                                  check_prototype);
     }
   }
   // Convert to fast double elements if appropriate.
@@ -9526,8 +9527,10 @@
 
 MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
                                             Object* value,
+                                            PropertyAttributes attributes,
                                             StrictModeFlag strict_mode,
-                                            bool check_prototype) {
+                                            bool check_prototype,
+                                            SetPropertyMode set_mode) {
   ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
   Isolate* isolate = GetIsolate();
   Heap* heap = isolate->heap();
@@ -9547,24 +9550,40 @@
   if (entry != SeededNumberDictionary::kNotFound) {
     Object* element = dictionary->ValueAt(entry);
     PropertyDetails details = dictionary->DetailsAt(entry);
-    if (details.type() == CALLBACKS) {
+    if (details.type() == CALLBACKS && set_mode == SET_PROPERTY) {
       return SetElementWithCallback(element, index, value, this, strict_mode);
     } else {
       dictionary->UpdateMaxNumberKey(index);
       // If a value has not been initialized we allow writing to it even if it
-      // is read-only (a declared const that has not been initialized).
-      if (!dictionary->DetailsAt(entry).IsReadOnly() ||
-          dictionary->ValueAt(entry)->IsTheHole()) {
-        dictionary->ValueAtPut(entry, value);
-      } else if (strict_mode == kStrictMode) {
-        Handle<Object> holder(this);
-        Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
-        Handle<Object> args[2] = { number, holder };
-        Handle<Object> error =
-            isolate->factory()->NewTypeError("strict_read_only_property",
-                                             HandleVector(args, 2));
-        return isolate->Throw(*error);
+      // is read-only (a declared const that has not been initialized).  If a
+      // value is being defined we skip attribute checks completely.
+      if (set_mode == DEFINE_PROPERTY) {
+        details = PropertyDetails(attributes, NORMAL, details.index());
+        dictionary->DetailsAtPut(entry, details);
+      } else if (details.IsReadOnly() && !element->IsTheHole()) {
+        if (strict_mode == kNonStrictMode) {
+          return isolate->heap()->undefined_value();
+        } else {
+          Handle<Object> holder(this);
+          Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
+          Handle<Object> args[2] = { number, holder };
+          Handle<Object> error =
+              isolate->factory()->NewTypeError("strict_read_only_property",
+                                               HandleVector(args, 2));
+          return isolate->Throw(*error);
+        }
       }
+      // Elements of the arguments object in slow mode might be slow aliases.
+      if (is_arguments && element->IsAliasedArgumentsEntry()) {
+        AliasedArgumentsEntry* entry = AliasedArgumentsEntry::cast(element);
+        Context* context = Context::cast(elements->get(0));
+        int context_index = entry->aliased_context_slot();
+        ASSERT(!context->get(context_index)->IsTheHole());
+        context->set(context_index, value);
+        // For elements that are still writable we keep slow aliasing.
+        if (!details.IsReadOnly()) value = element;
+      }
+      dictionary->ValueAtPut(entry, value);
     }
   } else {
     // Index not already used. Look for an accessor in the prototype chain.
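
The dictionary hunk above routes writes to slow-aliased arguments elements through the context: the value lands in the aliased context slot, and the dictionary keeps the alias entry for as long as the element stays writable. A self-contained sketch of that indirection, with toy stand-ins for the dictionary and context:

// Sketch only: toy stand-ins (not V8's data structures) for the slow
// alias write path above.
#include <cassert>
#include <variant>
#include <vector>

struct AliasedArgumentsEntry { int aliased_context_slot; };
using Element = std::variant<int, AliasedArgumentsEntry>;  // value or alias

struct Context { std::vector<int> slots; };

// A write through an alias updates the context slot; while the element
// remains writable, the alias entry is kept so the coupling survives.
void WriteElement(std::vector<Element>& dictionary, Context& context,
                  size_t index, int value, bool read_only) {
  if (auto* alias = std::get_if<AliasedArgumentsEntry>(&dictionary[index])) {
    context.slots[alias->aliased_context_slot] = value;
    if (!read_only) return;       // still writable: keep slow aliasing
    dictionary[index] = value;    // read-only: the alias is dropped
    return;
  }
  dictionary[index] = value;      // ordinary, unaliased element
}

int main() {
  Context context{{0, 0}};
  std::vector<Element> dictionary{AliasedArgumentsEntry{1}};
  WriteElement(dictionary, context, 0, 42, /*read_only=*/false);
  assert(context.slots[1] == 42);  // the parameter's context slot changed
  assert(std::holds_alternative<AliasedArgumentsEntry>(dictionary[0]));
}
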
@@ -9591,7 +9610,8 @@
       }
     }
     FixedArrayBase* new_dictionary;
-    MaybeObject* maybe = dictionary->AtNumberPut(index, value);
+    PropertyDetails details = PropertyDetails(attributes, NORMAL);
+    MaybeObject* maybe = dictionary->AddNumberEntry(index, value, details);
     if (!maybe->To<FixedArrayBase>(&new_dictionary)) return maybe;
     if (dictionary != SeededNumberDictionary::cast(new_dictionary)) {
       if (is_arguments) {
@@ -9732,18 +9752,22 @@
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
   ASSERT(HasDictionaryElements());
-  return SetElement(index, value, strict_mode, check_prototype);
+  return SetElement(index, value, NONE, strict_mode, check_prototype);
 }
 
 
 MaybeObject* JSReceiver::SetElement(uint32_t index,
                                     Object* value,
+                                    PropertyAttributes attributes,
                                     StrictModeFlag strict_mode,
                                     bool check_proto) {
-  return IsJSProxy()
-      ? JSProxy::cast(this)->SetElementWithHandler(index, value, strict_mode)
-      : JSObject::cast(this)->SetElement(index, value, strict_mode, check_proto)
-  ;
+  if (IsJSProxy()) {
+    return JSProxy::cast(this)->SetElementWithHandler(
+        index, value, strict_mode);
+  } else {
+    return JSObject::cast(this)->SetElement(
+        index, value, attributes, strict_mode, check_proto);
+  }
 }
 
 
@@ -9752,16 +9776,19 @@
                                        Handle<Object> value,
                                        StrictModeFlag strict_mode) {
   ASSERT(!object->HasExternalArrayElements());
-  CALL_HEAP_FUNCTION(object->GetIsolate(),
-                     object->SetElement(index, *value, strict_mode, false),
-                     Object);
+  CALL_HEAP_FUNCTION(
+      object->GetIsolate(),
+      object->SetElement(index, *value, NONE, strict_mode, false),
+      Object);
 }
 
 
 Handle<Object> JSObject::SetElement(Handle<JSObject> object,
                                     uint32_t index,
                                     Handle<Object> value,
-                                    StrictModeFlag strict_mode) {
+                                    PropertyAttributes attr,
+                                    StrictModeFlag strict_mode,
+                                    SetPropertyMode set_mode) {
   if (object->HasExternalArrayElements()) {
     if (!value->IsSmi() && !value->IsHeapNumber() && !value->IsUndefined()) {
       bool has_exception;
@@ -9770,16 +9797,19 @@
       value = number;
     }
   }
-  CALL_HEAP_FUNCTION(object->GetIsolate(),
-                     object->SetElement(index, *value, strict_mode, true),
-                     Object);
+  CALL_HEAP_FUNCTION(
+      object->GetIsolate(),
+      object->SetElement(index, *value, attr, strict_mode, true, set_mode),
+      Object);
 }
 
 
 MaybeObject* JSObject::SetElement(uint32_t index,
                                   Object* value,
+                                  PropertyAttributes attributes,
                                   StrictModeFlag strict_mode,
-                                  bool check_prototype) {
+                                  bool check_prototype,
+                                  SetPropertyMode set_mode) {
   // Check access rights if needed.
   if (IsAccessCheckNeeded()) {
     Heap* heap = GetHeap();
@@ -9797,29 +9827,59 @@
     ASSERT(proto->IsJSGlobalObject());
     return JSObject::cast(proto)->SetElement(index,
                                              value,
+                                             attributes,
                                              strict_mode,
-                                             check_prototype);
+                                             check_prototype,
+                                             set_mode);
+  }
+
+  // Don't allow element properties to be redefined for external arrays.
+  if (HasExternalArrayElements() && set_mode == DEFINE_PROPERTY) {
+    Isolate* isolate = GetHeap()->isolate();
+    Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
+    Handle<Object> args[] = { Handle<Object>(this), number };
+    Handle<Object> error = isolate->factory()->NewTypeError(
+        "redef_external_array_element", HandleVector(args, ARRAY_SIZE(args)));
+    return isolate->Throw(*error);
+  }
+
+  // Normalize the elements to enable attributes on the property.
+  if ((attributes & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0) {
+    SeededNumberDictionary* dictionary;
+    MaybeObject* maybe_object = NormalizeElements();
+    if (!maybe_object->To(&dictionary)) return maybe_object;
+    // Make sure that we never go back to fast case.
+    dictionary->set_requires_slow_elements();
   }
 
   // Check for lookup interceptor
   if (HasIndexedInterceptor()) {
     return SetElementWithInterceptor(index,
                                      value,
+                                     attributes,
                                      strict_mode,
-                                     check_prototype);
+                                     check_prototype,
+                                     set_mode);
   }
 
   return SetElementWithoutInterceptor(index,
                                       value,
+                                      attributes,
                                       strict_mode,
-                                      check_prototype);
+                                      check_prototype,
+                                      set_mode);
 }
 
 
 MaybeObject* JSObject::SetElementWithoutInterceptor(uint32_t index,
                                                     Object* value,
+                                                    PropertyAttributes attr,
                                                     StrictModeFlag strict_mode,
-                                                    bool check_prototype) {
+                                                    bool check_prototype,
+                                                    SetPropertyMode set_mode) {
+  ASSERT(HasDictionaryElements() ||
+         HasDictionaryArgumentsElements() ||
+         (attr & (DONT_DELETE | DONT_ENUM | READ_ONLY)) == 0);
   Isolate* isolate = GetIsolate();
   switch (GetElementsKind()) {
     case FAST_SMI_ONLY_ELEMENTS:
@@ -9867,7 +9927,8 @@
       return array->SetValue(index, value);
     }
     case DICTIONARY_ELEMENTS:
-      return SetDictionaryElement(index, value, strict_mode, check_prototype);
+      return SetDictionaryElement(index, value, attr, strict_mode,
+                                  check_prototype, set_mode);
     case NON_STRICT_ARGUMENTS_ELEMENTS: {
       FixedArray* parameter_map = FixedArray::cast(elements());
       uint32_t length = parameter_map->length();
@@ -9878,17 +9939,23 @@
         int context_index = Smi::cast(probe)->value();
         ASSERT(!context->get(context_index)->IsTheHole());
         context->set(context_index, value);
-        return value;
-      } else {
-        // Object is not mapped, defer to the arguments.
-        FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
-        if (arguments->IsDictionary()) {
-          return SetDictionaryElement(index, value, strict_mode,
-                                      check_prototype);
-        } else {
-          return SetFastElement(index, value, strict_mode, check_prototype);
+        // Redefining attributes of an aliased element destroys fast aliasing.
+        if (set_mode == SET_PROPERTY || attr == NONE) return value;
+        parameter_map->set_the_hole(index + 2);
+        // For elements that are still writable we re-establish slow aliasing.
+        if ((attr & READ_ONLY) == 0) {
+          MaybeObject* maybe_entry =
+              isolate->heap()->AllocateAliasedArgumentsEntry(context_index);
+          if (!maybe_entry->ToObject(&value)) return maybe_entry;
         }
       }
+      FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+      if (arguments->IsDictionary()) {
+        return SetDictionaryElement(index, value, attr, strict_mode,
+                                    check_prototype, set_mode);
+      } else {
+        return SetFastElement(index, value, strict_mode, check_prototype);
+      }
     }
   }
   // All possible cases have been handled above. Add a return to avoid the
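
SetElement above now normalizes the backing store whenever any of DONT_DELETE, DONT_ENUM or READ_ONLY is requested, because fast element stores carry no per-element attributes; set_requires_slow_elements() then pins the object in dictionary mode. A toy illustration of that one-way transition (not V8's real element code):

// Sketch only: a toy one-way transition to dictionary ("slow") mode.
#include <cassert>
#include <map>
#include <utility>
#include <vector>

struct ElementsStore {
  std::vector<int> fast;                         // values only, no attributes
  std::map<unsigned, std::pair<int, int>> slow;  // index -> (value, attrs)
  bool is_slow = false;
  bool requires_slow = false;

  void Normalize() {                // move every fast element into the map
    if (is_slow) return;
    for (unsigned i = 0; i < fast.size(); ++i) slow[i] = {fast[i], 0};
    fast.clear();
    is_slow = true;
  }

  void DefineElement(unsigned index, int value, int attributes) {
    if (attributes != 0) {
      Normalize();
      requires_slow = true;         // make sure we never go back to fast case
    }
    if (is_slow) {
      slow[index] = {value, attributes};
    } else {
      if (index >= fast.size()) fast.resize(index + 1);
      fast[index] = value;
    }
  }
};

int main() {
  ElementsStore store;
  store.DefineElement(0, 7, 0);                  // no attributes: stays fast
  assert(!store.is_slow);
  store.DefineElement(1, 9, /*READ_ONLY=*/1);    // attributes force slow mode
  assert(store.is_slow && store.requires_slow);
}
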
diff --git a/src/objects.h b/src/objects.h
index d870cce..632e7d4 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -440,7 +440,8 @@
   V(SCRIPT, Script, script)                                                    \
   V(CODE_CACHE, CodeCache, code_cache)                                         \
   V(POLYMORPHIC_CODE_CACHE, PolymorphicCodeCache, polymorphic_code_cache)      \
-  V(TYPE_FEEDBACK_INFO, TypeFeedbackInfo, type_feedback_info)
+  V(TYPE_FEEDBACK_INFO, TypeFeedbackInfo, type_feedback_info)                  \
+  V(ALIASED_ARGUMENTS_ENTRY, AliasedArgumentsEntry, aliased_arguments_entry)
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 #define STRUCT_LIST_DEBUGGER(V)                                                \
@@ -596,6 +597,7 @@
   CODE_CACHE_TYPE,
   POLYMORPHIC_CODE_CACHE_TYPE,
   TYPE_FEEDBACK_INFO_TYPE,
+  ALIASED_ARGUMENTS_ENTRY_TYPE,
   // The following two instance types are only used when ENABLE_DEBUGGER_SUPPORT
   // is defined. However as include/v8.h contain some of the instance type
   // constants always having them avoids them getting different numbers
@@ -1348,6 +1350,16 @@
 };
 
 
+// Indicates whether a property should be set or (re)defined.  Setting of a
+// property causes attributes to remain unchanged, writability to be checked
+// and callbacks to be called.  Defining of a property causes attributes to
+// be updated and callbacks to be overridden.
+enum SetPropertyMode {
+  SET_PROPERTY,
+  DEFINE_PROPERTY
+};
+
+
 // JSReceiver includes types on which properties can be defined, i.e.,
 // JSObject and JSProxy.
 class JSReceiver: public HeapObject {
@@ -1386,6 +1398,7 @@
   // Can cause GC, or return failure if GC is required.
   MUST_USE_RESULT MaybeObject* SetElement(uint32_t index,
                                           Object* value,
+                                          PropertyAttributes attributes,
                                           StrictModeFlag strict_mode,
                                           bool check_prototype);
 
@@ -1739,10 +1752,13 @@
                                               StrictModeFlag strict_mode,
                                               bool check_prototype);
 
-  MUST_USE_RESULT MaybeObject* SetDictionaryElement(uint32_t index,
-                                                    Object* value,
-                                                    StrictModeFlag strict_mode,
-                                                    bool check_prototype);
+  MUST_USE_RESULT MaybeObject* SetDictionaryElement(
+      uint32_t index,
+      Object* value,
+      PropertyAttributes attributes,
+      StrictModeFlag strict_mode,
+      bool check_prototype,
+      SetPropertyMode set_mode = SET_PROPERTY);
 
   MUST_USE_RESULT MaybeObject* SetFastDoubleElement(
       uint32_t index,
@@ -1750,23 +1766,28 @@
       StrictModeFlag strict_mode,
       bool check_prototype = true);
 
-
   static Handle<Object> SetOwnElement(Handle<JSObject> object,
                                       uint32_t index,
                                       Handle<Object> value,
                                       StrictModeFlag strict_mode);
 
   // Empty handle is returned if the element cannot be set to the given value.
-  static MUST_USE_RESULT Handle<Object> SetElement(Handle<JSObject> object,
-                                                   uint32_t index,
-                                                   Handle<Object> value,
-                                                   StrictModeFlag strict_mode);
+  static MUST_USE_RESULT Handle<Object> SetElement(
+      Handle<JSObject> object,
+      uint32_t index,
+      Handle<Object> value,
+      PropertyAttributes attr,
+      StrictModeFlag strict_mode,
+      SetPropertyMode set_mode = SET_PROPERTY);
 
   // A Failure object is returned if GC is needed.
-  MUST_USE_RESULT MaybeObject* SetElement(uint32_t index,
-                                          Object* value,
-                                          StrictModeFlag strict_mode,
-                                          bool check_prototype);
+  MUST_USE_RESULT MaybeObject* SetElement(
+      uint32_t index,
+      Object* value,
+      PropertyAttributes attributes,
+      StrictModeFlag strict_mode,
+      bool check_prototype = true,
+      SetPropertyMode set_mode = SET_PROPERTY);
 
   // Returns the index'th element.
   // The undefined object if index is out of bounds.
@@ -2087,13 +2108,17 @@
   MUST_USE_RESULT MaybeObject* SetElementWithInterceptor(
       uint32_t index,
       Object* value,
+      PropertyAttributes attributes,
       StrictModeFlag strict_mode,
-      bool check_prototype);
+      bool check_prototype,
+      SetPropertyMode set_mode);
   MUST_USE_RESULT MaybeObject* SetElementWithoutInterceptor(
       uint32_t index,
       Object* value,
+      PropertyAttributes attributes,
       StrictModeFlag strict_mode,
-      bool check_prototype);
+      bool check_prototype,
+      SetPropertyMode set_mode);
 
   // Searches the prototype chain for a callback setter and sets the property
   // with the setter if it finds one. The '*found' flag indicates whether
@@ -6412,6 +6437,39 @@
 };
 
 
+// Representation of a slow alias as part of a non-strict arguments object.
+// For fast aliases (if HasNonStrictArgumentsElements()):
+// - the parameter map contains an index into the context
+// - all attributes of the element have default values
+// For slow aliases (if HasDictionaryArgumentsElements()):
+// - the parameter map contains no fast alias mapping (i.e. the hole)
+// - this struct (in the slow backing store) contains an index into the context
+// - all attributes are available as part of the property details
+class AliasedArgumentsEntry: public Struct {
+ public:
+  inline int aliased_context_slot();
+  inline void set_aliased_context_slot(int count);
+
+  static inline AliasedArgumentsEntry* cast(Object* obj);
+
+#ifdef OBJECT_PRINT
+  inline void AliasedArgumentsEntryPrint() {
+    AliasedArgumentsEntryPrint(stdout);
+  }
+  void AliasedArgumentsEntryPrint(FILE* out);
+#endif
+#ifdef DEBUG
+  void AliasedArgumentsEntryVerify();
+#endif
+
+  static const int kAliasedContextSlot = HeapObject::kHeaderSize;
+  static const int kSize = kAliasedContextSlot + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(AliasedArgumentsEntry);
+};
+
+
 enum AllowNullsFlag {ALLOW_NULLS, DISALLOW_NULLS};
 enum RobustnessFlag {ROBUST_STRING_TRAVERSAL, FAST_STRING_TRAVERSAL};
 
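A small demonstration of the split the new SetPropertyMode enum encodes, following the comment above: SET_PROPERTY honours READ_ONLY and leaves attributes untouched, while DEFINE_PROPERTY skips the writability check and rewrites attributes. Stand-in types only, not V8's property machinery:

// Sketch only: the two modes' semantics on a toy property.
#include <cassert>

enum SetPropertyMode { SET_PROPERTY, DEFINE_PROPERTY };

struct Property {
  int value = 0;
  bool read_only = false;
};

// SET_PROPERTY honours READ_ONLY and leaves attributes unchanged;
// DEFINE_PROPERTY skips the writability check and rewrites attributes.
bool Write(Property* p, int value, bool make_read_only, SetPropertyMode mode) {
  if (mode == SET_PROPERTY && p->read_only) return false;  // write rejected
  p->value = value;
  if (mode == DEFINE_PROPERTY) p->read_only = make_read_only;
  return true;
}

int main() {
  Property p;
  assert(Write(&p, 1, true, DEFINE_PROPERTY));   // define: value + attribute
  assert(!Write(&p, 2, false, SET_PROPERTY));    // set: blocked by READ_ONLY
  assert(Write(&p, 3, false, DEFINE_PROPERTY));  // redefine: always allowed
  assert(p.value == 3 && !p.read_only);
}
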
diff --git a/src/platform-cygwin.cc b/src/platform-cygwin.cc
index 79134da..2dc1ed8 100644
--- a/src/platform-cygwin.cc
+++ b/src/platform-cygwin.cc
@@ -355,6 +355,17 @@
 }
 
 
+bool VirtualMemory::Guard(void* address) {
+  if (NULL == VirtualAlloc(address,
+                           OS::CommitPageSize(),
+                           MEM_COMMIT,
+                           PAGE_READONLY | PAGE_GUARD)) {
+    return false;
+  }
+  return true;
+}
+
+
 class Thread::PlatformData : public Malloced {
  public:
   PlatformData() : thread_(kNoThread) {}
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index a5981b1..f6a426f 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -411,6 +411,12 @@
 }
 
 
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
 void* VirtualMemory::ReserveRegion(size_t size) {
   void* result = mmap(OS::GetRandomMmapAddr(),
                       size,
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index 1429748..0da1c08 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -666,6 +666,12 @@
 }
 
 
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
 void* VirtualMemory::ReserveRegion(size_t size) {
   void* result = mmap(OS::GetRandomMmapAddr(),
                       size,
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index e367d21..89abf39 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -429,6 +429,12 @@
 }
 
 
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
 bool VirtualMemory::CommitRegion(void* address,
                                  size_t size,
                                  bool is_executable) {
diff --git a/src/platform-nullos.cc b/src/platform-nullos.cc
index 094f950..918327a 100644
--- a/src/platform-nullos.cc
+++ b/src/platform-nullos.cc
@@ -295,6 +295,12 @@
 }
 
 
+bool VirtualMemory::Guard(void* address) {
+  UNIMPLEMENTED();
+  return false;
+}
+
+
 class Thread::PlatformData : public Malloced {
  public:
   PlatformData() {
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index 7e27a01..0d69971 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -458,6 +458,12 @@
 }
 
 
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
 void* VirtualMemory::ReserveRegion(size_t size) {
   void* result = mmap(GetRandomMmapAddr(),
                       size,
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index 349da01..004a6ed 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -401,6 +401,12 @@
 }
 
 
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
 void* VirtualMemory::ReserveRegion(size_t size) {
   void* result = mmap(OS::GetRandomMmapAddr(),
                       size,
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 6f77b3b..e9e9924 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -1511,6 +1511,17 @@
 }
 
 
+bool VirtualMemory::Guard(void* address) {
+  if (NULL == VirtualAlloc(address,
+                           OS::CommitPageSize(),
+                           MEM_COMMIT,
+                           PAGE_READONLY | PAGE_GUARD)) {
+    return false;
+  }
+  return true;
+}
+
+
 bool VirtualMemory::UncommitRegion(void* base, size_t size) {
   return VirtualFree(base, size, MEM_DECOMMIT) != 0;
 }
diff --git a/src/platform.h b/src/platform.h
index a0186d5..38e633a 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -356,6 +356,9 @@
   // Uncommit real memory.  Returns whether the operation succeeded.
   bool Uncommit(void* address, size_t size);
 
+  // Creates a single guard page at the given address.
+  bool Guard(void* address);
+
   void Release() {
     ASSERT(IsReserved());
     // Notice: Order is important here. The VirtualMemory object might live
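
The POSIX ports above implement VirtualMemory::Guard via OS::Guard over one commit-sized page, while the Windows and Cygwin ports use VirtualAlloc with PAGE_READONLY | PAGE_GUARD. Assuming the POSIX OS::Guard amounts to revoking access with mprotect (its body is not shown in this diff), a minimal sketch:

// Sketch only: a guard page via mprotect(PROT_NONE) on one page; that
// OS::Guard reduces to this is an assumption, not shown in the patch.
#include <sys/mman.h>
#include <unistd.h>
#include <cassert>

static size_t CommitPageSize() {
  return static_cast<size_t>(sysconf(_SC_PAGESIZE));
}

// Revoke all access to a single page; any stray read, write or execute
// into it now faults immediately instead of corrupting a neighbour.
static bool GuardPage(void* address) {
  return mprotect(address, CommitPageSize(), PROT_NONE) == 0;
}

int main() {
  size_t page = CommitPageSize();
  void* block = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(block != MAP_FAILED);
  assert(GuardPage(block));  // the first page is now inaccessible
  munmap(block, 2 * page);
}
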
diff --git a/src/preparser.h b/src/preparser.h
index 886d81a..1455561 100644
--- a/src/preparser.h
+++ b/src/preparser.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,6 +28,7 @@
 #ifndef V8_PREPARSER_H
 #define V8_PREPARSER_H
 
+#include "hashmap.h"
 #include "token.h"
 #include "scanner.h"
 
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 1ba68a1..b936e79 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -3293,29 +3293,26 @@
   for (int i = 0; i < root_index; ++i) (*dominators)[i] = kNoDominator;
   (*dominators)[root_index] = root_index;
 
-  // We use time_stamps array to stamp entries with the iteration number
-  // when the dominance for the entry has been updated.
-  ScopedVector<int> time_stamps(entries_length);
-  for (int i = 0; i < entries_length; ++i) time_stamps[i] = -1;
+  // The affected array is used to mark those entries that may be
+  // affected because the dominator of one of their retainers changed.
+  ScopedVector<bool> affected(entries_length);
+  for (int i = 0; i < entries_length; ++i) affected[i] = false;
   Vector<HeapGraphEdge> children = entries[root_index]->children();
   for (int i = 0; i < children.length(); ++i) {
-    // Mark the root direct children as affected on iteration zero.
-    time_stamps[children[i].to()->ordered_index()] = 0;
+    // Mark the root's direct children as affected.
+    affected[children[i].to()->ordered_index()] = true;
   }
 
   int changed = 1;
-  int iteration = 0;
   const int base_progress_counter = progress_counter_;
   while (changed != 0) {
-    ++iteration;
     changed = 0;
     for (int i = root_index - 1; i >= 0; --i) {
       // If dominator of the entry has already been set to root,
       // then it can't propagate any further.
       if ((*dominators)[i] == root_index) continue;
-      // If no retainers of the entry had been updated on current
-      // or previous iteration, then this entry is not affected.
-      if (time_stamps[i] < iteration - 1) continue;
+      if (!affected[i]) continue;
+      affected[i] = false;
       int new_idom_index = kNoDominator;
       Vector<HeapGraphEdge*> rets = entries[i]->retainers();
       for (int j = 0; j < rets.length(); ++j) {
@@ -3336,7 +3333,7 @@
         ++changed;
         Vector<HeapGraphEdge> children = entries[i]->children();
         for (int j = 0; j < children.length(); ++j) {
-          time_stamps[children[j].to()->ordered_index()] = iteration;
+          affected[children[j].to()->ordered_index()] = true;
         }
       }
     }
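
The profiler hunks above replace per-iteration time stamps with a boolean affected set: an entry is revisited only after the dominator of one of its retainers has changed, the standard worklist trick for iterative dominator computation. A generic, self-contained sketch under the same ordering assumption the snapshot code relies on (a dominator always has a higher index than the nodes it dominates, root last); the Intersect helper is the usual chain-walking intersection, not code from this patch:

// Sketch only: the affected-set fixed point on a generic graph, not
// V8's HeapEntry structures.
#include <vector>

const int kNoDominator = -1;

static int Intersect(const std::vector<int>& dom, int a, int b) {
  while (a != b) {            // walk both chains upward until they meet
    while (a < b) a = dom[a];
    while (b < a) b = dom[b];
  }
  return a;
}

void BuildDominatorTree(const std::vector<std::vector<int>>& retainers,
                        const std::vector<std::vector<int>>& children,
                        std::vector<int>* dominators) {
  const int n = static_cast<int>(retainers.size());
  const int root = n - 1;
  dominators->assign(n, kNoDominator);
  (*dominators)[root] = root;

  std::vector<bool> affected(n, false);
  for (int c : children[root]) affected[c] = true;  // seed: root's children

  for (bool changed = true; changed;) {
    changed = false;
    for (int i = root - 1; i >= 0; --i) {
      if ((*dominators)[i] == root) continue;    // cannot improve past root
      if (!affected[i]) continue;                // no retainer changed: skip
      affected[i] = false;
      int new_idom = kNoDominator;
      for (int r : retainers[i]) {
        if ((*dominators)[r] == kNoDominator) continue;
        new_idom = (new_idom == kNoDominator)
                       ? r : Intersect(*dominators, new_idom, r);
        if (new_idom == root) break;             // root dominates everything
      }
      if (new_idom != kNoDominator && (*dominators)[i] != new_idom) {
        (*dominators)[i] = new_idom;
        changed = true;
        for (int c : children[i]) affected[c] = true;  // ripple downstream
      }
    }
  }
}
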
diff --git a/src/runtime.cc b/src/runtime.cc
index 9597681..b377e6e 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -1039,7 +1039,7 @@
         elms->set(IS_ACCESSOR_INDEX, heap->false_value());
         elms->set(VALUE_INDEX, *substr);
         elms->set(WRITABLE_INDEX, heap->false_value());
-        elms->set(ENUMERABLE_INDEX,  heap->false_value());
+        elms->set(ENUMERABLE_INDEX,  heap->true_value());
         elms->set(CONFIGURABLE_INDEX, heap->false_value());
         return *desc;
       }
@@ -4355,53 +4355,6 @@
   RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
   PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
 
-  // Check if this is an element.
-  uint32_t index;
-  bool is_element = name->AsArrayIndex(&index);
-
-  // Special case for elements if any of the flags might be involved.
-  // If elements are in fast case we always implicitly assume that:
-  // DONT_DELETE: false, DONT_ENUM: false, READ_ONLY: false.
-  if (is_element && (attr != NONE ||
-      js_object->HasLocalElement(index) == JSObject::DICTIONARY_ELEMENT)) {
-    // Normalize the elements to enable attributes on the property.
-    if (js_object->IsJSGlobalProxy()) {
-      // We do not need to do access checks here since these has already
-      // been performed by the call to GetOwnProperty.
-      Handle<Object> proto(js_object->GetPrototype());
-      // If proxy is detached, ignore the assignment. Alternatively,
-      // we could throw an exception.
-      if (proto->IsNull()) return *obj_value;
-      js_object = Handle<JSObject>::cast(proto);
-    }
-
-    // Don't allow element properties to be redefined on objects with external
-    // array elements.
-    if (js_object->HasExternalArrayElements()) {
-      Handle<Object> args[2] = { js_object, name };
-      Handle<Object> error =
-          isolate->factory()->NewTypeError("redef_external_array_element",
-                                           HandleVector(args, 2));
-      return isolate->Throw(*error);
-    }
-
-    Handle<SeededNumberDictionary> dictionary =
-        JSObject::NormalizeElements(js_object);
-    // Make sure that we never go back to fast case.
-    dictionary->set_requires_slow_elements();
-    PropertyDetails details = PropertyDetails(attr, NORMAL);
-    Handle<SeededNumberDictionary> extended_dictionary =
-        SeededNumberDictionary::Set(dictionary, index, obj_value, details);
-    if (*extended_dictionary != *dictionary) {
-      if (js_object->GetElementsKind() == NON_STRICT_ARGUMENTS_ELEMENTS) {
-        FixedArray::cast(js_object->elements())->set(1, *extended_dictionary);
-      } else {
-        js_object->set_elements(*extended_dictionary);
-      }
-    }
-    return *obj_value;
-  }
-
   LookupResult result(isolate);
   js_object->LocalLookupRealNamedProperty(*name, &result);
 
@@ -4457,35 +4410,13 @@
 }
 
 
-// Special case for elements if any of the flags are true.
-// If elements are in fast case we always implicitly assume that:
-// DONT_DELETE: false, DONT_ENUM: false, READ_ONLY: false.
-static MaybeObject* NormalizeObjectSetElement(Isolate* isolate,
-                                              Handle<JSObject> js_object,
-                                              uint32_t index,
-                                              Handle<Object> value,
-                                              PropertyAttributes attr) {
-  // Normalize the elements to enable attributes on the property.
-  Handle<SeededNumberDictionary> dictionary =
-      JSObject::NormalizeElements(js_object);
-  // Make sure that we never go back to fast case.
-  dictionary->set_requires_slow_elements();
-  PropertyDetails details = PropertyDetails(attr, NORMAL);
-  Handle<SeededNumberDictionary> extended_dictionary =
-      SeededNumberDictionary::Set(dictionary, index, value, details);
-  if (*extended_dictionary != *dictionary) {
-    js_object->set_elements(*extended_dictionary);
-  }
-  return *value;
-}
-
-
 MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
                                         Handle<Object> object,
                                         Handle<Object> key,
                                         Handle<Object> value,
                                         PropertyAttributes attr,
                                         StrictModeFlag strict_mode) {
+  SetPropertyMode set_mode = attr == NONE ? SET_PROPERTY : DEFINE_PROPERTY;
   HandleScope scope(isolate);
 
   if (object->IsUndefined() || object->IsNull()) {
@@ -4523,12 +4454,8 @@
       return *value;
     }
 
-    if (((attr & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0)) {
-      return NormalizeObjectSetElement(isolate, js_object, index, value, attr);
-    }
-
-    Handle<Object> result =
-        JSObject::SetElement(js_object, index, value, strict_mode);
+    Handle<Object> result = JSObject::SetElement(
+        js_object, index, value, attr, strict_mode, set_mode);
     if (result.is_null()) return Failure::Exception();
     return *value;
   }
@@ -4536,15 +4463,8 @@
   if (key->IsString()) {
     Handle<Object> result;
     if (Handle<String>::cast(key)->AsArrayIndex(&index)) {
-      if (((attr & (DONT_DELETE | DONT_ENUM | READ_ONLY)) != 0)) {
-        return NormalizeObjectSetElement(isolate,
-                                         js_object,
-                                         index,
-                                         value,
-                                         attr);
-      }
-      result =
-          JSObject::SetElement(js_object, index, value, strict_mode);
+      result = JSObject::SetElement(
+          js_object, index, value, attr, strict_mode, set_mode);
     } else {
       Handle<String> key_string = Handle<String>::cast(key);
       key_string->TryFlatten();
@@ -4562,7 +4482,8 @@
   Handle<String> name = Handle<String>::cast(converted);
 
   if (name->AsArrayIndex(&index)) {
-    return js_object->SetElement(index, *value, strict_mode, true);
+    return js_object->SetElement(
+        index, *value, attr, strict_mode, true, set_mode);
   } else {
     return js_object->SetProperty(*name, *value, attr, strict_mode);
   }
@@ -4590,12 +4511,14 @@
       return *value;
     }
 
-    return js_object->SetElement(index, *value, kNonStrictMode, true);
+    return js_object->SetElement(
+        index, *value, attr, kNonStrictMode, false, DEFINE_PROPERTY);
   }
 
   if (key->IsString()) {
     if (Handle<String>::cast(key)->AsArrayIndex(&index)) {
-      return js_object->SetElement(index, *value, kNonStrictMode, true);
+      return js_object->SetElement(
+          index, *value, attr, kNonStrictMode, false, DEFINE_PROPERTY);
     } else {
       Handle<String> key_string = Handle<String>::cast(key);
       key_string->TryFlatten();
@@ -4612,7 +4535,8 @@
   Handle<String> name = Handle<String>::cast(converted);
 
   if (name->AsArrayIndex(&index)) {
-    return js_object->SetElement(index, *value, kNonStrictMode, true);
+    return js_object->SetElement(
+        index, *value, attr, kNonStrictMode, false, DEFINE_PROPERTY);
   } else {
     return js_object->SetLocalPropertyIgnoreAttributes(*name, *value, attr);
   }
@@ -10316,9 +10240,9 @@
   RETURN_IF_EMPTY_HANDLE(isolate, tmp2);
 
   RETURN_IF_EMPTY_HANDLE(
-      isolate, JSObject::SetElement(jsobject, index1, tmp2, kStrictMode));
+      isolate, JSObject::SetElement(jsobject, index1, tmp2, NONE, kStrictMode));
   RETURN_IF_EMPTY_HANDLE(
-      isolate, JSObject::SetElement(jsobject, index2, tmp1, kStrictMode));
+      isolate, JSObject::SetElement(jsobject, index2, tmp1, NONE, kStrictMode));
 
   return isolate->heap()->undefined_value();
 }
diff --git a/src/scopes.cc b/src/scopes.cc
index 9835108..8d71f8a 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -40,26 +40,6 @@
 namespace internal {
 
 // ----------------------------------------------------------------------------
-// A Zone allocator for use with LocalsMap.
-
-// TODO(isolates): It is probably worth it to change the Allocator class to
-//                 take a pointer to an isolate.
-class ZoneAllocator: public Allocator {
- public:
-  /* nothing to do */
-  virtual ~ZoneAllocator()  {}
-
-  virtual void* New(size_t size)  { return ZONE->New(static_cast<int>(size)); }
-
-  /* ignored - Zone is freed in one fell swoop */
-  virtual void Delete(void* p)  {}
-};
-
-
-static ZoneAllocator* LocalsMapAllocator = ::new ZoneAllocator();
-
-
-// ----------------------------------------------------------------------------
 // Implementation of LocalsMap
 //
 // Note: We are storing the handle locations as key values in the hash map.
@@ -77,7 +57,7 @@
 }
 
 
-VariableMap::VariableMap() : HashMap(Match, LocalsMapAllocator, 8) {}
+VariableMap::VariableMap() : ZoneHashMap(Match, 8) {}
 VariableMap::~VariableMap() {}
 
 
@@ -88,7 +68,7 @@
     bool is_valid_lhs,
     Variable::Kind kind,
     InitializationFlag initialization_flag) {
-  HashMap::Entry* p = HashMap::Lookup(name.location(), name->Hash(), true);
+  Entry* p = ZoneHashMap::Lookup(name.location(), name->Hash(), true);
   if (p->value == NULL) {
     // The variable has not been declared yet -> insert it.
     ASSERT(p->key == name.location());
@@ -104,7 +84,7 @@
 
 
 Variable* VariableMap::Lookup(Handle<String> name) {
-  HashMap::Entry* p = HashMap::Lookup(name.location(), name->Hash(), false);
+  Entry* p = ZoneHashMap::Lookup(name.location(), name->Hash(), false);
   if (p != NULL) {
     ASSERT(*reinterpret_cast<String**>(p->key) == *name);
     ASSERT(p->value != NULL);
diff --git a/src/scopes.h b/src/scopes.h
index 5b645f2..30c95ee 100644
--- a/src/scopes.h
+++ b/src/scopes.h
@@ -29,7 +29,7 @@
 #define V8_SCOPES_H_
 
 #include "ast.h"
-#include "hashmap.h"
+#include "zone.h"
 
 namespace v8 {
 namespace internal {
@@ -38,7 +38,7 @@
 
 
 // A hash map to support fast variable declaration and lookup.
-class VariableMap: public HashMap {
+class VariableMap: public ZoneHashMap {
  public:
   VariableMap();
 
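VariableMap now derives from ZoneHashMap, so its entries come out of a zone and the deleted ZoneAllocator's no-op Delete becomes implicit: nothing is freed per entry because the whole zone goes at once. A toy bump-pointer zone illustrating that allocation model (V8's Zone and ZoneHashMap are real classes; this is neither):

// Sketch only: a toy bump-pointer zone. Allocation is a pointer
// increment, and there is no per-object Delete: destroying the zone
// releases everything at once, which is why hash-map entries parked in
// a zone never need individual freeing.
#include <cstddef>
#include <cstdint>
#include <vector>

class Zone {
 public:
  explicit Zone(size_t capacity) : buffer_(capacity), top_(0) {}

  void* New(size_t size) {
    size = (size + 7) & ~static_cast<size_t>(7);       // 8-byte align
    if (top_ + size > buffer_.size()) return nullptr;  // toy: no growth
    void* result = buffer_.data() + top_;
    top_ += size;
    return result;
  }
  // No Delete(): the zone is freed in one fell swoop.

 private:
  std::vector<uint8_t> buffer_;
  size_t top_;
};
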
diff --git a/src/serialize.cc b/src/serialize.cc
index d9fc2b7..81a94dd 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -1088,9 +1088,10 @@
       external_reference_encoder_(new ExternalReferenceEncoder),
       large_object_total_(0),
       root_index_wave_front_(0) {
+  isolate_ = Isolate::Current();
   // The serializer is meant to be used only to generate initial heap images
   // from a context in which there is only one isolate.
-  ASSERT(Isolate::Current()->IsDefaultIsolate());
+  ASSERT(isolate_->IsDefaultIsolate());
   for (int i = 0; i <= LAST_SPACE; i++) {
     fullness_[i] = 0;
   }
@@ -1642,8 +1643,8 @@
     // serialized address.
     CHECK(IsPowerOf2(Page::kPageSize));
     int used_in_this_page = (fullness_[space] & (Page::kPageSize - 1));
-    CHECK(size <= Page::kObjectAreaSize);
-    if (used_in_this_page + size > Page::kObjectAreaSize) {
+    CHECK(size <= SpaceAreaSize(space));
+    if (used_in_this_page + size > SpaceAreaSize(space)) {
       *new_page = true;
       fullness_[space] = RoundUp(fullness_[space], Page::kPageSize);
     }
@@ -1654,4 +1655,13 @@
 }
 
 
+int Serializer::SpaceAreaSize(int space) {
+  if (space == CODE_SPACE) {
+    return isolate_->memory_allocator()->CodePageAreaSize();
+  } else {
+    return Page::kPageSize - Page::kObjectStartOffset;
+  }
+}
+
+
 } }  // namespace v8::internal
diff --git a/src/serialize.h b/src/serialize.h
index 72eed5a..02bf58a 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -556,6 +556,9 @@
     return external_reference_encoder_->Encode(addr);
   }
 
+  int SpaceAreaSize(int space);
+
+  Isolate* isolate_;
   // Keep track of the fullness of each space in order to generate
   // relative addresses for back references.  Large objects are
   // just numbered sequentially since relative addresses make no
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index d0cddeb..3709009 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -166,10 +166,8 @@
   Page* page = reinterpret_cast<Page*>(chunk);
   ASSERT(chunk->size() == static_cast<size_t>(kPageSize));
   ASSERT(chunk->owner() == owner);
-  owner->IncreaseCapacity(Page::kObjectAreaSize);
-  owner->Free(page->ObjectAreaStart(),
-              static_cast<int>(page->ObjectAreaEnd() -
-                               page->ObjectAreaStart()));
+  owner->IncreaseCapacity(page->area_size());
+  owner->Free(page->area_start(), page->area_size());
 
   heap->incremental_marking()->SetOldSpacePageFlags(chunk);
 
diff --git a/src/spaces.cc b/src/spaces.cc
index 05c5876..1fbad55 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -75,8 +75,8 @@
          owner == HEAP->cell_space() ||
          owner == HEAP->code_space());
   Initialize(reinterpret_cast<PagedSpace*>(owner),
-             page->ObjectAreaStart(),
-             page->ObjectAreaEnd(),
+             page->area_start(),
+             page->area_end(),
              kOnePageOnly,
              size_func);
   ASSERT(page->WasSweptPrecisely());
@@ -108,12 +108,12 @@
     cur_page = space_->anchor();
   } else {
     cur_page = Page::FromAddress(cur_addr_ - 1);
-    ASSERT(cur_addr_ == cur_page->ObjectAreaEnd());
+    ASSERT(cur_addr_ == cur_page->area_end());
   }
   cur_page = cur_page->next_page();
   if (cur_page == space_->anchor()) return false;
-  cur_addr_ = cur_page->ObjectAreaStart();
-  cur_end_ = cur_page->ObjectAreaEnd();
+  cur_addr_ = cur_page->area_start();
+  cur_end_ = cur_page->area_end();
   ASSERT(cur_page->WasSweptPrecisely());
   return true;
 }
@@ -227,7 +227,9 @@
   }
   ASSERT(*allocated <= current.size);
   ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
-  if (!code_range_->Commit(current.start, *allocated, true)) {
+  if (!MemoryAllocator::CommitCodePage(code_range_,
+                                       current.start,
+                                       *allocated)) {
     *allocated = 0;
     return NULL;
   }
@@ -358,11 +360,17 @@
   VirtualMemory reservation;
   Address base = ReserveAlignedMemory(size, alignment, &reservation);
   if (base == NULL) return NULL;
-  if (!reservation.Commit(base,
-                          size,
-                          executable == EXECUTABLE)) {
-    return NULL;
+
+  if (executable == EXECUTABLE) {
+    CommitCodePage(&reservation, base, size);
+  } else {
+    if (!reservation.Commit(base,
+                            size,
+                            executable == EXECUTABLE)) {
+      return NULL;
+    }
   }
+
   controller->TakeControl(&reservation);
   return base;
 }
@@ -378,9 +386,14 @@
 NewSpacePage* NewSpacePage::Initialize(Heap* heap,
                                        Address start,
                                        SemiSpace* semi_space) {
+  Address area_start = start + NewSpacePage::kObjectStartOffset;
+  Address area_end = start + Page::kPageSize;
+
   MemoryChunk* chunk = MemoryChunk::Initialize(heap,
                                                start,
                                                Page::kPageSize,
+                                               area_start,
+                                               area_end,
                                                NOT_EXECUTABLE,
                                                semi_space);
   chunk->set_next_chunk(NULL);
@@ -410,6 +423,8 @@
 MemoryChunk* MemoryChunk::Initialize(Heap* heap,
                                      Address base,
                                      size_t size,
+                                     Address area_start,
+                                     Address area_end,
                                      Executability executable,
                                      Space* owner) {
   MemoryChunk* chunk = FromAddress(base);
@@ -418,6 +433,8 @@
 
   chunk->heap_ = heap;
   chunk->size_ = size;
+  chunk->area_start_ = area_start;
+  chunk->area_end_ = area_end;
   chunk->flags_ = 0;
   chunk->set_owner(owner);
   chunk->InitializeReservedMemory();
@@ -431,9 +448,13 @@
   ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
   ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
 
-  if (executable == EXECUTABLE) chunk->SetFlag(IS_EXECUTABLE);
+  if (executable == EXECUTABLE) {
+    chunk->SetFlag(IS_EXECUTABLE);
+  }
 
-  if (owner == heap->old_data_space()) chunk->SetFlag(CONTAINS_ONLY_DATA);
+  if (owner == heap->old_data_space()) {
+    chunk->SetFlag(CONTAINS_ONLY_DATA);
+  }
 
   return chunk;
 }
@@ -462,11 +483,16 @@
 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
                                             Executability executable,
                                             Space* owner) {
-  size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size;
+  size_t chunk_size;
   Heap* heap = isolate_->heap();
   Address base = NULL;
   VirtualMemory reservation;
+  Address area_start = NULL;
+  Address area_end = NULL;
   if (executable == EXECUTABLE) {
+    chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
+                         OS::CommitPageSize()) + CodePageGuardSize();
+
     // Check executable memory limit.
     if (size_executable_ + chunk_size > capacity_executable_) {
       LOG(isolate_,
@@ -494,18 +520,30 @@
       // Update executable memory size.
       size_executable_ += reservation.size();
     }
+
+#ifdef DEBUG
+    ZapBlock(base, CodePageGuardStartOffset());
+    ZapBlock(base + CodePageAreaStartOffset(), body_size);
+#endif
+    area_start = base + CodePageAreaStartOffset();
+    area_end = area_start + body_size;
   } else {
+    chunk_size = MemoryChunk::kObjectStartOffset + body_size;
     base = AllocateAlignedMemory(chunk_size,
                                  MemoryChunk::kAlignment,
                                  executable,
                                  &reservation);
 
     if (base == NULL) return NULL;
-  }
 
 #ifdef DEBUG
-  ZapBlock(base, chunk_size);
+    ZapBlock(base, chunk_size);
 #endif
+
+    area_start = base + Page::kObjectStartOffset;
+    area_end = base + chunk_size;
+  }
+
   isolate_->counters()->memory_allocated()->
       Increment(static_cast<int>(chunk_size));
 
@@ -518,6 +556,8 @@
   MemoryChunk* result = MemoryChunk::Initialize(heap,
                                                 base,
                                                 chunk_size,
+                                                area_start,
+                                                area_end,
                                                 executable,
                                                 owner);
   result->set_reserved_memory(&reservation);
@@ -527,7 +567,9 @@
 
 Page* MemoryAllocator::AllocatePage(PagedSpace* owner,
                                     Executability executable) {
-  MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner);
+  MemoryChunk* chunk = AllocateChunk(owner->AreaSize(),
+                                     executable,
+                                     owner);
 
   if (chunk == NULL) return NULL;
 
@@ -648,6 +690,65 @@
 }
 #endif
 
+
+int MemoryAllocator::CodePageGuardStartOffset() {
+  // We are guarding code pages: the first OS page after the header
+  // will be protected as non-writable.
+  return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
+}
+
+
+int MemoryAllocator::CodePageGuardSize() {
+  return static_cast<int>(OS::CommitPageSize());
+}
+
+
+int MemoryAllocator::CodePageAreaStartOffset() {
+  // We are guarding code pages: the first OS page after the header
+  // will be protected as non-writable.
+  return CodePageGuardStartOffset() + CodePageGuardSize();
+}
+
+
+int MemoryAllocator::CodePageAreaEndOffset() {
+  // We are guarding code pages: the last OS page will be protected as
+  // non-writable.
+  return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
+}
+
+
+bool MemoryAllocator::CommitCodePage(VirtualMemory* vm,
+                                     Address start,
+                                     size_t size) {
+  // Commit page header (not executable).
+  if (!vm->Commit(start,
+                  CodePageGuardStartOffset(),
+                  false)) {
+    return false;
+  }
+
+  // Create guard page after the header.
+  if (!vm->Guard(start + CodePageGuardStartOffset())) {
+    return false;
+  }
+
+  // Commit page body (executable).
+  size_t area_size = size - CodePageAreaStartOffset() - CodePageGuardSize();
+  if (!vm->Commit(start + CodePageAreaStartOffset(),
+                  area_size,
+                  true)) {
+    return false;
+  }
+
+  // Create guard page after the allocatable area.
+  if (!vm->Guard(start + CodePageAreaStartOffset() + area_size)) {
+    return false;
+  }
+
+  return true;
+}
+
+
 // -----------------------------------------------------------------------------
 // MemoryChunk implementation
 
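The CodePage* helpers above fix the layout of an executable page: a non-executable header rounded up to an OS page, a guard page, the executable area, and a trailing guard page. This is why code pages end up with a smaller allocatable area than data pages (see Serializer::SpaceAreaSize earlier in this patch). A stand-alone sketch of the arithmetic with made-up constants; the real values come from OS::CommitPageSize() and Page::kObjectStartOffset:

// Sketch only: the code-page layout, with a stand-in 4K commit page,
// a 1MB heap page, and a made-up header size.
#include <cassert>
#include <cstdio>

const int kCommitPageSize = 4096;
const int kPageSize = 1 << 20;
const int kObjectStartOffset = 256;  // stand-in for Page::kObjectStartOffset

int CodePageGuardStartOffset() {
  // Header rounded up to an OS page, so the guard can be protected alone.
  return ((kObjectStartOffset + kCommitPageSize - 1) / kCommitPageSize)
         * kCommitPageSize;
}
int CodePageGuardSize() { return kCommitPageSize; }
int CodePageAreaStartOffset() {
  return CodePageGuardStartOffset() + CodePageGuardSize();
}
int CodePageAreaEndOffset() { return kPageSize - kCommitPageSize; }

int main() {
  // header | guard | executable area | trailing guard
  printf("header: [0, %d)\n", CodePageGuardStartOffset());
  printf("guard:  [%d, %d)\n", CodePageGuardStartOffset(),
         CodePageAreaStartOffset());
  printf("area:   [%d, %d)\n", CodePageAreaStartOffset(),
         CodePageAreaEndOffset());
  printf("guard:  [%d, %d)\n", CodePageAreaEndOffset(), kPageSize);
  assert(CodePageAreaEndOffset() > CodePageAreaStartOffset());
}
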
@@ -671,8 +772,14 @@
       was_swept_conservatively_(false),
       first_unswept_page_(Page::FromAddress(NULL)),
       unswept_free_bytes_(0) {
+  if (id == CODE_SPACE) {
+    area_size_ = heap->isolate()->memory_allocator()->
+        CodePageAreaSize();
+  } else {
+    area_size_ = Page::kPageSize - Page::kObjectStartOffset;
+  }
   max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
-                  * Page::kObjectAreaSize;
+      * AreaSize();
   accounting_stats_.Clear();
 
   allocation_info_.top = NULL;
@@ -722,8 +829,8 @@
 }
 
 bool PagedSpace::CanExpand() {
-  ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
-  ASSERT(Capacity() % Page::kObjectAreaSize == 0);
+  ASSERT(max_capacity_ % AreaSize() == 0);
+  ASSERT(Capacity() % AreaSize() == 0);
 
   if (Capacity() == max_capacity_) return false;
 
@@ -763,6 +870,7 @@
 
 void PagedSpace::ReleasePage(Page* page) {
   ASSERT(page->LiveBytes() == 0);
+  ASSERT(AreaSize() == page->area_size());
 
   // Adjust list of unswept pages if the page is the head of the list.
   if (first_unswept_page_ == page) {
@@ -775,7 +883,7 @@
   if (page->WasSwept()) {
     intptr_t size = free_list_.EvictFreeListItems(page);
     accounting_stats_.AllocateBytes(size);
-    ASSERT_EQ(Page::kObjectAreaSize, static_cast<int>(size));
+    ASSERT_EQ(AreaSize(), static_cast<int>(size));
   } else {
     DecreaseUnsweptFreeBytes(page);
   }
@@ -792,8 +900,8 @@
   }
 
   ASSERT(Capacity() > 0);
-  ASSERT(Capacity() % Page::kObjectAreaSize == 0);
-  accounting_stats_.ShrinkSpace(Page::kObjectAreaSize);
+  ASSERT(Capacity() % AreaSize() == 0);
+  accounting_stats_.ShrinkSpace(AreaSize());
 }
 
 
@@ -804,9 +912,9 @@
     if (!page->WasSwept()) {
       if (page->LiveBytes() == 0) ReleasePage(page);
     } else {
-      HeapObject* obj = HeapObject::FromAddress(page->body());
+      HeapObject* obj = HeapObject::FromAddress(page->area_start());
       if (obj->IsFreeSpace() &&
-          FreeSpace::cast(obj)->size() == Page::kObjectAreaSize) {
+          FreeSpace::cast(obj)->size() == AreaSize()) {
         // Sometimes we allocate memory from free list but don't
         // immediately initialize it (e.g. see PagedSpace::ReserveSpace
         // called from Heap::ReserveSpace that can cause GC before
@@ -817,7 +925,7 @@
         // by free list items.
         FreeList::SizeStats sizes;
         free_list_.CountFreeListItems(page, &sizes);
-        if (sizes.Total() == Page::kObjectAreaSize) {
+        if (sizes.Total() == AreaSize()) {
           ReleasePage(page);
         }
       }
@@ -848,8 +956,8 @@
     }
     ASSERT(page->WasSweptPrecisely());
     HeapObjectIterator it(page, NULL);
-    Address end_of_previous_object = page->ObjectAreaStart();
-    Address top = page->ObjectAreaEnd();
+    Address end_of_previous_object = page->area_start();
+    Address top = page->area_end();
     int black_size = 0;
     for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
       ASSERT(end_of_previous_object <= object->address());
@@ -1061,7 +1169,7 @@
   }
 
   // Clear remainder of current page.
-  Address limit = NewSpacePage::FromLimit(top)->body_limit();
+  Address limit = NewSpacePage::FromLimit(top)->area_end();
   if (heap()->gc_state() == Heap::SCAVENGE) {
     heap()->promotion_queue()->SetNewLimit(limit);
     heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
@@ -1111,7 +1219,7 @@
 
   // There should be objects packed in from the low address up to the
   // allocation pointer.
-  Address current = to_space_.first_page()->body();
+  Address current = to_space_.first_page()->area_start();
   CHECK_EQ(current, to_space_.space_start());
 
   while (current != top()) {
@@ -1146,7 +1254,7 @@
       NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
       // Next page should be valid.
       CHECK(!page->is_anchor());
-      current = page->body();
+      current = page->area_start();
     }
   }
 
@@ -1932,7 +2040,7 @@
 
 void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) {
   sizes->huge_size_ = CountFreeListItemsInList(huge_list_, p);
-  if (sizes->huge_size_ < Page::kObjectAreaSize) {
+  if (sizes->huge_size_ < p->area_size()) {
     sizes->small_size_ = CountFreeListItemsInList(small_list_, p);
     sizes->medium_size_ = CountFreeListItemsInList(medium_list_, p);
     sizes->large_size_ = CountFreeListItemsInList(large_list_, p);
@@ -1962,7 +2070,7 @@
 intptr_t FreeList::EvictFreeListItems(Page* p) {
   intptr_t sum = EvictFreeListItemsInList(&huge_list_, p);
 
-  if (sum < Page::kObjectAreaSize) {
+  if (sum < p->area_size()) {
     sum += EvictFreeListItemsInList(&small_list_, p) +
         EvictFreeListItemsInList(&medium_list_, p) +
         EvictFreeListItemsInList(&large_list_, p);
@@ -2084,7 +2192,7 @@
 
 
 bool PagedSpace::ReserveSpace(int size_in_bytes) {
-  ASSERT(size_in_bytes <= Page::kMaxHeapObjectSize);
+  ASSERT(size_in_bytes <= AreaSize());
   ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
   Address current_top = allocation_info_.top;
   Address new_top = current_top + size_in_bytes;
@@ -2464,7 +2572,7 @@
   LargePage* page = heap()->isolate()->memory_allocator()->
       AllocateLargePage(object_size, executable, this);
   if (page == NULL) return Failure::RetryAfterGC(identity());
-  ASSERT(page->body_size() >= object_size);
+  ASSERT(page->area_size() >= object_size);
 
   size_ += static_cast<int>(page->size());
   objects_size_ += object_size;
@@ -2580,7 +2688,7 @@
     // object area start.
     HeapObject* object = chunk->GetObject();
     Page* page = Page::FromAddress(object->address());
-    ASSERT(object->address() == page->ObjectAreaStart());
+    ASSERT(object->address() == page->area_start());
 
     // The first word should be a map, and we expect all map pointers to be
     // in map space.
diff --git a/src/spaces.h b/src/spaces.h
index 0ff62b5..599e9dd 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -103,7 +103,7 @@
   ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)
 
 #define ASSERT_OBJECT_SIZE(size)                                               \
-  ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))
+  ASSERT((0 < size) && (size <= Page::kMaxNonCodeHeapObjectSize))
 
 #define ASSERT_PAGE_OFFSET(offset)                                             \
   ASSERT((Page::kObjectStartOffset <= offset)                                  \
@@ -361,21 +361,15 @@
     store_buffer_counter_ = counter;
   }
 
-  Address body() { return address() + kObjectStartOffset; }
-
-  Address body_limit() { return address() + size(); }
-
-  int body_size() { return static_cast<int>(size() - kObjectStartOffset); }
-
   bool Contains(Address addr) {
-    return addr >= body() && addr < address() + size();
+    return addr >= area_start() && addr < area_end();
   }
 
   // Checks whether addr can be a limit of addresses in this page.
   // It's a limit if it's in the page, or if it's just after the
   // last byte of the page.
   bool ContainsLimit(Address addr) {
-    return addr >= body() && addr <= address() + size();
+    return addr >= area_start() && addr <= area_end();
   }
 
   enum MemoryChunkFlags {
@@ -487,8 +481,9 @@
   static const intptr_t kSizeOffset = kPointerSize + kPointerSize;
 
   static const intptr_t kLiveBytesOffset =
-      kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
-      kPointerSize + kPointerSize + kPointerSize + kIntSize;
+     kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
+     kPointerSize + kPointerSize +
+     kPointerSize + kPointerSize + kPointerSize + kIntSize;
 
   static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
 
@@ -594,12 +589,22 @@
     ClearFlag(EVACUATION_CANDIDATE);
   }
 
+  Address area_start() { return area_start_; }
+  Address area_end() { return area_end_; }
+  int area_size() {
+    return static_cast<int>(area_end() - area_start());
+  }
 
  protected:
   MemoryChunk* next_chunk_;
   MemoryChunk* prev_chunk_;
   size_t size_;
   intptr_t flags_;
+
+  // Start and end of allocatable memory on this chunk.
+  Address area_start_;
+  Address area_end_;
+
   // If the chunk needs to remember its memory reservation, it is stored here.
   VirtualMemory reservation_;
   // The identity of the owning space.  This is tagged as a failure pointer, but
@@ -618,6 +623,8 @@
   static MemoryChunk* Initialize(Heap* heap,
                                  Address base,
                                  size_t size,
+                                 Address area_start,
+                                 Address area_end,
                                  Executability executable,
                                  Space* owner);
 
@@ -657,12 +664,6 @@
   inline void set_next_page(Page* page);
   inline void set_prev_page(Page* page);
 
-  // Returns the start address of the object area in this page.
-  Address ObjectAreaStart() { return address() + kObjectStartOffset; }
-
-  // Returns the end address (exclusive) of the object area in this page.
-  Address ObjectAreaEnd() { return address() + Page::kPageSize; }
-
   // Checks whether an address is page aligned.
   static bool IsAlignedToPageSize(Address a) {
     return 0 == (OffsetFrom(a) & kPageAlignmentMask);
@@ -685,21 +686,14 @@
   // Page size in bytes.  This must be a multiple of the OS page size.
   static const int kPageSize = 1 << kPageSizeBits;
 
-  // Page size mask.
-  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
-
   // Object area size in bytes.
-  static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
+  static const int kNonCodeObjectAreaSize = kPageSize - kObjectStartOffset;
 
   // Maximum object size that fits in a page.
-  static const int kMaxHeapObjectSize = kObjectAreaSize;
+  static const int kMaxNonCodeHeapObjectSize = kNonCodeObjectAreaSize;
 
-  static const int kFirstUsedCell =
-    (kObjectStartOffset/kPointerSize) >> Bitmap::kBitsPerCellLog2;
-
-  static const int kLastUsedCell =
-    ((kPageSize - kPointerSize)/kPointerSize) >>
-      Bitmap::kBitsPerCellLog2;
+  // Page size mask.
+  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
 
   inline void ClearGCFields();
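Editor's note: to make the renamed constants concrete, here is the arithmetic they encode as a self-contained check. The numeric values are illustrative assumptions only; the real kPageSizeBits and kObjectStartOffset are configured elsewhere in V8 and vary by platform.

    #include <cassert>

    int main() {
      const int kPageSizeBits = 20;        // assumption: 1 MB pages
      const int kPageSize = 1 << kPageSizeBits;
      const int kObjectStartOffset = 256;  // assumption: chunk header size
      // Usable area on a regular (non-code) page excludes only the header.
      const int kNonCodeObjectAreaSize = kPageSize - kObjectStartOffset;
      const int kMaxNonCodeHeapObjectSize = kNonCodeObjectAreaSize;
      const int kPageAlignmentMask = (1 << kPageSizeBits) - 1;
      // A page-aligned address has no low bits under the alignment mask.
      assert((kPageSize & kPageAlignmentMask) == 0);
      assert(kMaxNonCodeHeapObjectSize < kPageSize);
      return 0;
    }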
 
@@ -734,7 +728,7 @@
 class LargePage : public MemoryChunk {
  public:
   HeapObject* GetObject() {
-    return HeapObject::FromAddress(body());
+    return HeapObject::FromAddress(area_start());
   }
 
   inline LargePage* next_page() const {
@@ -975,7 +969,7 @@
 
   // Returns maximum available bytes that the old space can have.
   intptr_t MaxAvailable() {
-    return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
+    return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
   }
 
 #ifdef DEBUG
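Editor's note: the substitution keeps the estimate's shape: whole pages that could still be committed, times the usable bytes each page contributes. A minimal sketch under the same illustrative 1 MB / 256-byte-header assumptions as above:

    #include <cstdint>

    // Illustrative only: usable old-space bytes from raw available bytes.
    int64_t MaxAvailableBytes(int64_t available_bytes) {
      const int64_t kPageSize = 1 << 20;               // assumption
      const int64_t kUsablePerPage = kPageSize - 256;  // assumption
      // Only whole pages count, and each contributes its usable area.
      return (available_bytes / kPageSize) * kUsablePerPage;
    }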
@@ -1028,6 +1022,20 @@
   bool MemoryAllocationCallbackRegistered(
       MemoryAllocationCallback callback);
 
+  static int CodePageGuardStartOffset();
+
+  static int CodePageGuardSize();
+
+  static int CodePageAreaStartOffset();
+
+  static int CodePageAreaEndOffset();
+
+  static int CodePageAreaSize() {
+    return CodePageAreaEndOffset() - CodePageAreaStartOffset();
+  }
+
+  static bool CommitCodePage(VirtualMemory* vm, Address start, size_t size);
+
  private:
   Isolate* isolate_;
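Editor's note: the new CodePage* helpers imply that a code page's usable area can no longer be derived from kObjectStartOffset alone, because the executable region sits between guard areas. The layout below (header, leading guard, code area, trailing guard) is an assumption inferred from the signatures above; the real offsets live in MemoryAllocator.

    #include <cassert>

    // All values are assumptions for illustration, not V8's real numbers.
    const int kPageSize = 1 << 20;   // assumption: 1 MB pages
    const int kHeaderSize = 256;     // assumption: chunk header size
    const int kGuardSize = 4096;     // assumption: one OS page per guard

    int CodePageGuardStartOffset() { return kHeaderSize; }
    int CodePageGuardSize() { return kGuardSize; }
    int CodePageAreaStartOffset() {
      return CodePageGuardStartOffset() + CodePageGuardSize();
    }
    int CodePageAreaEndOffset() { return kPageSize - kGuardSize; }
    int CodePageAreaSize() {
      return CodePageAreaEndOffset() - CodePageAreaStartOffset();
    }

    int main() {
      // The guards shrink the executable area below the non-code area size.
      assert(CodePageAreaSize() < kPageSize - kHeaderSize);
      return 0;
    }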
 
@@ -1380,7 +1388,7 @@
  private:
   // The size range of blocks, in bytes.
   static const int kMinBlockSize = 3 * kPointerSize;
-  static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
+  static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize;
 
   FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size);
 
@@ -1572,12 +1580,12 @@
 
   void IncreaseUnsweptFreeBytes(Page* p) {
     ASSERT(ShouldBeSweptLazily(p));
-    unswept_free_bytes_ += (Page::kObjectAreaSize - p->LiveBytes());
+    unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
   }
 
   void DecreaseUnsweptFreeBytes(Page* p) {
     ASSERT(ShouldBeSweptLazily(p));
-    unswept_free_bytes_ -= (Page::kObjectAreaSize - p->LiveBytes());
+    unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
   }
 
   bool AdvanceSweeper(intptr_t bytes_to_sweep);
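Editor's note: read as accounting, a lazily swept page is expected to recover every byte of its own area that is not live, so the estimate now uses the page's area_size() rather than a space-wide constant. A schematic version with a hypothetical page record:

    // Hypothetical record; after this change the area can differ per space.
    struct PageInfo {
      int area_size;   // what p->area_size() returns
      int live_bytes;  // what p->LiveBytes() returns
    };

    // Bytes a lazy sweep of this page is expected to free.
    int UnsweptFreeBytes(const PageInfo& p) {
      return p.area_size - p.live_bytes;
    }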
@@ -1600,7 +1608,14 @@
   // Returns the number of total pages in this space.
   int CountTotalPages();
 
+  // Returns the size of the allocatable area on a page in this space.
+  inline int AreaSize() {
+    return area_size_;
+  }
+
  protected:
+  int area_size_;
+
   // Maximum capacity of this space.
   intptr_t max_capacity_;
 
@@ -1702,6 +1717,8 @@
     (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
     (1 << MemoryChunk::SCAN_ON_SCAVENGE);
 
+  static const int kAreaSize = Page::kNonCodeObjectAreaSize;
+
   inline NewSpacePage* next_page() const {
     return static_cast<NewSpacePage*>(next_chunk());
   }
@@ -1814,22 +1831,22 @@
   // Returns the start address of the first page of the space.
   Address space_start() {
     ASSERT(anchor_.next_page() != &anchor_);
-    return anchor_.next_page()->body();
+    return anchor_.next_page()->area_start();
   }
 
   // Returns the start address of the current page of the space.
   Address page_low() {
-    return current_page_->body();
+    return current_page_->area_start();
   }
 
   // Returns one past the end address of the space.
   Address space_end() {
-    return anchor_.prev_page()->body_limit();
+    return anchor_.prev_page()->area_end();
   }
 
   // Returns one past the end address of the current page of the space.
   Address page_high() {
-    return current_page_->body_limit();
+    return current_page_->area_end();
   }
 
   bool AdvancePage() {
@@ -1965,7 +1982,7 @@
       NewSpacePage* page = NewSpacePage::FromLimit(current_);
       page = page->next_page();
       ASSERT(!page->is_anchor());
-      current_ = page->body();
+      current_ = page->area_start();
       if (current_ == limit_) return NULL;
     }
 
@@ -2073,7 +2090,7 @@
 
   // Return the allocated bytes in the active semispace.
   virtual intptr_t Size() {
-    return pages_used_ * Page::kObjectAreaSize +
+    return pages_used_ * NewSpacePage::kAreaSize +
         static_cast<int>(top() - to_space_.page_low());
   }
 
@@ -2085,7 +2102,7 @@
   // Return the current capacity of a semispace.
   intptr_t EffectiveCapacity() {
     SLOW_ASSERT(to_space_.Capacity() == from_space_.Capacity());
-    return (to_space_.Capacity() / Page::kPageSize) * Page::kObjectAreaSize;
+    return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize;
   }
 
   // Return the current capacity of a semispace.
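Editor's note: because semispace pages are never code pages, their usable area is the fixed NewSpacePage::kAreaSize introduced above, and the size and capacity math composes from it. A minimal model, reusing the illustrative constants from the earlier sketches:

    #include <cstdint>

    const int64_t kAreaSize = (1 << 20) - 256;  // assumption, see above
    const int64_t kPageSize = 1 << 20;          // assumption

    // Allocated bytes: full pages plus the partially filled current page.
    int64_t SemiSpaceSize(int pages_used, int64_t top_offset_in_page) {
      return pages_used * kAreaSize + top_offset_in_page;
    }

    // Usable capacity: committed bytes scaled down to per-page areas.
    int64_t EffectiveCapacity(int64_t committed_bytes) {
      return (committed_bytes / kPageSize) * kAreaSize;
    }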
@@ -2302,7 +2319,7 @@
 
   // The limit of allocation for a page in this space.
   virtual Address PageAllocationLimit(Page* page) {
-    return page->ObjectAreaEnd();
+    return page->area_end();
   }
 
  public:
@@ -2331,12 +2348,12 @@
       : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
         object_size_in_bytes_(object_size_in_bytes),
         name_(name) {
-    page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
+    page_extra_ = Page::kNonCodeObjectAreaSize % object_size_in_bytes;
   }
 
   // The limit of allocation for a page in this space.
   virtual Address PageAllocationLimit(Page* page) {
-    return page->ObjectAreaEnd() - page_extra_;
+    return page->area_end() - page_extra_;
   }
 
   int object_size_in_bytes() { return object_size_in_bytes_; }
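Editor's note: with fixed-size objects, the tail of each page that cannot hold one more whole object is written off up front: page_extra_ is that remainder, and the per-page allocation limit backs off by exactly that much. The same division yields kMapsPerPage in the map-space hunk below. A worked example with illustrative numbers:

    #include <cassert>

    int main() {
      // Assumption: 1 MB page minus a 256-byte header, as sketched earlier.
      const int kNonCodeObjectAreaSize = (1 << 20) - 256;
      const int object_size = 88;  // assumption: some fixed object size
      // Bytes at the end of the page too small for one more object.
      const int page_extra = kNonCodeObjectAreaSize % object_size;
      const int objects_per_page = kNonCodeObjectAreaSize / object_size;
      // The usable area splits exactly into whole objects plus the tail.
      assert(objects_per_page * object_size + page_extra ==
             kNonCodeObjectAreaSize);
      return 0;
    }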
@@ -2387,7 +2404,7 @@
 #endif
 
  private:
-  static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;
+  static const int kMapsPerPage = Page::kNonCodeObjectAreaSize / Map::kSize;
 
   // Do map space compaction if there is a page gap.
   int CompactionThreshold() {
diff --git a/src/store-buffer.cc b/src/store-buffer.cc
index 9022b3b..3852155 100644
--- a/src/store-buffer.cc
+++ b/src/store-buffer.cc
@@ -453,14 +453,14 @@
 
 // Compute start address of the first map following given addr.
 static inline Address MapStartAlign(Address addr) {
-  Address page = Page::FromAddress(addr)->ObjectAreaStart();
+  Address page = Page::FromAddress(addr)->area_start();
   return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
 }
 
 
 // Compute end address of the first map preceding given addr.
 static inline Address MapEndAlign(Address addr) {
-  Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
+  Address page = Page::FromAllocationTop(addr)->area_start();
   return page + ((addr - page) / Map::kSize * Map::kSize);
 }
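Editor's note: both helpers round relative to the page's area_start: MapStartAlign rounds an address up to the next Map-sized boundary, MapEndAlign rounds down. A self-contained check of the arithmetic (the map size here is an illustrative assumption, not Map::kSize):

    #include <cassert>

    int main() {
      const int kMapSize = 88;  // assumption: size of one map object
      const int page = 0;       // offsets measured from area_start
      const int addr = page + 200;

      // Round up: first boundary at or after addr.
      int start =
          page + ((addr - page) + (kMapSize - 1)) / kMapSize * kMapSize;
      // Round down: last boundary at or before addr.
      int end = page + (addr - page) / kMapSize * kMapSize;

      assert(start == 264);  // 3 * 88, the boundary just past 200
      assert(end == 176);    // 2 * 88, the boundary just before 200
      return 0;
    }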
 
@@ -523,8 +523,8 @@
     Page* page,
     RegionCallback region_callback,
     ObjectSlotCallback slot_callback) {
-  Address visitable_start = page->ObjectAreaStart();
-  Address end_of_page = page->ObjectAreaEnd();
+  Address visitable_start = page->area_start();
+  Address end_of_page = page->area_end();
 
   Address visitable_end = visitable_start;
 
diff --git a/src/version.cc b/src/version.cc
index 0f50f1c..be6c0c4 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     9
-#define BUILD_NUMBER      9
+#define BUILD_NUMBER      10
 #define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/zone.h b/src/zone.h
index 25c4f9a..bc092b5 100644
--- a/src/zone.h
+++ b/src/zone.h
@@ -30,6 +30,7 @@
 
 #include "allocation.h"
 #include "checks.h"
+#include "hashmap.h"
 #include "globals.h"
 #include "list.h"
 #include "splay-tree.h"
@@ -239,6 +240,8 @@
 };
 
 
+typedef TemplateHashMap<ZoneListAllocationPolicy> ZoneHashMap;
+
 } }  // namespace v8::internal
 
 #endif  // V8_ZONE_H_
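Editor's note: the new alias gives a hash map whose backing store comes from the zone allocator rather than the C++ free store, so its memory is released wholesale when the zone is torn down. A usage sketch; the match-function constructor and the Lookup call shape follow src/hashmap.h at this revision, but treat the details as assumptions rather than verbatim API:

    // Hypothetical example of the ZoneHashMap alias defined above.
    static bool MatchPointers(void* a, void* b) { return a == b; }

    void Example() {
      ZoneHashMap map(&MatchPointers);  // backing store lives in the Zone
      int key = 42;
      uint32_t hash = 42u;              // callers supply their own hash
      // Lookup with insert == true creates the entry if it is absent.
      ZoneHashMap::Entry* entry = map.Lookup(&key, hash, true);
      entry->value = &key;              // entries hold untyped void* values
    }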