Version 1.2.14.

Added separate paged heap space for global property cells and avoided updating the write barrier when storing into them.
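
A minimal sketch of the idea, using hypothetical stand-in types rather than
V8's real Heap/JSGlobalPropertyCell API: because every cell lives in the
dedicated cell space, the scavenger can visit all cell value slots directly
(see the cell_iterator loop added to Heap::Scavenge below), so stores into
cells need no write barrier.

    #include <vector>

    struct Object;                      // stand-in for a heap object
    struct Cell { Object* value; };     // stand-in for a global property cell

    struct CellSpace {
      std::vector<Cell*> cells;         // all cells live here and nowhere else
    };

    // Run during scavenge instead of consulting a remembered set.
    void ScavengeCellValues(CellSpace* space, void (*visit)(Object**)) {
      for (Cell* cell : space->cells) {
        visit(&cell->value);            // forward the value if it moved
      }
    }

    // A store into a cell is then a plain write: no RecordWrite needed.
    void StoreToCell(Cell* cell, Object* value) {
      cell->value = value;
    }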

Improved peephole optimization on ARM platforms by not emitting unnecessary debug information.
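
A minimal sketch of the mechanism, with a hypothetical Assembler type (not
V8's real one): recording a source position no longer emits reloc info
immediately; it is flushed only at instructions that can act as call or
return sites (blx, bx, and moves/loads into pc), so straight-line code stays
free of debug gaps that would otherwise block peephole patterns such as the
push(r)/pop(r) elimination.

    struct Assembler {
      int current_position_ = -1;
      int written_position_ = -1;

      // Just remember the position; emit nothing yet.
      void RecordPosition(int pos) {
        if (pos < 0) return;
        current_position_ = pos;
      }

      // Called from blx/bx/mov pc/ldr pc to flush the pending position.
      void WriteRecordedPositions() {
        if (current_position_ != written_position_) {
          // ...emit RelocInfo::POSITION here in the real assembler...
          written_position_ = current_position_;
        }
      }
    };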

Re-enabled ICs for loads and calls that skip a global object during lookup through the prototype chain.
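
A minimal sketch of the guard, again with hypothetical stand-in types: a map
check alone is not enough for a skipped global object because globals keep
dictionary properties, so adding a property leaves the map unchanged.
Instead, CheckPrototypes pins a property cell for the name holding "the
hole" and the compiled stub verifies on every hit that the cell still holds
the hole, i.e. that the property is still absent.

    struct Object {};
    static Object the_hole;             // stand-in for the hole sentinel

    struct PropertyCell { Object* value; };

    // Check compiled (conceptually) into the stub for each skipped global.
    bool SkippedGlobalStillLacksProperty(const PropertyCell* cell) {
      return cell->value == &the_hole;  // any other value means IC miss
    }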

Allowed access through global proxies to use ICs.

Fixed issue 401: disabled the use of waitid in d8 on Mac OS X, where the parent process can hang waiting on a child that is already a zombie.


git-svn-id: http://v8.googlecode.com/svn/trunk@2438 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/api.h b/src/api.h
index 85b13ec..f1057a8 100644
--- a/src/api.h
+++ b/src/api.h
@@ -244,9 +244,10 @@
 
 // Implementations of ToLocal
 
-#define MAKE_TO_LOCAL(Name, From, To) \
+#define MAKE_TO_LOCAL(Name, From, To)                                       \
   Local<v8::To> Utils::Name(v8::internal::Handle<v8::internal::From> obj) { \
-    return Local<To>(reinterpret_cast<To*>(obj.location())); \
+    ASSERT(!obj->IsTheHole());                                              \
+    return Local<To>(reinterpret_cast<To*>(obj.location()));                \
   }
 
 MAKE_TO_LOCAL(ToLocal, Context, Context)
diff --git a/src/apinatives.js b/src/apinatives.js
index 2981eec..6451e62 100644
--- a/src/apinatives.js
+++ b/src/apinatives.js
@@ -51,6 +51,7 @@
       var Constructor = %GetTemplateField(data, kApiConstructorOffset);
       var result = Constructor ? new (Instantiate(Constructor))() : {};
       ConfigureTemplateInstance(result, data);
+      result = %ToFastProperties(result);
       return result;
     default:
       throw 'Unknown API tag <' + tag + '>';
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index a393ac0..3ed99f9 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -697,6 +697,7 @@
 
 
 void Assembler::blx(int branch_offset) {  // v5 and above
+  WriteRecordedPositions();
   ASSERT((branch_offset & 1) == 0);
   int h = ((branch_offset & 2) >> 1)*B24;
   int imm24 = branch_offset >> 2;
@@ -706,12 +707,14 @@
 
 
 void Assembler::blx(Register target, Condition cond) {  // v5 and above
+  WriteRecordedPositions();
   ASSERT(!target.is(pc));
   emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code());
 }
 
 
 void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
+  WriteRecordedPositions();
   ASSERT(!target.is(pc));  // use of pc is actually allowed, but discouraged
   emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code());
 }
@@ -810,6 +813,9 @@
 
 
 void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
+  if (dst.is(pc)) {
+    WriteRecordedPositions();
+  }
   addrmod1(cond | 13*B21 | s, r0, dst, src);
 }
 
@@ -937,6 +943,9 @@
 
 // Load/Store instructions
 void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
+  if (dst.is(pc)) {
+    WriteRecordedPositions();
+  }
   addrmod2(cond | B26 | L, dst, src);
 
   // Eliminate pattern: push(r), pop(r)
@@ -1274,7 +1283,6 @@
   if (pos == RelocInfo::kNoPosition) return;
   ASSERT(pos >= 0);
   current_position_ = pos;
-  WriteRecordedPositions();
 }
 
 
@@ -1282,7 +1290,6 @@
   if (pos == RelocInfo::kNoPosition) return;
   ASSERT(pos >= 0);
   current_statement_position_ = pos;
-  WriteRecordedPositions();
 }
 
 
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 07c767e..2ca74a9 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -192,11 +192,14 @@
   //  -- [sp]  : receiver
   // -----------------------------------
 
-  // NOTE: Right now, this code always misses on ARM which is
-  // sub-optimal. We should port the fast case code from IA-32.
+  Label miss;
 
-  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Miss));
-  __ Jump(ic, RelocInfo::CODE_TARGET);
+  // Load receiver.
+  __ ldr(r0, MemOperand(sp, 0));
+
+  StubCompiler::GenerateLoadFunctionPrototype(masm, r0, r1, r3, &miss);
+  __ bind(&miss);
+  StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
 }
 
 
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index e3e5502..6d9ace8 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -171,110 +171,6 @@
 }
 
 
-void StubCompiler::GenerateLoadField(MacroAssembler* masm,
-                                     JSObject* object,
-                                     JSObject* holder,
-                                     Register receiver,
-                                     Register scratch1,
-                                     Register scratch2,
-                                     int index,
-                                     Label* miss_label) {
-  // Check that the receiver isn't a smi.
-  __ tst(receiver, Operand(kSmiTagMask));
-  __ b(eq, miss_label);
-
-  // Check that the maps haven't changed.
-  Register reg =
-      masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
-  GenerateFastPropertyLoad(masm, r0, reg, holder, index);
-  __ Ret();
-}
-
-
-void StubCompiler::GenerateLoadConstant(MacroAssembler* masm,
-                                        JSObject* object,
-                                        JSObject* holder,
-                                        Register receiver,
-                                        Register scratch1,
-                                        Register scratch2,
-                                        Object* value,
-                                        Label* miss_label) {
-  // Check that the receiver isn't a smi.
-  __ tst(receiver, Operand(kSmiTagMask));
-  __ b(eq, miss_label);
-
-  // Check that the maps haven't changed.
-  Register reg =
-      masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
-
-  // Return the constant value.
-  __ mov(r0, Operand(Handle<Object>(value)));
-  __ Ret();
-}
-
-
-void StubCompiler::GenerateLoadCallback(MacroAssembler* masm,
-                                        JSObject* object,
-                                        JSObject* holder,
-                                        Register receiver,
-                                        Register name,
-                                        Register scratch1,
-                                        Register scratch2,
-                                        AccessorInfo* callback,
-                                        Label* miss_label) {
-  // Check that the receiver isn't a smi.
-  __ tst(receiver, Operand(kSmiTagMask));
-  __ b(eq, miss_label);
-
-  // Check that the maps haven't changed.
-  Register reg =
-      masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
-
-  // Push the arguments on the JS stack of the caller.
-  __ push(receiver);  // receiver
-  __ mov(ip, Operand(Handle<AccessorInfo>(callback)));  // callback data
-  __ push(ip);
-  __ push(name);  // name
-  __ push(reg);  // holder
-
-  // Do tail-call to the runtime system.
-  ExternalReference load_callback_property =
-      ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
-  __ TailCallRuntime(load_callback_property, 4);
-}
-
-
-void StubCompiler::GenerateLoadInterceptor(MacroAssembler* masm,
-                                           JSObject* object,
-                                           JSObject* holder,
-                                           Smi* lookup_hint,
-                                           Register receiver,
-                                           Register name,
-                                           Register scratch1,
-                                           Register scratch2,
-                                           Label* miss_label) {
-  // Check that the receiver isn't a smi.
-  __ tst(receiver, Operand(kSmiTagMask));
-  __ b(eq, miss_label);
-
-  // Check that the maps haven't changed.
-  Register reg =
-      masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
-
-  // Push the arguments on the JS stack of the caller.
-  __ push(receiver);  // receiver
-  __ push(reg);  // holder
-  __ push(name);  // name
-  __ mov(scratch1, Operand(lookup_hint));
-  __ push(scratch1);
-
-  // Do tail-call to the runtime system.
-  ExternalReference load_ic_property =
-      ExternalReference(IC_Utility(IC::kLoadInterceptorProperty));
-  __ TailCallRuntime(load_ic_property, 4);
-}
-
-
 void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
                                            Register receiver,
                                            Register scratch,
@@ -351,6 +247,17 @@
 }
 
 
+void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
+                                                 Register receiver,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Label* miss_label) {
+  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+  __ mov(r0, scratch1);
+  __ Ret();
+}
+
+
 // Generate StoreField code, value is passed in r0 register.
 // After executing generated code, the receiver_reg and name_reg
 // may be clobbered.
@@ -462,6 +369,147 @@
 #define __ ACCESS_MASM(masm())
 
 
+Register StubCompiler::CheckPrototypes(JSObject* object,
+                                       Register object_reg,
+                                       JSObject* holder,
+                                       Register holder_reg,
+                                       Register scratch,
+                                       String* name,
+                                       Label* miss) {
+  // Check that the maps haven't changed.
+  Register result =
+      masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch, miss);
+
+  // If we've skipped any global objects, it's not enough to verify
+  // that their maps haven't changed.
+  while (object != holder) {
+    if (object->IsGlobalObject()) {
+      GlobalObject* global = GlobalObject::cast(object);
+      Object* probe = global->EnsurePropertyCell(name);
+      if (probe->IsFailure()) {
+        set_failure(Failure::cast(probe));
+        return result;
+      }
+      JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
+      ASSERT(cell->value()->IsTheHole());
+      __ mov(scratch, Operand(Handle<Object>(cell)));
+      __ ldr(scratch,
+             FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+      __ cmp(scratch, Operand(Factory::the_hole_value()));
+      __ b(ne, miss);
+    }
+    object = JSObject::cast(object->GetPrototype());
+  }
+
+  // Return the register containing the holder.
+  return result;
+}
+
+
+void StubCompiler::GenerateLoadField(JSObject* object,
+                                     JSObject* holder,
+                                     Register receiver,
+                                     Register scratch1,
+                                     Register scratch2,
+                                     int index,
+                                     String* name,
+                                     Label* miss) {
+  // Check that the receiver isn't a smi.
+  __ tst(receiver, Operand(kSmiTagMask));
+  __ b(eq, miss);
+
+  // Check that the maps haven't changed.
+  Register reg =
+      CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss);
+  GenerateFastPropertyLoad(masm(), r0, reg, holder, index);
+  __ Ret();
+}
+
+
+void StubCompiler::GenerateLoadConstant(JSObject* object,
+                                        JSObject* holder,
+                                        Register receiver,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Object* value,
+                                        String* name,
+                                        Label* miss) {
+  // Check that the receiver isn't a smi.
+  __ tst(receiver, Operand(kSmiTagMask));
+  __ b(eq, miss);
+
+  // Check that the maps haven't changed.
+  Register reg =
+      CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss);
+
+  // Return the constant value.
+  __ mov(r0, Operand(Handle<Object>(value)));
+  __ Ret();
+}
+
+
+void StubCompiler::GenerateLoadCallback(JSObject* object,
+                                        JSObject* holder,
+                                        Register receiver,
+                                        Register name_reg,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        AccessorInfo* callback,
+                                        String* name,
+                                        Label* miss) {
+  // Check that the receiver isn't a smi.
+  __ tst(receiver, Operand(kSmiTagMask));
+  __ b(eq, miss);
+
+  // Check that the maps haven't changed.
+  Register reg =
+      CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss);
+
+  // Push the arguments on the JS stack of the caller.
+  __ push(receiver);  // receiver
+  __ mov(ip, Operand(Handle<AccessorInfo>(callback)));  // callback data
+  __ push(ip);
+  __ push(name_reg);  // name
+  __ push(reg);  // holder
+
+  // Do tail-call to the runtime system.
+  ExternalReference load_callback_property =
+      ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
+  __ TailCallRuntime(load_callback_property, 4);
+}
+
+
+void StubCompiler::GenerateLoadInterceptor(JSObject* object,
+                                           JSObject* holder,
+                                           Smi* lookup_hint,
+                                           Register receiver,
+                                           Register name_reg,
+                                           Register scratch1,
+                                           Register scratch2,
+                                           String* name,
+                                           Label* miss) {
+  // Check that the receiver isn't a smi.
+  __ tst(receiver, Operand(kSmiTagMask));
+  __ b(eq, miss);
+
+  // Check that the maps haven't changed.
+  Register reg =
+      CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss);
+
+  // Push the arguments on the JS stack of the caller.
+  __ push(receiver);  // receiver
+  __ push(reg);  // holder
+  __ push(name_reg);  // name
+  __ mov(scratch1, Operand(lookup_hint));
+  __ push(scratch1);
+
+  // Do tail-call to the runtime system.
+  ExternalReference load_ic_property =
+      ExternalReference(IC_Utility(IC::kLoadInterceptorProperty));
+  __ TailCallRuntime(load_ic_property, 4);
+}
+
+
 Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
   // ----------- S t a t e -------------
   //  -- r1: function
@@ -513,7 +561,7 @@
 
   // Do the right check and compute the holder register.
   Register reg =
-      masm()->CheckMaps(JSObject::cast(object), r0, holder, r3, r2, &miss);
+      CheckPrototypes(JSObject::cast(object), r0, holder, r3, r2, name, &miss);
   GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
 
   // Check that the function really is a function.
@@ -546,6 +594,7 @@
 Object* CallStubCompiler::CompileCallConstant(Object* object,
                                               JSObject* holder,
                                               JSFunction* function,
+                                              String* name,
                                               CheckType check) {
   // ----------- S t a t e -------------
   //  -- lr: return address
@@ -569,7 +618,7 @@
   switch (check) {
     case RECEIVER_MAP_CHECK:
       // Check that the maps haven't changed.
-      __ CheckMaps(JSObject::cast(object), r1, holder, r3, r2, &miss);
+      CheckPrototypes(JSObject::cast(object), r1, holder, r3, r2, name, &miss);
 
       // Patch the receiver on the stack with the global proxy if
       // necessary.
@@ -587,8 +636,8 @@
       GenerateLoadGlobalFunctionPrototype(masm(),
                                           Context::STRING_FUNCTION_INDEX,
                                           r2);
-      __ CheckMaps(JSObject::cast(object->GetPrototype()),
-                   r2, holder, r3, r1, &miss);
+      CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
+                      r1, name, &miss);
       break;
 
     case NUMBER_CHECK: {
@@ -603,8 +652,8 @@
       GenerateLoadGlobalFunctionPrototype(masm(),
                                           Context::NUMBER_FUNCTION_INDEX,
                                           r2);
-      __ CheckMaps(JSObject::cast(object->GetPrototype()),
-                   r2, holder, r3, r1, &miss);
+      CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
+                      r1, name, &miss);
       break;
     }
 
@@ -620,13 +669,13 @@
       GenerateLoadGlobalFunctionPrototype(masm(),
                                           Context::BOOLEAN_FUNCTION_INDEX,
                                           r2);
-      __ CheckMaps(JSObject::cast(object->GetPrototype()),
-                   r2, holder, r3, r1, &miss);
+      CheckPrototypes(JSObject::cast(object->GetPrototype()), r2, holder, r3,
+                      r1, name, &miss);
       break;
     }
 
     case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
-      __ CheckMaps(JSObject::cast(object), r1, holder, r3, r2, &miss);
+      CheckPrototypes(JSObject::cast(object), r1, holder, r3, r2, name, &miss);
       // Make sure object->elements()->map() != Heap::hash_table_map()
       // Get the elements array of the object.
       __ ldr(r3, FieldMemOperand(r1, JSObject::kElementsOffset));
@@ -685,7 +734,8 @@
 }
 
 
-Object* CallStubCompiler::CompileCallGlobal(GlobalObject* object,
+Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
+                                            GlobalObject* holder,
                                             JSGlobalPropertyCell* cell,
                                             JSFunction* function,
                                             String* name) {
@@ -699,11 +749,19 @@
   // Get the number of arguments.
   const int argc = arguments().immediate();
 
-  // Check that the map of the global has not changed.
-  __ ldr(r2, MemOperand(sp, argc * kPointerSize));
-  __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
-  __ cmp(r3, Operand(Handle<Map>(object->map())));
-  __ b(ne, &miss);
+  // Get the receiver from the stack.
+  __ ldr(r0, MemOperand(sp, argc * kPointerSize));
+
+  // If the object is the holder then we know that it's a global
+  // object which can only happen for contextual calls. In this case,
+  // the receiver cannot be a smi.
+  if (object != holder) {
+    __ tst(r0, Operand(kSmiTagMask));
+    __ b(eq, &miss);
+  }
+
+  // Check that the maps haven't changed.
+  CheckPrototypes(object, r0, holder, r3, r2, name, &miss);
 
   // Get the value from the cell.
   __ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
@@ -715,8 +773,10 @@
 
   // Patch the receiver on the stack with the global proxy if
   // necessary.
-  __ ldr(r3, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
-  __ str(r3, MemOperand(sp, argc * kPointerSize));
+  if (object->IsGlobalObject()) {
+    __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
+    __ str(r3, MemOperand(sp, argc * kPointerSize));
+  }
 
   // Setup the context (function already in r1).
   __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
@@ -902,8 +962,6 @@
   // Store the value in the cell.
   __ mov(r2, Operand(Handle<JSGlobalPropertyCell>(cell)));
   __ str(r0, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
-  __ mov(r1, Operand(JSGlobalPropertyCell::kValueOffset));
-  __ RecordWrite(r2, r1, r3);
 
   __ Ret();
 
@@ -932,7 +990,7 @@
 
   __ ldr(r0, MemOperand(sp, 0));
 
-  GenerateLoadField(masm(), object, holder, r0, r3, r1, index, &miss);
+  GenerateLoadField(object, holder, r0, r3, r1, index, name, &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
@@ -953,7 +1011,7 @@
   Label miss;
 
   __ ldr(r0, MemOperand(sp, 0));
-  GenerateLoadCallback(masm(), object, holder, r0, r2, r3, r1, callback, &miss);
+  GenerateLoadCallback(object, holder, r0, r2, r3, r1, callback, name, &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
@@ -975,7 +1033,7 @@
 
   __ ldr(r0, MemOperand(sp, 0));
 
-  GenerateLoadConstant(masm(), object, holder, r0, r3, r1, value, &miss);
+  GenerateLoadConstant(object, holder, r0, r3, r1, value, name, &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
@@ -996,14 +1054,14 @@
 
   __ ldr(r0, MemOperand(sp, 0));
 
-  GenerateLoadInterceptor(masm(),
-                          object,
+  GenerateLoadInterceptor(object,
                           holder,
                           holder->InterceptorPropertyLookupHint(name),
                           r0,
                           r2,
                           r3,
                           r1,
+                          name,
                           &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1013,7 +1071,8 @@
 }
 
 
-Object* LoadStubCompiler::CompileLoadGlobal(GlobalObject* object,
+Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
+                                            GlobalObject* holder,
                                             JSGlobalPropertyCell* cell,
                                             String* name,
                                             bool is_dont_delete) {
@@ -1026,11 +1085,19 @@
 
   __ IncrementCounter(&Counters::named_load_global_inline, 1, r1, r3);
 
-  // Check that the map of the global has not changed.
+  // Get the receiver from the stack.
   __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
-  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
-  __ cmp(r3, Operand(Handle<Map>(object->map())));
-  __ b(ne, &miss);
+
+  // If the object is the holder then we know that it's a global
+  // object which can only happen for contextual calls. In this case,
+  // the receiver cannot be a smi.
+  if (object != holder) {
+    __ tst(r1, Operand(kSmiTagMask));
+    __ b(eq, &miss);
+  }
+
+  // Check that the map of the global has not changed.
+  CheckPrototypes(object, r1, holder, r3, r0, name, &miss);
 
   // Get the value from the cell.
   __ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
@@ -1073,7 +1140,7 @@
   __ cmp(r2, Operand(Handle<String>(name)));
   __ b(ne, &miss);
 
-  GenerateLoadField(masm(), receiver, holder, r0, r3, r1, index, &miss);
+  GenerateLoadField(receiver, holder, r0, r3, r1, index, name, &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
@@ -1098,8 +1165,7 @@
   __ cmp(r2, Operand(Handle<String>(name)));
   __ b(ne, &miss);
 
-  GenerateLoadCallback(masm(), receiver, holder, r0, r2, r3,
-                       r1, callback, &miss);
+  GenerateLoadCallback(receiver, holder, r0, r2, r3, r1, callback, name, &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
@@ -1125,7 +1191,7 @@
   __ cmp(r2, Operand(Handle<String>(name)));
   __ b(ne, &miss);
 
-  GenerateLoadConstant(masm(), receiver, holder, r0, r3, r1, value, &miss);
+  GenerateLoadConstant(receiver, holder, r0, r3, r1, value, name, &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
 
@@ -1151,14 +1217,14 @@
   __ cmp(r2, Operand(Handle<String>(name)));
   __ b(ne, &miss);
 
-  GenerateLoadInterceptor(masm(),
-                          receiver,
+  GenerateLoadInterceptor(receiver,
                           holder,
                           Smi::FromInt(JSObject::kLookupInHolder),
                           r0,
                           r2,
                           r3,
                           r1,
+                          name,
                           &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index c64b92a..ad5396e 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -1373,43 +1373,35 @@
   if (from->HasFastProperties()) {
     Handle<DescriptorArray> descs =
         Handle<DescriptorArray>(from->map()->instance_descriptors());
-    int offset = 0;
-    while (true) {
-      // Iterating through the descriptors is not gc safe so we have to
-      // store the value in a handle and create a new stream for each entry.
-      DescriptorReader stream(*descs, offset);
-      if (stream.eos()) break;
-      // We have to read out the next offset before we do anything that may
-      // cause a gc, since the DescriptorReader is not gc safe.
-      offset = stream.next_position();
-      PropertyDetails details = stream.GetDetails();
+    for (int i = 0; i < descs->number_of_descriptors(); i++) {
+      PropertyDetails details = PropertyDetails(descs->GetDetails(i));
       switch (details.type()) {
         case FIELD: {
           HandleScope inner;
-          Handle<String> key = Handle<String>(stream.GetKey());
-          int index = stream.GetFieldIndex();
+          Handle<String> key = Handle<String>(descs->GetKey(i));
+          int index = descs->GetFieldIndex(i);
           Handle<Object> value = Handle<Object>(from->FastPropertyAt(index));
           SetProperty(to, key, value, details.attributes());
           break;
         }
         case CONSTANT_FUNCTION: {
           HandleScope inner;
-          Handle<String> key = Handle<String>(stream.GetKey());
+          Handle<String> key = Handle<String>(descs->GetKey(i));
           Handle<JSFunction> fun =
-              Handle<JSFunction>(stream.GetConstantFunction());
+              Handle<JSFunction>(descs->GetConstantFunction(i));
           SetProperty(to, key, fun, details.attributes());
           break;
         }
         case CALLBACKS: {
           LookupResult result;
-          to->LocalLookup(stream.GetKey(), &result);
+          to->LocalLookup(descs->GetKey(i), &result);
           // If the property is already there we skip it
           if (result.IsValid()) continue;
           HandleScope inner;
           Handle<DescriptorArray> inst_descs =
               Handle<DescriptorArray>(to->map()->instance_descriptors());
-          Handle<String> key = Handle<String>(stream.GetKey());
-          Handle<Object> entry = Handle<Object>(stream.GetCallbacksObject());
+          Handle<String> key = Handle<String>(descs->GetKey(i));
+          Handle<Object> entry = Handle<Object>(descs->GetCallbacksObject(i));
           inst_descs = Factory::CopyAppendProxyDescriptor(inst_descs,
                                                           key,
                                                           entry,
diff --git a/src/d8-posix.cc b/src/d8-posix.cc
index 3a091f9..fe130ce 100644
--- a/src/d8-posix.cc
+++ b/src/d8-posix.cc
@@ -370,7 +370,11 @@
 // whether it exited normally.  In the common case this doesn't matter because
 // we don't get here before the child has closed stdout and most programs don't
 // do that before they exit.
-#if defined(WNOWAIT) && !defined(ANDROID)
+//
+// We're disabling the use of waitid on Mac OS X because it doesn't work for
+// us: a parent process hangs waiting while the child is already a zombie.
+// See http://code.google.com/p/v8/issues/detail?id=401.
+#if defined(WNOWAIT) && !defined(ANDROID) && !defined(__APPLE__)
 #define HAS_WAITID 1
 #endif
 
diff --git a/src/debug.cc b/src/debug.cc
index e37bfb7..52be930 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -1260,6 +1260,7 @@
 
 // Handle stepping into a function.
 void Debug::HandleStepIn(Handle<JSFunction> function,
+                         Handle<Object> holder,
                          Address fp,
                          bool is_constructor) {
   // If the frame pointer is not supplied by the caller find it.
@@ -1285,21 +1286,12 @@
           Builtins::builtin(Builtins::FunctionCall)) {
         // Handle function.apply and function.call separately to flood the
         // function to be called and not the code for Builtins::FunctionApply or
-        // Builtins::FunctionCall. At the point of the call IC to call either
-        // Builtins::FunctionApply or Builtins::FunctionCall the expression
-        // stack has the following content:
-        //   symbol "apply" or "call"
-        //   function apply or call was called on
-        //   receiver for apply or call (first parameter to apply or call)
-        //   ... further arguments to apply or call.
-        JavaScriptFrameIterator it;
-        ASSERT(it.frame()->fp() == fp);
-        ASSERT(it.frame()->GetExpression(1)->IsJSFunction());
-        if (it.frame()->GetExpression(1)->IsJSFunction()) {
-          Handle<JSFunction>
-              actual_function(JSFunction::cast(it.frame()->GetExpression(1)));
-          Handle<SharedFunctionInfo> actual_shared(actual_function->shared());
-          Debug::FloodWithOneShot(actual_shared);
+        // Builtins::FunctionCall. The receiver of call/apply is the target
+        // function.
+        if (!holder.is_null() && holder->IsJSFunction()) {
+          Handle<SharedFunctionInfo> shared_info(
+              JSFunction::cast(*holder)->shared());
+          Debug::FloodWithOneShot(shared_info);
         }
       } else {
         Debug::FloodWithOneShot(Handle<SharedFunctionInfo>(function->shared()));
diff --git a/src/debug.h b/src/debug.h
index a1abced..970dbbe 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -270,6 +270,7 @@
 
   static bool StepInActive() { return thread_local_.step_into_fp_ != 0; }
   static void HandleStepIn(Handle<JSFunction> function,
+                           Handle<Object> holder,
                            Address fp,
                            bool is_constructor);
   static Address step_in_fp() { return thread_local_.step_into_fp_; }
@@ -363,6 +364,10 @@
   static const int kIa32CallInstructionLength = 5;
   static const int kIa32JSReturnSequenceLength = 6;
 
+  // The x64 JS return sequence is padded with int3 to make it large
+  // enough to hold a call instruction when the debugger patches it.
+  static const int kX64JSReturnSequenceLength = 13;
+
   // Code generator routines.
   static void GenerateLoadICDebugBreak(MacroAssembler* masm);
   static void GenerateStoreICDebugBreak(MacroAssembler* masm);
diff --git a/src/factory.cc b/src/factory.cc
index 572180d..1045a4c 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -570,13 +570,9 @@
   int descriptor_count = 0;
 
   // Copy the descriptors from the array.
-  {
-    DescriptorWriter w(*result);
-    for (DescriptorReader r(*array); !r.eos(); r.advance()) {
-      if (!r.IsNullDescriptor()) {
-        w.WriteFrom(&r);
-      }
-      descriptor_count++;
+  for (int i = 0; i < array->number_of_descriptors(); i++) {
+    if (array->GetType(i) != NULL_DESCRIPTOR) {
+      result->CopyFrom(descriptor_count++, *array, i);
     }
   }
 
@@ -596,9 +592,6 @@
     if (result->LinearSearch(*key, descriptor_count) ==
         DescriptorArray::kNotFound) {
       CallbacksDescriptor desc(*key, *entry, entry->property_attributes());
-      // We do not use a DescriptorWriter because SymbolFromString can
-      // allocate. A DescriptorWriter holds a raw pointer and is
-      // therefore not GC safe.
       result->Set(descriptor_count, &desc);
       descriptor_count++;
     } else {
@@ -609,13 +602,11 @@
   // If duplicates were detected, allocate a result of the right size
   // and transfer the elements.
   if (duplicates > 0) {
+    int number_of_descriptors = result->number_of_descriptors() - duplicates;
     Handle<DescriptorArray> new_result =
-        NewDescriptorArray(result->number_of_descriptors() - duplicates);
-    DescriptorWriter w(*new_result);
-    DescriptorReader r(*result);
-    while (!w.eos()) {
-      w.WriteFrom(&r);
-      r.advance();
+        NewDescriptorArray(number_of_descriptors);
+    for (int i = 0; i < number_of_descriptors; i++) {
+      new_result->CopyFrom(i, *result, i);
     }
     result = new_result;
   }
diff --git a/src/globals.h b/src/globals.h
index 8088331..44bd527 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -257,14 +257,16 @@
 // NOTE: SpaceIterator depends on AllocationSpace enumeration values being
 // consecutive.
 enum AllocationSpace {
-  NEW_SPACE,          // Semispaces collected with copying collector.
-  OLD_POINTER_SPACE,  // Must be first of the paged spaces - see PagedSpaces.
-  OLD_DATA_SPACE,     // May not have pointers to new space.
-  CODE_SPACE,         // Also one of the old spaces.  Marked executable.
-  MAP_SPACE,          // Only map objects.
-  LO_SPACE,           // Large objects.
+  NEW_SPACE,            // Semispaces collected with copying collector.
+  OLD_POINTER_SPACE,    // May contain pointers to new space.
+  OLD_DATA_SPACE,       // Must not have pointers to new space.
+  CODE_SPACE,           // No pointers to new space, marked executable.
+  MAP_SPACE,            // Only and all map objects.
+  CELL_SPACE,           // Only and all cell objects.
+  LO_SPACE,             // Promoted large objects.
+
   FIRST_SPACE = NEW_SPACE,
-  LAST_SPACE = LO_SPACE  // <= 5 (see kSpaceBits and kLOSpacePointer)
+  LAST_SPACE = LO_SPACE
 };
 const int kSpaceTagSize = 3;
 const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
diff --git a/src/handles.cc b/src/handles.cc
index afed6e9..510ea95 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -289,10 +289,11 @@
     // hidden symbols hash code is zero (and no other string has hash
     // code zero) it will always occupy the first entry if present.
     DescriptorArray* descriptors = obj->map()->instance_descriptors();
-    DescriptorReader r(descriptors, 0);  // Explicitly position reader at zero.
-    if (!r.eos() && (r.GetKey() == *key) && r.IsProperty()) {
-      ASSERT(r.type() == FIELD);
-      return Handle<Object>(obj->FastPropertyAt(r.GetFieldIndex()));
+    if ((descriptors->number_of_descriptors() > 0) &&
+        (descriptors->GetKey(0) == *key) &&
+        descriptors->IsProperty(0)) {
+      ASSERT(descriptors->GetType(0) == FIELD);
+      return Handle<Object>(obj->FastPropertyAt(descriptors->GetFieldIndex(0)));
     }
   }
 
@@ -588,12 +589,13 @@
     int num_enum = object->NumberOfEnumProperties();
     Handle<FixedArray> storage = Factory::NewFixedArray(num_enum);
     Handle<FixedArray> sort_array = Factory::NewFixedArray(num_enum);
-    for (DescriptorReader r(object->map()->instance_descriptors());
-         !r.eos();
-         r.advance()) {
-      if (r.IsProperty() && !r.IsDontEnum()) {
-        (*storage)->set(index, r.GetKey());
-        (*sort_array)->set(index, Smi::FromInt(r.GetDetails().index()));
+    Handle<DescriptorArray> descs =
+        Handle<DescriptorArray>(object->map()->instance_descriptors());
+    for (int i = 0; i < descs->number_of_descriptors(); i++) {
+      if (descs->IsProperty(i) && !descs->IsDontEnum(i)) {
+        (*storage)->set(index, descs->GetKey(i));
+        PropertyDetails details(descs->GetDetails(i));
+        (*sort_array)->set(index, Smi::FromInt(details.index()));
         index++;
       }
     }
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 3b6efed..36c6f4b 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -82,6 +82,8 @@
     result = code_space_->AllocateRaw(size_in_bytes);
   } else if (LO_SPACE == space) {
     result = lo_space_->AllocateRaw(size_in_bytes);
+  } else if (CELL_SPACE == space) {
+    result = cell_space_->AllocateRaw(size_in_bytes);
   } else {
     ASSERT(MAP_SPACE == space);
     result = map_space_->AllocateRaw(size_in_bytes);
@@ -107,12 +109,23 @@
 }
 
 
-Object* Heap::AllocateRawMap(int size_in_bytes) {
+Object* Heap::AllocateRawMap() {
 #ifdef DEBUG
   Counters::objs_since_last_full.Increment();
   Counters::objs_since_last_young.Increment();
 #endif
-  Object* result = map_space_->AllocateRaw(size_in_bytes);
+  Object* result = map_space_->AllocateRaw(Map::kSize);
+  if (result->IsFailure()) old_gen_exhausted_ = true;
+  return result;
+}
+
+
+Object* Heap::AllocateRawCell() {
+#ifdef DEBUG
+  Counters::objs_since_last_full.Increment();
+  Counters::objs_since_last_young.Increment();
+#endif
+  Object* result = cell_space_->AllocateRaw(JSGlobalPropertyCell::kSize);
   if (result->IsFailure()) old_gen_exhausted_ = true;
   return result;
 }
diff --git a/src/heap.cc b/src/heap.cc
index 3706159..0af3d90 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -53,6 +53,7 @@
 OldSpace* Heap::old_data_space_ = NULL;
 OldSpace* Heap::code_space_ = NULL;
 MapSpace* Heap::map_space_ = NULL;
+CellSpace* Heap::cell_space_ = NULL;
 LargeObjectSpace* Heap::lo_space_ = NULL;
 
 static const int kMinimumPromotionLimit = 2*MB;
@@ -110,7 +111,8 @@
       old_pointer_space_->Capacity() +
       old_data_space_->Capacity() +
       code_space_->Capacity() +
-      map_space_->Capacity();
+      map_space_->Capacity() +
+      cell_space_->Capacity();
 }
 
 
@@ -121,7 +123,8 @@
       old_pointer_space_->Available() +
       old_data_space_->Available() +
       code_space_->Available() +
-      map_space_->Available();
+      map_space_->Available() +
+      cell_space_->Available();
 }
 
 
@@ -130,6 +133,7 @@
          old_data_space_ != NULL &&
          code_space_ != NULL &&
          map_space_ != NULL &&
+         cell_space_ != NULL &&
          lo_space_ != NULL;
 }
 
@@ -359,6 +363,8 @@
       return code_space_->Available() >= requested_size;
     case MAP_SPACE:
       return map_space_->Available() >= requested_size;
+    case CELL_SPACE:
+      return cell_space_->Available() >= requested_size;
     case LO_SPACE:
       return lo_space_->Available() >= requested_size;
   }
@@ -595,6 +601,7 @@
 }
 #endif
 
+
 void Heap::Scavenge() {
 #ifdef DEBUG
   if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
@@ -653,7 +660,7 @@
   // Copy objects reachable from weak pointers.
   GlobalHandles::IterateWeakRoots(&scavenge_visitor);
 
-#if V8_HOST_ARCH_64_BIT
+#ifdef V8_HOST_ARCH_64_BIT
   // TODO(X64): Make this go away again. We currently disable RSets for
   // 64-bit-mode.
   HeapObjectIterator old_pointer_iterator(old_pointer_space_);
@@ -673,13 +680,25 @@
       heap_object->Iterate(&scavenge_visitor);
     }
   }
-#else  // V8_HOST_ARCH_64_BIT
+#else  // !defined(V8_HOST_ARCH_64_BIT)
   // Copy objects reachable from the old generation.  By definition,
   // there are no intergenerational pointers in code or data spaces.
   IterateRSet(old_pointer_space_, &ScavengePointer);
   IterateRSet(map_space_, &ScavengePointer);
   lo_space_->IterateRSet(&ScavengePointer);
-#endif   // V8_HOST_ARCH_64_BIT
+#endif
+
+  // Copy objects reachable from cells by scavenging cell values directly.
+  HeapObjectIterator cell_iterator(cell_space_);
+  while (cell_iterator.has_next()) {
+    HeapObject* cell = cell_iterator.next();
+    if (cell->IsJSGlobalPropertyCell()) {
+      Address value_address =
+          reinterpret_cast<Address>(cell) +
+          (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
+      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
+    }
+  }
 
   do {
     ASSERT(new_space_front <= new_space_.top());
@@ -819,8 +838,8 @@
 
 
 void Heap::RebuildRSets() {
-  // By definition, we do not care about remembered set bits in code or data
-  // spaces.
+  // By definition, we do not care about remembered set bits in code,
+  // data, or cell spaces.
   map_space_->ClearRSet();
   RebuildRSets(map_space_);
 
@@ -995,7 +1014,7 @@
 
 Object* Heap::AllocatePartialMap(InstanceType instance_type,
                                  int instance_size) {
-  Object* result = AllocateRawMap(Map::kSize);
+  Object* result = AllocateRawMap();
   if (result->IsFailure()) return result;
 
   // Map::cast cannot be used due to uninitialized map field.
@@ -1009,7 +1028,7 @@
 
 
 Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
-  Object* result = AllocateRawMap(Map::kSize);
+  Object* result = AllocateRawMap();
   if (result->IsFailure()) return result;
 
   Map* map = reinterpret_cast<Map*>(result);
@@ -1055,7 +1074,6 @@
 bool Heap::CreateInitialMaps() {
   Object* obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
   if (obj->IsFailure()) return false;
-
   // Map::cast cannot be used due to uninitialized map field.
   Map* new_meta_map = reinterpret_cast<Map*>(obj);
   set_meta_map(new_meta_map);
@@ -1069,11 +1087,6 @@
   if (obj->IsFailure()) return false;
   set_oddball_map(Map::cast(obj));
 
-  obj = AllocatePartialMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
-                           JSGlobalPropertyCell::kSize);
-  if (obj->IsFailure()) return false;
-  set_global_property_cell_map(Map::cast(obj));
-
   // Allocate the empty array
   obj = AllocateEmptyFixedArray();
   if (obj->IsFailure()) return false;
@@ -1083,11 +1096,10 @@
   if (obj->IsFailure()) return false;
   set_null_value(obj);
 
-  // Allocate the empty descriptor array.  AllocateMap can now be used.
+  // Allocate the empty descriptor array.
   obj = AllocateEmptyFixedArray();
   if (obj->IsFailure()) return false;
-  // There is a check against empty_descriptor_array() in cast().
-  set_empty_descriptor_array(reinterpret_cast<DescriptorArray*>(obj));
+  set_empty_descriptor_array(DescriptorArray::cast(obj));
 
   // Fix the instance_descriptors for the existing maps.
   meta_map()->set_instance_descriptors(empty_descriptor_array());
@@ -1099,22 +1111,16 @@
   oddball_map()->set_instance_descriptors(empty_descriptor_array());
   oddball_map()->set_code_cache(empty_fixed_array());
 
-  global_property_cell_map()->set_instance_descriptors(
-      empty_descriptor_array());
-  global_property_cell_map()->set_code_cache(empty_fixed_array());
-
   // Fix prototype object for existing maps.
   meta_map()->set_prototype(null_value());
   meta_map()->set_constructor(null_value());
 
   fixed_array_map()->set_prototype(null_value());
   fixed_array_map()->set_constructor(null_value());
+
   oddball_map()->set_prototype(null_value());
   oddball_map()->set_constructor(null_value());
 
-  global_property_cell_map()->set_prototype(null_value());
-  global_property_cell_map()->set_constructor(null_value());
-
   obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
   if (obj->IsFailure()) return false;
   set_heap_number_map(Map::cast(obj));
@@ -1168,13 +1174,18 @@
   if (obj->IsFailure()) return false;
   set_code_map(Map::cast(obj));
 
+  obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
+                    JSGlobalPropertyCell::kSize);
+  if (obj->IsFailure()) return false;
+  set_global_property_cell_map(Map::cast(obj));
+
   obj = AllocateMap(FILLER_TYPE, kPointerSize);
   if (obj->IsFailure()) return false;
-  set_one_word_filler_map(Map::cast(obj));
+  set_one_pointer_filler_map(Map::cast(obj));
 
   obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
   if (obj->IsFailure()) return false;
-  set_two_word_filler_map(Map::cast(obj));
+  set_two_pointer_filler_map(Map::cast(obj));
 
   for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
     const StructTable& entry = struct_table[i];
@@ -1242,9 +1253,7 @@
 
 
 Object* Heap::AllocateJSGlobalPropertyCell(Object* value) {
-  Object* result = AllocateRaw(JSGlobalPropertyCell::kSize,
-                               OLD_POINTER_SPACE,
-                               OLD_POINTER_SPACE);
+  Object* result = AllocateRawCell();
   if (result->IsFailure()) return result;
   HeapObject::cast(result)->set_map(global_property_cell_map());
   JSGlobalPropertyCell::cast(result)->set_value(value);
@@ -1821,7 +1830,7 @@
   if (size == 0) return;
   HeapObject* filler = HeapObject::FromAddress(addr);
   if (size == kPointerSize) {
-    filler->set_map(Heap::one_word_filler_map());
+    filler->set_map(Heap::one_pointer_filler_map());
   } else {
     filler->set_map(Heap::byte_array_map());
     ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
@@ -2697,6 +2706,8 @@
   code_space_->ReportStatistics();
   PrintF("Map space : ");
   map_space_->ReportStatistics();
+  PrintF("Cell space : ");
+  cell_space_->ReportStatistics();
   PrintF("Large object space : ");
   lo_space_->ReportStatistics();
   PrintF(">>>>>> ========================================= >>>>>>\n");
@@ -2717,6 +2728,7 @@
      old_data_space_->Contains(addr) ||
      code_space_->Contains(addr) ||
      map_space_->Contains(addr) ||
+     cell_space_->Contains(addr) ||
      lo_space_->SlowContains(addr));
 }
 
@@ -2741,6 +2753,8 @@
       return code_space_->Contains(addr);
     case MAP_SPACE:
       return map_space_->Contains(addr);
+    case CELL_SPACE:
+      return cell_space_->Contains(addr);
     case LO_SPACE:
       return lo_space_->SlowContains(addr);
   }
@@ -2754,12 +2768,20 @@
   ASSERT(HasBeenSetup());
 
   VerifyPointersVisitor visitor;
-  Heap::IterateRoots(&visitor);
+  IterateRoots(&visitor);
 
-  AllSpaces spaces;
-  while (Space* space = spaces.next()) {
-    space->Verify();
-  }
+  new_space_.Verify();
+
+  VerifyPointersAndRSetVisitor rset_visitor;
+  old_pointer_space_->Verify(&rset_visitor);
+  map_space_->Verify(&rset_visitor);
+
+  VerifyPointersVisitor no_rset_visitor;
+  old_data_space_->Verify(&no_rset_visitor);
+  code_space_->Verify(&no_rset_visitor);
+  cell_space_->Verify(&no_rset_visitor);
+
+  lo_space_->Verify();
 }
 #endif  // DEBUG
 
@@ -2964,6 +2986,7 @@
       + old_data_space_->Size()
       + code_space_->Size()
       + map_space_->Size()
+      + cell_space_->Size()
       + lo_space_->Size();
 }
 
@@ -3041,6 +3064,13 @@
   // enough to hold at least a page will cause it to allocate.
   if (!map_space_->Setup(NULL, 0)) return false;
 
+  // Initialize global property cell space.
+  cell_space_ = new CellSpace(old_generation_size_, CELL_SPACE);
+  if (cell_space_ == NULL) return false;
+  // Setting up a paged space without giving it a virtual memory range big
+  // enough to hold at least a page will cause it to allocate.
+  if (!cell_space_->Setup(NULL, 0)) return false;
+
   // The large object code space may contain code or data.  We set the memory
   // to be non-executable here for safety, but this means we need to enable it
   // explicitly when allocating large code objects.
@@ -3093,6 +3123,12 @@
     map_space_ = NULL;
   }
 
+  if (cell_space_ != NULL) {
+    cell_space_->TearDown();
+    delete cell_space_;
+    cell_space_ = NULL;
+  }
+
   if (lo_space_ != NULL) {
     lo_space_->TearDown();
     delete lo_space_;
@@ -3104,11 +3140,9 @@
 
 
 void Heap::Shrink() {
-  // Try to shrink map, old, and code spaces.
-  map_space_->Shrink();
-  old_pointer_space_->Shrink();
-  old_data_space_->Shrink();
-  code_space_->Shrink();
+  // Try to shrink all paged spaces.
+  PagedSpaces spaces;
+  while (PagedSpace* space = spaces.next()) space->Shrink();
 }
 
 
@@ -3116,24 +3150,16 @@
 
 void Heap::Protect() {
   if (HasBeenSetup()) {
-    new_space_.Protect();
-    map_space_->Protect();
-    old_pointer_space_->Protect();
-    old_data_space_->Protect();
-    code_space_->Protect();
-    lo_space_->Protect();
+    AllSpaces spaces;
+    while (Space* space = spaces.next()) space->Protect();
   }
 }
 
 
 void Heap::Unprotect() {
   if (HasBeenSetup()) {
-    new_space_.Unprotect();
-    map_space_->Unprotect();
-    old_pointer_space_->Unprotect();
-    old_data_space_->Unprotect();
-    code_space_->Unprotect();
-    lo_space_->Unprotect();
+    AllSpaces spaces;
+    while (Space* space = spaces.next()) space->Unprotect();
   }
 }
 
@@ -3171,6 +3197,8 @@
       return Heap::code_space();
     case MAP_SPACE:
       return Heap::map_space();
+    case CELL_SPACE:
+      return Heap::cell_space();
     case LO_SPACE:
       return Heap::lo_space();
     default:
@@ -3189,6 +3217,8 @@
       return Heap::code_space();
     case MAP_SPACE:
       return Heap::map_space();
+    case CELL_SPACE:
+      return Heap::cell_space();
     default:
       return NULL;
   }
@@ -3262,6 +3292,9 @@
     case MAP_SPACE:
       iterator_ = new HeapObjectIterator(Heap::map_space());
       break;
+    case CELL_SPACE:
+      iterator_ = new HeapObjectIterator(Heap::cell_space());
+      break;
     case LO_SPACE:
       iterator_ = new LargeObjectIterator(Heap::lo_space());
       break;
diff --git a/src/heap.h b/src/heap.h
index c88b5e6..f395988 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -105,8 +105,8 @@
   V(Map, boilerplate_function_map, BoilerplateFunctionMap)                     \
   V(Map, shared_function_info_map, SharedFunctionInfoMap)                      \
   V(Map, proxy_map, ProxyMap)                                                  \
-  V(Map, one_word_filler_map, OneWordFillerMap)                                \
-  V(Map, two_word_filler_map, TwoWordFillerMap)                                \
+  V(Map, one_pointer_filler_map, OnePointerFillerMap)                          \
+  V(Map, two_pointer_filler_map, TwoPointerFillerMap)                          \
   V(Object, nan_value, NanValue)                                               \
   V(Object, undefined_value, UndefinedValue)                                   \
   V(Object, minus_zero_value, MinusZeroValue)                                  \
@@ -263,6 +263,7 @@
   static OldSpace* old_data_space() { return old_data_space_; }
   static OldSpace* code_space() { return code_space_; }
   static MapSpace* map_space() { return map_space_; }
+  static CellSpace* cell_space() { return cell_space_; }
   static LargeObjectSpace* lo_space() { return lo_space_; }
 
   static bool always_allocate() { return always_allocate_scope_depth_ != 0; }
@@ -852,6 +853,7 @@
   static OldSpace* old_data_space_;
   static OldSpace* code_space_;
   static MapSpace* map_space_;
+  static CellSpace* cell_space_;
   static LargeObjectSpace* lo_space_;
   static HeapState gc_state_;
 
@@ -975,7 +977,10 @@
   // to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
   // have to test the allocation space argument and (b) can reduce code size
   // (since both AllocateRaw and AllocateRawMap are inlined).
-  static inline Object* AllocateRawMap(int size_in_bytes);
+  static inline Object* AllocateRawMap();
+
+  // Allocate an uninitialized object in the global property cell space.
+  static inline Object* AllocateRawCell();
 
   // Initializes a JSObject based on its map.
   static void InitializeJSObjectFromMap(JSObject* obj,
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 3b2eaa0..6d1dc2d 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -7591,6 +7591,16 @@
     __ dec(Operand::StaticVariable(scope_depth));
   }
 
+  // Make sure we're not trying to return 'the hole' from the runtime
+  // call as this may lead to crashes in the IC code later.
+  if (FLAG_debug_code) {
+    Label okay;
+    __ cmp(eax, Factory::the_hole_value());
+    __ j(not_equal, &okay);
+    __ int3();
+    __ bind(&okay);
+  }
+
   // Check for failure result.
   Label failure_returned;
   ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 97de4da..90e0fd1 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -75,6 +75,12 @@
   __ cmp(r0, JS_GLOBAL_PROXY_TYPE);
   __ j(equal, miss_label, not_taken);
 
+  // Possible work-around for http://crbug.com/16276.
+  __ cmp(r0, JS_GLOBAL_OBJECT_TYPE);
+  __ j(equal, miss_label, not_taken);
+  __ cmp(r0, JS_BUILTINS_OBJECT_TYPE);
+  __ j(equal, miss_label, not_taken);
+
   // Check that the properties array is a dictionary.
   __ mov(r0, FieldOperand(r1, JSObject::kPropertiesOffset));
   __ cmp(FieldOperand(r0, HeapObject::kMapOffset),
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 2ee826e..0a887d5 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -273,114 +273,6 @@
 }
 
 
-void StubCompiler::GenerateLoadField(MacroAssembler* masm,
-                                     JSObject* object,
-                                     JSObject* holder,
-                                     Register receiver,
-                                     Register scratch1,
-                                     Register scratch2,
-                                     int index,
-                                     Label* miss_label) {
-  // Check that the receiver isn't a smi.
-  __ test(receiver, Immediate(kSmiTagMask));
-  __ j(zero, miss_label, not_taken);
-
-  // Check that the maps haven't changed.
-  Register reg =
-      masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
-
-  // Get the value from the properties.
-  GenerateFastPropertyLoad(masm, eax, reg, holder, index);
-  __ ret(0);
-}
-
-
-void StubCompiler::GenerateLoadCallback(MacroAssembler* masm,
-                                        JSObject* object,
-                                        JSObject* holder,
-                                        Register receiver,
-                                        Register name,
-                                        Register scratch1,
-                                        Register scratch2,
-                                        AccessorInfo* callback,
-                                        Label* miss_label) {
-  // Check that the receiver isn't a smi.
-  __ test(receiver, Immediate(kSmiTagMask));
-  __ j(zero, miss_label, not_taken);
-
-  // Check that the maps haven't changed.
-  Register reg =
-      masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
-
-  // Push the arguments on the JS stack of the caller.
-  __ pop(scratch2);  // remove return address
-  __ push(receiver);  // receiver
-  __ push(Immediate(Handle<AccessorInfo>(callback)));  // callback data
-  __ push(name);  // name
-  __ push(reg);  // holder
-  __ push(scratch2);  // restore return address
-
-  // Do tail-call to the runtime system.
-  ExternalReference load_callback_property =
-      ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
-  __ TailCallRuntime(load_callback_property, 4);
-}
-
-
-void StubCompiler::GenerateLoadConstant(MacroAssembler* masm,
-                                        JSObject* object,
-                                        JSObject* holder,
-                                        Register receiver,
-                                        Register scratch1,
-                                        Register scratch2,
-                                        Object* value,
-                                        Label* miss_label) {
-  // Check that the receiver isn't a smi.
-  __ test(receiver, Immediate(kSmiTagMask));
-  __ j(zero, miss_label, not_taken);
-
-  // Check that the maps haven't changed.
-  Register reg =
-      masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
-
-  // Return the constant value.
-  __ mov(eax, Handle<Object>(value));
-  __ ret(0);
-}
-
-
-void StubCompiler::GenerateLoadInterceptor(MacroAssembler* masm,
-                                           JSObject* object,
-                                           JSObject* holder,
-                                           Smi* lookup_hint,
-                                           Register receiver,
-                                           Register name,
-                                           Register scratch1,
-                                           Register scratch2,
-                                           Label* miss_label) {
-  // Check that the receiver isn't a smi.
-  __ test(receiver, Immediate(kSmiTagMask));
-  __ j(zero, miss_label, not_taken);
-
-  // Check that the maps haven't changed.
-  Register reg =
-      masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
-
-  // Push the arguments on the JS stack of the caller.
-  __ pop(scratch2);  // remove return address
-  __ push(receiver);  // receiver
-  __ push(reg);  // holder
-  __ push(name);  // name
-  // TODO(367): Maybe don't push lookup_hint for LOOKUP_IN_HOLDER and/or
-  // LOOKUP_IN_PROTOTYPE, but use a special version of lookup method?
-  __ push(Immediate(lookup_hint));
-  __ push(scratch2);  // restore return address
-
-  // Do tail-call to the runtime system.
-  ExternalReference load_ic_property =
-      ExternalReference(IC_Utility(IC::kLoadInterceptorProperty));
-  __ TailCallRuntime(load_ic_property, 4);
-}
 
 
 void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
@@ -474,10 +366,159 @@
 
 
 #undef __
-
 #define __ ACCESS_MASM(masm())
 
 
+Register StubCompiler::CheckPrototypes(JSObject* object,
+                                       Register object_reg,
+                                       JSObject* holder,
+                                       Register holder_reg,
+                                       Register scratch,
+                                       String* name,
+                                       Label* miss) {
+  // Check that the maps haven't changed.
+  Register result =
+      masm()->CheckMaps(object, object_reg, holder, holder_reg, scratch, miss);
+
+  // If we've skipped any global objects, it is not enough to verify
+  // that their maps haven't changed; their property cells must be checked.
+  while (object != holder) {
+    if (object->IsGlobalObject()) {
+      GlobalObject* global = GlobalObject::cast(object);
+      Object* probe = global->EnsurePropertyCell(name);
+      if (probe->IsFailure()) {
+        set_failure(Failure::cast(probe));
+        return result;
+      }
+      JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
+      ASSERT(cell->value()->IsTheHole());
+      __ mov(scratch, Immediate(Handle<Object>(cell)));
+      __ cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
+             Immediate(Factory::the_hole_value()));
+      __ j(not_equal, miss, not_taken);
+    }
+    object = JSObject::cast(object->GetPrototype());
+  }
+
+  // Return the register containing the holder.
+  return result;
+}
+
+
+void StubCompiler::GenerateLoadField(JSObject* object,
+                                     JSObject* holder,
+                                     Register receiver,
+                                     Register scratch1,
+                                     Register scratch2,
+                                     int index,
+                                     String* name,
+                                     Label* miss) {
+  // Check that the receiver isn't a smi.
+  __ test(receiver, Immediate(kSmiTagMask));
+  __ j(zero, miss, not_taken);
+
+  // Check the prototype chain.
+  Register reg =
+      CheckPrototypes(object, receiver, holder,
+                      scratch1, scratch2, name, miss);
+
+  // Get the value from the properties.
+  GenerateFastPropertyLoad(masm(), eax, reg, holder, index);
+  __ ret(0);
+}
+
+
+void StubCompiler::GenerateLoadCallback(JSObject* object,
+                                        JSObject* holder,
+                                        Register receiver,
+                                        Register name_reg,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        AccessorInfo* callback,
+                                        String* name,
+                                        Label* miss) {
+  // Check that the receiver isn't a smi.
+  __ test(receiver, Immediate(kSmiTagMask));
+  __ j(zero, miss, not_taken);
+
+  // Check that the maps haven't changed.
+  Register reg =
+      CheckPrototypes(object, receiver, holder,
+                      scratch1, scratch2, name, miss);
+
+  // Push the arguments on the JS stack of the caller.
+  __ pop(scratch2);  // remove return address
+  __ push(receiver);  // receiver
+  __ push(Immediate(Handle<AccessorInfo>(callback)));  // callback data
+  __ push(name_reg);  // name
+  __ push(reg);  // holder
+  __ push(scratch2);  // restore return address
+
+  // Do tail-call to the runtime system.
+  ExternalReference load_callback_property =
+      ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
+  __ TailCallRuntime(load_callback_property, 4);
+}
+
+
+void StubCompiler::GenerateLoadConstant(JSObject* object,
+                                        JSObject* holder,
+                                        Register receiver,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Object* value,
+                                        String* name,
+                                        Label* miss) {
+  // Check that the receiver isn't a smi.
+  __ test(receiver, Immediate(kSmiTagMask));
+  __ j(zero, miss, not_taken);
+
+  // Check that the maps haven't changed.
+  Register reg =
+      CheckPrototypes(object, receiver, holder,
+                      scratch1, scratch2, name, miss);
+
+  // Return the constant value.
+  __ mov(eax, Handle<Object>(value));
+  __ ret(0);
+}
+
+
+void StubCompiler::GenerateLoadInterceptor(JSObject* object,
+                                           JSObject* holder,
+                                           Smi* lookup_hint,
+                                           Register receiver,
+                                           Register name_reg,
+                                           Register scratch1,
+                                           Register scratch2,
+                                           String* name,
+                                           Label* miss) {
+  // Check that the receiver isn't a smi.
+  __ test(receiver, Immediate(kSmiTagMask));
+  __ j(zero, miss, not_taken);
+
+  // Check that the maps haven't changed.
+  Register reg =
+      CheckPrototypes(object, receiver, holder,
+                      scratch1, scratch2, name, miss);
+
+  // Push the arguments on the JS stack of the caller.
+  __ pop(scratch2);  // remove return address
+  __ push(receiver);  // receiver
+  __ push(reg);  // holder
+  __ push(name_reg);  // name
+  // TODO(367): Maybe don't push lookup_hint for LOOKUP_IN_HOLDER and/or
+  // LOOKUP_IN_PROTOTYPE, but use a special version of lookup method?
+  __ push(Immediate(lookup_hint));
+  __ push(scratch2);  // restore return address
+
+  // Do tail-call to the runtime system.
+  ExternalReference load_ic_property =
+      ExternalReference(IC_Utility(IC::kLoadInterceptorProperty));
+  __ TailCallRuntime(load_ic_property, 4);
+}
+
+
 // TODO(1241006): Avoid having lazy compile stubs specialized by the
 // number of arguments. It is not needed anymore.
 Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
@@ -520,7 +561,8 @@
 
   // Do the right check and compute the holder register.
   Register reg =
-      masm()->CheckMaps(JSObject::cast(object), edx, holder, ebx, ecx, &miss);
+      CheckPrototypes(JSObject::cast(object), edx, holder,
+                      ebx, ecx, name, &miss);
 
   GenerateFastPropertyLoad(masm(), edi, reg, holder, index);
 
@@ -553,6 +595,7 @@
 Object* CallStubCompiler::CompileCallConstant(Object* object,
                                               JSObject* holder,
                                               JSFunction* function,
+                                              String* name,
                                               CheckType check) {
   // ----------- S t a t e -------------
   // -----------------------------------
@@ -575,7 +618,8 @@
   switch (check) {
     case RECEIVER_MAP_CHECK:
       // Check that the maps haven't changed.
-      __ CheckMaps(JSObject::cast(object), edx, holder, ebx, ecx, &miss);
+      CheckPrototypes(JSObject::cast(object), edx, holder,
+                      ebx, ecx, name, &miss);
 
       // Patch the receiver on the stack with the global proxy if
       // necessary.
@@ -595,8 +639,8 @@
       GenerateLoadGlobalFunctionPrototype(masm(),
                                           Context::STRING_FUNCTION_INDEX,
                                           ecx);
-      __ CheckMaps(JSObject::cast(object->GetPrototype()),
-                   ecx, holder, ebx, edx, &miss);
+      CheckPrototypes(JSObject::cast(object->GetPrototype()), ecx, holder,
+                      ebx, edx, name, &miss);
       break;
 
     case NUMBER_CHECK: {
@@ -611,8 +655,8 @@
       GenerateLoadGlobalFunctionPrototype(masm(),
                                           Context::NUMBER_FUNCTION_INDEX,
                                           ecx);
-      __ CheckMaps(JSObject::cast(object->GetPrototype()),
-                   ecx, holder, ebx, edx, &miss);
+      CheckPrototypes(JSObject::cast(object->GetPrototype()), ecx, holder,
+                      ebx, edx, name, &miss);
       break;
     }
 
@@ -628,13 +672,14 @@
       GenerateLoadGlobalFunctionPrototype(masm(),
                                           Context::BOOLEAN_FUNCTION_INDEX,
                                           ecx);
-      __ CheckMaps(JSObject::cast(object->GetPrototype()),
-                   ecx, holder, ebx, edx, &miss);
+      CheckPrototypes(JSObject::cast(object->GetPrototype()), ecx, holder,
+                      ebx, edx, name, &miss);
       break;
     }
 
     case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
-      __ CheckMaps(JSObject::cast(object), edx, holder, ebx, ecx, &miss);
+      CheckPrototypes(JSObject::cast(object), edx, holder,
+                      ebx, ecx, name, &miss);
       // Make sure object->elements()->map() != Heap::dictionary_array_map()
       // Get the elements array of the object.
       __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
@@ -692,7 +737,8 @@
 
   // Check that maps have not changed and compute the holder register.
   Register reg =
-      masm()->CheckMaps(JSObject::cast(object), edx, holder, ebx, ecx, &miss);
+      CheckPrototypes(JSObject::cast(object), edx, holder,
+                      ebx, ecx, name, &miss);
 
   // Enter an internal frame.
   __ EnterInternalFrame();
@@ -745,7 +791,8 @@
 }
 
 
-Object* CallStubCompiler::CompileCallGlobal(GlobalObject* object,
+Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
+                                            GlobalObject* holder,
                                             JSGlobalPropertyCell* cell,
                                             JSFunction* function,
                                             String* name) {
@@ -758,11 +805,19 @@
   // Get the number of arguments.
   const int argc = arguments().immediate();
 
-  // Check that the map of the global has not changed.
+  // Get the receiver from the stack.
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
-  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
-         Immediate(Handle<Map>(object->map())));
-  __ j(not_equal, &miss, not_taken);
+
+  // If the object is the holder then we know that it's a global
+  // object, which can only happen for contextual calls. In this case,
+  // the receiver cannot be a smi.
+  if (object != holder) {
+    __ test(edx, Immediate(kSmiTagMask));
+    __ j(zero, &miss, not_taken);
+  }
+
+  // Check that the maps haven't changed.
+  CheckPrototypes(object, edx, holder, ebx, ecx, name, &miss);
 
   // Get the value from the cell.
   __ mov(edi, Immediate(Handle<JSGlobalPropertyCell>(cell)));
@@ -773,8 +828,10 @@
   __ j(not_equal, &miss, not_taken);
 
   // Patch the receiver on the stack with the global proxy.
-  __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
-  __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
+  if (object->IsGlobalObject()) {
+    __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
+    __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
+  }
 
   // Setup the context (function already in edi).
   __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
@@ -964,11 +1021,6 @@
   __ mov(ecx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
   __ mov(FieldOperand(ecx, JSGlobalPropertyCell::kValueOffset), eax);
 
-  // RecordWrite clobbers the value register. Pass the value being stored in
-  // edx.
-  __ mov(edx, eax);
-  __ RecordWrite(ecx, JSGlobalPropertyCell::kValueOffset, edx, ebx);
-
   // Return the value (register eax).
   __ ret(0);
 
@@ -1027,6 +1079,7 @@
 }
 
 
+
 Object* LoadStubCompiler::CompileLoadField(JSObject* object,
                                            JSObject* holder,
                                            int index,
@@ -1039,7 +1092,7 @@
   Label miss;
 
   __ mov(eax, (Operand(esp, kPointerSize)));
-  GenerateLoadField(masm(), object, holder, eax, ebx, edx, index, &miss);
+  GenerateLoadField(object, holder, eax, ebx, edx, index, name, &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
@@ -1060,8 +1113,8 @@
   Label miss;
 
   __ mov(eax, (Operand(esp, kPointerSize)));
-  GenerateLoadCallback(masm(), object, holder, eax, ecx, ebx,
-                       edx, callback, &miss);
+  GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
+                       callback, name, &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
@@ -1082,7 +1135,7 @@
   Label miss;
 
   __ mov(eax, (Operand(esp, kPointerSize)));
-  GenerateLoadConstant(masm(), object, holder, eax, ebx, edx, value, &miss);
+  GenerateLoadConstant(object, holder, eax, ebx, edx, value, name, &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
@@ -1104,14 +1157,14 @@
   __ mov(eax, (Operand(esp, kPointerSize)));
   // TODO(368): Compile in the whole chain: all the interceptors in
   // prototypes and ultimate answer.
-  GenerateLoadInterceptor(masm(),
-                          receiver,
+  GenerateLoadInterceptor(receiver,
                           holder,
                           holder->InterceptorPropertyLookupHint(name),
                           eax,
                           ecx,
                           edx,
                           ebx,
+                          name,
                           &miss);
 
   __ bind(&miss);
@@ -1122,7 +1175,8 @@
 }
 
 
-Object* LoadStubCompiler::CompileLoadGlobal(GlobalObject* object,
+Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
+                                            GlobalObject* holder,
                                             JSGlobalPropertyCell* cell,
                                             String* name,
                                             bool is_dont_delete) {
@@ -1135,11 +1189,19 @@
 
   __ IncrementCounter(&Counters::named_load_global_inline, 1);
 
-  // Check that the map of the global has not changed.
+  // Get the receiver from the stack.
   __ mov(eax, (Operand(esp, kPointerSize)));
-  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
-         Immediate(Handle<Map>(object->map())));
-  __ j(not_equal, &miss, not_taken);
+
+  // If the object is the holder then we know that it's a global
+  // object, which can only happen for contextual loads. In this case,
+  // the receiver cannot be a smi.
+  if (object != holder) {
+    __ test(eax, Immediate(kSmiTagMask));
+    __ j(zero, &miss, not_taken);
+  }
+
+  // Check that the maps haven't changed.
+  CheckPrototypes(object, eax, holder, ebx, edx, name, &miss);
 
   // Get the value from the cell.
   __ mov(eax, Immediate(Handle<JSGlobalPropertyCell>(cell)));
@@ -1185,7 +1247,8 @@
   __ cmp(Operand(eax), Immediate(Handle<String>(name)));
   __ j(not_equal, &miss, not_taken);
 
-  GenerateLoadField(masm(), receiver, holder, ecx, ebx, edx, index, &miss);
+  GenerateLoadField(receiver, holder, ecx, ebx, edx, index, name, &miss);
+
   __ bind(&miss);
   __ DecrementCounter(&Counters::keyed_load_field, 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -1214,8 +1277,8 @@
   __ cmp(Operand(eax), Immediate(Handle<String>(name)));
   __ j(not_equal, &miss, not_taken);
 
-  GenerateLoadCallback(masm(), receiver, holder, ecx, eax, ebx, edx,
-                       callback, &miss);
+  GenerateLoadCallback(receiver, holder, ecx, eax, ebx, edx,
+                       callback, name, &miss);
   __ bind(&miss);
   __ DecrementCounter(&Counters::keyed_load_callback, 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -1244,7 +1307,8 @@
   __ cmp(Operand(eax), Immediate(Handle<String>(name)));
   __ j(not_equal, &miss, not_taken);
 
-  GenerateLoadConstant(masm(), receiver, holder, ecx, ebx, edx, value, &miss);
+  GenerateLoadConstant(receiver, holder, ecx, ebx, edx,
+                       value, name, &miss);
   __ bind(&miss);
   __ DecrementCounter(&Counters::keyed_load_constant_function, 1);
   GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
@@ -1272,14 +1336,14 @@
   __ cmp(Operand(eax), Immediate(Handle<String>(name)));
   __ j(not_equal, &miss, not_taken);
 
-  GenerateLoadInterceptor(masm(),
-                          receiver,
+  GenerateLoadInterceptor(receiver,
                           holder,
                           Smi::FromInt(JSObject::kLookupInHolder),
                           ecx,
                           eax,
                           edx,
                           ebx,
+                          name,
                           &miss);
   __ bind(&miss);
   __ DecrementCounter(&Counters::keyed_load_interceptor, 1);
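
For illustration, here is a minimal self-contained sketch of the guard that the new CheckPrototypes emits above: every global object skipped between the receiver and the holder gets a property cell for the name, created holding the hole, and the compiled stub compares the cell against the hole so it starts missing as soon as the property is actually added. All types below are hypothetical stand-ins, not the real V8 classes.

    // Hypothetical stand-ins for JSGlobalPropertyCell, the hole sentinel
    // and the object model; only the guard logic mirrors the stub above.
    #include <cassert>
    #include <map>
    #include <string>

    struct Cell { const void* value; };               // property cell
    static const void* const kTheHole = &kTheHole;    // hole sentinel

    struct Obj {
      bool is_global = false;
      Obj* prototype = nullptr;
      std::map<std::string, Cell> cells;              // globals only

      // EnsurePropertyCell: create a hole-valued cell on first use.
      Cell* EnsureCell(const std::string& name) {
        auto it = cells.find(name);
        if (it == cells.end()) it = cells.emplace(name, Cell{kTheHole}).first;
        return &it->second;
      }
    };

    // Returns true while the fast path is valid, false for a "miss".
    bool CheckPrototypes(Obj* object, Obj* holder, const std::string& name) {
      while (object != holder) {
        if (object->is_global) {
          Cell* cell = object->EnsureCell(name);
          // The stub embeds this comparison: once the property is added
          // to the skipped global, the cell changes and the stub misses.
          if (cell->value != kTheHole) return false;
        }
        object = object->prototype;
      }
      return true;
    }

    int main() {
      Obj holder;
      Obj global;
      global.is_global = true;
      global.prototype = &holder;

      assert(CheckPrototypes(&global, &holder, "x"));   // still valid
      global.cells["x"].value = nullptr;                // property added
      assert(!CheckPrototypes(&global, &holder, "x"));  // must miss now
      return 0;
    }

The same guard is what lets CompileCallGlobal and CompileLoadGlobal above accept receivers other than the global object itself.
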
diff --git a/src/ic.cc b/src/ic.cc
index dfdf722..7e82295 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -327,13 +327,11 @@
     return TypeError("non_object_property_call", object, name);
   }
 
-  Object* result = Heap::the_hole_value();
-
   // Check if the name is trivially convertible to an index and get
   // the element if so.
   uint32_t index;
   if (name->AsArrayIndex(&index)) {
-    result = object->GetElement(index);
+    Object* result = object->GetElement(index);
     if (result->IsJSFunction()) return result;
 
     // Try to find a suitable function delegate for the object at hand.
@@ -363,7 +361,7 @@
 
   // Get the property.
   PropertyAttributes attr;
-  result = object->GetProperty(*object, &lookup, *name, &attr);
+  Object* result = object->GetProperty(*object, &lookup, *name, &attr);
   if (result->IsFailure()) return result;
   if (lookup.type() == INTERCEPTOR) {
     // If the object does not have the requested property, check which
@@ -397,7 +395,7 @@
       // cause GC.
       HandleScope scope;
       Handle<JSFunction> function(JSFunction::cast(result));
-      Debug::HandleStepIn(function, fp(), false);
+      Debug::HandleStepIn(function, object, fp(), false);
       return *function;
     }
 #endif
@@ -452,24 +450,26 @@
       }
       case NORMAL: {
         if (!object->IsJSObject()) return;
-        if (object->IsGlobalObject()) {
-          // The stub generated for the global object picks the value directly
-          // from the property cell. So the property must be directly on the
-          // global object.
-          Handle<GlobalObject> global = Handle<GlobalObject>::cast(object);
-          if (lookup->holder() != *global) return;
+        Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+
+        if (lookup->holder()->IsGlobalObject()) {
+          GlobalObject* global = GlobalObject::cast(lookup->holder());
           JSGlobalPropertyCell* cell =
               JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
           if (!cell->value()->IsJSFunction()) return;
           JSFunction* function = JSFunction::cast(cell->value());
-          code = StubCache::ComputeCallGlobal(argc, in_loop, *name, *global,
-                                              cell, function);
+          code = StubCache::ComputeCallGlobal(argc,
+                                              in_loop,
+                                              *name,
+                                              *receiver,
+                                              global,
+                                              cell,
+                                              function);
         } else {
           // There is only one shared stub for calling normalized
           // properties. It does not traverse the prototype chain, so the
           // property must be found in the receiver for the stub to be
           // applicable.
-          Handle<JSObject> receiver = Handle<JSObject>::cast(object);
           if (lookup->holder() != *receiver) return;
           code = StubCache::ComputeCallNormal(argc, in_loop, *name, *receiver);
         }
@@ -657,16 +657,15 @@
         break;
       }
       case NORMAL: {
-        if (object->IsGlobalObject()) {
-          // The stub generated for the global object picks the value directly
-          // from the property cell. So the property must be directly on the
-          // global object.
-          Handle<GlobalObject> global = Handle<GlobalObject>::cast(object);
-          if (lookup->holder() != *global) return;
+        if (lookup->holder()->IsGlobalObject()) {
+          GlobalObject* global = GlobalObject::cast(lookup->holder());
           JSGlobalPropertyCell* cell =
               JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
-          code = StubCache::ComputeLoadGlobal(*name, *global,
-                                              cell, lookup->IsDontDelete());
+          code = StubCache::ComputeLoadGlobal(*name,
+                                              *receiver,
+                                              global,
+                                              cell,
+                                              lookup->IsDontDelete());
         } else {
           // There is only one shared stub for loading normalized
           // properties. It does not traverse the prototype chain, so the
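
A sketch of the stub selection in the two NORMAL cases above, with hypothetical types: once the holder of a normalized property is a global object, the value sits in a property cell and the specialized global stub applies regardless of which receiver started the lookup; otherwise the shared normal-property stub requires the property on the receiver itself.

    // Hypothetical types; only the holder-is-global dispatch is modelled.
    #include <cassert>
    #include <string>
    #include <unordered_map>

    struct Function { std::string name; };
    struct Cell { Function* value = nullptr; };

    struct Object {
      bool is_global = false;
      std::unordered_map<std::string, Cell> cells;  // global objects only
    };

    struct Lookup { Object* holder; std::string name; };

    enum class StubKind { kCallGlobal, kCallNormal, kNone };

    StubKind ChooseCallStub(Object* receiver, const Lookup& lookup) {
      if (lookup.holder->is_global) {
        Cell& cell = lookup.holder->cells[lookup.name];
        if (cell.value == nullptr) return StubKind::kNone;  // not a function
        return StubKind::kCallGlobal;  // reads through the cell directly
      }
      // The shared normal-property stub does not walk the prototype
      // chain, so the property must live on the receiver itself.
      return lookup.holder == receiver ? StubKind::kCallNormal
                                       : StubKind::kNone;
    }

    int main() {
      Object global;
      global.is_global = true;
      Function f{"parseInt"};
      global.cells["parseInt"].value = &f;

      Object receiver;  // chain is assumed to pass through `global`
      assert(ChooseCallStub(&receiver, {&global, "parseInt"}) ==
             StubKind::kCallGlobal);
      assert(ChooseCallStub(&receiver, {&receiver, "local"}) ==
             StubKind::kCallNormal);
      return 0;
    }
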
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 8ab0264..6e823b3 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -56,6 +56,7 @@
 int MarkCompactCollector::live_old_pointer_objects_ = 0;
 int MarkCompactCollector::live_code_objects_ = 0;
 int MarkCompactCollector::live_map_objects_ = 0;
+int MarkCompactCollector::live_cell_objects_ = 0;
 int MarkCompactCollector::live_lo_objects_ = 0;
 #endif
 
@@ -155,6 +156,7 @@
   live_old_data_objects_ = 0;
   live_code_objects_ = 0;
   live_map_objects_ = 0;
+  live_cell_objects_ = 0;
   live_lo_objects_ = 0;
 #endif
 }
@@ -710,6 +712,10 @@
   ScanOverflowedObjects(&map_it);
   if (marking_stack.is_full()) return;
 
+  HeapObjectIterator cell_it(Heap::cell_space(), &OverflowObjectSize);
+  ScanOverflowedObjects(&cell_it);
+  if (marking_stack.is_full()) return;
+
   LargeObjectIterator lo_it(Heap::lo_space(), &OverflowObjectSize);
   ScanOverflowedObjects(&lo_it);
   if (marking_stack.is_full()) return;
@@ -809,6 +815,9 @@
   } else if (Heap::map_space()->Contains(obj)) {
     ASSERT(obj->IsMap());
     live_map_objects_++;
+  } else if (Heap::cell_space()->Contains(obj)) {
+    ASSERT(obj->IsJSGlobalPropertyCell());
+    live_cell_objects_++;
   } else if (Heap::old_pointer_space()->Contains(obj)) {
     live_old_pointer_objects_++;
   } else if (Heap::old_data_space()->Contains(obj)) {
@@ -968,27 +977,32 @@
 
 
 // Allocation functions for the paged spaces call the space's MCAllocateRaw.
-inline Object* MCAllocateFromOldPointerSpace(HeapObject* object,
+inline Object* MCAllocateFromOldPointerSpace(HeapObject* ignore,
                                              int object_size) {
   return Heap::old_pointer_space()->MCAllocateRaw(object_size);
 }
 
 
-inline Object* MCAllocateFromOldDataSpace(HeapObject* object, int object_size) {
+inline Object* MCAllocateFromOldDataSpace(HeapObject* ignore, int object_size) {
   return Heap::old_data_space()->MCAllocateRaw(object_size);
 }
 
 
-inline Object* MCAllocateFromCodeSpace(HeapObject* object, int object_size) {
+inline Object* MCAllocateFromCodeSpace(HeapObject* ignore, int object_size) {
   return Heap::code_space()->MCAllocateRaw(object_size);
 }
 
 
-inline Object* MCAllocateFromMapSpace(HeapObject* object, int object_size) {
+inline Object* MCAllocateFromMapSpace(HeapObject* ignore, int object_size) {
   return Heap::map_space()->MCAllocateRaw(object_size);
 }
 
 
+inline Object* MCAllocateFromCellSpace(HeapObject* ignore, int object_size) {
+  return Heap::cell_space()->MCAllocateRaw(object_size);
+}
+
+
 // The forwarding address is encoded at the same offset as the current
 // to-space object, but in from space.
 inline void EncodeForwardingAddressInNewSpace(HeapObject* old_object,
@@ -1147,7 +1161,7 @@
         ByteArray::cast(object)->set_length(ByteArray::LengthFor(size));
       } else {
         ASSERT(size == kPointerSize);
-        object->set_map(Heap::raw_unchecked_one_word_filler_map());
+        object->set_map(Heap::raw_unchecked_one_pointer_filler_map());
       }
       ASSERT(object->Size() == size);
     }
@@ -1197,8 +1211,8 @@
       // loop.
     }
 
-    // If the last region was not live we need to from free_start to the
-    // allocation top in the page.
+    // If the last region was not live we need to deallocate from
+    // free_start to the allocation top in the page.
     if (!is_previous_alive) {
       int free_size = p->AllocationTop() - free_start;
       if (free_size > 0) {
@@ -1242,6 +1256,21 @@
 }
 
 
+void MarkCompactCollector::DeallocateCellBlock(Address start,
+                                               int size_in_bytes) {
+  // Free-list elements in cell space are assumed to have a fixed size.
+  // We break the free block into chunks and add them to the free list
+  // individually.
+  int size = Heap::cell_space()->object_size_in_bytes();
+  ASSERT(size_in_bytes % size == 0);
+  Heap::ClearRSetRange(start, size_in_bytes);
+  Address end = start + size_in_bytes;
+  for (Address a = start; a < end; a += size) {
+    Heap::cell_space()->Free(a);
+  }
+}
+
+
 void MarkCompactCollector::EncodeForwardingAddresses() {
   ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
   // Objects in the active semispace of the young generation may be
@@ -1262,6 +1291,11 @@
                                         LogNonLiveCodeObject>(
       Heap::code_space());
 
+  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
+                                        IgnoreNonLiveObject>(
+      Heap::cell_space());
+
+
   // Compute new space next to last after the old and code spaces have been
   // compacted.  Objects in new space can be promoted to old or code space.
   EncodeForwardingAddressesInNewSpace();
@@ -1280,6 +1314,7 @@
   Heap::old_data_space()->MCWriteRelocationInfoToPage();
   Heap::code_space()->MCWriteRelocationInfoToPage();
   Heap::map_space()->MCWriteRelocationInfoToPage();
+  Heap::cell_space()->MCWriteRelocationInfoToPage();
 }
 
 
@@ -1294,6 +1329,7 @@
   SweepSpace(Heap::old_pointer_space(), &DeallocateOldPointerBlock);
   SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock);
   SweepSpace(Heap::code_space(), &DeallocateCodeBlock);
+  SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
   SweepSpace(Heap::new_space());
   SweepSpace(Heap::map_space(), &DeallocateMapBlock);
 }
@@ -1372,15 +1408,16 @@
     ASSERT(!Heap::InFromSpace(obj));
 
     if (Heap::new_space()->Contains(obj)) {
-      Address f_addr = Heap::new_space()->FromSpaceLow() +
-                       Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
-      new_addr = Memory::Address_at(f_addr);
+      Address forwarding_pointer_addr =
+          Heap::new_space()->FromSpaceLow() +
+          Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
+      new_addr = Memory::Address_at(forwarding_pointer_addr);
 
 #ifdef DEBUG
       ASSERT(Heap::old_pointer_space()->Contains(new_addr) ||
              Heap::old_data_space()->Contains(new_addr) ||
-             Heap::code_space()->Contains(new_addr) ||
-             Heap::new_space()->FromSpaceContains(new_addr));
+             Heap::new_space()->FromSpaceContains(new_addr) ||
+             Heap::lo_space()->Contains(HeapObject::FromAddress(new_addr)));
 
       if (Heap::new_space()->FromSpaceContains(new_addr)) {
         ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
@@ -1393,32 +1430,19 @@
       return;
 
     } else {
-      ASSERT(Heap::old_pointer_space()->Contains(obj) ||
-             Heap::old_data_space()->Contains(obj) ||
-             Heap::code_space()->Contains(obj) ||
-             Heap::map_space()->Contains(obj));
-
-      new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
-      ASSERT(Heap::old_pointer_space()->Contains(new_addr) ||
-             Heap::old_data_space()->Contains(new_addr) ||
-             Heap::code_space()->Contains(new_addr) ||
-             Heap::map_space()->Contains(new_addr));
-
 #ifdef DEBUG
-      if (Heap::old_pointer_space()->Contains(obj)) {
-        ASSERT(Heap::old_pointer_space()->MCSpaceOffsetForAddress(new_addr) <=
-               Heap::old_pointer_space()->MCSpaceOffsetForAddress(old_addr));
-      } else if (Heap::old_data_space()->Contains(obj)) {
-        ASSERT(Heap::old_data_space()->MCSpaceOffsetForAddress(new_addr) <=
-               Heap::old_data_space()->MCSpaceOffsetForAddress(old_addr));
-      } else if (Heap::code_space()->Contains(obj)) {
-        ASSERT(Heap::code_space()->MCSpaceOffsetForAddress(new_addr) <=
-               Heap::code_space()->MCSpaceOffsetForAddress(old_addr));
-      } else {
-        ASSERT(Heap::map_space()->MCSpaceOffsetForAddress(new_addr) <=
-               Heap::map_space()->MCSpaceOffsetForAddress(old_addr));
+      PagedSpaces spaces;
+      PagedSpace* original_space = spaces.next();
+      while (original_space != NULL) {
+        if (original_space->Contains(obj)) break;
+        original_space = spaces.next();
       }
+      ASSERT(original_space != NULL);
 #endif
+      new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
+      ASSERT(original_space->Contains(new_addr));
+      ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <=
+             original_space->MCSpaceOffsetForAddress(old_addr));
     }
 
     *p = HeapObject::FromAddress(new_addr);
@@ -1450,6 +1474,8 @@
                                           &UpdatePointersInOldObject);
   int live_codes = IterateLiveObjects(Heap::code_space(),
                                       &UpdatePointersInOldObject);
+  int live_cells = IterateLiveObjects(Heap::cell_space(),
+                                      &UpdatePointersInOldObject);
   int live_news = IterateLiveObjects(Heap::new_space(),
                                      &UpdatePointersInNewObject);
 
@@ -1461,15 +1487,14 @@
   USE(live_pointer_olds);
   USE(live_data_olds);
   USE(live_codes);
+  USE(live_cells);
   USE(live_news);
-
-#ifdef DEBUG
   ASSERT(live_maps == live_map_objects_);
   ASSERT(live_data_olds == live_old_data_objects_);
   ASSERT(live_pointer_olds == live_old_pointer_objects_);
   ASSERT(live_codes == live_code_objects_);
+  ASSERT(live_cells == live_cell_objects_);
   ASSERT(live_news == live_young_objects_);
-#endif
 }
 
 
@@ -1590,30 +1615,31 @@
   int live_data_olds = IterateLiveObjects(Heap::old_data_space(),
                                           &RelocateOldDataObject);
   int live_codes = IterateLiveObjects(Heap::code_space(), &RelocateCodeObject);
+  int live_cells = IterateLiveObjects(Heap::cell_space(), &RelocateCellObject);
   int live_news = IterateLiveObjects(Heap::new_space(), &RelocateNewObject);
 
   USE(live_maps);
   USE(live_data_olds);
   USE(live_pointer_olds);
   USE(live_codes);
+  USE(live_cells);
   USE(live_news);
-#ifdef DEBUG
   ASSERT(live_maps == live_map_objects_);
   ASSERT(live_data_olds == live_old_data_objects_);
   ASSERT(live_pointer_olds == live_old_pointer_objects_);
   ASSERT(live_codes == live_code_objects_);
+  ASSERT(live_cells == live_cell_objects_);
   ASSERT(live_news == live_young_objects_);
-#endif
 
   // Notify code object in LO to convert IC target to address
   // This must happen after lo_space_->Compact
   LargeObjectIterator it(Heap::lo_space());
   while (it.has_next()) { ConvertCodeICTargetToAddress(it.next()); }
 
-  // Flips from and to spaces
+  // Flip from and to spaces
   Heap::new_space()->Flip();
 
-  // Sets age_mark to bottom in to space
+  // Set age_mark to bottom in to space
   Address mark = Heap::new_space()->bottom();
   Heap::new_space()->set_age_mark(mark);
 
@@ -1637,7 +1663,7 @@
 
 
 int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
-  // decode map pointer (forwarded address)
+  // Recover map pointer.
   MapWord encoding = obj->map_word();
   Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
   ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
@@ -1645,10 +1671,10 @@
   // Get forwarding address before resetting map pointer
   Address new_addr = GetForwardingAddressInOldSpace(obj);
 
-  // recover map pointer
+  // Reset map pointer.  The meta map object may not be copied yet,
+  // so Map::cast does not yet work.
   obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));
 
-  // The meta map object may not be copied yet.
   Address old_addr = obj->address();
 
   if (new_addr != old_addr) {
@@ -1665,23 +1691,23 @@
 }
 
 
-static inline int RelocateOldObject(HeapObject* obj,
-                                    OldSpace* space,
-                                    Address new_addr,
-                                    Address map_addr) {
-  // recover map pointer
-  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));
+static inline int RestoreMap(HeapObject* obj,
+                             PagedSpace* space,
+                             Address new_addr,
+                             Address map_addr) {
+  // This must be a non-map object, and the function relies on the
+  // assumption that the Map space is compacted before the other paged
+  // spaces (see RelocateObjects).
 
-  // This is a non-map object, it relies on the assumption that the Map space
-  // is compacted before the Old space (see RelocateObjects).
+  // Reset map pointer.
+  obj->set_map(Map::cast(HeapObject::FromAddress(map_addr)));
+
   int obj_size = obj->Size();
   ASSERT_OBJECT_SIZE(obj_size);
 
   ASSERT(space->MCSpaceOffsetForAddress(new_addr) <=
          space->MCSpaceOffsetForAddress(obj->address()));
 
-  space->MCAdjustRelocationEnd(new_addr, obj_size);
-
 #ifdef DEBUG
   if (FLAG_gc_verbose) {
     PrintF("relocate %p -> %p\n", obj->address(), new_addr);
@@ -1693,21 +1719,22 @@
 
 
 int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
-                                                   OldSpace* space) {
-  // decode map pointer (forwarded address)
+                                                   PagedSpace* space) {
+  // Recover map pointer.
   MapWord encoding = obj->map_word();
   Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
   ASSERT(Heap::map_space()->Contains(map_addr));
 
-  // Get forwarding address before resetting map pointer
+  // Get forwarding address before resetting map pointer.
   Address new_addr = GetForwardingAddressInOldSpace(obj);
 
-  int obj_size = RelocateOldObject(obj, space, new_addr, map_addr);
+  // Reset the map pointer.
+  int obj_size = RestoreMap(obj, space, new_addr, map_addr);
 
   Address old_addr = obj->address();
 
   if (new_addr != old_addr) {
-    memmove(new_addr, old_addr, obj_size);  // copy contents
+    memmove(new_addr, old_addr, obj_size);  // Copy contents.
   }
 
   ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
@@ -1726,8 +1753,13 @@
 }
 
 
+int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
+  return RelocateOldNonCodeObject(obj, Heap::cell_space());
+}
+
+
 int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
-  // decode map pointer (forwarded address)
+  // Recover map pointer.
   MapWord encoding = obj->map_word();
   Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
   ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
@@ -1735,23 +1767,23 @@
   // Get forwarding address before resetting map pointer
   Address new_addr = GetForwardingAddressInOldSpace(obj);
 
-  int obj_size = RelocateOldObject(obj, Heap::code_space(), new_addr, map_addr);
+  // Reset the map pointer.
+  int obj_size = RestoreMap(obj, Heap::code_space(), new_addr, map_addr);
 
-  // convert inline cache target to address using old address
+  // Convert inline cache target to address using old address.
   if (obj->IsCode()) {
-    // convert target to address first related to old_address
     Code::cast(obj)->ConvertICTargetsFromObjectToAddress();
   }
 
   Address old_addr = obj->address();
 
   if (new_addr != old_addr) {
-    memmove(new_addr, old_addr, obj_size);  // copy contents
+    memmove(new_addr, old_addr, obj_size);  // Copy contents.
   }
 
   HeapObject* copied_to = HeapObject::FromAddress(new_addr);
   if (copied_to->IsCode()) {
-    // may also update inline cache target.
+    // May also update inline cache target.
     Code::cast(copied_to)->Relocate(new_addr - old_addr);
     // Notify the logger that compiled code has moved.
     LOG(CodeMoveEvent(old_addr, new_addr));
@@ -1771,15 +1803,15 @@
   Address new_addr =
     Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset);
 
+#ifdef DEBUG
   if (Heap::new_space()->FromSpaceContains(new_addr)) {
     ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
            Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
   } else {
-    OldSpace* target_space = Heap::TargetSpace(obj);
-    ASSERT(target_space == Heap::old_pointer_space() ||
-           target_space == Heap::old_data_space());
-    target_space->MCAdjustRelocationEnd(new_addr, obj_size);
+    ASSERT(Heap::TargetSpace(obj) == Heap::old_pointer_space() ||
+           Heap::TargetSpace(obj) == Heap::old_data_space());
   }
+#endif
 
   // New and old addresses cannot overlap.
   memcpy(reinterpret_cast<void*>(new_addr),
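
Because cell space only ever holds fixed-size objects, sweeping can break a dead region into cell-sized chunks and free each one individually, as DeallocateCellBlock does above. A simplified stand-alone version of that chunking (plain C++ stand-ins, not the V8 heap classes; the remembered-set clearing is elided):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    class CellSpace {
     public:
      explicit CellSpace(size_t cell_size) : cell_size_(cell_size) {}
      size_t object_size_in_bytes() const { return cell_size_; }
      void Free(char* address) { free_list_.push_back(address); }
      size_t free_cells() const { return free_list_.size(); }
     private:
      size_t cell_size_;
      std::vector<char*> free_list_;  // stand-in for the real free list
    };

    void DeallocateCellBlock(CellSpace* space, char* start,
                             size_t size_in_bytes) {
      size_t size = space->object_size_in_bytes();
      assert(size_in_bytes % size == 0);  // blocks are whole cells
      for (char* a = start; a < start + size_in_bytes; a += size) {
        space->Free(a);
      }
    }

    int main() {
      CellSpace space(/*cell_size=*/8);
      char page[64];                    // pretend this is a dead region
      DeallocateCellBlock(&space, page, sizeof(page));
      assert(space.free_cells() == 8);  // 64 bytes -> eight 8-byte cells
      return 0;
    }
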
diff --git a/src/mark-compact.h b/src/mark-compact.h
index d7ad630..bd9e4a0 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -293,6 +293,7 @@
   static void DeallocateOldDataBlock(Address start, int size_in_bytes);
   static void DeallocateCodeBlock(Address start, int size_in_bytes);
   static void DeallocateMapBlock(Address start, int size_in_bytes);
+  static void DeallocateCellBlock(Address start, int size_in_bytes);
 
   // If we are not compacting the heap, we simply sweep the spaces except
   // for the large object space, clearing mark bits and adding unmarked
@@ -352,8 +353,12 @@
   static int RelocateOldPointerObject(HeapObject* obj);
   static int RelocateOldDataObject(HeapObject* obj);
 
+  // Relocate a property cell object.
+  static int RelocateCellObject(HeapObject* obj);
+
   // Helper function.
-  static inline int RelocateOldNonCodeObject(HeapObject* obj, OldSpace* space);
+  static inline int RelocateOldNonCodeObject(HeapObject* obj,
+                                             PagedSpace* space);
 
   // Relocates an object in the code space.
   static int RelocateCodeObject(HeapObject* obj);
@@ -393,6 +398,9 @@
   // Number of live objects in Heap::map_space_.
   static int live_map_objects_;
 
+  // Number of live objects in Heap::cell_space_.
+  static int live_cell_objects_;
+
   // Number of live objects in Heap::lo_space_.
   static int live_lo_objects_;
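
The relocation path for the new cell space reuses the old-space helper, now parameterized over PagedSpace. A compact model of that helper's core steps, with illustrative types only (the map-word encoding of the forwarding address is elided): restore the map pointer, compute the size from it, then move the object body.

    #include <cassert>
    #include <cstring>

    struct Map { int instance_size; };

    // A heap object whose first word normally holds its map; during
    // compaction that word is reused, so the map is restored here.
    struct HeapObject {
      Map* map;
      int payload;
    };

    static int RestoreMap(HeapObject* obj, Map* map_addr) {
      obj->map = map_addr;              // reset map pointer
      return map_addr->instance_size;   // object size is now computable
    }

    static void Relocate(HeapObject* obj, Map* map_addr,
                         HeapObject* new_addr) {
      int size = RestoreMap(obj, map_addr);
      if (new_addr != obj) {
        std::memmove(new_addr, obj, size);  // copy contents
      }
    }

    int main() {
      Map map{sizeof(HeapObject)};
      HeapObject from{nullptr, 42};     // map word was overwritten
      HeapObject to{};
      Relocate(&from, &map, &to);
      assert(to.map == &map && to.payload == 42);
      return 0;
    }
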
 
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 4974268..d54f741 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -271,29 +271,38 @@
 
 void JSObject::PrintProperties() {
   if (HasFastProperties()) {
-    for (DescriptorReader r(map()->instance_descriptors());
-         !r.eos();
-         r.advance()) {
+    DescriptorArray* descs = map()->instance_descriptors();
+    for (int i = 0; i < descs->number_of_descriptors(); i++) {
       PrintF("   ");
-      r.GetKey()->StringPrint();
+      descs->GetKey(i)->StringPrint();
       PrintF(": ");
-      if (r.type() == FIELD) {
-        FastPropertyAt(r.GetFieldIndex())->ShortPrint();
-        PrintF(" (field at offset %d)\n", r.GetFieldIndex());
-      } else if (r.type() ==  CONSTANT_FUNCTION) {
-        r.GetConstantFunction()->ShortPrint();
-        PrintF(" (constant function)\n");
-      } else if (r.type() == CALLBACKS) {
-        r.GetCallbacksObject()->ShortPrint();
-        PrintF(" (callback)\n");
-      } else if (r.type() == MAP_TRANSITION) {
-        PrintF(" (map transition)\n");
-      } else if (r.type() == CONSTANT_TRANSITION) {
-        PrintF(" (constant transition)\n");
-      } else if (r.type() == NULL_DESCRIPTOR) {
-        PrintF(" (null descriptor)\n");
-      } else {
-        UNREACHABLE();
+      switch (descs->GetType(i)) {
+        case FIELD: {
+          int index = descs->GetFieldIndex(i);
+          FastPropertyAt(index)->ShortPrint();
+          PrintF(" (field at offset %d)\n", index);
+          break;
+        }
+        case CONSTANT_FUNCTION:
+          descs->GetConstantFunction(i)->ShortPrint();
+          PrintF(" (constant function)\n");
+          break;
+        case CALLBACKS:
+          descs->GetCallbacksObject(i)->ShortPrint();
+          PrintF(" (callback)\n");
+          break;
+        case MAP_TRANSITION:
+          PrintF(" (map transition)\n");
+          break;
+        case CONSTANT_TRANSITION:
+          PrintF(" (constant transition)\n");
+          break;
+        case NULL_DESCRIPTOR:
+          PrintF(" (null descriptor)\n");
+          break;
+        default:
+          UNREACHABLE();
+          break;
       }
     }
   } else {
@@ -1062,11 +1071,10 @@
 
 void DescriptorArray::PrintDescriptors() {
   PrintF("Descriptor array  %d\n", number_of_descriptors());
-  int number = 0;
-  for (DescriptorReader r(this); !r.eos(); r.advance()) {
+  for (int i = 0; i < number_of_descriptors(); i++) {
+    PrintF(" %d: ", i);
     Descriptor desc;
-    r.Get(&desc);
-    PrintF(" %d: ", number++);
+    Get(i, &desc);
     desc.Print();
   }
   PrintF("\n");
@@ -1076,14 +1084,14 @@
 bool DescriptorArray::IsSortedNoDuplicates() {
   String* current_key = NULL;
   uint32_t current = 0;
-  for (DescriptorReader r(this); !r.eos(); r.advance()) {
-    String* key = r.GetKey();
+  for (int i = 0; i < number_of_descriptors(); i++) {
+    String* key = GetKey(i);
     if (key == current_key) {
       PrintDescriptors();
       return false;
     }
     current_key = key;
-    uint32_t hash = r.GetKey()->Hash();
+    uint32_t hash = GetKey(i)->Hash();
     if (hash < current) {
       PrintDescriptors();
       return false;
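
The objects-debug.cc hunks replace the stateful DescriptorReader cursor with plain index-based loops over DescriptorArray. A toy version of that pattern, hypothetical types only, including the sortedness check:

    #include <cassert>
    #include <cstdint>
    #include <string>
    #include <vector>

    enum PropertyType { FIELD, CONSTANT_FUNCTION, CALLBACKS };

    struct Descriptor {
      std::string key;
      uint32_t hash;  // precomputed string hash used for ordering
      PropertyType type;
    };

    class DescriptorArray {
     public:
      int number_of_descriptors() const {
        return static_cast<int>(descs_.size());
      }
      const std::string& GetKey(int i) const { return descs_[i].key; }
      uint32_t GetHash(int i) const { return descs_[i].hash; }
      PropertyType GetType(int i) const { return descs_[i].type; }
      void Append(Descriptor d) { descs_.push_back(std::move(d)); }

      // Mirrors IsSortedNoDuplicates: ascending hash, no repeated key.
      bool IsSortedNoDuplicates() const {
        uint32_t current = 0;
        const std::string* current_key = nullptr;
        for (int i = 0; i < number_of_descriptors(); i++) {
          if (current_key && *current_key == GetKey(i)) return false;
          current_key = &descs_[i].key;
          uint32_t hash = GetHash(i);
          if (hash < current) return false;
          current = hash;
        }
        return true;
      }

     private:
      std::vector<Descriptor> descs_;
    };

    int main() {
      DescriptorArray a;
      a.Append({"x", 10, FIELD});
      a.Append({"y", 20, CONSTANT_FUNCTION});
      assert(a.IsSortedNoDuplicates());
      a.Append({"z", 5, CALLBACKS});  // hash out of order
      assert(!a.IsSortedNoDuplicates());
      return 0;
    }
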
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 3b152d6..37c9b8b 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -768,6 +768,8 @@
 
 
 Failure* Failure::RetryAfterGC(int requested_bytes) {
+  // Assert that the space encoding fits in the three bits allotted for it.
+  ASSERT((LAST_SPACE & ~kSpaceTagMask) == 0);
   int requested = requested_bytes >> kObjectAlignmentBits;
   int value = (requested << kSpaceTagSize) | NEW_SPACE;
   ASSERT(value >> kSpaceTagSize == requested);
@@ -1060,7 +1062,17 @@
 ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
 
 
-ACCESSORS(JSGlobalPropertyCell, value, Object, kValueOffset)
+Object* JSGlobalPropertyCell::value() {
+  return READ_FIELD(this, kValueOffset);
+}
+
+
+void JSGlobalPropertyCell::set_value(Object* val, WriteBarrierMode ignored) {
+  // The write barrier is not used for global property cells.
+  ASSERT(!val->IsJSGlobalPropertyCell());
+  WRITE_FIELD(this, kValueOffset, val);
+}
+
 
 int JSObject::GetHeaderSize() {
   switch (map()->instance_type()) {
@@ -1339,6 +1351,56 @@
 }
 
 
+PropertyType DescriptorArray::GetType(int descriptor_number) {
+  ASSERT(descriptor_number < number_of_descriptors());
+  return PropertyDetails(GetDetails(descriptor_number)).type();
+}
+
+
+int DescriptorArray::GetFieldIndex(int descriptor_number) {
+  return Descriptor::IndexFromValue(GetValue(descriptor_number));
+}
+
+
+JSFunction* DescriptorArray::GetConstantFunction(int descriptor_number) {
+  return JSFunction::cast(GetValue(descriptor_number));
+}
+
+
+Object* DescriptorArray::GetCallbacksObject(int descriptor_number) {
+  ASSERT(GetType(descriptor_number) == CALLBACKS);
+  return GetValue(descriptor_number);
+}
+
+
+AccessorDescriptor* DescriptorArray::GetCallbacks(int descriptor_number) {
+  ASSERT(GetType(descriptor_number) == CALLBACKS);
+  Proxy* p = Proxy::cast(GetCallbacksObject(descriptor_number));
+  return reinterpret_cast<AccessorDescriptor*>(p->proxy());
+}
+
+
+bool DescriptorArray::IsProperty(int descriptor_number) {
+  return GetType(descriptor_number) < FIRST_PHANTOM_PROPERTY_TYPE;
+}
+
+
+bool DescriptorArray::IsTransition(int descriptor_number) {
+  PropertyType t = GetType(descriptor_number);
+  return t == MAP_TRANSITION || t == CONSTANT_TRANSITION;
+}
+
+
+bool DescriptorArray::IsNullDescriptor(int descriptor_number) {
+  return GetType(descriptor_number) == NULL_DESCRIPTOR;
+}
+
+
+bool DescriptorArray::IsDontEnum(int descriptor_number) {
+  return PropertyDetails(GetDetails(descriptor_number)).IsDontEnum();
+}
+
+
 void DescriptorArray::Get(int descriptor_number, Descriptor* desc) {
   desc->Init(GetKey(descriptor_number),
              GetValue(descriptor_number),
@@ -1362,6 +1424,13 @@
 }
 
 
+void DescriptorArray::CopyFrom(int index, DescriptorArray* src, int src_index) {
+  Descriptor desc;
+  src->Get(src_index, &desc);
+  Set(index, &desc);
+}
+
+
 void DescriptorArray::Swap(int first, int second) {
   fast_swap(this, ToKeyIndex(first), ToKeyIndex(second));
   FixedArray* content_array = GetContentArray();
@@ -2642,7 +2711,7 @@
                                       Object* key,
                                       Object* value,
                                       PropertyDetails details) {
-  ASSERT(!key->IsString() || details.index() > 0);
+  ASSERT(!key->IsString() || details.IsDeleted() || details.index() > 0);
   int index = HashTable<Shape, Key>::EntryToIndex(entry);
   WriteBarrierMode mode = FixedArray::GetWriteBarrierMode();
   FixedArray::set(index, key, mode);
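
A sketch of the barrier-free store that JSGlobalPropertyCell::set_value performs above: an ordinary old-space store records its slot for the garbage collector, while a cell store is a raw write. The premise (my reading; this hunk only states that the barrier is unused for cells) is that the collector reaches cell contents without consulting a remembered set. Illustrative types only.

    #include <cassert>
    #include <unordered_set>

    struct HeapObject {};

    struct RememberedSet {
      std::unordered_set<void**> slots;
      void Record(void** slot) { slots.insert(slot); }
    };

    static RememberedSet remembered_set;

    // Ordinary store: write the field, then tell the GC about the slot.
    void StoreWithBarrier(HeapObject** slot, HeapObject* value) {
      *slot = value;
      remembered_set.Record(reinterpret_cast<void**>(slot));
    }

    // Global property cell store: just the raw write.
    struct JSGlobalPropertyCell {
      HeapObject* value = nullptr;
      void set_value(HeapObject* v) { value = v; }  // no barrier
    };

    int main() {
      HeapObject obj;
      HeapObject* field = nullptr;
      StoreWithBarrier(&field, &obj);  // ordinary store records the slot
      JSGlobalPropertyCell cell;
      cell.set_value(&obj);            // cell store records nothing
      assert(field == &obj && cell.value == &obj);
      assert(remembered_set.slots.size() == 1);
      return 0;
    }
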
diff --git a/src/objects.cc b/src/objects.cc
index 93e7495..a9004c9 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -436,8 +436,7 @@
       store_value = Heap::AllocateJSGlobalPropertyCell(value);
       if (store_value->IsFailure()) return store_value;
     }
-    Object* dict =
-        property_dictionary()->Add(name, store_value, details);
+    Object* dict = property_dictionary()->Add(name, store_value, details);
     if (dict->IsFailure()) return dict;
     set_properties(StringDictionary::cast(dict));
     return value;
@@ -1633,6 +1632,7 @@
   return *value_handle;
 }
 
+
 void JSObject::LookupCallbackSetterInPrototypes(String* name,
                                                 LookupResult* result) {
   for (Object* pt = GetPrototype();
@@ -1660,7 +1660,7 @@
     if (JSObject::cast(pt)->HasFastElements()) continue;
     NumberDictionary* dictionary = JSObject::cast(pt)->element_dictionary();
     int entry = dictionary->FindEntry(index);
-    if (entry != StringDictionary::kNotFound) {
+    if (entry != NumberDictionary::kNotFound) {
       Object* element = dictionary->ValueAt(entry);
       PropertyDetails details = dictionary->DetailsAt(entry);
       if (details.type() == CALLBACKS) {
@@ -1712,30 +1712,24 @@
   } else {
     int entry = property_dictionary()->FindEntry(name);
     if (entry != StringDictionary::kNotFound) {
-      // Make sure to disallow caching for uninitialized constants
-      // found in the dictionary-mode objects.
       Object* value = property_dictionary()->ValueAt(entry);
       if (IsGlobalObject()) {
         PropertyDetails d = property_dictionary()->DetailsAt(entry);
         if (d.IsDeleted()) {
-          // We've skipped a global object during lookup, so we cannot
-          // use inline caching because the map of the global object
-          // doesn't change if the property should be re-added.
-          result->DisallowCaching();
           result->NotFound();
           return;
         }
         value = JSGlobalPropertyCell::cast(value)->value();
         ASSERT(result->IsLoaded());
       }
-      if (value->IsTheHole()) {
-        result->DisallowCaching();
-      }
+      // Make sure to disallow caching for uninitialized constants
+      // found in the dictionary-mode objects.
+      if (value->IsTheHole()) result->DisallowCaching();
       result->DictionaryResult(this, entry);
       return;
     }
     // Slow case object skipped during lookup. Do not use inline caching.
-    result->DisallowCaching();
+    if (!IsGlobalObject()) result->DisallowCaching();
   }
   result->NotFound();
 }
@@ -2121,20 +2115,19 @@
   if (obj->IsFailure()) return obj;
   StringDictionary* dictionary = StringDictionary::cast(obj);
 
-  for (DescriptorReader r(map()->instance_descriptors());
-       !r.eos();
-       r.advance()) {
-    PropertyDetails details = r.GetDetails();
+  DescriptorArray* descs = map()->instance_descriptors();
+  for (int i = 0; i < descs->number_of_descriptors(); i++) {
+    PropertyDetails details = descs->GetDetails(i);
     switch (details.type()) {
       case CONSTANT_FUNCTION: {
         PropertyDetails d =
             PropertyDetails(details.attributes(), NORMAL, details.index());
-        Object* value = r.GetConstantFunction();
+        Object* value = descs->GetConstantFunction(i);
         if (IsGlobalObject()) {
           value = Heap::AllocateJSGlobalPropertyCell(value);
           if (value->IsFailure()) return value;
         }
-        Object* result = dictionary->Add(r.GetKey(), value, d);
+        Object* result = dictionary->Add(descs->GetKey(i), value, d);
         if (result->IsFailure()) return result;
         dictionary = StringDictionary::cast(result);
         break;
@@ -2142,12 +2135,12 @@
       case FIELD: {
         PropertyDetails d =
             PropertyDetails(details.attributes(), NORMAL, details.index());
-        Object* value = FastPropertyAt(r.GetFieldIndex());
+        Object* value = FastPropertyAt(descs->GetFieldIndex(i));
         if (IsGlobalObject()) {
           value = Heap::AllocateJSGlobalPropertyCell(value);
           if (value->IsFailure()) return value;
         }
-        Object* result = dictionary->Add(r.GetKey(), value, d);
+        Object* result = dictionary->Add(descs->GetKey(i), value, d);
         if (result->IsFailure()) return result;
         dictionary = StringDictionary::cast(result);
         break;
@@ -2155,12 +2148,12 @@
       case CALLBACKS: {
         PropertyDetails d =
             PropertyDetails(details.attributes(), CALLBACKS, details.index());
-        Object* value = r.GetCallbacksObject();
+        Object* value = descs->GetCallbacksObject(i);
         if (IsGlobalObject()) {
           value = Heap::AllocateJSGlobalPropertyCell(value);
           if (value->IsFailure()) return value;
         }
-        Object* result = dictionary->Add(r.GetKey(), value, d);
+        Object* result = dictionary->Add(descs->GetKey(i), value, d);
         if (result->IsFailure()) return result;
         dictionary = StringDictionary::cast(result);
         break;
@@ -2574,35 +2567,44 @@
 
 int Map::NumberOfDescribedProperties() {
   int result = 0;
-  for (DescriptorReader r(instance_descriptors()); !r.eos(); r.advance()) {
-    if (r.IsProperty()) result++;
+  DescriptorArray* descs = instance_descriptors();
+  for (int i = 0; i < descs->number_of_descriptors(); i++) {
+    if (descs->IsProperty(i)) result++;
   }
   return result;
 }
 
 
 int Map::PropertyIndexFor(String* name) {
-  for (DescriptorReader r(instance_descriptors()); !r.eos(); r.advance()) {
-    if (r.Equals(name) && !r.IsNullDescriptor()) return r.GetFieldIndex();
+  DescriptorArray* descs = instance_descriptors();
+  for (int i = 0; i < descs->number_of_descriptors(); i++) {
+    if (name->Equals(descs->GetKey(i)) && !descs->IsNullDescriptor(i)) {
+      return descs->GetFieldIndex(i);
+    }
   }
   return -1;
 }
 
 
 int Map::NextFreePropertyIndex() {
-  int index = -1;
-  for (DescriptorReader r(instance_descriptors()); !r.eos(); r.advance()) {
-    if (r.type() == FIELD) {
-      if (r.GetFieldIndex() > index) index = r.GetFieldIndex();
+  int max_index = -1;
+  DescriptorArray* descs = instance_descriptors();
+  for (int i = 0; i < descs->number_of_descriptors(); i++) {
+    if (descs->GetType(i) == FIELD) {
+      int current_index = descs->GetFieldIndex(i);
+      if (current_index > max_index) max_index = current_index;
     }
   }
-  return index+1;
+  return max_index + 1;
 }
 
 
 AccessorDescriptor* Map::FindAccessor(String* name) {
-  for (DescriptorReader r(instance_descriptors()); !r.eos(); r.advance()) {
-    if (r.Equals(name) && r.type() == CALLBACKS) return r.GetCallbacks();
+  DescriptorArray* descs = instance_descriptors();
+  for (int i = 0; i < descs->number_of_descriptors(); i++) {
+    if (name->Equals(descs->GetKey(i)) && descs->GetType(i) == CALLBACKS) {
+      return descs->GetCallbacks(i);
+    }
   }
   return NULL;
 }
@@ -2850,16 +2852,15 @@
 
 Object* JSObject::SlowReverseLookup(Object* value) {
   if (HasFastProperties()) {
-    for (DescriptorReader r(map()->instance_descriptors());
-         !r.eos();
-         r.advance()) {
-      if (r.type() == FIELD) {
-        if (FastPropertyAt(r.GetFieldIndex()) == value) {
-          return r.GetKey();
+    DescriptorArray* descs = map()->instance_descriptors();
+    for (int i = 0; i < descs->number_of_descriptors(); i++) {
+      if (descs->GetType(i) == FIELD) {
+        if (FastPropertyAt(descs->GetFieldIndex(i)) == value) {
+          return descs->GetKey(i);
         }
-      } else if (r.type() == CONSTANT_FUNCTION) {
-        if (r.GetConstantFunction() == value) {
-          return r.GetKey();
+      } else if (descs->GetType(i) == CONSTANT_FUNCTION) {
+        if (descs->GetConstantFunction(i) == value) {
+          return descs->GetKey(i);
         }
       }
     }
@@ -3173,13 +3174,13 @@
   int transitions = 0;
   int null_descriptors = 0;
   if (remove_transitions) {
-    for (DescriptorReader r(this); !r.eos(); r.advance()) {
-      if (r.IsTransition()) transitions++;
-      if (r.IsNullDescriptor()) null_descriptors++;
+    for (int i = 0; i < number_of_descriptors(); i++) {
+      if (IsTransition(i)) transitions++;
+      if (IsNullDescriptor(i)) null_descriptors++;
     }
   } else {
-    for (DescriptorReader r(this); !r.eos(); r.advance()) {
-      if (r.IsNullDescriptor()) null_descriptors++;
+    for (int i = 0; i < number_of_descriptors(); i++) {
+      if (IsNullDescriptor(i)) null_descriptors++;
     }
   }
   int new_size = number_of_descriptors() - transitions - null_descriptors;
@@ -3227,32 +3228,31 @@
 
   // Copy the descriptors, filtering out transitions and null descriptors,
   // and inserting or replacing a descriptor.
-  DescriptorWriter w(new_descriptors);
-  DescriptorReader r(this);
   uint32_t descriptor_hash = descriptor->GetKey()->Hash();
+  int from_index = 0;
+  int to_index = 0;
 
-  for (; !r.eos(); r.advance()) {
-    if (r.GetKey()->Hash() > descriptor_hash ||
-        r.GetKey() == descriptor->GetKey()) break;
-    if (r.IsNullDescriptor()) continue;
-    if (remove_transitions && r.IsTransition()) continue;
-    w.WriteFrom(&r);
+  for (; from_index < number_of_descriptors(); from_index++) {
+    String* key = GetKey(from_index);
+    if (key->Hash() > descriptor_hash || key == descriptor->GetKey()) {
+      break;
+    }
+    if (IsNullDescriptor(from_index)) continue;
+    if (remove_transitions && IsTransition(from_index)) continue;
+    new_descriptors->CopyFrom(to_index++, this, from_index);
   }
-  w.Write(descriptor);
-  if (replacing) {
-    ASSERT(r.GetKey() == descriptor->GetKey());
-    r.advance();
-  } else {
-    ASSERT(r.eos() ||
-           r.GetKey()->Hash() > descriptor_hash ||
-           r.IsNullDescriptor());
+
+  new_descriptors->Set(to_index++, descriptor);
+  if (replacing) from_index++;
+
+  for (; from_index < number_of_descriptors(); from_index++) {
+    if (IsNullDescriptor(from_index)) continue;
+    if (remove_transitions && IsTransition(from_index)) continue;
+    new_descriptors->CopyFrom(to_index++, this, from_index);
   }
-  for (; !r.eos(); r.advance()) {
-    if (r.IsNullDescriptor()) continue;
-    if (remove_transitions && r.IsTransition()) continue;
-    w.WriteFrom(&r);
-  }
-  ASSERT(w.eos());
+
+  ASSERT(to_index == new_descriptors->number_of_descriptors());
+  SLOW_ASSERT(new_descriptors->IsSortedNoDuplicates());
 
   return new_descriptors;
 }
@@ -3265,8 +3265,8 @@
 
   // Compute the size of the map transition entries to be removed.
   int num_removed = 0;
-  for (DescriptorReader r(this); !r.eos(); r.advance()) {
-    if (!r.IsProperty()) num_removed++;
+  for (int i = 0; i < number_of_descriptors(); i++) {
+    if (!IsProperty(i)) num_removed++;
   }
 
   // Allocate the new descriptor array.
@@ -3275,11 +3275,11 @@
   DescriptorArray* new_descriptors = DescriptorArray::cast(result);
 
   // Copy the content.
-  DescriptorWriter w(new_descriptors);
-  for (DescriptorReader r(this); !r.eos(); r.advance()) {
-    if (r.IsProperty()) w.WriteFrom(&r);
+  int next_descriptor = 0;
+  for (int i = 0; i < number_of_descriptors(); i++) {
+    if (IsProperty(i)) new_descriptors->CopyFrom(next_descriptor++, this, i);
   }
-  ASSERT(w.eos());
+  ASSERT(next_descriptor == new_descriptors->number_of_descriptors());
 
   return new_descriptors;
 }
@@ -4586,10 +4586,10 @@
 
 void Map::CreateBackPointers() {
   DescriptorArray* descriptors = instance_descriptors();
-  for (DescriptorReader r(descriptors); !r.eos(); r.advance()) {
-    if (r.type() == MAP_TRANSITION) {
+  for (int i = 0; i < descriptors->number_of_descriptors(); i++) {
+    if (descriptors->GetType(i) == MAP_TRANSITION) {
       // Get target.
-      Map* target = Map::cast(r.GetValue());
+      Map* target = Map::cast(descriptors->GetValue(i));
 #ifdef DEBUG
       // Verify target.
       Object* source_prototype = prototype();
@@ -4600,7 +4600,7 @@
       ASSERT(target_prototype->IsJSObject() ||
              target_prototype->IsNull());
       ASSERT(source_prototype->IsMap() ||
-          source_prototype == target_prototype);
+             source_prototype == target_prototype);
 #endif
       // Point target back to source.  set_prototype() will not let us set
       // the prototype to a map, as we do here.
@@ -6018,13 +6018,11 @@
 
 int JSObject::NumberOfLocalProperties(PropertyAttributes filter) {
   if (HasFastProperties()) {
+    DescriptorArray* descs = map()->instance_descriptors();
     int result = 0;
-    for (DescriptorReader r(map()->instance_descriptors());
-         !r.eos();
-         r.advance()) {
-      PropertyDetails details = r.GetDetails();
-      if (details.IsProperty() &&
-          (details.attributes() & filter) == 0) {
+    for (int i = 0; i < descs->number_of_descriptors(); i++) {
+      PropertyDetails details = descs->GetDetails(i);
+      if (details.IsProperty() && (details.attributes() & filter) == 0) {
         result++;
       }
     }
@@ -6157,16 +6155,11 @@
 // purpose of this function is to provide reflection information for the object
 // mirrors.
 void JSObject::GetLocalPropertyNames(FixedArray* storage, int index) {
-  ASSERT(storage->length() >=
-         NumberOfLocalProperties(static_cast<PropertyAttributes>(NONE)) -
-             index);
+  ASSERT(storage->length() >= (NumberOfLocalProperties(NONE) - index));
   if (HasFastProperties()) {
-    for (DescriptorReader r(map()->instance_descriptors());
-         !r.eos();
-         r.advance()) {
-      if (r.IsProperty()) {
-        storage->set(index++, r.GetKey());
-      }
+    DescriptorArray* descs = map()->instance_descriptors();
+    for (int i = 0; i < descs->number_of_descriptors(); i++) {
+      if (descs->IsProperty(i)) storage->set(index++, descs->GetKey(i));
     }
     ASSERT(storage->length() >= index);
   } else {
@@ -6841,6 +6834,26 @@
 }
 
 
+Object* GlobalObject::EnsurePropertyCell(String* name) {
+  ASSERT(!HasFastProperties());
+  int entry = property_dictionary()->FindEntry(name);
+  if (entry == StringDictionary::kNotFound) {
+    Object* cell = Heap::AllocateJSGlobalPropertyCell(Heap::the_hole_value());
+    if (cell->IsFailure()) return cell;
+    PropertyDetails details(NONE, NORMAL);
+    details = details.AsDeleted();
+    Object* dictionary = property_dictionary()->Add(name, cell, details);
+    if (dictionary->IsFailure()) return dictionary;
+    set_properties(StringDictionary::cast(dictionary));
+    return cell;
+  } else {
+    Object* value = property_dictionary()->ValueAt(entry);
+    ASSERT(value->IsJSGlobalPropertyCell());
+    return value;
+  }
+}
+
+
 Object* SymbolTable::LookupString(String* string, Object** s) {
   SymbolKey key(string);
   return LookupKey(&key, s);
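
A sketch of how the new GlobalObject::EnsurePropertyCell is meant to be
called (caller-side names are illustrative): it hands back a stable cell
for a global property before the property has a value, and the caller
must still handle allocation failure:

  // Assumes 'global' is a GlobalObject* and 'name' a String*.
  Object* probe = global->EnsurePropertyCell(name);
  if (probe->IsFailure()) return probe;  // allocation failed; retry after GC
  JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
  // The cell holds the hole until the property is actually defined.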
@@ -7200,7 +7213,7 @@
 
   uint32_t entry = Dictionary<Shape, Key>::FindInsertionEntry(hash);
   // Insert element at empty or deleted entry
-  if (details.index() == 0 && Shape::kIsEnumerable) {
+  if (!details.IsDeleted() && details.index() == 0 && Shape::kIsEnumerable) {
     // Assign an enumeration index to the property and update
     // SetNextEnumerationIndex.
     int index = NextEnumerationIndex();
@@ -7273,7 +7286,9 @@
   for (int i = 0; i < capacity; i++) {
     Object* k = HashTable<Shape, Key>::KeyAt(i);
     if (HashTable<Shape, Key>::IsKey(k)) {
-      PropertyAttributes attr = DetailsAt(i).attributes();
+      PropertyDetails details = DetailsAt(i);
+      if (details.IsDeleted()) continue;
+      PropertyAttributes attr = details.attributes();
       if ((attr & filter) == 0) result++;
     }
   }
@@ -7297,7 +7312,9 @@
   for (int i = 0; i < capacity; i++) {
      Object* k = HashTable<Shape, Key>::KeyAt(i);
      if (HashTable<Shape, Key>::IsKey(k)) {
-       PropertyAttributes attr = DetailsAt(i).attributes();
+       PropertyDetails details = DetailsAt(i);
+       if (details.IsDeleted()) continue;
+       PropertyAttributes attr = details.attributes();
        if ((attr & filter) == 0) storage->set(index++, k);
      }
   }
@@ -7315,13 +7332,12 @@
      Object* k = KeyAt(i);
      if (IsKey(k)) {
        PropertyDetails details = DetailsAt(i);
-       if (!details.IsDontEnum()) {
-         storage->set(index, k);
-         sort_array->set(index,
-                         Smi::FromInt(details.index()),
-                         SKIP_WRITE_BARRIER);
-         index++;
-       }
+       if (details.IsDeleted() || details.IsDontEnum()) continue;
+       storage->set(index, k);
+       sort_array->set(index,
+                       Smi::FromInt(details.index()),
+                       SKIP_WRITE_BARRIER);
+       index++;
      }
   }
   storage->SortPairs(sort_array, sort_array->length());
@@ -7338,6 +7354,8 @@
   for (int i = 0; i < capacity; i++) {
     Object* k = HashTable<Shape, Key>::KeyAt(i);
     if (HashTable<Shape, Key>::IsKey(k)) {
+      PropertyDetails details = DetailsAt(i);
+      if (details.IsDeleted()) continue;
       storage->set(index++, k);
     }
   }
@@ -7410,7 +7428,7 @@
   if (fields->IsFailure()) return fields;
 
   // Fill in the instance descriptor and the fields.
-  DescriptorWriter w(descriptors);
+  int next_descriptor = 0;
   int current_offset = 0;
   for (int i = 0; i < capacity; i++) {
     Object* k = KeyAt(i);
@@ -7427,7 +7445,7 @@
                                      JSFunction::cast(value),
                                      details.attributes(),
                                      details.index());
-        w.Write(&d);
+        descriptors->Set(next_descriptor++, &d);
       } else if (type == NORMAL) {
         if (current_offset < inobject_props) {
           obj->InObjectPropertyAtPut(current_offset,
@@ -7441,13 +7459,13 @@
                           current_offset++,
                           details.attributes(),
                           details.index());
-        w.Write(&d);
+        descriptors->Set(next_descriptor++, &d);
       } else if (type == CALLBACKS) {
         CallbacksDescriptor d(String::cast(key),
                               value,
                               details.attributes(),
                               details.index());
-        w.Write(&d);
+        descriptors->Set(next_descriptor++, &d);
       } else {
         UNREACHABLE();
       }
diff --git a/src/objects.h b/src/objects.h
index 446b4a7..5c76e4a 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -928,12 +928,14 @@
 
 // Failure is used for reporting out of memory situations and
 // propagating exceptions through the runtime system.  Failure objects
-// are transient and cannot occur as part of the objects graph.
+// are transient and cannot occur as part of the object graph.
 //
 // Failures are a single word, encoded as follows:
 // +-------------------------+---+--+--+
 // |rrrrrrrrrrrrrrrrrrrrrrrrr|sss|tt|11|
 // +-------------------------+---+--+--+
+//  3                       7 6 4 32 10
+//  1
 //
 // The low two bits, 0-1, are the failure tag, 11.  The next two bits,
 // 2-3, are a failure type tag 'tt' with possible values:
@@ -944,18 +946,13 @@
 //
 // The next three bits, 4-6, are an allocation space tag 'sss'.  The
 // allocation space tag is 000 for all failure types except
-// RETRY_AFTER_GC.  For RETRY_AFTER_GC, the possible values are
-// (the encoding is found in globals.h):
-//   000 NEW_SPACE
-//   001 OLD_SPACE
-//   010 CODE_SPACE
-//   011 MAP_SPACE
-//   100 LO_SPACE
+// RETRY_AFTER_GC.  For RETRY_AFTER_GC, the possible values are the
+// allocation spaces (the encoding is found in globals.h).
 //
-// The remaining bits is the number of words requested by the
-// allocation request that failed, and is zeroed except for
-// RETRY_AFTER_GC failures.  The 25 bits (on a 32 bit platform) gives
-// a representable range of 2^27 bytes (128MB).
+// The remaining bits are the size of the allocation request in units
+// of the pointer size, and are zeroed except for RETRY_AFTER_GC
+// failures.  The 25 bits (on a 32-bit platform) give a representable
+// range of 2^27 bytes (128MB).
 
 // Failure type tag info.
 const int kFailureTypeTagSize = 2;
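
For concreteness, a standalone sketch of decoding a failure word under the
layout documented above (plain C++ with hard-coded shifts; not the actual
accessors from objects.h):

  #include <cassert>
  #include <cstdint>

  void DecodeFailure(uint32_t word) {
    assert((word & 0x3) == 0x3);          // bits 0-1: failure tag '11'
    uint32_t type  = (word >> 2) & 0x3;   // bits 2-3: failure type 'tt'
    uint32_t space = (word >> 4) & 0x7;   // bits 4-6: allocation space 'sss'
    uint32_t words = word >> 7;           // bits 7-31: request size in pointers
    // 25 size bits * 4-byte pointers = 2^27 bytes = 128MB, as stated above.
    (void)type; (void)space; (void)words;
  }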
@@ -1085,14 +1082,6 @@
 
   inline Address ToEncodedAddress();
 
- private:
-  // HeapObject calls the private constructor and directly reads the value.
-  friend class HeapObject;
-
-  explicit MapWord(uintptr_t value) : value_(value) {}
-
-  uintptr_t value_;
-
   // Bits used by the marking phase of the garbage collector.
   //
   // The first word of a heap object is normally a map pointer. The last two
@@ -1134,6 +1123,14 @@
   // 0xFFE00000
   static const uint32_t kForwardingOffsetMask =
       ~(kMapPageIndexMask | kMapPageOffsetMask);
+
+ private:
+  // HeapObject calls the private constructor and directly reads the value.
+  friend class HeapObject;
+
+  explicit MapWord(uintptr_t value) : value_(value) {}
+
+  uintptr_t value_;
 };
 
 
@@ -1853,15 +1850,28 @@
   // using the supplied storage for the small "bridge".
   void SetEnumCache(FixedArray* bridge_storage, FixedArray* new_cache);
 
-  // Accessors for fetching instance descriptor at descriptor number..
+  // Accessors for fetching instance descriptor at descriptor number.
   inline String* GetKey(int descriptor_number);
   inline Object* GetValue(int descriptor_number);
   inline Smi* GetDetails(int descriptor_number);
+  inline PropertyType GetType(int descriptor_number);
+  inline int GetFieldIndex(int descriptor_number);
+  inline JSFunction* GetConstantFunction(int descriptor_number);
+  inline Object* GetCallbacksObject(int descriptor_number);
+  inline AccessorDescriptor* GetCallbacks(int descriptor_number);
+  inline bool IsProperty(int descriptor_number);
+  inline bool IsTransition(int descriptor_number);
+  inline bool IsNullDescriptor(int descriptor_number);
+  inline bool IsDontEnum(int descriptor_number);
 
   // Accessor for complete descriptor.
   inline void Get(int descriptor_number, Descriptor* desc);
   inline void Set(int descriptor_number, Descriptor* desc);
 
+  // Transfer complete descriptor from another descriptor array to
+  // this one.
+  inline void CopyFrom(int index, DescriptorArray* src, int src_index);
+
   // Copy the descriptor array, insert a new descriptor and optionally
   // remove map transitions.  If the descriptor is already present, it is
   // replaced.  If a replaced descriptor is a real property (not a transition
@@ -3243,6 +3253,9 @@
   // Retrieve the property cell used to store a property.
   Object* GetPropertyCell(LookupResult* result);
 
+  // Ensure that the global object has a cell for the given property name.
+  Object* EnsurePropertyCell(String* name);
+
   // Casting.
   static inline GlobalObject* cast(Object* obj);
 
diff --git a/src/property.cc b/src/property.cc
index 2915c4a..caa7397 100644
--- a/src/property.cc
+++ b/src/property.cc
@@ -31,20 +31,6 @@
 namespace internal {
 
 
-void DescriptorWriter::Write(Descriptor* desc) {
-  ASSERT(desc->key_->IsSymbol());
-  descriptors_->Set(pos_, desc);
-  advance();
-}
-
-
-void DescriptorWriter::WriteFrom(DescriptorReader* reader) {
-  Descriptor desc;
-  reader->Get(&desc);
-  Write(&desc);
-}
-
-
 #ifdef DEBUG
 void LookupResult::Print() {
   if (!IsValid()) {
diff --git a/src/property.h b/src/property.h
index 69e5640..1869719 100644
--- a/src/property.h
+++ b/src/property.h
@@ -95,8 +95,6 @@
         value_(value),
         details_(attributes, type, index) { }
 
-  friend class DescriptorWriter;
-  friend class DescriptorReader;
   friend class DescriptorArray;
 };
 
@@ -324,92 +322,6 @@
 };
 
 
-// The DescriptorStream is an abstraction for iterating over a map's
-// instance descriptors.
-class DescriptorStream BASE_EMBEDDED {
- public:
-  explicit DescriptorStream(DescriptorArray* descriptors, int pos) {
-    descriptors_ = descriptors;
-    pos_ = pos;
-    limit_ = descriptors_->number_of_descriptors();
-  }
-
-  // Tells whether we have reached the end of the steam.
-  bool eos() { return pos_ >= limit_; }
-
-  int next_position() { return pos_ + 1; }
-  void advance() { pos_ = next_position(); }
-
- protected:
-  DescriptorArray* descriptors_;
-  int pos_;   // Current position.
-  int limit_;  // Limit for position.
-};
-
-
-class DescriptorReader: public DescriptorStream {
- public:
-  explicit DescriptorReader(DescriptorArray* descriptors, int pos = 0)
-      : DescriptorStream(descriptors, pos) {}
-
-  String* GetKey() { return descriptors_->GetKey(pos_); }
-  Object* GetValue() { return descriptors_->GetValue(pos_); }
-  PropertyDetails GetDetails() {
-    return PropertyDetails(descriptors_->GetDetails(pos_));
-  }
-
-  int GetFieldIndex() { return Descriptor::IndexFromValue(GetValue()); }
-
-  bool IsDontEnum() { return GetDetails().IsDontEnum(); }
-
-  PropertyType type() { return GetDetails().type(); }
-
-  // Tells whether the type is a transition.
-  bool IsTransition() {
-    PropertyType t = type();
-    ASSERT(t != INTERCEPTOR);
-    return t == MAP_TRANSITION || t == CONSTANT_TRANSITION;
-  }
-
-  bool IsNullDescriptor() {
-    return type() == NULL_DESCRIPTOR;
-  }
-
-  bool IsProperty() {
-    return type() < FIRST_PHANTOM_PROPERTY_TYPE;
-  }
-
-  JSFunction* GetConstantFunction() { return JSFunction::cast(GetValue()); }
-
-  AccessorDescriptor* GetCallbacks() {
-    ASSERT(type() == CALLBACKS);
-    Proxy* p = Proxy::cast(GetCallbacksObject());
-    return reinterpret_cast<AccessorDescriptor*>(p->proxy());
-  }
-
-  Object* GetCallbacksObject() {
-    ASSERT(type() == CALLBACKS);
-    return GetValue();
-  }
-
-  bool Equals(String* name) { return name->Equals(GetKey()); }
-
-  void Get(Descriptor* desc) {
-    descriptors_->Get(pos_, desc);
-  }
-};
-
-class DescriptorWriter: public DescriptorStream {
- public:
-  explicit DescriptorWriter(DescriptorArray* descriptors)
-      : DescriptorStream(descriptors, 0) {}
-
-  // Append a descriptor to this stream.
-  void Write(Descriptor* desc);
-  // Read a descriptor from the reader and append it to this stream.
-  void WriteFrom(DescriptorReader* reader);
-};
-
 } }  // namespace v8::internal
 
 #endif  // V8_PROPERTY_H_
diff --git a/src/runtime.cc b/src/runtime.cc
index ea2690e..350d391 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -4316,7 +4316,7 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Handle stepping into constructors if step into is active.
   if (Debug::StepInActive()) {
-    Debug::HandleStepIn(function, 0, true);
+    Debug::HandleStepIn(function, Handle<Object>::null(), 0, true);
   }
 #endif
 
diff --git a/src/serialize.cc b/src/serialize.cc
index f633b06..592cf5a 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -42,47 +42,44 @@
 namespace v8 {
 namespace internal {
 
-// Encoding: a RelativeAddress must be able to fit in a pointer:
-// it is encoded as an Address with (from MS to LS bits):
-// 27 bits identifying a word in the space, in one of three formats:
-// - MAP and OLD spaces: 16 bits of page number, 11 bits of word offset in page
-// - NEW space:          27 bits of word offset
-// - LO space:           27 bits of page number
-// 3 bits to encode the AllocationSpace (special values for code in LO space)
-// 2 bits identifying this as a HeapObject
+// 32-bit encoding: a RelativeAddress must be able to fit in a
+// pointer: it is encoded as an Address with (from LS to MS bits):
+// - 2 bits identifying this as a HeapObject.
+// - 4 bits to encode the AllocationSpace (including special values for
+//   code and fixed arrays in LO space).
+// - 26 bits identifying a word in the space, in one of three formats:
+//   - paged spaces: 15 bits of page number, 11 bits of word offset in page
+//   - NEW space:    26 bits of word offset
+//   - LO space:     26 bits of page number
 
 const int kSpaceShift = kHeapObjectTagSize;
-const int kSpaceBits = kSpaceTagSize;
-const int kSpaceMask = kSpaceTagMask;
-
-// These value are used instead of space numbers when serializing/
-// deserializing.  They indicate an object that is in large object space, but
-// should be treated specially.
-// Make the pages executable on platforms that support it:
-const int kLOSpaceExecutable = LAST_SPACE + 1;
-// Reserve space for write barrier bits (for objects that can contain
-// references to new space):
-const int kLOSpacePointer = LAST_SPACE + 2;
-
+const int kSpaceBits = 4;
+const int kSpaceMask = (1 << kSpaceBits) - 1;
 
 const int kOffsetShift = kSpaceShift + kSpaceBits;
 const int kOffsetBits = 11;
 const int kOffsetMask = (1 << kOffsetBits) - 1;
 
-const int kPageBits = 32 - (kOffsetBits + kSpaceBits + kHeapObjectTagSize);
 const int kPageShift = kOffsetShift + kOffsetBits;
+const int kPageBits = 32 - (kOffsetBits + kSpaceBits + kHeapObjectTagSize);
 const int kPageMask = (1 << kPageBits) - 1;
 
 const int kPageAndOffsetShift = kOffsetShift;
 const int kPageAndOffsetBits = kPageBits + kOffsetBits;
 const int kPageAndOffsetMask = (1 << kPageAndOffsetBits) - 1;
 
+// These values are special allocation space tags used for
+// serialization.
+// Mark the pages executable on platforms that support it.
+const int kLargeCode = LAST_SPACE + 1;
+// Allocate extra remembered-set bits.
+const int kLargeFixedArray = LAST_SPACE + 2;
+
 
 static inline AllocationSpace GetSpace(Address addr) {
   const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
   int space_number = (static_cast<int>(encoded >> kSpaceShift) & kSpaceMask);
-  if (space_number == kLOSpaceExecutable) space_number = LO_SPACE;
-  else if (space_number == kLOSpacePointer) space_number = LO_SPACE;
+  if (space_number > LAST_SPACE) space_number = LO_SPACE;
   return static_cast<AllocationSpace>(space_number);
 }
 
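With the constants above, kSpaceShift is 2, kOffsetShift is 2 + 4 = 6, and
kPageShift is 6 + 11 = 17, leaving kPageBits = 32 - (11 + 4 + 2) = 15.  A
sketch of pulling the fields back out of an encoded paged-space address
(illustrative only, mirroring GetSpace above):

  const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
  int space  = static_cast<int>(encoded >> 2)  & 0xF;     // kSpaceMask
  int offset = static_cast<int>(encoded >> 6)  & 0x7FF;   // kOffsetMask, 11 bits
  int page   = static_cast<int>(encoded >> 17) & 0x7FFF;  // kPageMask, 15 bits
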
@@ -91,7 +88,7 @@
   const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
   const int space_number =
       (static_cast<int>(encoded >> kSpaceShift) & kSpaceMask);
-  return (space_number == kLOSpaceExecutable);
+  return (space_number == kLargeCode);
 }
 
 
@@ -99,7 +96,7 @@
   const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
   const int space_number =
       (static_cast<int>(encoded >> kSpaceShift) & kSpaceMask);
-  return (space_number == kLOSpacePointer);
+  return (space_number == kLargeFixedArray);
 }
 
 
@@ -147,6 +144,9 @@
                   int page_index,
                   int page_offset)
   : space_(space), page_index_(page_index), page_offset_(page_offset)  {
+    // Assert that the space encoding (plus the two pseudo-spaces for
+    // special large objects) fits in the available bits.
+    ASSERT(((LAST_SPACE + 2) & ~kSpaceMask) == 0);
     ASSERT(space <= LAST_SPACE && space >= 0);
   }
 
@@ -154,8 +154,7 @@
   Address Encode() const;
 
   AllocationSpace space() const {
-    if (space_ == kLOSpaceExecutable) return LO_SPACE;
-    if (space_ == kLOSpacePointer) return LO_SPACE;
+    if (space_ > LAST_SPACE) return LO_SPACE;
     return static_cast<AllocationSpace>(space_);
   }
   int page_index() const { return page_index_; }
@@ -165,7 +164,8 @@
     return space_ == CODE_SPACE ||
            space_ == OLD_POINTER_SPACE ||
            space_ == OLD_DATA_SPACE ||
-           space_ == MAP_SPACE;
+           space_ == MAP_SPACE ||
+           space_ == CELL_SPACE;
   }
 
   void next_address(int offset) { page_offset_ += offset; }
@@ -180,11 +180,11 @@
 
   void set_to_large_code_object() {
     ASSERT(space_ == LO_SPACE);
-    space_ = kLOSpaceExecutable;
+    space_ = kLargeCode;
   }
   void set_to_large_fixed_array() {
     ASSERT(space_ == LO_SPACE);
-    space_ = kLOSpacePointer;
+    space_ = kLargeFixedArray;
   }
 
 
@@ -201,6 +201,7 @@
   int result = 0;
   switch (space_) {
     case MAP_SPACE:
+    case CELL_SPACE:
     case OLD_POINTER_SPACE:
     case OLD_DATA_SPACE:
     case CODE_SPACE:
@@ -216,8 +217,8 @@
       result = word_offset << kPageAndOffsetShift;
       break;
     case LO_SPACE:
-    case kLOSpaceExecutable:
-    case kLOSpacePointer:
+    case kLargeCode:
+    case kLargeFixedArray:
       ASSERT_EQ(0, page_offset_);
       ASSERT_EQ(0, page_index_ & ~kPageAndOffsetMask);
       result = page_index_ << kPageAndOffsetShift;
@@ -235,6 +236,7 @@
   ASSERT(page_offset_ >= 0 && page_index_ >= 0);
   switch (space_) {
     case MAP_SPACE:
+    case CELL_SPACE:
     case OLD_POINTER_SPACE:
     case OLD_DATA_SPACE:
     case CODE_SPACE:
@@ -245,8 +247,8 @@
       ASSERT(page_index_ == 0);
       break;
     case LO_SPACE:
-    case kLOSpaceExecutable:
-    case kLOSpacePointer:
+    case kLargeCode:
+    case kLargeFixedArray:
       ASSERT(page_offset_ == 0);
       break;
   }
@@ -291,6 +293,7 @@
 void SimulatedHeapSpace::InitEmptyHeap(AllocationSpace space) {
   switch (space) {
     case MAP_SPACE:
+    case CELL_SPACE:
     case OLD_POINTER_SPACE:
     case OLD_DATA_SPACE:
     case CODE_SPACE:
@@ -307,12 +310,15 @@
 void SimulatedHeapSpace::InitCurrentHeap(AllocationSpace space) {
   switch (space) {
     case MAP_SPACE:
+    case CELL_SPACE:
     case OLD_POINTER_SPACE:
     case OLD_DATA_SPACE:
     case CODE_SPACE: {
       PagedSpace* ps;
       if (space == MAP_SPACE) {
         ps = Heap::map_space();
+      } else if (space == CELL_SPACE) {
+        ps = Heap::cell_space();
       } else if (space == OLD_POINTER_SPACE) {
         ps = Heap::old_pointer_space();
       } else if (space == OLD_DATA_SPACE) {
@@ -1121,6 +1127,8 @@
   writer_->PutInt(Heap::code_space()->Size() + Heap::new_space()->Size());
   writer_->PutC('|');
   writer_->PutInt(Heap::map_space()->Size());
+  writer_->PutC('|');
+  writer_->PutInt(Heap::cell_space()->Size());
   writer_->PutC(']');
   // Write global handles.
   writer_->PutC('G');
@@ -1303,6 +1311,7 @@
 Deserializer::Deserializer(const byte* str, int len)
   : reader_(str, len),
     map_pages_(kInitArraySize),
+    cell_pages_(kInitArraySize),
     old_pointer_pages_(kInitArraySize),
     old_data_pages_(kInitArraySize),
     code_pages_(kInitArraySize),
@@ -1475,6 +1484,8 @@
   InitPagedSpace(Heap::code_space(), reader_.GetInt(), &code_pages_);
   reader_.ExpectC('|');
   InitPagedSpace(Heap::map_space(), reader_.GetInt(), &map_pages_);
+  reader_.ExpectC('|');
+  InitPagedSpace(Heap::cell_space(), reader_.GetInt(), &cell_pages_);
   reader_.ExpectC(']');
   // Create placeholders for global handles later to be filled in during
   // IterateRoots.
@@ -1607,6 +1618,9 @@
     case MAP_SPACE:
       return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
                           Heap::map_space(), &map_pages_);
+    case CELL_SPACE:
+      return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
+                          Heap::cell_space(), &cell_pages_);
     case OLD_POINTER_SPACE:
       return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
                           Heap::old_pointer_space(), &old_pointer_pages_);
diff --git a/src/serialize.h b/src/serialize.h
index 7f4eb63..1b24065 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -320,10 +320,11 @@
   bool has_log_;  // The file has log information.
 
   // Resolve caches the following:
-  List<Page*> map_pages_;          // All pages in the map space.
+  List<Page*> map_pages_;  // All pages in the map space.
+  List<Page*> cell_pages_;  // All pages in the cell space.
   List<Page*> old_pointer_pages_;  // All pages in the old pointer space.
-  List<Page*> old_data_pages_;     // All pages in the old data space.
-  List<Page*> code_pages_;
+  List<Page*> old_data_pages_;  // All pages in the old data space.
+  List<Page*> code_pages_;  // All pages in the code space.
   List<Object*> large_objects_;    // All known large objects.
   // A list of global handles at deserialization time.
   List<Object**> global_handles_;
diff --git a/src/spaces.cc b/src/spaces.cc
index 3f3a635..2393281 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -37,8 +37,8 @@
 // For contiguous spaces, top should be in the space (or at the end) and limit
 // should be the end of the space.
 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
-  ASSERT((space).low() <= (info).top                 \
-         && (info).top <= (space).high()             \
+  ASSERT((space).low() <= (info).top                  \
+         && (info).top <= (space).high()              \
          && (info).limit == (space).high())
 
 
@@ -786,6 +786,77 @@
 #endif
 
 
+#ifdef DEBUG
+// We do not assume that the PageIterator works, because it depends on the
+// invariants we are checking during verification.
+void PagedSpace::Verify(ObjectVisitor* visitor) {
+  // The allocation pointer should be valid, and it should be in a page in the
+  // space.
+  ASSERT(allocation_info_.VerifyPagedAllocation());
+  Page* top_page = Page::FromAllocationTop(allocation_info_.top);
+  ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));
+
+  // Loop over all the pages.
+  bool above_allocation_top = false;
+  Page* current_page = first_page_;
+  while (current_page->is_valid()) {
+    if (above_allocation_top) {
+      // We don't care what's above the allocation top.
+    } else {
+      // Unless this is the last page in the space containing allocated
+      // objects, the allocation top should be at a constant offset from the
+      // object area end.
+      Address top = current_page->AllocationTop();
+      if (current_page == top_page) {
+        ASSERT(top == allocation_info_.top);
+        // The next page will be above the allocation top.
+        above_allocation_top = true;
+      } else {
+        ASSERT(top == current_page->ObjectAreaEnd() - page_extra_);
+      }
+
+      // It should be packed with objects from the bottom to the top.
+      Address current = current_page->ObjectAreaStart();
+      while (current < top) {
+        HeapObject* object = HeapObject::FromAddress(current);
+
+        // The first word should be a map, and we expect all map pointers to
+        // be in map space.
+        Map* map = object->map();
+        ASSERT(map->IsMap());
+        ASSERT(Heap::map_space()->Contains(map));
+
+        // Perform space-specific object verification.
+        VerifyObject(object);
+
+        // The object itself should look OK.
+        object->Verify();
+
+        // All the interior pointers should be contained in the heap and
+        // have their remembered set bits set if required as determined
+        // by the visitor.
+        int size = object->Size();
+        if (object->IsCode()) {
+          Code::cast(object)->ConvertICTargetsFromAddressToObject();
+          object->IterateBody(map->instance_type(), size, visitor);
+          Code::cast(object)->ConvertICTargetsFromObjectToAddress();
+        } else {
+          object->IterateBody(map->instance_type(), size, visitor);
+        }
+
+        current += size;
+      }
+
+      // The allocation pointer should not be in the middle of an object.
+      ASSERT(current == top);
+    }
+
+    current_page = current_page->next_page();
+  }
+}
+#endif
+
+
 // -----------------------------------------------------------------------------
 // NewSpace implementation
 
@@ -1265,13 +1336,13 @@
   // If the block is too small (e.g., one or two words) to hold both a size
   // field and a next pointer, we give it a filler map that gives it the
   // correct size.
-  if (size_in_bytes > ByteArray::kHeaderSize) {
+  if (size_in_bytes > ByteArray::kAlignedSize) {
     set_map(Heap::raw_unchecked_byte_array_map());
     ByteArray::cast(this)->set_length(ByteArray::LengthFor(size_in_bytes));
   } else if (size_in_bytes == kPointerSize) {
-    set_map(Heap::raw_unchecked_one_word_filler_map());
+    set_map(Heap::raw_unchecked_one_pointer_filler_map());
   } else if (size_in_bytes == 2 * kPointerSize) {
-    set_map(Heap::raw_unchecked_two_word_filler_map());
+    set_map(Heap::raw_unchecked_two_pointer_filler_map());
   } else {
     UNREACHABLE();
   }
@@ -1280,16 +1351,26 @@
 
 
 Address FreeListNode::next() {
-  ASSERT(map() == Heap::raw_unchecked_byte_array_map());
-  ASSERT(Size() >= kNextOffset + kPointerSize);
-  return Memory::Address_at(address() + kNextOffset);
+  ASSERT(map() == Heap::raw_unchecked_byte_array_map() ||
+         map() == Heap::raw_unchecked_two_pointer_filler_map());
+  if (map() == Heap::raw_unchecked_byte_array_map()) {
+    ASSERT(Size() >= kNextOffset + kPointerSize);
+    return Memory::Address_at(address() + kNextOffset);
+  } else {
+    return Memory::Address_at(address() + kPointerSize);
+  }
 }
 
 
 void FreeListNode::set_next(Address next) {
-  ASSERT(map() == Heap::raw_unchecked_byte_array_map());
-  ASSERT(Size() >= kNextOffset + kPointerSize);
-  Memory::Address_at(address() + kNextOffset) = next;
+  ASSERT(map() == Heap::raw_unchecked_byte_array_map() ||
+         map() == Heap::raw_unchecked_two_pointer_filler_map());
+  if (map() == Heap::raw_unchecked_byte_array_map()) {
+    ASSERT(Size() >= kNextOffset + kPointerSize);
+    Memory::Address_at(address() + kNextOffset) = next;
+  } else {
+    Memory::Address_at(address() + kPointerSize) = next;
+  }
 }
 
 
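The branching in next() and set_next() above exists because a two-pointer
filler block has no length field.  Roughly, the two free-block layouts are:

  byte-array block (3+ words):  [ map | length | next | unused... ]
  two-pointer filler (2 words): [ map | next ]

so fillers keep the next pointer in the word immediately after the map
(address() + kPointerSize), while byte-array blocks keep it at kNextOffset.
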
@@ -1445,42 +1526,42 @@
 #endif
 
 
-MapSpaceFreeList::MapSpaceFreeList(AllocationSpace owner) {
-  owner_ = owner;
+FixedSizeFreeList::FixedSizeFreeList(AllocationSpace owner, int object_size)
+    : owner_(owner), object_size_(object_size) {
   Reset();
 }
 
 
-void MapSpaceFreeList::Reset() {
+void FixedSizeFreeList::Reset() {
   available_ = 0;
   head_ = NULL;
 }
 
 
-void MapSpaceFreeList::Free(Address start) {
+void FixedSizeFreeList::Free(Address start) {
 #ifdef DEBUG
-  for (int i = 0; i < Map::kSize; i += kPointerSize) {
+  for (int i = 0; i < object_size_; i += kPointerSize) {
     Memory::Address_at(start + i) = kZapValue;
   }
 #endif
   ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
   FreeListNode* node = FreeListNode::FromAddress(start);
-  node->set_size(Map::kSize);
+  node->set_size(object_size_);
   node->set_next(head_);
   head_ = node->address();
-  available_ += Map::kSize;
+  available_ += object_size_;
 }
 
 
-Object* MapSpaceFreeList::Allocate() {
+Object* FixedSizeFreeList::Allocate() {
   if (head_ == NULL) {
-    return Failure::RetryAfterGC(Map::kSize, owner_);
+    return Failure::RetryAfterGC(object_size_, owner_);
   }
 
   ASSERT(!FLAG_always_compact);  // We only use the freelists with mark-sweep.
   FreeListNode* node = FreeListNode::FromAddress(head_);
   head_ = node->next();
-  available_ -= Map::kSize;
+  available_ -= object_size_;
   return node;
 }
 
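A sketch of the generalized free list in use, with the constructor
arguments this patch introduces (CELL_SPACE and JSGlobalPropertyCell::kSize
are what the new cell space would pass):

  FixedSizeFreeList free_list(CELL_SPACE, JSGlobalPropertyCell::kSize);
  free_list.Free(dead_object_address);    // zaps (in debug) and pushes the block
  Object* result = free_list.Allocate();  // pops a block, or RetryAfterGC failure
  if (result->IsFailure()) { /* trigger a GC and retry */ }
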
@@ -1494,7 +1575,6 @@
     // the space is considered 'available' and we will rediscover live data
     // and waste during the collection.
     MCResetRelocationInfo();
-    mc_end_of_relocation_ = bottom();
     ASSERT(Available() == Capacity());
   } else {
     // During a non-compacting collection, everything below the linear
@@ -1510,24 +1590,6 @@
 }
 
 
-void OldSpace::MCAdjustRelocationEnd(Address address, int size_in_bytes) {
-  ASSERT(Contains(address));
-  Address current_top = mc_end_of_relocation_;
-  Page* current_page = Page::FromAllocationTop(current_top);
-
-  // No more objects relocated to this page?  Move to the next.
-  ASSERT(current_top <= current_page->mc_relocation_top);
-  if (current_top == current_page->mc_relocation_top) {
-    // The space should already be properly expanded.
-    Page* next_page = current_page->next_page();
-    CHECK(next_page->is_valid());
-    mc_end_of_relocation_ = next_page->ObjectAreaStart();
-  }
-  ASSERT(mc_end_of_relocation_ == address);
-  mc_end_of_relocation_ += size_in_bytes;
-}
-
-
 void OldSpace::MCCommitRelocationInfo() {
   // Update fast allocation info.
   allocation_info_.top = mc_forwarding_info_.top;
@@ -1624,76 +1686,6 @@
 
 
 #ifdef DEBUG
-// We do not assume that the PageIterator works, because it depends on the
-// invariants we are checking during verification.
-void OldSpace::Verify() {
-  // The allocation pointer should be valid, and it should be in a page in the
-  // space.
-  ASSERT(allocation_info_.VerifyPagedAllocation());
-  Page* top_page = Page::FromAllocationTop(allocation_info_.top);
-  ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));
-
-  // Loop over all the pages.
-  bool above_allocation_top = false;
-  Page* current_page = first_page_;
-  while (current_page->is_valid()) {
-    if (above_allocation_top) {
-      // We don't care what's above the allocation top.
-    } else {
-      // Unless this is the last page in the space containing allocated
-      // objects, the allocation top should be at the object area end.
-      Address top = current_page->AllocationTop();
-      if (current_page == top_page) {
-        ASSERT(top == allocation_info_.top);
-        // The next page will be above the allocation top.
-        above_allocation_top = true;
-      } else {
-        ASSERT(top == current_page->ObjectAreaEnd());
-      }
-
-      // It should be packed with objects from the bottom to the top.
-      Address current = current_page->ObjectAreaStart();
-      while (current < top) {
-        HeapObject* object = HeapObject::FromAddress(current);
-
-        // The first word should be a map, and we expect all map pointers to
-        // be in map space.
-        Map* map = object->map();
-        ASSERT(map->IsMap());
-        ASSERT(Heap::map_space()->Contains(map));
-
-        // The object should not be a map.
-        ASSERT(!object->IsMap());
-
-        // The object itself should look OK.
-        object->Verify();
-
-        // All the interior pointers should be contained in the heap and have
-        // their remembered set bits set if they point to new space.  Code
-        // objects do not have remembered set bits that we care about.
-        VerifyPointersAndRSetVisitor rset_visitor;
-        VerifyPointersVisitor no_rset_visitor;
-        int size = object->Size();
-        if (object->IsCode()) {
-          Code::cast(object)->ConvertICTargetsFromAddressToObject();
-          object->IterateBody(map->instance_type(), size, &no_rset_visitor);
-          Code::cast(object)->ConvertICTargetsFromObjectToAddress();
-        } else {
-          object->IterateBody(map->instance_type(), size, &rset_visitor);
-        }
-
-        current += size;
-      }
-
-      // The allocation pointer should not be in the middle of an object.
-      ASSERT(current == top);
-    }
-
-    current_page = current_page->next_page();
-  }
-}
-
-
 struct CommentStatistic {
   const char* comment;
   int size;
@@ -1987,25 +1979,13 @@
 #endif
 
 // -----------------------------------------------------------------------------
-// MapSpace implementation
+// FixedSpace implementation
 
-void MapSpace::PrepareForMarkCompact(bool will_compact) {
+void FixedSpace::PrepareForMarkCompact(bool will_compact) {
   if (will_compact) {
     // Reset relocation info.
     MCResetRelocationInfo();
 
-    // Initialize map index entry.
-    int page_count = 0;
-    PageIterator it(this, PageIterator::ALL_PAGES);
-    while (it.has_next()) {
-      ASSERT_MAP_PAGE_INDEX(page_count);
-
-      Page* p = it.next();
-      ASSERT(p->mc_page_index == page_count);
-
-      page_addresses_[page_count++] = p->address();
-    }
-
     // During a compacting collection, everything in the space is considered
     // 'available' (set by the call to MCResetRelocationInfo) and we will
     // rediscover live and wasted bytes during the collection.
@@ -2023,7 +2003,7 @@
 }
 
 
-void MapSpace::MCCommitRelocationInfo() {
+void FixedSpace::MCCommitRelocationInfo() {
   // Update fast allocation info.
   allocation_info_.top = mc_forwarding_info_.top;
   allocation_info_.limit = mc_forwarding_info_.limit;
@@ -2053,7 +2033,8 @@
 // Slow case for normal allocation. Try in order: (1) allocate in the next
 // page in the space, (2) allocate off the space's free list, (3) expand the
 // space, (4) fail.
-HeapObject* MapSpace::SlowAllocateRaw(int size_in_bytes) {
+HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
+  ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
   // Linear allocation in this space has failed.  If there is another page
   // in the space, move to that page and allocate there.  This allocation
   // should succeed.
@@ -2062,10 +2043,10 @@
     return AllocateInNextPage(current_page, size_in_bytes);
   }
 
-  // There is no next page in this space.  Try free list allocation.  The
-  // map space free list implicitly assumes that all free blocks are map
-  // sized.
-  if (size_in_bytes == Map::kSize) {
+  // There is no next page in this space.  Try free list allocation.
+  // The fixed space free list implicitly assumes that all free blocks
+  // are of the fixed size.
+  if (size_in_bytes == object_size_in_bytes_) {
     Object* result = free_list_.Allocate();
     if (!result->IsFailure()) {
       accounting_stats_.AllocateBytes(size_in_bytes);
@@ -2094,81 +2075,19 @@
 // Move to the next page (there is assumed to be one) and allocate there.
 // The top-of-page block is always wasted, because it is too small to hold
 // an object of the fixed size.
-HeapObject* MapSpace::AllocateInNextPage(Page* current_page,
-                                         int size_in_bytes) {
+HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
+                                           int size_in_bytes) {
   ASSERT(current_page->next_page()->is_valid());
-  ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == kPageExtra);
-  accounting_stats_.WasteBytes(kPageExtra);
+  ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == page_extra_);
+  ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
+  accounting_stats_.WasteBytes(page_extra_);
   SetAllocationInfo(&allocation_info_, current_page->next_page());
   return AllocateLinearly(&allocation_info_, size_in_bytes);
 }
 
 
 #ifdef DEBUG
-// We do not assume that the PageIterator works, because it depends on the
-// invariants we are checking during verification.
-void MapSpace::Verify() {
-  // The allocation pointer should be valid, and it should be in a page in the
-  // space.
-  ASSERT(allocation_info_.VerifyPagedAllocation());
-  Page* top_page = Page::FromAllocationTop(allocation_info_.top);
-  ASSERT(MemoryAllocator::IsPageInSpace(top_page, this));
-
-  // Loop over all the pages.
-  bool above_allocation_top = false;
-  Page* current_page = first_page_;
-  while (current_page->is_valid()) {
-    if (above_allocation_top) {
-      // We don't care what's above the allocation top.
-    } else {
-      // Unless this is the last page in the space containing allocated
-      // objects, the allocation top should be at a constant offset from the
-      // object area end.
-      Address top = current_page->AllocationTop();
-      if (current_page == top_page) {
-        ASSERT(top == allocation_info_.top);
-        // The next page will be above the allocation top.
-        above_allocation_top = true;
-      } else {
-        ASSERT(top == current_page->ObjectAreaEnd() - kPageExtra);
-      }
-
-      // It should be packed with objects from the bottom to the top.
-      Address current = current_page->ObjectAreaStart();
-      while (current < top) {
-        HeapObject* object = HeapObject::FromAddress(current);
-
-        // The first word should be a map, and we expect all map pointers to
-        // be in map space.
-        Map* map = object->map();
-        ASSERT(map->IsMap());
-        ASSERT(Heap::map_space()->Contains(map));
-
-        // The object should be a map or a byte array.
-        ASSERT(object->IsMap() || object->IsByteArray());
-
-        // The object itself should look OK.
-        object->Verify();
-
-        // All the interior pointers should be contained in the heap and
-        // have their remembered set bits set if they point to new space.
-        VerifyPointersAndRSetVisitor visitor;
-        int size = object->Size();
-        object->IterateBody(map->instance_type(), size, &visitor);
-
-        current += size;
-      }
-
-      // The allocation pointer should not be in the middle of an object.
-      ASSERT(current == top);
-    }
-
-    current_page = current_page->next_page();
-  }
-}
-
-
-void MapSpace::ReportStatistics() {
+void FixedSpace::ReportStatistics() {
   int pct = Available() * 100 / Capacity();
   PrintF("  capacity: %d, waste: %d, available: %d, %%%d\n",
          Capacity(), Waste(), Available(), pct);
@@ -2215,7 +2134,50 @@
 }
 
 
-void MapSpace::PrintRSet() { DoPrintRSet("map"); }
+void FixedSpace::PrintRSet() { DoPrintRSet(name_); }
+#endif
+
+
+// -----------------------------------------------------------------------------
+// MapSpace implementation
+
+void MapSpace::PrepareForMarkCompact(bool will_compact) {
+  // Call prepare of the super class.
+  FixedSpace::PrepareForMarkCompact(will_compact);
+
+  if (will_compact) {
+    // Initialize map index entry.
+    int page_count = 0;
+    PageIterator it(this, PageIterator::ALL_PAGES);
+    while (it.has_next()) {
+      ASSERT_MAP_PAGE_INDEX(page_count);
+
+      Page* p = it.next();
+      ASSERT(p->mc_page_index == page_count);
+
+      page_addresses_[page_count++] = p->address();
+    }
+  }
+}
+
+
+#ifdef DEBUG
+void MapSpace::VerifyObject(HeapObject* object) {
+  // The object should be a map or a free-list node.
+  ASSERT(object->IsMap() || object->IsByteArray());
+}
+#endif
+
+
+// -----------------------------------------------------------------------------
+// CellSpace implementation
+
+#ifdef DEBUG
+void CellSpace::VerifyObject(HeapObject* object) {
+  // The object should be a global object property cell or a free-list node.
+  ASSERT(object->IsJSGlobalPropertyCell() ||
+         object->map() == Heap::two_pointer_filler_map());
+}
 #endif
 
 
diff --git a/src/spaces.h b/src/spaces.h
index 676652b..ccd1d27 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -302,7 +302,6 @@
   virtual int Size() = 0;
 
 #ifdef DEBUG
-  virtual void Verify() = 0;
   virtual void Print() = 0;
 #endif
 
@@ -836,6 +835,13 @@
   // Print meta info and objects in this space.
   virtual void Print();
 
+  // Verify integrity of this space.
+  virtual void Verify(ObjectVisitor* visitor);
+
+  // Overridden by subclasses to verify space-specific object
+  // properties (e.g., only maps or free-list nodes are in map space).
+  virtual void VerifyObject(HeapObject* obj) {}
+
   // Report code object related statistics
   void CollectCodeStatistics();
   static void ReportCodeStatistics();
@@ -862,6 +868,12 @@
   // Relocation information during mark-compact collections.
   AllocationInfo mc_forwarding_info_;
 
+  // Bytes of each page that cannot be allocated.  Possibly non-zero
+  // for pages in spaces with only fixed-size objects.  Always zero
+  // for pages in spaces with variable sized objects (those pages are
+  // padded with free-list nodes).
+  int page_extra_;
+
   // Sets allocation pointer to a page bottom.
   static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);
 
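Concretely, page_extra_ is set in the FixedSpace constructor below to
Page::kObjectAreaSize % object_size_in_bytes, the unusable tail of each
page.  With illustrative numbers only (not the real constants): an
8160-byte object area packed with 40-byte objects wastes 8160 % 40 = 0
bytes per page, while 44-byte objects would waste 8160 % 44 = 20 bytes.
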
@@ -1315,8 +1327,7 @@
  private:
   // The size range of blocks, in bytes. (Smaller allocations are allowed, but
   // will always result in waste.)
-  static const int kMinBlockSize =
-      POINTER_SIZE_ALIGN(ByteArray::kHeaderSize) + kPointerSize;
+  static const int kMinBlockSize = 2 * kPointerSize;
   static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
 
   // The identity of the owning space, for building allocation Failure
@@ -1391,9 +1402,9 @@
 
 
 // The free list for the map space.
-class MapSpaceFreeList BASE_EMBEDDED {
+class FixedSizeFreeList BASE_EMBEDDED {
  public:
-  explicit MapSpaceFreeList(AllocationSpace owner);
+  FixedSizeFreeList(AllocationSpace owner, int object_size);
 
   // Clear the free list.
   void Reset();
@@ -1402,12 +1413,12 @@
   int available() { return available_; }
 
   // Place a node on the free list.  The block starting at 'start' (assumed to
-  // have size Map::kSize) is placed on the free list.  Bookkeeping
+  // have size object_size_) is placed on the free list.  Bookkeeping
   // information will be written to the block, i.e., its contents will be
   // destroyed.  The start address should be word aligned.
   void Free(Address start);
 
-  // Allocate a map-sized block from the free list.  The block is unitialized.
+  // Allocate a fixed-size block from the free list.  The block is uninitialized.
   // A failure is returned if no block is available.
   Object* Allocate();
 
@@ -1422,7 +1433,10 @@
   // objects.
   AllocationSpace owner_;
 
-  DISALLOW_COPY_AND_ASSIGN(MapSpaceFreeList);
+  // The size of the objects in this space.
+  int object_size_;
+
+  DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList);
 };
 
 
@@ -1437,6 +1451,7 @@
                     AllocationSpace id,
                     Executability executable)
       : PagedSpace(max_capacity, id, executable), free_list_(id) {
+    page_extra_ = 0;
   }
 
   // The bytes available on the free list (ie, not above the linear allocation
@@ -1460,20 +1475,11 @@
   // clears the free list.
   virtual void PrepareForMarkCompact(bool will_compact);
 
-  // Adjust the top of relocation pointer to point to the end of the object
-  // given by 'address' and 'size_in_bytes'.  Move it to the next page if
-  // necessary, ensure that it points to the address, then increment it by the
-  // size.
-  void MCAdjustRelocationEnd(Address address, int size_in_bytes);
-
   // Updates the allocation pointer to the relocation top after a mark-compact
   // collection.
   virtual void MCCommitRelocationInfo();
 
 #ifdef DEBUG
-  // Verify integrity of this space.
-  virtual void Verify();
-
   // Reports statistics for the space
   void ReportStatistics();
   // Dump the remembered sets in the space to stdout.
@@ -1492,39 +1498,41 @@
   // The space's free list.
   OldSpaceFreeList free_list_;
 
-  // During relocation, we keep a pointer to the most recently relocated
-  // object in order to know when to move to the next page.
-  Address mc_end_of_relocation_;
-
  public:
   TRACK_MEMORY("OldSpace")
 };
 
 
 // -----------------------------------------------------------------------------
-// Old space for all map objects
+// Old space for objects of a fixed size
 
-class MapSpace : public PagedSpace {
+class FixedSpace : public PagedSpace {
  public:
-  // Creates a map space object with a maximum capacity.
-  explicit MapSpace(int max_capacity, AllocationSpace id)
-      : PagedSpace(max_capacity, id, NOT_EXECUTABLE), free_list_(id) { }
+  FixedSpace(int max_capacity,
+             AllocationSpace id,
+             int object_size_in_bytes,
+             const char* name)
+      : PagedSpace(max_capacity, id, NOT_EXECUTABLE),
+        object_size_in_bytes_(object_size_in_bytes),
+        name_(name),
+        free_list_(id, object_size_in_bytes) {
+    page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
+  }
 
   // The top of allocation in a page in this space. Undefined if page is unused.
   virtual Address PageAllocationTop(Page* page) {
     return page == TopPageOf(allocation_info_) ? top()
-        : page->ObjectAreaEnd() - kPageExtra;
+        : page->ObjectAreaEnd() - page_extra_;
   }
 
-  // Give a map-sized block of memory to the space's free list.
+  int object_size_in_bytes() { return object_size_in_bytes_; }
+
+  // Give a fixed sized block of memory to the space's free list.
   void Free(Address start) {
     free_list_.Free(start);
-    accounting_stats_.DeallocateBytes(Map::kSize);
+    accounting_stats_.DeallocateBytes(object_size_in_bytes_);
   }
 
-  // Given an index, returns the page address.
-  Address PageAddress(int page_index) { return page_addresses_[page_index]; }
-
   // Prepares for a mark-compact GC.
   virtual void PrepareForMarkCompact(bool will_compact);
 
@@ -1533,21 +1541,13 @@
   virtual void MCCommitRelocationInfo();
 
 #ifdef DEBUG
-  // Verify integrity of this space.
-  virtual void Verify();
-
   // Reports statistic info of the space
   void ReportStatistics();
+
   // Dump the remembered sets in the space to stdout.
   void PrintRSet();
 #endif
 
-  // Constants.
-  static const int kMapPageIndexBits = 10;
-  static const int kMaxMapPageIndex = (1 << kMapPageIndexBits) - 1;
-
-  static const int kPageExtra = Page::kObjectAreaSize % Map::kSize;
-
  protected:
   // Virtual function in the superclass.  Slow path of AllocateRaw.
   HeapObject* SlowAllocateRaw(int size_in_bytes);
@@ -1557,9 +1557,41 @@
   HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
 
  private:
-  // The space's free list.
-  MapSpaceFreeList free_list_;
+  // The size of objects in this space.
+  int object_size_in_bytes_;
 
+  // The name of this space.
+  const char* name_;
+
+  // The space's free list.
+  FixedSizeFreeList free_list_;
+};
+
+
+// -----------------------------------------------------------------------------
+// Old space for all map objects
+
+class MapSpace : public FixedSpace {
+ public:
+  // Creates a map space object with a maximum capacity.
+  MapSpace(int max_capacity, AllocationSpace id)
+      : FixedSpace(max_capacity, id, Map::kSize, "map") {}
+
+  // Prepares for a mark-compact GC.
+  virtual void PrepareForMarkCompact(bool will_compact);
+
+  // Given an index, returns the page address.
+  Address PageAddress(int page_index) { return page_addresses_[page_index]; }
+
+  // Constants.
+  static const int kMaxMapPageIndex = (1 << MapWord::kMapPageIndexBits) - 1;
+
+ protected:
+#ifdef DEBUG
+  virtual void VerifyObject(HeapObject* obj);
+#endif
+
+ private:
   // An array of page start address in a map space.
   Address page_addresses_[kMaxMapPageIndex + 1];
 
@@ -1569,6 +1601,25 @@
 
 
 // -----------------------------------------------------------------------------
+// Old space for all global object property cell objects
+
+class CellSpace : public FixedSpace {
+ public:
+  // Creates a property cell space object with a maximum capacity.
+  CellSpace(int max_capacity, AllocationSpace id)
+      : FixedSpace(max_capacity, id, JSGlobalPropertyCell::kSize, "cell") {}
+
+ protected:
+#ifdef DEBUG
+  virtual void VerifyObject(HeapObject* obj);
+#endif
+
+ public:
+  TRACK_MEMORY("MapSpace")
+};
+
+
+// -----------------------------------------------------------------------------
 // Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
 // the large object space. A large object is allocated from OS heap with
 // extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
diff --git a/src/string-stream.cc b/src/string-stream.cc
index 44ba297..9a137e3 100644
--- a/src/string-stream.cc
+++ b/src/string-stream.cc
@@ -343,10 +343,11 @@
     Add("<Invalid map>\n");
     return;
   }
-  for (DescriptorReader r(map->instance_descriptors()); !r.eos(); r.advance()) {
-    switch (r.type()) {
+  DescriptorArray* descs = map->instance_descriptors();
+  for (int i = 0; i < descs->number_of_descriptors(); i++) {
+    switch (descs->GetType(i)) {
       case FIELD: {
-        Object* key = r.GetKey();
+        Object* key = descs->GetKey(i);
         if (key->IsString() || key->IsNumber()) {
           int len = 3;
           if (key->IsString()) {
@@ -360,7 +361,7 @@
             key->ShortPrint();
           }
           Add(": ");
-          Object* value = js_object->FastPropertyAt(r.GetFieldIndex());
+          Object* value = js_object->FastPropertyAt(descs->GetFieldIndex(i));
           Add("%o\n", value);
         }
       }
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 8b3822a..7eb8cd3 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -173,14 +173,19 @@
 
 
 Object* StubCache::ComputeLoadGlobal(String* name,
-                                     GlobalObject* receiver,
+                                     JSObject* receiver,
+                                     GlobalObject* holder,
                                      JSGlobalPropertyCell* cell,
                                      bool is_dont_delete) {
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, NORMAL);
   Object* code = receiver->map()->FindInCodeCache(name, flags);
   if (code->IsUndefined()) {
     LoadStubCompiler compiler;
-    code = compiler.CompileLoadGlobal(receiver, cell, name, is_dont_delete);
+    code = compiler.CompileLoadGlobal(receiver,
+                                      holder,
+                                      cell,
+                                      name,
+                                      is_dont_delete);
     if (code->IsFailure()) return code;
     LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
@@ -445,7 +450,7 @@
     if (!function->is_compiled()) return Failure::InternalError();
     // Compile the stub - only create stubs for fully compiled functions.
     CallStubCompiler compiler(argc, in_loop);
-    code = compiler.CompileCallConstant(object, holder, function, check);
+    code = compiler.CompileCallConstant(object, holder, function, name, check);
     if (code->IsFailure()) return code;
     ASSERT_EQ(flags, Code::cast(code)->flags());
     LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
@@ -537,7 +542,8 @@
 Object* StubCache::ComputeCallGlobal(int argc,
                                      InLoopFlag in_loop,
                                      String* name,
-                                     GlobalObject* receiver,
+                                     JSObject* receiver,
+                                     GlobalObject* holder,
                                      JSGlobalPropertyCell* cell,
                                      JSFunction* function) {
   Code::Flags flags =
@@ -550,7 +556,7 @@
     // caches.
     if (!function->is_compiled()) return Failure::InternalError();
     CallStubCompiler compiler(argc, in_loop);
-    code = compiler.CompileCallGlobal(receiver, cell, function, name);
+    code = compiler.CompileCallGlobal(receiver, holder, cell, function, name);
     if (code->IsFailure()) return code;
     ASSERT_EQ(flags, Code::cast(code)->flags());
     LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
@@ -951,6 +957,10 @@
 
 
 Object* StubCompiler::GetCodeWithFlags(Code::Flags flags, const char* name) {
+  // Check for allocation failures during stub compilation.
+  if (failure_->IsFailure()) return failure_;
+
+  // Create code object in the heap.
   CodeDesc desc;
   masm_.GetCode(&desc);
   Object* result = Heap::CreateCode(desc, NULL, flags, masm_.CodeObject());
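
The failure_ field and set_failure() declared below give stub generators a
way to record an allocation failure and have it surface when the code
object is finally requested.  A hedged sketch of the intended pattern
inside a compiler method (SomeAllocation is hypothetical):

  Object* probe = SomeAllocation();
  if (probe->IsFailure()) {
    set_failure(Failure::cast(probe));  // remembered by the StubCompiler
    return probe;                       // unwind out of the generator
  }
  // ... emit more code, then:
  return GetCodeWithFlags(flags, name);  // returns failure_ if it was set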
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 9abf370..8bee370 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -79,7 +79,8 @@
 
 
   static Object* ComputeLoadGlobal(String* name,
-                                   GlobalObject* receiver,
+                                   JSObject* receiver,
+                                   GlobalObject* holder,
                                    JSGlobalPropertyCell* cell,
                                    bool is_dont_delete);
 
@@ -164,7 +165,8 @@
   static Object* ComputeCallGlobal(int argc,
                                    InLoopFlag in_loop,
                                    String* name,
-                                   GlobalObject* receiver,
+                                   JSObject* receiver,
+                                   GlobalObject* holder,
                                    JSGlobalPropertyCell* cell,
                                    JSFunction* function);
 
@@ -322,7 +324,7 @@
     JSARRAY_HAS_FAST_ELEMENTS_CHECK
   };
 
-  StubCompiler() : scope_(), masm_(NULL, 256) { }
+  StubCompiler() : scope_(), masm_(NULL, 256), failure_(NULL) { }
 
   Object* CompileCallInitialize(Code::Flags flags);
   Object* CompileCallPreMonomorphic(Code::Flags flags);
@@ -342,40 +344,7 @@
   static void GenerateFastPropertyLoad(MacroAssembler* masm,
                                        Register dst, Register src,
                                        JSObject* holder, int index);
-  static void GenerateLoadField(MacroAssembler* masm,
-                                JSObject* object,
-                                JSObject* holder,
-                                Register receiver,
-                                Register scratch1,
-                                Register scratch2,
-                                int index,
-                                Label* miss_label);
-  static void GenerateLoadCallback(MacroAssembler* masm,
-                                   JSObject* object,
-                                   JSObject* holder,
-                                   Register receiver,
-                                   Register name,
-                                   Register scratch1,
-                                   Register scratch2,
-                                   AccessorInfo* callback,
-                                   Label* miss_label);
-  static void GenerateLoadConstant(MacroAssembler* masm,
-                                   JSObject* object,
-                                   JSObject* holder,
-                                   Register receiver,
-                                   Register scratch1,
-                                   Register scratch2,
-                                   Object* value,
-                                   Label* miss_label);
-  static void GenerateLoadInterceptor(MacroAssembler* masm,
-                                      JSObject* object,
-                                      JSObject* holder,
-                                      Smi* lookup_hint,
-                                      Register receiver,
-                                      Register name,
-                                      Register scratch1,
-                                      Register scratch2,
-                                      Label* miss_label);
+
   static void GenerateLoadArrayLength(MacroAssembler* masm,
                                       Register receiver,
                                       Register scratch,
@@ -410,10 +379,60 @@
   Object* GetCodeWithFlags(Code::Flags flags, String* name);
 
   MacroAssembler* masm() { return &masm_; }
+  void set_failure(Failure* failure) { failure_ = failure; }
+
+  // Check the integrity of the prototype chain to make sure that the
+  // current IC is still valid.
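+  // On the fast path the returned register holds a reference to the
+  // holder object.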
+  Register CheckPrototypes(JSObject* object,
+                           Register object_reg,
+                           JSObject* holder,
+                           Register holder_reg,
+                           Register scratch,
+                           String* name,
+                           Label* miss);
+
+  void GenerateLoadField(JSObject* object,
+                         JSObject* holder,
+                         Register receiver,
+                         Register scratch1,
+                         Register scratch2,
+                         int index,
+                         String* name,
+                         Label* miss);
+
+  void GenerateLoadCallback(JSObject* object,
+                            JSObject* holder,
+                            Register receiver,
+                            Register name_reg,
+                            Register scratch1,
+                            Register scratch2,
+                            AccessorInfo* callback,
+                            String* name,
+                            Label* miss);
+
+  void GenerateLoadConstant(JSObject* object,
+                            JSObject* holder,
+                            Register receiver,
+                            Register scratch1,
+                            Register scratch2,
+                            Object* value,
+                            String* name,
+                            Label* miss);
+
+  void GenerateLoadInterceptor(JSObject* object,
+                               JSObject* holder,
+                               Smi* lookup_hint,
+                               Register receiver,
+                               Register name_reg,
+                               Register scratch1,
+                               Register scratch2,
+                               String* name,
+                               Label* miss);
 
  private:
   HandleScope scope_;
   MacroAssembler masm_;
+  Failure* failure_;
 };
 
 
@@ -435,8 +454,9 @@
                                  JSObject* holder,
                                  String* name);
 
-  Object* CompileLoadGlobal(GlobalObject* object,
-                            JSGlobalPropertyCell* holder,
+  Object* CompileLoadGlobal(JSObject* object,
+                            GlobalObject* holder,
+                            JSGlobalPropertyCell* cell,
                             String* name,
                             bool is_dont_delete);
 
@@ -515,11 +535,13 @@
   Object* CompileCallConstant(Object* object,
                               JSObject* holder,
                               JSFunction* function,
+                              String* name,
                               CheckType check);
   Object* CompileCallInterceptor(Object* object,
                                  JSObject* holder,
                                  String* name);
-  Object* CompileCallGlobal(GlobalObject* object,
+  Object* CompileCallGlobal(JSObject* object,
+                            GlobalObject* holder,
                             JSGlobalPropertyCell* cell,
                             JSFunction* function,
                             String* name);
diff --git a/src/unicode.cc b/src/unicode.cc
index 4a9e070..ef13593 100644
--- a/src/unicode.cc
+++ b/src/unicode.cc
@@ -194,18 +194,13 @@
 uchar Utf8::CalculateValue(const byte* str,
                            unsigned length,
                            unsigned* cursor) {
-  static const uchar kMaxOneByteChar = 0x7F;
-  static const uchar kMaxTwoByteChar = 0x7FF;
-  static const uchar kMaxThreeByteChar = 0xFFFF;
-  static const uchar kMaxFourByteChar = 0x1FFFFF;
-
   // We only get called for non-ascii characters.
   if (length == 1) {
     *cursor += 1;
     return kBadChar;
   }
-  int first = str[0];
-  int second = str[1] ^ 0x80;
+  byte first = str[0];
+  byte second = str[1] ^ 0x80;
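+  // A UTF-8 continuation byte has the form 10xxxxxx, so after the xor
+  // with 0x80 its two top bits must be clear.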
   if (second & 0xC0) {
     *cursor += 1;
     return kBadChar;
@@ -227,7 +222,7 @@
     *cursor += 1;
     return kBadChar;
   }
-  int third = str[2] ^ 0x80;
+  byte third = str[2] ^ 0x80;
   if (third & 0xC0) {
     *cursor += 1;
     return kBadChar;
@@ -245,7 +240,7 @@
     *cursor += 1;
     return kBadChar;
   }
-  int fourth = str[3] ^ 0x80;
+  byte fourth = str[3] ^ 0x80;
   if (fourth & 0xC0) {
     *cursor += 1;
     return kBadChar;
diff --git a/src/version.cc b/src/version.cc
index 87d99d8..f1833a5 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     1
 #define MINOR_VERSION     2
-#define BUILD_NUMBER      13
-#define PATCH_LEVEL       2 
+#define BUILD_NUMBER      14
+#define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false
 
 // Define SONAME to have the SCons build put a specific SONAME into the
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 167334f..c4ee454 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -73,45 +73,8 @@
 XMMRegister xmm15 = { 15 };
 
 
-Operand::Operand(Register base, int32_t disp): rex_(0) {
-  len_ = 1;
-  if (base.is(rsp) || base.is(r12)) {
-    // SIB byte is needed to encode (rsp + offset) or (r12 + offset).
-    set_sib(times_1, rsp, base);
-  }
-
-  if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
-    set_modrm(0, base);
-  } else if (is_int8(disp)) {
-    set_modrm(1, base);
-    set_disp8(disp);
-  } else {
-    set_modrm(2, base);
-    set_disp32(disp);
-  }
-}
-
-
-Operand::Operand(Register base,
-                 Register index,
-                 ScaleFactor scale,
-                 int32_t disp): rex_(0) {
-  ASSERT(!index.is(rsp));
-  len_ = 1;
-  set_sib(scale, index, base);
-  if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
-    // This call to set_modrm doesn't overwrite the REX.B (or REX.X) bits
-    // possibly set by set_sib.
-    set_modrm(0, rsp);
-  } else if (is_int8(disp)) {
-    set_modrm(1, rsp);
-    set_disp8(disp);
-  } else {
-    set_modrm(2, rsp);
-    set_disp32(disp);
-  }
-}
-
+// -----------------------------------------------------------------------------
+// Implementation of CpuFeatures
 
 // The required user mode extensions in X64 are (from AMD64 ABI Table A.1):
 //   fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall
@@ -193,6 +156,71 @@
   ASSERT(IsSupported(CMOV));
 }
 
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+// Patch the code at the current PC with a call to the target address.
+// Additional guard int3 instructions can be added if required.
+void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
+  // The patched-in sequence (movq r10, target; call r10) takes up
+  // 13 bytes; each guard int3 takes up one byte.
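+  // Byte layout:
+  //   49 BA <imm64>    movq r10, target
+  //   49 FF D2         call r10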
+  Address patch_site = pc_;
+  Memory::uint16_at(patch_site) = 0xBA49u;  // movq r10, imm64
+  // Write "0x00, call r10" starting at last byte of address.  We overwrite
+  // the 0x00 later, and this lets us write a uint32.
+  Memory::uint32_at(patch_site + 9) = 0xD2FF4900u;  // 0x00, call r10
+  Memory::Address_at(patch_site + 2) = target;
+
+  // Add the requested number of int3 instructions after the call.
+  for (int i = 0; i < guard_bytes; i++) {
+    *(patch_site + 13 + i) = 0xCC;  // int3
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand
+
+Operand::Operand(Register base, int32_t disp): rex_(0) {
+  len_ = 1;
+  if (base.is(rsp) || base.is(r12)) {
+    // SIB byte is needed to encode (rsp + offset) or (r12 + offset).
+    set_sib(times_1, rsp, base);
+  }
+
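+  // With mod 00, an r/m field of rbp/r13 is decoded as RIP-relative
+  // addressing, so these bases always need an explicit displacement.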
+  if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
+    set_modrm(0, base);
+  } else if (is_int8(disp)) {
+    set_modrm(1, base);
+    set_disp8(disp);
+  } else {
+    set_modrm(2, base);
+    set_disp32(disp);
+  }
+}
+
+
+Operand::Operand(Register base,
+                 Register index,
+                 ScaleFactor scale,
+                 int32_t disp): rex_(0) {
+  ASSERT(!index.is(rsp));
+  len_ = 1;
+  set_sib(scale, index, base);
+  if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
+    // This call to set_modrm doesn't overwrite the REX.B (or REX.X) bits
+    // possibly set by set_sib.
+    set_modrm(0, rsp);
+  } else if (is_int8(disp)) {
+    set_modrm(1, rsp);
+    set_disp8(disp);
+  } else {
+    set_modrm(2, rsp);
+    set_disp32(disp);
+  }
+}
+
+
 // -----------------------------------------------------------------------------
 // Implementation of Assembler
 
@@ -579,6 +607,23 @@
 }
 
 
+void Assembler::shift_32(Register dst, Immediate shift_amount, int subcode) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(is_uint5(shift_amount.value_));  // 32-bit shift counts are 0..31.
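+  // 0xD1 /subcode is the shift-by-one form; 0xC1 /subcode ib takes an
+  // 8-bit immediate count.  Omitting the REX.W prefix selects the
+  // 32-bit operand size.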
+  if (shift_amount.value_ == 1) {
+    emit_optional_rex_32(dst);
+    emit(0xD1);
+    emit_modrm(subcode, dst);
+  } else {
+    emit_optional_rex_32(dst);
+    emit(0xC1);
+    emit_modrm(subcode, dst);
+    emit(shift_amount.value_);
+  }
+}
+
+
 void Assembler::bt(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 7e30934..e895332 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -690,11 +690,22 @@
     shift(dst, shift_amount, 0x7);
   }
 
+  // Shifts dst right, duplicating sign bit, by shift_amount bits.
+  // Shifting by 1 is handled efficiently.
+  void sarl(Register dst, Immediate shift_amount) {
+    shift_32(dst, shift_amount, 0x7);
+  }
+
   // Shifts dst right, duplicating sign bit, by cl % 64 bits.
   void sar(Register dst) {
     shift(dst, 0x7);
   }
 
+  // Shifts dst right, duplicating sign bit, by cl % 32 bits.
+  void sarl(Register dst) {
+    shift_32(dst, 0x7);
+  }
+
   void shl(Register dst, Immediate shift_amount) {
     shift(dst, shift_amount, 0x4);
   }
@@ -1123,6 +1134,7 @@
                                  Immediate src);
   // Emit machine code for a shift operation.
   void shift(Register dst, Immediate shift_amount, int subcode);
+  void shift_32(Register dst, Immediate shift_amount, int subcode);
   // Shift dst by cl % 64 bits.
   void shift(Register dst, int subcode);
   void shift_32(Register dst, int subcode);
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index b1f2b8f..e3e32e6 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -355,14 +355,19 @@
   // receiver.
   frame_->Exit();
   masm_->ret((scope_->num_parameters() + 1) * kPointerSize);
+  // Add padding that will be overwritten by a debugger breakpoint.
+  // frame_->Exit() generates "movq rsp, rbp; pop rbp" (4 bytes) and
+  // "ret k" is 3 bytes.
+  const int kPadding = Debug::kX64JSReturnSequenceLength - 4 - 3;
+  for (int i = 0; i < kPadding; ++i) {
+    masm_->int3();
+  }
   DeleteFrame();
 
-  // TODO(x64): introduce kX64JSReturnSequenceLength and enable assert.
-
   // Check that the size of the code used for returning matches what is
   // expected by the debugger.
-  // ASSERT_EQ(Debug::kIa32JSReturnSequenceLength,
-  //          masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
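+  // The sequence must be large enough to hold the call patch written
+  // by RelocInfo::PatchCodeWithCall when a break point is set.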
+  ASSERT_EQ(Debug::kX64JSReturnSequenceLength,
+            masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
 }
 
 
@@ -1292,7 +1297,7 @@
   node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
 
   __ movq(rax, frame_->ElementAt(0));  // load the current count
-  __ cmpq(rax, frame_->ElementAt(1));  // compare to the array length
+  __ cmpl(rax, frame_->ElementAt(1));  // compare to the array length
   node->break_target()->Branch(above_equal);
 
   // Get the i'th entry of the array.
@@ -2724,12 +2729,6 @@
 
 
 void DeferredPrefixCountOperation::Generate() {
-  // Undo the optimistic smi operation.
-  if (is_increment_) {
-    __ subq(dst_, Immediate(Smi::FromInt(1)));
-  } else {
-    __ addq(dst_, Immediate(Smi::FromInt(1)));
-  }
   __ push(dst_);
   __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
   __ push(rax);
@@ -2765,12 +2764,6 @@
 
 
 void DeferredPostfixCountOperation::Generate() {
-  // Undo the optimistic smi operation.
-  if (is_increment_) {
-    __ subq(dst_, Immediate(Smi::FromInt(1)));
-  } else {
-    __ addq(dst_, Immediate(Smi::FromInt(1)));
-  }
   __ push(dst_);
   __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
 
@@ -2827,19 +2820,6 @@
     // Ensure the new value is writable.
     frame_->Spill(new_value.reg());
 
-    // In order to combine the overflow and the smi tag check, we need
-    // to be able to allocate a byte register.  We attempt to do so
-    // without spilling.  If we fail, we will generate separate overflow
-    // and smi tag checks.
-    //
-    // We allocate and clear the temporary register before
-    // performing the count operation since clearing the register using
-    // xor will clear the overflow flag.
-    Result tmp = allocator_->AllocateWithoutSpilling();
-
-    // Clear scratch register to prepare it for setcc after the operation below.
-    __ xor_(kScratchRegister, kScratchRegister);
-
     DeferredCode* deferred = NULL;
     if (is_postfix) {
       deferred = new DeferredPostfixCountOperation(new_value.reg(),
@@ -2850,25 +2830,26 @@
                                                   is_increment);
     }
 
+    Result tmp = allocator_->AllocateWithoutSpilling();
+    ASSERT(kSmiTagMask == 1 && kSmiTag == 0);
+    __ movl(tmp.reg(), Immediate(kSmiTagMask));
+    // Operate on a copy in kScratchRegister so that new_value still
+    // holds the original operand if we bail out to the deferred code.
+    __ movq(kScratchRegister, new_value.reg());
     if (is_increment) {
-      __ addq(new_value.reg(), Immediate(Smi::FromInt(1)));
+      __ addl(kScratchRegister, Immediate(Smi::FromInt(1)));
     } else {
-      __ subq(new_value.reg(), Immediate(Smi::FromInt(1)));
+      __ subl(kScratchRegister, Immediate(Smi::FromInt(1)));
     }
-
-    // If the count operation didn't overflow and the result is a valid
-    // smi, we're done. Otherwise, we jump to the deferred slow-case
-    // code.
-
-    // We combine the overflow and the smi tag check.
-    __ setcc(overflow, kScratchRegister);
-    __ or_(kScratchRegister, new_value.reg());
-    __ testl(kScratchRegister, Immediate(kSmiTagMask));
+    // On overflow the cmov below copies the smi tag mask into the
+    // scratch register, forcing the combined smi-and-overflow test
+    // to branch to the deferred code.
+    __ cmovl(overflow, kScratchRegister, tmp.reg());
+    __ testl(kScratchRegister, tmp.reg());
     tmp.Unuse();
     deferred->Branch(not_zero);
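+    // No overflow and the result is a smi: commit the new value.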
+    __ movq(new_value.reg(), kScratchRegister);
 
     deferred->BindExit();
 
     // Postfix: store the old value in the allocated slot under the
     // reference.
     if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
@@ -5081,12 +5062,12 @@
     // Perform the operation.
     switch (op) {
       case Token::SAR:
-        __ sar(answer.reg());
+        __ sarl(answer.reg());
         // No checks of result necessary
         break;
       case Token::SHR: {
         Label result_ok;
-        __ shr(answer.reg());
+        __ shrl(answer.reg());
         // Check that the *unsigned* result fits in a smi.  Neither of
         // the two high-order bits can be set:
         //  * 0x80000000: high bit would be lost when smi tagging.
@@ -5109,7 +5090,7 @@
         Label result_ok;
         __ shl(answer.reg());
         // Check that the *signed* result fits in a smi.
-        __ cmpq(answer.reg(), Immediate(0xc0000000));
+        __ cmpl(answer.reg(), Immediate(0xc0000000));
         __ j(positive, &result_ok);
         ASSERT(kSmiTag == 0);
         __ shl(rcx, Immediate(kSmiTagSize));
@@ -6675,12 +6656,12 @@
       // Move the second operand into register rcx.
       __ movq(rcx, rbx);
       // Remove tags from operands (but keep sign).
-      __ sar(rax, Immediate(kSmiTagSize));
-      __ sar(rcx, Immediate(kSmiTagSize));
+      __ sarl(rax, Immediate(kSmiTagSize));
+      __ sarl(rcx, Immediate(kSmiTagSize));
       // Perform the operation.
       switch (op_) {
         case Token::SAR:
-          __ sar(rax);
+          __ sarl(rax);
           // No checks of result necessary
           break;
         case Token::SHR:
@@ -6691,19 +6672,17 @@
           // - 0x40000000: this number would convert to negative when
           // Smi tagging.  These two cases can only happen with shifts
           // by 0 or 1 when handed a valid smi.
-          __ testq(rax, Immediate(0xc0000000));
+          __ testl(rax, Immediate(0xc0000000));
           __ j(not_zero, slow);
           break;
         case Token::SHL:
           __ shll(rax);
-          // TODO(Smi): Significant change if Smi changes.
           // Check that the *signed* result fits in a smi.
           // It does, if the 30th and 31st bits are equal, since then
           // shifting the SmiTag in at the bottom doesn't change the sign.
           ASSERT(kSmiTagSize == 1);
           __ cmpl(rax, Immediate(0xc0000000));
           __ j(sign, slow);
-          __ movsxlq(rax, rax);  // Extend new sign of eax into rax.
           break;
         default:
           UNREACHABLE();
@@ -6815,7 +6794,6 @@
         __ testl(rax, Immediate(1));
         __ j(not_zero, &operand_conversion_failure);
       } else {
-        // TODO(X64): Verify that SSE3 is always supported, drop this code.
         // Check if right operand is int32.
         __ fist_s(Operand(rsp, 0 * kPointerSize));
         __ fild_s(Operand(rsp, 0 * kPointerSize));
@@ -6842,9 +6820,9 @@
         case Token::BIT_OR:  __ or_(rax, rcx); break;
         case Token::BIT_AND: __ and_(rax, rcx); break;
         case Token::BIT_XOR: __ xor_(rax, rcx); break;
-        case Token::SAR: __ sar(rax); break;
-        case Token::SHL: __ shl(rax); break;
-        case Token::SHR: __ shr(rax); break;
+        case Token::SAR: __ sarl(rax); break;
+        case Token::SHL: __ shll(rax); break;
+        case Token::SHR: __ shrl(rax); break;
         default: UNREACHABLE();
       }
       if (op_ == Token::SHR) {
diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc
index 3b10132..e94e781 100644
--- a/src/x64/debug-x64.cc
+++ b/src/x64/debug-x64.cc
@@ -38,8 +38,10 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
 
 bool Debug::IsDebugBreakAtReturn(v8::internal::RelocInfo* rinfo) {
-  UNIMPLEMENTED();
-  return false;
+  ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
+  // 11th byte of patch is 0x49, 11th byte of JS return is 0xCC (int3).
+  ASSERT(*(rinfo->pc() + 10) == 0x49 || *(rinfo->pc() + 10) == 0xCC);
+  return (*(rinfo->pc() + 10) == 0x49);
 }
 
 void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 1a24694..c577615 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -42,7 +42,8 @@
 Object* CallStubCompiler::CompileCallConstant(Object* a,
                                               JSObject* b,
                                               JSFunction* c,
-                                              StubCompiler::CheckType d) {
+                                              String* d,
+                                              StubCompiler::CheckType e) {
   UNIMPLEMENTED();
   return NULL;
 }
@@ -65,7 +66,8 @@
 
 
 
-Object* CallStubCompiler::CompileCallGlobal(GlobalObject* object,
+Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
+                                            GlobalObject* holder,
                                             JSGlobalPropertyCell* cell,
                                             JSFunction* function,
                                             String* name) {
@@ -109,7 +111,8 @@
 }
 
 
-Object* LoadStubCompiler::CompileLoadGlobal(GlobalObject* object,
+Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
+                                            GlobalObject* holder,
                                             JSGlobalPropertyCell* cell,
                                             String* name,
                                             bool is_dont_delete) {