Version 3.16.12

Performance and stability improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@13573 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 656c2fc..a683b1b 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,8 @@
+2013-01-31: Version 3.16.12
+
+        Performance and stability improvements on all platforms.
+
+
 2013-01-30: Version 3.16.11
 
         Put making embedded maps in optimized code weak behind a flag.
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index ead6674..8b4d007 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -101,42 +101,7 @@
     if (index >= kDoubleRegZero.code())
       index += kNumReservedRegisters;
 
-    // TODO(hans): Maybe this could just use VFPRegisters::Name()?
-    const char* const names[] = {
-      "d0",
-      "d1",
-      "d2",
-      "d3",
-      "d4",
-      "d5",
-      "d6",
-      "d7",
-      "d8",
-      "d9",
-      "d10",
-      "d11",
-      "d12",
-      "d13"
-      "d14",
-      "d15",
-      "d16",
-      "d17",
-      "d18",
-      "d19",
-      "d20",
-      "d21",
-      "d22",
-      "d23",
-      "d24",
-      "d25",
-      "d26",
-      "d27",
-      "d28",
-      "d29",
-      "d30",
-      "d31"
-    };
-    return names[index];
+    return VFPRegisters::Name(index, true);
   } else {
     ASSERT(index == 0);
     return "sfpd0";
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index d0b4d1e..1236e31 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -45,7 +45,7 @@
   descriptor->register_param_count_ = 2;
   descriptor->register_params_ = registers;
   descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(KeyedLoadIC_Miss);
+      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
 }
 
 
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index a822a8e..f4cab9d 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -1128,23 +1128,18 @@
   }
   __ pop(r0);  // Restore deoptimizer object (class Deoptimizer).
 
-  // TODO(hans): Change the code below to not clobber r0, so that it can be
-  // used in the "restore the d registers" code further down, making this mov
-  // redundant.
-  __ mov(r4, r0);
-
   // Replace the current (input) frame with the output frames.
   Label outer_push_loop, inner_push_loop,
       outer_loop_header, inner_loop_header;
-  // Outer loop state: r0 = current "FrameDescription** output_",
+  // Outer loop state: r4 = current "FrameDescription** output_",
   // r1 = one past the last FrameDescription**.
   __ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset()));
-  __ ldr(r0, MemOperand(r0, Deoptimizer::output_offset()));  // r0 is output_.
-  __ add(r1, r0, Operand(r1, LSL, 2));
+  __ ldr(r4, MemOperand(r0, Deoptimizer::output_offset()));  // r4 is output_.
+  __ add(r1, r4, Operand(r1, LSL, 2));
   __ jmp(&outer_loop_header);
   __ bind(&outer_push_loop);
   // Inner loop state: r2 = current FrameDescription*, r3 = loop index.
-  __ ldr(r2, MemOperand(r0, 0));  // output_[ix]
+  __ ldr(r2, MemOperand(r4, 0));  // output_[ix]
   __ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
   __ jmp(&inner_loop_header);
   __ bind(&inner_push_loop);
@@ -1155,9 +1150,9 @@
   __ bind(&inner_loop_header);
   __ cmp(r3, Operand::Zero());
   __ b(ne, &inner_push_loop);  // test for gt?
-  __ add(r0, r0, Operand(kPointerSize));
+  __ add(r4, r4, Operand(kPointerSize));
   __ bind(&outer_loop_header);
-  __ cmp(r0, r1);
+  __ cmp(r4, r1);
   __ b(lt, &outer_push_loop);
 
   if (CpuFeatures::IsSupported(VFP2)) {
@@ -1167,7 +1162,7 @@
       // Check CPU flags for number of registers, setting the Z condition flag.
       __ CheckFor32DRegs(ip);
 
-      __ ldr(r1, MemOperand(r4, Deoptimizer::input_offset()));
+      __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
       int src_offset = FrameDescription::double_registers_offset();
       for (int i = 0; i < DwVfpRegister::kNumRegisters; ++i) {
         if (i == kDoubleRegZero.code()) continue;
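
The register shuffle in this file exists because the frame-pushing loops
used to clobber r0, forcing the old code to stash it in r4 up front. Letting
the loops iterate on r4 instead keeps r0 (the Deoptimizer object) alive for
the d-register restore below, which is what makes the deleted mov redundant.
A C++ sketch of the same discipline, with stand-in types:

    struct FrameDescription;
    struct Deopt {
      FrameDescription** output_;  // the "output_" array
      int output_count_;
      FrameDescription* input_;    // still needed after the loops
    };

    void PushOutputFrames(Deopt* deopt /* r0 */) {
      FrameDescription** cursor = deopt->output_;              // r4
      FrameDescription** end = cursor + deopt->output_count_;  // r1
      for (; cursor < end; ++cursor) {
        // ... inner loop pushes the contents of *cursor ...
      }
      // deopt (r0) survived the loops, so input_ can be read directly.
    }
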
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index d153578..5a0c0b2 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -766,7 +766,7 @@
   // even though s_registers_ & d_registers_ share the same
   // physical registers in the target.
   for (int i = 0; i < num_d_registers * 2; i++) {
-    vfp_register[i] = 0;
+    vfp_registers_[i] = 0;
   }
   n_flag_FPSCR_ = false;
   z_flag_FPSCR_ = false;
@@ -901,7 +901,7 @@
   double dm_val = 0.0;
   // Read the bits from the unsigned integer register_[] array
   // into the double precision floating point value and return it.
-  char buffer[2 * sizeof(vfp_register[0])];
+  char buffer[2 * sizeof(vfp_registers_[0])];
   memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
   memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
   return(dm_val);
@@ -936,13 +936,13 @@
 // Getting from and setting into VFP registers.
 void Simulator::set_s_register(int sreg, unsigned int value) {
   ASSERT((sreg >= 0) && (sreg < num_s_registers));
-  vfp_register[sreg] = value;
+  vfp_registers_[sreg] = value;
 }
 
 
 unsigned int Simulator::get_s_register(int sreg) const {
   ASSERT((sreg >= 0) && (sreg < num_s_registers));
-  return vfp_register[sreg];
+  return vfp_registers_[sreg];
 }
 
 
@@ -952,10 +952,10 @@
   if (register_size == 1) ASSERT(reg_index < num_s_registers);
   if (register_size == 2) ASSERT(reg_index < DwVfpRegister::NumRegisters());
 
-  char buffer[register_size * sizeof(vfp_register[0])];
-  memcpy(buffer, &value, register_size * sizeof(vfp_register[0]));
-  memcpy(&vfp_register[reg_index * register_size], buffer,
-         register_size * sizeof(vfp_register[0]));
+  char buffer[register_size * sizeof(vfp_registers_[0])];
+  memcpy(buffer, &value, register_size * sizeof(vfp_registers_[0]));
+  memcpy(&vfp_registers_[reg_index * register_size], buffer,
+         register_size * sizeof(vfp_registers_[0]));
 }
 
 
@@ -966,10 +966,10 @@
   if (register_size == 2) ASSERT(reg_index < DwVfpRegister::NumRegisters());
 
   ReturnType value = 0;
-  char buffer[register_size * sizeof(vfp_register[0])];
-  memcpy(buffer, &vfp_register[register_size * reg_index],
-         register_size * sizeof(vfp_register[0]));
-  memcpy(&value, buffer, register_size * sizeof(vfp_register[0]));
+  char buffer[register_size * sizeof(vfp_registers_[0])];
+  memcpy(buffer, &vfp_registers_[register_size * reg_index],
+         register_size * sizeof(vfp_registers_[0]));
+  memcpy(&value, buffer, register_size * sizeof(vfp_registers_[0]));
   return value;
 }
 
@@ -978,8 +978,8 @@
 // from r0-r3 or d0 and d1.
 void Simulator::GetFpArgs(double* x, double* y) {
   if (use_eabi_hardfloat()) {
-    *x = vfp_register[0];
-    *y = vfp_register[1];
+    *x = vfp_registers_[0];
+    *y = vfp_registers_[1];
   } else {
     // We use a char buffer to get around the strict-aliasing rules which
     // otherwise allow the compiler to optimize away the copy.
@@ -997,7 +997,7 @@
 // from r0 and r1 or d0.
 void Simulator::GetFpArgs(double* x) {
   if (use_eabi_hardfloat()) {
-    *x = vfp_register[0];
+    *x = vfp_registers_[0];
   } else {
     // We use a char buffer to get around the strict-aliasing rules which
     // otherwise allow the compiler to optimize away the copy.
@@ -1013,7 +1013,7 @@
 // from r0 and r1 or d0 and one integer value.
 void Simulator::GetFpArgs(double* x, int32_t* y) {
   if (use_eabi_hardfloat()) {
-    *x = vfp_register[0];
+    *x = vfp_registers_[0];
     *y = registers_[1];
   } else {
     // We use a char buffer to get around the strict-aliasing rules which
@@ -1032,10 +1032,10 @@
 // The return value is either in r0/r1 or d0.
 void Simulator::SetFpResult(const double& result) {
   if (use_eabi_hardfloat()) {
-    char buffer[2 * sizeof(vfp_register[0])];
+    char buffer[2 * sizeof(vfp_registers_[0])];
     memcpy(buffer, &result, sizeof(buffer));
     // Copy result to d0.
-    memcpy(vfp_register, buffer, sizeof(buffer));
+    memcpy(vfp_registers_, buffer, sizeof(buffer));
   } else {
     char buffer[2 * sizeof(registers_[0])];
     memcpy(buffer, &result, sizeof(buffer));
@@ -1692,18 +1692,18 @@
         switch (redirection->type()) {
         case ExternalReference::BUILTIN_FP_FP_CALL:
         case ExternalReference::BUILTIN_COMPARE_CALL:
-          arg0 = vfp_register[0];
-          arg1 = vfp_register[1];
-          arg2 = vfp_register[2];
-          arg3 = vfp_register[3];
+          arg0 = vfp_registers_[0];
+          arg1 = vfp_registers_[1];
+          arg2 = vfp_registers_[2];
+          arg3 = vfp_registers_[3];
           break;
         case ExternalReference::BUILTIN_FP_CALL:
-          arg0 = vfp_register[0];
-          arg1 = vfp_register[1];
+          arg0 = vfp_registers_[0];
+          arg1 = vfp_registers_[1];
           break;
         case ExternalReference::BUILTIN_FP_INT_CALL:
-          arg0 = vfp_register[0];
-          arg1 = vfp_register[1];
+          arg0 = vfp_registers_[0];
+          arg1 = vfp_registers_[1];
           arg2 = get_register(0);
           break;
         default:
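
Beyond resolving the rename TODO (see simulator-arm.h below), every routine
in this file leans on the idiom its comments describe: copying bits through
a char buffer with memcpy so the reinterpretation never violates strict
aliasing. A minimal standalone version of the technique:

    #include <cstdint>
    #include <cstring>

    double ReadDoubleFromRegisterPair(const uint32_t* regs, int index) {
      double value;
      // memcpy is the aliasing-safe way to reinterpret two consecutive
      // 32-bit register halves as one 64-bit double; compilers lower
      // this to a plain load.
      std::memcpy(&value, &regs[index], sizeof(value));
      return value;
    }
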
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index 7be4b6b..907a590 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -373,8 +373,7 @@
   bool v_flag_;
 
   // VFP architecture state.
-  // TODO(hans): Rename vfp_register to vfp_registers_.
-  unsigned int vfp_register[num_d_registers * 2];
+  unsigned int vfp_registers_[num_d_registers * 2];
   bool n_flag_FPSCR_;
   bool z_flag_FPSCR_;
   bool c_flag_FPSCR_;
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index f513c8a..ca82ba1 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -2894,9 +2894,11 @@
 }
 
 
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
-                                                      Handle<JSObject> object,
-                                                      Handle<JSObject> last) {
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
+    Handle<String> name,
+    Handle<JSObject> object,
+    Handle<JSObject> last,
+    Handle<GlobalObject> global) {
   // ----------- S t a t e -------------
   //  -- r0    : receiver
   //  -- lr    : return address
@@ -2906,14 +2908,24 @@
   // Check that receiver is not a smi.
   __ JumpIfSmi(r0, &miss);
 
+  Register scratch = r1;
+
   // Check the maps of the full prototype chain.
-  CheckPrototypes(object, r0, last, r3, r1, r4, name, &miss);
+  Register result =
+      CheckPrototypes(object, r0, last, r3, scratch, r4, name, &miss);
 
   // If the last object in the prototype chain is a global object,
   // check that the global property cell is empty.
-  if (last->IsGlobalObject()) {
-    GenerateCheckPropertyCell(
-        masm(), Handle<GlobalObject>::cast(last), name, r1, &miss);
+  if (!global.is_null()) {
+    GenerateCheckPropertyCell(masm(), global, name, scratch, &miss);
+  }
+
+  if (!last->HasFastProperties()) {
+    __ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
+    __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
+    __ cmp(scratch, Operand(isolate()->factory()->null_value()));
+    __ b(ne, &miss);
   }
 
   // Return undefined if maps of the full prototype chain are still the
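
The new HasFastProperties branch covers holders with dictionary properties:
their maps cannot vouch for a property's absence, so the stub additionally
verifies that the prototype chain genuinely terminates. The ldr/ldr/cmp
sequence above reduces to the following check, shown here as plain C++ over
model types (illustrative only):

    struct Map { void* prototype; };
    struct HeapObject { Map* map; };

    // ldr scratch, [result, #kMapOffset]
    // ldr scratch, [scratch, #kPrototypeOffset]
    // cmp scratch, null_value; b ne, &miss
    bool ChainTerminates(HeapObject* result, void* null_value) {
      return result->map->prototype == null_value;  // otherwise: miss
    }
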
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 0b0332a..ca9bf1d 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -417,6 +417,12 @@
             "trace progress of the incremental marking")
 DEFINE_bool(track_gc_object_stats, false,
             "track object counts and memory usage")
+DEFINE_bool(parallel_sweeping, false, "enable parallel sweeping")
+DEFINE_bool(concurrent_sweeping, false, "enable concurrent sweeping")
+DEFINE_int(sweeper_threads, 1,
+           "number of parallel and concurrent sweeping threads")
+DEFINE_bool(parallel_marking, false, "enable parallel marking")
+DEFINE_int(marking_threads, 1, "number of parallel marking threads")
 #ifdef VERIFY_HEAP
 DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
 #endif
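
Embedders can turn the new threads on like any other V8 flag before
initialization; a hedged usage sketch (the thread count here is arbitrary):

    #include "v8.h"

    void EnableParallelGC() {
      const char flags[] = "--parallel_sweeping --sweeper_threads=4";
      v8::V8::SetFlagsFromString(flags, static_cast<int>(sizeof(flags) - 1));
    }
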
diff --git a/src/heap.cc b/src/heap.cc
index 14c44e8..8d398ec 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -145,6 +145,8 @@
       min_in_mutator_(kMaxInt),
       alive_after_last_gc_(0),
       last_gc_end_timestamp_(0.0),
+      marking_time_(0.0),
+      sweeping_time_(0.0),
       store_buffer_(this),
       marking_(this),
       incremental_marking_(this),
@@ -1306,7 +1308,8 @@
 
   incremental_marking()->PrepareForScavenge();
 
-  AdvanceSweepers(static_cast<int>(new_space_.Size()));
+  paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
+  paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());
 
   // Flip the semispaces.  After flipping, to space is empty, from space has
   // live objects.
@@ -5418,9 +5421,9 @@
   // 3. many lazy sweep steps.
   // Use mark-sweep-compact events to count incremental GCs in a round.
 
-
   if (incremental_marking()->IsStopped()) {
-    if (!IsSweepingComplete() &&
+    if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
+        !IsSweepingComplete() &&
         !AdvanceSweepers(static_cast<int>(step_size))) {
       return false;
     }
@@ -6338,7 +6341,7 @@
 #endif
 
   if (FLAG_print_cumulative_gc_stat) {
-    PrintF("\n\n");
+    PrintF("\n");
     PrintF("gc_count=%d ", gc_count_);
     PrintF("mark_sweep_count=%d ", ms_count_);
     PrintF("max_gc_pause=%d ", get_max_gc_pause());
@@ -6346,6 +6349,8 @@
     PrintF("min_in_mutator=%d ", get_min_in_mutator());
     PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
            get_max_alive_after_gc());
+    PrintF("total_marking_time=%f ", marking_time());
+    PrintF("total_sweeping_time=%f ", sweeping_time());
     PrintF("\n\n");
   }
 
@@ -7036,6 +7041,9 @@
 
   if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
 
+  heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);
+
+  if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
   PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
 
   if (!FLAG_trace_gc_nvp) {
diff --git a/src/heap.h b/src/heap.h
index 5db1c95..4f9b672 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1596,6 +1596,24 @@
   // Returns minimal interval between two subsequent collections.
   int get_min_in_mutator() { return min_in_mutator_; }
 
+  // TODO(hpayer): remove, should be handled by GCTracer
+  void AddMarkingTime(double marking_time) {
+    marking_time_ += marking_time;
+  }
+
+  double marking_time() const {
+    return marking_time_;
+  }
+
+  // TODO(hpayer): remove, should be handled by GCTracer
+  void AddSweepingTime(double sweeping_time) {
+    sweeping_time_ += sweeping_time;
+  }
+
+  double sweeping_time() const {
+    return sweeping_time_;
+  }
+
   MarkCompactCollector* mark_compact_collector() {
     return &mark_compact_collector_;
   }
@@ -1618,6 +1636,7 @@
   }
 
   bool AdvanceSweepers(int step_size) {
+    ASSERT(!FLAG_parallel_sweeping && !FLAG_concurrent_sweeping);
     bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
     sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
     return sweeping_complete;
@@ -2035,7 +2054,6 @@
 
   GCTracer* tracer_;
 
-
   // Allocates a small number to string cache.
   MUST_USE_RESULT MaybeObject* AllocateInitialNumberStringCache();
   // Creates and installs the full-sized number string cache.
@@ -2173,6 +2191,12 @@
 
   double last_gc_end_timestamp_;
 
+  // Cumulative GC time spent in marking.
+  double marking_time_;
+
+  // Cumulative GC time spent in sweeping.
+  double sweeping_time_;
+
   MarkCompactCollector mark_compact_collector_;
 
   StoreBuffer store_buffer_;
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 25ff1d5..2c6b0a5 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -48,7 +48,7 @@
   descriptor->register_param_count_ = 2;
   descriptor->register_params_ = registers;
   descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(KeyedLoadIC_Miss);
+      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
 }
 
 
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 805016e..89f606d 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -2964,9 +2964,11 @@
 }
 
 
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
-                                                      Handle<JSObject> object,
-                                                      Handle<JSObject> last) {
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
+    Handle<String> name,
+    Handle<JSObject> object,
+    Handle<JSObject> last,
+    Handle<GlobalObject> global) {
   // ----------- S t a t e -------------
   //  -- ecx    : name
   //  -- edx    : receiver
@@ -2977,18 +2979,25 @@
   // Check that the receiver isn't a smi.
   __ JumpIfSmi(edx, &miss);
 
-  ASSERT(last->IsGlobalObject() || last->HasFastProperties());
+  Register scratch = eax;
 
   // Check the maps of the full prototype chain. Also check that
   // global property cells up to (but not including) the last object
   // in the prototype chain are empty.
-  CheckPrototypes(object, edx, last, ebx, eax, edi, name, &miss);
+  Register result =
+      CheckPrototypes(object, edx, last, ebx, scratch, edi, name, &miss);
 
   // If the last object in the prototype chain is a global object,
   // check that the global property cell is empty.
-  if (last->IsGlobalObject()) {
-    GenerateCheckPropertyCell(
-        masm(), Handle<GlobalObject>::cast(last), name, eax, &miss);
+  if (!global.is_null()) {
+    GenerateCheckPropertyCell(masm(), global, name, scratch, &miss);
+  }
+
+  if (!last->HasFastProperties()) {
+    __ mov(scratch, FieldOperand(result, HeapObject::kMapOffset));
+    __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
+    __ cmp(scratch, isolate()->factory()->null_value());
+    __ j(not_equal, &miss);
   }
 
   // Return undefined if maps of the full prototype chain are still the
diff --git a/src/ic.cc b/src/ic.cc
index cd9095b..b74b0d9 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -111,16 +111,30 @@
   ASSERT((TraceIC(type, name, old_state, new_target), true))
 
 IC::IC(FrameDepth depth, Isolate* isolate) : isolate_(isolate) {
-  ASSERT(isolate == Isolate::Current());
+  // To improve the performance of the (much-used) IC code, we unfold a few
+  // levels of the stack-frame iteration code. This yields a ~35% speedup when
+  // running DeltaBlue and a ~25% speedup for gbemu with the '--nouse-ic' flag.
+  const Address entry =
+      Isolate::c_entry_fp(isolate->thread_local_top());
+  Address* pc_address =
+      reinterpret_cast<Address*>(entry + ExitFrameConstants::kCallerPCOffset);
+  Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
+  // If there's another JavaScript frame on the stack or a
+  // StubFailureTrampoline, we need to look one frame further down the stack to
+  // find the frame pointer and the return address stack slot.
+  if (depth == EXTRA_CALL_FRAME) {
+    const int kCallerPCOffset = StandardFrameConstants::kCallerPCOffset;
+    pc_address = reinterpret_cast<Address*>(fp + kCallerPCOffset);
+    fp = Memory::Address_at(fp + StandardFrameConstants::kCallerFPOffset);
+  }
+#ifdef DEBUG
   StackFrameIterator it;
   for (int i = 0; i < depth + 1; i++) it.Advance();
-  // Skip StubFailureTrampolineFrames
-  if (it.frame()->is_stub_failure_trampoline()) {
-    it.Advance();
-  }
   StackFrame* frame = it.frame();
-  fp_ = frame->fp();
-  pc_address_ = frame->pc_address();
+  ASSERT(fp == frame->fp() && pc_address == frame->pc_address());
+#endif
+  fp_ = fp;
+  pc_address_ = pc_address;
 }
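
The rewritten constructor trades the general StackFrameIterator for at most
two direct loads off the saved exit frame, keeping the iterator only as a
DEBUG cross-check. A generic illustration (not V8 code) of why a frame walk
of statically known depth can be unfolded this way:

    struct Frame { Frame* caller_fp; void* return_pc; };

    // The general walk loads caller_fp 'depth' times; when depth is known
    // to be 0 or 1 (NO_EXTRA_FRAME vs. EXTRA_CALL_FRAME), it collapses to
    // at most one extra load, which is what the code above hand-unfolds.
    Frame* CallerFrame(Frame* fp, int depth) {
      for (int i = 0; i < depth; i++) fp = fp->caller_fp;
      return fp;
    }
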
 
 
@@ -155,26 +169,6 @@
 #endif
 
 
-static bool HasNormalObjectsInPrototypeChain(Isolate* isolate,
-                                             LookupResult* lookup,
-                                             Object* receiver) {
-  Object* end = lookup->IsProperty()
-      ? lookup->holder() : Object::cast(isolate->heap()->null_value());
-  for (Object* current = receiver;
-       current != end;
-       current = current->GetPrototype()) {
-    if (current->IsJSObject() &&
-        !JSObject::cast(current)->HasFastProperties() &&
-        !current->IsJSGlobalProxy() &&
-        !current->IsJSGlobalObject()) {
-      return true;
-    }
-  }
-
-  return false;
-}
-
-
 static bool TryRemoveInvalidPrototypeDependentStub(Code* target,
                                                    Object* receiver,
                                                    Object* name) {
@@ -686,14 +680,6 @@
   // Bail out if we didn't find a result.
   if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
 
-  if (lookup->holder() != *object &&
-      HasNormalObjectsInPrototypeChain(
-          isolate(), lookup, object->GetPrototype())) {
-    // Suppress optimization for prototype chains with slow properties objects
-    // in the middle.
-    return;
-  }
-
   // Compute the number of arguments.
   int argc = target()->arguments_count();
   Handle<Code> code;
@@ -1009,8 +995,6 @@
   // deal with non-JS objects here.
   if (!object->IsJSObject()) return;
 
-  if (HasNormalObjectsInPrototypeChain(isolate(), lookup, *object)) return;
-
   Handle<JSObject> receiver = Handle<JSObject>::cast(object);
   Handle<Code> code;
   if (state == UNINITIALIZED) {
@@ -1876,7 +1860,7 @@
 RUNTIME_FUNCTION(MaybeObject*, LoadIC_Miss) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
-  LoadIC ic(isolate);
+  LoadIC ic(IC::NO_EXTRA_FRAME, isolate);
   IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
   return ic.Load(state, args.at<Object>(0), args.at<String>(1));
 }
@@ -1886,7 +1870,16 @@
 RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_Miss) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
-  KeyedLoadIC ic(isolate);
+  KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate);
+  IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
+  return ic.Load(state, args.at<Object>(0), args.at<Object>(1), MISS);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissFromStubFailure) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 2);
+  KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate);
   IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
   return ic.Load(state, args.at<Object>(0), args.at<Object>(1), MISS);
 }
@@ -1895,7 +1888,7 @@
 RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissForceGeneric) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
-  KeyedLoadIC ic(isolate);
+  KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate);
   IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
   return ic.Load(state,
                  args.at<Object>(0),
diff --git a/src/ic.h b/src/ic.h
index ea6aa05..cb31640 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -342,7 +342,7 @@
 
 class LoadIC: public IC {
  public:
-  explicit LoadIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {
+  explicit LoadIC(FrameDepth depth, Isolate* isolate) : IC(depth, isolate) {
     ASSERT(target()->is_load_stub() || target()->is_keyed_load_stub());
   }
 
@@ -404,7 +404,8 @@
 
 class KeyedLoadIC: public LoadIC {
  public:
-  explicit KeyedLoadIC(Isolate* isolate) : LoadIC(isolate) {
+  explicit KeyedLoadIC(FrameDepth depth, Isolate* isolate)
+      : LoadIC(depth, isolate) {
     ASSERT(target()->is_keyed_load_stub());
   }
 
@@ -813,6 +814,7 @@
 void PatchInlinedSmiCode(Address address, InlinedSmiCheck check);
 
 DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_Miss);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissFromStubFailure);
 
 } }  // namespace v8::internal
 
diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc
index ef7dbe0..97332c5 100644
--- a/src/incremental-marking.cc
+++ b/src/incremental-marking.cc
@@ -754,18 +754,24 @@
 void IncrementalMarking::Hurry() {
   if (state() == MARKING) {
     double start = 0.0;
-    if (FLAG_trace_incremental_marking) {
-      PrintF("[IncrementalMarking] Hurry\n");
+    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
       start = OS::TimeCurrentMillis();
+      if (FLAG_trace_incremental_marking) {
+        PrintF("[IncrementalMarking] Hurry\n");
+      }
     }
     // TODO(gc) hurry can mark objects it encounters black as mutator
     // was stopped.
     ProcessMarkingDeque();
     state_ = COMPLETE;
-    if (FLAG_trace_incremental_marking) {
+    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
       double end = OS::TimeCurrentMillis();
-      PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
-             static_cast<int>(end - start));
+      double delta = end - start;
+      heap_->AddMarkingTime(delta);
+      if (FLAG_trace_incremental_marking) {
+        PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
+               static_cast<int>(delta));
+      }
     }
   }
 
@@ -889,7 +895,8 @@
 
   double start = 0;
 
-  if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
+  if (FLAG_trace_incremental_marking || FLAG_trace_gc ||
+      FLAG_print_cumulative_gc_stat) {
     start = OS::TimeCurrentMillis();
   }
 
@@ -969,12 +976,14 @@
     }
   }
 
-  if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
+  if (FLAG_trace_incremental_marking || FLAG_trace_gc ||
+      FLAG_print_cumulative_gc_stat) {
     double end = OS::TimeCurrentMillis();
     double delta = (end - start);
     longest_step_ = Max(longest_step_, delta);
     steps_took_ += delta;
     steps_took_since_last_gc_ += delta;
+    heap_->AddMarkingTime(delta);
   }
 }
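
Each hunk above repeats the same bookkeeping: sample OS::TimeCurrentMillis()
whenever any interested flag is set, then feed the delta to
heap_->AddMarkingTime(). An RAII sketch of equivalent accounting
(illustrative only; the patch itself keeps the explicit start/end reads):

    #include <chrono>

    class ScopedMillis {
     public:
      explicit ScopedMillis(double* total)
          : total_(total), start_(std::chrono::steady_clock::now()) {}
      ~ScopedMillis() {
        std::chrono::duration<double, std::milli> delta =
            std::chrono::steady_clock::now() - start_;
        *total_ += delta.count();  // cf. heap_->AddMarkingTime(delta)
      }
     private:
      double* total_;
      std::chrono::steady_clock::time_point start_;
    };
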
 
diff --git a/src/isolate.cc b/src/isolate.cc
index f0eb36d..d70e19e 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -40,6 +40,7 @@
 #include "isolate.h"
 #include "lithium-allocator.h"
 #include "log.h"
+#include "marking-thread.h"
 #include "messages.h"
 #include "platform.h"
 #include "regexp-stack.h"
@@ -49,6 +50,7 @@
 #include "simulator.h"
 #include "spaces.h"
 #include "stub-cache.h"
+#include "sweeper-thread.h"
 #include "version.h"
 #include "vm-state-inl.h"
 
@@ -1651,7 +1653,9 @@
       code_stub_interface_descriptors_(NULL),
       context_exit_happened_(false),
       deferred_handles_head_(NULL),
-      optimizing_compiler_thread_(this) {
+      optimizing_compiler_thread_(this),
+      marking_thread_(NULL),
+      sweeper_thread_(NULL) {
   TRACE_ISOLATE(constructor);
 
   memset(isolate_addresses_, 0,
@@ -1699,6 +1703,7 @@
 #undef ISOLATE_INIT_ARRAY_EXECUTE
 }
 
+
 void Isolate::TearDown() {
   TRACE_ISOLATE(tear_down);
 
@@ -1734,6 +1739,22 @@
   if (state_ == INITIALIZED) {
     TRACE_ISOLATE(deinit);
 
+    if (FLAG_concurrent_sweeping || FLAG_parallel_sweeping) {
+      for (int i = 0; i < FLAG_sweeper_threads; i++) {
+        sweeper_thread_[i]->Stop();
+        delete sweeper_thread_[i];
+      }
+      delete[] sweeper_thread_;
+    }
+
+    if (FLAG_parallel_marking) {
+      for (int i = 0; i < FLAG_marking_threads; i++) {
+        marking_thread_[i]->Stop();
+        delete marking_thread_[i];
+      }
+      delete[] marking_thread_;
+    }
+
     if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Stop();
 
     if (FLAG_hydrogen_stats) HStatistics::Instance()->Print();
@@ -2103,6 +2124,28 @@
   }
 
   if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Start();
+
+  if (FLAG_parallel_marking) {
+    if (FLAG_marking_threads < 1) {
+      FLAG_marking_threads = 1;
+    }
+    marking_thread_ = new MarkingThread*[FLAG_marking_threads];
+    for (int i = 0; i < FLAG_marking_threads; i++) {
+      marking_thread_[i] = new MarkingThread(this);
+      marking_thread_[i]->Start();
+    }
+  }
+
+  if (FLAG_parallel_sweeping || FLAG_concurrent_sweeping) {
+    if (FLAG_sweeper_threads < 1) {
+      FLAG_sweeper_threads = 1;
+    }
+    sweeper_thread_ = new SweeperThread*[FLAG_sweeper_threads];
+    for (int i = 0; i < FLAG_sweeper_threads; i++) {
+      sweeper_thread_[i] = new SweeperThread(this);
+      sweeper_thread_[i]->Start();
+    }
+  }
   return true;
 }
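
Init and Deinit now bracket the lifetime of the two raw thread arrays
symmetrically: allocate and Start() each worker on startup, Stop() and
delete each one (plus the array) on teardown. The pattern reduced to a
self-contained sketch (std::thread stands in for V8's own Thread class):

    #include <thread>
    #include <vector>

    class Worker {
     public:
      void Start() { thread_ = std::thread([] { /* park, await work */ }); }
      void Stop() { if (thread_.joinable()) thread_.join(); }
     private:
      std::thread thread_;
    };

    void Demo(int n) {
      std::vector<Worker*> workers(n);
      for (int i = 0; i < n; i++) {
        workers[i] = new Worker;
        workers[i]->Start();
      }
      for (int i = 0; i < n; i++) {
        workers[i]->Stop();  // must not return until the thread has exited
        delete workers[i];
      }
    }
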
 
diff --git a/src/isolate.h b/src/isolate.h
index 8ac8502..df70ba9 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -71,6 +71,7 @@
 class InlineRuntimeFunctionsTable;
 class NoAllocationStringAllocator;
 class InnerPointerToCodeCache;
+class MarkingThread;
 class PreallocatedMemoryThread;
 class RegExpStack;
 class SaveContext;
@@ -78,6 +79,7 @@
 class ConsStringIteratorOp;
 class StringTracker;
 class StubCache;
+class SweeperThread;
 class ThreadManager;
 class ThreadState;
 class ThreadVisitor;  // Defined in v8threads.h
@@ -531,11 +533,6 @@
     thread_local_top_.save_context_ = save;
   }
 
-  // Access to the map of "new Object()".
-  Map* empty_object_map() {
-    return context()->native_context()->object_function()->map();
-  }
-
   // Access to current thread id.
   ThreadId thread_id() { return thread_local_top_.thread_id_; }
   void set_thread_id(ThreadId id) { thread_local_top_.thread_id_ = id; }
@@ -1078,6 +1075,14 @@
   // TODO(svenpanne) This method is on death row...
   static v8::Isolate* GetDefaultIsolateForLocking();
 
+  MarkingThread** marking_threads() {
+    return marking_thread_;
+  }
+
+  SweeperThread** sweeper_threads() {
+    return sweeper_thread_;
+  }
+
  private:
   Isolate();
 
@@ -1301,11 +1306,15 @@
 
   DeferredHandles* deferred_handles_head_;
   OptimizingCompilerThread optimizing_compiler_thread_;
+  MarkingThread** marking_thread_;
+  SweeperThread** sweeper_thread_;
 
   friend class ExecutionAccess;
   friend class HandleScopeImplementer;
   friend class IsolateInitializer;
+  friend class MarkingThread;
   friend class OptimizingCompilerThread;
+  friend class SweeperThread;
   friend class ThreadManager;
   friend class Simulator;
   friend class StackGuard;
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index bfb4031..bdd4785 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -37,9 +37,11 @@
 #include "ic-inl.h"
 #include "incremental-marking.h"
 #include "mark-compact.h"
+#include "marking-thread.h"
 #include "objects-visiting.h"
 #include "objects-visiting-inl.h"
 #include "stub-cache.h"
+#include "sweeper-thread.h"
 
 namespace v8 {
 namespace internal {
@@ -503,6 +505,56 @@
 }
 
 
+void MarkCompactCollector::StartSweeperThreads() {
+  SweeperThread::set_sweeping_pending(true);
+  for (int i = 0; i < FLAG_sweeper_threads; i++) {
+    heap()->isolate()->sweeper_threads()[i]->StartSweeping();
+  }
+}
+
+
+void MarkCompactCollector::WaitUntilSweepingCompleted() {
+  if (SweeperThread::sweeping_pending()) {
+    for (int i = 0; i < FLAG_sweeper_threads; i++) {
+      heap()->isolate()->sweeper_threads()[i]->WaitForSweeperThread();
+    }
+    SweeperThread::set_sweeping_pending(false);
+    StealMemoryFromSweeperThreads(heap()->paged_space(OLD_DATA_SPACE));
+    StealMemoryFromSweeperThreads(heap()->paged_space(OLD_POINTER_SPACE));
+    heap()->FreeQueuedChunks();
+  }
+}
+
+
+intptr_t MarkCompactCollector::StealMemoryFromSweeperThreads(
+    PagedSpace* space) {
+  intptr_t freed_bytes = 0;
+  for (int i = 0; i < FLAG_sweeper_threads; i++) {
+    freed_bytes += heap()->isolate()->sweeper_threads()[i]->StealMemory(space);
+  }
+  return freed_bytes;
+}
+
+
+bool MarkCompactCollector::AreSweeperThreadsActivated() {
+  return heap()->isolate()->sweeper_threads() != NULL;
+}
+
+
+void MarkCompactCollector::MarkInParallel() {
+  for (int i = 0; i < FLAG_marking_threads; i++) {
+    heap()->isolate()->marking_threads()[i]->StartMarking();
+  }
+}
+
+
+void MarkCompactCollector::WaitUntilMarkingCompleted() {
+  for (int i = 0; i < FLAG_marking_threads; i++) {
+    heap()->isolate()->marking_threads()[i]->WaitForMarkingThread();
+  }
+}
+
+
 bool Marking::TransferMark(Address old_start, Address new_start) {
   // This is only used when resizing an object.
   ASSERT(MemoryChunk::FromAddress(old_start) ==
@@ -805,6 +857,11 @@
 
   ASSERT(!FLAG_never_compact || !FLAG_always_compact);
 
+  if (AreSweeperThreadsActivated() && FLAG_concurrent_sweeping) {
+    // Instead of waiting we could also abort the sweeper threads here.
+    WaitUntilSweepingCompleted();
+  }
+
   // Clear marking bits if incremental marking is aborted.
   if (was_marked_incrementally_ && abort_incremental_marking_) {
     heap()->incremental_marking()->Abort();
@@ -2832,6 +2889,11 @@
             space->identity() == CODE_SPACE);
   ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
 
+  double start_time = 0.0;
+  if (FLAG_print_cumulative_gc_stat) {
+    start_time = OS::TimeCurrentMillis();
+  }
+
   MarkBit::CellType* cells = p->markbits()->cells();
   p->MarkSweptPrecisely();
 
@@ -2897,6 +2959,9 @@
     space->Free(free_start, static_cast<int>(p->area_end() - free_start));
   }
   p->ResetLiveBytes();
+  if (FLAG_print_cumulative_gc_stat) {
+    space->heap()->AddSweepingTime(OS::TimeCurrentMillis() - start_time);
+  }
 }
 
 
@@ -3123,7 +3188,7 @@
 
         switch (space->identity()) {
           case OLD_DATA_SPACE:
-            SweepConservatively(space, p);
+            SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
             break;
           case OLD_POINTER_SPACE:
             SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
@@ -3482,6 +3547,33 @@
 }
 
 
+template<MarkCompactCollector::SweepingParallelism mode>
+static intptr_t Free(PagedSpace* space,
+                     FreeList* free_list,
+                     Address start,
+                     int size) {
+  if (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY) {
+    return space->Free(start, size);
+  } else {
+    return size - free_list->Free(start, size);
+  }
+}
+
+
+// Force instantiation of templatized SweepConservatively method for
+// SWEEP_SEQUENTIALLY mode.
+template intptr_t MarkCompactCollector::
+    SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
+        PagedSpace*, FreeList*, Page*);
+
+
+// Force instantiation of templatized SweepConservatively method for
+// SWEEP_IN_PARALLEL mode.
+template intptr_t MarkCompactCollector::
+    SweepConservatively<MarkCompactCollector::SWEEP_IN_PARALLEL>(
+        PagedSpace*, FreeList*, Page*);
+
+
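
The two "template intptr_t ..." declarations above are explicit
instantiations: SweepConservatively is now a template whose definition lives
in this .cc file, so each mode must be instantiated here or callers in other
translation units would fail to link. The mechanism in miniature:

    // sweep.cc: the definition is invisible to other translation units.
    template <int kMode>
    int F(int x) { return x + kMode; }

    // Emit both required specializations into this object file:
    template int F<0>(int);
    template int F<1>(int);
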
 // Sweeps a space conservatively.  After this has been done the larger free
 // spaces have been put on the free list and the smaller ones have been
 // ignored and left untouched.  A free space is always either ignored or put
@@ -3489,8 +3581,16 @@
 // because it means that any FreeSpace maps left actually describe a region of
 // memory that can be ignored when scanning.  Dead objects other than free
 // spaces will not contain the free space map.
-intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
+template<MarkCompactCollector::SweepingParallelism mode>
+intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
+                                                   FreeList* free_list,
+                                                   Page* p) {
   ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
+  ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL &&
+         free_list != NULL) ||
+         (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY &&
+         free_list == NULL));
+
   MarkBit::CellType* cells = p->markbits()->cells();
   p->MarkSweptConservatively();
 
@@ -3517,8 +3617,8 @@
   }
   size_t size = block_address - p->area_start();
   if (cell_index == last_cell_index) {
-    freed_bytes += static_cast<int>(space->Free(p->area_start(),
-                                                static_cast<int>(size)));
+    freed_bytes += Free<mode>(space, free_list, p->area_start(),
+                              static_cast<int>(size));
     ASSERT_EQ(0, p->LiveBytes());
     return freed_bytes;
   }
@@ -3527,8 +3627,9 @@
   Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
   // Free the first free space.
   size = free_end - p->area_start();
-  freed_bytes += space->Free(p->area_start(),
-                             static_cast<int>(size));
+  freed_bytes += Free<mode>(space, free_list, p->area_start(),
+                            static_cast<int>(size));
+
   // The start of the current free area is represented in undigested form by
   // the address of the last 32-word section that contained a live object and
   // the marking bitmap for that cell, which describes where the live object
@@ -3541,10 +3642,10 @@
   for ( ;
        cell_index < last_cell_index;
        cell_index++, block_address += 32 * kPointerSize) {
-    ASSERT(static_cast<unsigned>(cell_index) ==
-           Bitmap::IndexToCell(
-               Bitmap::CellAlignIndex(
-                   p->AddressToMarkbitIndex(block_address))));
+    ASSERT((unsigned)cell_index ==
+        Bitmap::IndexToCell(
+            Bitmap::CellAlignIndex(
+                p->AddressToMarkbitIndex(block_address))));
     uint32_t cell = cells[cell_index];
     if (cell != 0) {
       // We have a live object.  Check approximately whether it is more than 32
@@ -3557,8 +3658,8 @@
           // so now we need to find the start of the first live object at the
           // end of the free space.
           free_end = StartOfLiveObject(block_address, cell);
-          freed_bytes += space->Free(free_start,
-                                     static_cast<int>(free_end - free_start));
+          freed_bytes += Free<mode>(space, free_list, free_start,
+                                    static_cast<int>(free_end - free_start));
         }
       }
       // Update our undigested record of where the current free area started.
@@ -3572,8 +3673,8 @@
   // Handle the free space at the end of the page.
   if (block_address - free_start > 32 * kPointerSize) {
     free_start = DigestFreeStart(free_start, free_start_cell);
-    freed_bytes += space->Free(free_start,
-                               static_cast<int>(block_address - free_start));
+    freed_bytes += Free<mode>(space, free_list, free_start,
+                              static_cast<int>(block_address - free_start));
   }
 
   p->ResetLiveBytes();
@@ -3581,10 +3682,24 @@
 }
 
 
+void MarkCompactCollector::SweepInParallel(PagedSpace* space,
+                                           FreeList* private_free_list,
+                                           FreeList* free_list) {
+  PageIterator it(space);
+  while (it.has_next()) {
+    Page* p = it.next();
+
+    if (p->TryParallelSweeping()) {
+      SweepConservatively<SWEEP_IN_PARALLEL>(space, private_free_list, p);
+      free_list->Concatenate(private_free_list);
+    }
+  }
+}
+
+
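
TryParallelSweeping is not shown in this diff; presumably it performs an
atomic claim on the per-page flag that SweepSpace sets to 1 below, so that
exactly one sweeper thread wins each page. A compare-and-swap sketch of that
idea, under that assumption (the state encoding here is invented):

    #include <atomic>

    // 0 = nothing to do, 1 = pending parallel sweep, 2 = claimed (assumed).
    std::atomic<int> parallel_sweeping(1);

    bool TryClaimPage() {
      int expected = 1;
      // Only one thread can move the flag from 1 to 2.
      return parallel_sweeping.compare_exchange_strong(expected, 2);
    }
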
 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
   space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
                                       sweeper == LAZY_CONSERVATIVE);
-
   space->ClearStats();
 
   PageIterator it(space);
@@ -3597,6 +3712,7 @@
   while (it.has_next()) {
     Page* p = it.next();
 
+    ASSERT(p->parallel_sweeping() == 0);
     // Clear sweeping flags indicating that marking bits are still intact.
     p->ClearSweptPrecisely();
     p->ClearSweptConservatively();
@@ -3642,7 +3758,7 @@
           PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
                  reinterpret_cast<intptr_t>(p));
         }
-        SweepConservatively(space, p);
+        SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
         pages_swept++;
         break;
       }
@@ -3651,12 +3767,20 @@
           PrintF("Sweeping 0x%" V8PRIxPTR " conservatively as needed.\n",
                  reinterpret_cast<intptr_t>(p));
         }
-        freed_bytes += SweepConservatively(space, p);
+        freed_bytes += SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
         pages_swept++;
         space->SetPagesToSweep(p->next_page());
         lazy_sweeping_active = true;
         break;
       }
+      case PARALLEL_CONSERVATIVE: {
+        if (FLAG_gc_verbose) {
+          PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
+                 reinterpret_cast<intptr_t>(p));
+        }
+        p->set_parallel_sweeping(1);
+        break;
+      }
       case PRECISE: {
         if (FLAG_gc_verbose) {
           PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
@@ -3696,11 +3820,13 @@
       FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
   if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE;
   if (sweep_precisely_) how_to_sweep = PRECISE;
+  if (AreSweeperThreadsActivated()) how_to_sweep = PARALLEL_CONSERVATIVE;
   // Noncompacting collections simply sweep the spaces to clear the mark
   // bits and free the nonlive blocks (for old and map spaces).  We sweep
   // the map space last because freeing non-live maps overwrites them and
   // the other spaces rely on possibly non-live maps to get the sizes for
   // non-live objects.
+
   SweepSpace(heap()->old_pointer_space(), how_to_sweep);
   SweepSpace(heap()->old_data_space(), how_to_sweep);
 
@@ -3711,6 +3837,15 @@
 
   EvacuateNewSpaceAndCandidates();
 
+  if (AreSweeperThreadsActivated()) {
+    // TODO(hpayer): The sweeper threads should be started right after
+    // SweepSpace has swept the old data space.
+    StartSweeperThreads();
+    if (FLAG_parallel_sweeping && !FLAG_concurrent_sweeping) {
+      WaitUntilSweepingCompleted();
+    }
+  }
+
   // ClearNonLiveTransitions depends on precise sweeping of map space to
   // detect whether unmarked map became dead in this collection or in one
   // of the previous ones.
diff --git a/src/mark-compact.h b/src/mark-compact.h
index 0837959..9cdb46a 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -594,9 +594,15 @@
   enum SweeperType {
     CONSERVATIVE,
     LAZY_CONSERVATIVE,
+    PARALLEL_CONSERVATIVE,
     PRECISE
   };
 
+  enum SweepingParallelism {
+    SWEEP_SEQUENTIALLY,
+    SWEEP_IN_PARALLEL
+  };
+
 #ifdef VERIFY_HEAP
   void VerifyMarkbitsAreClean();
   static void VerifyMarkbitsAreClean(PagedSpace* space);
@@ -605,7 +611,10 @@
 
   // Sweep a single page from the given space conservatively.
   // Return a number of reclaimed bytes.
-  static intptr_t SweepConservatively(PagedSpace* space, Page* p);
+  template<SweepingParallelism type>
+  static intptr_t SweepConservatively(PagedSpace* space,
+                                      FreeList* free_list,
+                                      Page* p);
 
   INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
     return Page::FromAddress(reinterpret_cast<Address>(anchor))->
@@ -671,6 +680,22 @@
 
   MarkingParity marking_parity() { return marking_parity_; }
 
+  // Concurrent and parallel sweeping support.
+  void SweepInParallel(PagedSpace* space,
+                       FreeList* private_free_list,
+                       FreeList* free_list);
+
+  void WaitUntilSweepingCompleted();
+
+  intptr_t StealMemoryFromSweeperThreads(PagedSpace* space);
+
+  bool AreSweeperThreadsActivated();
+
+  // Parallel marking support.
+  void MarkInParallel();
+
+  void WaitUntilMarkingCompleted();
+
  private:
   MarkCompactCollector();
   ~MarkCompactCollector();
@@ -679,6 +704,7 @@
   void RemoveDeadInvalidatedCode();
   void ProcessInvalidatedCode(ObjectVisitor* visitor);
 
+  void StartSweeperThreads();
 
 #ifdef DEBUG
   enum CollectorState {
diff --git a/src/marking-thread.cc b/src/marking-thread.cc
new file mode 100644
index 0000000..ac64381
--- /dev/null
+++ b/src/marking-thread.cc
@@ -0,0 +1,85 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "marking-thread.h"
+
+#include "v8.h"
+
+#include "isolate.h"
+#include "v8threads.h"
+
+namespace v8 {
+namespace internal {
+
+MarkingThread::MarkingThread(Isolate* isolate)
+     : Thread("MarkingThread"),
+       isolate_(isolate),
+       heap_(isolate->heap()),
+       start_marking_semaphore_(OS::CreateSemaphore(0)),
+       end_marking_semaphore_(OS::CreateSemaphore(0)),
+       stop_semaphore_(OS::CreateSemaphore(0)) {
+  NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
+  id_ = NoBarrier_AtomicIncrement(&id_counter_, 1);
+}
+
+
+Atomic32 MarkingThread::id_counter_ = -1;
+
+
+void MarkingThread::Run() {
+  Isolate::SetIsolateThreadLocals(isolate_, NULL);
+
+  while (true) {
+    start_marking_semaphore_->Wait();
+
+    if (Acquire_Load(&stop_thread_)) {
+      stop_semaphore_->Signal();
+      return;
+    }
+
+    end_marking_semaphore_->Signal();
+  }
+}
+
+
+void MarkingThread::Stop() {
+  Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
+  start_marking_semaphore_->Signal();
+  stop_semaphore_->Wait();
+}
+
+
+void MarkingThread::StartMarking() {
+  start_marking_semaphore_->Signal();
+}
+
+
+void MarkingThread::WaitForMarkingThread() {
+  end_marking_semaphore_->Wait();
+}
+
+} }  // namespace v8::internal
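
The thread body is a plain semaphore handshake: the main thread signals
start_marking_semaphore_, the worker runs (the marking work itself is not
wired up yet in this patch) and then signals end_marking_semaphore_, while
Stop() reuses the start semaphore to wake the worker and waits on
stop_semaphore_ for acknowledgement. The expected call sequence from the
owning isolate, as wired up in isolate.cc above:

    MarkingThread* thread = new MarkingThread(isolate);
    thread->Start();                 // OS thread parks on the start semaphore
    thread->StartMarking();          // wake it for one round of work
    thread->WaitForMarkingThread();  // block until it signals completion
    thread->Stop();                  // set stop flag, wake, wait for exit
    delete thread;
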
diff --git a/src/marking-thread.h b/src/marking-thread.h
new file mode 100644
index 0000000..9efa3af
--- /dev/null
+++ b/src/marking-thread.h
@@ -0,0 +1,71 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MARKING_THREAD_H_
+#define V8_MARKING_THREAD_H_
+
+#include "atomicops.h"
+#include "flags.h"
+#include "platform.h"
+#include "v8utils.h"
+
+#include "spaces.h"
+
+#include "heap.h"
+
+namespace v8 {
+namespace internal {
+
+class MarkingThread : public Thread {
+ public:
+  explicit MarkingThread(Isolate* isolate);
+
+  void Run();
+  void Stop();
+  void StartMarking();
+  void WaitForMarkingThread();
+
+  ~MarkingThread() {
+    delete start_marking_semaphore_;
+    delete end_marking_semaphore_;
+    delete stop_semaphore_;
+  }
+
+ private:
+  Isolate* isolate_;
+  Heap* heap_;
+  Semaphore* start_marking_semaphore_;
+  Semaphore* end_marking_semaphore_;
+  Semaphore* stop_semaphore_;
+  volatile AtomicWord stop_thread_;
+  int id_;
+  static Atomic32 id_counter_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_MARKING_THREAD_H_
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index 793968e..65081b9 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -1310,10 +1310,8 @@
     __ MultiPop(kJSCallerSaved | kCalleeSaved);
   }
 
-  __ mov(at, ra);  // Stash the miss continuation
   __ Addu(sp, sp, Operand(kPointerSize));  // Ignore state
-  __ pop(ra);  // Restore RA to continuation in JSFunction
-  __ Jump(at);  // Jump to miss handler
+  __ Jump(ra);  // Jump to miss handler
 }
 
 
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 677a76c..7d03e91 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -46,7 +46,7 @@
   descriptor->register_param_count_ = 2;
   descriptor->register_params_ = registers;
   descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(KeyedLoadIC_Miss);
+      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
 }
 
 
@@ -7977,6 +7977,16 @@
 }
 
 
+void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
+  ASSERT(!Serializer::enabled());
+  bool save_fp_regs = CpuFeatures::IsSupported(FPU);
+  CEntryStub ces(1, save_fp_regs ? kSaveFPRegs : kDontSaveFPRegs);
+  __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
+  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+  __ Ret();
+}
+
+
 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   if (entry_hook_ != NULL) {
     ProfileEntryHookStub stub;
diff --git a/src/mips/deoptimizer-mips.cc b/src/mips/deoptimizer-mips.cc
index 634aecd..9a8cfd0 100644
--- a/src/mips/deoptimizer-mips.cc
+++ b/src/mips/deoptimizer-mips.cc
@@ -448,22 +448,27 @@
 void Deoptimizer::DoCompiledStubFrame(TranslationIterator* iterator,
                                       int frame_index) {
   //
-  //               FROM                                  TO             <-fp
+  //               FROM                                  TO
   //    |          ....           |          |          ....           |
   //    +-------------------------+          +-------------------------+
-  //    | JSFunction continuation |          |       parameter 1       |
+  //    | JSFunction continuation |          | JSFunction continuation |
   //    +-------------------------+          +-------------------------+
-  // |  |   saved frame (fp)      |          |          ....           |
-  // |  +=========================+<-fp      +-------------------------+
-  // |  |   JSFunction context    |          |       parameter n       |
+  // |  |   saved frame (fp)      |          |   saved frame (fp)      |
+  // |  +=========================+<-fp      +=========================+<-fp
+  // |  |   JSFunction context    |          |   JSFunction context    |
   // v  +-------------------------+          +-------------------------+
-  //    |   COMPILED_STUB marker  |          | JSFunction continuation |
-  //    +-------------------------+          +-------------------------+<-sp
-  //    |                         |          a0 = number of parameters
-  //    | ...                     |          a1 = failure handler address
-  //    |                         |          fp = saved frame
-  //    +-------------------------+<-sp      cp = JSFunction context
-  //
+  //    |   COMPILED_STUB marker  |          |   STUB_FAILURE marker   |
+  //    +-------------------------+          +-------------------------+
+  //    |                         |          |     stub parameter 1    |
+  //    | ...                     |          +-------------------------+
+  //    |                         |          |            ...          |
+  //    +-------------------------+<-sp      +-------------------------+
+  //                                         |     stub parameter n    |
+  //      parameters in registers            +-------------------------+<-sp
+  //       and spilled to stack              s0-s1 = number of parameters
+  //                                         s2 = failure handler address
+  //                                         fp = saved frame
+  //                                         cp = JSFunction context
   //
 
   ASSERT(compiled_code_->kind() == Code::COMPILED_STUB);
@@ -471,39 +476,59 @@
   CodeStubInterfaceDescriptor* descriptor =
       isolate_->code_stub_interface_descriptor(major_key);
 
-  int output_frame_size =
-      (1 + descriptor->register_param_count_) * kPointerSize;
+  int output_frame_size = StandardFrameConstants::kFixedFrameSize +
+      kPointerSize * descriptor->register_param_count_;
+
   FrameDescription* output_frame =
       new(output_frame_size) FrameDescription(output_frame_size, 0);
+  ASSERT(frame_index == 0);
+  output_[frame_index] = output_frame;
   Code* notify_failure =
       isolate_->builtins()->builtin(Builtins::kNotifyStubFailure);
   output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
   output_frame->SetContinuation(
-      reinterpret_cast<uint32_t>(notify_failure->entry()));
+      reinterpret_cast<intptr_t>(notify_failure->entry()));
 
-  Code* code;
-  CEntryStub(1, kSaveFPRegs).FindCodeInCache(&code, isolate_);
-  output_frame->SetPc(reinterpret_cast<intptr_t>(code->instruction_start()));
+  Code* trampoline = NULL;
+  StubFailureTrampolineStub().FindCodeInCache(&trampoline, isolate_);
+  ASSERT(trampoline != NULL);
+  output_frame->SetPc(reinterpret_cast<intptr_t>(
+      trampoline->instruction_start()));
   unsigned input_frame_size = input_->GetFrameSize();
-  intptr_t value = input_->GetFrameSlot(input_frame_size - kPointerSize);
-  output_frame->SetFrameSlot(0, value);
-  value = input_->GetFrameSlot(input_frame_size - 2 * kPointerSize);
+
+  // JSFunction continuation
+  intptr_t input_frame_offset = input_frame_size - kPointerSize;
+  intptr_t output_frame_offset = output_frame_size - kPointerSize;
+  intptr_t value = input_->GetFrameSlot(input_frame_offset);
+  output_frame->SetFrameSlot(output_frame_offset, value);
+
+  // saved frame ptr
+  input_frame_offset -= kPointerSize;
+  value = input_->GetFrameSlot(input_frame_offset);
+  output_frame_offset -= kPointerSize;
+  output_frame->SetFrameSlot(output_frame_offset, value);
+
+  // Restore context
+  input_frame_offset -= kPointerSize;
+  value = input_->GetFrameSlot(input_frame_offset);
+  output_frame->SetRegister(cp.code(), value);
+  output_frame_offset -= kPointerSize;
+  output_frame->SetFrameSlot(output_frame_offset, value);
+
+  // Internal frame markers
+  output_frame_offset -= kPointerSize;
+  value = reinterpret_cast<intptr_t>(
+      Smi::FromInt(StackFrame::STUB_FAILURE_TRAMPOLINE));
+  output_frame->SetFrameSlot(output_frame_offset, value);
+
+  for (int i = 0; i < descriptor->register_param_count_; ++i) {
+    output_frame_offset -= kPointerSize;
+    DoTranslateCommand(iterator, 0, output_frame_offset);
+  }
+
+  value = input_->GetRegister(fp.code());
   output_frame->SetRegister(fp.code(), value);
   output_frame->SetFp(value);
-  value = input_->GetFrameSlot(input_frame_size - 3 * kPointerSize);
-  output_frame->SetRegister(cp.code(), value);
-
-  int parameter_offset = kPointerSize * descriptor->register_param_count_;
-  for (int i = 0; i < descriptor->register_param_count_; ++i) {
-    Translation::Opcode opcode =
-        static_cast<Translation::Opcode>(iterator->Next());
-    ASSERT(opcode == Translation::REGISTER);
-    USE(opcode);
-    int input_reg = iterator->Next();
-    intptr_t reg_value = input_->GetRegister(input_reg);
-    output_frame->SetFrameSlot(parameter_offset, reg_value);
-    parameter_offset -= kPointerSize;
-  }
 
   ApiFunction function(descriptor->deoptimization_handler_);
   ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
@@ -512,9 +537,6 @@
   output_frame->SetRegister(s1.code(),
       (descriptor->register_param_count_ - 1) * kPointerSize);
   output_frame->SetRegister(s2.code(), handler);
-
-  ASSERT(frame_index == 0);
-  output_[frame_index] = output_frame;
 }
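
A minimal standalone sketch of the frame layout built above, assuming a
32-bit target (kPointerSize == 4) and a four-slot fixed frame; the real
constants come from frames.h and the stub's interface descriptor, and
register_param_count is set to 2 only to match the KeyedLoadIC descriptor
touched elsewhere in this change:

    #include <cstdio>

    int main() {
      const int kPointerSize = 4;                    // assumption: 32-bit
      const int kFixedFrameSize = 4 * kPointerSize;  // assumption: 4 slots
      const int register_param_count = 2;            // e.g. KeyedLoadIC

      int output_frame_size =
          kFixedFrameSize + kPointerSize * register_param_count;

      // Slots are filled from the top of the frame downwards, mirroring
      // the code above: continuation, saved fp, context, marker, params.
      const char* slots[] = { "JSFunction continuation", "saved frame ptr",
                              "context", "STUB_FAILURE_TRAMPOLINE marker",
                              "param 0", "param 1" };
      int offset = output_frame_size - kPointerSize;
      for (int i = 0; i < 6; ++i) {
        printf("[frame offset %2d] %s\n", offset, slots[i]);
        offset -= kPointerSize;
      }
      return 0;
    }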
 
 
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index f4f0b2f..13dd63c 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -67,8 +67,6 @@
   status_ = GENERATING;
   CpuFeatures::Scope scope(FPU);
 
-  CodeStub::GenerateFPStubs();
-
   // Open a frame scope to indicate that there is a frame on the stack.  The
   // NONE indicates that the scope shouldn't actually generate code to set up
   // the frame (that is done in GeneratePrologue).
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 28d8762..18894c1 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -2142,6 +2142,16 @@
 }
 
 
+Object** DescriptorArray::GetDescriptorStartSlot(int descriptor_number) {
+  return GetKeySlot(descriptor_number);
+}
+
+
+Object** DescriptorArray::GetDescriptorEndSlot(int descriptor_number) {
+  return GetValueSlot(descriptor_number - 1) + 1;
+}
+
+
 String* DescriptorArray::GetKey(int descriptor_number) {
   ASSERT(descriptor_number < number_of_descriptors());
   return String::cast(get(ToKeyIndex(descriptor_number)));
diff --git a/src/objects-visiting-inl.h b/src/objects-visiting-inl.h
index 7332a0c..4fabba4 100644
--- a/src/objects-visiting-inl.h
+++ b/src/objects-visiting-inl.h
@@ -396,6 +396,33 @@
     ASSERT(transitions->IsMap() || transitions->IsUndefined());
   }
 
+  // Since descriptor arrays are potentially shared, ensure that only the
+  // descriptors that belong to this map are marked. The first time a
+  // non-empty descriptor array is marked, its header is also visited. The slot
+  // holding the descriptor array will be implicitly recorded when the pointer
+  // fields of this map are visited.
+  DescriptorArray* descriptors = map->instance_descriptors();
+  if (StaticVisitor::MarkObjectWithoutPush(heap, descriptors) &&
+      descriptors->length() > 0) {
+    StaticVisitor::VisitPointers(heap,
+        descriptors->GetFirstElementAddress(),
+        descriptors->GetDescriptorEndSlot(0));
+  }
+  int start = 0;
+  int end = map->NumberOfOwnDescriptors();
+  Object* back_pointer = map->GetBackPointer();
+  if (!back_pointer->IsUndefined()) {
+    Map* parent_map = Map::cast(back_pointer);
+    if (descriptors == parent_map->instance_descriptors()) {
+      start = parent_map->NumberOfOwnDescriptors();
+    }
+  }
+  if (start < end) {
+    StaticVisitor::VisitPointers(heap,
+        descriptors->GetDescriptorStartSlot(start),
+        descriptors->GetDescriptorEndSlot(end));
+  }
+
   // Mark prototype dependent codes array but do not push it onto marking
   // stack, this will make references from it weak. We will clean dead
   // codes when we iterate over maps in ClearNonLiveTransitions.
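
The slot arithmetic behind the new GetDescriptorStartSlot/GetDescriptorEndSlot
accessors can be sketched standalone. This assumes the DescriptorArray layout
where each descriptor occupies kDescriptorSize == 3 consecutive array slots
(key, details, value) after a small header; kFirstIndex == 2 below is a
stand-in for the real header size:

    #include <cstdio>

    const int kDescriptorSize = 3;  // key, details, value
    const int kFirstIndex = 2;      // assumption: header slots

    int ToKeyIndex(int n) { return kFirstIndex + n * kDescriptorSize; }

    int GetDescriptorStartSlot(int n) { return ToKeyIndex(n); }

    // One past the value slot of descriptor n - 1.
    int GetDescriptorEndSlot(int n) {
      return ToKeyIndex(n - 1) + kDescriptorSize;
    }

    int main() {
      // A child map sharing the array with its parent: the parent owns
      // descriptors [0, 2), the child additionally owns [2, 4), so only
      // the child's own slice is visited when marking the child map.
      int start = 2, end = 4;
      printf("visit slots [%d, %d)\n",
             GetDescriptorStartSlot(start), GetDescriptorEndSlot(end));
      return 0;
    }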
diff --git a/src/objects.cc b/src/objects.cc
index 0825b64..fb8c704 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -1601,10 +1601,7 @@
     if (!maybe_values->To(&values)) return maybe_values;
   }
 
-  // Only allow map transition if the object isn't the global object.
-  TransitionFlag flag = isolate->empty_object_map() != map()
-      ? INSERT_TRANSITION
-      : OMIT_TRANSITION;
+  TransitionFlag flag = INSERT_TRANSITION;
 
   Map* new_map;
   MaybeObject* maybe_new_map = map()->CopyAddDescriptor(&new_field, flag);
@@ -1630,15 +1627,11 @@
   // Allocate new instance descriptors with (name, function) added
   ConstantFunctionDescriptor d(name, function, attributes, 0);
 
-  Heap* heap = GetHeap();
   TransitionFlag flag =
-      // Do not add transitions to the empty object map (map of "new Object()"),
-      // nor to global objects.
-      (map() == heap->isolate()->empty_object_map() || IsGlobalObject() ||
+      // Do not add transitions to global objects.
+      (IsGlobalObject() ||
       // Don't add transitions to special properties with non-trivial
       // attributes.
-      // TODO(verwaest): Once we support attribute changes, these transitions
-      // should be kept as well.
        attributes != NONE)
       ? OMIT_TRANSITION
       : INSERT_TRANSITION;
@@ -1841,10 +1834,8 @@
 
   if (!HasFastProperties()) return result;
 
-  // This method should only be used to convert existing transitions. Objects
-  // with the map of "new Object()" cannot have transitions in the first place.
+  // This method should only be used to convert existing transitions.
   Map* new_map = map();
-  ASSERT(new_map != GetIsolate()->empty_object_map());
 
   // TODO(verwaest): From here on we lose existing map transitions, causing
   // invalid back pointers. This will change once we can store multiple
@@ -2415,10 +2406,8 @@
   }
 
   bool allow_store_transition =
-      // Only remember the map transition if the object's map is NOT equal to
-      // the global object_function's map and there is not an already existing
+      // Only remember the map transition if there is not an already existing
       // non-matching element transition.
-      (GetIsolate()->empty_object_map() != map()) &&
       !start_map->IsUndefined() && !start_map->is_shared() &&
       IsFastElementsKind(from_kind);
 
@@ -3188,14 +3177,17 @@
   if (is_observed) {
     if (lookup.IsTransition()) {
       EnqueueChangeRecord(self, "new", name, old_value);
+    } else if (old_value->IsTheHole()) {
+      EnqueueChangeRecord(self, "reconfigured", name, old_value);
     } else {
       LookupResult new_lookup(isolate);
       self->LocalLookup(*name, &new_lookup, true);
-      if (old_value->IsTheHole() ||
-          new_lookup.GetAttributes() != old_attributes) {
+      bool value_changed = new_lookup.IsDataProperty() &&
+          !old_value->SameValue(*Object::GetProperty(self, name));
+      if (new_lookup.GetAttributes() != old_attributes) {
+        if (!value_changed) old_value = isolate->factory()->the_hole_value();
         EnqueueChangeRecord(self, "reconfigured", name, old_value);
-      } else if (new_lookup.IsDataProperty() &&
-          !Object::GetProperty(self, name)->SameValue(*old_value)) {
+      } else if (value_changed) {
         EnqueueChangeRecord(self, "updated", name, old_value);
       }
     }
@@ -7654,11 +7646,12 @@
                                 Map* map,
                                 DescriptorArray* descriptors,
                                 int number_of_own_descriptors) {
-  int number_of_descriptors = descriptors->number_of_descriptors();
+  int number_of_descriptors = descriptors->number_of_descriptors_storage();
   int to_trim = number_of_descriptors - number_of_own_descriptors;
-  if (to_trim <= 0) return;
+  if (to_trim == 0) return;
 
-  RightTrimFixedArray<FROM_GC>(heap, descriptors, to_trim);
+  RightTrimFixedArray<FROM_GC>(
+      heap, descriptors, to_trim * DescriptorArray::kDescriptorSize);
   descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
 
   if (descriptors->HasEnumCache()) TrimEnumCache(heap, map, descriptors);
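
The kDescriptorSize factor is the substance of this fix: RightTrimFixedArray
counts raw array elements, not descriptors. A worked example with
hypothetical numbers, again assuming kDescriptorSize == 3:

    #include <cstdio>

    int main() {
      const int kDescriptorSize = 3;  // slots per descriptor
      int storage = 7;                // number_of_descriptors_storage()
      int own = 5;                    // number_of_own_descriptors
      int to_trim = storage - own;    // 2 descriptors to drop
      printf("trim %d array elements\n", to_trim * kDescriptorSize);  // 6
      return 0;
    }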
@@ -10348,10 +10341,17 @@
       EnqueueChangeRecord(
           self, "updated", isolate->factory()->length_symbol(), old_length);
     }
-  } else if (old_attributes != new_attributes || old_value->IsTheHole()) {
+  } else if (old_value->IsTheHole()) {
     EnqueueChangeRecord(self, "reconfigured", name, old_value);
-  } else if (!old_value->SameValue(*Object::GetElement(self, index))) {
-    EnqueueChangeRecord(self, "updated", name, old_value);
+  } else {
+    bool value_changed =
+        !old_value->SameValue(*Object::GetElement(self, index));
+    if (old_attributes != new_attributes) {
+      if (!value_changed) old_value = isolate->factory()->the_hole_value();
+      EnqueueChangeRecord(self, "reconfigured", name, old_value);
+    } else if (value_changed) {
+      EnqueueChangeRecord(self, "updated", name, old_value);
+    }
   }
 
   return *hresult;
diff --git a/src/objects.h b/src/objects.h
index 975fdf8..10f74fc 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -2609,6 +2609,8 @@
   inline Object** GetKeySlot(int descriptor_number);
   inline Object* GetValue(int descriptor_number);
   inline Object** GetValueSlot(int descriptor_number);
+  inline Object** GetDescriptorStartSlot(int descriptor_number);
+  inline Object** GetDescriptorEndSlot(int descriptor_number);
   inline PropertyDetails GetDetails(int descriptor_number);
   inline PropertyType GetType(int descriptor_number);
   inline int GetFieldIndex(int descriptor_number);
diff --git a/src/spaces.cc b/src/spaces.cc
index 8195e7c..711cde1 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -466,6 +466,7 @@
   chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
   chunk->progress_bar_ = 0;
   chunk->high_water_mark_ = static_cast<int>(area_start - base);
+  chunk->parallel_sweeping_ = 0;
   chunk->ResetLiveBytes();
   Bitmap::Clear(chunk);
   chunk->initialize_scan_on_scavenge(false);
@@ -710,7 +711,7 @@
 
 void MemoryAllocator::Free(MemoryChunk* chunk) {
   LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
-  if (chunk->has_owner()) {
+  if (chunk->owner() != NULL) {
     ObjectSpace space =
         static_cast<ObjectSpace>(1 << chunk->owner()->identity());
     PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
@@ -2041,6 +2042,29 @@
 }
 
 
+intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
+  intptr_t free_bytes = 0;
+  if (category->top_ != NULL) {
+    ASSERT(category->end_ != NULL);
+    // Taking both locks is safe (it cannot deadlock) because Concatenate
+    // is never called concurrently on the same two free lists in the
+    // opposite order.
+    ScopedLock lock_target(mutex_);
+    ScopedLock lock_source(category->mutex());
+    free_bytes = category->available();
+    if (end_ == NULL) {
+      end_ = category->end();
+    } else {
+      category->end()->set_next(top_);
+    }
+    top_ = category->top();
+    available_ += category->available();
+    category->Reset();
+  }
+  return free_bytes;
+}
+
+
 void FreeListCategory::Reset() {
   top_ = NULL;
   end_ = NULL;
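
Stripped of locking and V8 types, the splice performed by Concatenate is an
O(1) prepend of the source list onto the target; a minimal single-threaded
sketch of just that pointer surgery:

    #include <cstdio>

    struct Node { Node* next; int size; };

    struct Category {
      Node* top;
      Node* end;
      int available;

      Category() : top(NULL), end(NULL), available(0) {}

      int Concatenate(Category* source) {
        int free_bytes = 0;
        if (source->top != NULL) {
          free_bytes = source->available;
          if (end == NULL) {
            end = source->end;        // target was empty
          } else {
            source->end->next = top;  // source's tail -> target's head
          }
          top = source->top;
          available += source->available;
          source->top = source->end = NULL;
          source->available = 0;
        }
        return free_bytes;
      }
    };

    int main() {
      Node a = { NULL, 32 };
      Node b = { &a, 64 };
      Category target, source;
      source.top = &b;
      source.end = &a;
      source.available = 96;
      printf("stole %d bytes\n", target.Concatenate(&source));  // 96
      return 0;
    }
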
@@ -2139,6 +2163,16 @@
 }
 
 
+intptr_t FreeList::Concatenate(FreeList* free_list) {
+  intptr_t free_bytes = 0;
+  free_bytes += small_list_.Concatenate(free_list->small_list());
+  free_bytes += medium_list_.Concatenate(free_list->medium_list());
+  free_bytes += large_list_.Concatenate(free_list->large_list());
+  free_bytes += huge_list_.Concatenate(free_list->huge_list());
+  return free_bytes;
+}
+
+
 void FreeList::Reset() {
   small_list_.Reset();
   medium_list_.Reset();
@@ -2503,7 +2537,10 @@
                reinterpret_cast<intptr_t>(p));
       }
       DecreaseUnsweptFreeBytes(p);
-      freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
+      freed_bytes +=
+          MarkCompactCollector::
+              SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
+                  this, NULL, p);
     }
     p = next_page;
   } while (p != anchor() && freed_bytes < bytes_to_sweep);
@@ -2535,6 +2572,21 @@
 }
 
 
+bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) {
+  MarkCompactCollector* collector = heap()->mark_compact_collector();
+  if (collector->AreSweeperThreadsActivated()) {
+    if (FLAG_concurrent_sweeping &&
+        collector->StealMemoryFromSweeperThreads(this) < size_in_bytes) {
+      collector->WaitUntilSweepingCompleted();
+      return true;
+    }
+    return false;
+  } else {
+    return AdvanceSweeper(size_in_bytes);
+  }
+}
+
+
 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // Allocation in this space has failed.
 
@@ -2544,7 +2596,7 @@
   bool sweeping_complete = false;
 
   for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {
-    sweeping_complete = AdvanceSweeper(size_in_bytes);
+    sweeping_complete = EnsureSweeperProgress(size_in_bytes);
 
     // Retry the free list allocation.
     HeapObject* object = free_list_.Allocate(size_in_bytes);
@@ -2567,7 +2619,7 @@
   // Last ditch, sweep all the remaining pages to try to find space.  This may
   // cause a pause.
   if (!IsSweepingComplete()) {
-    AdvanceSweeper(kMaxInt);
+    EnsureSweeperProgress(kMaxInt);
 
     // Retry the free list allocation.
     HeapObject* object = free_list_.Allocate(size_in_bytes);
diff --git a/src/spaces.h b/src/spaces.h
index ddf9dfe..07daacf 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -320,7 +320,8 @@
   Space* owner() const {
     if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
         kFailureTag) {
-      return reinterpret_cast<Space*>(owner_ - kFailureTag);
+      return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
+                                      kFailureTag);
     } else {
       return NULL;
     }
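
The cast-then-subtract form keeps the arithmetic on integers. The tagging
scheme itself, with kFailureTag == 3 and kFailureTagMask == 3 assumed to
match V8's failure tagging, decodes as in this sketch:

    #include <cstdio>
    #include <stdint.h>

    const intptr_t kFailureTag = 3;      // assumed tag values
    const intptr_t kFailureTagMask = 3;

    intptr_t EncodeOwner(intptr_t space) { return space + kFailureTag; }

    intptr_t DecodeOwner(intptr_t owner) {
      if ((owner & kFailureTagMask) == kFailureTag) {
        return owner - kFailureTag;  // a real Space* was stored
      }
      return 0;  // no owner
    }

    int main() {
      intptr_t space = 0x1000;  // hypothetical, aligned Space* bits
      printf("%#lx\n", (unsigned long)DecodeOwner(EncodeOwner(space)));
      printf("%#lx\n", (unsigned long)DecodeOwner(0));  // 0: no owner
      return 0;
    }
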
@@ -333,14 +334,6 @@
            kFailureTag);
   }
 
-  // Workaround for a bug in Clang-3.3 which in some situations optimizes away
-  // an "if (chunk->owner() != NULL)" check.
-  bool has_owner() {
-    if (owner_ == 0) return false;
-    if (reinterpret_cast<intptr_t>(owner_) == kFailureTag) return false;
-    return true;
-  }
-
   VirtualMemory* reserved_memory() {
     return &reservation_;
   }
@@ -462,6 +455,18 @@
   // Return all current flags.
   intptr_t GetFlags() { return flags_; }
 
+  intptr_t parallel_sweeping() const {
+    return parallel_sweeping_;
+  }
+
+  void set_parallel_sweeping(intptr_t state) {
+    parallel_sweeping_ = state;
+  }
+
+  bool TryParallelSweeping() {
+    return NoBarrier_CompareAndSwap(&parallel_sweeping_, 1, 0) == 1;
+  }
+
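
NoBarrier_CompareAndSwap returns the previous value, so exactly one caller
observes 1 and claims the page. The same protocol with std::atomic standing
in for V8's atomicops (states assumed: 1 == sweeping pending, 0 == claimed):

    #include <atomic>
    #include <cstdio>
    #include <stdint.h>

    std::atomic<intptr_t> parallel_sweeping(1);  // 1 == sweeping pending

    bool TryParallelSweeping() {
      intptr_t expected = 1;
      // Succeeds for exactly one caller; the page is then claimed (0).
      return parallel_sweeping.compare_exchange_strong(expected, 0);
    }

    int main() {
      printf("%d\n", TryParallelSweeping());  // 1: this thread claims it
      printf("%d\n", TryParallelSweeping());  // 0: already claimed
      return 0;
    }
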
   // Manage live byte count (count of bytes known to be live,
   // because they are marked black).
   void ResetLiveBytes() {
@@ -541,8 +546,8 @@
   static const size_t kWriteBarrierCounterOffset =
       kSlotsBufferOffset + kPointerSize + kPointerSize;
 
-  static const size_t kHeaderSize =
-      kWriteBarrierCounterOffset + kPointerSize + kIntSize + kIntSize;
+  static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize +
+                                    kIntSize + kIntSize + kPointerSize;
 
   static const int kBodyOffset =
       CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
@@ -694,6 +699,8 @@
   // count highest number of bytes ever allocated on the page.
   int high_water_mark_;
 
+  intptr_t parallel_sweeping_;
+
   static MemoryChunk* Initialize(Heap* heap,
                                  Address base,
                                  size_t size,
@@ -1403,7 +1410,17 @@
 // the end element of the linked list of free memory blocks.
 class FreeListCategory {
  public:
-  FreeListCategory() : top_(NULL), end_(NULL), available_(0) {}
+  FreeListCategory() :
+      top_(NULL),
+      end_(NULL),
+      mutex_(OS::CreateMutex()),
+      available_(0) {}
+
+  ~FreeListCategory() {
+    delete mutex_;
+  }
+
+  intptr_t Concatenate(FreeListCategory* category);
 
   void Reset();
 
@@ -1429,6 +1446,8 @@
   int available() const { return available_; }
   void set_available(int available) { available_ = available; }
 
+  Mutex* mutex() { return mutex_; }
+
 #ifdef DEBUG
   intptr_t SumFreeList();
   int FreeListLength();
@@ -1437,6 +1456,7 @@
  private:
   FreeListNode* top_;
   FreeListNode* end_;
+  Mutex* mutex_;
 
   // Total available bytes in all blocks of this free list category.
   int available_;
@@ -1470,6 +1490,8 @@
  public:
   explicit FreeList(PagedSpace* owner);
 
+  intptr_t Concatenate(FreeList* free_list);
+
   // Clear the free list.
   void Reset();
 
@@ -1517,6 +1539,11 @@
 
   intptr_t EvictFreeListItems(Page* p);
 
+  FreeListCategory* small_list() { return &small_list_; }
+  FreeListCategory* medium_list() { return &medium_list_; }
+  FreeListCategory* large_list() { return &large_list_; }
+  FreeListCategory* huge_list() { return &huge_list_; }
+
  private:
   // The size range of blocks, in bytes.
   static const int kMinBlockSize = 3 * kPointerSize;
@@ -1731,6 +1758,11 @@
 
   bool AdvanceSweeper(intptr_t bytes_to_sweep);
 
+  // When parallel sweeper threads are active, this function first tries
+  // to steal enough free memory from them; if that falls short of
+  // size_in_bytes it waits until sweeping is completed. Without active
+  // sweeper threads it falls back to AdvanceSweeper(size_in_bytes).
+  bool EnsureSweeperProgress(intptr_t size_in_bytes);
+
   bool IsSweepingComplete() {
     return !first_unswept_page_->is_valid();
   }
@@ -1755,6 +1787,12 @@
   }
 
  protected:
+  FreeList* free_list() { return &free_list_; }
+
+  void AddToAccountingStats(intptr_t bytes) {
+    accounting_stats_.DeallocateBytes(bytes);
+  }
+
   int area_size_;
 
   // Maximum capacity of this space.
@@ -1804,6 +1842,7 @@
   MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
 
   friend class PageIterator;
+  friend class SweeperThread;
 };
 
 
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index e6722f7..16420a5 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -102,7 +102,6 @@
 
 Handle<Code> StubCache::ComputeLoadNonexistent(Handle<String> name,
                                                Handle<JSObject> receiver) {
-  ASSERT(receiver->IsGlobalObject() || receiver->HasFastProperties());
   // If no global objects are present in the prototype chain, the load
   // nonexistent IC stub can be shared for all names for a given map
   // and we use the empty string for the map cache in that case.  If
@@ -110,12 +109,20 @@
   // property cells in the stub and therefore the stub will be
   // specific to the name.
   Handle<String> cache_name = factory()->empty_string();
-  if (receiver->IsGlobalObject()) cache_name = name;
-  Handle<JSObject> last = receiver;
-  while (last->GetPrototype() != heap()->null_value()) {
-    last = Handle<JSObject>(JSObject::cast(last->GetPrototype()));
-    if (last->IsGlobalObject()) cache_name = name;
-  }
+  Handle<JSObject> current;
+  Handle<Object> next = receiver;
+  Handle<GlobalObject> global;
+  do {
+    current = Handle<JSObject>::cast(next);
+    next = Handle<Object>(current->GetPrototype());
+    if (current->IsGlobalObject()) {
+      global = Handle<GlobalObject>::cast(current);
+      cache_name = name;
+    } else if (!current->HasFastProperties()) {
+      cache_name = name;
+    }
+  } while (!next->IsNull());
+
   // Compile the stub that is either shared for all names or
   // name specific if there are global objects involved.
   Code::Flags flags =
@@ -126,7 +133,7 @@
 
   LoadStubCompiler compiler(isolate_);
   Handle<Code> code =
-      compiler.CompileLoadNonexistent(cache_name, receiver, last);
+      compiler.CompileLoadNonexistent(cache_name, receiver, current, global);
   PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *cache_name));
   GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *cache_name, *code));
   JSObject::UpdateMapCodeCache(receiver, cache_name, code);
@@ -138,9 +145,11 @@
                                          Handle<JSObject> receiver,
                                          Handle<JSObject> holder,
                                          PropertyIndex field_index) {
-  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
+  InlineCacheHolderFlag cache_holder =
+      IC::GetCodeCacheForObject(*receiver, *holder);
+  Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*receiver, cache_holder));
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::FIELD);
-  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+  Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags),
                        isolate_);
   if (probe->IsCode()) return Handle<Code>::cast(probe);
 
@@ -149,7 +158,7 @@
       compiler.CompileLoadField(receiver, holder, field_index, name);
   PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
   GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
-  JSObject::UpdateMapCodeCache(receiver, name, code);
+  JSObject::UpdateMapCodeCache(map_holder, name, code);
   return code;
 }
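
Every Compute* change in this file follows the same pattern: instead of
asserting OWN_MAP, pick the map whose code cache should hold the stub. A toy
illustration of the holder selection, with simplified types and logic (in V8
the flag comes from IC::GetCodeCacheForObject):

    #include <cstdio>

    enum InlineCacheHolderFlag { OWN_MAP, PROTOTYPE_MAP };

    struct Obj {                 // toy stand-in for JSObject
      const char* map_name;
      Obj* prototype;
    };

    InlineCacheHolderFlag GetCodeCacheForObject(Obj* receiver, Obj* holder) {
      // Simplified: the stub is cached on the receiver's map unless the
      // property actually lives on a prototype.
      return receiver == holder ? OWN_MAP : PROTOTYPE_MAP;
    }

    Obj* GetCodeCacheHolder(Obj* receiver, InlineCacheHolderFlag flag) {
      return flag == OWN_MAP ? receiver : receiver->prototype;
    }

    int main() {
      Obj proto = { "PrototypeMap", NULL };
      Obj receiver = { "ReceiverMap", &proto };
      Obj* map_holder = GetCodeCacheHolder(
          &receiver, GetCodeCacheForObject(&receiver, &proto));
      printf("probe and update cache on: %s\n", map_holder->map_name);
      return 0;
    }
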
 
@@ -159,10 +168,12 @@
                                             Handle<JSObject> holder,
                                             Handle<AccessorInfo> callback) {
   ASSERT(v8::ToCData<Address>(callback->getter()) != 0);
-  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
+  InlineCacheHolderFlag cache_holder =
+      IC::GetCodeCacheForObject(*receiver, *holder);
+  Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*receiver, cache_holder));
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::CALLBACKS);
-  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+  Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags),
                        isolate_);
   if (probe->IsCode()) return Handle<Code>::cast(probe);
 
@@ -171,7 +182,7 @@
       compiler.CompileLoadCallback(name, receiver, holder, callback);
   PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
   GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
-  JSObject::UpdateMapCodeCache(receiver, name, code);
+  JSObject::UpdateMapCodeCache(map_holder, name, code);
   return code;
 }
 
@@ -180,10 +191,12 @@
                                              Handle<JSObject> receiver,
                                              Handle<JSObject> holder,
                                              Handle<JSFunction> getter) {
-  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
+  InlineCacheHolderFlag cache_holder =
+      IC::GetCodeCacheForObject(*receiver, *holder);
+  Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*receiver, cache_holder));
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::CALLBACKS);
-  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+  Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags),
                        isolate_);
   if (probe->IsCode()) return Handle<Code>::cast(probe);
 
@@ -192,7 +205,7 @@
       compiler.CompileLoadViaGetter(name, receiver, holder, getter);
   PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
   GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
-  JSObject::UpdateMapCodeCache(receiver, name, code);
+  JSObject::UpdateMapCodeCache(map_holder, name, code);
   return code;
 }
 
@@ -201,10 +214,12 @@
                                             Handle<JSObject> receiver,
                                             Handle<JSObject> holder,
                                             Handle<JSFunction> value) {
-  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
+  InlineCacheHolderFlag cache_holder =
+      IC::GetCodeCacheForObject(*receiver, *holder);
+  Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*receiver, cache_holder));
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::CONSTANT_FUNCTION);
-  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+  Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags),
                        isolate_);
   if (probe->IsCode()) return Handle<Code>::cast(probe);
 
@@ -213,7 +228,7 @@
         compiler.CompileLoadConstant(receiver, holder, value, name);
   PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
   GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
-  JSObject::UpdateMapCodeCache(receiver, name, code);
+  JSObject::UpdateMapCodeCache(map_holder, name, code);
   return code;
 }
 
@@ -221,10 +236,12 @@
 Handle<Code> StubCache::ComputeLoadInterceptor(Handle<String> name,
                                                Handle<JSObject> receiver,
                                                Handle<JSObject> holder) {
-  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
+  InlineCacheHolderFlag cache_holder =
+      IC::GetCodeCacheForObject(*receiver, *holder);
+  Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*receiver, cache_holder));
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::INTERCEPTOR);
-  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+  Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags),
                        isolate_);
   if (probe->IsCode()) return Handle<Code>::cast(probe);
 
@@ -233,7 +250,7 @@
         compiler.CompileLoadInterceptor(receiver, holder, name);
   PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
   GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
-  JSObject::UpdateMapCodeCache(receiver, name, code);
+  JSObject::UpdateMapCodeCache(map_holder, name, code);
   return code;
 }
 
@@ -248,10 +265,12 @@
                                           Handle<GlobalObject> holder,
                                           Handle<JSGlobalPropertyCell> cell,
                                           bool is_dont_delete) {
-  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
+  InlineCacheHolderFlag cache_holder =
+      IC::GetCodeCacheForObject(*receiver, *holder);
+  Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*receiver, cache_holder));
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::NORMAL);
-  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+  Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags),
                        isolate_);
   if (probe->IsCode()) return Handle<Code>::cast(probe);
 
@@ -260,7 +279,7 @@
       compiler.CompileLoadGlobal(receiver, holder, cell, name, is_dont_delete);
   PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
   GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
-  JSObject::UpdateMapCodeCache(receiver, name, code);
+  JSObject::UpdateMapCodeCache(map_holder, name, code);
   return code;
 }
 
@@ -269,10 +288,12 @@
                                               Handle<JSObject> receiver,
                                               Handle<JSObject> holder,
                                               PropertyIndex field_index) {
-  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
+  InlineCacheHolderFlag cache_holder =
+      IC::GetCodeCacheForObject(*receiver, *holder);
+  Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*receiver, cache_holder));
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::FIELD);
-  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+  Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags),
                        isolate_);
   if (probe->IsCode()) return Handle<Code>::cast(probe);
 
@@ -281,7 +302,7 @@
       compiler.CompileLoadField(name, receiver, holder, field_index);
   PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
   GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
-  JSObject::UpdateMapCodeCache(receiver, name, code);
+  JSObject::UpdateMapCodeCache(map_holder, name, code);
   return code;
 }
 
@@ -290,10 +311,12 @@
                                                  Handle<JSObject> receiver,
                                                  Handle<JSObject> holder,
                                                  Handle<JSFunction> value) {
-  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
+  InlineCacheHolderFlag cache_holder =
+      IC::GetCodeCacheForObject(*receiver, *holder);
+  Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*receiver, cache_holder));
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC,
                                                     Code::CONSTANT_FUNCTION);
-  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+  Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags),
                        isolate_);
   if (probe->IsCode()) return Handle<Code>::cast(probe);
 
@@ -302,7 +325,7 @@
       compiler.CompileLoadConstant(name, receiver, holder, value);
   PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
   GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
-  JSObject::UpdateMapCodeCache(receiver, name, code);
+  JSObject::UpdateMapCodeCache(map_holder, name, code);
   return code;
 }
 
@@ -310,10 +333,12 @@
 Handle<Code> StubCache::ComputeKeyedLoadInterceptor(Handle<String> name,
                                                     Handle<JSObject> receiver,
                                                     Handle<JSObject> holder) {
-  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
+  InlineCacheHolderFlag cache_holder =
+      IC::GetCodeCacheForObject(*receiver, *holder);
+  Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*receiver, cache_holder));
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::INTERCEPTOR);
-  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+  Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags),
                        isolate_);
   if (probe->IsCode()) return Handle<Code>::cast(probe);
 
@@ -321,7 +346,7 @@
   Handle<Code> code = compiler.CompileLoadInterceptor(receiver, holder, name);
   PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
   GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
-  JSObject::UpdateMapCodeCache(receiver, name, code);
+  JSObject::UpdateMapCodeCache(map_holder, name, code);
   return code;
 }
 
@@ -331,10 +356,12 @@
     Handle<JSObject> receiver,
     Handle<JSObject> holder,
     Handle<AccessorInfo> callback) {
-  ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
+  InlineCacheHolderFlag cache_holder =
+      IC::GetCodeCacheForObject(*receiver, *holder);
+  Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*receiver, cache_holder));
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::CALLBACKS);
-  Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+  Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags),
                        isolate_);
   if (probe->IsCode()) return Handle<Code>::cast(probe);
 
@@ -343,7 +370,7 @@
       compiler.CompileLoadCallback(name, receiver, holder, callback);
   PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
   GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
-  JSObject::UpdateMapCodeCache(receiver, name, code);
+  JSObject::UpdateMapCodeCache(map_holder, name, code);
   return code;
 }
 
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 5becb11..451af31 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -608,7 +608,8 @@
 
   Handle<Code> CompileLoadNonexistent(Handle<String> name,
                                       Handle<JSObject> object,
-                                      Handle<JSObject> last);
+                                      Handle<JSObject> last,
+                                      Handle<GlobalObject> global);
 
   Handle<Code> CompileLoadField(Handle<JSObject> object,
                                 Handle<JSObject> holder,
diff --git a/src/sweeper-thread.cc b/src/sweeper-thread.cc
new file mode 100644
index 0000000..7e31e6c
--- /dev/null
+++ b/src/sweeper-thread.cc
@@ -0,0 +1,105 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "sweeper-thread.h"
+
+#include "v8.h"
+
+#include "isolate.h"
+#include "v8threads.h"
+
+namespace v8 {
+namespace internal {
+
+SweeperThread::SweeperThread(Isolate* isolate)
+     : Thread("SweeperThread"),
+       isolate_(isolate),
+       heap_(isolate->heap()),
+       collector_(heap_->mark_compact_collector()),
+       start_sweeping_semaphore_(OS::CreateSemaphore(0)),
+       end_sweeping_semaphore_(OS::CreateSemaphore(0)),
+       stop_semaphore_(OS::CreateSemaphore(0)),
+       free_list_old_data_space_(heap_->paged_space(OLD_DATA_SPACE)),
+       free_list_old_pointer_space_(heap_->paged_space(OLD_POINTER_SPACE)),
+       private_free_list_old_data_space_(heap_->paged_space(OLD_DATA_SPACE)),
+       private_free_list_old_pointer_space_(
+           heap_->paged_space(OLD_POINTER_SPACE)) {
+  NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
+}
+
+
+bool SweeperThread::sweeping_pending_ = false;
+
+
+void SweeperThread::Run() {
+  Isolate::SetIsolateThreadLocals(isolate_, NULL);
+  while (true) {
+    start_sweeping_semaphore_->Wait();
+
+    if (Acquire_Load(&stop_thread_)) {
+      stop_semaphore_->Signal();
+      return;
+    }
+
+    collector_->SweepInParallel(heap_->old_data_space(),
+                                &private_free_list_old_data_space_,
+                                &free_list_old_data_space_);
+    collector_->SweepInParallel(heap_->old_pointer_space(),
+                                &private_free_list_old_pointer_space_,
+                                &free_list_old_pointer_space_);
+    end_sweeping_semaphore_->Signal();
+  }
+}
+
+
+intptr_t SweeperThread::StealMemory(PagedSpace* space) {
+  intptr_t free_bytes = 0;
+  if (space->identity() == OLD_POINTER_SPACE) {
+    free_bytes = space->free_list()->Concatenate(&free_list_old_pointer_space_);
+    space->AddToAccountingStats(free_bytes);
+  } else if (space->identity() == OLD_DATA_SPACE) {
+    free_bytes = space->free_list()->Concatenate(&free_list_old_data_space_);
+    space->AddToAccountingStats(free_bytes);
+  }
+  return free_bytes;
+}
+
+
+void SweeperThread::Stop() {
+  Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
+  start_sweeping_semaphore_->Signal();
+  stop_semaphore_->Wait();
+}
+
+
+void SweeperThread::StartSweeping() {
+  start_sweeping_semaphore_->Signal();
+}
+
+
+void SweeperThread::WaitForSweeperThread() {
+  end_sweeping_semaphore_->Wait();
+}
+} }  // namespace v8::internal
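
The thread's lifecycle is driven entirely by the three semaphores above. A
compilable sketch of the same handshake, substituting C++20
std::binary_semaphore and std::thread for V8's platform abstractions:

    #include <atomic>
    #include <cstdio>
    #include <semaphore>
    #include <thread>

    std::binary_semaphore start_sweeping(0), end_sweeping(0), stop_sem(0);
    std::atomic<bool> stop_thread(false);

    void Run() {
      while (true) {
        start_sweeping.acquire();      // wait for StartSweeping()
        if (stop_thread.load()) {      // Stop() was requested
          stop_sem.release();
          return;
        }
        // ... SweepInParallel over the old spaces would run here ...
        end_sweeping.release();        // signal WaitForSweeperThread()
      }
    }

    int main() {
      std::thread sweeper(Run);
      start_sweeping.release();        // StartSweeping()
      end_sweeping.acquire();          // WaitForSweeperThread()
      stop_thread.store(true);         // Stop(): set flag, wake, wait
      start_sweeping.release();
      stop_sem.acquire();
      sweeper.join();
      std::puts("sweeper stopped");
      return 0;
    }
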
diff --git a/src/sweeper-thread.h b/src/sweeper-thread.h
new file mode 100644
index 0000000..ba793c2
--- /dev/null
+++ b/src/sweeper-thread.h
@@ -0,0 +1,81 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SWEEPER_THREAD_H_
+#define V8_SWEEPER_THREAD_H_
+
+#include "atomicops.h"
+#include "flags.h"
+#include "platform.h"
+#include "v8utils.h"
+
+#include "spaces.h"
+
+#include "heap.h"
+
+namespace v8 {
+namespace internal {
+
+class SweeperThread : public Thread {
+ public:
+  explicit SweeperThread(Isolate* isolate);
+
+  void Run();
+  void Stop();
+  void StartSweeping();
+  void WaitForSweeperThread();
+  intptr_t StealMemory(PagedSpace* space);
+
+  static bool sweeping_pending() { return sweeping_pending_; }
+  static void set_sweeping_pending(bool sweeping_pending) {
+    sweeping_pending_ = sweeping_pending;
+  }
+
+  ~SweeperThread() {
+    delete start_sweeping_semaphore_;
+    delete end_sweeping_semaphore_;
+    delete stop_semaphore_;
+  }
+
+ private:
+  Isolate* isolate_;
+  Heap* heap_;
+  MarkCompactCollector* collector_;
+  Semaphore* start_sweeping_semaphore_;
+  Semaphore* end_sweeping_semaphore_;
+  Semaphore* stop_semaphore_;
+  FreeList free_list_old_data_space_;
+  FreeList free_list_old_pointer_space_;
+  FreeList private_free_list_old_data_space_;
+  FreeList private_free_list_old_pointer_space_;
+  volatile AtomicWord stop_thread_;
+  static bool sweeping_pending_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_SWEEPER_THREAD_H_
diff --git a/src/version.cc b/src/version.cc
index 364aed7..3035ac2 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     16
-#define BUILD_NUMBER      11
-#define PATCH_LEVEL       1
+#define BUILD_NUMBER      12
+#define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 109fcfc..2d3f7c6 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -45,7 +45,7 @@
   descriptor->register_param_count_ = 2;
   descriptor->register_params_ = registers;
   descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(KeyedLoadIC_Miss);
+      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
 }
 
 
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index ba6321a..5fdc2a1 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -2789,9 +2789,11 @@
 }
 
 
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
-                                                      Handle<JSObject> object,
-                                                      Handle<JSObject> last) {
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
+    Handle<String> name,
+    Handle<JSObject> object,
+    Handle<JSObject> last,
+    Handle<GlobalObject> global) {
   // ----------- S t a t e -------------
   //  -- rax    : receiver
   //  -- rcx    : name
@@ -2805,13 +2807,21 @@
   // Check the maps of the full prototype chain. Also check that
   // global property cells up to (but not including) the last object
   // in the prototype chain are empty.
-  CheckPrototypes(object, rax, last, rbx, rdx, rdi, name, &miss);
+  Register scratch = rdx;
+  Register result =
+      CheckPrototypes(object, rax, last, rbx, scratch, rdi, name, &miss);
 
   // If the last object in the prototype chain is a global object,
   // check that the global property cell is empty.
-  if (last->IsGlobalObject()) {
-    GenerateCheckPropertyCell(
-        masm(), Handle<GlobalObject>::cast(last), name, rdx, &miss);
+  if (!global.is_null()) {
+    GenerateCheckPropertyCell(masm(), global, name, scratch, &miss);
+  }
+
+  if (!last->HasFastProperties()) {
+    __ movq(scratch, FieldOperand(result, HeapObject::kMapOffset));
+    __ movq(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
+    __ Cmp(scratch, isolate()->factory()->null_value());
+    __ j(not_equal, &miss);
   }
 
   // Return undefined if maps of the full prototype chain are still the
diff --git a/test/cctest/test-mark-compact.cc b/test/cctest/test-mark-compact.cc
index 949e413..48610b0 100644
--- a/test/cctest/test-mark-compact.cc
+++ b/test/cctest/test-mark-compact.cc
@@ -564,7 +564,7 @@
       if (v8::internal::Snapshot::IsEnabled()) {
         CHECK_LE(delta, 2600 * 1024);
       } else {
-        CHECK_LE(delta, 3000 * 1024);
+        CHECK_LE(delta, 3100 * 1024);
       }
     }
   }
diff --git a/test/mjsunit/harmony/object-observe.js b/test/mjsunit/harmony/object-observe.js
index 4836eaa..b7ea22e 100644
--- a/test/mjsunit/harmony/object-observe.js
+++ b/test/mjsunit/harmony/object-observe.js
@@ -371,7 +371,7 @@
   { object: obj, name: "a", type: "new" },
   { object: obj, name: "a", type: "updated", oldValue: 4 },
   { object: obj, name: "a", type: "updated", oldValue: 5 },
-  { object: obj, name: "a", type: "reconfigured", oldValue: 6 },
+  { object: obj, name: "a", type: "reconfigured" },
   { object: obj, name: "a", type: "updated", oldValue: 6 },
   { object: obj, name: "a", type: "reconfigured", oldValue: 8 },
   { object: obj, name: "a", type: "reconfigured", oldValue: 7 },
@@ -429,7 +429,7 @@
   { object: obj, name: "1", type: "new" },
   { object: obj, name: "1", type: "updated", oldValue: 4 },
   { object: obj, name: "1", type: "updated", oldValue: 5 },
-  { object: obj, name: "1", type: "reconfigured", oldValue: 6 },
+  { object: obj, name: "1", type: "reconfigured" },
   { object: obj, name: "1", type: "updated", oldValue: 6 },
   { object: obj, name: "1", type: "reconfigured", oldValue: 8 },
   { object: obj, name: "1", type: "reconfigured", oldValue: 7 },
@@ -493,7 +493,7 @@
     { object: obj, name: prop, type: "new" },
     { object: obj, name: prop, type: "updated", oldValue: 4 },
     { object: obj, name: prop, type: "updated", oldValue: 5 },
-    { object: obj, name: prop, type: "reconfigured", oldValue: 6 },
+    { object: obj, name: prop, type: "reconfigured" },
     { object: obj, name: prop, type: "updated", oldValue: 6 },
     { object: obj, name: prop, type: "reconfigured", oldValue: 8 },
     { object: obj, name: prop, type: "reconfigured", oldValue: 7 },
@@ -537,7 +537,7 @@
     { object: obj, name: prop, type: "updated", oldValue: 4 },
     { object: obj, name: prop, type: "updated", oldValue: 5 },
     { object: obj, name: prop, type: "updated", oldValue: 6 },
-    { object: obj, name: prop, type: "reconfigured", oldValue: 7 },
+    { object: obj, name: prop, type: "reconfigured" },
   ]);
   Object.unobserve(obj, observer.callback);
 }
@@ -657,7 +657,7 @@
   { object: arr, name: '1', type: 'deleted', oldValue: 'b' },
   { object: arr, name: 'length', type: 'updated', oldValue: 2 },
   { object: arr, name: 'length', type: 'updated', oldValue: 1 },
-  { object: arr, name: 'length', type: 'reconfigured', oldValue: 10 },
+  { object: arr, name: 'length', type: 'reconfigured' },
   { object: arr2, name: '1', type: 'deleted', oldValue: 'beta' },
   { object: arr2, name: 'length', type: 'updated', oldValue: 2 },
   { object: arr2, name: 'length', type: 'reconfigured', oldValue: 1 },
diff --git a/tools/grokdump.py b/tools/grokdump.py
index 083dc68..7a07675 100755
--- a/tools/grokdump.py
+++ b/tools/grokdump.py
@@ -878,31 +878,32 @@
   153: "OBJECT_TEMPLATE_INFO_TYPE",
   154: "SIGNATURE_INFO_TYPE",
   155: "TYPE_SWITCH_INFO_TYPE",
-  156: "SCRIPT_TYPE",
-  157: "CODE_CACHE_TYPE",
-  158: "POLYMORPHIC_CODE_CACHE_TYPE",
-  159: "TYPE_FEEDBACK_INFO_TYPE",
-  160: "ALIASED_ARGUMENTS_ENTRY_TYPE",
-  163: "FIXED_ARRAY_TYPE",
+  156: "ALLOCATION_SITE_INFO_TYPE",
+  157: "SCRIPT_TYPE",
+  158: "CODE_CACHE_TYPE",
+  159: "POLYMORPHIC_CODE_CACHE_TYPE",
+  160: "TYPE_FEEDBACK_INFO_TYPE",
+  161: "ALIASED_ARGUMENTS_ENTRY_TYPE",
+  164: "FIXED_ARRAY_TYPE",
   145: "FIXED_DOUBLE_ARRAY_TYPE",
-  164: "SHARED_FUNCTION_INFO_TYPE",
-  165: "JS_MESSAGE_OBJECT_TYPE",
-  168: "JS_VALUE_TYPE",
-  169: "JS_DATE_TYPE",
-  170: "JS_OBJECT_TYPE",
-  171: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
-  172: "JS_MODULE_TYPE",
-  173: "JS_GLOBAL_OBJECT_TYPE",
-  174: "JS_BUILTINS_OBJECT_TYPE",
-  175: "JS_GLOBAL_PROXY_TYPE",
-  176: "JS_ARRAY_TYPE",
-  167: "JS_PROXY_TYPE",
-  179: "JS_WEAK_MAP_TYPE",
-  180: "JS_REGEXP_TYPE",
-  181: "JS_FUNCTION_TYPE",
-  166: "JS_FUNCTION_PROXY_TYPE",
-  161: "DEBUG_INFO_TYPE",
-  162: "BREAK_POINT_INFO_TYPE",
+  165: "SHARED_FUNCTION_INFO_TYPE",
+  166: "JS_MESSAGE_OBJECT_TYPE",
+  169: "JS_VALUE_TYPE",
+  170: "JS_DATE_TYPE",
+  171: "JS_OBJECT_TYPE",
+  172: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+  173: "JS_MODULE_TYPE",
+  174: "JS_GLOBAL_OBJECT_TYPE",
+  175: "JS_BUILTINS_OBJECT_TYPE",
+  176: "JS_GLOBAL_PROXY_TYPE",
+  177: "JS_ARRAY_TYPE",
+  168: "JS_PROXY_TYPE",
+  180: "JS_WEAK_MAP_TYPE",
+  181: "JS_REGEXP_TYPE",
+  182: "JS_FUNCTION_TYPE",
+  167: "JS_FUNCTION_PROXY_TYPE",
+  162: "DEBUG_INFO_TYPE",
+  163: "BREAK_POINT_INFO_TYPE",
 }
 
 
@@ -928,83 +929,84 @@
 # }
 # printf("}\n");
 KNOWN_MAPS = {
-  0x08081: (128, "MetaMap"),
-  0x080a5: (163, "FixedArrayMap"),
-  0x080c9: (130, "OddballMap"),
-  0x080ed: (163, "FixedCOWArrayMap"),
-  0x08111: (163, "ScopeInfoMap"),
-  0x08135: (132, "HeapNumberMap"),
-  0x08159: (133, "ForeignMap"),
-  0x0817d: (64, "SymbolMap"),
-  0x081a1: (68, "AsciiSymbolMap"),
-  0x081c5: (65, "ConsSymbolMap"),
-  0x081e9: (69, "ConsAsciiSymbolMap"),
-  0x0820d: (66, "ExternalSymbolMap"),
-  0x08231: (74, "ExternalSymbolWithAsciiDataMap"),
-  0x08255: (70, "ExternalAsciiSymbolMap"),
-  0x08279: (82, "ShortExternalSymbolMap"),
-  0x0829d: (90, "ShortExternalSymbolWithAsciiDataMap"),
-  0x082c1: (86, "ShortExternalAsciiSymbolMap"),
-  0x082e5: (0, "StringMap"),
-  0x08309: (4, "AsciiStringMap"),
-  0x0832d: (1, "ConsStringMap"),
-  0x08351: (5, "ConsAsciiStringMap"),
-  0x08375: (3, "SlicedStringMap"),
-  0x08399: (7, "SlicedAsciiStringMap"),
-  0x083bd: (2, "ExternalStringMap"),
-  0x083e1: (10, "ExternalStringWithAsciiDataMap"),
-  0x08405: (6, "ExternalAsciiStringMap"),
-  0x08429: (18, "ShortExternalStringMap"),
-  0x0844d: (26, "ShortExternalStringWithAsciiDataMap"),
-  0x08471: (22, "ShortExternalAsciiStringMap"),
-  0x08495: (0, "UndetectableStringMap"),
-  0x084b9: (4, "UndetectableAsciiStringMap"),
-  0x084dd: (145, "FixedDoubleArrayMap"),
-  0x08501: (134, "ByteArrayMap"),
-  0x08525: (135, "FreeSpaceMap"),
-  0x08549: (144, "ExternalPixelArrayMap"),
-  0x0856d: (136, "ExternalByteArrayMap"),
-  0x08591: (137, "ExternalUnsignedByteArrayMap"),
-  0x085b5: (138, "ExternalShortArrayMap"),
-  0x085d9: (139, "ExternalUnsignedShortArrayMap"),
-  0x085fd: (140, "ExternalIntArrayMap"),
-  0x08621: (141, "ExternalUnsignedIntArrayMap"),
-  0x08645: (142, "ExternalFloatArrayMap"),
-  0x08669: (163, "NonStrictArgumentsElementsMap"),
-  0x0868d: (143, "ExternalDoubleArrayMap"),
-  0x086b1: (129, "CodeMap"),
-  0x086d5: (131, "GlobalPropertyCellMap"),
-  0x086f9: (146, "OnePointerFillerMap"),
-  0x0871d: (146, "TwoPointerFillerMap"),
-  0x08741: (147, "AccessorInfoMap"),
-  0x08765: (148, "AccessorPairMap"),
-  0x08789: (149, "AccessCheckInfoMap"),
-  0x087ad: (150, "InterceptorInfoMap"),
-  0x087d1: (151, "CallHandlerInfoMap"),
-  0x087f5: (152, "FunctionTemplateInfoMap"),
-  0x08819: (153, "ObjectTemplateInfoMap"),
-  0x0883d: (154, "SignatureInfoMap"),
-  0x08861: (155, "TypeSwitchInfoMap"),
-  0x08885: (156, "ScriptMap"),
-  0x088a9: (157, "CodeCacheMap"),
-  0x088cd: (158, "PolymorphicCodeCacheMap"),
-  0x088f1: (159, "TypeFeedbackInfoMap"),
-  0x08915: (160, "AliasedArgumentsEntryMap"),
-  0x08939: (161, "DebugInfoMap"),
-  0x0895d: (162, "BreakPointInfoMap"),
-  0x08981: (163, "HashTableMap"),
-  0x089a5: (163, "FunctionContextMap"),
-  0x089c9: (163, "CatchContextMap"),
-  0x089ed: (163, "WithContextMap"),
-  0x08a11: (163, "BlockContextMap"),
-  0x08a35: (163, "ModuleContextMap"),
-  0x08a59: (163, "GlobalContextMap"),
-  0x08a7d: (163, "NativeContextMap"),
-  0x08aa1: (164, "SharedFunctionInfoMap"),
-  0x08ac5: (165, "JSMessageObjectMap"),
-  0x08ae9: (170, "ExternalMap"),
-  0x08b0d: (170, "NeanderMap"),
-  0x08b31: (170, ""),
+  0x08081: (134, "ByteArrayMap"),
+  0x080a9: (128, "MetaMap"),
+  0x080d1: (130, "OddballMap"),
+  0x080f9: (68, "AsciiSymbolMap"),
+  0x08121: (164, "FixedArrayMap"),
+  0x08149: (132, "HeapNumberMap"),
+  0x08171: (135, "FreeSpaceMap"),
+  0x08199: (146, "OnePointerFillerMap"),
+  0x081c1: (146, "TwoPointerFillerMap"),
+  0x081e9: (131, "GlobalPropertyCellMap"),
+  0x08211: (165, "SharedFunctionInfoMap"),
+  0x08239: (4, "AsciiStringMap"),
+  0x08261: (164, "NativeContextMap"),
+  0x08289: (129, "CodeMap"),
+  0x082b1: (164, "ScopeInfoMap"),
+  0x082d9: (164, "FixedCOWArrayMap"),
+  0x08301: (145, "FixedDoubleArrayMap"),
+  0x08329: (164, "HashTableMap"),
+  0x08351: (0, "StringMap"),
+  0x08379: (64, "SymbolMap"),
+  0x083a1: (1, "ConsStringMap"),
+  0x083c9: (5, "ConsAsciiStringMap"),
+  0x083f1: (3, "SlicedStringMap"),
+  0x08419: (7, "SlicedAsciiStringMap"),
+  0x08441: (65, "ConsSymbolMap"),
+  0x08469: (69, "ConsAsciiSymbolMap"),
+  0x08491: (66, "ExternalSymbolMap"),
+  0x084b9: (74, "ExternalSymbolWithAsciiDataMap"),
+  0x084e1: (70, "ExternalAsciiSymbolMap"),
+  0x08509: (2, "ExternalStringMap"),
+  0x08531: (10, "ExternalStringWithAsciiDataMap"),
+  0x08559: (6, "ExternalAsciiStringMap"),
+  0x08581: (82, "ShortExternalSymbolMap"),
+  0x085a9: (90, "ShortExternalSymbolWithAsciiDataMap"),
+  0x085d1: (86, "ShortExternalAsciiSymbolMap"),
+  0x085f9: (18, "ShortExternalStringMap"),
+  0x08621: (26, "ShortExternalStringWithAsciiDataMap"),
+  0x08649: (22, "ShortExternalAsciiStringMap"),
+  0x08671: (0, "UndetectableStringMap"),
+  0x08699: (4, "UndetectableAsciiStringMap"),
+  0x086c1: (144, "ExternalPixelArrayMap"),
+  0x086e9: (136, "ExternalByteArrayMap"),
+  0x08711: (137, "ExternalUnsignedByteArrayMap"),
+  0x08739: (138, "ExternalShortArrayMap"),
+  0x08761: (139, "ExternalUnsignedShortArrayMap"),
+  0x08789: (140, "ExternalIntArrayMap"),
+  0x087b1: (141, "ExternalUnsignedIntArrayMap"),
+  0x087d9: (142, "ExternalFloatArrayMap"),
+  0x08801: (143, "ExternalDoubleArrayMap"),
+  0x08829: (164, "NonStrictArgumentsElementsMap"),
+  0x08851: (164, "FunctionContextMap"),
+  0x08879: (164, "CatchContextMap"),
+  0x088a1: (164, "WithContextMap"),
+  0x088c9: (164, "BlockContextMap"),
+  0x088f1: (164, "ModuleContextMap"),
+  0x08919: (164, "GlobalContextMap"),
+  0x08941: (166, "JSMessageObjectMap"),
+  0x08969: (133, "ForeignMap"),
+  0x08991: (171, "NeanderMap"),
+  0x089b9: (156, "AllocationSiteInfoMap"),
+  0x089e1: (159, "PolymorphicCodeCacheMap"),
+  0x08a09: (157, "ScriptMap"),
+  0x08a31: (171, ""),
+  0x08a59: (171, "ExternalMap"),
+  0x08a81: (147, "AccessorInfoMap"),
+  0x08aa9: (148, "AccessorPairMap"),
+  0x08ad1: (149, "AccessCheckInfoMap"),
+  0x08af9: (150, "InterceptorInfoMap"),
+  0x08b21: (151, "CallHandlerInfoMap"),
+  0x08b49: (152, "FunctionTemplateInfoMap"),
+  0x08b71: (153, "ObjectTemplateInfoMap"),
+  0x08b99: (154, "SignatureInfoMap"),
+  0x08bc1: (155, "TypeSwitchInfoMap"),
+  0x08be9: (158, "CodeCacheMap"),
+  0x08c11: (160, "TypeFeedbackInfoMap"),
+  0x08c39: (161, "AliasedArgumentsEntryMap"),
+  0x08c61: (162, "DebugInfoMap"),
+  0x08c89: (163, "BreakPointInfoMap"),
 }
 
 
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index 2dda977..37856cb 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -382,6 +382,8 @@
             '../../src/macro-assembler.h',
             '../../src/mark-compact.cc',
             '../../src/mark-compact.h',
+            '../../src/marking-thread.h',
+            '../../src/marking-thread.cc',
             '../../src/messages.cc',
             '../../src/messages.h',
             '../../src/natives.h',
@@ -461,6 +463,8 @@
             '../../src/strtod.h',
             '../../src/stub-cache.cc',
             '../../src/stub-cache.h',
+            '../../src/sweeper-thread.h',
+            '../../src/sweeper-thread.cc',
             '../../src/token.cc',
             '../../src/token.h',
             '../../src/transitions-inl.h',