Push version 1.3.7 to trunk.

Reduced the size of generated code on ARM platforms by shrinking the constant pools.
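
For illustration, the savings come from loading frequently used heap roots through a dedicated roots register (r10) instead of materializing handles that end up in the constant pool; a representative before/after pair from the ARM code generator changes below:

  // Before: the handle is emitted into the constant pool.
  __ mov(r2, Operand(Factory::undefined_value()));
  // After: load the value from the roots table addressed by r10 (no pool entry).
  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);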

Changed build files to not include the 'ENV' user environment variable in the build environment.

Changed the handling of idle notifications.
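
IdleNotification now returns a bool. A minimal sketch of how an embedder might use it follows; the OnEmbedderIdle hook is hypothetical, and only the v8::V8::IdleNotification call itself is part of this change:

  #include <v8.h>

  // Hypothetical embedder idle hook.
  void OnEmbedderIdle(bool is_high_priority) {
    // IdleNotification() returns true once V8 has done as much cleanup
    // as it can; the embedder should then stop calling it until real
    // work has been done.
    if (v8::V8::IdleNotification(is_high_priority)) {
      // Stop scheduling idle notifications for now (embedder-specific).
    }
  }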


git-svn-id: http://v8.googlecode.com/svn/trunk@2752 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index ff39f04..d9d2b02 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,14 @@
+2009-08-25: Version 1.3.7
+
+        Reduced the size of generated code on ARM platforms by shrinking
+        the constant pools.
+
+        Changed build files to not include the 'ENV' user environment
+        variable in the build environment.
+
+        Changed the handling of idle notifications.
+
+
 2009-08-21: Version 1.3.6
 
         Add support for forceful termination of JavaScript execution.
diff --git a/SConstruct b/SConstruct
index efd34db..5e9747c 100644
--- a/SConstruct
+++ b/SConstruct
@@ -789,12 +789,20 @@
 
   context = BuildContext(options, env_overrides, samples=SplitList(env['sample']))
 
-  library_flags = context.AddRelevantFlags(os.environ, LIBRARY_FLAGS)
+  # Remove variables which can't be imported from the user's external
+  # environment into a construction environment.
+  user_environ = os.environ.copy()
+  try:
+    del user_environ['ENV']
+  except KeyError:
+    pass
+
+  library_flags = context.AddRelevantFlags(user_environ, LIBRARY_FLAGS)
   v8_flags = context.AddRelevantFlags(library_flags, V8_EXTRA_FLAGS)
   mksnapshot_flags = context.AddRelevantFlags(library_flags, MKSNAPSHOT_EXTRA_FLAGS)
   dtoa_flags = context.AddRelevantFlags(library_flags, DTOA_EXTRA_FLAGS)
   cctest_flags = context.AddRelevantFlags(v8_flags, CCTEST_EXTRA_FLAGS)
-  sample_flags = context.AddRelevantFlags(os.environ, SAMPLE_FLAGS)
+  sample_flags = context.AddRelevantFlags(user_environ, SAMPLE_FLAGS)
   d8_flags = context.AddRelevantFlags(library_flags, D8_FLAGS)
 
   context.flags = {
diff --git a/include/v8.h b/include/v8.h
index c7cc315..a40c068 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -2280,9 +2280,13 @@
   /**
    * Optional notification that the embedder is idle.
    * V8 uses the notification to reduce memory footprint.
+   * This call can be used repeatedly if the embedder remains idle.
    * \param is_high_priority tells whether the embedder is high priority.
+   * Returns true if the embedder should stop calling IdleNotification
+   * until real work has been done.  This indicates that V8 has done
+   * as much cleanup as it will be able to do.
    */
-  static void IdleNotification(bool is_high_priority);
+  static bool IdleNotification(bool is_high_priority);
 
   /**
    * Optional notification that the system is running low on memory.
diff --git a/src/api.cc b/src/api.cc
index 7d97fc6..bb38356 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -2604,8 +2604,8 @@
 }
 
 
-void v8::V8::IdleNotification(bool is_high_priority) {
-  i::V8::IdleNotification(is_high_priority);
+bool v8::V8::IdleNotification(bool is_high_priority) {
+  return i::V8::IdleNotification(is_high_priority);
 }
 
 
@@ -3335,7 +3335,7 @@
     flags &= ~(PROFILER_MODULE_HEAP_SNAPSHOT | PROFILER_MODULE_CPU);
     const int current_flags = i::Logger::GetActiveProfilerModules();
     i::Logger::ResumeProfiler(flags);
-    i::Heap::CollectAllGarbage();
+    i::Heap::CollectAllGarbage(false);
     i::Logger::PauseProfiler(~current_flags & flags);
   } else {
     i::Logger::ResumeProfiler(flags);
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 28524c8..5745a06 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -214,9 +214,13 @@
   // Enter an internal frame.
   __ EnterInternalFrame();
 
-  // Setup the context from the function argument.
+  // Set up the context from the function argument.
   __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
 
+  // Set up the roots register.
+  ExternalReference roots_address = ExternalReference::roots_address();
+  __ mov(r10, Operand(roots_address));
+
   // Push the function and the receiver onto the stack.
   __ push(r1);
   __ push(r2);
@@ -239,7 +243,7 @@
 
   // Initialize all JavaScript callee-saved registers, since they will be seen
   // by the garbage collector as part of handlers.
-  __ mov(r4, Operand(Factory::undefined_value()));
+  __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
   __ mov(r5, Operand(r4));
   __ mov(r6, Operand(r4));
   __ mov(r7, Operand(r4));
@@ -282,7 +286,7 @@
   { Label done;
     __ tst(r0, Operand(r0));
     __ b(ne, &done);
-    __ mov(r2, Operand(Factory::undefined_value()));
+    __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
     __ push(r2);
     __ add(r0, r0, Operand(1));
     __ bind(&done);
@@ -323,10 +327,10 @@
     __ tst(r2, Operand(kSmiTagMask));
     __ b(eq, &call_to_object);
 
-    __ mov(r3, Operand(Factory::null_value()));
+    __ LoadRoot(r3, Heap::kNullValueRootIndex);
     __ cmp(r2, r3);
     __ b(eq, &use_global_receiver);
-    __ mov(r3, Operand(Factory::undefined_value()));
+    __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
     __ cmp(r2, r3);
     __ b(eq, &use_global_receiver);
 
@@ -492,10 +496,10 @@
   __ ldr(r0, MemOperand(fp, kRecvOffset));
   __ tst(r0, Operand(kSmiTagMask));
   __ b(eq, &call_to_object);
-  __ mov(r1, Operand(Factory::null_value()));
+  __ LoadRoot(r1, Heap::kNullValueRootIndex);
   __ cmp(r0, r1);
   __ b(eq, &use_global_receiver);
-  __ mov(r1, Operand(Factory::undefined_value()));
+  __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
   __ cmp(r0, r1);
   __ b(eq, &use_global_receiver);
 
@@ -665,7 +669,7 @@
     // r1: function
     // r2: expected number of arguments
     // r3: code entry to call
-    __ mov(ip, Operand(Factory::undefined_value()));
+    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
     __ sub(r2, fp, Operand(r2, LSL, kPointerSizeLog2));
     __ sub(r2, r2, Operand(4 * kPointerSize));  // Adjust for frame.
 
diff --git a/src/arm/cfg-arm.cc b/src/arm/cfg-arm.cc
index 34e64b3..e0e563c 100644
--- a/src/arm/cfg-arm.cc
+++ b/src/arm/cfg-arm.cc
@@ -67,7 +67,7 @@
     __ add(fp, sp, Operand(2 * kPointerSize));
     int count = CfgGlobals::current()->fun()->scope()->num_stack_slots();
     if (count > 0) {
-      __ mov(ip, Operand(Factory::undefined_value()));
+      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
       for (int i = 0; i < count; i++) {
         __ push(ip);
       }
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 71ffaa2..12d828d 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -305,7 +305,7 @@
     // sp: stack pointer
     // fp: frame pointer
     // cp: callee's context
-    __ mov(r0, Operand(Factory::undefined_value()));
+    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
 
     function_return_.Bind();
     if (FLAG_trace) {
@@ -478,11 +478,11 @@
     JumpTarget loaded;
     JumpTarget materialize_true;
     materialize_true.Branch(cc_reg_);
-    __ mov(r0, Operand(Factory::false_value()));
+    __ LoadRoot(r0, Heap::kFalseValueRootIndex);
     frame_->EmitPush(r0);
     loaded.Jump();
     materialize_true.Bind();
-    __ mov(r0, Operand(Factory::true_value()));
+    __ LoadRoot(r0, Heap::kTrueValueRootIndex);
     frame_->EmitPush(r0);
     loaded.Bind();
     cc_reg_ = al;
@@ -499,7 +499,7 @@
     // Load "true" if necessary.
     if (true_target.is_linked()) {
       true_target.Bind();
-      __ mov(r0, Operand(Factory::true_value()));
+      __ LoadRoot(r0, Heap::kTrueValueRootIndex);
       frame_->EmitPush(r0);
     }
     // If both "true" and "false" need to be loaded jump across the code for
@@ -510,7 +510,7 @@
     // Load "false" if necessary.
     if (false_target.is_linked()) {
       false_target.Bind();
-      __ mov(r0, Operand(Factory::false_value()));
+      __ LoadRoot(r0, Heap::kFalseValueRootIndex);
       frame_->EmitPush(r0);
     }
     // A value is loaded on all paths reaching this point.
@@ -640,15 +640,18 @@
   // Fast case checks
 
   // Check if the value is 'false'.
-  __ cmp(r0, Operand(Factory::false_value()));
+  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+  __ cmp(r0, ip);
   false_target->Branch(eq);
 
   // Check if the value is 'true'.
-  __ cmp(r0, Operand(Factory::true_value()));
+  __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+  __ cmp(r0, ip);
   true_target->Branch(eq);
 
   // Check if the value is 'undefined'.
-  __ cmp(r0, Operand(Factory::undefined_value()));
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(r0, ip);
   false_target->Branch(eq);
 
   // Check if the value is a smi.
@@ -661,7 +664,8 @@
   frame_->EmitPush(r0);
   frame_->CallRuntime(Runtime::kToBool, 1);
   // Convert the result (r0) to a condition code.
-  __ cmp(r0, Operand(Factory::false_value()));
+  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+  __ cmp(r0, ip);
 
   cc_reg_ = ne;
 }
@@ -1185,7 +1189,7 @@
     // 'undefined') because we may have a (legal) redeclaration and we
     // must not destroy the current value.
     if (node->mode() == Variable::CONST) {
-      __ mov(r0, Operand(Factory::the_hole_value()));
+      __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
       frame_->EmitPush(r0);
     } else if (node->fun() != NULL) {
       LoadAndSpill(node->fun());
@@ -1725,9 +1729,11 @@
   // Both SpiderMonkey and kjs ignore null and undefined in contrast
   // to the specification.  12.6.4 mandates a call to ToObject.
   frame_->EmitPop(r0);
-  __ cmp(r0, Operand(Factory::undefined_value()));
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(r0, ip);
   exit.Branch(eq);
-  __ cmp(r0, Operand(Factory::null_value()));
+  __ LoadRoot(ip, Heap::kNullValueRootIndex);
+  __ cmp(r0, ip);
   exit.Branch(eq);
 
   // Stack layout in body:
@@ -1759,7 +1765,8 @@
   // Otherwise, we got a FixedArray, and we have to do a slow check.
   __ mov(r2, Operand(r0));
   __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
-  __ cmp(r1, Operand(Factory::meta_map()));
+  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
+  __ cmp(r1, ip);
   fixed_array.Branch(ne);
 
   // Get enum cache
@@ -1833,7 +1840,8 @@
   __ mov(r3, Operand(r0));
 
   // If the property has been removed while iterating, we just skip it.
-  __ cmp(r3, Operand(Factory::null_value()));
+  __ LoadRoot(ip, Heap::kNullValueRootIndex);
+  __ cmp(r3, ip);
   node->continue_target()->Branch(eq);
 
   end_del_check.Bind();
@@ -2093,7 +2101,7 @@
 
     // Fake a top of stack value (unneeded when FALLING) and set the
     // state in r2, then jump around the unlink blocks if any.
-    __ mov(r0, Operand(Factory::undefined_value()));
+    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
     frame_->EmitPush(r0);
     __ mov(r2, Operand(Smi::FromInt(FALLING)));
     if (nof_unlinks > 0) {
@@ -2135,7 +2143,7 @@
         frame_->EmitPush(r0);
       } else {
         // Fake TOS for targets that shadowed breaks and continues.
-        __ mov(r0, Operand(Factory::undefined_value()));
+        __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
         frame_->EmitPush(r0);
       }
       __ mov(r2, Operand(Smi::FromInt(JUMPING + i)));
@@ -2322,8 +2330,9 @@
                                                  r2,
                                                  &slow));
         if (potential_slot->var()->mode() == Variable::CONST) {
-          __ cmp(r0, Operand(Factory::the_hole_value()));
-          __ mov(r0, Operand(Factory::undefined_value()), LeaveCC, eq);
+          __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+          __ cmp(r0, ip);
+          __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
         }
         // There is always control flow to slow from
         // ContextSlotOperandCheckExtensions so we have to jump around
@@ -2360,8 +2369,9 @@
       // value.
       Comment cmnt(masm_, "[ Unhole const");
       frame_->EmitPop(r0);
-      __ cmp(r0, Operand(Factory::the_hole_value()));
-      __ mov(r0, Operand(Factory::undefined_value()), LeaveCC, eq);
+      __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+      __ cmp(r0, ip);
+      __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
       frame_->EmitPush(r0);
     }
   }
@@ -2404,7 +2414,8 @@
     __ bind(&next);
     // Terminate at global context.
     __ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset));
-    __ cmp(tmp2, Operand(Factory::global_context_map()));
+    __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
+    __ cmp(tmp2, ip);
     __ b(eq, &fast);
     // Check that extension is NULL.
     __ ldr(tmp2, ContextOperand(tmp, Context::EXTENSION_INDEX));
@@ -2501,7 +2512,8 @@
   __ ldr(r2, FieldMemOperand(r1, literal_offset));
 
   JumpTarget done;
-  __ cmp(r2, Operand(Factory::undefined_value()));
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(r2, ip);
   done.Branch(ne);
 
   // If the entry is undefined we call the runtime system to computed
@@ -2583,7 +2595,8 @@
 
   // Check whether we need to materialize the object literal boilerplate.
   // If so, jump to the deferred code.
-  __ cmp(r2, Operand(Factory::undefined_value()));
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(r2, Operand(ip));
   deferred->Branch(eq);
   deferred->BindExit();
 
@@ -2705,7 +2718,8 @@
 
   // Check whether we need to materialize the object literal boilerplate.
   // If so, jump to the deferred code.
-  __ cmp(r2, Operand(Factory::undefined_value()));
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(r2, Operand(ip));
   deferred->Branch(eq);
   deferred->BindExit();
 
@@ -3036,7 +3050,7 @@
 
   // Prepare stack for call to resolved function.
   LoadAndSpill(function);
-  __ mov(r2, Operand(Factory::undefined_value()));
+  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
   frame_->EmitPush(r2);  // Slot for receiver
   int arg_count = args->length();
   for (int i = 0; i < arg_count; i++) {
@@ -3180,7 +3194,7 @@
 
   // Non-JS objects have class null.
   null.Bind();
-  __ mov(r0, Operand(Factory::null_value()));
+  __ LoadRoot(r0, Heap::kNullValueRootIndex);
   frame_->EmitPush(r0);
 
   // All done.
@@ -3253,7 +3267,7 @@
     __ CallRuntime(Runtime::kLog, 2);
   }
 #endif
-  __ mov(r0, Operand(Factory::undefined_value()));
+  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
   frame_->EmitPush(r0);
 }
 
@@ -3274,7 +3288,7 @@
 void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
   VirtualFrame::SpilledScope spilled_scope;
   ASSERT(args->length() == 2);
-  __ mov(r0, Operand(Factory::undefined_value()));
+  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
   frame_->EmitPush(r0);
 }
 
@@ -3494,14 +3508,14 @@
       } else {
         // Default: Result of deleting non-global, not dynamically
         // introduced variables is false.
-        __ mov(r0, Operand(Factory::false_value()));
+        __ LoadRoot(r0, Heap::kFalseValueRootIndex);
       }
 
     } else {
       // Default: Result of deleting expressions is true.
       LoadAndSpill(node->expression());  // may have side-effects
       frame_->Drop();
-      __ mov(r0, Operand(Factory::true_value()));
+      __ LoadRoot(r0, Heap::kTrueValueRootIndex);
     }
     frame_->EmitPush(r0);
 
@@ -3554,7 +3568,7 @@
       case Token::VOID:
         // since the stack top is cached in r0, popping and then
         // pushing a value can be done by just writing to r0.
-        __ mov(r0, Operand(Factory::undefined_value()));
+        __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
         break;
 
       case Token::ADD: {
@@ -3880,14 +3894,16 @@
     if (left_is_null || right_is_null) {
       LoadAndSpill(left_is_null ? right : left);
       frame_->EmitPop(r0);
-      __ cmp(r0, Operand(Factory::null_value()));
+      __ LoadRoot(ip, Heap::kNullValueRootIndex);
+      __ cmp(r0, ip);
 
       // The 'null' value is only equal to 'undefined' if using non-strict
       // comparisons.
       if (op != Token::EQ_STRICT) {
         true_target()->Branch(eq);
 
-        __ cmp(r0, Operand(Factory::undefined_value()));
+        __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+        __ cmp(r0, Operand(ip));
         true_target()->Branch(eq);
 
         __ tst(r0, Operand(kSmiTagMask));
@@ -3924,7 +3940,8 @@
       __ tst(r1, Operand(kSmiTagMask));
       true_target()->Branch(eq);
       __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
-      __ cmp(r1, Operand(Factory::heap_number_map()));
+      __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+      __ cmp(r1, ip);
       cc_reg_ = eq;
 
     } else if (check->Equals(Heap::string_symbol())) {
@@ -3944,13 +3961,16 @@
       cc_reg_ = lt;
 
     } else if (check->Equals(Heap::boolean_symbol())) {
-      __ cmp(r1, Operand(Factory::true_value()));
+      __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+      __ cmp(r1, ip);
       true_target()->Branch(eq);
-      __ cmp(r1, Operand(Factory::false_value()));
+      __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+      __ cmp(r1, ip);
       cc_reg_ = eq;
 
     } else if (check->Equals(Heap::undefined_symbol())) {
-      __ cmp(r1, Operand(Factory::undefined_value()));
+      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+      __ cmp(r1, ip);
       true_target()->Branch(eq);
 
       __ tst(r1, Operand(kSmiTagMask));
@@ -3975,7 +3995,8 @@
       false_target()->Branch(eq);
 
       __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-      __ cmp(r1, Operand(Factory::null_value()));
+      __ LoadRoot(ip, Heap::kNullValueRootIndex);
+      __ cmp(r1, ip);
       true_target()->Branch(eq);
 
       // It can be an undetectable object.
@@ -4206,7 +4227,8 @@
           // executed, the code is identical to a normal store (see below).
           Comment cmnt(masm, "[ Init const");
           __ ldr(r2, cgen_->SlotOperand(slot, r2));
-          __ cmp(r2, Operand(Factory::the_hole_value()));
+          __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+          __ cmp(r2, ip);
           exit.Branch(ne);
         }
 
@@ -4939,7 +4961,7 @@
   // Tag and adjust back to start of new object.
   __ sub(result_reg, result_reg, Operand(HeapNumber::kSize - kHeapObjectTag));
   // Get heap number map into scratch2.
-  __ mov(scratch2, Operand(Factory::heap_number_map()));
+  __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
   // Store heap number map in new object.
   __ str(scratch2, FieldMemOperand(result_reg, HeapObject::kMapOffset));
 }
@@ -6090,7 +6112,8 @@
   __ bind(&loop);
   __ cmp(r2, Operand(r4));
   __ b(eq, &is_instance);
-  __ cmp(r2, Operand(Factory::null_value()));
+  __ LoadRoot(ip, Heap::kNullValueRootIndex);
+  __ cmp(r2, ip);
   __ b(eq, &is_not_instance);
   __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
   __ ldr(r2, FieldMemOperand(r2, Map::kPrototypeOffset));
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index d193ab9..0abe35b 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -842,7 +842,7 @@
 // formatting. See for example the command "objdump -d <binary file>".
 static const char* reg_names[kMaxRegisters] = {
   "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
-  "r8", "r9", "sl", "fp", "ip", "sp", "lr", "pc",
+  "r8", "r9", "r10", "fp", "ip", "sp", "lr", "pc",
 };
 
 
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 8781256..848d04b 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -87,7 +87,8 @@
   // Check that the properties array is a dictionary.
   __ ldr(t0, FieldMemOperand(t1, JSObject::kPropertiesOffset));
   __ ldr(r3, FieldMemOperand(t0, HeapObject::kMapOffset));
-  __ cmp(r3, Operand(Factory::hash_table_map()));
+  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+  __ cmp(r3, ip);
   __ b(ne, miss);
 
   // Compute the capacity mask.
@@ -254,9 +255,11 @@
 
   // Check for boolean.
   __ bind(&non_string);
-  __ cmp(r1, Operand(Factory::true_value()));
+  __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+  __ cmp(r1, ip);
   __ b(eq, &boolean);
-  __ cmp(r1, Operand(Factory::false_value()));
+  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+  __ cmp(r1, ip);
   __ b(ne, &miss);
   __ bind(&boolean);
   StubCompiler::GenerateLoadGlobalFunctionPrototype(
@@ -582,7 +585,8 @@
   __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
   // Check that the object is in fast mode (not dictionary).
   __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
-  __ cmp(r3, Operand(Factory::fixed_array_map()));
+  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+  __ cmp(r3, ip);
   __ b(ne, &slow);
   // Check that the key (index) is within bounds.
   __ ldr(r3, FieldMemOperand(r1, Array::kLengthOffset));
@@ -601,7 +605,8 @@
   __ bind(&fast);
   __ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2));
-  __ cmp(r0, Operand(Factory::the_hole_value()));
+  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+  __ cmp(r0, ip);
   // In case the loaded value is the_hole we have to consult GetProperty
   // to ensure the prototype chain is searched.
   __ b(eq, &slow);
@@ -661,7 +666,8 @@
   __ ldr(r3, FieldMemOperand(r3, JSObject::kElementsOffset));
   // Check that the object is in fast mode (not dictionary).
   __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
-  __ cmp(r2, Operand(Factory::fixed_array_map()));
+  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+  __ cmp(r2, ip);
   __ b(ne, &slow);
   // Untag the key (for checking against untagged length in the fixed array).
   __ mov(r1, Operand(r1, ASR, kSmiTagSize));
@@ -710,7 +716,8 @@
   __ bind(&array);
   __ ldr(r2, FieldMemOperand(r3, JSObject::kElementsOffset));
   __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
-  __ cmp(r1, Operand(Factory::fixed_array_map()));
+  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+  __ cmp(r1, ip);
   __ b(ne, &slow);
 
   // Check the key against the length in the array, compute the
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 4b02e2d..4e337c4 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -174,6 +174,13 @@
 }
 
 
+void MacroAssembler::LoadRoot(Register destination,
+                              Heap::RootListIndex index,
+                              Condition cond) {
+  ldr(destination, MemOperand(r10, index << kPointerSizeLog2), cond);
+}
+
+
 // Will clobber 4 registers: object, offset, scratch, ip.  The
 // register 'object' contains a heap object pointer.  The heap object
 // tag is shifted away.
@@ -714,7 +721,8 @@
     push(holder_reg);  // Temporarily save holder on the stack.
     // Read the first word and compare to the global_context_map.
     ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
-    cmp(holder_reg, Operand(Factory::global_context_map()));
+    LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
+    cmp(holder_reg, ip);
     Check(eq, "JSGlobalObject::global_context should be a global context.");
     pop(holder_reg);  // Restore holder.
   }
@@ -731,11 +739,13 @@
     // that ip is clobbered as part of cmp with an object Operand.
     push(holder_reg);  // Temporarily save holder on the stack.
     mov(holder_reg, ip);  // Move ip to its holding place.
-    cmp(holder_reg, Operand(Factory::null_value()));
+    LoadRoot(ip, Heap::kNullValueRootIndex);
+    cmp(holder_reg, ip);
     Check(ne, "JSGlobalProxy::context() should not be null.");
 
     ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
-    cmp(holder_reg, Operand(Factory::global_context_map()));
+    LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
+    cmp(holder_reg, ip);
     Check(eq, "JSGlobalObject::global_context should be a global context.");
     // Restore ip is not needed. ip is reloaded below.
     pop(holder_reg);  // Restore holder.
@@ -792,7 +802,8 @@
   // If the prototype or initial map is the hole, don't return it and
   // simply miss the cache instead. This will allow us to allocate a
   // prototype object on-demand in the runtime system.
-  cmp(result, Operand(Factory::the_hole_value()));
+  LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+  cmp(result, ip);
   b(eq, miss);
 
   // If the function does not have an initial map, we're done.
@@ -832,7 +843,7 @@
   if (num_arguments > 0) {
     add(sp, sp, Operand(num_arguments * kPointerSize));
   }
-  mov(r0, Operand(Factory::undefined_value()));
+  LoadRoot(r0, Heap::kUndefinedValueRootIndex);
 }
 
 
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index ab74805..a35c98a 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -89,6 +89,10 @@
   void Ret(Condition cond = al);
   // Jumps to the label at the index given by the Smi in "index".
   void SmiJumpTable(Register index, Vector<Label*> targets);
+  // Load an object from the root table.
+  void LoadRoot(Register destination,
+                Heap::RootListIndex index,
+                Condition cond = al);
 
   // Sets the remembered set bit for [address+offset], where address is the
   // address of the heap object 'object'.  The address must be in the first 8K
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 393db59..03e0779 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -395,7 +395,8 @@
       __ mov(scratch, Operand(Handle<Object>(cell)));
       __ ldr(scratch,
              FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
-      __ cmp(scratch, Operand(Factory::the_hole_value()));
+      __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+      __ cmp(scratch, ip);
       __ b(ne, miss);
     }
     object = JSObject::cast(object->GetPrototype());
@@ -667,9 +668,11 @@
     case BOOLEAN_CHECK: {
       Label fast;
       // Check that the object is a boolean.
-      __ cmp(r1, Operand(Factory::true_value()));
+      __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+      __ cmp(r1, ip);
       __ b(eq, &fast);
-      __ cmp(r1, Operand(Factory::false_value()));
+      __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+      __ cmp(r1, ip);
       __ b(ne, &miss);
       __ bind(&fast);
       // Check that the maps starting from the prototype haven't changed.
@@ -688,7 +691,8 @@
       __ ldr(r3, FieldMemOperand(r1, JSObject::kElementsOffset));
       // Check that the object is in fast mode (not dictionary).
       __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
-      __ cmp(r2, Operand(Factory::fixed_array_map()));
+      __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+      __ cmp(r2, ip);
       __ b(ne, &miss);
       break;
 
@@ -1108,7 +1112,8 @@
 
   // Check for deleted property if property can actually be deleted.
   if (!is_dont_delete) {
-    __ cmp(r0, Operand(Factory::the_hole_value()));
+    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+    __ cmp(r0, ip);
     __ b(eq, &miss);
   }
 
diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc
index d3dabf8..91952f3 100644
--- a/src/arm/virtual-frame-arm.cc
+++ b/src/arm/virtual-frame-arm.cc
@@ -139,7 +139,7 @@
     Comment cmnt(masm(), "[ Allocate space for locals");
     Adjust(count);
       // Initialize stack slots with 'undefined' value.
-    __ mov(ip, Operand(Factory::undefined_value()));
+    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
     for (int i = 0; i < count; i++) {
       __ push(ip);
     }
diff --git a/src/assembler.cc b/src/assembler.cc
index 5d0310d..546490e 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -563,6 +563,11 @@
 }
 
 
+ExternalReference ExternalReference::roots_address() {
+  return ExternalReference(Heap::roots_address());
+}
+
+
 ExternalReference ExternalReference::address_of_stack_guard_limit() {
   return ExternalReference(StackGuard::address_of_jslimit());
 }
diff --git a/src/assembler.h b/src/assembler.h
index 1ddc8a3..e217918 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -401,6 +401,9 @@
   // Static variable Factory::the_hole_value.location()
   static ExternalReference the_hole_value_location();
 
+  // Static variable Heap::roots_address()
+  static ExternalReference roots_address();
+
   // Static variable StackGuard::address_of_jslimit()
   static ExternalReference address_of_stack_guard_limit();
 
diff --git a/src/debug.cc b/src/debug.cc
index f2a2814..faeb29b 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -1548,8 +1548,8 @@
   // Perform two GCs to get rid of all unreferenced scripts. The first GC gets
   // rid of all the cached script wrappers and the second gets rid of the
   // scripts which is no longer referenced.
-  Heap::CollectAllGarbage();
-  Heap::CollectAllGarbage();
+  Heap::CollectAllGarbage(false);
+  Heap::CollectAllGarbage(false);
 
   ASSERT(script_cache_ == NULL);
   script_cache_ = new ScriptCache();
@@ -1599,7 +1599,7 @@
 
   // Perform GC to get unreferenced scripts evicted from the cache before
   // returning the content.
-  Heap::CollectAllGarbage();
+  Heap::CollectAllGarbage(false);
 
   // Get the scripts from the cache.
   return script_cache_->GetScripts();
diff --git a/src/execution.cc b/src/execution.cc
index 0ad55bd..7c42e5e 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -677,7 +677,7 @@
 
 v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
   // All allocation spaces other than NEW_SPACE have the same effect.
-  Heap::CollectAllGarbage();
+  Heap::CollectAllGarbage(false);
   return v8::Undefined();
 }
 
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 114ae0d..0646878 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -238,7 +238,7 @@
         amount_of_external_allocated_memory_ -
         amount_of_external_allocated_memory_at_last_global_gc_;
     if (amount_since_last_global_gc > external_allocation_limit_) {
-      CollectAllGarbage();
+      CollectAllGarbage(false);
     }
   } else {
     // Avoid underflow.
@@ -285,7 +285,7 @@
     }                                                                     \
     if (!__object__->IsRetryAfterGC()) RETURN_EMPTY;                      \
     Counters::gc_last_resort_from_handles.Increment();                    \
-    Heap::CollectAllGarbage();                                            \
+    Heap::CollectAllGarbage(false);                                       \
     {                                                                     \
       AlwaysAllocateScope __scope__;                                      \
       __object__ = FUNCTION_CALL;                                         \
diff --git a/src/heap.cc b/src/heap.cc
index ad25f93..ef4c2d4 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -332,7 +332,7 @@
   // informed decisions about when to force a collection.
   if (!FLAG_expose_gc && context_disposed_pending_) {
     HistogramTimerScope scope(&Counters::gc_context);
-    CollectAllGarbage();
+    CollectAllGarbage(false);
   }
   context_disposed_pending_ = false;
 }
@@ -465,8 +465,9 @@
     old_gen_allocation_limit_ =
         old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
     old_gen_exhausted_ = false;
+  } else {
+    Scavenge();
   }
-  Scavenge();
   Counters::objs_since_last_young.Set(0);
 
   PostGarbageCollectionProcessing();
@@ -520,6 +521,12 @@
 
   Counters::objs_since_last_full.Set(0);
   context_disposed_pending_ = false;
+
+  Scavenge();
+
+  // Shrink new space as much as possible after compacting full
+  // garbage collections.
+  if (is_compacting) new_space_.Shrink();
 }
 
 
@@ -668,8 +675,6 @@
       survived_since_last_expansion_ > new_space_.Capacity()) {
     // Grow the size of new space if there is room to grow and enough
     // data has survived scavenge since the last expansion.
-    // TODO(1240712): NewSpace::Grow has a return value which is
-    // ignored here.
     new_space_.Grow();
     survived_since_last_expansion_ = 0;
   }
diff --git a/src/heap.h b/src/heap.h
index ac6f5be..9fd3fec 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -629,7 +629,7 @@
 
   // Performs a full garbage collection. Force compaction if the
   // parameter is true.
-  static void CollectAllGarbage(bool force_compaction = false);
+  static void CollectAllGarbage(bool force_compaction);
 
   // Performs a full garbage collection if a context has been disposed
   // since the last time the check was performed.
@@ -733,6 +733,9 @@
   // Update the next script id.
   static inline void SetLastScriptId(Object* last_script_id);
 
+  // Generated code can embed this address to get access to the roots.
+  static Object** roots_address() { return roots_; }
+
 #ifdef DEBUG
   static void Print();
   static void PrintHandles();
@@ -839,6 +842,59 @@
            > old_gen_allocation_limit_;
   }
 
+  // Can be called when the embedding application is idle.
+  static bool IdleNotification() {
+    static const int kIdlesBeforeCollection = 7;
+    static int number_idle_notifications = 0;
+    static int last_gc_count = gc_count_;
+
+    bool finished = false;
+
+    if (last_gc_count == gc_count_) {
+      number_idle_notifications++;
+    } else {
+      number_idle_notifications = 0;
+      last_gc_count = gc_count_;
+    }
+
+    if (number_idle_notifications >= kIdlesBeforeCollection) {
+      // The first time through we collect without forcing compaction.
+      // The second time through we force compaction and quit.
+      bool force_compaction =
+          number_idle_notifications > kIdlesBeforeCollection;
+      CollectAllGarbage(force_compaction);
+      last_gc_count = gc_count_;
+      if (force_compaction) {
+        number_idle_notifications = 0;
+        finished = true;
+      }
+    }
+
+    // Uncommit unused memory in new space.
+    Heap::UncommitFromSpace();
+    return finished;
+  }
+
+  // Declare all the root indices.
+  enum RootListIndex {
+#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
+    STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
+#undef ROOT_INDEX_DECLARATION
+
+// Utility type maps
+#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
+  STRUCT_LIST(DECLARE_STRUCT_MAP)
+#undef DECLARE_STRUCT_MAP
+
+#define SYMBOL_INDEX_DECLARATION(name, str) k##name##RootIndex,
+    SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
+#undef SYMBOL_INDEX_DECLARATION
+
+    kSymbolTableRootIndex,
+    kStrongRootListLength = kSymbolTableRootIndex,
+    kRootListLength
+  };
+
  private:
   static int semispace_size_;
   static int initial_semispace_size_;
@@ -923,26 +979,6 @@
   // last GC.
   static int old_gen_exhausted_;
 
-  // Declare all the root indices.
-  enum RootListIndex {
-#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
-    STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
-#undef ROOT_INDEX_DECLARATION
-
-// Utility type maps
-#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
-  STRUCT_LIST(DECLARE_STRUCT_MAP)
-#undef DECLARE_STRUCT_MAP
-
-#define SYMBOL_INDEX_DECLARATION(name, str) k##name##RootIndex,
-    SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
-#undef SYMBOL_DECLARATION
-
-    kSymbolTableRootIndex,
-    kStrongRootListLength = kSymbolTableRootIndex,
-    kRootListLength
-  };
-
   static Object* roots_[kRootListLength];
 
   struct StringTypeTable {
diff --git a/src/mksnapshot.cc b/src/mksnapshot.cc
index 4891f37..80789eb 100644
--- a/src/mksnapshot.cc
+++ b/src/mksnapshot.cc
@@ -171,7 +171,7 @@
     }
   }
   // Get rid of unreferenced scripts with a global GC.
-  i::Heap::CollectAllGarbage();
+  i::Heap::CollectAllGarbage(false);
   i::Serializer ser;
   ser.Serialize();
   v8::internal::byte* bytes;
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 91aae2f..7f3628d 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -814,15 +814,13 @@
 
 Failure* Failure::Construct(Type type, int value) {
   int info = (value << kFailureTypeTagSize) | type;
-  // TODO(X64): Stop using Smi validation for non-smi checks, even if they
-  // happen to be identical at the moment.
-  ASSERT(Smi::IsValid(info));  // Same validation check as in Smi
+  ASSERT(((info << kFailureTagSize) >> kFailureTagSize) == info);
   return reinterpret_cast<Failure*>(
       (static_cast<intptr_t>(info) << kFailureTagSize) | kFailureTag);
 }
 
 
-bool Smi::IsValid(int value) {
+bool Smi::IsValid(intptr_t value) {
 #ifdef DEBUG
   bool in_range = (value >= kMinValue) && (value <= kMaxValue);
 #endif
@@ -937,12 +935,13 @@
 
 
 Address MapWord::DecodeMapAddress(MapSpace* map_space) {
-  int map_page_index = (value_ & kMapPageIndexMask) >> kMapPageIndexShift;
+  int map_page_index =
+      static_cast<int>((value_ & kMapPageIndexMask) >> kMapPageIndexShift);
   ASSERT_MAP_PAGE_INDEX(map_page_index);
 
-  int map_page_offset =
+  int map_page_offset = static_cast<int>(
       ((value_ & kMapPageOffsetMask) >> kMapPageOffsetShift)
-      << kObjectAlignmentBits;
+      << kObjectAlignmentBits);
 
   return (map_space->PageAddress(map_page_index) + map_page_offset);
 }
diff --git a/src/objects.h b/src/objects.h
index a402961..763752b 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -905,7 +905,7 @@
   static inline Smi* FromIntptr(intptr_t value);
 
   // Returns whether value can be represented in a Smi.
-  static inline bool IsValid(int value);
+  static inline bool IsValid(intptr_t value);
 
   static inline bool IsIntptrValid(intptr_t);
 
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 0a2d990..de6cd12 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -54,10 +54,6 @@
 #define _WIN32_WINNT 0x500
 #endif
 
-#ifdef  _WIN64
-#error Windows 64-bit blatforms not supported
-#endif
-
 #include <windows.h>
 
 #include <time.h>  // For LocalOffset() implementation.
@@ -1190,6 +1186,9 @@
   memset(&context, 0, sizeof(context));
   context.ContextFlags = CONTEXT_CONTROL;
   context.ContextFlags = CONTEXT_CONTROL;
+#ifdef  _WIN64
+  // TODO(X64): Implement context capture.
+#else
   __asm    call x
   __asm x: pop eax
   __asm    mov context.Eip, eax
@@ -1199,15 +1198,22 @@
   // capture the context instead of inline assembler. However it is
   // only available on XP, Vista, Server 2003 and Server 2008 which
   // might not be sufficient.
+#endif
 
   // Initialize the stack walking
   STACKFRAME64 stack_frame;
   memset(&stack_frame, 0, sizeof(stack_frame));
+#ifdef  _WIN64
+  stack_frame.AddrPC.Offset = context.Rip;
+  stack_frame.AddrFrame.Offset = context.Rbp;
+  stack_frame.AddrStack.Offset = context.Rsp;
+#else
   stack_frame.AddrPC.Offset = context.Eip;
-  stack_frame.AddrPC.Mode = AddrModeFlat;
   stack_frame.AddrFrame.Offset = context.Ebp;
-  stack_frame.AddrFrame.Mode = AddrModeFlat;
   stack_frame.AddrStack.Offset = context.Esp;
+#endif
+  stack_frame.AddrPC.Mode = AddrModeFlat;
+  stack_frame.AddrFrame.Mode = AddrModeFlat;
   stack_frame.AddrStack.Mode = AddrModeFlat;
   int frames_count = 0;
 
diff --git a/src/runtime.cc b/src/runtime.cc
index b3e8aa4..845ac63 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -7263,7 +7263,7 @@
   ASSERT(args.length() == 3);
 
   // First perform a full GC in order to avoid references from dead objects.
-  Heap::CollectAllGarbage();
+  Heap::CollectAllGarbage(false);
 
   // Check parameters.
   CONVERT_CHECKED(JSObject, target, args[0]);
@@ -7339,7 +7339,7 @@
   ASSERT(args.length() == 2);
 
   // First perform a full GC in order to avoid dead objects.
-  Heap::CollectAllGarbage();
+  Heap::CollectAllGarbage(false);
 
   // Check parameters.
   CONVERT_CHECKED(JSFunction, constructor, args[0]);
@@ -7633,7 +7633,7 @@
     // Handle last resort GC and make sure to allow future allocations
     // to grow the heap without causing GCs (if possible).
     Counters::gc_last_resort_from_js.Increment();
-    Heap::CollectAllGarbage();
+    Heap::CollectAllGarbage(false);
   }
 }
 
diff --git a/src/serialize.cc b/src/serialize.cc
index 963138e..d2fd1e4 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -672,13 +672,17 @@
       UNCLASSIFIED,
       2,
       "Factory::the_hole_value().location()");
-  Add(ExternalReference::address_of_stack_guard_limit().address(),
+  Add(ExternalReference::roots_address().address(),
       UNCLASSIFIED,
       3,
+      "Heap::roots_address()");
+  Add(ExternalReference::address_of_stack_guard_limit().address(),
+      UNCLASSIFIED,
+      4,
       "StackGuard::address_of_jslimit()");
   Add(ExternalReference::address_of_regexp_stack_limit().address(),
       UNCLASSIFIED,
-      4,
+      5,
       "RegExpStack::limit_address()");
   Add(ExternalReference::new_space_start().address(),
       UNCLASSIFIED,
@@ -699,36 +703,36 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
   Add(ExternalReference::debug_break().address(),
       UNCLASSIFIED,
-      5,
+      10,
       "Debug::Break()");
   Add(ExternalReference::debug_step_in_fp_address().address(),
       UNCLASSIFIED,
-      10,
+      11,
       "Debug::step_in_fp_addr()");
 #endif
   Add(ExternalReference::double_fp_operation(Token::ADD).address(),
       UNCLASSIFIED,
-      11,
+      12,
       "add_two_doubles");
   Add(ExternalReference::double_fp_operation(Token::SUB).address(),
       UNCLASSIFIED,
-      12,
+      13,
       "sub_two_doubles");
   Add(ExternalReference::double_fp_operation(Token::MUL).address(),
       UNCLASSIFIED,
-      13,
+      14,
       "mul_two_doubles");
   Add(ExternalReference::double_fp_operation(Token::DIV).address(),
       UNCLASSIFIED,
-      14,
+      15,
       "div_two_doubles");
   Add(ExternalReference::double_fp_operation(Token::MOD).address(),
       UNCLASSIFIED,
-      15,
+      16,
       "mod_two_doubles");
   Add(ExternalReference::compare_doubles().address(),
       UNCLASSIFIED,
-      16,
+      17,
       "compare_doubles");
 }
 
diff --git a/src/spaces.cc b/src/spaces.cc
index 337f014..45e82f4 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -951,15 +951,43 @@
 }
 
 
-bool NewSpace::Grow() {
+void NewSpace::Grow() {
   ASSERT(Capacity() < MaximumCapacity());
-  // TODO(1240712): Failure to double the from space can result in
-  // semispaces of different sizes.  In the event of that failure, the
-  // to space doubling should be rolled back before returning false.
-  if (!to_space_.Grow() || !from_space_.Grow()) return false;
+  if (to_space_.Grow()) {
+    // Only grow from space if we managed to grow to space.
+    if (!from_space_.Grow()) {
+      // If we managed to grow to space but couldn't grow from space,
+      // attempt to shrink to space.
+      if (!to_space_.ShrinkTo(from_space_.Capacity())) {
+        // We are in an inconsistent state because we could not
+        // commit/uncommit memory from new space.
+        V8::FatalProcessOutOfMemory("Failed to grow new space.");
+      }
+    }
+  }
   allocation_info_.limit = to_space_.high();
   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-  return true;
+}
+
+
+void NewSpace::Shrink() {
+  int new_capacity = Max(InitialCapacity(), 2 * Size());
+  int rounded_new_capacity = RoundUp(new_capacity, OS::AllocateAlignment());
+  if (rounded_new_capacity < Capacity() &&
+      to_space_.ShrinkTo(rounded_new_capacity))  {
+    // Only shrink from space if we managed to shrink to space.
+    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
+      // If we managed to shrink to space but couldn't shrink from
+      // space, attempt to grow to space again.
+      if (!to_space_.GrowTo(from_space_.Capacity())) {
+        // We are in an inconsistent state because we could not
+        // commit/uncommit memory from new space.
+        V8::FatalProcessOutOfMemory("Failed to shrink new space.");
+      }
+    }
+  }
+  allocation_info_.limit = to_space_.high();
+  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }
 
 
@@ -1058,6 +1086,7 @@
   // otherwise.  In the mark-compact collector, the memory region of the from
   // space is used as the marking stack. It requires contiguous memory
   // addresses.
+  initial_capacity_ = initial_capacity;
   capacity_ = initial_capacity;
   maximum_capacity_ = maximum_capacity;
   committed_ = false;
@@ -1091,6 +1120,32 @@
 }
 
 
+bool SemiSpace::GrowTo(int new_capacity) {
+  ASSERT(new_capacity <= maximum_capacity_);
+  ASSERT(new_capacity > capacity_);
+  size_t delta = new_capacity - capacity_;
+  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
+  if (!MemoryAllocator::CommitBlock(high(), delta, executable())) {
+    return false;
+  }
+  capacity_ = new_capacity;
+  return true;
+}
+
+
+bool SemiSpace::ShrinkTo(int new_capacity) {
+  ASSERT(new_capacity >= initial_capacity_);
+  ASSERT(new_capacity < capacity_);
+  size_t delta = capacity_ - new_capacity;
+  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
+  if (!MemoryAllocator::UncommitBlock(high() - delta, delta)) {
+    return false;
+  }
+  capacity_ = new_capacity;
+  return true;
+}
+
+
 #ifdef DEBUG
 void SemiSpace::Print() { }
 
diff --git a/src/spaces.h b/src/spaces.h
index f12e0e4..98663db 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -1010,6 +1010,15 @@
   // address range to grow).
   bool Grow();
 
+  // Grow the semispace to the new capacity.  The new capacity
+  // requested must be larger than the current capacity.
+  bool GrowTo(int new_capacity);
+
+  // Shrinks the semispace to the new capacity.  The new capacity
+  // requested must be more than the amount of used memory in the
+  // semispace and less than the current capacity.
+  bool ShrinkTo(int new_capacity);
+
   // Returns the start address of the space.
   Address low() { return start_; }
   // Returns one past the end address of the space.
@@ -1057,11 +1066,14 @@
   // Returns the maximum capacity of the semi space.
   int MaximumCapacity() { return maximum_capacity_; }
 
+  // Returns the initial capacity of the semi space.
+  int InitialCapacity() { return initial_capacity_; }
 
  private:
   // The current and maximum capacity of the space.
   int capacity_;
   int maximum_capacity_;
+  int initial_capacity_;
 
   // The start address of the space.
   Address start_;
@@ -1152,8 +1164,11 @@
   void Flip();
 
   // Grow the capacity of the semispaces.  Assumes that they are not at
-  // their maximum capacity.  Returns a flag indicating success or failure.
-  bool Grow();
+  // their maximum capacity.
+  void Grow();
+
+  // Shrink the capacity of the semispaces.
+  void Shrink();
 
   // True if the address or object lies in the address range of either
   // semispace (not necessarily below the allocation pointer).
@@ -1181,6 +1196,12 @@
     return to_space_.MaximumCapacity();
   }
 
+  // Returns the initial capacity of a semispace.
+  int InitialCapacity() {
+    ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
+    return to_space_.InitialCapacity();
+  }
+
   // Return the address of the allocation pointer in the active semispace.
   Address top() { return allocation_info_.top; }
   // Return the address of the first object in the active semispace.
diff --git a/src/utils.h b/src/utils.h
index 91662ee..275dbb5 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -114,8 +114,10 @@
 
 
 // Returns true if (addr + offset) is aligned.
-static inline bool IsAddressAligned(Address addr, int alignment, int offset) {
-  int offs = OffsetFrom(addr + offset);
+static inline bool IsAddressAligned(Address addr,
+                                    intptr_t alignment,
+                                    int offset) {
+  intptr_t offs = OffsetFrom(addr + offset);
   return IsAligned(offs, alignment);
 }
 
@@ -446,15 +448,15 @@
 
 
 inline Vector<const char> CStrVector(const char* data) {
-  return Vector<const char>(data, strlen(data));
+  return Vector<const char>(data, static_cast<int>(strlen(data)));
 }
 
 inline Vector<char> MutableCStrVector(char* data) {
-  return Vector<char>(data, strlen(data));
+  return Vector<char>(data, static_cast<int>(strlen(data)));
 }
 
 inline Vector<char> MutableCStrVector(char* data, int max) {
-  int length = strlen(data);
+  int length = static_cast<int>(strlen(data));
   return Vector<char>(data, (length < max) ? length : max);
 }
 
diff --git a/src/v8.cc b/src/v8.cc
index faec986..bbcc9d1 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -157,13 +157,13 @@
 }
 
 
-void V8::IdleNotification(bool is_high_priority) {
-  if (!FLAG_use_idle_notification) return;
+bool V8::IdleNotification(bool is_high_priority) {
+  if (!FLAG_use_idle_notification) return false;
   // Ignore high priority instances of V8.
-  if (is_high_priority) return;
+  if (is_high_priority) return false;
 
-  // Uncommit unused memory in new space.
-  Heap::UncommitFromSpace();
+  // Tell the heap that it may want to adjust.
+  return Heap::IdleNotification();
 }
 
 
diff --git a/src/v8.h b/src/v8.h
index 1ca3245..50be6df 100644
--- a/src/v8.h
+++ b/src/v8.h
@@ -100,7 +100,7 @@
   static Smi* RandomPositiveSmi();
 
   // Idle notification directly from the API.
-  static void IdleNotification(bool is_high_priority);
+  static bool IdleNotification(bool is_high_priority);
 
  private:
   // True if engine is currently running
diff --git a/src/version.cc b/src/version.cc
index 69b7510..65fd116 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     1
 #define MINOR_VERSION     3
-#define BUILD_NUMBER      6
+#define BUILD_NUMBER      7
 #define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false
 
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 462b960..d61b37b 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -4586,7 +4586,7 @@
                          : RelocInfo::CODE_TARGET_CONTEXT;
   Result answer = frame_->CallLoadIC(mode);
   // A test rax instruction following the call signals that the inobject
-  // property case was inlined.  Ensure that there is not a test eax
+  // property case was inlined.  Ensure that there is not a test rax
   // instruction here.
   masm_->nop();
   // Discard the global object. The result is in answer.
@@ -6264,8 +6264,8 @@
         } else {
           unsigned_left >>= shift_amount;
         }
-        ASSERT(Smi::IsValid(unsigned_left));  // Converted to signed.
-        answer_object = Smi::FromInt(unsigned_left);  // Converted to signed.
+        ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
+        answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
         break;
       }
     default:
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index 7b7e1a3..c8f855b 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -462,11 +462,11 @@
     CHECK(source->IsExternal());
     CHECK_EQ(resource,
              static_cast<TestResource*>(source->GetExternalStringResource()));
-    v8::internal::Heap::CollectAllGarbage();
+    v8::internal::Heap::CollectAllGarbage(false);
     CHECK_EQ(0, TestResource::dispose_count);
   }
   v8::internal::CompilationCache::Clear();
-  v8::internal::Heap::CollectAllGarbage();
+  v8::internal::Heap::CollectAllGarbage(false);
   CHECK_EQ(1, TestResource::dispose_count);
 }
 
@@ -483,11 +483,11 @@
     Local<Value> value = script->Run();
     CHECK(value->IsNumber());
     CHECK_EQ(7, value->Int32Value());
-    v8::internal::Heap::CollectAllGarbage();
+    v8::internal::Heap::CollectAllGarbage(false);
     CHECK_EQ(0, TestAsciiResource::dispose_count);
   }
   v8::internal::CompilationCache::Clear();
-  v8::internal::Heap::CollectAllGarbage();
+  v8::internal::Heap::CollectAllGarbage(false);
   CHECK_EQ(1, TestAsciiResource::dispose_count);
 }
 
@@ -505,11 +505,11 @@
     Local<Value> value = script->Run();
     CHECK(value->IsNumber());
     CHECK_EQ(7, value->Int32Value());
-    v8::internal::Heap::CollectAllGarbage();
+    v8::internal::Heap::CollectAllGarbage(false);
     CHECK_EQ(0, TestResource::dispose_count);
   }
   v8::internal::CompilationCache::Clear();
-  v8::internal::Heap::CollectAllGarbage();
+  v8::internal::Heap::CollectAllGarbage(false);
   CHECK_EQ(1, TestResource::dispose_count);
 }
 
@@ -528,11 +528,11 @@
     Local<Value> value = script->Run();
     CHECK(value->IsNumber());
     CHECK_EQ(7, value->Int32Value());
-    v8::internal::Heap::CollectAllGarbage();
+    v8::internal::Heap::CollectAllGarbage(false);
     CHECK_EQ(0, TestAsciiResource::dispose_count);
   }
   v8::internal::CompilationCache::Clear();
-  v8::internal::Heap::CollectAllGarbage();
+  v8::internal::Heap::CollectAllGarbage(false);
   CHECK_EQ(1, TestAsciiResource::dispose_count);
 }
 
@@ -550,8 +550,8 @@
     i::Handle<i::String> isymbol = i::Factory::SymbolFromString(istring);
     CHECK(isymbol->IsSymbol());
   }
-  i::Heap::CollectAllGarbage();
-  i::Heap::CollectAllGarbage();
+  i::Heap::CollectAllGarbage(false);
+  i::Heap::CollectAllGarbage(false);
 }
 
 
@@ -568,8 +568,8 @@
     i::Handle<i::String> isymbol = i::Factory::SymbolFromString(istring);
     CHECK(isymbol->IsSymbol());
   }
-  i::Heap::CollectAllGarbage();
-  i::Heap::CollectAllGarbage();
+  i::Heap::CollectAllGarbage(false);
+  i::Heap::CollectAllGarbage(false);
 }
 
 
@@ -1333,12 +1333,12 @@
 
   // Check reading and writing aligned pointers.
   obj->SetPointerInInternalField(0, aligned);
-  i::Heap::CollectAllGarbage();
+  i::Heap::CollectAllGarbage(false);
   CHECK_EQ(aligned, obj->GetPointerFromInternalField(0));
 
   // Check reading and writing unaligned pointers.
   obj->SetPointerInInternalField(0, unaligned);
-  i::Heap::CollectAllGarbage();
+  i::Heap::CollectAllGarbage(false);
   CHECK_EQ(unaligned, obj->GetPointerFromInternalField(0));
 
   delete[] data;
@@ -1351,7 +1351,7 @@
 
   // Ensure that the test starts with an fresh heap to test whether the hash
   // code is based on the address.
-  i::Heap::CollectAllGarbage();
+  i::Heap::CollectAllGarbage(false);
   Local<v8::Object> obj = v8::Object::New();
   int hash = obj->GetIdentityHash();
   int hash1 = obj->GetIdentityHash();
@@ -1361,7 +1361,7 @@
   // objects should not be assigned the same hash code. If the test below fails
   // the random number generator should be evaluated.
   CHECK_NE(hash, hash2);
-  i::Heap::CollectAllGarbage();
+  i::Heap::CollectAllGarbage(false);
   int hash3 = v8::Object::New()->GetIdentityHash();
   // Make sure that the identity hash is not based on the initial address of
   // the object alone. If the test below fails the random number generator
@@ -1381,7 +1381,7 @@
   v8::Local<v8::String> empty = v8_str("");
   v8::Local<v8::String> prop_name = v8_str("prop_name");
 
-  i::Heap::CollectAllGarbage();
+  i::Heap::CollectAllGarbage(false);
 
   // Make sure delete of a non-existent hidden value works
   CHECK(obj->DeleteHiddenValue(key));
@@ -1391,7 +1391,7 @@
   CHECK(obj->SetHiddenValue(key, v8::Integer::New(2002)));
   CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
 
-  i::Heap::CollectAllGarbage();
+  i::Heap::CollectAllGarbage(false);
 
   // Make sure we do not find the hidden property.
   CHECK(!obj->Has(empty));
@@ -1402,7 +1402,7 @@
   CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
   CHECK_EQ(2003, obj->Get(empty)->Int32Value());
 
-  i::Heap::CollectAllGarbage();
+  i::Heap::CollectAllGarbage(false);
 
   // Add another property and delete it afterwards to force the object in
   // slow case.
@@ -1413,7 +1413,7 @@
   CHECK(obj->Delete(prop_name));
   CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
 
-  i::Heap::CollectAllGarbage();
+  i::Heap::CollectAllGarbage(false);
 
   CHECK(obj->DeleteHiddenValue(key));
   CHECK(obj->GetHiddenValue(key).IsEmpty());
@@ -1429,7 +1429,7 @@
   }
   // The whole goal of this interceptor is to cause a GC during local property
   // lookup.
-  i::Heap::CollectAllGarbage();
+  i::Heap::CollectAllGarbage(false);
   i::FLAG_always_compact = saved_always_compact;
   return v8::Handle<Value>();
 }
@@ -2982,7 +2982,7 @@
   CHECK_EQ(v8::Integer::New(3), args[2]);
   CHECK_EQ(v8::Undefined(), args[3]);
   v8::HandleScope scope;
-  i::Heap::CollectAllGarbage();
+  i::Heap::CollectAllGarbage(false);
   return v8::Undefined();
 }
 
@@ -4960,7 +4960,7 @@
     Local<String> name,
     const AccessorInfo& info) {
   ApiTestFuzzer::Fuzz();
-  i::Heap::CollectAllGarbage();
+  i::Heap::CollectAllGarbage(false);
   return v8::Handle<Value>();
 }
 
@@ -6165,8 +6165,8 @@
   // the first garbage collection but some of the maps have already
   // been marked at that point.  Therefore some of the maps are not
   // collected until the second garbage collection.
-  v8::internal::Heap::CollectAllGarbage();
-  v8::internal::Heap::CollectAllGarbage();
+  v8::internal::Heap::CollectAllGarbage(false);
+  v8::internal::Heap::CollectAllGarbage(false);
   v8::internal::HeapIterator it;
   while (it.has_next()) {
     v8::internal::HeapObject* object = it.next();
@@ -6242,7 +6242,7 @@
   // weak callback of the first handle would be able to 'reallocate' it.
   handle1.MakeWeak(NULL, NewPersistentHandleCallback);
   handle2.Dispose();
-  i::Heap::CollectAllGarbage();
+  i::Heap::CollectAllGarbage(false);
 }
 
 
@@ -6250,7 +6250,7 @@
 
 void DisposeAndForceGcCallback(v8::Persistent<v8::Value> handle, void*) {
   to_be_disposed.Dispose();
-  i::Heap::CollectAllGarbage();
+  i::Heap::CollectAllGarbage(false);
 }
 
 
@@ -6265,7 +6265,7 @@
   }
   handle1.MakeWeak(NULL, DisposeAndForceGcCallback);
   to_be_disposed = handle2;
-  i::Heap::CollectAllGarbage();
+  i::Heap::CollectAllGarbage(false);
 }
 
 
@@ -6842,7 +6842,7 @@
       {
         v8::Locker lock;
         // TODO(lrn): Perhaps create some garbage before collecting.
-        i::Heap::CollectAllGarbage();
+        i::Heap::CollectAllGarbage(false);
         gc_count_++;
       }
       i::OS::Sleep(1);
@@ -6963,7 +6963,7 @@
     while (gc_during_apply_ < kRequiredGCs) {
       {
         v8::Locker lock;
-        i::Heap::CollectAllGarbage();
+        i::Heap::CollectAllGarbage(false);
         gc_count_++;
       }
       i::OS::Sleep(1);
@@ -7680,11 +7680,11 @@
   uint8_t* pixel_data = reinterpret_cast<uint8_t*>(malloc(kElementCount));
   i::Handle<i::PixelArray> pixels = i::Factory::NewPixelArray(kElementCount,
                                                               pixel_data);
-  i::Heap::CollectAllGarbage();  // Force GC to trigger verification.
+  i::Heap::CollectAllGarbage(false);  // Force GC to trigger verification.
   for (int i = 0; i < kElementCount; i++) {
     pixels->set(i, i);
   }
-  i::Heap::CollectAllGarbage();  // Force GC to trigger verification.
+  i::Heap::CollectAllGarbage(false);  // Force GC to trigger verification.
   for (int i = 0; i < kElementCount; i++) {
     CHECK_EQ(i, pixels->get(i));
     CHECK_EQ(i, pixel_data[i]);
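
For reference, every test-api.cc update above follows the same pattern: the
parameterless Heap::CollectAllGarbage() calls gain a single boolean argument,
and the tests pass false. Below is a minimal sketch of a helper a cctest could
wrap around the call, assuming the flag selects whether compaction is forced;
the parameter meaning and the helper name are assumptions, not part of this
patch.

    // Illustrative helper only; the tests above call the internal API directly.
    // Assumes the internal src/v8.h is on the include path, as in the cctest build.
    #include "v8.h"

    namespace i = v8::internal;

    // Passing false is assumed to request a full mark-sweep collection without
    // forcing compaction, which is what every updated call site in this patch does.
    static void ForceFullGC() {
      i::Heap::CollectAllGarbage(false);
    }
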
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index f5e4f3a..bd09d0d 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -414,8 +414,8 @@
   CHECK_EQ(NULL, Debug::debug_info_list_);
 
   // Collect garbage to ensure weak handles are cleared.
-  Heap::CollectAllGarbage();
-  Heap::CollectAllGarbage();
+  Heap::CollectAllGarbage(false);
+  Heap::CollectAllGarbage(false);
 
   // Iterate the heap and check that there are no debugger-related objects left.
   HeapIterator iterator;
@@ -843,7 +843,7 @@
       Heap::CollectGarbage(0, v8::internal::NEW_SPACE);
     } else {
       // Mark sweep (and perhaps compact).
-      Heap::CollectAllGarbage();
+      Heap::CollectAllGarbage(false);
     }
   }
 }
@@ -1206,7 +1206,7 @@
     CHECK_EQ(2 + i * 3, break_point_hit_count);
 
     // Mark sweep (and perhaps compact) and call function.
-    Heap::CollectAllGarbage();
+    Heap::CollectAllGarbage(false);
     f->Call(recv, 0, NULL);
     CHECK_EQ(3 + i * 3, break_point_hit_count);
   }
@@ -5094,7 +5094,7 @@
 
   // Do garbage collection to ensure that only the script in this test will be
   // collected afterwards.
-  Heap::CollectAllGarbage();
+  Heap::CollectAllGarbage(false);
 
   script_collected_count = 0;
   v8::Debug::SetDebugEventListener(DebugEventScriptCollectedEvent,
@@ -5106,7 +5106,7 @@
 
   // Do garbage collection to collect the script above which is no longer
   // referenced.
-  Heap::CollectAllGarbage();
+  Heap::CollectAllGarbage(false);
 
   CHECK_EQ(2, script_collected_count);
 
@@ -5141,7 +5141,7 @@
 
     // Do garbage collection to ensure that only the script in this test will be
     // collected afterwards.
-    Heap::CollectAllGarbage();
+    Heap::CollectAllGarbage(false);
 
     v8::Debug::SetMessageHandler2(ScriptCollectedMessageHandler);
     {
@@ -5152,7 +5152,7 @@
 
   // Do garbage collection to collect the script above which is no longer
   // referenced.
-  Heap::CollectAllGarbage();
+  Heap::CollectAllGarbage(false);
 
   CHECK_EQ(2, script_collected_message_count);
 
diff --git a/test/cctest/test-disasm-arm.cc b/test/cctest/test-disasm-arm.cc
index 1cca17d..69efdc5 100644
--- a/test/cctest/test-disasm-arm.cc
+++ b/test/cctest/test-disasm-arm.cc
@@ -123,13 +123,13 @@
           "20354189       eorcss r4, r5, r9, lsl #3");
 
   COMPARE(sub(r5, r6, Operand(r10, LSL, 31), LeaveCC, hs),
-          "20465f8a       subcs r5, r6, sl, lsl #31");
+          "20465f8a       subcs r5, r6, r10, lsl #31");
   COMPARE(sub(r5, r6, Operand(r10, LSL, 30), SetCC, cc),
-          "30565f0a       subccs r5, r6, sl, lsl #30");
+          "30565f0a       subccs r5, r6, r10, lsl #30");
   COMPARE(sub(r5, r6, Operand(r10, LSL, 24), LeaveCC, lo),
-          "30465c0a       subcc r5, r6, sl, lsl #24");
+          "30465c0a       subcc r5, r6, r10, lsl #24");
   COMPARE(sub(r5, r6, Operand(r10, LSL, 16), SetCC, mi),
-          "4056580a       submis r5, r6, sl, lsl #16");
+          "4056580a       submis r5, r6, r10, lsl #16");
 
   COMPARE(rsb(r6, r7, Operand(fp)),
           "e067600b       rsb r6, r7, fp");
@@ -163,7 +163,7 @@
   COMPARE(sbc(r7, r9, Operand(ip, ROR, 4)),
           "e0c9726c       sbc r7, r9, ip, ror #4");
   COMPARE(sbc(r7, r10, Operand(ip), SetCC),
-          "e0da700c       sbcs r7, sl, ip");
+          "e0da700c       sbcs r7, r10, ip");
   COMPARE(sbc(r7, ip, Operand(ip, ROR, 31), SetCC, hi),
           "80dc7fec       sbchis r7, ip, ip, ror #31");
 
@@ -240,7 +240,7 @@
           "51d10004       bicpls r0, r1, r4");
 
   COMPARE(mvn(r10, Operand(r1)),
-          "e1e0a001       mvn sl, r1");
+          "e1e0a001       mvn r10, r1");
   COMPARE(mvn(r9, Operand(r2)),
           "e1e09002       mvn r9, r2");
   COMPARE(mvn(r0, Operand(r3), SetCC),
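
The test-disasm-arm.cc expectation changes above all concern a single register:
the disassembler now prints the canonical name r10 instead of the ABI alias sl,
while the fp and ip aliases in the unchanged expectations are kept. A
stand-alone illustration of the naming table this implies (not V8's actual
converter):

    // Stand-alone illustration only, not V8 code.
    #include <cstdio>

    // Register 10 is named canonically; 11 and 12 keep their fp/ip aliases,
    // matching the expected strings in the test after this patch.
    static const char* const kRegisterNames[16] = {
        "r0", "r1", "r2",  "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "fp", "ip", "sp", "lr", "pc"};

    int main() {
      std::printf("%s\n", kRegisterNames[10]);  // prints "r10" rather than "sl"
      return 0;
    }
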
diff --git a/test/cctest/test-log.cc b/test/cctest/test-log.cc
index df58234..5884a41 100644
--- a/test/cctest/test-log.cc
+++ b/test/cctest/test-log.cc
@@ -685,7 +685,7 @@
       "  obj.test =\n"
       "    (function a(j) { return function b() { return j; } })(100);\n"
       "})(this);");
-  i::Heap::CollectAllGarbage();
+  i::Heap::CollectAllGarbage(false);
 
   EmbeddedVector<char, 204800> buffer;
   int log_size;
diff --git a/test/cctest/test-serialize.cc b/test/cctest/test-serialize.cc
index 36f051f..6939a80 100644
--- a/test/cctest/test-serialize.cc
+++ b/test/cctest/test-serialize.cc
@@ -125,12 +125,14 @@
            encoder.Encode(the_hole_value_location.address()));
   ExternalReference stack_guard_limit_address =
       ExternalReference::address_of_stack_guard_limit();
-  CHECK_EQ(make_code(UNCLASSIFIED, 3),
+  CHECK_EQ(make_code(UNCLASSIFIED, 4),
            encoder.Encode(stack_guard_limit_address.address()));
-  CHECK_EQ(make_code(UNCLASSIFIED, 5),
+  CHECK_EQ(make_code(UNCLASSIFIED, 10),
            encoder.Encode(ExternalReference::debug_break().address()));
   CHECK_EQ(make_code(UNCLASSIFIED, 6),
            encoder.Encode(ExternalReference::new_space_start().address()));
+  CHECK_EQ(make_code(UNCLASSIFIED, 3),
+           encoder.Encode(ExternalReference::roots_address().address()));
 }
 
 
@@ -157,9 +159,9 @@
   CHECK_EQ(ExternalReference::the_hole_value_location().address(),
            decoder.Decode(make_code(UNCLASSIFIED, 2)));
   CHECK_EQ(ExternalReference::address_of_stack_guard_limit().address(),
-           decoder.Decode(make_code(UNCLASSIFIED, 3)));
+           decoder.Decode(make_code(UNCLASSIFIED, 4)));
   CHECK_EQ(ExternalReference::debug_break().address(),
-           decoder.Decode(make_code(UNCLASSIFIED, 5)));
+           decoder.Decode(make_code(UNCLASSIFIED, 10)));
   CHECK_EQ(ExternalReference::new_space_start().address(),
            decoder.Decode(make_code(UNCLASSIFIED, 6)));
 }
diff --git a/test/cctest/test-strings.cc b/test/cctest/test-strings.cc
index 3065ba1..127b7a2 100644
--- a/test/cctest/test-strings.cc
+++ b/test/cctest/test-strings.cc
@@ -480,7 +480,7 @@
   // symbol entry in the symbol table because it is used by the script
   // kept alive by the weak wrapper. Make sure we don't destroy the
   // external string.
-  Heap::CollectAllGarbage();
+  Heap::CollectAllGarbage(false);
   CHECK(!resource_destructed);
 
   {
@@ -499,7 +499,7 @@
   // Forcing another garbage collection should let us get rid of the
   // slice from the symbol table. The external string remains in the
   // heap until the next GC.
-  Heap::CollectAllGarbage();
+  Heap::CollectAllGarbage(false);
   CHECK(!resource_destructed);
   v8::HandleScope scope;
   Handle<String> key_string = Factory::NewStringFromAscii(key_vector);
@@ -508,7 +508,7 @@
 
   // Forcing yet another garbage collection must allow us to finally
   // get rid of the external string.
-  Heap::CollectAllGarbage();
+  Heap::CollectAllGarbage(false);
   CHECK(resource_destructed);
 
   delete[] source;
diff --git a/test/mjsunit/date-parse.js b/test/mjsunit/date-parse.js
index bb7ecd2..4bbb2c6 100644
--- a/test/mjsunit/date-parse.js
+++ b/test/mjsunit/date-parse.js
@@ -250,8 +250,8 @@
 
 
 // Test that we can parse our own date format.
-// (Dates from 1970 to ~2070 with 95h steps.)
-for (var i = 0; i < 24 * 365 * 100; i += 95) {
+// (Dates from 1970 to ~2070 with 150h steps.)
+for (var i = 0; i < 24 * 365 * 100; i += 150) {
   var ms = i * (3600 * 1000);
   var s = (new Date(ms)).toString();
   assertEquals(ms, Date.parse(s), "parse own: " + s);
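
The date-parse.js loop covers 24 * 365 * 100 = 876,000 hours, so stepping by
150 h instead of 95 h cuts the round-trip count from roughly 9,200 to exactly
5,840 iterations while still spanning 1970 to ~2070.
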
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index 4bf67e8..6ac4938 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -52,7 +52,7 @@
 debug-changebreakpoint: CRASH || FAIL
 debug-clearbreakpoint: CRASH || FAIL
 debug-clearbreakpointgroup: PASS, FAIL if $mode == debug
-debug-conditional-breakpoints: FAIL
+debug-conditional-breakpoints: CRASH || FAIL
 debug-evaluate: CRASH || FAIL
 debug-ignore-breakpoints: CRASH || FAIL
 debug-multiple-breakpoints: CRASH || FAIL
diff --git a/test/mozilla/mozilla.status b/test/mozilla/mozilla.status
index 399c9c6..41395b3 100644
--- a/test/mozilla/mozilla.status
+++ b/test/mozilla/mozilla.status
@@ -171,7 +171,7 @@
 
 # Tests that sorting arrays of ints is less than 3 times as fast
 # as sorting arrays of strings.
-js1_5/extensions/regress-371636: PASS || FAIL
+js1_5/extensions/regress-371636: PASS || FAIL || TIMEOUT if $mode == debug
 
 
 # Tests depend on GC timings. Inherently flaky.
diff --git a/tools/visual_studio/d8_x64.vcproj b/tools/visual_studio/d8_x64.vcproj
index dd2b83d..5c47a8a 100644
--- a/tools/visual_studio/d8_x64.vcproj
+++ b/tools/visual_studio/d8_x64.vcproj
@@ -50,6 +50,7 @@
 			<Tool
 				Name="VCLinkerTool"
 				AdditionalDependencies="winmm.lib Ws2_32.lib"
+				TargetMachine="17"
 			/>
 			<Tool
 				Name="VCALinkTool"
@@ -111,6 +112,7 @@
 			<Tool
 				Name="VCLinkerTool"
 				AdditionalDependencies="winmm.lib Ws2_32.lib"
+				TargetMachine="17"
 			/>
 			<Tool
 				Name="VCALinkTool"
diff --git a/tools/visual_studio/v8_cctest_x64.vcproj b/tools/visual_studio/v8_cctest_x64.vcproj
index fc7ac4b..d0fbac6 100644
--- a/tools/visual_studio/v8_cctest_x64.vcproj
+++ b/tools/visual_studio/v8_cctest_x64.vcproj
@@ -50,6 +50,7 @@
 			<Tool
 				Name="VCLinkerTool"
 				AdditionalDependencies="winmm.lib Ws2_32.lib"
+				TargetMachine="17"
 			/>
 			<Tool
 				Name="VCALinkTool"
@@ -111,6 +112,7 @@
 			<Tool
 				Name="VCLinkerTool"
 				AdditionalDependencies="winmm.lib Ws2_32.lib"
+				TargetMachine="17"
 			/>
 			<Tool
 				Name="VCALinkTool"
diff --git a/tools/visual_studio/v8_shell_sample_x64.vcproj b/tools/visual_studio/v8_shell_sample_x64.vcproj
index ab276f4..e1d5164 100644
--- a/tools/visual_studio/v8_shell_sample_x64.vcproj
+++ b/tools/visual_studio/v8_shell_sample_x64.vcproj
@@ -50,6 +50,7 @@
 			<Tool
 				Name="VCLinkerTool"
 				AdditionalDependencies="winmm.lib Ws2_32.lib"
+				TargetMachine="17"
 			/>
 			<Tool
 				Name="VCALinkTool"
@@ -111,6 +112,7 @@
 			<Tool
 				Name="VCLinkerTool"
 				AdditionalDependencies="winmm.lib Ws2_32.lib"
+				TargetMachine="17"
 			/>
 			<Tool
 				Name="VCALinkTool"