Version 3.16.14

Performance and stability improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@13610 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 3aa76e7..62fcc9a 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -7879,7 +7879,12 @@
   bool save_fp_regs = CpuFeatures::IsSupported(VFP2);
   CEntryStub ces(1, save_fp_regs ? kSaveFPRegs : kDontSaveFPRegs);
   __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
+  int parameter_count_offset =
+      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+  __ ldr(r1, MemOperand(fp, parameter_count_offset));
   masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+  __ mov(r1, Operand(r1, LSL, kPointerSizeLog2));
+  __ add(sp, sp, r1);
   __ Ret();
 }
 
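For orientation, the effect of the new trampoline epilogue above can be sketched as plain host-side arithmetic: the caller's stack parameter count is read from the frame before LeaveFrame, and sp is then advanced by count << kPointerSizeLog2 so the caller-pushed parameters are dropped before the return. This is only an illustration under assumed 32-bit constants and made-up values, not the generated code.

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int kPointerSizeLog2 = 2;          // 32-bit target assumption
      uintptr_t sp = 0xbeffef00u;              // hypothetical sp after LeaveFrame
      uintptr_t parameter_count = 3;           // value loaded from the frame slot

      // mov r1, Operand(r1, LSL, kPointerSizeLog2);  add sp, sp, r1
      sp += parameter_count << kPointerSizeLog2;
      std::printf("sp after dropping caller stack parameters: %#lx\n",
                  static_cast<unsigned long>(sp));
      return 0;
    }
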
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 85b62da..06415ed 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -466,13 +466,19 @@
   // v  +-------------------------+          +-------------------------|
   //    |   COMPILED_STUB marker  |          |   STUB_FAILURE marker   |
   //    +-------------------------+          +-------------------------+
-  //    |                         |          |     stub parameter 1    |
+  //    |                         |          |   caller args.length_   |
   //    | ...                     |          +-------------------------+
-  //    |                         |          |            ...          |
+  //    |                         |          |  caller args.arguments_ |
   //    |-------------------------|<-sp      +-------------------------+
-  //                                         |     stub parameter n    |
-  //      parameters in registers            +-------------------------+<-sp
-  //       and spilled to stack              r0 = number of parameters
+  //                                         |  caller args pointer    |
+  //                                         +-------------------------+
+  //                                         |  caller stack param 1   |
+  //      parameters in registers            +-------------------------+
+  //       and spilled to stack              |           ....          |
+  //                                         +-------------------------+
+  //                                         |  caller stack param n   |
+  //                                         +-------------------------+<-sp
+  //                                         r0 = number of parameters
   //                                         r1 = failure handler address
   //                                         fp = saved frame
   //                                         cp = JSFunction context
@@ -483,9 +489,15 @@
   CodeStubInterfaceDescriptor* descriptor =
       isolate_->code_stub_interface_descriptor(major_key);
 
+  // The output frame must have room for all pushed register parameters
+  // and the standard stack frame slots.
   int output_frame_size = StandardFrameConstants::kFixedFrameSize +
       kPointerSize * descriptor->register_param_count_;
 
+  // Include space for an argument object to the callee and optionally
+  // the space to pass the argument object to the stub failure handler.
+  output_frame_size += sizeof(Arguments) + kPointerSize;
+
   FrameDescription* output_frame =
       new(output_frame_size) FrameDescription(output_frame_size, 0);
   ASSERT(frame_index == 0);
@@ -497,12 +509,15 @@
       reinterpret_cast<intptr_t>(notify_failure->entry()));
 
   Code* trampoline = NULL;
-  StubFailureTrampolineStub().FindCodeInCache(&trampoline, isolate_);
+  int extra = descriptor->extra_expression_stack_count_;
+  StubFailureTrampolineStub(extra).FindCodeInCache(&trampoline, isolate_);
   ASSERT(trampoline != NULL);
   output_frame->SetPc(reinterpret_cast<intptr_t>(
       trampoline->instruction_start()));
   unsigned input_frame_size = input_->GetFrameSize();
 
+  intptr_t frame_ptr = input_->GetRegister(fp.code());
+
   // JSFunction continuation
   intptr_t input_frame_offset = input_frame_size - kPointerSize;
   intptr_t output_frame_offset = output_frame_size - kPointerSize;
@@ -528,6 +543,28 @@
       Smi::FromInt(StackFrame::STUB_FAILURE_TRAMPOLINE));
   output_frame->SetFrameSlot(output_frame_offset, value);
 
+  int caller_arg_count = 0;
+  if (descriptor->stack_parameter_count_ != NULL) {
+    caller_arg_count =
+        input_->GetRegister(descriptor->stack_parameter_count_->code());
+  }
+
+  // Build the Arguments object for the caller's parameters and a pointer to it.
+  output_frame_offset -= kPointerSize;
+  value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
+      (caller_arg_count - 1) * kPointerSize;
+  output_frame->SetFrameSlot(output_frame_offset, value);
+
+  output_frame->SetFrameSlot(output_frame_offset, value);
+  output_frame_offset -= kPointerSize;
+  output_frame->SetFrameSlot(output_frame_offset, caller_arg_count);
+
+  value = frame_ptr - (output_frame_size - output_frame_offset) -
+      StandardFrameConstants::kMarkerOffset;
+  output_frame_offset -= kPointerSize;
+  output_frame->SetFrameSlot(output_frame_offset, value);
+
+  // Copy the register parameters to the failure frame.
   for (int i = 0; i < descriptor->register_param_count_; ++i) {
     output_frame_offset -= kPointerSize;
     DoTranslateCommand(iterator, 0, output_frame_offset);
@@ -538,14 +575,17 @@
     output_frame->SetDoubleRegister(i, double_value);
   }
 
-  value = input_->GetRegister(fp.code());
-  output_frame->SetRegister(fp.code(), value);
-  output_frame->SetFp(value);
+  output_frame->SetRegister(fp.code(), frame_ptr);
+  output_frame->SetFp(frame_ptr);
 
   ApiFunction function(descriptor->deoptimization_handler_);
   ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
   intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
-  output_frame->SetRegister(r0.code(), descriptor->register_param_count_);
+  int params = descriptor->register_param_count_;
+  if (descriptor->stack_parameter_count_ != NULL) {
+    params++;
+  }
+  output_frame->SetRegister(r0.code(), params);
   output_frame->SetRegister(r1.code(), handler);
 }
 
@@ -603,7 +643,7 @@
   output_frame->SetFrameSlot(output_offset, value);
   intptr_t fp_value = top_address + output_offset;
   output_frame->SetFp(fp_value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
            fp_value, output_offset, value);
   }
@@ -612,7 +652,7 @@
   output_offset -= kPointerSize;
   value = output_[frame_index - 1]->GetContext();
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; context\n",
            top_address + output_offset, output_offset, value);
   }
@@ -621,7 +661,7 @@
   output_offset -= kPointerSize;
   value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n",
            top_address + output_offset, output_offset, value);
   }
@@ -630,7 +670,7 @@
   output_offset -= kPointerSize;
   value = reinterpret_cast<intptr_t>(construct_stub);
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; code object\n",
            top_address + output_offset, output_offset, value);
   }
@@ -639,7 +679,7 @@
   output_offset -= kPointerSize;
   value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
            top_address + output_offset, output_offset, value, height - 1);
   }
@@ -648,7 +688,7 @@
   output_offset -= kPointerSize;
   value = reinterpret_cast<intptr_t>(function);
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; constructor function\n",
            top_address + output_offset, output_offset, value);
   }
@@ -658,7 +698,7 @@
   output_offset -= kPointerSize;
   value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n",
            top_address + output_offset, output_offset, value);
   }
@@ -682,7 +722,7 @@
   unsigned height = 0;
   unsigned height_in_bytes = height * kPointerSize;
   const char* kind = is_setter_stub_frame ? "setter" : "getter";
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("  translating %s stub => height=%u\n", kind, height_in_bytes);
   }
 
@@ -715,7 +755,7 @@
   output_offset -= kPointerSize;
   intptr_t callers_pc = output_[frame_index - 1]->GetPc();
   output_frame->SetFrameSlot(output_offset, callers_pc);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
            " ; caller's pc\n",
            top_address + output_offset, output_offset, callers_pc);
@@ -727,7 +767,7 @@
   output_frame->SetFrameSlot(output_offset, value);
   intptr_t fp_value = top_address + output_offset;
   output_frame->SetFp(fp_value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
            " ; caller's fp\n",
            fp_value, output_offset, value);
@@ -737,7 +777,7 @@
   output_offset -= kPointerSize;
   value = output_[frame_index - 1]->GetContext();
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
            " ; context\n",
            top_address + output_offset, output_offset, value);
@@ -747,7 +787,7 @@
   output_offset -= kPointerSize;
   value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
            " ; function (%s sentinel)\n",
            top_address + output_offset, output_offset, value, kind);
@@ -761,7 +801,7 @@
   Code* accessor_stub = isolate_->builtins()->builtin(name);
   value = reinterpret_cast<intptr_t>(accessor_stub);
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
            " ; code object\n",
            top_address + output_offset, output_offset, value);
@@ -807,7 +847,7 @@
   }
   unsigned height = iterator->Next();
   unsigned height_in_bytes = height * kPointerSize;
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("  translating ");
     function->PrintName();
     PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
@@ -871,7 +911,7 @@
     value = output_[frame_index - 1]->GetPc();
   }
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
            top_address + output_offset, output_offset, value);
   }
@@ -894,7 +934,7 @@
   if (is_topmost) {
     output_frame->SetRegister(fp.code(), fp_value);
   }
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
            fp_value, output_offset, value);
   }
@@ -912,7 +952,7 @@
   output_frame->SetFrameSlot(output_offset, value);
   output_frame->SetContext(value);
   if (is_topmost) output_frame->SetRegister(cp.code(), value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; context\n",
            top_address + output_offset, output_offset, value);
   }
@@ -925,7 +965,7 @@
   // input frame.
   ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; function\n",
            top_address + output_offset, output_offset, value);
   }
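
The address arithmetic that fills the "caller args" slots above can be checked in isolation. A minimal host-side sketch, assuming a 32-bit target and purely illustrative frame/register values: frame_ptr + kCallerSPOffset reaches the caller's stack parameters, and adding (count - 1) * kPointerSize gives the pointer recorded in the "caller args.arguments_" slot of the diagram.

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t kPointerSize = 4;                   // 32-bit assumption
      const uintptr_t kCallerSPOffset = 2 * kPointerSize; // StandardFrameConstants

      uintptr_t frame_ptr = 0xbefff000u;                  // hypothetical saved fp
      uintptr_t caller_arg_count = 3;                     // hypothetical r0 value

      // Value written below the STUB_FAILURE marker: base of the caller's
      // stack parameters as recorded in the args.arguments_ slot.
      uintptr_t arguments_base = frame_ptr + kCallerSPOffset +
                                 (caller_arg_count - 1) * kPointerSize;
      std::printf("args.arguments_ = %#lx, args.length_ = %lu\n",
                  static_cast<unsigned long>(arguments_base),
                  static_cast<unsigned long>(caller_arg_count));
      return 0;
    }
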
diff --git a/src/arm/frames-arm.h b/src/arm/frames-arm.h
index a10acd0..ee9fc0e 100644
--- a/src/arm/frames-arm.h
+++ b/src/arm/frames-arm.h
@@ -134,20 +134,6 @@
 };
 
 
-class StandardFrameConstants : public AllStatic {
- public:
-  // Fixed part of the frame consists of return address, caller fp,
-  // context and function.
-  static const int kFixedFrameSize    =  4 * kPointerSize;
-  static const int kExpressionsOffset = -3 * kPointerSize;
-  static const int kMarkerOffset      = -2 * kPointerSize;
-  static const int kContextOffset     = -1 * kPointerSize;
-  static const int kCallerFPOffset    =  0 * kPointerSize;
-  static const int kCallerPCOffset    =  1 * kPointerSize;
-  static const int kCallerSPOffset    =  2 * kPointerSize;
-};
-
-
 class JavaScriptFrameConstants : public AllStatic {
  public:
   // FP-relative.
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 04ab381..dacdd5a 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -2257,7 +2257,7 @@
 
 LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
   LParameter* result = new(zone()) LParameter;
-  if (info()->IsOptimizing()) {
+  if (instr->kind() == HParameter::STACK_PARAMETER) {
     int spill_index = chunk()->GetParameterStackSlot(instr->index());
     return DefineAsSpilled(result, spill_index);
   } else {
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 0d74a64..6796f73 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -776,15 +776,6 @@
 }
 
 
-void MacroAssembler::ClearFPSCRBits(const uint32_t bits_to_clear,
-                                    const Register scratch,
-                                    const Condition cond) {
-  vmrs(scratch, cond);
-  bic(scratch, scratch, Operand(bits_to_clear), LeaveCC, cond);
-  vmsr(scratch, cond);
-}
-
-
 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
                                            const DwVfpRegister src2,
                                            const Condition cond) {
@@ -2695,15 +2686,29 @@
 
   Label done;
 
-  // Test for values that can be exactly represented as a signed 32-bit integer.
-  TryFastDoubleToInt32(result, double_input, double_scratch, &done);
-
-  // Clear cumulative exception flags.
-  ClearFPSCRBits(kVFPExceptionMask, scratch);
-  // Try a conversion to a signed integer.
+  // Test if the value can be exactly represented as a signed integer.
   vcvt_s32_f64(double_scratch.low(), double_input);
   vmov(result, double_scratch.low());
-  // Retrieve he FPSCR.
+  vcvt_f64_s32(double_scratch, double_scratch.low());
+  // Note: this comparison is cheaper than reading the FPSCR exception bits.
+  VFPCompareAndSetFlags(double_input, double_scratch);
+  b(eq, &done);
+
+  // Check the exception flags. If they are not set, we are done.
+  // If they are set, it could be because of the conversion above, or because
+  // they were set before this code.
+  vmrs(scratch);
+  tst(scratch, Operand(kVFPOverflowExceptionBit |
+                       kVFPUnderflowExceptionBit |
+                       kVFPInvalidOpExceptionBit));
+  b(eq, &done);
+
+  // Clear cumulative exception flags.
+  bic(scratch, scratch, Operand(kVFPExceptionMask));
+  vmsr(scratch);
+  // Try a conversion to a signed integer.
+  vcvt_s32_f64(double_scratch.low(), double_input);
+  // Retrieve the FPSCR.
   vmrs(scratch);
   // Check for overflow and NaNs.
   tst(scratch, Operand(kVFPOverflowExceptionBit |
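
The rewritten conversion above leans on a cheap exactness test: convert to int32, convert back, and compare, consulting the FPSCR sticky flags only when that comparison fails. Below is a stand-alone sketch of just the round-trip idea in host C++ (not the macro assembler); it assumes in-range, non-NaN inputs, since overflow and NaN are precisely the cases the FPSCR overflow/invalid-op bits handle in the generated code.

    #include <cstdint>
    #include <cstdio>

    // Mirrors vcvt_s32_f64 + vcvt_f64_s32 + VFPCompareAndSetFlags for inputs
    // already known to fit in int32 (a plain C++ cast of NaN or out-of-range
    // values would be undefined behaviour, so those cases are left to the
    // FPSCR checks in the real code).
    static bool RoundTripsExactly(double input) {
      int32_t as_int = static_cast<int32_t>(input);  // vcvt_s32_f64
      double back = static_cast<double>(as_int);     // vcvt_f64_s32
      return back == input;                          // the cheap comparison
    }

    int main() {
      std::printf("%d\n", RoundTripsExactly(42.0));  // 1: exactly representable
      std::printf("%d\n", RoundTripsExactly(42.5));  // 0: truncation changed it
      std::printf("%d\n", RoundTripsExactly(-0.0));  // 1: -0.0 compares equal to 0.0
      return 0;
    }
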
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 4d469f0..93760bd 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -460,11 +460,6 @@
             const MemOperand& dst,
             Condition cond = al);
 
-  // Clear specified FPSCR bits.
-  void ClearFPSCRBits(const uint32_t bits_to_clear,
-                      const Register scratch,
-                      const Condition cond = al);
-
   // Compare double values and move the result to the normal condition flags.
   void VFPCompareAndSetFlags(const DwVfpRegister src1,
                              const DwVfpRegister src2,
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 215de73..37e6be2 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -2437,23 +2437,15 @@
 }
 
 
-Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
-                                                   Handle<JSObject> holder,
-                                                   Handle<JSFunction> function,
-                                                   Handle<String> name,
-                                                   CheckType check) {
+void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
+                                              Handle<JSObject> holder,
+                                              Handle<String> name,
+                                              CheckType check,
+                                              Label* success) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
   // -----------------------------------
-  if (HasCustomCallGenerator(function)) {
-    Handle<Code> code = CompileCustomCall(object, holder,
-                                          Handle<JSGlobalPropertyCell>::null(),
-                                          function, name);
-    // A null handle means bail out to the regular compiler code below.
-    if (!code.is_null()) return code;
-  }
-
   Label miss;
   GenerateNameCheck(name, &miss);
 
@@ -2487,78 +2479,89 @@
       break;
 
     case STRING_CHECK:
-      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
-        // Check that the object is a two-byte string or a symbol.
-        __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
-        __ b(ge, &miss);
-        // Check that the maps starting from the prototype haven't changed.
-        GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::STRING_FUNCTION_INDEX, r0, &miss);
-        CheckPrototypes(
-            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
-            r0, holder, r3, r1, r4, name, &miss);
-      } else {
-        // Calling non-strict non-builtins with a value as the receiver
-        // requires boxing.
-        __ jmp(&miss);
-      }
+      // Check that the object is a two-byte string or a symbol.
+      __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
+      __ b(ge, &miss);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateDirectLoadGlobalFunctionPrototype(
+          masm(), Context::STRING_FUNCTION_INDEX, r0, &miss);
+      CheckPrototypes(
+          Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+          r0, holder, r3, r1, r4, name, &miss);
       break;
 
-    case NUMBER_CHECK:
-      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
-        Label fast;
-        // Check that the object is a smi or a heap number.
-        __ JumpIfSmi(r1, &fast);
-        __ CompareObjectType(r1, r0, r0, HEAP_NUMBER_TYPE);
-        __ b(ne, &miss);
-        __ bind(&fast);
-        // Check that the maps starting from the prototype haven't changed.
-        GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::NUMBER_FUNCTION_INDEX, r0, &miss);
-        CheckPrototypes(
-            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
-            r0, holder, r3, r1, r4, name, &miss);
-      } else {
-        // Calling non-strict non-builtins with a value as the receiver
-        // requires boxing.
-        __ jmp(&miss);
-      }
+    case NUMBER_CHECK: {
+      Label fast;
+      // Check that the object is a smi or a heap number.
+      __ JumpIfSmi(r1, &fast);
+      __ CompareObjectType(r1, r0, r0, HEAP_NUMBER_TYPE);
+      __ b(ne, &miss);
+      __ bind(&fast);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateDirectLoadGlobalFunctionPrototype(
+          masm(), Context::NUMBER_FUNCTION_INDEX, r0, &miss);
+      CheckPrototypes(
+          Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+          r0, holder, r3, r1, r4, name, &miss);
       break;
-
-    case BOOLEAN_CHECK:
-      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
-        Label fast;
-        // Check that the object is a boolean.
-        __ LoadRoot(ip, Heap::kTrueValueRootIndex);
-        __ cmp(r1, ip);
-        __ b(eq, &fast);
-        __ LoadRoot(ip, Heap::kFalseValueRootIndex);
-        __ cmp(r1, ip);
-        __ b(ne, &miss);
-        __ bind(&fast);
-        // Check that the maps starting from the prototype haven't changed.
-        GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::BOOLEAN_FUNCTION_INDEX, r0, &miss);
-        CheckPrototypes(
-            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
-            r0, holder, r3, r1, r4, name, &miss);
-      } else {
-        // Calling non-strict non-builtins with a value as the receiver
-        // requires boxing.
-        __ jmp(&miss);
-      }
+    }
+    case BOOLEAN_CHECK: {
+      Label fast;
+      // Check that the object is a boolean.
+      __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+      __ cmp(r1, ip);
+      __ b(eq, &fast);
+      __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+      __ cmp(r1, ip);
+      __ b(ne, &miss);
+      __ bind(&fast);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateDirectLoadGlobalFunctionPrototype(
+          masm(), Context::BOOLEAN_FUNCTION_INDEX, r0, &miss);
+      CheckPrototypes(
+          Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+          r0, holder, r3, r1, r4, name, &miss);
       break;
+    }
   }
 
+  __ b(success);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  GenerateMissBranch();
+}
+
+
+void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) {
   CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   __ InvokeFunction(
       function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind);
+}
 
-  // Handle call cache miss.
-  __ bind(&miss);
-  GenerateMissBranch();
+
+Handle<Code> CallStubCompiler::CompileCallConstant(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<String> name,
+    CheckType check,
+    Handle<JSFunction> function) {
+  if (HasCustomCallGenerator(function)) {
+    Handle<Code> code = CompileCustomCall(object, holder,
+                                          Handle<JSGlobalPropertyCell>::null(),
+                                          function, name);
+    // A null handle means bail out to the regular compiler code below.
+    if (!code.is_null()) return code;
+  }
+
+  Label success;
+
+  CompileHandlerFrontend(object, holder, name, check, &success);
+  __ bind(&success);
+  CompileHandlerBackend(function);
+
 
   // Return the generated code.
   return GetCode(function);
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index b638c40..f837045 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -102,7 +102,8 @@
   HGraph* graph = this->graph();
   Zone* zone = this->zone();
   for (int i = 0; i < descriptor->register_param_count_; ++i) {
-    HParameter* param = new(zone) HParameter(i);
+    HParameter* param =
+        new(zone) HParameter(i, HParameter::REGISTER_PARAMETER);
     AddInstruction(param);
     graph->start_environment()->Push(param);
     parameters_[i] = param;
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 2d23ddb..5e3795c 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -606,6 +606,14 @@
 }
 
 
+void StubFailureTrampolineStub::GenerateAheadOfTime() {
+  int i = 0;
+  for (; i <= StubFailureTrampolineStub::kMaxExtraExpressionStackCount; ++i) {
+    StubFailureTrampolineStub(i).GetCode();
+  }
+}
+
+
 FunctionEntryHook ProfileEntryHookStub::entry_hook_ = NULL;
 
 
diff --git a/src/code-stubs.h b/src/code-stubs.h
index dde0cc2..5eea0a4 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -253,8 +253,12 @@
 struct CodeStubInterfaceDescriptor {
   CodeStubInterfaceDescriptor()
       : register_param_count_(-1),
+        stack_parameter_count_(NULL),
+        extra_expression_stack_count_(0),
         register_params_(NULL) { }
   int register_param_count_;
+  const Register* stack_parameter_count_;
+  int extra_expression_stack_count_;
   Register* register_params_;
   Address deoptimization_handler_;
 };
@@ -549,12 +553,7 @@
  public:
   explicit ICStub(Code::Kind kind) : kind_(kind) { }
   virtual int GetCodeKind() { return kind_; }
-  // Currently all IC stubs do not collect explicit type feedback but rather
-  // check the instance type.
-  // TODO(verwaest): These stubs should collect proper type feedback, and should
-  // not check the instance type explicitly (perhaps unless more than
-  // kMaxPolymorphism maps are recorded).
-  virtual InlineCacheState GetICState() { return MEGAMORPHIC; }
+  virtual InlineCacheState GetICState() { return MONOMORPHIC; }
 
   bool Describes(Code* code) {
     return GetMajorKey(code) == MajorKey() && code->stub_info() == MinorKey();
@@ -1425,14 +1424,23 @@
 
 class StubFailureTrampolineStub : public PlatformCodeStub {
  public:
-  StubFailureTrampolineStub() {}
+  static const int kMaxExtraExpressionStackCount = 1;
+
+  explicit StubFailureTrampolineStub(int extra_expression_stack_count)
+      : extra_expression_stack_count_(extra_expression_stack_count) {}
+
+  virtual bool IsPregenerated() { return true; }
+
+  static void GenerateAheadOfTime();
 
  private:
   Major MajorKey() { return StubFailureTrampoline; }
-  int MinorKey() { return 0; }
+  int MinorKey() { return extra_expression_stack_count_; }
 
   void Generate(MacroAssembler* masm);
 
+  int extra_expression_stack_count_;
+
   DISALLOW_COPY_AND_ASSIGN(StubFailureTrampolineStub);
 };
 
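To make the two new descriptor fields concrete, here is a self-contained sketch with local stand-in types (not the real V8 classes): stack_parameter_count_ names the register carrying the caller's stack-argument count, and leaving it NULL means the deoptimizer treats that count as zero, matching the DoCompiledStubFrame changes earlier in this patch.

    #include <cstddef>
    #include <cstdio>

    // Local stand-ins, only to exercise the two new fields.
    struct Register { int code_; int code() const { return code_; } };

    struct CodeStubInterfaceDescriptor {
      CodeStubInterfaceDescriptor()
          : register_param_count_(-1),
            stack_parameter_count_(NULL),
            extra_expression_stack_count_(0),
            register_params_(NULL) {}
      int register_param_count_;
      const Register* stack_parameter_count_;
      int extra_expression_stack_count_;
      Register* register_params_;
    };

    // Mirrors how the deoptimizer reads the caller argument count: zero when no
    // register is designated, otherwise the designated register's current value.
    int CallerArgCount(const CodeStubInterfaceDescriptor& d, const int* regs) {
      return d.stack_parameter_count_ != NULL
                 ? regs[d.stack_parameter_count_->code()]
                 : 0;
    }

    int main() {
      int regs[16] = { 0 };
      regs[0] = 3;                    // pretend r0/eax currently holds 3
      Register r0 = { 0 };

      CodeStubInterfaceDescriptor with_count;
      with_count.stack_parameter_count_ = &r0;
      with_count.extra_expression_stack_count_ = 1;

      CodeStubInterfaceDescriptor without_count;  // stays NULL / 0

      std::printf("%d %d\n", CallerArgCount(with_count, regs),
                  CallerArgCount(without_count, regs));  // prints: 3 0
      return 0;
    }
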
diff --git a/src/codegen.cc b/src/codegen.cc
index b42645c..508e221 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -123,7 +123,9 @@
 #ifdef ENABLE_DISASSEMBLER
   bool print_code = Isolate::Current()->bootstrapper()->IsActive()
       ? FLAG_print_builtin_code
-      : (FLAG_print_code || (info->IsOptimizing() && FLAG_print_opt_code));
+      : (FLAG_print_code ||
+         (info->IsStub() && FLAG_print_code_stubs) ||
+         (info->IsOptimizing() && FLAG_print_opt_code));
   if (print_code) {
     // Print the source code if available.
     FunctionLiteral* function = info->function();
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index 8015baf..e12f3b6 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -491,12 +491,15 @@
 }
 
 
-bool Deoptimizer::TraceEnabledFor(BailoutType type) {
-  switch (type) {
+bool Deoptimizer::TraceEnabledFor(BailoutType deopt_type,
+                                  StackFrame::Type frame_type) {
+  switch (deopt_type) {
     case EAGER:
     case LAZY:
     case DEBUGGER:
-      return FLAG_trace_deopt;
+      return (frame_type == StackFrame::STUB)
+          ? FLAG_trace_stub_failures
+          : FLAG_trace_deopt;
     case OSR:
       return FLAG_trace_osr;
   }
@@ -540,7 +543,8 @@
       output_(NULL),
       deferred_arguments_objects_values_(0),
       deferred_arguments_objects_(0),
-      deferred_heap_numbers_(0) {
+      deferred_heap_numbers_(0),
+      trace_(false) {
   // For COMPILED_STUBs called from builtins, the function pointer is a SMI
   // indicating an internal frame.
   if (function->IsSmi()) {
@@ -550,13 +554,14 @@
     function->shared()->increment_deopt_count();
   }
   compiled_code_ = FindOptimizedCode(function, optimized_code);
-  if (TraceEnabledFor(type)) Trace();
-  ASSERT(HEAP->allow_allocation(false));
-  unsigned size = ComputeInputFrameSize();
-  input_ = new(size) FrameDescription(size, function);
   StackFrame::Type frame_type = function == NULL
       ? StackFrame::STUB
       : StackFrame::JAVA_SCRIPT;
+  trace_ = TraceEnabledFor(type, frame_type);
+  if (trace_) Trace();
+  ASSERT(HEAP->allow_allocation(false));
+  unsigned size = ComputeInputFrameSize();
+  input_ = new(size) FrameDescription(size, function);
   input_->SetFrameType(frame_type);
 }
 
@@ -717,7 +722,7 @@
 
   // Print some helpful diagnostic information.
   int64_t start = OS::Ticks();
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("[deoptimizing%s: begin 0x%08" V8PRIxPTR " ",
            (bailout_type_ == LAZY ? " (lazy)" : ""),
            reinterpret_cast<intptr_t>(function_));
@@ -795,7 +800,7 @@
   }
 
   // Print some helpful diagnostic information.
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     double ms = static_cast<double>(OS::Ticks() - start) / 1000;
     int index = output_count_ - 1;  // Index of the topmost frame.
     JSFunction* function = output_[index]->GetFunction();
@@ -833,7 +838,7 @@
   for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
     HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
     Handle<Object> num = isolate_->factory()->NewNumber(d.value());
-    if (FLAG_trace_deopt) {
+    if (trace_) {
       PrintF("Materializing a new heap number %p [%e] in slot %p\n",
              reinterpret_cast<void*>(*num),
              d.value(),
@@ -878,7 +883,7 @@
         }
         frame->SetExpression(i, *arguments);
         ASSERT_EQ(Memory::Object_at(descriptor.slot_address()), *arguments);
-        if (FLAG_trace_deopt) {
+        if (trace_) {
           PrintF("Materializing %sarguments object for %p: ",
                  frame->has_adapted_arguments() ? "(adapted) " : "",
                  reinterpret_cast<void*>(descriptor.slot_address()));
@@ -913,7 +918,7 @@
       int index = (info->parameters_count() - 1) -
           static_cast<int>(slot - parameters_top) / kPointerSize;
 
-      if (FLAG_trace_deopt) {
+      if (trace_) {
         PrintF("Materializing a new heap number %p [%e] in slot %p"
                "for parameter slot #%d\n",
                reinterpret_cast<void*>(*num),
@@ -929,7 +934,7 @@
       int index = info->expression_count() - 1 -
           static_cast<int>(slot - expressions_top) / kPointerSize;
 
-      if (FLAG_trace_deopt) {
+      if (trace_) {
         PrintF("Materializing a new heap number %p [%e] in slot %p"
                "for expression slot #%d\n",
                reinterpret_cast<void*>(*num),
@@ -976,7 +981,7 @@
     case Translation::REGISTER: {
       int input_reg = iterator->Next();
       intptr_t input_value = input_->GetRegister(input_reg);
-      if (FLAG_trace_deopt) {
+      if (trace_) {
         PrintF(
             "    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s ",
             output_[frame_index]->GetTop() + output_offset,
@@ -994,7 +999,7 @@
       int input_reg = iterator->Next();
       intptr_t value = input_->GetRegister(input_reg);
       bool is_smi = Smi::IsValid(value);
-      if (FLAG_trace_deopt) {
+      if (trace_) {
         PrintF(
             "    0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIdPTR " ; %s (%s)\n",
             output_[frame_index]->GetTop() + output_offset,
@@ -1021,7 +1026,7 @@
       int input_reg = iterator->Next();
       uintptr_t value = static_cast<uintptr_t>(input_->GetRegister(input_reg));
       bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
-      if (FLAG_trace_deopt) {
+      if (trace_) {
         PrintF(
             "    0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIuPTR
             " ; uint %s (%s)\n",
@@ -1048,7 +1053,7 @@
     case Translation::DOUBLE_REGISTER: {
       int input_reg = iterator->Next();
       double value = input_->GetDoubleRegister(input_reg);
-      if (FLAG_trace_deopt) {
+      if (trace_) {
         PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- %e ; %s\n",
                output_[frame_index]->GetTop() + output_offset,
                output_offset,
@@ -1067,7 +1072,7 @@
       unsigned input_offset =
           input_->GetOffsetFromSlotIndex(input_slot_index);
       intptr_t input_value = input_->GetFrameSlot(input_offset);
-      if (FLAG_trace_deopt) {
+      if (trace_) {
         PrintF("    0x%08" V8PRIxPTR ": ",
                output_[frame_index]->GetTop() + output_offset);
         PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
@@ -1087,7 +1092,7 @@
           input_->GetOffsetFromSlotIndex(input_slot_index);
       intptr_t value = input_->GetFrameSlot(input_offset);
       bool is_smi = Smi::IsValid(value);
-      if (FLAG_trace_deopt) {
+      if (trace_) {
         PrintF("    0x%08" V8PRIxPTR ": ",
                output_[frame_index]->GetTop() + output_offset);
         PrintF("[top + %d] <- %" V8PRIdPTR " ; [sp + %d] (%s)\n",
@@ -1117,7 +1122,7 @@
       uintptr_t value =
           static_cast<uintptr_t>(input_->GetFrameSlot(input_offset));
       bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
-      if (FLAG_trace_deopt) {
+      if (trace_) {
         PrintF("    0x%08" V8PRIxPTR ": ",
                output_[frame_index]->GetTop() + output_offset);
         PrintF("[top + %d] <- %" V8PRIuPTR " ; [sp + %d] (uint32 %s)\n",
@@ -1145,7 +1150,7 @@
       unsigned input_offset =
           input_->GetOffsetFromSlotIndex(input_slot_index);
       double value = input_->GetDoubleFrameSlot(input_offset);
-      if (FLAG_trace_deopt) {
+      if (trace_) {
         PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- %e ; [sp + %d]\n",
                output_[frame_index]->GetTop() + output_offset,
                output_offset,
@@ -1161,7 +1166,7 @@
 
     case Translation::LITERAL: {
       Object* literal = ComputeLiteral(iterator->Next());
-      if (FLAG_trace_deopt) {
+      if (trace_) {
         PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- ",
                output_[frame_index]->GetTop() + output_offset,
                output_offset);
@@ -1176,7 +1181,7 @@
     case Translation::ARGUMENTS_OBJECT: {
       int args_index = iterator->Next() + 1;  // Skip receiver.
       int args_length = iterator->Next() - 1;  // Skip receiver.
-      if (FLAG_trace_deopt) {
+      if (trace_) {
         PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- ",
                output_[frame_index]->GetTop() + output_offset,
                output_offset);
@@ -1497,10 +1502,9 @@
     // size matches with the stack height we can compute based on the
     // environment at the OSR entry. The code for that his built into
     // the DoComputeOsrOutputFrame function for now.
-  } else {
+  } else if (compiled_code_->kind() != Code::COMPILED_STUB) {
     unsigned stack_slots = compiled_code_->stack_slots();
-    unsigned outgoing_size = compiled_code_->kind() == Code::COMPILED_STUB
-        ? 0 : ComputeOutgoingArgumentSize();
+    unsigned outgoing_size = ComputeOutgoingArgumentSize();
     ASSERT(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size);
   }
 #endif
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index 656d3fe..1a9d693 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -144,7 +144,8 @@
     DEBUGGER
   };
 
-  static bool TraceEnabledFor(BailoutType type);
+  static bool TraceEnabledFor(BailoutType deopt_type,
+                              StackFrame::Type frame_type);
   static const char* MessageFor(BailoutType type);
 
   int output_count() const { return output_count_; }
@@ -407,6 +408,8 @@
   List<ArgumentsObjectMaterializationDescriptor> deferred_arguments_objects_;
   List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_;
 
+  bool trace_;
+
   static const int table_entry_size_;
 
   friend class FrameDescription;
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 0983644..4a0d3c6 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -344,7 +344,9 @@
 DEFINE_bool(opt, true, "use adaptive optimizations")
 DEFINE_bool(always_opt, false, "always try to optimize functions")
 DEFINE_bool(prepare_always_opt, false, "prepare for turning on always opt")
-DEFINE_bool(trace_deopt, false, "trace deoptimization")
+DEFINE_bool(trace_deopt, false, "trace optimize function deoptimization")
+DEFINE_bool(trace_stub_failures, false,
+            "trace deoptimization of generated code stubs")
 
 // compiler.cc
 DEFINE_int(min_preparse_length, 1024,
diff --git a/src/frames-inl.h b/src/frames-inl.h
index e0c934f..83b37a5 100644
--- a/src/frames-inl.h
+++ b/src/frames-inl.h
@@ -256,7 +256,7 @@
 
 
 inline StubFailureTrampolineFrame::StubFailureTrampolineFrame(
-    StackFrameIterator* iterator) : InternalFrame(iterator) {
+    StackFrameIterator* iterator) : StandardFrame(iterator) {
 }
 
 
diff --git a/src/frames.cc b/src/frames.cc
index 98b3d65..40abf64 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -1304,14 +1304,41 @@
 
 
 void StubFailureTrampolineFrame::Iterate(ObjectVisitor* v) const {
-  const int offset = StandardFrameConstants::kContextOffset;
   Object** base = &Memory::Object_at(sp());
-  Object** limit = &Memory::Object_at(fp() + offset) + 1;
+  Object** limit = &Memory::Object_at(fp() +
+                                      kFirstRegisterParameterFrameOffset);
+  v->VisitPointers(base, limit);
+  base = &Memory::Object_at(fp() + StandardFrameConstants::kMarkerOffset);
+  const int offset = StandardFrameConstants::kContextOffset;
+  limit = &Memory::Object_at(fp() + offset) + 1;
   v->VisitPointers(base, limit);
   IteratePc(v, pc_address(), LookupCode());
 }
 
 
+Address StubFailureTrampolineFrame::GetCallerStackPointer() const {
+  return fp() + StandardFrameConstants::kCallerSPOffset;
+}
+
+
+Code* StubFailureTrampolineFrame::unchecked_code() const {
+  int i = 0;
+  for (; i <= StubFailureTrampolineStub::kMaxExtraExpressionStackCount; ++i) {
+    Code* trampoline;
+    StubFailureTrampolineStub(i).FindCodeInCache(&trampoline, isolate());
+    ASSERT(trampoline != NULL);
+    Address current_pc = pc();
+    Address code_start = trampoline->instruction_start();
+    Address code_end = code_start + trampoline->instruction_size();
+    if (code_start <= current_pc && current_pc < code_end) {
+      return trampoline;
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
 // -------------------------------------------------------------------------
 
 
diff --git a/src/frames.h b/src/frames.h
index 26bf434..a60e535 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -143,6 +143,22 @@
   V(ARGUMENTS_ADAPTOR,       ArgumentsAdaptorFrame)
 
 
+class StandardFrameConstants : public AllStatic {
+ public:
+  // Fixed part of the frame consists of return address, caller fp,
+  // context and function.
+  // StandardFrame::IterateExpressions assumes that kContextOffset is the last
+  // object pointer.
+  static const int kFixedFrameSize    =  4 * kPointerSize;
+  static const int kExpressionsOffset = -3 * kPointerSize;
+  static const int kMarkerOffset      = -2 * kPointerSize;
+  static const int kContextOffset     = -1 * kPointerSize;
+  static const int kCallerFPOffset    =  0 * kPointerSize;
+  static const int kCallerPCOffset    = +1 * kPointerSize;
+  static const int kCallerSPOffset    = +2 * kPointerSize;
+};
+
+
 // Abstract base class for all stack frames.
 class StackFrame BASE_EMBEDDED {
  public:
@@ -672,16 +688,30 @@
 };
 
 
-class StubFailureTrampolineFrame: public InternalFrame {
+class StubFailureTrampolineFrame: public StandardFrame {
  public:
+  // sizeof(Arguments) - sizeof(Arguments*) is 3 * kPointerSize), but the
+  // presubmit script complains about using sizeof() on a type.
+  static const int kFirstRegisterParameterFrameOffset =
+      StandardFrameConstants::kMarkerOffset - 3 * kPointerSize;
+
+  static const int kCallerStackParameterCountFrameOffset =
+      StandardFrameConstants::kMarkerOffset - 2 * kPointerSize;
+
   virtual Type type() const { return STUB_FAILURE_TRAMPOLINE; }
 
+  // Get the code associated with this frame.
+  // This method could be called during marking phase of GC.
+  virtual Code* unchecked_code() const;
+
   virtual void Iterate(ObjectVisitor* v) const;
 
  protected:
   inline explicit StubFailureTrampolineFrame(
       StackFrameIterator* iterator);
 
+  virtual Address GetCallerStackPointer() const;
+
  private:
   friend class StackFrameIterator;
 };
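
A quick numeric sanity check of the new frame offsets, restating the constants locally for a 32-bit target (kPointerSize == 4) so the snippet stands alone: the marker sits at fp - 8, the caller stack parameter count at fp - 16, and the first register parameter slot at fp - 20.

    #include <cstdio>

    int main() {
      const int kPointerSize = 4;                   // 32-bit target assumption
      const int kMarkerOffset = -2 * kPointerSize;  // fp - 8
      const int kFirstRegisterParameterFrameOffset =
          kMarkerOffset - 3 * kPointerSize;         // fp - 20
      const int kCallerStackParameterCountFrameOffset =
          kMarkerOffset - 2 * kPointerSize;         // fp - 16
      std::printf("marker %d, caller param count %d, first register param %d\n",
                  kMarkerOffset,
                  kCallerStackParameterCountFrameOffset,
                  kFirstRegisterParameterFrameOffset);
      return 0;
    }
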
diff --git a/src/heap.h b/src/heap.h
index 0cbfb39..5eefc1c 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1639,8 +1639,9 @@
   }
 
   bool IsSweepingComplete() {
-    return old_data_space()->IsSweepingComplete() &&
-           old_pointer_space()->IsSweepingComplete();
+    return !mark_compact_collector()->IsConcurrentSweepingInProgress() &&
+           old_data_space()->IsLazySweepingComplete() &&
+           old_pointer_space()->IsLazySweepingComplete();
   }
 
   bool AdvanceSweepers(int step_size) {
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index eea429e..53f1b65 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -4010,11 +4010,20 @@
 
 class HParameter: public HTemplateInstruction<0> {
  public:
-  explicit HParameter(unsigned index) : index_(index) {
+  enum ParameterKind {
+    STACK_PARAMETER,
+    REGISTER_PARAMETER
+  };
+
+  explicit HParameter(unsigned index,
+                      ParameterKind kind = STACK_PARAMETER)
+      : index_(index),
+        kind_(kind) {
     set_representation(Representation::Tagged());
   }
 
   unsigned index() const { return index_; }
+  ParameterKind kind() const { return kind_; }
 
   virtual void PrintDataTo(StringStream* stream);
 
@@ -4026,6 +4035,7 @@
 
  private:
   unsigned index_;
+  ParameterKind kind_;
 };
 
 
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 9546312..d9cd90d 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -48,6 +48,7 @@
   static Register registers[] = { edx, ecx };
   descriptor->register_param_count_ = 2;
   descriptor->register_params_ = registers;
+  descriptor->stack_parameter_count_ = NULL;
   descriptor->deoptimization_handler_ =
       FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
 }
@@ -7646,8 +7647,14 @@
   bool save_fp_regs = CpuFeatures::IsSupported(SSE2);
   CEntryStub ces(1, save_fp_regs ? kSaveFPRegs : kDontSaveFPRegs);
   __ call(ces.GetCode(), RelocInfo::CODE_TARGET);
+  int parameter_count_offset =
+      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+  __ mov(ebx, MemOperand(ebp, parameter_count_offset));
   masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
-  __ ret(0);  // Return to IC Miss stub, continuation still on stack.
+  __ pop(ecx);
+  __ lea(esp, MemOperand(esp, ebx, times_pointer_size,
+                         extra_expression_stack_count_ * kPointerSize));
+  __ jmp(ecx);  // Return to IC Miss stub, continuation still on stack.
 }
 
 
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index 8d9c6b2..9743b65 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -468,7 +468,7 @@
   JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
   unsigned height = iterator->Next();
   unsigned height_in_bytes = height * kPointerSize;
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("  translating arguments adaptor => height=%d\n", height_in_bytes);
   }
 
@@ -503,7 +503,7 @@
   output_offset -= kPointerSize;
   intptr_t callers_pc = output_[frame_index - 1]->GetPc();
   output_frame->SetFrameSlot(output_offset, callers_pc);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
            top_address + output_offset, output_offset, callers_pc);
   }
@@ -514,7 +514,7 @@
   output_frame->SetFrameSlot(output_offset, value);
   intptr_t fp_value = top_address + output_offset;
   output_frame->SetFp(fp_value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
            fp_value, output_offset, value);
   }
@@ -524,7 +524,7 @@
   intptr_t context = reinterpret_cast<intptr_t>(
       Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   output_frame->SetFrameSlot(output_offset, context);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n",
            top_address + output_offset, output_offset, context);
   }
@@ -533,7 +533,7 @@
   output_offset -= kPointerSize;
   value = reinterpret_cast<intptr_t>(function);
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; function\n",
            top_address + output_offset, output_offset, value);
   }
@@ -542,7 +542,7 @@
   output_offset -= kPointerSize;
   value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
            top_address + output_offset, output_offset, value, height - 1);
   }
@@ -573,13 +573,19 @@
   // v  +-------------------------+          +-------------------------|
   //    |   COMPILED_STUB marker  |          |   STUB_FAILURE marker   |
   //    +-------------------------+          +-------------------------+
-  //    |                         |          |     stub parameter 1    |
+  //    |                         |          |   caller args.length_   |
   //    | ...                     |          +-------------------------+
-  //    |                         |          |            ...          |
+  //    |                         |          |  caller args.arguments_ |
   //    |-------------------------|<-esp     +-------------------------+
-  //                                         |     stub parameter n    |
-  //      parameters in registers            +-------------------------+<-esp
-  //       and spilled to stack              eax = number of parameters
+  //                                         |  caller args pointer    |
+  //                                         +-------------------------+
+  //                                         |  caller stack param 1   |
+  //      parameters in registers            +-------------------------+
+  //       and spilled to stack              |           ....          |
+  //                                         +-------------------------+
+  //                                         |  caller stack param n   |
+  //                                         +-------------------------+<-esp
+  //                                         eax = number of parameters
   //                                         ebx = failure handler address
   //                                         ebp = saved frame
   //                                         esi = JSFunction context
@@ -590,9 +596,15 @@
   CodeStubInterfaceDescriptor* descriptor =
       isolate_->code_stub_interface_descriptor(major_key);
 
+  // The output frame must have room for all pushed register parameters
+  // and the standard stack frame slots.
   int output_frame_size = StandardFrameConstants::kFixedFrameSize +
       kPointerSize * descriptor->register_param_count_;
 
+  // Include space for an argument object to the callee and optionally
+  // the space to pass the argument object to the stub failure handler.
+  output_frame_size += sizeof(Arguments) + kPointerSize;
+
   FrameDescription* output_frame =
       new(output_frame_size) FrameDescription(output_frame_size, 0);
   ASSERT(frame_index == 0);
@@ -604,12 +616,15 @@
       reinterpret_cast<intptr_t>(notify_failure->entry()));
 
   Code* trampoline = NULL;
-  StubFailureTrampolineStub().FindCodeInCache(&trampoline, isolate_);
+  int extra = descriptor->extra_expression_stack_count_;
+  StubFailureTrampolineStub(extra).FindCodeInCache(&trampoline, isolate_);
   ASSERT(trampoline != NULL);
   output_frame->SetPc(reinterpret_cast<intptr_t>(
       trampoline->instruction_start()));
   unsigned input_frame_size = input_->GetFrameSize();
 
+  intptr_t frame_ptr = input_->GetRegister(ebp.code());
+
   // JSFunction continuation
   intptr_t input_frame_offset = input_frame_size - kPointerSize;
   intptr_t output_frame_offset = output_frame_size - kPointerSize;
@@ -635,14 +650,35 @@
       Smi::FromInt(StackFrame::STUB_FAILURE_TRAMPOLINE));
   output_frame->SetFrameSlot(output_frame_offset, value);
 
+  int caller_arg_count = 0;
+  if (descriptor->stack_parameter_count_ != NULL) {
+    caller_arg_count =
+        input_->GetRegister(descriptor->stack_parameter_count_->code());
+  }
+
+  // Build the Arguments object for the caller's parameters and a pointer to it.
+  output_frame_offset -= kPointerSize;
+  value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
+      (caller_arg_count - 1) * kPointerSize;
+  output_frame->SetFrameSlot(output_frame_offset, value);
+
+  output_frame->SetFrameSlot(output_frame_offset, value);
+  output_frame_offset -= kPointerSize;
+  output_frame->SetFrameSlot(output_frame_offset, caller_arg_count);
+
+  value = frame_ptr - (output_frame_size - output_frame_offset) -
+      StandardFrameConstants::kMarkerOffset;
+  output_frame_offset -= kPointerSize;
+  output_frame->SetFrameSlot(output_frame_offset, value);
+
+  // Copy the register parameters to the failure frame.
   for (int i = 0; i < descriptor->register_param_count_; ++i) {
     output_frame_offset -= kPointerSize;
     DoTranslateCommand(iterator, 0, output_frame_offset);
   }
 
-  value = input_->GetRegister(ebp.code());
-  output_frame->SetRegister(ebp.code(), value);
-  output_frame->SetFp(value);
+  output_frame->SetRegister(ebp.code(), frame_ptr);
+  output_frame->SetFp(frame_ptr);
 
   for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
     double double_value = input_->GetDoubleRegister(i);
@@ -651,7 +687,11 @@
 
   intptr_t handler =
       reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_);
-  output_frame->SetRegister(eax.code(), descriptor->register_param_count_);
+  int params = descriptor->register_param_count_;
+  if (descriptor->stack_parameter_count_ != NULL) {
+    params++;
+  }
+  output_frame->SetRegister(eax.code(), params);
   output_frame->SetRegister(ebx.code(), handler);
 }
 
@@ -663,7 +703,7 @@
   JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
   unsigned height = iterator->Next();
   unsigned height_in_bytes = height * kPointerSize;
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("  translating construct stub => height=%d\n", height_in_bytes);
   }
 
@@ -698,7 +738,7 @@
   output_offset -= kPointerSize;
   intptr_t callers_pc = output_[frame_index - 1]->GetPc();
   output_frame->SetFrameSlot(output_offset, callers_pc);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
            top_address + output_offset, output_offset, callers_pc);
   }
@@ -709,7 +749,7 @@
   output_frame->SetFrameSlot(output_offset, value);
   intptr_t fp_value = top_address + output_offset;
   output_frame->SetFp(fp_value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
            fp_value, output_offset, value);
   }
@@ -718,7 +758,7 @@
   output_offset -= kPointerSize;
   value = output_[frame_index - 1]->GetContext();
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; context\n",
            top_address + output_offset, output_offset, value);
   }
@@ -727,7 +767,7 @@
   output_offset -= kPointerSize;
   value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n",
            top_address + output_offset, output_offset, value);
   }
@@ -736,7 +776,7 @@
   output_offset -= kPointerSize;
   value = reinterpret_cast<intptr_t>(construct_stub);
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; code object\n",
            top_address + output_offset, output_offset, value);
   }
@@ -745,7 +785,7 @@
   output_offset -= kPointerSize;
   value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
            top_address + output_offset, output_offset, value, height - 1);
   }
@@ -755,7 +795,7 @@
   output_offset -= kPointerSize;
   value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n",
            top_address + output_offset, output_offset, value);
   }
@@ -779,7 +819,7 @@
   unsigned height = 0;
   unsigned height_in_bytes = height * kPointerSize;
   const char* kind = is_setter_stub_frame ? "setter" : "getter";
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("  translating %s stub => height=%u\n", kind, height_in_bytes);
   }
 
@@ -813,7 +853,7 @@
   output_offset -= kPointerSize;
   intptr_t callers_pc = output_[frame_index - 1]->GetPc();
   output_frame->SetFrameSlot(output_offset, callers_pc);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
            " ; caller's pc\n",
            top_address + output_offset, output_offset, callers_pc);
@@ -825,7 +865,7 @@
   output_frame->SetFrameSlot(output_offset, value);
   intptr_t fp_value = top_address + output_offset;
   output_frame->SetFp(fp_value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
            " ; caller's fp\n",
            fp_value, output_offset, value);
@@ -835,7 +875,7 @@
   output_offset -= kPointerSize;
   value = output_[frame_index - 1]->GetContext();
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
            " ; context\n",
            top_address + output_offset, output_offset, value);
@@ -845,7 +885,7 @@
   output_offset -= kPointerSize;
   value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
            " ; function (%s sentinel)\n",
            top_address + output_offset, output_offset, value, kind);
@@ -859,7 +899,7 @@
   Code* accessor_stub = isolate_->builtins()->builtin(name);
   value = reinterpret_cast<intptr_t>(accessor_stub);
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
            " ; code object\n",
            top_address + output_offset, output_offset, value);
@@ -902,7 +942,7 @@
   }
   unsigned height = iterator->Next();
   unsigned height_in_bytes = height * kPointerSize;
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("  translating ");
     function->PrintName();
     PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
@@ -980,7 +1020,7 @@
     value = output_[frame_index - 1]->GetPc();
   }
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
            top_address + output_offset, output_offset, value);
   }
@@ -1003,7 +1043,7 @@
     fp_value);
   output_frame->SetFp(fp_value);
   if (is_topmost) output_frame->SetRegister(ebp.code(), fp_value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
            fp_value, output_offset, value);
   }
@@ -1023,7 +1063,7 @@
   output_frame->SetFrameSlot(output_offset, value);
   output_frame->SetContext(value);
   if (is_topmost) output_frame->SetRegister(esi.code(), value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; context\n",
            top_address + output_offset, output_offset, value);
   }
@@ -1036,7 +1076,7 @@
   // input frame.
   ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08x: [top + %d] <- 0x%08x ; function\n",
            top_address + output_offset, output_offset, value);
   }
diff --git a/src/ia32/frames-ia32.h b/src/ia32/frames-ia32.h
index 18915e2..5bd102a 100644
--- a/src/ia32/frames-ia32.h
+++ b/src/ia32/frames-ia32.h
@@ -97,22 +97,6 @@
 };
 
 
-class StandardFrameConstants : public AllStatic {
- public:
-  // Fixed part of the frame consists of return address, caller fp,
-  // context and function.
-  // StandardFrame::IterateExpressions assumes that kContextOffset is the last
-  // object pointer.
-  static const int kFixedFrameSize    =  4 * kPointerSize;
-  static const int kExpressionsOffset = -3 * kPointerSize;
-  static const int kMarkerOffset      = -2 * kPointerSize;
-  static const int kContextOffset     = -1 * kPointerSize;
-  static const int kCallerFPOffset    =  0 * kPointerSize;
-  static const int kCallerPCOffset    = +1 * kPointerSize;
-  static const int kCallerSPOffset    = +2 * kPointerSize;
-};
-
-
 class JavaScriptFrameConstants : public AllStatic {
  public:
   // FP-relative.
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 42ae13f..37d8390 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -2346,7 +2346,7 @@
 
 LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
   LParameter* result = new(zone()) LParameter;
-  if (info()->IsOptimizing()) {
+  if (instr->kind() == HParameter::STACK_PARAMETER) {
     int spill_index = chunk()->GetParameterStackSlot(instr->index());
     return DefineAsSpilled(result, spill_index);
   } else {
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 704f8de..96dc46b 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -2368,11 +2368,11 @@
 }
 
 
-Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
-                                                   Handle<JSObject> holder,
-                                                   Handle<JSFunction> function,
-                                                   Handle<String> name,
-                                                   CheckType check) {
+void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
+                                              Handle<JSObject> holder,
+                                              Handle<String> name,
+                                              CheckType check,
+                                              Label* success) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
   //  -- esp[0]              : return address
@@ -2380,15 +2380,6 @@
   //  -- ...
   //  -- esp[(argc + 1) * 4] : receiver
   // -----------------------------------
-
-  if (HasCustomCallGenerator(function)) {
-    Handle<Code> code = CompileCustomCall(object, holder,
-                                          Handle<JSGlobalPropertyCell>::null(),
-                                          function, name);
-    // A null handle means bail out to the regular compiler code below.
-    if (!code.is_null()) return code;
-  }
-
   Label miss;
   GenerateNameCheck(name, &miss);
 
@@ -2421,76 +2412,87 @@
       break;
 
     case STRING_CHECK:
-      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
-        // Check that the object is a string or a symbol.
-        __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, eax);
-        __ j(above_equal, &miss);
-        // Check that the maps starting from the prototype haven't changed.
-        GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::STRING_FUNCTION_INDEX, eax, &miss);
-        CheckPrototypes(
-            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
-            eax, holder, ebx, edx, edi, name, &miss);
-      } else {
-        // Calling non-strict non-builtins with a value as the receiver
-        // requires boxing.
-        __ jmp(&miss);
-      }
+      // Check that the object is a string or a symbol.
+      __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, eax);
+      __ j(above_equal, &miss);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateDirectLoadGlobalFunctionPrototype(
+          masm(), Context::STRING_FUNCTION_INDEX, eax, &miss);
+      CheckPrototypes(
+          Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+          eax, holder, ebx, edx, edi, name, &miss);
       break;
 
-    case NUMBER_CHECK:
-      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
-        Label fast;
-        // Check that the object is a smi or a heap number.
-        __ JumpIfSmi(edx, &fast);
-        __ CmpObjectType(edx, HEAP_NUMBER_TYPE, eax);
-        __ j(not_equal, &miss);
-        __ bind(&fast);
-        // Check that the maps starting from the prototype haven't changed.
-        GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::NUMBER_FUNCTION_INDEX, eax, &miss);
-        CheckPrototypes(
-            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
-            eax, holder, ebx, edx, edi, name, &miss);
-      } else {
-        // Calling non-strict non-builtins with a value as the receiver
-        // requires boxing.
-        __ jmp(&miss);
-      }
+    case NUMBER_CHECK: {
+      Label fast;
+      // Check that the object is a smi or a heap number.
+      __ JumpIfSmi(edx, &fast);
+      __ CmpObjectType(edx, HEAP_NUMBER_TYPE, eax);
+      __ j(not_equal, &miss);
+      __ bind(&fast);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateDirectLoadGlobalFunctionPrototype(
+          masm(), Context::NUMBER_FUNCTION_INDEX, eax, &miss);
+      CheckPrototypes(
+          Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+          eax, holder, ebx, edx, edi, name, &miss);
       break;
-
-    case BOOLEAN_CHECK:
-      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
-        Label fast;
-        // Check that the object is a boolean.
-        __ cmp(edx, factory()->true_value());
-        __ j(equal, &fast);
-        __ cmp(edx, factory()->false_value());
-        __ j(not_equal, &miss);
-        __ bind(&fast);
-        // Check that the maps starting from the prototype haven't changed.
-        GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::BOOLEAN_FUNCTION_INDEX, eax, &miss);
-        CheckPrototypes(
-            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
-            eax, holder, ebx, edx, edi, name, &miss);
-      } else {
-        // Calling non-strict non-builtins with a value as the receiver
-        // requires boxing.
-        __ jmp(&miss);
-      }
+    }
+    case BOOLEAN_CHECK: {
+      Label fast;
+      // Check that the object is a boolean.
+      __ cmp(edx, factory()->true_value());
+      __ j(equal, &fast);
+      __ cmp(edx, factory()->false_value());
+      __ j(not_equal, &miss);
+      __ bind(&fast);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateDirectLoadGlobalFunctionPrototype(
+          masm(), Context::BOOLEAN_FUNCTION_INDEX, eax, &miss);
+      CheckPrototypes(
+          Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+          eax, holder, ebx, edx, edi, name, &miss);
       break;
+    }
   }
 
+  __ jmp(success);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  GenerateMissBranch();
+}
+
+
+void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) {
   CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
                     NullCallWrapper(), call_kind);
+}
 
-  // Handle call cache miss.
-  __ bind(&miss);
-  GenerateMissBranch();
+
+Handle<Code> CallStubCompiler::CompileCallConstant(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<String> name,
+    CheckType check,
+    Handle<JSFunction> function) {
+
+  if (HasCustomCallGenerator(function)) {
+    Handle<Code> code = CompileCustomCall(object, holder,
+                                          Handle<JSGlobalPropertyCell>::null(),
+                                          function, name);
+    // A null handle means bail out to the regular compiler code below.
+    if (!code.is_null()) return code;
+  }
+
+  Label success;
+
+  CompileHandlerFrontend(object, holder, name, check, &success);
+  __ bind(&success);
+  CompileHandlerBackend(function);
 
   // Return the generated code.
   return GetCode(function);
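
Illustrative sketch (not part of the patch): CompileCallConstant is now split into a frontend that emits the receiver checks plus the miss branch, and a backend that emits the invoke, joined at a success label. The stand-in types below only model that control flow; they are not the real CallStubCompiler or MacroAssembler API.

    // Stand-in types; models the new compile flow only.
    #include <cstdio>

    struct Label { bool bound = false; };

    struct SketchCompiler {
      // Emits receiver/type checks, a jump to |success| on match, and the
      // miss branch otherwise (CompileHandlerFrontend in the patch).
      void CompileHandlerFrontend(const char* name, Label* success) {
        std::printf("emit checks for %s; jmp success\n", name);
        (void)success;
        std::printf("emit miss branch\n");
      }
      // Emits the tail call into the constant function
      // (CompileHandlerBackend in the patch).
      void CompileHandlerBackend(const char* function) {
        std::printf("emit InvokeFunction(%s)\n", function);
      }
      void CompileCallConstant(const char* name, const char* function) {
        Label success;
        CompileHandlerFrontend(name, &success);
        success.bound = true;  // __ bind(&success) in the real stub compiler
        CompileHandlerBackend(function);
      }
    };

    int main() {
      SketchCompiler().CompileCallConstant("charAt", "String.prototype.charAt");
    }
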
diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc
index 97332c5..e2fca5b 100644
--- a/src/incremental-marking.cc
+++ b/src/incremental-marking.cc
@@ -562,6 +562,7 @@
 
 
 void IncrementalMarking::Start() {
+  ASSERT(!heap_->mark_compact_collector()->IsConcurrentSweepingInProgress());
   if (FLAG_trace_incremental_marking) {
     PrintF("[IncrementalMarking] Start\n");
   }
@@ -570,8 +571,7 @@
 
   ResetStepCounters();
 
-  if (heap_->old_pointer_space()->IsSweepingComplete() &&
-      heap_->old_data_space()->IsSweepingComplete()) {
+  if (heap_->IsSweepingComplete()) {
     StartMarking(ALLOW_COMPACTION);
   } else {
     if (FLAG_trace_incremental_marking) {
diff --git a/src/isolate.cc b/src/isolate.cc
index d70e19e..c8d9c3a 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -2120,7 +2120,7 @@
     // Ensure that the stub failure trampoline has been generated.
     HandleScope scope(this);
     CodeStub::GenerateFPStubs();
-    StubFailureTrampolineStub().GetCode();
+    StubFailureTrampolineStub::GenerateAheadOfTime();
   }
 
   if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Start();
diff --git a/src/jsregexp-inl.h b/src/jsregexp-inl.h
new file mode 100644
index 0000000..3ef07d8
--- /dev/null
+++ b/src/jsregexp-inl.h
@@ -0,0 +1,106 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_JSREGEXP_INL_H_
+#define V8_JSREGEXP_INL_H_
+
+#include "allocation.h"
+#include "handles.h"
+#include "heap.h"
+#include "jsregexp.h"
+#include "objects.h"
+
+namespace v8 {
+namespace internal {
+
+
+RegExpImpl::GlobalCache::~GlobalCache() {
+  // Deallocate the register array if we allocated it in the constructor
+  // (as opposed to using the existing jsregexp_static_offsets_vector).
+  if (register_array_size_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
+    DeleteArray(register_array_);
+  }
+}
+
+
+int32_t* RegExpImpl::GlobalCache::FetchNext() {
+  current_match_index_++;
+  if (current_match_index_ >= num_matches_) {
+    // Current batch of results exhausted.
+    // Fail if last batch was not even fully filled.
+    if (num_matches_ < max_matches_) {
+      num_matches_ = 0;  // Signal failed match.
+      return NULL;
+    }
+
+    int32_t* last_match =
+        &register_array_[(current_match_index_ - 1) * registers_per_match_];
+    int last_end_index = last_match[1];
+
+    if (regexp_->TypeTag() == JSRegExp::ATOM) {
+      num_matches_ = RegExpImpl::AtomExecRaw(regexp_,
+                                             subject_,
+                                             last_end_index,
+                                             register_array_,
+                                             register_array_size_);
+    } else {
+      int last_start_index = last_match[0];
+      if (last_start_index == last_end_index) last_end_index++;
+      if (last_end_index > subject_->length()) {
+        num_matches_ = 0;  // Signal failed match.
+        return NULL;
+      }
+      num_matches_ = RegExpImpl::IrregexpExecRaw(regexp_,
+                                                 subject_,
+                                                 last_end_index,
+                                                 register_array_,
+                                                 register_array_size_);
+    }
+
+    if (num_matches_ <= 0) return NULL;
+    current_match_index_ = 0;
+    return register_array_;
+  } else {
+    return &register_array_[current_match_index_ * registers_per_match_];
+  }
+}
+
+
+int32_t* RegExpImpl::GlobalCache::LastSuccessfulMatch() {
+  int index = current_match_index_ * registers_per_match_;
+  if (num_matches_ == 0) {
+    // After a failed match we shift back by one result.
+    index -= registers_per_match_;
+  }
+  return &register_array_[index];
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_JSREGEXP_INL_H_
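
For illustration only (not part of the patch): FetchNext above hands out one match at a time from a register array that is refilled in batches, and a batch that comes back short signals that the subject is exhausted. The toy class below, with a fake ExecRaw standing in for AtomExecRaw/IrregexpExecRaw, models just that batching behaviour.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    class BatchedMatches {
     public:
      BatchedMatches(int max_matches, int registers_per_match)
          : max_matches_(max_matches),
            registers_per_match_(registers_per_match),
            registers_(max_matches * registers_per_match),
            num_matches_(0),
            current_(-1) {}

      // Returns the registers of the next match, refilling in batches, or
      // nullptr once no further matches exist (the NULL return above).
      int32_t* FetchNext() {
        if (++current_ < num_matches_) {
          return &registers_[current_ * registers_per_match_];
        }
        // A batch that was not completely filled means we are done.
        if (num_matches_ != 0 && num_matches_ < max_matches_) return nullptr;
        num_matches_ = ExecRaw();  // stand-in for Atom/IrregexpExecRaw
        if (num_matches_ <= 0) return nullptr;
        current_ = 0;
        return &registers_[0];
      }

     private:
      int ExecRaw() {
        static int calls = 0;  // toy engine: one full batch, then one match
        return ++calls == 1 ? max_matches_ : (calls == 2 ? 1 : 0);
      }

      int max_matches_;
      int registers_per_match_;
      std::vector<int32_t> registers_;
      int num_matches_;
      int current_;
    };

    int main() {
      BatchedMatches matches(4, 2);
      int fetched = 0;
      while (matches.FetchNext() != nullptr) fetched++;
      std::printf("fetched %d matches\n", fetched);  // 5
    }
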
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index a33df6f..8928d4e 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -32,6 +32,7 @@
 #include "execution.h"
 #include "factory.h"
 #include "jsregexp.h"
+#include "jsregexp-inl.h"
 #include "platform.h"
 #include "string-search.h"
 #include "runtime.h"
@@ -760,68 +761,6 @@
 }
 
 
-RegExpImpl::GlobalCache::~GlobalCache() {
-  // Deallocate the register array if we allocated it in the constructor
-  // (as opposed to using the existing jsregexp_static_offsets_vector).
-  if (register_array_size_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
-    DeleteArray(register_array_);
-  }
-}
-
-
-int32_t* RegExpImpl::GlobalCache::FetchNext() {
-  current_match_index_++;
-  if (current_match_index_ >= num_matches_) {
-    // Current batch of results exhausted.
-    // Fail if last batch was not even fully filled.
-    if (num_matches_ < max_matches_) {
-      num_matches_ = 0;  // Signal failed match.
-      return NULL;
-    }
-
-    int32_t* last_match =
-        &register_array_[(current_match_index_ - 1) * registers_per_match_];
-    int last_end_index = last_match[1];
-
-    if (regexp_->TypeTag() == JSRegExp::ATOM) {
-      num_matches_ = RegExpImpl::AtomExecRaw(regexp_,
-                                             subject_,
-                                             last_end_index,
-                                             register_array_,
-                                             register_array_size_);
-    } else {
-      int last_start_index = last_match[0];
-      if (last_start_index == last_end_index) last_end_index++;
-      if (last_end_index > subject_->length()) {
-        num_matches_ = 0;  // Signal failed match.
-        return NULL;
-      }
-      num_matches_ = RegExpImpl::IrregexpExecRaw(regexp_,
-                                                 subject_,
-                                                 last_end_index,
-                                                 register_array_,
-                                                 register_array_size_);
-    }
-
-    if (num_matches_ <= 0) return NULL;
-    current_match_index_ = 0;
-    return register_array_;
-  } else {
-    return &register_array_[current_match_index_ * registers_per_match_];
-  }
-}
-
-
-int32_t* RegExpImpl::GlobalCache::LastSuccessfulMatch() {
-  int index = current_match_index_ * registers_per_match_;
-  if (num_matches_ == 0) {
-    // After a failed match we shift back by one result.
-    index -= registers_per_match_;
-  }
-  return &register_array_[index];
-}
-
-
 // -------------------------------------------------------------------
 // Implementation of the Irregexp regular expression engine.
 //
diff --git a/src/jsregexp.h b/src/jsregexp.h
index 7e83d42..ae0e902 100644
--- a/src/jsregexp.h
+++ b/src/jsregexp.h
@@ -153,17 +153,17 @@
                 bool is_global,
                 Isolate* isolate);
 
-    ~GlobalCache();
+    INLINE(~GlobalCache());
 
     // Fetch the next entry in the cache for global regexp match results.
     // This does not set the last match info.  Upon failure, NULL is returned.
     // The cause can be checked with Result().  The previous
     // result is still available in memory when a failure happens.
-    int32_t* FetchNext();
+    INLINE(int32_t* FetchNext());
 
-    int32_t* LastSuccessfulMatch();
+    INLINE(int32_t* LastSuccessfulMatch());
 
-    inline bool HasException() { return num_matches_ < 0; }
+    INLINE(bool HasException()) { return num_matches_ < 0; }
 
    private:
     int num_matches_;
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 3c6ab30..869b318 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -561,6 +561,11 @@
 }
 
 
+bool MarkCompactCollector::IsConcurrentSweepingInProgress() {
+  return SweeperThread::sweeping_pending();
+}
+
+
 void MarkCompactCollector::MarkInParallel() {
   for (int i = 0; i < FLAG_marking_threads; i++) {
     heap()->isolate()->marking_threads()[i]->StartMarking();
diff --git a/src/mark-compact.h b/src/mark-compact.h
index 9159b40..2613a3e 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -692,6 +692,8 @@
 
   bool AreSweeperThreadsActivated();
 
+  bool IsConcurrentSweepingInProgress();
+
   // Parallel marking support.
   void MarkInParallel();
 
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 620dce9..0f205c1 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -2430,25 +2430,16 @@
 }
 
 
-Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
-                                                   Handle<JSObject> holder,
-                                                   Handle<JSFunction> function,
-                                                   Handle<String> name,
-                                                   CheckType check) {
+void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
+                                              Handle<JSObject> holder,
+                                              Handle<String> name,
+                                              CheckType check,
+                                              Label* success) {
   // ----------- S t a t e -------------
   //  -- a2    : name
   //  -- ra    : return address
   // -----------------------------------
-  if (HasCustomCallGenerator(function)) {
-    Handle<Code> code = CompileCustomCall(object, holder,
-                                          Handle<JSGlobalPropertyCell>::null(),
-                                          function, name);
-    // A null handle means bail out to the regular compiler code below.
-    if (!code.is_null()) return code;
-  }
-
   Label miss;
-
   GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack.
@@ -2481,77 +2472,87 @@
       break;
 
     case STRING_CHECK:
-      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
-        // Check that the object is a two-byte string or a symbol.
-        __ GetObjectType(a1, a3, a3);
-        __ Branch(&miss, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
-        // Check that the maps starting from the prototype haven't changed.
-        GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::STRING_FUNCTION_INDEX, a0, &miss);
-        CheckPrototypes(
-            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
-            a0, holder, a3, a1, t0, name, &miss);
-      } else {
-        // Calling non-strict non-builtins with a value as the receiver
-        // requires boxing.
-        __ jmp(&miss);
-      }
+      // Check that the object is a two-byte string or a symbol.
+      __ GetObjectType(a1, a3, a3);
+      __ Branch(&miss, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateDirectLoadGlobalFunctionPrototype(
+          masm(), Context::STRING_FUNCTION_INDEX, a0, &miss);
+      CheckPrototypes(
+          Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+          a0, holder, a3, a1, t0, name, &miss);
       break;
 
-    case NUMBER_CHECK:
-      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
+    case NUMBER_CHECK: {
       Label fast;
-        // Check that the object is a smi or a heap number.
-        __ JumpIfSmi(a1, &fast);
-        __ GetObjectType(a1, a0, a0);
-        __ Branch(&miss, ne, a0, Operand(HEAP_NUMBER_TYPE));
-        __ bind(&fast);
-        // Check that the maps starting from the prototype haven't changed.
-        GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::NUMBER_FUNCTION_INDEX, a0, &miss);
-        CheckPrototypes(
-            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
-            a0, holder, a3, a1, t0, name, &miss);
-      } else {
-        // Calling non-strict non-builtins with a value as the receiver
-        // requires boxing.
-        __ jmp(&miss);
-      }
-      break;
-
-    case BOOLEAN_CHECK:
-      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
-        Label fast;
-        // Check that the object is a boolean.
-        __ LoadRoot(t0, Heap::kTrueValueRootIndex);
-        __ Branch(&fast, eq, a1, Operand(t0));
-        __ LoadRoot(t0, Heap::kFalseValueRootIndex);
-        __ Branch(&miss, ne, a1, Operand(t0));
-        __ bind(&fast);
-        // Check that the maps starting from the prototype haven't changed.
-        GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::BOOLEAN_FUNCTION_INDEX, a0, &miss);
-        CheckPrototypes(
-            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
-            a0, holder, a3, a1, t0, name, &miss);
-      } else {
-        // Calling non-strict non-builtins with a value as the receiver
-        // requires boxing.
-        __ jmp(&miss);
-      }
+      // Check that the object is a smi or a heap number.
+      __ JumpIfSmi(a1, &fast);
+      __ GetObjectType(a1, a0, a0);
+      __ Branch(&miss, ne, a0, Operand(HEAP_NUMBER_TYPE));
+      __ bind(&fast);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateDirectLoadGlobalFunctionPrototype(
+          masm(), Context::NUMBER_FUNCTION_INDEX, a0, &miss);
+      CheckPrototypes(
+          Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+          a0, holder, a3, a1, t0, name, &miss);
       break;
     }
+    case BOOLEAN_CHECK: {
+      Label fast;
+      // Check that the object is a boolean.
+      __ LoadRoot(t0, Heap::kTrueValueRootIndex);
+      __ Branch(&fast, eq, a1, Operand(t0));
+      __ LoadRoot(t0, Heap::kFalseValueRootIndex);
+      __ Branch(&miss, ne, a1, Operand(t0));
+      __ bind(&fast);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateDirectLoadGlobalFunctionPrototype(
+          masm(), Context::BOOLEAN_FUNCTION_INDEX, a0, &miss);
+      CheckPrototypes(
+          Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+          a0, holder, a3, a1, t0, name, &miss);
+      break;
+    }
+  }
 
-  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
-      ? CALL_AS_FUNCTION
-      : CALL_AS_METHOD;
-  __ InvokeFunction(
-      function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind);
+  __ jmp(success);
 
   // Handle call cache miss.
   __ bind(&miss);
 
   GenerateMissBranch();
+}
+
+
+void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) {
+  CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
+      ? CALL_AS_FUNCTION
+      : CALL_AS_METHOD;
+  __ InvokeFunction(
+      function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind);
+}
+
+
+Handle<Code> CallStubCompiler::CompileCallConstant(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<String> name,
+    CheckType check,
+    Handle<JSFunction> function) {
+  if (HasCustomCallGenerator(function)) {
+    Handle<Code> code = CompileCustomCall(object, holder,
+                                          Handle<JSGlobalPropertyCell>::null(),
+                                          function, name);
+    // A null handle means bail out to the regular compiler code below.
+    if (!code.is_null()) return code;
+  }
+
+  Label success;
+
+  CompileHandlerFrontend(object, holder, name, check, &success);
+  __ bind(&success);
+  CompileHandlerBackend(function);
 
   // Return the generated code.
   return GetCode(function);
diff --git a/src/object-observe.js b/src/object-observe.js
index d0a84f6..b35f547 100644
--- a/src/object-observe.js
+++ b/src/object-observe.js
@@ -29,33 +29,38 @@
 
 var observationState = %GetObservationState();
 if (IS_UNDEFINED(observationState.observerInfoMap)) {
-  observationState.observerInfoMap = %CreateObjectHashTable();
-  observationState.objectInfoMap = %CreateObjectHashTable();
-  observationState.notifierTargetMap = %CreateObjectHashTable();
+  observationState.observerInfoMap = %ObservationWeakMapCreate();
+  observationState.objectInfoMap = %ObservationWeakMapCreate();
+  observationState.notifierTargetMap = %ObservationWeakMapCreate();
   observationState.pendingObservers = new InternalArray;
   observationState.observerPriority = 0;
 }
 
-function InternalObjectHashTable(tableName) {
-  this.tableName = tableName;
+function ObservationWeakMap(map) {
+  this.map_ = map;
 }
 
-InternalObjectHashTable.prototype = {
+ObservationWeakMap.prototype = {
   get: function(key) {
-    return %ObjectHashTableGet(observationState[this.tableName], key);
+    key = %UnwrapGlobalProxy(key);
+    if (!IS_SPEC_OBJECT(key)) return void 0;
+    return %WeakMapGet(this.map_, key);
   },
   set: function(key, value) {
-    observationState[this.tableName] =
-        %ObjectHashTableSet(observationState[this.tableName], key, value);
+    key = %UnwrapGlobalProxy(key);
+    if (!IS_SPEC_OBJECT(key)) return void 0;
+    %WeakMapSet(this.map_, key, value);
   },
   has: function(key) {
     return !IS_UNDEFINED(this.get(key));
   }
 };
 
-var observerInfoMap = new InternalObjectHashTable('observerInfoMap');
-var objectInfoMap = new InternalObjectHashTable('objectInfoMap');
-var notifierTargetMap = new InternalObjectHashTable('notifierTargetMap');
+var observerInfoMap =
+    new ObservationWeakMap(observationState.observerInfoMap);
+var objectInfoMap = new ObservationWeakMap(observationState.objectInfoMap);
+var notifierTargetMap =
+    new ObservationWeakMap(observationState.notifierTargetMap);
 
 function CreateObjectInfo(object) {
   var info = {
diff --git a/src/objects.cc b/src/objects.cc
index 948f4a6..4653931 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -9344,6 +9344,25 @@
 }
 
 
+// Returns false if the passed-in index is marked non-configurable,
+// which will cause the ES5 truncation operation to halt, and thus
+// no further old values need be collected.
+static bool GetOldValue(Isolate* isolate,
+                        Handle<JSObject> object,
+                        uint32_t index,
+                        List<Handle<Object> >* old_values,
+                        List<Handle<String> >* indices) {
+  PropertyAttributes attributes = object->GetLocalElementAttribute(index);
+  ASSERT(attributes != ABSENT);
+  if (attributes == DONT_DELETE) return false;
+  old_values->Add(object->GetLocalElementAccessorPair(index) == NULL
+      ? Object::GetElement(object, index)
+      : Handle<Object>::cast(isolate->factory()->the_hole_value()));
+  indices->Add(isolate->factory()->Uint32ToString(index));
+  return true;
+}
+
+
 MaybeObject* JSArray::SetElementsLength(Object* len) {
   // We should never end in here with a pixel or external array.
   ASSERT(AllowsSetElementsLength());
@@ -9363,19 +9382,28 @@
   if (!new_length_handle->ToArrayIndex(&new_length))
     return Failure::InternalError();
 
-  // TODO(adamk): This loop can be very slow for arrays in dictionary mode.
-  // Find another way to iterate over arrays with dictionary elements.
-  for (uint32_t i = old_length - 1; i + 1 > new_length; --i) {
-    PropertyAttributes attributes = self->GetLocalElementAttribute(i);
-    if (attributes == ABSENT) continue;
-    // A non-configurable property will cause the truncation operation to
-    // stop at this index.
-    if (attributes == DONT_DELETE) break;
-    old_values.Add(
-        self->GetLocalElementAccessorPair(i) == NULL
-        ? Object::GetElement(self, i)
-        : Handle<Object>::cast(isolate->factory()->the_hole_value()));
-    indices.Add(isolate->factory()->Uint32ToString(i));
+  // Observed arrays should always be in dictionary mode;
+  // if they were in fast mode, the code below would be slower than necessary
+  // as it iterates over the array backing store multiple times.
+  ASSERT(self->HasDictionaryElements());
+  static const PropertyAttributes kNoAttrFilter = NONE;
+  int num_elements = self->NumberOfLocalElements(kNoAttrFilter);
+  if (num_elements > 0) {
+    if (old_length == static_cast<uint32_t>(num_elements)) {
+      // Simple case for arrays without holes.
+      for (uint32_t i = old_length - 1; i + 1 > new_length; --i) {
+        if (!GetOldValue(isolate, self, i, &old_values, &indices)) break;
+      }
+    } else {
+      // For sparse arrays, only iterate over existing elements.
+      Handle<FixedArray> keys = isolate->factory()->NewFixedArray(num_elements);
+      self->GetLocalElementKeys(*keys, kNoAttrFilter);
+      while (num_elements-- > 0) {
+        uint32_t index = NumberToUint32(keys->get(num_elements));
+        if (index < new_length) break;
+        if (!GetOldValue(isolate, self, index, &old_values, &indices)) break;
+      }
+    }
   }
 
   MaybeObject* result =
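
Illustrative sketch (not part of the patch) of the sparse-array case above: instead of visiting every index from the old length down to the new one, only the keys that actually exist are walked, highest first. A std::map stands in for the dictionary element backing store.

    #include <cstdint>
    #include <cstdio>
    #include <map>
    #include <vector>

    int main() {
      // Three existing elements in a very sparse array.
      std::map<uint32_t, int> elements = {{2, 20}, {1000000, 30}, {5000000, 40}};
      uint32_t new_length = 3;

      std::vector<uint32_t> removed_indices;
      // Walk existing keys from the highest downward; stop once below new_length.
      for (auto it = elements.rbegin(); it != elements.rend(); ++it) {
        if (it->first < new_length) break;
        removed_indices.push_back(it->first);  // record the truncated index
      }
      std::printf("%zu old values recorded (instead of %u iterations)\n",
                  removed_indices.size(), 5000000u - new_length);
    }
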
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 0f0143a..e7e6dc7 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -27,6 +27,17 @@
 
 // Platform specific code for Win32.
 
+// Secure API functions are not available using MinGW with msvcrt.dll
+// on Windows XP. Make sure MINGW_HAS_SECURE_API is not defined to
+// disable definition of secure API functions in standard headers that
+// would conflict with our own implementation.
+#ifdef __MINGW32__
+#include <_mingw.h>
+#ifdef MINGW_HAS_SECURE_API
+#undef MINGW_HAS_SECURE_API
+#endif  // MINGW_HAS_SECURE_API
+#endif  // __MINGW32__
+
 #define V8_WIN32_HEADERS_FULL
 #include "win32-headers.h"
 
@@ -65,8 +76,6 @@
 #endif  // __MINGW64_VERSION_MAJOR
 
 
-#ifndef MINGW_HAS_SECURE_API
-
 int localtime_s(tm* out_tm, const time_t* time) {
   tm* posix_local_time_struct = localtime(time);
   if (posix_local_time_struct == NULL) return 1;
@@ -113,8 +122,6 @@
   return 0;
 }
 
-#endif  // MINGW_HAS_SECURE_API
-
 #endif  // __MINGW32__
 
 // Generate a pseudo-random number in the range 0-2^31-1. Usually
@@ -202,7 +209,7 @@
 UNARY_MATH_FUNCTION(exp, CreateExpFunction())
 UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction())
 
-#undef MATH_FUNCTION
+#undef UNARY_MATH_FUNCTION
 
 
 void lazily_initialize_fast_exp() {
@@ -812,6 +819,9 @@
 }
 
 
+#undef _TRUNCATE
+#undef STRUNCATE
+
 // We keep the lowest and highest addresses mapped as a quick way of
 // determining that pointers are outside the heap (used mostly in assertions
 // and verification).  The estimate is conservative, i.e., not all addresses in
@@ -1218,6 +1228,11 @@
   // application is closed.
 }
 
+#undef DBGHELP_FUNCTION_LIST
+#undef TLHELP32_FUNCTION_LIST
+#undef DLL_FUNC_VAR
+#undef DLL_FUNC_TYPE
+
 
 // Load the symbols for generating stack traces.
 static bool LoadSymbols(HANDLE process_handle) {
diff --git a/src/runtime.cc b/src/runtime.cc
index a86aa40..f808d10 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -45,6 +45,7 @@
 #include "global-handles.h"
 #include "isolate-inl.h"
 #include "jsregexp.h"
+#include "jsregexp-inl.h"
 #include "json-parser.h"
 #include "json-stringifier.h"
 #include "liveedit.h"
@@ -78,6 +79,9 @@
   RUNTIME_ASSERT(args[index]->Is##Type());                           \
   Handle<Type> name = args.at<Type>(index);
 
+#define CONVERT_ARG_STUB_CALLER_ARGS(name)                           \
+  Arguments* name = reinterpret_cast<Arguments*>(args[0]);
+
 // Cast the given object to a boolean and store it in a variable with
 // the given name.  If the object is not a boolean call IllegalOperation
 // and return.
@@ -863,10 +867,8 @@
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapInitialize) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
+static JSWeakMap* WeakMapInitialize(Isolate* isolate,
+                                    Handle<JSWeakMap> weakmap) {
   ASSERT(weakmap->map()->inobject_properties() == 0);
   Handle<ObjectHashTable> table = isolate->factory()->NewObjectHashTable(0);
   weakmap->set_table(*table);
@@ -875,6 +877,14 @@
 }
 
 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapInitialize) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
+  return WeakMapInitialize(isolate, weakmap);
+}
+
+
 RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapGet) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 2);
@@ -13414,37 +13424,28 @@
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectHashTable) {
-  ASSERT(args.length() == 0);
-  return ObjectHashTable::Allocate(0);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ObjectHashTableGet) {
-  NoHandleAllocation ha;
-  ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(ObjectHashTable, table, 0);
-  Object* key = args[1];
-  if (key->IsJSGlobalProxy()) {
-    key = key->GetPrototype();
-    if (key->IsNull()) return isolate->heap()->undefined_value();
-  }
-  Object* lookup = table->Lookup(key);
-  return lookup->IsTheHole() ? isolate->heap()->undefined_value() : lookup;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ObjectHashTableSet) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ObservationWeakMapCreate) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
-  CONVERT_ARG_HANDLE_CHECKED(ObjectHashTable, table, 0);
-  Handle<Object> key = args.at<Object>(1);
-  if (key->IsJSGlobalProxy()) {
-    key = handle(key->GetPrototype(), isolate);
-    if (key->IsNull()) return *table;
+  ASSERT(args.length() == 0);
+  // TODO(adamk): Currently this runtime function is only called three times per
+  // isolate. If it's called more often, the map should be moved into the
+  // strong root list.
+  Handle<Map> map =
+      isolate->factory()->NewMap(JS_WEAK_MAP_TYPE, JSWeakMap::kSize);
+  Handle<JSWeakMap> weakmap =
+      Handle<JSWeakMap>::cast(isolate->factory()->NewJSObjectFromMap(map));
+  return WeakMapInitialize(isolate, weakmap);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_UnwrapGlobalProxy) {
+  ASSERT(args.length() == 1);
+  Object* object = args[0];
+  if (object->IsJSGlobalProxy()) {
+    object = object->GetPrototype();
+    if (object->IsNull()) return isolate->heap()->undefined_value();
   }
-  Handle<Object> value = args.at<Object>(2);
-  return *PutIntoObjectHashTable(table, key, value);
+  return object;
 }
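
Sketch for illustration (not part of the patch): the new ObservationWeakMap wrapper in object-observe.js normalizes its keys through %UnwrapGlobalProxy, so a global proxy and the global object it wraps resolve to the same weak-map entry. Plain structs stand in for V8 objects below.

    #include <cstdio>

    struct Obj {
      bool is_global_proxy = false;
      Obj* prototype = nullptr;
    };

    // Mirrors the new runtime call: a global proxy is replaced by its
    // prototype (the actual global object) before being used as a key.
    Obj* UnwrapGlobalProxy(Obj* key) {
      if (key->is_global_proxy) return key->prototype;  // may be null
      return key;
    }

    int main() {
      Obj global;
      Obj proxy;
      proxy.is_global_proxy = true;
      proxy.prototype = &global;
      std::printf("proxy and global map to the same key: %d\n",
                  UnwrapGlobalProxy(&proxy) == UnwrapGlobalProxy(&global));
    }
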
 
 
diff --git a/src/runtime.h b/src/runtime.h
index ea2aac0..2625c69 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -333,9 +333,8 @@
   F(SetIsObserved, 2, 1) \
   F(SetObserverDeliveryPending, 0, 1) \
   F(GetObservationState, 0, 1) \
-  F(CreateObjectHashTable, 0, 1) \
-  F(ObjectHashTableGet, 2, 1) \
-  F(ObjectHashTableSet, 3, 1) \
+  F(ObservationWeakMapCreate, 0, 1) \
+  F(UnwrapGlobalProxy, 1, 1) \
   \
   /* Statements */ \
   F(NewClosure, 3, 1) \
diff --git a/src/spaces.cc b/src/spaces.cc
index 711cde1..01e1e30 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -2525,7 +2525,7 @@
 
 
 bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
-  if (IsSweepingComplete()) return true;
+  if (IsLazySweepingComplete()) return true;
 
   intptr_t freed_bytes = 0;
   Page* p = first_unswept_page_;
@@ -2553,7 +2553,7 @@
 
   heap()->FreeQueuedChunks();
 
-  return IsSweepingComplete();
+  return IsLazySweepingComplete();
 }
 
 
@@ -2575,12 +2575,14 @@
 bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) {
   MarkCompactCollector* collector = heap()->mark_compact_collector();
   if (collector->AreSweeperThreadsActivated()) {
-    if (FLAG_concurrent_sweeping &&
-        collector->StealMemoryFromSweeperThreads(this) < size_in_bytes) {
-      collector->WaitUntilSweepingCompleted();
-      return true;
+    if (FLAG_concurrent_sweeping) {
+      if (collector->StealMemoryFromSweeperThreads(this) < size_in_bytes) {
+        collector->WaitUntilSweepingCompleted();
+        return true;
+      }
+      return false;
     }
-    return false;
+    return true;
   } else {
     return AdvanceSweeper(size_in_bytes);
   }
@@ -2618,7 +2620,7 @@
 
   // Last ditch, sweep all the remaining pages to try to find space.  This may
   // cause a pause.
-  if (!IsSweepingComplete()) {
+  if (!IsLazySweepingComplete()) {
     EnsureSweeperProgress(kMaxInt);
 
     // Retry the free list allocation.
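
A control-flow sketch (not part of the patch) of the restructured EnsureSweeperProgress: with concurrent sweeping enabled, memory is first stolen from the sweeper threads and the collector only blocks when that falls short; with sweeper threads activated but concurrent sweeping off, the space reports success without lazy sweeping. The booleans below are stand-ins for the flag and collector queries.

    #include <cstdio>

    // Stand-in for PagedSpace::EnsureSweeperProgress; returns true when the
    // caller may assume enough sweeping progress for |needed_bytes|.
    bool EnsureSweeperProgress(bool sweeper_threads_active,
                               bool concurrent_sweeping,
                               long stolen_bytes,
                               long needed_bytes) {
      if (sweeper_threads_active) {
        if (concurrent_sweeping) {
          if (stolen_bytes < needed_bytes) {
            // collector->WaitUntilSweepingCompleted() in the real code.
            return true;
          }
          return false;
        }
        return true;  // threads activated but no concurrent sweeping pending
      }
      return false;   // AdvanceSweeper(needed_bytes) in the real code
    }

    int main() {
      std::printf("%d\n", EnsureSweeperProgress(true, true, 0, 4096));  // 1
    }
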
diff --git a/src/spaces.h b/src/spaces.h
index 07daacf..ed68000 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -1644,7 +1644,8 @@
   // As size, but the bytes in lazily swept pages are estimated and the bytes
   // in the current linear allocation area are not included.
   virtual intptr_t SizeOfObjects() {
-    ASSERT(!IsSweepingComplete() || (unswept_free_bytes_ == 0));
+    // TODO(hpayer): broken when concurrent sweeping is turned on
+    ASSERT(!IsLazySweepingComplete() || (unswept_free_bytes_ == 0));
     return Size() - unswept_free_bytes_ - (limit() - top());
   }
 
@@ -1763,7 +1764,7 @@
   // is called.
   bool EnsureSweeperProgress(intptr_t size_in_bytes);
 
-  bool IsSweepingComplete() {
+  bool IsLazySweepingComplete() {
     return !first_unswept_page_->is_valid();
   }
 
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 08ca3f6..06b4782 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -578,6 +578,14 @@
     check = BOOLEAN_CHECK;
   }
 
+  if (check != RECEIVER_MAP_CHECK &&
+      !function->IsBuiltin() &&
+      function->shared()->is_classic_mode()) {
+    // Calling non-strict non-builtins with a value as the receiver
+    // requires boxing.
+    return Handle<Code>::null();
+  }
+
   Code::Flags flags =
       Code::ComputeMonomorphicFlags(kind, Code::CONSTANT_FUNCTION, extra_state,
                                     cache_holder, argc);
@@ -587,7 +595,7 @@
 
   CallStubCompiler compiler(isolate_, argc, kind, extra_state, cache_holder);
   Handle<Code> code =
-      compiler.CompileCallConstant(object, holder, function, name, check);
+      compiler.CompileCallConstant(object, holder, name, check, function);
   code->set_check_type(check);
   ASSERT_EQ(flags, code->flags());
   PROFILE(isolate_,
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 3a98f54..740a7c8 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -817,11 +817,19 @@
                                 PropertyIndex index,
                                 Handle<String> name);
 
+  void CompileHandlerFrontend(Handle<Object> object,
+                              Handle<JSObject> holder,
+                              Handle<String> name,
+                              CheckType check,
+                              Label* success);
+
+  void CompileHandlerBackend(Handle<JSFunction> function);
+
   Handle<Code> CompileCallConstant(Handle<Object> object,
                                    Handle<JSObject> holder,
-                                   Handle<JSFunction> function,
                                    Handle<String> name,
-                                   CheckType check);
+                                   CheckType check,
+                                   Handle<JSFunction> function);
 
   Handle<Code> CompileCallInterceptor(Handle<JSObject> object,
                                       Handle<JSObject> holder,
diff --git a/src/version.cc b/src/version.cc
index 999e3de..ba8c46a 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     16
-#define BUILD_NUMBER      13
+#define BUILD_NUMBER      14
 #define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index c5d1849..0ac0862 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -1505,13 +1505,12 @@
 void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
   // Non-relocatable values might not need a 64-bit representation.
   if (RelocInfo::IsNone(rmode)) {
-    // Sadly, there is no zero or sign extending move for 8-bit immediates.
-    if (is_int32(value)) {
-      movq(dst, Immediate(static_cast<int32_t>(value)));
-      return;
-    } else if (is_uint32(value)) {
+    if (is_uint32(value)) {
       movl(dst, Immediate(static_cast<int32_t>(value)));
       return;
+    } else if (is_int32(value)) {
+      movq(dst, Immediate(static_cast<int32_t>(value)));
+      return;
     }
     // Value cannot be represented by 32 bits, so do a full 64 bit immediate
     // value.
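
Worked example (not part of the patch): with the uint32 check now first, a value such as 0x80000000 takes the shorter zero-extending movl, while a negative value that fits in int32 still takes the sign-extending movq. The helpers below are simplified stand-ins for V8's is_uint32/is_int32 predicates.

    #include <cstdint>
    #include <cstdio>

    bool is_uint32(int64_t v) { return v >= 0 && v <= 0xFFFFFFFFLL; }
    bool is_int32(int64_t v) { return v >= INT32_MIN && v <= INT32_MAX; }

    const char* MovImmediateKind(int64_t value) {
      if (is_uint32(value)) return "movl (zero-extended 32-bit immediate)";
      if (is_int32(value)) return "movq (sign-extended 32-bit immediate)";
      return "full 64-bit immediate";
    }

    int main() {
      std::printf("0x80000000 -> %s\n", MovImmediateKind(0x80000000LL));  // movl
      std::printf("-1         -> %s\n", MovImmediateKind(-1));            // movq
      std::printf("2^40       -> %s\n", MovImmediateKind(1LL << 40));     // full
    }
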
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index e4d401f..095113c 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -45,6 +45,7 @@
   static Register registers[] = { rdx, rax };
   descriptor->register_param_count_ = 2;
   descriptor->register_params_ = registers;
+  descriptor->stack_parameter_count_ = NULL;
   descriptor->deoptimization_handler_ =
       FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
 }
@@ -6635,8 +6636,14 @@
   ASSERT(!Serializer::enabled());
   CEntryStub ces(1, kSaveFPRegs);
   __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
+  int parameter_count_offset =
+      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+  __ movq(rbx, MemOperand(rbp, parameter_count_offset));
   masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
-  __ ret(0);  // Return to IC Miss stub, continuation still on stack.
+  __ pop(rcx);
+  __ lea(rsp, MemOperand(rsp, rbx, times_pointer_size,
+                         extra_expression_stack_count_ * kPointerSize));
+  __ jmp(rcx);  // Return to IC Miss stub, continuation still on stack.
 }
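
Back-of-the-envelope example (not part of the patch) for the new epilogue above: after the return address is popped into rcx, rsp skips the caller's stack parameters (the count loaded into rbx) plus the stub's extra expression stack slots before jumping back. The counts below are made-up example values.

    #include <cstdio>

    int main() {
      const int kPointerSize = 8;            // x64
      int caller_stack_params = 3;           // value loaded into rbx
      int extra_expression_stack_count = 1;  // baked into the stub instance
      int bytes_dropped =
          (caller_stack_params + extra_expression_stack_count) * kPointerSize;
      std::printf("rsp advances by %d bytes between pop(rcx) and jmp(rcx)\n",
                  bytes_dropped);            // 32
    }
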
 
 
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index 5d1a6d2..4d199c5 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -352,7 +352,7 @@
   JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
   unsigned height = iterator->Next();
   unsigned height_in_bytes = height * kPointerSize;
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("  translating arguments adaptor => height=%d\n", height_in_bytes);
   }
 
@@ -387,7 +387,7 @@
   output_offset -= kPointerSize;
   intptr_t callers_pc = output_[frame_index - 1]->GetPc();
   output_frame->SetFrameSlot(output_offset, callers_pc);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
            V8PRIxPTR " ; caller's pc\n",
            top_address + output_offset, output_offset, callers_pc);
@@ -399,7 +399,7 @@
   output_frame->SetFrameSlot(output_offset, value);
   intptr_t fp_value = top_address + output_offset;
   output_frame->SetFp(fp_value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
            V8PRIxPTR " ; caller's fp\n",
            fp_value, output_offset, value);
@@ -410,7 +410,7 @@
   intptr_t context = reinterpret_cast<intptr_t>(
       Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   output_frame->SetFrameSlot(output_offset, context);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
            V8PRIxPTR " ; context (adaptor sentinel)\n",
            top_address + output_offset, output_offset, context);
@@ -420,7 +420,7 @@
   output_offset -= kPointerSize;
   value = reinterpret_cast<intptr_t>(function);
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
            V8PRIxPTR " ; function\n",
            top_address + output_offset, output_offset, value);
@@ -430,7 +430,7 @@
   output_offset -= kPointerSize;
   value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
            V8PRIxPTR " ; argc (%d)\n",
            top_address + output_offset, output_offset, value, height - 1);
@@ -462,13 +462,19 @@
   // v  +-------------------------+          +-------------------------|
   //    |   COMPILED_STUB marker  |          |   STUB_FAILURE marker   |
   //    +-------------------------+          +-------------------------+
-  //    |                         |          |     stub parameter 1    |
+  //    |                         |          |   caller args.length_   |
   //    | ...                     |          +-------------------------+
-  //    |                         |          |            ...          |
+  //    |                         |          |  caller args.arguments_ |
   //    |-------------------------|<-rsp     +-------------------------+
-  //                                         |     stub parameter n    |
-  //      parameters in registers            +-------------------------+<-rsp
-  //       and spilled to stack              rax = number of parameters
+  //                                         |  caller args pointer    |
+  //                                         +-------------------------+
+  //                                         |  caller stack param 1   |
+  //      parameters in registers            +-------------------------+
+  //       and spilled to stack              |           ....          |
+  //                                         +-------------------------+
+  //                                         |  caller stack param n   |
+  //                                         +-------------------------+<-rsp
+  //                                         rax = number of parameters
   //                                         rbx = failure handler address
   //                                         rbp = saved frame
   //                                         rsi = JSFunction context
@@ -479,9 +485,15 @@
   CodeStubInterfaceDescriptor* descriptor =
       isolate_->code_stub_interface_descriptor(major_key);
 
+  // The output frame must have room for all pushed register parameters
+  // and the standard stack frame slots.
   int output_frame_size = StandardFrameConstants::kFixedFrameSize +
       kPointerSize * descriptor->register_param_count_;
 
+  // Include space for an argument object to the callee and optionally
+  // the space to pass the argument object to the stub failure handler.
+  output_frame_size += sizeof(Arguments) + kPointerSize;
+
   FrameDescription* output_frame =
       new(output_frame_size) FrameDescription(output_frame_size, 0);
   ASSERT(frame_index == 0);
@@ -493,12 +505,15 @@
       reinterpret_cast<intptr_t>(notify_failure->entry()));
 
   Code* trampoline = NULL;
-  StubFailureTrampolineStub().FindCodeInCache(&trampoline, isolate_);
+  int extra = descriptor->extra_expression_stack_count_;
+  StubFailureTrampolineStub(extra).FindCodeInCache(&trampoline, isolate_);
   ASSERT(trampoline != NULL);
   output_frame->SetPc(reinterpret_cast<intptr_t>(
       trampoline->instruction_start()));
   unsigned input_frame_size = input_->GetFrameSize();
 
+  intptr_t frame_ptr = input_->GetRegister(rbp.code());
+
   // JSFunction continuation
   unsigned input_frame_offset = input_frame_size - kPointerSize;
   unsigned output_frame_offset = output_frame_size - kPointerSize;
@@ -524,14 +539,35 @@
       Smi::FromInt(StackFrame::STUB_FAILURE_TRAMPOLINE));
   output_frame->SetFrameSlot(output_frame_offset, value);
 
+  intptr_t caller_arg_count = 0;
+  if (descriptor->stack_parameter_count_ != NULL) {
+    caller_arg_count =
+        input_->GetRegister(descriptor->stack_parameter_count_->code());
+  }
+
+  // Build the Arguments object for the caller's parameters and a pointer to it.
+  output_frame_offset -= kPointerSize;
+  value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
+      (caller_arg_count - 1) * kPointerSize;
+  output_frame->SetFrameSlot(output_frame_offset, value);
+
+  output_frame_offset -= kPointerSize;
+  output_frame->SetFrameSlot(output_frame_offset, caller_arg_count);
+
+  value = frame_ptr - (output_frame_size - output_frame_offset) -
+      StandardFrameConstants::kMarkerOffset;
+  output_frame_offset -= kPointerSize;
+  output_frame->SetFrameSlot(output_frame_offset, value);
+
+  // Copy the register parameters to the failure frame.
   for (int i = 0; i < descriptor->register_param_count_; ++i) {
     output_frame_offset -= kPointerSize;
     DoTranslateCommand(iterator, 0, output_frame_offset);
   }
 
-  value = input_->GetRegister(rbp.code());
-  output_frame->SetRegister(rbp.code(), value);
-  output_frame->SetFp(value);
+  output_frame->SetRegister(rbp.code(), frame_ptr);
+  output_frame->SetFp(frame_ptr);
 
   for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
     double double_value = input_->GetDoubleRegister(i);
@@ -540,7 +576,11 @@
 
   intptr_t handler =
       reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_);
-  output_frame->SetRegister(rax.code(), descriptor->register_param_count_);
+  int params = descriptor->register_param_count_;
+  if (descriptor->stack_parameter_count_ != NULL) {
+    params++;
+  }
+  output_frame->SetRegister(rax.code(), params);
   output_frame->SetRegister(rbx.code(), handler);
 }
 
@@ -552,7 +592,7 @@
   JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
   unsigned height = iterator->Next();
   unsigned height_in_bytes = height * kPointerSize;
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("  translating construct stub => height=%d\n", height_in_bytes);
   }
 
@@ -587,7 +627,7 @@
   output_offset -= kPointerSize;
   intptr_t callers_pc = output_[frame_index - 1]->GetPc();
   output_frame->SetFrameSlot(output_offset, callers_pc);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
            V8PRIxPTR " ; caller's pc\n",
            top_address + output_offset, output_offset, callers_pc);
@@ -599,7 +639,7 @@
   output_frame->SetFrameSlot(output_offset, value);
   intptr_t fp_value = top_address + output_offset;
   output_frame->SetFp(fp_value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
            V8PRIxPTR " ; caller's fp\n",
            fp_value, output_offset, value);
@@ -609,7 +649,7 @@
   output_offset -= kPointerSize;
   value = output_[frame_index - 1]->GetContext();
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
            V8PRIxPTR " ; context\n",
            top_address + output_offset, output_offset, value);
@@ -619,7 +659,7 @@
   output_offset -= kPointerSize;
   value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
            V8PRIxPTR " ; function (construct sentinel)\n",
            top_address + output_offset, output_offset, value);
@@ -629,7 +669,7 @@
   output_offset -= kPointerSize;
   value = reinterpret_cast<intptr_t>(construct_stub);
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
            V8PRIxPTR " ; code object\n",
            top_address + output_offset, output_offset, value);
@@ -639,7 +679,7 @@
   output_offset -= kPointerSize;
   value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
            V8PRIxPTR " ; argc (%d)\n",
            top_address + output_offset, output_offset, value, height - 1);
@@ -650,7 +690,7 @@
   output_offset -= kPointerSize;
   value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
            V8PRIxPTR " ; allocated receiver\n",
            top_address + output_offset, output_offset, value);
@@ -675,7 +715,7 @@
   unsigned height = 0;
   unsigned height_in_bytes = height * kPointerSize;
   const char* kind = is_setter_stub_frame ? "setter" : "getter";
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("  translating %s stub => height=%u\n", kind, height_in_bytes);
   }
 
@@ -709,7 +749,7 @@
   output_offset -= kPointerSize;
   intptr_t callers_pc = output_[frame_index - 1]->GetPc();
   output_frame->SetFrameSlot(output_offset, callers_pc);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
            " ; caller's pc\n",
            top_address + output_offset, output_offset, callers_pc);
@@ -721,7 +761,7 @@
   output_frame->SetFrameSlot(output_offset, value);
   intptr_t fp_value = top_address + output_offset;
   output_frame->SetFp(fp_value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
            " ; caller's fp\n",
            fp_value, output_offset, value);
@@ -731,7 +771,7 @@
   output_offset -= kPointerSize;
   value = output_[frame_index - 1]->GetContext();
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
            " ; context\n",
            top_address + output_offset, output_offset, value);
@@ -741,7 +781,7 @@
   output_offset -= kPointerSize;
   value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
            " ; function (%s sentinel)\n",
            top_address + output_offset, output_offset, value, kind);
@@ -755,7 +795,7 @@
   Code* accessor_stub = isolate_->builtins()->builtin(name);
   value = reinterpret_cast<intptr_t>(accessor_stub);
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
            " ; code object\n",
            top_address + output_offset, output_offset, value);
@@ -798,7 +838,7 @@
   }
   unsigned height = iterator->Next();
   unsigned height_in_bytes = height * kPointerSize;
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("  translating ");
     function->PrintName();
     PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
@@ -862,7 +902,7 @@
     value = output_[frame_index - 1]->GetPc();
   }
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
            V8PRIxPTR  " ; caller's pc\n",
            top_address + output_offset, output_offset, value);
@@ -884,7 +924,7 @@
   ASSERT(!is_bottommost || input_->GetRegister(rbp.code()) == fp_value);
   output_frame->SetFp(fp_value);
   if (is_topmost) output_frame->SetRegister(rbp.code(), fp_value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
            V8PRIxPTR " ; caller's fp\n",
            fp_value, output_offset, value);
@@ -903,7 +943,7 @@
   output_frame->SetFrameSlot(output_offset, value);
   output_frame->SetContext(value);
   if (is_topmost) output_frame->SetRegister(rsi.code(), value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
            V8PRIxPTR "; context\n",
            top_address + output_offset, output_offset, value);
@@ -917,7 +957,7 @@
   // input frame.
   ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
   output_frame->SetFrameSlot(output_offset, value);
-  if (FLAG_trace_deopt) {
+  if (trace_) {
     PrintF("    0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
            V8PRIxPTR "; function\n",
            top_address + output_offset, output_offset, value);
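
Note on the FLAG_trace_deopt -> trace_ substitutions in the deoptimizer hunks
above: the repeated reads of the global flag are replaced by a member of the
Deoptimizer instance. The constructor change itself is not part of these
hunks, so the initialization below is an assumption, sketched only to show
the intent (flag consulted once, cached per deoptimization):

    // Minimal sketch, not the literal constructor from this commit.
    class Deoptimizer {
     public:
      explicit Deoptimizer(Isolate* isolate)
          : isolate_(isolate),
            trace_(FLAG_trace_deopt) {}  // assumed: flag cached once per deopt
     private:
      Isolate* isolate_;
      bool trace_;  // checked by each PrintF site instead of FLAG_trace_deopt
    };
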
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
index 3e3d63d..c9092af 100644
--- a/src/x64/frames-x64.h
+++ b/src/x64/frames-x64.h
@@ -85,20 +85,6 @@
 };
 
 
-class StandardFrameConstants : public AllStatic {
- public:
-  // Fixed part of the frame consists of return address, caller fp,
-  // context and function.
-  static const int kFixedFrameSize    =  4 * kPointerSize;
-  static const int kExpressionsOffset = -3 * kPointerSize;
-  static const int kMarkerOffset      = -2 * kPointerSize;
-  static const int kContextOffset     = -1 * kPointerSize;
-  static const int kCallerFPOffset    =  0 * kPointerSize;
-  static const int kCallerPCOffset    = +1 * kPointerSize;
-  static const int kCallerSPOffset    = +2 * kPointerSize;
-};
-
-
 class JavaScriptFrameConstants : public AllStatic {
  public:
   // FP-relative.
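
The hunk above drops an x64-only duplicate of StandardFrameConstants; frame
walking and the deoptimizer keep using the class, so the same constants are
presumably provided once by the platform-independent frames header. A small
illustrative sketch of how the fixed slots are addressed relative to fp with
those constants (offsets as in the deleted copy: return address, caller fp,
context, function):

    // Illustrative only, assuming the shared definition keeps these values.
    static inline Address CallerPCSlot(Address fp) {
      return fp + StandardFrameConstants::kCallerPCOffset;  // fp + 1 * kPointerSize
    }
    static inline Address ContextSlot(Address fp) {
      return fp + StandardFrameConstants::kContextOffset;   // fp - 1 * kPointerSize
    }
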
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index a7efa76..e35a381 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -2200,7 +2200,7 @@
 
 LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
   LParameter* result = new(zone()) LParameter;
-  if (info()->IsOptimizing()) {
+  if (instr->kind() == HParameter::STACK_PARAMETER) {
     int spill_index = chunk()->GetParameterStackSlot(instr->index());
     return DefineAsSpilled(result, spill_index);
   } else {
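
The DoParameter change above keys the spill decision on the parameter's kind
rather than on whether a full JS function is being optimized: only
STACK_PARAMETERs (ordinary JS arguments living in caller stack slots) get a
spill index, while register parameters used by code stubs stay in registers.
The else branch is outside this hunk; the sketch below shows the intended
split, with RegisterForStubParameter as a hypothetical stand-in for the
register lookup defined by the stub's calling convention:

    LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
      LParameter* result = new(zone()) LParameter;
      if (instr->kind() == HParameter::STACK_PARAMETER) {
        // Ordinary JS parameter: lives in a caller-provided stack slot.
        int spill_index = chunk()->GetParameterStackSlot(instr->index());
        return DefineAsSpilled(result, spill_index);
      } else {
        // Stub register parameter: pinned to a fixed register, no spill slot.
        return DefineFixed(result, RegisterForStubParameter(instr->index()));
      }
    }
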
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index bdedb18..f6561e1 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -2911,23 +2911,14 @@
 }
 
 
-static double kUint32Bias =
-    static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
-
-
 void MacroAssembler::LoadUint32(XMMRegister dst,
                                 Register src,
                                 XMMRegister scratch) {
-  Label done;
-  cmpl(src, Immediate(0));
-  movq(kScratchRegister,
-       reinterpret_cast<int64_t>(&kUint32Bias),
-       RelocInfo::NONE64);
-  movsd(scratch, Operand(kScratchRegister, 0));
-  cvtlsi2sd(dst, src);
-  j(not_sign, &done, Label::kNear);
-  addsd(dst, scratch);
-  bind(&done);
+  if (FLAG_debug_code) {
+    cmpq(src, Immediate(0xffffffff));
+    Assert(below_equal, "input GPR is expected to have upper32 cleared");
+  }
+  cvtqsi2sd(dst, src);
 }
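
The new LoadUint32 assumes its caller has already zero-extended the uint32
into the full 64-bit register (the debug-mode cmpq/Assert checks exactly
that), which makes the old bias correction unnecessary: a 64-bit signed
conversion of a value in [0, 2^32) is exact. A standalone sketch of the
equivalence, not V8 code:

    double LoadUint32Equivalent(uint64_t src) {
      // Precondition: src <= 0xffffffff, so the value is non-negative as an
      // int64 and fits well within double's 53-bit mantissa -- no correction
      // for the "looks negative as int32" case is needed.
      return static_cast<double>(static_cast<int64_t>(src));
    }
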
 
 
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 5ab1347..91d13ba 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -2188,11 +2188,11 @@
 }
 
 
-Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
-                                                   Handle<JSObject> holder,
-                                                   Handle<JSFunction> function,
-                                                   Handle<String> name,
-                                                   CheckType check) {
+void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
+                                              Handle<JSObject> holder,
+                                              Handle<String> name,
+                                              CheckType check,
+                                              Label* success) {
   // ----------- S t a t e -------------
   // rcx                 : function name
   // rsp[0]              : return address
@@ -2202,15 +2202,6 @@
   // rsp[argc * 8]       : argument 1
   // rsp[(argc + 1) * 8] : argument 0 = receiver
   // -----------------------------------
-
-  if (HasCustomCallGenerator(function)) {
-    Handle<Code> code = CompileCustomCall(object, holder,
-                                          Handle<JSGlobalPropertyCell>::null(),
-                                          function, name);
-    // A null handle means bail out to the regular compiler code below.
-    if (!code.is_null()) return code;
-  }
-
   Label miss;
   GenerateNameCheck(name, &miss);
 
@@ -2245,76 +2236,86 @@
       break;
 
     case STRING_CHECK:
-      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
-        // Check that the object is a two-byte string or a symbol.
-        __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rax);
-        __ j(above_equal, &miss);
-        // Check that the maps starting from the prototype haven't changed.
-        GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::STRING_FUNCTION_INDEX, rax, &miss);
-        CheckPrototypes(
-            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
-            rax, holder, rbx, rdx, rdi, name, &miss);
-      } else {
-        // Calling non-strict non-builtins with a value as the receiver
-        // requires boxing.
-        __ jmp(&miss);
-      }
+      // Check that the object is a two-byte string or a symbol.
+      __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rax);
+      __ j(above_equal, &miss);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateDirectLoadGlobalFunctionPrototype(
+          masm(), Context::STRING_FUNCTION_INDEX, rax, &miss);
+      CheckPrototypes(
+          Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+          rax, holder, rbx, rdx, rdi, name, &miss);
       break;
 
-    case NUMBER_CHECK:
-      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
-        Label fast;
-        // Check that the object is a smi or a heap number.
-        __ JumpIfSmi(rdx, &fast);
-        __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rax);
-        __ j(not_equal, &miss);
-        __ bind(&fast);
-        // Check that the maps starting from the prototype haven't changed.
-        GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::NUMBER_FUNCTION_INDEX, rax, &miss);
-        CheckPrototypes(
-            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
-            rax, holder, rbx, rdx, rdi, name, &miss);
-      } else {
-        // Calling non-strict non-builtins with a value as the receiver
-        // requires boxing.
-        __ jmp(&miss);
-      }
+    case NUMBER_CHECK: {
+      Label fast;
+      // Check that the object is a smi or a heap number.
+      __ JumpIfSmi(rdx, &fast);
+      __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rax);
+      __ j(not_equal, &miss);
+      __ bind(&fast);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateDirectLoadGlobalFunctionPrototype(
+          masm(), Context::NUMBER_FUNCTION_INDEX, rax, &miss);
+      CheckPrototypes(
+          Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+          rax, holder, rbx, rdx, rdi, name, &miss);
       break;
-
-    case BOOLEAN_CHECK:
-      if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
-        Label fast;
-        // Check that the object is a boolean.
-        __ CompareRoot(rdx, Heap::kTrueValueRootIndex);
-        __ j(equal, &fast);
-        __ CompareRoot(rdx, Heap::kFalseValueRootIndex);
-        __ j(not_equal, &miss);
-        __ bind(&fast);
-        // Check that the maps starting from the prototype haven't changed.
-        GenerateDirectLoadGlobalFunctionPrototype(
-            masm(), Context::BOOLEAN_FUNCTION_INDEX, rax, &miss);
-        CheckPrototypes(
-            Handle<JSObject>(JSObject::cast(object->GetPrototype())),
-            rax, holder, rbx, rdx, rdi, name, &miss);
-      } else {
-        // Calling non-strict non-builtins with a value as the receiver
-        // requires boxing.
-        __ jmp(&miss);
-      }
+    }
+    case BOOLEAN_CHECK: {
+      Label fast;
+      // Check that the object is a boolean.
+      __ CompareRoot(rdx, Heap::kTrueValueRootIndex);
+      __ j(equal, &fast);
+      __ CompareRoot(rdx, Heap::kFalseValueRootIndex);
+      __ j(not_equal, &miss);
+      __ bind(&fast);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateDirectLoadGlobalFunctionPrototype(
+          masm(), Context::BOOLEAN_FUNCTION_INDEX, rax, &miss);
+      CheckPrototypes(
+          Handle<JSObject>(JSObject::cast(object->GetPrototype())),
+          rax, holder, rbx, rdx, rdi, name, &miss);
       break;
+    }
   }
 
+  __ jmp(success);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  GenerateMissBranch();
+}
+
+
+void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) {
   CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
       ? CALL_AS_FUNCTION
       : CALL_AS_METHOD;
   __ InvokeFunction(function, arguments(), JUMP_FUNCTION,
                     NullCallWrapper(), call_kind);
+}
 
-  // Handle call cache miss.
-  __ bind(&miss);
-  GenerateMissBranch();
+
+Handle<Code> CallStubCompiler::CompileCallConstant(
+    Handle<Object> object,
+    Handle<JSObject> holder,
+    Handle<String> name,
+    CheckType check,
+    Handle<JSFunction> function) {
+  if (HasCustomCallGenerator(function)) {
+    Handle<Code> code = CompileCustomCall(object, holder,
+                                          Handle<JSGlobalPropertyCell>::null(),
+                                          function, name);
+    // A null handle means bail out to the regular compiler code below.
+    if (!code.is_null()) return code;
+  }
+
+  Label success;
+
+  CompileHandlerFrontend(object, holder, name, check, &success);
+  __ bind(&success);
+  CompileHandlerBackend(function);
 
   // Return the generated code.
   return GetCode(function);
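
The CompileCallConstant rewrite above splits the stub into a reusable front
end (name check, receiver type checks, prototype checks, plus the miss
branch) and a back end (the actual invoke), with the caller binding the
success label in between. The resulting code layout, sketched as comments
(conceptual, not emitted text):

    // [GenerateNameCheck + receiver check + CheckPrototypes]  <- CompileHandlerFrontend
    // jmp success            ; fast path jumps over the miss handler
    // miss:
    //   [GenerateMissBranch -> IC miss stub]
    // success:               ; bound by CompileCallConstant
    // [InvokeFunction(function, ..., JUMP_FUNCTION)]          <- CompileHandlerBackend
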
@@ -2839,7 +2840,7 @@
 
 Register* LoadStubCompiler::registers() {
   // receiver, name, scratch1, scratch2, scratch3, scratch4.
-  static Register registers[] = { rax, rcx, rbx, rdx, rdi, r8 };
+  static Register registers[] = { rax, rcx, rdx, rbx, rdi, r8 };
   return registers;
 }