Version 3.24.0

Performance and stability improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@18280 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index d4ff932..95405da 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,8 @@
+2013-12-09: Version 3.24.0
+
+        Performance and stability improvements on all platforms.
+
+
 2013-12-04: Version 3.23.18
 
         Performance and stability improvements on all platforms.
diff --git a/Makefile b/Makefile
index 910bcbd..d2c9a9d 100644
--- a/Makefile
+++ b/Makefile
@@ -273,7 +273,7 @@
 .SECONDEXPANSION:
 $(MODES): $(addsuffix .$$@,$(DEFAULT_ARCHES))
 
-$(ARCHES): $(addprefix $$@.,$(MODES))
+$(ARCHES): $(addprefix $$@.,$(DEFAULT_MODES))
 
 # Defines how to build a particular target (e.g. ia32.release).
 $(BUILDS): $(OUTDIR)/Makefile.$$@
@@ -368,6 +368,7 @@
 	rm -f $(OUTDIR)/Makefile.$(basename $@)*
 	rm -rf $(OUTDIR)/$(basename $@).release
 	rm -rf $(OUTDIR)/$(basename $@).debug
+	rm -rf $(OUTDIR)/$(basename $@).optdebug
 	find $(OUTDIR) -regex '.*\(host\|target\)\.$(basename $@).*\.mk' -delete
 
 native.clean:
diff --git a/build/gyp_v8 b/build/gyp_v8
index 92e6503..40c377d 100755
--- a/build/gyp_v8
+++ b/build/gyp_v8
@@ -167,5 +167,9 @@
   # Generate for the architectures supported on the given platform.
   gyp_args = list(args)
   if platform.system() == 'Linux':
+    # --generator-output defines where the Makefile goes.
     gyp_args.append('--generator-output=out')
+    # -Goutput_dir defines where the build output goes, relative to the
+    # Makefile. Set it to . so that the build output doesn't end up in out/out.
+    gyp_args.append('-Goutput_dir=.')
   run_gyp(gyp_args)
diff --git a/samples/process.cc b/samples/process.cc
index b18a3ff..7e3f78f 100644
--- a/samples/process.cc
+++ b/samples/process.cc
@@ -311,7 +311,7 @@
 // JavaScript object.
 Handle<Object> JsHttpRequestProcessor::WrapMap(map<string, string>* obj) {
   // Handle scope for temporary handles.
-  HandleScope handle_scope(GetIsolate());
+  EscapableHandleScope handle_scope(GetIsolate());
 
   // Fetch the template for creating JavaScript map wrappers.
   // It only has to be created once, which we do on demand.
@@ -323,7 +323,7 @@
       Local<ObjectTemplate>::New(GetIsolate(), map_template_);
 
   // Create an empty map wrapper.
-  Handle<Object> result = templ->NewInstance();
+  Local<Object> result = templ->NewInstance();
 
   // Wrap the raw C++ pointer in an External so it can be referenced
   // from within JavaScript.
@@ -336,7 +336,7 @@
   // of these handles will go away when the handle scope is deleted
   // we need to call Escape to let one, the result, escape into the
   // outer handle scope.
-  return handle_scope.Close(result);
+  return handle_scope.Escape(result);
 }
 
 
@@ -399,14 +399,14 @@
 
 Handle<ObjectTemplate> JsHttpRequestProcessor::MakeMapTemplate(
     Isolate* isolate) {
-  HandleScope handle_scope(isolate);
+  EscapableHandleScope handle_scope(isolate);
 
-  Handle<ObjectTemplate> result = ObjectTemplate::New();
+  Local<ObjectTemplate> result = ObjectTemplate::New();
   result->SetInternalFieldCount(1);
   result->SetNamedPropertyHandler(MapGet, MapSet);
 
   // Again, return the result through the current handle scope.
-  return handle_scope.Close(result);
+  return handle_scope.Escape(result);
 }
 
 
@@ -420,7 +420,7 @@
  */
 Handle<Object> JsHttpRequestProcessor::WrapRequest(HttpRequest* request) {
   // Handle scope for temporary handles.
-  HandleScope handle_scope(GetIsolate());
+  EscapableHandleScope handle_scope(GetIsolate());
 
   // Fetch the template for creating JavaScript http request wrappers.
   // It only has to be created once, which we do on demand.
@@ -432,7 +432,7 @@
       Local<ObjectTemplate>::New(GetIsolate(), request_template_);
 
   // Create an empty http request wrapper.
-  Handle<Object> result = templ->NewInstance();
+  Local<Object> result = templ->NewInstance();
 
   // Wrap the raw C++ pointer in an External so it can be referenced
   // from within JavaScript.
@@ -445,7 +445,7 @@
   // of these handles will go away when the handle scope is deleted
   // we need to call Escape to let one, the result, escape into the
   // outer handle scope.
-  return handle_scope.Close(result);
+  return handle_scope.Escape(result);
 }
 
 
@@ -509,9 +509,9 @@
 
 Handle<ObjectTemplate> JsHttpRequestProcessor::MakeRequestTemplate(
     Isolate* isolate) {
-  HandleScope handle_scope(isolate);
+  EscapableHandleScope handle_scope(isolate);
 
-  Handle<ObjectTemplate> result = ObjectTemplate::New();
+  Local<ObjectTemplate> result = ObjectTemplate::New();
   result->SetInternalFieldCount(1);
 
   // Add accessors for each of the fields of the request.
@@ -529,7 +529,7 @@
       GetUserAgent);
 
   // Again, return the result through the current handle scope.
-  return handle_scope.Close(result);
+  return handle_scope.Escape(result);
 }
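The WrapMap/WrapRequest/MakeMapTemplate/MakeRequestTemplate hunks above all follow the same migration: a scope that hands a handle out to its caller becomes an EscapableHandleScope, and handle_scope.Close(result) becomes handle_scope.Escape(result). A minimal sketch of the pattern, assuming only the public v8.h API (WrapValue and its parameters are illustrative, not part of this patch):

    #include <v8.h>

    v8::Local<v8::Object> WrapValue(v8::Isolate* isolate,
                                    v8::Local<v8::ObjectTemplate> templ) {
      // Escapable scope: temporary handles die with the scope, except the one
      // we explicitly promote to the enclosing scope.
      v8::EscapableHandleScope handle_scope(isolate);
      v8::Local<v8::Object> result = templ->NewInstance();
      return handle_scope.Escape(result);  // replaces handle_scope.Close(result)
    }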
 
 
diff --git a/src/api.cc b/src/api.cc
index aee9e3a..943dbba 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -5318,7 +5318,6 @@
       global_constructor->set_needs_access_check(
           proxy_constructor->needs_access_check());
     }
-    isolate->runtime_profiler()->Reset();
   }
   // Leave V8.
 
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index bef4bc3..4b65845 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -938,18 +938,9 @@
   __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
-    // Lookup and calculate pc offset.
-    __ ldr(r1, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
-    __ ldr(r2, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
-    __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
-    __ sub(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
-    __ sub(r1, r1, r2);
-    __ SmiTag(r1);
-
-    // Pass both function and pc offset as arguments.
+    // Pass function as argument.
     __ push(r0);
-    __ push(r1);
-    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
   }
 
   // If the code object is null, just return to the unoptimized code.
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index cc2dbdc..4eb08a1 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -1418,18 +1418,6 @@
   }
   AllowExternalCallThatCantCauseGC scope(masm);
   switch (type_) {
-    case TranscendentalCache::SIN:
-      __ CallCFunction(ExternalReference::math_sin_double_function(isolate),
-          0, 1);
-      break;
-    case TranscendentalCache::COS:
-      __ CallCFunction(ExternalReference::math_cos_double_function(isolate),
-          0, 1);
-      break;
-    case TranscendentalCache::TAN:
-      __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
-          0, 1);
-      break;
     case TranscendentalCache::LOG:
       __ CallCFunction(ExternalReference::math_log_double_function(isolate),
           0, 1);
@@ -1445,9 +1433,6 @@
 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
   switch (type_) {
     // Add more cases when necessary.
-    case TranscendentalCache::SIN: return Runtime::kMath_sin;
-    case TranscendentalCache::COS: return Runtime::kMath_cos;
-    case TranscendentalCache::TAN: return Runtime::kMath_tan;
     case TranscendentalCache::LOG: return Runtime::kMath_log;
     default:
       UNIMPLEMENTED();
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 238d34e..255e1f3 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -39,9 +39,6 @@
 
 UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
   switch (type) {
-    case TranscendentalCache::SIN: return &sin;
-    case TranscendentalCache::COS: return &cos;
-    case TranscendentalCache::TAN: return &tan;
     case TranscendentalCache::LOG: return &log;
     default: UNIMPLEMENTED();
   }
@@ -836,8 +833,10 @@
 
 #undef __
 
+#ifdef DEBUG
 // add(r0, pc, Operand(-8))
 static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
+#endif
 
 static byte* GetNoCodeAgeSequence(uint32_t* length) {
   // The sequence of instructions that is patched out for aging code is the
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 5508803..f6d3ea3 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -334,10 +334,6 @@
 
 void FullCodeGenerator::EmitProfilingCounterReset() {
   int reset_value = FLAG_interrupt_budget;
-  if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
-    // Self-optimization is a one-off thing: if it fails, don't try again.
-    reset_value = Smi::kMaxValue;
-  }
   if (isolate()->IsDebuggerActive()) {
     // Detect debug break requests as soon as possible.
     reset_value = FLAG_interrupt_budget >> 4;
@@ -355,13 +351,10 @@
   Assembler::BlockConstPoolScope block_const_pool(masm_);
   Label ok;
 
-  int weight = 1;
-  if (FLAG_weighted_back_edges) {
-    ASSERT(back_edge_target->is_bound());
-    int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
-    weight = Min(kMaxBackEdgeWeight,
-                 Max(1, distance / kCodeSizeMultiplier));
-  }
+  ASSERT(back_edge_target->is_bound());
+  int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+  int weight = Min(kMaxBackEdgeWeight,
+                   Max(1, distance / kCodeSizeMultiplier));
   EmitProfilingCounterDecrement(weight);
   __ b(pl, &ok);
   __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
@@ -394,32 +387,24 @@
       __ push(r0);
       __ CallRuntime(Runtime::kTraceExit, 1);
     }
-    if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
-      // Pretend that the exit is a backwards jump to the entry.
-      int weight = 1;
-      if (info_->ShouldSelfOptimize()) {
-        weight = FLAG_interrupt_budget / FLAG_self_opt_count;
-      } else if (FLAG_weighted_back_edges) {
-        int distance = masm_->pc_offset();
-        weight = Min(kMaxBackEdgeWeight,
-                     Max(1, distance / kCodeSizeMultiplier));
-      }
-      EmitProfilingCounterDecrement(weight);
-      Label ok;
-      __ b(pl, &ok);
-      __ push(r0);
-      if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
-        __ ldr(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-        __ push(r2);
-        __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
-      } else {
-        __ Call(isolate()->builtins()->InterruptCheck(),
-                RelocInfo::CODE_TARGET);
-      }
-      __ pop(r0);
-      EmitProfilingCounterReset();
-      __ bind(&ok);
+    // Pretend that the exit is a backwards jump to the entry.
+    int weight = 1;
+    if (info_->ShouldSelfOptimize()) {
+      weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+    } else {
+      int distance = masm_->pc_offset();
+      weight = Min(kMaxBackEdgeWeight,
+                   Max(1, distance / kCodeSizeMultiplier));
     }
+    EmitProfilingCounterDecrement(weight);
+    Label ok;
+    __ b(pl, &ok);
+    __ push(r0);
+    __ Call(isolate()->builtins()->InterruptCheck(),
+            RelocInfo::CODE_TARGET);
+    __ pop(r0);
+    EmitProfilingCounterReset();
+    __ bind(&ok);
 
 #ifdef DEBUG
     // Add a label for checking the size of the code used for returning.
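Both hunks above (back-edge bookkeeping and the return sequence) now compute the interrupt weight unconditionally from the code distance; the FLAG_weighted_back_edges gate is gone. The heuristic itself, extracted as a plain function for clarity (a sketch only; kMaxBackEdgeWeight and kCodeSizeMultiplier are V8-internal constants):

    // The weight grows with the size of the code span the back edge (or, at a
    // return, the whole function body) covers, clamped to [1, max_weight].
    int BackEdgeWeight(int distance_in_bytes, int code_size_multiplier,
                       int max_weight) {
      int weight = distance_in_bytes / code_size_multiplier;
      if (weight < 1) weight = 1;
      if (weight > max_weight) weight = max_weight;
      return weight;
    }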
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 6119b24..5a1d55e 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -256,7 +256,7 @@
   stream->Add("if typeof ");
   value()->PrintTo(stream);
   stream->Add(" == \"%s\" then B%d else B%d",
-              *hydrogen()->type_literal()->ToCString(),
+              hydrogen()->type_literal()->ToCString().get(),
               true_block_id(), false_block_id());
 }
 
@@ -309,13 +309,13 @@
 
 void LCallNamed::PrintDataTo(StringStream* stream) {
   SmartArrayPointer<char> name_string = name()->ToCString();
-  stream->Add("%s #%d / ", *name_string, arity());
+  stream->Add("%s #%d / ", name_string.get(), arity());
 }
 
 
 void LCallGlobal::PrintDataTo(StringStream* stream) {
   SmartArrayPointer<char> name_string = name()->ToCString();
-  stream->Add("%s #%d / ", *name_string, arity());
+  stream->Add("%s #%d / ", name_string.get(), arity());
 }
 
 
@@ -360,7 +360,7 @@
 void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
   object()->PrintTo(stream);
   stream->Add(".");
-  stream->Add(*String::cast(*name())->ToCString());
+  stream->Add(String::cast(*name())->ToCString().get());
   stream->Add(" <- ");
   value()->PrintTo(stream);
 }
@@ -1187,9 +1187,6 @@
     case kMathRound: return DoMathRound(instr);
     case kMathAbs: return DoMathAbs(instr);
     case kMathLog: return DoMathLog(instr);
-    case kMathSin: return DoMathSin(instr);
-    case kMathCos: return DoMathCos(instr);
-    case kMathTan: return DoMathTan(instr);
     case kMathExp: return DoMathExp(instr);
     case kMathSqrt: return DoMathSqrt(instr);
     case kMathPowHalf: return DoMathPowHalf(instr);
@@ -1233,27 +1230,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
-  LOperand* input = UseFixedDouble(instr->value(), d2);
-  LMathSin* result = new(zone()) LMathSin(input);
-  return MarkAsCall(DefineFixedDouble(result, d2), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
-  LOperand* input = UseFixedDouble(instr->value(), d2);
-  LMathCos* result = new(zone()) LMathCos(input);
-  return MarkAsCall(DefineFixedDouble(result, d2), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathTan(HUnaryMathOperation* instr) {
-  LOperand* input = UseFixedDouble(instr->value(), d2);
-  LMathTan* result = new(zone()) LMathTan(input);
-  return MarkAsCall(DefineFixedDouble(result, d2), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
   ASSERT(instr->representation().IsDouble());
   ASSERT(instr->value()->representation().IsDouble());
@@ -2677,5 +2653,4 @@
   return DefineAsRegister(new(zone()) LLoadFieldByIndex(object, index));
 }
 
-
 } }  // namespace v8::internal
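The *smart_ptr to smart_ptr.get() rewrites in this file (and in the later src/ changes) reflect the move from dereferencing SmartArrayPointer with operator* to an explicit get() accessor. A minimal sketch of the calling convention, assuming the V8-internal SmartArrayPointer and StringStream types used above:

    void PrintName(String* name, StringStream* stream) {
      // Keep the smart pointer alive in a named local for the duration of the
      // call, and pass the raw buffer with get() instead of dereferencing.
      SmartArrayPointer<char> c_name = name->ToCString();
      stream->Add("%s", c_name.get());
    }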
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index cfafc06..4bac4b1 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -131,7 +131,6 @@
   V(LoadNamedGeneric)                           \
   V(MapEnumLength)                              \
   V(MathAbs)                                    \
-  V(MathCos)                                    \
   V(MathExp)                                    \
   V(MathFloor)                                  \
   V(MathFloorOfDiv)                             \
@@ -139,9 +138,7 @@
   V(MathMinMax)                                 \
   V(MathPowHalf)                                \
   V(MathRound)                                  \
-  V(MathSin)                                    \
   V(MathSqrt)                                   \
-  V(MathTan)                                    \
   V(ModI)                                       \
   V(MulI)                                       \
   V(MultiplyAddD)                               \
@@ -816,42 +813,6 @@
 };
 
 
-class LMathSin V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LMathSin(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
-};
-
-
-class LMathCos V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LMathCos(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
-};
-
-
-class LMathTan V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LMathTan(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MathTan, "math-tan")
-};
-
-
 class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 3> {
  public:
   LMathExp(LOperand* value,
@@ -2778,9 +2739,6 @@
   LInstruction* DoMathRound(HUnaryMathOperation* instr);
   LInstruction* DoMathAbs(HUnaryMathOperation* instr);
   LInstruction* DoMathLog(HUnaryMathOperation* instr);
-  LInstruction* DoMathSin(HUnaryMathOperation* instr);
-  LInstruction* DoMathCos(HUnaryMathOperation* instr);
-  LInstruction* DoMathTan(HUnaryMathOperation* instr);
   LInstruction* DoMathExp(HUnaryMathOperation* instr);
   LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
   LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index b9f1a5f..8f1c9c0 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -3951,39 +3951,6 @@
 }
 
 
-void LCodeGen::DoMathTan(LMathTan* instr) {
-  ASSERT(ToDoubleRegister(instr->result()).is(d2));
-  // Set the context register to a GC-safe fake value. Clobbering it is
-  // OK because this instruction is marked as a call.
-  __ mov(cp, Operand::Zero());
-  TranscendentalCacheStub stub(TranscendentalCache::TAN,
-                               TranscendentalCacheStub::UNTAGGED);
-  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathCos(LMathCos* instr) {
-  ASSERT(ToDoubleRegister(instr->result()).is(d2));
-  // Set the context register to a GC-safe fake value. Clobbering it is
-  // OK because this instruction is marked as a call.
-  __ mov(cp, Operand::Zero());
-  TranscendentalCacheStub stub(TranscendentalCache::COS,
-                               TranscendentalCacheStub::UNTAGGED);
-  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathSin(LMathSin* instr) {
-  ASSERT(ToDoubleRegister(instr->result()).is(d2));
-  // Set the context register to a GC-safe fake value. Clobbering it is
-  // OK because this instruction is marked as a call.
-  __ mov(cp, Operand::Zero());
-  TranscendentalCacheStub stub(TranscendentalCache::SIN,
-                               TranscendentalCacheStub::UNTAGGED);
-  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
   ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->function()).is(r1));
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 265265d..9d1418b 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -911,12 +911,12 @@
   __ str(receiver, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
   // Write receiver to stack frame.
   int index = stack_space - 1;
-  __ str(receiver, MemOperand(sp, index * kPointerSize));
+  __ str(receiver, MemOperand(sp, index-- * kPointerSize));
   // Write the arguments to stack frame.
   for (int i = 0; i < argc; i++) {
     ASSERT(!receiver.is(values[i]));
     ASSERT(!scratch.is(values[i]));
-    __ str(receiver, MemOperand(sp, index-- * kPointerSize));
+    __ str(values[i], MemOperand(sp, index-- * kPointerSize));
   }
 
   GenerateFastApiDirectCall(masm, optimization, argc, true);
@@ -927,12 +927,10 @@
  public:
   CallInterceptorCompiler(CallStubCompiler* stub_compiler,
                           const ParameterCount& arguments,
-                          Register name,
-                          ExtraICState extra_ic_state)
+                          Register name)
       : stub_compiler_(stub_compiler),
         arguments_(arguments),
-        name_(name),
-        extra_ic_state_(extra_ic_state) {}
+        name_(name) {}
 
   void Compile(MacroAssembler* masm,
                Handle<JSObject> object,
@@ -1107,7 +1105,6 @@
   CallStubCompiler* stub_compiler_;
   const ParameterCount& arguments_;
   Register name_;
-  ExtraICState extra_ic_state_;
 };
 
 
@@ -2466,7 +2463,7 @@
   // Get the receiver from the stack.
   __ ldr(r1, MemOperand(sp, argc * kPointerSize));
 
-  CallInterceptorCompiler compiler(this, arguments(), r2, extra_state());
+  CallInterceptorCompiler compiler(this, arguments(), r2);
   compiler.Compile(masm(), object, holder, name, &lookup, r1, r3, r4, r0,
                    &miss);
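The first stub-cache hunk above fixes the argument copy in the fast API call helper: the slot index was not advanced after the receiver was written, and the loop then stored the receiver again instead of values[i]. The intended layout, written out in plain C++ rather than ARM assembler (illustrative only):

    // stack_space slots: receiver in the highest-index slot, then argc values.
    void WriteFastApiFrame(intptr_t* sp, intptr_t receiver,
                           const intptr_t* values, int argc, int stack_space) {
      int index = stack_space - 1;
      sp[index--] = receiver;           // receiver first, then advance the slot
      for (int i = 0; i < argc; i++) {
        sp[index--] = values[i];        // each argument gets its own slot
      }
    }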
 
diff --git a/src/assembler.cc b/src/assembler.cc
index febae63..b77e92d 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -306,7 +306,9 @@
 //                dropped, and last non-zero chunk tagged with 1.)
 
 
+#ifdef DEBUG
 const int kMaxStandardNonCompactModes = 14;
+#endif
 
 const int kTagBits = 2;
 const int kTagMask = (1 << kTagBits) - 1;
@@ -1418,50 +1420,11 @@
 }
 
 
-static double math_sin_double(double x) {
-  return sin(x);
-}
-
-
-static double math_cos_double(double x) {
-  return cos(x);
-}
-
-
-static double math_tan_double(double x) {
-  return tan(x);
-}
-
-
 static double math_log_double(double x) {
   return log(x);
 }
 
 
-ExternalReference ExternalReference::math_sin_double_function(
-    Isolate* isolate) {
-  return ExternalReference(Redirect(isolate,
-                                    FUNCTION_ADDR(math_sin_double),
-                                    BUILTIN_FP_CALL));
-}
-
-
-ExternalReference ExternalReference::math_cos_double_function(
-    Isolate* isolate) {
-  return ExternalReference(Redirect(isolate,
-                                    FUNCTION_ADDR(math_cos_double),
-                                    BUILTIN_FP_CALL));
-}
-
-
-ExternalReference ExternalReference::math_tan_double_function(
-    Isolate* isolate) {
-  return ExternalReference(Redirect(isolate,
-                                    FUNCTION_ADDR(math_tan_double),
-                                    BUILTIN_FP_CALL));
-}
-
-
 ExternalReference ExternalReference::math_log_double_function(
     Isolate* isolate) {
   return ExternalReference(Redirect(isolate,
diff --git a/src/assembler.h b/src/assembler.h
index 0c706c4..6c06262 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -811,9 +811,6 @@
   static ExternalReference address_of_the_hole_nan();
   static ExternalReference address_of_uint32_bias();
 
-  static ExternalReference math_sin_double_function(Isolate* isolate);
-  static ExternalReference math_cos_double_function(Isolate* isolate);
-  static ExternalReference math_tan_double_function(Isolate* isolate);
   static ExternalReference math_log_double_function(Isolate* isolate);
 
   static ExternalReference math_exp_constants(int constant_index);
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 4d69f84..1d2fb86 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -41,10 +41,11 @@
 #include "platform.h"
 #include "snapshot.h"
 #include "trig-table.h"
-#include "extensions/free-buffer-extension.h"
 #include "extensions/externalize-string-extension.h"
+#include "extensions/free-buffer-extension.h"
 #include "extensions/gc-extension.h"
 #include "extensions/statistics-extension.h"
+#include "extensions/trigger-failure-extension.h"
 #include "code-stubs.h"
 
 namespace v8 {
@@ -107,6 +108,7 @@
   GCExtension::Register();
   ExternalizeStringExtension::Register();
   StatisticsExtension::Register();
+  TriggerFailureExtension::Register();
 }
 
 
@@ -2266,6 +2268,9 @@
   if (FLAG_track_gc_object_stats) {
     InstallExtension(isolate, "v8/statistics", &extension_states);
   }
+  if (FLAG_expose_trigger_failure) {
+    InstallExtension(isolate, "v8/trigger-failure", &extension_states);
+  }
 
   if (extensions == NULL) return true;
   // Install required extensions
diff --git a/src/builtins.cc b/src/builtins.cc
index f9c2708..b27f29a 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -153,8 +153,8 @@
 #endif
 
 
-static inline bool CalledAsConstructor(Isolate* isolate) {
 #ifdef DEBUG
+static inline bool CalledAsConstructor(Isolate* isolate) {
   // Calculate the result using a full stack frame iterator and check
   // that the state of the stack is as we assume it to be in the
   // code below.
@@ -163,7 +163,6 @@
   it.Advance();
   StackFrame* frame = it.frame();
   bool reference_result = frame->is_construct();
-#endif
   Address fp = Isolate::c_entry_fp(isolate->thread_local_top());
   // Because we know fp points to an exit frame we can use the relevant
   // part of ExitFrame::ComputeCallerState directly.
@@ -180,6 +179,7 @@
   ASSERT_EQ(result, reference_result);
   return result;
 }
+#endif
 
 
 // ----------------------------------------------------------------------------
diff --git a/src/cached-powers.cc b/src/cached-powers.cc
index fbfaf26..9e2919b 100644
--- a/src/cached-powers.cc
+++ b/src/cached-powers.cc
@@ -133,7 +133,10 @@
   {V8_2PART_UINT64_C(0xaf87023b, 9bf0ee6b), 1066, 340},
 };
 
+#ifdef DEBUG
 static const int kCachedPowersLength = ARRAY_SIZE(kCachedPowers);
+#endif
+
 static const int kCachedPowersOffset = 348;  // -1 * the first decimal_exponent.
 static const double kD_1_LOG2_10 = 0.30102999566398114;  //  1 / lg(10)
 // Difference between the decimal exponents in the table above.
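Here, as in src/arm/codegen-arm.cc and src/assembler.cc above, constants that are only read by assertion macros are wrapped in #ifdef DEBUG so release builds (where those macros compile away) do not warn about unused variables. The idiom in isolation (names illustrative; ASSERT is V8's debug-only macro):

    #ifdef DEBUG
    // Only referenced from ASSERT(), which expands to nothing in release
    // builds; without the guard, release compiles warn about an unused constant.
    static const int kTableLength = 87;
    #endif

    void CheckIndex(int index) {
      ASSERT(index >= 0 && index < kTableLength);
    }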
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index 96cfc37..7922d6c 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -300,7 +300,8 @@
   Handle<Code> code = chunk->Codegen();
   if (FLAG_profile_hydrogen_code_stub_compilation) {
     double ms = timer.Elapsed().InMillisecondsF();
-    PrintF("[Lazy compilation of %s took %0.3f ms]\n", *stub->GetName(), ms);
+    PrintF("[Lazy compilation of %s took %0.3f ms]\n",
+           stub->GetName().get(), ms);
   }
   return code;
 }
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 3fb6153..c66001a 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -80,8 +80,8 @@
 
 void CodeStub::RecordCodeGeneration(Code* code, Isolate* isolate) {
   SmartArrayPointer<const char> name = GetName();
-  PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, *name));
-  GDBJIT(AddCode(GDBJITInterface::STUB, *name, code));
+  PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, name.get()));
+  GDBJIT(AddCode(GDBJITInterface::STUB, name.get(), code));
   Counters* counters = isolate->counters();
   counters->total_stubs_code_size()->Increment(code->instruction_size());
 }
@@ -164,7 +164,7 @@
 #ifdef ENABLE_DISASSEMBLER
     if (FLAG_print_code_stubs) {
       CodeTracer::Scope trace_scope(isolate->GetCodeTracer());
-      new_object->Disassemble(*GetName(), trace_scope.file());
+      new_object->Disassemble(GetName().get(), trace_scope.file());
       PrintF(trace_scope.file(), "\n");
     }
 #endif
diff --git a/src/codegen.cc b/src/codegen.cc
index 28f7d6c..adb47b8 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -81,7 +81,7 @@
           CodeStub::MajorName(info->code_stub()->MajorKey(), true);
       PrintF("%s", name == NULL ? "<unknown>" : name);
     } else {
-      PrintF("%s", *info->function()->debug_name()->ToCString());
+      PrintF("%s", info->function()->debug_name()->ToCString().get());
     }
     PrintF("]\n");
   }
@@ -162,7 +162,7 @@
       if (FLAG_print_unopt_code) {
         PrintF(tracing_scope.file(), "--- Unoptimized code ---\n");
         info->closure()->shared()->code()->Disassemble(
-            *function->debug_name()->ToCString(), tracing_scope.file());
+            function->debug_name()->ToCString().get(), tracing_scope.file());
       }
       PrintF(tracing_scope.file(), "--- Optimized code ---\n");
     } else {
@@ -177,7 +177,7 @@
       code->Disassemble(CodeStub::MajorName(major_key, false),
                         tracing_scope.file());
     } else {
-      code->Disassemble(*function->debug_name()->ToCString(),
+      code->Disassemble(function->debug_name()->ToCString().get(),
                         tracing_scope.file());
     }
     PrintF(tracing_scope.file(), "--- End code ---\n");
diff --git a/src/compiler.cc b/src/compiler.cc
index 83f9ab2..c6dfa04 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -235,8 +235,7 @@
 // profiler, so they trigger their own optimization when they're called
 // for the SharedFunctionInfo::kCallsUntilPrimitiveOptimization-th time.
 bool CompilationInfo::ShouldSelfOptimize() {
-  return FLAG_self_optimization &&
-      FLAG_crankshaft &&
+  return FLAG_crankshaft &&
       !function()->flags()->Contains(kDontSelfOptimize) &&
       !function()->dont_optimize() &&
       function()->scope()->AllowsLazyCompilation() &&
@@ -460,7 +459,7 @@
   if (FLAG_trace_hydrogen) {
     Handle<String> name = info()->function()->debug_name();
     PrintF("-----------------------------------------------------------\n");
-    PrintF("Compiling method %s using hydrogen\n", *name->ToCString());
+    PrintF("Compiling method %s using hydrogen\n", name->ToCString().get());
     isolate()->GetHTracer()->TraceCompilation(info());
   }
 
@@ -1056,6 +1055,7 @@
 
 
 bool Compiler::RecompileConcurrent(Handle<JSFunction> closure,
+                                   Handle<Code> unoptimized,
                                    uint32_t osr_pc_offset) {
   bool compiling_for_osr = (osr_pc_offset != 0);
 
@@ -1078,11 +1078,10 @@
   Handle<SharedFunctionInfo> shared = info->shared_info();
 
   if (compiling_for_osr) {
-    BailoutId osr_ast_id =
-        shared->code()->TranslatePcOffsetToAstId(osr_pc_offset);
+    BailoutId osr_ast_id = unoptimized->TranslatePcOffsetToAstId(osr_pc_offset);
     ASSERT(!osr_ast_id.IsNone());
     info->SetOptimizing(osr_ast_id);
-    info->set_osr_pc_offset(osr_pc_offset);
+    info->SetOsrInfo(unoptimized, osr_pc_offset);
 
     if (FLAG_trace_osr) {
       PrintF("[COSR - attempt to queue ");
@@ -1100,30 +1099,30 @@
   isolate->counters()->total_compile_size()->Increment(compiled_size);
 
   {
-    CompilationHandleScope handle_scope(*info);
+    CompilationHandleScope handle_scope(info.get());
 
-    if (!compiling_for_osr && InstallCodeFromOptimizedCodeMap(*info)) {
+    if (!compiling_for_osr && InstallCodeFromOptimizedCodeMap(info.get())) {
       return true;
     }
 
-    if (Parser::Parse(*info)) {
+    if (Parser::Parse(info.get())) {
       LanguageMode language_mode = info->function()->language_mode();
       info->SetLanguageMode(language_mode);
       shared->set_language_mode(language_mode);
       info->SaveHandles();
 
-      if (Rewriter::Rewrite(*info) && Scope::Analyze(*info)) {
-        RecompileJob* job = new(info->zone()) RecompileJob(*info);
+      if (Rewriter::Rewrite(info.get()) && Scope::Analyze(info.get())) {
+        RecompileJob* job = new(info->zone()) RecompileJob(info.get());
         RecompileJob::Status status = job->CreateGraph();
         if (status == RecompileJob::SUCCEEDED) {
           info.Detach();
-          shared->code()->set_profiler_ticks(0);
+          unoptimized->set_profiler_ticks(0);
           isolate->optimizing_compiler_thread()->QueueForOptimization(job);
           ASSERT(!isolate->has_pending_exception());
           return true;
         } else if (status == RecompileJob::BAILED_OUT) {
           isolate->clear_pending_exception();
-          InstallFullCode(*info);
+          InstallFullCode(info.get());
         }
       }
     }
@@ -1140,7 +1139,7 @@
   // Except when OSR already disabled optimization for some reason.
   if (info->shared_info()->optimization_disabled()) {
     info->AbortOptimization();
-    InstallFullCode(*info);
+    InstallFullCode(info.get());
     if (FLAG_trace_concurrent_recompilation) {
       PrintF("  ** aborting optimization for ");
       info->closure()->PrintName();
@@ -1172,14 +1171,14 @@
            status == RecompileJob::BAILED_OUT);
   }
 
-  InstallCodeCommon(*info);
+  InstallCodeCommon(info.get());
   if (status == RecompileJob::SUCCEEDED) {
     Handle<Code> code = info->code();
     ASSERT(info->shared_info()->scope_info() != ScopeInfo::Empty(isolate));
     info->closure()->ReplaceCode(*code);
     if (info->shared_info()->SearchOptimizedCodeMap(
             info->closure()->context()->native_context()) == -1) {
-      InsertCodeIntoOptimizedCodeMap(*info);
+      InsertCodeIntoOptimizedCodeMap(info.get());
     }
     if (FLAG_trace_concurrent_recompilation) {
       PrintF("  ** Optimized code for ");
@@ -1188,7 +1187,7 @@
     }
   } else {
     info->AbortOptimization();
-    InstallFullCode(*info);
+    InstallFullCode(info.get());
   }
   // Optimized code is finally replacing unoptimized code.  Reset the latter's
   // profiler ticks to prevent too soon re-opt after a deopt.
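Compiler::RecompileConcurrent now takes the specific unoptimized Code object whose back edge was patched, and the CompilationInfo remembers it (see SetOsrInfo in src/compiler.h below) so that later stack-check patching hits the same code object even if the shared function's code is replaced in the meantime. A hedged caller-side sketch; the call site itself is not part of this hunk and the local names are assumed for illustration:

    // 'function' is a Handle<JSFunction>, 'pc_offset' the patched back-edge
    // offset recorded when OSR was requested.
    Handle<Code> unoptimized(function->shared()->code(), isolate);
    if (!Compiler::RecompileConcurrent(function, unoptimized, pc_offset)) {
      // Concurrent recompilation was not queued; execution stays in the
      // unoptimized code.
    }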
diff --git a/src/compiler.h b/src/compiler.h
index 080907e..7599c13 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -85,6 +85,7 @@
   Handle<Context> context() const { return context_; }
   BailoutId osr_ast_id() const { return osr_ast_id_; }
   uint32_t osr_pc_offset() const { return osr_pc_offset_; }
+  Handle<Code> osr_patched_code() const { return osr_patched_code_; }
   int opt_count() const { return opt_count_; }
   int num_parameters() const;
   int num_heap_slots() const;
@@ -265,6 +266,7 @@
     SaveHandle(&shared_info_);
     SaveHandle(&context_);
     SaveHandle(&script_);
+    SaveHandle(&osr_patched_code_);
   }
 
   BailoutReason bailout_reason() const { return bailout_reason_; }
@@ -311,7 +313,8 @@
     return abort_due_to_dependency_;
   }
 
-  void set_osr_pc_offset(uint32_t pc_offset) {
+  void SetOsrInfo(Handle<Code> code, uint32_t pc_offset) {
+    osr_patched_code_ = code;
     osr_pc_offset_ = pc_offset;
   }
 
@@ -416,6 +419,10 @@
   // The pc_offset corresponding to osr_ast_id_ in unoptimized code.
   // We can look this up in the back edge table, but cache it for quick access.
   uint32_t osr_pc_offset_;
+  // The unoptimized code we patched for OSR may not be the shared code
+  // afterwards, since we may need to compile it again to include deoptimization
+  // data.  Keep track of which code we patched.
+  Handle<Code> osr_patched_code_;
 
   // Flag whether compilation needs to be aborted due to dependency change.
   bool abort_due_to_dependency_;
@@ -626,6 +633,7 @@
   static bool CompileLazy(CompilationInfo* info);
 
   static bool RecompileConcurrent(Handle<JSFunction> function,
+                                  Handle<Code> unoptimized,
                                   uint32_t osr_pc_offset = 0);
 
   // Compile a shared function info object (the function is possibly lazily
diff --git a/src/d8-debug.cc b/src/d8-debug.cc
index 6c297d7..0cd1cc2 100644
--- a/src/d8-debug.cc
+++ b/src/d8-debug.cc
@@ -332,13 +332,13 @@
   // Receive the connect message (with empty body).
   i::SmartArrayPointer<char> message =
       i::DebuggerAgentUtil::ReceiveMessage(remote_debugger_->conn());
-  ASSERT(*message == NULL);
+  ASSERT(message.get() == NULL);
 
   while (true) {
     // Receive a message.
     i::SmartArrayPointer<char> message =
         i::DebuggerAgentUtil::ReceiveMessage(remote_debugger_->conn());
-    if (*message == NULL) {
+    if (message.get() == NULL) {
       remote_debugger_->ConnectionClosed();
       return;
     }
diff --git a/src/d8-debug.h b/src/d8-debug.h
index 5587622..f753177 100644
--- a/src/d8-debug.h
+++ b/src/d8-debug.h
@@ -135,7 +135,7 @@
   static const int kDisconnect = 3;
 
   int type() { return type_; }
-  char* data() { return *data_; }
+  char* data() { return data_.get(); }
 
  private:
   void set_next(RemoteDebuggerEvent* event) { next_ = event; }
diff --git a/src/d8.cc b/src/d8.cc
index 2fdbe0c..eb70f3e 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -168,10 +168,9 @@
 const char* Shell::kPrompt = "d8> ";
 
 
+#ifndef V8_SHARED
 const int MB = 1024 * 1024;
 
-
-#ifndef V8_SHARED
 bool CounterMap::Match(void* key1, void* key2) {
   const char* name1 = reinterpret_cast<const char*>(key1);
   const char* name2 = reinterpret_cast<const char*>(key2);
@@ -610,19 +609,19 @@
 Handle<Array> Shell::GetCompletions(Isolate* isolate,
                                     Handle<String> text,
                                     Handle<String> full) {
-  HandleScope handle_scope(isolate);
+  EscapableHandleScope handle_scope(isolate);
   v8::Local<v8::Context> utility_context =
       v8::Local<v8::Context>::New(isolate, utility_context_);
   v8::Context::Scope context_scope(utility_context);
   Handle<Object> global = utility_context->Global();
-  Handle<Value> fun =
+  Local<Value> fun =
       global->Get(String::NewFromUtf8(isolate, "GetCompletions"));
   static const int kArgc = 3;
   v8::Local<v8::Context> evaluation_context =
       v8::Local<v8::Context>::New(isolate, evaluation_context_);
   Handle<Value> argv[kArgc] = { evaluation_context->Global(), text, full };
-  Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv);
-  return handle_scope.Close(Handle<Array>::Cast(val));
+  Local<Value> val = Local<Function>::Cast(fun)->Call(global, kArgc, argv);
+  return handle_scope.Escape(Local<Array>::Cast(val));
 }
 
 
@@ -966,7 +965,7 @@
 #endif  // V8_SHARED
   // Initialize the global objects
   Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
-  HandleScope handle_scope(isolate);
+  EscapableHandleScope handle_scope(isolate);
   Local<Context> context = Context::New(isolate, NULL, global_template);
   ASSERT(!context.IsEmpty());
   Context::Scope scope(context);
@@ -986,7 +985,7 @@
   context->Global()->Set(String::NewFromUtf8(isolate, "arguments"),
                          Utils::ToLocal(arguments_jsarray));
 #endif  // V8_SHARED
-  return handle_scope.Close(context);
+  return handle_scope.Escape(context);
 }
 
 
diff --git a/src/date.cc b/src/date.cc
index a377451..4afd8dc 100644
--- a/src/date.cc
+++ b/src/date.cc
@@ -36,7 +36,6 @@
 namespace internal {
 
 
-static const int kDays4Years[] = {0, 365, 2 * 365, 3 * 365 + 1};
 static const int kDaysIn4Years = 4 * 365 + 1;
 static const int kDaysIn100Years = 25 * kDaysIn4Years - 1;
 static const int kDaysIn400Years = 4 * kDaysIn100Years + 1;
diff --git a/src/debug-agent.cc b/src/debug-agent.cc
index 51823aa..d3d22c9 100644
--- a/src/debug-agent.cc
+++ b/src/debug-agent.cc
@@ -192,7 +192,7 @@
 
 void DebuggerAgentSession::Run() {
   // Send the hello message.
-  bool ok = DebuggerAgentUtil::SendConnectMessage(client_, *agent_->name_);
+  bool ok = DebuggerAgentUtil::SendConnectMessage(client_, agent_->name_.get());
   if (!ok) return;
 
   while (true) {
@@ -200,7 +200,7 @@
     SmartArrayPointer<char> message =
         DebuggerAgentUtil::ReceiveMessage(client_);
 
-    const char* msg = *message;
+    const char* msg = message.get();
     bool is_closing_session = (msg == NULL);
 
     if (msg == NULL) {
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index 76f2fa9..fc0df65 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -691,12 +691,12 @@
     }
   }
   PrintF(stderr, "[couldn't find pc offset for node=%d]\n", id.ToInt());
-  PrintF(stderr, "[method: %s]\n", *shared->DebugName()->ToCString());
+  PrintF(stderr, "[method: %s]\n", shared->DebugName()->ToCString().get());
   // Print the source code if available.
   HeapStringAllocator string_allocator;
   StringStream stream(&string_allocator);
   shared->SourceCodePrint(&stream, -1);
-  PrintF(stderr, "[source:\n%s\n]", *stream.ToCString());
+  PrintF(stderr, "[source:\n%s\n]", stream.ToCString().get());
 
   FATAL("unable to find pc offset during deoptimization");
   return -1;
diff --git a/src/disassembler.cc b/src/disassembler.cc
index 69737ed..8bec2ce 100644
--- a/src/disassembler.cc
+++ b/src/disassembler.cc
@@ -224,7 +224,7 @@
         StringStream accumulator(&allocator);
         relocinfo.target_object()->ShortPrint(&accumulator);
         SmartArrayPointer<const char> obj_name = accumulator.ToCString();
-        out.AddFormatted("    ;; object: %s", *obj_name);
+        out.AddFormatted("    ;; object: %s", obj_name.get());
       } else if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
         const char* reference_name =
             ref_encoder.NameOfAddress(relocinfo.target_reference());
diff --git a/src/extensions/trigger-failure-extension.cc b/src/extensions/trigger-failure-extension.cc
new file mode 100644
index 0000000..5fe6bbb
--- /dev/null
+++ b/src/extensions/trigger-failure-extension.cc
@@ -0,0 +1,82 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "trigger-failure-extension.h"
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+
+const char* const TriggerFailureExtension::kSource =
+    "native function triggerCheckFalse();"
+    "native function triggerAssertFalse();"
+    "native function triggerSlowAssertFalse();";
+
+
+v8::Handle<v8::FunctionTemplate>
+TriggerFailureExtension::GetNativeFunctionTemplate(
+    v8::Isolate* isolate,
+    v8::Handle<v8::String> str) {
+  if (strcmp(*v8::String::Utf8Value(str), "triggerCheckFalse") == 0) {
+    return v8::FunctionTemplate::New(
+        TriggerFailureExtension::TriggerCheckFalse);
+  } else if (strcmp(*v8::String::Utf8Value(str), "triggerAssertFalse") == 0) {
+    return v8::FunctionTemplate::New(
+        TriggerFailureExtension::TriggerAssertFalse);
+  } else {
+    CHECK_EQ(0, strcmp(*v8::String::Utf8Value(str), "triggerSlowAssertFalse"));
+    return v8::FunctionTemplate::New(
+        TriggerFailureExtension::TriggerSlowAssertFalse);
+  }
+}
+
+
+void TriggerFailureExtension::TriggerCheckFalse(
+    const v8::FunctionCallbackInfo<v8::Value>& args) {
+  CHECK(false);
+}
+
+
+void TriggerFailureExtension::TriggerAssertFalse(
+    const v8::FunctionCallbackInfo<v8::Value>& args) {
+  ASSERT(false);
+}
+
+
+void TriggerFailureExtension::TriggerSlowAssertFalse(
+    const v8::FunctionCallbackInfo<v8::Value>& args) {
+  SLOW_ASSERT(false);
+}
+
+
+void TriggerFailureExtension::Register() {
+  static TriggerFailureExtension trigger_failure_extension;
+  static v8::DeclareExtension declaration(&trigger_failure_extension);
+}
+
+} }  // namespace v8::internal
diff --git a/src/extensions/trigger-failure-extension.h b/src/extensions/trigger-failure-extension.h
new file mode 100644
index 0000000..5a4223f
--- /dev/null
+++ b/src/extensions/trigger-failure-extension.h
@@ -0,0 +1,56 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_EXTENSIONS_TRIGGER_FAILURE_EXTENSION_H_
+#define V8_EXTENSIONS_TRIGGER_FAILURE_EXTENSION_H_
+
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+class TriggerFailureExtension : public v8::Extension {
+ public:
+  TriggerFailureExtension() : v8::Extension("v8/trigger-failure", kSource) {}
+  virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
+      v8::Isolate* isolate,
+      v8::Handle<v8::String> name);
+  static void TriggerCheckFalse(
+      const v8::FunctionCallbackInfo<v8::Value>& args);
+  static void TriggerAssertFalse(
+      const v8::FunctionCallbackInfo<v8::Value>& args);
+  static void TriggerSlowAssertFalse(
+      const v8::FunctionCallbackInfo<v8::Value>& args);
+  static void Register();
+
+ private:
+  static const char* const kSource;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_EXTENSIONS_TRIGGER_FAILURE_EXTENSION_H_
diff --git a/src/factory.cc b/src/factory.cc
index 483e6a6..c10111a 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -896,10 +896,10 @@
       if (space > 0) {
         MaybeObject* maybe_arg = args->GetElement(isolate(), i);
         Handle<String> arg_str(reinterpret_cast<String*>(maybe_arg));
-        const char* arg = *arg_str->ToCString();
+        SmartArrayPointer<char> arg = arg_str->ToCString();
         Vector<char> v2(p, static_cast<int>(space));
-        OS::StrNCpy(v2, arg, space);
-        space -= Min(space, strlen(arg));
+        OS::StrNCpy(v2, arg.get(), space);
+        space -= Min(space, strlen(arg.get()));
         p = &buffer[kBufferSize] - space;
       }
     }
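The factory.cc hunk above fixes a lifetime bug rather than just style: binding a raw const char* to the temporary SmartArrayPointer returned by ToCString() left the pointer dangling as soon as the temporary was destroyed at the end of the statement. Keeping the SmartArrayPointer in a named local, as the new code does, keeps the buffer alive while it is copied. The rule in isolation (a sketch using the V8-internal types referenced above):

    void CopyName(String* str, Vector<char> out) {
      SmartArrayPointer<char> owned = str->ToCString();  // owns the buffer
      OS::StrNCpy(out, owned.get(), out.length());       // safe: 'owned' is live
      // const char* raw = *str->ToCString();  // WRONG: dangles after this line
    }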
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index b304ce5..658991f 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -347,19 +347,8 @@
 
 DEFINE_bool(new_string_add, true, "enable new string addition")
 
-// Experimental profiler changes.
-DEFINE_bool(experimental_profiler, true, "enable all profiler experiments")
-DEFINE_bool(watch_ic_patching, false, "profiler considers IC stability")
+// Profiler flags.
 DEFINE_int(frame_count, 1, "number of stack frames inspected by the profiler")
-DEFINE_bool(self_optimization, false,
-            "primitive functions trigger their own optimization")
-DEFINE_bool(direct_self_opt, false,
-            "call recompile stub directly when self-optimizing")
-DEFINE_bool(retry_self_opt, false, "re-try self-optimization if it failed")
-DEFINE_bool(interrupt_at_exit, false,
-            "insert an interrupt check at function exit")
-DEFINE_bool(weighted_back_edges, false,
-            "weight back edges by jump distance for interrupt triggering")
            // 0x1700 fits in the immediate field of an ARM instruction.
 DEFINE_int(interrupt_budget, 0x1700,
            "execution budget before interrupt is triggered")
@@ -367,13 +356,6 @@
            "percentage of ICs that must have type info to allow optimization")
 DEFINE_int(self_opt_count, 130, "call count before self-optimization")
 
-DEFINE_implication(experimental_profiler, watch_ic_patching)
-DEFINE_implication(experimental_profiler, self_optimization)
-// Not implying direct_self_opt here because it seems to be a bad idea.
-DEFINE_implication(experimental_profiler, retry_self_opt)
-DEFINE_implication(experimental_profiler, interrupt_at_exit)
-DEFINE_implication(experimental_profiler, weighted_back_edges)
-
 DEFINE_bool(trace_opt_verbose, false, "extra verbose compilation tracing")
 DEFINE_implication(trace_opt_verbose, trace_opt)
 
@@ -421,6 +403,7 @@
 DEFINE_implication(expose_gc_as, expose_gc)
 DEFINE_bool(expose_externalize_string, false,
             "expose externalize string extension")
+DEFINE_bool(expose_trigger_failure, false, "expose trigger-failure extension")
 DEFINE_int(stack_trace_limit, 10, "number of stack frames to capture")
 DEFINE_bool(builtins_in_stack_traces, false,
             "show built-in functions in stack traces")
diff --git a/src/flags.cc b/src/flags.cc
index 0c36aed..8e42206 100644
--- a/src/flags.cc
+++ b/src/flags.cc
@@ -566,7 +566,7 @@
     Flag* f = &flags[i];
     SmartArrayPointer<const char> value = ToString(f);
     printf("  --%s (%s)\n        type: %s  default: %s\n",
-           f->name(), f->comment(), Type2String(f->type()), *value);
+           f->name(), f->comment(), Type2String(f->type()), value.get());
   }
 }
 
diff --git a/src/frames.cc b/src/frames.cc
index 9549c2d..bcf675b 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -824,7 +824,7 @@
             SmartArrayPointer<char> c_script_name =
                 script_name->ToCString(DISALLOW_NULLS,
                                        ROBUST_STRING_TRAVERSAL);
-            PrintF(file, " at %s:%d", *c_script_name, line);
+            PrintF(file, " at %s:%d", c_script_name.get(), line);
           } else {
             PrintF(file, " at <unknown>:%d", line);
           }
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 483d1e3..a40b61e 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -1697,9 +1697,11 @@
 void BackEdgeTable::AddStackCheck(CompilationInfo* info) {
   DisallowHeapAllocation no_gc;
   Isolate* isolate = info->isolate();
-  Code* code = info->shared_info()->code();
+  Code* code = *info->osr_patched_code();
   Address pc = code->instruction_start() + info->osr_pc_offset();
-  ASSERT_EQ(ON_STACK_REPLACEMENT, GetBackEdgeState(isolate, code, pc));
+  ASSERT_EQ(info->osr_ast_id().ToInt(),
+            code->TranslatePcOffsetToAstId(info->osr_pc_offset()).ToInt());
+  ASSERT_NE(INTERRUPT, GetBackEdgeState(isolate, code, pc));
   Code* patch = isolate->builtins()->builtin(Builtins::kOsrAfterStackCheck);
   PatchAt(code, pc, OSR_AFTER_STACK_CHECK, patch);
 }
@@ -1708,8 +1710,10 @@
 void BackEdgeTable::RemoveStackCheck(CompilationInfo* info) {
   DisallowHeapAllocation no_gc;
   Isolate* isolate = info->isolate();
-  Code* code = info->shared_info()->code();
+  Code* code = *info->osr_patched_code();
   Address pc = code->instruction_start() + info->osr_pc_offset();
+  ASSERT_EQ(info->osr_ast_id().ToInt(),
+            code->TranslatePcOffsetToAstId(info->osr_pc_offset()).ToInt());
   if (GetBackEdgeState(isolate, code, pc) == OSR_AFTER_STACK_CHECK) {
     Code* patch = isolate->builtins()->builtin(Builtins::kOnStackReplacement);
     PatchAt(code, pc, ON_STACK_REPLACEMENT, patch);
diff --git a/src/gdb-jit.cc b/src/gdb-jit.cc
index 21cfd22..afe5b71 100644
--- a/src/gdb-jit.cc
+++ b/src/gdb-jit.cc
@@ -1089,7 +1089,7 @@
     w->Write<uint8_t>(sizeof(intptr_t));
 
     w->WriteULEB128(1);  // Abbreviation code.
-    w->WriteString(*desc_->GetFilename());
+    w->WriteString(desc_->GetFilename().get());
     w->Write<intptr_t>(desc_->CodeStart());
     w->Write<intptr_t>(desc_->CodeStart() + desc_->CodeSize());
     w->Write<uint32_t>(0);
@@ -1131,7 +1131,7 @@
       for (int param = 0; param < params; ++param) {
         w->WriteULEB128(current_abbreviation++);
         w->WriteString(
-            *scope->parameter(param)->name()->ToCString(DISALLOW_NULLS));
+            scope->parameter(param)->name()->ToCString(DISALLOW_NULLS).get());
         w->Write<uint32_t>(ty_offset);
         Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
         uintptr_t block_start = w->position();
@@ -1182,7 +1182,7 @@
       for (int local = 0; local < locals; ++local) {
         w->WriteULEB128(current_abbreviation++);
         w->WriteString(
-            *stack_locals[local]->name()->ToCString(DISALLOW_NULLS));
+            stack_locals[local]->name()->ToCString(DISALLOW_NULLS).get());
         w->Write<uint32_t>(ty_offset);
         Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
         uintptr_t block_start = w->position();
@@ -1455,7 +1455,7 @@
     w->Write<uint8_t>(1);  // DW_LNS_SET_COLUMN operands count.
     w->Write<uint8_t>(0);  // DW_LNS_NEGATE_STMT operands count.
     w->Write<uint8_t>(0);  // Empty include_directories sequence.
-    w->WriteString(*desc_->GetFilename());  // File name.
+    w->WriteString(desc_->GetFilename().get());  // File name.
     w->WriteULEB128(0);  // Current directory.
     w->WriteULEB128(0);  // Unknown modification time.
     w->WriteULEB128(0);  // Unknown file size.
@@ -2009,7 +2009,8 @@
   if (!name.is_null() && name->IsString()) {
     SmartArrayPointer<char> name_cstring =
         Handle<String>::cast(name)->ToCString(DISALLOW_NULLS);
-    AddCode(*name_cstring, *code, GDBJITInterface::FUNCTION, *script, info);
+    AddCode(name_cstring.get(), *code, GDBJITInterface::FUNCTION, *script,
+            info);
   } else {
     AddCode("", *code, GDBJITInterface::FUNCTION, *script, info);
   }
@@ -2132,7 +2133,7 @@
                               Code* code) {
   if (!FLAG_gdbjit) return;
   if (name != NULL && name->IsString()) {
-    AddCode(tag, *String::cast(name)->ToCString(DISALLOW_NULLS), code);
+    AddCode(tag, String::cast(name)->ToCString(DISALLOW_NULLS).get(), code);
   } else {
     AddCode(tag, "", code);
   }
diff --git a/src/harmony-math.js b/src/harmony-math.js
index a4d3f2e..2bf33d6 100644
--- a/src/harmony-math.js
+++ b/src/harmony-math.js
@@ -47,13 +47,82 @@
 }
 
 
+// ES6 draft 09-27-13, section 20.2.2.30.
+function MathSinh(x) {
+  if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+  // Idempotent for NaN, +/-0 and +/-Infinity.
+  if (x === 0 || !NUMBER_IS_FINITE(x)) return x;
+  return (MathExp(x) - MathExp(-x)) / 2;
+}
+
+
+// ES6 draft 09-27-13, section 20.2.2.12.
+function MathCosh(x) {
+  if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+  // Returns NaN for NaN, and +Infinity for both +Infinity and -Infinity.
+  if (!NUMBER_IS_FINITE(x)) return MathAbs(x);
+  return (MathExp(x) + MathExp(-x)) / 2;
+}
+
+
+// ES6 draft 09-27-13, section 20.2.2.33.
+function MathTanh(x) {
+  if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+  // Idempotent for +/-0.
+  if (x === 0) return x;
+  // Returns +/-1 for +/-Infinity.
+  if (!NUMBER_IS_FINITE(x)) return MathSign(x);
+  var exp1 = MathExp(x);
+  var exp2 = MathExp(-x);
+  return (exp1 - exp2) / (exp1 + exp2);
+}
+
+
+// ES6 draft 09-27-13, section 20.2.2.5.
+function MathAsinh(x) {
+  if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+  // Idempotent for NaN, +/-0 and +/-Infinity.
+  if (x === 0 || !NUMBER_IS_FINITE(x)) return x;
+  if (x > 0) return MathLog(x + MathSqrt(x * x + 1));
+  // This is to prevent numerical errors caused by large negative x.
+  return -MathLog(-x + MathSqrt(x * x + 1));
+}
+
+
+// ES6 draft 09-27-13, section 20.2.2.3.
+function MathAcosh(x) {
+  if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+  if (x < 1) return NAN;
+  // Idempotent for NaN and +Infinity.
+  if (!NUMBER_IS_FINITE(x)) return x;
+  return MathLog(x + MathSqrt(x + 1) * MathSqrt(x - 1));
+}
+
+
+// ES6 draft 09-27-13, section 20.2.2.7.
+function MathAtanh(x) {
+  if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+  // Idempotent for +/-0.
+  if (x === 0) return x;
+  // Returns NaN for NaN and +/- Infinity.
+  if (!NUMBER_IS_FINITE(x)) return NAN;
+  return 0.5 * MathLog((1 + x) / (1 - x));
+}
+
+
 function ExtendMath() {
   %CheckIsBootstrapping();
 
   // Set up the non-enumerable functions on the Math object.
   InstallFunctions($Math, DONT_ENUM, $Array(
     "sign", MathSign,
-    "trunc", MathTrunc
+    "trunc", MathTrunc,
+    "sinh", MathSinh,
+    "cosh", MathCosh,
+    "tanh", MathTanh,
+    "asinh", MathAsinh,
+    "acosh", MathAcosh,
+    "atanh", MathAtanh
   ));
 }
 
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 525c634..3229aee 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -763,23 +763,10 @@
 
 double TranscendentalCache::SubCache::Calculate(double input) {
   switch (type_) {
-    case ACOS:
-      return acos(input);
-    case ASIN:
-      return asin(input);
-    case ATAN:
-      return atan(input);
-    case COS:
-      return fast_cos(input);
-    case EXP:
-      return exp(input);
     case LOG:
       return fast_log(input);
-    case SIN:
-      return fast_sin(input);
-    case TAN:
-      return fast_tan(input);
     default:
+      UNREACHABLE();
       return 0.0;  // Never happens.
   }
 }
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index 6ae6272..1e40950 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -118,7 +118,7 @@
   is_tracking_object_moves_ = true;
   ASSERT(!is_tracking_allocations());
   if (track_allocations) {
-    allocation_tracker_.Reset(new AllocationTracker(*ids_, *names_));
+    allocation_tracker_.Reset(new AllocationTracker(ids_.get(), names_.get()));
     heap()->DisableInlineAllocation();
   }
 }
diff --git a/src/heap-profiler.h b/src/heap-profiler.h
index 3dd5b4d..6d15868 100644
--- a/src/heap-profiler.h
+++ b/src/heap-profiler.h
@@ -55,9 +55,11 @@
 
   void StartHeapObjectsTracking(bool track_allocations);
   void StopHeapObjectsTracking();
-  AllocationTracker* allocation_tracker() { return *allocation_tracker_; }
-  HeapObjectsMap* heap_object_map() { return *ids_; }
-  StringsStorage* names() { return *names_; }
+  AllocationTracker* allocation_tracker() const {
+    return allocation_tracker_.get();
+  }
+  HeapObjectsMap* heap_object_map() const { return ids_.get(); }
+  StringsStorage* names() const { return names_.get(); }
 
   SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
   int GetSnapshotsCount();
@@ -80,7 +82,9 @@
   void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info);
 
   bool is_tracking_object_moves() const { return is_tracking_object_moves_; }
-  bool is_tracking_allocations() { return !allocation_tracker_.is_empty(); }
+  bool is_tracking_allocations() const {
+    return !allocation_tracker_.is_empty();
+  }
 
   Handle<HeapObject> FindHeapObjectById(SnapshotObjectId id);
 
diff --git a/src/heap-snapshot-generator.cc b/src/heap-snapshot-generator.cc
index 84e819d..b9bea75 100644
--- a/src/heap-snapshot-generator.cc
+++ b/src/heap-snapshot-generator.cc
@@ -202,6 +202,7 @@
 
 }  // namespace
 
+
 HeapSnapshot::HeapSnapshot(HeapProfiler* profiler,
                            const char* title,
                            unsigned uid)
@@ -218,6 +219,10 @@
   STATIC_CHECK(
       sizeof(HeapEntry) ==
       SnapshotSizeConstants<kPointerSize>::kExpectedHeapEntrySize);
+  USE(SnapshotSizeConstants<4>::kExpectedHeapGraphEdgeSize);
+  USE(SnapshotSizeConstants<4>::kExpectedHeapEntrySize);
+  USE(SnapshotSizeConstants<8>::kExpectedHeapGraphEdgeSize);
+  USE(SnapshotSizeConstants<8>::kExpectedHeapEntrySize);
   for (int i = 0; i < VisitorSynchronization::kNumberOfSyncTags; ++i) {
     gc_subroot_indexes_[i] = HeapEntry::kNoEntry;
   }
@@ -1854,8 +1859,8 @@
     const char* name = name_format_string != NULL && reference_name->IsString()
         ? names_->GetFormatted(
               name_format_string,
-              *String::cast(reference_name)->ToCString(
-                  DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL)) :
+              String::cast(reference_name)->ToCString(
+                  DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL).get()) :
         names_->GetName(reference_name);
 
     filler_->SetNamedReference(type,
diff --git a/src/heap.cc b/src/heap.cc
index d5c40ad..13c874a 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -1548,9 +1548,6 @@
 
   promotion_queue_.Destroy();
 
-  if (!FLAG_watch_ic_patching) {
-    isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
-  }
   incremental_marking()->UpdateMarkingDequeAfterScavenge();
 
   ScavengeWeakObjectRetainer weak_object_retainer(this);
diff --git a/src/heap.h b/src/heap.h
index 1c8e0e1..c8ccb7f 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -2904,7 +2904,7 @@
 
 class TranscendentalCache {
  public:
-  enum Type {ACOS, ASIN, ATAN, COS, EXP, LOG, SIN, TAN, kNumberOfCaches};
+  enum Type { LOG, kNumberOfCaches };
   static const int kTranscendentalTypeBits = 3;
   STATIC_ASSERT((1 << kTranscendentalTypeBits) >= kNumberOfCaches);
 
diff --git a/src/hydrogen-dce.cc b/src/hydrogen-dce.cc
index e101ee5..3a2eac4 100644
--- a/src/hydrogen-dce.cc
+++ b/src/hydrogen-dce.cc
@@ -64,7 +64,7 @@
   }
   stream.Add(" -> ");
   instr->PrintTo(&stream);
-  PrintF("[MarkLive %s]\n", *stream.ToCString());
+  PrintF("[MarkLive %s]\n", stream.ToCString().get());
 }
 
 
diff --git a/src/hydrogen-gvn.cc b/src/hydrogen-gvn.cc
index 02b3a0a..3ad9312 100644
--- a/src/hydrogen-gvn.cc
+++ b/src/hydrogen-gvn.cc
@@ -412,10 +412,13 @@
 
       // Propagate loop side effects upwards.
       if (block->HasParentLoopHeader()) {
-        int header_id = block->parent_loop_header()->block_id();
-        loop_side_effects_[header_id].Add(block->IsLoopHeader()
-                                          ? loop_side_effects_[id]
-                                          : side_effects);
+        HBasicBlock* with_parent = block;
+        if (block->IsLoopHeader()) side_effects = loop_side_effects_[id];
+        do {
+          HBasicBlock* parent_block = with_parent->parent_loop_header();
+          loop_side_effects_[parent_block->block_id()].Add(side_effects);
+          with_parent = parent_block;
+        } while (with_parent->HasParentLoopHeader());
       }
     }
   }
@@ -517,7 +520,7 @@
       GVNFlagSet side_effects = loop_side_effects_[block->block_id()];
       TRACE_GVN_2("Try loop invariant motion for block B%d %s\n",
                   block->block_id(),
-                  *GetGVNFlagsString(side_effects));
+                  GetGVNFlagsString(side_effects).get());
 
       GVNFlagSet accumulated_first_time_depends;
       GVNFlagSet accumulated_first_time_changes;
@@ -542,7 +545,7 @@
   GVNFlagSet depends_flags = HValue::ConvertChangesToDependsFlags(loop_kills);
   TRACE_GVN_2("Loop invariant motion for B%d %s\n",
               block->block_id(),
-              *GetGVNFlagsString(depends_flags));
+              GetGVNFlagsString(depends_flags).get());
   HInstruction* instr = block->first();
   while (instr != NULL) {
     HInstruction* next = instr->next();
@@ -551,8 +554,8 @@
       TRACE_GVN_4("Checking instruction %d (%s) %s. Loop %s\n",
                   instr->id(),
                   instr->Mnemonic(),
-                  *GetGVNFlagsString(instr->gvn_flags()),
-                  *GetGVNFlagsString(loop_kills));
+                  GetGVNFlagsString(instr->gvn_flags()).get(),
+                  GetGVNFlagsString(loop_kills).get());
       bool can_hoist = !instr->gvn_flags().ContainsAnyOf(depends_flags);
       if (can_hoist && !graph()->use_optimistic_licm()) {
         can_hoist = block->IsLoopSuccessorDominator();
@@ -567,7 +570,8 @@
         }
 
         if (inputs_loop_invariant && ShouldMove(instr, loop_header)) {
-          TRACE_GVN_1("Hoisting loop invariant instruction %d\n", instr->id());
+          TRACE_GVN_2("Hoisting loop invariant instruction i%d to block B%d\n",
+                      instr->id(), pre_header->block_id());
           // Move the instruction out of the loop.
           instr->Unlink();
           instr->InsertBefore(pre_header->end());
@@ -585,11 +589,11 @@
       first_time_changes->Add(instr->ChangesFlags());
       if (!(previous_depends == *first_time_depends)) {
         TRACE_GVN_1("Updated first-time accumulated %s\n",
-                    *GetGVNFlagsString(*first_time_depends));
+                    GetGVNFlagsString(*first_time_depends).get());
       }
       if (!(previous_changes == *first_time_changes)) {
         TRACE_GVN_1("Updated first-time accumulated %s\n",
-                    *GetGVNFlagsString(*first_time_changes));
+                    GetGVNFlagsString(*first_time_changes).get());
       }
     }
     instr = next;
@@ -801,7 +805,7 @@
         map->Kill(flags);
         dominators->Store(flags, instr);
         TRACE_GVN_2("Instruction %d %s\n", instr->id(),
-                    *GetGVNFlagsString(flags));
+                    GetGVNFlagsString(flags).get());
       }
       if (instr->CheckFlag(HValue::kUseGVN)) {
         ASSERT(!instr->HasObservableSideEffects());
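The hydrogen-gvn.cc change above replaces the single-step update of the parent loop header with a do-while walk, so a block's side effects are now recorded on every enclosing loop header rather than only the innermost one. A simplified standalone sketch of that propagation pattern, with hypothetical Block and Flags types standing in for V8's HBasicBlock and GVNFlagSet:

// Simplified sketch of the upward side-effect propagation; Block and Flags
// are illustrative stand-ins, not V8 types.
#include <bitset>
#include <cstddef>
#include <vector>

struct Block {
  int id;
  Block* parent_loop_header;  // NULL when the block is not inside a loop.
  bool is_loop_header;
};

typedef std::bitset<32> Flags;

// Adds |side_effects| to every enclosing loop header, not only the innermost
// one. The caller guarantees block->parent_loop_header != NULL.
static void PropagateLoopSideEffects(const Block* block, Flags side_effects,
                                     std::vector<Flags>* loop_side_effects) {
  const Block* with_parent = block;
  if (block->is_loop_header) side_effects = (*loop_side_effects)[block->id];
  do {
    const Block* parent = with_parent->parent_loop_header;
    (*loop_side_effects)[parent->id] |= side_effects;
    with_parent = parent;
  } while (with_parent->parent_loop_header != NULL);
}

int main() {
  Block outer = { 0, NULL, true };
  Block inner = { 1, &outer, true };
  Block body  = { 2, &inner, false };
  std::vector<Flags> effects(3);
  PropagateLoopSideEffects(&body, Flags(0x5), &effects);
  // Both the inner and the outer loop header now carry the body's effects.
  return (effects[0] == Flags(0x5) && effects[1] == Flags(0x5)) ? 0 : 1;
}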
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 9f96202..e7ad219 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -1119,9 +1119,6 @@
     case kMathRound: return "round";
     case kMathAbs: return "abs";
     case kMathLog: return "log";
-    case kMathSin: return "sin";
-    case kMathCos: return "cos";
-    case kMathTan: return "tan";
     case kMathExp: return "exp";
     case kMathSqrt: return "sqrt";
     case kMathPowHalf: return "pow-half";
@@ -1536,7 +1533,7 @@
       *tag = kStringTag;
       return;
     case IS_INTERNALIZED_STRING:
-      *mask = kIsNotInternalizedMask;
+      *mask = kIsNotStringMask | kIsNotInternalizedMask;
       *tag = kInternalizedTag;
       return;
     default:
@@ -1779,10 +1776,7 @@
     result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) &&
                                   (a->CanBeMinusZero() ||
                                    (a->CanBeZero() && b->CanBeNegative())));
-    if (!a->Includes(kMinInt) ||
-        !b->Includes(-1) ||
-        CheckFlag(kAllUsesTruncatingToInt32)) {
-      // It is safe to clear kCanOverflow when kAllUsesTruncatingToInt32.
+    if (!a->Includes(kMinInt) || !b->Includes(-1)) {
       ClearFlag(HValue::kCanOverflow);
     }
 
@@ -2501,7 +2495,7 @@
 
 void HEnterInlined::PrintDataTo(StringStream* stream) {
   SmartArrayPointer<char> name = function()->debug_name()->ToCString();
-  stream->Add("%s, id=%d", *name, function()->id().ToInt());
+  stream->Add("%s, id=%d", name.get(), function()->id().ToInt());
 }
 
 
@@ -3112,7 +3106,7 @@
 void HLoadNamedGeneric::PrintDataTo(StringStream* stream) {
   object()->PrintNameTo(stream);
   stream->Add(".");
-  stream->Add(*String::cast(*name())->ToCString());
+  stream->Add(String::cast(*name())->ToCString().get());
 }
 
 
@@ -3250,7 +3244,7 @@
   object()->PrintNameTo(stream);
   stream->Add(".");
   ASSERT(name()->IsString());
-  stream->Add(*String::cast(*name())->ToCString());
+  stream->Add(String::cast(*name())->ToCString().get());
   stream->Add(" = ");
   value()->PrintNameTo(stream);
 }
@@ -3859,10 +3853,6 @@
     }
     if (std::isinf(d)) {  // +Infinity and -Infinity.
       switch (op) {
-        case kMathSin:
-        case kMathCos:
-        case kMathTan:
-          return H_CONSTANT_DOUBLE(OS::nan_value());
         case kMathExp:
           return H_CONSTANT_DOUBLE((d > 0.0) ? d : 0.0);
         case kMathLog:
@@ -3880,12 +3870,6 @@
       }
     }
     switch (op) {
-      case kMathSin:
-        return H_CONSTANT_DOUBLE(fast_sin(d));
-      case kMathCos:
-        return H_CONSTANT_DOUBLE(fast_cos(d));
-      case kMathTan:
-        return H_CONSTANT_DOUBLE(fast_tan(d));
       case kMathExp:
         return H_CONSTANT_DOUBLE(fast_exp(d));
       case kMathLog:
@@ -4269,6 +4253,29 @@
 }
 
 
+HObjectAccess HObjectAccess::ForAllocationSiteOffset(int offset) {
+  switch (offset) {
+    case AllocationSite::kTransitionInfoOffset:
+      return HObjectAccess(kInobject, offset, Representation::Tagged());
+    case AllocationSite::kNestedSiteOffset:
+      return HObjectAccess(kInobject, offset, Representation::Tagged());
+    case AllocationSite::kMementoFoundCountOffset:
+      return HObjectAccess(kInobject, offset, Representation::Smi());
+    case AllocationSite::kMementoCreateCountOffset:
+      return HObjectAccess(kInobject, offset, Representation::Smi());
+    case AllocationSite::kPretenureDecisionOffset:
+      return HObjectAccess(kInobject, offset, Representation::Smi());
+    case AllocationSite::kDependentCodeOffset:
+      return HObjectAccess(kInobject, offset, Representation::Tagged());
+    case AllocationSite::kWeakNextOffset:
+      return HObjectAccess(kInobject, offset, Representation::Tagged());
+    default:
+      UNREACHABLE();
+  }
+  return HObjectAccess(kInobject, offset);
+}
+
+
 HObjectAccess HObjectAccess::ForContextSlot(int index) {
   ASSERT(index >= 0);
   Portion portion = kInobject;
@@ -4402,11 +4409,15 @@
       break;
     case kDouble:  // fall through
     case kInobject:
-      if (!name_.is_null()) stream->Add(*String::cast(*name_)->ToCString());
+      if (!name_.is_null()) {
+        stream->Add(String::cast(*name_)->ToCString().get());
+      }
       stream->Add("[in-object]");
       break;
     case kBackingStore:
-      if (!name_.is_null()) stream->Add(*String::cast(*name_)->ToCString());
+      if (!name_.is_null()) {
+        stream->Add(String::cast(*name_)->ToCString().get());
+      }
       stream->Add("[backing-store]");
       break;
     case kExternalMemory:
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 9f7605f..23dbbd2 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -2638,9 +2638,6 @@
         case kMathPowHalf:
         case kMathLog:
         case kMathExp:
-        case kMathSin:
-        case kMathCos:
-        case kMathTan:
           return Representation::Double();
         case kMathAbs:
           return representation();
@@ -2685,9 +2682,6 @@
         SetGVNFlag(kChangesNewSpacePromotion);
         break;
       case kMathLog:
-      case kMathSin:
-      case kMathCos:
-      case kMathTan:
         set_representation(Representation::Double());
         // These operations use the TranscendentalCache, so they may allocate.
         SetGVNFlag(kChangesNewSpacePromotion);
@@ -5961,10 +5955,7 @@
                 ? Representation::Smi() : Representation::Tagged());
   }
 
-  static HObjectAccess ForAllocationSiteOffset(int offset) {
-    ASSERT(offset >= HeapObject::kHeaderSize && offset < AllocationSite::kSize);
-    return HObjectAccess(kInobject, offset);
-  }
+  static HObjectAccess ForAllocationSiteOffset(int offset);
 
   static HObjectAccess ForAllocationSiteList() {
     return HObjectAccess(kExternalMemory, 0, Representation::Tagged());
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 900e07e..176dd9f 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -2592,6 +2592,9 @@
             AllocationSite::kMementoCreateCountOffset));
     memento_create_count = AddUncasted<HAdd>(
         memento_create_count, graph()->GetConstant1());
+    // This smi value is reset to zero after every GC; overflow isn't a problem
+    // since the counter is bounded by the new space size.
+    memento_create_count->ClearFlag(HValue::kCanOverflow);
     HStoreNamedField* store = Add<HStoreNamedField>(
         allocation_site, HObjectAccess::ForAllocationSiteOffset(
             AllocationSite::kMementoCreateCountOffset), memento_create_count);
@@ -6779,7 +6782,7 @@
     SmartArrayPointer<char> caller_name =
         caller->shared()->DebugName()->ToCString();
     PrintF("Trying to inline the polymorphic call to %s from %s\n",
-           *name->ToCString(), *caller_name);
+           name->ToCString().get(), caller_name.get());
   }
 
   if (!TryInlineCall(expr)) {
@@ -6892,8 +6895,8 @@
       SmartArrayPointer<char> caller_name =
           caller->shared()->DebugName()->ToCString();
       PrintF("Trying to inline the polymorphic call to %s from %s\n",
-             *name->ToCString(),
-             *caller_name);
+             name->ToCString().get(),
+             caller_name.get());
     }
     if (FLAG_polymorphic_inlining && TryInlineCall(expr)) {
       // Trying to inline will signal that we should bailout from the
@@ -6957,10 +6960,11 @@
     SmartArrayPointer<char> caller_name =
         caller->shared()->DebugName()->ToCString();
     if (reason == NULL) {
-      PrintF("Inlined %s called from %s.\n", *target_name, *caller_name);
+      PrintF("Inlined %s called from %s.\n", target_name.get(),
+             caller_name.get());
     } else {
       PrintF("Did not inline %s called from %s (%s).\n",
-             *target_name, *caller_name, reason);
+             target_name.get(), caller_name.get(), reason);
     }
   }
 }
@@ -10496,7 +10500,7 @@
   HeapStringAllocator string_allocator;
   StringStream trace(&string_allocator);
   PrintTo(&trace);
-  PrintF("%s", *trace.ToCString());
+  PrintF("%s", trace.ToCString().get());
 }
 
 
@@ -10504,8 +10508,8 @@
   Tag tag(this, "compilation");
   if (info->IsOptimizing()) {
     Handle<String> name = info->function()->debug_name();
-    PrintStringProperty("name", *name->ToCString());
-    PrintStringProperty("method", *name->ToCString());
+    PrintStringProperty("name", name->ToCString().get());
+    PrintStringProperty("method", name->ToCString().get());
   } else {
     CodeStub::Major major_key = info->code_stub()->MajorKey();
     PrintStringProperty("name", CodeStub::MajorName(major_key, false));
@@ -10725,7 +10729,8 @@
 
 
 void HTracer::FlushToFile() {
-  AppendChars(filename_.start(), *trace_.ToCString(), trace_.length(), false);
+  AppendChars(filename_.start(), trace_.ToCString().get(), trace_.length(),
+              false);
   trace_.Reset();
 }
 
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 32c3397..7b44bc0 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -1316,17 +1316,9 @@
   __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
-    // Lookup and calculate pc offset.
-    __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerPCOffset));
-    __ mov(ebx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
-    __ sub(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
-    __ sub(edx, FieldOperand(ebx, SharedFunctionInfo::kCodeOffset));
-    __ SmiTag(edx);
-
-    // Pass both function and pc offset as arguments.
+    // Pass function as argument.
     __ push(eax);
-    __ push(edx);
-    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
   }
 
   Label skip;
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 0481814..4633400 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -896,9 +896,6 @@
 
 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
   switch (type_) {
-    case TranscendentalCache::SIN: return Runtime::kMath_sin;
-    case TranscendentalCache::COS: return Runtime::kMath_cos;
-    case TranscendentalCache::TAN: return Runtime::kMath_tan;
     case TranscendentalCache::LOG: return Runtime::kMath_log;
     default:
       UNIMPLEMENTED();
@@ -913,95 +910,10 @@
   // Input value is on FP stack, and also in ebx/edx.
   // Input value is possibly in xmm1.
   // Address of result (a newly allocated HeapNumber) may be in eax.
-  if (type == TranscendentalCache::SIN ||
-      type == TranscendentalCache::COS ||
-      type == TranscendentalCache::TAN) {
-    // Both fsin and fcos require arguments in the range +/-2^63 and
-    // return NaN for infinities and NaN. They can share all code except
-    // the actual fsin/fcos operation.
-    Label in_range, done;
-    // If argument is outside the range -2^63..2^63, fsin/cos doesn't
-    // work. We must reduce it to the appropriate range.
-    __ mov(edi, edx);
-    __ and_(edi, Immediate(0x7ff00000));  // Exponent only.
-    int supported_exponent_limit =
-        (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
-    __ cmp(edi, Immediate(supported_exponent_limit));
-    __ j(below, &in_range, Label::kNear);
-    // Check for infinity and NaN. Both return NaN for sin.
-    __ cmp(edi, Immediate(0x7ff00000));
-    Label non_nan_result;
-    __ j(not_equal, &non_nan_result, Label::kNear);
-    // Input is +/-Infinity or NaN. Result is NaN.
-    __ fstp(0);
-    // NaN is represented by 0x7ff8000000000000.
-    __ push(Immediate(0x7ff80000));
-    __ push(Immediate(0));
-    __ fld_d(Operand(esp, 0));
-    __ add(esp, Immediate(2 * kPointerSize));
-    __ jmp(&done, Label::kNear);
-
-    __ bind(&non_nan_result);
-
-    // Use fpmod to restrict argument to the range +/-2*PI.
-    __ mov(edi, eax);  // Save eax before using fnstsw_ax.
-    __ fldpi();
-    __ fadd(0);
-    __ fld(1);
-    // FPU Stack: input, 2*pi, input.
-    {
-      Label no_exceptions;
-      __ fwait();
-      __ fnstsw_ax();
-      // Clear if Illegal Operand or Zero Division exceptions are set.
-      __ test(eax, Immediate(5));
-      __ j(zero, &no_exceptions, Label::kNear);
-      __ fnclex();
-      __ bind(&no_exceptions);
-    }
-
-    // Compute st(0) % st(1)
-    {
-      Label partial_remainder_loop;
-      __ bind(&partial_remainder_loop);
-      __ fprem1();
-      __ fwait();
-      __ fnstsw_ax();
-      __ test(eax, Immediate(0x400 /* C2 */));
-      // If C2 is set, computation only has partial result. Loop to
-      // continue computation.
-      __ j(not_zero, &partial_remainder_loop);
-    }
-    // FPU Stack: input, 2*pi, input % 2*pi
-    __ fstp(2);
-    __ fstp(0);
-    __ mov(eax, edi);  // Restore eax (allocated HeapNumber pointer).
-
-    // FPU Stack: input % 2*pi
-    __ bind(&in_range);
-    switch (type) {
-      case TranscendentalCache::SIN:
-        __ fsin();
-        break;
-      case TranscendentalCache::COS:
-        __ fcos();
-        break;
-      case TranscendentalCache::TAN:
-        // FPTAN calculates tangent onto st(0) and pushes 1.0 onto the
-        // FP register stack.
-        __ fptan();
-        __ fstp(0);  // Pop FP register stack.
-        break;
-      default:
-        UNREACHABLE();
-    }
-    __ bind(&done);
-  } else {
-    ASSERT(type == TranscendentalCache::LOG);
-    __ fldln2();
-    __ fxch();
-    __ fyl2x();
-  }
+  ASSERT(type == TranscendentalCache::LOG);
+  __ fldln2();
+  __ fxch();
+  __ fyl2x();
 }
 
 
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index ab4029d..84187b0 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -66,9 +66,6 @@
   if (buffer == NULL) {
     // Fallback to library function if function cannot be created.
     switch (type) {
-      case TranscendentalCache::SIN: return &sin;
-      case TranscendentalCache::COS: return &cos;
-      case TranscendentalCache::TAN: return &tan;
       case TranscendentalCache::LOG: return &log;
       default: UNIMPLEMENTED();
     }
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 3c5d4aa..3c92afa 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -319,10 +319,6 @@
 
 void FullCodeGenerator::EmitProfilingCounterReset() {
   int reset_value = FLAG_interrupt_budget;
-  if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
-    // Self-optimization is a one-off thing: if it fails, don't try again.
-    reset_value = Smi::kMaxValue;
-  }
   __ mov(ebx, Immediate(profiling_counter_));
   __ mov(FieldOperand(ebx, Cell::kValueOffset),
          Immediate(Smi::FromInt(reset_value)));
@@ -334,13 +330,10 @@
   Comment cmnt(masm_, "[ Back edge bookkeeping");
   Label ok;
 
-  int weight = 1;
-  if (FLAG_weighted_back_edges) {
-    ASSERT(back_edge_target->is_bound());
-    int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
-    weight = Min(kMaxBackEdgeWeight,
-                 Max(1, distance / kCodeSizeMultiplier));
-  }
+  ASSERT(back_edge_target->is_bound());
+  int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+  int weight = Min(kMaxBackEdgeWeight,
+                   Max(1, distance / kCodeSizeMultiplier));
   EmitProfilingCounterDecrement(weight);
   __ j(positive, &ok, Label::kNear);
   __ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
@@ -372,31 +365,24 @@
       __ push(eax);
       __ CallRuntime(Runtime::kTraceExit, 1);
     }
-    if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
-      // Pretend that the exit is a backwards jump to the entry.
-      int weight = 1;
-      if (info_->ShouldSelfOptimize()) {
-        weight = FLAG_interrupt_budget / FLAG_self_opt_count;
-      } else if (FLAG_weighted_back_edges) {
-        int distance = masm_->pc_offset();
-        weight = Min(kMaxBackEdgeWeight,
-                     Max(1, distance / kCodeSizeMultiplier));
-      }
-      EmitProfilingCounterDecrement(weight);
-      Label ok;
-      __ j(positive, &ok, Label::kNear);
-      __ push(eax);
-      if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
-        __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-        __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
-      } else {
-        __ call(isolate()->builtins()->InterruptCheck(),
-                RelocInfo::CODE_TARGET);
-      }
-      __ pop(eax);
-      EmitProfilingCounterReset();
-      __ bind(&ok);
+    // Pretend that the exit is a backwards jump to the entry.
+    int weight = 1;
+    if (info_->ShouldSelfOptimize()) {
+      weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+    } else {
+      int distance = masm_->pc_offset();
+      weight = Min(kMaxBackEdgeWeight,
+                   Max(1, distance / kCodeSizeMultiplier));
     }
+    EmitProfilingCounterDecrement(weight);
+    Label ok;
+    __ j(positive, &ok, Label::kNear);
+    __ push(eax);
+    __ call(isolate()->builtins()->InterruptCheck(),
+            RelocInfo::CODE_TARGET);
+    __ pop(eax);
+    EmitProfilingCounterReset();
+    __ bind(&ok);
 #ifdef DEBUG
     // Add a label for checking the size of the code used for returning.
     Label check_exit_codesize;
@@ -4847,9 +4833,11 @@
 
 static const byte kJnsInstruction = 0x79;
 static const byte kJnsOffset = 0x11;
-static const byte kCallInstruction = 0xe8;
 static const byte kNopByteOne = 0x66;
 static const byte kNopByteTwo = 0x90;
+#ifdef DEBUG
+static const byte kCallInstruction = 0xe8;
+#endif
 
 
 void BackEdgeTable::PatchAt(Code* unoptimized_code,
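With the self-optimization and weighted-back-edge flags removed above, the back-edge interrupt weight is always distance / kCodeSizeMultiplier, clamped to the range [1, kMaxBackEdgeWeight]. A small standalone illustration of that clamp (the constant values here are placeholders chosen for the example, not full-codegen's actual values):

// Illustration of the back-edge weight clamp; the constants are placeholders
// for the example, not the values full-codegen uses.
#include <algorithm>
#include <cstdio>

int main() {
  const int kMaxBackEdgeWeight = 127;   // assumed for illustration
  const int kCodeSizeMultiplier = 100;  // assumed for illustration
  const int distances[] = { 10, 2500, 1000000 };
  for (int i = 0; i < 3; ++i) {
    const int distance = distances[i];
    const int weight = std::min(kMaxBackEdgeWeight,
                                std::max(1, distance / kCodeSizeMultiplier));
    std::printf("distance=%7d -> weight=%d\n", distance, weight);
  }
  return 0;
}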
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 28ae469..bba3aca 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -4197,39 +4197,6 @@
 }
 
 
-void LCodeGen::DoMathTan(LMathTan* instr) {
-  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
-  // Set the context register to a GC-safe fake value. Clobbering it is
-  // OK because this instruction is marked as a call.
-  __ Set(esi, Immediate(0));
-  TranscendentalCacheStub stub(TranscendentalCache::TAN,
-                               TranscendentalCacheStub::UNTAGGED);
-  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathCos(LMathCos* instr) {
-  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
-  // Set the context register to a GC-safe fake value. Clobbering it is
-  // OK because this instruction is marked as a call.
-  __ Set(esi, Immediate(0));
-  TranscendentalCacheStub stub(TranscendentalCache::COS,
-                               TranscendentalCacheStub::UNTAGGED);
-  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathSin(LMathSin* instr) {
-  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
-  // Set the context register to a GC-safe fake value. Clobbering it is
-  // OK because this instruction is marked as a call.
-  __ Set(esi, Immediate(0));
-  TranscendentalCacheStub stub(TranscendentalCache::SIN,
-                               TranscendentalCacheStub::UNTAGGED);
-  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
   ASSERT(ToRegister(instr->context()).is(esi));
   ASSERT(ToRegister(instr->function()).is(edi));
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index aa35e9d..aca9bdf 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -286,7 +286,7 @@
   stream->Add("if typeof ");
   value()->PrintTo(stream);
   stream->Add(" == \"%s\" then B%d else B%d",
-              *hydrogen()->type_literal()->ToCString(),
+              hydrogen()->type_literal()->ToCString().get(),
               true_block_id(), false_block_id());
 }
 
@@ -341,13 +341,13 @@
 
 void LCallNamed::PrintDataTo(StringStream* stream) {
   SmartArrayPointer<char> name_string = name()->ToCString();
-  stream->Add("%s #%d / ", *name_string, arity());
+  stream->Add("%s #%d / ", name_string.get(), arity());
 }
 
 
 void LCallGlobal::PrintDataTo(StringStream* stream) {
   SmartArrayPointer<char> name_string = name()->ToCString();
-  stream->Add("%s #%d / ", *name_string, arity());
+  stream->Add("%s #%d / ", name_string.get(), arity());
 }
 
 
@@ -420,7 +420,7 @@
 void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
   object()->PrintTo(stream);
   stream->Add(".");
-  stream->Add(*String::cast(*name())->ToCString());
+  stream->Add(String::cast(*name())->ToCString().get());
   stream->Add(" <- ");
   value()->PrintTo(stream);
 }
@@ -1272,9 +1272,6 @@
     case kMathRound: return DoMathRound(instr);
     case kMathAbs: return DoMathAbs(instr);
     case kMathLog: return DoMathLog(instr);
-    case kMathSin: return DoMathSin(instr);
-    case kMathCos: return DoMathCos(instr);
-    case kMathTan: return DoMathTan(instr);
     case kMathExp: return DoMathExp(instr);
     case kMathSqrt: return DoMathSqrt(instr);
     case kMathPowHalf: return DoMathPowHalf(instr);
@@ -1317,27 +1314,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
-  LOperand* input = UseFixedDouble(instr->value(), xmm1);
-  LMathSin* result = new(zone()) LMathSin(input);
-  return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
-  LOperand* input = UseFixedDouble(instr->value(), xmm1);
-  LMathCos* result = new(zone()) LMathCos(input);
-  return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathTan(HUnaryMathOperation* instr) {
-  LOperand* input = UseFixedDouble(instr->value(), xmm1);
-  LMathTan* result = new(zone()) LMathTan(input);
-  return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
   ASSERT(instr->representation().IsDouble());
   ASSERT(instr->value()->representation().IsDouble());
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index ea4fef8..912ec1e 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -133,7 +133,6 @@
   V(LoadRoot)                                   \
   V(MapEnumLength)                              \
   V(MathAbs)                                    \
-  V(MathCos)                                    \
   V(MathExp)                                    \
   V(MathFloor)                                  \
   V(MathFloorOfDiv)                             \
@@ -141,9 +140,7 @@
   V(MathMinMax)                                 \
   V(MathPowHalf)                                \
   V(MathRound)                                  \
-  V(MathSin)                                    \
   V(MathSqrt)                                   \
-  V(MathTan)                                    \
   V(ModI)                                       \
   V(MulI)                                       \
   V(NumberTagD)                                 \
@@ -792,42 +789,6 @@
 };
 
 
-class LMathSin V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LMathSin(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
-};
-
-
-class LMathCos V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LMathCos(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
-};
-
-
-class LMathTan V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LMathTan(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MathTan, "math-tan")
-};
-
-
 class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 2> {
  public:
   LMathExp(LOperand* value,
@@ -2787,9 +2748,6 @@
   LInstruction* DoMathRound(HUnaryMathOperation* instr);
   LInstruction* DoMathAbs(HUnaryMathOperation* instr);
   LInstruction* DoMathLog(HUnaryMathOperation* instr);
-  LInstruction* DoMathSin(HUnaryMathOperation* instr);
-  LInstruction* DoMathCos(HUnaryMathOperation* instr);
-  LInstruction* DoMathTan(HUnaryMathOperation* instr);
   LInstruction* DoMathExp(HUnaryMathOperation* instr);
   LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
   LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index f4f3b2b..88923c3 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -647,8 +647,7 @@
  public:
   CallInterceptorCompiler(CallStubCompiler* stub_compiler,
                           const ParameterCount& arguments,
-                          Register name,
-                          ExtraICState extra_state)
+                          Register name)
       : stub_compiler_(stub_compiler),
         arguments_(arguments),
         name_(name) {}
@@ -2589,7 +2588,7 @@
   // Get the receiver from the stack.
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
 
-  CallInterceptorCompiler compiler(this, arguments(), ecx, extra_state());
+  CallInterceptorCompiler compiler(this, arguments(), ecx);
   compiler.Compile(masm(), object, holder, name, &lookup, edx, ebx, edi, eax,
                    &miss);
 
diff --git a/src/ic.cc b/src/ic.cc
index 8a25504..25f875a 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -440,9 +440,6 @@
 
 
 void IC::PostPatching(Address address, Code* target, Code* old_target) {
-  if (FLAG_type_info_threshold == 0 && !FLAG_watch_ic_patching) {
-    return;
-  }
   Isolate* isolate = target->GetHeap()->isolate();
   Code* host = isolate->
       inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
@@ -465,10 +462,8 @@
         TypeFeedbackInfo::cast(host->type_feedback_info());
     info->change_own_type_change_checksum();
   }
-  if (FLAG_watch_ic_patching) {
-    host->set_profiler_ticks(0);
-    isolate->runtime_profiler()->NotifyICChanged();
-  }
+  host->set_profiler_ticks(0);
+  isolate->runtime_profiler()->NotifyICChanged();
   // TODO(2029): When an optimized function is patched, it would
   // be nice to propagate the corresponding type information to its
   // unoptimized version for the benefit of later inlining.
diff --git a/src/isolate.cc b/src/isolate.cc
index 25bc546..a0dd958 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -1182,7 +1182,7 @@
         fatal_exception_depth++;
         PrintF(stderr,
                "%s\n\nFROM\n",
-               *MessageHandler::GetLocalizedMessage(this, message_obj));
+               MessageHandler::GetLocalizedMessage(this, message_obj).get());
         PrintCurrentStackTrace(stderr);
         OS::Abort();
       }
@@ -1197,13 +1197,13 @@
       if (exception->IsString() && location->script()->name()->IsString()) {
         OS::PrintError(
             "Extension or internal compilation error: %s in %s at line %d.\n",
-            *String::cast(exception)->ToCString(),
-            *String::cast(location->script()->name())->ToCString(),
+            String::cast(exception)->ToCString().get(),
+            String::cast(location->script()->name())->ToCString().get(),
             line_number + 1);
       } else if (location->script()->name()->IsString()) {
         OS::PrintError(
             "Extension or internal compilation error in %s at line %d.\n",
-            *String::cast(location->script()->name())->ToCString(),
+            String::cast(location->script()->name())->ToCString().get(),
             line_number + 1);
       } else {
         OS::PrintError("Extension or internal compilation error.\n");
@@ -1688,7 +1688,6 @@
     bootstrapper_->TearDown();
 
     if (runtime_profiler_ != NULL) {
-      runtime_profiler_->TearDown();
       delete runtime_profiler_;
       runtime_profiler_ = NULL;
     }
@@ -2049,7 +2048,6 @@
   if (!create_heap_objects) Assembler::QuietNaN(heap_.nan_value());
 
   runtime_profiler_ = new RuntimeProfiler(this);
-  runtime_profiler_->SetUp();
 
   // If we are deserializing, log non-function code objects and compiled
   // functions found in the snapshot.
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index 1f3f2a1..edd2eac 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -645,8 +645,8 @@
 #if defined(V8_INTERPRETED_REGEXP) && defined(DEBUG)
   if (FLAG_trace_regexp_bytecodes) {
     String* pattern = regexp->Pattern();
-    PrintF("\n\nRegexp match:   /%s/\n\n", *(pattern->ToCString()));
-    PrintF("\n\nSubject string: '%s'\n\n", *(subject->ToCString()));
+    PrintF("\n\nRegexp match:   /%s/\n\n", pattern->ToCString().get());
+    PrintF("\n\nSubject string: '%s'\n\n", subject->ToCString().get());
   }
 #endif
   int required_registers = RegExpImpl::IrregexpPrepare(regexp, subject);
@@ -1151,7 +1151,7 @@
 #ifdef DEBUG
   if (FLAG_print_code) {
     CodeTracer::Scope trace_scope(heap->isolate()->GetCodeTracer());
-    Handle<Code>::cast(code)->Disassemble(*pattern->ToCString(),
+    Handle<Code>::cast(code)->Disassemble(pattern->ToCString().get(),
                                           trace_scope.file());
   }
   if (FLAG_trace_regexp_assembler) {
@@ -4374,7 +4374,7 @@
   stream()->Add("\"];\n");
   Visit(node);
   stream()->Add("}\n");
-  printf("%s", *(stream()->ToCString()));
+  printf("%s", stream()->ToCString().get());
 }
 
 
@@ -4669,7 +4669,7 @@
   StringStream stream(&alloc);
   DispatchTableDumper dumper(&stream);
   tree()->ForEach(&dumper);
-  OS::PrintError("%s", *stream.ToCString());
+  OS::PrintError("%s", stream.ToCString().get());
 }
 
 
diff --git a/src/lithium-allocator.cc b/src/lithium-allocator.cc
index 29c3194..48fa862 100644
--- a/src/lithium-allocator.cc
+++ b/src/lithium-allocator.cc
@@ -1369,7 +1369,7 @@
           ASSERT(chunk_->info()->IsOptimizing());
           AllowHandleDereference allow_deref;
           PrintF("Function: %s\n",
-                 *chunk_->info()->function()->debug_name()->ToCString());
+                 chunk_->info()->function()->debug_name()->ToCString().get());
         }
         PrintF("Value %d used before first definition!\n", operand_index);
         LiveRange* range = LiveRangeFor(operand_index);
diff --git a/src/log.cc b/src/log.cc
index a508e87..95362c0 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -1041,7 +1041,7 @@
   if (key->IsString()) {
     SmartArrayPointer<char> str =
         String::cast(key)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-    ApiEvent("api,check-security,\"%s\"\n", *str);
+    ApiEvent("api,check-security,\"%s\"\n", str.get());
   } else if (key->IsSymbol()) {
     Symbol* symbol = Symbol::cast(key);
     if (symbol->name()->IsUndefined()) {
@@ -1051,7 +1051,7 @@
       SmartArrayPointer<char> str = String::cast(symbol->name())->ToCString(
           DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
       ApiEvent("api,check-security,symbol(\"%s\" hash %x)\n",
-               *str,
+               str.get(),
                Symbol::cast(key)->Hash());
     }
   } else if (key->IsUndefined()) {
@@ -1253,17 +1253,18 @@
   if (name->IsString()) {
     SmartArrayPointer<char> property_name =
         String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-    ApiEvent("api,%s,\"%s\",\"%s\"\n", tag, *class_name, *property_name);
+    ApiEvent("api,%s,\"%s\",\"%s\"\n", tag, class_name.get(),
+             property_name.get());
   } else {
     Symbol* symbol = Symbol::cast(name);
     uint32_t hash = symbol->Hash();
     if (symbol->name()->IsUndefined()) {
-      ApiEvent("api,%s,\"%s\",symbol(hash %x)\n", tag, *class_name, hash);
+      ApiEvent("api,%s,\"%s\",symbol(hash %x)\n", tag, class_name.get(), hash);
     } else {
       SmartArrayPointer<char> str = String::cast(symbol->name())->ToCString(
           DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
       ApiEvent("api,%s,\"%s\",symbol(\"%s\" hash %x)\n",
-               tag, *class_name, *str, hash);
+               tag, class_name.get(), str.get(), hash);
     }
   }
 }
@@ -1275,7 +1276,7 @@
   String* class_name_obj = holder->class_name();
   SmartArrayPointer<char> class_name =
       class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-  ApiEvent("api,%s,\"%s\",%u\n", tag, *class_name, index);
+  ApiEvent("api,%s,\"%s\",%u\n", tag, class_name.get(), index);
 }
 
 
@@ -1284,7 +1285,7 @@
   String* class_name_obj = object->class_name();
   SmartArrayPointer<char> class_name =
       class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-  ApiEvent("api,%s,\"%s\"\n", tag, *class_name);
+  ApiEvent("api,%s,\"%s\"\n", tag, class_name.get());
 }
 
 
@@ -1332,7 +1333,7 @@
   if (name->IsString()) {
     SmartArrayPointer<char> str =
         String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-    msg.Append(",1,\"%s%s\"", prefix, *str);
+    msg.Append(",1,\"%s%s\"", prefix, str.get());
   } else {
     Symbol* symbol = Symbol::cast(name);
     if (symbol->name()->IsUndefined()) {
@@ -1340,7 +1341,8 @@
     } else {
       SmartArrayPointer<char> str = String::cast(symbol->name())->ToCString(
           DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-      msg.Append(",1,symbol(\"%s\" hash %x)", prefix, *str, symbol->Hash());
+      msg.Append(",1,symbol(\"%s\" hash %x)", prefix, str.get(),
+                 symbol->Hash());
     }
   }
   msg.Append('\n');
@@ -1439,7 +1441,7 @@
   if (name->IsString()) {
     SmartArrayPointer<char> str =
         String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-    msg.Append("\"%s\"", *str);
+    msg.Append("\"%s\"", str.get());
   } else {
     msg.AppendSymbolName(Symbol::cast(name));
   }
@@ -1470,11 +1472,11 @@
   AppendCodeCreateHeader(&msg, tag, code);
   SmartArrayPointer<char> name =
       shared->DebugName()->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-  msg.Append("\"%s ", *name);
+  msg.Append("\"%s ", name.get());
   if (source->IsString()) {
     SmartArrayPointer<char> sourcestr =
        String::cast(source)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-    msg.Append("%s", *sourcestr);
+    msg.Append("%s", sourcestr.get());
   } else {
     msg.AppendSymbolName(Symbol::cast(source));
   }
@@ -2064,7 +2066,7 @@
 
   SmartArrayPointer<const char> log_file_name =
       PrepareLogFileName(isolate, FLAG_logfile);
-  log_->Initialize(*log_file_name);
+  log_->Initialize(log_file_name.get());
 
 
   if (FLAG_perf_basic_prof) {
@@ -2078,7 +2080,7 @@
   }
 
   if (FLAG_ll_prof) {
-    ll_logger_ = new LowLevelLogger(*log_file_name);
+    ll_logger_ = new LowLevelLogger(log_file_name.get());
     addCodeEventListener(ll_logger_);
   }
 
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 0e6b980..7da765d 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -2422,11 +2422,6 @@
     }
   }
 
-  if (!FLAG_watch_ic_patching) {
-    // Clean up dead objects from the runtime profiler.
-    heap()->isolate()->runtime_profiler()->RemoveDeadSamples();
-  }
-
   if (FLAG_track_gc_object_stats) {
     heap()->CheckpointObjectStats();
   }
@@ -3514,12 +3509,6 @@
   heap_->UpdateReferencesInExternalStringTable(
       &UpdateReferenceInExternalStringTableEntry);
 
-  if (!FLAG_watch_ic_patching) {
-    // Update JSFunction pointers from the runtime profiler.
-    heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
-        &updating_visitor);
-  }
-
   EvacuationWeakObjectRetainer evacuation_object_retainer;
   heap()->ProcessWeakReferences(&evacuation_object_retainer);
 
diff --git a/src/messages.cc b/src/messages.cc
index 9eae67a..3f4484a 100644
--- a/src/messages.cc
+++ b/src/messages.cc
@@ -43,15 +43,15 @@
                                           Handle<Object> message_obj) {
   SmartArrayPointer<char> str = GetLocalizedMessage(isolate, message_obj);
   if (loc == NULL) {
-    PrintF("%s\n", *str);
+    PrintF("%s\n", str.get());
   } else {
     HandleScope scope(isolate);
     Handle<Object> data(loc->script()->name(), isolate);
     SmartArrayPointer<char> data_str;
     if (data->IsString())
       data_str = Handle<String>::cast(data)->ToCString(DISALLOW_NULLS);
-    PrintF("%s:%i: %s\n", *data_str ? *data_str : "<unknown>",
-           loc->start_pos(), *str);
+    PrintF("%s:%i: %s\n", data_str.get() ? data_str.get() : "<unknown>",
+           loc->start_pos(), str.get());
   }
 }
 
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
index 9aed3bd..f551dd5 100644
--- a/src/mips/assembler-mips.cc
+++ b/src/mips/assembler-mips.cc
@@ -260,6 +260,12 @@
 }
 
 
+MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
+                       OffsetAddend offset_addend) : Operand(rm) {
+  offset_ = unit * multiplier + offset_addend;
+}
+
+
 // -----------------------------------------------------------------------------
 // Specific instructions, constants, and masks.
 
@@ -1623,6 +1629,15 @@
 }
 
 
+void Assembler::pref(int32_t hint, const MemOperand& rs) {
+  ASSERT(kArchVariant != kLoongson);
+  ASSERT(is_uint5(hint) && is_uint16(rs.offset_));
+  Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
+      | (rs.offset_);
+  emit(instr);
+}
+
+
 //--------Coprocessor-instructions----------------
 
 // Load, store, move.
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index d9ef46c..70f77ea 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -386,7 +386,15 @@
 // Class MemOperand represents a memory operand in load and store instructions.
 class MemOperand : public Operand {
  public:
+  // Immediate value attached to offset.
+  enum OffsetAddend {
+    offset_minus_one = -1,
+    offset_zero = 0
+  };
+
   explicit MemOperand(Register rn, int32_t offset = 0);
+  explicit MemOperand(Register rn, int32_t unit, int32_t multiplier,
+                      OffsetAddend offset_addend = offset_zero);
   int32_t offset() const { return offset_; }
 
   bool OffsetIsInt16Encodable() const {
@@ -711,6 +719,11 @@
   void swr(Register rd, const MemOperand& rs);
 
 
+  //----------------Prefetch--------------------
+
+  void pref(int32_t hint, const MemOperand& rs);
+
+
   //-------------Misc-instructions--------------
 
   // Break / Trap instructions.
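The new four-argument MemOperand above folds an index into the offset as unit * multiplier + offset_addend. That is what lets the memcpy code added to codegen-mips.cc further below address the n-th word with MemOperand(a1, n, loadstore_chunk) and use offset_minus_one when an lwl/lwr pair needs the last byte of a word. A standalone sketch of just that arithmetic (illustrative, not the V8 class):

// Standalone sketch of the indexed-offset arithmetic performed by the new
// MemOperand(Register, unit, multiplier, OffsetAddend) constructor.
#include <cstdio>

enum OffsetAddend { offset_minus_one = -1, offset_zero = 0 };

static int ComputeOffset(int unit, int multiplier, OffsetAddend addend) {
  return unit * multiplier + addend;
}

int main() {
  const int loadstore_chunk = 4;  // word size used by the MIPS copy loops
  // MemOperand(a1, 7, loadstore_chunk) addresses the 7th word: offset 28.
  std::printf("%d\n", ComputeOffset(7, loadstore_chunk, offset_zero));
  // MemOperand(a1, 1, loadstore_chunk, offset_minus_one) gives offset 3,
  // the last byte of the first word, as the lwl/lwr pair below needs.
  std::printf("%d\n", ComputeOffset(1, loadstore_chunk, offset_minus_one));
  return 0;
}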
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index 19f3cdf..40cc99f 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -969,18 +969,9 @@
   __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
-    // Lookup and calculate pc offset.
-    __ lw(a1, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
-    __ lw(a2, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset));
-    __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
-    __ Subu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
-    __ Subu(a1, a1, a2);
-    __ SmiTag(a1);
-
-    // Pass both function and pc offset as arguments.
+    // Pass function as argument.
     __ push(a0);
-    __ push(a1);
-    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
   }
 
   // If the code object is null, just return to the unoptimized code.
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 4c3708c..cd1af91 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -1467,20 +1467,6 @@
   AllowExternalCallThatCantCauseGC scope(masm);
   Isolate* isolate = masm->isolate();
   switch (type_) {
-    case TranscendentalCache::SIN:
-      __ CallCFunction(
-          ExternalReference::math_sin_double_function(isolate),
-          0, 1);
-      break;
-    case TranscendentalCache::COS:
-      __ CallCFunction(
-          ExternalReference::math_cos_double_function(isolate),
-          0, 1);
-      break;
-    case TranscendentalCache::TAN:
-      __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
-          0, 1);
-      break;
     case TranscendentalCache::LOG:
       __ CallCFunction(
           ExternalReference::math_log_double_function(isolate),
@@ -1497,9 +1483,6 @@
 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
   switch (type_) {
     // Add more cases when necessary.
-    case TranscendentalCache::SIN: return Runtime::kMath_sin;
-    case TranscendentalCache::COS: return Runtime::kMath_cos;
-    case TranscendentalCache::TAN: return Runtime::kMath_tan;
     case TranscendentalCache::LOG: return Runtime::kMath_log;
     default:
       UNIMPLEMENTED();
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index 3a87c5a..cd3a24f 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -39,9 +39,6 @@
 
 UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
   switch (type) {
-    case TranscendentalCache::SIN: return &sin;
-    case TranscendentalCache::COS: return &cos;
-    case TranscendentalCache::TAN: return &tan;
     case TranscendentalCache::LOG: return &log;
     default: UNIMPLEMENTED();
   }
@@ -113,6 +110,430 @@
 }
 
 
+#if defined(V8_HOST_ARCH_MIPS)
+OS::MemCopyUint8Function CreateMemCopyUint8Function(
+      OS::MemCopyUint8Function stub) {
+#if defined(USE_SIMULATOR)
+  return stub;
+#else
+  if (Serializer::enabled()) {
+    return stub;
+  }
+
+  size_t actual_size;
+  byte* buffer = static_cast<byte*>(OS::Allocate(3 * KB, &actual_size, true));
+  if (buffer == NULL) return stub;
+
+  // This code assumes that cache lines are 32 bytes; if the cache line is
+  // larger, it will not work correctly.
+  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+  {
+    Label lastb, unaligned, aligned, chkw,
+          loop16w, chk1w, wordCopy_loop, skip_pref, lastbloop,
+          leave, ua_chk16w, ua_loop16w, ua_skip_pref, ua_chkw,
+          ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop;
+
+    // The size of each prefetch.
+    uint32_t pref_chunk = 32;
+    // The maximum size of a prefetch; it must not be less than pref_chunk.
+    // If the real size of a prefetch is greater than max_pref_size and
+    // the kPrefHintPrepareForStore hint is used, the code will not work
+    // correctly.
+    uint32_t max_pref_size = 128;
+    ASSERT(pref_chunk < max_pref_size);
+
+    // pref_limit is set based on the fact that we never use an offset
+    // greater than 5 on a store pref and that a single pref can
+    // never be larger than max_pref_size.
+    uint32_t pref_limit = (5 * pref_chunk) + max_pref_size;
+    int32_t pref_hint_load = kPrefHintLoadStreamed;
+    int32_t pref_hint_store = kPrefHintPrepareForStore;
+    uint32_t loadstore_chunk = 4;
+
+    // The initial prefetches may fetch bytes that are before the buffer being
+    // copied. Start copies with an offset of 4 to avoid this situation when
+    // using kPrefHintPrepareForStore.
+    ASSERT(pref_hint_store != kPrefHintPrepareForStore ||
+           pref_chunk * 4 >= max_pref_size);
+
+    // If the size is less than 8, go to lastb. Regardless of size,
+    // copy the dst pointer to v0 for the return value.
+    __ slti(t2, a2, 2 * loadstore_chunk);
+    __ bne(t2, zero_reg, &lastb);
+    __ mov(v0, a0);  // In delay slot.
+
+    // If src and dst have different alignments, go to unaligned, if they
+    // have the same alignment (but are not actually aligned) do a partial
+    // load/store to make them aligned. If they are both already aligned
+    // we can start copying at aligned.
+    __ xor_(t8, a1, a0);
+    __ andi(t8, t8, loadstore_chunk - 1);  // t8 is a0/a1 word-displacement.
+    __ bne(t8, zero_reg, &unaligned);
+    __ subu(a3, zero_reg, a0);  // In delay slot.
+
+    __ andi(a3, a3, loadstore_chunk - 1);  // Copy a3 bytes to align a0/a1.
+    __ beq(a3, zero_reg, &aligned);  // Already aligned.
+    __ subu(a2, a2, a3);  // In delay slot. a2 is the remaining byte count.
+
+    __ lwr(t8, MemOperand(a1));
+    __ addu(a1, a1, a3);
+    __ swr(t8, MemOperand(a0));
+    __ addu(a0, a0, a3);
+
+    // Now dst/src are both aligned to (word) aligned addresses. Set a2 to
+    // count how many bytes we have to copy after all the 64 byte chunks are
+    // copied and a3 to the dst pointer after all the 64 byte chunks have been
+    // copied. We will loop, incrementing a0 and a1 until a0 equals a3.
+    __ bind(&aligned);
+    __ andi(t8, a2, 0x3f);
+    __ beq(a2, t8, &chkw);  // Less than 64?
+    __ subu(a3, a2, t8);  // In delay slot.
+    __ addu(a3, a0, a3);  // Now a3 is the final dst after loop.
+
+    // When in the loop we prefetch with kPrefHintPrepareForStore hint,
+    // in this case a0+x should be past the "t0-32" address. This means:
+    // for x=128 the last "safe" a0 address is "t0-160". Alternatively, for
+    // x=64 the last "safe" a0 address is "t0-96". In the current version we
+    // will use "pref hint, 128(a0)", so "t0-160" is the limit.
+    if (pref_hint_store == kPrefHintPrepareForStore) {
+      __ addu(t0, a0, a2);  // t0 is the "past the end" address.
+      __ Subu(t9, t0, pref_limit);  // t9 is the "last safe pref" address.
+    }
+
+    __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
+    __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
+    __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
+    __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
+
+    if (pref_hint_store != kPrefHintPrepareForStore) {
+      __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
+      __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
+      __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
+    }
+    __ bind(&loop16w);
+    __ lw(t0, MemOperand(a1));
+
+    if (pref_hint_store == kPrefHintPrepareForStore) {
+      __ sltu(v1, t9, a0);  // If a0 > t9, don't use next prefetch.
+      __ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg));
+    }
+    __ lw(t1, MemOperand(a1, 1, loadstore_chunk));  // Maybe in delay slot.
+
+    __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
+    __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
+
+    __ bind(&skip_pref);
+    __ lw(t2, MemOperand(a1, 2, loadstore_chunk));
+    __ lw(t3, MemOperand(a1, 3, loadstore_chunk));
+    __ lw(t4, MemOperand(a1, 4, loadstore_chunk));
+    __ lw(t5, MemOperand(a1, 5, loadstore_chunk));
+    __ lw(t6, MemOperand(a1, 6, loadstore_chunk));
+    __ lw(t7, MemOperand(a1, 7, loadstore_chunk));
+    __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
+
+    __ sw(t0, MemOperand(a0));
+    __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
+    __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
+    __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
+    __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
+    __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
+    __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
+    __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
+
+    __ lw(t0, MemOperand(a1, 8, loadstore_chunk));
+    __ lw(t1, MemOperand(a1, 9, loadstore_chunk));
+    __ lw(t2, MemOperand(a1, 10, loadstore_chunk));
+    __ lw(t3, MemOperand(a1, 11, loadstore_chunk));
+    __ lw(t4, MemOperand(a1, 12, loadstore_chunk));
+    __ lw(t5, MemOperand(a1, 13, loadstore_chunk));
+    __ lw(t6, MemOperand(a1, 14, loadstore_chunk));
+    __ lw(t7, MemOperand(a1, 15, loadstore_chunk));
+    __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
+
+    __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
+    __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
+    __ sw(t2, MemOperand(a0, 10, loadstore_chunk));
+    __ sw(t3, MemOperand(a0, 11, loadstore_chunk));
+    __ sw(t4, MemOperand(a0, 12, loadstore_chunk));
+    __ sw(t5, MemOperand(a0, 13, loadstore_chunk));
+    __ sw(t6, MemOperand(a0, 14, loadstore_chunk));
+    __ sw(t7, MemOperand(a0, 15, loadstore_chunk));
+    __ addiu(a0, a0, 16 * loadstore_chunk);
+    __ bne(a0, a3, &loop16w);
+    __ addiu(a1, a1, 16 * loadstore_chunk);  // In delay slot.
+    __ mov(a2, t8);
+
+    // Here we have src and dest word-aligned but less than 64 bytes to go.
+    // Check for a 32 bytes chunk and copy if there is one. Otherwise jump
+    // down to chk1w to handle the tail end of the copy.
+    __ bind(&chkw);
+    __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
+    __ andi(t8, a2, 0x1f);
+    __ beq(a2, t8, &chk1w);  // Less than 32?
+    __ nop();  // In delay slot.
+    __ lw(t0, MemOperand(a1));
+    __ lw(t1, MemOperand(a1, 1, loadstore_chunk));
+    __ lw(t2, MemOperand(a1, 2, loadstore_chunk));
+    __ lw(t3, MemOperand(a1, 3, loadstore_chunk));
+    __ lw(t4, MemOperand(a1, 4, loadstore_chunk));
+    __ lw(t5, MemOperand(a1, 5, loadstore_chunk));
+    __ lw(t6, MemOperand(a1, 6, loadstore_chunk));
+    __ lw(t7, MemOperand(a1, 7, loadstore_chunk));
+    __ addiu(a1, a1, 8 * loadstore_chunk);
+    __ sw(t0, MemOperand(a0));
+    __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
+    __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
+    __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
+    __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
+    __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
+    __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
+    __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
+    __ addiu(a0, a0, 8 * loadstore_chunk);
+
+    // Here we have less than 32 bytes to copy. Set up for a loop to copy
+    // one word at a time. Set a2 to count how many bytes we have to copy
+    // after all the word chunks are copied and a3 to the dst pointer after
+    // all the word chunks have been copied. We will loop, incrementing a0
+    // and a1 until a0 equals a3.
+    __ bind(&chk1w);
+    __ andi(a2, t8, loadstore_chunk - 1);
+    __ beq(a2, t8, &lastb);
+    __ subu(a3, t8, a2);  // In delay slot.
+    __ addu(a3, a0, a3);
+
+    __ bind(&wordCopy_loop);
+    __ lw(t3, MemOperand(a1));
+    __ addiu(a0, a0, loadstore_chunk);
+    __ addiu(a1, a1, loadstore_chunk);
+    __ bne(a0, a3, &wordCopy_loop);
+    __ sw(t3, MemOperand(a0, -1, loadstore_chunk));  // In delay slot.
+
+    __ bind(&lastb);
+    __ Branch(&leave, le, a2, Operand(zero_reg));
+    __ addu(a3, a0, a2);
+
+    __ bind(&lastbloop);
+    __ lb(v1, MemOperand(a1));
+    __ addiu(a0, a0, 1);
+    __ addiu(a1, a1, 1);
+    __ bne(a0, a3, &lastbloop);
+    __ sb(v1, MemOperand(a0, -1));  // In delay slot.
+
+    __ bind(&leave);
+    __ jr(ra);
+    __ nop();
+
+    // Unaligned case. Only the dst gets aligned so we need to do partial
+    // loads of the source followed by normal stores to the dst (once we
+    // have aligned the destination).
+    __ bind(&unaligned);
+    __ andi(a3, a3, loadstore_chunk - 1);  // Copy a3 bytes to align a0/a1.
+    __ beq(a3, zero_reg, &ua_chk16w);
+    __ subu(a2, a2, a3);  // In delay slot.
+
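+    // Note (added): lwr/lwl together assemble one word from an unaligned
+    // source address; the offset_minus_one form of the lwl operand reflects
+    // the usual little-endian MIPS idiom (lwr at offset 0, lwl at offset 3).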
+    __ lwr(v1, MemOperand(a1));
+    __ lwl(v1,
+           MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+    __ addu(a1, a1, a3);
+    __ swr(v1, MemOperand(a0));
+    __ addu(a0, a0, a3);
+
+    // Now the dst (but not the source) is aligned. Set a2 to count how many
+    // bytes we have to copy after all the 64-byte chunks are copied and a3 to
+    // the dst pointer after all the 64-byte chunks have been copied. We will
+    // loop, incrementing a0 and a1 until a0 equals a3.
+    __ bind(&ua_chk16w);
+    __ andi(t8, a2, 0x3f);
+    __ beq(a2, t8, &ua_chkw);
+    __ subu(a3, a2, t8);  // In delay slot.
+    __ addu(a3, a0, a3);
+
+    if (pref_hint_store == kPrefHintPrepareForStore) {
+      __ addu(t0, a0, a2);
+      __ Subu(t9, t0, pref_limit);
+    }
+
+    __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
+    __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
+    __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
+
+    if (pref_hint_store != kPrefHintPrepareForStore) {
+      __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
+      __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
+      __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
+    }
+
+    __ bind(&ua_loop16w);
+    __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
+    __ lwr(t0, MemOperand(a1));
+    __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
+    __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
+
+    if (pref_hint_store == kPrefHintPrepareForStore) {
+      __ sltu(v1, t9, a0);
+      __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
+    }
+    __ lwr(t3, MemOperand(a1, 3, loadstore_chunk));  // Maybe in delay slot.
+
+    __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
+    __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
+
+    __ bind(&ua_skip_pref);
+    __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
+    __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
+    __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
+    __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
+    __ lwl(t0,
+           MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t1,
+           MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t2,
+           MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t3,
+           MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t4,
+           MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t5,
+           MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t6,
+           MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t7,
+           MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+    __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
+    __ sw(t0, MemOperand(a0));
+    __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
+    __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
+    __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
+    __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
+    __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
+    __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
+    __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
+    __ lwr(t0, MemOperand(a1, 8, loadstore_chunk));
+    __ lwr(t1, MemOperand(a1, 9, loadstore_chunk));
+    __ lwr(t2, MemOperand(a1, 10, loadstore_chunk));
+    __ lwr(t3, MemOperand(a1, 11, loadstore_chunk));
+    __ lwr(t4, MemOperand(a1, 12, loadstore_chunk));
+    __ lwr(t5, MemOperand(a1, 13, loadstore_chunk));
+    __ lwr(t6, MemOperand(a1, 14, loadstore_chunk));
+    __ lwr(t7, MemOperand(a1, 15, loadstore_chunk));
+    __ lwl(t0,
+           MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t1,
+           MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t2,
+           MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t3,
+           MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t4,
+           MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t5,
+           MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t6,
+           MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t7,
+           MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
+    __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
+    __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
+    __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
+    __ sw(t2, MemOperand(a0, 10, loadstore_chunk));
+    __ sw(t3, MemOperand(a0, 11, loadstore_chunk));
+    __ sw(t4, MemOperand(a0, 12, loadstore_chunk));
+    __ sw(t5, MemOperand(a0, 13, loadstore_chunk));
+    __ sw(t6, MemOperand(a0, 14, loadstore_chunk));
+    __ sw(t7, MemOperand(a0, 15, loadstore_chunk));
+    __ addiu(a0, a0, 16 * loadstore_chunk);
+    __ bne(a0, a3, &ua_loop16w);
+    __ addiu(a1, a1, 16 * loadstore_chunk);  // In delay slot.
+    __ mov(a2, t8);
+
+    // Here we have less than 64 bytes to go. Check for a 32-byte chunk and
+    // copy it if there is one. Otherwise jump down to ua_chk1w to handle the
+    // tail end of the copy.
+    __ bind(&ua_chkw);
+    __ Pref(pref_hint_load, MemOperand(a1));
+    __ andi(t8, a2, 0x1f);
+
+    __ beq(a2, t8, &ua_chk1w);
+    __ nop();  // In delay slot.
+    __ lwr(t0, MemOperand(a1));
+    __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
+    __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
+    __ lwr(t3, MemOperand(a1, 3, loadstore_chunk));
+    __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
+    __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
+    __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
+    __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
+    __ lwl(t0,
+           MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t1,
+           MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t2,
+           MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t3,
+           MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t4,
+           MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t5,
+           MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t6,
+           MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t7,
+           MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+    __ addiu(a1, a1, 8 * loadstore_chunk);
+    __ sw(t0, MemOperand(a0));
+    __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
+    __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
+    __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
+    __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
+    __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
+    __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
+    __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
+    __ addiu(a0, a0, 8 * loadstore_chunk);
+
+    // Less than 32 bytes to copy. Set up for a loop to
+    // copy one word at a time.
+    __ bind(&ua_chk1w);
+    __ andi(a2, t8, loadstore_chunk - 1);
+    __ beq(a2, t8, &ua_smallCopy);
+    __ subu(a3, t8, a2);  // In delay slot.
+    __ addu(a3, a0, a3);
+
+    __ bind(&ua_wordCopy_loop);
+    __ lwr(v1, MemOperand(a1));
+    __ lwl(v1,
+           MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+    __ addiu(a0, a0, loadstore_chunk);
+    __ addiu(a1, a1, loadstore_chunk);
+    __ bne(a0, a3, &ua_wordCopy_loop);
+    __ sw(v1, MemOperand(a0, -1, loadstore_chunk));  // In delay slot.
+
+    // Copy the remaining bytes (fewer than one word) one byte at a time.
+    __ bind(&ua_smallCopy);
+    __ beq(a2, zero_reg, &leave);
+    __ addu(a3, a0, a2);  // In delay slot.
+
+    __ bind(&ua_smallCopy_loop);
+    __ lb(v1, MemOperand(a1));
+    __ addiu(a0, a0, 1);
+    __ addiu(a1, a1, 1);
+    __ bne(a0, a3, &ua_smallCopy_loop);
+    __ sb(v1, MemOperand(a0, -1));  // In delay slot.
+
+    __ jr(ra);
+    __ nop();
+  }
+  CodeDesc desc;
+  masm.GetCode(&desc);
+  ASSERT(!RelocInfo::RequiresRelocation(desc));
+
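+  // Note (added): the generated stub is flushed from the instruction cache
+  // and write-protected before a callable function pointer is handed out.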
+  CPU::FlushICache(buffer, actual_size);
+  OS::ProtectCode(buffer, actual_size);
+  return FUNCTION_CAST<OS::MemCopyUint8Function>(buffer);
+#endif
+}
+#endif
+
 #undef __
 
 
@@ -603,9 +1024,10 @@
   __ bind(&done);
 }
 
-
+#ifdef DEBUG
 // nop(CODE_AGE_MARKER_NOP)
 static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
+#endif
 
 static byte* GetNoCodeAgeSequence(uint32_t* length) {
   // The sequence of instructions that is patched out for aging code is the
diff --git a/src/mips/constants-mips.h b/src/mips/constants-mips.h
index 5a0870f..dcf8b82 100644
--- a/src/mips/constants-mips.h
+++ b/src/mips/constants-mips.h
@@ -124,6 +124,16 @@
 
 const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask;
 
+// 'pref' instruction hints
+const int32_t kPrefHintLoad = 0;
+const int32_t kPrefHintStore = 1;
+const int32_t kPrefHintLoadStreamed = 4;
+const int32_t kPrefHintStoreStreamed = 5;
+const int32_t kPrefHintLoadRetained = 6;
+const int32_t kPrefHintStoreRetained = 7;
+const int32_t kPrefHintWritebackInvalidate = 25;
+const int32_t kPrefHintPrepareForStore = 30;
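+// Note (added): these values appear to follow the hint-field encodings
+// defined by the MIPS32 architecture for the 'pref' instruction.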
+
 // Helper functions for converting between register numbers and names.
 class Registers {
  public:
@@ -297,6 +307,8 @@
   LWC1      =   ((6 << 3) + 1) << kOpcodeShift,
   LDC1      =   ((6 << 3) + 5) << kOpcodeShift,
 
+  PREF      =   ((6 << 3) + 3) << kOpcodeShift,
+
   SWC1      =   ((7 << 3) + 1) << kOpcodeShift,
   SDC1      =   ((7 << 3) + 5) << kOpcodeShift,
 
diff --git a/src/mips/disasm-mips.cc b/src/mips/disasm-mips.cc
index 691df94..1ae0340 100644
--- a/src/mips/disasm-mips.cc
+++ b/src/mips/disasm-mips.cc
@@ -899,6 +899,9 @@
     case LWR:
       Format(instr, "lwr     'rt, 'imm16s('rs)");
       break;
+    case PREF:
+      Format(instr, "pref    'rt, 'imm16s('rs)");
+      break;
     case SB:
       Format(instr, "sb      'rt, 'imm16s('rs)");
       break;
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 3ce2ab5..74b3807 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -341,10 +341,6 @@
 
 void FullCodeGenerator::EmitProfilingCounterReset() {
   int reset_value = FLAG_interrupt_budget;
-  if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
-    // Self-optimization is a one-off thing: if it fails, don't try again.
-    reset_value = Smi::kMaxValue;
-  }
   if (isolate()->IsDebuggerActive()) {
     // Detect debug break requests as soon as possible.
     reset_value = FLAG_interrupt_budget >> 4;
@@ -365,13 +361,10 @@
   Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
   Comment cmnt(masm_, "[ Back edge bookkeeping");
   Label ok;
-  int weight = 1;
-  if (FLAG_weighted_back_edges) {
-    ASSERT(back_edge_target->is_bound());
-    int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
-    weight = Min(kMaxBackEdgeWeight,
-                 Max(1, distance / kCodeSizeMultiplier));
-  }
+  ASSERT(back_edge_target->is_bound());
+  int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+  int weight = Min(kMaxBackEdgeWeight,
+                   Max(1, distance / kCodeSizeMultiplier));
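+  // Note (added): the decrement is weighted by the size of the code span
+  // covered by the back edge, so larger loop bodies consume the interrupt
+  // budget faster.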
   EmitProfilingCounterDecrement(weight);
   __ slt(at, a3, zero_reg);
   __ beq(at, zero_reg, &ok);
@@ -404,32 +397,24 @@
       __ push(v0);
       __ CallRuntime(Runtime::kTraceExit, 1);
     }
-    if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
-      // Pretend that the exit is a backwards jump to the entry.
-      int weight = 1;
-      if (info_->ShouldSelfOptimize()) {
-        weight = FLAG_interrupt_budget / FLAG_self_opt_count;
-      } else if (FLAG_weighted_back_edges) {
-        int distance = masm_->pc_offset();
-        weight = Min(kMaxBackEdgeWeight,
-                     Max(1, distance / kCodeSizeMultiplier));
-      }
-      EmitProfilingCounterDecrement(weight);
-      Label ok;
-      __ Branch(&ok, ge, a3, Operand(zero_reg));
-      __ push(v0);
-      if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
-        __ lw(a2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-        __ push(a2);
-        __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
-      } else {
-        __ Call(isolate()->builtins()->InterruptCheck(),
-                RelocInfo::CODE_TARGET);
-      }
-      __ pop(v0);
-      EmitProfilingCounterReset();
-      __ bind(&ok);
+    // Pretend that the exit is a backwards jump to the entry.
+    int weight = 1;
+    if (info_->ShouldSelfOptimize()) {
+      weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+    } else {
+      int distance = masm_->pc_offset();
+      weight = Min(kMaxBackEdgeWeight,
+                   Max(1, distance / kCodeSizeMultiplier));
     }
+    EmitProfilingCounterDecrement(weight);
+    Label ok;
+    __ Branch(&ok, ge, a3, Operand(zero_reg));
+    __ push(v0);
+    __ Call(isolate()->builtins()->InterruptCheck(),
+            RelocInfo::CODE_TARGET);
+    __ pop(v0);
+    EmitProfilingCounterReset();
+    __ bind(&ok);
 
 #ifdef DEBUG
     // Add a label for checking the size of the code used for returning.
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index 26f446c..b4d9441 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -3874,39 +3874,6 @@
 }
 
 
-void LCodeGen::DoMathTan(LMathTan* instr) {
-  ASSERT(ToDoubleRegister(instr->result()).is(f4));
-  // Set the context register to a GC-safe fake value. Clobbering it is
-  // OK because this instruction is marked as a call.
-  __ mov(cp, zero_reg);
-  TranscendentalCacheStub stub(TranscendentalCache::TAN,
-                               TranscendentalCacheStub::UNTAGGED);
-  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathCos(LMathCos* instr) {
-  ASSERT(ToDoubleRegister(instr->result()).is(f4));
-  // Set the context register to a GC-safe fake value. Clobbering it is
-  // OK because this instruction is marked as a call.
-  __ mov(cp, zero_reg);
-  TranscendentalCacheStub stub(TranscendentalCache::COS,
-                               TranscendentalCacheStub::UNTAGGED);
-  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathSin(LMathSin* instr) {
-  ASSERT(ToDoubleRegister(instr->result()).is(f4));
-  // Set the context register to a GC-safe fake value. Clobbering it is
-  // OK because this instruction is marked as a call.
-  __ mov(cp, zero_reg);
-  TranscendentalCacheStub stub(TranscendentalCache::SIN,
-                               TranscendentalCacheStub::UNTAGGED);
-  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
   ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->function()).is(a1));
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index a441ba5..1ca008c 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -261,7 +261,7 @@
   stream->Add("if typeof ");
   value()->PrintTo(stream);
   stream->Add(" == \"%s\" then B%d else B%d",
-              *hydrogen()->type_literal()->ToCString(),
+              hydrogen()->type_literal()->ToCString().get(),
               true_block_id(), false_block_id());
 }
 
@@ -314,13 +314,13 @@
 
 void LCallNamed::PrintDataTo(StringStream* stream) {
   SmartArrayPointer<char> name_string = name()->ToCString();
-  stream->Add("%s #%d / ", *name_string, arity());
+  stream->Add("%s #%d / ", name_string.get(), arity());
 }
 
 
 void LCallGlobal::PrintDataTo(StringStream* stream) {
   SmartArrayPointer<char> name_string = name()->ToCString();
-  stream->Add("%s #%d / ", *name_string, arity());
+  stream->Add("%s #%d / ", name_string.get(), arity());
 }
 
 
@@ -365,7 +365,7 @@
 void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
   object()->PrintTo(stream);
   stream->Add(".");
-  stream->Add(*String::cast(*name())->ToCString());
+  stream->Add(String::cast(*name())->ToCString().get());
   stream->Add(" <- ");
   value()->PrintTo(stream);
 }
@@ -1190,9 +1190,6 @@
     case kMathRound: return DoMathRound(instr);
     case kMathAbs: return DoMathAbs(instr);
     case kMathLog: return DoMathLog(instr);
-    case kMathSin: return DoMathSin(instr);
-    case kMathCos: return DoMathCos(instr);
-    case kMathTan: return DoMathTan(instr);
     case kMathExp: return DoMathExp(instr);
     case kMathSqrt: return DoMathSqrt(instr);
     case kMathPowHalf: return DoMathPowHalf(instr);
@@ -1210,27 +1207,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
-  LOperand* input = UseFixedDouble(instr->value(), f4);
-  LMathSin* result = new(zone()) LMathSin(input);
-  return MarkAsCall(DefineFixedDouble(result, f4), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
-  LOperand* input = UseFixedDouble(instr->value(), f4);
-  LMathCos* result = new(zone()) LMathCos(input);
-  return MarkAsCall(DefineFixedDouble(result, f4), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathTan(HUnaryMathOperation* instr) {
-  LOperand* input = UseFixedDouble(instr->value(), f4);
-  LMathTan* result = new(zone()) LMathTan(input);
-  return MarkAsCall(DefineFixedDouble(result, f4), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
   ASSERT(instr->representation().IsDouble());
   ASSERT(instr->value()->representation().IsDouble());
diff --git a/src/mips/lithium-mips.h b/src/mips/lithium-mips.h
index dbb78ea..99885dc 100644
--- a/src/mips/lithium-mips.h
+++ b/src/mips/lithium-mips.h
@@ -131,7 +131,6 @@
   V(LoadNamedGeneric)                           \
   V(MapEnumLength)                              \
   V(MathAbs)                                    \
-  V(MathCos)                                    \
   V(MathExp)                                    \
   V(MathFloor)                                  \
   V(MathFloorOfDiv)                             \
@@ -139,9 +138,7 @@
   V(MathMinMax)                                 \
   V(MathPowHalf)                                \
   V(MathRound)                                  \
-  V(MathSin)                                    \
   V(MathSqrt)                                   \
-  V(MathTan)                                    \
   V(ModI)                                       \
   V(MulI)                                       \
   V(MultiplyAddD)                               \
@@ -811,42 +808,6 @@
 };
 
 
-class LMathSin V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LMathSin(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
-};
-
-
-class LMathCos V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LMathCos(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
-};
-
-
-class LMathTan V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LMathTan(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MathTan, "math-tan")
-};
-
-
 class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 3> {
  public:
   LMathExp(LOperand* value,
@@ -2752,9 +2713,6 @@
   LInstruction* DoMathRound(HUnaryMathOperation* instr);
   LInstruction* DoMathAbs(HUnaryMathOperation* instr);
   LInstruction* DoMathLog(HUnaryMathOperation* instr);
-  LInstruction* DoMathSin(HUnaryMathOperation* instr);
-  LInstruction* DoMathCos(HUnaryMathOperation* instr);
-  LInstruction* DoMathTan(HUnaryMathOperation* instr);
   LInstruction* DoMathExp(HUnaryMathOperation* instr);
   LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
   LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index f33e6fa..bc981cb 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -789,8 +789,29 @@
 }
 
 
+void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
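+  // Note (added): on the Loongson variant 'pref' is not emitted (presumably
+  // unavailable or unreliable there); a plain load into zero_reg serves as a
+  // harmless cache-touching substitute.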
+  if (kArchVariant == kLoongson) {
+    lw(zero_reg, rs);
+  } else {
+    pref(hint, rs);
+  }
+}
+
+
 //------------Pseudo-instructions-------------
 
+void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
+  lwr(rd, rs);
+  lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
+}
+
+
+void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
+  swr(rd, rs);
+  swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
+}
+
+
 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
   AllowDeferredHandleDereference smi_check;
   if (value->IsSmi()) {
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index 4e30c35..f1b82ed 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -601,12 +601,17 @@
 #undef DEFINE_INSTRUCTION
 #undef DEFINE_INSTRUCTION2
 
+  void Pref(int32_t hint, const MemOperand& rs);
+
 
   // ---------------------------------------------------------------------------
   // Pseudo-instructions.
 
   void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
 
+  void Ulw(Register rd, const MemOperand& rs);
+  void Usw(Register rd, const MemOperand& rs);
+
   // Load int32 in the rd register.
   void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
   inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 3d4617a..0db010e 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -899,12 +899,12 @@
   __ sw(receiver, MemOperand(sp, FCA::kHolderIndex * kPointerSize));
   // Write receiver to stack frame.
   int index = stack_space - 1;
-  __ sw(receiver, MemOperand(sp, index * kPointerSize));
+  __ sw(receiver, MemOperand(sp, index-- * kPointerSize));
   // Write the arguments to stack frame.
   for (int i = 0; i < argc; i++) {
     ASSERT(!receiver.is(values[i]));
     ASSERT(!scratch.is(values[i]));
-    __ sw(receiver, MemOperand(sp, index-- * kPointerSize));
+    __ sw(values[i], MemOperand(sp, index-- * kPointerSize));
   }
 
   GenerateFastApiDirectCall(masm, optimization, argc, true);
@@ -915,12 +915,10 @@
  public:
   CallInterceptorCompiler(CallStubCompiler* stub_compiler,
                           const ParameterCount& arguments,
-                          Register name,
-                          ExtraICState extra_ic_state)
+                          Register name)
       : stub_compiler_(stub_compiler),
         arguments_(arguments),
-        name_(name),
-        extra_ic_state_(extra_ic_state) {}
+        name_(name) {}
 
   void Compile(MacroAssembler* masm,
                Handle<JSObject> object,
@@ -1095,7 +1093,6 @@
   CallStubCompiler* stub_compiler_;
   const ParameterCount& arguments_;
   Register name_;
-  ExtraICState extra_ic_state_;
 };
 
 
@@ -2463,7 +2460,7 @@
   // Get the receiver from the stack.
   __ lw(a1, MemOperand(sp, argc * kPointerSize));
 
-  CallInterceptorCompiler compiler(this, arguments(), a2, extra_state());
+  CallInterceptorCompiler compiler(this, arguments(), a2);
   compiler.Compile(masm(), object, holder, name, &lookup, a1, a3, t0, a0,
                    &miss);
 
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index 381c9aa..1da9852 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -903,7 +903,7 @@
         source->ToCString(DISALLOW_NULLS,
                           FAST_STRING_TRAVERSAL,
                           start, length, NULL);
-    PrintF(out, "%s", *source_string);
+    PrintF(out, "%s", source_string.get());
   }
   // Script files are often large, hard to read.
   // PrintF(out, "\n - script =");
diff --git a/src/objects.cc b/src/objects.cc
index f96fd83..43a047e 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -1683,7 +1683,7 @@
       SmartArrayPointer<char> debug_name =
           shared->DebugName()->ToCString();
       if (debug_name[0] != 0) {
-        accumulator->Add("<SharedFunctionInfo %s>", *debug_name);
+        accumulator->Add("<SharedFunctionInfo %s>", debug_name.get());
       } else {
         accumulator->Add("<SharedFunctionInfo>");
       }
@@ -9913,7 +9913,7 @@
 
 void JSFunction::PrintName(FILE* out) {
   SmartArrayPointer<char> name = shared()->DebugName()->ToCString();
-  PrintF(out, "%s", *name);
+  PrintF(out, "%s", name.get());
 }
 
 
diff --git a/src/objects.h b/src/objects.h
index bbdefc1..f09d58f 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -6494,12 +6494,6 @@
   V(Math, ceil, MathCeil)                           \
   V(Math, abs, MathAbs)                             \
   V(Math, log, MathLog)                             \
-  V(Math, sin, MathSin)                             \
-  V(Math, cos, MathCos)                             \
-  V(Math, tan, MathTan)                             \
-  V(Math, asin, MathASin)                           \
-  V(Math, acos, MathACos)                           \
-  V(Math, atan, MathATan)                           \
   V(Math, exp, MathExp)                             \
   V(Math, sqrt, MathSqrt)                           \
   V(Math, pow, MathPow)                             \
diff --git a/src/parser.cc b/src/parser.cc
index b168919..bfb5bd1 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -600,7 +600,7 @@
     } else if (info()->script()->name()->IsString()) {
       String* name = String::cast(info()->script()->name());
       SmartArrayPointer<char> name_chars = name->ToCString();
-      PrintF("[parsing script: %s", *name_chars);
+      PrintF("[parsing script: %s", name_chars.get());
     } else {
       PrintF("[parsing script");
     }
@@ -729,7 +729,7 @@
   if (FLAG_trace_parse && result != NULL) {
     double ms = timer.Elapsed().InMillisecondsF();
     SmartArrayPointer<char> name_chars = result->debug_name()->ToCString();
-    PrintF("[parsing function: %s - took %0.3f ms]\n", *name_chars, ms);
+    PrintF("[parsing function: %s - took %0.3f ms]\n", name_chars.get(), ms);
   }
   return result;
 }
@@ -1521,7 +1521,7 @@
         // In harmony mode we treat re-declarations as early errors. See
         // ES5 16 for a definition of early errors.
         SmartArrayPointer<char> c_string = name->ToCString(DISALLOW_NULLS);
-        const char* elms[2] = { "Variable", *c_string };
+        const char* elms[2] = { "Variable", c_string.get() };
         Vector<const char*> args(elms, 2);
         ReportMessage("redeclaration", args);
         *ok = false;
@@ -2145,7 +2145,7 @@
     // make later anyway so we should go back and fix this then.
     if (ContainsLabel(labels, label) || TargetStackContainsLabel(label)) {
       SmartArrayPointer<char> c_string = label->ToCString(DISALLOW_NULLS);
-      const char* elms[2] = { "Label", *c_string };
+      const char* elms[2] = { "Label", c_string.get() };
       Vector<const char*> args(elms, 2);
       ReportMessage("redeclaration", args);
       *ok = false;
@@ -3512,7 +3512,7 @@
 
 void Parser::ReportInvalidPreparseData(Handle<String> name, bool* ok) {
   SmartArrayPointer<char> name_string = name->ToCString(DISALLOW_NULLS);
-  const char* element[1] = { *name_string };
+  const char* element[1] = { name_string.get() };
   ReportMessage("invalid_preparser_data",
                 Vector<const char*>(element, 1));
   *ok = false;
@@ -4601,7 +4601,7 @@
     // errors. See ES5 16 for a definition of early errors.
     Handle<String> name = decl->proxy()->name();
     SmartArrayPointer<char> c_string = name->ToCString(DISALLOW_NULLS);
-    const char* elms[2] = { "Variable", *c_string };
+    const char* elms[2] = { "Variable", c_string.get() };
     Vector<const char*> args(elms, 2);
     int position = decl->proxy()->position();
     Scanner::Location location = position == RelocInfo::kNoPosition
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index 879dcc8..0070bdf 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -302,9 +302,6 @@
   return (*fast_##name##_function)(x);                   \
 }
 
-UNARY_MATH_FUNCTION(sin, CreateTranscendentalFunction(TranscendentalCache::SIN))
-UNARY_MATH_FUNCTION(cos, CreateTranscendentalFunction(TranscendentalCache::COS))
-UNARY_MATH_FUNCTION(tan, CreateTranscendentalFunction(TranscendentalCache::TAN))
 UNARY_MATH_FUNCTION(log, CreateTranscendentalFunction(TranscendentalCache::LOG))
 UNARY_MATH_FUNCTION(exp, CreateExpFunction())
 UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction())
@@ -506,6 +503,12 @@
     OS::MemCopyUint8Function stub);
 OS::MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
     OS::MemCopyUint16Uint8Function stub);
+
+#elif defined(V8_HOST_ARCH_MIPS)
+OS::MemCopyUint8Function OS::memcopy_uint8_function = &OS::MemCopyUint8Wrapper;
+// Defined in codegen-mips.cc.
+OS::MemCopyUint8Function CreateMemCopyUint8Function(
+    OS::MemCopyUint8Function stub);
 #endif
 
 
@@ -520,10 +523,10 @@
       CreateMemCopyUint8Function(&OS::MemCopyUint8Wrapper);
   OS::memcopy_uint16_uint8_function =
       CreateMemCopyUint16Uint8Function(&OS::MemCopyUint16Uint8Wrapper);
+#elif defined(V8_HOST_ARCH_MIPS)
+  OS::memcopy_uint8_function =
+      CreateMemCopyUint8Function(&OS::MemCopyUint8Wrapper);
 #endif
-  init_fast_sin_function();
-  init_fast_cos_function();
-  init_fast_tan_function();
   init_fast_log_function();
   // fast_exp is initialized lazily.
   init_fast_sqrt_function();
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index ea11806..7ca5e55 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -196,9 +196,6 @@
   return (*fast_##name##_function)(x);                   \
 }
 
-UNARY_MATH_FUNCTION(sin, CreateTranscendentalFunction(TranscendentalCache::SIN))
-UNARY_MATH_FUNCTION(cos, CreateTranscendentalFunction(TranscendentalCache::COS))
-UNARY_MATH_FUNCTION(tan, CreateTranscendentalFunction(TranscendentalCache::TAN))
 UNARY_MATH_FUNCTION(log, CreateTranscendentalFunction(TranscendentalCache::LOG))
 UNARY_MATH_FUNCTION(exp, CreateExpFunction())
 UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction())
@@ -217,9 +214,6 @@
 #ifdef _WIN64
   init_modulo_function();
 #endif
-  init_fast_sin_function();
-  init_fast_cos_function();
-  init_fast_tan_function();
   init_fast_log_function();
   // fast_exp is initialized lazily.
   init_fast_sqrt_function();
diff --git a/src/platform.h b/src/platform.h
index 3bd87a9..f94a526 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -96,9 +96,6 @@
 double modulo(double x, double y);
 
 // Custom implementation of math functions.
-double fast_sin(double input);
-double fast_cos(double input);
-double fast_tan(double input);
 double fast_log(double input);
 double fast_exp(double input);
 double fast_sqrt(double input);
@@ -368,6 +365,26 @@
                                  size_t size) {
     (*memcopy_uint16_uint8_function)(dest, src, size);
   }
+#elif defined(V8_HOST_ARCH_MIPS)
+  typedef void (*MemCopyUint8Function)(uint8_t* dest,
+                                       const uint8_t* src,
+                                       size_t size);
+  static MemCopyUint8Function memcopy_uint8_function;
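+  // Note (added): this initially points at MemCopyUint8Wrapper (plain memcpy)
+  // and is swapped for the generated MIPS stub once CreateMemCopyUint8Function
+  // has run (see the platform-posix.cc hunk above).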
+  static void MemCopyUint8Wrapper(uint8_t* dest,
+                                  const uint8_t* src,
+                                  size_t chars) {
+    memcpy(dest, src, chars);
+  }
+  // For values < 16, the assembler function is slower than the inlined C code.
+  static const int kMinComplexMemCopy = 16;
+  static void MemCopy(void* dest, const void* src, size_t size) {
+    (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
+                              reinterpret_cast<const uint8_t*>(src),
+                              size);
+  }
+  static void MemMove(void* dest, const void* src, size_t size) {
+    memmove(dest, src, size);
+  }
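+  // Usage sketch (added, hypothetical call sites): OS::MemCopy always
+  // dispatches through memcopy_uint8_function, while helpers such as
+  // CopyCharsUnsigned (see the v8utils.h hunk below) compare the length
+  // against kMinComplexMemCopy and keep short copies inline, e.g.:
+  //   if (n < OS::kMinComplexMemCopy) memcpy(dst, src, n);
+  //   else OS::MemCopy(dst, src, n);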
 #else
   // Copy memory area to disjoint memory area.
   static void MemCopy(void* dest, const void* src, size_t size) {
diff --git a/src/regexp-macro-assembler-tracer.cc b/src/regexp-macro-assembler-tracer.cc
index 1ce1fa4..c446b4b 100644
--- a/src/regexp-macro-assembler-tracer.cc
+++ b/src/regexp-macro-assembler-tracer.cc
@@ -427,7 +427,7 @@
 
 
 Handle<HeapObject> RegExpMacroAssemblerTracer::GetCode(Handle<String> source) {
-  PrintF(" GetCode(%s);\n", *(source->ToCString()));
+  PrintF(" GetCode(%s);\n", source->ToCString().get());
   return assembler_->GetCode(source);
 }
 
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index 691fc66..8c4b11f 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -45,24 +45,6 @@
 namespace internal {
 
 
-// Optimization sampler constants.
-static const int kSamplerFrameCount = 2;
-
-// Constants for statistical profiler.
-static const int kSamplerFrameWeight[kSamplerFrameCount] = { 2, 1 };
-
-static const int kSamplerTicksBetweenThresholdAdjustment = 32;
-
-static const int kSamplerThresholdInit = 3;
-static const int kSamplerThresholdMin = 1;
-static const int kSamplerThresholdDelta = 1;
-
-static const int kSamplerThresholdSizeFactorInit = 3;
-
-static const int kSizeLimit = 1500;
-
-// Constants for counter based profiler.
-
 // Number of times a function has to be seen on the stack before it is
 // optimized.
 static const int kProfilerTicksBeforeOptimization = 2;
@@ -94,14 +76,7 @@
 
 RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
     : isolate_(isolate),
-      sampler_threshold_(kSamplerThresholdInit),
-      sampler_threshold_size_factor_(kSamplerThresholdSizeFactorInit),
-      sampler_ticks_until_threshold_adjustment_(
-          kSamplerTicksBetweenThresholdAdjustment),
-      sampler_window_position_(0),
-      any_ic_changed_(false),
-      code_generated_(false) {
-  ClearSampleBuffer();
+      any_ic_changed_(false) {
 }
 
 
@@ -189,38 +164,6 @@
 }
 
 
-void RuntimeProfiler::ClearSampleBuffer() {
-  memset(sampler_window_, 0, sizeof(sampler_window_));
-  memset(sampler_window_weight_, 0, sizeof(sampler_window_weight_));
-}
-
-
-int RuntimeProfiler::LookupSample(JSFunction* function) {
-  int weight = 0;
-  for (int i = 0; i < kSamplerWindowSize; i++) {
-    Object* sample = sampler_window_[i];
-    if (sample != NULL) {
-      bool fits = FLAG_lookup_sample_by_shared
-          ? (function->shared() == JSFunction::cast(sample)->shared())
-          : (function == JSFunction::cast(sample));
-      if (fits) {
-        weight += sampler_window_weight_[i];
-      }
-    }
-  }
-  return weight;
-}
-
-
-void RuntimeProfiler::AddSample(JSFunction* function, int weight) {
-  ASSERT(IsPowerOf2(kSamplerWindowSize));
-  sampler_window_[sampler_window_position_] = function;
-  sampler_window_weight_[sampler_window_position_] = weight;
-  sampler_window_position_ = (sampler_window_position_ + 1) &
-      (kSamplerWindowSize - 1);
-}
-
-
 void RuntimeProfiler::OptimizeNow() {
   HandleScope scope(isolate_);
 
@@ -231,34 +174,14 @@
   // Run through the JavaScript frames and collect them. If we already
   // have a sample of the function, we mark it for optimizations
   // (eagerly or lazily).
-  JSFunction* samples[kSamplerFrameCount];
-  int sample_count = 0;
   int frame_count = 0;
-  int frame_count_limit = FLAG_watch_ic_patching ? FLAG_frame_count
-                                                 : kSamplerFrameCount;
+  int frame_count_limit = FLAG_frame_count;
   for (JavaScriptFrameIterator it(isolate_);
        frame_count++ < frame_count_limit && !it.done();
        it.Advance()) {
     JavaScriptFrame* frame = it.frame();
     JSFunction* function = frame->function();
 
-    if (!FLAG_watch_ic_patching) {
-      // Adjust threshold each time we have processed
-      // a certain number of ticks.
-      if (sampler_ticks_until_threshold_adjustment_ > 0) {
-        sampler_ticks_until_threshold_adjustment_--;
-        if (sampler_ticks_until_threshold_adjustment_ <= 0) {
-          // If the threshold is not already at the minimum
-          // modify and reset the ticks until next adjustment.
-          if (sampler_threshold_ > kSamplerThresholdMin) {
-            sampler_threshold_ -= kSamplerThresholdDelta;
-            sampler_ticks_until_threshold_adjustment_ =
-                kSamplerTicksBetweenThresholdAdjustment;
-          }
-        }
-      }
-    }
-
     SharedFunctionInfo* shared = function->shared();
     Code* shared_code = shared->code();
 
@@ -322,116 +245,36 @@
     }
     if (!function->IsOptimizable()) continue;
 
-    if (FLAG_watch_ic_patching) {
-      int ticks = shared_code->profiler_ticks();
+    int ticks = shared_code->profiler_ticks();
 
-      if (ticks >= kProfilerTicksBeforeOptimization) {
-        int typeinfo, total, percentage;
-        GetICCounts(shared_code, &typeinfo, &total, &percentage);
-        if (percentage >= FLAG_type_info_threshold) {
-          // If this particular function hasn't had any ICs patched for enough
-          // ticks, optimize it now.
-          Optimize(function, "hot and stable");
-        } else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
-          Optimize(function, "not much type info but very hot");
-        } else {
-          shared_code->set_profiler_ticks(ticks + 1);
-          if (FLAG_trace_opt_verbose) {
-            PrintF("[not yet optimizing ");
-            function->PrintName();
-            PrintF(", not enough type info: %d/%d (%d%%)]\n",
-                   typeinfo, total, percentage);
-          }
-        }
-      } else if (!any_ic_changed_ &&
-                 shared_code->instruction_size() < kMaxSizeEarlyOpt) {
-        // If no IC was patched since the last tick and this function is very
-        // small, optimistically optimize it now.
-        Optimize(function, "small function");
+    if (ticks >= kProfilerTicksBeforeOptimization) {
+      int typeinfo, total, percentage;
+      GetICCounts(shared_code, &typeinfo, &total, &percentage);
+      if (percentage >= FLAG_type_info_threshold) {
+        // If this particular function hasn't had any ICs patched for enough
+        // ticks, optimize it now.
+        Optimize(function, "hot and stable");
+      } else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
+        Optimize(function, "not much type info but very hot");
       } else {
         shared_code->set_profiler_ticks(ticks + 1);
+        if (FLAG_trace_opt_verbose) {
+          PrintF("[not yet optimizing ");
+          function->PrintName();
+          PrintF(", not enough type info: %d/%d (%d%%)]\n",
+                 typeinfo, total, percentage);
+        }
       }
-    } else {  // !FLAG_watch_ic_patching
-      samples[sample_count++] = function;
-
-      int function_size = function->shared()->SourceSize();
-      int threshold_size_factor = (function_size > kSizeLimit)
-          ? sampler_threshold_size_factor_
-          : 1;
-
-      int threshold = sampler_threshold_ * threshold_size_factor;
-
-      if (LookupSample(function) >= threshold) {
-        Optimize(function, "sampler window lookup");
-      }
+    } else if (!any_ic_changed_ &&
+               shared_code->instruction_size() < kMaxSizeEarlyOpt) {
+      // If no IC was patched since the last tick and this function is very
+      // small, optimistically optimize it now.
+      Optimize(function, "small function");
+    } else {
+      shared_code->set_profiler_ticks(ticks + 1);
     }
   }
-  if (FLAG_watch_ic_patching) {
-    any_ic_changed_ = false;
-  } else {  // !FLAG_watch_ic_patching
-    // Add the collected functions as samples. It's important not to do
-    // this as part of collecting them because this will interfere with
-    // the sample lookup in case of recursive functions.
-    for (int i = 0; i < sample_count; i++) {
-      AddSample(samples[i], kSamplerFrameWeight[i]);
-    }
-  }
-}
-
-
-void RuntimeProfiler::SetUp() {
-  if (!FLAG_watch_ic_patching) {
-    ClearSampleBuffer();
-  }
-}
-
-
-void RuntimeProfiler::Reset() {
-  if (!FLAG_watch_ic_patching) {
-    sampler_threshold_ = kSamplerThresholdInit;
-    sampler_threshold_size_factor_ = kSamplerThresholdSizeFactorInit;
-    sampler_ticks_until_threshold_adjustment_ =
-        kSamplerTicksBetweenThresholdAdjustment;
-  }
-}
-
-
-void RuntimeProfiler::TearDown() {
-  // Nothing to do.
-}
-
-
-// Update the pointers in the sampler window after a GC.
-void RuntimeProfiler::UpdateSamplesAfterScavenge() {
-  for (int i = 0; i < kSamplerWindowSize; i++) {
-    Object* function = sampler_window_[i];
-    if (function != NULL && isolate_->heap()->InNewSpace(function)) {
-      MapWord map_word = HeapObject::cast(function)->map_word();
-      if (map_word.IsForwardingAddress()) {
-        sampler_window_[i] = map_word.ToForwardingAddress();
-      } else {
-        sampler_window_[i] = NULL;
-      }
-    }
-  }
-}
-
-
-void RuntimeProfiler::RemoveDeadSamples() {
-  for (int i = 0; i < kSamplerWindowSize; i++) {
-    Object* function = sampler_window_[i];
-    if (function != NULL &&
-        !Marking::MarkBitFrom(HeapObject::cast(function)).Get()) {
-      sampler_window_[i] = NULL;
-    }
-  }
-}
-
-
-void RuntimeProfiler::UpdateSamplesAfterCompact(ObjectVisitor* visitor) {
-  for (int i = 0; i < kSamplerWindowSize; i++) {
-    visitor->VisitPointer(&sampler_window_[i]);
-  }
+  any_ic_changed_ = false;
 }
 
 
diff --git a/src/runtime-profiler.h b/src/runtime-profiler.h
index 28d6d32..efd9b50 100644
--- a/src/runtime-profiler.h
+++ b/src/runtime-profiler.h
@@ -45,47 +45,18 @@
 
   void OptimizeNow();
 
-  void SetUp();
-  void Reset();
-  void TearDown();
-
   void NotifyICChanged() { any_ic_changed_ = true; }
 
-  // Rate limiting support.
-
-  void UpdateSamplesAfterScavenge();
-  void RemoveDeadSamples();
-  void UpdateSamplesAfterCompact(ObjectVisitor* visitor);
-
   void AttemptOnStackReplacement(JSFunction* function);
 
  private:
-  static const int kSamplerWindowSize = 16;
-
   void Optimize(JSFunction* function, const char* reason);
 
-  void ClearSampleBuffer();
-
-  void ClearSampleBufferNewSpaceEntries();
-
-  int LookupSample(JSFunction* function);
-
-  void AddSample(JSFunction* function, int weight);
-
   bool CodeSizeOKForOSR(Code* shared_code);
 
   Isolate* isolate_;
 
-  int sampler_threshold_;
-  int sampler_threshold_size_factor_;
-  int sampler_ticks_until_threshold_adjustment_;
-
-  Object* sampler_window_[kSamplerWindowSize];
-  int sampler_window_position_;
-  int sampler_window_weight_[kSamplerWindowSize];
-
   bool any_ic_changed_;
-  bool code_generated_;
 };
 
 } }  // namespace v8::internal
diff --git a/src/runtime.cc b/src/runtime.cc
index 7dc96f2..e763d43 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -7655,7 +7655,7 @@
   isolate->counters()->math_acos()->Increment();
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
-  return isolate->transcendental_cache()->Get(TranscendentalCache::ACOS, x);
+  return isolate->heap()->AllocateHeapNumber(acos(x));
 }
 
 
@@ -7665,7 +7665,7 @@
   isolate->counters()->math_asin()->Increment();
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
-  return isolate->transcendental_cache()->Get(TranscendentalCache::ASIN, x);
+  return isolate->heap()->AllocateHeapNumber(asin(x));
 }
 
 
@@ -7675,7 +7675,7 @@
   isolate->counters()->math_atan()->Increment();
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
-  return isolate->transcendental_cache()->Get(TranscendentalCache::ATAN, x);
+  return isolate->heap()->AllocateHeapNumber(atan(x));
 }
 
 
@@ -7705,16 +7705,6 @@
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_cos) {
-  SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
-  isolate->counters()->math_cos()->Increment();
-
-  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
-  return isolate->transcendental_cache()->Get(TranscendentalCache::COS, x);
-}
-
-
 RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_exp) {
   SealHandleScope shs(isolate);
   ASSERT(args.length() == 1);
@@ -7742,7 +7732,7 @@
   isolate->counters()->math_log()->Increment();
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
-  return isolate->transcendental_cache()->Get(TranscendentalCache::LOG, x);
+  return isolate->heap()->AllocateHeapNumber(fast_log(x));
 }
 
 
@@ -7831,16 +7821,6 @@
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sin) {
-  SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
-  isolate->counters()->math_sin()->Increment();
-
-  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
-  return isolate->transcendental_cache()->Get(TranscendentalCache::SIN, x);
-}
-
-
 RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sqrt) {
   SealHandleScope shs(isolate);
   ASSERT(args.length() == 1);
@@ -7851,16 +7831,6 @@
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_tan) {
-  SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
-  isolate->counters()->math_tan()->Increment();
-
-  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
-  return isolate->transcendental_cache()->Get(TranscendentalCache::TAN, x);
-}
-
-
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DateMakeDay) {
   SealHandleScope shs(isolate);
   ASSERT(args.length() == 2);
@@ -8234,7 +8204,7 @@
   bool exception = false;
   Handle<Object> result =
       Execution::New(Handle<JSFunction>::cast(bound_function),
-                     total_argc, *param_data, &exception);
+                     total_argc, param_data.get(), &exception);
   if (exception) {
     return Failure::Exception();
   }
@@ -8407,10 +8377,11 @@
     function->ReplaceCode(function->shared()->code());
     return isolate->heap()->undefined_value();
   }
-  function->shared()->code()->set_profiler_ticks(0);
+  Handle<Code> shared_code(function->shared()->code());
+  shared_code->set_profiler_ticks(0);
   ASSERT(isolate->concurrent_recompilation_enabled());
-  if (!Compiler::RecompileConcurrent(function)) {
-    function->ReplaceCode(function->shared()->code());
+  if (!Compiler::RecompileConcurrent(function, shared_code)) {
+    function->ReplaceCode(*shared_code);
   }
   return isolate->heap()->undefined_value();
 }
@@ -8660,20 +8631,27 @@
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  ASSERT(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
-  CONVERT_NUMBER_CHECKED(uint32_t, pc_offset, Uint32, args[1]);
   Handle<Code> unoptimized(function->shared()->code(), isolate);
 
-#ifdef DEBUG
+  // Passing the PC in the JavaScript frame from the caller directly is
+  // not GC-safe, so we walk the stack to get it.
   JavaScriptFrameIterator it(isolate);
   JavaScriptFrame* frame = it.frame();
+  if (!unoptimized->contains(frame->pc())) {
+    // Code on the stack may not be the code object referenced by the shared
+    // function info.  It may have been replaced to include deoptimization data.
+    unoptimized = Handle<Code>(frame->LookupCode());
+  }
+
+  uint32_t pc_offset = static_cast<uint32_t>(frame->pc() -
+                                             unoptimized->instruction_start());
+
+#ifdef DEBUG
   ASSERT_EQ(frame->function(), *function);
   ASSERT_EQ(frame->LookupCode(), *unoptimized);
   ASSERT(unoptimized->contains(frame->pc()));
-
-  ASSERT(pc_offset ==
-         static_cast<uint32_t>(frame->pc() - unoptimized->instruction_start()));
 #endif  // DEBUG
 
   // We're not prepared to handle a function with arguments object.
@@ -8699,12 +8677,12 @@
 
     if (job == NULL) {
       if (IsSuitableForOnStackReplacement(isolate, function, unoptimized) &&
-          Compiler::RecompileConcurrent(function, pc_offset)) {
+          Compiler::RecompileConcurrent(function, unoptimized, pc_offset)) {
         if (function->IsMarkedForLazyRecompilation() ||
             function->IsMarkedForConcurrentRecompilation()) {
           // Prevent regular recompilation if we queue this for OSR.
           // TODO(yangguo): remove this as soon as OSR becomes one-shot.
-          function->ReplaceCode(*unoptimized);
+          function->ReplaceCode(function->shared()->code());
         }
         return NULL;
       }
@@ -13560,7 +13538,7 @@
   CONVERT_ARG_CHECKED(String, arg, 0);
   SmartArrayPointer<char> flags =
       arg->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-  FlagList::SetFlagsFromString(*flags, StrLength(*flags));
+  FlagList::SetFlagsFromString(flags.get(), StrLength(flags.get()));
   return isolate->heap()->undefined_value();
 }
 
@@ -14335,7 +14313,7 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(String, message, 0);
-  OS::PrintError("abort: %s\n", *message->ToCString());
+  OS::PrintError("abort: %s\n", message->ToCString().get());
   isolate->PrintStack(stderr);
   OS::Abort();
   UNREACHABLE();
diff --git a/src/runtime.h b/src/runtime.h
index 0f920c7..95fb419 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -101,7 +101,7 @@
   F(GetOptimizationStatus, -1, 1) \
   F(GetOptimizationCount, 1, 1) \
   F(UnblockConcurrentRecompilation, 0, 1) \
-  F(CompileForOnStackReplacement, 2, 1) \
+  F(CompileForOnStackReplacement, 1, 1) \
   F(SetAllocationTimeout, 2, 1) \
   F(AllocateInNewSpace, 1, 1) \
   F(AllocateInTargetSpace, 2, 1) \
@@ -180,16 +180,13 @@
   F(Math_asin, 1, 1) \
   F(Math_atan, 1, 1) \
   F(Math_atan2, 2, 1) \
-  F(Math_cos, 1, 1) \
   F(Math_exp, 1, 1) \
   F(Math_floor, 1, 1) \
   F(Math_log, 1, 1) \
   F(Math_pow, 2, 1) \
   F(Math_pow_cfunction, 2, 1) \
   F(RoundNumber, 1, 1) \
-  F(Math_sin, 1, 1) \
   F(Math_sqrt, 1, 1) \
-  F(Math_tan, 1, 1) \
   \
   /* Regular expressions */ \
   F(RegExpCompile, 3, 1) \
diff --git a/src/scanner-character-streams.cc b/src/scanner-character-streams.cc
index fb50345..cbef3f9 100644
--- a/src/scanner-character-streams.cc
+++ b/src/scanner-character-streams.cc
@@ -213,11 +213,11 @@
 
 
 static const byte kUtf8MultiByteMask = 0xC0;
-static const byte kUtf8MultiByteCharStart = 0xC0;
 static const byte kUtf8MultiByteCharFollower = 0x80;
 
 
 #ifdef DEBUG
+static const byte kUtf8MultiByteCharStart = 0xC0;
 static bool IsUtf8MultiCharacterStart(byte first_byte) {
   return (first_byte & kUtf8MultiByteMask) == kUtf8MultiByteCharStart;
 }
diff --git a/src/scopes.cc b/src/scopes.cc
index fefc696..a84d4e7 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -800,7 +800,7 @@
 
 static void PrintName(Handle<String> name) {
   SmartArrayPointer<char> s = name->ToCString(DISALLOW_NULLS);
-  PrintF("%s", *s);
+  PrintF("%s", s.get());
 }
 
 
diff --git a/src/smart-pointers.h b/src/smart-pointers.h
index 7c35b2a..7203c16 100644
--- a/src/smart-pointers.h
+++ b/src/smart-pointers.h
@@ -36,35 +36,31 @@
 class SmartPointerBase {
  public:
   // Default constructor. Constructs an empty scoped pointer.
-  inline SmartPointerBase() : p_(NULL) {}
+  SmartPointerBase() : p_(NULL) {}
 
   // Constructs a scoped pointer from a plain one.
-  explicit inline SmartPointerBase(T* ptr) : p_(ptr) {}
+  explicit SmartPointerBase(T* ptr) : p_(ptr) {}
 
   // Copy constructor removes the pointer from the original to avoid double
   // freeing.
-  inline SmartPointerBase(const SmartPointerBase<Deallocator, T>& rhs)
+  SmartPointerBase(const SmartPointerBase<Deallocator, T>& rhs)
       : p_(rhs.p_) {
     const_cast<SmartPointerBase<Deallocator, T>&>(rhs).p_ = NULL;
   }
 
-  // When the destructor of the scoped pointer is executed the plain pointer
-  // is deleted using DeleteArray.  This implies that you must allocate with
-  // NewArray.
-  inline ~SmartPointerBase() { if (p_) Deallocator::Delete(p_); }
+  T* operator->() const { return p_; }
 
-  inline T* operator->() const { return p_; }
+  T& operator*() const { return *p_; }
 
-  // You can get the underlying pointer out with the * operator.
-  inline T* operator*() { return p_; }
+  T* get() const { return p_; }
 
   // You can use [n] to index as if it was a plain pointer.
-  inline T& operator[](size_t i) {
+  T& operator[](size_t i) {
     return p_[i];
   }
 
   // You can use [n] to index as if it was a plain pointer.
-  const inline T& operator[](size_t i) const {
+  const T& operator[](size_t i) const {
     return p_[i];
   }
 
@@ -76,13 +72,14 @@
   // If you want to take out the plain pointer and don't want it automatically
   // deleted then call Detach().  Afterwards, the smart pointer is empty
   // (NULL).
-  inline T* Detach() {
+  T* Detach() {
     T* temp = p_;
     p_ = NULL;
     return temp;
   }
 
-  inline void Reset(T* new_value) {
+  void Reset(T* new_value) {
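+    // Note (added): resetting to the pointer already held would delete it and
+    // then keep the now-dangling pointer, hence the assert below.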
+    ASSERT(p_ == NULL || p_ != new_value);
     if (p_) Deallocator::Delete(p_);
     p_ = new_value;
   }
@@ -90,7 +87,7 @@
   // Assignment requires an empty (NULL) SmartArrayPointer as the receiver. Like
   // the copy constructor it removes the pointer in the original to avoid
   // double freeing.
-  inline SmartPointerBase<Deallocator, T>& operator=(
+  SmartPointerBase<Deallocator, T>& operator=(
       const SmartPointerBase<Deallocator, T>& rhs) {
     ASSERT(is_empty());
     T* tmp = rhs.p_;  // swap to handle self-assignment
@@ -99,7 +96,13 @@
     return *this;
   }
 
-  inline bool is_empty() { return p_ == NULL; }
+  bool is_empty() const { return p_ == NULL; }
+
+ protected:
+  // When the destructor of the scoped pointer is executed the plain pointer
+  // is deleted using DeleteArray.  This implies that you must allocate with
+  // NewArray.
+  ~SmartPointerBase() { if (p_) Deallocator::Delete(p_); }
 
  private:
   T* p_;
@@ -119,10 +122,10 @@
 template<typename T>
 class SmartArrayPointer: public SmartPointerBase<ArrayDeallocator<T>, T> {
  public:
-  inline SmartArrayPointer() { }
-  explicit inline SmartArrayPointer(T* ptr)
+  SmartArrayPointer() { }
+  explicit SmartArrayPointer(T* ptr)
       : SmartPointerBase<ArrayDeallocator<T>, T>(ptr) { }
-  inline SmartArrayPointer(const SmartArrayPointer<T>& rhs)
+  SmartArrayPointer(const SmartArrayPointer<T>& rhs)
       : SmartPointerBase<ArrayDeallocator<T>, T>(rhs) { }
 };
 
@@ -138,10 +141,10 @@
 template<typename T>
 class SmartPointer: public SmartPointerBase<ObjectDeallocator<T>, T> {
  public:
-  inline SmartPointer() { }
-  explicit inline SmartPointer(T* ptr)
+  SmartPointer() { }
+  explicit SmartPointer(T* ptr)
       : SmartPointerBase<ObjectDeallocator<T>, T>(ptr) { }
-  inline SmartPointer(const SmartPointer<T>& rhs)
+  SmartPointer(const SmartPointer<T>& rhs)
       : SmartPointerBase<ObjectDeallocator<T>, T>(rhs) { }
 };
 
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index eec5baf..a562ace 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -1110,7 +1110,7 @@
 Handle<Code> StubCompiler::GetCodeWithFlags(Code::Flags flags,
                                             Handle<Name> name) {
   return (FLAG_print_code_stubs && !name.is_null() && name->IsString())
-      ? GetCodeWithFlags(flags, *Handle<String>::cast(name)->ToCString())
+      ? GetCodeWithFlags(flags, Handle<String>::cast(name)->ToCString().get())
       : GetCodeWithFlags(flags, NULL);
 }
 
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 9178046..72f5650 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -242,15 +242,12 @@
   SC(math_asin, V8.MathAsin)                                          \
   SC(math_atan, V8.MathAtan)                                          \
   SC(math_atan2, V8.MathAtan2)                                        \
-  SC(math_cos, V8.MathCos)                                            \
   SC(math_exp, V8.MathExp)                                            \
   SC(math_floor, V8.MathFloor)                                        \
   SC(math_log, V8.MathLog)                                            \
   SC(math_pow, V8.MathPow)                                            \
   SC(math_round, V8.MathRound)                                        \
-  SC(math_sin, V8.MathSin)                                            \
   SC(math_sqrt, V8.MathSqrt)                                          \
-  SC(math_tan, V8.MathTan)                                            \
   SC(transcendental_cache_hit, V8.TranscendentalCacheHit)             \
   SC(transcendental_cache_miss, V8.TranscendentalCacheMiss)           \
   SC(stack_interrupts, V8.StackInterrupts)                            \
diff --git a/src/v8utils.h b/src/v8utils.h
index 02e57eb..058b153 100644
--- a/src/v8utils.h
+++ b/src/v8utils.h
@@ -266,6 +266,9 @@
 INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars));
 INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint8_t* src, int chars));
 INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars));
+#elif defined(V8_HOST_ARCH_MIPS)
+INLINE(void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars));
+INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars));
 #endif
 
 // Copy from ASCII/16bit chars to ASCII/16bit chars.
@@ -421,6 +424,24 @@
       break;
   }
 }
+
+
+#elif defined(V8_HOST_ARCH_MIPS)
+void CopyCharsUnsigned(uint8_t* dest, const uint8_t* src, int chars) {
+  if (chars < OS::kMinComplexMemCopy) {
+    memcpy(dest, src, chars);
+  } else {
+    OS::MemCopy(dest, src, chars);
+  }
+}
+
+void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars) {
+  if (chars < OS::kMinComplexMemCopy) {
+    memcpy(dest, src, chars * sizeof(*dest));
+  } else {
+    OS::MemCopy(dest, src, chars * sizeof(*dest));
+  }
+}
 #endif
 
 
diff --git a/src/version.cc b/src/version.cc
index ef9c078..1dca166 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -33,8 +33,8 @@
 // NOTE these macros are used by some of the tool scripts and the build
 // system so their names cannot be changed without changing the scripts.
 #define MAJOR_VERSION     3
-#define MINOR_VERSION     23
-#define BUILD_NUMBER      18
+#define MINOR_VERSION     24
+#define BUILD_NUMBER      0
 #define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index f4864f8..aef9164 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -1393,17 +1393,9 @@
   __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
-    // Lookup and calculate pc offset.
-    __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerPCOffset));
-    __ movq(rbx, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
-    __ subq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
-    __ subq(rdx, FieldOperand(rbx, SharedFunctionInfo::kCodeOffset));
-    __ Integer32ToSmi(rdx, rdx);
-
-    // Pass both function and pc offset as arguments.
+    // Pass function as argument.
     __ push(rax);
-    __ push(rdx);
-    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 2);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
   }
 
   Label skip;
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 0c9a0f2..2f65856 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -789,9 +789,6 @@
 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
   switch (type_) {
     // Add more cases when necessary.
-    case TranscendentalCache::SIN: return Runtime::kMath_sin;
-    case TranscendentalCache::COS: return Runtime::kMath_cos;
-    case TranscendentalCache::TAN: return Runtime::kMath_tan;
     case TranscendentalCache::LOG: return Runtime::kMath_log;
     default:
       UNIMPLEMENTED();
@@ -807,99 +804,10 @@
   // rbx: Bits of input double. Must be preserved.
   // rcx: Pointer to cache entry. Must be preserved.
   // st(0): Input double
-  Label done;
-  if (type == TranscendentalCache::SIN ||
-      type == TranscendentalCache::COS ||
-      type == TranscendentalCache::TAN) {
-    // Both fsin and fcos require arguments in the range +/-2^63 and
-    // return NaN for infinities and NaN. They can share all code except
-    // the actual fsin/fcos operation.
-    Label in_range;
-    // If argument is outside the range -2^63..2^63, fsin/cos doesn't
-    // work. We must reduce it to the appropriate range.
-    __ movq(rdi, rbx);
-    // Move exponent and sign bits to low bits.
-    __ shr(rdi, Immediate(HeapNumber::kMantissaBits));
-    // Remove sign bit.
-    __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1));
-    int supported_exponent_limit = (63 + HeapNumber::kExponentBias);
-    __ cmpl(rdi, Immediate(supported_exponent_limit));
-    __ j(below, &in_range);
-    // Check for infinity and NaN. Both return NaN for sin.
-    __ cmpl(rdi, Immediate(0x7ff));
-    Label non_nan_result;
-    __ j(not_equal, &non_nan_result, Label::kNear);
-    // Input is +/-Infinity or NaN. Result is NaN.
-    __ fstp(0);
-    // NaN is represented by 0x7ff8000000000000.
-    __ subq(rsp, Immediate(kPointerSize));
-    __ movl(Operand(rsp, 4), Immediate(0x7ff80000));
-    __ movl(Operand(rsp, 0), Immediate(0x00000000));
-    __ fld_d(Operand(rsp, 0));
-    __ addq(rsp, Immediate(kPointerSize));
-    __ jmp(&done);
-
-    __ bind(&non_nan_result);
-
-    // Use fpmod to restrict argument to the range +/-2*PI.
-    __ movq(rdi, rax);  // Save rax before using fnstsw_ax.
-    __ fldpi();
-    __ fadd(0);
-    __ fld(1);
-    // FPU Stack: input, 2*pi, input.
-    {
-      Label no_exceptions;
-      __ fwait();
-      __ fnstsw_ax();
-      // Clear if Illegal Operand or Zero Division exceptions are set.
-      __ testl(rax, Immediate(5));  // #IO and #ZD flags of FPU status word.
-      __ j(zero, &no_exceptions);
-      __ fnclex();
-      __ bind(&no_exceptions);
-    }
-
-    // Compute st(0) % st(1)
-    {
-      Label partial_remainder_loop;
-      __ bind(&partial_remainder_loop);
-      __ fprem1();
-      __ fwait();
-      __ fnstsw_ax();
-      __ testl(rax, Immediate(0x400));  // Check C2 bit of FPU status word.
-      // If C2 is set, computation only has partial result. Loop to
-      // continue computation.
-      __ j(not_zero, &partial_remainder_loop);
-  }
-    // FPU Stack: input, 2*pi, input % 2*pi
-    __ fstp(2);
-    // FPU Stack: input % 2*pi, 2*pi,
-    __ fstp(0);
-    // FPU Stack: input % 2*pi
-    __ movq(rax, rdi);  // Restore rax, pointer to the new HeapNumber.
-    __ bind(&in_range);
-    switch (type) {
-      case TranscendentalCache::SIN:
-        __ fsin();
-        break;
-      case TranscendentalCache::COS:
-        __ fcos();
-        break;
-      case TranscendentalCache::TAN:
-        // FPTAN calculates tangent onto st(0) and pushes 1.0 onto the
-        // FP register stack.
-        __ fptan();
-        __ fstp(0);  // Pop FP register stack.
-        break;
-      default:
-        UNREACHABLE();
-    }
-    __ bind(&done);
-  } else {
-    ASSERT(type == TranscendentalCache::LOG);
-    __ fldln2();
-    __ fxch();
-    __ fyl2x();
-  }
+  ASSERT(type == TranscendentalCache::LOG);
+  __ fldln2();
+  __ fxch();
+  __ fyl2x();
 }
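
With the SIN/COS/TAN cases gone, the only operation left in this stub is the natural logarithm, computed with the x87 sequence fldln2; fxch; fyl2x. fyl2x evaluates st(1) * log2(st(0)), so loading ln 2 first yields ln(2) * log2(x) = ln(x). A quick standalone check of that identity:

#include <cmath>
#include <cstdio>

int main() {
  // fyl2x computes st(1) * log2(st(0)); with st(1) = ln 2 this is ln(x).
  double x = 5.0;
  printf("%.15f\n", std::log(2.0) * std::log2(x));  // 1.609437912434100
  printf("%.15f\n", std::log(x));                   // same value
  return 0;
}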
 
 
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index afe0e3b..96d930e 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -64,9 +64,6 @@
   if (buffer == NULL) {
     // Fallback to library function if function cannot be created.
     switch (type) {
-      case TranscendentalCache::SIN: return &sin;
-      case TranscendentalCache::COS: return &cos;
-      case TranscendentalCache::TAN: return &tan;
       case TranscendentalCache::LOG: return &log;
       default: UNIMPLEMENTED();
     }
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index e479368..473f548 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -310,10 +310,6 @@
 
 void FullCodeGenerator::EmitProfilingCounterReset() {
   int reset_value = FLAG_interrupt_budget;
-  if (info_->ShouldSelfOptimize() && !FLAG_retry_self_opt) {
-    // Self-optimization is a one-off thing; if it fails, don't try again.
-    reset_value = Smi::kMaxValue;
-  }
   __ movq(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
   __ Move(kScratchRegister, Smi::FromInt(reset_value));
   __ movq(FieldOperand(rbx, Cell::kValueOffset), kScratchRegister);
@@ -325,13 +321,10 @@
   Comment cmnt(masm_, "[ Back edge bookkeeping");
   Label ok;
 
-  int weight = 1;
-  if (FLAG_weighted_back_edges) {
-    ASSERT(back_edge_target->is_bound());
-    int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
-    weight = Min(kMaxBackEdgeWeight,
-                 Max(1, distance / kCodeSizeMultiplier));
-  }
+  ASSERT(back_edge_target->is_bound());
+  int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+  int weight = Min(kMaxBackEdgeWeight,
+                   Max(1, distance / kCodeSizeMultiplier));
   EmitProfilingCounterDecrement(weight);
   __ j(positive, &ok, Label::kNear);
   __ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
@@ -362,31 +355,24 @@
       __ push(rax);
       __ CallRuntime(Runtime::kTraceExit, 1);
     }
-    if (FLAG_interrupt_at_exit || FLAG_self_optimization) {
-      // Pretend that the exit is a backwards jump to the entry.
-      int weight = 1;
-      if (info_->ShouldSelfOptimize()) {
-        weight = FLAG_interrupt_budget / FLAG_self_opt_count;
-      } else if (FLAG_weighted_back_edges) {
-        int distance = masm_->pc_offset();
-        weight = Min(kMaxBackEdgeWeight,
-                     Max(1, distance / kCodeSizeMultiplier));
-      }
-      EmitProfilingCounterDecrement(weight);
-      Label ok;
-      __ j(positive, &ok, Label::kNear);
-      __ push(rax);
-      if (info_->ShouldSelfOptimize() && FLAG_direct_self_opt) {
-        __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
-        __ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
-      } else {
-        __ call(isolate()->builtins()->InterruptCheck(),
-                RelocInfo::CODE_TARGET);
-      }
-      __ pop(rax);
-      EmitProfilingCounterReset();
-      __ bind(&ok);
+    // Pretend that the exit is a backwards jump to the entry.
+    int weight = 1;
+    if (info_->ShouldSelfOptimize()) {
+      weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+    } else {
+      int distance = masm_->pc_offset();
+      weight = Min(kMaxBackEdgeWeight,
+                   Max(1, distance / kCodeSizeMultiplier));
     }
+    EmitProfilingCounterDecrement(weight);
+    Label ok;
+    __ j(positive, &ok, Label::kNear);
+    __ push(rax);
+    __ call(isolate()->builtins()->InterruptCheck(),
+            RelocInfo::CODE_TARGET);
+    __ pop(rax);
+    EmitProfilingCounterReset();
+    __ bind(&ok);
 #ifdef DEBUG
     // Add a label for checking the size of the code used for returning.
     Label check_exit_codesize;
@@ -4840,9 +4826,11 @@
 
 static const byte kJnsInstruction = 0x79;
 static const byte kJnsOffset = 0x1d;
-static const byte kCallInstruction = 0xe8;
 static const byte kNopByteOne = 0x66;
 static const byte kNopByteTwo = 0x90;
+#ifdef DEBUG
+static const byte kCallInstruction = 0xe8;
+#endif
 
 
 void BackEdgeTable::PatchAt(Code* unoptimized_code,
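
In the back-edge bookkeeping above, the distance-based weight is now computed unconditionally rather than behind FLAG_weighted_back_edges, and the interrupt check at function exit is likewise no longer guarded by FLAG_interrupt_at_exit: the profiling counter is decremented by the code distance since the loop target divided by kCodeSizeMultiplier, clamped to [1, kMaxBackEdgeWeight]. A small sketch of that clamp, with made-up constants standing in for the real per-architecture values:

#include <algorithm>
#include <cstdio>

// Made-up values; the real kMaxBackEdgeWeight and kCodeSizeMultiplier are
// defined per architecture in full-codegen.
static const int kMaxBackEdgeWeight = 127;
static const int kCodeSizeMultiplier = 123;

static int BackEdgeWeight(int distance) {
  return std::min(kMaxBackEdgeWeight,
                  std::max(1, distance / kCodeSizeMultiplier));
}

int main() {
  printf("%d %d %d\n",
         BackEdgeWeight(0),         // 1: tiny loops still decrement
         BackEdgeWeight(2500),      // proportional to code size
         BackEdgeWeight(1 << 20));  // clamped at kMaxBackEdgeWeight
  return 0;
}
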
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 3ab5236..7c9949a 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -3747,39 +3747,6 @@
 }
 
 
-void LCodeGen::DoMathTan(LMathTan* instr) {
-  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
-  // Set the context register to a GC-safe fake value. Clobbering it is
-  // OK because this instruction is marked as a call.
-  __ Set(rsi, 0);
-  TranscendentalCacheStub stub(TranscendentalCache::TAN,
-                               TranscendentalCacheStub::UNTAGGED);
-  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathCos(LMathCos* instr) {
-  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
-  // Set the context register to a GC-safe fake value. Clobbering it is
-  // OK because this instruction is marked as a call.
-  __ Set(rsi, 0);
-  TranscendentalCacheStub stub(TranscendentalCache::COS,
-                               TranscendentalCacheStub::UNTAGGED);
-  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
-void LCodeGen::DoMathSin(LMathSin* instr) {
-  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
-  // Set the context register to a GC-safe fake value. Clobbering it is
-  // OK because this instruction is marked as a call.
-  __ Set(rsi, 0);
-  TranscendentalCacheStub stub(TranscendentalCache::SIN,
-                               TranscendentalCacheStub::UNTAGGED);
-  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-}
-
-
 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
   ASSERT(ToRegister(instr->context()).is(rsi));
   ASSERT(ToRegister(instr->function()).is(rdi));
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 0f7ebc4..974a970 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -259,7 +259,7 @@
   stream->Add("if typeof ");
   value()->PrintTo(stream);
   stream->Add(" == \"%s\" then B%d else B%d",
-              *hydrogen()->type_literal()->ToCString(),
+              hydrogen()->type_literal()->ToCString().get(),
               true_block_id(), false_block_id());
 }
 
@@ -312,13 +312,13 @@
 
 void LCallNamed::PrintDataTo(StringStream* stream) {
   SmartArrayPointer<char> name_string = name()->ToCString();
-  stream->Add("%s #%d / ", *name_string, arity());
+  stream->Add("%s #%d / ", name_string.get(), arity());
 }
 
 
 void LCallGlobal::PrintDataTo(StringStream* stream) {
   SmartArrayPointer<char> name_string = name()->ToCString();
-  stream->Add("%s #%d / ", *name_string, arity());
+  stream->Add("%s #%d / ", name_string.get(), arity());
 }
 
 
@@ -384,7 +384,7 @@
 void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
   object()->PrintTo(stream);
   stream->Add(".");
-  stream->Add(*String::cast(*name())->ToCString());
+  stream->Add(String::cast(*name())->ToCString().get());
   stream->Add(" <- ");
   value()->PrintTo(stream);
 }
@@ -1187,9 +1187,6 @@
     case kMathRound: return DoMathRound(instr);
     case kMathAbs: return DoMathAbs(instr);
     case kMathLog: return DoMathLog(instr);
-    case kMathSin: return DoMathSin(instr);
-    case kMathCos: return DoMathCos(instr);
-    case kMathTan: return DoMathTan(instr);
     case kMathExp: return DoMathExp(instr);
     case kMathSqrt: return DoMathSqrt(instr);
     case kMathPowHalf: return DoMathPowHalf(instr);
@@ -1231,27 +1228,6 @@
 }
 
 
-LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
-  LOperand* input = UseFixedDouble(instr->value(), xmm1);
-  LMathSin* result = new(zone()) LMathSin(input);
-  return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
-  LOperand* input = UseFixedDouble(instr->value(), xmm1);
-  LMathCos* result = new(zone()) LMathCos(input);
-  return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
-}
-
-
-LInstruction* LChunkBuilder::DoMathTan(HUnaryMathOperation* instr) {
-  LOperand* input = UseFixedDouble(instr->value(), xmm1);
-  LMathTan* result = new(zone()) LMathTan(input);
-  return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
-}
-
-
 LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
   ASSERT(instr->representation().IsDouble());
   ASSERT(instr->value()->representation().IsDouble());
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index 44bd992..e644c2d 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -131,7 +131,6 @@
   V(LoadNamedGeneric)                           \
   V(MapEnumLength)                              \
   V(MathAbs)                                    \
-  V(MathCos)                                    \
   V(MathExp)                                    \
   V(MathFloor)                                  \
   V(MathFloorOfDiv)                             \
@@ -139,9 +138,7 @@
   V(MathMinMax)                                 \
   V(MathPowHalf)                                \
   V(MathRound)                                  \
-  V(MathSin)                                    \
   V(MathSqrt)                                   \
-  V(MathTan)                                    \
   V(ModI)                                       \
   V(MulI)                                       \
   V(NumberTagD)                                 \
@@ -770,42 +767,6 @@
 };
 
 
-class LMathSin V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LMathSin(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
-};
-
-
-class LMathCos V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LMathCos(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
-};
-
-
-class LMathTan V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
-  explicit LMathTan(LOperand* value) {
-    inputs_[0] = value;
-  }
-
-  LOperand* value() { return inputs_[0]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(MathTan, "math-tan")
-};
-
-
 class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 2> {
  public:
   LMathExp(LOperand* value, LOperand* temp1, LOperand* temp2) {
@@ -2705,9 +2666,6 @@
   LInstruction* DoMathRound(HUnaryMathOperation* instr);
   LInstruction* DoMathAbs(HUnaryMathOperation* instr);
   LInstruction* DoMathLog(HUnaryMathOperation* instr);
-  LInstruction* DoMathSin(HUnaryMathOperation* instr);
-  LInstruction* DoMathCos(HUnaryMathOperation* instr);
-  LInstruction* DoMathTan(HUnaryMathOperation* instr);
   LInstruction* DoMathExp(HUnaryMathOperation* instr);
   LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
   LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 9bf6e9f..9cfcf46 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -639,12 +639,10 @@
  public:
   CallInterceptorCompiler(CallStubCompiler* stub_compiler,
                           const ParameterCount& arguments,
-                          Register name,
-                          ExtraICState extra_ic_state)
+                          Register name)
       : stub_compiler_(stub_compiler),
         arguments_(arguments),
-        name_(name),
-        extra_ic_state_(extra_ic_state) {}
+        name_(name) {}
 
   void Compile(MacroAssembler* masm,
                Handle<JSObject> object,
@@ -820,7 +818,6 @@
   CallStubCompiler* stub_compiler_;
   const ParameterCount& arguments_;
   Register name_;
-  ExtraICState extra_ic_state_;
 };
 
 
@@ -2497,7 +2494,7 @@
   StackArgumentsAccessor args(rsp, arguments());
   __ movq(rdx, args.GetReceiverOperand());
 
-  CallInterceptorCompiler compiler(this, arguments(), rcx, extra_state());
+  CallInterceptorCompiler compiler(this, arguments(), rcx);
   compiler.Compile(masm(), object, holder, name, &lookup, rdx, rbx, rdi, rax,
                    &miss);
 
diff --git a/test/cctest/test-accessors.cc b/test/cctest/test-accessors.cc
index 142687b..2c257ee 100644
--- a/test/cctest/test-accessors.cc
+++ b/test/cctest/test-accessors.cc
@@ -205,21 +205,22 @@
     "var result = [];"
     "var key_0 = 'x0';"
     "var key_1 = 'x1';"
-    "for (var i = 0; i < 10; i++) {"
+    "for (var j = 0; j < 10; j++) {"
+    "  var i = 4*j;"
     "  holder.x0 = i;"
     "  result.push(obj.x0);"
-    "  holder.x1 = i;"
+    "  holder.x1 = i + 1;"
     "  result.push(obj.x1);"
-    "  holder[key_0] = i;"
+    "  holder[key_0] = i + 2;"
     "  result.push(obj[key_0]);"
-    "  holder[key_1] = i;"
+    "  holder[key_1] = i + 3;"
     "  result.push(obj[key_1]);"
     "}"
     "result"));
   CHECK_EQ(40, array->Length());
   for (int i = 0; i < 40; i++) {
     v8::Handle<Value> entry = array->Get(v8::Integer::New(i));
-    CHECK_EQ(v8::Integer::New(i/4), entry);
+    CHECK_EQ(v8::Integer::New(i), entry);
   }
 }
 
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index e8852e3..6cdc500 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -1195,14 +1195,14 @@
 template<typename T>
 Handle<Value> TestFastReturnValues() {
   LocalContext env;
-  v8::HandleScope scope(env->GetIsolate());
+  v8::EscapableHandleScope scope(env->GetIsolate());
   v8::Handle<v8::ObjectTemplate> object_template = v8::ObjectTemplate::New();
   v8::FunctionCallback callback = &FastReturnValueCallback<T>;
   object_template->Set(env->GetIsolate(), "callback",
                        v8::FunctionTemplate::New(callback));
   v8::Local<v8::Object> object = object_template->NewInstance();
   (*env)->Global()->Set(v8_str("callback_object"), object);
-  return scope.Close(CompileRun("callback_object.callback()"));
+  return scope.Escape(CompileRun("callback_object.callback()"));
 }
 
 
@@ -4188,12 +4188,12 @@
 
 
 void HandleF(const v8::FunctionCallbackInfo<v8::Value>& args) {
-  v8::HandleScope scope(args.GetIsolate());
+  v8::EscapableHandleScope scope(args.GetIsolate());
   ApiTestFuzzer::Fuzz();
   Local<v8::Array> result = v8::Array::New(args.GetIsolate(), args.Length());
   for (int i = 0; i < args.Length(); i++)
     result->Set(i, args[i]);
-  args.GetReturnValue().Set(scope.Close(result));
+  args.GetReturnValue().Set(scope.Escape(result));
 }
 
 
@@ -6454,9 +6454,12 @@
 template <typename T> static void USE(T) { }
 
 
-// This test is not intended to be run, just type checked.
-static inline void PersistentHandles(v8::Isolate* isolate) {
-  USE(PersistentHandles);
+// The point of this test is type checking. We run it only so compilers
+// don't complain about an unused function.
+TEST(PersistentHandles) {
+  LocalContext env;
+  v8::Isolate* isolate = CcTest::isolate();
+  v8::HandleScope scope(isolate);
   Local<String> str = v8_str("foo");
   v8::Persistent<String> p_str(isolate, str);
   p_str.Reset();
@@ -13236,10 +13239,10 @@
 
 
 static v8::Handle<Value> NestedScope(v8::Local<Context> env) {
-  v8::HandleScope inner(env->GetIsolate());
+  v8::EscapableHandleScope inner(env->GetIsolate());
   env->Enter();
-  v8::Handle<Value> three = v8_num(3);
-  v8::Handle<Value> value = inner.Close(three);
+  v8::Local<Value> three = v8_num(3);
+  v8::Local<Value> value = inner.Escape(three);
   env->Exit();
   return value;
 }
@@ -13865,10 +13868,10 @@
   v8::HandleScope outer(isolate);
   static v8::Persistent<v8::ObjectTemplate> templ;
   if (templ.IsEmpty()) {
-    v8::HandleScope inner(isolate);
-    v8::Handle<v8::ObjectTemplate> local = v8::ObjectTemplate::New();
+    v8::EscapableHandleScope inner(isolate);
+    v8::Local<v8::ObjectTemplate> local = v8::ObjectTemplate::New();
     local->SetInternalFieldCount(1);
-    templ.Reset(isolate, inner.Close(local));
+    templ.Reset(isolate, inner.Escape(local));
   }
   v8::Handle<v8::Object> result =
       v8::Local<v8::ObjectTemplate>::New(isolate, templ)->NewInstance();
@@ -18088,8 +18091,6 @@
 }
 
 
-static const int k0CacheSize = 16;
-
 THREADED_TEST(FillJSFunctionResultCache) {
   i::FLAG_allow_natives_syntax = true;
   LocalContext context;
@@ -18276,7 +18277,8 @@
   const int aligned_length = length*sizeof(uintptr_t)/sizeof(uint16_t);
   i::SmartArrayPointer<uintptr_t>
   aligned_contents(new uintptr_t[aligned_length]);
-  uint16_t* string_contents = reinterpret_cast<uint16_t*>(*aligned_contents);
+  uint16_t* string_contents =
+      reinterpret_cast<uint16_t*>(aligned_contents.get());
   // Set to contain only one byte.
   for (int i = 0; i < length-1; i++) {
     string_contents[i] = 0x41;
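
The test changes above, like the similar ones throughout this patch, move from HandleScope with Close() to the EscapableHandleScope API: a helper that allocates handles in its own scope calls Escape() on the one value it wants to hand back, which promotes that handle into the enclosing scope while the other temporaries die with the inner scope. A short usage sketch against the v8.h API at this revision (the helper itself is hypothetical):

// Hypothetical helper; assumes the v8.h API at this revision.
#include <v8.h>

v8::Local<v8::Array> MakePair(v8::Isolate* isolate) {
  v8::EscapableHandleScope scope(isolate);
  v8::Local<v8::Array> result = v8::Array::New(isolate, 2);
  result->Set(0, v8::Integer::New(1));
  result->Set(1, v8::Integer::New(2));
  // Only 'result' escapes into the caller's handle scope.
  return scope.Escape(result);
}
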
diff --git a/test/cctest/test-cpu-profiler.cc b/test/cctest/test-cpu-profiler.cc
index 9627277..9ddfde0 100644
--- a/test/cctest/test-cpu-profiler.cc
+++ b/test/cctest/test-cpu-profiler.cc
@@ -59,10 +59,6 @@
 }
 
 
-static inline i::Address ToAddress(int n) {
-  return reinterpret_cast<i::Address>(n);
-}
-
 static void EnqueueTickSampleEvent(ProfilerEventsProcessor* proc,
                                    i::Address frame1,
                                    i::Address frame2 = NULL,
@@ -147,7 +143,7 @@
   SmartPointer<ProfilerEventsProcessor> processor(new ProfilerEventsProcessor(
           &generator, NULL, TimeDelta::FromMicroseconds(100)));
   processor->Start();
-  CpuProfiler profiler(isolate, profiles, &generator, *processor);
+  CpuProfiler profiler(isolate, profiles, &generator, processor.get());
 
   // Enqueue code creation events.
   const char* aaa_str = "aaa";
@@ -162,7 +158,7 @@
   profiler.CodeCreateEvent(i::Logger::STUB_TAG, args4_code, 4);
 
   // Enqueue a tick event to enable code events processing.
-  EnqueueTickSampleEvent(*processor, aaa_code->address());
+  EnqueueTickSampleEvent(processor.get(), aaa_code->address());
 
   processor->StopSynchronously();
 
@@ -209,19 +205,19 @@
   SmartPointer<ProfilerEventsProcessor> processor(new ProfilerEventsProcessor(
           &generator, NULL, TimeDelta::FromMicroseconds(100)));
   processor->Start();
-  CpuProfiler profiler(isolate, profiles, &generator, *processor);
+  CpuProfiler profiler(isolate, profiles, &generator, processor.get());
 
   profiler.CodeCreateEvent(i::Logger::BUILTIN_TAG, frame1_code, "bbb");
   profiler.CodeCreateEvent(i::Logger::STUB_TAG, frame2_code, 5);
   profiler.CodeCreateEvent(i::Logger::BUILTIN_TAG, frame3_code, "ddd");
 
-  EnqueueTickSampleEvent(*processor, frame1_code->instruction_start());
+  EnqueueTickSampleEvent(processor.get(), frame1_code->instruction_start());
   EnqueueTickSampleEvent(
-      *processor,
+      processor.get(),
       frame2_code->instruction_start() + frame2_code->ExecutableSize() / 2,
       frame1_code->instruction_start() + frame2_code->ExecutableSize() / 2);
   EnqueueTickSampleEvent(
-      *processor,
+      processor.get(),
       frame3_code->instruction_end() - 1,
       frame2_code->instruction_end() - 1,
       frame1_code->instruction_end() - 1);
@@ -278,7 +274,7 @@
   SmartPointer<ProfilerEventsProcessor> processor(new ProfilerEventsProcessor(
           &generator, NULL, TimeDelta::FromMicroseconds(100)));
   processor->Start();
-  CpuProfiler profiler(isolate, profiles, &generator, *processor);
+  CpuProfiler profiler(isolate, profiles, &generator, processor.get());
 
   profiler.CodeCreateEvent(i::Logger::BUILTIN_TAG, code, "bbb");
 
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index 8c6f418..3cb8400 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -70,65 +70,6 @@
 // Size of temp buffer for formatting small strings.
 #define SMALL_STRING_BUFFER_SIZE 80
 
-// --- A d d i t i o n a l   C h e c k   H e l p e r s
-
-
-// Helper function used by the CHECK_EQ function when given Address
-// arguments.  Should not be called directly.
-static inline void CheckEqualsHelper(const char* file, int line,
-                                     const char* expected_source,
-                                     ::v8::internal::Address expected,
-                                     const char* value_source,
-                                     ::v8::internal::Address value) {
-  if (expected != value) {
-    V8_Fatal(file, line, "CHECK_EQ(%s, %s) failed\n#   "
-                         "Expected: %i\n#   Found: %i",
-             expected_source, value_source, expected, value);
-  }
-}
-
-
-// Helper function used by the CHECK_NE function when given Address
-// arguments.  Should not be called directly.
-static inline void CheckNonEqualsHelper(const char* file, int line,
-                                        const char* unexpected_source,
-                                        ::v8::internal::Address unexpected,
-                                        const char* value_source,
-                                        ::v8::internal::Address value) {
-  if (unexpected == value) {
-    V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n#   Value: %i",
-             unexpected_source, value_source, value);
-  }
-}
-
-
-// Helper function used by the CHECK function when given code
-// arguments.  Should not be called directly.
-static inline void CheckEqualsHelper(const char* file, int line,
-                                     const char* expected_source,
-                                     const Code* expected,
-                                     const char* value_source,
-                                     const Code* value) {
-  if (expected != value) {
-    V8_Fatal(file, line, "CHECK_EQ(%s, %s) failed\n#   "
-                         "Expected: %p\n#   Found: %p",
-             expected_source, value_source, expected, value);
-  }
-}
-
-
-static inline void CheckNonEqualsHelper(const char* file, int line,
-                                        const char* expected_source,
-                                        const Code* expected,
-                                        const char* value_source,
-                                        const Code* value) {
-  if (expected == value) {
-    V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n#   Value: %p",
-             expected_source, value_source, value);
-  }
-}
-
-
 // --- H e l p e r   C l a s s e s
 
 
@@ -6123,7 +6064,7 @@
 
   void Run();
   void WaitForListening() { listening_.Wait(); }
-  char* body() { return *body_; }
+  char* body() { return body_.get(); }
 
  private:
   int port_;
diff --git a/test/cctest/test-declarative-accessors.cc b/test/cctest/test-declarative-accessors.cc
index af3ffc3..aefd59d 100644
--- a/test/cctest/test-declarative-accessors.cc
+++ b/test/cctest/test-declarative-accessors.cc
@@ -177,7 +177,7 @@
   v8::Handle<v8::Value> expected = Convert(value, helper->isolate_);
   helper->array_->Reset();
   helper->array_->As<T*>()[index] = value;
-  VerifyRead(descriptor, internal_field, *helper->array_, expected);
+  VerifyRead(descriptor, internal_field, helper->array_.get(), expected);
 }
 
 
@@ -223,7 +223,7 @@
       CHECK(false);
       break;
   }
-  AlignedArray* array = *helper->array_;
+  AlignedArray* array = helper->array_.get();
   array->Reset();
   VerifyRead(descriptor, internal_field, array, v8::False(helper->isolate_));
   array->As<T*>()[index] = compare_value;
@@ -251,7 +251,7 @@
       OOD::NewInternalFieldDereference(helper.isolate_, internal_field)
       ->NewRawShift(helper.isolate_, static_cast<uint16_t>(index*sizeof(ptr)))
       ->NewPointerCompare(helper.isolate_, ptr);
-  AlignedArray* array = *helper.array_;
+  AlignedArray* array = helper.array_.get();
   VerifyRead(descriptor, internal_field, array, v8::False(helper.isolate_));
   array->As<uintptr_t*>()[index] = reinterpret_cast<uintptr_t>(ptr);
   VerifyRead(descriptor, internal_field, array, v8::True(helper.isolate_));
@@ -273,7 +273,7 @@
       ->NewRawShift(helper.isolate_,
                     static_cast<uint16_t>(second_index*sizeof(int16_t)))
       ->NewPrimitiveValue(helper.isolate_, v8::kDescriptorInt16Type, 0);
-  AlignedArray* array = *helper.array_;
+  AlignedArray* array = helper.array_.get();
   array->As<uintptr_t**>()[first_index] =
       &array->As<uintptr_t*>()[pointed_to_index];
   VerifyRead(descriptor, internal_field, array, v8::Integer::New(0));
@@ -292,7 +292,7 @@
       OOD::NewInternalFieldDereference(helper.isolate_, internal_field)
       ->NewRawShift(helper.isolate_, index*kPointerSize)
       ->NewHandleDereference(helper.isolate_);
-  HandleArray* array = *helper.handle_array_;
+  HandleArray* array = helper.handle_array_.get();
   v8::Handle<v8::String> expected = v8_str("whatever");
   array->handles_[index].Reset(helper.isolate_, expected);
   VerifyRead(descriptor, internal_field, array, expected);
diff --git a/test/cctest/test-func-name-inference.cc b/test/cctest/test-func-name-inference.cc
index 4e9d1b1..d7c5083 100644
--- a/test/cctest/test-func-name-inference.cc
+++ b/test/cctest/test-func-name-inference.cc
@@ -89,7 +89,7 @@
   // Verify inferred function name.
   SmartArrayPointer<char> inferred_name =
       shared_func_info->inferred_name()->ToCString();
-  CHECK_EQ(ref_inferred_name, *inferred_name);
+  CHECK_EQ(ref_inferred_name, inferred_name.get());
 #endif  // ENABLE_DEBUGGER_SUPPORT
 }
 
diff --git a/test/cctest/test-heap-profiler.cc b/test/cctest/test-heap-profiler.cc
index 7a27495..0a81632 100644
--- a/test/cctest/test-heap-profiler.cc
+++ b/test/cctest/test-heap-profiler.cc
@@ -1581,9 +1581,9 @@
 
 static int StringCmp(const char* ref, i::String* act) {
   i::SmartArrayPointer<char> s_act = act->ToCString();
-  int result = strcmp(ref, *s_act);
+  int result = strcmp(ref, s_act.get());
   if (result != 0)
-    fprintf(stderr, "Expected: \"%s\", Actual: \"%s\"\n", ref, *s_act);
+    fprintf(stderr, "Expected: \"%s\", Actual: \"%s\"\n", ref, s_act.get());
   return result;
 }
 
diff --git a/test/cctest/test-parsing.cc b/test/cctest/test-parsing.cc
index 89a1e5b..e53250d 100644
--- a/test/cctest/test-parsing.cc
+++ b/test/cctest/test-parsing.cc
@@ -390,13 +390,13 @@
 
   size_t kProgramSize = 1024 * 1024;
   i::SmartArrayPointer<char> program(i::NewArray<char>(kProgramSize + 1));
-  memset(*program, '(', kProgramSize);
+  memset(program.get(), '(', kProgramSize);
   program[kProgramSize] = '\0';
 
   uintptr_t stack_limit = CcTest::i_isolate()->stack_guard()->real_climit();
 
   i::Utf8ToUtf16CharacterStream stream(
-      reinterpret_cast<const i::byte*>(*program),
+      reinterpret_cast<const i::byte*>(program.get()),
       static_cast<unsigned>(kProgramSize));
   i::CompleteParserRecorder log;
   i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
@@ -447,7 +447,7 @@
   i::Vector<const char> ascii_vector(ascii_source, static_cast<int>(length));
   i::Handle<i::String> ascii_string(
       factory->NewStringFromAscii(ascii_vector));
-  TestExternalResource resource(*uc16_buffer, length);
+  TestExternalResource resource(uc16_buffer.get(), length);
   i::Handle<i::String> uc16_string(
       factory->NewExternalStringFromTwoByte(&resource));
 
@@ -1152,7 +1152,7 @@
           "with error:\n"
           "\t%s\n"
           "However, the preparser succeeded",
-          *source->ToCString(), *message_string->ToCString());
+          source->ToCString().get(), message_string->ToCString().get());
       CHECK(false);
     }
     // Check that preparser and parser produce the same error.
@@ -1164,9 +1164,9 @@
           "However, found the following error messages\n"
           "\tparser:    %s\n"
           "\tpreparser: %s\n",
-          *source->ToCString(),
-          *message_string->ToCString(),
-          *preparser_message->ToCString());
+          source->ToCString().get(),
+          message_string->ToCString().get(),
+          preparser_message->ToCString().get());
       CHECK(false);
     }
   } else if (data.has_error()) {
@@ -1176,7 +1176,7 @@
         "with error:\n"
         "\t%s\n"
         "However, the parser succeeded",
-        *source->ToCString(), *FormatMessage(&data)->ToCString());
+        source->ToCString().get(), FormatMessage(&data)->ToCString().get());
     CHECK(false);
   }
 }
diff --git a/test/cctest/test-regexp.cc b/test/cctest/test-regexp.cc
index cc94646..aef1e03 100644
--- a/test/cctest/test-regexp.cc
+++ b/test/cctest/test-regexp.cc
@@ -132,7 +132,7 @@
 
 
 #define CHECK_PARSE_ERROR(input) CHECK(!CheckParse(input))
-#define CHECK_PARSE_EQ(input, expected) CHECK_EQ(expected, *Parse(input))
+#define CHECK_PARSE_EQ(input, expected) CHECK_EQ(expected, Parse(input).get())
 #define CHECK_SIMPLE(input, simple) CHECK_EQ(simple, CheckSimple(input));
 #define CHECK_MIN_MAX(input, min, max)                                         \
   { MinMaxPair min_max = CheckMinMaxMatch(input);                              \
@@ -399,7 +399,7 @@
   CHECK(result.tree == NULL);
   CHECK(!result.error.is_null());
   SmartArrayPointer<char> str = result.error->ToCString(ALLOW_NULLS);
-  CHECK_EQ(expected, *str);
+  CHECK_EQ(expected, str.get());
 }
 
 
@@ -430,7 +430,7 @@
     accumulator.Add("()");
   }
   SmartArrayPointer<const char> many_captures(accumulator.ToCString());
-  ExpectError(*many_captures, kTooManyCaptures);
+  ExpectError(many_captures.get(), kTooManyCaptures);
 }
 
 
diff --git a/test/cctest/test-reloc-info.cc b/test/cctest/test-reloc-info.cc
index e638201..5ab9e80 100644
--- a/test/cctest/test-reloc-info.cc
+++ b/test/cctest/test-reloc-info.cc
@@ -47,8 +47,8 @@
   const int buffer_size = code_size + relocation_info_size;
   SmartArrayPointer<byte> buffer(new byte[buffer_size]);
 
-  byte* pc = *buffer;
-  byte* buffer_end = *buffer + buffer_size;
+  byte* pc = buffer.get();
+  byte* buffer_end = buffer.get() + buffer_size;
 
   RelocInfoWriter writer(buffer_end, pc);
   byte* relocation_info_end = buffer_end - relocation_info_size;
@@ -60,13 +60,13 @@
   }
 
   relocation_info_size = static_cast<int>(buffer_end - writer.pos());
-  CodeDesc desc = { *buffer, buffer_size, code_size,
+  CodeDesc desc = { buffer.get(), buffer_size, code_size,
                     relocation_info_size, NULL };
 
   // Read only (non-statement) positions.
   {
     RelocIterator it(desc, RelocInfo::ModeMask(RelocInfo::POSITION));
-    pc = *buffer;
+    pc = buffer.get();
     for (int i = 0, pos = 0; i < 100; i++, pc += i, pos += i) {
       RelocInfo::Mode mode = (i % 2 == 0) ?
           RelocInfo::STATEMENT_POSITION : RelocInfo::POSITION;
@@ -83,7 +83,7 @@
   // Read only statement positions.
   {
     RelocIterator it(desc, RelocInfo::ModeMask(RelocInfo::STATEMENT_POSITION));
-    pc = *buffer;
+    pc = buffer.get();
     for (int i = 0, pos = 0; i < 100; i++, pc += i, pos += i) {
       RelocInfo::Mode mode = (i % 2 == 0) ?
           RelocInfo::STATEMENT_POSITION : RelocInfo::POSITION;
@@ -100,7 +100,7 @@
   // Read both types of positions.
   {
     RelocIterator it(desc, RelocInfo::kPositionMask);
-    pc = *buffer;
+    pc = buffer.get();
     for (int i = 0, pos = 0; i < 100; i++, pc += i, pos += i) {
       RelocInfo::Mode mode = (i % 2 == 0) ?
           RelocInfo::STATEMENT_POSITION : RelocInfo::POSITION;
diff --git a/test/cctest/test-strings.cc b/test/cctest/test-strings.cc
index 6f03633..45a6a1b 100644
--- a/test/cctest/test-strings.cc
+++ b/test/cctest/test-strings.cc
@@ -1175,7 +1175,7 @@
   CHECK(result->IsString());
   string = v8::Utils::OpenHandle(v8::String::Cast(*result));
   CHECK(string->IsSlicedString());
-  CHECK_EQ("bcdefghijklmnopqrstuvwxy", *(string->ToCString()));
+  CHECK_EQ("bcdefghijklmnopqrstuvwxy", string->ToCString().get());
 }
 
 
@@ -1197,14 +1197,14 @@
   string = v8::Utils::OpenHandle(v8::String::Cast(*result));
   CHECK(string->IsSlicedString());
   CHECK(SlicedString::cast(*string)->parent()->IsSeqString());
-  CHECK_EQ("bcdefghijklmnopqrstuvwxy", *(string->ToCString()));
+  CHECK_EQ("bcdefghijklmnopqrstuvwxy", string->ToCString().get());
 
   result = CompileRun(slice_from_slice);
   CHECK(result->IsString());
   string = v8::Utils::OpenHandle(v8::String::Cast(*result));
   CHECK(string->IsSlicedString());
   CHECK(SlicedString::cast(*string)->parent()->IsSeqString());
-  CHECK_EQ("cdefghijklmnopqrstuvwx", *(string->ToCString()));
+  CHECK_EQ("cdefghijklmnopqrstuvwx", string->ToCString().get());
 }
 
 
@@ -1269,7 +1269,7 @@
   // Ordinary HeapNumbers can be handled (in runtime).
   result = CompileRun("%_SubString(short, Math.sqrt(4), 5.1);");
   string = v8::Utils::OpenHandle(v8::String::Cast(*result));
-  CHECK_EQ("cde", *(string->ToCString()));
+  CHECK_EQ("cde", string->ToCString().get());
 
   CompileRun("var long = 'abcdefghijklmnopqrstuvwxyz';");
   // Invalid indices.
@@ -1284,7 +1284,7 @@
   // Ordinary HeapNumbers within bounds can be handled (in runtime).
   result = CompileRun("%_SubString(long, Math.sqrt(4), 17.1);");
   string = v8::Utils::OpenHandle(v8::String::Cast(*result));
-  CHECK_EQ("cdefghijklmnopq", *(string->ToCString()));
+  CHECK_EQ("cdefghijklmnopq", string->ToCString().get());
 
   // Test that out-of-bounds substring of a slice fails when the indices
   // would have been valid for the underlying string.
diff --git a/test/mjsunit/div-mul-minus-one.js b/test/mjsunit/div-mul-minus-one.js
index f05bf0f..5ade614 100644
--- a/test/mjsunit/div-mul-minus-one.js
+++ b/test/mjsunit/div-mul-minus-one.js
@@ -36,9 +36,7 @@
 var expected_minus_zero = div(0);
 %OptimizeFunctionOnNextCall(div);
 assertEquals(expected_MinInt, div(kMinInt));
-assertOptimized(div);
 assertEquals(expected_minus_zero , div(0));
-assertOptimized(div);
 
 function mul(g) {
   return (g * -1) ^ 1
diff --git a/test/mjsunit/harmony/math-hyperbolic.js b/test/mjsunit/harmony/math-hyperbolic.js
new file mode 100644
index 0000000..604448d
--- /dev/null
+++ b/test/mjsunit/harmony/math-hyperbolic.js
@@ -0,0 +1,132 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-maths
+
+[Math.sinh, Math.cosh, Math.tanh, Math.asinh, Math.acosh, Math.atanh].
+    forEach(function(fun) {
+  assertTrue(isNaN(fun(NaN)));
+  assertTrue(isNaN(fun("abc")));
+  assertTrue(isNaN(fun({})));
+  assertEquals(fun(0), fun([]));
+  assertTrue(isNaN(fun([1, 1])));
+  assertEquals(fun(1.11), fun({ toString: function() { return "1.11"; } }));
+  assertEquals(fun(-3.1), fun({ toString: function() { return -3.1; } }));
+  assertEquals(fun(-1.1), fun({ valueOf: function() { return "-1.1"; } }));
+  assertEquals(fun(3.11), fun({ valueOf: function() { return 3.11; } }));
+});
+
+
+function test_id(fun, rev, value) {
+  assertEqualsDelta(1, rev(fun(value))/value, 1E-7);
+}
+
+[Math.PI, 2, 5, 1E-5, 0.3].forEach(function(x) {
+  test_id(Math.sinh, Math.asinh, x);
+  test_id(Math.sinh, Math.asinh, -x);
+  test_id(Math.cosh, Math.acosh, x);
+  test_id(Math.tanh, Math.atanh, x);
+  test_id(Math.tanh, Math.atanh, -x);
+});
+
+
+[Math.sinh, Math.asinh, Math.tanh, Math.atanh].forEach(function(fun) {
+  assertEquals("-Infinity", String(1/fun(-0)));
+  assertEquals("Infinity", String(1/fun(0)));
+});
+
+
+[Math.sinh, Math.asinh, Math.cosh].forEach(function(fun) {
+  assertEquals("-Infinity", String(fun(-Infinity)));
+  assertEquals("Infinity", String(fun(Infinity)));
+  assertEquals("-Infinity", String(fun("-Infinity")));
+  assertEquals("Infinity", String(fun("Infinity")));
+});
+
+
+assertEquals("-Infinity", String(Math.atanh(-1)));
+assertEquals("Infinity", String(Math.atanh(1)));
+
+// Math.atanh(x) is NaN for |x| > 1 and for NaN input.
+[1.000000000001, Math.PI, 10000000, 2, Infinity, NaN].forEach(function(x) {
+  assertTrue(isNaN(Math.atanh(-x)));
+  assertTrue(isNaN(Math.atanh(x)));
+});
+
+
+assertEquals(1, Math.tanh(Infinity));
+assertEquals(-1, Math.tanh(-Infinity));
+assertEquals(1, Math.cosh(0));
+assertEquals(1, Math.cosh(-0));
+
+assertEquals(0, Math.acosh(1));
+assertEquals("Infinity", String(Math.acosh(Infinity)));
+
+// Math.acosh(x) is NaN for x < 1
+[0.99999999999, 0.2, -1000, 0, -0].forEach(function(x) {
+  assertTrue(isNaN(Math.acosh(x)));
+});
+
+
+// Some random samples.
+assertEqualsDelta(0.5210953054937, Math.sinh(0.5), 1E-12);
+assertEqualsDelta(74.203210577788, Math.sinh(5), 1E-12);
+assertEqualsDelta(-0.5210953054937, Math.sinh(-0.5), 1E-12);
+assertEqualsDelta(-74.203210577788, Math.sinh(-5), 1E-12);
+
+assertEqualsDelta(1.1276259652063, Math.cosh(0.5), 1E-12);
+assertEqualsDelta(74.209948524787, Math.cosh(5), 1E-12);
+assertEqualsDelta(1.1276259652063, Math.cosh(-0.5), 1E-12);
+assertEqualsDelta(74.209948524787, Math.cosh(-5), 1E-12);
+
+assertEqualsDelta(0.4621171572600, Math.tanh(0.5), 1E-12);
+assertEqualsDelta(0.9999092042625, Math.tanh(5), 1E-12);
+assertEqualsDelta(-0.4621171572600, Math.tanh(-0.5), 1E-12);
+assertEqualsDelta(-0.9999092042625, Math.tanh(-5), 1E-12);
+
+assertEqualsDelta(0.4812118250596, Math.asinh(0.5), 1E-12);
+assertEqualsDelta(2.3124383412727, Math.asinh(5), 1E-12);
+assertEqualsDelta(-0.4812118250596, Math.asinh(-0.5), 1E-12);
+assertEqualsDelta(-2.3124383412727, Math.asinh(-5), 1E-12);
+
+assertEqualsDelta(0.9624236501192, Math.acosh(1.5), 1E-12);
+assertEqualsDelta(2.2924316695612, Math.acosh(5), 1E-12);
+assertEqualsDelta(0.4435682543851, Math.acosh(1.1), 1E-12);
+assertEqualsDelta(1.3169578969248, Math.acosh(2), 1E-12);
+
+assertEqualsDelta(0.5493061443341, Math.atanh(0.5), 1E-12);
+assertEqualsDelta(0.1003353477311, Math.atanh(0.1), 1E-12);
+assertEqualsDelta(-0.5493061443341, Math.atanh(-0.5), 1E-12);
+assertEqualsDelta(-0.1003353477311, Math.atanh(-0.1), 1E-12);
+
+[0, 1E-50, 1E-10, 1E10, 1E50, 1E100, 1E150].forEach(function(x) {
+  assertEqualsDelta(Math.asinh(x), -Math.asinh(-x), 1E-12);
+});
+
+[1-(1E-16), 0, 1E-10, 1E-50].forEach(function(x) {
+  assertEqualsDelta(Math.atanh(x), -Math.atanh(-x), 1E-12);
+});
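
The sampled constants above follow directly from the exponential definitions of the hyperbolic functions, e.g. sinh(x) = (e^x - e^-x) / 2 and asinh(x) = ln(x + sqrt(x^2 + 1)). A quick standalone check of two of them:

#include <cmath>
#include <cstdio>

int main() {
  double x = 0.5;
  printf("%.13f\n", (std::exp(x) - std::exp(-x)) / 2);    // 0.5210953054937
  printf("%.13f\n", std::log(x + std::sqrt(x * x + 1)));  // 0.4812118250596
  return 0;
}
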
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index a0b45a0..4997753 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -99,6 +99,11 @@
   ##############################################################################
   # Long running test that reproduces memory leak and should be run manually.
   'regress/regress-2073': [SKIP],
+
+  ##############################################################################
+  # Tests verifying CHECK and ASSERT.
+  'verify-check-false': [FAIL, NO_VARIANTS],
+  'verify-assert-false': [NO_VARIANTS, ['mode == release', PASS], ['mode == debug', FAIL]],
 }],  # ALWAYS
 
 ##############################################################################
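
The two status entries reflect how the new trigger-failure extension exercises the fatal-check machinery: CHECK fires in every build mode, so verify-check-false is expected to fail everywhere, while ASSERT is compiled out of release builds, so verify-assert-false only fails in debug mode. A simplified sketch of that distinction (not V8's actual checks.h):

#include <cstdio>
#include <cstdlib>

#define CHECK(cond)                                     \
  do {                                                  \
    if (!(cond)) {                                      \
      fprintf(stderr, "CHECK(%s) failed\n", #cond);     \
      abort();                                          \
    }                                                   \
  } while (false)

#ifdef DEBUG
#define ASSERT(cond) CHECK(cond)
#else
#define ASSERT(cond) ((void) 0)
#endif

int main() {
  ASSERT(1 + 1 == 3);  // fatal only when compiled with -DDEBUG
  CHECK(1 + 1 == 2);   // active in every build mode
  return 0;
}
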
diff --git a/test/mjsunit/regress/internalized-string-not-equal.js b/test/mjsunit/regress/internalized-string-not-equal.js
new file mode 100644
index 0000000..911279b
--- /dev/null
+++ b/test/mjsunit/regress/internalized-string-not-equal.js
@@ -0,0 +1,40 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Regression test for a bug introduced in r15773, when masks for
+// internalized string and string types were reorganized.
+function equal(o1, o2) {
+  return (o1 == o2);
+}
+var a = "abc";
+var b = "abc";
+equal(a, b);
+equal(a, b);
+%OptimizeFunctionOnNextCall(equal);
+assertTrue(equal(1.3, 1.3));
diff --git a/test/mjsunit/regress/regress-3039.js b/test/mjsunit/regress/regress-3039.js
new file mode 100644
index 0000000..3c7f62c
--- /dev/null
+++ b/test/mjsunit/regress/regress-3039.js
@@ -0,0 +1,41 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function do_div(x, y) {
+  return (x / y) | 0;
+}
+
+// Preparation.
+assertEquals(17, do_div(51, 3));
+assertEquals(13, do_div(65, 5));
+%OptimizeFunctionOnNextCall(do_div);
+assertEquals(11, do_div(77, 7));
+
+// The actual test. We should not trigger a floating point exception.
+assertEquals(-2147483648, do_div(-2147483648, -1));
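
On ia32/x64 the idiv instruction raises a #DE fault when the quotient overflows, and kMinInt / -1 is exactly that case: the mathematical result 2^31 does not fit in a signed 32-bit register, and the truncated-to-int32 result the test expects is kMinInt itself. A sketch of the kind of guard a compiler has to emit before using the hardware divide; this is an illustration, not V8's exact lowering:

#include <climits>
#include <cstdio>

// Truncating int32 division that sidesteps the INT_MIN / -1 overflow case of
// the x86 #DE fault (division by zero is not handled in this sketch).
static int DivTruncated(int dividend, int divisor) {
  if (dividend == INT_MIN && divisor == -1) return INT_MIN;
  return dividend / divisor;
}

int main() {
  printf("%d\n", DivTruncated(INT_MIN, -1));  // -2147483648, as the test expects
  return 0;
}
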
diff --git a/test/mjsunit/regress/regress-context-osr.js b/test/mjsunit/regress/regress-context-osr.js
new file mode 100644
index 0000000..b74907d
--- /dev/null
+++ b/test/mjsunit/regress/regress-context-osr.js
@@ -0,0 +1,39 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"use strict";
+function f() {
+  try { } catch (e) { }
+}
+
+for (this.x = 0; this.x < 1; ++this.x) {
+  for (this.y = 0; this.y < 1; ++this.y) {
+    for (this.ll = 0; this.ll < 70670; ++this.ll) {
+      f();
+    }
+  }
+}
diff --git a/test/mjsunit/verify-assert-false.js b/test/mjsunit/verify-assert-false.js
new file mode 100644
index 0000000..8bea7df
--- /dev/null
+++ b/test/mjsunit/verify-assert-false.js
@@ -0,0 +1,30 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-trigger-failure
+
+triggerAssertFalse();
diff --git a/test/mjsunit/verify-check-false.js b/test/mjsunit/verify-check-false.js
new file mode 100644
index 0000000..426bf84
--- /dev/null
+++ b/test/mjsunit/verify-check-false.js
@@ -0,0 +1,30 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-trigger-failure
+
+triggerCheckFalse();
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index 2164b74..a1134f0 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -340,6 +340,8 @@
         '../../src/extensions/gc-extension.h',
         '../../src/extensions/statistics-extension.cc',
         '../../src/extensions/statistics-extension.h',
+        '../../src/extensions/trigger-failure-extension.cc',
+        '../../src/extensions/trigger-failure-extension.h',
         '../../src/factory.cc',
         '../../src/factory.h',
         '../../src/fast-dtoa.cc',
diff --git a/tools/run-tests.py b/tools/run-tests.py
index 28926e5..15c42d0 100755
--- a/tools/run-tests.py
+++ b/tools/run-tests.py
@@ -329,7 +329,10 @@
       s.DownloadData()
 
   for (arch, mode) in options.arch_and_mode:
-    code = Execute(arch, mode, args, options, suites, workspace)
+    try:
+      code = Execute(arch, mode, args, options, suites, workspace)
+    except KeyboardInterrupt:
+      return 2
     exit_code = exit_code or code
   return exit_code
 
@@ -449,7 +452,7 @@
       return exit_code
     overall_duration = time.time() - start_time
   except KeyboardInterrupt:
-    return 1
+    raise
 
   if options.time:
     verbose.PrintTestDurations(suites, overall_duration)
diff --git a/tools/testrunner/local/commands.py b/tools/testrunner/local/commands.py
index 01f170d..4f3dc51 100644
--- a/tools/testrunner/local/commands.py
+++ b/tools/testrunner/local/commands.py
@@ -64,34 +64,34 @@
 
 
 def RunProcess(verbose, timeout, args, **rest):
-  if verbose: print "#", " ".join(args)
-  popen_args = args
-  prev_error_mode = SEM_INVALID_VALUE
-  if utils.IsWindows():
-    popen_args = subprocess.list2cmdline(args)
-    # Try to change the error mode to avoid dialogs on fatal errors. Don't
-    # touch any existing error mode flags by merging the existing error mode.
-    # See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
-    error_mode = SEM_NOGPFAULTERRORBOX
-    prev_error_mode = Win32SetErrorMode(error_mode)
-    Win32SetErrorMode(error_mode | prev_error_mode)
-  process = subprocess.Popen(
-    shell=utils.IsWindows(),
-    args=popen_args,
-    **rest
-  )
-  if (utils.IsWindows() and prev_error_mode != SEM_INVALID_VALUE):
-    Win32SetErrorMode(prev_error_mode)
-  # Compute the end time - if the process crosses this limit we
-  # consider it timed out.
-  if timeout is None: end_time = None
-  else: end_time = time.time() + timeout
-  timed_out = False
-  # Repeatedly check the exit code from the process in a
-  # loop and keep track of whether or not it times out.
-  exit_code = None
-  sleep_time = INITIAL_SLEEP_TIME
   try:
+    if verbose: print "#", " ".join(args)
+    popen_args = args
+    prev_error_mode = SEM_INVALID_VALUE
+    if utils.IsWindows():
+      popen_args = subprocess.list2cmdline(args)
+      # Try to change the error mode to avoid dialogs on fatal errors. Don't
+      # touch any existing error mode flags by merging the existing error mode.
+      # See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
+      error_mode = SEM_NOGPFAULTERRORBOX
+      prev_error_mode = Win32SetErrorMode(error_mode)
+      Win32SetErrorMode(error_mode | prev_error_mode)
+    process = subprocess.Popen(
+      shell=utils.IsWindows(),
+      args=popen_args,
+      **rest
+    )
+    if (utils.IsWindows() and prev_error_mode != SEM_INVALID_VALUE):
+      Win32SetErrorMode(prev_error_mode)
+    # Compute the end time - if the process crosses this limit we
+    # consider it timed out.
+    if timeout is None: end_time = None
+    else: end_time = time.time() + timeout
+    timed_out = False
+    # Repeatedly check the exit code from the process in a
+    # loop and keep track of whether or not it times out.
+    exit_code = None
+    sleep_time = INITIAL_SLEEP_TIME
     while exit_code is None:
       if (not end_time is None) and (time.time() >= end_time):
         # Kill the process and wait for it to exit.
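
The comment in the hunk above describes the Windows error-mode dance: SetErrorMode both installs a new mode and returns the previous one, so the code calls it once to learn the existing flags and a second time with those flags OR'ed back in, keeping whatever was already set while adding SEM_NOGPFAULTERRORBOX to suppress crash dialogs. A hedged sketch of that merge using plain ctypes (commands.py defines its own Win32SetErrorMode wrapper elsewhere in the file; the direct kernel32 call here is an assumption for illustration only):

import ctypes
import sys

SEM_NOGPFAULTERRORBOX = 0x0002  # suppress the fatal-error dialog box


def suppress_fatal_error_dialogs():
    # Sketch only; a no-op on non-Windows platforms.
    if not sys.platform.startswith("win"):
        return
    kernel32 = ctypes.windll.kernel32
    # The first call installs the new mode and hands back the old one ...
    previous = kernel32.SetErrorMode(SEM_NOGPFAULTERRORBOX)
    # ... the second call merges the old flags back in so none are dropped.
    kernel32.SetErrorMode(SEM_NOGPFAULTERRORBOX | previous)
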
@@ -131,10 +131,10 @@
 
 
 def Execute(args, verbose=False, timeout=None):
-  args = [ c for c in args if c != "" ]
-  (fd_out, outname) = tempfile.mkstemp()
-  (fd_err, errname) = tempfile.mkstemp()
   try:
+    args = [ c for c in args if c != "" ]
+    (fd_out, outname) = tempfile.mkstemp()
+    (fd_err, errname) = tempfile.mkstemp()
     (exit_code, timed_out) = RunProcess(
       verbose,
       timeout,
@@ -142,12 +142,15 @@
       stdout=fd_out,
       stderr=fd_err
     )
+  except KeyboardInterrupt:
+    raise
   except:
     raise
-  os.close(fd_out)
-  os.close(fd_err)
-  out = file(outname).read()
-  errors = file(errname).read()
-  CheckedUnlink(outname)
-  CheckedUnlink(errname)
+  finally:
+    os.close(fd_out)
+    os.close(fd_err)
+    out = file(outname).read()
+    errors = file(errname).read()
+    CheckedUnlink(outname)
+    CheckedUnlink(errname)
   return output.Output(exit_code, timed_out, out, errors)
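
The second commands.py hunk converts Execute's straight-line cleanup into a try/finally, so the temporary stdout/stderr files are closed, read, and unlinked even when RunProcess is cut short by an interrupt. The same pattern in isolation, with illustrative names (run_with_captured_output is not part of the tool); unlike the patch, this sketch creates the temp file before entering the try, so the finally block only ever touches a resource that was actually created:

import os
import tempfile


def run_with_captured_output(run):
    # Capture a child's stdout in a temp file and guarantee cleanup.
    fd_out, outname = tempfile.mkstemp()
    try:
        exit_code = run(stdout=fd_out)
    finally:
        os.close(fd_out)
        with open(outname) as f:
            out = f.read()
        os.unlink(outname)
    return exit_code, out

Called with something like lambda stdout: subprocess.call(["echo", "hello"], stdout=stdout), it returns the child's exit code together with whatever the child wrote, and the temporary file is removed in both the normal and the interrupted case.
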