Version 3.24.7

Fix small spec violation in String.prototype.split (issue 3026).

Correctly resolve forcibly context-allocated parameters in debug-evaluate (Chromium issue 325676).

Introduce Function::GetBoundFunction.

Performance and stability improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@18413 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/accessors.cc b/src/accessors.cc
index 4da9dd4..ba84c9a 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -28,6 +28,7 @@
 #include "v8.h"
 #include "accessors.h"
 
+#include "compiler.h"
 #include "contexts.h"
 #include "deoptimizer.h"
 #include "execution.h"
@@ -648,9 +649,9 @@
   // If the function isn't compiled yet, the length is not computed correctly
   // yet. Compile it now and return the right length.
   HandleScope scope(isolate);
-  Handle<JSFunction> handle(function);
-  if (JSFunction::CompileLazy(handle, KEEP_EXCEPTION)) {
-    return Smi::FromInt(handle->shared()->length());
+  Handle<JSFunction> function_handle(function);
+  if (Compiler::EnsureCompiled(function_handle, KEEP_EXCEPTION)) {
+    return Smi::FromInt(function_handle->shared()->length());
   }
   return Failure::Exception();
 }
diff --git a/src/api.cc b/src/api.cc
index 1f5b165..c5023f8 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -1720,16 +1720,16 @@
       pre_data_impl = NULL;
     }
     i::Handle<i::SharedFunctionInfo> result =
-      i::Compiler::Compile(str,
-                           name_obj,
-                           line_offset,
-                           column_offset,
-                           is_shared_cross_origin,
-                           isolate->global_context(),
-                           NULL,
-                           pre_data_impl,
-                           Utils::OpenHandle(*script_data, true),
-                           i::NOT_NATIVES_CODE);
+      i::Compiler::CompileScript(str,
+                                 name_obj,
+                                 line_offset,
+                                 column_offset,
+                                 is_shared_cross_origin,
+                                 isolate->global_context(),
+                                 NULL,
+                                 pre_data_impl,
+                                 Utils::OpenHandle(*script_data, true),
+                                 i::NOT_NATIVES_CODE);
     has_pending_exception = result.is_null();
     EXCEPTION_BAILOUT_CHECK(isolate, Local<Script>());
     raw_result = *result;
@@ -4163,6 +4163,20 @@
 }
 
 
+Local<v8::Value> Function::GetBoundFunction() const {
+  i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+  if (!func->shared()->bound()) {
+    return v8::Undefined(reinterpret_cast<v8::Isolate*>(func->GetIsolate()));
+  }
+  i::Handle<i::FixedArray> bound_args = i::Handle<i::FixedArray>(
+      i::FixedArray::cast(func->function_bindings()));
+  i::Handle<i::Object> original(
+      bound_args->get(i::JSFunction::kBoundFunctionIndex),
+      func->GetIsolate());
+  return Utils::ToLocal(i::Handle<i::JSFunction>::cast(original));
+}
+
+
 int String::Length() const {
   i::Handle<i::String> str = Utils::OpenHandle(this);
   return str->length();
@@ -7123,15 +7137,6 @@
 }
 
 
-v8::Handle<v8::Value> HeapGraphNode::GetHeapValue() const {
-  i::Isolate* isolate = i::Isolate::Current();
-  i::Handle<i::HeapObject> object = ToInternal(this)->GetHeapObject();
-  return !object.is_null() ?
-      ToApiHandle<Value>(object) :
-      ToApiHandle<Value>(isolate->factory()->undefined_value());
-}
-
-
 static i::HeapSnapshot* ToInternal(const HeapSnapshot* snapshot) {
   return const_cast<i::HeapSnapshot*>(
       reinterpret_cast<const i::HeapSnapshot*>(snapshot));
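
The Function::GetBoundFunction addition above returns the target of a
function created by Function.prototype.bind, or undefined for any other
function. A minimal embedder sketch of its use (illustrative only; it
assumes an entered isolate and context and elides error checks):

    #include <stdio.h>
    #include "v8.h"

    // Sketch: compile a script that evaluates to a bound function, then
    // recover the original (unbound) target through the new API.
    void InspectBoundTarget() {
      v8::HandleScope handle_scope(v8::Isolate::GetCurrent());
      v8::Local<v8::Value> result = v8::Script::Compile(v8::String::New(
          "(function f() { return this.x; }).bind({x: 1})"))->Run();
      v8::Local<v8::Function> bound = result.As<v8::Function>();
      // For the bound function this yields the original f; for a function
      // not created by bind, GetBoundFunction() returns undefined.
      v8::Local<v8::Value> target = bound->GetBoundFunction();
      if (target->IsFunction()) {
        v8::String::Utf8Value name(target.As<v8::Function>()->GetName());
        printf("bound target: %s\n", *name);  // prints "f"
      }
    }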
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index d95b746..fb319df 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -289,8 +289,8 @@
 }
 
 
-static void CallRuntimePassFunction(MacroAssembler* masm,
-                                    Runtime::FunctionId function_id) {
+static void CallRuntimePassFunction(
+    MacroAssembler* masm, Runtime::FunctionId function_id) {
   FrameScope scope(masm, StackFrame::INTERNAL);
   // Push a copy of the function onto the stack.
   __ push(r1);
@@ -313,7 +313,13 @@
 }
 
 
-void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
+  __ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(r0);
+}
+
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
   // Checking whether the queued function is ready for install is optional,
   // since we come across interrupts and stack checks elsewhere.  However,
   // not checking may delay installing ready functions, and always checking
@@ -324,22 +330,14 @@
   __ cmp(sp, Operand(ip));
   __ b(hs, &ok);
 
-  CallRuntimePassFunction(masm, Runtime::kTryInstallRecompiledCode);
-  // Tail call to returned code.
-  __ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(r0);
+  CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
+  GenerateTailCallToReturnedCode(masm);
 
   __ bind(&ok);
   GenerateTailCallToSharedCode(masm);
 }
 
 
-void Builtins::Generate_ConcurrentRecompile(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kConcurrentRecompile);
-  GenerateTailCallToSharedCode(masm);
-}
-
-
 static void Generate_JSConstructStubHelper(MacroAssembler* masm,
                                            bool is_api_function,
                                            bool count_constructions) {
@@ -774,19 +772,38 @@
 }
 
 
-void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kLazyCompile);
-  // Do a tail-call of the compiled function.
-  __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(r2);
+void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
+  CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
+  GenerateTailCallToReturnedCode(masm);
 }
 
 
-void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kLazyRecompile);
-  // Do a tail-call of the compiled function.
-  __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(r2);
+static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
+  FrameScope scope(masm, StackFrame::INTERNAL);
+  // Push a copy of the function onto the stack.
+  __ push(r1);
+  // Push call kind information and function as parameter to the runtime call.
+  __ Push(r5, r1);
+  // Whether to compile in a background thread.
+  __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
+
+  __ CallRuntime(Runtime::kCompileOptimized, 2);
+  // Restore call kind information.
+  __ pop(r5);
+  // Restore receiver.
+  __ pop(r1);
+}
+
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+  CallCompileOptimized(masm, false);
+  GenerateTailCallToReturnedCode(masm);
+}
+
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+  CallCompileOptimized(masm, true);
+  GenerateTailCallToReturnedCode(masm);
 }
 
 
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index c47bdf5..f27ca7a 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -1499,7 +1499,7 @@
   if (cache == NULL || !cache->Lookup(name, &function_info)) {
     ASSERT(source->IsOneByteRepresentation());
     Handle<String> script_name = factory->NewStringFromUtf8(name);
-    function_info = Compiler::Compile(
+    function_info = Compiler::CompileScript(
         source,
         script_name,
         0,
@@ -2354,7 +2354,7 @@
     Handle<JSFunction> function
         = Handle<JSFunction>(JSFunction::cast(function_object));
     builtins->set_javascript_builtin(id, *function);
-    if (!JSFunction::CompileLazy(function, CLEAR_EXCEPTION)) {
+    if (!Compiler::EnsureCompiled(function, CLEAR_EXCEPTION)) {
       return false;
     }
     builtins->set_javascript_builtin_code(id, function->shared()->code());
diff --git a/src/builtins.h b/src/builtins.h
index edc13f7..affb253 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -88,7 +88,7 @@
 #define BUILTIN_LIST_A(V)                                               \
   V(ArgumentsAdaptorTrampoline,     BUILTIN, UNINITIALIZED,             \
                                     kNoExtraICState)                    \
-  V(InRecompileQueue,               BUILTIN, UNINITIALIZED,             \
+  V(InOptimizationQueue,            BUILTIN, UNINITIALIZED,             \
                                     kNoExtraICState)                    \
   V(JSConstructStubCountdown,       BUILTIN, UNINITIALIZED,             \
                                     kNoExtraICState)                    \
@@ -100,11 +100,11 @@
                                     kNoExtraICState)                    \
   V(JSConstructEntryTrampoline,     BUILTIN, UNINITIALIZED,             \
                                     kNoExtraICState)                    \
-  V(LazyCompile,                    BUILTIN, UNINITIALIZED,             \
+  V(CompileUnoptimized,             BUILTIN, UNINITIALIZED,             \
                                     kNoExtraICState)                    \
-  V(LazyRecompile,                  BUILTIN, UNINITIALIZED,             \
+  V(CompileOptimized,               BUILTIN, UNINITIALIZED,             \
                                     kNoExtraICState)                    \
-  V(ConcurrentRecompile,            BUILTIN, UNINITIALIZED,             \
+  V(CompileOptimizedConcurrent,     BUILTIN, UNINITIALIZED,             \
                                     kNoExtraICState)                    \
   V(NotifyDeoptimized,              BUILTIN, UNINITIALIZED,             \
                                     kNoExtraICState)                    \
@@ -385,15 +385,15 @@
   static void Generate_Adaptor(MacroAssembler* masm,
                                CFunctionId id,
                                BuiltinExtraArguments extra_args);
-  static void Generate_InRecompileQueue(MacroAssembler* masm);
-  static void Generate_ConcurrentRecompile(MacroAssembler* masm);
+  static void Generate_CompileUnoptimized(MacroAssembler* masm);
+  static void Generate_InOptimizationQueue(MacroAssembler* masm);
+  static void Generate_CompileOptimized(MacroAssembler* masm);
+  static void Generate_CompileOptimizedConcurrent(MacroAssembler* masm);
   static void Generate_JSConstructStubCountdown(MacroAssembler* masm);
   static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
   static void Generate_JSConstructStubApi(MacroAssembler* masm);
   static void Generate_JSEntryTrampoline(MacroAssembler* masm);
   static void Generate_JSConstructEntryTrampoline(MacroAssembler* masm);
-  static void Generate_LazyCompile(MacroAssembler* masm);
-  static void Generate_LazyRecompile(MacroAssembler* masm);
   static void Generate_NotifyDeoptimized(MacroAssembler* masm);
   static void Generate_NotifySoftDeoptimized(MacroAssembler* masm);
   static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index dc1f185..f66a9e3 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -1194,9 +1194,14 @@
     Label install_optimized;
     HValue* first_context_slot = Add<HLoadNamedField>(optimized_map,
         HObjectAccess::ForFirstContextSlot());
+    HValue* first_osr_ast_slot = Add<HLoadNamedField>(optimized_map,
+        HObjectAccess::ForFirstOsrAstIdSlot());
+    HValue* osr_ast_id_none = Add<HConstant>(BailoutId::None().ToInt());
     IfBuilder already_in(this);
     already_in.If<HCompareObjectEqAndBranch>(native_context,
                                              first_context_slot);
+    already_in.AndIf<HCompareObjectEqAndBranch>(first_osr_ast_slot,
+                                                osr_ast_id_none);
     already_in.Then();
     {
       HValue* code_object = Add<HLoadNamedField>(optimized_map,
@@ -1213,7 +1218,7 @@
                                shared_function_entry_length);
       HValue* array_length = Add<HLoadNamedField>(optimized_map,
           HObjectAccess::ForFixedArrayLength());
-      HValue* key = loop_builder.BeginBody(array_length,
+      HValue* slot_iterator = loop_builder.BeginBody(array_length,
                                            graph()->GetConstant0(),
                                            Token::GT);
       {
@@ -1222,8 +1227,8 @@
         HValue* second_entry_index =
             Add<HConstant>(SharedFunctionInfo::kSecondEntryIndex);
         IfBuilder restore_check(this);
-        restore_check.If<HCompareNumericAndBranch>(key, second_entry_index,
-                                                   Token::EQ);
+        restore_check.If<HCompareNumericAndBranch>(
+            slot_iterator, second_entry_index, Token::EQ);
         restore_check.Then();
         {
           // Store the unoptimized code
@@ -1232,20 +1237,29 @@
         }
         restore_check.Else();
         {
-          HValue* keyed_minus = AddUncasted<HSub>(
-              key, shared_function_entry_length);
-          HInstruction* keyed_lookup = Add<HLoadKeyed>(optimized_map,
-              keyed_minus, static_cast<HValue*>(NULL), FAST_ELEMENTS);
+          STATIC_ASSERT(SharedFunctionInfo::kContextOffset == 0);
+          STATIC_ASSERT(SharedFunctionInfo::kEntryLength -
+                            SharedFunctionInfo::kOsrAstIdOffset == 1);
+          HValue* native_context_slot = AddUncasted<HSub>(
+              slot_iterator, shared_function_entry_length);
+          HValue* osr_ast_id_slot = AddUncasted<HSub>(
+              slot_iterator, graph()->GetConstant1());
+          HInstruction* native_context_entry = Add<HLoadKeyed>(optimized_map,
+              native_context_slot, static_cast<HValue*>(NULL), FAST_ELEMENTS);
+          HInstruction* osr_ast_id_entry = Add<HLoadKeyed>(optimized_map,
+              osr_ast_id_slot, static_cast<HValue*>(NULL), FAST_ELEMENTS);
           IfBuilder done_check(this);
           done_check.If<HCompareObjectEqAndBranch>(native_context,
-                                                   keyed_lookup);
+                                                   native_context_entry);
+          done_check.AndIf<HCompareObjectEqAndBranch>(osr_ast_id_entry,
+                                                      osr_ast_id_none);
           done_check.Then();
           {
             // Hit: fetch the optimized code.
-            HValue* keyed_plus = AddUncasted<HAdd>(
-                keyed_minus, graph()->GetConstant1());
+            HValue* code_slot = AddUncasted<HAdd>(
+                native_context_slot, graph()->GetConstant1());
             HValue* code_object = Add<HLoadKeyed>(optimized_map,
-                keyed_plus, static_cast<HValue*>(NULL), FAST_ELEMENTS);
+                code_slot, static_cast<HValue*>(NULL), FAST_ELEMENTS);
             BuildInstallOptimizedCode(js_function, native_context, code_object);
 
             // Fall out of the loop
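
The code-stubs-hydrogen.cc change above makes the optimized-code-map lookup
OSR-aware: an entry only counts as a hit when its native context matches and
its OSR AST id slot holds BailoutId::None(), so entries cached for on-stack
replacement are skipped. A rough runtime-side sketch of the same walk (entry
layout inferred from the STATIC_ASSERTs above; this is not the stub's actual
code):

    // Sketch only: each entry stores the native context in its first slot
    // and the OSR AST id in its last slot; the code object sits one slot
    // after the context, matching the stub's native_context_slot + 1 load.
    static Object* LookupNonOsrCode(FixedArray* optimized_map,
                                    Context* native_context,
                                    int entry_length,
                                    int first_entry_index) {
      int none = BailoutId::None().ToInt();
      for (int i = optimized_map->length(); i > first_entry_index;
           i -= entry_length) {
        // The entry occupies slots [i - entry_length, i); the OSR AST id
        // is the slot at i - 1.
        if (optimized_map->get(i - entry_length) == native_context &&
            Smi::cast(optimized_map->get(i - 1))->value() == none) {
          return optimized_map->get(i - entry_length + 1);  // code object
        }
      }
      return NULL;  // miss: fall through to the runtime, as the stub does.
    }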
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 5678ebd..4e49680 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -114,7 +114,9 @@
 #if V8_TARGET_ARCH_MIPS
 #define CODE_STUB_LIST_MIPS(V)  \
   V(RegExpCEntry)               \
-  V(DirectCEntry)
+  V(DirectCEntry)               \
+  V(StoreRegistersState)        \
+  V(RestoreRegistersState)
 #else
 #define CODE_STUB_LIST_MIPS(V)
 #endif
diff --git a/src/compilation-cache.cc b/src/compilation-cache.cc
index fffe5da..a69ef4c 100644
--- a/src/compilation-cache.cc
+++ b/src/compilation-cache.cc
@@ -421,7 +421,6 @@
 Handle<SharedFunctionInfo> CompilationCache::LookupEval(
     Handle<String> source,
     Handle<Context> context,
-    bool is_global,
     LanguageMode language_mode,
     int scope_position) {
   if (!IsEnabled()) {
@@ -429,7 +428,7 @@
   }
 
   Handle<SharedFunctionInfo> result;
-  if (is_global) {
+  if (context->IsNativeContext()) {
     result = eval_global_.Lookup(
         source, context, language_mode, scope_position);
   } else {
@@ -454,9 +453,7 @@
 void CompilationCache::PutScript(Handle<String> source,
                                  Handle<Context> context,
                                  Handle<SharedFunctionInfo> function_info) {
-  if (!IsEnabled()) {
-    return;
-  }
+  if (!IsEnabled()) return;
 
   script_.Put(source, context, function_info);
 }
@@ -464,15 +461,12 @@
 
 void CompilationCache::PutEval(Handle<String> source,
                                Handle<Context> context,
-                               bool is_global,
                                Handle<SharedFunctionInfo> function_info,
                                int scope_position) {
-  if (!IsEnabled()) {
-    return;
-  }
+  if (!IsEnabled()) return;
 
   HandleScope scope(isolate());
-  if (is_global) {
+  if (context->IsNativeContext()) {
     eval_global_.Put(source, context, function_info, scope_position);
   } else {
     ASSERT(scope_position != RelocInfo::kNoPosition);
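
The is_global parameter dropped from LookupEval and PutEval above was
redundant: whether an eval is global is recoverable from the context itself,
which is exactly what the cache now checks. An illustrative helper (not in
this patch) capturing the invariant:

    // Illustrative only: the cache now recomputes this internally via
    // context->IsNativeContext() instead of trusting a caller-supplied
    // flag. A global eval executes directly in the native (global)
    // context; any other context means a contextual eval, which is
    // additionally keyed by its scope position.
    static bool IsGlobalEval(Handle<Context> context) {
      return context->IsNativeContext();
    }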
diff --git a/src/compilation-cache.h b/src/compilation-cache.h
index 414e09e..ead52b5 100644
--- a/src/compilation-cache.h
+++ b/src/compilation-cache.h
@@ -222,7 +222,6 @@
   // contain a script for the given source string.
   Handle<SharedFunctionInfo> LookupEval(Handle<String> source,
                                         Handle<Context> context,
-                                        bool is_global,
                                         LanguageMode language_mode,
                                         int scope_position);
 
@@ -241,7 +240,6 @@
   // with the shared function info. This may overwrite an existing mapping.
   void PutEval(Handle<String> source,
                Handle<Context> context,
-               bool is_global,
                Handle<SharedFunctionInfo> function_info,
                int scope_position);
 
diff --git a/src/compiler.cc b/src/compiler.cc
index 6b7786f..276b9d8 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -59,7 +59,6 @@
     : flags_(LanguageModeField::encode(CLASSIC_MODE)),
       script_(script),
       osr_ast_id_(BailoutId::None()),
-      osr_pc_offset_(0),
       parameter_count_(0) {
   Initialize(script->GetIsolate(), BASE, zone);
 }
@@ -71,7 +70,6 @@
       shared_info_(shared_info),
       script_(Handle<Script>(Script::cast(shared_info->script()))),
       osr_ast_id_(BailoutId::None()),
-      osr_pc_offset_(0),
       parameter_count_(0) {
   Initialize(script_->GetIsolate(), BASE, zone);
 }
@@ -85,7 +83,6 @@
       script_(Handle<Script>(Script::cast(shared_info_->script()))),
       context_(closure->context()),
       osr_ast_id_(BailoutId::None()),
-      osr_pc_offset_(0),
       parameter_count_(0) {
   Initialize(script_->GetIsolate(), BASE, zone);
 }
@@ -97,7 +94,6 @@
     : flags_(LanguageModeField::encode(CLASSIC_MODE) |
              IsLazy::encode(true)),
       osr_ast_id_(BailoutId::None()),
-      osr_pc_offset_(0),
       parameter_count_(0) {
   Initialize(isolate, STUB, zone);
   code_stub_ = stub;
@@ -243,86 +239,6 @@
 }
 
 
-// Determine whether to use the full compiler for all code. If the flag
-// --always-full-compiler is specified this is the case. For the virtual frame
-// based compiler the full compiler is also used if a debugger is connected, as
-// the code from the full compiler supports mode precise break points. For the
-// crankshaft adaptive compiler debugging the optimized code is not possible at
-// all. However crankshaft support recompilation of functions, so in this case
-// the full compiler need not be be used if a debugger is attached, but only if
-// break points has actually been set.
-static bool IsDebuggerActive(Isolate* isolate) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  return isolate->use_crankshaft() ?
-    isolate->debug()->has_break_points() :
-    isolate->debugger()->IsDebuggerActive();
-#else
-  return false;
-#endif
-}
-
-
-static bool AlwaysFullCompiler(Isolate* isolate) {
-  return FLAG_always_full_compiler || IsDebuggerActive(isolate);
-}
-
-
-void RecompileJob::RecordOptimizationStats() {
-  Handle<JSFunction> function = info()->closure();
-  if (!function->IsOptimized()) {
-    // Concurrent recompilation and OSR may race.  Increment only once.
-    int opt_count = function->shared()->opt_count();
-    function->shared()->set_opt_count(opt_count + 1);
-  }
-  double ms_creategraph = time_taken_to_create_graph_.InMillisecondsF();
-  double ms_optimize = time_taken_to_optimize_.InMillisecondsF();
-  double ms_codegen = time_taken_to_codegen_.InMillisecondsF();
-  if (FLAG_trace_opt) {
-    PrintF("[optimizing ");
-    function->ShortPrint();
-    PrintF(" - took %0.3f, %0.3f, %0.3f ms]\n", ms_creategraph, ms_optimize,
-           ms_codegen);
-  }
-  if (FLAG_trace_opt_stats) {
-    static double compilation_time = 0.0;
-    static int compiled_functions = 0;
-    static int code_size = 0;
-
-    compilation_time += (ms_creategraph + ms_optimize + ms_codegen);
-    compiled_functions++;
-    code_size += function->shared()->SourceSize();
-    PrintF("Compiled: %d functions with %d byte source size in %fms.\n",
-           compiled_functions,
-           code_size,
-           compilation_time);
-  }
-  if (FLAG_hydrogen_stats) {
-    isolate()->GetHStatistics()->IncrementSubtotals(time_taken_to_create_graph_,
-                                                    time_taken_to_optimize_,
-                                                    time_taken_to_codegen_);
-  }
-}
-
-
-// A return value of true indicates the compilation pipeline is still
-// going, not necessarily that we optimized the code.
-static bool MakeCrankshaftCode(CompilationInfo* info) {
-  RecompileJob job(info);
-  RecompileJob::Status status = job.CreateGraph();
-
-  if (status != RecompileJob::SUCCEEDED) {
-    return status != RecompileJob::FAILED;
-  }
-  status = job.OptimizeGraph();
-  if (status != RecompileJob::SUCCEEDED) {
-    status = job.AbortOptimization();
-    return status != RecompileJob::FAILED;
-  }
-  status = job.GenerateAndInstallCode();
-  return status != RecompileJob::FAILED;
-}
-
-
 class HOptimizedGraphBuilderWithPositions: public HOptimizedGraphBuilder {
  public:
   explicit HOptimizedGraphBuilderWithPositions(CompilationInfo* info)
@@ -359,7 +275,26 @@
 };
 
 
-RecompileJob::Status RecompileJob::CreateGraph() {
+// Determine whether to use the full compiler for all code. If the flag
+// --always-full-compiler is specified this is the case. For the virtual frame
+// based compiler the full compiler is also used if a debugger is connected, as
+// the code from the full compiler supports more precise break points. For the
+// crankshaft adaptive compiler, debugging the optimized code is not possible
+// at all. However, crankshaft supports recompilation of functions, so in this
+// case the full compiler need not be used if a debugger is attached, but only
+// if break points have actually been set.
+static bool IsDebuggerActive(Isolate* isolate) {
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  return isolate->use_crankshaft() ?
+    isolate->debug()->has_break_points() :
+    isolate->debugger()->IsDebuggerActive();
+#else
+  return false;
+#endif
+}
+
+
+OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
   ASSERT(isolate()->use_crankshaft());
   ASSERT(info()->IsOptimizing());
   ASSERT(!info()->IsCompilingForDebugging());
@@ -375,18 +310,15 @@
   // Fall back to using the full code generator if it's not possible
   // to use the Hydrogen-based optimizing compiler. We already have
   // generated code for this from the shared function object.
-  if (AlwaysFullCompiler(isolate())) {
-    info()->AbortOptimization();
-    return SetLastStatus(BAILED_OUT);
-  }
+  if (FLAG_always_full_compiler) return AbortOptimization();
+  if (IsDebuggerActive(isolate())) return AbortOptimization(kDebuggerIsActive);
 
   // Limit the number of times we re-compile a functions with
   // the optimizing compiler.
   const int kMaxOptCount =
       FLAG_deopt_every_n_times == 0 ? FLAG_max_opt_count : 1000;
   if (info()->opt_count() > kMaxOptCount) {
-    info()->set_bailout_reason(kOptimizedTooManyTimes);
-    return AbortOptimization();
+    return AbortAndDisableOptimization(kOptimizedTooManyTimes);
   }
 
   // Due to an encoding limit on LUnallocated operands in the Lithium
@@ -399,21 +331,18 @@
   const int parameter_limit = -LUnallocated::kMinFixedSlotIndex;
   Scope* scope = info()->scope();
   if ((scope->num_parameters() + 1) > parameter_limit) {
-    info()->set_bailout_reason(kTooManyParameters);
-    return AbortOptimization();
+    return AbortAndDisableOptimization(kTooManyParameters);
   }
 
   const int locals_limit = LUnallocated::kMaxFixedSlotIndex;
   if (info()->is_osr() &&
       scope->num_parameters() + 1 + scope->num_stack_slots() > locals_limit) {
-    info()->set_bailout_reason(kTooManyParametersLocals);
-    return AbortOptimization();
+    return AbortAndDisableOptimization(kTooManyParametersLocals);
   }
 
   // Take --hydrogen-filter into account.
   if (!info()->closure()->PassesFilter(FLAG_hydrogen_filter)) {
-    info()->AbortOptimization();
-    return SetLastStatus(BAILED_OUT);
+    return AbortOptimization(kHydrogenFilter);
   }
 
   // Recompile the unoptimized version of the code if the current version
@@ -473,7 +402,6 @@
   graph_ = graph_builder_->CreateGraph();
 
   if (isolate()->has_pending_exception()) {
-    info()->SetCode(Handle<Code>::null());
     return SetLastStatus(FAILED);
   }
 
@@ -483,24 +411,21 @@
   ASSERT(!graph_builder_->inline_bailout() || graph_ == NULL);
   if (graph_ == NULL) {
     if (graph_builder_->inline_bailout()) {
-      info_->AbortOptimization();
-      return SetLastStatus(BAILED_OUT);
-    } else {
       return AbortOptimization();
+    } else {
+      return AbortAndDisableOptimization();
     }
   }
 
   if (info()->HasAbortedDueToDependencyChange()) {
-    info_->set_bailout_reason(kBailedOutDueToDependencyChange);
-    info_->AbortOptimization();
-    return SetLastStatus(BAILED_OUT);
+    return AbortOptimization(kBailedOutDueToDependencyChange);
   }
 
   return SetLastStatus(SUCCEEDED);
 }
 
 
-RecompileJob::Status RecompileJob::OptimizeGraph() {
+OptimizedCompileJob::Status OptimizedCompileJob::OptimizeGraph() {
   DisallowHeapAllocation no_allocation;
   DisallowHandleAllocation no_handles;
   DisallowHandleDereference no_deref;
@@ -510,20 +435,19 @@
   Timer t(this, &time_taken_to_optimize_);
   ASSERT(graph_ != NULL);
   BailoutReason bailout_reason = kNoReason;
-  if (!graph_->Optimize(&bailout_reason)) {
-    if (bailout_reason != kNoReason) graph_builder_->Bailout(bailout_reason);
-    return SetLastStatus(BAILED_OUT);
-  } else {
+
+  if (graph_->Optimize(&bailout_reason)) {
     chunk_ = LChunk::NewChunk(graph_);
-    if (chunk_ == NULL) {
-      return SetLastStatus(BAILED_OUT);
-    }
+    if (chunk_ != NULL) return SetLastStatus(SUCCEEDED);
+  } else if (bailout_reason != kNoReason) {
+    graph_builder_->Bailout(bailout_reason);
   }
-  return SetLastStatus(SUCCEEDED);
+
+  return AbortOptimization();
 }
 
 
-RecompileJob::Status RecompileJob::GenerateAndInstallCode() {
+OptimizedCompileJob::Status OptimizedCompileJob::GenerateCode() {
   ASSERT(last_status() == SUCCEEDED);
   ASSERT(!info()->HasAbortedDueToDependencyChange());
   DisallowCodeDependencyChange no_dependency_change;
@@ -539,9 +463,9 @@
     Handle<Code> optimized_code = chunk_->Codegen();
     if (optimized_code.is_null()) {
       if (info()->bailout_reason() == kNoReason) {
-        info()->set_bailout_reason(kCodeGenerationFailed);
+        info_->set_bailout_reason(kCodeGenerationFailed);
       }
-      return AbortOptimization();
+      return AbortAndDisableOptimization();
     }
     info()->SetCode(optimized_code);
   }
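
With the renames above, the optimized compilation job exposes the same three
phases that the removed MakeCrankshaftCode used to drive: CreateGraph,
OptimizeGraph, and GenerateCode (formerly GenerateAndInstallCode). A
condensed sketch of a synchronous driver, mirroring the removed helper but
collapsing its BAILED_OUT-versus-FAILED distinction:

    // Sketch only: a real driver distinguishes BAILED_OUT (fall back to
    // unoptimized code; the pipeline keeps going) from FAILED (a pending
    // exception aborts compilation).
    static bool RunOptimizedCompileJob(CompilationInfo* info) {
      OptimizedCompileJob job(info);
      if (job.CreateGraph() != OptimizedCompileJob::SUCCEEDED) return false;
      if (job.OptimizeGraph() != OptimizedCompileJob::SUCCEEDED) return false;
      return job.GenerateCode() == OptimizedCompileJob::SUCCEEDED;
    }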
@@ -552,54 +476,40 @@
 }
 
 
-static bool GenerateCode(CompilationInfo* info) {
-  bool is_optimizing = info->isolate()->use_crankshaft() &&
-                       !info->IsCompilingForDebugging() &&
-                       info->IsOptimizing();
-  if (is_optimizing) {
-    Logger::TimerEventScope timer(
-        info->isolate(), Logger::TimerEventScope::v8_recompile_synchronous);
-    return MakeCrankshaftCode(info);
-  } else {
-    if (info->IsOptimizing()) {
-      // Have the CompilationInfo decide if the compilation should be
-      // BASE or NONOPT.
-      info->DisableOptimization();
-    }
-    Logger::TimerEventScope timer(
-        info->isolate(), Logger::TimerEventScope::v8_compile_full_code);
-    return FullCodeGenerator::MakeCode(info);
+void OptimizedCompileJob::RecordOptimizationStats() {
+  Handle<JSFunction> function = info()->closure();
+  if (!function->IsOptimized()) {
+    // Concurrent recompilation and OSR may race.  Increment only once.
+    int opt_count = function->shared()->opt_count();
+    function->shared()->set_opt_count(opt_count + 1);
   }
-}
-
-
-static bool MakeCode(CompilationInfo* info) {
-  // Precondition: code has been parsed.  Postcondition: the code field in
-  // the compilation info is set if compilation succeeded.
-  ASSERT(info->function() != NULL);
-  return Rewriter::Rewrite(info) && Scope::Analyze(info) && GenerateCode(info);
-}
-
-
-#ifdef ENABLE_DEBUGGER_SUPPORT
-bool Compiler::MakeCodeForLiveEdit(CompilationInfo* info) {
-  // Precondition: code has been parsed.  Postcondition: the code field in
-  // the compilation info is set if compilation succeeded.
-  bool succeeded = MakeCode(info);
-  if (!info->shared_info().is_null()) {
-    Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope(),
-                                                     info->zone());
-    info->shared_info()->set_scope_info(*scope_info);
+  double ms_creategraph = time_taken_to_create_graph_.InMillisecondsF();
+  double ms_optimize = time_taken_to_optimize_.InMillisecondsF();
+  double ms_codegen = time_taken_to_codegen_.InMillisecondsF();
+  if (FLAG_trace_opt) {
+    PrintF("[optimizing ");
+    function->ShortPrint();
+    PrintF(" - took %0.3f, %0.3f, %0.3f ms]\n", ms_creategraph, ms_optimize,
+           ms_codegen);
   }
-  return succeeded;
-}
-#endif
+  if (FLAG_trace_opt_stats) {
+    static double compilation_time = 0.0;
+    static int compiled_functions = 0;
+    static int code_size = 0;
 
-
-static bool DebuggerWantsEagerCompilation(CompilationInfo* info,
-                                          bool allow_lazy_without_ctx = false) {
-  return LiveEditFunctionTracker::IsActive(info->isolate()) ||
-         (info->isolate()->DebuggerHasBreakPoints() && !allow_lazy_without_ctx);
+    compilation_time += (ms_creategraph + ms_optimize + ms_codegen);
+    compiled_functions++;
+    code_size += function->shared()->SourceSize();
+    PrintF("Compiled: %d functions with %d byte source size in %fms.\n",
+           compiled_functions,
+           code_size,
+           compilation_time);
+  }
+  if (FLAG_hydrogen_stats) {
+    isolate()->GetHStatistics()->IncrementSubtotals(time_taken_to_create_graph_,
+                                                    time_taken_to_optimize_,
+                                                    time_taken_to_codegen_);
+  }
 }
 
 
@@ -630,54 +540,250 @@
 }
 
 
-static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
+static void UpdateSharedFunctionInfo(CompilationInfo* info) {
+  // Update the shared function info with the compiled code and the
+  // scope info.  Please note that the order of the shared function
+  // info initialization is important since set_scope_info might
+  // trigger a GC, causing the ASSERT below to be invalid if the code
+  // was flushed. By setting the code object last we avoid this.
+  Handle<SharedFunctionInfo> shared = info->shared_info();
+  Handle<ScopeInfo> scope_info =
+      ScopeInfo::Create(info->scope(), info->zone());
+  shared->set_scope_info(*scope_info);
+
+  Handle<Code> code = info->code();
+  CHECK(code->kind() == Code::FUNCTION);
+  shared->ReplaceCode(*code);
+  if (shared->optimization_disabled()) code->set_optimizable(false);
+
+  // Set the expected number of properties for instances.
+  FunctionLiteral* lit = info->function();
+  int expected = lit->expected_property_count();
+  SetExpectedNofPropertiesFromEstimate(shared, expected);
+
+  // Check the function has compiled code.
+  ASSERT(shared->is_compiled());
+  shared->set_dont_optimize_reason(lit->dont_optimize_reason());
+  shared->set_dont_inline(lit->flags()->Contains(kDontInline));
+  shared->set_ast_node_count(lit->ast_node_count());
+  shared->set_language_mode(lit->language_mode());
+}
+
+
+// Sets the function info on a function.
+// The start_position points to the first '(' character after the function name
+// in the full script source. When counting characters in the script source,
+// the first character is number 0 (not 1).
+static void SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
+                            FunctionLiteral* lit,
+                            bool is_toplevel,
+                            Handle<Script> script) {
+  function_info->set_length(lit->parameter_count());
+  function_info->set_formal_parameter_count(lit->parameter_count());
+  function_info->set_script(*script);
+  function_info->set_function_token_position(lit->function_token_position());
+  function_info->set_start_position(lit->start_position());
+  function_info->set_end_position(lit->end_position());
+  function_info->set_is_expression(lit->is_expression());
+  function_info->set_is_anonymous(lit->is_anonymous());
+  function_info->set_is_toplevel(is_toplevel);
+  function_info->set_inferred_name(*lit->inferred_name());
+  function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
+  function_info->set_allows_lazy_compilation_without_context(
+      lit->AllowsLazyCompilationWithoutContext());
+  function_info->set_language_mode(lit->language_mode());
+  function_info->set_uses_arguments(lit->scope()->arguments() != NULL);
+  function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
+  function_info->set_ast_node_count(lit->ast_node_count());
+  function_info->set_is_function(lit->is_function());
+  function_info->set_dont_optimize_reason(lit->dont_optimize_reason());
+  function_info->set_dont_inline(lit->flags()->Contains(kDontInline));
+  function_info->set_dont_cache(lit->flags()->Contains(kDontCache));
+  function_info->set_is_generator(lit->is_generator());
+}
+
+
+static bool CompileUnoptimizedCode(CompilationInfo* info) {
+  ASSERT(info->function() != NULL);
+  if (!Rewriter::Rewrite(info)) return false;
+  if (!Scope::Analyze(info)) return false;
+  ASSERT(info->scope() != NULL);
+
+  if (!FullCodeGenerator::MakeCode(info)) {
+    Isolate* isolate = info->isolate();
+    if (!isolate->has_pending_exception()) isolate->StackOverflow();
+    return false;
+  }
+  return true;
+}
+
+
+static Handle<Code> GetUnoptimizedCodeCommon(CompilationInfo* info) {
+  VMState<COMPILER> state(info->isolate());
+  PostponeInterruptsScope postpone(info->isolate());
+  if (!Parser::Parse(info)) return Handle<Code>::null();
+  LanguageMode language_mode = info->function()->language_mode();
+  info->SetLanguageMode(language_mode);
+
+  if (!CompileUnoptimizedCode(info)) return Handle<Code>::null();
+  Compiler::RecordFunctionCompilation(
+      Logger::LAZY_COMPILE_TAG, info, info->shared_info());
+  UpdateSharedFunctionInfo(info);
+  ASSERT_EQ(Code::FUNCTION, info->code()->kind());
+  return info->code();
+}
+
+
+Handle<Code> Compiler::GetUnoptimizedCode(Handle<JSFunction> function) {
+  ASSERT(!function->GetIsolate()->has_pending_exception());
+  ASSERT(!function->is_compiled());
+  if (function->shared()->is_compiled()) {
+    return Handle<Code>(function->shared()->code());
+  }
+
+  CompilationInfoWithZone info(function);
+  Handle<Code> result = GetUnoptimizedCodeCommon(&info);
+  ASSERT_EQ(result.is_null(), info.isolate()->has_pending_exception());
+
+  if (FLAG_always_opt &&
+      !result.is_null() &&
+      info.isolate()->use_crankshaft() &&
+      !info.shared_info()->optimization_disabled() &&
+      !info.isolate()->DebuggerHasBreakPoints()) {
+    Handle<Code> opt_code = Compiler::GetOptimizedCode(
+        function, result, Compiler::NOT_CONCURRENT);
+    if (!opt_code.is_null()) result = opt_code;
+  }
+
+  return result;
+}
+
+
+Handle<Code> Compiler::GetUnoptimizedCode(Handle<SharedFunctionInfo> shared) {
+  ASSERT(!shared->GetIsolate()->has_pending_exception());
+  ASSERT(!shared->is_compiled());
+
+  CompilationInfoWithZone info(shared);
+  Handle<Code> result = GetUnoptimizedCodeCommon(&info);
+  ASSERT_EQ(result.is_null(), info.isolate()->has_pending_exception());
+  return result;
+}
+
+
+bool Compiler::EnsureCompiled(Handle<JSFunction> function,
+                              ClearExceptionFlag flag) {
+  if (function->is_compiled()) return true;
+  Handle<Code> code = Compiler::GetUnoptimizedCode(function);
+  if (code.is_null()) {
+    if (flag == CLEAR_EXCEPTION) {
+      function->GetIsolate()->clear_pending_exception();
+    }
+    return false;
+  }
+  function->ReplaceCode(*code);
+  ASSERT(function->is_compiled());
+  return true;
+}
+
+
+// Compile full code for debugging. This code will have debug break slots
+// and deoptimization information. Deoptimization information is required
+// in case an optimized version of this function is still activated on
+// the stack. It will also make sure that the full code is compiled with
+// the same flags as the previous version, that is, flags which can change
+// the generated code. The current method of mapping from already compiled
+// full code without debug break slots to full code with debug break slots
+// depends on the generated code being otherwise exactly the same.
+// If compilation fails, just keep the existing code.
+Handle<Code> Compiler::GetCodeForDebugging(Handle<JSFunction> function) {
+  CompilationInfoWithZone info(function);
+  Isolate* isolate = info.isolate();
+  VMState<COMPILER> state(isolate);
+
+  ASSERT(!isolate->has_pending_exception());
+  Handle<Code> old_code(function->shared()->code());
+  ASSERT(old_code->kind() == Code::FUNCTION);
+  ASSERT(!old_code->has_debug_break_slots());
+
+  info.MarkCompilingForDebugging();
+  if (old_code->is_compiled_optimizable()) {
+    info.EnableDeoptimizationSupport();
+  } else {
+    info.MarkNonOptimizable();
+  }
+  Handle<Code> new_code = GetUnoptimizedCodeCommon(&info);
+  if (new_code.is_null()) {
+    isolate->clear_pending_exception();
+  } else {
+    ASSERT_EQ(old_code->is_compiled_optimizable(),
+              new_code->is_compiled_optimizable());
+  }
+  return new_code;
+}
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+void Compiler::CompileForLiveEdit(Handle<Script> script) {
+  // TODO(635): support extensions.
+  CompilationInfoWithZone info(script);
+  PostponeInterruptsScope postpone(info.isolate());
+  VMState<COMPILER> state(info.isolate());
+
+  info.MarkAsGlobal();
+  if (!Parser::Parse(&info)) return;
+  LanguageMode language_mode = info.function()->language_mode();
+  info.SetLanguageMode(language_mode);
+
+  LiveEditFunctionTracker tracker(info.isolate(), info.function());
+  if (!CompileUnoptimizedCode(&info)) return;
+  if (!info.shared_info().is_null()) {
+    Handle<ScopeInfo> scope_info = ScopeInfo::Create(info.scope(),
+                                                     info.zone());
+    info.shared_info()->set_scope_info(*scope_info);
+  }
+  tracker.RecordRootFunctionInfo(info.code());
+}
+#endif
+
+
+static bool DebuggerWantsEagerCompilation(CompilationInfo* info,
+                                          bool allow_lazy_without_ctx = false) {
+  return LiveEditFunctionTracker::IsActive(info->isolate()) ||
+         (info->isolate()->DebuggerHasBreakPoints() && !allow_lazy_without_ctx);
+}
+
+
+static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
   Isolate* isolate = info->isolate();
   PostponeInterruptsScope postpone(isolate);
-
   ASSERT(!isolate->native_context().is_null());
   Handle<Script> script = info->script();
+
   // TODO(svenpanne) Obscure place for this, perhaps move to OnBeforeCompile?
   FixedArray* array = isolate->native_context()->embedder_data();
   script->set_context_data(array->get(0));
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-  if (info->is_eval()) {
-    script->set_compilation_type(Script::COMPILATION_TYPE_EVAL);
-    // For eval scripts add information on the function from which eval was
-    // called.
-    if (info->is_eval()) {
-      StackTraceFrameIterator it(isolate);
-      if (!it.done()) {
-        script->set_eval_from_shared(it.frame()->function()->shared());
-        Code* code = it.frame()->LookupCode();
-        int offset = static_cast<int>(
-            it.frame()->pc() - code->instruction_start());
-        script->set_eval_from_instructions_offset(Smi::FromInt(offset));
-      }
-    }
-  }
-
-  // Notify debugger
   isolate->debugger()->OnBeforeCompile(script);
 #endif
 
-  // Only allow non-global compiles for eval.
   ASSERT(info->is_eval() || info->is_global());
-  {
-    Parser parser(info);
-    if ((info->pre_parse_data() != NULL ||
-         String::cast(script->source())->length() > FLAG_min_preparse_length) &&
-        !DebuggerWantsEagerCompilation(info))
-      parser.set_allow_lazy(true);
-    if (!parser.Parse()) {
+
+  bool parse_allow_lazy =
+      (info->pre_parse_data() != NULL ||
+       String::cast(script->source())->length() > FLAG_min_preparse_length) &&
+      !DebuggerWantsEagerCompilation(info);
+
+  Handle<SharedFunctionInfo> result;
+
+  { VMState<COMPILER> state(info->isolate());
+    if (!Parser::Parse(info, parse_allow_lazy)) {
       return Handle<SharedFunctionInfo>::null();
     }
-  }
 
-  FunctionLiteral* lit = info->function();
-  LiveEditFunctionTracker live_edit_tracker(isolate, lit);
-  Handle<SharedFunctionInfo> result;
-  {
+    FunctionLiteral* lit = info->function();
+    LiveEditFunctionTracker live_edit_tracker(isolate, lit);
+
     // Measure how long it takes to do the compilation; only take the
     // rest of the function into account to avoid overlap with the
     // parsing statistics.
@@ -687,48 +793,32 @@
     HistogramTimerScope timer(rate);
 
     // Compile the code.
-    if (!MakeCode(info)) {
-      if (!isolate->has_pending_exception()) isolate->StackOverflow();
+    if (!CompileUnoptimizedCode(info)) {
       return Handle<SharedFunctionInfo>::null();
     }
 
     // Allocate function.
     ASSERT(!info->code().is_null());
-    result =
-        isolate->factory()->NewSharedFunctionInfo(
-            lit->name(),
-            lit->materialized_literal_count(),
-            lit->is_generator(),
-            info->code(),
-            ScopeInfo::Create(info->scope(), info->zone()));
+    result = isolate->factory()->NewSharedFunctionInfo(
+        lit->name(),
+        lit->materialized_literal_count(),
+        lit->is_generator(),
+        info->code(),
+        ScopeInfo::Create(info->scope(), info->zone()));
 
     ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
-    Compiler::SetFunctionInfo(result, lit, true, script);
+    SetFunctionInfo(result, lit, true, script);
 
-    if (script->name()->IsString()) {
-      PROFILE(isolate, CodeCreateEvent(
-          info->is_eval()
-          ? Logger::EVAL_TAG
-              : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
-                *info->code(),
-                *result,
-                info,
-                String::cast(script->name())));
-      GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
-                     script,
-                     info->code(),
-                     info));
-    } else {
-      PROFILE(isolate, CodeCreateEvent(
-          info->is_eval()
-          ? Logger::EVAL_TAG
-              : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
-                *info->code(),
-                *result,
-                info,
-                isolate->heap()->empty_string()));
-      GDBJIT(AddCode(Handle<String>(), script, info->code(), info));
-    }
+    Handle<String> script_name = script->name()->IsString()
+        ? Handle<String>(String::cast(script->name()))
+        : isolate->factory()->empty_string();
+    Logger::LogEventsAndTags log_tag = info->is_eval()
+        ? Logger::EVAL_TAG
+        : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script);
+
+    PROFILE(isolate, CodeCreateEvent(
+                log_tag, *info->code(), *result, info, *script_name));
+    GDBJIT(AddCode(script_name, script, info->code(), info));
 
     // Hint to the runtime system used when allocating space for initial
     // property space by setting the expected number of properties for
@@ -737,38 +827,91 @@
                                          lit->expected_property_count());
 
     script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
+
+    live_edit_tracker.RecordFunctionInfo(result, lit, info->zone());
   }
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-  // Notify debugger
-  isolate->debugger()->OnAfterCompile(
-      script, Debugger::NO_AFTER_COMPILE_FLAGS);
+  isolate->debugger()->OnAfterCompile(script, Debugger::NO_AFTER_COMPILE_FLAGS);
 #endif
 
-  live_edit_tracker.RecordFunctionInfo(result, lit, info->zone());
-
   return result;
 }
 
 
-Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
-                                             Handle<Object> script_name,
-                                             int line_offset,
-                                             int column_offset,
-                                             bool is_shared_cross_origin,
-                                             Handle<Context> context,
-                                             v8::Extension* extension,
-                                             ScriptDataImpl* pre_data,
-                                             Handle<Object> script_data,
-                                             NativesFlag natives) {
+Handle<JSFunction> Compiler::GetFunctionFromEval(Handle<String> source,
+                                                 Handle<Context> context,
+                                                 LanguageMode language_mode,
+                                                 ParseRestriction restriction,
+                                                 int scope_position) {
+  Isolate* isolate = source->GetIsolate();
+  int source_length = source->length();
+  isolate->counters()->total_eval_size()->Increment(source_length);
+  isolate->counters()->total_compile_size()->Increment(source_length);
+
+  CompilationCache* compilation_cache = isolate->compilation_cache();
+  Handle<SharedFunctionInfo> shared_info = compilation_cache->LookupEval(
+      source, context, language_mode, scope_position);
+
+  if (shared_info.is_null()) {
+    Handle<Script> script = isolate->factory()->NewScript(source);
+    CompilationInfoWithZone info(script);
+    info.MarkAsEval();
+    if (context->IsNativeContext()) info.MarkAsGlobal();
+    info.SetLanguageMode(language_mode);
+    info.SetParseRestriction(restriction);
+    info.SetContext(context);
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+    Debug::RecordEvalCaller(script);
+#endif  // ENABLE_DEBUGGER_SUPPORT
+
+    shared_info = CompileToplevel(&info);
+
+    if (shared_info.is_null()) {
+      return Handle<JSFunction>::null();
+    } else {
+      // Explicitly disable optimization for eval code. We're not yet prepared
+      // to handle eval-code in the optimizing compiler.
+      shared_info->DisableOptimization(kEval);
+
+      // If caller is strict mode, the result must be in strict mode or
+      // extended mode as well, but not the other way around. Consider:
+      // eval("'use strict'; ...");
+      ASSERT(language_mode != STRICT_MODE || !shared_info->is_classic_mode());
+      // If caller is in extended mode, the result must also be in
+      // extended mode.
+      ASSERT(language_mode != EXTENDED_MODE ||
+             shared_info->is_extended_mode());
+      if (!shared_info->dont_cache()) {
+        compilation_cache->PutEval(
+            source, context, shared_info, scope_position);
+      }
+    }
+  } else if (shared_info->ic_age() != isolate->heap()->global_ic_age()) {
+    shared_info->ResetForNewContext(isolate->heap()->global_ic_age());
+  }
+
+  return isolate->factory()->NewFunctionFromSharedFunctionInfo(
+      shared_info, context, NOT_TENURED);
+}
+
+
+Handle<SharedFunctionInfo> Compiler::CompileScript(Handle<String> source,
+                                                   Handle<Object> script_name,
+                                                   int line_offset,
+                                                   int column_offset,
+                                                   bool is_shared_cross_origin,
+                                                   Handle<Context> context,
+                                                   v8::Extension* extension,
+                                                   ScriptDataImpl* pre_data,
+                                                   Handle<Object> script_data,
+                                                   NativesFlag natives) {
   Isolate* isolate = source->GetIsolate();
   int source_length = source->length();
   isolate->counters()->total_load_size()->Increment(source_length);
   isolate->counters()->total_compile_size()->Increment(source_length);
 
-  // The VM is in the COMPILER state until exiting this function.
-  VMState<COMPILER> state(isolate);
-
   CompilationCache* compilation_cache = isolate->compilation_cache();
 
   // Do a lookup in the compilation cache but not for extensions.
@@ -816,14 +959,12 @@
     if (FLAG_use_strict) {
       info.SetLanguageMode(FLAG_harmony_scoping ? EXTENDED_MODE : STRICT_MODE);
     }
-    result = MakeFunctionInfo(&info);
+    result = CompileToplevel(&info);
     if (extension == NULL && !result.is_null() && !result->dont_cache()) {
       compilation_cache->PutScript(source, context, result);
     }
-  } else {
-    if (result->ic_age() != isolate->heap()->global_ic_age()) {
+  } else if (result->ic_age() != isolate->heap()->global_ic_age()) {
       result->ResetForNewContext(isolate->heap()->global_ic_age());
-    }
   }
 
   if (result.is_null()) isolate->ReportPendingMessages();
@@ -831,372 +972,6 @@
 }
 
 
-Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
-                                                 Handle<Context> context,
-                                                 bool is_global,
-                                                 LanguageMode language_mode,
-                                                 ParseRestriction restriction,
-                                                 int scope_position) {
-  Isolate* isolate = source->GetIsolate();
-  int source_length = source->length();
-  isolate->counters()->total_eval_size()->Increment(source_length);
-  isolate->counters()->total_compile_size()->Increment(source_length);
-
-  // The VM is in the COMPILER state until exiting this function.
-  VMState<COMPILER> state(isolate);
-
-  // Do a lookup in the compilation cache; if the entry is not there, invoke
-  // the compiler and add the result to the cache.
-  Handle<SharedFunctionInfo> result;
-  CompilationCache* compilation_cache = isolate->compilation_cache();
-  result = compilation_cache->LookupEval(source,
-                                         context,
-                                         is_global,
-                                         language_mode,
-                                         scope_position);
-
-  if (result.is_null()) {
-    // Create a script object describing the script to be compiled.
-    Handle<Script> script = isolate->factory()->NewScript(source);
-    CompilationInfoWithZone info(script);
-    info.MarkAsEval();
-    if (is_global) info.MarkAsGlobal();
-    info.SetLanguageMode(language_mode);
-    info.SetParseRestriction(restriction);
-    info.SetContext(context);
-    result = MakeFunctionInfo(&info);
-    if (!result.is_null()) {
-      // Explicitly disable optimization for eval code. We're not yet prepared
-      // to handle eval-code in the optimizing compiler.
-      result->DisableOptimization(kEval);
-
-      // If caller is strict mode, the result must be in strict mode or
-      // extended mode as well, but not the other way around. Consider:
-      // eval("'use strict'; ...");
-      ASSERT(language_mode != STRICT_MODE || !result->is_classic_mode());
-      // If caller is in extended mode, the result must also be in
-      // extended mode.
-      ASSERT(language_mode != EXTENDED_MODE ||
-             result->is_extended_mode());
-      if (!result->dont_cache()) {
-        compilation_cache->PutEval(
-            source, context, is_global, result, scope_position);
-      }
-    }
-  } else {
-    if (result->ic_age() != isolate->heap()->global_ic_age()) {
-      result->ResetForNewContext(isolate->heap()->global_ic_age());
-    }
-  }
-
-  return result;
-}
-
-
-static bool InstallFullCode(CompilationInfo* info) {
-  // Update the shared function info with the compiled code and the
-  // scope info.  Please note, that the order of the shared function
-  // info initialization is important since set_scope_info might
-  // trigger a GC, causing the ASSERT below to be invalid if the code
-  // was flushed. By setting the code object last we avoid this.
-  Handle<SharedFunctionInfo> shared = info->shared_info();
-  Handle<Code> code = info->code();
-  CHECK(code->kind() == Code::FUNCTION);
-  Handle<JSFunction> function = info->closure();
-  Handle<ScopeInfo> scope_info =
-      ScopeInfo::Create(info->scope(), info->zone());
-  shared->set_scope_info(*scope_info);
-  shared->ReplaceCode(*code);
-  if (!function.is_null()) {
-    function->ReplaceCode(*code);
-    ASSERT(!function->IsOptimized());
-  }
-
-  // Set the expected number of properties for instances.
-  FunctionLiteral* lit = info->function();
-  int expected = lit->expected_property_count();
-  SetExpectedNofPropertiesFromEstimate(shared, expected);
-
-  // Check the function has compiled code.
-  ASSERT(shared->is_compiled());
-  shared->set_dont_optimize_reason(lit->dont_optimize_reason());
-  shared->set_dont_inline(lit->flags()->Contains(kDontInline));
-  shared->set_ast_node_count(lit->ast_node_count());
-
-  if (info->isolate()->use_crankshaft() &&
-      !function.is_null() &&
-      !shared->optimization_disabled()) {
-    // If we're asked to always optimize, we compile the optimized
-    // version of the function right away - unless the debugger is
-    // active as it makes no sense to compile optimized code then.
-    if (FLAG_always_opt &&
-        !info->isolate()->DebuggerHasBreakPoints()) {
-      CompilationInfoWithZone optimized(function);
-      optimized.SetOptimizing(BailoutId::None());
-      return Compiler::CompileLazy(&optimized);
-    }
-  }
-  return true;
-}
-
-
-static void InstallCodeCommon(CompilationInfo* info) {
-  Handle<SharedFunctionInfo> shared = info->shared_info();
-  Handle<Code> code = info->code();
-  ASSERT(!code.is_null());
-
-  // Set optimizable to false if this is disallowed by the shared
-  // function info, e.g., we might have flushed the code and must
-  // reset this bit when lazy compiling the code again.
-  if (shared->optimization_disabled()) code->set_optimizable(false);
-
-  if (shared->code() == *code) {
-    // Do not send compilation event for the same code twice.
-    return;
-  }
-  Compiler::RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
-}
-
-
-static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
-  Handle<Code> code = info->code();
-  if (code->kind() != Code::OPTIMIZED_FUNCTION) return;  // Nothing to do.
-
-  // Cache non-OSR optimized code.
-  if (FLAG_cache_optimized_code && !info->is_osr()) {
-    Handle<JSFunction> function = info->closure();
-    Handle<SharedFunctionInfo> shared(function->shared());
-    Handle<FixedArray> literals(function->literals());
-    Handle<Context> native_context(function->context()->native_context());
-    SharedFunctionInfo::AddToOptimizedCodeMap(
-        shared, native_context, code, literals);
-  }
-}
-
-
-static bool InstallCodeFromOptimizedCodeMap(CompilationInfo* info) {
-  if (!info->IsOptimizing()) return false;  // Nothing to look up.
-
-  // Lookup non-OSR optimized code.
-  if (FLAG_cache_optimized_code && !info->is_osr()) {
-    Handle<SharedFunctionInfo> shared = info->shared_info();
-    Handle<JSFunction> function = info->closure();
-    ASSERT(!function.is_null());
-    Handle<Context> native_context(function->context()->native_context());
-    int index = shared->SearchOptimizedCodeMap(*native_context);
-    if (index > 0) {
-      if (FLAG_trace_opt) {
-        PrintF("[found optimized code for ");
-        function->ShortPrint();
-        PrintF("]\n");
-      }
-      // Caching of optimized code enabled and optimized code found.
-      shared->InstallFromOptimizedCodeMap(*function, index);
-      return true;
-    }
-  }
-  return false;
-}
-
-
-bool Compiler::CompileLazy(CompilationInfo* info) {
-  Isolate* isolate = info->isolate();
-
-  // The VM is in the COMPILER state until exiting this function.
-  VMState<COMPILER> state(isolate);
-
-  PostponeInterruptsScope postpone(isolate);
-
-  Handle<SharedFunctionInfo> shared = info->shared_info();
-  int compiled_size = shared->end_position() - shared->start_position();
-  isolate->counters()->total_compile_size()->Increment(compiled_size);
-
-  if (InstallCodeFromOptimizedCodeMap(info)) return true;
-
-  // Generate the AST for the lazily compiled function.
-  if (Parser::Parse(info)) {
-    // Measure how long it takes to do the lazy compilation; only take the
-    // rest of the function into account to avoid overlap with the lazy
-    // parsing statistics.
-    HistogramTimerScope timer(isolate->counters()->compile_lazy());
-
-    // After parsing we know the function's language mode. Remember it.
-    LanguageMode language_mode = info->function()->language_mode();
-    info->SetLanguageMode(language_mode);
-    shared->set_language_mode(language_mode);
-
-    // Compile the code.
-    if (!MakeCode(info)) {
-      if (!isolate->has_pending_exception()) {
-        isolate->StackOverflow();
-      }
-    } else {
-      InstallCodeCommon(info);
-
-      if (info->IsOptimizing()) {
-        // Optimized code successfully created.
-        Handle<Code> code = info->code();
-        ASSERT(shared->scope_info() != ScopeInfo::Empty(isolate));
-        // TODO(titzer): Only replace the code if it was not an OSR compile.
-        info->closure()->ReplaceCode(*code);
-        InsertCodeIntoOptimizedCodeMap(info);
-        return true;
-      } else if (!info->is_osr()) {
-        // Compilation failed. Replace with full code if not OSR compile.
-        return InstallFullCode(info);
-      }
-    }
-  }
-
-  ASSERT(info->code().is_null());
-  return false;
-}
-
-
-bool Compiler::RecompileConcurrent(Handle<JSFunction> closure,
-                                   Handle<Code> unoptimized,
-                                   uint32_t osr_pc_offset) {
-  bool compiling_for_osr = (osr_pc_offset != 0);
-
-  Isolate* isolate = closure->GetIsolate();
-  // Here we prepare compile data for the concurrent recompilation thread, but
-  // this still happens synchronously and interrupts execution.
-  Logger::TimerEventScope timer(
-      isolate, Logger::TimerEventScope::v8_recompile_synchronous);
-
-  if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
-    if (FLAG_trace_concurrent_recompilation) {
-      PrintF("  ** Compilation queue full, will retry optimizing ");
-      closure->PrintName();
-      PrintF(" on next run.\n");
-    }
-    return false;
-  }
-
-  SmartPointer<CompilationInfo> info(new CompilationInfoWithZone(closure));
-  Handle<SharedFunctionInfo> shared = info->shared_info();
-
-  if (compiling_for_osr) {
-    BailoutId osr_ast_id = unoptimized->TranslatePcOffsetToAstId(osr_pc_offset);
-    ASSERT(!osr_ast_id.IsNone());
-    info->SetOptimizing(osr_ast_id);
-    info->SetOsrInfo(unoptimized, osr_pc_offset);
-
-    if (FLAG_trace_osr) {
-      PrintF("[COSR - attempt to queue ");
-      closure->PrintName();
-      PrintF(" at AST id %d]\n", osr_ast_id.ToInt());
-    }
-  } else {
-    info->SetOptimizing(BailoutId::None());
-  }
-
-  VMState<COMPILER> state(isolate);
-  PostponeInterruptsScope postpone(isolate);
-
-  int compiled_size = shared->end_position() - shared->start_position();
-  isolate->counters()->total_compile_size()->Increment(compiled_size);
-
-  {
-    CompilationHandleScope handle_scope(info.get());
-
-    if (!compiling_for_osr && InstallCodeFromOptimizedCodeMap(info.get())) {
-      return true;
-    }
-
-    if (Parser::Parse(info.get())) {
-      LanguageMode language_mode = info->function()->language_mode();
-      info->SetLanguageMode(language_mode);
-      shared->set_language_mode(language_mode);
-      info->SaveHandles();
-
-      if (Rewriter::Rewrite(info.get()) && Scope::Analyze(info.get())) {
-        RecompileJob* job = new(info->zone()) RecompileJob(info.get());
-        RecompileJob::Status status = job->CreateGraph();
-        if (status == RecompileJob::SUCCEEDED) {
-          info.Detach();
-          unoptimized->set_profiler_ticks(0);
-          isolate->optimizing_compiler_thread()->QueueForOptimization(job);
-          ASSERT(!isolate->has_pending_exception());
-          return true;
-        } else if (status == RecompileJob::BAILED_OUT) {
-          isolate->clear_pending_exception();
-          InstallFullCode(info.get());
-        }
-      }
-    }
-  }
-
-  if (isolate->has_pending_exception()) isolate->clear_pending_exception();
-  return false;
-}
-
-
-Handle<Code> Compiler::InstallOptimizedCode(RecompileJob* job) {
-  SmartPointer<CompilationInfo> info(job->info());
-  // The function may have already been optimized by OSR.  Simply continue.
-  // Except when OSR already disabled optimization for some reason.
-  if (info->shared_info()->optimization_disabled()) {
-    info->AbortOptimization();
-    InstallFullCode(info.get());
-    if (FLAG_trace_concurrent_recompilation) {
-      PrintF("  ** aborting optimization for ");
-      info->closure()->PrintName();
-      PrintF(" as it has been disabled.\n");
-    }
-    ASSERT(!info->closure()->IsInRecompileQueue());
-    return Handle<Code>::null();
-  }
-
-  Isolate* isolate = info->isolate();
-  VMState<COMPILER> state(isolate);
-  Logger::TimerEventScope timer(
-      isolate, Logger::TimerEventScope::v8_recompile_synchronous);
-  // If crankshaft succeeded, install the optimized code else install
-  // the unoptimized code.
-  RecompileJob::Status status = job->last_status();
-  if (info->HasAbortedDueToDependencyChange()) {
-    info->set_bailout_reason(kBailedOutDueToDependencyChange);
-    status = job->AbortOptimization();
-  } else if (status != RecompileJob::SUCCEEDED) {
-    info->set_bailout_reason(kFailedBailedOutLastTime);
-    status = job->AbortOptimization();
-  } else if (isolate->DebuggerHasBreakPoints()) {
-    info->set_bailout_reason(kDebuggerIsActive);
-    status = job->AbortOptimization();
-  } else {
-    status = job->GenerateAndInstallCode();
-    ASSERT(status == RecompileJob::SUCCEEDED ||
-           status == RecompileJob::BAILED_OUT);
-  }
-
-  InstallCodeCommon(info.get());
-  if (status == RecompileJob::SUCCEEDED) {
-    Handle<Code> code = info->code();
-    ASSERT(info->shared_info()->scope_info() != ScopeInfo::Empty(isolate));
-    info->closure()->ReplaceCode(*code);
-    if (info->shared_info()->SearchOptimizedCodeMap(
-            info->closure()->context()->native_context()) == -1) {
-      InsertCodeIntoOptimizedCodeMap(info.get());
-    }
-    if (FLAG_trace_concurrent_recompilation) {
-      PrintF("  ** Optimized code for ");
-      info->closure()->PrintName();
-      PrintF(" installed.\n");
-    }
-  } else {
-    info->AbortOptimization();
-    InstallFullCode(info.get());
-  }
-  // Optimized code is finally replacing unoptimized code.  Reset the latter's
-  // profiler ticks to prevent too soon re-opt after a deopt.
-  info->shared_info()->code()->set_profiler_ticks(0);
-  ASSERT(!info->closure()->IsInRecompileQueue());
-  return (status == RecompileJob::SUCCEEDED) ? info->code()
-                                             : Handle<Code>::null();
-}
-
-
 Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
                                                        Handle<Script> script) {
   // Precondition: code has been parsed and scopes have been analyzed.
@@ -1221,13 +996,13 @@
   bool allow_lazy = literal->AllowsLazyCompilation() &&
       !DebuggerWantsEagerCompilation(&info, allow_lazy_without_ctx);
 
-  Handle<ScopeInfo> scope_info(ScopeInfo::Empty(isolate));
-
   // Generate code
+  Handle<ScopeInfo> scope_info;
   if (FLAG_lazy && allow_lazy && !literal->is_parenthesized()) {
-    Handle<Code> code = isolate->builtins()->LazyCompile();
+    Handle<Code> code = isolate->builtins()->CompileUnoptimized();
     info.SetCode(code);
-  } else if (GenerateCode(&info)) {
+    scope_info = Handle<ScopeInfo>(ScopeInfo::Empty(isolate));
+  } else if (FullCodeGenerator::MakeCode(&info)) {
     ASSERT(!info.code().is_null());
     scope_info = ScopeInfo::Create(info.scope(), info.zone());
   } else {
@@ -1255,36 +1030,201 @@
 }
 
 
-// Sets the function info on a function.
-// The start_position points to the first '(' character after the function name
-// in the full script source. When counting characters in the script source the
-// the first character is number 0 (not 1).
-void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
-                               FunctionLiteral* lit,
-                               bool is_toplevel,
-                               Handle<Script> script) {
-  function_info->set_length(lit->parameter_count());
-  function_info->set_formal_parameter_count(lit->parameter_count());
-  function_info->set_script(*script);
-  function_info->set_function_token_position(lit->function_token_position());
-  function_info->set_start_position(lit->start_position());
-  function_info->set_end_position(lit->end_position());
-  function_info->set_is_expression(lit->is_expression());
-  function_info->set_is_anonymous(lit->is_anonymous());
-  function_info->set_is_toplevel(is_toplevel);
-  function_info->set_inferred_name(*lit->inferred_name());
-  function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
-  function_info->set_allows_lazy_compilation_without_context(
-      lit->AllowsLazyCompilationWithoutContext());
-  function_info->set_language_mode(lit->language_mode());
-  function_info->set_uses_arguments(lit->scope()->arguments() != NULL);
-  function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
-  function_info->set_ast_node_count(lit->ast_node_count());
-  function_info->set_is_function(lit->is_function());
-  function_info->set_dont_optimize_reason(lit->dont_optimize_reason());
-  function_info->set_dont_inline(lit->flags()->Contains(kDontInline));
-  function_info->set_dont_cache(lit->flags()->Contains(kDontCache));
-  function_info->set_is_generator(lit->is_generator());
+static Handle<Code> GetCodeFromOptimizedCodeMap(Handle<JSFunction> function,
+                                                BailoutId osr_ast_id) {
+  if (FLAG_cache_optimized_code) {
+    Handle<SharedFunctionInfo> shared(function->shared());
+    DisallowHeapAllocation no_gc;
+    int index = shared->SearchOptimizedCodeMap(
+        function->context()->native_context(), osr_ast_id);
+    if (index > 0) {
+      if (FLAG_trace_opt) {
+        PrintF("[found optimized code for ");
+        function->ShortPrint();
+        if (!osr_ast_id.IsNone()) {
+          PrintF(" at OSR AST id %d", osr_ast_id.ToInt());
+        }
+        PrintF("]\n");
+      }
+      FixedArray* literals = shared->GetLiteralsFromOptimizedCodeMap(index);
+      if (literals != NULL) function->set_literals(literals);
+      return Handle<Code>(shared->GetCodeFromOptimizedCodeMap(index));
+    }
+  }
+  return Handle<Code>::null();
+}
+
+
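GetCodeFromOptimizedCodeMap keys cached code on the pair (native context, OSR AST id), so OSR code compiled for one loop entry is never handed out for a regular invocation. A minimal standalone sketch of that two-part keying, with std::map and placeholder typedefs standing in for V8's context, code, and code-map machinery (none of these toy names exist in the patch):

    #include <cstdint>
    #include <map>
    #include <utility>

    // Placeholders for v8::internal's Context* and Code*.
    typedef uintptr_t ContextId;
    typedef int CodeId;
    const int kNoOsrAstId = -1;  // plays the role of BailoutId::None()

    class ToyOptimizedCodeMap {
     public:
      void Add(ContextId context, int osr_ast_id, CodeId code) {
        map_[std::make_pair(context, osr_ast_id)] = code;
      }
      // Returns 0 when no entry exists, mirroring the 'index > 0' check above.
      CodeId Lookup(ContextId context, int osr_ast_id) const {
        std::map<std::pair<ContextId, int>, CodeId>::const_iterator it =
            map_.find(std::make_pair(context, osr_ast_id));
        return it == map_.end() ? 0 : it->second;
      }
     private:
      std::map<std::pair<ContextId, int>, CodeId> map_;
    };

    int main() {
      ToyOptimizedCodeMap map;
      map.Add(/* context */ 1, kNoOsrAstId, /* code */ 42);         // regular
      map.Add(/* context */ 1, /* osr_ast_id */ 7, /* code */ 43);  // OSR
      // A non-OSR request must not pick up the OSR-compiled code.
      return (map.Lookup(1, kNoOsrAstId) == 42 &&
              map.Lookup(1, 7) == 43) ? 0 : 1;
    }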
+static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
+  Handle<Code> code = info->code();
+  if (code->kind() != Code::OPTIMIZED_FUNCTION) return;  // Nothing to do.
+
+  // Cache optimized code.
+  if (FLAG_cache_optimized_code) {
+    Handle<JSFunction> function = info->closure();
+    Handle<SharedFunctionInfo> shared(function->shared());
+    Handle<FixedArray> literals(function->literals());
+    Handle<Context> native_context(function->context()->native_context());
+    SharedFunctionInfo::AddToOptimizedCodeMap(
+        shared, native_context, code, literals, info->osr_ast_id());
+  }
+}
+
+
+static bool CompileOptimizedPrologue(CompilationInfo* info) {
+  if (!Parser::Parse(info)) return false;
+  LanguageMode language_mode = info->function()->language_mode();
+  info->SetLanguageMode(language_mode);
+
+  if (!Rewriter::Rewrite(info)) return false;
+  if (!Scope::Analyze(info)) return false;
+  ASSERT(info->scope() != NULL);
+  return true;
+}
+
+
+static bool GetOptimizedCodeNow(CompilationInfo* info) {
+  if (!CompileOptimizedPrologue(info)) return false;
+
+  Logger::TimerEventScope timer(
+      info->isolate(), Logger::TimerEventScope::v8_recompile_synchronous);
+
+  OptimizedCompileJob job(info);
+  if (job.CreateGraph() != OptimizedCompileJob::SUCCEEDED) return false;
+  if (job.OptimizeGraph() != OptimizedCompileJob::SUCCEEDED) return false;
+  if (job.GenerateCode() != OptimizedCompileJob::SUCCEEDED) return false;
+
+  // Success!
+  ASSERT(!info->isolate()->has_pending_exception());
+  InsertCodeIntoOptimizedCodeMap(info);
+  Compiler::RecordFunctionCompilation(
+      Logger::LAZY_COMPILE_TAG, info, info->shared_info());
+  return true;
+}
+
+
+static bool GetOptimizedCodeLater(CompilationInfo* info) {
+  Isolate* isolate = info->isolate();
+  if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
+    if (FLAG_trace_concurrent_recompilation) {
+      PrintF("  ** Compilation queue full, will retry optimizing ");
+      info->closure()->PrintName();
+      PrintF(" later.\n");
+    }
+    return false;
+  }
+
+  CompilationHandleScope handle_scope(info);
+  if (!CompileOptimizedPrologue(info)) return false;
+  info->SaveHandles();  // Copy handles to the compilation handle scope.
+
+  Logger::TimerEventScope timer(
+      isolate, Logger::TimerEventScope::v8_recompile_synchronous);
+
+  OptimizedCompileJob* job = new(info->zone()) OptimizedCompileJob(info);
+  OptimizedCompileJob::Status status = job->CreateGraph();
+  if (status != OptimizedCompileJob::SUCCEEDED) return false;
+  isolate->optimizing_compiler_thread()->QueueForOptimization(job);
+
+  if (FLAG_trace_concurrent_recompilation) {
+    PrintF("  ** Queued ");
+    info->closure()->PrintName();
+    if (info->is_osr()) {
+      PrintF(" for concurrent OSR at %d.\n", info->osr_ast_id().ToInt());
+    } else {
+      PrintF(" for concurrent optimization.\n");
+    }
+  }
+  return true;
+}
+
+
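GetOptimizedCodeNow drives all three job phases back to back on the main thread, while GetOptimizedCodeLater runs only CreateGraph before queueing the job, leaving OptimizeGraph to the optimizing thread and GenerateCode to a later main-thread step. A toy job illustrating the phase ordering both paths must respect (a sketch, not the real OptimizedCompileJob):

    // Toy three-phase compile job: each phase runs once, in order, and only
    // after the previous phase has succeeded.
    class ToyJob {
     public:
      enum Status { FAILED, SUCCEEDED };
      ToyJob() : next_phase_(0) {}
      Status CreateGraph()   { return Advance(0); }  // main thread
      Status OptimizeGraph() { return Advance(1); }  // may run off-thread
      Status GenerateCode()  { return Advance(2); }  // main thread again
     private:
      Status Advance(int phase) {
        if (phase != next_phase_) return FAILED;
        next_phase_++;
        return SUCCEEDED;
      }
      int next_phase_;
    };

    int main() {
      // Synchronous path (GetOptimizedCodeNow): all phases back to back.
      ToyJob now;
      bool sync_ok = now.CreateGraph() == ToyJob::SUCCEEDED &&
                     now.OptimizeGraph() == ToyJob::SUCCEEDED &&
                     now.GenerateCode() == ToyJob::SUCCEEDED;

      // Concurrent path: graph first, then the job would be queued; code
      // generation happens later via GetConcurrentlyOptimizedCode.
      ToyJob later;
      bool graph_ok = later.CreateGraph() == ToyJob::SUCCEEDED;
      bool no_skip  = later.GenerateCode() == ToyJob::FAILED;  // phase skipped
      bool rest_ok  = later.OptimizeGraph() == ToyJob::SUCCEEDED &&
                      later.GenerateCode() == ToyJob::SUCCEEDED;
      return (sync_ok && graph_ok && no_skip && rest_ok) ? 0 : 1;
    }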
+Handle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
+                                        Handle<Code> current_code,
+                                        ConcurrencyMode mode,
+                                        BailoutId osr_ast_id) {
+  Handle<Code> cached_code = GetCodeFromOptimizedCodeMap(function, osr_ast_id);
+  if (!cached_code.is_null()) return cached_code;
+
+  SmartPointer<CompilationInfo> info(new CompilationInfoWithZone(function));
+  Isolate* isolate = info->isolate();
+  VMState<COMPILER> state(isolate);
+  ASSERT(!isolate->has_pending_exception());
+  PostponeInterruptsScope postpone(isolate);
+
+  Handle<SharedFunctionInfo> shared = info->shared_info();
+  ASSERT_NE(ScopeInfo::Empty(isolate), shared->scope_info());
+  int compiled_size = shared->end_position() - shared->start_position();
+  isolate->counters()->total_compile_size()->Increment(compiled_size);
+  current_code->set_profiler_ticks(0);
+
+  info->SetOptimizing(osr_ast_id, current_code);
+
+  if (mode == CONCURRENT) {
+    if (GetOptimizedCodeLater(info.get())) {
+      info.Detach();  // The background recompile job owns this now.
+      return isolate->builtins()->InOptimizationQueue();
+    }
+  } else {
+    if (GetOptimizedCodeNow(info.get())) return info->code();
+  }
+
+  // Failed.
+  if (FLAG_trace_opt) {
+    PrintF("[failed to optimize ");
+    function->PrintName();
+    PrintF("]\n");
+  }
+
+  if (isolate->has_pending_exception()) isolate->clear_pending_exception();
+  return Handle<Code>::null();
+}
+
+
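GetOptimizedCode thus has three distinct outcomes a caller must tell apart: real optimized code, the InOptimizationQueue builtin, or the empty handle. A hedged sketch of that calling convention, with a shared_ptr sentinel standing in for handles (the caller and all names here are hypothetical):

    #include <iostream>
    #include <memory>
    #include <string>

    typedef std::shared_ptr<std::string> CodePtr;  // null == empty handle

    // Sentinel playing the role of the InOptimizationQueue builtin.
    static const CodePtr kInOptimizationQueue(
        new std::string("InOptimizationQueue"));

    enum ConcurrencyMode { NOT_CONCURRENT, CONCURRENT };

    // Stand-in for Compiler::GetOptimizedCode's result contract.
    CodePtr GetOptimizedCodeSketch(ConcurrencyMode mode, bool fails) {
      if (fails) return CodePtr();                          // bailout
      if (mode == CONCURRENT) return kInOptimizationQueue;  // job queued
      return CodePtr(new std::string("optimized"));         // sync success
    }

    int main() {
      CodePtr queued = GetOptimizedCodeSketch(CONCURRENT, false);
      if (queued == kInOptimizationQueue)
        std::cout << "queued: install marker, retry when the job finishes\n";

      CodePtr failed = GetOptimizedCodeSketch(NOT_CONCURRENT, true);
      if (!failed)
        std::cout << "empty handle: keep running unoptimized code\n";

      CodePtr code = GetOptimizedCodeSketch(NOT_CONCURRENT, false);
      if (code && code != kInOptimizationQueue)
        std::cout << "install " << *code << "\n";
      return 0;
    }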
+Handle<Code> Compiler::GetConcurrentlyOptimizedCode(OptimizedCompileJob* job) {
+  // Take ownership of compilation info.  Deleting compilation info
+  // also tears down the zone and the recompile job.
+  SmartPointer<CompilationInfo> info(job->info());
+  Isolate* isolate = info->isolate();
+
+  VMState<COMPILER> state(isolate);
+  Logger::TimerEventScope timer(
+      isolate, Logger::TimerEventScope::v8_recompile_synchronous);
+
+  Handle<SharedFunctionInfo> shared = info->shared_info();
+  shared->code()->set_profiler_ticks(0);
+
+  // 1) Optimization may have failed.
+  // 2) The function may have already been optimized by OSR.  Simply continue.
+  //    Except when OSR already disabled optimization for some reason.
+  // 3) The code may have already been invalidated due to dependency change.
+  // 4) Debugger may have been activated.
+
+  if (job->last_status() != OptimizedCompileJob::SUCCEEDED ||
+      shared->optimization_disabled() ||
+      info->HasAbortedDueToDependencyChange() ||
+      isolate->DebuggerHasBreakPoints()) {
+    return Handle<Code>::null();
+  }
+
+  if (job->GenerateCode() != OptimizedCompileJob::SUCCEEDED) {
+    return Handle<Code>::null();
+  }
+
+  Compiler::RecordFunctionCompilation(
+      Logger::LAZY_COMPILE_TAG, info.get(), shared);
+  if (info->shared_info()->SearchOptimizedCodeMap(
+          info->context()->native_context(), info->osr_ast_id()) == -1) {
+    InsertCodeIntoOptimizedCodeMap(info.get());
+  }
+
+  if (FLAG_trace_concurrent_recompilation) {
+    PrintF("  ** Optimized code for ");
+    info->closure()->PrintName();
+    PrintF(" generated.\n");
+  }
+
+  return Handle<Code>(*info->code());
 }
 
 
@@ -1301,31 +1241,18 @@
       info->isolate()->cpu_profiler()->is_profiling()) {
     Handle<Script> script = info->script();
     Handle<Code> code = info->code();
-    if (*code == info->isolate()->builtins()->builtin(Builtins::kLazyCompile))
+    if (code.is_identical_to(info->isolate()->builtins()->CompileUnoptimized()))
       return;
     int line_num = GetScriptLineNumber(script, shared->start_position()) + 1;
     int column_num =
         GetScriptColumnNumber(script, shared->start_position()) + 1;
     USE(line_num);
-    if (script->name()->IsString()) {
-      PROFILE(info->isolate(),
-              CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
-                              *code,
-                              *shared,
-                              info,
-                              String::cast(script->name()),
-                              line_num,
-                              column_num));
-    } else {
-      PROFILE(info->isolate(),
-              CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
-                              *code,
-                              *shared,
-                              info,
-                              info->isolate()->heap()->empty_string(),
-                              line_num,
-                              column_num));
-    }
+    String* script_name = script->name()->IsString()
+        ? String::cast(script->name())
+        : info->isolate()->heap()->empty_string();
+    Logger::LogEventsAndTags log_tag = Logger::ToNativeByScript(tag, *script);
+    PROFILE(info->isolate(), CodeCreateEvent(
+        log_tag, *code, *shared, info, script_name, line_num, column_num));
   }
 
   GDBJIT(AddCode(Handle<String>(shared->DebugName()),
diff --git a/src/compiler.h b/src/compiler.h
index 7599c13..4d7c1a2 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -84,8 +84,7 @@
   ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
   Handle<Context> context() const { return context_; }
   BailoutId osr_ast_id() const { return osr_ast_id_; }
-  uint32_t osr_pc_offset() const { return osr_pc_offset_; }
-  Handle<Code> osr_patched_code() const { return osr_patched_code_; }
+  Handle<Code> unoptimized_code() const { return unoptimized_code_; }
   int opt_count() const { return opt_count_; }
   int num_parameters() const;
   int num_heap_slots() const;
@@ -189,19 +188,16 @@
   void SetContext(Handle<Context> context) {
     context_ = context;
   }
-  void MarkCompilingForDebugging(Handle<Code> current_code) {
-    ASSERT(mode_ != OPTIMIZE);
-    ASSERT(current_code->kind() == Code::FUNCTION);
+
+  void MarkCompilingForDebugging() {
     flags_ |= IsCompilingForDebugging::encode(true);
-    if (current_code->is_compiled_optimizable()) {
-      EnableDeoptimizationSupport();
-    } else {
-      mode_ = CompilationInfo::NONOPT;
-    }
   }
   bool IsCompilingForDebugging() {
     return IsCompilingForDebugging::decode(flags_);
   }
+  void MarkNonOptimizable() {
+    SetMode(CompilationInfo::NONOPT);
+  }
 
   bool ShouldTrapOnDeopt() const {
     return (FLAG_trap_on_deopt && IsOptimizing()) ||
@@ -221,9 +217,11 @@
   bool IsOptimizing() const { return mode_ == OPTIMIZE; }
   bool IsOptimizable() const { return mode_ == BASE; }
   bool IsStub() const { return mode_ == STUB; }
-  void SetOptimizing(BailoutId osr_ast_id) {
+  void SetOptimizing(BailoutId osr_ast_id, Handle<Code> unoptimized) {
+    ASSERT(!shared_info_.is_null());
     SetMode(OPTIMIZE);
     osr_ast_id_ = osr_ast_id;
+    unoptimized_code_ = unoptimized;
   }
   void DisableOptimization();
 
@@ -239,11 +237,6 @@
   // Determines whether or not to insert a self-optimization header.
   bool ShouldSelfOptimize();
 
-  // Reset code to the unoptimized version when optimization is aborted.
-  void AbortOptimization() {
-    SetCode(handle(shared_info()->code()));
-  }
-
   void set_deferred_handles(DeferredHandles* deferred_handles) {
     ASSERT(deferred_handles_ == NULL);
     deferred_handles_ = deferred_handles;
@@ -266,7 +259,7 @@
     SaveHandle(&shared_info_);
     SaveHandle(&context_);
     SaveHandle(&script_);
-    SaveHandle(&osr_patched_code_);
+    SaveHandle(&unoptimized_code_);
   }
 
   BailoutReason bailout_reason() const { return bailout_reason_; }
@@ -313,13 +306,8 @@
     return abort_due_to_dependency_;
   }
 
-  void SetOsrInfo(Handle<Code> code, uint32_t pc_offset) {
-    osr_patched_code_ = code;
-    osr_pc_offset_ = pc_offset;
-  }
-
-  bool HasSameOsrEntry(Handle<JSFunction> function, uint32_t pc_offset) {
-    return osr_pc_offset_ == pc_offset && function.is_identical_to(closure_);
+  bool HasSameOsrEntry(Handle<JSFunction> function, BailoutId osr_ast_id) {
+    return osr_ast_id_ == osr_ast_id && function.is_identical_to(closure_);
   }
 
  protected:
@@ -416,13 +404,10 @@
   // Compilation mode flag and whether deoptimization is allowed.
   Mode mode_;
   BailoutId osr_ast_id_;
-  // The pc_offset corresponding to osr_ast_id_ in unoptimized code.
-  // We can look this up in the back edge table, but cache it for quick access.
-  uint32_t osr_pc_offset_;
   // The unoptimized code we patched for OSR may not be the shared code
   // afterwards, since we may need to compile it again to include deoptimization
   // data.  Keep track which code we patched.
-  Handle<Code> osr_patched_code_;
+  Handle<Code> unoptimized_code_;
 
   // Flag whether compilation needs to be aborted due to dependency change.
   bool abort_due_to_dependency_;
@@ -518,9 +503,9 @@
 // fail, bail-out to the full code generator or succeed.  Apart from
 // their return value, the status of the phase last run can be checked
 // using last_status().
-class RecompileJob: public ZoneObject {
+class OptimizedCompileJob: public ZoneObject {
  public:
-  explicit RecompileJob(CompilationInfo* info)
+  explicit OptimizedCompileJob(CompilationInfo* info)
       : info_(info),
         graph_builder_(NULL),
         graph_(NULL),
@@ -534,14 +519,21 @@
 
   MUST_USE_RESULT Status CreateGraph();
   MUST_USE_RESULT Status OptimizeGraph();
-  MUST_USE_RESULT Status GenerateAndInstallCode();
+  MUST_USE_RESULT Status GenerateCode();
 
   Status last_status() const { return last_status_; }
   CompilationInfo* info() const { return info_; }
   Isolate* isolate() const { return info()->isolate(); }
 
-  MUST_USE_RESULT Status AbortOptimization() {
-    info_->AbortOptimization();
+  MUST_USE_RESULT Status AbortOptimization(
+      BailoutReason reason = kNoReason) {
+    if (reason != kNoReason) info_->set_bailout_reason(reason);
+    return SetLastStatus(BAILED_OUT);
+  }
+
+  MUST_USE_RESULT Status AbortAndDisableOptimization(
+      BailoutReason reason = kNoReason) {
+    if (reason != kNoReason) info_->set_bailout_reason(reason);
     info_->shared_info()->DisableOptimization(info_->bailout_reason());
     return SetLastStatus(BAILED_OUT);
   }
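The new pair of abort methods encodes a distinction the old single AbortOptimization blurred: a plain abort ends only the current attempt, while AbortAndDisableOptimization also sets optimization_disabled on the shared function info so no future attempt is made. A toy illustration of the difference (the types are stand-ins, not the real classes):

    #include <cassert>

    struct ToySharedFunctionInfo {
      bool optimization_disabled;
      ToySharedFunctionInfo() : optimization_disabled(false) {}
    };

    struct ToyCompileJob {
      explicit ToyCompileJob(ToySharedFunctionInfo* s)
          : shared(s), bailed_out(false) {}
      // Transient failure: later optimization attempts remain allowed.
      void AbortOptimization() { bailed_out = true; }
      // Permanent failure: the function is never optimized again.
      void AbortAndDisableOptimization() {
        bailed_out = true;
        shared->optimization_disabled = true;
      }
      ToySharedFunctionInfo* shared;
      bool bailed_out;
    };

    int main() {
      ToySharedFunctionInfo shared;
      ToyCompileJob first(&shared);
      first.AbortOptimization();              // e.g. debugger became active
      assert(!shared.optimization_disabled);  // may retry later

      ToyCompileJob second(&shared);
      second.AbortAndDisableOptimization();   // e.g. unsupported construct
      assert(shared.optimization_disabled);
      return 0;
    }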
@@ -571,7 +563,7 @@
   void RecordOptimizationStats();
 
   struct Timer {
-    Timer(RecompileJob* job, TimeDelta* location)
+    Timer(OptimizedCompileJob* job, TimeDelta* location)
         : job_(job), location_(location) {
       ASSERT(location_ != NULL);
       timer_.Start();
@@ -581,7 +573,7 @@
       *location_ += timer_.Elapsed();
     }
 
-    RecompileJob* job_;
+    OptimizedCompileJob* job_;
     ElapsedTimer timer_;
     TimeDelta* location_;
   };
@@ -601,57 +593,53 @@
 
 class Compiler : public AllStatic {
  public:
-  // Call count before primitive functions trigger their own optimization.
-  static const int kCallsUntilPrimitiveOpt = 200;
+  static Handle<Code> GetUnoptimizedCode(Handle<JSFunction> function);
+  static Handle<Code> GetUnoptimizedCode(Handle<SharedFunctionInfo> shared);
+  static bool EnsureCompiled(Handle<JSFunction> function,
+                             ClearExceptionFlag flag);
+  static Handle<Code> GetCodeForDebugging(Handle<JSFunction> function);
 
-  // All routines return a SharedFunctionInfo.
-  // If an error occurs an exception is raised and the return handle
-  // contains NULL.
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  static void CompileForLiveEdit(Handle<Script> script);
+#endif
 
-  // Compile a String source within a context.
-  static Handle<SharedFunctionInfo> Compile(Handle<String> source,
-                                            Handle<Object> script_name,
-                                            int line_offset,
-                                            int column_offset,
-                                            bool is_shared_cross_origin,
-                                            Handle<Context> context,
-                                            v8::Extension* extension,
-                                            ScriptDataImpl* pre_data,
-                                            Handle<Object> script_data,
-                                            NativesFlag is_natives_code);
-
-  // Compile a String source within a context for Eval.
-  static Handle<SharedFunctionInfo> CompileEval(Handle<String> source,
+  // Compile a String source within a context for eval.
+  static Handle<JSFunction> GetFunctionFromEval(Handle<String> source,
                                                 Handle<Context> context,
-                                                bool is_global,
                                                 LanguageMode language_mode,
                                                 ParseRestriction restriction,
                                                 int scope_position);
 
-  // Compile from function info (used for lazy compilation). Returns true on
-  // success and false if the compilation resulted in a stack overflow.
-  static bool CompileLazy(CompilationInfo* info);
+  // Compile a String source within a context.
+  static Handle<SharedFunctionInfo> CompileScript(Handle<String> source,
+                                                  Handle<Object> script_name,
+                                                  int line_offset,
+                                                  int column_offset,
+                                                  bool is_shared_cross_origin,
+                                                  Handle<Context> context,
+                                                  v8::Extension* extension,
+                                                  ScriptDataImpl* pre_data,
+                                                  Handle<Object> script_data,
+                                                  NativesFlag is_natives_code);
 
-  static bool RecompileConcurrent(Handle<JSFunction> function,
-                                  Handle<Code> unoptimized,
-                                  uint32_t osr_pc_offset = 0);
-
-  // Compile a shared function info object (the function is possibly lazily
-  // compiled).
+  // Create a shared function info object (the code may be lazily compiled).
   static Handle<SharedFunctionInfo> BuildFunctionInfo(FunctionLiteral* node,
                                                       Handle<Script> script);
 
-  // Set the function info for a newly compiled function.
-  static void SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
-                              FunctionLiteral* lit,
-                              bool is_toplevel,
-                              Handle<Script> script);
+  enum ConcurrencyMode { NOT_CONCURRENT, CONCURRENT };
 
-  static Handle<Code> InstallOptimizedCode(RecompileJob* job);
+  // Generate and return optimized code or start a concurrent optimization job.
+  // In the latter case, return the InOptimizationQueue builtin.  On failure,
+  // return the empty handle.
+  static Handle<Code> GetOptimizedCode(
+      Handle<JSFunction> function,
+      Handle<Code> current_code,
+      ConcurrencyMode mode,
+      BailoutId osr_ast_id = BailoutId::None());
 
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  static bool MakeCodeForLiveEdit(CompilationInfo* info);
-#endif
+  // Generate and return code from previously queued optimization job.
+  // On failure, return the empty handle.
+  static Handle<Code> GetConcurrentlyOptimizedCode(OptimizedCompileJob* job);
 
   static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
                                         CompilationInfo* info,
diff --git a/src/debug.cc b/src/debug.cc
index dbbfe7e..4a7fa6b 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -783,14 +783,13 @@
 
   // Compile the script.
   Handle<SharedFunctionInfo> function_info;
-  function_info = Compiler::Compile(source_code,
-                                    script_name,
-                                    0, 0,
-                                    false,
-                                    context,
-                                    NULL, NULL,
-                                    Handle<String>::null(),
-                                    NATIVES_CODE);
+  function_info = Compiler::CompileScript(source_code,
+                                          script_name, 0, 0,
+                                          false,
+                                          context,
+                                          NULL, NULL,
+                                          Handle<String>::null(),
+                                          NATIVES_CODE);
 
   // Silently ignore stack overflows during compilation.
   if (function_info.is_null()) {
@@ -1868,41 +1867,6 @@
 }
 
 
-// Helper function to compile full code for debugging. This code will
-// have debug break slots and deoptimization information. Deoptimization
-// information is required in case that an optimized version of this
-// function is still activated on the stack. It will also make sure that
-// the full code is compiled with the same flags as the previous version,
-// that is flags which can change the code generated. The current method
-// of mapping from already compiled full code without debug break slots
-// to full code with debug break slots depends on the generated code is
-// otherwise exactly the same.
-static bool CompileFullCodeForDebugging(Handle<JSFunction> function,
-                                        Handle<Code> current_code) {
-  ASSERT(!current_code->has_debug_break_slots());
-
-  CompilationInfoWithZone info(function);
-  info.MarkCompilingForDebugging(current_code);
-  ASSERT(!info.shared_info()->is_compiled());
-  ASSERT(!info.isolate()->has_pending_exception());
-
-  // Use compile lazy which will end up compiling the full code in the
-  // configuration configured above.
-  bool result = Compiler::CompileLazy(&info);
-  ASSERT(result != info.isolate()->has_pending_exception());
-  info.isolate()->clear_pending_exception();
-#if DEBUG
-  if (result) {
-    Handle<Code> new_code(function->shared()->code());
-    ASSERT(new_code->has_debug_break_slots());
-    ASSERT(current_code->is_compiled_optimizable() ==
-           new_code->is_compiled_optimizable());
-  }
-#endif
-  return result;
-}
-
-
 static void CollectActiveFunctionsFromThread(
     Isolate* isolate,
     ThreadLocalTop* top,
@@ -2059,8 +2023,7 @@
 
     Deoptimizer::DeoptimizeAll(isolate_);
 
-    Handle<Code> lazy_compile =
-        Handle<Code>(isolate_->builtins()->builtin(Builtins::kLazyCompile));
+    Handle<Code> lazy_compile = isolate_->builtins()->CompileUnoptimized();
 
     // There will be at least one break point when we are done.
     has_break_points_ = true;
@@ -2112,9 +2075,9 @@
             function->set_code(*lazy_compile);
             function->shared()->set_code(*lazy_compile);
           } else if (kind == Code::BUILTIN &&
-              (function->IsInRecompileQueue() ||
-               function->IsMarkedForLazyRecompilation() ||
-               function->IsMarkedForConcurrentRecompilation())) {
+              (function->IsInOptimizationQueue() ||
+               function->IsMarkedForOptimization() ||
+               function->IsMarkedForConcurrentOptimization())) {
             // Abort in-flight compilation.
             Code* shared_code = function->shared()->code();
             if (shared_code->kind() == Code::FUNCTION &&
@@ -2159,19 +2122,12 @@
       if (!shared->code()->has_debug_break_slots()) {
         // Try to compile the full code with debug break slots. If it
         // fails just keep the current code.
-        Handle<Code> current_code(function->shared()->code());
-        shared->set_code(*lazy_compile);
         bool prev_force_debugger_active =
             isolate_->debugger()->force_debugger_active();
         isolate_->debugger()->set_force_debugger_active(true);
-        ASSERT(current_code->kind() == Code::FUNCTION);
-        CompileFullCodeForDebugging(function, current_code);
+        function->ReplaceCode(*Compiler::GetCodeForDebugging(function));
         isolate_->debugger()->set_force_debugger_active(
             prev_force_debugger_active);
-        if (!shared->is_compiled()) {
-          shared->set_code(*current_code);
-          continue;
-        }
       }
 
       // Keep function code in sync with shared function info.
@@ -2284,11 +2240,10 @@
       // will compile all inner functions that cannot be compiled without a
       // context, because Compiler::BuildFunctionInfo checks whether the
       // debugger is active.
-      if (target_function.is_null()) {
-        SharedFunctionInfo::CompileLazy(target, KEEP_EXCEPTION);
-      } else {
-        JSFunction::CompileLazy(target_function, KEEP_EXCEPTION);
-      }
+      Handle<Code> result = target_function.is_null()
+          ? Compiler::GetUnoptimizedCode(target)
+          : Compiler::GetUnoptimizedCode(target_function);
+      if (result.is_null()) return isolate_->heap()->undefined_value();
     }
   }  // End while loop.
 
@@ -2312,7 +2267,7 @@
 
   // Ensure function is compiled. Return false if this failed.
   if (!function.is_null() &&
-      !JSFunction::EnsureCompiled(function, CLEAR_EXCEPTION)) {
+      !Compiler::EnsureCompiled(function, CLEAR_EXCEPTION)) {
     return false;
   }
 
@@ -2598,6 +2553,21 @@
 }
 
 
+void Debug::RecordEvalCaller(Handle<Script> script) {
+  script->set_compilation_type(Script::COMPILATION_TYPE_EVAL);
+  // For eval scripts add information on the function from which eval was
+  // called.
+  StackTraceFrameIterator it(script->GetIsolate());
+  if (!it.done()) {
+    script->set_eval_from_shared(it.frame()->function()->shared());
+    Code* code = it.frame()->LookupCode();
+    int offset = static_cast<int>(
+        it.frame()->pc() - code->instruction_start());
+    script->set_eval_from_instructions_offset(Smi::FromInt(offset));
+  }
+}
+
+
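The offset stored by RecordEvalCaller is a plain byte distance from the code object's instruction start, which fits in a Smi and stays meaningful if the code object later moves. The arithmetic in isolation (the buffer and pc values below are made up):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Pretend code object whose instructions occupy [start, start + 64).
      uint8_t instructions[64] = {0};
      uint8_t* instruction_start = instructions;
      uint8_t* pc = instructions + 12;  // return address inside the caller

      // Same computation as RecordEvalCaller: pc - instruction_start.
      int offset = static_cast<int>(pc - instruction_start);
      assert(offset == 12);
      return 0;
    }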
 void Debug::AfterGarbageCollection() {
   // Generate events for collected scripts.
   if (script_cache_ != NULL) {
diff --git a/src/debug.h b/src/debug.h
index 7eedfd2..d1b3b23 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -424,6 +424,9 @@
   void AddScriptToScriptCache(Handle<Script> script);
   Handle<FixedArray> GetLoadedScripts();
 
+  // Record function from which eval was called.
+  static void RecordEvalCaller(Handle<Script> script);
+
   // Garbage collection notifications.
   void AfterGarbageCollection();
 
diff --git a/src/factory.cc b/src/factory.cc
index c10111a..9f1f085 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -287,11 +287,43 @@
 }
 
 
-Handle<String> Factory::NewConsString(Handle<String> first,
-                                      Handle<String> second) {
-  CALL_HEAP_FUNCTION(isolate(),
-                     isolate()->heap()->AllocateConsString(*first, *second),
-                     String);
+// Returns true for a character in a range.  Both limits are inclusive.
+static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
+  // This makes use of the unsigned wraparound.
+  return character - from <= to - from;
+}
+
+
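The single comparison in Between works because the subtraction is unsigned: for any character below `from`, `character - from` wraps around to a huge value and the test fails. Restating the trick as a standalone check:

    #include <cassert>
    #include <cstdint>

    // Inclusive range check with one comparison, relying on unsigned wrap.
    static inline bool Between(uint32_t c, uint32_t from, uint32_t to) {
      return c - from <= to - from;
    }

    int main() {
      assert(Between('5', '0', '9'));
      assert(!Between('a', '0', '9'));  // 'a' - '0' == 49, and 49 > 9
      assert(!Between('/', '0', '9'));  // '/' - '0' wraps to 0xFFFFFFFF
      return 0;
    }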
+static inline Handle<String> MakeOrFindTwoCharacterString(Isolate* isolate,
+                                                          uint16_t c1,
+                                                          uint16_t c2) {
+  // Numeric strings have a different hash algorithm not known by
+  // LookupTwoCharsStringIfExists, so we skip this step for such strings.
+  if (!Between(c1, '0', '9') || !Between(c2, '0', '9')) {
+    String* result;
+    StringTable* table = isolate->heap()->string_table();
+    if (table->LookupTwoCharsStringIfExists(c1, c2, &result)) {
+      return handle(result);
+    }
+  }
+
+  // Now that we know the length is 2, we might as well make use of that
+  // fact when building the new string.
+  if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
+    // We can do this.
+    ASSERT(IsPowerOf2(String::kMaxOneByteCharCodeU + 1));  // because of this.
+    Handle<SeqOneByteString> str = isolate->factory()->NewRawOneByteString(2);
+    uint8_t* dest = str->GetChars();
+    dest[0] = static_cast<uint8_t>(c1);
+    dest[1] = static_cast<uint8_t>(c2);
+    return str;
+  } else {
+    Handle<SeqTwoByteString> str = isolate->factory()->NewRawTwoByteString(2);
+    uc16* dest = str->GetChars();
+    dest[0] = c1;
+    dest[1] = c2;
+    return str;
+  }
 }
 
 
@@ -307,6 +339,99 @@
 }
 
 
+Handle<ConsString> Factory::NewRawConsString(String::Encoding encoding) {
+  Handle<Map> map = (encoding == String::ONE_BYTE_ENCODING)
+      ? cons_ascii_string_map() : cons_string_map();
+  CALL_HEAP_FUNCTION(isolate(),
+                     isolate()->heap()->Allocate(*map, NEW_SPACE),
+                     ConsString);
+}
+
+
+Handle<String> Factory::NewConsString(Handle<String> left,
+                                      Handle<String> right) {
+  int left_length = left->length();
+  if (left_length == 0) return right;
+  int right_length = right->length();
+  if (right_length == 0) return left;
+
+  int length = left_length + right_length;
+
+  if (length == 2) {
+    uint16_t c1 = left->Get(0);
+    uint16_t c2 = right->Get(0);
+    return MakeOrFindTwoCharacterString(isolate(), c1, c2);
+  }
+
+  // Make sure that an out of memory exception is thrown if the length
+  // of the new cons string is too large.
+  if (length > String::kMaxLength || length < 0) {
+    isolate()->context()->mark_out_of_memory();
+    V8::FatalProcessOutOfMemory("String concatenation result too large.");
+    UNREACHABLE();
+    return Handle<String>::null();
+  }
+
+  bool left_is_one_byte = left->IsOneByteRepresentation();
+  bool right_is_one_byte = right->IsOneByteRepresentation();
+  bool is_one_byte = left_is_one_byte && right_is_one_byte;
+  bool is_one_byte_data_in_two_byte_string = false;
+  if (!is_one_byte) {
+    // At least one of the strings uses two-byte representation so we
+    // can't use the fast case code for short ASCII strings below, but
+    // we can try to save memory if all chars actually fit in ASCII.
+    is_one_byte_data_in_two_byte_string =
+        left->HasOnlyOneByteChars() && right->HasOnlyOneByteChars();
+    if (is_one_byte_data_in_two_byte_string) {
+      isolate()->counters()->string_add_runtime_ext_to_ascii()->Increment();
+    }
+  }
+
+  // If the resulting string is small make a flat string.
+  if (length < ConsString::kMinLength) {
+    // Note that neither of the two inputs can be a slice because:
+    STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
+    ASSERT(left->IsFlat());
+    ASSERT(right->IsFlat());
+
+    if (is_one_byte) {
+      Handle<SeqOneByteString> result = NewRawOneByteString(length);
+      DisallowHeapAllocation no_gc;
+      uint8_t* dest = result->GetChars();
+      // Copy left part.
+      const uint8_t* src = left->IsExternalString()
+          ? Handle<ExternalAsciiString>::cast(left)->GetChars()
+          : Handle<SeqOneByteString>::cast(left)->GetChars();
+      for (int i = 0; i < left_length; i++) *dest++ = src[i];
+      // Copy right part.
+      src = right->IsExternalString()
+          ? Handle<ExternalAsciiString>::cast(right)->GetChars()
+          : Handle<SeqOneByteString>::cast(right)->GetChars();
+      for (int i = 0; i < right_length; i++) *dest++ = src[i];
+      return result;
+    }
+
+    return (is_one_byte_data_in_two_byte_string)
+        ? ConcatStringContent<uint8_t>(NewRawOneByteString(length), left, right)
+        : ConcatStringContent<uc16>(NewRawTwoByteString(length), left, right);
+  }
+
+  Handle<ConsString> result = NewRawConsString(
+      (is_one_byte || is_one_byte_data_in_two_byte_string)
+          ? String::ONE_BYTE_ENCODING
+          : String::TWO_BYTE_ENCODING);
+
+  DisallowHeapAllocation no_gc;
+  WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+
+  result->set_hash_field(String::kEmptyHashField);
+  result->set_length(length);
+  result->set_first(*left, mode);
+  result->set_second(*right, mode);
+  return result;
+}
+
+
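The structure of NewConsString is: trivial cases first, then a flat copy when the result is shorter than ConsString::kMinLength, and only then an O(1) cons node recording the two halves. A toy rope making the same size trade-off (the threshold 13 mirrors ConsString::kMinLength in this source; everything else is invented):

    #include <cstddef>
    #include <iostream>
    #include <memory>
    #include <string>

    // Toy rope: small results are copied flat; large results become a cons
    // node holding the two halves, so concatenation itself stays O(1).
    struct ToyRope {
      static const size_t kMinLength = 13;
      std::string flat;                        // used when small
      std::shared_ptr<ToyRope> first, second;  // used when large

      static std::shared_ptr<ToyRope> Leaf(const std::string& s) {
        std::shared_ptr<ToyRope> r(new ToyRope);
        r->flat = s;
        return r;
      }
      size_t length() const {
        return first ? first->length() + second->length() : flat.size();
      }
      std::string ToString() const {
        return first ? first->ToString() + second->ToString() : flat;
      }
    };

    std::shared_ptr<ToyRope> Concat(std::shared_ptr<ToyRope> a,
                                    std::shared_ptr<ToyRope> b) {
      if (a->length() == 0) return b;
      if (b->length() == 0) return a;
      if (a->length() + b->length() < ToyRope::kMinLength) {
        return ToyRope::Leaf(a->ToString() + b->ToString());  // flatten
      }
      std::shared_ptr<ToyRope> r(new ToyRope);
      r->first = a;
      r->second = b;
      return r;
    }

    int main() {
      std::shared_ptr<ToyRope> s = Concat(ToyRope::Leaf("foo"),
                                          ToyRope::Leaf("bar"));  // flat
      std::shared_ptr<ToyRope> t = Concat(s, ToyRope::Leaf("0123456789"));
      std::cout << t->ToString() << " cons="
                << (t->first ? "yes" : "no") << "\n";
      return 0;
    }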
 Handle<String> Factory::NewFlatConcatString(Handle<String> first,
                                             Handle<String> second) {
   int total_length = first->length() + second->length();
@@ -320,22 +445,89 @@
 }
 
 
-Handle<String> Factory::NewSubString(Handle<String> str,
-                                     int begin,
-                                     int end) {
+Handle<SlicedString> Factory::NewRawSlicedString(String::Encoding encoding) {
+  Handle<Map> map = (encoding == String::ONE_BYTE_ENCODING)
+      ? sliced_ascii_string_map() : sliced_string_map();
   CALL_HEAP_FUNCTION(isolate(),
-                     str->SubString(begin, end),
-                     String);
+                     isolate()->heap()->Allocate(*map, NEW_SPACE),
+                     SlicedString);
 }
 
 
 Handle<String> Factory::NewProperSubString(Handle<String> str,
                                            int begin,
                                            int end) {
+#if VERIFY_HEAP
+  if (FLAG_verify_heap) str->StringVerify();
+#endif
   ASSERT(begin > 0 || end < str->length());
-  CALL_HEAP_FUNCTION(isolate(),
-                     isolate()->heap()->AllocateSubString(*str, begin, end),
-                     String);
+
+  int length = end - begin;
+  if (length <= 0) return empty_string();
+  if (length == 1) {
+    return LookupSingleCharacterStringFromCode(isolate(), str->Get(begin));
+  }
+  if (length == 2) {
+    // Optimization for two-character strings often used as keys in a
+    // decompression dictionary.  Check whether we already have the string
+    // in the string table to prevent creation of many unnecessary strings.
+    uint16_t c1 = str->Get(begin);
+    uint16_t c2 = str->Get(begin + 1);
+    return MakeOrFindTwoCharacterString(isolate(), c1, c2);
+  }
+
+  if (!FLAG_string_slices || length < SlicedString::kMinLength) {
+    if (str->IsOneByteRepresentation()) {
+      Handle<SeqOneByteString> result = NewRawOneByteString(length);
+      uint8_t* dest = result->GetChars();
+      DisallowHeapAllocation no_gc;
+      String::WriteToFlat(*str, dest, begin, end);
+      return result;
+    } else {
+      Handle<SeqTwoByteString> result = NewRawTwoByteString(length);
+      uc16* dest = result->GetChars();
+      DisallowHeapAllocation no_gc;
+      String::WriteToFlat(*str, dest, begin, end);
+      return result;
+    }
+  }
+
+  int offset = begin;
+
+  while (str->IsConsString()) {
+    Handle<ConsString> cons = Handle<ConsString>::cast(str);
+    int split = cons->first()->length();
+    if (split <= offset) {
+      // Slice is fully contained in the second part.
+      str = Handle<String>(cons->second(), isolate());
+      offset -= split;  // Adjust for offset.
+      continue;
+    } else if (offset + length <= split) {
+      // Slice is fully contained in the first part.
+      str = Handle<String>(cons->first(), isolate());
+      continue;
+    }
+    break;
+  }
+
+  if (str->IsSlicedString()) {
+    Handle<SlicedString> slice = Handle<SlicedString>::cast(str);
+    str = Handle<String>(slice->parent(), isolate());
+    offset += slice->offset();
+  } else {
+    str = FlattenGetString(str);
+  }
+
+  ASSERT(str->IsSeqString() || str->IsExternalString());
+  Handle<SlicedString> slice = NewRawSlicedString(
+      str->IsOneByteRepresentation() ? String::ONE_BYTE_ENCODING
+                                     : String::TWO_BYTE_ENCODING);
+
+  slice->set_hash_field(String::kEmptyHashField);
+  slice->set_length(length);
+  slice->set_parent(*str);
+  slice->set_offset(offset);
+  return slice;
 }
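Two invariants fall out of the loop above: a slice's parent is always a sequential or external string, never another slice, and a substring of a slice re-points at the original parent with the offsets added. The offset bookkeeping in isolation (toy types, not V8's SlicedString):

    #include <cassert>
    #include <string>

    // Toy SlicedString: a (parent, offset, length) view over a flat string.
    struct ToySlice {
      const std::string* parent;
      int offset;
      int length;
    };

    // Sub-slicing composes offsets against the original parent, keeping the
    // parent chain exactly one level deep, as NewProperSubString does.
    ToySlice SubSlice(const ToySlice& s, int begin, int end) {
      ToySlice result = { s.parent, s.offset + begin, end - begin };
      return result;
    }

    int main() {
      std::string flat = "the quick brown fox";
      ToySlice a = { &flat, 4, 15 };    // "quick brown fox"
      ToySlice b = SubSlice(a, 6, 11);  // "brown"
      assert(b.parent == &flat);        // points at the flat parent, not 'a'
      assert(flat.substr(b.offset, b.length) == "brown");
      return 0;
    }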
 
 
@@ -732,7 +924,8 @@
 
   result->set_context(*context);
 
-  int index = function_info->SearchOptimizedCodeMap(context->native_context());
+  int index = function_info->SearchOptimizedCodeMap(context->native_context(),
+                                                    BailoutId::None());
   if (!function_info->bound() && index < 0) {
     int number_of_literals = function_info->num_literals();
     Handle<FixedArray> literals = NewFixedArray(number_of_literals, pretenure);
@@ -748,7 +941,10 @@
 
   if (index > 0) {
     // Caching of optimized code enabled and optimized code found.
-    function_info->InstallFromOptimizedCodeMap(*result, index);
+    FixedArray* literals =
+        function_info->GetLiteralsFromOptimizedCodeMap(index);
+    if (literals != NULL) result->set_literals(literals);
+    result->ReplaceCode(function_info->GetCodeFromOptimizedCodeMap(index));
     return result;
   }
 
@@ -759,7 +955,7 @@
       function_info->allows_lazy_compilation() &&
       !function_info->optimization_disabled() &&
       !isolate()->DebuggerHasBreakPoints()) {
-    result->MarkForLazyRecompilation();
+    result->MarkForOptimization();
   }
   return result;
 }
diff --git a/src/factory.h b/src/factory.h
index 92086d4..94e89f5 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -158,23 +158,28 @@
       PretenureFlag pretenure = NOT_TENURED);
 
   // Create a new cons string object which consists of a pair of strings.
-  Handle<String> NewConsString(Handle<String> first,
-                               Handle<String> second);
+  Handle<String> NewConsString(Handle<String> left,
+                               Handle<String> right);
+
+  Handle<ConsString> NewRawConsString(String::Encoding encoding);
 
   // Create a new sequential string containing the concatenation of the inputs.
   Handle<String> NewFlatConcatString(Handle<String> first,
                                      Handle<String> second);
 
-  // Create a new string object which holds a substring of a string.
-  Handle<String> NewSubString(Handle<String> str,
-                              int begin,
-                              int end);
-
   // Create a new string object which holds a proper substring of a string.
   Handle<String> NewProperSubString(Handle<String> str,
                                     int begin,
                                     int end);
 
+  // Create a new string object which holds a substring of a string.
+  Handle<String> NewSubString(Handle<String> str, int begin, int end) {
+    if (begin == 0 && end == str->length()) return str;
+    return NewProperSubString(str, begin, end);
+  }
+
+  Handle<SlicedString> NewRawSlicedString(String::Encoding encoding);
+
   // Creates a new external String object.  There are two String encodings
   // in the system: ASCII and two byte.  Unlike other String types, it does
   // not make sense to have a UTF-8 factory function for external strings,
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index a40b61e..ff4e8d3 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -312,6 +312,10 @@
 
 bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
   Isolate* isolate = info->isolate();
+
+  Logger::TimerEventScope timer(
+      isolate, Logger::TimerEventScope::v8_compile_full_code);
+
   Handle<Script> script = info->script();
   if (!script->IsUndefined() && !script->source()->IsUndefined()) {
     int len = String::cast(script->source())->length();
@@ -1644,8 +1648,7 @@
 }
 
 
-void BackEdgeTable::Patch(Isolate* isolate,
-                          Code* unoptimized) {
+void BackEdgeTable::Patch(Isolate* isolate, Code* unoptimized) {
   DisallowHeapAllocation no_gc;
   Code* patch = isolate->builtins()->builtin(Builtins::kOnStackReplacement);
 
@@ -1668,8 +1671,7 @@
 }
 
 
-void BackEdgeTable::Revert(Isolate* isolate,
-                           Code* unoptimized) {
+void BackEdgeTable::Revert(Isolate* isolate, Code* unoptimized) {
   DisallowHeapAllocation no_gc;
   Code* patch = isolate->builtins()->builtin(Builtins::kInterruptCheck);
 
@@ -1694,29 +1696,23 @@
 }
 
 
-void BackEdgeTable::AddStackCheck(CompilationInfo* info) {
+void BackEdgeTable::AddStackCheck(Handle<Code> code, uint32_t pc_offset) {
   DisallowHeapAllocation no_gc;
-  Isolate* isolate = info->isolate();
-  Code* code = *info->osr_patched_code();
-  Address pc = code->instruction_start() + info->osr_pc_offset();
-  ASSERT_EQ(info->osr_ast_id().ToInt(),
-            code->TranslatePcOffsetToAstId(info->osr_pc_offset()).ToInt());
-  ASSERT_NE(INTERRUPT, GetBackEdgeState(isolate, code, pc));
+  Isolate* isolate = code->GetIsolate();
+  Address pc = code->instruction_start() + pc_offset;
   Code* patch = isolate->builtins()->builtin(Builtins::kOsrAfterStackCheck);
-  PatchAt(code, pc, OSR_AFTER_STACK_CHECK, patch);
+  PatchAt(*code, pc, OSR_AFTER_STACK_CHECK, patch);
 }
 
 
-void BackEdgeTable::RemoveStackCheck(CompilationInfo* info) {
+void BackEdgeTable::RemoveStackCheck(Handle<Code> code, uint32_t pc_offset) {
   DisallowHeapAllocation no_gc;
-  Isolate* isolate = info->isolate();
-  Code* code = *info->osr_patched_code();
-  Address pc = code->instruction_start() + info->osr_pc_offset();
-  ASSERT_EQ(info->osr_ast_id().ToInt(),
-            code->TranslatePcOffsetToAstId(info->osr_pc_offset()).ToInt());
-  if (GetBackEdgeState(isolate, code, pc) == OSR_AFTER_STACK_CHECK) {
+  Isolate* isolate = code->GetIsolate();
+  Address pc = code->instruction_start() + pc_offset;
+
+  if (OSR_AFTER_STACK_CHECK == GetBackEdgeState(isolate, *code, pc)) {
     Code* patch = isolate->builtins()->builtin(Builtins::kOnStackReplacement);
-    PatchAt(code, pc, ON_STACK_REPLACEMENT, patch);
+    PatchAt(*code, pc, ON_STACK_REPLACEMENT, patch);
   }
 }
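With the CompilationInfo dependency gone, a back edge is addressed purely by (code, pc_offset), and the two functions toggle between the ON_STACK_REPLACEMENT and OSR_AFTER_STACK_CHECK patch states while leaving unpatched (INTERRUPT) edges alone. A toy table capturing those transitions (std::map in place of real code patching):

    #include <cassert>
    #include <cstdint>
    #include <map>

    enum ToyBackEdgeState { INTERRUPT, ON_STACK_REPLACEMENT,
                            OSR_AFTER_STACK_CHECK };

    // Patch state per pc offset within one (pretend) code object.
    typedef std::map<uint32_t, ToyBackEdgeState> ToyBackEdgeTable;

    void AddStackCheck(ToyBackEdgeTable* table, uint32_t pc_offset) {
      (*table)[pc_offset] = OSR_AFTER_STACK_CHECK;
    }

    // Reverts only the AddStackCheck patch; other states are untouched.
    void RemoveStackCheck(ToyBackEdgeTable* table, uint32_t pc_offset) {
      if ((*table)[pc_offset] == OSR_AFTER_STACK_CHECK) {
        (*table)[pc_offset] = ON_STACK_REPLACEMENT;
      }
    }

    int main() {
      ToyBackEdgeTable table;
      table[16] = ON_STACK_REPLACEMENT;  // edge already patched for OSR
      AddStackCheck(&table, 16);
      assert(table[16] == OSR_AFTER_STACK_CHECK);
      RemoveStackCheck(&table, 16);
      assert(table[16] == ON_STACK_REPLACEMENT);

      table[32] = INTERRUPT;             // unpatched edge: remove is a no-op
      RemoveStackCheck(&table, 32);
      assert(table[32] == INTERRUPT);
      return 0;
    }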
 
diff --git a/src/full-codegen.h b/src/full-codegen.h
index 11d5341..6fd61c0 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -928,10 +928,10 @@
 
   // Change a back edge patched for on-stack replacement to perform a
   // stack check first.
-  static void AddStackCheck(CompilationInfo* info);
+  static void AddStackCheck(Handle<Code> code, uint32_t pc_offset);
 
-  // Remove the stack check, if available, and replace by on-stack replacement.
-  static void RemoveStackCheck(CompilationInfo* info);
+  // Revert the patch by AddStackCheck.
+  static void RemoveStackCheck(Handle<Code> code, uint32_t pc_offset);
 
   // Return the current patch state of the back edge.
   static BackEdgeState GetBackEdgeState(Isolate* isolate,
diff --git a/src/handles.cc b/src/handles.cc
index 8bf3b0c..bc8d2d7 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -211,11 +211,12 @@
 }
 
 
-Handle<Object> LookupSingleCharacterStringFromCode(Isolate* isolate,
+Handle<String> LookupSingleCharacterStringFromCode(Isolate* isolate,
                                                    uint32_t index) {
   CALL_HEAP_FUNCTION(
       isolate,
-      isolate->heap()->LookupSingleCharacterStringFromCode(index), Object);
+      isolate->heap()->LookupSingleCharacterStringFromCode(index),
+      String);
 }
 
 
diff --git a/src/handles.h b/src/handles.h
index 7fef919..d42d1bd 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -245,7 +245,7 @@
                            Handle<Object> obj,
                            Handle<Object> key);
 
-Handle<Object> LookupSingleCharacterStringFromCode(Isolate* isolate,
+Handle<String> LookupSingleCharacterStringFromCode(Isolate* isolate,
                                                    uint32_t index);
 
 Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray>,
diff --git a/src/harmony-math.js b/src/harmony-math.js
index 2bf33d6..d57a104 100644
--- a/src/harmony-math.js
+++ b/src/harmony-math.js
@@ -110,6 +110,51 @@
 }
 
 
+// ES6 draft 09-27-13, section 20.2.2.21.
+function MathLog10(x) {
+  return MathLog(x) * 0.434294481903251828;  // log10(x) = log(x)/log(10).
+}
+
+
+// ES6 draft 09-27-13, section 20.2.2.22.
+function MathLog2(x) {
+  return MathLog(x) * 1.442695040888963407;  // log2(x) = log(x)/log(2).
+}
+
+
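The two constants above are 1/ln 10 and 1/ln 2 written out to full double precision; by the change-of-base identity log_b(x) = ln(x) / ln(b), each function needs only a single multiply:

    log10(x) = ln(x) / ln(10) = ln(x) * 0.434294481903251828...
    log2(x)  = ln(x) / ln(2)  = ln(x) * 1.442695040888963407...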
+// ES6 draft 09-27-13, section 20.2.2.17.
+function MathHypot(x, y) {  // Function length is 2.
+  // We may want to introduce fast paths for two arguments and when
+  // normalization to avoid overflow is not necessary.  For now, we
+  // simply assume the general case.
+  var length = %_ArgumentsLength();
+  var args = new InternalArray(length);
+  var max = 0;
+  for (var i = 0; i < length; i++) {
+    var n = %_Arguments(i);
+    if (!IS_NUMBER(n)) n = NonNumberToNumber(n);
+    if (n === INFINITY || n === -INFINITY) return INFINITY;
+    n = MathAbs(n);
+    if (n > max) max = n;
+    args[i] = n;
+  }
+
+  // Kahan summation to avoid rounding errors.
+  // Normalize the numbers to the largest one to avoid overflow.
+  if (max === 0) max = 1;
+  var sum = 0;
+  var compensation = 0;
+  for (var i = 0; i < length; i++) {
+    var n = args[i] / max;
+    var summand = n * n - compensation;
+    var preliminary = sum + summand;
+    compensation = (preliminary - sum) - summand;
+    sum = preliminary;
+  }
+  return MathSqrt(sum) * max;
+}
+
+
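MathHypot guards against overflow by dividing every argument by the largest magnitude before squaring, and against rounding drift with Kahan compensated summation. The same algorithm transcribed to standalone C++ (a sketch of the JS above; NaN handling is omitted):

    #include <cassert>
    #include <cmath>
    #include <vector>

    double HypotSketch(const std::vector<double>& values) {
      double max = 0;
      std::vector<double> args;
      for (size_t i = 0; i < values.size(); i++) {
        if (std::isinf(values[i])) return INFINITY;  // any infinity wins
        double n = std::fabs(values[i]);
        if (n > max) max = n;
        args.push_back(n);
      }
      if (max == 0) max = 1;  // all zeros: avoid dividing 0 by 0

      // Kahan summation of the normalized squares: 'compensation' carries
      // the low-order bits lost when a small summand meets a large sum.
      double sum = 0;
      double compensation = 0;
      for (size_t i = 0; i < args.size(); i++) {
        double n = args[i] / max;  // in [0, 1], so n * n cannot overflow
        double summand = n * n - compensation;
        double preliminary = sum + summand;
        compensation = (preliminary - sum) - summand;
        sum = preliminary;
      }
      return std::sqrt(sum) * max;
    }

    int main() {
      std::vector<double> a;
      a.push_back(3.0);
      a.push_back(4.0);
      assert(HypotSketch(a) == 5.0);

      std::vector<double> big(2, 1e200);  // naive x*x + y*y would overflow
      assert(HypotSketch(big) < INFINITY);
      return 0;
    }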
 function ExtendMath() {
   %CheckIsBootstrapping();
 
@@ -122,7 +167,10 @@
     "tanh", MathTanh,
     "asinh", MathAsinh,
     "acosh", MathAcosh,
-    "atanh", MathAtanh
+    "atanh", MathAtanh,
+    "log10", MathLog10,
+    "log2", MathLog2,
+    "hypot", MathHypot
   ));
 }
 
diff --git a/src/heap-snapshot-generator.cc b/src/heap-snapshot-generator.cc
index b9bea75..992945b 100644
--- a/src/heap-snapshot-generator.cc
+++ b/src/heap-snapshot-generator.cc
@@ -99,11 +99,6 @@
 }
 
 
-Handle<HeapObject> HeapEntry::GetHeapObject() {
-  return snapshot_->profiler()->FindHeapObjectById(id());
-}
-
-
 void HeapEntry::Print(
     const char* prefix, const char* edge_name, int max_depth, int indent) {
   STATIC_CHECK(sizeof(unsigned) == sizeof(id()));
diff --git a/src/heap-snapshot-generator.h b/src/heap-snapshot-generator.h
index b33b7f0..d93362f 100644
--- a/src/heap-snapshot-generator.h
+++ b/src/heap-snapshot-generator.h
@@ -138,8 +138,6 @@
   void Print(
       const char* prefix, const char* edge_name, int max_depth, int indent);
 
-  Handle<HeapObject> GetHeapObject();
-
  private:
   INLINE(HeapGraphEdge** children_arr());
   const char* TypeAsString();
diff --git a/src/heap.cc b/src/heap.cc
index 7477450..f827a02 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -3820,264 +3820,6 @@
 }
 
 
-
-// Returns true for a character in a range.  Both limits are inclusive.
-static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
-  // This makes uses of the the unsigned wraparound.
-  return character - from <= to - from;
-}
-
-
-MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
-    Heap* heap,
-    uint16_t c1,
-    uint16_t c2) {
-  String* result;
-  // Numeric strings have a different hash algorithm not known by
-  // LookupTwoCharsStringIfExists, so we skip this step for such strings.
-  if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
-      heap->string_table()->LookupTwoCharsStringIfExists(c1, c2, &result)) {
-    return result;
-  // Now that we know the length is 2, we might as well make use of that
-  // fact when building the new string.
-  } else if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
-    // We can do this.
-    ASSERT(IsPowerOf2(String::kMaxOneByteCharCodeU + 1));  // because of this.
-    Object* result;
-    { MaybeObject* maybe_result = heap->AllocateRawOneByteString(2);
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-    uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
-    dest[0] = static_cast<uint8_t>(c1);
-    dest[1] = static_cast<uint8_t>(c2);
-    return result;
-  } else {
-    Object* result;
-    { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-    uc16* dest = SeqTwoByteString::cast(result)->GetChars();
-    dest[0] = c1;
-    dest[1] = c2;
-    return result;
-  }
-}
-
-
-MaybeObject* Heap::AllocateConsString(String* first, String* second) {
-  int first_length = first->length();
-  if (first_length == 0) {
-    return second;
-  }
-
-  int second_length = second->length();
-  if (second_length == 0) {
-    return first;
-  }
-
-  int length = first_length + second_length;
-
-  // Optimization for 2-byte strings often used as keys in a decompression
-  // dictionary.  Check whether we already have the string in the string
-  // table to prevent creation of many unnecessary strings.
-  if (length == 2) {
-    uint16_t c1 = first->Get(0);
-    uint16_t c2 = second->Get(0);
-    return MakeOrFindTwoCharacterString(this, c1, c2);
-  }
-
-  bool first_is_one_byte = first->IsOneByteRepresentation();
-  bool second_is_one_byte = second->IsOneByteRepresentation();
-  bool is_one_byte = first_is_one_byte && second_is_one_byte;
-  // Make sure that an out of memory exception is thrown if the length
-  // of the new cons string is too large.
-  if (length > String::kMaxLength || length < 0) {
-    isolate()->context()->mark_out_of_memory();
-    return Failure::OutOfMemoryException(0x4);
-  }
-
-  bool is_one_byte_data_in_two_byte_string = false;
-  if (!is_one_byte) {
-    // At least one of the strings uses two-byte representation so we
-    // can't use the fast case code for short ASCII strings below, but
-    // we can try to save memory if all chars actually fit in ASCII.
-    is_one_byte_data_in_two_byte_string =
-        first->HasOnlyOneByteChars() && second->HasOnlyOneByteChars();
-    if (is_one_byte_data_in_two_byte_string) {
-      isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
-    }
-  }
-
-  // If the resulting string is small make a flat string.
-  if (length < ConsString::kMinLength) {
-    // Note that neither of the two inputs can be a slice because:
-    STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
-    ASSERT(first->IsFlat());
-    ASSERT(second->IsFlat());
-    if (is_one_byte) {
-      Object* result;
-      { MaybeObject* maybe_result = AllocateRawOneByteString(length);
-        if (!maybe_result->ToObject(&result)) return maybe_result;
-      }
-      // Copy the characters into the new object.
-      uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
-      // Copy first part.
-      const uint8_t* src;
-      if (first->IsExternalString()) {
-        src = ExternalAsciiString::cast(first)->GetChars();
-      } else {
-        src = SeqOneByteString::cast(first)->GetChars();
-      }
-      for (int i = 0; i < first_length; i++) *dest++ = src[i];
-      // Copy second part.
-      if (second->IsExternalString()) {
-        src = ExternalAsciiString::cast(second)->GetChars();
-      } else {
-        src = SeqOneByteString::cast(second)->GetChars();
-      }
-      for (int i = 0; i < second_length; i++) *dest++ = src[i];
-      return result;
-    } else {
-      if (is_one_byte_data_in_two_byte_string) {
-        Object* result;
-        { MaybeObject* maybe_result = AllocateRawOneByteString(length);
-          if (!maybe_result->ToObject(&result)) return maybe_result;
-        }
-        // Copy the characters into the new object.
-        uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
-        String::WriteToFlat(first, dest, 0, first_length);
-        String::WriteToFlat(second, dest + first_length, 0, second_length);
-        isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
-        return result;
-      }
-
-      Object* result;
-      { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
-        if (!maybe_result->ToObject(&result)) return maybe_result;
-      }
-      // Copy the characters into the new object.
-      uc16* dest = SeqTwoByteString::cast(result)->GetChars();
-      String::WriteToFlat(first, dest, 0, first_length);
-      String::WriteToFlat(second, dest + first_length, 0, second_length);
-      return result;
-    }
-  }
-
-  Map* map = (is_one_byte || is_one_byte_data_in_two_byte_string) ?
-      cons_ascii_string_map() : cons_string_map();
-
-  Object* result;
-  { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-
-  DisallowHeapAllocation no_gc;
-  ConsString* cons_string = ConsString::cast(result);
-  WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
-  cons_string->set_length(length);
-  cons_string->set_hash_field(String::kEmptyHashField);
-  cons_string->set_first(first, mode);
-  cons_string->set_second(second, mode);
-  return result;
-}
-
-
-MaybeObject* Heap::AllocateSubString(String* buffer,
-                                     int start,
-                                     int end,
-                                     PretenureFlag pretenure) {
-  int length = end - start;
-  if (length <= 0) {
-    return empty_string();
-  }
-
-  // Make an attempt to flatten the buffer to reduce access time.
-  buffer = buffer->TryFlattenGetString();
-
-  if (length == 1) {
-    return LookupSingleCharacterStringFromCode(buffer->Get(start));
-  } else if (length == 2) {
-    // Optimization for 2-byte strings often used as keys in a decompression
-    // dictionary.  Check whether we already have the string in the string
-    // table to prevent creation of many unnecessary strings.
-    uint16_t c1 = buffer->Get(start);
-    uint16_t c2 = buffer->Get(start + 1);
-    return MakeOrFindTwoCharacterString(this, c1, c2);
-  }
-
-  if (!FLAG_string_slices ||
-      !buffer->IsFlat() ||
-      length < SlicedString::kMinLength ||
-      pretenure == TENURED) {
-    Object* result;
-    // WriteToFlat takes care of the case when an indirect string has a
-    // different encoding from its underlying string.  These encodings may
-    // differ because of externalization.
-    bool is_one_byte = buffer->IsOneByteRepresentation();
-    { MaybeObject* maybe_result = is_one_byte
-                                  ? AllocateRawOneByteString(length, pretenure)
-                                  : AllocateRawTwoByteString(length, pretenure);
-      if (!maybe_result->ToObject(&result)) return maybe_result;
-    }
-    String* string_result = String::cast(result);
-    // Copy the characters into the new object.
-    if (is_one_byte) {
-      ASSERT(string_result->IsOneByteRepresentation());
-      uint8_t* dest = SeqOneByteString::cast(string_result)->GetChars();
-      String::WriteToFlat(buffer, dest, start, end);
-    } else {
-      ASSERT(string_result->IsTwoByteRepresentation());
-      uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
-      String::WriteToFlat(buffer, dest, start, end);
-    }
-    return result;
-  }
-
-  ASSERT(buffer->IsFlat());
-#if VERIFY_HEAP
-  if (FLAG_verify_heap) {
-    buffer->StringVerify();
-  }
-#endif
-
-  Object* result;
-  // When slicing an indirect string we use its encoding for a newly created
-  // slice and don't check the encoding of the underlying string.  This is safe
-  // even if the encodings are different because of externalization.  If an
-  // indirect ASCII string is pointing to a two-byte string, the two-byte char
-  // codes of the underlying string must still fit into ASCII (because
-  // externalization must not change char codes).
-  { Map* map = buffer->IsOneByteRepresentation()
-                 ? sliced_ascii_string_map()
-                 : sliced_string_map();
-    MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
-    if (!maybe_result->ToObject(&result)) return maybe_result;
-  }
-
-  DisallowHeapAllocation no_gc;
-  SlicedString* sliced_string = SlicedString::cast(result);
-  sliced_string->set_length(length);
-  sliced_string->set_hash_field(String::kEmptyHashField);
-  if (buffer->IsConsString()) {
-    ConsString* cons = ConsString::cast(buffer);
-    ASSERT(cons->second()->length() == 0);
-    sliced_string->set_parent(cons->first());
-    sliced_string->set_offset(start);
-  } else if (buffer->IsSlicedString()) {
-    // Prevent nesting sliced strings.
-    SlicedString* parent_slice = SlicedString::cast(buffer);
-    sliced_string->set_parent(parent_slice->parent());
-    sliced_string->set_offset(start + parent_slice->offset());
-  } else {
-    sliced_string->set_parent(buffer);
-    sliced_string->set_offset(start);
-  }
-  ASSERT(sliced_string->parent()->IsSeqString() ||
-         sliced_string->parent()->IsExternalString());
-  return result;
-}
-
-
 MaybeObject* Heap::AllocateExternalStringFromAscii(
     const ExternalAsciiString::Resource* resource) {
   size_t length = resource->length();
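One detail of the block removed above is worth spelling out: Between() folds
the two comparisons of an inclusive range check into one by exploiting
unsigned wraparound.  When character < from, the subtraction wraps to a value
near 2^32 and the <= test fails.  A self-contained illustration:

#include <cassert>
#include <cstdint>

// One-comparison inclusive range check via unsigned wraparound.
static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
  return character - from <= to - from;
}

int main() {
  assert(Between('5', '0', '9'));   // in range
  assert(!Between('a', '0', '9'));  // above: difference exceeds to - from
  assert(!Between('/', '0', '9'));  // below: subtraction wraps around
}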
diff --git a/src/heap.h b/src/heap.h
index d05f019..bedd1ea 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1071,25 +1071,6 @@
       Object* stack_trace,
       Object* stack_frames);
 
-  // Allocates a new cons string object.
-  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
-  // failed.
-  // Please note this does not perform a garbage collection.
-  MUST_USE_RESULT MaybeObject* AllocateConsString(String* first,
-                                                  String* second);
-
-  // Allocates a new sub string object which is a substring of an underlying
-  // string buffer stretching from the index start (inclusive) to the index
-  // end (exclusive).
-  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
-  // failed.
-  // Please note this does not perform a garbage collection.
-  MUST_USE_RESULT MaybeObject* AllocateSubString(
-      String* buffer,
-      int start,
-      int end,
-      PretenureFlag pretenure = NOT_TENURED);
-
   // Allocate a new external string object, which is backed by a string
   // resource that resides outside the V8 heap.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 1e270ab..25ac2df 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -6028,6 +6028,10 @@
     return HObjectAccess(kInobject, SharedFunctionInfo::kFirstContextSlot);
   }
 
+  static HObjectAccess ForFirstOsrAstIdSlot() {
+    return HObjectAccess(kInobject, SharedFunctionInfo::kFirstOsrAstIdSlot);
+  }
+
   static HObjectAccess ForOptimizedCodeMap() {
     return HObjectAccess(kInobject,
                          SharedFunctionInfo::kOptimizedCodeMapOffset);
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 2de5ef8..1f73a7d 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -74,8 +74,8 @@
 }
 
 
-static void CallRuntimePassFunction(MacroAssembler* masm,
-                                    Runtime::FunctionId function_id) {
+static void CallRuntimePassFunction(
+    MacroAssembler* masm, Runtime::FunctionId function_id) {
   FrameScope scope(masm, StackFrame::INTERNAL);
   // Push a copy of the function.
   __ push(edi);
@@ -100,7 +100,13 @@
 }
 
 
-void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
+  __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
+  __ jmp(eax);
+}
+
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
   // Checking whether the queued function is ready for install is optional,
   // since we come across interrupts and stack checks elsewhere.  However,
   // not checking may delay installing ready functions, and always checking
@@ -112,22 +118,14 @@
   __ cmp(esp, Operand::StaticVariable(stack_limit));
   __ j(above_equal, &ok, Label::kNear);
 
-  CallRuntimePassFunction(masm, Runtime::kTryInstallRecompiledCode);
-  // Tail call to returned code.
-  __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
-  __ jmp(eax);
+  CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
+  GenerateTailCallToReturnedCode(masm);
 
   __ bind(&ok);
   GenerateTailCallToSharedCode(masm);
 }
 
 
-void Builtins::Generate_ConcurrentRecompile(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kConcurrentRecompile);
-  GenerateTailCallToSharedCode(masm);
-}
-
-
 static void Generate_JSConstructStubHelper(MacroAssembler* masm,
                                            bool is_api_function,
                                            bool count_constructions) {
@@ -509,19 +507,40 @@
 }
 
 
-void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kLazyCompile);
-  // Do a tail-call of the compiled function.
-  __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
-  __ jmp(eax);
+void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
+  CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
+  GenerateTailCallToReturnedCode(masm);
 }
 
 
-void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kLazyRecompile);
-  // Do a tail-call of the compiled function.
-  __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
-  __ jmp(eax);
+static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
+  FrameScope scope(masm, StackFrame::INTERNAL);
+  // Push a copy of the function.
+  __ push(edi);
+  // Push call kind information.
+  __ push(ecx);
+  // Function is also the parameter to the runtime call.
+  __ push(edi);
+  // Whether to compile in a background thread.
+  __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
+
+  __ CallRuntime(Runtime::kCompileOptimized, 2);
+  // Restore call kind information.
+  __ pop(ecx);
+  // Restore receiver.
+  __ pop(edi);
+}
+
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+  CallCompileOptimized(masm, false);
+  GenerateTailCallToReturnedCode(masm);
+}
+
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+  CallCompileOptimized(masm, true);
+  GenerateTailCallToReturnedCode(masm);
 }
 
 
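GenerateTailCallToReturnedCode encodes one fact about the runtime calls it
follows: they return a Code heap object in eax, whose first instruction sits
Code::kHeaderSize bytes past the tagged object pointer.  A hedged sketch of
that address arithmetic (the header size below is a placeholder, not V8's
actual value):

#include <cstdint>

// V8 heap pointers carry a low tag bit; instructions start after the header.
constexpr uintptr_t kHeapObjectTag = 1;    // low bit set on heap pointers
constexpr uintptr_t kCodeHeaderSize = 96;  // placeholder for Code::kHeaderSize

// What "lea eax, FieldOperand(eax, Code::kHeaderSize); jmp eax" computes.
uintptr_t EntryPoint(uintptr_t tagged_code_pointer) {
  return tagged_code_pointer - kHeapObjectTag + kCodeHeaderSize;
}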
diff --git a/src/ic.cc b/src/ic.cc
index dd7df98..af9b19e 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -2054,7 +2054,7 @@
   if (raw_function->is_compiled()) return raw_function;
 
   Handle<JSFunction> function(raw_function);
-  JSFunction::CompileLazy(function, CLEAR_EXCEPTION);
+  Compiler::EnsureCompiled(function, CLEAR_EXCEPTION);
   return *function;
 }
 
@@ -2075,7 +2075,7 @@
   if (raw_function->is_compiled()) return raw_function;
 
   Handle<JSFunction> function(raw_function, isolate);
-  JSFunction::CompileLazy(function, CLEAR_EXCEPTION);
+  Compiler::EnsureCompiled(function, CLEAR_EXCEPTION);
   return *function;
 }
 
@@ -2155,7 +2155,7 @@
   if (raw_function->is_compiled()) return raw_function;
 
   Handle<JSFunction> function(raw_function, isolate);
-  JSFunction::CompileLazy(function, CLEAR_EXCEPTION);
+  Compiler::EnsureCompiled(function, CLEAR_EXCEPTION);
   return *function;
 }
 
diff --git a/src/liveedit.cc b/src/liveedit.cc
index 3d459d4..002e062 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -602,27 +602,6 @@
 }
 
 
-static void CompileScriptForTracker(Isolate* isolate, Handle<Script> script) {
-  // TODO(635): support extensions.
-  PostponeInterruptsScope postpone(isolate);
-
-  // Build AST.
-  CompilationInfoWithZone info(script);
-  info.MarkAsGlobal();
-  // Parse and don't allow skipping lazy functions.
-  if (Parser::Parse(&info)) {
-    // Compile the code.
-    LiveEditFunctionTracker tracker(info.isolate(), info.function());
-    if (Compiler::MakeCodeForLiveEdit(&info)) {
-      ASSERT(!info.code().is_null());
-      tracker.RecordRootFunctionInfo(info.code());
-    } else {
-      info.isolate()->StackOverflow();
-    }
-  }
-}
-
-
 // Unwraps JSValue object, returning its field "value"
 static Handle<Object> UnwrapJSValue(Handle<JSValue> jsValue) {
   return Handle<Object>(jsValue->value(), jsValue->GetIsolate());
@@ -951,7 +930,7 @@
     try_catch.SetVerbose(true);
 
     // A logical 'try' section.
-    CompileScriptForTracker(isolate, script);
+    Compiler::CompileForLiveEdit(script);
   }
 
   // A logical 'catch' section.
diff --git a/src/log.cc b/src/log.cc
index 95362c0..25ce4ee 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -1432,8 +1432,7 @@
   CALL_LISTENERS(CodeCreateEvent(tag, code, shared, info, name));
 
   if (!FLAG_log_code || !log_->IsEnabled()) return;
-  if (code == isolate_->builtins()->builtin(
-      Builtins::kLazyCompile))
+  if (code == isolate_->builtins()->builtin(Builtins::kCompileUnoptimized))
     return;
 
   Log::MessageBuilder msg(log_);
@@ -1967,8 +1966,8 @@
   // During iteration, there can be heap allocation due to the
   // GetScriptLineNumber call.
   for (int i = 0; i < compiled_funcs_count; ++i) {
-    if (*code_objects[i] == isolate_->builtins()->builtin(
-        Builtins::kLazyCompile))
+    if (code_objects[i].is_identical_to(
+            isolate_->builtins()->CompileUnoptimized()))
       continue;
     LogExistingFunction(sfis[i], code_objects[i]);
   }
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 1e78093..0594c07 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -986,7 +986,8 @@
 // objects have been marked.
 
 void CodeFlusher::ProcessJSFunctionCandidates() {
-  Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
+  Code* lazy_compile =
+      isolate_->builtins()->builtin(Builtins::kCompileUnoptimized);
   Object* undefined = isolate_->heap()->undefined_value();
 
   JSFunction* candidate = jsfunction_candidates_head_;
@@ -1031,7 +1032,8 @@
 
 
 void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
-  Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
+  Code* lazy_compile =
+      isolate_->builtins()->builtin(Builtins::kCompileUnoptimized);
 
   SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
   SharedFunctionInfo* next_candidate;
@@ -1063,55 +1065,40 @@
 
 
 void CodeFlusher::ProcessOptimizedCodeMaps() {
-  static const int kEntriesStart = SharedFunctionInfo::kEntriesStart;
-  static const int kEntryLength = SharedFunctionInfo::kEntryLength;
-  static const int kContextOffset = 0;
-  static const int kCodeOffset = 1;
-  static const int kLiteralsOffset = 2;
-  STATIC_ASSERT(kEntryLength == 3);
+  STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
 
   SharedFunctionInfo* holder = optimized_code_map_holder_head_;
   SharedFunctionInfo* next_holder;
+
   while (holder != NULL) {
     next_holder = GetNextCodeMap(holder);
     ClearNextCodeMap(holder);
 
     FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
-    int new_length = kEntriesStart;
+    int new_length = SharedFunctionInfo::kEntriesStart;
     int old_length = code_map->length();
-    for (int i = kEntriesStart; i < old_length; i += kEntryLength) {
-      Code* code = Code::cast(code_map->get(i + kCodeOffset));
-      MarkBit code_mark = Marking::MarkBitFrom(code);
-      if (!code_mark.Get()) {
-        continue;
+    for (int i = SharedFunctionInfo::kEntriesStart;
+         i < old_length;
+         i += SharedFunctionInfo::kEntryLength) {
+      Code* code =
+          Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
+      if (!Marking::MarkBitFrom(code).Get()) continue;
+
+      // Move every slot in the entry.
+      for (int j = 0; j < SharedFunctionInfo::kEntryLength; j++) {
+        int dst_index = new_length++;
+        Object** slot = code_map->RawFieldOfElementAt(dst_index);
+        Object* object = code_map->get(i + j);
+        code_map->set(dst_index, object);
+        if (j == SharedFunctionInfo::kOsrAstIdOffset) {
+          ASSERT(object->IsSmi());
+        } else {
+          ASSERT(Marking::IsBlack(
+              Marking::MarkBitFrom(HeapObject::cast(*slot))));
+          isolate_->heap()->mark_compact_collector()->
+              RecordSlot(slot, slot, *slot);
+        }
       }
-
-      // Update and record the context slot in the optimized code map.
-      Object** context_slot = HeapObject::RawField(code_map,
-          FixedArray::OffsetOfElementAt(new_length));
-      code_map->set(new_length++, code_map->get(i + kContextOffset));
-      ASSERT(Marking::IsBlack(
-          Marking::MarkBitFrom(HeapObject::cast(*context_slot))));
-      isolate_->heap()->mark_compact_collector()->
-          RecordSlot(context_slot, context_slot, *context_slot);
-
-      // Update and record the code slot in the optimized code map.
-      Object** code_slot = HeapObject::RawField(code_map,
-          FixedArray::OffsetOfElementAt(new_length));
-      code_map->set(new_length++, code_map->get(i + kCodeOffset));
-      ASSERT(Marking::IsBlack(
-          Marking::MarkBitFrom(HeapObject::cast(*code_slot))));
-      isolate_->heap()->mark_compact_collector()->
-          RecordSlot(code_slot, code_slot, *code_slot);
-
-      // Update and record the literals slot in the optimized code map.
-      Object** literals_slot = HeapObject::RawField(code_map,
-          FixedArray::OffsetOfElementAt(new_length));
-      code_map->set(new_length++, code_map->get(i + kLiteralsOffset));
-      ASSERT(Marking::IsBlack(
-          Marking::MarkBitFrom(HeapObject::cast(*literals_slot))));
-      isolate_->heap()->mark_compact_collector()->
-          RecordSlot(literals_slot, literals_slot, *literals_slot);
     }
 
     // Trim the optimized code map if entries have been removed.
@@ -2606,9 +2593,7 @@
             cached_map,
             SKIP_WRITE_BARRIER);
       }
-      Object** slot =
-          HeapObject::RawField(prototype_transitions,
-                               FixedArray::OffsetOfElementAt(proto_index));
+      Object** slot = prototype_transitions->RawFieldOfElementAt(proto_index);
       RecordSlot(slot, slot, prototype);
       new_number_of_transitions++;
     }
@@ -2713,12 +2698,10 @@
     for (int i = 0; i < table->Capacity(); i++) {
       if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
         Object** key_slot =
-            HeapObject::RawField(table, FixedArray::OffsetOfElementAt(
-                ObjectHashTable::EntryToIndex(i)));
+            table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
         RecordSlot(anchor, key_slot, *key_slot);
         Object** value_slot =
-            HeapObject::RawField(table, FixedArray::OffsetOfElementAt(
-                ObjectHashTable::EntryToValueIndex(i)));
+            table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
         MarkCompactMarkingVisitor::MarkObjectByPointer(
             this, anchor, value_slot);
       }
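The ProcessOptimizedCodeMaps rewrite above replaces three hand-unrolled slot
moves with a loop over kEntryLength slots, which is what lets an entry grow to
four slots {context, code, literals, osr_ast_id} without further copy-paste.
Reduced to a standalone sketch of the compaction pattern (the liveness
predicate stands in for the mark-bit check):

#include <functional>
#include <vector>

constexpr int kEntryLength = 4;   // {context, code, literals, osr_ast_id}
constexpr int kCachedCodeOffset = 1;

// Compact surviving fixed-length entries toward the front, in place.
// Returns the new length so the caller can trim the array.
int CompactCodeMap(std::vector<void*>& map, int entries_start,
                   const std::function<bool(void*)>& code_is_live) {
  int new_length = entries_start;
  for (size_t i = entries_start; i < map.size(); i += kEntryLength) {
    if (!code_is_live(map[i + kCachedCodeOffset])) continue;
    for (int j = 0; j < kEntryLength; j++) {
      map[new_length++] = map[i + j];   // move every slot in the entry
    }
  }
  return new_length;
}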
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index ae8e3e7..efa78a2 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -297,8 +297,8 @@
 }
 
 
-static void CallRuntimePassFunction(MacroAssembler* masm,
-                                    Runtime::FunctionId function_id) {
+static void CallRuntimePassFunction(
+    MacroAssembler* masm, Runtime::FunctionId function_id) {
   FrameScope scope(masm, StackFrame::INTERNAL);
   // Push a copy of the function onto the stack.
   // Push call kind information and function as parameter to the runtime call.
@@ -318,7 +318,13 @@
 }
 
 
-void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
+  __ Addu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(at);
+}
+
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
   // Checking whether the queued function is ready for install is optional,
   // since we come across interrupts and stack checks elsewhere.  However,
   // not checking may delay installing ready functions, and always checking
@@ -328,22 +334,14 @@
   __ LoadRoot(t0, Heap::kStackLimitRootIndex);
   __ Branch(&ok, hs, sp, Operand(t0));
 
-  CallRuntimePassFunction(masm, Runtime::kTryInstallRecompiledCode);
-  // Tail call to returned code.
-  __ Addu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(at);
+  CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
+  GenerateTailCallToReturnedCode(masm);
 
   __ bind(&ok);
   GenerateTailCallToSharedCode(masm);
 }
 
 
-void Builtins::Generate_ConcurrentRecompile(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kConcurrentRecompile);
-  GenerateTailCallToSharedCode(masm);
-}
-
-
 static void Generate_JSConstructStubHelper(MacroAssembler* masm,
                                            bool is_api_function,
                                            bool count_constructions) {
@@ -790,22 +788,38 @@
 }
 
 
-void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kLazyCompile);
-  // Do a tail-call of the compiled function.
-  __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(t9);
+void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
+  CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
+  GenerateTailCallToReturnedCode(masm);
 }
 
 
-void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kLazyRecompile);
-  // Do a tail-call of the compiled function.
-  __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(t9);
+static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
+  FrameScope scope(masm, StackFrame::INTERNAL);
+  // Push a copy of the function onto the stack.
+  // Push call kind information and function as parameter to the runtime call.
+  __ Push(a1, t1, a1);
+  // Whether to compile in a background thread.
+  __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
+
+  __ CallRuntime(Runtime::kCompileOptimized, 2);
+  // Restore call kind information and receiver.
+  __ Pop(a1, t1);
 }
 
 
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+  CallCompileOptimized(masm, false);
+  GenerateTailCallToReturnedCode(masm);
+}
+
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+  CallCompileOptimized(masm, true);
+  GenerateTailCallToReturnedCode(masm);
+}
+
+
 static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
   // For now, we are relying on the fact that make_code_young doesn't do any
   // garbage collection, which allows us to save/restore the registers without
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index ca765aa..61e05f8 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -1234,6 +1234,31 @@
 }
 
 
+void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
+  __ mov(t9, ra);
+  __ pop(ra);
+  if (save_doubles_ == kSaveFPRegs) {
+    __ PushSafepointRegistersAndDoubles();
+  } else {
+    __ PushSafepointRegisters();
+  }
+  __ Jump(t9);
+}
+
+
+void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
+  __ mov(t9, ra);
+  __ pop(ra);
+  __ StoreToSafepointRegisterSlot(t9, t9);
+  if (save_doubles_ == kSaveFPRegs) {
+    __ PopSafepointRegistersAndDoubles();
+  } else {
+    __ PopSafepointRegisters();
+  }
+  __ Jump(t9);
+}
+
+
 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
   // We don't allow a GC during a store buffer overflow so there is no need to
   // store the registers in any particular way, but we do have to store and
@@ -1501,6 +1526,28 @@
   ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
   CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
   BinaryOpICStub::GenerateAheadOfTime(isolate);
+  StoreRegistersStateStub::GenerateAheadOfTime(isolate);
+  RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
+}
+
+
+void StoreRegistersStateStub::GenerateAheadOfTime(
+    Isolate* isolate) {
+  StoreRegistersStateStub stub1(kDontSaveFPRegs);
+  stub1.GetCode(isolate);
+  // Hydrogen code stubs need stub2 at snapshot time.
+  StoreRegistersStateStub stub2(kSaveFPRegs);
+  stub2.GetCode(isolate);
+}
+
+
+void RestoreRegistersStateStub::GenerateAheadOfTime(
+    Isolate* isolate) {
+  RestoreRegistersStateStub stub1(kDontSaveFPRegs);
+  stub1.GetCode(isolate);
+  // Hydrogen code stubs need stub2 at snapshot time.
+  RestoreRegistersStateStub stub2(kSaveFPRegs);
+  stub2.GetCode(isolate);
 }
 
 
diff --git a/src/mips/code-stubs-mips.h b/src/mips/code-stubs-mips.h
index a0e01b2..3019220 100644
--- a/src/mips/code-stubs-mips.h
+++ b/src/mips/code-stubs-mips.h
@@ -157,6 +157,33 @@
   void Generate(MacroAssembler* masm);
 };
 
+class StoreRegistersStateStub: public PlatformCodeStub {
+ public:
+  explicit StoreRegistersStateStub(SaveFPRegsMode with_fp)
+      : save_doubles_(with_fp) {}
+
+  static void GenerateAheadOfTime(Isolate* isolate);
+ private:
+  Major MajorKey() { return StoreRegistersState; }
+  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+  SaveFPRegsMode save_doubles_;
+
+  void Generate(MacroAssembler* masm);
+};
+
+class RestoreRegistersStateStub: public PlatformCodeStub {
+ public:
+  explicit RestoreRegistersStateStub(SaveFPRegsMode with_fp)
+      : save_doubles_(with_fp) {}
+
+  static void GenerateAheadOfTime(Isolate* isolate);
+ private:
+  Major MajorKey() { return RestoreRegistersState; }
+  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+  SaveFPRegsMode save_doubles_;
+
+  void Generate(MacroAssembler* masm);
+};
 
 class StringCompareStub: public PlatformCodeStub {
  public:
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index b94f351..4225270 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -179,20 +179,21 @@
     // Generators allocate locals, if any, in context slots.
     ASSERT(!info->function()->is_generator() || locals_count == 0);
     if (locals_count > 0) {
-      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
       // Emit a loop to initialize stack cells for locals when optimizing for
       // size. Otherwise, unroll the loop for maximum performance.
       __ LoadRoot(t5, Heap::kUndefinedValueRootIndex);
-      if (FLAG_optimize_for_size && locals_count > 4) {
+      if ((FLAG_optimize_for_size && locals_count > 4) ||
+          !is_int16(locals_count)) {
         Label loop;
-        __ li(a2, Operand(locals_count));
+        __ Subu(a2, sp, Operand(locals_count * kPointerSize));
         __ bind(&loop);
-        __ Subu(a2, a2, 1);
-        __ push(t5);
-        __ Branch(&loop, gt, a2, Operand(zero_reg));
+        __ Subu(sp, sp, Operand(kPointerSize));
+        __ Branch(&loop, gt, sp, Operand(a2), USE_DELAY_SLOT);
+        __ sw(t5, MemOperand(sp, 0));  // Push in the delay slot.
       } else {
+        __ Subu(sp, sp, Operand(locals_count * kPointerSize));
         for (int i = 0; i < locals_count; i++) {
-          __ push(t5);
+          __ sw(t5, MemOperand(sp, i * kPointerSize));
         }
       }
     }
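The rewritten prologue above reserves the whole locals area with a single sp
adjustment and then stores into the reserved slots, instead of issuing one
push per local; the extra !is_int16(locals_count) guard presumably keeps the
unrolled store offsets inside the signed 16-bit immediate field of MIPS loads
and stores.  The shape of the transformation, as a hedged C++ analogue:

#include <cstdint>
#include <vector>

// Before: one push (sp adjustment + store) per local.
// After: one adjustment, then a plain store into each reserved slot.
void InitLocals(std::vector<intptr_t>* stack, int locals_count,
                intptr_t undefined_value) {
  size_t base = stack->size();
  stack->resize(base + locals_count);        // single "sp" adjustment
  for (int i = 0; i < locals_count; i++) {
    (*stack)[base + i] = undefined_value;    // fill each reserved slot
  }
}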
diff --git a/src/mips/lithium-codegen-mips.h b/src/mips/lithium-codegen-mips.h
index cd8ea40..83a10de 100644
--- a/src/mips/lithium-codegen-mips.h
+++ b/src/mips/lithium-codegen-mips.h
@@ -422,12 +422,18 @@
       codegen_->expected_safepoint_kind_ = kind;
 
       switch (codegen_->expected_safepoint_kind_) {
-        case Safepoint::kWithRegisters:
-          codegen_->masm_->PushSafepointRegisters();
+        case Safepoint::kWithRegisters: {
+          StoreRegistersStateStub stub1(kDontSaveFPRegs);
+          codegen_->masm_->push(ra);
+          codegen_->masm_->CallStub(&stub1);
           break;
-        case Safepoint::kWithRegistersAndDoubles:
-          codegen_->masm_->PushSafepointRegistersAndDoubles();
+        }
+        case Safepoint::kWithRegistersAndDoubles: {
+          StoreRegistersStateStub stub2(kSaveFPRegs);
+          codegen_->masm_->push(ra);
+          codegen_->masm_->CallStub(&stub2);
           break;
+        }
         default:
           UNREACHABLE();
       }
@@ -437,12 +443,18 @@
       Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
       ASSERT((kind & Safepoint::kWithRegisters) != 0);
       switch (kind) {
-        case Safepoint::kWithRegisters:
-          codegen_->masm_->PopSafepointRegisters();
+        case Safepoint::kWithRegisters: {
+          RestoreRegistersStateStub stub1(kDontSaveFPRegs);
+          codegen_->masm_->push(ra);
+          codegen_->masm_->CallStub(&stub1);
           break;
-        case Safepoint::kWithRegistersAndDoubles:
-          codegen_->masm_->PopSafepointRegistersAndDoubles();
+        }
+        case Safepoint::kWithRegistersAndDoubles: {
+          RestoreRegistersStateStub stub2(kSaveFPRegs);
+          codegen_->masm_->push(ra);
+          codegen_->masm_->CallStub(&stub2);
           break;
+        }
         default:
           UNREACHABLE();
       }
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 311afc0..5ad0453 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -2364,9 +2364,7 @@
 
 Object** DescriptorArray::GetKeySlot(int descriptor_number) {
   ASSERT(descriptor_number < number_of_descriptors());
-  return HeapObject::RawField(
-      reinterpret_cast<HeapObject*>(this),
-      OffsetOfElementAt(ToKeyIndex(descriptor_number)));
+  return RawFieldOfElementAt(ToKeyIndex(descriptor_number));
 }
 
 
@@ -2421,9 +2419,7 @@
 
 Object** DescriptorArray::GetValueSlot(int descriptor_number) {
   ASSERT(descriptor_number < number_of_descriptors());
-  return HeapObject::RawField(
-      reinterpret_cast<HeapObject*>(this),
-      OffsetOfElementAt(ToValueIndex(descriptor_number)));
+  return RawFieldOfElementAt(ToValueIndex(descriptor_number));
 }
 
 
@@ -3224,7 +3220,7 @@
 
 void JSFunctionResultCache::Clear() {
   int cache_size = size();
-  Object** entries_start = RawField(this, OffsetOfElementAt(kEntriesIndex));
+  Object** entries_start = RawFieldOfElementAt(kEntriesIndex);
   MemsetPointer(entries_start,
                 GetHeap()->the_hole_value(),
                 cache_size - kEntriesIndex);
@@ -3830,8 +3826,7 @@
 
 
 Object** DependentCode::slot_at(int i) {
-  return HeapObject::RawField(
-      this, FixedArray::OffsetOfElementAt(kCodesStartIndex + i));
+  return RawFieldOfElementAt(kCodesStartIndex + i);
 }
 
 
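The repeated HeapObject::RawField(array, FixedArray::OffsetOfElementAt(i))
pattern is collapsed into a RawFieldOfElementAt(i) accessor throughout this
commit.  A standalone model of what such an accessor does (the Fake* types
and header size are invented for the example; the real definition is assumed
to live elsewhere in the patch):

// An element slot is the object's base address plus a byte offset
// derived from the element index.
struct FakeHeapObject {
  static void** RawField(FakeHeapObject* obj, int byte_offset) {
    return reinterpret_cast<void**>(
        reinterpret_cast<char*>(obj) + byte_offset);
  }
};

struct FakeFixedArray : FakeHeapObject {
  static constexpr int kHeaderSize = 8;  // placeholder header size
  static int OffsetOfElementAt(int index) {
    return kHeaderSize + index * static_cast<int>(sizeof(void*));
  }
  void** RawFieldOfElementAt(int index) {  // the consolidated helper
    return RawField(this, OffsetOfElementAt(index));
  }
};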
@@ -4950,7 +4945,7 @@
 
 bool SharedFunctionInfo::is_compiled() {
   return code() !=
-      GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
+      GetIsolate()->builtins()->builtin(Builtins::kCompileUnoptimized);
 }
 
 
@@ -5073,20 +5068,21 @@
 }
 
 
-bool JSFunction::IsMarkedForLazyRecompilation() {
-  return code() == GetIsolate()->builtins()->builtin(Builtins::kLazyRecompile);
+bool JSFunction::IsMarkedForOptimization() {
+  return code() == GetIsolate()->builtins()->builtin(
+      Builtins::kCompileOptimized);
 }
 
 
-bool JSFunction::IsMarkedForConcurrentRecompilation() {
+bool JSFunction::IsMarkedForConcurrentOptimization() {
   return code() == GetIsolate()->builtins()->builtin(
-      Builtins::kConcurrentRecompile);
+      Builtins::kCompileOptimizedConcurrent);
 }
 
 
-bool JSFunction::IsInRecompileQueue() {
+bool JSFunction::IsInOptimizationQueue() {
   return code() == GetIsolate()->builtins()->builtin(
-      Builtins::kInRecompileQueue);
+      Builtins::kInOptimizationQueue);
 }
 
 
@@ -5196,7 +5192,8 @@
 
 
 bool JSFunction::is_compiled() {
-  return code() != GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
+  return code() !=
+      GetIsolate()->builtins()->builtin(Builtins::kCompileUnoptimized);
 }
 
 
diff --git a/src/objects-visiting-inl.h b/src/objects-visiting-inl.h
index 1a68344..a7fc84f 100644
--- a/src/objects-visiting-inl.h
+++ b/src/objects-visiting-inl.h
@@ -331,8 +331,7 @@
   for (int idx = Context::FIRST_WEAK_SLOT;
        idx < Context::NATIVE_CONTEXT_SLOTS;
        ++idx) {
-    Object** slot =
-        HeapObject::RawField(object, FixedArray::OffsetOfElementAt(idx));
+    Object** slot = Context::cast(object)->RawFieldOfElementAt(idx);
     collector->RecordSlot(slot, slot, *slot);
   }
 }
diff --git a/src/objects.cc b/src/objects.cc
index aa7f500..0e2f4c1 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -9298,14 +9298,6 @@
 }
 
 
-MaybeObject* String::SubString(int start, int end, PretenureFlag pretenure) {
-  Heap* heap = GetHeap();
-  if (start == 0 && end == length()) return this;
-  MaybeObject* result = heap->AllocateSubString(this, start, end, pretenure);
-  return result;
-}
-
-
 void String::PrintOn(FILE* file) {
   int length = this->length();
   for (int i = 0; i < length; i++) {
@@ -9483,19 +9475,19 @@
 }
 
 
-void JSFunction::MarkForLazyRecompilation() {
+void JSFunction::MarkForOptimization() {
   ASSERT(is_compiled() || GetIsolate()->DebuggerHasBreakPoints());
   ASSERT(!IsOptimized());
   ASSERT(shared()->allows_lazy_compilation() ||
          code()->optimizable());
   ASSERT(!shared()->is_generator());
   set_code_no_write_barrier(
-      GetIsolate()->builtins()->builtin(Builtins::kLazyRecompile));
+      GetIsolate()->builtins()->builtin(Builtins::kCompileOptimized));
   // No write barrier required, since the builtin is part of the root set.
 }
 
 
-void JSFunction::MarkForConcurrentRecompilation() {
+void JSFunction::MarkForConcurrentOptimization() {
   ASSERT(is_compiled() || GetIsolate()->DebuggerHasBreakPoints());
   ASSERT(!IsOptimized());
   ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
@@ -9507,16 +9499,16 @@
     PrintF(" for concurrent recompilation.\n");
   }
   set_code_no_write_barrier(
-      GetIsolate()->builtins()->builtin(Builtins::kConcurrentRecompile));
+      GetIsolate()->builtins()->builtin(Builtins::kCompileOptimizedConcurrent));
   // No write barrier required, since the builtin is part of the root set.
 }
 
 
-void JSFunction::MarkInRecompileQueue() {
+void JSFunction::MarkInOptimizationQueue() {
   // We can only arrive here via the concurrent-recompilation builtin.  If
   // break points were set, the code would point to the lazy-compile builtin.
   ASSERT(!GetIsolate()->DebuggerHasBreakPoints());
-  ASSERT(IsMarkedForConcurrentRecompilation() && !IsOptimized());
+  ASSERT(IsMarkedForConcurrentOptimization() && !IsOptimized());
   ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
   ASSERT(GetIsolate()->concurrent_recompilation_enabled());
   if (FLAG_trace_concurrent_recompilation) {
@@ -9525,73 +9517,57 @@
     PrintF(" for concurrent recompilation.\n");
   }
   set_code_no_write_barrier(
-      GetIsolate()->builtins()->builtin(Builtins::kInRecompileQueue));
+      GetIsolate()->builtins()->builtin(Builtins::kInOptimizationQueue));
   // No write barrier required, since the builtin is part of the root set.
 }
 
 
-static bool CompileLazyHelper(CompilationInfo* info,
-                              ClearExceptionFlag flag) {
-  // Compile the source information to a code object.
-  ASSERT(info->IsOptimizing() || !info->shared_info()->is_compiled());
-  ASSERT(!info->isolate()->has_pending_exception());
-  bool result = Compiler::CompileLazy(info);
-  ASSERT(result != info->isolate()->has_pending_exception());
-  if (!result && flag == CLEAR_EXCEPTION) {
-    info->isolate()->clear_pending_exception();
-  }
-  return result;
-}
-
-
-bool SharedFunctionInfo::CompileLazy(Handle<SharedFunctionInfo> shared,
-                                     ClearExceptionFlag flag) {
-  ASSERT(shared->allows_lazy_compilation_without_context());
-  CompilationInfoWithZone info(shared);
-  return CompileLazyHelper(&info, flag);
-}
-
-
 void SharedFunctionInfo::AddToOptimizedCodeMap(
     Handle<SharedFunctionInfo> shared,
     Handle<Context> native_context,
     Handle<Code> code,
-    Handle<FixedArray> literals) {
+    Handle<FixedArray> literals,
+    BailoutId osr_ast_id) {
   CALL_HEAP_FUNCTION_VOID(
       shared->GetIsolate(),
-      shared->AddToOptimizedCodeMap(*native_context, *code, *literals));
+      shared->AddToOptimizedCodeMap(
+          *native_context, *code, *literals, osr_ast_id));
 }
 
 
 MaybeObject* SharedFunctionInfo::AddToOptimizedCodeMap(Context* native_context,
                                                        Code* code,
-                                                       FixedArray* literals) {
+                                                       FixedArray* literals,
+                                                       BailoutId osr_ast_id) {
   ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
   ASSERT(native_context->IsNativeContext());
-  STATIC_ASSERT(kEntryLength == 3);
+  STATIC_ASSERT(kEntryLength == 4);
   Heap* heap = GetHeap();
   FixedArray* new_code_map;
   Object* value = optimized_code_map();
+  Smi* osr_ast_id_smi = Smi::FromInt(osr_ast_id.ToInt());
   if (value->IsSmi()) {
     // No optimized code map.
     ASSERT_EQ(0, Smi::cast(value)->value());
-    // Create 3 entries per context {context, code, literals}.
+    // Create one 4-slot entry: {context, code, literals, osr_ast_id}.
     MaybeObject* maybe = heap->AllocateFixedArray(kInitialLength);
     if (!maybe->To(&new_code_map)) return maybe;
-    new_code_map->set(kEntriesStart + 0, native_context);
-    new_code_map->set(kEntriesStart + 1, code);
-    new_code_map->set(kEntriesStart + 2, literals);
+    new_code_map->set(kEntriesStart + kContextOffset, native_context);
+    new_code_map->set(kEntriesStart + kCachedCodeOffset, code);
+    new_code_map->set(kEntriesStart + kLiteralsOffset, literals);
+    new_code_map->set(kEntriesStart + kOsrAstIdOffset, osr_ast_id_smi);
   } else {
     // Copy old map and append one new entry.
     FixedArray* old_code_map = FixedArray::cast(value);
-    ASSERT_EQ(-1, SearchOptimizedCodeMap(native_context));
+    ASSERT_EQ(-1, SearchOptimizedCodeMap(native_context, osr_ast_id));
     int old_length = old_code_map->length();
     int new_length = old_length + kEntryLength;
     MaybeObject* maybe = old_code_map->CopySize(new_length);
     if (!maybe->To(&new_code_map)) return maybe;
-    new_code_map->set(old_length + 0, native_context);
-    new_code_map->set(old_length + 1, code);
-    new_code_map->set(old_length + 2, literals);
+    new_code_map->set(old_length + kContextOffset, native_context);
+    new_code_map->set(old_length + kCachedCodeOffset, code);
+    new_code_map->set(old_length + kLiteralsOffset, literals);
+    new_code_map->set(old_length + kOsrAstIdOffset, osr_ast_id_smi);
     // Zap the old map for the sake of the heap verifier.
     if (Heap::ShouldZapGarbage()) {
       Object** data = old_code_map->data_start();
@@ -9600,11 +9576,12 @@
   }
 #ifdef DEBUG
   for (int i = kEntriesStart; i < new_code_map->length(); i += kEntryLength) {
-    ASSERT(new_code_map->get(i)->IsNativeContext());
-    ASSERT(new_code_map->get(i + 1)->IsCode());
-    ASSERT(Code::cast(new_code_map->get(i + 1))->kind() ==
+    ASSERT(new_code_map->get(i + kContextOffset)->IsNativeContext());
+    ASSERT(new_code_map->get(i + kCachedCodeOffset)->IsCode());
+    ASSERT(Code::cast(new_code_map->get(i + kCachedCodeOffset))->kind() ==
            Code::OPTIMIZED_FUNCTION);
-    ASSERT(new_code_map->get(i + 2)->IsFixedArray());
+    ASSERT(new_code_map->get(i + kLiteralsOffset)->IsFixedArray());
+    ASSERT(new_code_map->get(i + kOsrAstIdOffset)->IsSmi());
   }
 #endif
   set_optimized_code_map(new_code_map);
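After this change an optimized-code-map entry is a fixed four-slot record
keyed by both native context and OSR AST id.  A standalone sketch of the
layout and of the lookup that SearchOptimizedCodeMap performs further down in
this file (the prefix length and intptr_t slots are simplified stand-ins):

#include <cstdint>
#include <vector>

// Four-slot entries, appended to a flat array after a fixed prefix.
constexpr int kEntriesStart = 1;      // placeholder prefix length
constexpr int kEntryLength = 4;
constexpr int kContextOffset = 0;
constexpr int kCachedCodeOffset = 1;
constexpr int kLiteralsOffset = 2;
constexpr int kOsrAstIdOffset = 3;

// Return the index of the cached code for (context, osr_ast_id), or -1.
int SearchCodeMap(const std::vector<intptr_t>& map,
                  intptr_t native_context, intptr_t osr_ast_id) {
  for (size_t i = kEntriesStart; i < map.size(); i += kEntryLength) {
    if (map[i + kContextOffset] == native_context &&
        map[i + kOsrAstIdOffset] == osr_ast_id) {
      return static_cast<int>(i) + kCachedCodeOffset;
    }
  }
  return -1;
}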
@@ -9612,19 +9589,24 @@
 }
 
 
-void SharedFunctionInfo::InstallFromOptimizedCodeMap(JSFunction* function,
-                                                     int index) {
+FixedArray* SharedFunctionInfo::GetLiteralsFromOptimizedCodeMap(int index) {
   ASSERT(index > kEntriesStart);
   FixedArray* code_map = FixedArray::cast(optimized_code_map());
   if (!bound()) {
     FixedArray* cached_literals = FixedArray::cast(code_map->get(index + 1));
-    ASSERT(cached_literals != NULL);
-    function->set_literals(cached_literals);
+    ASSERT_NE(NULL, cached_literals);
+    return cached_literals;
   }
+  return NULL;
+}
+
+
+Code* SharedFunctionInfo::GetCodeFromOptimizedCodeMap(int index) {
+  ASSERT(index > kEntriesStart);
+  FixedArray* code_map = FixedArray::cast(optimized_code_map());
   Code* code = Code::cast(code_map->get(index));
-  ASSERT(code != NULL);
-  ASSERT(function->context()->native_context() == code_map->get(index - 1));
-  function->ReplaceCode(code);
+  ASSERT_NE(NULL, code);
+  return code;
 }
 
 
@@ -9663,9 +9645,14 @@
     }
   }
   while (i < (code_map->length() - kEntryLength)) {
-    code_map->set(i, code_map->get(i + kEntryLength));
-    code_map->set(i + 1, code_map->get(i + 1 + kEntryLength));
-    code_map->set(i + 2, code_map->get(i + 2 + kEntryLength));
+    code_map->set(i + kContextOffset,
+                  code_map->get(i + kContextOffset + kEntryLength));
+    code_map->set(i + kCachedCodeOffset,
+                  code_map->get(i + kCachedCodeOffset + kEntryLength));
+    code_map->set(i + kLiteralsOffset,
+                  code_map->get(i + kLiteralsOffset + kEntryLength));
+    code_map->set(i + kOsrAstIdOffset,
+                  code_map->get(i + kOsrAstIdOffset + kEntryLength));
     i += kEntryLength;
   }
   if (removed_entry) {
@@ -9690,50 +9677,6 @@
 }
 
 
-bool JSFunction::CompileLazy(Handle<JSFunction> function,
-                             ClearExceptionFlag flag) {
-  bool result = true;
-  if (function->shared()->is_compiled()) {
-    function->ReplaceCode(function->shared()->code());
-  } else {
-    ASSERT(function->shared()->allows_lazy_compilation());
-    CompilationInfoWithZone info(function);
-    result = CompileLazyHelper(&info, flag);
-    ASSERT(!result || function->is_compiled());
-  }
-  return result;
-}
-
-
-Handle<Code> JSFunction::CompileOsr(Handle<JSFunction> function,
-                                    BailoutId osr_ast_id,
-                                    ClearExceptionFlag flag) {
-  CompilationInfoWithZone info(function);
-  info.SetOptimizing(osr_ast_id);
-  if (CompileLazyHelper(&info, flag)) {
-    // TODO(titzer): don't install the OSR code.
-    // ASSERT(function->code() != *info.code());
-    return info.code();
-  } else {
-    return Handle<Code>::null();
-  }
-}
-
-
-bool JSFunction::CompileOptimized(Handle<JSFunction> function,
-                                  ClearExceptionFlag flag) {
-  CompilationInfoWithZone info(function);
-  info.SetOptimizing(BailoutId::None());
-  return CompileLazyHelper(&info, flag);
-}
-
-
-bool JSFunction::EnsureCompiled(Handle<JSFunction> function,
-                                ClearExceptionFlag flag) {
-  return function->is_compiled() || CompileLazy(function, flag);
-}
-
-
 void JSObject::OptimizeAsPrototype(Handle<JSObject> object) {
   if (object->IsGlobalObject()) return;
 
@@ -10271,16 +10214,19 @@
 }
 
 
-int SharedFunctionInfo::SearchOptimizedCodeMap(Context* native_context) {
+int SharedFunctionInfo::SearchOptimizedCodeMap(Context* native_context,
+                                               BailoutId osr_ast_id) {
   ASSERT(native_context->IsNativeContext());
   if (!FLAG_cache_optimized_code) return -1;
   Object* value = optimized_code_map();
   if (!value->IsSmi()) {
     FixedArray* optimized_code_map = FixedArray::cast(value);
     int length = optimized_code_map->length();
+    Smi* osr_ast_id_smi = Smi::FromInt(osr_ast_id.ToInt());
     for (int i = kEntriesStart; i < length; i += kEntryLength) {
-      if (optimized_code_map->get(i) == native_context) {
-        return i + 1;
+      if (optimized_code_map->get(i + kContextOffset) == native_context &&
+          optimized_code_map->get(i + kOsrAstIdOffset) == osr_ast_id_smi) {
+        return i + kCachedCodeOffset;
       }
     }
     if (FLAG_trace_opt) {
@@ -10706,6 +10652,18 @@
 }
 
 
+uint32_t Code::TranslateAstIdToPcOffset(BailoutId ast_id) {
+  DisallowHeapAllocation no_gc;
+  ASSERT(kind() == FUNCTION);
+  BackEdgeTable back_edges(this, &no_gc);
+  for (uint32_t i = 0; i < back_edges.length(); i++) {
+    if (back_edges.ast_id(i) == ast_id) return back_edges.pc_offset(i);
+  }
+  UNREACHABLE();  // We expect to find the back edge.
+  return 0;
+}
+
+
 void Code::MakeCodeAgeSequenceYoung(byte* sequence, Isolate* isolate) {
   PatchPlatformCodeAge(isolate, sequence, kNoAgeCodeAge, NO_MARKING_PARITY);
 }
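TranslateAstIdToPcOffset above is a plain linear scan of the function's
back-edge table; every loop back edge is recorded at code-generation time, so
a missing id indicates a bug, hence UNREACHABLE().  A minimal stand-in
illustrating the mapping (the table layout here is invented for the example):

#include <cassert>
#include <cstdint>
#include <vector>

struct BackEdge { int ast_id; uint32_t pc_offset; };

// Linear search mirroring Code::TranslateAstIdToPcOffset: the entry is
// expected to exist, so a miss is a programming error.
uint32_t TranslateAstIdToPcOffset(const std::vector<BackEdge>& table,
                                  int ast_id) {
  for (const BackEdge& edge : table) {
    if (edge.ast_id == ast_id) return edge.pc_offset;
  }
  assert(false && "back edge must exist");
  return 0;
}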
diff --git a/src/objects.h b/src/objects.h
index 0fa8304..7a557e9 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1056,47 +1056,47 @@
                                                                               \
   V(k32BitValueInRegisterIsNotZeroExtended,                                   \
     "32 bit value in register is not zero-extended")                          \
-  V(kAlignmentMarkerExpected, "alignment marker expected")                    \
+  V(kAlignmentMarkerExpected, "Alignment marker expected")                    \
   V(kAllocationIsNotDoubleAligned, "Allocation is not double aligned")        \
   V(kAPICallReturnedInvalidObject, "API call returned invalid object")        \
   V(kArgumentsObjectValueInATestContext,                                      \
-    "arguments object value in a test context")                               \
-  V(kArrayBoilerplateCreationFailed, "array boilerplate creation failed")     \
-  V(kArrayIndexConstantValueTooBig, "array index constant value too big")     \
-  V(kAssignmentToArguments, "assignment to arguments")                        \
+    "Arguments object value in a test context")                               \
+  V(kArrayBoilerplateCreationFailed, "Array boilerplate creation failed")     \
+  V(kArrayIndexConstantValueTooBig, "Array index constant value too big")     \
+  V(kAssignmentToArguments, "Assignment to arguments")                        \
   V(kAssignmentToLetVariableBeforeInitialization,                             \
-    "assignment to let variable before initialization")                       \
-  V(kAssignmentToLOOKUPVariable, "assignment to LOOKUP variable")             \
+    "Assignment to let variable before initialization")                       \
+  V(kAssignmentToLOOKUPVariable, "Assignment to LOOKUP variable")             \
   V(kAssignmentToParameterFunctionUsesArgumentsObject,                        \
-    "assignment to parameter, function uses arguments object")                \
+    "Assignment to parameter, function uses arguments object")                \
   V(kAssignmentToParameterInArgumentsObject,                                  \
-    "assignment to parameter in arguments object")                            \
+    "Assignment to parameter in arguments object")                            \
   V(kAttemptToUseUndefinedCache, "Attempt to use undefined cache")            \
   V(kBadValueContextForArgumentsObjectValue,                                  \
-    "bad value context for arguments object value")                           \
+    "Bad value context for arguments object value")                           \
   V(kBadValueContextForArgumentsValue,                                        \
-    "bad value context for arguments value")                                  \
-  V(kBailedOutDueToDependencyChange, "bailed out due to dependency change")   \
-  V(kBailoutWasNotPrepared, "bailout was not prepared")                       \
+    "Bad value context for arguments value")                                  \
+  V(kBailedOutDueToDependencyChange, "Bailed out due to dependency change")   \
+  V(kBailoutWasNotPrepared, "Bailout was not prepared")                       \
   V(kBinaryStubGenerateFloatingPointCode,                                     \
     "BinaryStub_GenerateFloatingPointCode")                                   \
   V(kBothRegistersWereSmisInSelectNonSmi,                                     \
     "Both registers were smis in SelectNonSmi")                               \
   V(kCallToAJavaScriptRuntimeFunction,                                        \
-    "call to a JavaScript runtime function")                                  \
+    "Call to a JavaScript runtime function")                                  \
   V(kCannotTranslatePositionInChangedArea,                                    \
     "Cannot translate position in changed area")                              \
-  V(kCodeGenerationFailed, "code generation failed")                          \
-  V(kCodeObjectNotProperlyPatched, "code object not properly patched")        \
-  V(kCompoundAssignmentToLookupSlot, "compound assignment to lookup slot")    \
-  V(kContextAllocatedArguments, "context-allocated arguments")                \
-  V(kDebuggerIsActive, "debugger is active")                                  \
+  V(kCodeGenerationFailed, "Code generation failed")                          \
+  V(kCodeObjectNotProperlyPatched, "Code object not properly patched")        \
+  V(kCompoundAssignmentToLookupSlot, "Compound assignment to lookup slot")    \
+  V(kContextAllocatedArguments, "Context-allocated arguments")                \
+  V(kDebuggerIsActive, "Debugger is active")                                  \
   V(kDebuggerStatement, "DebuggerStatement")                                  \
   V(kDeclarationInCatchContext, "Declaration in catch context")               \
   V(kDeclarationInWithContext, "Declaration in with context")                 \
   V(kDefaultNaNModeNotSet, "Default NaN mode not set")                        \
-  V(kDeleteWithGlobalVariable, "delete with global variable")                 \
-  V(kDeleteWithNonGlobalVariable, "delete with non-global variable")          \
+  V(kDeleteWithGlobalVariable, "Delete with global variable")                 \
+  V(kDeleteWithNonGlobalVariable, "Delete with non-global variable")          \
   V(kDestinationOfCopyNotAligned, "Destination of copy not aligned")          \
   V(kDontDeleteCellsCannotContainTheHole,                                     \
     "DontDelete cells can't contain the hole")                                \
@@ -1104,9 +1104,9 @@
     "DoPushArgument not implemented for double type")                         \
   V(kEmitLoadRegisterUnsupportedDoubleImmediate,                              \
     "EmitLoadRegister: Unsupported double immediate")                         \
-  V(kEval, "eval")                                                            \
+  V(kEval, "Eval")                                                            \
   V(kExpected0AsASmiSentinel, "Expected 0 as a Smi sentinel")                 \
-  V(kExpectedAlignmentMarker, "expected alignment marker")                    \
+  V(kExpectedAlignmentMarker, "Expected alignment marker")                    \
   V(kExpectedAllocationSiteInCell,                                            \
     "Expected AllocationSite in property cell")                               \
   V(kExpectedPropertyCellInRegisterA2,                                        \
@@ -1119,47 +1119,48 @@
     "Expecting alignment for CopyBytes")                                      \
   V(kExportDeclaration, "Export declaration")                                 \
   V(kExternalStringExpectedButNotFound,                                       \
-    "external string expected, but not found")                                \
-  V(kFailedBailedOutLastTime, "failed/bailed out last time")                  \
+    "External string expected, but not found")                                \
+  V(kFailedBailedOutLastTime, "Failed/bailed out last time")                  \
   V(kForInStatementIsNotFastCase, "ForInStatement is not fast case")          \
   V(kForInStatementOptimizationIsDisabled,                                    \
     "ForInStatement optimization is disabled")                                \
   V(kForInStatementWithNonLocalEachVariable,                                  \
     "ForInStatement with non-local each variable")                            \
   V(kForOfStatement, "ForOfStatement")                                        \
-  V(kFrameIsExpectedToBeAligned, "frame is expected to be aligned")           \
-  V(kFunctionCallsEval, "function calls eval")                                \
-  V(kFunctionIsAGenerator, "function is a generator")                         \
-  V(kFunctionWithIllegalRedeclaration, "function with illegal redeclaration") \
+  V(kFrameIsExpectedToBeAligned, "Frame is expected to be aligned")           \
+  V(kFunctionCallsEval, "Function calls eval")                                \
+  V(kFunctionIsAGenerator, "Function is a generator")                         \
+  V(kFunctionWithIllegalRedeclaration, "Function with illegal redeclaration") \
   V(kGeneratedCodeIsTooLarge, "Generated code is too large")                  \
   V(kGeneratorFailedToResume, "Generator failed to resume")                   \
-  V(kGenerator, "generator")                                                  \
+  V(kGenerator, "Generator")                                                  \
   V(kGlobalFunctionsMustHaveInitialMap,                                       \
     "Global functions must have initial map")                                 \
   V(kHeapNumberMapRegisterClobbered, "HeapNumberMap register clobbered")      \
+  V(kHydrogenFilter, "Optimization disabled by filter")                       \
   V(kImportDeclaration, "Import declaration")                                 \
   V(kImproperObjectOnPrototypeChainForStore,                                  \
-    "improper object on prototype chain for store")                           \
+    "Improper object on prototype chain for store")                           \
   V(kIndexIsNegative, "Index is negative")                                    \
   V(kIndexIsTooLarge, "Index is too large")                                   \
-  V(kInlinedRuntimeFunctionClassOf, "inlined runtime function: ClassOf")      \
+  V(kInlinedRuntimeFunctionClassOf, "Inlined runtime function: ClassOf")      \
   V(kInlinedRuntimeFunctionFastAsciiArrayJoin,                                \
-    "inlined runtime function: FastAsciiArrayJoin")                           \
+    "Inlined runtime function: FastAsciiArrayJoin")                           \
   V(kInlinedRuntimeFunctionGeneratorNext,                                     \
-    "inlined runtime function: GeneratorNext")                                \
+    "Inlined runtime function: GeneratorNext")                                \
   V(kInlinedRuntimeFunctionGeneratorThrow,                                    \
-    "inlined runtime function: GeneratorThrow")                               \
+    "Inlined runtime function: GeneratorThrow")                               \
   V(kInlinedRuntimeFunctionGetFromCache,                                      \
-    "inlined runtime function: GetFromCache")                                 \
+    "Inlined runtime function: GetFromCache")                                 \
   V(kInlinedRuntimeFunctionIsNonNegativeSmi,                                  \
-    "inlined runtime function: IsNonNegativeSmi")                             \
+    "Inlined runtime function: IsNonNegativeSmi")                             \
   V(kInlinedRuntimeFunctionIsRegExpEquivalent,                                \
-    "inlined runtime function: IsRegExpEquivalent")                           \
+    "Inlined runtime function: IsRegExpEquivalent")                           \
   V(kInlinedRuntimeFunctionIsStringWrapperSafeForDefaultValueOf,              \
-    "inlined runtime function: IsStringWrapperSafeForDefaultValueOf")         \
-  V(kInliningBailedOut, "inlining bailed out")                                \
+    "Inlined runtime function: IsStringWrapperSafeForDefaultValueOf")         \
+  V(kInliningBailedOut, "Inlining bailed out")                                \
   V(kInputGPRIsExpectedToHaveUpper32Cleared,                                  \
-    "input GPR is expected to have upper32 cleared")                          \
+    "Input GPR is expected to have upper32 cleared")                          \
   V(kInstanceofStubUnexpectedCallSiteCacheCheck,                              \
     "InstanceofStub unexpected call site cache (check)")                      \
   V(kInstanceofStubUnexpectedCallSiteCacheCmp1,                               \
@@ -1174,9 +1175,9 @@
   V(kInvalidElementsKindForInternalArrayOrInternalPackedArray,                \
     "Invalid ElementsKind for InternalArray or InternalPackedArray")          \
   V(kInvalidHandleScopeLevel, "Invalid HandleScope level")                    \
-  V(kInvalidLeftHandSideInAssignment, "invalid left-hand side in assignment") \
-  V(kInvalidLhsInCompoundAssignment, "invalid lhs in compound assignment")    \
-  V(kInvalidLhsInCountOperation, "invalid lhs in count operation")            \
+  V(kInvalidLeftHandSideInAssignment, "Invalid left-hand side in assignment") \
+  V(kInvalidLhsInCompoundAssignment, "Invalid lhs in compound assignment")    \
+  V(kInvalidLhsInCountOperation, "Invalid lhs in count operation")            \
   V(kInvalidMinLength, "Invalid min_length")                                  \
   V(kJSGlobalObjectNativeContextShouldBeANativeContext,                       \
     "JSGlobalObject::native_context should be a native context")              \
@@ -1192,7 +1193,7 @@
     "LiveEdit frame dropping is not supported on mips")                       \
   V(kLiveEdit, "LiveEdit")                                                    \
   V(kLookupVariableInCountOperation,                                          \
-    "lookup variable in count operation")                                     \
+    "Lookup variable in count operation")                                     \
   V(kMapIsNoLongerInEax, "Map is no longer in eax")                           \
   V(kModuleDeclaration, "Module declaration")                                 \
   V(kModuleLiteral, "Module literal")                                         \
@@ -1201,26 +1202,26 @@
   V(kModuleVariable, "Module variable")                                       \
   V(kModuleUrl, "Module url")                                                 \
   V(kNativeFunctionLiteral, "Native function literal")                        \
-  V(kNoCasesLeft, "no cases left")                                            \
+  V(kNoCasesLeft, "No cases left")                                            \
   V(kNoEmptyArraysHereInEmitFastAsciiArrayJoin,                               \
     "No empty arrays here in EmitFastAsciiArrayJoin")                         \
   V(kNonInitializerAssignmentToConst,                                         \
-    "non-initializer assignment to const")                                    \
+    "Non-initializer assignment to const")                                    \
   V(kNonSmiIndex, "Non-smi index")                                            \
   V(kNonSmiKeyInArrayLiteral, "Non-smi key in array literal")                 \
   V(kNonSmiValue, "Non-smi value")                                            \
   V(kNonObject, "Non-object value")                                           \
   V(kNotEnoughVirtualRegistersForValues,                                      \
-    "not enough virtual registers for values")                                \
+    "Not enough virtual registers for values")                                \
   V(kNotEnoughSpillSlotsForOsr,                                               \
-    "not enough spill slots for OSR")                                         \
+    "Not enough spill slots for OSR")                                         \
   V(kNotEnoughVirtualRegistersRegalloc,                                       \
-    "not enough virtual registers (regalloc)")                                \
-  V(kObjectFoundInSmiOnlyArray, "object found in smi-only array")             \
+    "Not enough virtual registers (regalloc)")                                \
+  V(kObjectFoundInSmiOnlyArray, "Object found in smi-only array")             \
   V(kObjectLiteralWithComplexProperty,                                        \
     "Object literal with complex property")                                   \
   V(kOddballInStringTableIsNotUndefinedOrTheHole,                             \
-    "oddball in string table is not undefined or the hole")                   \
+    "Oddball in string table is not undefined or the hole")                   \
   V(kOperandIsASmiAndNotAName, "Operand is a smi and not a name")             \
   V(kOperandIsASmiAndNotAString, "Operand is a smi and not a string")         \
   V(kOperandIsASmi, "Operand is a smi")                                       \
@@ -1230,24 +1231,25 @@
   V(kOperandIsNotAString, "Operand is not a string")                          \
   V(kOperandIsNotSmi, "Operand is not smi")                                   \
   V(kOperandNotANumber, "Operand not a number")                               \
-  V(kOptimizedTooManyTimes, "optimized too many times")                       \
+  V(kOptimizationDisabled, "Optimization is disabled")                        \
+  V(kOptimizedTooManyTimes, "Optimized too many times")                       \
   V(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister,                  \
     "Out of virtual registers while trying to allocate temp register")        \
-  V(kParseScopeError, "parse/scope error")                                    \
-  V(kPossibleDirectCallToEval, "possible direct call to eval")                \
+  V(kParseScopeError, "Parse/scope error")                                    \
+  V(kPossibleDirectCallToEval, "Possible direct call to eval")                \
   V(kPropertyAllocationCountFailed, "Property allocation count failed")       \
   V(kReceivedInvalidReturnAddress, "Received invalid return address")         \
   V(kReferenceToAVariableWhichRequiresDynamicLookup,                          \
-    "reference to a variable which requires dynamic lookup")                  \
+    "Reference to a variable which requires dynamic lookup")                  \
   V(kReferenceToGlobalLexicalVariable,                                        \
-    "reference to global lexical variable")                                   \
-  V(kReferenceToUninitializedVariable, "reference to uninitialized variable") \
+    "Reference to global lexical variable")                                   \
+  V(kReferenceToUninitializedVariable, "Reference to uninitialized variable") \
   V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
-  V(kRegisterWasClobbered, "register was clobbered")                          \
+  V(kRegisterWasClobbered, "Register was clobbered")                          \
   V(kScopedBlock, "ScopedBlock")                                              \
   V(kSmiAdditionOverflow, "Smi addition overflow")                            \
   V(kSmiSubtractionOverflow, "Smi subtraction overflow")                      \
-  V(kStackFrameTypesMustMatch, "stack frame types must match")                \
+  V(kStackFrameTypesMustMatch, "Stack frame types must match")                \
   V(kSwitchStatementMixedOrNonLiteralSwitchLabels,                            \
     "SwitchStatement: mixed or non-literal switch labels")                    \
   V(kSwitchStatementTooManyClauses, "SwitchStatement: too many clauses")      \
@@ -1259,8 +1261,8 @@
     "The instruction to patch should be a lui")                               \
   V(kTheInstructionToPatchShouldBeAnOri,                                      \
     "The instruction to patch should be an ori")                              \
-  V(kTooManyParametersLocals, "too many parameters/locals")                   \
-  V(kTooManyParameters, "too many parameters")                                \
+  V(kTooManyParametersLocals, "Too many parameters/locals")                   \
+  V(kTooManyParameters, "Too many parameters")                                \
   V(kTooManySpillSlotsNeededForOSR, "Too many spill slots needed for OSR")    \
   V(kToOperandIsDoubleRegisterUnimplemented,                                  \
     "ToOperand IsDoubleRegister unimplemented")                               \
@@ -1311,23 +1313,23 @@
   V(kUnexpectedUnusedPropertiesOfStringWrapper,                               \
     "Unexpected unused properties of string wrapper")                         \
   V(kUninitializedKSmiConstantRegister, "Uninitialized kSmiConstantRegister") \
-  V(kUnknown, "unknown")                                                      \
+  V(kUnknown, "Unknown")                                                      \
   V(kUnsupportedConstCompoundAssignment,                                      \
-    "unsupported const compound assignment")                                  \
+    "Unsupported const compound assignment")                                  \
   V(kUnsupportedCountOperationWithConst,                                      \
-    "unsupported count operation with const")                                 \
-  V(kUnsupportedDoubleImmediate, "unsupported double immediate")              \
-  V(kUnsupportedLetCompoundAssignment, "unsupported let compound assignment") \
+    "Unsupported count operation with const")                                 \
+  V(kUnsupportedDoubleImmediate, "Unsupported double immediate")              \
+  V(kUnsupportedLetCompoundAssignment, "Unsupported let compound assignment") \
   V(kUnsupportedLookupSlotInDeclaration,                                      \
-    "unsupported lookup slot in declaration")                                 \
+    "Unsupported lookup slot in declaration")                                 \
   V(kUnsupportedNonPrimitiveCompare, "Unsupported non-primitive compare")     \
   V(kUnsupportedPhiUseOfArguments, "Unsupported phi use of arguments")        \
   V(kUnsupportedPhiUseOfConstVariable,                                        \
     "Unsupported phi use of const variable")                                  \
-  V(kUnsupportedTaggedImmediate, "unsupported tagged immediate")              \
+  V(kUnsupportedTaggedImmediate, "Unsupported tagged immediate")              \
   V(kVariableResolvedToWithContext, "Variable resolved to with context")      \
   V(kWeShouldNotHaveAnEmptyLexicalContext,                                    \
-    "we should not have an empty lexical context")                            \
+    "We should not have an empty lexical context")                            \
   V(kWithStatement, "WithStatement")                                          \
   V(kWrongAddressOrValuePassedToRecordWrite,                                  \
     "Wrong address or value passed to RecordWrite")                           \
@@ -2955,6 +2957,11 @@
   // Code Generation support.
   static int OffsetOfElementAt(int index) { return SizeFor(index); }
 
+  // Garbage collection support.
+  Object** RawFieldOfElementAt(int index) {
+    return HeapObject::RawField(this, OffsetOfElementAt(index));
+  }
+
   // Casting.
   static inline FixedArray* cast(Object* obj);
 
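(The new RawFieldOfElementAt helper hands GC code, and call sites like the
transitions-inl.h hunk later in this patch, a typed pointer to an element
slot. A hedged sketch of the equivalence, using only helpers that already
exist on these classes:

  // Illustrative only: an element slot sits OffsetOfElementAt(index) bytes
  // from the object's start, so the helper is equivalent to:
  Object** ElementSlot(FixedArray* array, int index) {
    return HeapObject::RawField(array, FixedArray::OffsetOfElementAt(index));
  }
)
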
@@ -5358,6 +5365,7 @@
   void ClearTypeFeedbackCells(Heap* heap);
 
   BailoutId TranslatePcOffsetToAstId(uint32_t pc_offset);
+  uint32_t TranslateAstIdToPcOffset(BailoutId ast_id);
 
 #define DECLARE_CODE_AGE_ENUM(X) k##X##CodeAge,
   enum Age {
@@ -6533,14 +6541,16 @@
   // and a shared literals array or Smi(0) if none.
   DECL_ACCESSORS(optimized_code_map, Object)
 
-  // Returns index i of the entry with the specified context. At position
-  // i - 1 is the context, position i the code, and i + 1 the literals array.
-  // Returns -1 when no matching entry is found.
-  int SearchOptimizedCodeMap(Context* native_context);
+  // Returns index i of the entry with the specified context and OSR entry.
+  // At position i - 1 is the context, position i the code, i + 1 the
+  // literals array, and i + 2 the OSR ast id.  Returns -1 when no matching
+  // entry is found.
+  int SearchOptimizedCodeMap(Context* native_context, BailoutId osr_ast_id);
 
   // Retrieves the code or literals for the entry at the given index. The
   // index has to be consistent with a search result as defined above.
-  void InstallFromOptimizedCodeMap(JSFunction* function, int index);
+  FixedArray* GetLiteralsFromOptimizedCodeMap(int index);
+
+  Code* GetCodeFromOptimizedCodeMap(int index);
 
   // Clear optimized code map.
   void ClearOptimizedCodeMap();
@@ -6554,18 +6564,28 @@
   // Add a new entry to the optimized code map.
   MUST_USE_RESULT MaybeObject* AddToOptimizedCodeMap(Context* native_context,
                                                      Code* code,
-                                                     FixedArray* literals);
+                                                     FixedArray* literals,
+                                                     BailoutId osr_ast_id);
   static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
                                     Handle<Context> native_context,
                                     Handle<Code> code,
-                                    Handle<FixedArray> literals);
+                                    Handle<FixedArray> literals,
+                                    BailoutId osr_ast_id);
 
   // Layout description of the optimized code map.
   static const int kNextMapIndex = 0;
   static const int kEntriesStart = 1;
-  static const int kEntryLength = 3;
-  static const int kFirstContextSlot = FixedArray::kHeaderSize + kPointerSize;
-  static const int kFirstCodeSlot = FixedArray::kHeaderSize + 2 * kPointerSize;
+  static const int kContextOffset = 0;
+  static const int kCachedCodeOffset = 1;
+  static const int kLiteralsOffset = 2;
+  static const int kOsrAstIdOffset = 3;
+  static const int kEntryLength = 4;
+  static const int kFirstContextSlot = FixedArray::kHeaderSize +
+      (kEntriesStart + kContextOffset) * kPointerSize;
+  static const int kFirstCodeSlot = FixedArray::kHeaderSize +
+      (kEntriesStart + kCachedCodeOffset) * kPointerSize;
+  static const int kFirstOsrAstIdSlot = FixedArray::kHeaderSize +
+      (kEntriesStart + kOsrAstIdOffset) * kPointerSize;
   static const int kSecondEntryIndex = kEntryLength + kEntriesStart;
   static const int kInitialLength = kEntriesStart + kEntryLength;
 
@@ -6924,12 +6944,6 @@
 
   void ResetForNewContext(int new_ic_age);
 
-  // Helper to compile the shared code.  Returns true on success, false on
-  // failure (e.g., stack overflow during compilation). This is only used by
-  // the debugger, it is not possible to compile without a context otherwise.
-  static bool CompileLazy(Handle<SharedFunctionInfo> shared,
-                          ClearExceptionFlag flag);
-
   // Casting.
   static inline SharedFunctionInfo* cast(Object* obj);
 
@@ -7260,29 +7274,20 @@
 
   // Mark this function for optimization. The function will be
   // recompiled the next time it is executed.
-  void MarkForLazyRecompilation();
-  void MarkForConcurrentRecompilation();
-  void MarkInRecompileQueue();
+  void MarkForOptimization();
+  void MarkForConcurrentOptimization();
+  void MarkInOptimizationQueue();
 
-  // Helpers to compile this function.  Returns true on success, false on
-  // failure (e.g., stack overflow during compilation).
-  static bool EnsureCompiled(Handle<JSFunction> function,
-                             ClearExceptionFlag flag);
-  static bool CompileLazy(Handle<JSFunction> function,
-                          ClearExceptionFlag flag);
-  static Handle<Code> CompileOsr(Handle<JSFunction> function,
-                                 BailoutId osr_ast_id,
-                                 ClearExceptionFlag flag);
   static bool CompileOptimized(Handle<JSFunction> function,
                                ClearExceptionFlag flag);
 
   // Tells whether or not the function is already marked for
   // optimization.
-  inline bool IsMarkedForLazyRecompilation();
-  inline bool IsMarkedForConcurrentRecompilation();
+  inline bool IsMarkedForOptimization();
+  inline bool IsMarkedForConcurrentOptimization();
 
   // Tells whether or not the function is on the concurrent
   // optimization queue.
-  inline bool IsInRecompileQueue();
+  inline bool IsInOptimizationQueue();
 
   // [literals_or_bindings]: Fixed array holding either
   // the materialized literals or the bindings of a bound function.
@@ -8647,11 +8652,6 @@
   // ASCII and two byte string types.
   bool MarkAsUndetectable();
 
-  // Return a substring.
-  MUST_USE_RESULT MaybeObject* SubString(int from,
-                                         int to,
-                                         PretenureFlag pretenure = NOT_TENURED);
-
   // String equality operations.
   inline bool Equals(String* other);
   bool IsUtf8EqualTo(Vector<const char> str, bool allow_prefix_match = false);
diff --git a/src/optimizing-compiler-thread.cc b/src/optimizing-compiler-thread.cc
index 32a7f97..d215070 100644
--- a/src/optimizing-compiler-thread.cc
+++ b/src/optimizing-compiler-thread.cc
@@ -106,10 +106,10 @@
 }
 
 
-RecompileJob* OptimizingCompilerThread::NextInput() {
+OptimizedCompileJob* OptimizingCompilerThread::NextInput() {
   LockGuard<Mutex> access_input_queue_(&input_queue_mutex_);
   if (input_queue_length_ == 0) return NULL;
-  RecompileJob* job = input_queue_[InputQueueIndex(0)];
+  OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)];
   ASSERT_NE(NULL, job);
   input_queue_shift_ = InputQueueIndex(1);
   input_queue_length_--;
@@ -118,13 +118,13 @@
 
 
 void OptimizingCompilerThread::CompileNext() {
-  RecompileJob* job = NextInput();
+  OptimizedCompileJob* job = NextInput();
   ASSERT_NE(NULL, job);
 
   // The function may have already been optimized by OSR.  Simply continue.
-  RecompileJob::Status status = job->OptimizeGraph();
+  OptimizedCompileJob::Status status = job->OptimizeGraph();
   USE(status);   // Prevent an unused-variable error in release mode.
-  ASSERT(status != RecompileJob::FAILED);
+  ASSERT(status != OptimizedCompileJob::FAILED);
 
   // Use a mutex to make sure that functions marked for install
@@ -134,13 +134,18 @@
 }
 
 
-static void DisposeRecompileJob(RecompileJob* job,
-                                bool restore_function_code) {
+static void DisposeOptimizedCompileJob(OptimizedCompileJob* job,
+                                       bool restore_function_code) {
   // The recompile job is allocated in the CompilationInfo's zone.
   CompilationInfo* info = job->info();
   if (restore_function_code) {
     if (info->is_osr()) {
-      if (!job->IsWaitingForInstall()) BackEdgeTable::RemoveStackCheck(info);
+      if (!job->IsWaitingForInstall()) {
+        // Remove the stack check that guards the OSR entry in the
+        // original code.
+        Handle<Code> code = info->unoptimized_code();
+        uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
+        BackEdgeTable::RemoveStackCheck(code, offset);
+      }
     } else {
       Handle<JSFunction> function = info->closure();
       function->ReplaceCode(function->shared()->code());
@@ -151,25 +156,25 @@
 
 
 void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
-  RecompileJob* job;
+  OptimizedCompileJob* job;
   while ((job = NextInput())) {
     // This should not block, since we have one signal on the input queue
     // semaphore corresponding to each element in the input queue.
     input_queue_semaphore_.Wait();
     // OSR jobs are dealt with separately.
     if (!job->info()->is_osr()) {
-      DisposeRecompileJob(job, restore_function_code);
+      DisposeOptimizedCompileJob(job, restore_function_code);
     }
   }
 }
 
 
 void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
-  RecompileJob* job;
+  OptimizedCompileJob* job;
   while (output_queue_.Dequeue(&job)) {
     // OSR jobs are dealt with separately.
     if (!job->info()->is_osr()) {
-      DisposeRecompileJob(job, restore_function_code);
+      DisposeOptimizedCompileJob(job, restore_function_code);
     }
   }
 }
@@ -178,7 +183,7 @@
 void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) {
   for (int i = 0; i < osr_buffer_capacity_; i++) {
     if (osr_buffer_[i] != NULL) {
-      DisposeRecompileJob(osr_buffer_[i], restore_function_code);
+      DisposeOptimizedCompileJob(osr_buffer_[i], restore_function_code);
       osr_buffer_[i] = NULL;
     }
   }
@@ -236,9 +241,10 @@
   ASSERT(!IsOptimizerThread());
   HandleScope handle_scope(isolate_);
 
-  RecompileJob* job;
+  OptimizedCompileJob* job;
   while (output_queue_.Dequeue(&job)) {
     CompilationInfo* info = job->info();
+    Handle<JSFunction> function(*info->closure());
     if (info->is_osr()) {
       if (FLAG_trace_osr) {
         PrintF("[COSR - ");
@@ -247,26 +253,25 @@
                info->osr_ast_id().ToInt());
       }
       job->WaitForInstall();
-      BackEdgeTable::RemoveStackCheck(info);
+      // Remove the stack check that guards the OSR entry in the
+      // original code.
+      Handle<Code> code = info->unoptimized_code();
+      uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
+      BackEdgeTable::RemoveStackCheck(code, offset);
     } else {
-      Compiler::InstallOptimizedCode(job);
+      Handle<Code> code = Compiler::GetConcurrentlyOptimizedCode(job);
+      function->ReplaceCode(
+          code.is_null() ? function->shared()->code() : *code);
     }
   }
 }
 
 
-void OptimizingCompilerThread::QueueForOptimization(RecompileJob* job) {
+void OptimizingCompilerThread::QueueForOptimization(OptimizedCompileJob* job) {
   ASSERT(IsQueueAvailable());
   ASSERT(!IsOptimizerThread());
   CompilationInfo* info = job->info();
   if (info->is_osr()) {
-    if (FLAG_trace_concurrent_recompilation) {
-      PrintF("  ** Queueing ");
-      info->closure()->PrintName();
-      PrintF(" for concurrent on-stack replacement.\n");
-    }
     osr_attempts_++;
-    BackEdgeTable::AddStackCheck(info);
     AddToOsrBuffer(job);
     // Add job to the front of the input queue.
     LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
@@ -276,7 +281,6 @@
     input_queue_[InputQueueIndex(0)] = job;
     input_queue_length_++;
   } else {
-    info->closure()->MarkInRecompileQueue();
     // Add job to the back of the input queue.
     LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
     ASSERT_LT(input_queue_length_, input_queue_capacity_);
@@ -300,14 +304,14 @@
 }
 
 
-RecompileJob* OptimizingCompilerThread::FindReadyOSRCandidate(
-    Handle<JSFunction> function, uint32_t osr_pc_offset) {
+OptimizedCompileJob* OptimizingCompilerThread::FindReadyOSRCandidate(
+    Handle<JSFunction> function, BailoutId osr_ast_id) {
   ASSERT(!IsOptimizerThread());
   for (int i = 0; i < osr_buffer_capacity_; i++) {
-    RecompileJob* current = osr_buffer_[i];
+    OptimizedCompileJob* current = osr_buffer_[i];
     if (current != NULL &&
         current->IsWaitingForInstall() &&
-        current->info()->HasSameOsrEntry(function, osr_pc_offset)) {
+        current->info()->HasSameOsrEntry(function, osr_ast_id)) {
       osr_hits_++;
       osr_buffer_[i] = NULL;
       return current;
@@ -318,12 +322,12 @@
 
 
 bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function,
-                                              uint32_t osr_pc_offset) {
+                                              BailoutId osr_ast_id) {
   ASSERT(!IsOptimizerThread());
   for (int i = 0; i < osr_buffer_capacity_; i++) {
-    RecompileJob* current = osr_buffer_[i];
+    OptimizedCompileJob* current = osr_buffer_[i];
     if (current != NULL &&
-        current->info()->HasSameOsrEntry(function, osr_pc_offset)) {
+        current->info()->HasSameOsrEntry(function, osr_ast_id)) {
       return !current->IsWaitingForInstall();
     }
   }
@@ -334,7 +338,7 @@
 bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) {
   ASSERT(!IsOptimizerThread());
   for (int i = 0; i < osr_buffer_capacity_; i++) {
-    RecompileJob* current = osr_buffer_[i];
+    OptimizedCompileJob* current = osr_buffer_[i];
     if (current != NULL && *current->info()->closure() == function) {
       return !current->IsWaitingForInstall();
     }
@@ -343,10 +347,10 @@
 }
 
 
-void OptimizingCompilerThread::AddToOsrBuffer(RecompileJob* job) {
+void OptimizingCompilerThread::AddToOsrBuffer(OptimizedCompileJob* job) {
   ASSERT(!IsOptimizerThread());
   // Find the next slot that is empty or has a stale job.
-  RecompileJob* stale = NULL;
+  OptimizedCompileJob* stale = NULL;
   while (true) {
     stale = osr_buffer_[osr_buffer_cursor_];
     if (stale == NULL || stale->IsWaitingForInstall()) break;
@@ -362,7 +366,7 @@
       info->closure()->PrintName();
       PrintF(", AST id %d]\n", info->osr_ast_id().ToInt());
     }
-    DisposeRecompileJob(stale, false);
+    DisposeOptimizedCompileJob(stale, false);
   }
   osr_buffer_[osr_buffer_cursor_] = job;
   osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
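
(AddToOsrBuffer above implements a small eviction policy over a cyclic buffer:
advance the cursor until a slot is free or its job is stale, that is, already
waiting for install; dispose any stale job; claim the slot. A hypothetical
standalone sketch, mirroring the loop including its assumption that such a
slot always exists:

  template <typename Job>
  int ClaimOsrSlot(Job** buffer, int capacity, int* cursor) {
    // Skip slots whose jobs are still in flight.
    while (buffer[*cursor] != NULL && !buffer[*cursor]->IsWaitingForInstall()) {
      *cursor = (*cursor + 1) % capacity;
    }
    int slot = *cursor;                  // Empty, or holds a stale job.
    *cursor = (*cursor + 1) % capacity;  // Next insertion starts here.
    return slot;
  }
)
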
diff --git a/src/optimizing-compiler-thread.h b/src/optimizing-compiler-thread.h
index 795fa65..eae1f60 100644
--- a/src/optimizing-compiler-thread.h
+++ b/src/optimizing-compiler-thread.h
@@ -40,7 +40,7 @@
 namespace internal {
 
 class HOptimizedGraphBuilder;
-class RecompileJob;
+class OptimizedCompileJob;
 class SharedFunctionInfo;
 
 class OptimizingCompilerThread : public Thread {
@@ -62,10 +62,10 @@
       osr_attempts_(0),
       blocked_jobs_(0) {
     NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
-    input_queue_ = NewArray<RecompileJob*>(input_queue_capacity_);
+    input_queue_ = NewArray<OptimizedCompileJob*>(input_queue_capacity_);
     if (FLAG_concurrent_osr) {
       // Allocate and mark OSR buffer slots as empty.
-      osr_buffer_ = NewArray<RecompileJob*>(osr_buffer_capacity_);
+      osr_buffer_ = NewArray<OptimizedCompileJob*>(osr_buffer_capacity_);
       for (int i = 0; i < osr_buffer_capacity_; i++) osr_buffer_[i] = NULL;
     }
   }
@@ -75,12 +75,12 @@
   void Run();
   void Stop();
   void Flush();
-  void QueueForOptimization(RecompileJob* optimizing_compiler);
+  void QueueForOptimization(OptimizedCompileJob* optimizing_compiler);
   void Unblock();
   void InstallOptimizedFunctions();
-  RecompileJob* FindReadyOSRCandidate(Handle<JSFunction> function,
-                                      uint32_t osr_pc_offset);
-  bool IsQueuedForOSR(Handle<JSFunction> function, uint32_t osr_pc_offset);
+  OptimizedCompileJob* FindReadyOSRCandidate(Handle<JSFunction> function,
+                                             BailoutId osr_ast_id);
+  bool IsQueuedForOSR(Handle<JSFunction> function, BailoutId osr_ast_id);
 
   bool IsQueuedForOSR(JSFunction* function);
 
@@ -112,11 +112,11 @@
   void FlushOutputQueue(bool restore_function_code);
   void FlushOsrBuffer(bool restore_function_code);
   void CompileNext();
-  RecompileJob* NextInput();
+  OptimizedCompileJob* NextInput();
 
   // Add a recompilation task for OSR to the cyclic buffer, awaiting OSR entry.
   // Tasks evicted from the cyclic buffer are discarded.
-  void AddToOsrBuffer(RecompileJob* compiler);
+  void AddToOsrBuffer(OptimizedCompileJob* compiler);
 
   inline int InputQueueIndex(int i) {
     int result = (i + input_queue_shift_) % input_queue_capacity_;
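
(The input queue is a ring buffer, and InputQueueIndex above maps a logical
position to a physical slot. A tiny standalone illustration, hypothetical:

  // With capacity 8 and shift 6, logical 0..3 map to physical 6, 7, 0, 1.
  static int RingIndex(int i, int shift, int capacity) {
    return (i + shift) % capacity;  // Assumes 0 <= i < capacity.
  }
)
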
@@ -135,17 +135,17 @@
   Semaphore input_queue_semaphore_;
 
   // Circular queue of incoming recompilation tasks (including OSR).
-  RecompileJob** input_queue_;
+  OptimizedCompileJob** input_queue_;
   int input_queue_capacity_;
   int input_queue_length_;
   int input_queue_shift_;
   Mutex input_queue_mutex_;
 
   // Queue of recompilation tasks ready to be installed (excluding OSR).
-  UnboundQueue<RecompileJob*> output_queue_;
+  UnboundQueue<OptimizedCompileJob*> output_queue_;
 
   // Cyclic buffer of recompilation tasks for OSR.
-  RecompileJob** osr_buffer_;
+  OptimizedCompileJob** osr_buffer_;
   int osr_buffer_capacity_;
   int osr_buffer_cursor_;
 
diff --git a/src/parser.h b/src/parser.h
index dd8e600..d3c24d1 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -418,7 +418,12 @@
   // Parses the source code represented by the compilation info and sets its
   // function literal.  Returns false (and deallocates any allocated AST
   // nodes) if parsing failed.
-  static bool Parse(CompilationInfo* info) { return Parser(info).Parse(); }
+  static bool Parse(CompilationInfo* info,
+                    bool allow_lazy = false) {
+    Parser parser(info);
+    parser.set_allow_lazy(allow_lazy);
+    return parser.Parse();
+  }
   bool Parse();
 
  private:
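
(A usage sketch for the extended entry point; the call sites are hypothetical,
and the default argument keeps existing callers source-compatible:

  bool ParseEager(CompilationInfo* info) { return Parser::Parse(info); }
  bool ParseLazy(CompilationInfo* info)  { return Parser::Parse(info, true); }
)
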
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index 390222d..5784e4d 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -124,11 +124,11 @@
       // recompilation race.  This goes away as soon as OSR becomes one-shot.
       return;
     }
-    ASSERT(!function->IsInRecompileQueue());
-    function->MarkForConcurrentRecompilation();
+    ASSERT(!function->IsInOptimizationQueue());
+    function->MarkForConcurrentOptimization();
   } else {
     // The next call to the function will trigger optimization.
-    function->MarkForLazyRecompilation();
+    function->MarkForOptimization();
   }
 }
 
@@ -186,7 +186,7 @@
     Code* shared_code = shared->code();
 
     if (shared_code->kind() != Code::FUNCTION) continue;
-    if (function->IsInRecompileQueue()) continue;
+    if (function->IsInOptimizationQueue()) continue;
 
     if (FLAG_always_osr &&
         shared_code->allow_osr_at_loop_nesting_level() == 0) {
@@ -198,8 +198,8 @@
       }
       // Fall through and do a normal optimized compile as well.
     } else if (!frame->is_optimized() &&
-        (function->IsMarkedForLazyRecompilation() ||
-         function->IsMarkedForConcurrentRecompilation() ||
+        (function->IsMarkedForOptimization() ||
+         function->IsMarkedForConcurrentOptimization() ||
          function->IsOptimized())) {
      // Attempt OSR if we are still running unoptimized code even though
      // the function has long been marked or even already been optimized.
diff --git a/src/runtime.cc b/src/runtime.cc
index 4d84a15..a175836 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -2957,7 +2957,7 @@
   Handle<SharedFunctionInfo> target_shared(target->shared());
   Handle<SharedFunctionInfo> source_shared(source->shared());
 
-  if (!JSFunction::EnsureCompiled(source, KEEP_EXCEPTION)) {
+  if (!Compiler::EnsureCompiled(source, KEEP_EXCEPTION)) {
     return Failure::Exception();
   }
 
@@ -4449,10 +4449,10 @@
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_SubString) {
-  SealHandleScope shs(isolate);
+  HandleScope scope(isolate);
   ASSERT(args.length() == 3);
 
-  CONVERT_ARG_CHECKED(String, value, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
   int start, end;
   // We have a fast integer-only case here to avoid a conversion to double in
   // the common case where from and to are Smis.
@@ -4469,9 +4469,10 @@
   }
   RUNTIME_ASSERT(end >= start);
   RUNTIME_ASSERT(start >= 0);
-  RUNTIME_ASSERT(end <= value->length());
+  RUNTIME_ASSERT(end <= string->length());
   isolate->counters()->sub_string_runtime()->Increment();
-  return value->SubString(start, end);
+
+  return *isolate->factory()->NewSubString(string, start, end);
 }
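
(The scope change above is load-bearing: NewSubString allocates, allocation
can create handles and trigger GC, and a SealHandleScope forbids exactly that.
A hedged sketch of the general pattern; Runtime_Example is hypothetical:

  RUNTIME_FUNCTION(MaybeObject*, Runtime_Example) {
    HandleScope scope(isolate);  // Not SealHandleScope: we allocate below.
    CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
    // Factory methods may allocate and hand back fresh handles.
    return *isolate->factory()->NewSubString(string, 0, string->length());
  }
)
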
 
 
@@ -6552,30 +6553,31 @@
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringTrim) {
-  SealHandleScope shs(isolate);
+  HandleScope scope(isolate);
   ASSERT(args.length() == 3);
 
-  CONVERT_ARG_CHECKED(String, s, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
   CONVERT_BOOLEAN_ARG_CHECKED(trimLeft, 1);
   CONVERT_BOOLEAN_ARG_CHECKED(trimRight, 2);
 
-  s->TryFlatten();
-  int length = s->length();
+  string = FlattenGetString(string);
+  int length = string->length();
 
   int left = 0;
   if (trimLeft) {
-    while (left < length && IsTrimWhiteSpace(s->Get(left))) {
+    while (left < length && IsTrimWhiteSpace(string->Get(left))) {
       left++;
     }
   }
 
   int right = length;
   if (trimRight) {
-    while (right > left && IsTrimWhiteSpace(s->Get(right - 1))) {
+    while (right > left && IsTrimWhiteSpace(string->Get(right - 1))) {
       right--;
     }
   }
-  return s->SubString(left, right);
+
+  return *isolate->factory()->NewSubString(string, left, right);
 }
 
 
@@ -6978,12 +6980,12 @@
 
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_StringAdd) {
-  SealHandleScope shs(isolate);
+  HandleScope scope(isolate);
   ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(String, str1, 0);
-  CONVERT_ARG_CHECKED(String, str2, 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, str1, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, str2, 1);
   isolate->counters()->string_add_runtime()->Increment();
-  return isolate->heap()->AllocateConsString(str1, str2);
+  return *isolate->factory()->NewConsString(str1, str2);
 }
 
 
@@ -8265,7 +8267,7 @@
 
   // The function should be compiled for the optimization hints to be
   // available.
-  JSFunction::EnsureCompiled(function, CLEAR_EXCEPTION);
+  Compiler::EnsureCompiled(function, CLEAR_EXCEPTION);
 
   Handle<SharedFunctionInfo> shared(function->shared(), isolate);
   if (!function->has_initial_map() &&
@@ -8297,42 +8299,53 @@
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyCompile) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileUnoptimized) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
 
   Handle<JSFunction> function = args.at<JSFunction>(0);
 #ifdef DEBUG
   if (FLAG_trace_lazy && !function->shared()->is_compiled()) {
-    PrintF("[lazy: ");
+    PrintF("[unoptimized: ");
     function->PrintName();
     PrintF("]\n");
   }
 #endif
 
   // Compile the target function.
-  ASSERT(!function->is_compiled());
-  if (!JSFunction::CompileLazy(function, KEEP_EXCEPTION)) {
-    return Failure::Exception();
-  }
+  ASSERT(function->shared()->allows_lazy_compilation());
+
+  Handle<Code> code = Compiler::GetUnoptimizedCode(function);
+  RETURN_IF_EMPTY_HANDLE(isolate, code);
+  function->ReplaceCode(*code);
 
   // All done. Return the compiled code.
   ASSERT(function->is_compiled());
-  return function->code();
+  ASSERT(function->code()->kind() == Code::FUNCTION ||
+         (FLAG_always_opt &&
+          function->code()->kind() == Code::OPTIMIZED_FUNCTION));
+  return *code;
 }
 
 
-bool AllowOptimization(Isolate* isolate, Handle<JSFunction> function) {
-  // If the function is not compiled ignore the lazy
-  // recompilation. This can happen if the debugger is activated and
-  // the function is returned to the not compiled state.
-  if (!function->shared()->is_compiled()) return false;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileOptimized) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 2);
+  Handle<JSFunction> function = args.at<JSFunction>(0);
+  CONVERT_BOOLEAN_ARG_CHECKED(concurrent, 1);
 
-  // If the function is not optimizable or debugger is active continue using the
-  // code from the full compiler.
-  if (!isolate->use_crankshaft() ||
-      function->shared()->optimization_disabled() ||
-      isolate->DebuggerHasBreakPoints()) {
+  Handle<Code> unoptimized(function->shared()->code());
+  if (!function->shared()->is_compiled()) {
+    // If the function is not compiled, do not optimize.
+    // This can happen if the debugger is activated and
+    // the function is reverted to the uncompiled state.
+    // TODO(yangguo): reconsider this.
+    function->ReplaceCode(function->shared()->code());
+  } else if (!isolate->use_crankshaft() ||
+             function->shared()->optimization_disabled() ||
+             isolate->DebuggerHasBreakPoints()) {
+    // If the function is not optimizable or the debugger is active,
+    // continue using the code from the full compiler.
     if (FLAG_trace_opt) {
       PrintF("[failed to optimize ");
       function->PrintName();
@@ -8340,53 +8353,21 @@
           function->shared()->optimization_disabled() ? "F" : "T",
           isolate->DebuggerHasBreakPoints() ? "T" : "F");
     }
-    return false;
+    function->ReplaceCode(*unoptimized);
+  } else {
+    Compiler::ConcurrencyMode mode = concurrent ? Compiler::CONCURRENT
+                                                : Compiler::NOT_CONCURRENT;
+    Handle<Code> code = Compiler::GetOptimizedCode(function, unoptimized, mode);
+    function->ReplaceCode(code.is_null() ? *unoptimized : *code);
   }
-  return true;
-}
 
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
-  Handle<JSFunction> function = args.at<JSFunction>(0);
-
-  if (!AllowOptimization(isolate, function)) {
-    function->ReplaceCode(function->shared()->code());
-    return function->code();
-  }
-  function->shared()->code()->set_profiler_ticks(0);
-  if (JSFunction::CompileOptimized(function, CLEAR_EXCEPTION)) {
-    return function->code();
-  }
-  if (FLAG_trace_opt) {
-    PrintF("[failed to optimize ");
-    function->PrintName();
-    PrintF(": optimized compilation failed]\n");
-  }
-  function->ReplaceCode(function->shared()->code());
+  ASSERT(function->code()->kind() == Code::FUNCTION ||
+         function->code()->kind() == Code::OPTIMIZED_FUNCTION ||
+         function->IsInOptimizationQueue());
   return function->code();
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ConcurrentRecompile) {
-  HandleScope handle_scope(isolate);
-  ASSERT(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
-  if (!AllowOptimization(isolate, function)) {
-    function->ReplaceCode(function->shared()->code());
-    return isolate->heap()->undefined_value();
-  }
-  Handle<Code> shared_code(function->shared()->code());
-  shared_code->set_profiler_ticks(0);
-  ASSERT(isolate->concurrent_recompilation_enabled());
-  if (!Compiler::RecompileConcurrent(function, shared_code)) {
-    function->ReplaceCode(*shared_code);
-  }
-  return isolate->heap()->undefined_value();
-}
-
-
 class ActivationsFinder : public ThreadVisitor {
  public:
   Code* code_;
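
(For orientation, a dependency-free sketch of the decision tree that
Runtime_CompileOptimized now encodes; plain C++, not V8 API:

  const char* DecideCompileOptimized(bool is_compiled, bool optimizable,
                                     bool debugger_active, bool concurrent) {
    if (!is_compiled) return "keep unoptimized code (debugger reset case)";
    if (!optimizable || debugger_active) return "fall back to full-codegen";
    return concurrent ? "queue a concurrent optimization job"
                      : "optimize synchronously and install the result";
  }
)
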
@@ -8527,7 +8508,7 @@
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
 
   if (!function->IsOptimizable()) return isolate->heap()->undefined_value();
-  function->MarkForLazyRecompilation();
+  function->MarkForOptimization();
 
   Code* unoptimized = function->shared()->code();
   if (args.length() == 2 &&
@@ -8541,8 +8522,9 @@
         unoptimized->set_allow_osr_at_loop_nesting_level(i);
         isolate->runtime_profiler()->AttemptOnStackReplacement(*function);
       }
-    } else if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("concurrent"))) {
-      function->MarkForConcurrentRecompilation();
+    } else if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("concurrent")) &&
+               isolate->concurrent_recompilation_enabled()) {
+      function->MarkForConcurrentOptimization();
     }
   }
 
@@ -8576,7 +8558,7 @@
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   if (isolate->concurrent_recompilation_enabled() &&
       sync_with_compiler_thread) {
-    while (function->IsInRecompileQueue()) {
+    while (function->IsInOptimizationQueue()) {
       isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
       OS::Sleep(50);
     }
@@ -8612,9 +8594,9 @@
 
 static bool IsSuitableForOnStackReplacement(Isolate* isolate,
                                             Handle<JSFunction> function,
-                                            Handle<Code> unoptimized) {
+                                            Handle<Code> current_code) {
  // OSR requires Crankshaft and code that is still optimizable.
-  if (!isolate->use_crankshaft() || !unoptimized->optimizable()) return false;
+  if (!isolate->use_crankshaft() || !current_code->optimizable()) return false;
   // If we are trying to do OSR when there are already optimized
   // activations of the function, it means (a) the function is directly or
   // indirectly recursive and (b) an optimized invocation has been
@@ -8633,79 +8615,79 @@
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
-  Handle<Code> unoptimized(function->shared()->code(), isolate);
+  Handle<Code> caller_code(function->shared()->code());
+
+  // We're not prepared to handle a function with an arguments object.
+  ASSERT(!function->shared()->uses_arguments());
 
   // Passing the PC in the javascript frame from the caller directly is
   // not GC safe, so we walk the stack to get it.
   JavaScriptFrameIterator it(isolate);
   JavaScriptFrame* frame = it.frame();
-  if (!unoptimized->contains(frame->pc())) {
+  if (!caller_code->contains(frame->pc())) {
     // Code on the stack may not be the code object referenced by the shared
     // function info.  It may have been replaced to include deoptimization data.
-    unoptimized = Handle<Code>(frame->LookupCode());
+    caller_code = Handle<Code>(frame->LookupCode());
   }
 
-  uint32_t pc_offset = static_cast<uint32_t>(frame->pc() -
-                                             unoptimized->instruction_start());
+  uint32_t pc_offset = static_cast<uint32_t>(
+      frame->pc() - caller_code->instruction_start());
 
 #ifdef DEBUG
   ASSERT_EQ(frame->function(), *function);
-  ASSERT_EQ(frame->LookupCode(), *unoptimized);
-  ASSERT(unoptimized->contains(frame->pc()));
+  ASSERT_EQ(frame->LookupCode(), *caller_code);
+  ASSERT(caller_code->contains(frame->pc()));
 #endif  // DEBUG
 
-  // We're not prepared to handle a function with arguments object.
-  ASSERT(!function->shared()->uses_arguments());
 
+  BailoutId ast_id = caller_code->TranslatePcOffsetToAstId(pc_offset);
+  ASSERT(!ast_id.IsNone());
+
+  Compiler::ConcurrencyMode mode = isolate->concurrent_osr_enabled()
+      ? Compiler::CONCURRENT : Compiler::NOT_CONCURRENT;
   Handle<Code> result = Handle<Code>::null();
-  BailoutId ast_id = BailoutId::None();
 
-  if (isolate->concurrent_osr_enabled()) {
-    if (isolate->optimizing_compiler_thread()->
-            IsQueuedForOSR(function, pc_offset)) {
-      // Still waiting for the optimizing compiler thread to finish.  Carry on.
+  OptimizedCompileJob* job = NULL;
+  if (mode == Compiler::CONCURRENT) {
+    // Gate the OSR entry with a stack check.
+    BackEdgeTable::AddStackCheck(caller_code, pc_offset);
+    // Poll already queued compilation jobs.
+    OptimizingCompilerThread* thread = isolate->optimizing_compiler_thread();
+    if (thread->IsQueuedForOSR(function, ast_id)) {
       if (FLAG_trace_osr) {
-        PrintF("[COSR - polling recompile tasks for ");
+        PrintF("[OSR - Still waiting for queued: ");
         function->PrintName();
-        PrintF("]\n");
+        PrintF(" at AST id %d]\n", ast_id.ToInt());
       }
       return NULL;
     }
 
-    RecompileJob* job = isolate->optimizing_compiler_thread()->
-        FindReadyOSRCandidate(function, pc_offset);
+    job = thread->FindReadyOSRCandidate(function, ast_id);
+  }
 
-    if (job == NULL) {
-      if (IsSuitableForOnStackReplacement(isolate, function, unoptimized) &&
-          Compiler::RecompileConcurrent(function, unoptimized, pc_offset)) {
-        if (function->IsMarkedForLazyRecompilation() ||
-            function->IsMarkedForConcurrentRecompilation()) {
-          // Prevent regular recompilation if we queue this for OSR.
-          // TODO(yangguo): remove this as soon as OSR becomes one-shot.
-          function->ReplaceCode(function->shared()->code());
-        }
-        return NULL;
-      }
-      // Fall through to the end in case of failure.
-    } else {
-      // TODO(titzer): don't install the OSR code into the function.
-      ast_id = job->info()->osr_ast_id();
-      result = Compiler::InstallOptimizedCode(job);
-    }
-  } else if (IsSuitableForOnStackReplacement(isolate, function, unoptimized)) {
-    ast_id = unoptimized->TranslatePcOffsetToAstId(pc_offset);
-    ASSERT(!ast_id.IsNone());
+  if (job != NULL) {
     if (FLAG_trace_osr) {
-      PrintF("[OSR - replacing at AST id %d in ", ast_id.ToInt());
+      PrintF("[OSR - Found ready: ");
       function->PrintName();
-      PrintF("]\n");
+      PrintF(" at AST id %d]\n", ast_id.ToInt());
     }
-    // Attempt OSR compilation.
-    result = JSFunction::CompileOsr(function, ast_id, CLEAR_EXCEPTION);
+    result = Compiler::GetConcurrentlyOptimizedCode(job);
+  } else if (result.is_null() &&
+             IsSuitableForOnStackReplacement(isolate, function, caller_code)) {
+    if (FLAG_trace_osr) {
+      PrintF("[OSR - Compiling: ");
+      function->PrintName();
+      PrintF(" at AST id %d]\n", ast_id.ToInt());
+    }
+    result = Compiler::GetOptimizedCode(function, caller_code, mode, ast_id);
+    if (result.is_identical_to(isolate->builtins()->InOptimizationQueue())) {
+      // Optimization is queued.  Return to check later.
+      return NULL;
+    }
   }
 
   // Revert the patched back edge table, regardless of whether OSR succeeds.
-  BackEdgeTable::Revert(isolate, *unoptimized);
+  BackEdgeTable::Revert(isolate, *caller_code);
 
   // Check whether we ended up with usable optimized code.
   if (!result.is_null() && result->kind() == Code::OPTIMIZED_FUNCTION) {
@@ -8715,26 +8697,27 @@
     if (data->OsrPcOffset()->value() >= 0) {
       ASSERT(BailoutId(data->OsrAstId()->value()) == ast_id);
       if (FLAG_trace_osr) {
-        PrintF("[OSR - entry at AST id %d, offset %d in optimized code]\n",
+        PrintF("[OSR - Entry at AST id %d, offset %d in optimized code]\n",
                ast_id.ToInt(), data->OsrPcOffset()->value());
       }
       // TODO(titzer): this is a massive hack to make the deopt counts
       // match. Fix heuristics for reenabling optimizations!
       function->shared()->increment_deopt_count();
+
+      // TODO(titzer): Do not install code into the function.
+      function->ReplaceCode(*result);
       return *result;
     }
   }
 
+  // Failed.
   if (FLAG_trace_osr) {
-    PrintF("[OSR - optimization failed for ");
+    PrintF("[OSR - Failed: ");
     function->PrintName();
-    PrintF("]\n");
+    PrintF(" at AST id %d]\n", ast_id.ToInt());
   }
 
-  if (function->IsMarkedForLazyRecompilation() ||
-      function->IsMarkedForConcurrentRecompilation()) {
-    function->ReplaceCode(function->shared()->code());
-  }
+  function->ReplaceCode(function->shared()->code());
   return NULL;
 }
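
(OSR bookkeeping is now keyed by BailoutId instead of a raw pc offset, via the
two Code translations added earlier in this patch. A hypothetical table-based
sketch of that two-way mapping:

  #include <stdint.h>

  struct BackEdge { int ast_id; uint32_t pc_offset; };

  // Linear lookups over a hypothetical back-edge table, mirroring
  // TranslatePcOffsetToAstId / TranslateAstIdToPcOffset.
  static int PcToAstId(const BackEdge* t, int n, uint32_t pc) {
    for (int i = 0; i < n; i++) if (t[i].pc_offset == pc) return t[i].ast_id;
    return -1;  // The real code returns BailoutId::None().
  }
  static uint32_t AstIdToPc(const BackEdge* t, int n, int ast_id) {
    for (int i = 0; i < n; i++) if (t[i].ast_id == ast_id) return t[i].pc_offset;
    return 0;  // The real code asserts the id is present.
  }
)
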
 
@@ -9436,7 +9419,7 @@
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_TryInstallRecompiledCode) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TryInstallOptimizedCode) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
@@ -9695,13 +9678,9 @@
   // Compile source string in the native context.
   ParseRestriction restriction = function_literal_only
       ? ONLY_SINGLE_FUNCTION_LITERAL : NO_PARSE_RESTRICTION;
-  Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
-      source, context, true, CLASSIC_MODE, restriction, RelocInfo::kNoPosition);
-  RETURN_IF_EMPTY_HANDLE(isolate, shared);
-  Handle<JSFunction> fun =
-      isolate->factory()->NewFunctionFromSharedFunctionInfo(shared,
-                                                            context,
-                                                            NOT_TENURED);
+  Handle<JSFunction> fun = Compiler::GetFunctionFromEval(
+      source, context, CLASSIC_MODE, restriction, RelocInfo::kNoPosition);
+  RETURN_IF_EMPTY_HANDLE(isolate, fun);
   return *fun;
 }
 
@@ -9727,18 +9706,11 @@
 
   // Deal with a normal eval call with a string argument. Compile it
   // and return the compiled function bound in the local context.
-  Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
-      source,
-      context,
-      context->IsNativeContext(),
-      language_mode,
-      NO_PARSE_RESTRICTION,
-      scope_position);
-  RETURN_IF_EMPTY_HANDLE_VALUE(isolate, shared,
+  static const ParseRestriction restriction = NO_PARSE_RESTRICTION;
+  Handle<JSFunction> compiled = Compiler::GetFunctionFromEval(
+      source, context, language_mode, restriction, scope_position);
+  RETURN_IF_EMPTY_HANDLE_VALUE(isolate, compiled,
                                MakePair(Failure::Exception(), NULL));
-  Handle<JSFunction> compiled =
-      isolate->factory()->NewFunctionFromSharedFunctionInfo(
-          shared, context, NOT_TENURED);
   return MakePair(*compiled, *receiver);
 }
 
@@ -11373,6 +11345,12 @@
 
   // First fill all parameters.
   for (int i = 0; i < scope_info->ParameterCount(); ++i) {
+    Handle<String> name(scope_info->ParameterName(i));
+    VariableMode mode;
+    InitializationFlag init_flag;
+    // Do not materialize the parameter if it is shadowed by a context local.
+    if (scope_info->ContextSlotIndex(*name, &mode, &init_flag) != -1) continue;
+
     Handle<Object> value(i < frame_inspector->GetParametersCount()
                              ? frame_inspector->GetParameter(i)
                              : isolate->heap()->undefined_value(),
@@ -11381,29 +11359,21 @@
 
     RETURN_IF_EMPTY_HANDLE_VALUE(
         isolate,
-        Runtime::SetObjectProperty(isolate,
-                                   target,
-                                   Handle<String>(scope_info->ParameterName(i)),
-                                   value,
-                                   NONE,
-                                   kNonStrictMode),
+        Runtime::SetObjectProperty(
+            isolate, target, name, value, NONE, kNonStrictMode),
         Handle<JSObject>());
   }
 
   // Second fill all stack locals.
   for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
+    Handle<String> name(scope_info->StackLocalName(i));
     Handle<Object> value(frame_inspector->GetExpression(i), isolate);
     if (value->IsTheHole()) continue;
 
     RETURN_IF_EMPTY_HANDLE_VALUE(
         isolate,
         Runtime::SetObjectProperty(
-            isolate,
-            target,
-            Handle<String>(scope_info->StackLocalName(i)),
-            value,
-            NONE,
-            kNonStrictMode),
+            isolate, target, name, value, NONE, kNonStrictMode),
         Handle<JSObject>());
   }
 
@@ -12572,7 +12542,7 @@
   if (!isolate->debug()->SetBreakPointForScript(script, break_point_object_arg,
                                                 &source_position,
                                                 alignment)) {
-    return  isolate->heap()->undefined_value();
+    return isolate->heap()->undefined_value();
   }
 
   return Smi::FromInt(source_position);
@@ -12732,18 +12702,14 @@
     context = isolate->factory()->NewWithContext(closure, context, extension);
   }
 
-  Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
-      source,
-      context,
-      context->IsNativeContext(),
-      CLASSIC_MODE,
-      NO_PARSE_RESTRICTION,
-      RelocInfo::kNoPosition);
-  RETURN_IF_EMPTY_HANDLE(isolate, shared);
-
   Handle<JSFunction> eval_fun =
-      isolate->factory()->NewFunctionFromSharedFunctionInfo(
-          shared, context, NOT_TENURED);
+      Compiler::GetFunctionFromEval(source,
+                                    context,
+                                    CLASSIC_MODE,
+                                    NO_PARSE_RESTRICTION,
+                                    RelocInfo::kNoPosition);
+  RETURN_IF_EMPTY_HANDLE(isolate, eval_fun);
+
   bool pending_exception;
   Handle<Object> result = Execution::Call(
       isolate, eval_fun, receiver, 0, NULL, &pending_exception);
@@ -13159,7 +13125,7 @@
   ASSERT(args.length() == 1);
   // Get the function and make sure it is compiled.
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
-  if (!JSFunction::EnsureCompiled(func, KEEP_EXCEPTION)) {
+  if (!Compiler::EnsureCompiled(func, KEEP_EXCEPTION)) {
     return Failure::Exception();
   }
   func->code()->PrintLn();
@@ -13174,7 +13140,7 @@
   ASSERT(args.length() == 1);
   // Get the function and make sure it is compiled.
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
-  if (!JSFunction::EnsureCompiled(func, KEEP_EXCEPTION)) {
+  if (!Compiler::EnsureCompiled(func, KEEP_EXCEPTION)) {
     return Failure::Exception();
   }
   func->shared()->construct_stub()->PrintLn();
diff --git a/src/runtime.h b/src/runtime.h
index fe8a26b..af6c112 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -86,10 +86,9 @@
   F(GetConstructorDelegate, 1, 1) \
   F(NewArgumentsFast, 3, 1) \
   F(NewStrictArgumentsFast, 3, 1) \
-  F(LazyCompile, 1, 1) \
-  F(LazyRecompile, 1, 1) \
-  F(ConcurrentRecompile, 1, 1) \
-  F(TryInstallRecompiledCode, 1, 1) \
+  F(CompileUnoptimized, 1, 1) \
+  F(CompileOptimized, 2, 1) \
+  F(TryInstallOptimizedCode, 1, 1) \
   F(NotifyDeoptimized, 1, 1) \
   F(NotifyStubFailure, 0, 1) \
   F(DeoptimizeFunction, 1, 1) \
diff --git a/src/string.js b/src/string.js
index 14b44ca..dd5115d 100644
--- a/src/string.js
+++ b/src/string.js
@@ -616,24 +616,22 @@
   var subject = TO_STRING_INLINE(this);
   limit = (IS_UNDEFINED(limit)) ? 0xffffffff : TO_UINT32(limit);
 
-  // ECMA-262 says that if separator is undefined, the result should
-  // be an array of size 1 containing the entire string.
-  if (IS_UNDEFINED(separator)) {
-    return [subject];
-  }
-
   var length = subject.length;
   if (!IS_REGEXP(separator)) {
-    separator = TO_STRING_INLINE(separator);
+    var separator_string = TO_STRING_INLINE(separator);
 
     if (limit === 0) return [];
 
-    var separator_length = separator.length;
+    // ECMA-262 says that if separator is undefined, the result should
+    // be an array of size 1 containing the entire string.
+    if (IS_UNDEFINED(separator)) return [subject];
+
+    var separator_length = separator_string.length;
 
     // If the separator string is empty then return the elements in the subject.
     if (separator_length === 0) return %StringToArray(subject, limit);
 
-    var result = %StringSplit(subject, separator, limit);
+    var result = %StringSplit(subject, separator_string, limit);
 
     return result;
   }
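
Note: the visible effect of this reordering is limited to an undefined
separator combined with an explicit limit of 0. Per ECMA-262, ToString of the
separator and the limit check both precede the undefined-separator check,
which the old code had backwards. For illustration:

    // Behavior with the reordered checks (spec-conformant):
    "abc".split(undefined, 0);  // []       (the limit === 0 check wins)
    "abc".split(undefined);     // ["abc"]  (whole string, as before)
    "abc".split("b");           // ["a", "c"]  (unaffected)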
diff --git a/src/transitions-inl.h b/src/transitions-inl.h
index c4825fc..7d8608b 100644
--- a/src/transitions-inl.h
+++ b/src/transitions-inl.h
@@ -115,9 +115,7 @@
 Object** TransitionArray::GetKeySlot(int transition_number) {
   ASSERT(!IsSimpleTransition());
   ASSERT(transition_number < number_of_transitions());
-  return HeapObject::RawField(
-      reinterpret_cast<HeapObject*>(this),
-      OffsetOfElementAt(ToKeyIndex(transition_number)));
+  return RawFieldOfElementAt(ToKeyIndex(transition_number));
 }
 
 
diff --git a/src/version.cc b/src/version.cc
index eb320a9..b694488 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // system so their names cannot be changed without changing the scripts.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     24
-#define BUILD_NUMBER      6
+#define BUILD_NUMBER      7
 #define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index f2c955c..7803073 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -73,8 +73,8 @@
 }
 
 
-static void CallRuntimePassFunction(MacroAssembler* masm,
-                                    Runtime::FunctionId function_id) {
+static void CallRuntimePassFunction(
+    MacroAssembler* masm, Runtime::FunctionId function_id) {
   FrameScope scope(masm, StackFrame::INTERNAL);
   // Push a copy of the function onto the stack.
   __ push(rdi);
@@ -101,7 +101,13 @@
 }
 
 
-void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) {
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
+  __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
+  __ jmp(rax);
+}
+
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
   // Checking whether the queued function is ready for install is optional,
   // since we come across interrupts and stack checks elsewhere.  However,
   // not checking may delay installing ready functions, and always checking
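
Note: GenerateTailCallToReturnedCode centralizes a jump that several builtins
previously spelled out by hand. The preceding runtime call leaves a Code
object in rax, and FieldOperand(rax, Code::kHeaderSize) resolves to that
object's first instruction (the field offset minus the heap-object tag). A
conceptual C++ rendering, with invented names and a placeholder header size:

    // Conceptual sketch only; not V8's real types or layout.
    #include <cstdint>

    constexpr int kDemoHeaderSize = 64;    // placeholder, not the real value
    constexpr int kDemoHeapObjectTag = 1;  // tagged pointers are off by one

    using Entry = void (*)();

    // Equivalent of: lea rax, FieldOperand(rax, Code::kHeaderSize); jmp rax.
    Entry EntryOf(uintptr_t tagged_code_pointer) {
      return reinterpret_cast<Entry>(
          tagged_code_pointer + kDemoHeaderSize - kDemoHeapObjectTag);
    }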
@@ -111,22 +117,14 @@
   __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
   __ j(above_equal, &ok);
 
-  CallRuntimePassFunction(masm, Runtime::kTryInstallRecompiledCode);
-  // Tail call to returned code.
-  __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
-  __ jmp(rax);
+  CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
+  GenerateTailCallToReturnedCode(masm);
 
   __ bind(&ok);
   GenerateTailCallToSharedCode(masm);
 }
 
 
-void Builtins::Generate_ConcurrentRecompile(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kConcurrentRecompile);
-  GenerateTailCallToSharedCode(masm);
-}
-
-
 static void Generate_JSConstructStubHelper(MacroAssembler* masm,
                                            bool is_api_function,
                                            bool count_constructions) {
@@ -573,19 +571,41 @@
 }
 
 
-void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kLazyCompile);
-  // Do a tail-call of the compiled function.
-  __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
-  __ jmp(rax);
+void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
+  CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
+  GenerateTailCallToReturnedCode(masm);
 }
 
 
-void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kLazyRecompile);
-  // Do a tail-call of the compiled function.
-  __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
-  __ jmp(rax);
+static void CallCompileOptimized(MacroAssembler* masm,
+                                 bool concurrent) {
+  FrameScope scope(masm, StackFrame::INTERNAL);
+  // Push a copy of the function onto the stack.
+  __ push(rdi);
+  // Push call kind information.
+  __ push(rcx);
+  // Function is also the parameter to the runtime call.
+  __ push(rdi);
+  // Whether to compile in a background thread.
+  __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
+
+  __ CallRuntime(Runtime::kCompileOptimized, 2);
+  // Restore call kind information.
+  __ pop(rcx);
+  // Restore the function.
+  __ pop(rdi);
+}
+
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+  CallCompileOptimized(masm, false);
+  GenerateTailCallToReturnedCode(masm);
+}
+
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+  CallCompileOptimized(masm, true);
+  GenerateTailCallToReturnedCode(masm);
 }