Version 3.12.0

Fixed Chromium issues: 115100, 129628, 131994, 132727, 132741, 132742, 133211

Fixed V8 issues: 915, 1914, 2034, 2087, 2094, 2134, 2156, 2166, 2172, 2177, 2179, 2185

Added --extra-code flag to mksnapshot to load JS code into the VM before creating the snapshot.
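
A file passed via this flag is plain JavaScript executed in the VM before the
snapshot is written, so anything it defines is baked into every context created
from the snapshot. A minimal sketch (the file name, helper function, and
invocation syntax are illustrative, not taken from this release):

    // extra.js, loaded with something like:
    //   mksnapshot --extra-code=extra.js <output>
    function snapshotHelper(x) {
      return x + 1;
    }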

Added support for the 'restart call frame' command in the debugger.
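
A request for the new command looks roughly like the sketch below (based on the
restartFrameRequest_ handler added in src/debug-debugger.js further down; the
sequence number and frame index are illustrative):

    { "seq": 117, "type": "request",
      "command": "restartframe",
      "arguments": { "frame": 0 } }

The "frame" argument is optional; when omitted, the currently selected frame is
restarted. The response body carries the result description returned by
Debug.LiveEdit.RestartFrame.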

Performance and stability improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@11882 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index ec28da4..68fcc28 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -2435,6 +2435,14 @@
 }
 
 
+void Assembler::RecordConstPool(int size) {
+  // We only need this for debugger support, to correctly compute offsets in the
+  // code.
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
+#endif
+}
+
 void Assembler::GrowBuffer() {
   if (!own_buffer_) FATAL("external code buffer is too small");
 
@@ -2511,12 +2519,15 @@
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
   // We do not try to reuse pool constants.
   RelocInfo rinfo(pc_, rmode, data, NULL);
-  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
+  if (((rmode >= RelocInfo::JS_RETURN) &&
+       (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
+      (rmode == RelocInfo::CONST_POOL)) {
     // Adjust code for new modes.
     ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
            || RelocInfo::IsJSReturn(rmode)
            || RelocInfo::IsComment(rmode)
-           || RelocInfo::IsPosition(rmode));
+           || RelocInfo::IsPosition(rmode)
+           || RelocInfo::IsConstPool(rmode));
     // These modes do not need an entry in the constant pool.
   } else {
     ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
@@ -2602,13 +2613,15 @@
 // pool (including the jump over the pool and the constant pool marker and
   // the gap to the relocation information).
   int jump_instr = require_jump ? kInstrSize : 0;
-  int needed_space = jump_instr + kInstrSize +
-                     num_pending_reloc_info_ * kInstrSize + kGap;
+  int size = jump_instr + kInstrSize + num_pending_reloc_info_ * kPointerSize;
+  int needed_space = size + kGap;
   while (buffer_space() <= needed_space) GrowBuffer();
 
   {
     // Block recursive calls to CheckConstPool.
     BlockConstPoolScope block_const_pool(this);
+    RecordComment("[ Constant Pool");
+    RecordConstPool(size);
 
     // Emit jump over constant pool if necessary.
     Label after_pool;
@@ -2616,8 +2629,6 @@
       b(&after_pool);
     }
 
-    RecordComment("[ Constant Pool");
-
     // Put down constant pool marker "Undefined instruction" as specified by
     // A5.6 (ARMv7) Instruction set encoding.
     emit(kConstantPoolMarker | num_pending_reloc_info_);
@@ -2627,7 +2638,8 @@
       RelocInfo& rinfo = pending_reloc_info_[i];
       ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
              rinfo.rmode() != RelocInfo::POSITION &&
-             rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
+             rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
+             rinfo.rmode() != RelocInfo::CONST_POOL);
 
       Instr instr = instr_at(rinfo.pc());
       // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index e2d5f59..497d0d4 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -1219,6 +1219,25 @@
   // Use --code-comments to enable.
   void RecordComment(const char* msg);
 
+  // Record the emission of a constant pool.
+  //
+  // The emission of constant pools depends on the size of the generated code
+  // and the number of RelocInfos recorded.
+  // The Debug mechanism needs to map code offsets between two versions of a
+  // function, compiled with and without debugger support (see for example
+  // Debug::PrepareForBreakPoints()).
+  // Compiling functions with debugger support generates additional code
+  // (Debug::GenerateSlot()). This may affect the emission of the constant
+  // pools and cause the version of the code with debugger support to have
+  // constant pools generated in different places.
+  // Recording the position and size of emitted constant pools makes it
+  // possible to correctly compute the offset mappings between the different
+  // versions of a function in all situations.
+  //
+  // The parameter indicates the size of the constant pool (in bytes), including
+  // the marker and branch over the data.
+  void RecordConstPool(int size);
+
   // Writes a single byte or word of data in the code stream.  Used
   // for inline tables, e.g., jump-tables. The constant pool should be
   // emitted before any use of db and dd to ensure that constant pools
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 761123f..169a032 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -85,6 +85,8 @@
 void FastNewClosureStub::Generate(MacroAssembler* masm) {
   // Create a new closure from the given function info in new
   // space. Set the context to the current context in cp.
+  Counters* counters = masm->isolate()->counters();
+
   Label gc;
 
   // Pop the function info from the stack.
@@ -98,6 +100,8 @@
                         &gc,
                         TAG_OBJECT);
 
+  __ IncrementCounter(counters->fast_new_closure_total(), 1, r6, r7);
+
   int map_index = (language_mode_ == CLASSIC_MODE)
       ? Context::FUNCTION_MAP_INDEX
       : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
@@ -106,24 +110,34 @@
   // as the map of the allocated object.
   __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
-  __ ldr(r2, MemOperand(r2, Context::SlotOffset(map_index)));
-  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+  __ ldr(r5, MemOperand(r2, Context::SlotOffset(map_index)));
+  __ str(r5, FieldMemOperand(r0, HeapObject::kMapOffset));
 
   // Initialize the rest of the function. We don't have to update the
   // write barrier because the allocated object is in new space.
   __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
-  __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
-  __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+  __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
   __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
   __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
-  __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
+  __ str(r5, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
   __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
   __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
   __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
-  __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
 
   // Initialize the code pointer in the function to be the one
   // found in the shared function info object.
+  // But first check if there is an optimized version for our context.
+  Label check_optimized;
+  Label install_unoptimized;
+  if (FLAG_cache_optimized_code) {
+    __ ldr(r1,
+           FieldMemOperand(r3, SharedFunctionInfo::kOptimizedCodeMapOffset));
+    __ tst(r1, r1);
+    __ b(ne, &check_optimized);
+  }
+  __ bind(&install_unoptimized);
+  __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+  __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
   __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
   __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
@@ -131,6 +145,72 @@
   // Return result. The argument function info has been popped already.
   __ Ret();
 
+  __ bind(&check_optimized);
+
+  __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, r6, r7);
+
+  // r2 holds the global context, r1 points to a fixed array of 3-element
+  // entries (global context, optimized code, literals).
+  // The optimized code map must never be empty, so check the first elements.
+  Label install_optimized;
+  // Speculatively move code object into r4.
+  __ ldr(r4, FieldMemOperand(r1, FixedArray::kHeaderSize + kPointerSize));
+  __ ldr(r5, FieldMemOperand(r1, FixedArray::kHeaderSize));
+  __ cmp(r2, r5);
+  __ b(eq, &install_optimized);
+
+  // Iterate through the rest of the map backwards. r4 holds an index as a Smi.
+  Label loop;
+  __ ldr(r4, FieldMemOperand(r1, FixedArray::kLengthOffset));
+  __ bind(&loop);
+  // Do not double check first entry.
+
+  __ cmp(r4, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+  __ b(eq, &install_unoptimized);
+  __ sub(r4, r4, Operand(
+      Smi::FromInt(SharedFunctionInfo::kEntryLength)));  // Skip an entry.
+  __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ ldr(r5, MemOperand(r5));
+  __ cmp(r2, r5);
+  __ b(ne, &loop);
+  // Hit: fetch the optimized code.
+  __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ add(r5, r5, Operand(kPointerSize));
+  __ ldr(r4, MemOperand(r5));
+
+  __ bind(&install_optimized);
+  __ IncrementCounter(counters->fast_new_closure_install_optimized(),
+                      1, r6, r7);
+
+  // TODO(fschneider): Idea: store proper code pointers in the map and either
+  // unmangle them on marking or do nothing as the whole map is discarded on
+  // major GC anyway.
+  __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ str(r4, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
+
+  // Now link the function into the list of optimized functions.
+  __ ldr(r4, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST));
+
+  __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
+  // No need for a write barrier as the JSFunction (r0) is in new space.
+
+  __ str(r0, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST));
+  // Store the JSFunction (r0) into r4 before issuing the write barrier, as
+  // it clobbers all the registers passed.
+  __ mov(r4, r0);
+  __ RecordWriteContextSlot(
+      r2,
+      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
+      r4,
+      r1,
+      kLRHasNotBeenSaved,
+      kDontSaveFPRegs);
+
+  // Return result. The argument function info has been popped already.
+  __ Ret();
+
   // Create a new closure through the slower runtime call.
   __ bind(&gc);
   __ LoadRoot(r4, Heap::kFalseValueRootIndex);
@@ -7131,6 +7211,8 @@
   { REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET },
   // StoreArrayLiteralElementStub::Generate
   { REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET },
+  // FastNewClosureStub::Generate
+  { REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET },
   // Null termination.
   { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
 };
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 699e6aa..c75fdd4 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -50,6 +50,10 @@
 
   if (!function->IsOptimized()) return;
 
+  // The optimized code is going to be patched, so we cannot use it
+  // any more.  Play it safe and reset the whole cache.
+  function->shared()->ClearOptimizedCodeMap();
+
   // Get the optimized code.
   Code* code = function->code();
   Address code_start_address = code->instruction_start();
@@ -97,8 +101,19 @@
   // ignore all slots that might have been recorded on it.
   isolate->heap()->mark_compact_collector()->InvalidateCode(code);
 
-  // Set the code for the function to non-optimized version.
-  function->ReplaceCode(function->shared()->code());
+  // Iterate over all the functions which share the same code object
+  // and make them use the unoptimized version.
+  Context* context = function->context()->global_context();
+  Object* element = context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
+  SharedFunctionInfo* shared = function->shared();
+  while (!element->IsUndefined()) {
+    JSFunction* func = JSFunction::cast(element);
+    // Grab element before code replacement as ReplaceCode alters the list.
+    element = func->next_function_link();
+    if (func->code() == code) {
+      func->ReplaceCode(shared->code());
+    }
+  }
 
   if (FLAG_trace_deopt) {
     PrintF("[forced deoptimization: ");
@@ -239,9 +254,9 @@
   unsigned node_id = iterator.Next();
   USE(node_id);
   ASSERT(node_id == ast_id);
-  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
-  USE(function);
-  ASSERT(function == function_);
+  int closure_id = iterator.Next();
+  USE(closure_id);
+  ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
   unsigned height = iterator.Next();
   unsigned height_in_bytes = height * kPointerSize;
   USE(height_in_bytes);
@@ -352,8 +367,8 @@
   if (FLAG_trace_osr) {
     PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
            ok ? "finished" : "aborted",
-           reinterpret_cast<intptr_t>(function));
-    function->PrintName();
+           reinterpret_cast<intptr_t>(function_));
+    function_->PrintName();
     PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
   }
 }
@@ -583,7 +598,15 @@
                                    int frame_index) {
   // Read the ast node id, function, and frame height for this output frame.
   int node_id = iterator->Next();
-  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+  JSFunction* function;
+  if (frame_index != 0) {
+    function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+  } else {
+    int closure_id = iterator->Next();
+    USE(closure_id);
+    ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
+    function = function_;
+  }
   unsigned height = iterator->Next();
   unsigned height_in_bytes = height * kPointerSize;
   if (FLAG_trace_deopt) {
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index ff7c3c1..33a22f1 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -328,7 +328,7 @@
   }
   if (isolate()->IsDebuggerActive()) {
     // Detect debug break requests as soon as possible.
-    reset_value = 10;
+    reset_value = FLAG_interrupt_budget >> 4;
   }
   __ mov(r2, Operand(profiling_counter_));
   __ mov(r3, Operand(Smi::FromInt(reset_value)));
@@ -1607,7 +1607,7 @@
   // marked expressions, no store code is emitted.
   expr->CalculateEmitStore(zone());
 
-  AccessorTable accessor_table(isolate()->zone());
+  AccessorTable accessor_table(zone());
   for (int i = 0; i < expr->properties()->length(); i++) {
     ObjectLiteral::Property* property = expr->properties()->at(i);
     if (property->IsCompileTimeValue()) continue;
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 256d180..6a6a062 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -478,7 +478,10 @@
   int height = translation_size - environment->parameter_count();
 
   WriteTranslation(environment->outer(), translation);
-  int closure_id = DefineDeoptimizationLiteral(environment->closure());
+  int closure_id = *info()->closure() != *environment->closure()
+      ? DefineDeoptimizationLiteral(environment->closure())
+      : Translation::kSelfLiteralId;
+
   switch (environment->frame_type()) {
     case JS_FUNCTION:
       translation->BeginJSFrame(environment->ast_id(), closure_id, height);
@@ -3089,7 +3092,7 @@
 
 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   Register result = ToRegister(instr->result());
-  __ LoadHeapObject(result, instr->hydrogen()->closure());
+  __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
 }
 
 
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index f35c69b..880d225 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -43,26 +43,25 @@
 
 class LCodeGen BASE_EMBEDDED {
  public:
-  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info,
-           Zone* zone)
-      : chunk_(chunk),
+  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+      : zone_(info->zone()),
+        chunk_(chunk),
         masm_(assembler),
         info_(info),
         current_block_(-1),
         current_instruction_(-1),
         instructions_(chunk->instructions()),
-        deoptimizations_(4, zone),
-        deopt_jump_table_(4, zone),
-        deoptimization_literals_(8, zone),
+        deoptimizations_(4, info->zone()),
+        deopt_jump_table_(4, info->zone()),
+        deoptimization_literals_(8, info->zone()),
         inlined_function_count_(0),
         scope_(info->scope()),
         status_(UNUSED),
-        translations_(zone),
-        deferred_(8, zone),
+        translations_(info->zone()),
+        deferred_(8, info->zone()),
         osr_pc_offset_(-1),
         last_lazy_deopt_pc_(0),
-        safepoints_(zone),
-        zone_(zone),
+        safepoints_(info->zone()),
         resolver_(this),
         expected_safepoint_kind_(Safepoint::kSimple) {
     PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -350,6 +349,7 @@
 
   void EnsureSpaceForLazyDeopt();
 
+  Zone* zone_;
   LChunk* const chunk_;
   MacroAssembler* const masm_;
   CompilationInfo* const info_;
@@ -372,8 +372,6 @@
   // itself is emitted at the end of the generated code.
   SafepointTableBuilder safepoints_;
 
-  Zone* zone_;
-
   // Compiler from a set of parallel moves to a sequential list of moves.
   LGapResolver resolver_;
 
diff --git a/src/arm/regexp-macro-assembler-arm.h b/src/arm/regexp-macro-assembler-arm.h
index 9bebb4d..f723fa2 100644
--- a/src/arm/regexp-macro-assembler-arm.h
+++ b/src/arm/regexp-macro-assembler-arm.h
@@ -35,14 +35,7 @@
 namespace internal {
 
 
-#ifdef V8_INTERPRETED_REGEXP
-class RegExpMacroAssemblerARM: public RegExpMacroAssembler {
- public:
-  RegExpMacroAssemblerARM();
-  virtual ~RegExpMacroAssemblerARM();
-};
-
-#else  // V8_INTERPRETED_REGEXP
+#ifndef V8_INTERPRETED_REGEXP
 class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
  public:
   RegExpMacroAssemblerARM(Mode mode, int registers_to_save, Zone* zone);
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 629c209..394ef27 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -276,7 +276,7 @@
   // make them invisible to all commands.
   UndoBreakpoints();
 
-  while (!done) {
+  while (!done && !sim_->has_bad_pc()) {
     if (last_pc != sim_->get_pc()) {
       disasm::NameConverter converter;
       disasm::Disassembler dasm(converter);
diff --git a/src/array.js b/src/array.js
index a1cc5b6..1cedd8d 100644
--- a/src/array.js
+++ b/src/array.js
@@ -777,78 +777,103 @@
     }
   };
 
-  var QuickSort = function QuickSort(a, from, to) {
-    // Insertion sort is faster for short arrays.
-    if (to - from <= 10) {
-      InsertionSort(a, from, to);
-      return;
+  var GetThirdIndex = function(a, from, to) {
+    var t_array = [];
+    // Use both 'from' and 'to' to determine the pivot candidates.
+    var increment = 200 + ((to - from) & 15);
+    for (var i = from + 1; i < to - 1; i += increment) {
+      t_array.push([i, a[i]]);
     }
-    // Find a pivot as the median of first, last and middle element.
-    var v0 = a[from];
-    var v1 = a[to - 1];
-    var middle_index = from + ((to - from) >> 1);
-    var v2 = a[middle_index];
-    var c01 = %_CallFunction(receiver, v0, v1, comparefn);
-    if (c01 > 0) {
-      // v1 < v0, so swap them.
-      var tmp = v0;
-      v0 = v1;
-      v1 = tmp;
-    } // v0 <= v1.
-    var c02 = %_CallFunction(receiver, v0, v2, comparefn);
-    if (c02 >= 0) {
-      // v2 <= v0 <= v1.
-      var tmp = v0;
-      v0 = v2;
-      v2 = v1;
-      v1 = tmp;
-    } else {
-      // v0 <= v1 && v0 < v2
-      var c12 = %_CallFunction(receiver, v1, v2, comparefn);
-      if (c12 > 0) {
-        // v0 <= v2 < v1
-        var tmp = v1;
-        v1 = v2;
-        v2 = tmp;
-      }
-    }
-    // v0 <= v1 <= v2
-    a[from] = v0;
-    a[to - 1] = v2;
-    var pivot = v1;
-    var low_end = from + 1;   // Upper bound of elements lower than pivot.
-    var high_start = to - 1;  // Lower bound of elements greater than pivot.
-    a[middle_index] = a[low_end];
-    a[low_end] = pivot;
+    t_array.sort(function(a, b) {
+        return %_CallFunction(receiver, a[1], b[1], comparefn); });
+    var third_index = t_array[t_array.length >> 1][0];
+    return third_index;
+  };
 
-    // From low_end to i are elements equal to pivot.
-    // From i to high_start are elements that haven't been compared yet.
-    partition: for (var i = low_end + 1; i < high_start; i++) {
-      var element = a[i];
-      var order = %_CallFunction(receiver, element, pivot, comparefn);
-      if (order < 0) {
-        a[i] = a[low_end];
-        a[low_end] = element;
-        low_end++;
-      } else if (order > 0) {
-        do {
-          high_start--;
-          if (high_start == i) break partition;
-          var top_elem = a[high_start];
-          order = %_CallFunction(receiver, top_elem, pivot, comparefn);
-        } while (order > 0);
-        a[i] = a[high_start];
-        a[high_start] = element;
+  var QuickSort = function QuickSort(a, from, to) {
+    var third_index = 0;
+    while (true) {
+      // Insertion sort is faster for short arrays.
+      if (to - from <= 10) {
+        InsertionSort(a, from, to);
+        return;
+      }
+      if (to - from > 1000) {
+        third_index = GetThirdIndex(a, from, to);
+      } else {
+        third_index = from + ((to - from) >> 1);
+      }
+      // Find a pivot as the median of first, last and middle element.
+      var v0 = a[from];
+      var v1 = a[to - 1];
+      var v2 = a[third_index];
+      var c01 = %_CallFunction(receiver, v0, v1, comparefn);
+      if (c01 > 0) {
+        // v1 < v0, so swap them.
+        var tmp = v0;
+        v0 = v1;
+        v1 = tmp;
+      } // v0 <= v1.
+      var c02 = %_CallFunction(receiver, v0, v2, comparefn);
+      if (c02 >= 0) {
+        // v2 <= v0 <= v1.
+        var tmp = v0;
+        v0 = v2;
+        v2 = v1;
+        v1 = tmp;
+      } else {
+        // v0 <= v1 && v0 < v2
+        var c12 = %_CallFunction(receiver, v1, v2, comparefn);
+        if (c12 > 0) {
+          // v0 <= v2 < v1
+          var tmp = v1;
+          v1 = v2;
+          v2 = tmp;
+        }
+      }
+      // v0 <= v1 <= v2
+      a[from] = v0;
+      a[to - 1] = v2;
+      var pivot = v1;
+      var low_end = from + 1;   // Upper bound of elements lower than pivot.
+      var high_start = to - 1;  // Lower bound of elements greater than pivot.
+      a[third_index] = a[low_end];
+      a[low_end] = pivot;
+
+      // From low_end to i are elements equal to pivot.
+      // From i to high_start are elements that haven't been compared yet.
+      partition: for (var i = low_end + 1; i < high_start; i++) {
+        var element = a[i];
+        var order = %_CallFunction(receiver, element, pivot, comparefn);
         if (order < 0) {
-          element = a[i];
           a[i] = a[low_end];
           a[low_end] = element;
           low_end++;
+        } else if (order > 0) {
+          do {
+            high_start--;
+            if (high_start == i) break partition;
+            var top_elem = a[high_start];
+            order = %_CallFunction(receiver, top_elem, pivot, comparefn);
+          } while (order > 0);
+          a[i] = a[high_start];
+          a[high_start] = element;
+          if (order < 0) {
+            element = a[i];
+            a[i] = a[low_end];
+            a[low_end] = element;
+            low_end++;
+          }
         }
       }
+      if (to - high_start < low_end - from) {
+        QuickSort(a, high_start, to);
+        to = low_end;
+      } else {
+        QuickSort(a, from, low_end);
+        from = high_start;
+      }
     }
-    QuickSort(a, from, low_end);
-    QuickSort(a, high_start, to);
   };
 
   // Copy elements in the range 0..length from obj's prototype chain
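
The rewritten QuickSort above recurses only into the smaller partition and
loops on the larger one. A minimal standalone sketch of that pattern, using a
hypothetical partition() helper (not V8 code), looks like this:

    var sortBoundedDepth = function(a, from, to, partition) {
      while (to - from > 1) {
        // partition() is assumed to split [from, to) at an index p,
        // with from < p < to, so that [from, p) sorts before [p, to).
        var p = partition(a, from, to);
        if (to - p < p - from) {
          sortBoundedDepth(a, p, to, partition);   // recurse into smaller half
          to = p;                                  // iterate on the larger one
        } else {
          sortBoundedDepth(a, from, p, partition);
          from = p;
        }
      }
    };

Recursing only into the smaller half bounds the stack depth by O(log n), which
is the point of the while (true) loop in the new QuickSort.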
diff --git a/src/assembler.cc b/src/assembler.cc
index d4c49dd..6dcd2a0 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -141,7 +141,7 @@
 // an iteration.
 //
 // The encoding relies on the fact that there are fewer than 14
-// different non-compactly encoded relocation modes.
+// different relocation modes using standard non-compact encoding.
 //
 // The first byte of a relocation record has a tag in its low 2 bits:
 // Here are the record schemes, depending on the low tag and optional higher
@@ -173,7 +173,9 @@
 //                              00 [4 bit middle_tag] 11 followed by
 //                              00 [6 bit pc delta]
 //
-//      1101: not used (would allow one more relocation mode to be added)
+//      1101: constant pool. Used on ARM only for now.
+//        The format is:       11 1101 11
+//                             signed int (size of the constant pool).
 //      1110: long_data_record
 //        The format is:       [2-bit data_type_tag] 1110 11
 //                             signed intptr_t, lowest byte written first
@@ -194,7 +196,7 @@
 //                dropped, and last non-zero chunk tagged with 1.)
 
 
-const int kMaxRelocModes = 14;
+const int kMaxStandardNonCompactModes = 14;
 
 const int kTagBits = 2;
 const int kTagMask = (1 << kTagBits) - 1;
@@ -228,6 +230,9 @@
 const int kStatementPositionTag = 2;
 const int kCommentTag = 3;
 
+const int kConstPoolExtraTag = kPCJumpExtraTag - 2;
+const int kConstPoolTag = 3;
+
 
 uint32_t RelocInfoWriter::WriteVariableLengthPCJump(uint32_t pc_delta) {
   // Return if the pc_delta can fit in kSmallPCDeltaBits bits.
@@ -285,6 +290,15 @@
   }
 }
 
+void RelocInfoWriter::WriteExtraTaggedConstPoolData(int data) {
+  WriteExtraTag(kConstPoolExtraTag, kConstPoolTag);
+  for (int i = 0; i < kIntSize; i++) {
+    *--pos_ = static_cast<byte>(data);
+    // Signed right shift is arithmetic shift.  Tested in test-utils.cc.
+    data = data >> kBitsPerByte;
+  }
+}
+
 void RelocInfoWriter::WriteExtraTaggedData(intptr_t data_delta, int top_tag) {
   WriteExtraTag(kDataJumpExtraTag, top_tag);
   for (int i = 0; i < kIntptrSize; i++) {
@@ -300,8 +314,8 @@
   byte* begin_pos = pos_;
 #endif
   ASSERT(rinfo->pc() - last_pc_ >= 0);
-  ASSERT(RelocInfo::NUMBER_OF_MODES - RelocInfo::LAST_COMPACT_ENUM <=
-         kMaxRelocModes);
+  ASSERT(RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM - RelocInfo::LAST_COMPACT_ENUM
+         <= kMaxStandardNonCompactModes);
   // Use unsigned delta-encoding for pc.
   uint32_t pc_delta = static_cast<uint32_t>(rinfo->pc() - last_pc_);
   RelocInfo::Mode rmode = rinfo->rmode();
@@ -347,6 +361,9 @@
     WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
     WriteExtraTaggedData(rinfo->data(), kCommentTag);
     ASSERT(begin_pos - pos_ >= RelocInfo::kMinRelocCommentSize);
+  } else if (RelocInfo::IsConstPool(rmode)) {
+    WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
+    WriteExtraTaggedConstPoolData(static_cast<int>(rinfo->data()));
   } else {
     ASSERT(rmode > RelocInfo::LAST_COMPACT_ENUM);
     int saved_mode = rmode - RelocInfo::LAST_COMPACT_ENUM;
@@ -397,6 +414,15 @@
 }
 
 
+void RelocIterator::AdvanceReadConstPoolData() {
+  int x = 0;
+  for (int i = 0; i < kIntSize; i++) {
+    x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
+  }
+  rinfo_.data_ = x;
+}
+
+
 void RelocIterator::AdvanceReadPosition() {
   int x = 0;
   for (int i = 0; i < kIntSize; i++) {
@@ -500,8 +526,7 @@
       ASSERT(tag == kDefaultTag);
       int extra_tag = GetExtraTag();
       if (extra_tag == kPCJumpExtraTag) {
-        int top_tag = GetTopTag();
-        if (top_tag == kVariableLengthPCJumpTopTag) {
+        if (GetTopTag() == kVariableLengthPCJumpTopTag) {
           AdvanceReadVariableLengthPCJump();
         } else {
           AdvanceReadPC();
@@ -531,6 +556,13 @@
           }
           Advance(kIntptrSize);
         }
+      } else if ((extra_tag == kConstPoolExtraTag) &&
+                 (GetTopTag() == kConstPoolTag)) {
+        if (SetMode(RelocInfo::CONST_POOL)) {
+          AdvanceReadConstPoolData();
+          return;
+        }
+        Advance(kIntSize);
       } else {
         AdvanceReadPC();
         int rmode = extra_tag + RelocInfo::LAST_COMPACT_ENUM;
@@ -613,6 +645,8 @@
       return "external reference";
     case RelocInfo::INTERNAL_REFERENCE:
       return "internal reference";
+    case RelocInfo::CONST_POOL:
+      return "constant pool";
     case RelocInfo::DEBUG_BREAK_SLOT:
 #ifndef ENABLE_DEBUGGER_SUPPORT
       UNREACHABLE();
@@ -698,6 +732,7 @@
     case STATEMENT_POSITION:
     case EXTERNAL_REFERENCE:
     case INTERNAL_REFERENCE:
+    case CONST_POOL:
     case DEBUG_BREAK_SLOT:
     case NONE:
       break;
diff --git a/src/assembler.h b/src/assembler.h
index 619c69c..fb5ac1f 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -204,14 +204,19 @@
     EXTERNAL_REFERENCE,  // The address of an external C++ function.
     INTERNAL_REFERENCE,  // An address inside the same function.
 
+    // Marks a constant pool. Only used on ARM.
+    // It uses a custom noncompact encoding.
+    CONST_POOL,
+
     // add more as needed
     // Pseudo-types
-    NUMBER_OF_MODES,  // There are at most 14 modes with noncompact encoding.
+    NUMBER_OF_MODES,  // There are at most 15 modes with noncompact encoding.
     NONE,  // never recorded
     LAST_CODE_ENUM = DEBUG_BREAK,
     LAST_GCED_ENUM = GLOBAL_PROPERTY_CELL,
     // Modes <= LAST_COMPACT_ENUM are guaranteed to have compact encoding.
-    LAST_COMPACT_ENUM = CODE_TARGET_WITH_ID
+    LAST_COMPACT_ENUM = CODE_TARGET_WITH_ID,
+    LAST_STANDARD_NONCOMPACT_ENUM = INTERNAL_REFERENCE
   };
 
 
@@ -240,6 +245,9 @@
   static inline bool IsComment(Mode mode) {
     return mode == COMMENT;
   }
+  static inline bool IsConstPool(Mode mode) {
+    return mode == CONST_POOL;
+  }
   static inline bool IsPosition(Mode mode) {
     return mode == POSITION || mode == STATEMENT_POSITION;
   }
@@ -416,6 +424,7 @@
   inline void WriteTaggedPC(uint32_t pc_delta, int tag);
   inline void WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag);
   inline void WriteExtraTaggedIntData(int data_delta, int top_tag);
+  inline void WriteExtraTaggedConstPoolData(int data);
   inline void WriteExtraTaggedData(intptr_t data_delta, int top_tag);
   inline void WriteTaggedData(intptr_t data_delta, int tag);
   inline void WriteExtraTag(int extra_tag, int top_tag);
@@ -466,6 +475,7 @@
   void ReadTaggedPC();
   void AdvanceReadPC();
   void AdvanceReadId();
+  void AdvanceReadConstPoolData();
   void AdvanceReadPosition();
   void AdvanceReadData();
   void AdvanceReadVariableLengthPCJump();
diff --git a/src/ast.cc b/src/ast.cc
index 0970253..b790e25 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -156,6 +156,11 @@
 }
 
 
+bool FunctionLiteral::AllowsLazyCompilationWithoutContext() {
+  return scope()->AllowsLazyCompilationWithoutContext();
+}
+
+
 int FunctionLiteral::start_position() const {
   return scope()->start_position();
 }
diff --git a/src/ast.h b/src/ast.h
index 02ece7f..2ebf7f9 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -2065,6 +2065,7 @@
   int parameter_count() { return parameter_count_; }
 
   bool AllowsLazyCompilation();
+  bool AllowsLazyCompilationWithoutContext();
 
   Handle<String> debug_name() const {
     if (name_->length() > 0) return name_;
@@ -2638,9 +2639,9 @@
 template<class Visitor>
 class AstNodeFactory BASE_EMBEDDED {
  public:
-  explicit AstNodeFactory(Isolate* isolate)
+  AstNodeFactory(Isolate* isolate, Zone* zone)
       : isolate_(isolate),
-        zone_(isolate_->zone()) { }
+        zone_(zone) { }
 
   Visitor* visitor() { return &visitor_; }
 
@@ -2710,10 +2711,9 @@
 
   Block* NewBlock(ZoneStringList* labels,
                   int capacity,
-                  bool is_initializer_block,
-                  Zone* zone) {
+                  bool is_initializer_block) {
     Block* block = new(zone_) Block(
-        isolate_, labels, capacity, is_initializer_block, zone);
+        isolate_, labels, capacity, is_initializer_block, zone_);
     VISIT_AND_RETURN(Block, block)
   }
 
diff --git a/src/collection.js b/src/collection.js
index 75fe3d5..9ca0aae 100644
--- a/src/collection.js
+++ b/src/collection.js
@@ -79,7 +79,12 @@
   if (IS_UNDEFINED(key)) {
     key = undefined_sentinel;
   }
-  return %SetDelete(this, key);
+  if (%SetHas(this, key)) {
+    %SetDelete(this, key);
+    return true;
+  } else {
+    return false;
+  }
 }
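
With this change, Set.prototype.delete reports whether an element was actually
removed rather than returning the bare %SetDelete result. A sketch of the
observable behavior (assuming a build with the experimental collections
enabled):

    var s = new Set();
    s.add(1);
    s.delete(1);  // true: 1 was present and has been removed
    s.delete(1);  // false: the set no longer contains 1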
 
 
diff --git a/src/compiler.cc b/src/compiler.cc
index d44718b..f706c7d 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -51,7 +51,7 @@
 namespace internal {
 
 
-CompilationInfo::CompilationInfo(Handle<Script> script)
+CompilationInfo::CompilationInfo(Handle<Script> script, Zone* zone)
     : isolate_(script->GetIsolate()),
       flags_(LanguageModeField::encode(CLASSIC_MODE)),
       function_(NULL),
@@ -60,12 +60,14 @@
       script_(script),
       extension_(NULL),
       pre_parse_data_(NULL),
-      osr_ast_id_(AstNode::kNoNumber) {
+      osr_ast_id_(AstNode::kNoNumber),
+      zone_(zone) {
   Initialize(BASE);
 }
 
 
-CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info)
+CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
+                                 Zone* zone)
     : isolate_(shared_info->GetIsolate()),
       flags_(LanguageModeField::encode(CLASSIC_MODE) |
              IsLazy::encode(true)),
@@ -76,12 +78,13 @@
       script_(Handle<Script>(Script::cast(shared_info->script()))),
       extension_(NULL),
       pre_parse_data_(NULL),
-      osr_ast_id_(AstNode::kNoNumber) {
+      osr_ast_id_(AstNode::kNoNumber),
+      zone_(zone) {
   Initialize(BASE);
 }
 
 
-CompilationInfo::CompilationInfo(Handle<JSFunction> closure)
+CompilationInfo::CompilationInfo(Handle<JSFunction> closure, Zone* zone)
     : isolate_(closure->GetIsolate()),
       flags_(LanguageModeField::encode(CLASSIC_MODE) |
              IsLazy::encode(true)),
@@ -93,7 +96,8 @@
       script_(Handle<Script>(Script::cast(shared_info_->script()))),
       extension_(NULL),
       pre_parse_data_(NULL),
-      osr_ast_id_(AstNode::kNoNumber) {
+      osr_ast_id_(AstNode::kNoNumber),
+      zone_(zone) {
   Initialize(BASE);
 }
 
@@ -118,7 +122,7 @@
       FLAG_crankshaft &&
       !function()->flags()->Contains(kDontSelfOptimize) &&
       !function()->flags()->Contains(kDontOptimize) &&
-      function()->scope()->AllowsLazyRecompilation() &&
+      function()->scope()->AllowsLazyCompilation() &&
       (shared_info().is_null() || !shared_info()->optimization_disabled());
 }
 
@@ -137,9 +141,8 @@
 // all. However, Crankshaft supports recompilation of functions, so in this
 // case the full compiler need not be used if a debugger is attached, but
 // only if break points have actually been set.
-static bool is_debugging_active() {
+static bool IsDebuggerActive(Isolate* isolate) {
 #ifdef ENABLE_DEBUGGER_SUPPORT
-  Isolate* isolate = Isolate::Current();
   return V8::UseCrankshaft() ?
     isolate->debug()->has_break_points() :
     isolate->debugger()->IsDebuggerActive();
@@ -149,8 +152,8 @@
 }
 
 
-static bool AlwaysFullCompiler() {
-  return FLAG_always_full_compiler || is_debugging_active();
+static bool AlwaysFullCompiler(Isolate* isolate) {
+  return FLAG_always_full_compiler || IsDebuggerActive(isolate);
 }
 
 
@@ -205,7 +208,7 @@
   // Fall back to using the full code generator if it's not possible
   // to use the Hydrogen-based optimizing compiler. We already have
   // generated code for this from the shared function object.
-  if (AlwaysFullCompiler()) {
+  if (AlwaysFullCompiler(info->isolate())) {
     info->SetCode(code);
     return true;
   }
@@ -262,7 +265,7 @@
   bool should_recompile = !info->shared_info()->has_deoptimization_support();
   if (should_recompile || FLAG_hydrogen_stats) {
     HPhase phase(HPhase::kFullCodeGen);
-    CompilationInfo unoptimized(info->shared_info());
+    CompilationInfoWithZone unoptimized(info->shared_info());
     // Note that we use the same AST that we will use for generating the
     // optimized code.
     unoptimized.SetFunction(info->function());
@@ -295,8 +298,8 @@
 
   Handle<Context> global_context(info->closure()->context()->global_context());
   TypeFeedbackOracle oracle(code, global_context, info->isolate(),
-                            info->isolate()->zone());
-  HGraphBuilder builder(info, &oracle, info->isolate()->zone());
+                            info->zone());
+  HGraphBuilder builder(info, &oracle);
   HPhase phase(HPhase::kTotal);
   HGraph* graph = builder.CreateGraph();
   if (info->isolate()->has_pending_exception()) {
@@ -305,7 +308,7 @@
   }
 
   if (graph != NULL) {
-    Handle<Code> optimized_code = graph->Compile(info, graph->zone());
+    Handle<Code> optimized_code = graph->Compile();
     if (!optimized_code.is_null()) {
       info->SetCode(optimized_code);
       FinishOptimization(info->closure(), start);
@@ -348,7 +351,7 @@
   bool succeeded = MakeCode(info);
   if (!info->shared_info().is_null()) {
     Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope(),
-                                                     info->isolate()->zone());
+                                                     info->zone());
     info->shared_info()->set_scope_info(*scope_info);
   }
   return succeeded;
@@ -358,7 +361,7 @@
 
 static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
   Isolate* isolate = info->isolate();
-  ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
+  ZoneScope zone_scope(info->zone(), DELETE_ON_EXIT);
   PostponeInterruptsScope postpone(isolate);
 
   ASSERT(!isolate->global_context().is_null());
@@ -422,7 +425,7 @@
           lit->name(),
           lit->materialized_literal_count(),
           info->code(),
-          ScopeInfo::Create(info->scope(), info->isolate()->zone()));
+          ScopeInfo::Create(info->scope(), info->zone()));
 
   ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
   Compiler::SetFunctionInfo(result, lit, true, script);
@@ -464,7 +467,7 @@
       script, Debugger::NO_AFTER_COMPILE_FLAGS);
 #endif
 
-  live_edit_tracker.RecordFunctionInfo(result, lit, isolate->zone());
+  live_edit_tracker.RecordFunctionInfo(result, lit, info->zone());
 
   return result;
 }
@@ -522,7 +525,7 @@
                                            : *script_data);
 
     // Compile the function and add it to the cache.
-    CompilationInfo info(script);
+    CompilationInfoWithZone info(script);
     info.MarkAsGlobal();
     info.SetExtension(extension);
     info.SetPreParseData(pre_data);
@@ -570,7 +573,7 @@
   if (result.is_null()) {
     // Create a script object describing the script to be compiled.
     Handle<Script> script = isolate->factory()->NewScript(source);
-    CompilationInfo info(script);
+    CompilationInfoWithZone info(script);
     info.MarkAsEval();
     if (is_global) info.MarkAsGlobal();
     info.SetLanguageMode(language_mode);
@@ -605,7 +608,7 @@
 bool Compiler::CompileLazy(CompilationInfo* info) {
   Isolate* isolate = info->isolate();
 
-  ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
+  ZoneScope zone_scope(info->zone(), DELETE_ON_EXIT);
 
   // The VM is in the COMPILER state until exiting this function.
   VMState state(isolate, COMPILER);
@@ -616,6 +619,25 @@
   int compiled_size = shared->end_position() - shared->start_position();
   isolate->counters()->total_compile_size()->Increment(compiled_size);
 
+  if (FLAG_cache_optimized_code && info->IsOptimizing()) {
+    Handle<JSFunction> function = info->closure();
+    ASSERT(!function.is_null());
+    Handle<Context> global_context(function->context()->global_context());
+    int index = function->shared()->SearchOptimizedCodeMap(*global_context);
+    if (index > 0) {
+      if (FLAG_trace_opt) {
+        PrintF("  [Found optimized code for");
+        function->PrintName();
+        PrintF("\n");
+      }
+      Code* code = Code::cast(
+          FixedArray::cast(shared->optimized_code_map())->get(index));
+      ASSERT(code != NULL);
+      function->ReplaceCode(code);
+      return true;
+    }
+  }
+
   // Generate the AST for the lazily compiled function.
   if (ParserApi::Parse(info, kNoParsingFlags)) {
     // Measure how long it takes to do the lazy compilation; only take the
@@ -647,6 +669,26 @@
       if (info->IsOptimizing()) {
         ASSERT(shared->scope_info() != ScopeInfo::Empty());
         function->ReplaceCode(*code);
+        if (FLAG_cache_optimized_code &&
+            code->kind() == Code::OPTIMIZED_FUNCTION) {
+          Handle<SharedFunctionInfo> shared(function->shared());
+          Handle<Context> global_context(function->context()->global_context());
+
+          // Create literals array that will be shared for this global context.
+          int number_of_literals = shared->num_literals();
+          Handle<FixedArray> literals =
+              isolate->factory()->NewFixedArray(number_of_literals);
+          if (number_of_literals > 0) {
+            // Store the object, regexp and array functions in the literals
+            // array prefix.  These functions will be used when creating
+            // object, regexp and array literals in this function.
+            literals->set(JSFunction::kLiteralGlobalContextIndex,
+                          function->context()->global_context());
+          }
+
+          SharedFunctionInfo::AddToOptimizedCodeMap(
+              shared, global_context, code, literals);
+        }
       } else {
         // Update the shared function info with the compiled code and the
         // scope info.  Please note, that the order of the shared function
@@ -654,7 +696,7 @@
         // trigger a GC, causing the ASSERT below to be invalid if the code
         // was flushed. By setting the code object last we avoid this.
         Handle<ScopeInfo> scope_info =
-            ScopeInfo::Create(info->scope(), info->isolate()->zone());
+            ScopeInfo::Create(info->scope(), info->zone());
         shared->set_scope_info(*scope_info);
         shared->set_code(*code);
         if (!function.is_null()) {
@@ -689,7 +731,7 @@
           // active as it makes no sense to compile optimized code then.
           if (FLAG_always_opt &&
               !Isolate::Current()->DebuggerHasBreakPoints()) {
-            CompilationInfo optimized(function);
+            CompilationInfoWithZone optimized(function);
             optimized.SetOptimizing(AstNode::kNoNumber);
             return CompileLazy(&optimized);
           }
@@ -708,7 +750,7 @@
 Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
                                                        Handle<Script> script) {
   // Precondition: code has been parsed and scopes have been analyzed.
-  CompilationInfo info(script);
+  CompilationInfoWithZone info(script);
   info.SetFunction(literal);
   info.SetScope(literal->scope());
   info.SetLanguageMode(literal->scope()->language_mode());
@@ -719,8 +761,14 @@
   // builtins cannot be handled lazily by the parser, since we have to know
   // if a function uses the special natives syntax, which is something the
   // parser records.
+  // If the debugger requests compilation for break points, we cannot be
+  // aggressive about lazy compilation, because it might trigger compilation
+  // of functions without an outer context when setting a breakpoint through
+  // Runtime::FindSharedFunctionInfoInScript.
+  bool allow_lazy_without_ctx = literal->AllowsLazyCompilationWithoutContext();
   bool allow_lazy = literal->AllowsLazyCompilation() &&
-      !LiveEditFunctionTracker::IsActive(info.isolate());
+      !LiveEditFunctionTracker::IsActive(info.isolate()) &&
+      (!info.isolate()->DebuggerHasBreakPoints() || allow_lazy_without_ctx);
 
   Handle<ScopeInfo> scope_info(ScopeInfo::Empty());
 
@@ -731,7 +779,7 @@
   } else if ((V8::UseCrankshaft() && MakeCrankshaftCode(&info)) ||
              (!V8::UseCrankshaft() && FullCodeGenerator::MakeCode(&info))) {
     ASSERT(!info.code().is_null());
-    scope_info = ScopeInfo::Create(info.scope(), info.isolate()->zone());
+    scope_info = ScopeInfo::Create(info.scope(), info.zone());
   } else {
     return Handle<SharedFunctionInfo>::null();
   }
@@ -745,12 +793,13 @@
   SetFunctionInfo(result, literal, false, script);
   RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, result);
   result->set_allows_lazy_compilation(allow_lazy);
+  result->set_allows_lazy_compilation_without_context(allow_lazy_without_ctx);
 
   // Set the expected number of properties for instances and return
   // the resulting function.
   SetExpectedNofPropertiesFromEstimate(result,
                                        literal->expected_property_count());
-  live_edit_tracker.RecordFunctionInfo(result, literal, info.isolate()->zone());
+  live_edit_tracker.RecordFunctionInfo(result, literal, info.zone());
   return result;
 }
 
@@ -777,6 +826,8 @@
       lit->has_only_simple_this_property_assignments(),
       *lit->this_property_assignments());
   function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
+  function_info->set_allows_lazy_compilation_without_context(
+      lit->AllowsLazyCompilationWithoutContext());
   function_info->set_language_mode(lit->language_mode());
   function_info->set_uses_arguments(lit->scope()->arguments() != NULL);
   function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
diff --git a/src/compiler.h b/src/compiler.h
index 44df9e0..26c0ac4 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -41,14 +41,17 @@
 // is constructed based on the resources available at compile-time.
 class CompilationInfo BASE_EMBEDDED {
  public:
-  explicit CompilationInfo(Handle<Script> script);
-  explicit CompilationInfo(Handle<SharedFunctionInfo> shared_info);
-  explicit CompilationInfo(Handle<JSFunction> closure);
+  CompilationInfo(Handle<Script> script, Zone* zone);
+  CompilationInfo(Handle<SharedFunctionInfo> shared_info, Zone* zone);
+  CompilationInfo(Handle<JSFunction> closure, Zone* zone);
 
   Isolate* isolate() {
     ASSERT(Isolate::Current() == isolate_);
     return isolate_;
   }
+  Zone* zone() {
+    return zone_;
+  }
   bool is_lazy() const { return IsLazy::decode(flags_); }
   bool is_eval() const { return IsEval::decode(flags_); }
   bool is_global() const { return IsGlobal::decode(flags_); }
@@ -184,8 +187,6 @@
     NONOPT
   };
 
-  CompilationInfo() : function_(NULL) {}
-
   void Initialize(Mode mode) {
     mode_ = V8::UseCrankshaft() ? mode : NONOPT;
     ASSERT(!script_.is_null());
@@ -254,10 +255,37 @@
   Mode mode_;
   int osr_ast_id_;
 
+  // The zone from which the compilation pipeline working on this
+  // CompilationInfo allocates.
+  Zone* zone_;
+
   DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
 };
 
 
+// Exactly like a CompilationInfo, except that it also creates and enters a
+// Zone on construction and deallocates it on exit.
+class CompilationInfoWithZone: public CompilationInfo {
+ public:
+  explicit CompilationInfoWithZone(Handle<Script> script)
+      : CompilationInfo(script, &zone_),
+        zone_(script->GetIsolate()),
+        zone_scope_(&zone_, DELETE_ON_EXIT) {}
+  explicit CompilationInfoWithZone(Handle<SharedFunctionInfo> shared_info)
+      : CompilationInfo(shared_info, &zone_),
+        zone_(shared_info->GetIsolate()),
+        zone_scope_(&zone_, DELETE_ON_EXIT) {}
+  explicit CompilationInfoWithZone(Handle<JSFunction> closure)
+      : CompilationInfo(closure, &zone_),
+        zone_(closure->GetIsolate()),
+        zone_scope_(&zone_, DELETE_ON_EXIT) {}
+
+ private:
+  Zone zone_;
+  ZoneScope zone_scope_;
+};
+
+
 // The V8 compiler
 //
 // General strategy: Source code is translated into an anonymous function w/o
diff --git a/src/d8.cc b/src/d8.cc
index 7a01d55..adfe667 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -284,9 +284,9 @@
   return Undefined();
 }
 
-static size_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
-  if (value_in->IsUint32()) {
-    return value_in->Uint32Value();
+static int32_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
+  if (value_in->IsInt32()) {
+    return value_in->Int32Value();
   }
 
   Local<Value> number = value_in->ToNumber();
@@ -312,7 +312,7 @@
     ThrowException(
         String::New("Array length exceeds maximum length."));
   }
-  return static_cast<size_t>(raw_value);
+  return raw_value;
 }
 
 
diff --git a/src/debug-debugger.js b/src/debug-debugger.js
index 91838e8..d1bafb4 100644
--- a/src/debug-debugger.js
+++ b/src/debug-debugger.js
@@ -1449,6 +1449,8 @@
         this.profileRequest_(request, response);
       } else if (request.command == 'changelive') {
         this.changeLiveRequest_(request, response);
+      } else if (request.command == 'restartframe') {
+        this.restartFrameRequest_(request, response);
       } else if (request.command == 'flags') {
         this.debuggerFlagsRequest_(request, response);
       } else if (request.command == 'v8flags') {
@@ -2358,9 +2360,6 @@
 
 DebugCommandProcessor.prototype.changeLiveRequest_ = function(
     request, response) {
-  if (!Debug.LiveEdit) {
-    return response.failed('LiveEdit feature is not supported');
-  }
   if (!request.arguments) {
     return response.failed('Missing arguments');
   }
@@ -2398,6 +2397,37 @@
 };
 
 
+DebugCommandProcessor.prototype.restartFrameRequest_ = function(
+    request, response) {
+  if (!request.arguments) {
+    return response.failed('Missing arguments');
+  }
+  var frame = request.arguments.frame;
+
+  // No frames to restart.
+  if (this.exec_state_.frameCount() == 0) {
+    return response.failed('No frames');
+  }
+
+  var frame_mirror;
+  // Check whether a frame was specified.
+  if (!IS_UNDEFINED(frame)) {
+    var frame_number = %ToNumber(frame);
+    if (frame_number < 0 || frame_number >= this.exec_state_.frameCount()) {
+      return response.failed('Invalid frame "' + frame + '"');
+    }
+    // Restart specified frame.
+    frame_mirror = this.exec_state_.frame(frame_number);
+  } else {
+    // Restart selected frame.
+    frame_mirror = this.exec_state_.frame();
+  }
+
+  var result_description = Debug.LiveEdit.RestartFrame(frame_mirror);
+  response.body = {result: result_description};
+};
+
+
 DebugCommandProcessor.prototype.debuggerFlagsRequest_ = function(request,
                                                                  response) {
   // Check for legal request.
diff --git a/src/debug.cc b/src/debug.cc
index 543ce9f..67d47c0 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -1170,14 +1170,16 @@
 }
 
 
-void Debug::SetBreakPoint(Handle<SharedFunctionInfo> shared,
+void Debug::SetBreakPoint(Handle<JSFunction> function,
                           Handle<Object> break_point_object,
                           int* source_position) {
   HandleScope scope(isolate_);
 
   PrepareForBreakPoints();
 
-  if (!EnsureDebugInfo(shared)) {
+  // Make sure the function is compiled and has set up the debug info.
+  Handle<SharedFunctionInfo> shared(function->shared());
+  if (!EnsureDebugInfo(shared, function)) {
     // Return if retrieving debug info failed.
     return;
   }
@@ -1198,6 +1200,51 @@
 }
 
 
+bool Debug::SetBreakPointForScript(Handle<Script> script,
+                                   Handle<Object> break_point_object,
+                                   int* source_position) {
+  HandleScope scope(isolate_);
+
+  // No need to call PrepareForBreakPoints because it will be called
+  // implicitly by Runtime::FindSharedFunctionInfoInScript.
+  Object* result = Runtime::FindSharedFunctionInfoInScript(isolate_,
+                                                           script,
+                                                           *source_position);
+  if (result->IsUndefined()) return false;
+
+  // Make sure the function has set up the debug info.
+  Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result));
+  if (!EnsureDebugInfo(shared, Handle<JSFunction>::null())) {
+    // Return if retrieving debug info failed.
+    return false;
+  }
+
+  // Find position within function. The script position might be before the
+  // source position of the first function.
+  int position;
+  if (shared->start_position() > *source_position) {
+    position = 0;
+  } else {
+    position = *source_position - shared->start_position();
+  }
+
+  Handle<DebugInfo> debug_info = GetDebugInfo(shared);
+  // Source positions start at zero.
+  ASSERT(position >= 0);
+
+  // Find the break point and change it.
+  BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS);
+  it.FindBreakLocationFromPosition(position);
+  it.SetBreakPoint(break_point_object);
+
+  *source_position = it.position() + shared->start_position();
+
+  // At least one active break point now.
+  ASSERT(debug_info->GetBreakPointCount() > 0);
+  return true;
+}
+
+
 void Debug::ClearBreakPoint(Handle<Object> break_point_object) {
   HandleScope scope(isolate_);
 
@@ -1249,10 +1296,12 @@
 }
 
 
-void Debug::FloodWithOneShot(Handle<SharedFunctionInfo> shared) {
+void Debug::FloodWithOneShot(Handle<JSFunction> function) {
   PrepareForBreakPoints();
-  // Make sure the function has set up the debug info.
-  if (!EnsureDebugInfo(shared)) {
+
+  // Make sure the function is compiled and has set up the debug info.
+  Handle<SharedFunctionInfo> shared(function->shared());
+  if (!EnsureDebugInfo(shared, function)) {
     // Return if we failed to retrieve the debug info.
     return;
   }
@@ -1272,8 +1321,8 @@
 
   if (!bindee.is_null() && bindee->IsJSFunction() &&
       !JSFunction::cast(*bindee)->IsBuiltin()) {
-    Handle<SharedFunctionInfo> shared_info(JSFunction::cast(*bindee)->shared());
-    Debug::FloodWithOneShot(shared_info);
+    Handle<JSFunction> bindee_function(JSFunction::cast(*bindee));
+    Debug::FloodWithOneShot(bindee_function);
   }
 }
 
@@ -1288,11 +1337,9 @@
   for (JavaScriptFrameIterator it(isolate_, id); !it.done(); it.Advance()) {
     JavaScriptFrame* frame = it.frame();
     if (frame->HasHandler()) {
-      Handle<SharedFunctionInfo> shared =
-          Handle<SharedFunctionInfo>(
-              JSFunction::cast(frame->function())->shared());
       // Flood the function holding the catch block with break points
-      FloodWithOneShot(shared);
+      JSFunction* function = JSFunction::cast(frame->function());
+      FloodWithOneShot(Handle<JSFunction>(function));
       return;
     }
   }
@@ -1359,14 +1406,14 @@
     frames_it.Advance();
     // Fill the function to return to with one-shot break points.
     JSFunction* function = JSFunction::cast(frames_it.frame()->function());
-    FloodWithOneShot(Handle<SharedFunctionInfo>(function->shared()));
+    FloodWithOneShot(Handle<JSFunction>(function));
     return;
   }
 
   // Get the debug info (create it if it does not exist).
-  Handle<SharedFunctionInfo> shared =
-      Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
-  if (!EnsureDebugInfo(shared)) {
+  Handle<JSFunction> function(JSFunction::cast(frame->function()));
+  Handle<SharedFunctionInfo> shared(function->shared());
+  if (!EnsureDebugInfo(shared, function)) {
     // Return if ensuring debug info failed.
     return;
   }
@@ -1436,7 +1483,7 @@
     if (!frames_it.done()) {
       // Fill the function to return to with one-shot break points.
       JSFunction* function = JSFunction::cast(frames_it.frame()->function());
-      FloodWithOneShot(Handle<SharedFunctionInfo>(function->shared()));
+      FloodWithOneShot(Handle<JSFunction>(function));
       // Set target frame pointer.
       ActivateStepOut(frames_it.frame());
     }
@@ -1446,7 +1493,7 @@
     // Step next or step min.
 
     // Fill the current function with one-shot break points.
-    FloodWithOneShot(shared);
+    FloodWithOneShot(function);
 
     // Remember source position and frame to handle step next.
     thread_local_.last_statement_position_ =
@@ -1458,9 +1505,7 @@
     if (is_at_restarted_function) {
       Handle<JSFunction> restarted_function(
           JSFunction::cast(*thread_local_.restarter_frame_function_pointer_));
-      Handle<SharedFunctionInfo> restarted_shared(
-          restarted_function->shared());
-      FloodWithOneShot(restarted_shared);
+      FloodWithOneShot(restarted_function);
     } else if (!call_function_stub.is_null()) {
       // If it's CallFunction stub ensure target function is compiled and flood
       // it with one shot breakpoints.
@@ -1502,7 +1547,7 @@
         } else if (!js_function->IsBuiltin()) {
           // Don't step into builtins.
           // It will also compile target function if it's not compiled yet.
-          FloodWithOneShot(Handle<SharedFunctionInfo>(js_function->shared()));
+          FloodWithOneShot(js_function);
         }
       }
     }
@@ -1511,7 +1556,7 @@
     // a call target as the function called might be a native function for
     // which step in will not stop. It also prepares for stepping in
     // getters/setters.
-    FloodWithOneShot(shared);
+    FloodWithOneShot(function);
 
     if (is_load_or_store) {
       // Remember source position and frame to handle step in getter/setter. If
@@ -1711,12 +1756,11 @@
         // function.
         if (!holder.is_null() && holder->IsJSFunction() &&
             !JSFunction::cast(*holder)->IsBuiltin()) {
-          Handle<SharedFunctionInfo> shared_info(
-              JSFunction::cast(*holder)->shared());
-          Debug::FloodWithOneShot(shared_info);
+          Handle<JSFunction> js_function = Handle<JSFunction>::cast(holder);
+          Debug::FloodWithOneShot(js_function);
         }
       } else {
-        Debug::FloodWithOneShot(Handle<SharedFunctionInfo>(function->shared()));
+        Debug::FloodWithOneShot(function);
       }
     }
   }
@@ -1796,7 +1840,7 @@
                                         Handle<Code> current_code) {
   ASSERT(!current_code->has_debug_break_slots());
 
-  CompilationInfo info(function);
+  CompilationInfoWithZone info(function);
   info.MarkCompilingForDebugging(current_code);
   ASSERT(!info.shared_info()->is_compiled());
   ASSERT(!info.isolate()->has_pending_exception());
@@ -1868,29 +1912,48 @@
       continue;
     }
 
-    intptr_t delta = frame->pc() - frame_code->instruction_start();
-    int debug_break_slot_count = 0;
-    int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT);
+    // Iterate over the RelocInfo in the original code to compute the sum
+    // of the constant pool sizes. (See Assembler::CheckConstPool())
+    // Note that this is only useful for architectures using constant pools.
+    int constpool_mask = RelocInfo::ModeMask(RelocInfo::CONST_POOL);
+    int frame_const_pool_size = 0;
+    for (RelocIterator it(*frame_code, constpool_mask); !it.done(); it.next()) {
+      RelocInfo* info = it.rinfo();
+      if (info->pc() >= frame->pc()) break;
+      frame_const_pool_size += static_cast<int>(info->data());
+    }
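+    // Subtracting the constant pool bytes yields an offset that is
+    // comparable between code objects with different constant pool layouts.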
+    intptr_t frame_offset =
+      frame->pc() - frame_code->instruction_start() - frame_const_pool_size;
+
+    // Iterate over the RelocInfo for new code to find the number of bytes
+    // generated for debug slots and constant pools.
+    int debug_break_slot_bytes = 0;
+    int new_code_const_pool_size = 0;
+    int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
+               RelocInfo::ModeMask(RelocInfo::CONST_POOL);
     for (RelocIterator it(*new_code, mask); !it.done(); it.next()) {
       // Check if the pc in the new code with debug break
       // slots is before this slot.
       RelocInfo* info = it.rinfo();
-      int debug_break_slot_bytes =
-          debug_break_slot_count * Assembler::kDebugBreakSlotLength;
-      intptr_t new_delta =
-          info->pc() -
-          new_code->instruction_start() -
-          debug_break_slot_bytes;
-      if (new_delta > delta) {
+      intptr_t new_offset = info->pc() - new_code->instruction_start() -
+                            new_code_const_pool_size - debug_break_slot_bytes;
+      if (new_offset >= frame_offset) {
         break;
       }
 
-      // Passed a debug break slot in the full code with debug
-      // break slots.
-      debug_break_slot_count++;
+      if (RelocInfo::IsDebugBreakSlot(info->rmode())) {
+        debug_break_slot_bytes += Assembler::kDebugBreakSlotLength;
+      } else {
+        ASSERT(RelocInfo::IsConstPool(info->rmode()));
+        // The size of the constant pool is encoded in the data.
+        new_code_const_pool_size += static_cast<int>(info->data());
+      }
     }
-    int debug_break_slot_bytes =
-        debug_break_slot_count * Assembler::kDebugBreakSlotLength;
+
+    // Compute the equivalent pc in the new code.
+    byte* new_pc = new_code->instruction_start() + frame_offset +
+                   debug_break_slot_bytes + new_code_const_pool_size;
+
     if (FLAG_trace_deopt) {
       PrintF("Replacing code %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
              "with %08" V8PRIxPTR " - %08" V8PRIxPTR " (%d) "
@@ -1907,14 +1970,12 @@
              new_code->instruction_size(),
              new_code->instruction_size(),
              reinterpret_cast<intptr_t>(frame->pc()),
-             reinterpret_cast<intptr_t>(new_code->instruction_start()) +
-             delta + debug_break_slot_bytes);
+             reinterpret_cast<intptr_t>(new_pc));
     }
 
     // Patch the return address to return into the code with
     // debug break slots.
-    frame->set_pc(
-        new_code->instruction_start() + delta + debug_break_slot_bytes);
+    frame->set_pc(new_pc);
   }
 }
 
@@ -1956,6 +2017,9 @@
     Handle<Code> lazy_compile =
         Handle<Code>(isolate_->builtins()->builtin(Builtins::kLazyCompile));
 
+    // There will be at least one break point when we are done.
+    has_break_points_ = true;
+
     // Keep the list of activated functions in a handlified list as it
     // is used both in GC and non-GC code.
     List<Handle<JSFunction> > active_functions(100);
@@ -2033,7 +2097,6 @@
         // Try to compile the full code with debug break slots. If it
         // fails just keep the current code.
         Handle<Code> current_code(function->shared()->code());
-        ZoneScope zone_scope(isolate_, DELETE_ON_EXIT);
         shared->set_code(*lazy_compile);
         bool prev_force_debugger_active =
             isolate_->debugger()->force_debugger_active();
@@ -2063,15 +2126,20 @@
 
 
 // Ensures the debug information is present for shared.
-bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared) {
+bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
+                            Handle<JSFunction> function) {
   // Return if we already have the debug info for shared.
   if (HasDebugInfo(shared)) {
     ASSERT(shared->is_compiled());
     return true;
   }
 
-  // Ensure shared in compiled. Return false if this failed.
-  if (!SharedFunctionInfo::EnsureCompiled(shared, CLEAR_EXCEPTION)) {
+  // There will be at least one break point when we are done.
+  has_break_points_ = true;
+
+  // Ensure the function is compiled. Return false if this fails.
+  if (!function.is_null() &&
+      !JSFunction::EnsureCompiled(function, CLEAR_EXCEPTION)) {
     return false;
   }
 
@@ -2083,9 +2151,6 @@
   node->set_next(debug_info_list_);
   debug_info_list_ = node;
 
-  // Now there is at least one break point.
-  has_break_points_ = true;
-
   return true;
 }
 
@@ -2127,9 +2192,9 @@
   PrepareForBreakPoints();
 
   // Get the executing function in which the debug break occurred.
-  Handle<SharedFunctionInfo> shared =
-      Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
-  if (!EnsureDebugInfo(shared)) {
+  Handle<JSFunction> function(JSFunction::cast(frame->function()));
+  Handle<SharedFunctionInfo> shared(function->shared());
+  if (!EnsureDebugInfo(shared, function)) {
     // Return if we failed to retrieve the debug info.
     return;
   }
@@ -2219,9 +2284,9 @@
   PrepareForBreakPoints();
 
   // Get the executing function in which the debug break occurred.
-  Handle<SharedFunctionInfo> shared =
-      Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared());
-  if (!EnsureDebugInfo(shared)) {
+  Handle<JSFunction> function(JSFunction::cast(frame->function()));
+  Handle<SharedFunctionInfo> shared(function->shared());
+  if (!EnsureDebugInfo(shared, function)) {
     // Return if we failed to retrieve the debug info.
     return false;
   }
diff --git a/src/debug.h b/src/debug.h
index d9c966c..c7c4b55 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -239,12 +239,15 @@
                                         int count,
                                         int end));
   Object* Break(Arguments args);
-  void SetBreakPoint(Handle<SharedFunctionInfo> shared,
+  void SetBreakPoint(Handle<JSFunction> function,
                      Handle<Object> break_point_object,
                      int* source_position);
+  bool SetBreakPointForScript(Handle<Script> script,
+                              Handle<Object> break_point_object,
+                              int* source_position);
   void ClearBreakPoint(Handle<Object> break_point_object);
   void ClearAllBreakPoints();
-  void FloodWithOneShot(Handle<SharedFunctionInfo> shared);
+  void FloodWithOneShot(Handle<JSFunction> function);
   void FloodBoundFunctionWithOneShot(Handle<JSFunction> function);
   void FloodHandlerWithOneShot();
   void ChangeBreakOnException(ExceptionBreakType type, bool enable);
@@ -260,8 +263,11 @@
 
   void PrepareForBreakPoints();
 
-  // Returns whether the operation succeeded.
-  bool EnsureDebugInfo(Handle<SharedFunctionInfo> shared);
+  // Returns whether the operation succeeded. Compilation can only be
+  // triggered if a valid closure is passed as the second argument;
+  // otherwise the shared function needs to be compiled already.
+  bool EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
+                       Handle<JSFunction> function);
 
   // Returns true if the current stub call is patched to call the debugger.
   static bool IsDebugBreak(Address addr);
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index 3debf55..f6eafd8 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -268,20 +268,29 @@
 
 void Deoptimizer::VisitAllOptimizedFunctionsForContext(
     Context* context, OptimizedFunctionVisitor* visitor) {
+  Isolate* isolate = context->GetIsolate();
+  ZoneScope zone_scope(isolate->runtime_zone(), DELETE_ON_EXIT);
   AssertNoAllocation no_allocation;
 
   ASSERT(context->IsGlobalContext());
 
   visitor->EnterContext(context);
-  // Run through the list of optimized functions and deoptimize them.
+
+  // Create a snapshot of the optimized functions list. This is needed because
+  // visitors might remove more than one link from the list at once.
+  ZoneList<JSFunction*> snapshot(1, isolate->runtime_zone());
   Object* element = context->OptimizedFunctionsListHead();
   while (!element->IsUndefined()) {
     JSFunction* element_function = JSFunction::cast(element);
-    // Get the next link before deoptimizing as deoptimizing will clear the
-    // next link.
+    snapshot.Add(element_function, isolate->runtime_zone());
     element = element_function->next_function_link();
-    visitor->VisitFunction(element_function);
   }
+
+  // Run through the snapshot of optimized functions and visit them.
+  for (int i = 0; i < snapshot.length(); ++i) {
+    visitor->VisitFunction(snapshot.at(i));
+  }
+
   visitor->LeaveContext(context);
 }
 
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index 9e8a549..120f9de 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -608,6 +608,9 @@
   static const char* StringFor(Opcode opcode);
 #endif
 
+  // A literal id which refers to the JSFunction itself.
+  static const int kSelfLiteralId = -239;
+
  private:
   TranslationBuffer* buffer_;
   int index_;
diff --git a/src/factory.cc b/src/factory.cc
index 28b318a..682125e 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -554,18 +554,44 @@
   }
 
   result->set_context(*context);
+
+  int index = FLAG_cache_optimized_code
+      ? function_info->SearchOptimizedCodeMap(context->global_context())
+      : -1;
   if (!function_info->bound()) {
-    int number_of_literals = function_info->num_literals();
-    Handle<FixedArray> literals = NewFixedArray(number_of_literals, pretenure);
-    if (number_of_literals > 0) {
-      // Store the object, regexp and array functions in the literals
-      // array prefix.  These functions will be used when creating
-      // object, regexp and array literals in this function.
-      literals->set(JSFunction::kLiteralGlobalContextIndex,
-                    context->global_context());
+    if (index > 0) {
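+      // The optimized code map holds (global context, code, literals)
+      // triples, so the cached literals follow the code at 'index'.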
+      FixedArray* code_map =
+          FixedArray::cast(function_info->optimized_code_map());
+      FixedArray* cached_literals = FixedArray::cast(code_map->get(index + 1));
+      ASSERT(cached_literals != NULL);
+      ASSERT(function_info->num_literals() == 0 ||
+             (code_map->get(index - 1) ==
+              cached_literals->get(JSFunction::kLiteralGlobalContextIndex)));
+      result->set_literals(cached_literals);
+    } else {
+      int number_of_literals = function_info->num_literals();
+      Handle<FixedArray> literals =
+          NewFixedArray(number_of_literals, pretenure);
+      if (number_of_literals > 0) {
+        // Store the object, regexp and array functions in the literals
+        // array prefix.  These functions will be used when creating
+        // object, regexp and array literals in this function.
+        literals->set(JSFunction::kLiteralGlobalContextIndex,
+                      context->global_context());
+      }
+      result->set_literals(*literals);
     }
-    result->set_literals(*literals);
   }
+
+  if (index > 0) {
+    // Caching of optimized code enabled and optimized code found.
+    Code* code = Code::cast(
+        FixedArray::cast(function_info->optimized_code_map())->get(index));
+    ASSERT(code != NULL);
+    result->ReplaceCode(code);
+    return result;
+  }
+
   if (V8::UseCrankshaft() &&
       FLAG_always_opt &&
       result->is_compiled() &&
@@ -699,7 +725,7 @@
         MaybeObject* maybe_arg = args->GetElement(i);
         Handle<String> arg_str(reinterpret_cast<String*>(maybe_arg));
         const char* arg = *arg_str->ToCString();
-        Vector<char> v2(p, space);
+        Vector<char> v2(p, static_cast<int>(space));
         OS::StrNCpy(v2, arg, space);
         space -= Min(space, strlen(arg));
         p = &buffer[kBufferSize] - space;
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 2b4c53c..125d151 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -206,6 +206,11 @@
 DEFINE_bool(trace_osr, false, "trace on-stack replacement")
 DEFINE_int(stress_runs, 0, "number of stress runs")
 DEFINE_bool(optimize_closures, true, "optimize closures")
+DEFINE_bool(lookup_sample_by_shared, true,
+            "when picking a function to optimize, watch for shared function "
+            "info, not JSFunction itself")
+DEFINE_bool(cache_optimized_code, true,
+            "cache optimized code for closures")
 DEFINE_bool(inline_construct, true, "inline constructor calls")
 DEFINE_bool(inline_arguments, true, "inline functions with arguments object")
 DEFINE_int(loop_weight, 1, "loop weight for representation inference")
@@ -446,6 +451,10 @@
               "file in which to serialize heap")
 #endif
 
+// mksnapshot.cc
+DEFINE_string(extra_code, NULL, "A filename with extra code to be included in"
+                  " the snapshot (mksnapshot only)")
+
 //
 // Dev shell flags
 //
@@ -605,6 +614,7 @@
 DEFINE_string(logfile, "v8.log", "Specify the name of the log file.")
 DEFINE_bool(ll_prof, false, "Enable low-level linux profiler.")
 
+
 //
 // Disassembler only flags
 //
diff --git a/src/flags.cc b/src/flags.cc
index 5720cbd..14c230a 100644
--- a/src/flags.cc
+++ b/src/flags.cc
@@ -343,6 +343,7 @@
 int FlagList::SetFlagsFromCommandLine(int* argc,
                                       char** argv,
                                       bool remove_flags) {
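+  // Remember the position of the first erroneous flag, but keep parsing so
+  // that the remaining flags are still processed (and removed if requested).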
+  int return_code = 0;
   // parse arguments
   for (int i = 1; i < *argc;) {
     int j = i;  // j > 0
@@ -368,7 +369,8 @@
         } else {
           fprintf(stderr, "Error: unrecognized flag %s\n"
                   "Try --help for options\n", arg);
-          return j;
+          return_code = j;
+          break;
         }
       }
 
@@ -382,7 +384,8 @@
           fprintf(stderr, "Error: missing value for flag %s of type %s\n"
                   "Try --help for options\n",
                   arg, Type2String(flag->type()));
-          return j;
+          return_code = j;
+          break;
         }
       }
 
@@ -424,7 +427,8 @@
         fprintf(stderr, "Error: illegal value for flag %s of type %s\n"
                 "Try --help for options\n",
                 arg, Type2String(flag->type()));
-        return j;
+        return_code = j;
+        break;
       }
 
       // remove the flag & value from the command
@@ -451,7 +455,7 @@
     exit(0);
   }
   // parsed all flags successfully
-  return 0;
+  return return_code;
 }
 
 
diff --git a/src/frames.cc b/src/frames.cc
index b7e0286..c801123 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -832,12 +832,23 @@
 }
 
 
+JSFunction* OptimizedFrame::LiteralAt(FixedArray* literal_array,
+                                      int literal_id) {
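+  // The frame's own function is not stored in the literal array; it is
+  // referenced by the special kSelfLiteralId instead.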
+  if (literal_id == Translation::kSelfLiteralId) {
+    return JSFunction::cast(function());
+  }
+
+  return JSFunction::cast(literal_array->get(literal_id));
+}
+
+
 void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
   ASSERT(frames->length() == 0);
   ASSERT(is_optimized());
 
   int deopt_index = Safepoint::kNoDeoptimizationIndex;
   DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
+  FixedArray* literal_array = data->LiteralArray();
 
   // BUG(3243555): Since we don't have a lazy-deopt registered at
   // throw-statements, we can't use the translation at the call-site of
@@ -865,10 +876,8 @@
     if (opcode == Translation::JS_FRAME) {
       i--;
       int ast_id = it.Next();
-      int function_id = it.Next();
+      JSFunction* function = LiteralAt(literal_array, it.Next());
       it.Next();  // Skip height.
-      JSFunction* function =
-          JSFunction::cast(data->LiteralArray()->get(function_id));
 
       // The translation commands are ordered and the receiver is always
       // at the first position. Since we are always at a call when we need
@@ -975,6 +984,7 @@
 
   int deopt_index = Safepoint::kNoDeoptimizationIndex;
   DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
+  FixedArray* literal_array = data->LiteralArray();
 
   TranslationIterator it(data->TranslationByteArray(),
                          data->TranslationIndex(deopt_index)->value());
@@ -990,10 +1000,8 @@
     if (opcode == Translation::JS_FRAME) {
       jsframe_count--;
       it.Next();  // Skip ast id.
-      int function_id = it.Next();
+      JSFunction* function = LiteralAt(literal_array, it.Next());
       it.Next();  // Skip height.
-      JSFunction* function =
-          JSFunction::cast(data->LiteralArray()->get(function_id));
       functions->Add(function);
     } else {
       // Skip over operands to advance to the next opcode.
diff --git a/src/frames.h b/src/frames.h
index 2d45932..30f7e1f 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -577,6 +577,8 @@
   inline explicit OptimizedFrame(StackFrameIterator* iterator);
 
  private:
+  JSFunction* LiteralAt(FixedArray* literal_array, int literal_id);
+
   friend class StackFrameIterator;
 };
 
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 4da4e53..e286b80 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -303,7 +303,7 @@
   masm.positions_recorder()->StartGDBJITLineInfoRecording();
 #endif
 
-  FullCodeGenerator cgen(&masm, info, isolate->zone());
+  FullCodeGenerator cgen(&masm, info);
   cgen.Generate();
   if (cgen.HasStackOverflow()) {
     ASSERT(!isolate->has_pending_exception());
@@ -315,7 +315,7 @@
   Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
   code->set_optimizable(info->IsOptimizable() &&
                         !info->function()->flags()->Contains(kDontOptimize) &&
-                        info->function()->scope()->AllowsLazyRecompilation());
+                        info->function()->scope()->AllowsLazyCompilation());
   cgen.PopulateDeoptimizationData(code);
   cgen.PopulateTypeFeedbackInfo(code);
   cgen.PopulateTypeFeedbackCells(code);
diff --git a/src/full-codegen.h b/src/full-codegen.h
index 928de47..a07df91 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -77,8 +77,7 @@
     TOS_REG
   };
 
-  FullCodeGenerator(MacroAssembler* masm, CompilationInfo* info,
-                    Zone* zone)
+  FullCodeGenerator(MacroAssembler* masm, CompilationInfo* info)
       : masm_(masm),
         info_(info),
         scope_(info->scope()),
@@ -87,12 +86,14 @@
         globals_(NULL),
         context_(NULL),
         bailout_entries_(info->HasDeoptimizationSupport()
-                         ? info->function()->ast_node_count() : 0, zone),
-        stack_checks_(2, zone),  // There's always at least one.
+                         ? info->function()->ast_node_count() : 0,
+                         info->zone()),
+        stack_checks_(2, info->zone()),  // There's always at least one.
         type_feedback_cells_(info->HasDeoptimizationSupport()
-                             ? info->function()->ast_node_count() : 0, zone),
+                             ? info->function()->ast_node_count() : 0,
+                             info->zone()),
         ic_total_count_(0),
-        zone_(zone) { }
+        zone_(info->zone()) { }
 
   static bool MakeCode(CompilationInfo* info);
 
diff --git a/src/heap.cc b/src/heap.cc
index 172405b..df8dde6 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -118,8 +118,8 @@
       debug_utils_(NULL),
 #endif  // DEBUG
       new_space_high_promotion_mode_active_(false),
-      old_gen_promotion_limit_(kMinimumPromotionLimit),
-      old_gen_allocation_limit_(kMinimumAllocationLimit),
+      old_gen_promotion_limit_(kMinPromotionLimit),
+      old_gen_allocation_limit_(kMinAllocationLimit),
       old_gen_limit_factor_(1),
       size_of_old_gen_at_last_old_space_gc_(0),
       external_allocation_limit_(0),
@@ -829,9 +829,9 @@
     }
 
     old_gen_promotion_limit_ =
-        OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
+        OldGenLimit(size_of_old_gen_at_last_old_space_gc_, kMinPromotionLimit);
     old_gen_allocation_limit_ =
-        OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
+        OldGenLimit(size_of_old_gen_at_last_old_space_gc_, kMinAllocationLimit);
 
     old_gen_exhausted_ = false;
   } else {
@@ -2822,7 +2822,7 @@
   // The idea is to have a small number string cache in the snapshot to keep
   // boot-time memory usage down.  If we expand the number string cache already
   // while creating the snapshot then that didn't work out.
-  ASSERT(!Serializer::enabled());
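+  // With --extra-code, mksnapshot runs extra script before serializing,
+  // which may legitimately grow the cache while the serializer is enabled.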
+  ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
   MaybeObject* maybe_obj =
       AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
   Object* new_cache;
@@ -3010,6 +3010,7 @@
   share->set_name(name);
   Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
   share->set_code(illegal);
+  share->ClearOptimizedCodeMap();
   share->set_scope_info(ScopeInfo::Empty());
   Code* construct_stub =
       isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
@@ -6635,7 +6636,7 @@
   ASSERT((search_target_ == kAnyGlobalObject) ||
          search_target_->IsHeapObject());
   found_target_in_trace_ = false;
-  object_stack_.Clear();
+  Reset();
 
   MarkVisitor mark_visitor(this);
   MarkRecursively(root, &mark_visitor);
@@ -6739,11 +6740,7 @@
     for (int i = 0; i < object_stack_.length(); i++) {
       if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
       Object* obj = object_stack_[i];
-#ifdef OBJECT_PRINT
       obj->Print();
-#else
-      obj->ShortPrint();
-#endif
     }
     PrintF("=====================================\n");
   }
diff --git a/src/heap.h b/src/heap.h
index dd1f710..93978ea 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1359,24 +1359,13 @@
     return max_old_generation_size_ - PromotedTotalSize();
   }
 
-  static const intptr_t kMinimumPromotionLimit = 5 * Page::kPageSize;
-  static const intptr_t kMinimumAllocationLimit =
+  static const intptr_t kMinPromotionLimit = 5 * Page::kPageSize;
+  static const intptr_t kMinAllocationLimit =
       8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
 
-  intptr_t OldGenPromotionLimit(intptr_t old_gen_size) {
-    const int divisor = FLAG_stress_compaction ? 10 : 3;
-    intptr_t limit =
-        Max(old_gen_size + old_gen_size / divisor, kMinimumPromotionLimit);
-    limit += new_space_.Capacity();
-    limit *= old_gen_limit_factor_;
-    intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
-    return Min(limit, halfway_to_the_max);
-  }
-
-  intptr_t OldGenAllocationLimit(intptr_t old_gen_size) {
+  intptr_t OldGenLimit(intptr_t old_gen_size, intptr_t min_limit) {
     const int divisor = FLAG_stress_compaction ? 8 : 2;
-    intptr_t limit =
-        Max(old_gen_size + old_gen_size / divisor, kMinimumAllocationLimit);
+    intptr_t limit = Max(old_gen_size + old_gen_size / divisor, min_limit);
     limit += new_space_.Capacity();
     limit *= old_gen_limit_factor_;
     intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 780d57d..0920024 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -1469,7 +1469,7 @@
 
 class HThisFunction: public HTemplateInstruction<0> {
  public:
-  explicit HThisFunction(Handle<JSFunction> closure) : closure_(closure) {
+  HThisFunction() {
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
   }
@@ -1478,18 +1478,10 @@
     return Representation::None();
   }
 
-  Handle<JSFunction> closure() const { return closure_; }
-
   DECLARE_CONCRETE_INSTRUCTION(ThisFunction)
 
  protected:
-  virtual bool DataEquals(HValue* other) {
-    HThisFunction* b = HThisFunction::cast(other);
-    return *closure() == *b->closure();
-  }
-
- private:
-  Handle<JSFunction> closure_;
+  virtual bool DataEquals(HValue* other) { return true; }
 };
 
 
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 61488af..a25c6db 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -605,8 +605,7 @@
 
 
 HGraphBuilder::HGraphBuilder(CompilationInfo* info,
-                             TypeFeedbackOracle* oracle,
-                             Zone* zone)
+                             TypeFeedbackOracle* oracle)
     : function_state_(NULL),
       initial_function_state_(this, info, oracle, NORMAL_RETURN),
       ast_context_(NULL),
@@ -614,8 +613,8 @@
       graph_(NULL),
       current_block_(NULL),
       inlined_count_(0),
-      globals_(10, zone),
-      zone_(zone),
+      globals_(10, info->zone()),
+      zone_(info->zone()),
       inline_bailout_(false) {
   // This is not initialized in the initializer list because the
   // constructor for the initial state relies on function_state_ == NULL
@@ -674,24 +673,25 @@
 }
 
 
-HGraph::HGraph(CompilationInfo* info, Zone* zone)
+HGraph::HGraph(CompilationInfo* info)
     : isolate_(info->isolate()),
       next_block_id_(0),
       entry_block_(NULL),
-      blocks_(8, zone),
-      values_(16, zone),
+      blocks_(8, info->zone()),
+      values_(16, info->zone()),
       phi_list_(NULL),
-      zone_(zone),
+      info_(info),
+      zone_(info->zone()),
       is_recursive_(false) {
   start_environment_ =
-      new(zone) HEnvironment(NULL, info->scope(), info->closure(), zone);
+      new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
   start_environment_->set_ast_id(AstNode::kFunctionEntryId);
   entry_block_ = CreateBasicBlock();
   entry_block_->SetInitialEnvironment(start_environment_);
 }
 
 
-Handle<Code> HGraph::Compile(CompilationInfo* info, Zone* zone) {
+Handle<Code> HGraph::Compile() {
   int values = GetMaximumValueID();
   if (values > LUnallocated::kMaxVirtualRegisters) {
     if (FLAG_trace_bailout) {
@@ -700,7 +700,7 @@
     return Handle<Code>::null();
   }
   LAllocator allocator(values, this);
-  LChunkBuilder builder(info, this, &allocator);
+  LChunkBuilder builder(info(), this, &allocator);
   LChunk* chunk = builder.Build();
   if (chunk == NULL) return Handle<Code>::null();
 
@@ -711,8 +711,8 @@
     return Handle<Code>::null();
   }
 
-  MacroAssembler assembler(info->isolate(), NULL, 0);
-  LCodeGen generator(chunk, &assembler, info, zone);
+  MacroAssembler assembler(isolate(), NULL, 0);
+  LCodeGen generator(chunk, &assembler, info());
 
   chunk->MarkEmptyBlocks();
 
@@ -720,12 +720,12 @@
     if (FLAG_trace_codegen) {
       PrintF("Crankshaft Compiler - ");
     }
-    CodeGenerator::MakeCodePrologue(info);
+    CodeGenerator::MakeCodePrologue(info());
     Code::Flags flags = Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
     Handle<Code> code =
-        CodeGenerator::MakeCodeEpilogue(&assembler, flags, info);
+        CodeGenerator::MakeCodeEpilogue(&assembler, flags, info());
     generator.FinishCode(code);
-    CodeGenerator::PrintCode(code, info);
+    CodeGenerator::PrintCode(code, info());
     return code;
   }
   return Handle<Code>::null();
@@ -752,6 +752,300 @@
   }
 }
 
+// Block ordering was implemented with two mutually recursive methods,
+// HGraph::Postorder and HGraph::PostorderLoopBlocks.
+// The recursion could lead to stack overflow, so the algorithm has been
+// implemented iteratively.
+// At a high level the algorithm looks like this:
+//
+// Postorder(block, loop_header) {
+//   if (block has already been visited or is of another loop) return;
+//   mark block as visited;
+//   if (block is a loop header) {
+//     VisitLoopMembers(block, loop_header);
+//     VisitSuccessorsOfLoopHeader(block);
+//   } else {
+//     VisitSuccessors(block, loop_header);
+//   }
+//   put block in result list;
+// }
+//
+// VisitLoopMembers(block, outer_loop_header) {
+//   foreach (block b in block loop members) {
+//     VisitSuccessorsOfLoopMember(b, outer_loop_header);
+//     if (b is loop header) VisitLoopMembers(b, outer_loop_header);
+//   }
+// }
+//
+// VisitSuccessorsOfLoopMember(block, outer_loop_header) {
+//   foreach (block b in block successors) Postorder(b, outer_loop_header)
+// }
+//
+// VisitSuccessorsOfLoopHeader(block) {
+//   foreach (block b in block successors) Postorder(b, block)
+// }
+//
+// VisitSuccessors(block, loop_header) {
+//   foreach (block b in block successors) Postorder(b, loop_header)
+// }
+//
+// The ordering is started by calling Postorder(entry, NULL).
+//
+// Each instance of PostorderProcessor represents the "stack frame" of the
+// recursion; in particular it keeps the state of the loop (iteration) of the
+// "Visit..." function it represents.
+// To recycle memory we keep all the frames in a doubly linked list, but
+// this means that we cannot use constructors to initialize the frames.
+//
+class PostorderProcessor : public ZoneObject {
+ public:
+  // Back link (towards the stack bottom).
+  PostorderProcessor* parent() { return father_; }
+  // Forward link (towards the stack top).
+  PostorderProcessor* child() { return child_; }
+  HBasicBlock* block() { return block_; }
+  HLoopInformation* loop() { return loop_; }
+  HBasicBlock* loop_header() { return loop_header_; }
+
+  static PostorderProcessor* CreateEntryProcessor(Zone* zone,
+                                                  HBasicBlock* block,
+                                                  BitVector* visited) {
+    PostorderProcessor* result = new(zone) PostorderProcessor(NULL);
+    return result->SetupSuccessors(zone, block, NULL, visited);
+  }
+
+  PostorderProcessor* PerformStep(Zone* zone,
+                                  BitVector* visited,
+                                  ZoneList<HBasicBlock*>* order) {
+    PostorderProcessor* next =
+        PerformNonBacktrackingStep(zone, visited, order);
+    if (next != NULL) {
+      return next;
+    } else {
+      return Backtrack(zone, visited, order);
+    }
+  }
+
+ private:
+  explicit PostorderProcessor(PostorderProcessor* father)
+      : father_(father), child_(NULL), successor_iterator(NULL) { }
+
+  // Each enum value names the cycle whose state this instance keeps.
+  enum LoopKind {
+    NONE,
+    SUCCESSORS,
+    SUCCESSORS_OF_LOOP_HEADER,
+    LOOP_MEMBERS,
+    SUCCESSORS_OF_LOOP_MEMBER
+  };
+
+  // Each "Setup..." method is like a constructor for a cycle state.
+  PostorderProcessor* SetupSuccessors(Zone* zone,
+                                      HBasicBlock* block,
+                                      HBasicBlock* loop_header,
+                                      BitVector* visited) {
+    if (block == NULL || visited->Contains(block->block_id()) ||
+        block->parent_loop_header() != loop_header) {
+      kind_ = NONE;
+      block_ = NULL;
+      loop_ = NULL;
+      loop_header_ = NULL;
+      return this;
+    } else {
+      block_ = block;
+      loop_ = NULL;
+      visited->Add(block->block_id());
+
+      if (block->IsLoopHeader()) {
+        kind_ = SUCCESSORS_OF_LOOP_HEADER;
+        loop_header_ = block;
+        InitializeSuccessors();
+        PostorderProcessor* result = Push(zone);
+        return result->SetupLoopMembers(zone, block, block->loop_information(),
+                                        loop_header);
+      } else {
+        ASSERT(block->IsFinished());
+        kind_ = SUCCESSORS;
+        loop_header_ = loop_header;
+        InitializeSuccessors();
+        return this;
+      }
+    }
+  }
+
+  PostorderProcessor* SetupLoopMembers(Zone* zone,
+                                       HBasicBlock* block,
+                                       HLoopInformation* loop,
+                                       HBasicBlock* loop_header) {
+    kind_ = LOOP_MEMBERS;
+    block_ = block;
+    loop_ = loop;
+    loop_header_ = loop_header;
+    InitializeLoopMembers();
+    return this;
+  }
+
+  PostorderProcessor* SetupSuccessorsOfLoopMember(
+      HBasicBlock* block,
+      HLoopInformation* loop,
+      HBasicBlock* loop_header) {
+    kind_ = SUCCESSORS_OF_LOOP_MEMBER;
+    block_ = block;
+    loop_ = loop;
+    loop_header_ = loop_header;
+    InitializeSuccessors();
+    return this;
+  }
+
+  // This method "allocates" a new stack frame.
+  PostorderProcessor* Push(Zone* zone) {
+    if (child_ == NULL) {
+      child_ = new(zone) PostorderProcessor(this);
+    }
+    return child_;
+  }
+
+  void ClosePostorder(ZoneList<HBasicBlock*>* order, Zone* zone) {
+    ASSERT(block_->end()->FirstSuccessor() == NULL ||
+           order->Contains(block_->end()->FirstSuccessor()) ||
+           block_->end()->FirstSuccessor()->IsLoopHeader());
+    ASSERT(block_->end()->SecondSuccessor() == NULL ||
+           order->Contains(block_->end()->SecondSuccessor()) ||
+           block_->end()->SecondSuccessor()->IsLoopHeader());
+    order->Add(block_, zone);
+  }
+
+  // This method is the basic building block for walking up the stack.
+  PostorderProcessor* Pop(Zone* zone,
+                          BitVector* visited,
+                          ZoneList<HBasicBlock*>* order) {
+    switch (kind_) {
+      case SUCCESSORS:
+      case SUCCESSORS_OF_LOOP_HEADER:
+        ClosePostorder(order, zone);
+        return father_;
+      case LOOP_MEMBERS:
+        return father_;
+      case SUCCESSORS_OF_LOOP_MEMBER:
+        if (block()->IsLoopHeader() && block() != loop_->loop_header()) {
+          // In this case we need to perform a LOOP_MEMBERS cycle so we
+          // initialize it and return this instead of father.
+          return SetupLoopMembers(zone, block(),
+                                  block()->loop_information(), loop_header_);
+        } else {
+          return father_;
+        }
+      case NONE:
+        return father_;
+    }
+    UNREACHABLE();
+    return NULL;
+  }
+
+  // Walks up the stack.
+  PostorderProcessor* Backtrack(Zone* zone,
+                                BitVector* visited,
+                                ZoneList<HBasicBlock*>* order) {
+    PostorderProcessor* parent = Pop(zone, visited, order);
+    while (parent != NULL) {
+      PostorderProcessor* next =
+          parent->PerformNonBacktrackingStep(zone, visited, order);
+      if (next != NULL) {
+        return next;
+      } else {
+        parent = parent->Pop(zone, visited, order);
+      }
+    }
+    return NULL;
+  }
+
+  PostorderProcessor* PerformNonBacktrackingStep(
+      Zone* zone,
+      BitVector* visited,
+      ZoneList<HBasicBlock*>* order) {
+    HBasicBlock* next_block;
+    switch (kind_) {
+      case SUCCESSORS:
+        next_block = AdvanceSuccessors();
+        if (next_block != NULL) {
+          PostorderProcessor* result = Push(zone);
+          return result->SetupSuccessors(zone, next_block,
+                                         loop_header_, visited);
+        }
+        break;
+      case SUCCESSORS_OF_LOOP_HEADER:
+        next_block = AdvanceSuccessors();
+        if (next_block != NULL) {
+          PostorderProcessor* result = Push(zone);
+          return result->SetupSuccessors(zone, next_block,
+                                         block(), visited);
+        }
+        break;
+      case LOOP_MEMBERS:
+        next_block = AdvanceLoopMembers();
+        if (next_block != NULL) {
+          PostorderProcessor* result = Push(zone);
+          return result->SetupSuccessorsOfLoopMember(next_block,
+                                                     loop_, loop_header_);
+        }
+        break;
+      case SUCCESSORS_OF_LOOP_MEMBER:
+        next_block = AdvanceSuccessors();
+        if (next_block != NULL) {
+          PostorderProcessor* result = Push(zone);
+          return result->SetupSuccessors(zone, next_block,
+                                         loop_header_, visited);
+        }
+        break;
+      case NONE:
+        return NULL;
+    }
+    return NULL;
+  }
+
+  // The following two methods implement a "foreach b in successors" cycle.
+  void InitializeSuccessors() {
+    loop_index = 0;
+    loop_length = 0;
+    successor_iterator = HSuccessorIterator(block_->end());
+  }
+
+  HBasicBlock* AdvanceSuccessors() {
+    if (!successor_iterator.Done()) {
+      HBasicBlock* result = successor_iterator.Current();
+      successor_iterator.Advance();
+      return result;
+    }
+    return NULL;
+  }
+
+  // The following two methods implement a "foreach b in loop members" cycle.
+  void InitializeLoopMembers() {
+    loop_index = 0;
+    loop_length = loop_->blocks()->length();
+  }
+
+  HBasicBlock* AdvanceLoopMembers() {
+    if (loop_index < loop_length) {
+      HBasicBlock* result = loop_->blocks()->at(loop_index);
+      loop_index++;
+      return result;
+    } else {
+      return NULL;
+    }
+  }
+
+  LoopKind kind_;
+  PostorderProcessor* father_;
+  PostorderProcessor* child_;
+  HLoopInformation* loop_;
+  HBasicBlock* block_;
+  HBasicBlock* loop_header_;
+  int loop_index;
+  int loop_length;
+  HSuccessorIterator successor_iterator;
+};
+
 
 void HGraph::OrderBlocks() {
   HPhase phase("H_Block ordering");
@@ -759,8 +1053,11 @@
 
   ZoneList<HBasicBlock*> reverse_result(8, zone());
   HBasicBlock* start = blocks_[0];
-  Postorder(start, &visited, &reverse_result, NULL);
-
+  PostorderProcessor* postorder =
+      PostorderProcessor::CreateEntryProcessor(zone(), start, &visited);
+  while (postorder != NULL) {
+    postorder = postorder->PerformStep(zone(), &visited, &reverse_result);
+  }
   blocks_.Rewind(0);
   int index = 0;
   for (int i = reverse_result.length() - 1; i >= 0; --i) {
@@ -771,50 +1068,6 @@
 }
 
 
-void HGraph::PostorderLoopBlocks(HLoopInformation* loop,
-                                 BitVector* visited,
-                                 ZoneList<HBasicBlock*>* order,
-                                 HBasicBlock* loop_header) {
-  for (int i = 0; i < loop->blocks()->length(); ++i) {
-    HBasicBlock* b = loop->blocks()->at(i);
-    for (HSuccessorIterator it(b->end()); !it.Done(); it.Advance()) {
-      Postorder(it.Current(), visited, order, loop_header);
-    }
-    if (b->IsLoopHeader() && b != loop->loop_header()) {
-      PostorderLoopBlocks(b->loop_information(), visited, order, loop_header);
-    }
-  }
-}
-
-
-void HGraph::Postorder(HBasicBlock* block,
-                       BitVector* visited,
-                       ZoneList<HBasicBlock*>* order,
-                       HBasicBlock* loop_header) {
-  if (block == NULL || visited->Contains(block->block_id())) return;
-  if (block->parent_loop_header() != loop_header) return;
-  visited->Add(block->block_id());
-  if (block->IsLoopHeader()) {
-    PostorderLoopBlocks(block->loop_information(), visited, order, loop_header);
-    for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
-      Postorder(it.Current(), visited, order, block);
-    }
-  } else {
-    ASSERT(block->IsFinished());
-    for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
-      Postorder(it.Current(), visited, order, loop_header);
-    }
-  }
-  ASSERT(block->end()->FirstSuccessor() == NULL ||
-         order->Contains(block->end()->FirstSuccessor()) ||
-         block->end()->FirstSuccessor()->IsLoopHeader());
-  ASSERT(block->end()->SecondSuccessor() == NULL ||
-         order->Contains(block->end()->SecondSuccessor()) ||
-         block->end()->SecondSuccessor()->IsLoopHeader());
-  order->Add(block, zone());
-}
-
-
 void HGraph::AssignDominators() {
   HPhase phase("H_Assign dominators", this);
   for (int i = 0; i < blocks_.length(); ++i) {
@@ -2795,7 +3048,7 @@
 
 
 HGraph* HGraphBuilder::CreateGraph() {
-  graph_ = new(zone()) HGraph(info(), zone());
+  graph_ = new(zone()) HGraph(info());
   if (FLAG_hydrogen_stats) HStatistics::Instance()->Initialize(info());
 
   {
@@ -4698,7 +4951,7 @@
   // If the property does not exist yet, we have to check that it wasn't made
   // readonly or turned into a setter by some meanwhile modifications on the
   // prototype chain.
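+  // A null prototype ends the chain, so there is nothing left to check.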
-  if (!lookup->IsProperty()) {
+  if (!lookup->IsProperty() && type->prototype()->IsJSReceiver()) {
     Object* proto = type->prototype();
     // First check that the prototype chain isn't affected already.
     LookupResult proto_result(isolate());
@@ -6223,7 +6476,7 @@
   }
 
   // Parse and allocate variables.
-  CompilationInfo target_info(target);
+  CompilationInfo target_info(target, zone());
   if (!ParserApi::Parse(&target_info, kNoParsingFlags) ||
       !Scope::Analyze(&target_info)) {
     if (target_info.isolate()->has_pending_exception()) {
@@ -7963,13 +8216,25 @@
 }
 
 
+HInstruction* HGraphBuilder::BuildThisFunction() {
+  // If we share optimized code between different closures, the
+  // this-function is not a constant, except inside an inlined body.
+  if (function_state()->outer() != NULL) {
+    return new(zone()) HConstant(
+        function_state()->compilation_info()->closure(),
+        Representation::Tagged());
+  } else {
+    return new(zone()) HThisFunction;
+  }
+}
+
+
 void HGraphBuilder::VisitThisFunction(ThisFunction* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
-  HThisFunction* self = new(zone()) HThisFunction(
-      function_state()->compilation_info()->closure());
-  return ast_context()->ReturnInstruction(self, expr->id());
+  HInstruction* instr = BuildThisFunction();
+  return ast_context()->ReturnInstruction(instr, expr->id());
 }
 
 
@@ -8789,8 +9054,6 @@
     bool is_construct) const {
   ASSERT(frame_type() == JS_FUNCTION);
 
-  Zone* zone = closure()->GetIsolate()->zone();
-
   // Outer environment is a copy of this one without the arguments.
   int arity = function->scope()->num_parameters();
 
@@ -8811,7 +9074,7 @@
   }
 
   HEnvironment* inner =
-      new(zone) HEnvironment(outer, function->scope(), target, zone);
+      new(zone()) HEnvironment(outer, function->scope(), target, zone());
   // Get the argument values from the original environment.
   for (int i = 0; i <= arity; ++i) {  // Include receiver.
     HValue* push = (i <= arguments) ?
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 6fa3d1b..abaa440 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -244,10 +244,11 @@
 class BoundsCheckTable;
 class HGraph: public ZoneObject {
  public:
-  HGraph(CompilationInfo* info, Zone* zone);
+  explicit HGraph(CompilationInfo* info);
 
   Isolate* isolate() { return isolate_; }
   Zone* zone() const { return zone_; }
+  CompilationInfo* info() const { return info_; }
 
   const ZoneList<HBasicBlock*>* blocks() const { return &blocks_; }
   const ZoneList<HPhi*>* phi_list() const { return phi_list_; }
@@ -280,7 +281,7 @@
 
   void CollectPhis();
 
-  Handle<Code> Compile(CompilationInfo* info, Zone* zone);
+  Handle<Code> Compile();
 
   void set_undefined_constant(HConstant* constant) {
     undefined_constant_.set(constant);
@@ -345,14 +346,6 @@
   }
 
  private:
-  void Postorder(HBasicBlock* block,
-                 BitVector* visited,
-                 ZoneList<HBasicBlock*>* order,
-                 HBasicBlock* loop_header);
-  void PostorderLoopBlocks(HLoopInformation* loop,
-                           BitVector* visited,
-                           ZoneList<HBasicBlock*>* order,
-                           HBasicBlock* loop_header);
   HConstant* GetConstant(SetOncePointer<HConstant>* pointer,
                          Object* value);
 
@@ -388,6 +381,7 @@
   SetOncePointer<HBasicBlock> osr_loop_entry_;
   SetOncePointer<ZoneList<HUnknownOSRValue*> > osr_values_;
 
+  CompilationInfo* info_;
   Zone* zone_;
 
   bool is_recursive_;
@@ -838,7 +832,7 @@
     BreakAndContinueScope* next_;
   };
 
-  HGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle, Zone* zone);
+  HGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle);
 
   HGraph* CreateGraph();
 
@@ -1156,6 +1150,8 @@
 
   HValue* BuildContextChainWalk(Variable* var);
 
+  HInstruction* BuildThisFunction();
+
   void AddCheckConstantFunction(Call* expr,
                                 HValue* receiver,
                                 Handle<Map> receiver_map,
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index df04b28..afa3e1c 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -66,9 +66,13 @@
 void FastNewClosureStub::Generate(MacroAssembler* masm) {
   // Create a new closure from the given function info in new
   // space. Set the context to the current context in esi.
+  Counters* counters = masm->isolate()->counters();
+
   Label gc;
   __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);
 
+  __ IncrementCounter(counters->fast_new_closure_total(), 1);
+
   // Get the function info from the stack.
   __ mov(edx, Operand(esp, 1 * kPointerSize));
 
@@ -80,8 +84,8 @@
   // as the map of the allocated object.
   __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
-  __ mov(ecx, Operand(ecx, Context::SlotOffset(map_index)));
-  __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);
+  __ mov(ebx, Operand(ecx, Context::SlotOffset(map_index)));
+  __ mov(FieldOperand(eax, JSObject::kMapOffset), ebx);
 
   // Initialize the rest of the function. We don't have to update the
   // write barrier because the allocated object is in new space.
@@ -94,11 +98,20 @@
   __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
   __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
   __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
-  __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset),
-         Immediate(factory->undefined_value()));
 
   // Initialize the code pointer in the function to be the one
   // found in the shared function info object.
+  // But first check if there is an optimized version for our context.
+  Label check_optimized;
+  Label install_unoptimized;
+  if (FLAG_cache_optimized_code) {
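+    // An empty optimized code map is a Smi zero; anything else is a
+    // FixedArray that may hold an entry for the current context.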
+    __ mov(ebx, FieldOperand(edx, SharedFunctionInfo::kOptimizedCodeMapOffset));
+    __ test(ebx, ebx);
+    __ j(not_zero, &check_optimized, Label::kNear);
+  }
+  __ bind(&install_unoptimized);
+  __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset),
+         Immediate(factory->undefined_value()));
   __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
   __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
   __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);
@@ -106,6 +119,68 @@
   // Return and remove the on-stack parameter.
   __ ret(1 * kPointerSize);
 
+  __ bind(&check_optimized);
+
+  __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1);
+
+  // ecx holds the global context, ebx points to a fixed array of 3-element
+  // entries (global context, optimized code, literals).
+  // The map must never be empty, so check the first entry.
+  Label install_optimized;
+  // Speculatively move code object into edx.
+  __ mov(edx, FieldOperand(ebx, FixedArray::kHeaderSize + kPointerSize));
+  __ cmp(ecx, FieldOperand(ebx, FixedArray::kHeaderSize));
+  __ j(equal, &install_optimized);
+
+  // Iterate backwards through the rest of the map; edx holds a Smi index.
+  Label loop;
+  Label restore;
+  __ mov(edx, FieldOperand(ebx, FixedArray::kLengthOffset));
+  __ bind(&loop);
+  // Do not double-check the first entry.
+  __ cmp(edx, Immediate(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+  __ j(equal, &restore);
+  __ sub(edx, Immediate(Smi::FromInt(
+      SharedFunctionInfo::kEntryLength)));  // Skip an entry.
+  __ cmp(ecx, CodeGenerator::FixedArrayElementOperand(ebx, edx, 0));
+  __ j(not_equal, &loop, Label::kNear);
+  // Hit: fetch the optimized code.
+  __ mov(edx, CodeGenerator::FixedArrayElementOperand(ebx, edx, 1));
+
+  __ bind(&install_optimized);
+  __ IncrementCounter(counters->fast_new_closure_install_optimized(), 1);
+
+  // TODO(fschneider): Idea: store proper code pointers in the optimized code
+  // map and either unmangle them on marking or do nothing as the whole map is
+  // discarded on major GC anyway.
+  __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
+  __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);
+
+  // Now link a function into a list of optimized functions.
+  __ mov(edx, ContextOperand(ecx, Context::OPTIMIZED_FUNCTIONS_LIST));
+
+  __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset), edx);
+  // No need for write barrier as JSFunction (eax) is in the new space.
+
+  __ mov(ContextOperand(ecx, Context::OPTIMIZED_FUNCTIONS_LIST), eax);
+  // Store JSFunction (eax) into edx before issuing write barrier as
+  // it clobbers all the registers passed.
+  __ mov(edx, eax);
+  __ RecordWriteContextSlot(
+      ecx,
+      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
+      edx,
+      ebx,
+      kDontSaveFPRegs);
+
+  // Return and remove the on-stack parameter.
+  __ ret(1 * kPointerSize);
+
+  __ bind(&restore);
+  // Restore SharedFunctionInfo into edx.
+  __ mov(edx, Operand(esp, 1 * kPointerSize));
+  __ jmp(&install_unoptimized);
+
   // Create a new closure through the slower runtime call.
   __ bind(&gc);
   __ pop(ecx);  // Temporarily remove return address.
@@ -7073,6 +7148,8 @@
   { REG(edx), REG(eax), REG(edi), EMIT_REMEMBERED_SET},
   // StoreArrayLiteralElementStub::Generate
   { REG(ebx), REG(eax), REG(ecx), EMIT_REMEMBERED_SET},
+  // FastNewClosureStub
+  { REG(ecx), REG(edx), REG(ebx), EMIT_REMEMBERED_SET},
   // Null termination.
   { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
 };
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index 326207f..32421ae 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -117,6 +117,10 @@
 void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
   if (!function->IsOptimized()) return;
 
+  // The optimized code is going to be patched, so we cannot use it
+  // any more.  Play safe and reset the whole cache.
+  function->shared()->ClearOptimizedCodeMap();
+
   Isolate* isolate = function->GetIsolate();
   HandleScope scope(isolate);
   AssertNoAllocation no_allocation;
@@ -194,8 +198,19 @@
   // ignore all slots that might have been recorded on it.
   isolate->heap()->mark_compact_collector()->InvalidateCode(code);
 
-  // Set the code for the function to non-optimized version.
-  function->ReplaceCode(function->shared()->code());
+  // Iterate over all the functions which share the same code object
+  // and make them use the unoptimized version.
+  Context* context = function->context()->global_context();
+  Object* element = context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
+  SharedFunctionInfo* shared = function->shared();
+  while (!element->IsUndefined()) {
+    JSFunction* func = JSFunction::cast(element);
+    // Grab element before code replacement as ReplaceCode alters the list.
+    element = func->next_function_link();
+    if (func->code() == code) {
+      func->ReplaceCode(shared->code());
+    }
+  }
 
   if (FLAG_trace_deopt) {
     PrintF("[forced deoptimization: ");
@@ -330,9 +345,9 @@
   unsigned node_id = iterator.Next();
   USE(node_id);
   ASSERT(node_id == ast_id);
-  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
-  USE(function);
-  ASSERT(function == function_);
+  int closure_id = iterator.Next();
+  USE(closure_id);
+  ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
   unsigned height = iterator.Next();
   unsigned height_in_bytes = height * kPointerSize;
   USE(height_in_bytes);
@@ -456,15 +471,15 @@
     output_[0]->SetPc(pc);
   }
   Code* continuation =
-      function->GetIsolate()->builtins()->builtin(Builtins::kNotifyOSR);
+      function_->GetIsolate()->builtins()->builtin(Builtins::kNotifyOSR);
   output_[0]->SetContinuation(
       reinterpret_cast<uint32_t>(continuation->entry()));
 
   if (FLAG_trace_osr) {
     PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
            ok ? "finished" : "aborted",
-           reinterpret_cast<intptr_t>(function));
-    function->PrintName();
+           reinterpret_cast<intptr_t>(function_));
+    function_->PrintName();
     PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
   }
 }
@@ -682,7 +697,15 @@
 void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
                                    int frame_index) {
   int node_id = iterator->Next();
-  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+  JSFunction* function;
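+  // Frame 0 is the function being deoptimized itself; it is referenced by
+  // kSelfLiteralId rather than by an index into the literal array.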
+  if (frame_index != 0) {
+    function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+  } else {
+    int closure_id = iterator->Next();
+    USE(closure_id);
+    ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
+    function = function_;
+  }
   unsigned height = iterator->Next();
   unsigned height_in_bytes = height * kPointerSize;
   if (FLAG_trace_deopt) {
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 5a513fd..96357bd 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -317,10 +317,6 @@
     // Self-optimization is a one-off thing: if it fails, don't try again.
     reset_value = Smi::kMaxValue;
   }
-  if (isolate()->IsDebuggerActive()) {
-    // Detect debug break requests as soon as possible.
-    reset_value = 10;
-  }
   __ mov(ebx, Immediate(profiling_counter_));
   __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
          Immediate(Smi::FromInt(reset_value)));
@@ -1559,7 +1555,7 @@
   // marked expressions, no store code is emitted.
   expr->CalculateEmitStore(zone());
 
-  AccessorTable accessor_table(isolate()->zone());
+  AccessorTable accessor_table(zone());
   for (int i = 0; i < expr->properties()->length(); i++) {
     ObjectLiteral::Property* property = expr->properties()->at(i);
     if (property->IsCompileTimeValue()) continue;
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 7fd64ca..1ea2188 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -420,7 +420,9 @@
   int height = translation_size - environment->parameter_count();
 
   WriteTranslation(environment->outer(), translation);
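+  // If the environment's closure is the function being compiled, reference
+  // it via kSelfLiteralId instead of embedding it as a literal, so the
+  // optimized code can be shared between closures.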
-  int closure_id = DefineDeoptimizationLiteral(environment->closure());
+  int closure_id = *info()->closure() != *environment->closure()
+      ? DefineDeoptimizationLiteral(environment->closure())
+      : Translation::kSelfLiteralId;
   switch (environment->frame_type()) {
     case JS_FUNCTION:
       translation->BeginJSFrame(environment->ast_id(), closure_id, height);
@@ -2830,7 +2832,7 @@
 
 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   Register result = ToRegister(instr->result());
-  __ LoadHeapObject(result, instr->hydrogen()->closure());
+  __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
 }
 
 
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index b241aaf..2c6fb8b 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -46,26 +46,25 @@
 
 class LCodeGen BASE_EMBEDDED {
  public:
-  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info,
-           Zone* zone)
-      : chunk_(chunk),
+  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+      : zone_(info->zone()),
+        chunk_(chunk),
         masm_(assembler),
         info_(info),
         current_block_(-1),
         current_instruction_(-1),
         instructions_(chunk->instructions()),
-        deoptimizations_(4, zone),
-        deoptimization_literals_(8, zone),
+        deoptimizations_(4, info->zone()),
+        deoptimization_literals_(8, info->zone()),
         inlined_function_count_(0),
         scope_(info->scope()),
         status_(UNUSED),
-        translations_(zone),
-        deferred_(8, zone),
+        translations_(info->zone()),
+        deferred_(8, info->zone()),
         dynamic_frame_alignment_(false),
         osr_pc_offset_(-1),
         last_lazy_deopt_pc_(0),
-        safepoints_(zone),
-        zone_(zone),
+        safepoints_(info->zone()),
         resolver_(this),
         expected_safepoint_kind_(Safepoint::kSimple) {
     PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -330,6 +329,7 @@
   // register, or a stack slot operand.
   void EmitPushTaggedOperand(LOperand* operand);
 
+  Zone* zone_;
   LChunk* const chunk_;
   MacroAssembler* const masm_;
   CompilationInfo* const info_;
@@ -352,8 +352,6 @@
   // itself is emitted at the end of the generated code.
   SafepointTableBuilder safepoints_;
 
-  Zone* zone_;
-
   // Compiler from a set of parallel moves to a sequential list of moves.
   LGapResolver resolver_;
 
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index 07782cc..622dc42 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -316,6 +316,11 @@
   // uncaptured. In either case succeed immediately.
   __ j(equal, &fallthrough);
 
+  // Check that there are sufficient characters left in the input.
+  __ mov(eax, edi);
+  __ add(eax, ebx);
+  BranchOrBacktrack(greater, on_no_match);
+
   if (mode_ == ASCII) {
     Label success;
     Label fail;
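
The instructions added above guard back-reference matching: per this assembler's register conventions, edi holds the current position as a negative offset from the end of the input and ebx the length of the capture, so a positive sum means the reference would read past the end. A standalone sketch of the check (not V8 code; plain integers stand in for the registers):

    #include <cassert>

    // Standalone sketch (not V8 code) of the added bounds check: the
    // current position is a negative offset from the end of the input
    // (edi) and the capture length is positive (ebx); a sum greater than
    // zero means the back reference would read past the end of the string.
    bool BackReferenceFits(int position_from_end, int capture_length) {
      return position_from_end + capture_length <= 0;
    }

    int main() {
      assert(BackReferenceFits(-3, 3));   // exactly reaches the end: ok
      assert(!BackReferenceFits(-3, 4));  // one past the end: no match
      return 0;
    }
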
diff --git a/src/ia32/regexp-macro-assembler-ia32.h b/src/ia32/regexp-macro-assembler-ia32.h
index 760fadc..7aea385 100644
--- a/src/ia32/regexp-macro-assembler-ia32.h
+++ b/src/ia32/regexp-macro-assembler-ia32.h
@@ -34,14 +34,7 @@
 namespace v8 {
 namespace internal {
 
-#ifdef V8_INTERPRETED_REGEXP
-class RegExpMacroAssemblerIA32: public RegExpMacroAssembler {
- public:
-  RegExpMacroAssemblerIA32() { }
-  virtual ~RegExpMacroAssemblerIA32() { }
-};
-
-#else  // V8_INTERPRETED_REGEXP
+#ifndef V8_INTERPRETED_REGEXP
 class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
  public:
   RegExpMacroAssemblerIA32(Mode mode, int registers_to_save, Zone* zone);
diff --git a/src/isolate.cc b/src/isolate.cc
index 8fcb370..61e43b1 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -945,9 +945,12 @@
   // When scheduling a throw we first throw the exception to get the
   // error reporting if it is uncaught before rescheduling it.
   Throw(exception);
-  thread_local_top()->scheduled_exception_ = pending_exception();
-  thread_local_top()->external_caught_exception_ = false;
-  clear_pending_exception();
+  PropagatePendingExceptionToExternalTryCatch();
+  if (has_pending_exception()) {
+    thread_local_top()->scheduled_exception_ = pending_exception();
+    thread_local_top()->external_caught_exception_ = false;
+    clear_pending_exception();
+  }
 }
 
 
@@ -1470,6 +1473,7 @@
       descriptor_lookup_cache_(NULL),
       handle_scope_implementer_(NULL),
       unicode_cache_(NULL),
+      runtime_zone_(this),
       in_use_list_(0),
       free_list_(0),
       preallocated_storage_preallocated_(false),
@@ -1490,7 +1494,6 @@
       sizeof(isolate_addresses_[0]) * (kIsolateAddressCount + 1));
 
   heap_.isolate_ = this;
-  zone_.isolate_ = this;
   stack_guard_.isolate_ = this;
 
   // ThreadManager is initialized early to support locking an isolate
@@ -1547,6 +1550,11 @@
     thread_data_table_->RemoveAllThreads(this);
   }
 
+  if (serialize_partial_snapshot_cache_ != NULL) {
+    delete[] serialize_partial_snapshot_cache_;
+    serialize_partial_snapshot_cache_ = NULL;
+  }
+
   if (!IsDefaultIsolate()) {
     delete this;
   }
@@ -1595,6 +1603,26 @@
 }
 
 
+void Isolate::PushToPartialSnapshotCache(Object* obj) {
+  int length = serialize_partial_snapshot_cache_length();
+  int capacity = serialize_partial_snapshot_cache_capacity();
+
+  if (length >= capacity) {
+    int new_capacity = static_cast<int>((capacity + 10) * 1.2);
+    Object** new_array = new Object*[new_capacity];
+    for (int i = 0; i < length; i++) {
+      new_array[i] = serialize_partial_snapshot_cache()[i];
+    }
+    if (capacity != 0) delete[] serialize_partial_snapshot_cache();
+    set_serialize_partial_snapshot_cache(new_array);
+    set_serialize_partial_snapshot_cache_capacity(new_capacity);
+  }
+
+  serialize_partial_snapshot_cache()[length] = obj;
+  set_serialize_partial_snapshot_cache_length(length + 1);
+}
+
+
 void Isolate::SetIsolateThreadLocals(Isolate* isolate,
                                      PerIsolateThreadData* data) {
   Thread::SetThreadLocal(isolate_key_, isolate);
@@ -1606,7 +1634,7 @@
   TRACE_ISOLATE(destructor);
 
   // Has to be called while counters_ are still alive.
-  zone_.DeleteKeptSegment();
+  runtime_zone_.DeleteKeptSegment();
 
   delete[] assembler_spare_buffer_;
   assembler_spare_buffer_ = NULL;
@@ -1778,7 +1806,7 @@
   global_handles_ = new GlobalHandles(this);
   bootstrapper_ = new Bootstrapper();
   handle_scope_implementer_ = new HandleScopeImplementer(this);
-  stub_cache_ = new StubCache(this, zone());
+  stub_cache_ = new StubCache(this, runtime_zone());
   regexp_stack_ = new RegExpStack();
   regexp_stack_->isolate_ = this;
   date_cache_ = new DateCache();
@@ -1812,6 +1840,11 @@
     return false;
   }
 
+  if (create_heap_objects) {
+    // Terminate the cache array with the sentinel so we can iterate.
+    PushToPartialSnapshotCache(heap_.undefined_value());
+  }
+
   InitializeThreadLocal();
 
   bootstrapper_->Initialize(create_heap_objects);
@@ -1838,7 +1871,7 @@
 #endif
 
   // If we are deserializing, read the state into the now-empty heap.
-  if (des != NULL) {
+  if (!create_heap_objects) {
     des->Deserialize();
   }
   stub_cache_->Initialize();
@@ -1853,7 +1886,7 @@
   heap_.SetStackLimits();
 
   // Quiet the heap NaN if needed on target platform.
-  if (des != NULL) Assembler::QuietNaN(heap_.nan_value());
+  if (!create_heap_objects) Assembler::QuietNaN(heap_.nan_value());
 
   deoptimizer_data_ = new DeoptimizerData;
   runtime_profiler_ = new RuntimeProfiler(this);
@@ -1861,7 +1894,7 @@
 
   // If we are deserializing, log non-function code objects and compiled
   // functions found in the snapshot.
-  if (des != NULL && (FLAG_log_code || FLAG_ll_prof)) {
+  if (!create_heap_objects && (FLAG_log_code || FLAG_ll_prof)) {
     HandleScope scope;
     LOG(this, LogCodeObjects());
     LOG(this, LogCompiledFunctions());
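
PushToPartialSnapshotCache replaces the fixed-capacity cache slot (dropped from ISOLATE_INIT_ARRAY_LIST in the isolate.h hunk below) with an array grown on demand: each time the cache fills, capacity becomes (capacity + 10) * 1.2. A standalone sketch of how the capacity sequence develops:

    #include <cstdio>

    // Standalone sketch (not V8 code) of the growth policy above: each
    // time the cache is full, capacity becomes (capacity + 10) * 1.2.
    int main() {
      int capacity = 0;
      for (int i = 0; i < 5; i++) {
        capacity = static_cast<int>((capacity + 10) * 1.2);
        printf("%d ", capacity);  // prints: 12 26 43 63 87
      }
      printf("\n");
      return 0;
    }
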
diff --git a/src/isolate.h b/src/isolate.h
index 5ca2b87..50ecc53 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -307,7 +307,6 @@
 
 #define ISOLATE_INIT_ARRAY_LIST(V)                                             \
   /* SerializerDeserializer state. */                                          \
-  V(Object*, serialize_partial_snapshot_cache, kPartialSnapshotCacheCapacity)  \
   V(int, jsregexp_static_offsets_vector, kJSRegexpStaticOffsetsVectorSize)     \
   V(int, bad_char_shift_table, kUC16AlphabetSize)                              \
   V(int, good_suffix_shift_table, (kBMMaxShift + 1))                           \
@@ -320,6 +319,8 @@
 #define ISOLATE_INIT_LIST(V)                                                   \
   /* SerializerDeserializer state. */                                          \
   V(int, serialize_partial_snapshot_cache_length, 0)                           \
+  V(int, serialize_partial_snapshot_cache_capacity, 0)                         \
+  V(Object**, serialize_partial_snapshot_cache, NULL)                          \
   /* Assembler state. */                                                       \
   /* A previously allocated buffer of kMinimalBufferSize bytes, or NULL. */    \
   V(byte*, assembler_spare_buffer, NULL)                                       \
@@ -610,6 +611,9 @@
         (exception != heap()->termination_exception());
   }
 
+  // Serializer.
+  void PushToPartialSnapshotCache(Object* obj);
+
   // JS execution stack (see frames.h).
   static Address c_entry_fp(ThreadLocalTop* thread) {
     return thread->c_entry_fp_;
@@ -850,7 +854,7 @@
     ASSERT(handle_scope_implementer_);
     return handle_scope_implementer_;
   }
-  Zone* zone() { return &zone_; }
+  Zone* runtime_zone() { return &runtime_zone_; }
 
   UnicodeCache* unicode_cache() {
     return unicode_cache_;
@@ -1196,7 +1200,7 @@
   v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
   HandleScopeImplementer* handle_scope_implementer_;
   UnicodeCache* unicode_cache_;
-  Zone zone_;
+  Zone runtime_zone_;
   PreallocatedStorage in_use_list_;
   PreallocatedStorage free_list_;
   bool preallocated_storage_preallocated_;
diff --git a/src/json-parser.h b/src/json-parser.h
index 7265165..ba05230 100644
--- a/src/json-parser.h
+++ b/src/json-parser.h
@@ -326,7 +326,7 @@
 // Parse a JSON array. Position must be right at '['.
 template <bool seq_ascii>
 Handle<Object> JsonParser<seq_ascii>::ParseJsonArray() {
-  ZoneScope zone_scope(isolate(), DELETE_ON_EXIT);
+  ZoneScope zone_scope(zone(), DELETE_ON_EXIT);
   ZoneList<Handle<Object> > elements(4, zone());
   ASSERT_EQ(c0_, '[');
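
ZoneScope is now constructed from the Zone* it manages rather than from an Isolate*, making the zone whose memory is released explicit at each call site. A standalone RAII sketch of the pattern (a simplified stand-in, not the real ZoneScope):

    // Standalone RAII sketch (not the real ZoneScope): construction binds
    // the scope to a zone, and destruction with DELETE_ON_EXIT releases
    // everything allocated from that zone inside the scope.
    struct Zone {
      void DeleteAll() { /* release all segments */ }
    };

    enum ZoneScopeMode { DELETE_ON_EXIT, KEEP_ON_EXIT };

    class ZoneScope {
     public:
      ZoneScope(Zone* zone, ZoneScopeMode mode) : zone_(zone), mode_(mode) {}
      ~ZoneScope() {
        if (mode_ == DELETE_ON_EXIT) zone_->DeleteAll();
      }
     private:
      Zone* zone_;
      ZoneScopeMode mode_;
    };

    int main() {
      Zone zone;
      {
        ZoneScope scope(&zone, DELETE_ON_EXIT);
        // ... allocate parser temporaries from the zone ...
      }  // zone memory reclaimed here
      return 0;
    }
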
 
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index cd51db8..e730e14 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -167,7 +167,9 @@
 
 Handle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
                                    Handle<String> pattern,
-                                   Handle<String> flag_str) {
+                                   Handle<String> flag_str,
+                                   Zone* zone) {
+  ZoneScope zone_scope(zone, DELETE_ON_EXIT);
   Isolate* isolate = re->GetIsolate();
   JSRegExp::Flags flags = RegExpFlagsFromString(flag_str);
   CompilationCache* compilation_cache = isolate->compilation_cache();
@@ -181,12 +183,11 @@
     return re;
   }
   pattern = FlattenGetString(pattern);
-  ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
   PostponeInterruptsScope postpone(isolate);
   RegExpCompileData parse_result;
   FlatStringReader reader(isolate, pattern);
   if (!RegExpParser::ParseRegExp(&reader, flags.is_multiline(),
-                                 &parse_result)) {
+                                 &parse_result, zone)) {
     // Throw an exception if we fail to parse the pattern.
     ThrowRegExpException(re,
                          pattern,
@@ -231,14 +232,13 @@
 Handle<Object> RegExpImpl::Exec(Handle<JSRegExp> regexp,
                                 Handle<String> subject,
                                 int index,
-                                Handle<JSArray> last_match_info,
-                                Zone* zone) {
+                                Handle<JSArray> last_match_info) {
   switch (regexp->TypeTag()) {
     case JSRegExp::ATOM:
       return AtomExec(regexp, subject, index, last_match_info);
     case JSRegExp::IRREGEXP: {
       Handle<Object> result =
-          IrregexpExec(regexp, subject, index, last_match_info, zone);
+          IrregexpExec(regexp, subject, index, last_match_info);
       ASSERT(!result.is_null() ||
              regexp->GetIsolate()->has_pending_exception());
       return result;
@@ -345,8 +345,7 @@
 // If compilation fails, an exception is thrown and this function
 // returns false.
 bool RegExpImpl::EnsureCompiledIrregexp(
-    Handle<JSRegExp> re, Handle<String> sample_subject, bool is_ascii,
-    Zone* zone) {
+    Handle<JSRegExp> re, Handle<String> sample_subject, bool is_ascii) {
   Object* compiled_code = re->DataAt(JSRegExp::code_index(is_ascii));
 #ifdef V8_INTERPRETED_REGEXP
   if (compiled_code->IsByteArray()) return true;
@@ -362,7 +361,7 @@
     ASSERT(compiled_code->IsSmi());
     return true;
   }
-  return CompileIrregexp(re, sample_subject, is_ascii, zone);
+  return CompileIrregexp(re, sample_subject, is_ascii);
 }
 
 
@@ -384,11 +383,10 @@
 
 bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
                                  Handle<String> sample_subject,
-                                 bool is_ascii,
-                                 Zone* zone) {
+                                 bool is_ascii) {
   // Compile the RegExp.
   Isolate* isolate = re->GetIsolate();
-  ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
+  ZoneScope zone_scope(isolate->runtime_zone(), DELETE_ON_EXIT);
   PostponeInterruptsScope postpone(isolate);
   // If we had a compilation error the last time this is saved at the
   // saved code index.
@@ -419,8 +417,10 @@
   if (!pattern->IsFlat()) FlattenString(pattern);
   RegExpCompileData compile_data;
   FlatStringReader reader(isolate, pattern);
+  Zone* zone = isolate->runtime_zone();
   if (!RegExpParser::ParseRegExp(&reader, flags.is_multiline(),
-                                 &compile_data)) {
+                                 &compile_data,
+                                 zone)) {
     // Throw an exception if we fail to parse the pattern.
     // THIS SHOULD NOT HAPPEN. We already pre-parsed it successfully once.
     ThrowRegExpException(re,
@@ -502,13 +502,12 @@
 
 
 int RegExpImpl::IrregexpPrepare(Handle<JSRegExp> regexp,
-                                Handle<String> subject,
-                                Zone* zone) {
+                                Handle<String> subject) {
   if (!subject->IsFlat()) FlattenString(subject);
 
   // Check the asciiness of the underlying storage.
   bool is_ascii = subject->IsAsciiRepresentationUnderneath();
-  if (!EnsureCompiledIrregexp(regexp, subject, is_ascii, zone)) return -1;
+  if (!EnsureCompiledIrregexp(regexp, subject, is_ascii)) return -1;
 
 #ifdef V8_INTERPRETED_REGEXP
   // Byte-code regexp needs space allocated for all its registers.
@@ -541,8 +540,7 @@
     Handle<JSRegExp> regexp,
     Handle<String> subject,
     int index,
-    Vector<int> output,
-    Zone* zone) {
+    Vector<int> output) {
   Isolate* isolate = regexp->GetIsolate();
 
   Handle<FixedArray> irregexp(FixedArray::cast(regexp->data()), isolate);
@@ -556,7 +554,7 @@
 #ifndef V8_INTERPRETED_REGEXP
   ASSERT(output.length() >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
   do {
-    EnsureCompiledIrregexp(regexp, subject, is_ascii, zone);
+    EnsureCompiledIrregexp(regexp, subject, is_ascii);
     Handle<Code> code(IrregexpNativeCode(*irregexp, is_ascii), isolate);
     NativeRegExpMacroAssembler::Result res =
         NativeRegExpMacroAssembler::Match(code,
@@ -582,7 +580,7 @@
     // the, potentially, different subject (the string can switch between
     // being internal and external, and even between being ASCII and UC16,
     // but the characters are always the same).
-    IrregexpPrepare(regexp, subject, zone);
+    IrregexpPrepare(regexp, subject);
     is_ascii = subject->IsAsciiRepresentationUnderneath();
   } while (true);
   UNREACHABLE();
@@ -617,8 +615,7 @@
 Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
                                         Handle<String> subject,
                                         int previous_index,
-                                        Handle<JSArray> last_match_info,
-                                        Zone* zone) {
+                                        Handle<JSArray> last_match_info) {
   Isolate* isolate = jsregexp->GetIsolate();
   ASSERT_EQ(jsregexp->TypeTag(), JSRegExp::IRREGEXP);
 
@@ -632,7 +629,7 @@
   }
 #endif
 #endif
-  int required_registers = RegExpImpl::IrregexpPrepare(jsregexp, subject, zone);
+  int required_registers = RegExpImpl::IrregexpPrepare(jsregexp, subject);
   if (required_registers < 0) {
     // Compiling failed with an exception.
     ASSERT(isolate->has_pending_exception());
@@ -643,8 +640,7 @@
 
   int res = RegExpImpl::IrregexpExecRaw(jsregexp, subject, previous_index,
                                         Vector<int>(registers.vector(),
-                                                    registers.length()),
-                                        zone);
+                                                    registers.length()));
   if (res == RE_SUCCESS) {
     int capture_register_count =
         (IrregexpNumberOfCaptures(FixedArray::cast(jsregexp->data())) + 1) * 2;
@@ -5987,7 +5983,7 @@
 #else  // V8_INTERPRETED_REGEXP
   // Interpreted regexp implementation.
   EmbeddedVector<byte, 1024> codes;
-  RegExpMacroAssemblerIrregexp macro_assembler(codes);
+  RegExpMacroAssemblerIrregexp macro_assembler(codes, zone);
 #endif  // V8_INTERPRETED_REGEXP
 
   // Inserted here, instead of in Assembler, because it depends on information
diff --git a/src/jsregexp.h b/src/jsregexp.h
index 782c5b0..9a84237 100644
--- a/src/jsregexp.h
+++ b/src/jsregexp.h
@@ -71,15 +71,15 @@
   // Returns false if compilation fails.
   static Handle<Object> Compile(Handle<JSRegExp> re,
                                 Handle<String> pattern,
-                                Handle<String> flags);
+                                Handle<String> flags,
+                                Zone* zone);
 
   // See ECMA-262 section 15.10.6.2.
   // This function calls the garbage collector if necessary.
   static Handle<Object> Exec(Handle<JSRegExp> regexp,
                              Handle<String> subject,
                              int index,
-                             Handle<JSArray> lastMatchInfo,
-                             Zone* zone);
+                             Handle<JSArray> lastMatchInfo);
 
   // Prepares a JSRegExp object with Irregexp-specific data.
   static void IrregexpInitialize(Handle<JSRegExp> re,
@@ -108,8 +108,7 @@
   // as its "registers" argument. If the regexp cannot be compiled,
   // an exception is set as pending, and this function returns negative.
   static int IrregexpPrepare(Handle<JSRegExp> regexp,
-                             Handle<String> subject,
-                             Zone* zone);
+                             Handle<String> subject);
 
   // Calculate the size of offsets vector for the case of global regexp
   // and the number of matches this vector is able to store.
@@ -126,8 +125,7 @@
   static int IrregexpExecRaw(Handle<JSRegExp> regexp,
                              Handle<String> subject,
                              int index,
-                             Vector<int> registers,
-                             Zone* zone);
+                             Vector<int> registers);
 
   // Execute an Irregexp bytecode pattern.
   // On a successful match, the result is a JSArray containing
@@ -136,8 +134,7 @@
   static Handle<Object> IrregexpExec(Handle<JSRegExp> regexp,
                                      Handle<String> subject,
                                      int index,
-                                     Handle<JSArray> lastMatchInfo,
-                                     Zone* zone);
+                                     Handle<JSArray> lastMatchInfo);
 
   // Array index in the lastMatchInfo array.
   static const int kLastCaptureCount = 0;
@@ -202,11 +199,9 @@
   static String* two_byte_cached_string_;
 
   static bool CompileIrregexp(
-      Handle<JSRegExp> re, Handle<String> sample_subject, bool is_ascii,
-      Zone* zone);
+      Handle<JSRegExp> re, Handle<String> sample_subject, bool is_ascii);
   static inline bool EnsureCompiledIrregexp(
-      Handle<JSRegExp> re, Handle<String> sample_subject, bool is_ascii,
-      Zone* zone);
+      Handle<JSRegExp> re, Handle<String> sample_subject, bool is_ascii);
 
 
   // Set the subject cache.  The previous string buffer is not deleted, so the
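
Taken together, these declarations move Zone ownership to the compile path: Compile receives the Zone and opens its own ZoneScope (see the jsregexp.cc hunk above), while Exec, IrregexpPrepare, IrregexpExecRaw and IrregexpExec all drop their Zone* parameter. A hypothetical call site under the new signatures; only the signatures come from this patch, and the handles and the isolate are assumed to be in scope:

    // Hypothetical call site, assuming re, pattern, flags, subject and
    // last_match_info handles plus the isolate are in scope.
    Handle<Object> compiled =
        RegExpImpl::Compile(re, pattern, flags, isolate->runtime_zone());
    // The execution path no longer threads a Zone* through:
    Handle<Object> match = RegExpImpl::Exec(re, subject, 0, last_match_info);
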
diff --git a/src/list.h b/src/list.h
index 3ca4a3f..7fd4f5c 100644
--- a/src/list.h
+++ b/src/list.h
@@ -74,6 +74,11 @@
     AllocationPolicy::Delete(p);
   }
 
+  // Please the MSVC compiler.  We should never have to execute this.
+  INLINE(void operator delete(void* p, AllocationPolicy allocator)) {
+    UNREACHABLE();
+  }
+
   // Returns a reference to the element at index i.  This reference is
   // not safe to use after operations that can change the list's
   // backing store (e.g. Add).
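
The stub added above exists because a placement-style operator new that takes an AllocationPolicy must be paired with a matching operator delete: the compiler calls that overload only when a constructor throws inside `new (allocator) T`, and MSVC warns (C4291) when no match exists. A standalone sketch of the pairing (not V8 code):

    #include <cstddef>
    #include <cstdlib>

    // Standalone sketch (not V8 code): a placement-style operator new
    // taking an allocator must be paired with a matching operator delete,
    // which is invoked only if the constructor throws.
    struct Allocator {
      static void* New(size_t size) { return std::malloc(size); }
      static void Delete(void* p) { std::free(p); }
    };

    struct Element {
      void* operator new(size_t size, Allocator allocator) {
        return Allocator::New(size);
      }
      // The matching placement delete; used only on constructor failure.
      void operator delete(void* p, Allocator allocator) {
        Allocator::Delete(p);
      }
      void operator delete(void* p) { Allocator::Delete(p); }
    };

    int main() {
      Element* e = new (Allocator()) Element();
      delete e;
      return 0;
    }
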
diff --git a/src/lithium.h b/src/lithium.h
index 1f42b68..ff644ee 100644
--- a/src/lithium.h
+++ b/src/lithium.h
@@ -470,7 +470,7 @@
         parameter_count_(parameter_count),
         pc_offset_(-1),
         values_(value_count, zone),
-        is_tagged_(value_count, closure->GetHeap()->isolate()->zone()),
+        is_tagged_(value_count, zone),
         spilled_registers_(NULL),
         spilled_double_registers_(NULL),
         outer_(outer),
diff --git a/src/liveedit-debugger.js b/src/liveedit-debugger.js
index 4463c93..cfcdb81 100644
--- a/src/liveedit-debugger.js
+++ b/src/liveedit-debugger.js
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -47,6 +47,8 @@
   // Forward declaration for minifier.
   var FunctionStatus;
 
+  var NEEDS_STEP_IN_PROPERTY_NAME = "stack_update_needs_step_in";
+
   // Applies the change to the script.
   // The change is in form of list of chunks encoded in a single array as
   // a series of triplets (pos1_start, pos1_end, pos2_end)
@@ -161,7 +163,7 @@
 
     // Our current implementation requires client to manually issue "step in"
     // command for correct stack state.
-    preview_description.stack_update_needs_step_in =
+    preview_description[NEEDS_STEP_IN_PROPERTY_NAME] =
         preview_description.stack_modified;
 
     // Start with breakpoints. Convert their line/column positions and
@@ -1078,6 +1080,18 @@
     return ProcessOldNode(old_code_tree);
   }
 
+  // Restarts the call frame and returns a result like other LiveEdit calls.
+  function RestartFrame(frame_mirror) {
+    var result = frame_mirror.restart();
+    if (IS_STRING(result)) {
+      throw new Failure("Failed to restart frame: " + result);
+    }
+    result = {};
+    result[NEEDS_STEP_IN_PROPERTY_NAME] = true;
+    return result;
+  }
+  // Function is public.
+  this.RestartFrame = RestartFrame;
 
   // Functions are public for tests.
   this.TestApi = {
diff --git a/src/liveedit.cc b/src/liveedit.cc
index e670b44..7f64554 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -601,7 +601,7 @@
   PostponeInterruptsScope postpone(isolate);
 
   // Build AST.
-  CompilationInfo info(script);
+  CompilationInfoWithZone info(script);
   info.MarkAsGlobal();
   // Parse and don't allow skipping lazy functions.
   if (ParserApi::Parse(&info, kNoParsingFlags)) {
@@ -894,7 +894,6 @@
 JSArray* LiveEdit::GatherCompileInfo(Handle<Script> script,
                                      Handle<String> source) {
   Isolate* isolate = Isolate::Current();
-  ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
 
   FunctionInfoListener listener;
   Handle<Object> original_source = Handle<Object>(script->source());
@@ -923,37 +922,35 @@
 }
 
 
-// Visitor that collects all references to a particular code object,
-// including "CODE_TARGET" references in other code objects.
-// It works in context of ZoneScope.
-class ReferenceCollectorVisitor : public ObjectVisitor {
+// Visitor that finds all references to a particular code object,
+// including "CODE_TARGET" references in other code objects, and replaces
+// them on the fly.
+class ReplacingVisitor : public ObjectVisitor {
  public:
-  ReferenceCollectorVisitor(Code* original, Zone* zone)
-      : original_(original),
-        rvalues_(10, zone),
-        reloc_infos_(10, zone),
-        code_entries_(10, zone),
-        zone_(zone) {
+  explicit ReplacingVisitor(Code* original, Code* substitution)
+    : original_(original), substitution_(substitution) {
   }
 
   virtual void VisitPointers(Object** start, Object** end) {
     for (Object** p = start; p < end; p++) {
       if (*p == original_) {
-        rvalues_.Add(p, zone_);
+        *p = substitution_;
       }
     }
   }
 
   virtual void VisitCodeEntry(Address entry) {
     if (Code::GetObjectFromEntryAddress(entry) == original_) {
-      code_entries_.Add(entry, zone_);
+      Address substitution_entry = substitution_->instruction_start();
+      Memory::Address_at(entry) = substitution_entry;
     }
   }
 
   virtual void VisitCodeTarget(RelocInfo* rinfo) {
     if (RelocInfo::IsCodeTarget(rinfo->rmode()) &&
         Code::GetCodeFromTargetAddress(rinfo->target_address()) == original_) {
-      reloc_infos_.Add(*rinfo, zone_);
+      Address substitution_entry = substitution_->instruction_start();
+      rinfo->set_target_address(substitution_entry);
     }
   }
 
@@ -961,28 +958,9 @@
     VisitCodeTarget(rinfo);
   }
 
-  // Post-visiting method that iterates over all collected references and
-  // modifies them.
-  void Replace(Code* substitution) {
-    for (int i = 0; i < rvalues_.length(); i++) {
-      *(rvalues_[i]) = substitution;
-    }
-    Address substitution_entry = substitution->instruction_start();
-    for (int i = 0; i < reloc_infos_.length(); i++) {
-      reloc_infos_[i].set_target_address(substitution_entry);
-    }
-    for (int i = 0; i < code_entries_.length(); i++) {
-      Address entry = code_entries_[i];
-      Memory::Address_at(entry) = substitution_entry;
-    }
-  }
-
  private:
   Code* original_;
-  ZoneList<Object**> rvalues_;
-  ZoneList<RelocInfo> reloc_infos_;
-  ZoneList<Address> code_entries_;
-  Zone* zone_;
+  Code* substitution_;
 };
 
 
@@ -990,28 +968,21 @@
 static void ReplaceCodeObject(Code* original, Code* substitution) {
   ASSERT(!HEAP->InNewSpace(substitution));
 
-  HeapIterator iterator;
   AssertNoAllocation no_allocations_please;
 
-  // A zone scope for ReferenceCollectorVisitor.
-  ZoneScope scope(Isolate::Current(), DELETE_ON_EXIT);
-
-  ReferenceCollectorVisitor visitor(original, Isolate::Current()->zone());
+  ReplacingVisitor visitor(original, substitution);
 
   // Iterate over all roots. Stack frames may have pointers into the original
   // code, so temporarily replace the pointers with offset numbers
   // in prologue/epilogue.
-  {
-    HEAP->IterateStrongRoots(&visitor, VISIT_ALL);
-  }
+  HEAP->IterateRoots(&visitor, VISIT_ALL);
 
   // Now iterate over all pointers of all objects, including code_target
   // implicit pointers.
+  HeapIterator iterator;
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     obj->Iterate(&visitor);
   }
-
-  visitor.Replace(substitution);
 }
 
 
@@ -1595,17 +1566,36 @@
   return !frame->is_exit();
 }
 
-// Fills result array with statuses of functions. Modifies the stack
-// removing all listed function if possible and if do_drop is true.
-static const char* DropActivationsInActiveThread(
-    Handle<JSArray> shared_info_array, Handle<JSArray> result, bool do_drop,
-    Zone* zone) {
+
+// Describes a set of call frames that execute any of the listed functions.
+// Finding no such frames does not mean an error.
+class MultipleFunctionTarget {
+ public:
+  MultipleFunctionTarget(Handle<JSArray> shared_info_array,
+      Handle<JSArray> result)
+      : m_shared_info_array(shared_info_array),
+        m_result(result) {}
+  bool MatchActivation(StackFrame* frame,
+      LiveEdit::FunctionPatchabilityStatus status) {
+    return CheckActivation(m_shared_info_array, m_result, frame, status);
+  }
+  const char* GetNotFoundMessage() {
+    return NULL;
+  }
+ private:
+  Handle<JSArray> m_shared_info_array;
+  Handle<JSArray> m_result;
+};
+
+// Drops all call frames matched by the target and all frames above them.
+template<typename TARGET>
+static const char* DropActivationsInActiveThreadImpl(
+    TARGET& target, bool do_drop, Zone* zone) {
   Isolate* isolate = Isolate::Current();
   Debug* debug = isolate->debug();
-  ZoneScope scope(isolate, DELETE_ON_EXIT);
+  ZoneScope scope(zone, DELETE_ON_EXIT);
   Vector<StackFrame*> frames = CreateStackMap(zone);
 
-  int array_len = Smi::cast(shared_info_array->length())->value();
 
   int top_frame_index = -1;
   int frame_index = 0;
@@ -1615,8 +1605,8 @@
       top_frame_index = frame_index;
       break;
     }
-    if (CheckActivation(shared_info_array, result, frame,
-                        LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE)) {
+    if (target.MatchActivation(
+            frame, LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE)) {
       // We are still above break_frame. It is not a target frame,
       // it is a problem.
       return "Debugger mark-up on stack is not found";
@@ -1625,7 +1615,7 @@
 
   if (top_frame_index == -1) {
     // We haven't found break frame, but no function is blocking us anyway.
-    return NULL;
+    return target.GetNotFoundMessage();
   }
 
   bool target_frame_found = false;
@@ -1638,8 +1628,8 @@
       c_code_found = true;
       break;
     }
-    if (CheckActivation(shared_info_array, result, frame,
-                        LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
+    if (target.MatchActivation(
+            frame, LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
       target_frame_found = true;
       bottom_js_frame_index = frame_index;
     }
@@ -1651,8 +1641,8 @@
     for (; frame_index < frames.length(); frame_index++) {
       StackFrame* frame = frames[frame_index];
       if (frame->is_java_script()) {
-        if (CheckActivation(shared_info_array, result, frame,
-                            LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE)) {
+        if (target.MatchActivation(
+                frame, LiveEdit::FUNCTION_BLOCKED_UNDER_NATIVE_CODE)) {
           // Cannot drop frame under C frames.
           return NULL;
         }
@@ -1667,7 +1657,7 @@
 
   if (!target_frame_found) {
     // Nothing to drop.
-    return NULL;
+    return target.GetNotFoundMessage();
   }
 
   Debug::FrameDropMode drop_mode = Debug::FRAMES_UNTOUCHED;
@@ -1690,6 +1680,23 @@
   }
   debug->FramesHaveBeenDropped(new_id, drop_mode,
                                restarter_frame_function_pointer);
+  return NULL;
+}
+
+// Fills result array with statuses of functions. Modifies the stack
+// removing all listed function if possible and if do_drop is true.
+static const char* DropActivationsInActiveThread(
+    Handle<JSArray> shared_info_array, Handle<JSArray> result, bool do_drop,
+    Zone* zone) {
+  MultipleFunctionTarget target(shared_info_array, result);
+
+  const char* message =
+      DropActivationsInActiveThreadImpl(target, do_drop, zone);
+  if (message) {
+    return message;
+  }
+
+  int array_len = Smi::cast(shared_info_array->length())->value();
 
   // Replace "blocked on active" with "replaced on active" status.
   for (int i = 0; i < array_len; i++) {
@@ -1766,6 +1773,41 @@
 }
 
 
+// Describes a single call frame as a target. Not finding this frame
+// means an error.
+class SingleFrameTarget {
+ public:
+  explicit SingleFrameTarget(JavaScriptFrame* frame) : m_frame(frame) {}
+
+  bool MatchActivation(StackFrame* frame,
+      LiveEdit::FunctionPatchabilityStatus status) {
+    if (frame->fp() == m_frame->fp()) {
+      m_saved_status = status;
+      return true;
+    }
+    return false;
+  }
+  const char* GetNotFoundMessage() {
+    return "Failed to found requested frame";
+  }
+  LiveEdit::FunctionPatchabilityStatus saved_status() {
+    return m_saved_status;
+  }
+ private:
+  JavaScriptFrame* m_frame;
+  LiveEdit::FunctionPatchabilityStatus m_saved_status;
+};
+
+
+// Finds and drops the required frame and all frames above it.
+// Returns an error message or NULL.
+const char* LiveEdit::RestartFrame(JavaScriptFrame* frame, Zone* zone) {
+  SingleFrameTarget target(frame);
+
+  return DropActivationsInActiveThreadImpl(target, true, zone);
+}
+
+
 LiveEditFunctionTracker::LiveEditFunctionTracker(Isolate* isolate,
                                                  FunctionLiteral* fun)
     : isolate_(isolate) {
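
DropActivationsInActiveThreadImpl is parameterized over an implicit TARGET concept: anything providing MatchActivation() and GetNotFoundMessage() can drive the shared frame-walking logic, which is how MultipleFunctionTarget (tolerates finding nothing) and SingleFrameTarget (treats absence as an error) share one implementation. A standalone sketch of the concept (not V8 code; the frame walk is reduced to a single loop):

    #include <cstddef>
    #include <cstdio>

    // Standalone sketch (not V8 code) of the implicit TARGET concept: any
    // type with MatchActivation() and GetNotFoundMessage() can drive the
    // shared frame-dropping loop; NULL means success, as in the patch.
    struct StackFrame { int fp; };

    template <typename TARGET>
    const char* DropMatching(StackFrame* frames, int count, TARGET& target) {
      bool found = false;
      for (int i = 0; i < count; i++) {
        if (target.MatchActivation(&frames[i])) found = true;
      }
      return found ? NULL : target.GetNotFoundMessage();
    }

    struct SingleFrameTarget {
      int fp;
      bool MatchActivation(StackFrame* frame) { return frame->fp == fp; }
      const char* GetNotFoundMessage() {
        return "Failed to find requested frame";
      }
    };

    int main() {
      StackFrame frames[] = { {1}, {2}, {3} };
      SingleFrameTarget target = { 5 };
      const char* message = DropMatching(frames, 3, target);
      if (message != NULL) printf("%s\n", message);
      return 0;
    }
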
diff --git a/src/liveedit.h b/src/liveedit.h
index 424c24e..5b12854 100644
--- a/src/liveedit.h
+++ b/src/liveedit.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -123,6 +123,10 @@
   static Handle<JSArray> CheckAndDropActivations(
       Handle<JSArray> shared_info_array, bool do_drop, Zone* zone);
 
+  // Restarts the call frame and completely drops all frames above it.
+  // Returns an error message or NULL.
+  static const char* RestartFrame(JavaScriptFrame* frame, Zone* zone);
+
   // A copy of this is in liveedit-debugger.js.
   enum FunctionPatchabilityStatus {
     FUNCTION_AVAILABLE_FOR_PATCH = 1,
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 878c974..67f6e8e 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -1296,9 +1296,7 @@
 
 
   static void VisitSharedFunctionInfoGeneric(Map* map, HeapObject* object) {
-    SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
-
-    if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
+    SharedFunctionInfo::cast(object)->BeforeVisitingPointers();
 
     FixedBodyVisitor<StaticMarkingVisitor,
                      SharedFunctionInfo::BodyDescriptor,
@@ -1402,7 +1400,7 @@
     Heap* heap = map->GetHeap();
     SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
 
-    if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
+    shared->BeforeVisitingPointers();
 
     if (!known_flush_code_candidate) {
       known_flush_code_candidate = IsFlushable(heap, shared);
@@ -1539,8 +1537,8 @@
     }
 
     VisitPointers(heap,
-                  SLOT_ADDR(object, SharedFunctionInfo::kScopeInfoOffset),
-                  SLOT_ADDR(object, SharedFunctionInfo::kSize));
+        SLOT_ADDR(object, SharedFunctionInfo::kOptimizedCodeMapOffset),
+        SLOT_ADDR(object, SharedFunctionInfo::kSize));
   }
 
   #undef SLOT_ADDR
@@ -1883,12 +1881,9 @@
                                          enum_cache);
   }
 
-  // TODO(verwaest) Make sure we free unused transitions.
   if (descriptors->elements_transition_map() != NULL) {
     Object** transitions_slot = descriptors->GetTransitionsSlot();
     Object* transitions = *transitions_slot;
-    base_marker()->MarkObjectAndPush(
-        reinterpret_cast<HeapObject*>(transitions));
     mark_compact_collector()->RecordSlot(descriptor_start,
                                          transitions_slot,
                                          transitions);
@@ -3819,8 +3814,9 @@
 
   intptr_t old_space_size = heap()->PromotedSpaceSizeOfObjects();
   intptr_t space_left =
-      Min(heap()->OldGenPromotionLimit(old_space_size),
-          heap()->OldGenAllocationLimit(old_space_size)) - old_space_size;
+      Min(heap()->OldGenLimit(old_space_size, Heap::kMinPromotionLimit),
+          heap()->OldGenLimit(old_space_size, Heap::kMinAllocationLimit)) -
+      old_space_size;
 
   while (it.has_next()) {
     Page* p = it.next();
diff --git a/src/messages.js b/src/messages.js
index 2a00ba8..d91c251 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -921,17 +921,25 @@
   var fileLocation = "";
   if (this.isNative()) {
     fileLocation = "native";
-  } else if (this.isEval()) {
-    fileName = this.getScriptNameOrSourceURL();
-    if (!fileName) {
-      fileLocation = this.getEvalOrigin();
-    }
   } else {
-    fileName = this.getFileName();
-  }
+    if (this.isEval()) {
+      fileName = this.getScriptNameOrSourceURL();
+      if (!fileName) {
+        fileLocation = this.getEvalOrigin();
+        fileLocation += ", ";  // Expecting source position to follow.
+      }
+    } else {
+      fileName = this.getFileName();
+    }
 
-  if (fileName) {
-    fileLocation += fileName;
+    if (fileName) {
+      fileLocation += fileName;
+    } else {
+      // Source code does not originate from a file and is not native, but we
+      // can still get the source position inside the source string, e.g. in
+      // an eval string.
+      fileLocation += "<anonymous>";
+    }
     var lineNumber = this.getLineNumber();
     if (lineNumber != null) {
       fileLocation += ":" + lineNumber;
@@ -942,9 +950,6 @@
     }
   }
 
-  if (!fileLocation) {
-    fileLocation = "unknown source";
-  }
   var line = "";
   var functionName = this.getFunctionName();
   var addSuffix = true;
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index a7c2597..7ae84fa 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -87,6 +87,8 @@
 void FastNewClosureStub::Generate(MacroAssembler* masm) {
   // Create a new closure from the given function info in new
   // space. Set the context to the current context in cp.
+  Counters* counters = masm->isolate()->counters();
+
   Label gc;
 
   // Pop the function info from the stack.
@@ -100,6 +102,8 @@
                         &gc,
                         TAG_OBJECT);
 
+  __ IncrementCounter(counters->fast_new_closure_total(), 1, t2, t3);
+
   int map_index = (language_mode_ == CLASSIC_MODE)
       ? Context::FUNCTION_MAP_INDEX
       : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
@@ -108,24 +112,34 @@
   // as the map of the allocated object.
   __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
-  __ lw(a2, MemOperand(a2, Context::SlotOffset(map_index)));
-  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+  __ lw(t1, MemOperand(a2, Context::SlotOffset(map_index)));
+  __ sw(t1, FieldMemOperand(v0, HeapObject::kMapOffset));
 
   // Initialize the rest of the function. We don't have to update the
   // write barrier because the allocated object is in new space.
   __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
-  __ LoadRoot(a2, Heap::kTheHoleValueRootIndex);
-  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+  __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
   __ sw(a1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
   __ sw(a1, FieldMemOperand(v0, JSObject::kElementsOffset));
-  __ sw(a2, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
+  __ sw(t1, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
   __ sw(a3, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
   __ sw(cp, FieldMemOperand(v0, JSFunction::kContextOffset));
   __ sw(a1, FieldMemOperand(v0, JSFunction::kLiteralsOffset));
-  __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
 
   // Initialize the code pointer in the function to be the one
   // found in the shared function info object.
+  // But first check if there is an optimized version for our context.
+  Label check_optimized;
+  Label install_unoptimized;
+  if (FLAG_cache_optimized_code) {
+    __ lw(a1,
+          FieldMemOperand(a3, SharedFunctionInfo::kOptimizedCodeMapOffset));
+    __ And(at, a1, a1);
+    __ Branch(&check_optimized, ne, at, Operand(zero_reg));
+  }
+  __ bind(&install_unoptimized);
+  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+  __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
   __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
   __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
 
@@ -133,6 +147,72 @@
   __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
   __ Ret();
 
+  __ bind(&check_optimized);
+
+  __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, t2, t3);
+
+  // a2 holds the global context, a1 points to a fixed array of 3-element
+  // entries (global context, optimized code, literals).
+  // The optimized code map must never be empty, so check the first entry.
+  Label install_optimized;
+  // Speculatively move code object into t0.
+  __ lw(t0, FieldMemOperand(a1, FixedArray::kHeaderSize + kPointerSize));
+  __ lw(t1, FieldMemOperand(a1, FixedArray::kHeaderSize));
+  __ Branch(&install_optimized, eq, a2, Operand(t1));
+
+  // Iterate backwards over the rest of the map.  t0 holds an index as a Smi.
+  Label loop;
+  __ lw(t0, FieldMemOperand(a1, FixedArray::kLengthOffset));
+  __ bind(&loop);
+  // Do not double-check the first entry.
+
+  __ Branch(&install_unoptimized, eq, t0,
+            Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
+  __ Subu(t0, t0, Operand(
+      Smi::FromInt(SharedFunctionInfo::kEntryLength)));  // Skip an entry.
+  __ Addu(t1, a1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sll(at, t0, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(t1, t1, Operand(at));
+  __ lw(t1, MemOperand(t1));
+  __ Branch(&loop, ne, a2, Operand(t1));
+  // Hit: fetch the optimized code.
+  __ Addu(t1, a1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sll(at, t0, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(t1, t1, Operand(at));
+  __ Addu(t1, t1, Operand(kPointerSize));
+  __ lw(t0, MemOperand(t1));
+
+  __ bind(&install_optimized);
+  __ IncrementCounter(counters->fast_new_closure_install_optimized(),
+                      1, t2, t3);
+
+  // TODO(fschneider): Idea: store proper code pointers in the map and either
+  // unmangle them on marking or do nothing as the whole map is discarded on
+  // major GC anyway.
+  __ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ sw(t0, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
+
+  // Now link a function into a list of optimized functions.
+  __ lw(t0, ContextOperand(a2, Context::OPTIMIZED_FUNCTIONS_LIST));
+
+  __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
+  // No need for write barrier as JSFunction (v0) is in the new space.
+
+  __ sw(v0, ContextOperand(a2, Context::OPTIMIZED_FUNCTIONS_LIST));
+  // Store JSFunction (v0) into t0 before issuing write barrier as
+  // it clobbers all the registers passed.
+  __ mov(t0, v0);
+  __ RecordWriteContextSlot(
+      a2,
+      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
+      t0,
+      a1,
+      kRAHasNotBeenSaved,
+      kDontSaveFPRegs);
+
+  // Return result. The argument function info has been popped already.
+  __ Ret();
+
   // Create a new closure through the slower runtime call.
   __ bind(&gc);
   __ LoadRoot(t0, Heap::kFalseValueRootIndex);
@@ -7380,6 +7460,8 @@
   { REG(a2), REG(t2), REG(t5), EMIT_REMEMBERED_SET },
   // StoreArrayLiteralElementStub::Generate
   { REG(t1), REG(a0), REG(t2), EMIT_REMEMBERED_SET },
+  // FastNewClosureStub::Generate
+  { REG(a2), REG(t0), REG(a1), EMIT_REMEMBERED_SET },
   // Null termination.
   { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
 };
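
The lookup this stub performs in assembly scans the shared function info's optimized code map, a fixed array of (global context, optimized code, literals) triplets (SharedFunctionInfo::kEntryLength slots per entry), comparing contexts from the last entry backwards. A standalone sketch of the same search (not V8 code, reduced to a plain backwards scan):

    #include <cassert>
    #include <vector>

    // Standalone sketch (not V8 code) of the optimized-code-map scan the
    // MIPS stub performs: entries are (context, code, literals) triplets,
    // searched backwards by context.
    struct Entry { int context; int code; int literals; };

    int LookupOptimizedCode(const std::vector<Entry>& map, int context) {
      for (int i = static_cast<int>(map.size()) - 1; i >= 0; i--) {
        if (map[i].context == context) return map[i].code;  // cache hit
      }
      return -1;  // miss: fall back to the unoptimized code
    }

    int main() {
      std::vector<Entry> map;
      Entry e1 = { 10, 100, 0 };
      Entry e2 = { 20, 200, 0 };
      map.push_back(e1);
      map.push_back(e2);
      assert(LookupOptimizedCode(map, 20) == 200);
      assert(LookupOptimizedCode(map, 30) == -1);
      return 0;
    }
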
diff --git a/src/mips/deoptimizer-mips.cc b/src/mips/deoptimizer-mips.cc
index 62f3155..b57d2d3 100644
--- a/src/mips/deoptimizer-mips.cc
+++ b/src/mips/deoptimizer-mips.cc
@@ -48,6 +48,10 @@
 
   if (!function->IsOptimized()) return;
 
+  // The optimized code is going to be patched, so we cannot use it
+  // any more.  Play safe and reset the whole cache.
+  function->shared()->ClearOptimizedCodeMap();
+
   // Get the optimized code.
   Code* code = function->code();
   Address code_start_address = code->instruction_start();
@@ -96,8 +100,19 @@
   // ignore all slots that might have been recorded on it.
   isolate->heap()->mark_compact_collector()->InvalidateCode(code);
 
-  // Set the code for the function to non-optimized version.
-  function->ReplaceCode(function->shared()->code());
+  // Iterate over all the functions which share the same code object
+  // and make them use unoptimized version.
+  Context* context = function->context()->global_context();
+  Object* element = context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
+  SharedFunctionInfo* shared = function->shared();
+  while (!element->IsUndefined()) {
+    JSFunction* func = JSFunction::cast(element);
+    // Grab element before code replacement as ReplaceCode alters the list.
+    element = func->next_function_link();
+    if (func->code() == code) {
+      func->ReplaceCode(shared->code());
+    }
+  }
 
   if (FLAG_trace_deopt) {
     PrintF("[forced deoptimization: ");
@@ -229,9 +244,9 @@
   unsigned node_id = iterator.Next();
   USE(node_id);
   ASSERT(node_id == ast_id);
-  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
-  USE(function);
-  ASSERT(function == function_);
+  int closure_id = iterator.Next();
+  USE(closure_id);
+  ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
   unsigned height = iterator.Next();
   unsigned height_in_bytes = height * kPointerSize;
   USE(height_in_bytes);
@@ -342,8 +357,8 @@
   if (FLAG_trace_osr) {
     PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
            ok ? "finished" : "aborted",
-           reinterpret_cast<intptr_t>(function));
-    function->PrintName();
+           reinterpret_cast<intptr_t>(function_));
+    function_->PrintName();
     PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
   }
 }
@@ -573,7 +588,15 @@
                                    int frame_index) {
   // Read the ast node id, function, and frame height for this output frame.
   int node_id = iterator->Next();
-  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+  JSFunction* function;
+  if (frame_index != 0) {
+    function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+  } else {
+    int closure_id = iterator->Next();
+    USE(closure_id);
+    ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
+    function = function_;
+  }
   unsigned height = iterator->Next();
   unsigned height_in_bytes = height * kPointerSize;
   if (FLAG_trace_deopt) {
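
DeoptimizeFunction now reverts every closure on the context's OPTIMIZED_FUNCTIONS_LIST that shares the patched code, and because ReplaceCode alters the list links, the next element is read before each replacement. A standalone sketch of that walk-while-unlinking pattern (not V8 code):

    #include <cassert>
    #include <cstddef>

    // Standalone sketch (not V8 code): processing a node unlinks it, as
    // ReplaceCode does, so the next pointer must be grabbed first.
    struct Function {
      Function* next;
      bool optimized;
    };

    void ReplaceCode(Function* f) {
      f->optimized = false;
      f->next = NULL;  // mutation that would break a naive walk
    }

    int main() {
      Function c = { NULL, true };
      Function b = { &c, true };
      Function a = { &b, true };
      for (Function* element = &a; element != NULL; ) {
        Function* next = element->next;  // grab before code replacement
        ReplaceCode(element);
        element = next;
      }
      assert(!a.optimized && !b.optimized && !c.optimized);
      return 0;
    }
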
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 263656e..079e8b3 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -332,7 +332,7 @@
   }
   if (isolate()->IsDebuggerActive()) {
     // Detect debug break requests as soon as possible.
-    reset_value = 10;
+    reset_value = FLAG_interrupt_budget >> 4;
   }
   __ li(a2, Operand(profiling_counter_));
   __ li(a3, Operand(Smi::FromInt(reset_value)));
@@ -1614,7 +1614,7 @@
   // marked expressions, no store code is emitted.
   expr->CalculateEmitStore(zone());
 
-  AccessorTable accessor_table(isolate()->zone());
+  AccessorTable accessor_table(zone());
   for (int i = 0; i < expr->properties()->length(); i++) {
     ObjectLiteral::Property* property = expr->properties()->at(i);
     if (property->IsCompileTimeValue()) continue;
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index 67dbe69..2a022b5 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -446,7 +446,10 @@
   int height = translation_size - environment->parameter_count();
 
   WriteTranslation(environment->outer(), translation);
-  int closure_id = DefineDeoptimizationLiteral(environment->closure());
+  int closure_id = *info()->closure() != *environment->closure()
+      ? DefineDeoptimizationLiteral(environment->closure())
+      : Translation::kSelfLiteralId;
+
   switch (environment->frame_type()) {
     case JS_FUNCTION:
       translation->BeginJSFrame(environment->ast_id(), closure_id, height);
@@ -2847,7 +2850,7 @@
 
 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   Register result = ToRegister(instr->result());
-  __ LoadHeapObject(result, instr->hydrogen()->closure());
+  __ lw(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
 }
 
 
diff --git a/src/mips/lithium-codegen-mips.h b/src/mips/lithium-codegen-mips.h
index 32a696b..148273d 100644
--- a/src/mips/lithium-codegen-mips.h
+++ b/src/mips/lithium-codegen-mips.h
@@ -43,26 +43,25 @@
 
 class LCodeGen BASE_EMBEDDED {
  public:
-  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info,
-           Zone* zone)
-      : chunk_(chunk),
+  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+      : zone_(info->zone()),
+        chunk_(chunk),
         masm_(assembler),
         info_(info),
         current_block_(-1),
         current_instruction_(-1),
         instructions_(chunk->instructions()),
-        deoptimizations_(4, zone),
-        deopt_jump_table_(4, zone),
-        deoptimization_literals_(8, zone),
+        deoptimizations_(4, info->zone()),
+        deopt_jump_table_(4, info->zone()),
+        deoptimization_literals_(8, info->zone()),
         inlined_function_count_(0),
         scope_(info->scope()),
         status_(UNUSED),
-        translations_(zone),
-        deferred_(8, zone),
+        translations_(info->zone()),
+        deferred_(8, info->zone()),
         osr_pc_offset_(-1),
         last_lazy_deopt_pc_(0),
-        safepoints_(zone),
-        zone_(zone),
+        safepoints_(info->zone()),
         resolver_(this),
         expected_safepoint_kind_(Safepoint::kSimple) {
     PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -354,6 +353,7 @@
 
   void EnsureSpaceForLazyDeopt();
 
+  Zone* zone_;
   LChunk* const chunk_;
   MacroAssembler* const masm_;
   CompilationInfo* const info_;
@@ -376,8 +376,6 @@
   // itself is emitted at the end of the generated code.
   SafepointTableBuilder safepoints_;
 
-  Zone* zone_;
-
   // Compiler from a set of parallel moves to a sequential list of moves.
   LGapResolver resolver_;
 
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index 51b3a38..1a6bc21 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -4459,7 +4459,8 @@
                 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
   size_t offset = expected_kind * kPointerSize +
       FixedArrayBase::kHeaderSize;
-  Branch(no_map_match, ne, map_in_out, Operand(scratch));
+  lw(at, FieldMemOperand(scratch, offset));
+  Branch(no_map_match, ne, map_in_out, Operand(at));
 
   // Use the transitioned cached map.
   offset = transitioned_kind * kPointerSize +
diff --git a/src/mips/regexp-macro-assembler-mips.h b/src/mips/regexp-macro-assembler-mips.h
index d3fff0d..5446f52 100644
--- a/src/mips/regexp-macro-assembler-mips.h
+++ b/src/mips/regexp-macro-assembler-mips.h
@@ -38,13 +38,7 @@
 namespace v8 {
 namespace internal {
 
-#ifdef V8_INTERPRETED_REGEXP
-class RegExpMacroAssemblerMIPS: public RegExpMacroAssembler {
- public:
-  RegExpMacroAssemblerMIPS();
-  virtual ~RegExpMacroAssemblerMIPS();
-};
-#else  // V8_INTERPRETED_REGEXP
+#ifndef V8_INTERPRETED_REGEXP
 class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
  public:
   RegExpMacroAssemblerMIPS(Mode mode, int registers_to_save, Zone* zone);
diff --git a/src/mirror-debugger.js b/src/mirror-debugger.js
index c7f0dcc..f71483a 100644
--- a/src/mirror-debugger.js
+++ b/src/mirror-debugger.js
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -1750,6 +1750,15 @@
 };
 
 
+FrameMirror.prototype.restart = function() {
+  var result = %LiveEditRestartFrame(this.break_id_, this.index_);
+  if (IS_UNDEFINED(result)) {
+    result = "Failed to find requested frame";
+  }
+  return result;
+};
+
+
 FrameMirror.prototype.toText = function(opt_locals) {
   var result = '';
   result += '#' + (this.index() <= 9 ? '0' : '') + this.index();
diff --git a/src/mksnapshot.cc b/src/mksnapshot.cc
index e426a58..275c8ac 100644
--- a/src/mksnapshot.cc
+++ b/src/mksnapshot.cc
@@ -25,6 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#include <errno.h>
+#include <stdio.h>
 #ifdef COMPRESS_STARTUP_DATA_BZ2
 #include <bzlib.h>
 #endif
@@ -33,6 +35,7 @@
 #include "v8.h"
 
 #include "bootstrapper.h"
+#include "flags.h"
 #include "natives.h"
 #include "platform.h"
 #include "serialize.h"
@@ -308,6 +311,62 @@
             "\nException thrown while compiling natives - see above.\n\n");
     exit(1);
   }
+  if (i::FLAG_extra_code != NULL) {
+    context->Enter();
+    // Capture up to 100 stack frames if an uncaught exception is thrown.
+    V8::SetCaptureStackTraceForUncaughtExceptions(true, 100);
+    HandleScope scope;
+    const char* name = i::FLAG_extra_code;
+    FILE* file = i::OS::FOpen(name, "rb");
+    if (file == NULL) {
+      fprintf(stderr, "Failed to open '%s': errno %d\n", name, errno);
+      exit(1);
+    }
+
+    fseek(file, 0, SEEK_END);
+    int size = ftell(file);
+    rewind(file);
+
+    char* chars = new char[size + 1];
+    chars[size] = '\0';
+    for (int i = 0; i < size;) {
+      int read = static_cast<int>(fread(&chars[i], 1, size - i, file));
+      if (read < 0) {
+        fprintf(stderr, "Failed to read '%s': errno %d\n", name, errno);
+        exit(1);
+      }
+      i += read;
+    }
+    fclose(file);
+    Local<String> source = String::New(chars);
+    TryCatch try_catch;
+    Local<Script> script = Script::Compile(source);
+    if (try_catch.HasCaught()) {
+      fprintf(stderr, "Failure compiling '%s' (see above)\n", name);
+      exit(1);
+    }
+    script->Run();
+    if (try_catch.HasCaught()) {
+      fprintf(stderr, "Failure running '%s'\n", name);
+      Local<Message> message = try_catch.Message();
+      Local<String> message_string = message->Get();
+      Local<String> message_line = message->GetSourceLine();
+      int len = 2 + message_string->Utf8Length() + message_line->Utf8Length();
+      char* buf = new char[len];
+      message_string->WriteUtf8(buf);
+      fprintf(stderr, "%s at line %d\n", buf, message->GetLineNumber());
+      message_line->WriteUtf8(buf);
+      fprintf(stderr, "%s\n", buf);
+      int from = message->GetStartColumn();
+      int to = message->GetEndColumn();
+      int i;
+      for (i = 0; i < from; i++) fprintf(stderr, " ");
+      for ( ; i <= to; i++) fprintf(stderr, "^");
+      fprintf(stderr, "\n");
+      exit(1);
+    }
+    context->Exit();
+  }
   // Make sure all builtin scripts are cached.
   { HandleScope scope;
     for (int i = 0; i < i::Natives::GetBuiltinsCount(); i++) {
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 5aac503..cefa3f8 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -502,6 +502,7 @@
   CHECK(IsSharedFunctionInfo());
   VerifyObjectField(kNameOffset);
   VerifyObjectField(kCodeOffset);
+  VerifyObjectField(kOptimizedCodeMapOffset);
   VerifyObjectField(kScopeInfoOffset);
   VerifyObjectField(kInstanceClassNameOffset);
   VerifyObjectField(kFunctionDataOffset);
@@ -921,6 +922,11 @@
 
 
 bool DescriptorArray::IsConsistentWithBackPointers(Map* current_map) {
+  Map* elements_transition = elements_transition_map();
+  if (elements_transition != NULL &&
+      !CheckOneBackPointer(current_map, elements_transition)) {
+    return false;
+  }
   for (int i = 0; i < number_of_descriptors(); ++i) {
     switch (GetType(i)) {
       case MAP_TRANSITION:
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 4f66af2..2cdfad6 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -1874,15 +1874,14 @@
 
 
 bool DescriptorArray::IsEmpty() {
-  ASSERT(this->IsSmi() ||
-         this->MayContainTransitions() ||
+  ASSERT(length() >= kFirstIndex ||
          this == HEAP->empty_descriptor_array());
-  return this->IsSmi() || length() < kFirstIndex;
+  return length() < kFirstIndex;
 }
 
 
 bool DescriptorArray::MayContainTransitions() {
-  return length() >= kTransitionsIndex;
+  return !IsEmpty();
 }
 
 
@@ -1958,6 +1957,11 @@
 }
 
 
+void DescriptorArray::ClearElementsTransition() {
+  WRITE_FIELD(this, kTransitionsOffset, Smi::FromInt(0));
+}
+
+
 Object** DescriptorArray::GetKeySlot(int descriptor_number) {
   ASSERT(descriptor_number < number_of_descriptors());
   return HeapObject::RawField(
@@ -3648,6 +3652,8 @@
 #endif
 
 ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
+ACCESSORS(SharedFunctionInfo, optimized_code_map, Object,
+          kOptimizedCodeMapOffset)
 ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
 ACCESSORS(SharedFunctionInfo, initial_map, Object, kInitialMapOffset)
 ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
@@ -3682,6 +3688,10 @@
                kAllowLazyCompilation)
 BOOL_ACCESSORS(SharedFunctionInfo,
                compiler_hints,
+               allows_lazy_compilation_without_context,
+               kAllowLazyCompilationWithoutContext)
+BOOL_ACCESSORS(SharedFunctionInfo,
+               compiler_hints,
                uses_arguments,
                kUsesArguments)
 BOOL_ACCESSORS(SharedFunctionInfo,
@@ -3859,6 +3869,17 @@
                kDontOptimize)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_inline, kDontInline)
 
+void SharedFunctionInfo::BeforeVisitingPointers() {
+  if (IsInobjectSlackTrackingInProgress()) DetachInitialMap();
+
+  // Flush optimized code map on major GC.
+  // Note: we may experiment with rebuilding it or retaining entries
+  // which should survive as we iterate through optimized functions
+  // anyway.
+  set_optimized_code_map(Smi::FromInt(0));
+}
+
+
 ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
 ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
 
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index b886168..6714cdc 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -761,8 +761,10 @@
   instance_class_name()->Print(out);
   PrintF(out, "\n - code = ");
   code()->ShortPrint(out);
-  PrintF(out, "\n - source code = ");
-  GetSourceCode()->ShortPrint(out);
+  if (HasSourceCode()) {
+    PrintF(out, "\n - source code = ");
+    GetSourceCode()->ShortPrint(out);
+  }
   // Script files are often large, hard to read.
   // PrintF(out, "\n - script =");
   // script()->Print(out);
diff --git a/src/objects.cc b/src/objects.cc
index 712366f..9dbe849 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -763,7 +763,6 @@
 
 bool Object::SameValue(Object* other) {
   if (other == this) return true;
-  if (!IsHeapObject() || !other->IsHeapObject()) return false;
 
   // The object is either a number, a string, an odd-ball,
   // a real JS object, or a Harmony proxy.
@@ -7353,6 +7352,11 @@
   DescriptorArray* d = DescriptorArray::cast(
       *RawField(this, Map::kInstanceDescriptorsOrBitField3Offset));
   if (d->IsEmpty()) return;
+  Map* elements_transition = d->elements_transition_map();
+  if (elements_transition != NULL &&
+      ClearBackPointer(heap, elements_transition)) {
+    d->ClearElementsTransition();
+  }
   Smi* NullDescriptorDetails =
     PropertyDetails(NONE, NULL_DESCRIPTOR).AsSmi();
   for (int i = 0; i < d->number_of_descriptors(); ++i) {
@@ -7388,7 +7392,7 @@
             } else {
               keep_entry = true;
             }
-          } else if (!getter->IsTheHole()) {
+          } else if (!setter->IsTheHole()) {
             keep_entry = true;
           }
         } else {
@@ -7468,12 +7472,6 @@
 }
 
 
-bool SharedFunctionInfo::EnsureCompiled(Handle<SharedFunctionInfo> shared,
-                                        ClearExceptionFlag flag) {
-  return shared->is_compiled() || CompileLazy(shared, flag);
-}
-
-
 static bool CompileLazyHelper(CompilationInfo* info,
                               ClearExceptionFlag flag) {
   // Compile the source information to a code object.
@@ -7490,11 +7488,60 @@
 
 bool SharedFunctionInfo::CompileLazy(Handle<SharedFunctionInfo> shared,
                                      ClearExceptionFlag flag) {
-  CompilationInfo info(shared);
+  ASSERT(shared->allows_lazy_compilation_without_context());
+  CompilationInfoWithZone info(shared);
   return CompileLazyHelper(&info, flag);
 }
 
 
+void SharedFunctionInfo::ClearOptimizedCodeMap() {
+  set_optimized_code_map(Smi::FromInt(0));
+}
+
+
+void SharedFunctionInfo::AddToOptimizedCodeMap(
+    Handle<SharedFunctionInfo> shared,
+    Handle<Context> global_context,
+    Handle<Code> code,
+    Handle<FixedArray> literals) {
+  ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+  ASSERT(global_context->IsGlobalContext());
+  STATIC_ASSERT(kEntryLength == 3);
+  Object* value = shared->optimized_code_map();
+  Handle<FixedArray> new_code_map;
+  if (value->IsSmi()) {
+    // No optimized code map.
+    ASSERT_EQ(0, Smi::cast(value)->value());
+    // Create 3 entries per context {context, code, literals}.
+    new_code_map = FACTORY->NewFixedArray(kEntryLength);
+    new_code_map->set(0, *global_context);
+    new_code_map->set(1, *code);
+    new_code_map->set(2, *literals);
+  } else {
+    // Copy old map and append one new entry.
+    Handle<FixedArray> old_code_map(FixedArray::cast(value));
+    ASSERT_EQ(-1, shared->SearchOptimizedCodeMap(*global_context));
+    int old_length = old_code_map->length();
+    int new_length = old_length + kEntryLength;
+    new_code_map = FACTORY->NewFixedArray(new_length);
+    old_code_map->CopyTo(0, *new_code_map, 0, old_length);
+    new_code_map->set(old_length, *global_context);
+    new_code_map->set(old_length + 1, *code);
+    new_code_map->set(old_length + 2, *literals);
+  }
+#ifdef DEBUG
+  for (int i = 0; i < new_code_map->length(); i += kEntryLength) {
+    ASSERT(new_code_map->get(i)->IsGlobalContext());
+    ASSERT(new_code_map->get(i + 1)->IsCode());
+    ASSERT(Code::cast(new_code_map->get(i + 1))->kind() ==
+           Code::OPTIMIZED_FUNCTION);
+    ASSERT(new_code_map->get(i + 2)->IsFixedArray());
+  }
+#endif
+  shared->set_optimized_code_map(*new_code_map);
+}
+
+
 bool JSFunction::CompileLazy(Handle<JSFunction> function,
                              ClearExceptionFlag flag) {
   bool result = true;
@@ -7502,7 +7549,8 @@
     function->ReplaceCode(function->shared()->code());
     function->shared()->set_code_age(0);
   } else {
-    CompilationInfo info(function);
+    ASSERT(function->shared()->allows_lazy_compilation());
+    CompilationInfoWithZone info(function);
     result = CompileLazyHelper(&info, flag);
     ASSERT(!result || function->is_compiled());
   }
@@ -7513,12 +7561,18 @@
 bool JSFunction::CompileOptimized(Handle<JSFunction> function,
                                   int osr_ast_id,
                                   ClearExceptionFlag flag) {
-  CompilationInfo info(function);
+  CompilationInfoWithZone info(function);
   info.SetOptimizing(osr_ast_id);
   return CompileLazyHelper(&info, flag);
 }
 
 
+bool JSFunction::EnsureCompiled(Handle<JSFunction> function,
+                                ClearExceptionFlag flag) {
+  return function->is_compiled() || CompileLazy(function, flag);
+}
+
+
 bool JSFunction::IsInlineable() {
   if (IsBuiltin()) return false;
   SharedFunctionInfo* shared_info = shared();
@@ -8039,6 +8093,22 @@
 }
 
 
+int SharedFunctionInfo::SearchOptimizedCodeMap(Context* global_context) {
+  ASSERT(global_context->IsGlobalContext());
+  Object* value = optimized_code_map();
+  if (!value->IsSmi()) {
+    FixedArray* optimized_code_map = FixedArray::cast(value);
+    int length = optimized_code_map->length();
+    for (int i = 0; i < length; i += 3) {
+      if (optimized_code_map->get(i) == global_context) {
+        return i + 1;
+      }
+    }
+  }
+  return -1;
+}
+
+
 void SharedFunctionInfo::SharedFunctionInfoIterateBody(ObjectVisitor* v) {
   v->VisitSharedFunctionInfo(this);
   SharedFunctionInfo::BodyDescriptor::IterateBody(this, v);
@@ -8330,11 +8400,14 @@
         case Translation::JS_FRAME: {
           int ast_id = iterator.Next();
           int function_id = iterator.Next();
-          JSFunction* function =
-              JSFunction::cast(LiteralArray()->get(function_id));
           unsigned height = iterator.Next();
           PrintF(out, "{ast_id=%d, function=", ast_id);
-          function->PrintName(out);
+          if (function_id != Translation::kSelfLiteralId) {
+            Object* function = LiteralArray()->get(function_id);
+            JSFunction::cast(function)->PrintName(out);
+          } else {
+            PrintF(out, "<self>");
+          }
           PrintF(out, ", height=%u}", height);
           break;
         }
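
The optimized code map added above is a flat array of {context, code, literals} triples: flushed on major GC, searched linearly, and grown by copying. A minimal stand-alone sketch of the layout and the two operations, using plain C++ stand-ins rather than V8's tagged types (Obj, Search and Append are illustrative names, not V8 API):

    #include <cstddef>
    #include <vector>

    using Obj = const void*;                 // stand-in for a tagged pointer
    constexpr std::size_t kEntryLength = 3;  // {context, code, literals}

    // Mirrors SearchOptimizedCodeMap: returns the index of the code slot for
    // the given context (context at index - 1, literals at index + 1), or -1.
    int Search(const std::vector<Obj>& map, Obj global_context) {
      for (std::size_t i = 0; i + 2 < map.size(); i += kEntryLength) {
        if (map[i] == global_context) return static_cast<int>(i + 1);
      }
      return -1;
    }

    // Mirrors AddToOptimizedCodeMap's growth: fixed arrays cannot be resized
    // in place, so appending copies the old map and adds one triple.
    std::vector<Obj> Append(std::vector<Obj> map, Obj ctx, Obj code, Obj lits) {
      map.push_back(ctx);
      map.push_back(code);
      map.push_back(lits);
      return map;
    }
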
diff --git a/src/objects.h b/src/objects.h
index 88e3243..692df44 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -2438,6 +2438,7 @@
   inline bool MayContainTransitions();
 
   DECL_ACCESSORS(elements_transition_map, Map)
+  inline void ClearElementsTransition();
 
   // Returns the number of descriptors in the array.
   int number_of_descriptors() {
@@ -5217,6 +5218,25 @@
   // [code]: Function code.
   DECL_ACCESSORS(code, Code)
 
+  // [optimized_code_map]: Map from global context to the optimized code
+  // and literals array for that context, or Smi 0 if the map is empty.
+  DECL_ACCESSORS(optimized_code_map, Object)
+
+  // Returns the index of the entry with the specified context: the context
+  // is then at index - 1, the code at index, and the literals array at
+  // index + 1. Returns -1 when no matching entry is found.
+  int SearchOptimizedCodeMap(Context* global_context);
+
+  // Clear optimized code map.
+  void ClearOptimizedCodeMap();
+
+  // Add a new entry to the optimized code map.
+  static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
+                                    Handle<Context> global_context,
+                                    Handle<Code> code,
+                                    Handle<FixedArray> literals);
+  static const int kEntryLength = 3;
+
   // [scope_info]: Scope info.
   DECL_ACCESSORS(scope_info, ScopeInfo)
 
@@ -5324,6 +5344,10 @@
   // IsInobjectSlackTrackingInProgress is false after this call.
   void CompleteInobjectSlackTracking();
 
+  // Invoked before pointers in SharedFunctionInfo are being marked.
+  // Also clears the optimized code map.
+  inline void BeforeVisitingPointers();
+
   // Clears the initial_map before the GC marking phase to ensure the reference
   // is weak. IsInobjectSlackTrackingInProgress is false after this call.
   void DetachInitialMap();
@@ -5436,6 +5460,12 @@
   // when doing GC if we expect that the function will no longer be used.
   DECL_BOOLEAN_ACCESSORS(allows_lazy_compilation)
 
+  // Indicates if this function can be lazily compiled without a context.
+  // This is used to determine if we can force compilation without reaching
+  // the function through program execution but through other means (e.g. heap
+  // iteration by the debugger).
+  DECL_BOOLEAN_ACCESSORS(allows_lazy_compilation_without_context)
+
   // Indicates how many full GCs this function has survived with assigned
   // code object. Used to determine when it is relatively safe to flush
   // this code object and replace it with lazy compilation stub.
@@ -5582,10 +5612,9 @@
 
   void ResetForNewContext(int new_ic_age);
 
-  // Helpers to compile the shared code.  Returns true on success, false on
-  // failure (e.g., stack overflow during compilation).
-  static bool EnsureCompiled(Handle<SharedFunctionInfo> shared,
-                             ClearExceptionFlag flag);
+  // Helper to compile the shared code.  Returns true on success, false on
+  // failure (e.g., stack overflow during compilation). This is only used by
+  // the debugger; compiling without a context is not otherwise possible.
   static bool CompileLazy(Handle<SharedFunctionInfo> shared,
                           ClearExceptionFlag flag);
 
@@ -5601,7 +5630,8 @@
   // Pointer fields.
   static const int kNameOffset = HeapObject::kHeaderSize;
   static const int kCodeOffset = kNameOffset + kPointerSize;
-  static const int kScopeInfoOffset = kCodeOffset + kPointerSize;
+  static const int kOptimizedCodeMapOffset = kCodeOffset + kPointerSize;
+  static const int kScopeInfoOffset = kOptimizedCodeMapOffset + kPointerSize;
   static const int kConstructStubOffset = kScopeInfoOffset + kPointerSize;
   static const int kInstanceClassNameOffset =
       kConstructStubOffset + kPointerSize;
@@ -5719,6 +5749,7 @@
   enum CompilerHints {
     kHasOnlySimpleThisPropertyAssignments,
     kAllowLazyCompilation,
+    kAllowLazyCompilationWithoutContext,
     kLiveObjectsMayExist,
     kCodeAgeShift,
     kOptimizationDisabled = kCodeAgeShift + kCodeAgeSize,
@@ -5867,6 +5898,8 @@
 
   // Helpers to compile this function.  Returns true on success, false on
   // failure (e.g., stack overflow during compilation).
+  static bool EnsureCompiled(Handle<JSFunction> function,
+                             ClearExceptionFlag flag);
   static bool CompileLazy(Handle<JSFunction> function,
                           ClearExceptionFlag flag);
   static bool CompileOptimized(Handle<JSFunction> function,
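
The kAllowLazyCompilationWithoutContext hint added above lives in a single compiler-hints word addressed by bit index; the BOOL_ACCESSORS macros generate getters and setters over it. A hand-rolled sketch of that pattern (illustrative subset, not the generated V8 code):

    #include <cstdint>

    // Bit indices into the hints word, matching the enum ordering above.
    enum CompilerHints {
      kAllowLazyCompilation = 1,
      kAllowLazyCompilationWithoutContext = 2,
    };

    struct Hints {
      std::uint32_t bits = 0;
      bool get(int index) const { return (bits >> index) & 1u; }
      void set(int index, bool value) {
        if (value) bits |= (1u << index);
        else bits &= ~(1u << index);
      }
    };
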
diff --git a/src/parser.cc b/src/parser.cc
index 7c51b69..6437241 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -493,7 +493,7 @@
       outer_function_state_(parser->current_function_state_),
       outer_scope_(parser->top_scope_),
       saved_ast_node_id_(isolate->ast_node_id()),
-      factory_(isolate) {
+      factory_(isolate, parser->zone()) {
   parser->top_scope_ = scope;
   parser->current_function_state_ = this;
   isolate->set_ast_node_id(AstNode::kDeclarationsId + 1);
@@ -532,14 +532,13 @@
 // ----------------------------------------------------------------------------
 // Implementation of Parser
 
-Parser::Parser(Handle<Script> script,
+Parser::Parser(CompilationInfo* info,
                int parser_flags,
                v8::Extension* extension,
-               ScriptDataImpl* pre_data,
-               Zone* zone)
-    : isolate_(script->GetIsolate()),
-      symbol_cache_(pre_data ? pre_data->symbol_count() : 0, zone),
-      script_(script),
+               ScriptDataImpl* pre_data)
+    : isolate_(info->isolate()),
+      symbol_cache_(pre_data ? pre_data->symbol_count() : 0, info->zone()),
+      script_(info->script()),
       scanner_(isolate_->unicode_cache()),
       reusable_preparser_(NULL),
       top_scope_(NULL),
@@ -553,7 +552,9 @@
       allow_modules_((parser_flags & kAllowModules) != 0),
       stack_overflow_(false),
       parenthesized_function_(false),
-      zone_(zone) {
+      zone_(info->zone()),
+      info_(info) {
+  ASSERT(!script_.is_null());
   isolate_->set_ast_node_id(0);
   if ((parser_flags & kLanguageModeMask) == EXTENDED_MODE) {
     scanner().SetHarmonyScoping(true);
@@ -564,8 +565,8 @@
 }
 
 
-FunctionLiteral* Parser::ParseProgram(CompilationInfo* info) {
-  ZoneScope zone_scope(isolate(), DONT_DELETE_ON_EXIT);
+FunctionLiteral* Parser::ParseProgram() {
+  ZoneScope zone_scope(zone(), DONT_DELETE_ON_EXIT);
 
   HistogramTimerScope timer(isolate()->counters()->parse());
   Handle<String> source(String::cast(script_->source()));
@@ -581,11 +582,11 @@
     ExternalTwoByteStringUtf16CharacterStream stream(
         Handle<ExternalTwoByteString>::cast(source), 0, source->length());
     scanner_.Initialize(&stream);
-    return DoParseProgram(info, source, &zone_scope);
+    return DoParseProgram(info(), source, &zone_scope);
   } else {
     GenericStringUtf16CharacterStream stream(source, 0, source->length());
     scanner_.Initialize(&stream);
-    return DoParseProgram(info, source, &zone_scope);
+    return DoParseProgram(info(), source, &zone_scope);
   }
 }
 
@@ -662,13 +663,13 @@
 }
 
 
-FunctionLiteral* Parser::ParseLazy(CompilationInfo* info) {
-  ZoneScope zone_scope(isolate(), DONT_DELETE_ON_EXIT);
+FunctionLiteral* Parser::ParseLazy() {
+  ZoneScope zone_scope(zone(), DONT_DELETE_ON_EXIT);
   HistogramTimerScope timer(isolate()->counters()->parse_lazy());
   Handle<String> source(String::cast(script_->source()));
   isolate()->counters()->total_parse_size()->Increment(source->length());
 
-  Handle<SharedFunctionInfo> shared_info = info->shared_info();
+  Handle<SharedFunctionInfo> shared_info = info()->shared_info();
   // Initialize parser state.
   source->TryFlatten();
   if (source->IsExternalTwoByteString()) {
@@ -676,22 +677,21 @@
         Handle<ExternalTwoByteString>::cast(source),
         shared_info->start_position(),
         shared_info->end_position());
-    FunctionLiteral* result = ParseLazy(info, &stream, &zone_scope);
+    FunctionLiteral* result = ParseLazy(&stream, &zone_scope);
     return result;
   } else {
     GenericStringUtf16CharacterStream stream(source,
                                              shared_info->start_position(),
                                              shared_info->end_position());
-    FunctionLiteral* result = ParseLazy(info, &stream, &zone_scope);
+    FunctionLiteral* result = ParseLazy(&stream, &zone_scope);
     return result;
   }
 }
 
 
-FunctionLiteral* Parser::ParseLazy(CompilationInfo* info,
-                                   Utf16CharacterStream* source,
+FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source,
                                    ZoneScope* zone_scope) {
-  Handle<SharedFunctionInfo> shared_info = info->shared_info();
+  Handle<SharedFunctionInfo> shared_info = info()->shared_info();
   scanner_.Initialize(source);
   ASSERT(top_scope_ == NULL);
   ASSERT(target_stack_ == NULL);
@@ -708,16 +708,16 @@
   {
     // Parse the function literal.
     Scope* scope = NewScope(top_scope_, GLOBAL_SCOPE);
-    info->SetGlobalScope(scope);
-    if (!info->closure().is_null()) {
-      scope = Scope::DeserializeScopeChain(info->closure()->context(), scope,
+    info()->SetGlobalScope(scope);
+    if (!info()->closure().is_null()) {
+      scope = Scope::DeserializeScopeChain(info()->closure()->context(), scope,
                                            zone());
     }
     FunctionState function_state(this, scope, isolate());
-    ASSERT(scope->language_mode() != STRICT_MODE || !info->is_classic_mode());
+    ASSERT(scope->language_mode() != STRICT_MODE || !info()->is_classic_mode());
     ASSERT(scope->language_mode() != EXTENDED_MODE ||
-           info->is_extended_mode());
-    ASSERT(info->language_mode() == shared_info->language_mode());
+           info()->is_extended_mode());
+    ASSERT(info()->language_mode() == shared_info->language_mode());
     scope->SetLanguageMode(shared_info->language_mode());
     FunctionLiteral::Type type = shared_info->is_expression()
         ? (shared_info->is_anonymous()
@@ -1251,7 +1251,7 @@
   //    'module' Identifier Module
 
   // Create new block with one expected declaration.
-  Block* block = factory()->NewBlock(NULL, 1, true, zone());
+  Block* block = factory()->NewBlock(NULL, 1, true);
   Handle<String> name = ParseIdentifier(CHECK_OK);
 
 #ifdef DEBUG
@@ -1314,7 +1314,7 @@
   //    '{' ModuleElement '}'
 
   // Construct block expecting 16 statements.
-  Block* body = factory()->NewBlock(NULL, 16, false, zone());
+  Block* body = factory()->NewBlock(NULL, 16, false);
 #ifdef DEBUG
   if (FLAG_print_interface_details) PrintF("# Literal ");
 #endif
@@ -1468,7 +1468,7 @@
 
   // Generate a separate declaration for each identifier.
   // TODO(ES6): once we implement destructuring, make that one declaration.
-  Block* block = factory()->NewBlock(NULL, 1, true, zone());
+  Block* block = factory()->NewBlock(NULL, 1, true);
   for (int i = 0; i < names.length(); ++i) {
 #ifdef DEBUG
     if (FLAG_print_interface_details)
@@ -1683,7 +1683,7 @@
       // one must take great care not to treat it as a
       // fall-through. It is much easier just to wrap the entire
       // try-statement in a statement block and put the labels there
-      Block* result = factory()->NewBlock(labels, 1, false, zone());
+      Block* result = factory()->NewBlock(labels, 1, false);
       Target target(&this->target_stack_, result);
       TryStatement* statement = ParseTryStatement(CHECK_OK);
       if (statement) {
@@ -1996,7 +1996,7 @@
   // (ECMA-262, 3rd, 12.2)
   //
   // Construct block expecting 16 statements.
-  Block* result = factory()->NewBlock(labels, 16, false, zone());
+  Block* result = factory()->NewBlock(labels, 16, false);
   Target target(&this->target_stack_, result);
   Expect(Token::LBRACE, CHECK_OK);
   InitializationBlockFinder block_finder(top_scope_, target_stack_);
@@ -2019,7 +2019,7 @@
   //   '{' BlockElement* '}'
 
   // Construct block expecting 16 statements.
-  Block* body = factory()->NewBlock(labels, 16, false, zone());
+  Block* body = factory()->NewBlock(labels, 16, false);
   Scope* block_scope = NewScope(top_scope_, BLOCK_SCOPE);
 
   // Parse the statements and collect escaping labels.
@@ -2176,7 +2176,7 @@
   // is inside an initializer block, it is ignored.
   //
   // Create new block with one expected declaration.
-  Block* block = factory()->NewBlock(NULL, 1, true, zone());
+  Block* block = factory()->NewBlock(NULL, 1, true);
   int nvars = 0;  // the number of variables declared
   Handle<String> name;
   do {
@@ -2787,7 +2787,7 @@
     TryCatchStatement* statement = factory()->NewTryCatchStatement(
         index, try_block, catch_scope, catch_variable, catch_block);
     statement->set_escaping_targets(try_collector.targets());
-    try_block = factory()->NewBlock(NULL, 1, false, zone());
+    try_block = factory()->NewBlock(NULL, 1, false);
     try_block->AddStatement(statement, zone());
     catch_block = NULL;  // Clear to indicate it's been handled.
   }
@@ -2893,7 +2893,7 @@
 
         Statement* body = ParseStatement(NULL, CHECK_OK);
         loop->Initialize(each, enumerable, body);
-        Block* result = factory()->NewBlock(NULL, 2, false, zone());
+        Block* result = factory()->NewBlock(NULL, 2, false);
         result->AddStatement(variable_statement, zone());
         result->AddStatement(loop, zone());
         top_scope_ = saved_scope;
@@ -2939,7 +2939,7 @@
         Expect(Token::RPAREN, CHECK_OK);
 
         Statement* body = ParseStatement(NULL, CHECK_OK);
-        Block* body_block = factory()->NewBlock(NULL, 3, false, zone());
+        Block* body_block = factory()->NewBlock(NULL, 3, false);
         Assignment* assignment = factory()->NewAssignment(
             Token::ASSIGN, each, temp_proxy, RelocInfo::kNoPosition);
         Statement* assignment_statement =
@@ -3028,7 +3028,7 @@
     //     for (; c; n) b
     //   }
     ASSERT(init != NULL);
-    Block* result = factory()->NewBlock(NULL, 2, false, zone());
+    Block* result = factory()->NewBlock(NULL, 2, false);
     result->AddStatement(init, zone());
     result->AddStatement(loop, zone());
     result->set_scope(for_scope);
@@ -4521,7 +4521,6 @@
     // The heuristics are:
     // - It must not have been prohibited by the caller to Parse (some callers
     //   need a full AST).
-    // - The outer scope must be trivial (only global variables in scope).
     // - The function mustn't be a function expression with an open parenthesis
     //   before; we consider that a hint that the function will be called
     //   immediately, and it would be a waste of time to make it lazily
@@ -4529,8 +4528,6 @@
     // These are all things we can know at this point, without looking at the
     // function itself.
     bool is_lazily_compiled = (mode() == PARSE_LAZILY &&
-                               top_scope_->outer_scope()->is_global_scope() &&
-                               top_scope_->HasTrivialOuterContext() &&
                                !parenthesized_function_);
     parenthesized_function_ = false;  // The bit was set for this function only.
 
@@ -5069,8 +5066,10 @@
 
 RegExpParser::RegExpParser(FlatStringReader* in,
                            Handle<String>* error,
-                           bool multiline)
+                           bool multiline,
+                           Zone* zone)
     : isolate_(Isolate::Current()),
+      zone_(zone),
       error_(error),
       captures_(NULL),
       in_(in),
@@ -5101,7 +5100,7 @@
     StackLimitCheck check(isolate());
     if (check.HasOverflowed()) {
       ReportError(CStrVector(Isolate::kStackOverflowMessage));
-    } else if (isolate()->zone()->excess_allocation()) {
+    } else if (zone()->excess_allocation()) {
       ReportError(CStrVector("Regular expression too large"));
     } else {
       current_ = in()->Get(next_pos_);
@@ -6003,9 +6002,10 @@
 
 bool RegExpParser::ParseRegExp(FlatStringReader* input,
                                bool multiline,
-                               RegExpCompileData* result) {
+                               RegExpCompileData* result,
+                               Zone* zone) {
   ASSERT(result != NULL);
-  RegExpParser parser(input, &result->error, multiline);
+  RegExpParser parser(input, &result->error, multiline, zone);
   RegExpTree* tree = parser.ParsePattern();
   if (parser.failed()) {
     ASSERT(tree == NULL);
@@ -6041,16 +6041,15 @@
   }
   if (info->is_lazy()) {
     ASSERT(!info->is_eval());
-    Parser parser(script, parsing_flags, NULL, NULL, info->isolate()->zone());
+    Parser parser(info, parsing_flags, NULL, NULL);
     if (info->shared_info()->is_function()) {
-      result = parser.ParseLazy(info);
+      result = parser.ParseLazy();
     } else {
-      result = parser.ParseProgram(info);
+      result = parser.ParseProgram();
     }
   } else {
     ScriptDataImpl* pre_data = info->pre_parse_data();
-    Parser parser(script, parsing_flags, info->extension(), pre_data,
-                  info->isolate()->zone());
+    Parser parser(info, parsing_flags, info->extension(), pre_data);
     if (pre_data != NULL && pre_data->has_error()) {
       Scanner::Location loc = pre_data->MessageLocation();
       const char* message = pre_data->BuildMessage();
@@ -6063,7 +6062,7 @@
       DeleteArray(args.start());
       ASSERT(info->isolate()->has_pending_exception());
     } else {
-      result = parser.ParseProgram(info);
+      result = parser.ParseProgram();
     }
   }
   info->SetFunction(result);
diff --git a/src/parser.h b/src/parser.h
index 773d59a..52d3d03 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -306,11 +306,13 @@
  public:
   RegExpParser(FlatStringReader* in,
                Handle<String>* error,
-               bool multiline_mode);
+               bool multiline_mode,
+               Zone* zone);
 
   static bool ParseRegExp(FlatStringReader* input,
                           bool multiline,
-                          RegExpCompileData* result);
+                          RegExpCompileData* result,
+                          Zone* zone);
 
   RegExpTree* ParsePattern();
   RegExpTree* ParseDisjunction();
@@ -398,7 +400,7 @@
   };
 
   Isolate* isolate() { return isolate_; }
-  Zone* zone() const { return isolate_->zone(); }
+  Zone* zone() const { return zone_; }
 
   uc32 current() { return current_; }
   bool has_more() { return has_more_; }
@@ -408,6 +410,7 @@
   void ScanForCaptures();
 
   Isolate* isolate_;
+  Zone* zone_;
   Handle<String>* error_;
   ZoneList<RegExpCapture*>* captures_;
   FlatStringReader* in_;
@@ -431,19 +434,18 @@
 
 class Parser {
  public:
-  Parser(Handle<Script> script,
+  Parser(CompilationInfo* info,
          int parsing_flags,  // Combination of ParsingFlags
          v8::Extension* extension,
-         ScriptDataImpl* pre_data,
-         Zone* zone);
+         ScriptDataImpl* pre_data);
   virtual ~Parser() {
     delete reusable_preparser_;
     reusable_preparser_ = NULL;
   }
 
   // Returns NULL if parsing failed.
-  FunctionLiteral* ParseProgram(CompilationInfo* info);
-  FunctionLiteral* ParseLazy(CompilationInfo* info);
+  FunctionLiteral* ParseProgram();
+  FunctionLiteral* ParseLazy();
 
   void ReportMessageAt(Scanner::Location loc,
                        const char* message,
@@ -543,12 +545,12 @@
 
 
 
-  FunctionLiteral* ParseLazy(CompilationInfo* info,
-                             Utf16CharacterStream* source,
+  FunctionLiteral* ParseLazy(Utf16CharacterStream* source,
                              ZoneScope* zone_scope);
 
   Isolate* isolate() { return isolate_; }
   Zone* zone() const { return zone_; }
+  CompilationInfo* info() const { return info_; }
 
   // Called by ParseProgram after setting up the scanner.
   FunctionLiteral* DoParseProgram(CompilationInfo* info,
@@ -837,6 +839,7 @@
   bool parenthesized_function_;
 
   Zone* zone_;
+  CompilationInfo* info_;
   friend class BlockState;
   friend class FunctionState;
 };
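
A recurring theme in the parser changes: the Zone arena is now passed explicitly through constructors instead of being fetched from the isolate, so the caller owns the arena's lifetime. Reduced to a sketch (assumed names, not V8 API):

    class Zone { /* arena allocator elided */ };

    // Each component stores the caller-provided zone rather than reaching
    // for a process-global allocator.
    class Helper {
     public:
      explicit Helper(Zone* zone) : zone_(zone) {}
      Zone* zone() const { return zone_; }
     private:
      Zone* zone_;
    };

    class Owner {
     public:
      explicit Owner(Zone* zone) : zone_(zone), helper_(zone) {}
     private:
      Zone* zone_;
      Helper helper_;  // shares the caller's zone
    };
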
diff --git a/src/regexp-macro-assembler-irregexp.cc b/src/regexp-macro-assembler-irregexp.cc
index d2cd22e..16766ca 100644
--- a/src/regexp-macro-assembler-irregexp.cc
+++ b/src/regexp-macro-assembler-irregexp.cc
@@ -38,8 +38,10 @@
 
 #ifdef V8_INTERPRETED_REGEXP
 
-RegExpMacroAssemblerIrregexp::RegExpMacroAssemblerIrregexp(Vector<byte> buffer)
-    : buffer_(buffer),
+RegExpMacroAssemblerIrregexp::RegExpMacroAssemblerIrregexp(Vector<byte> buffer,
+                                                           Zone* zone)
+    : RegExpMacroAssembler(zone),
+      buffer_(buffer),
       pc_(0),
       own_buffer_(false),
       advance_current_end_(kInvalidPC) {
diff --git a/src/regexp-macro-assembler-irregexp.h b/src/regexp-macro-assembler-irregexp.h
index 7232342..4bc2980 100644
--- a/src/regexp-macro-assembler-irregexp.h
+++ b/src/regexp-macro-assembler-irregexp.h
@@ -48,7 +48,7 @@
   // for code generation and assumes its size to be buffer_size. If the buffer
   // is too small, a fatal error occurs. No deallocation of the buffer is done
   // upon destruction of the assembler.
-  explicit RegExpMacroAssemblerIrregexp(Vector<byte>);
+  RegExpMacroAssemblerIrregexp(Vector<byte>, Zone* zone);
   virtual ~RegExpMacroAssemblerIrregexp();
   // The byte-code interpreter checks on each push anyway.
   virtual int stack_limit_slack() { return 1; }
diff --git a/src/rewriter.cc b/src/rewriter.cc
index 3fcd603..f791dc3 100644
--- a/src/rewriter.cc
+++ b/src/rewriter.cc
@@ -38,12 +38,12 @@
 
 class Processor: public AstVisitor {
  public:
-  explicit Processor(Variable* result)
+  Processor(Variable* result, Zone* zone)
       : result_(result),
         result_assigned_(false),
         is_set_(false),
         in_try_(false),
-        factory_(isolate()) { }
+        factory_(isolate(), zone) { }
 
   virtual ~Processor() { }
 
@@ -243,7 +243,7 @@
   if (!body->is_empty()) {
     Variable* result = scope->NewTemporary(
         info->isolate()->factory()->result_symbol());
-    Processor processor(result);
+    Processor processor(result, info->zone());
     processor.Process(body);
     if (processor.HasStackOverflow()) return false;
 
@@ -262,7 +262,7 @@
       Statement* result_statement =
           processor.factory()->NewReturnStatement(result_proxy);
       result_statement->set_statement_pos(position);
-      body->Add(result_statement, info->isolate()->zone());
+      body->Add(result_statement, info->zone());
     }
   }
 
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index 003b882..cdbc77a 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -218,7 +218,10 @@
   for (int i = 0; i < kSamplerWindowSize; i++) {
     Object* sample = sampler_window_[i];
     if (sample != NULL) {
-      if (function == sample) {
+      bool fits = FLAG_lookup_sample_by_shared
+          ? (function->shared() == JSFunction::cast(sample)->shared())
+          : (function == JSFunction::cast(sample));
+      if (fits) {
         weight += sampler_window_weight_[i];
       }
     }
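
With FLAG_lookup_sample_by_shared set, a sample matches any closure that shares its SharedFunctionInfo, so sampler weight pools across closures of the same function. The check, reduced to stand-in types (not V8's):

    struct SharedFunctionInfo {};
    struct JSFunction { const SharedFunctionInfo* shared; };

    // Compare closure identity, or fall back to the underlying shared
    // function info when lookup-by-shared is enabled.
    bool SampleFits(const JSFunction* function, const JSFunction* sample,
                    bool lookup_by_shared) {
      return lookup_by_shared ? function->shared == sample->shared
                              : function == sample;
    }
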
diff --git a/src/runtime.cc b/src/runtime.cc
index 9e38949..12b753b 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -754,7 +754,7 @@
   Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
   table = ObjectHashSetAdd(table, key);
   holder->set_table(*table);
-  return isolate->heap()->undefined_symbol();
+  return isolate->heap()->undefined_value();
 }
 
 
@@ -776,7 +776,7 @@
   Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
   table = ObjectHashSetRemove(table, key);
   holder->set_table(*table);
-  return isolate->heap()->undefined_symbol();
+  return isolate->heap()->undefined_value();
 }
 
 
@@ -808,7 +808,7 @@
   Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
   Handle<ObjectHashTable> new_table = PutIntoObjectHashTable(table, key, value);
   holder->set_table(*new_table);
-  return *value;
+  return isolate->heap()->undefined_value();
 }
 
 
@@ -842,7 +842,7 @@
   Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table()));
   Handle<ObjectHashTable> new_table = PutIntoObjectHashTable(table, key, value);
   weakmap->set_table(*new_table);
-  return *value;
+  return isolate->heap()->undefined_value();
 }
 
 
@@ -1123,11 +1123,13 @@
     elms->set(IS_ACCESSOR_INDEX, heap->true_value());
 
     AccessorPair* accessors = AccessorPair::cast(result.GetCallbackObject());
-    if (CheckAccess(*obj, *name, &result, v8::ACCESS_GET)) {
-      elms->set(GETTER_INDEX, accessors->GetComponent(ACCESSOR_GETTER));
+    Object* getter = accessors->GetComponent(ACCESSOR_GETTER);
+    if (!getter->IsMap() && CheckAccess(*obj, *name, &result, v8::ACCESS_GET)) {
+      elms->set(GETTER_INDEX, getter);
     }
-    if (CheckAccess(*obj, *name, &result, v8::ACCESS_SET)) {
-      elms->set(SETTER_INDEX, accessors->GetComponent(ACCESSOR_SETTER));
+    Object* setter = accessors->GetComponent(ACCESSOR_SETTER);
+    if (!setter->IsMap() && CheckAccess(*obj, *name, &result, v8::ACCESS_SET)) {
+      elms->set(SETTER_INDEX, setter);
     }
   } else {
     elms->set(IS_ACCESSOR_INDEX, heap->false_value());
@@ -1188,7 +1190,8 @@
   CONVERT_ARG_HANDLE_CHECKED(JSRegExp, re, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1);
   CONVERT_ARG_HANDLE_CHECKED(String, flags, 2);
-  Handle<Object> result = RegExpImpl::Compile(re, pattern, flags);
+  Handle<Object> result =
+      RegExpImpl::Compile(re, pattern, flags, isolate->runtime_zone());
   if (result.is_null()) return Failure::Exception();
   return *result;
 }
@@ -1309,12 +1312,18 @@
     if (is_var || is_const) {
       // Lookup the property in the global object, and don't set the
       // value of the variable if the property is already there.
-      // Do the lookup locally only, see ES5 errata.
+      // Do the lookup locally only, see ES5 erratum.
       LookupResult lookup(isolate);
-      if (FLAG_es52_globals)
-        global->LocalLookup(*name, &lookup);
-      else
+      if (FLAG_es52_globals) {
+        Object* obj = *global;
+        do {
+          JSObject::cast(obj)->LocalLookup(*name, &lookup);
+          obj = obj->GetPrototype();
+        } while (!lookup.IsFound() && obj->IsJSObject() &&
+                 JSObject::cast(obj)->map()->is_hidden_prototype());
+      } else {
         global->Lookup(*name, &lookup);
+      }
       if (lookup.IsProperty()) {
         // We found an existing property. Unless it was an interceptor
         // that claims the property is absent, skip this declaration.
@@ -1748,8 +1757,7 @@
   Handle<Object> result = RegExpImpl::Exec(regexp,
                                            subject,
                                            index,
-                                           last_match_info,
-                                           isolate->zone());
+                                           last_match_info);
   if (result.is_null()) return Failure::Exception();
   return *result;
 }
@@ -2183,8 +2191,7 @@
   Handle<SharedFunctionInfo> target_shared(target->shared());
   Handle<SharedFunctionInfo> source_shared(source->shared());
 
-  if (!source->is_compiled() &&
-      !JSFunction::CompileLazy(source, KEEP_EXCEPTION)) {
+  if (!JSFunction::EnsureCompiled(source, KEEP_EXCEPTION)) {
     return Failure::Exception();
   }
 
@@ -2989,8 +2996,8 @@
   ASSERT(subject->IsFlat());
   ASSERT(replacement->IsFlat());
 
-  ZoneScope zone_space(isolate, DELETE_ON_EXIT);
-  ZoneList<int> indices(8, isolate->zone());
+  ZoneScope zone_space(isolate->runtime_zone(), DELETE_ON_EXIT);
+  ZoneList<int> indices(8, isolate->runtime_zone());
   ASSERT_EQ(JSRegExp::ATOM, pattern_regexp->TypeTag());
   String* pattern =
       String::cast(pattern_regexp->DataAt(JSRegExp::kAtomPatternIndex));
@@ -3083,8 +3090,7 @@
   Handle<Object> match = RegExpImpl::Exec(regexp_handle,
                                           subject_handle,
                                           0,
-                                          last_match_info_handle,
-                                          isolate->zone());
+                                          last_match_info_handle);
   if (match.is_null()) {
     return Failure::Exception();
   }
@@ -3095,8 +3101,8 @@
   int capture_count = regexp_handle->CaptureCount();
 
   // CompiledReplacement uses zone allocation.
-  ZoneScope zonescope(isolate, DELETE_ON_EXIT);
-  CompiledReplacement compiled_replacement(isolate->zone());
+  ZoneScope zonescope(zone, DELETE_ON_EXIT);
+  CompiledReplacement compiled_replacement(zone);
   compiled_replacement.Compile(replacement_handle,
                                capture_count,
                                length);
@@ -3185,8 +3191,7 @@
     match = RegExpImpl::Exec(regexp_handle,
                              subject_handle,
                              next,
-                             last_match_info_handle,
-                             isolate->zone());
+                             last_match_info_handle);
     if (match.is_null()) {
       return Failure::Exception();
     }
@@ -3242,8 +3247,7 @@
   Handle<Object> match = RegExpImpl::Exec(regexp_handle,
                                           subject_handle,
                                           0,
-                                          last_match_info_handle,
-                                          isolate->zone());
+                                          last_match_info_handle);
   if (match.is_null()) return Failure::Exception();
   if (match->IsNull()) return *subject_handle;
 
@@ -3317,8 +3321,7 @@
     match = RegExpImpl::Exec(regexp_handle,
                              subject_handle,
                              next,
-                             last_match_info_handle,
-                             isolate->zone());
+                             last_match_info_handle);
     if (match.is_null()) return Failure::Exception();
     if (match->IsNull()) break;
 
@@ -3394,7 +3397,7 @@
 
   ASSERT(last_match_info->HasFastObjectElements());
 
-  Zone* zone = isolate->zone();
+  Zone* zone = isolate->runtime_zone();
   if (replacement->length() == 0) {
     if (subject->HasOnlyAsciiChars()) {
       return StringReplaceRegExpWithEmptyString<SeqAsciiString>(
@@ -3733,8 +3736,7 @@
   CONVERT_ARG_HANDLE_CHECKED(JSArray, regexp_info, 2);
   HandleScope handles;
 
-  Handle<Object> match = RegExpImpl::Exec(regexp, subject, 0, regexp_info,
-                                          isolate->zone());
+  Handle<Object> match = RegExpImpl::Exec(regexp, subject, 0, regexp_info);
 
   if (match.is_null()) {
     return Failure::Exception();
@@ -3744,8 +3746,8 @@
   }
   int length = subject->length();
 
-  Zone* zone = isolate->zone();
-  ZoneScope zone_space(isolate, DELETE_ON_EXIT);
+  Zone* zone = isolate->runtime_zone();
+  ZoneScope zone_space(zone, DELETE_ON_EXIT);
   ZoneList<int> offsets(8, zone);
   int start;
   int end;
@@ -3759,8 +3761,7 @@
     offsets.Add(start, zone);
     offsets.Add(end, zone);
     if (start == end) if (++end > length) break;
-    match = RegExpImpl::Exec(regexp, subject, end, regexp_info,
-                             isolate->zone());
+    match = RegExpImpl::Exec(regexp, subject, end, regexp_info);
     if (match.is_null()) {
       return Failure::Exception();
     }
@@ -3858,8 +3859,7 @@
   int match_start = -1;
   int match_end = 0;
   int pos = 0;
-  int registers_per_match = RegExpImpl::IrregexpPrepare(regexp, subject,
-                                                        isolate->zone());
+  int registers_per_match = RegExpImpl::IrregexpPrepare(regexp, subject);
   if (registers_per_match < 0) return RegExpImpl::RE_EXCEPTION;
 
   int max_matches;
@@ -3874,8 +3874,7 @@
     int num_matches = RegExpImpl::IrregexpExecRaw(regexp,
                                                   subject,
                                                   pos,
-                                                  register_vector,
-                                                  isolate->zone());
+                                                  register_vector);
     if (num_matches > 0) {
       for (int match_index = 0; match_index < num_matches; match_index++) {
         int32_t* current_match = &register_vector[match_index * 2];
@@ -3942,11 +3941,11 @@
     Handle<String> subject,
     Handle<JSRegExp> regexp,
     Handle<JSArray> last_match_array,
-    FixedArrayBuilder* builder) {
+    FixedArrayBuilder* builder,
+    Zone* zone) {
 
   ASSERT(subject->IsFlat());
-  int registers_per_match = RegExpImpl::IrregexpPrepare(regexp, subject,
-                                                        isolate->zone());
+  int registers_per_match = RegExpImpl::IrregexpPrepare(regexp, subject);
   if (registers_per_match < 0) return RegExpImpl::RE_EXCEPTION;
 
   int max_matches;
@@ -3959,8 +3958,7 @@
   int num_matches = RegExpImpl::IrregexpExecRaw(regexp,
                                                 subject,
                                                 0,
-                                                register_vector,
-                                                isolate->zone());
+                                                register_vector);
 
   int capture_count = regexp->CaptureCount();
   int subject_length = subject->length();
@@ -4046,8 +4044,7 @@
       num_matches = RegExpImpl::IrregexpExecRaw(regexp,
                                                 subject,
                                                 pos,
-                                                register_vector,
-                                                isolate->zone());
+                                                register_vector);
     } while (num_matches > 0);
 
     if (num_matches != RegExpImpl::RE_EXCEPTION) {
@@ -4128,7 +4125,8 @@
                                   subject,
                                   regexp,
                                   last_match_info,
-                                  &builder);
+                                  &builder,
+                                  isolate->runtime_zone());
   }
   if (result == RegExpImpl::RE_SUCCESS) return *builder.ToJSArray(result_array);
   if (result == RegExpImpl::RE_FAILURE) return isolate->heap()->null_value();
@@ -4855,14 +4853,13 @@
 RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrepareStepInIfStepping) {
   Debug* debug = isolate->debug();
   if (!debug->IsStepping()) return NULL;
-  CONVERT_ARG_CHECKED(Object, callback, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, callback, 0);
   HandleScope scope(isolate);
-  Handle<SharedFunctionInfo> shared_info(JSFunction::cast(callback)->shared());
   // When leaving the callback, step out has been activated, but not performed
   // if we do not leave the builtin.  To be able to step into the callback
   // again, we need to clear the step out at this point.
   debug->ClearStepOut();
-  debug->FloodWithOneShot(shared_info);
+  debug->FloodWithOneShot(callback);
   return NULL;
 }
 
@@ -6463,8 +6460,8 @@
 
   static const int kMaxInitialListCapacity = 16;
 
-  Zone* zone = isolate->zone();
-  ZoneScope scope(isolate, DELETE_ON_EXIT);
+  Zone* zone = isolate->runtime_zone();
+  ZoneScope scope(zone, DELETE_ON_EXIT);
 
   // Find (up to limit) indices of separator and end-of-string in subject
   int initial_capacity = Min<uint32_t>(kMaxInitialListCapacity, limit);
@@ -8140,12 +8137,8 @@
   }
 
   // The function should be compiled for the optimization hints to be
-  // available. We cannot use EnsureCompiled because that forces a
-  // compilation through the shared function info which makes it
-  // impossible for us to optimize.
-  if (!function->is_compiled()) {
-    JSFunction::CompileLazy(function, CLEAR_EXCEPTION);
-  }
+  // available.
+  JSFunction::EnsureCompiled(function, CLEAR_EXCEPTION);
 
   Handle<SharedFunctionInfo> shared(function->shared(), isolate);
   if (!function->has_initial_map() &&
@@ -8366,6 +8359,9 @@
   } else {
     Deoptimizer::DeoptimizeFunction(*function);
   }
+  // Flush optimized code cache for this function.
+  function->shared()->ClearOptimizedCodeMap();
+
   return isolate->heap()->undefined_value();
 }
 
@@ -9313,7 +9309,7 @@
   ASSERT_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
 
-  Zone* zone = isolate->zone();
+  Zone* zone = isolate->runtime_zone();
   source = Handle<String>(source->TryFlattenGetString());
   // Optimized fast case where we only have ASCII characters.
   Handle<Object> result;
@@ -11155,7 +11151,7 @@
     }
 
     // Get the debug info (create it if it does not exist).
-    if (!isolate->debug()->EnsureDebugInfo(shared_info)) {
+    if (!isolate->debug()->EnsureDebugInfo(shared_info, function_)) {
       // Return if ensuring debug info failed.
       return;
     }
@@ -11180,7 +11176,6 @@
       if (scope_info->Type() != EVAL_SCOPE) nested_scope_chain_.Add(scope_info);
     } else {
       // Reparse the code and analyze the scopes.
-      ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
       Handle<Script> script(Script::cast(shared_info->script()));
       Scope* scope = NULL;
 
@@ -11188,7 +11183,7 @@
       Handle<ScopeInfo> scope_info(shared_info->scope_info());
       if (scope_info->Type() != FUNCTION_SCOPE) {
         // Global or eval code.
-        CompilationInfo info(script);
+        CompilationInfoWithZone info(script);
         if (scope_info->Type() == GLOBAL_SCOPE) {
           info.MarkAsGlobal();
         } else {
@@ -11199,25 +11194,14 @@
         if (ParserApi::Parse(&info, kNoParsingFlags) && Scope::Analyze(&info)) {
           scope = info.function()->scope();
         }
+        RetrieveScopeChain(scope, shared_info);
       } else {
         // Function code
-        CompilationInfo info(shared_info);
+        CompilationInfoWithZone info(shared_info);
         if (ParserApi::Parse(&info, kNoParsingFlags) && Scope::Analyze(&info)) {
           scope = info.function()->scope();
         }
-      }
-
-      // Retrieve the scope chain for the current position.
-      if (scope != NULL) {
-        int source_position = shared_info->code()->SourcePosition(frame_->pc());
-        scope->GetNestedScopeChain(&nested_scope_chain_, source_position);
-      } else {
-        // A failed reparse indicates that the preparser has diverged from the
-        // parser or that the preparse data given to the initial parse has been
-        // faulty. We fail in debug mode but in release mode we only provide the
-        // information we get from the context chain but nothing about
-        // completely stack allocated scopes or stack allocated locals.
-        UNREACHABLE();
+        RetrieveScopeChain(scope, shared_info);
       }
     }
   }
@@ -11417,6 +11401,21 @@
   Handle<Context> context_;
   List<Handle<ScopeInfo> > nested_scope_chain_;
 
+  void RetrieveScopeChain(Scope* scope,
+                          Handle<SharedFunctionInfo> shared_info) {
+    if (scope != NULL) {
+      int source_position = shared_info->code()->SourcePosition(frame_->pc());
+      scope->GetNestedScopeChain(&nested_scope_chain_, source_position);
+    } else {
+      // A failed reparse indicates that the preparser has diverged from the
+      // parser or that the preparse data given to the initial parse has been
+      // faulty. We fail in debug mode but in release mode we only provide the
+      // information we get from the context chain but nothing about
+      // completely stack allocated scopes or stack allocated locals.
+      UNREACHABLE();
+    }
+  }
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(ScopeIterator);
 };
 
@@ -11678,30 +11677,14 @@
 }
 
 
-// Set a break point in a function
-// args[0]: function
-// args[1]: number: break source position (within the function source)
-// args[2]: number: break point object
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFunctionBreakPoint) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
-  Handle<SharedFunctionInfo> shared(fun->shared());
-  CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
-  RUNTIME_ASSERT(source_position >= 0);
-  Handle<Object> break_point_object_arg = args.at<Object>(2);
-
-  // Set break point.
-  isolate->debug()->SetBreakPoint(shared, break_point_object_arg,
-                                  &source_position);
-
-  return Smi::FromInt(source_position);
-}
-
-
 Object* Runtime::FindSharedFunctionInfoInScript(Isolate* isolate,
                                                 Handle<Script> script,
                                                 int position) {
+  // The fix-point iteration below relies on functions that cannot be lazily
+  // compiled without a context staying uncompiled entirely. Compilation is
+  // triggered at points where a context is not needed.
+  isolate->debug()->PrepareForBreakPoints();
+
   // Iterate the heap looking for SharedFunctionInfo generated from the
   // script. The inner most SharedFunctionInfo containing the source position
   // for the requested break point is found.
@@ -11713,6 +11696,7 @@
   bool done = false;
   // The current candidate for the source position:
   int target_start_position = RelocInfo::kNoPosition;
+  Handle<JSFunction> target_function;
   Handle<SharedFunctionInfo> target;
   while (!done) {
     { // Extra scope for iterator and no-allocation.
@@ -11721,60 +11705,80 @@
       HeapIterator iterator;
       for (HeapObject* obj = iterator.next();
            obj != NULL; obj = iterator.next()) {
-        if (obj->IsSharedFunctionInfo()) {
-          Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(obj));
-          if (shared->script() == *script) {
-            // If the SharedFunctionInfo found has the requested script data and
-            // contains the source position it is a candidate.
-            int start_position = shared->function_token_position();
-            if (start_position == RelocInfo::kNoPosition) {
-              start_position = shared->start_position();
-            }
-            if (start_position <= position &&
-                position <= shared->end_position()) {
-              // If there is no candidate or this function is within the current
-              // candidate this is the new candidate.
-              if (target.is_null()) {
-                target_start_position = start_position;
-                target = shared;
-              } else {
-                if (target_start_position == start_position &&
-                    shared->end_position() == target->end_position()) {
-                    // If a top-level function contain only one function
-                    // declartion the source for the top-level and the
-                    // function is the same. In that case prefer the non
-                    // top-level function.
-                  if (!shared->is_toplevel()) {
-                    target_start_position = start_position;
-                    target = shared;
-                  }
-                } else if (target_start_position <= start_position &&
-                           shared->end_position() <= target->end_position()) {
-                  // This containment check includes equality as a function
-                  // inside a top-level function can share either start or end
-                  // position with the top-level function.
+        bool found_next_candidate = false;
+        Handle<JSFunction> function;
+        Handle<SharedFunctionInfo> shared;
+        if (obj->IsJSFunction()) {
+          function = Handle<JSFunction>(JSFunction::cast(obj));
+          shared = Handle<SharedFunctionInfo>(function->shared());
+          ASSERT(shared->allows_lazy_compilation() || shared->is_compiled());
+          found_next_candidate = true;
+        } else if (obj->IsSharedFunctionInfo()) {
+          shared = Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(obj));
+          // Skip functions that we cannot compile lazily without a context,
+          // which is not available here, because there is no closure.
+          found_next_candidate = shared->is_compiled() ||
+              shared->allows_lazy_compilation_without_context();
+        }
+        if (!found_next_candidate) continue;
+        if (shared->script() == *script) {
+          // If the SharedFunctionInfo found has the requested script data and
+          // contains the source position it is a candidate.
+          int start_position = shared->function_token_position();
+          if (start_position == RelocInfo::kNoPosition) {
+            start_position = shared->start_position();
+          }
+          if (start_position <= position &&
+              position <= shared->end_position()) {
+            // If there is no candidate or this function is within the current
+            // candidate this is the new candidate.
+            if (target.is_null()) {
+              target_start_position = start_position;
+              target_function = function;
+              target = shared;
+            } else {
+              if (target_start_position == start_position &&
+                  shared->end_position() == target->end_position()) {
+                // If a top-level function contains only one function
+                // declaration the source for the top-level and the function
+                // is the same. In that case prefer the non-top-level function.
+                if (!shared->is_toplevel()) {
                   target_start_position = start_position;
+                  target_function = function;
                   target = shared;
                 }
+              } else if (target_start_position <= start_position &&
+                         shared->end_position() <= target->end_position()) {
+                // This containment check includes equality as a function
+                // inside a top-level function can share either start or end
+                // position with the top-level function.
+                target_start_position = start_position;
+                target_function = function;
+                target = shared;
               }
             }
           }
         }
       }  // End for loop.
-    }  // End No allocation scope.
+    }  // End no-allocation scope.
 
     if (target.is_null()) {
       return isolate->heap()->undefined_value();
     }
 
-    // If the candidate found is compiled we are done. NOTE: when lazy
-    // compilation of inner functions is introduced some additional checking
-    // needs to be done here to compile inner functions.
+    // If the candidate found is compiled we are done.
     done = target->is_compiled();
     if (!done) {
-      // If the candidate is not compiled compile it to reveal any inner
-      // functions which might contain the requested source position.
-      SharedFunctionInfo::CompileLazy(target, KEEP_EXCEPTION);
+      // If the candidate is not compiled, compile it to reveal any inner
+      // functions which might contain the requested source position. This
+      // will compile all inner functions that cannot be compiled without a
+      // context, because Compiler::BuildFunctionInfo checks whether the
+      // debugger is active.
+      if (target_function.is_null()) {
+        SharedFunctionInfo::CompileLazy(target, KEEP_EXCEPTION);
+      } else {
+        JSFunction::CompileLazy(target_function, KEEP_EXCEPTION);
+      }
     }
   }  // End while loop.
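
The loop above is a fix-point search: pick the innermost candidate covering the position, compile it if needed (which materializes inner functions), and rescan until the innermost candidate is already compiled. A runnable skeleton of that control flow (Scan and Compile are stand-ins for the heap walk and lazy compilation):

    #include <cstdio>

    struct Candidate { bool found; bool compiled; };

    static int compiled_depth = 0;  // stand-in for heap state

    // Stand-in: rescan for the innermost candidate covering the position.
    Candidate Scan() { return Candidate{true, compiled_depth >= 2}; }

    // Stand-in: compiling a candidate reveals its inner functions.
    void Compile() { ++compiled_depth; }

    int main() {
      for (;;) {
        Candidate c = Scan();
        if (!c.found) { std::puts("no function at position"); return 0; }
        if (c.compiled) { std::puts("innermost candidate compiled"); return 0; }
        Compile();  // materialize inner functions, then rescan
      }
    }
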
 
@@ -11782,6 +11786,26 @@
 }
 
 
+// Set a break point in a function.
+// args[0]: function
+// args[1]: number: break source position (within the function source)
+// args[2]: number: break point object
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFunctionBreakPoint) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 3);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+  CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
+  RUNTIME_ASSERT(source_position >= 0);
+  Handle<Object> break_point_object_arg = args.at<Object>(2);
+
+  // Set break point.
+  isolate->debug()->SetBreakPoint(function, break_point_object_arg,
+                                  &source_position);
+
+  return Smi::FromInt(source_position);
+}
+
+
 // Changes the state of a break point in a script and returns source position
 // where break point was set. NOTE: Regarding performance see the NOTE for
 // GetScriptFromScriptData.
@@ -11800,23 +11824,13 @@
   RUNTIME_ASSERT(wrapper->value()->IsScript());
   Handle<Script> script(Script::cast(wrapper->value()));
 
-  Object* result = Runtime::FindSharedFunctionInfoInScript(
-      isolate, script, source_position);
-  if (!result->IsUndefined()) {
-    Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result));
-    // Find position within function. The script position might be before the
-    // source position of the first function.
-    int position;
-    if (shared->start_position() > source_position) {
-      position = 0;
-    } else {
-      position = source_position - shared->start_position();
-    }
-    isolate->debug()->SetBreakPoint(shared, break_point_object_arg, &position);
-    position += shared->start_position();
-    return Smi::FromInt(position);
+  // Set break point.
+  if (!isolate->debug()->SetBreakPointForScript(script, break_point_object_arg,
+                                                &source_position)) {
+    return isolate->heap()->undefined_value();
   }
-  return  isolate->heap()->undefined_value();
+
+  return Smi::FromInt(source_position);
 }
 
 
@@ -12106,7 +12120,11 @@
   Handle<Context> context =
       isolate->factory()->NewFunctionContext(Context::MIN_CONTEXT_SLOTS,
                                              go_between);
-  context->set_extension(*local_scope);
+
+  // Use the materialized local scope in a with context.
+  context =
+      isolate->factory()->NewWithContext(go_between, context, local_scope);
+
   // Copy any with contexts present and chain them in front of this context.
   Handle<Context> frame_context(Context::cast(frame->context()));
   Handle<Context> function_context;
@@ -12540,7 +12558,7 @@
   ASSERT(args.length() == 1);
   // Get the function and make sure it is compiled.
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
-  if (!JSFunction::CompileLazy(func, KEEP_EXCEPTION)) {
+  if (!JSFunction::EnsureCompiled(func, KEEP_EXCEPTION)) {
     return Failure::Exception();
   }
   func->code()->PrintLn();
@@ -12555,7 +12573,7 @@
   ASSERT(args.length() == 1);
   // Get the function and make sure it is compiled.
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
-  if (!JSFunction::CompileLazy(func, KEEP_EXCEPTION)) {
+  if (!JSFunction::EnsureCompiled(func, KEEP_EXCEPTION)) {
     return Failure::Exception();
   }
   func->shared()->construct_stub()->PrintLn();
@@ -12776,7 +12794,7 @@
   CONVERT_BOOLEAN_ARG_CHECKED(do_drop, 1);
 
   return *LiveEdit::CheckAndDropActivations(shared_array, do_drop,
-                                            isolate->zone());
+                                            isolate->runtime_zone());
 }
 
 // Compares 2 strings line-by-line, then token-wise and returns diff in form
@@ -12792,6 +12810,45 @@
 }
 
 
+// Restarts a call frame and completely drops all frames above it.
+// Returns true if successful. Otherwise returns undefined or an error message.
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditRestartFrame) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 2);
+
+  // Check arguments.
+  Object* check;
+  { MaybeObject* maybe_check = Runtime_CheckExecutionState(
+      RUNTIME_ARGUMENTS(isolate, args));
+    if (!maybe_check->ToObject(&check)) return maybe_check;
+  }
+  CONVERT_NUMBER_CHECKED(int, index, Int32, args[1]);
+  Heap* heap = isolate->heap();
+
+  // Find the relevant frame with the requested index.
+  StackFrame::Id id = isolate->debug()->break_frame_id();
+  if (id == StackFrame::NO_ID) {
+    // If there are no JavaScript stack frames, return undefined.
+    return heap->undefined_value();
+  }
+
+  int count = 0;
+  JavaScriptFrameIterator it(isolate, id);
+  for (; !it.done(); it.Advance()) {
+    if (index < count + it.frame()->GetInlineCount()) break;
+    count += it.frame()->GetInlineCount();
+  }
+  if (it.done()) return heap->undefined_value();
+
+  const char* error_message =
+      LiveEdit::RestartFrame(it.frame(), isolate->runtime_zone());
+  if (error_message) {
+    return *(isolate->factory()->LookupAsciiSymbol(error_message));
+  }
+  return heap->true_value();
+}
+
+
 // A testing entry. Returns statement position which is the closest to
 // source_position.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionCodePositionFromSource) {
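
Note on the FindSharedFunctionInfoInScript hunks above: the search keeps the
innermost function whose source range contains the requested position,
breaking exact-range ties in favor of the non-top-level function. A minimal,
self-contained model of that selection rule (hypothetical types; the real
code walks SharedFunctionInfo objects on the heap):

  #include <cassert>
  #include <vector>

  struct Fn { int start; int end; bool toplevel; };

  const Fn* FindCandidate(const std::vector<Fn>& fns, int position) {
    const Fn* target = nullptr;
    for (const Fn& fn : fns) {
      if (position < fn.start || fn.end < position) continue;  // not containing
      if (target == nullptr) {
        target = &fn;
      } else if (fn.start == target->start && fn.end == target->end) {
        if (!fn.toplevel) target = &fn;  // same source range: prefer inner
      } else if (target->start <= fn.start && fn.end <= target->end) {
        target = &fn;  // contained in the current target: more specific
      }
    }
    return target;
  }

  int main() {
    std::vector<Fn> fns = {{0, 100, true}, {10, 40, false}};
    assert(FindCandidate(fns, 20) == &fns[1]);  // the inner function wins
    return 0;
  }
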
diff --git a/src/runtime.h b/src/runtime.h
index f5a4f50..9968b29 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -442,6 +442,7 @@
   F(LiveEditPatchFunctionPositions, 2, 1) \
   F(LiveEditCheckAndDropActivations, 2, 1) \
   F(LiveEditCompareStrings, 2, 1) \
+  F(LiveEditRestartFrame, 2, 1) \
   F(GetFunctionCodePositionFromSource, 2, 1) \
   F(ExecuteInDebugContext, 2, 1) \
   \
diff --git a/src/scopes.cc b/src/scopes.cc
index ad6692e..faedb5f 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -274,7 +274,8 @@
 
   // Allocate the variables.
   {
-    AstNodeFactory<AstNullVisitor> ast_node_factory(info->isolate());
+    AstNodeFactory<AstNullVisitor> ast_node_factory(info->isolate(),
+                                                    info->zone());
     if (!top->AllocateVariables(info, &ast_node_factory)) return false;
   }
 
@@ -637,11 +638,6 @@
 }
 
 
-bool Scope::AllowsLazyCompilation() const {
-  return !force_eager_compilation_ && HasTrivialOuterContext();
-}
-
-
 bool Scope::HasTrivialContext() const {
   // A function scope has a trivial context if it always is the global
   // context. We iteratively scan out the context chain to see if
@@ -666,12 +662,17 @@
 }
 
 
-bool Scope::AllowsLazyRecompilation() const {
+bool Scope::AllowsLazyCompilation() const {
   return !force_eager_compilation_ &&
          !TrivialDeclarationScopesBeforeWithScope();
 }
 
 
+bool Scope::AllowsLazyCompilationWithoutContext() const {
+  return !force_eager_compilation_ && HasTrivialOuterContext();
+}
+
+
 bool Scope::TrivialDeclarationScopesBeforeWithScope() const {
   Scope* outer = outer_scope_;
   if (outer == NULL) return false;
diff --git a/src/scopes.h b/src/scopes.h
index decd74d..2868cde 100644
--- a/src/scopes.h
+++ b/src/scopes.h
@@ -374,8 +374,8 @@
   // Determine if we can use lazy compilation for this scope.
   bool AllowsLazyCompilation() const;
 
-  // True if we can lazily recompile functions with this scope.
-  bool AllowsLazyRecompilation() const;
+  // Determine if we can use lazy compilation for this scope without a context.
+  bool AllowsLazyCompilationWithoutContext() const;
 
   // True if the outer context of this scope is always the global context.
   bool HasTrivialOuterContext() const;
diff --git a/src/serialize.cc b/src/serialize.cc
index cf8e5e1..e4a90f1 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -37,6 +37,7 @@
 #include "platform.h"
 #include "runtime.h"
 #include "serialize.h"
+#include "snapshot.h"
 #include "stub-cache.h"
 #include "v8threads.h"
 
@@ -674,10 +675,6 @@
   ASSERT_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse());
   // No active handles.
   ASSERT(isolate_->handle_scope_implementer()->blocks()->is_empty());
-  // Make sure the entire partial snapshot cache is traversed, filling it with
-  // valid object pointers.
-  isolate_->set_serialize_partial_snapshot_cache_length(
-      Isolate::kPartialSnapshotCacheCapacity);
   ASSERT_EQ(NULL, external_reference_decoder_);
   external_reference_decoder_ = new ExternalReferenceDecoder();
   isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
@@ -1149,22 +1146,6 @@
 
 void PartialSerializer::Serialize(Object** object) {
   this->VisitPointer(object);
-  Isolate* isolate = Isolate::Current();
-
-  // After we have done the partial serialization the partial snapshot cache
-  // will contain some references needed to decode the partial snapshot.  We
-  // fill it up with undefineds so it has a predictable length so the
-  // deserialization code doesn't need to know the length.
-  for (int index = isolate->serialize_partial_snapshot_cache_length();
-       index < Isolate::kPartialSnapshotCacheCapacity;
-       index++) {
-    isolate->serialize_partial_snapshot_cache()[index] =
-        isolate->heap()->undefined_value();
-    startup_serializer_->VisitPointer(
-        &isolate->serialize_partial_snapshot_cache()[index]);
-  }
-  isolate->set_serialize_partial_snapshot_cache_length(
-      Isolate::kPartialSnapshotCacheCapacity);
 }
 
 
@@ -1194,26 +1175,29 @@
 
 // This ensures that the partial snapshot cache keeps things alive during GC and
 // tracks their movement.  When it is called during serialization of the startup
-// snapshot the partial snapshot is empty, so nothing happens.  When the partial
-// (context) snapshot is created, this array is populated with the pointers that
-// the partial snapshot will need. As that happens we emit serialized objects to
-// the startup snapshot that correspond to the elements of this cache array.  On
-// deserialization we therefore need to visit the cache array.  This fills it up
-// with pointers to deserialized objects.
+// snapshot, nothing happens.  When the partial (context) snapshot is
+// created, this array is populated with the pointers that the partial
+// snapshot will need.  As that happens, we emit serialized objects to the
+// startup snapshot that correspond to the elements of this cache array.  On
+// deserialization we therefore need to visit the cache array, which fills it
+// up with pointers to deserialized objects.
 void SerializerDeserializer::Iterate(ObjectVisitor* visitor) {
+  if (Serializer::enabled()) return;
   Isolate* isolate = Isolate::Current();
-  visitor->VisitPointers(
-      isolate->serialize_partial_snapshot_cache(),
-      &isolate->serialize_partial_snapshot_cache()[
-          isolate->serialize_partial_snapshot_cache_length()]);
-}
-
-
-// When deserializing we need to set the size of the snapshot cache.  This means
-// the root iteration code (above) will iterate over array elements, writing the
-// references to deserialized objects in them.
-void SerializerDeserializer::SetSnapshotCacheSize(int size) {
-  Isolate::Current()->set_serialize_partial_snapshot_cache_length(size);
+  for (int i = 0; ; i++) {
+    if (isolate->serialize_partial_snapshot_cache_length() <= i) {
+      // Extend the array so it is ready to receive a value from the
+      // visitor when deserializing.
+      isolate->PushToPartialSnapshotCache(Smi::FromInt(0));
+    }
+    Object** cache = isolate->serialize_partial_snapshot_cache();
+    visitor->VisitPointers(&cache[i], &cache[i + 1]);
+    // The sentinel is the undefined object, which is a root and therefore
+    // will not normally be found in the cache.
+    if (cache[i] == isolate->heap()->undefined_value()) {
+      break;
+    }
+  }
 }
 
 
@@ -1231,14 +1215,11 @@
   // then visit the pointer so that it becomes part of the startup snapshot
   // and we can refer to it from the partial snapshot.
   int length = isolate->serialize_partial_snapshot_cache_length();
-  CHECK(length < Isolate::kPartialSnapshotCacheCapacity);
-  isolate->serialize_partial_snapshot_cache()[length] = heap_object;
-  startup_serializer_->VisitPointer(
-      &isolate->serialize_partial_snapshot_cache()[length]);
+  isolate->PushToPartialSnapshotCache(heap_object);
+  startup_serializer_->VisitPointer(reinterpret_cast<Object**>(&heap_object));
   // We don't recurse from the startup snapshot generator into the partial
   // snapshot generator.
-  ASSERT(length == isolate->serialize_partial_snapshot_cache_length());
-  isolate->set_serialize_partial_snapshot_cache_length(length + 1);
+  ASSERT(length == isolate->serialize_partial_snapshot_cache_length() - 1);
   return length;
 }
 
@@ -1337,12 +1318,14 @@
 
 
 void StartupSerializer::SerializeWeakReferences() {
-  for (int i = Isolate::Current()->serialize_partial_snapshot_cache_length();
-       i < Isolate::kPartialSnapshotCacheCapacity;
-       i++) {
-    sink_->Put(kRootArray + kPlain + kStartOfObject, "RootSerialization");
-    sink_->PutInt(Heap::kUndefinedValueRootIndex, "root_index");
-  }
+  // This phase comes right after the partial serialization (of the context
+  // snapshot).  After we have done the partial serialization, the partial
+  // snapshot cache will contain some references needed to decode the partial
+  // snapshot.  We add one entry with 'undefined', the sentinel that the
+  // deserializer uses to know when it is done deserializing the array.
+  Isolate* isolate = Isolate::Current();
+  Object* undefined = isolate->heap()->undefined_value();
+  VisitPointer(&undefined);
   HEAP->IterateWeakRoots(this, VISIT_ALL);
 }
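
The serializer hunks above replace the fixed-capacity partial snapshot cache
with a sentinel-terminated one: the startup serializer appends a single
'undefined' entry, and the deserializer walks the cache until it sees it, so
no length ever needs to be stored. A self-contained analog of that protocol
(ints stand in for heap objects):

  #include <cassert>
  #include <cstddef>
  #include <vector>

  const int kUndefinedSentinel = -1;  // stand-in for the undefined root

  std::size_t CountCacheEntries(const std::vector<int>& cache) {
    std::size_t i = 0;
    while (cache[i] != kUndefinedSentinel) i++;  // "visit" cache[i] here
    return i;
  }

  int main() {
    std::vector<int> cache = {10, 20, 30, kUndefinedSentinel};
    assert(CountCacheEntries(cache) == 3);
    return 0;
  }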
 
diff --git a/src/serialize.h b/src/serialize.h
index f50e23e..d42231a 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -210,7 +210,6 @@
 class SerializerDeserializer: public ObjectVisitor {
  public:
   static void Iterate(ObjectVisitor* visitor);
-  static void SetSnapshotCacheSize(int size);
 
  protected:
   // Where the pointed-to object can be found:
diff --git a/src/snapshot-common.cc b/src/snapshot-common.cc
index ef89a5e..3a4ac70 100644
--- a/src/snapshot-common.cc
+++ b/src/snapshot-common.cc
@@ -60,6 +60,11 @@
 }
 
 
+bool Snapshot::HaveASnapshotToStartFrom() {
+  return size_ != 0;
+}
+
+
 Handle<Context> Snapshot::NewContextFromSnapshot() {
   if (context_size_ == 0) {
     return Handle<Context>();
diff --git a/src/snapshot.h b/src/snapshot.h
index 4f01a2d..ab4529e 100644
--- a/src/snapshot.h
+++ b/src/snapshot.h
@@ -40,6 +40,8 @@
   // could be found.
   static bool Initialize(const char* snapshot_file = NULL);
 
+  static bool HaveASnapshotToStartFrom();
+
   // Create a new context using the internal partial snapshot.
   static Handle<Context> NewContextFromSnapshot();
 
diff --git a/src/splay-tree.h b/src/splay-tree.h
index 388f9b5..8844d8a 100644
--- a/src/splay-tree.h
+++ b/src/splay-tree.h
@@ -66,9 +66,13 @@
                             AllocationPolicy allocator = AllocationPolicy())) {
     return allocator.New(static_cast<int>(size));
   }
-  INLINE(void operator delete(void* p, size_t)) {
+  INLINE(void operator delete(void* p)) {
     AllocationPolicy::Delete(p);
   }
+  // Please the MSVC compiler.  We should never have to execute this.
+  INLINE(void operator delete(void* p, AllocationPolicy policy)) {
+    UNREACHABLE();
+  }
 
   // Inserts the given key in this tree with the given value.  Returns
   // true if a node was inserted, otherwise false.  If found the locator
@@ -119,9 +123,14 @@
     INLINE(void* operator new(size_t size, AllocationPolicy allocator)) {
       return allocator.New(static_cast<int>(size));
     }
-    INLINE(void operator delete(void* p, size_t)) {
+    INLINE(void operator delete(void* p)) {
       return AllocationPolicy::Delete(p);
     }
+    // Please the MSVC compiler.  We should never have to execute this.
+    INLINE(void operator delete(void* p, AllocationPolicy allocator)) {
+      UNREACHABLE();
+    }
 
     Key key() { return key_; }
     Value value() { return value_; }
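
Background for the splay-tree change: when a class defines a placement-style
'operator new(size_t, AllocationPolicy)', the compiler needs a matching
placement 'operator delete(void*, AllocationPolicy)' to call if the
constructor throws, and MSVC warns (C4291) when it is absent, hence the
UNREACHABLE() stubs. A self-contained illustration of the rule (not V8 code):

  #include <cstddef>
  #include <cstdlib>

  struct Policy {
    static void* New(std::size_t size) { return std::malloc(size); }
    static void Delete(void* p) { std::free(p); }
  };

  struct Node {
    void* operator new(std::size_t size, Policy) { return Policy::New(size); }
    void operator delete(void* p) { Policy::Delete(p); }
    // Matching placement delete: only invoked if the constructor throws
    // during 'new (Policy()) Node'.
    void operator delete(void* p, Policy) { Policy::Delete(p); }
  };

  int main() {
    Node* n = new (Policy()) Node;
    delete n;  // ordinary delete path
    return 0;
  }
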
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 2794891..d790f03 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -44,7 +44,7 @@
 
 
 StubCache::StubCache(Isolate* isolate, Zone* zone)
-    : isolate_(isolate), zone_(zone) {
+    : isolate_(isolate) {
   ASSERT(isolate == Isolate::Current());
 }
 
@@ -927,7 +927,8 @@
 void StubCache::CollectMatchingMaps(SmallMapList* types,
                                     String* name,
                                     Code::Flags flags,
-                                    Handle<Context> global_context) {
+                                    Handle<Context> global_context,
+                                    Zone* zone) {
   for (int i = 0; i < kPrimaryTableSize; i++) {
     if (primary_[i].key == name) {
       Map* map = primary_[i].value->FindFirstMap();
@@ -938,7 +939,7 @@
       int offset = PrimaryOffset(name, flags, map);
       if (entry(primary_, offset) == &primary_[i] &&
           !TypeFeedbackOracle::CanRetainOtherContext(map, *global_context)) {
-        types->Add(Handle<Map>(map), zone());
+        types->Add(Handle<Map>(map), zone);
       }
     }
   }
@@ -962,7 +963,7 @@
       int offset = SecondaryOffset(name, flags, primary_offset);
       if (entry(secondary_, offset) == &secondary_[i] &&
           !TypeFeedbackOracle::CanRetainOtherContext(map, *global_context)) {
-        types->Add(Handle<Map>(map), zone());
+        types->Add(Handle<Map>(map), zone);
       }
     }
   }
diff --git a/src/stub-cache.h b/src/stub-cache.h
index cd04143..de8a76b 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -260,7 +260,8 @@
   void CollectMatchingMaps(SmallMapList* types,
                            String* name,
                            Code::Flags flags,
-                           Handle<Context> global_context);
+                           Handle<Context> global_context,
+                           Zone* zone);
 
   // Generate code for probing the stub cache table.
   // Arguments extra, extra2 and extra3 may be used to pass additional scratch
@@ -310,7 +311,6 @@
   Isolate* isolate() { return isolate_; }
   Heap* heap() { return isolate()->heap(); }
   Factory* factory() { return isolate()->factory(); }
-  Zone* zone() const { return zone_; }
 
  private:
   StubCache(Isolate* isolate, Zone* zone);
@@ -386,7 +386,6 @@
   Entry primary_[kPrimaryTableSize];
   Entry secondary_[kSecondaryTableSize];
   Isolate* isolate_;
-  Zone* zone_;
 
   friend class Isolate;
   friend class SCTableReference;
diff --git a/src/type-info.cc b/src/type-info.cc
index f5e9106..dfdfd89 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -511,7 +511,8 @@
     isolate_->stub_cache()->CollectMatchingMaps(types,
                                                 *name,
                                                 flags,
-                                                global_context_);
+                                                global_context_,
+                                                zone());
   }
 }
 
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 6db9c77..f36b0ed 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -210,6 +210,9 @@
   SC(compute_entry_frame, V8.ComputeEntryFrame)                       \
   SC(generic_binary_stub_calls, V8.GenericBinaryStubCalls)            \
   SC(generic_binary_stub_calls_regs, V8.GenericBinaryStubCallsRegs)   \
+  SC(fast_new_closure_total, V8.FastNewClosureTotal)                  \
+  SC(fast_new_closure_try_optimized, V8.FastNewClosureTryOptimized)   \
+  SC(fast_new_closure_install_optimized, V8.FastNewClosureInstallOptimized) \
   SC(string_add_runtime, V8.StringAddRuntime)                         \
   SC(string_add_native, V8.StringAddNative)                           \
   SC(string_add_runtime_ext_to_ascii, V8.StringAddRuntimeExtToAscii)  \
@@ -240,10 +243,6 @@
   SC(transcendental_cache_miss, V8.TranscendentalCacheMiss)           \
   SC(stack_interrupts, V8.StackInterrupts)                            \
   SC(runtime_profiler_ticks, V8.RuntimeProfilerTicks)                 \
-  SC(other_ticks, V8.OtherTicks)                                      \
-  SC(js_opt_ticks, V8.JsOptTicks)                                     \
-  SC(js_non_opt_ticks, V8.JsNonoptTicks)                              \
-  SC(js_other_ticks, V8.JsOtherTicks)                                 \
   SC(smi_checks_removed, V8.SmiChecksRemoved)                         \
   SC(map_checks_removed, V8.MapChecksRemoved)                         \
   SC(quote_json_char_count, V8.QuoteJsonCharacterCount)               \
diff --git a/src/version.cc b/src/version.cc
index 1edb296..420a330 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -33,9 +33,9 @@
 // NOTE these macros are used by the SCons build script so their names
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
-#define MINOR_VERSION     11
-#define BUILD_NUMBER      10
-#define PATCH_LEVEL       6
+#define MINOR_VERSION     12
+#define BUILD_NUMBER      0
+#define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 61d6c87..ecdb392 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -62,9 +62,13 @@
 void FastNewClosureStub::Generate(MacroAssembler* masm) {
   // Create a new closure from the given function info in new
   // space. Set the context to the current context in rsi.
+  Counters* counters = masm->isolate()->counters();
+
   Label gc;
   __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
 
+  __ IncrementCounter(counters->fast_new_closure_total(), 1);
+
   // Get the function info from the stack.
   __ movq(rdx, Operand(rsp, 1 * kPointerSize));
 
@@ -76,32 +80,109 @@
   // as the map of the allocated object.
   __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
-  __ movq(rcx, Operand(rcx, Context::SlotOffset(map_index)));
-  __ movq(FieldOperand(rax, JSObject::kMapOffset), rcx);
+  __ movq(rbx, Operand(rcx, Context::SlotOffset(map_index)));
+  __ movq(FieldOperand(rax, JSObject::kMapOffset), rbx);
 
   // Initialize the rest of the function. We don't have to update the
   // write barrier because the allocated object is in new space.
   __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
-  __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
+  __ LoadRoot(r8, Heap::kTheHoleValueRootIndex);
   __ LoadRoot(rdi, Heap::kUndefinedValueRootIndex);
   __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
   __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx);
-  __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), rcx);
+  __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), r8);
   __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx);
   __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
   __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
-  __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset), rdi);
 
   // Initialize the code pointer in the function to be the one
   // found in the shared function info object.
+  // But first check if there is an optimized version for our context.
+  Label check_optimized;
+  Label install_unoptimized;
+  if (FLAG_cache_optimized_code) {
+    __ movq(rbx,
+            FieldOperand(rdx, SharedFunctionInfo::kOptimizedCodeMapOffset));
+    __ testq(rbx, rbx);
+    __ j(not_zero, &check_optimized, Label::kNear);
+  }
+  __ bind(&install_unoptimized);
+  __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset),
+          rdi);  // Initialize with undefined.
   __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
   __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
   __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
 
+  // Return and remove the on-stack parameter.
+  __ ret(1 * kPointerSize);
+
+  __ bind(&check_optimized);
+
+  __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1);
+
+  // rcx holds the global context, rbx points to the fixed array of
+  // 3-element entries (global context, optimized code, literals).
+  // The optimized code map must never be empty, so check the first entry.
+  Label install_optimized;
+  // Speculatively move the code object into rdx.
+  __ movq(rdx, FieldOperand(rbx, FixedArray::kHeaderSize + kPointerSize));
+  __ cmpq(rcx, FieldOperand(rbx, FixedArray::kHeaderSize));
+  __ j(equal, &install_optimized);
+
+  // Iterate through the rest of the map backwards.  rdx holds an index.
+  Label loop;
+  Label restore;
+  __ movq(rdx, FieldOperand(rbx, FixedArray::kLengthOffset));
+  __ SmiToInteger32(rdx, rdx);
+  __ bind(&loop);
+  // Do not double-check the first entry.
+  __ cmpq(rdx, Immediate(SharedFunctionInfo::kEntryLength));
+  __ j(equal, &restore);
+  __ subq(rdx, Immediate(SharedFunctionInfo::kEntryLength));  // Skip an entry.
+  __ cmpq(rcx, FieldOperand(rbx,
+                            rdx,
+                            times_pointer_size,
+                            FixedArray::kHeaderSize));
+  __ j(not_equal, &loop, Label::kNear);
+  // Hit: fetch the optimized code.
+  __ movq(rdx, FieldOperand(rbx,
+                            rdx,
+                            times_pointer_size,
+                            FixedArray::kHeaderSize + 1 * kPointerSize));
+
+  __ bind(&install_optimized);
+  __ IncrementCounter(counters->fast_new_closure_install_optimized(), 1);
+
+  // TODO(fschneider): Idea: store proper code pointers in the map and either
+  // unmangle them on marking or do nothing as the whole map is discarded on
+  // major GC anyway.
+  __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
+  __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
+
+  // Now link the function into the global context's list of optimized
+  // functions.
+  __ movq(rdx, ContextOperand(rcx, Context::OPTIMIZED_FUNCTIONS_LIST));
+
+  __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset), rdx);
+  // No write barrier needed, as the JSFunction (rax) is in new space.
+
+  __ movq(ContextOperand(rcx, Context::OPTIMIZED_FUNCTIONS_LIST), rax);
+  // Copy the JSFunction (rax) into rdx before issuing the write barrier,
+  // as it clobbers all the registers passed to it.
+  __ movq(rdx, rax);
+  __ RecordWriteContextSlot(
+      rcx,
+      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
+      rdx,
+      rbx,
+      kDontSaveFPRegs);
 
   // Return and remove the on-stack parameter.
   __ ret(1 * kPointerSize);
 
+  __ bind(&restore);
+  __ movq(rdx, Operand(rsp, 1 * kPointerSize));
+  __ jmp(&install_unoptimized);
+
   // Create a new closure through the slower runtime call.
   __ bind(&gc);
   __ pop(rcx);  // Temporarily remove return address.
@@ -6014,6 +6095,8 @@
   { REG(r11), REG(rax), REG(r15), EMIT_REMEMBERED_SET},
   // StoreArrayLiteralElementStub::Generate
   { REG(rbx), REG(rax), REG(rcx), EMIT_REMEMBERED_SET},
+  // FastNewClosureStub::Generate
+  { REG(rcx), REG(rdx), REG(rbx), EMIT_REMEMBERED_SET},
   // Null termination.
   { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
 };
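
The assembly above implements a lookup in the new optimized code map: a fixed
array of (global context, optimized code, literals) triples, with the first
entry tried before a backwards scan and a fallback to the unoptimized code
when nothing matches. A minimal self-contained model of that logic (ints
stand in for heap objects; SharedFunctionInfo::kEntryLength is the 3-element
stride):

  #include <cassert>
  #include <cstddef>
  #include <vector>

  struct Entry { int context; int code; int literals; };

  // Returns the cached code for 'context', or -1 meaning "install the
  // unoptimized code from the SharedFunctionInfo instead".
  int LookupOptimizedCode(const std::vector<Entry>& map, int context) {
    if (map.empty()) return -1;
    if (map.front().context == context) return map.front().code;  // fast path
    for (std::size_t i = map.size(); i-- > 1; ) {  // backwards, skipping [0]
      if (map[i].context == context) return map[i].code;
    }
    return -1;
  }

  int main() {
    std::vector<Entry> map = {{1, 100, 7}, {2, 200, 8}};
    assert(LookupOptimizedCode(map, 2) == 200);
    assert(LookupOptimizedCode(map, 3) == -1);
    return 0;
  }
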
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index f3046b9..2813bef 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -52,6 +52,10 @@
 
   if (!function->IsOptimized()) return;
 
+  // The optimized code is going to be patched, so we cannot use it
+  // any more.  Play it safe and reset the whole cache.
+  function->shared()->ClearOptimizedCodeMap();
+
   // Get the optimized code.
   Code* code = function->code();
 
@@ -100,8 +104,19 @@
   // ignore all slots that might have been recorded on it.
   isolate->heap()->mark_compact_collector()->InvalidateCode(code);
 
-  // Set the code for the function to non-optimized version.
-  function->ReplaceCode(function->shared()->code());
+  // Iterate over all the functions that share the same code object
+  // and make them use the unoptimized version.
+  Context* context = function->context()->global_context();
+  Object* element = context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
+  SharedFunctionInfo* shared = function->shared();
+  while (!element->IsUndefined()) {
+    JSFunction* func = JSFunction::cast(element);
+    // Grab the next element before replacing the code, as ReplaceCode
+    // alters the list.
+    element = func->next_function_link();
+    if (func->code() == code) {
+      func->ReplaceCode(shared->code());
+    }
+  }
 
   if (FLAG_trace_deopt) {
     PrintF("[forced deoptimization: ");
@@ -234,9 +249,9 @@
   unsigned node_id = iterator.Next();
   USE(node_id);
   ASSERT(node_id == ast_id);
-  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
-  USE(function);
-  ASSERT(function == function_);
+  int closure_id = iterator.Next();
+  USE(closure_id);
+  ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
   unsigned height = iterator.Next();
   unsigned height_in_bytes = height * kPointerSize;
   USE(height_in_bytes);
@@ -341,15 +356,15 @@
     output_[0]->SetPc(pc);
   }
   Code* continuation =
-      function->GetIsolate()->builtins()->builtin(Builtins::kNotifyOSR);
+      function_->GetIsolate()->builtins()->builtin(Builtins::kNotifyOSR);
   output_[0]->SetContinuation(
       reinterpret_cast<intptr_t>(continuation->entry()));
 
   if (FLAG_trace_osr) {
     PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
            ok ? "finished" : "aborted",
-           reinterpret_cast<intptr_t>(function));
-    function->PrintName();
+           reinterpret_cast<intptr_t>(function_));
+    function_->PrintName();
     PrintF(" => pc=0x%0" V8PRIxPTR "]\n", output_[0]->GetPc());
   }
 }
@@ -579,7 +594,15 @@
 void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
                                    int frame_index) {
   int node_id = iterator->Next();
-  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+  JSFunction* function;
+  if (frame_index != 0) {
+    function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+  } else {
+    int closure_id = iterator->Next();
+    USE(closure_id);
+    ASSERT_EQ(Translation::kSelfLiteralId, closure_id);
+    function = function_;
+  }
   unsigned height = iterator->Next();
   unsigned height_in_bytes = height * kPointerSize;
   if (FLAG_trace_deopt) {
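
Why kSelfLiteralId: with the optimized code map, one optimized code object can
be installed on several closures, so a translation can no longer identify the
outermost frame by a function literal baked into the code. It records the
marker instead, and the deoptimizer substitutes the function actually being
deoptimized (the DoThisFunction change below makes the same move for 'this
function'). A minimal model of the reader side (the marker value is
illustrative):

  #include <cassert>

  const int kSelfLiteralId = -239;  // any value outside valid literal indices

  int ResolveClosure(int closure_id, int self, const int* literals) {
    return closure_id == kSelfLiteralId ? self : literals[closure_id];
  }

  int main() {
    int literals[] = {111, 222};
    assert(ResolveClosure(kSelfLiteralId, 999, literals) == 999);  // frame 0
    assert(ResolveClosure(1, 999, literals) == 222);  // inlined frame
    return 0;
  }
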
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index a3e42eb..a55788d 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -310,10 +310,6 @@
     // Self-optimization is a one-off thing; if it fails, don't try again.
     reset_value = Smi::kMaxValue;
   }
-  if (isolate()->IsDebuggerActive()) {
-    // Detect debug break requests as soon as possible.
-    reset_value = 10;
-  }
   __ movq(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
   __ movq(kScratchRegister,
           reinterpret_cast<uint64_t>(Smi::FromInt(reset_value)),
@@ -1570,7 +1566,7 @@
   // marked expressions, no store code is emitted.
   expr->CalculateEmitStore(zone());
 
-  AccessorTable accessor_table(isolate()->zone());
+  AccessorTable accessor_table(zone());
   for (int i = 0; i < expr->properties()->length(); i++) {
     ObjectLiteral::Property* property = expr->properties()->at(i);
     if (property->IsCompileTimeValue()) continue;
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index bc8f848..ccc81bb 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -367,7 +367,10 @@
   int height = translation_size - environment->parameter_count();
 
   WriteTranslation(environment->outer(), translation);
-  int closure_id = DefineDeoptimizationLiteral(environment->closure());
+  int closure_id = *info()->closure() != *environment->closure()
+      ? DefineDeoptimizationLiteral(environment->closure())
+      : Translation::kSelfLiteralId;
+
   switch (environment->frame_type()) {
     case JS_FUNCTION:
       translation->BeginJSFrame(environment->ast_id(), closure_id, height);
@@ -2731,7 +2734,7 @@
 
 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   Register result = ToRegister(instr->result());
-  __ LoadHeapObject(result, instr->hydrogen()->closure());
+  __ movq(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
 }
 
 
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index 99e7ec8..d7fe26b 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -45,26 +45,25 @@
 
 class LCodeGen BASE_EMBEDDED {
  public:
-  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info,
-           Zone* zone)
-      : chunk_(chunk),
+  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+      : zone_(info->zone()),
+        chunk_(chunk),
         masm_(assembler),
         info_(info),
         current_block_(-1),
         current_instruction_(-1),
         instructions_(chunk->instructions()),
-        deoptimizations_(4, zone),
-        jump_table_(4, zone),
-        deoptimization_literals_(8, zone),
+        deoptimizations_(4, info->zone()),
+        jump_table_(4, info->zone()),
+        deoptimization_literals_(8, info->zone()),
         inlined_function_count_(0),
         scope_(info->scope()),
         status_(UNUSED),
-        translations_(zone),
-        deferred_(8, zone),
+        translations_(info->zone()),
+        deferred_(8, info->zone()),
         osr_pc_offset_(-1),
         last_lazy_deopt_pc_(0),
-        safepoints_(zone),
-        zone_(zone),
+        safepoints_(info->zone()),
         resolver_(this),
         expected_safepoint_kind_(Safepoint::kSimple) {
     PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -325,6 +324,7 @@
 
   void EnsureSpaceForLazyDeopt(int space_needed);
 
+  Zone* zone_;
   LChunk* const chunk_;
   MacroAssembler* const masm_;
   CompilationInfo* const info_;
@@ -347,8 +347,6 @@
   // itself is emitted at the end of the generated code.
   SafepointTableBuilder safepoints_;
 
-  Zone* zone_;
-
   // Compiler from a set of parallel moves to a sequential list of moves.
   LGapResolver resolver_;
 
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index a72a0a0..86f7bfe 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -353,6 +353,14 @@
   // In either case succeed immediately.
   __ j(equal, &fallthrough);
 
+  // -----------------------
+  // rdx - Start of capture
+  // rbx - Length of capture
+  // Check that there are sufficient characters left in the input.
+  __ movl(rax, rdi);
+  __ addl(rax, rbx);
+  BranchOrBacktrack(greater, on_no_match);
+
   if (mode_ == ASCII) {
     Label loop_increment;
     if (on_no_match == NULL) {
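
The new guard above adds a bounds check to back-reference matching, assuming
the usual x64 irregexp register convention: rdi holds the current position as
a negative offset from the end of the input (0 means "at the end"), so if rdi
plus the capture length (rbx) is positive, the capture cannot fit in the
remaining input and the matcher must backtrack instead of reading past the
end. A tiny model of the check under that convention:

  #include <cassert>

  // current_pos_from_end mirrors rdi; capture_length mirrors rbx.
  bool CaptureFits(int current_pos_from_end, int capture_length) {
    // Mirrors: movl rax,rdi; addl rax,rbx; BranchOrBacktrack(greater, ...).
    return current_pos_from_end + capture_length <= 0;
  }

  int main() {
    assert(CaptureFits(-3, 3));   // exactly enough input left
    assert(!CaptureFits(-2, 3));  // would read one char past the end
    return 0;
  }
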
diff --git a/src/zone-inl.h b/src/zone-inl.h
index d75e297..e312b20 100644
--- a/src/zone-inl.h
+++ b/src/zone-inl.h
@@ -40,7 +40,7 @@
 
 
 inline void* Zone::New(int size) {
-  ASSERT(ZoneScope::nesting() > 0);
+  ASSERT(scope_nesting_ > 0);
   // Round up the requested size to fit the alignment.
   size = RoundUp(size, kAlignment);
 
@@ -100,7 +100,7 @@
 
 inline void* ZoneAllocationPolicy::New(size_t size) {
   ASSERT(zone_);
-  return zone_->New(size);
+  return zone_->New(static_cast<int>(size));
 }
 
 
@@ -110,19 +110,14 @@
 }
 
 
-ZoneScope::ZoneScope(Isolate* isolate, ZoneScopeMode mode)
-    : isolate_(isolate), mode_(mode) {
-  isolate_->zone()->scope_nesting_++;
+ZoneScope::ZoneScope(Zone* zone, ZoneScopeMode mode)
+    : zone_(zone), mode_(mode) {
+  zone_->scope_nesting_++;
 }
 
 
 bool ZoneScope::ShouldDeleteOnExit() {
-  return isolate_->zone()->scope_nesting_ == 1 && mode_ == DELETE_ON_EXIT;
-}
-
-
-int ZoneScope::nesting() {
-  return Isolate::Current()->zone()->scope_nesting_;
+  return zone_->scope_nesting_ == 1 && mode_ == DELETE_ON_EXIT;
 }
 
 
diff --git a/src/zone.cc b/src/zone.cc
index d5d05ab..51b8113 100644
--- a/src/zone.cc
+++ b/src/zone.cc
@@ -67,20 +67,20 @@
 };
 
 
-Zone::Zone()
+Zone::Zone(Isolate* isolate)
     : zone_excess_limit_(256 * MB),
       segment_bytes_allocated_(0),
       position_(0),
       limit_(0),
       scope_nesting_(0),
-      segment_head_(NULL) {
+      segment_head_(NULL),
+      isolate_(isolate) {
 }
 unsigned Zone::allocation_size_ = 0;
 
 ZoneScope::~ZoneScope() {
-  ASSERT_EQ(Isolate::Current(), isolate_);
-  if (ShouldDeleteOnExit()) isolate_->zone()->DeleteAll();
-  isolate_->zone()->scope_nesting_--;
+  if (ShouldDeleteOnExit()) zone_->DeleteAll();
+  zone_->scope_nesting_--;
 }
 
 
diff --git a/src/zone.h b/src/zone.h
index 1bc4984..01e887e 100644
--- a/src/zone.h
+++ b/src/zone.h
@@ -64,6 +64,8 @@
 
 class Zone {
  public:
+  explicit Zone(Isolate* isolate);
+  ~Zone() { DeleteKeptSegment(); }
   // Allocate 'size' bytes of memory in the Zone; expands the Zone by
   // allocating new segments of memory on demand using malloc().
   inline void* New(int size);
@@ -114,9 +116,6 @@
   // the zone.
   int segment_bytes_allocated_;
 
-  // Each isolate gets its own zone.
-  Zone();
-
   // Expand the Zone to hold at least 'size' more bytes and allocate
   // the bytes. Returns the address of the newly allocated chunk of
   // memory in the Zone. Should only be called if there isn't enough
@@ -235,7 +234,7 @@
 // outer-most scope.
 class ZoneScope BASE_EMBEDDED {
  public:
-  INLINE(ZoneScope(Isolate* isolate, ZoneScopeMode mode));
+  INLINE(ZoneScope(Zone* zone, ZoneScopeMode mode));
 
   virtual ~ZoneScope();
 
@@ -250,7 +249,7 @@
   inline static int nesting();
 
  private:
-  Isolate* isolate_;
+  Zone* zone_;
   ZoneScopeMode mode_;
 };
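
With Zone decoupled from the isolate, ZoneScope call sites now name the zone
they operate on explicitly. A hedged sketch of the migration (runtime_zone()
appears in the runtime.cc hunks above; other zones work the same way):

  // Before: ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
  // After:
  ZoneScope zone_scope(isolate->runtime_zone(), DELETE_ON_EXIT);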