Version 2.4.1

Added the ability for an embedding application to receive a callback when V8 allocates or deallocates memory from the OS (see V8::AddMemoryAllocationCallback and V8::RemoveMemoryAllocationCallback).
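
A minimal embedder-side sketch of the new API (not part of this commit; the
callback body, its name, and the logging are illustrative assumptions):

    #include <v8.h>
    #include <cstdio>

    // Hypothetical logger, invoked for every chunk V8 requests from or
    // returns to the OS, across all object spaces.
    static void LogOsMemory(v8::ObjectSpace space,
                            v8::AllocationAction action,
                            int size) {
      const char* verb =
          (action == v8::kAllocationActionAllocate) ? "allocated" : "freed";
      std::printf("V8 %s %d bytes (space mask 0x%x)\n",
                  verb, size, static_cast<unsigned>(space));
    }

    int main() {
      v8::V8::AddMemoryAllocationCallback(LogOsMemory,
                                          v8::kObjectSpaceAll,
                                          v8::kAllocationActionAll);
      // ... create contexts and run scripts here ...
      v8::V8::RemoveMemoryAllocationCallback(LogOsMemory);
      return 0;
    }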

Fixed several JSON bugs (including issue 855).

Fixed memory overrun crash bug triggered during V8's tick-based profiling.

Performance improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@5412 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 37f427c..cdf1b0f 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,17 @@
+2010-09-06: Version 2.4.1
+
+      Added the ability for an embedding application to receive a callback
+      when V8 allocates or deallocates memory from the OS (see
+      V8::AddMemoryAllocationCallback and V8::RemoveMemoryAllocationCallback).
+
+      Fixed several JSON bugs (including issue 855).
+
+      Fixed memory overrun crash bug triggered during V8's tick-based
+      profiling.
+
+      Performance improvements on all platforms.
+
+
 2010-09-01: Version 2.4.0
 
       Fix bug in Object.freeze and Object.seal when Array.prototype or
@@ -41,7 +55,7 @@
 
         Removed specialized handling of GCC 4.4 (issue 830).
 
-        Fixed DST cache to take into account the suspension of DST in 
+        Fixed DST cache to take into account the suspension of DST in
         Egypt during the 2010 Ramadan (issue http://crbug.com/51855).
 
         Performance improvements on all platforms.
diff --git a/include/v8.h b/include/v8.h
index d62d669..b89c244 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -2359,6 +2359,30 @@
 
 typedef void (*AddHistogramSampleCallback)(void* histogram, int sample);
 
+// --- M e m o r y  A l l o c a t i o n  C a l l b a c k ---
+enum ObjectSpace {
+  kObjectSpaceNewSpace = 1 << 0,
+  kObjectSpaceOldPointerSpace = 1 << 1,
+  kObjectSpaceOldDataSpace = 1 << 2,
+  kObjectSpaceCodeSpace = 1 << 3,
+  kObjectSpaceMapSpace = 1 << 4,
+  kObjectSpaceLoSpace = 1 << 5,
+
+  kObjectSpaceAll = kObjectSpaceNewSpace | kObjectSpaceOldPointerSpace |
+      kObjectSpaceOldDataSpace | kObjectSpaceCodeSpace | kObjectSpaceMapSpace |
+      kObjectSpaceLoSpace
+};
+
+enum AllocationAction {
+  kAllocationActionAllocate = 1 << 0,
+  kAllocationActionFree = 1 << 1,
+  kAllocationActionAll = kAllocationActionAllocate | kAllocationActionFree
+};
+
+typedef void (*MemoryAllocationCallback)(ObjectSpace space,
+                                         AllocationAction action,
+                                         int size);
+
 // --- F a i l e d A c c e s s C h e c k C a l l b a c k ---
 typedef void (*FailedAccessCheckCallback)(Local<Object> target,
                                           AccessType type,
@@ -2579,6 +2603,20 @@
   static void SetGlobalGCEpilogueCallback(GCCallback);
 
   /**
+   * Enables the host application to be notified and perform custom
+   * logging when V8 allocates memory from the OS.
+   */
+  static void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
+                                          ObjectSpace space,
+                                          AllocationAction action);
+
+  /**
+   * Removes the callback that was installed by
+   * AddMemoryAllocationCallback.
+   */
+  static void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
+
+  /**
    * Allows the host application to group objects together. If one
    * object in the group is alive, all objects in the group are alive.
    * After each garbage collection, object groups are removed. It is
diff --git a/src/api.cc b/src/api.cc
index e7a9e5c..4710557 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -3905,6 +3905,22 @@
 }
 
 
+void V8::AddMemoryAllocationCallback(MemoryAllocationCallback callback,
+                                     ObjectSpace space,
+                                     AllocationAction action) {
+  if (IsDeadCheck("v8::V8::AddMemoryAllocationCallback()")) return;
+  i::MemoryAllocator::AddMemoryAllocationCallback(callback,
+                                                  space,
+                                                  action);
+}
+
+
+void V8::RemoveMemoryAllocationCallback(MemoryAllocationCallback callback) {
+  if (IsDeadCheck("v8::V8::RemoveMemoryAllocationCallback()")) return;
+  i::MemoryAllocator::RemoveMemoryAllocationCallback(callback);
+}
+
+
 void V8::PauseProfiler() {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   PauseProfilerEx(PROFILER_MODULE_CPU);
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index f75ee8b..0e479e2 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -1463,95 +1463,6 @@
 }
 
 
-// Tries to get a signed int32 out of a double precision floating point heap
-// number.  Rounds towards 0.  Fastest for doubles that are in the ranges
-// -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff.  This corresponds
-// almost to the range of signed int32 values that are not Smis.  Jumps to the
-// label 'slow' if the double isn't in the range -0x80000000.0 to 0x80000000.0
-// (excluding the endpoints).
-static void GetInt32(MacroAssembler* masm,
-                     Register source,
-                     Register dest,
-                     Register scratch,
-                     Register scratch2,
-                     Label* slow) {
-  Label right_exponent, done;
-  // Get exponent word.
-  __ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
-  // Get exponent alone in scratch2.
-  __ Ubfx(scratch2,
-          scratch,
-          HeapNumber::kExponentShift,
-          HeapNumber::kExponentBits);
-  // Load dest with zero.  We use this either for the final shift or
-  // for the answer.
-  __ mov(dest, Operand(0));
-  // Check whether the exponent matches a 32 bit signed int that is not a Smi.
-  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).  This is
-  // the exponent that we are fastest at and also the highest exponent we can
-  // handle here.
-  const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
-  // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
-  // split it up to avoid a constant pool entry.  You can't do that in general
-  // for cmp because of the overflow flag, but we know the exponent is in the
-  // range 0-2047 so there is no overflow.
-  int fudge_factor = 0x400;
-  __ sub(scratch2, scratch2, Operand(fudge_factor));
-  __ cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
-  // If we have a match of the int32-but-not-Smi exponent then skip some logic.
-  __ b(eq, &right_exponent);
-  // If the exponent is higher than that then go to slow case.  This catches
-  // numbers that don't fit in a signed int32, infinities and NaNs.
-  __ b(gt, slow);
-
-  // We know the exponent is smaller than 30 (biased).  If it is less than
-  // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
-  // it rounds to zero.
-  const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
-  __ sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
-  // Dest already has a Smi zero.
-  __ b(lt, &done);
-  if (!CpuFeatures::IsSupported(VFP3)) {
-    // We have an exponent between 0 and 30 in scratch2.  Subtract from 30 to
-    // get how much to shift down.
-    __ rsb(dest, scratch2, Operand(30));
-  }
-  __ bind(&right_exponent);
-  if (CpuFeatures::IsSupported(VFP3)) {
-    CpuFeatures::Scope scope(VFP3);
-    // ARMv7 VFP3 instructions implementing double precision to integer
-    // conversion using round to zero.
-    __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
-    __ vmov(d7, scratch2, scratch);
-    __ vcvt_s32_f64(s15, d7);
-    __ vmov(dest, s15);
-  } else {
-    // Get the top bits of the mantissa.
-    __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
-    // Put back the implicit 1.
-    __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
-    // Shift up the mantissa bits to take up the space the exponent used to
-    // take. We just orred in the implicit bit so that took care of one and
-    // we want to leave the sign bit 0 so we subtract 2 bits from the shift
-    // distance.
-    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
-    __ mov(scratch2, Operand(scratch2, LSL, shift_distance));
-    // Put sign in zero flag.
-    __ tst(scratch, Operand(HeapNumber::kSignMask));
-    // Get the second half of the double. For some exponents we don't
-    // actually need this because the bits get shifted out again, but
-    // it's probably slower to test than just to do it.
-    __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
-    // Shift down 22 bits to get the last 10 bits.
-    __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
-    // Move down according to the exponent.
-    __ mov(dest, Operand(scratch, LSR, dest));
-    // Fix sign if sign bit was set.
-    __ rsb(dest, dest, Operand(0), LeaveCC, ne);
-  }
-  __ bind(&done);
-}
-
 // For bitwise ops where the inputs are not both Smis we here try to determine
 // whether both inputs are either Smis or at least heap numbers that can be
 // represented by a 32 bit signed value.  We truncate towards zero as required
@@ -1574,7 +1485,7 @@
   __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
   __ cmp(r4, heap_number_map);
   __ b(ne, &slow);
-  GetInt32(masm, lhs, r3, r5, r4, &slow);
+  __ ConvertToInt32(lhs, r3, r5, r4, &slow);
   __ jmp(&done_checking_lhs);
   __ bind(&lhs_is_smi);
   __ mov(r3, Operand(lhs, ASR, 1));
@@ -1585,7 +1496,7 @@
   __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
   __ cmp(r4, heap_number_map);
   __ b(ne, &slow);
-  GetInt32(masm, rhs, r2, r5, r4, &slow);
+  __ ConvertToInt32(rhs, r2, r5, r4, &slow);
   __ jmp(&done_checking_rhs);
   __ bind(&rhs_is_smi);
   __ mov(r2, Operand(rhs, ASR, 1));
@@ -2440,7 +2351,7 @@
     __ b(ne, &slow);
 
     // Convert the heap number is r0 to an untagged integer in r1.
-    GetInt32(masm, r0, r1, r2, r3, &slow);
+    __ ConvertToInt32(r0, r1, r2, r3, &slow);
 
     // Do the bitwise operation (move negated) and check if the result
     // fits in a smi.
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 08a8da0..698d0c8 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -917,16 +917,55 @@
   }
 
   virtual void Generate();
+  // This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and
+  // Exit(). Currently on ARM, SaveRegisters() and RestoreRegisters() are
+  // empty methods, so it is the responsibility of the deferred code to save
+  // and restore registers.
+  virtual bool AutoSaveAndRestore() { return false; }
+
+  void JumpToNonSmiInput(Condition cond);
+  void JumpToAnswerOutOfRange(Condition cond);
 
  private:
+  void GenerateNonSmiInput();
+  void GenerateAnswerOutOfRange();
+  void WriteNonSmiAnswer(Register answer,
+                         Register heap_number,
+                         Register scratch);
+
   Token::Value op_;
   int value_;
   bool reversed_;
   OverwriteMode overwrite_mode_;
   Register tos_register_;
+  Label non_smi_input_;
+  Label answer_out_of_range_;
 };
 
 
+// For bit operations we try harder and handle the case where the input is not
+// a Smi but a 32-bit integer, without calling the generic stub.
+void DeferredInlineSmiOperation::JumpToNonSmiInput(Condition cond) {
+  ASSERT(Token::IsBitOp(op_));
+
+  __ b(cond, &non_smi_input_);
+}
+
+
+// For bit operations the result is always 32 bits, so we handle the case
+// where the result does not fit in a Smi without calling the generic stub.
+void DeferredInlineSmiOperation::JumpToAnswerOutOfRange(Condition cond) {
+  ASSERT(Token::IsBitOp(op_));
+
+  if ((op_ == Token::SHR) && !CpuFeatures::IsSupported(VFP3)) {
+    // >>> requires an unsigned-to-double conversion, and the non-VFP code
+    // does not support this conversion.
+    __ b(cond, entry_label());
+  } else {
+    __ b(cond, &answer_out_of_range_);
+  }
+}
+
 
 // On entry the non-constant side of the binary operation is in tos_register_
 // and the constant smi side is nowhere.  The tos_register_ is not used by the
@@ -1005,6 +1044,172 @@
   // came into this function with, so we can merge back to that frame
   // without trashing it.
   copied_frame.MergeTo(frame_state()->frame());
+
+  Exit();
+
+  if (non_smi_input_.is_linked()) {
+    GenerateNonSmiInput();
+  }
+
+  if (answer_out_of_range_.is_linked()) {
+    GenerateAnswerOutOfRange();
+  }
+}
+
+
+// Convert and write the integer answer into heap_number.
+void DeferredInlineSmiOperation::WriteNonSmiAnswer(Register answer,
+                                                   Register heap_number,
+                                                   Register scratch) {
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
+    __ vmov(s0, answer);
+    if (op_ == Token::SHR) {
+      __ vcvt_f64_u32(d0, s0);
+    } else {
+      __ vcvt_f64_s32(d0, s0);
+    }
+    __ sub(scratch, heap_number, Operand(kHeapObjectTag));
+    __ vstr(d0, scratch, HeapNumber::kValueOffset);
+  } else {
+    WriteInt32ToHeapNumberStub stub(answer, heap_number, scratch);
+    __ CallStub(&stub);
+  }
+}
+
+
+void DeferredInlineSmiOperation::GenerateNonSmiInput() {
+  // We know the left hand side is not a Smi and the right hand side is an
+  // immediate value (value_) which can be represented as a Smi. We only
+  // handle bit operations.
+  ASSERT(Token::IsBitOp(op_));
+
+  if (FLAG_debug_code) {
+    __ Abort("Should not fall through!");
+  }
+
+  __ bind(&non_smi_input_);
+  if (FLAG_debug_code) {
+    __ AbortIfSmi(tos_register_);
+  }
+
+  // This routine uses the registers from r2 to r6.  At the moment they are
+  // not used by the register allocator, but when they are, it should use
+  // SpillAll and MergeTo like DeferredInlineSmiOperation::Generate() above.
+
+  Register heap_number_map = r7;
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  __ ldr(r3, FieldMemOperand(tos_register_, HeapNumber::kMapOffset));
+  __ cmp(r3, heap_number_map);
+  // Not a number, fall back to the GenericBinaryOpStub.
+  __ b(ne, entry_label());
+
+  Register int32 = r2;
+  // Not a 32-bit signed int, fall back to the GenericBinaryOpStub.
+  __ ConvertToInt32(tos_register_, int32, r4, r5, entry_label());
+
+  // tos_register_ (r0 or r1): Original heap number.
+  // int32: signed 32-bit int.
+
+  Label result_not_a_smi;
+  int shift_value = value_ & 0x1f;
+  switch (op_) {
+    case Token::BIT_OR:  __ orr(int32, int32, Operand(value_)); break;
+    case Token::BIT_XOR: __ eor(int32, int32, Operand(value_)); break;
+    case Token::BIT_AND: __ and_(int32, int32, Operand(value_)); break;
+    case Token::SAR:
+      ASSERT(!reversed_);
+      if (shift_value != 0) {
+        __ mov(int32, Operand(int32, ASR, shift_value));
+      }
+      break;
+    case Token::SHR:
+      ASSERT(!reversed_);
+      if (shift_value != 0) {
+        __ mov(int32, Operand(int32, LSR, shift_value), SetCC);
+      } else {
+        // SHR is special because it is required to produce a positive answer.
+        __ cmp(int32, Operand(0));
+      }
+      if (CpuFeatures::IsSupported(VFP3)) {
+        __ b(mi, &result_not_a_smi);
+      } else {
+        // Non-VFP code cannot convert from unsigned to double, so fall back
+        // to GenericBinaryOpStub.
+        __ b(mi, entry_label());
+      }
+      break;
+    case Token::SHL:
+      ASSERT(!reversed_);
+      if (shift_value != 0) {
+        __ mov(int32, Operand(int32, LSL, shift_value));
+      }
+      break;
+    default: UNREACHABLE();
+  }
+  // Check that the *signed* result fits in a smi. Not necessary for AND, for
+  // SAR if the shift is more than 0, or for SHR if the shift is more than 1.
+  if (!( (op_ == Token::AND) ||
+        ((op_ == Token::SAR) && (shift_value > 0)) ||
+        ((op_ == Token::SHR) && (shift_value > 1)))) {
+    __ add(r3, int32, Operand(0x40000000), SetCC);
+    __ b(mi, &result_not_a_smi);
+  }
+  __ mov(tos_register_, Operand(int32, LSL, kSmiTagSize));
+  Exit();
+
+  if (result_not_a_smi.is_linked()) {
+    __ bind(&result_not_a_smi);
+    if (overwrite_mode_ != OVERWRITE_LEFT) {
+      ASSERT((overwrite_mode_ == NO_OVERWRITE) ||
+             (overwrite_mode_ == OVERWRITE_RIGHT));
+      // If the allocation fails, fall back to the GenericBinaryOpStub.
+      __ AllocateHeapNumber(r4, r5, r6, heap_number_map, entry_label());
+      // Nothing can go wrong now, so overwrite tos.
+      __ mov(tos_register_, Operand(r4));
+    }
+
+    // int32: answer as signed 32bits integer.
+    // tos_register_: Heap number to write the answer into.
+    WriteNonSmiAnswer(int32, tos_register_, r3);
+
+    Exit();
+  }
+}
+
+
+void DeferredInlineSmiOperation::GenerateAnswerOutOfRange() {
+  // The inputs to a bitwise operation were Smis but the result cannot fit
+  // into a Smi, so we store it in a heap number. tos_register_ holds the
+  // result to be converted.
+  ASSERT(Token::IsBitOp(op_));
+  ASSERT(!reversed_);
+
+  if (FLAG_debug_code) {
+    __ Abort("Should not fall through!");
+  }
+
+  __ bind(&answer_out_of_range_);
+  if (((value_ & 0x1f) == 0) && (op_ == Token::SHR)) {
+    // >>> 0 is a special case where the result is already tagged but wrong
+    // because the Smi is negative. We untag it.
+    __ mov(tos_register_, Operand(tos_register_, ASR, kSmiTagSize));
+  }
+
+  // This routine uses the registers from r2 to r6.  At the moment they are
+  // not used by the register allocator, but when they are, it should use
+  // SpillAll and MergeTo like DeferredInlineSmiOperation::Generate() above.
+
+  // Allocate the result heap number.
+  Register heap_number_map = r7;
+  Register heap_number = r4;
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  // If the allocation fails, fall back to the GenericBinaryOpStub.
+  __ AllocateHeapNumber(heap_number, r5, r6, heap_number_map, entry_label());
+  WriteNonSmiAnswer(tos_register_, heap_number, r3);
+  __ mov(tos_register_, Operand(heap_number));
+
+  Exit();
 }
 
 
@@ -1191,10 +1396,10 @@
         }
         frame_->EmitPush(tos, TypeInfo::Smi());
       } else {
-        DeferredCode* deferred =
+        DeferredInlineSmiOperation* deferred =
           new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos);
         __ tst(tos, Operand(kSmiTagMask));
-        deferred->Branch(ne);
+        deferred->JumpToNonSmiInput(ne);
         switch (op) {
           case Token::BIT_OR:  __ orr(tos, tos, Operand(value)); break;
           case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break;
@@ -1240,17 +1445,17 @@
     case Token::SHR:
     case Token::SAR: {
       ASSERT(!reversed);
-      int shift_amount = int_value & 0x1f;
+      int shift_value = int_value & 0x1f;
       TypeInfo result = TypeInfo::Number();
 
       if (op == Token::SHR) {
-        if (shift_amount > 1) {
+        if (shift_value > 1) {
           result = TypeInfo::Smi();
-        } else if (shift_amount > 0) {
+        } else if (shift_value > 0) {
           result = TypeInfo::Integer32();
         }
       } else if (op == Token::SAR) {
-        if (shift_amount > 0) {
+        if (shift_value > 0) {
           result = TypeInfo::Smi();
         } else {
           result = TypeInfo::Integer32();
@@ -1260,77 +1465,67 @@
         result = TypeInfo::Integer32();
       }
 
-      Register scratch = VirtualFrame::scratch0();
-      Register scratch2 = VirtualFrame::scratch1();
-      int shift_value = int_value & 0x1f;  // least significant 5 bits
-      DeferredCode* deferred =
+      DeferredInlineSmiOperation* deferred =
         new DeferredInlineSmiOperation(op, shift_value, false, mode, tos);
-      uint32_t problematic_mask = kSmiTagMask;
-      // For unsigned shift by zero all negative smis are problematic.
-      bool skip_smi_test = both_sides_are_smi;
-      if (shift_value == 0 && op == Token::SHR) {
-        problematic_mask |= 0x80000000;
-        skip_smi_test = false;
-      }
-      if (!skip_smi_test) {
-        __ tst(tos, Operand(problematic_mask));
-        deferred->Branch(ne);  // Go slow for problematic input.
+      if (!both_sides_are_smi) {
+        __ tst(tos, Operand(kSmiTagMask));
+        deferred->JumpToNonSmiInput(ne);
       }
       switch (op) {
         case Token::SHL: {
           if (shift_value != 0) {
+            Register scratch = VirtualFrame::scratch0();
             int adjusted_shift = shift_value - kSmiTagSize;
             ASSERT(adjusted_shift >= 0);
+
             if (adjusted_shift != 0) {
-              __ mov(scratch, Operand(tos, LSL, adjusted_shift));
-              // Check that the *signed* result fits in a smi.
-              __ add(scratch2, scratch, Operand(0x40000000), SetCC);
-              deferred->Branch(mi);
-              __ mov(tos, Operand(scratch, LSL, kSmiTagSize));
-            } else {
-              // Check that the *signed* result fits in a smi.
-              __ add(scratch2, tos, Operand(0x40000000), SetCC);
-              deferred->Branch(mi);
-              __ mov(tos, Operand(tos, LSL, kSmiTagSize));
+              __ mov(tos, Operand(tos, LSL, adjusted_shift));
             }
+            // Check that the *signed* result fits in a smi.
+            __ add(scratch, tos, Operand(0x40000000), SetCC);
+            deferred->JumpToAnswerOutOfRange(mi);
+            __ mov(tos, Operand(tos, LSL, kSmiTagSize));
           }
           break;
         }
         case Token::SHR: {
           if (shift_value != 0) {
+            Register scratch = VirtualFrame::scratch0();
             __ mov(scratch, Operand(tos, ASR, kSmiTagSize));  // Remove tag.
-            // LSR by immediate 0 means shifting 32 bits.
-            __ mov(scratch, Operand(scratch, LSR, shift_value));
+            __ mov(tos, Operand(scratch, LSR, shift_value));
             if (shift_value == 1) {
-              // check that the *unsigned* result fits in a smi
-              // neither of the two high-order bits can be set:
+              // Check that the *unsigned* result fits in a smi.
+              // Neither of the two high-order bits can be set:
               // - 0x80000000: high bit would be lost when smi tagging
-              // - 0x40000000: this number would convert to negative when
-              // smi tagging these two cases can only happen with shifts
-              // by 0 or 1 when handed a valid smi
-              __ tst(scratch, Operand(0xc0000000));
-              deferred->Branch(ne);
-            } else {
-              ASSERT(shift_value >= 2);
-              result = TypeInfo::Smi();  // SHR by at least 2 gives a Smi.
+              // - 0x40000000: this number would convert to negative when Smi
+              //   tagging.
+              // These two cases can only happen with shifts by 0 or 1 when
+              // handed a valid smi.
+              __ tst(tos, Operand(0xc0000000));
+              if (!CpuFeatures::IsSupported(VFP3)) {
+                // If the unsigned result does not fit in a Smi, we require an
+                // unsigned to double conversion. Without VFP V8 has to fall
+                // back to the runtime. The deferred code will expect tos
+                // to hold the original Smi to be shifted.
+                __ mov(tos, Operand(scratch, LSL, kSmiTagSize), LeaveCC, ne);
+              }
+              deferred->JumpToAnswerOutOfRange(ne);
             }
-            __ mov(tos, Operand(scratch, LSL, kSmiTagSize));
+            __ mov(tos, Operand(tos, LSL, kSmiTagSize));
+          } else {
+            __ cmp(tos, Operand(0));
+            deferred->JumpToAnswerOutOfRange(mi);
           }
           break;
         }
         case Token::SAR: {
-          // In the ARM instructions set, ASR by immediate 0 means shifting 32
-          // bits.
           if (shift_value != 0) {
-            // Do the shift and the tag removal in one operation.  If the shift
+            // Do the shift and the tag removal in one operation. If the shift
             // is 31 bits (the highest possible value) then we emit the
-            // instruction as a shift by 0 which means shift arithmetically by
-            // 32.
+            // instruction as a shift by 0 which in the ARM ISA means shift
+            // arithmetically by 32.
             __ mov(tos, Operand(tos, ASR, (kSmiTagSize + shift_value) & 0x1f));
-            // Put tag back.
             __ mov(tos, Operand(tos, LSL, kSmiTagSize));
-            // SAR by at least 1 gives a Smi.
-            result = TypeInfo::Smi();
           }
           break;
         }
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index c522154..162d97f 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -455,9 +455,6 @@
 
   static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
   bool CheckForInlineRuntimeCall(CallRuntime* node);
-  static bool PatchInlineRuntimeEntry(Handle<String> name,
-                                      const InlineRuntimeLUT& new_entry,
-                                      InlineRuntimeLUT* old_entry);
 
   static Handle<Code> ComputeLazyCompile(int argc);
   void ProcessDeclarations(ZoneList<Declaration*>* declarations);
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 0b6e7b3..36a36be 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -25,6 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#include <limits.h>  // For LONG_MIN, LONG_MAX.
+
 #include "v8.h"
 
 #if defined(V8_TARGET_ARCH_ARM)
@@ -1333,6 +1335,104 @@
 }
 
 
+// Tries to get a signed int32 out of a double precision floating point heap
+// number. Rounds towards 0. Branches to 'not_int32' if the double is out of
+// the 32-bit signed integer range.
+void MacroAssembler::ConvertToInt32(Register source,
+                                    Register dest,
+                                    Register scratch,
+                                    Register scratch2,
+                                    Label* not_int32) {
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
+    sub(scratch, source, Operand(kHeapObjectTag));
+    vldr(d0, scratch, HeapNumber::kValueOffset);
+    vcvt_s32_f64(s0, d0);
+    vmov(dest, s0);
+    // The signed vcvt instruction will saturate to the minimum (0x80000000)
+    // or maximum (0x7fffffff) signed 32-bit integer when the double is out
+    // of range. When subtracting one, the minimum signed integer becomes the
+    // maximum signed integer.
+    sub(scratch, dest, Operand(1));
+    cmp(scratch, Operand(LONG_MAX - 1));
+    // If equal, dest was LONG_MAX; if greater, dest was LONG_MIN.
+    b(ge, not_int32);
+  } else {
+    // This code is faster for doubles that are in the ranges -0x7fffffff to
+    // -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds almost to
+    // the range of signed int32 values that are not Smis.  Jumps to the label
+    // 'not_int32' if the double isn't in the range -0x80000000.0 to
+    // 0x80000000.0 (excluding the endpoints).
+    Label right_exponent, done;
+    // Get exponent word.
+    ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
+    // Get exponent alone in scratch2.
+    Ubfx(scratch2,
+         scratch,
+         HeapNumber::kExponentShift,
+         HeapNumber::kExponentBits);
+    // Load dest with zero.  We use this either for the final shift or
+    // for the answer.
+    mov(dest, Operand(0));
+    // Check whether the exponent matches a 32 bit signed int that is not a Smi.
+    // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
+    // the exponent that we are fastest at and also the highest exponent we can
+    // handle here.
+    const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
+    // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
+    // split it up to avoid a constant pool entry.  You can't do that in general
+    // for cmp because of the overflow flag, but we know the exponent is in the
+    // range 0-2047 so there is no overflow.
+    int fudge_factor = 0x400;
+    sub(scratch2, scratch2, Operand(fudge_factor));
+    cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
+    // If we have a match of the int32-but-not-Smi exponent then skip some
+    // logic.
+    b(eq, &right_exponent);
+    // If the exponent is higher than that then go to slow case.  This catches
+    // numbers that don't fit in a signed int32, infinities and NaNs.
+    b(gt, not_int32);
+
+    // We know the exponent is smaller than 30 (biased).  If it is less than
+    // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
+    // it rounds to zero.
+    const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
+    sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
+    // Dest already has a Smi zero.
+    b(lt, &done);
+
+    // We have an exponent between 0 and 30 in scratch2.  Subtract from 30 to
+    // get how much to shift down.
+    rsb(dest, scratch2, Operand(30));
+
+    bind(&right_exponent);
+    // Get the top bits of the mantissa.
+    and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
+    // Put back the implicit 1.
+    orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
+    // Shift up the mantissa bits to take up the space the exponent used to
+    // take. We just orred in the implicit bit so that took care of one and
+    // we want to leave the sign bit 0 so we subtract 2 bits from the shift
+    // distance.
+    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+    mov(scratch2, Operand(scratch2, LSL, shift_distance));
+    // Put sign in zero flag.
+    tst(scratch, Operand(HeapNumber::kSignMask));
+    // Get the second half of the double. For some exponents we don't
+    // actually need this because the bits get shifted out again, but
+    // it's probably slower to test than just to do it.
+    ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
+    // Shift down 22 bits to get the last 10 bits.
+    orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
+    // Move down according to the exponent.
+    mov(dest, Operand(scratch, LSR, dest));
+    // Fix sign if sign bit was set.
+    rsb(dest, dest, Operand(0), LeaveCC, ne);
+    bind(&done);
+  }
+}
+
+
 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
                                          Register src,
                                          int num_least_bits) {
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 207ee5c..febd87e 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -504,6 +504,15 @@
                               Register scratch1,
                               SwVfpRegister scratch2);
 
+  // Convert the HeapNumber pointed to by source to a 32-bit signed integer
+  // in dest. If the HeapNumber does not fit into a 32-bit signed integer,
+  // branch to the not_int32 label.
+  void ConvertToInt32(Register source,
+                      Register dest,
+                      Register scratch,
+                      Register scratch2,
+                      Label* not_int32);
+
   // Count leading zeros in a 32 bit word.  On ARM5 and later it uses the clz
   // instruction.  On pre-ARM5 hardware this routine gives the wrong answer
   // for 0 (31 instead of 32).  Source and scratch can be the same in which case
diff --git a/src/ast.cc b/src/ast.cc
index 0d07a58..9ff1be7 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -84,7 +84,10 @@
 
 
 VariableProxy::VariableProxy(bool is_this)
-    : is_this_(is_this) {
+  : var_(NULL),
+    is_this_(is_this),
+    inside_with_(false),
+    is_trivial_(false) {
 }
 
 
diff --git a/src/ast.h b/src/ast.h
index 5071b2c..9fcf256 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -1747,7 +1747,7 @@
   void AddElement(TextElement elm)  {
     elements_.Add(elm);
     length_ += elm.length();
-  };
+  }
   ZoneList<TextElement>* elements() { return &elements_; }
  private:
   ZoneList<TextElement> elements_;
diff --git a/src/char-predicates-inl.h b/src/char-predicates-inl.h
index fadbc9a..0dfc80d 100644
--- a/src/char-predicates-inl.h
+++ b/src/char-predicates-inl.h
@@ -34,6 +34,14 @@
 namespace internal {
 
 
+// If c is in 'A'-'Z' or 'a'-'z', return its lower-case equivalent.
+// Otherwise, return something outside of 'A'-'Z' and 'a'-'z'.
+// Note: the locale is ignored.
+inline int AsciiAlphaToLower(uc32 c) {
+  return c | 0x20;
+}
+
+
 inline bool IsCarriageReturn(uc32 c) {
   return c == 0x000D;
 }
@@ -59,12 +67,12 @@
 
 inline bool IsHexDigit(uc32 c) {
   // ECMA-262, 3rd, 7.6 (p 15)
-  return IsDecimalDigit(c) || IsInRange(c | 0x20, 'a', 'f');
+  return IsDecimalDigit(c) || IsInRange(AsciiAlphaToLower(c), 'a', 'f');
 }
 
 
 inline bool IsRegExpWord(uc16 c) {
-  return IsInRange(c | 0x20, 'a', 'z')
+  return IsInRange(AsciiAlphaToLower(c), 'a', 'z')
       || IsDecimalDigit(c)
       || (c == '_');
 }
diff --git a/src/code-stubs.h b/src/code-stubs.h
index c2dd0a7..98a5cf6 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -110,10 +110,10 @@
 
   static Major MajorKeyFromKey(uint32_t key) {
     return static_cast<Major>(MajorKeyBits::decode(key));
-  };
+  }
   static int MinorKeyFromKey(uint32_t key) {
     return MinorKeyBits::decode(key);
-  };
+  }
 
   // Gets the major key from a code object that is a code stub or binary op IC.
   static Major GetMajorKey(Code* code_stub) {
diff --git a/src/codegen.cc b/src/codegen.cc
index 20fb310..148cefc 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -385,21 +385,6 @@
 }
 
 
-bool CodeGenerator::PatchInlineRuntimeEntry(Handle<String> name,
-    const CodeGenerator::InlineRuntimeLUT& new_entry,
-    CodeGenerator::InlineRuntimeLUT* old_entry) {
-  InlineRuntimeLUT* entry = FindInlineRuntimeLUT(name);
-  if (entry == NULL) return false;
-  if (old_entry != NULL) {
-    old_entry->name = entry->name;
-    old_entry->method = entry->method;
-  }
-  entry->name = new_entry.name;
-  entry->method = new_entry.method;
-  return true;
-}
-
-
 int CodeGenerator::InlineRuntimeCallArgumentsCount(Handle<String> name) {
   CodeGenerator::InlineRuntimeLUT* f =
       CodeGenerator::FindInlineRuntimeLUT(name);
diff --git a/src/codegen.h b/src/codegen.h
index 3373d1c..aa2d442 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -64,7 +64,6 @@
 //   DeclareGlobals
 //   FindInlineRuntimeLUT
 //   CheckForInlineRuntimeCall
-//   PatchInlineRuntimeEntry
 //   AnalyzeCondition
 //   CodeForFunctionPosition
 //   CodeForReturnPosition
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index c0ed929..4248a64 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -235,8 +235,19 @@
     const TickSampleEventRecord* rec =
         TickSampleEventRecord::cast(ticks_buffer_.StartDequeue());
     if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty();
-    if (rec->order == dequeue_order) {
-      generator_->RecordTickSample(rec->sample);
+    // Make a local copy of the tick sample record to ensure that it
+    // won't be modified as we are processing it. This is possible
+    // because the sampler writes to the queue without any
+    // synchronization, so if the processor falls far behind, a record
+    // may be modified right under its feet.
+    TickSampleEventRecord record = *rec;
+    if (record.order == dequeue_order) {
+      // A paranoid check to make sure that we don't get a memory overrun
+      // in case frames_count has a wild value.
+      if (record.sample.frames_count < 0
+          || record.sample.frames_count >= TickSample::kMaxFramesCount)
+        record.sample.frames_count = 0;
+      generator_->RecordTickSample(record.sample);
       ticks_buffer_.FinishDequeue();
     } else {
       return true;
diff --git a/src/d8.cc b/src/d8.cc
index 7fd7925..5a1e63a 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -486,7 +486,7 @@
 
   // Start the debugger agent if requested.
   if (i::FLAG_debugger_agent) {
-    v8::Debug::EnableAgent("d8 shell", i::FLAG_debugger_port);
+    v8::Debug::EnableAgent("d8 shell", i::FLAG_debugger_port, true);
   }
 
   // Start the in-process debugger if requested.
diff --git a/src/dateparser.h b/src/dateparser.h
index d999d9c..cae9b08 100644
--- a/src/dateparser.h
+++ b/src/dateparser.h
@@ -92,7 +92,7 @@
     int ReadWord(uint32_t* prefix, int prefix_size) {
       int len;
       for (len = 0; IsAsciiAlphaOrAbove(); Next(), len++) {
-        if (len < prefix_size) prefix[len] = GetAsciiAlphaLower();
+        if (len < prefix_size) prefix[len] = AsciiAlphaToLower(ch_);
       }
       for (int i = len; i < prefix_size; i++) prefix[i] = 0;
       return len;
@@ -130,10 +130,6 @@
     bool HasReadNumber() const { return has_read_number_; }
 
    private:
-    // If current character is in 'A'-'Z' or 'a'-'z', return its lower-case.
-    // Else, return something outside of 'A'-'Z' and 'a'-'z'.
-    uint32_t GetAsciiAlphaLower() const { return ch_ | 32; }
-
     int index_;
     Vector<Char> buffer_;
     bool has_read_number_;
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index a143bcd..a63088d 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -283,7 +283,7 @@
                                     "debugger agent in another process")
 DEFINE_bool(debugger_agent, false, "Enable debugger agent")
 DEFINE_int(debugger_port, 5858, "Port to use for remote debugging")
-DEFINE_string(map_counters, false, "Map counters to a file")
+DEFINE_string(map_counters, NULL, "Map counters to a file")
 DEFINE_args(js_arguments, JSArguments(),
             "Pass all remaining arguments to the script. Alias for \"--\".")
 
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 8106886..bfd2650 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -868,7 +868,7 @@
               FloatingPointHelper::LoadSSE2Operands(masm);
             }
           } else {
-            FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime);
+            FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
           }
 
           switch (op_) {
@@ -889,7 +889,7 @@
               __ AbortIfNotNumber(eax);
             }
           } else {
-            FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
+            FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
           }
           FloatingPointHelper::LoadFloatOperands(
               masm,
@@ -1001,10 +1001,15 @@
     }
   }
 
+  // Avoid hitting the string ADD code below when allocation fails in
+  // the floating point code above.
+  if (op_ != Token::ADD) {
+    __ bind(&call_runtime);
+  }
+
   // If all else fails, use the runtime system to get the correct
   // result. If arguments was passed in registers now place them on the
   // stack in the correct order below the return address.
-  __ bind(&call_runtime);
   if (HasArgsInRegisters()) {
     GenerateRegisterArgsPush(masm);
   }
@@ -1012,7 +1017,6 @@
   switch (op_) {
     case Token::ADD: {
       // Test for string arguments before calling runtime.
-      Label not_strings, not_string1, string1, string1_smi2;
 
       // If this stub has already generated FP-specific code then the arguments
       // are already in edx, eax
@@ -1030,49 +1034,31 @@
         rhs = eax;
       }
 
-      // Test if first argument is a string.
+      // Test if left operand is a string.
+      Label lhs_not_string;
       __ test(lhs, Immediate(kSmiTagMask));
-      __ j(zero, &not_string1);
+      __ j(zero, &lhs_not_string);
       __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
-      __ j(above_equal, &not_string1);
+      __ j(above_equal, &lhs_not_string);
 
-      // First argument is a string, test second.
+      StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
+      __ TailCallStub(&string_add_left_stub);
+
+      // Left operand is not a string, test right.
+      __ bind(&lhs_not_string);
       __ test(rhs, Immediate(kSmiTagMask));
-      __ j(zero, &string1_smi2);
+      __ j(zero, &call_runtime);
       __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
-      __ j(above_equal, &string1);
+      __ j(above_equal, &call_runtime);
 
-      // First and second argument are strings. Jump to the string add stub.
-      StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
-      __ TailCallStub(&string_add_stub);
+      StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
+      __ TailCallStub(&string_add_right_stub);
 
-      __ bind(&string1_smi2);
-      // First argument is a string, second is a smi. Try to lookup the number
-      // string for the smi in the number string cache.
-      NumberToStringStub::GenerateLookupNumberStringCache(
-          masm, rhs, edi, ebx, ecx, true, &string1);
-
-      // Replace second argument on stack and tailcall string add stub to make
-      // the result.
-      __ mov(Operand(esp, 1 * kPointerSize), edi);
-      __ TailCallStub(&string_add_stub);
-
-      // Only first argument is a string.
-      __ bind(&string1);
-      __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
-
-      // First argument was not a string, test second.
-      __ bind(&not_string1);
-      __ test(rhs, Immediate(kSmiTagMask));
-      __ j(zero, &not_strings);
-      __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
-      __ j(above_equal, &not_strings);
-
-      // Only second argument is a string.
-      __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
-
-      __ bind(&not_strings);
       // Neither argument is a string.
+      __ bind(&call_runtime);
+      if (HasArgsInRegisters()) {
+        GenerateRegisterArgsPush(masm);
+      }
       __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
       break;
     }
@@ -3765,14 +3751,15 @@
 
 
 void StringAddStub::Generate(MacroAssembler* masm) {
-  Label string_add_runtime;
+  Label string_add_runtime, call_builtin;
+  Builtins::JavaScript builtin_id = Builtins::ADD;
 
   // Load the two arguments.
   __ mov(eax, Operand(esp, 2 * kPointerSize));  // First argument.
   __ mov(edx, Operand(esp, 1 * kPointerSize));  // Second argument.
 
   // Make sure that both arguments are strings if not known in advance.
-  if (string_check_) {
+  if (flags_ == NO_STRING_ADD_FLAGS) {
     __ test(eax, Immediate(kSmiTagMask));
     __ j(zero, &string_add_runtime);
     __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
@@ -3783,6 +3770,20 @@
     __ j(zero, &string_add_runtime);
     __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
     __ j(above_equal, &string_add_runtime);
+  } else {
+    // Here at least one of the arguments is definitely a string.
+    // We convert the one that is not known to be a string.
+    if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
+      ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
+      GenerateConvertArgument(masm, 2 * kPointerSize, eax, ebx, ecx, edi,
+                              &call_builtin);
+      builtin_id = Builtins::STRING_ADD_RIGHT;
+    } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
+      ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
+      GenerateConvertArgument(masm, 1 * kPointerSize, edx, ebx, ecx, edi,
+                              &call_builtin);
+      builtin_id = Builtins::STRING_ADD_LEFT;
+    }
   }
 
   // Both arguments are strings.
@@ -3828,21 +3829,41 @@
   __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx,
                                          &string_add_runtime);
 
-  // Get the two characters forming the sub string.
+  // Get the two characters forming the new string.
   __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
   __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
 
   // Try to lookup two character string in symbol table. If it is not found
   // just allocate a new one.
-  Label make_two_character_string, make_flat_ascii_string;
+  Label make_two_character_string, make_two_character_string_no_reload;
   StringHelper::GenerateTwoCharacterSymbolTableProbe(
-      masm, ebx, ecx, eax, edx, edi, &make_two_character_string);
+      masm, ebx, ecx, eax, edx, edi,
+      &make_two_character_string_no_reload, &make_two_character_string);
   __ IncrementCounter(&Counters::string_add_native, 1);
   __ ret(2 * kPointerSize);
 
+  // Allocate a two character string.
   __ bind(&make_two_character_string);
-  __ Set(ebx, Immediate(Smi::FromInt(2)));
-  __ jmp(&make_flat_ascii_string);
+  // Reload the arguments.
+  __ mov(eax, Operand(esp, 2 * kPointerSize));  // First argument.
+  __ mov(edx, Operand(esp, 1 * kPointerSize));  // Second argument.
+  // Get the two characters forming the new string.
+  __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
+  __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
+  __ bind(&make_two_character_string_no_reload);
+  __ IncrementCounter(&Counters::string_add_make_two_char, 1);
+  __ AllocateAsciiString(eax,  // Result.
+                         2,    // Length.
+                         edi,  // Scratch 1.
+                         edx,  // Scratch 2.
+                         &string_add_runtime);
+  // Pack both characters in ebx.
+  __ shl(ecx, kBitsPerByte);
+  __ or_(ebx, Operand(ecx));
+  // Set the characters in the new string.
+  __ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx);
+  __ IncrementCounter(&Counters::string_add_native, 1);
+  __ ret(2 * kPointerSize);
 
   __ bind(&longer_than_two);
   // Check if resulting string will be flat.
@@ -3921,7 +3942,6 @@
   __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
   __ j(zero, &string_add_runtime);
 
-  __ bind(&make_flat_ascii_string);
   // Both strings are ascii strings.  As they are short they are both flat.
   // ebx: length of resulting flat string as a smi
   __ SmiUntag(ebx);
@@ -3997,6 +4017,56 @@
   // Just jump to runtime to add the two strings.
   __ bind(&string_add_runtime);
   __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
+
+  if (call_builtin.is_linked()) {
+    __ bind(&call_builtin);
+    __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
+  }
+}
+
+
+void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
+                                            int stack_offset,
+                                            Register arg,
+                                            Register scratch1,
+                                            Register scratch2,
+                                            Register scratch3,
+                                            Label* slow) {
+  // First check if the argument is already a string.
+  Label not_string, done;
+  __ test(arg, Immediate(kSmiTagMask));
+  __ j(zero, &not_string);
+  __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
+  __ j(below, &done);
+
+  // Check the number to string cache.
+  Label not_cached;
+  __ bind(&not_string);
+  // Puts the cached result into scratch1.
+  NumberToStringStub::GenerateLookupNumberStringCache(masm,
+                                                      arg,
+                                                      scratch1,
+                                                      scratch2,
+                                                      scratch3,
+                                                      false,
+                                                      &not_cached);
+  __ mov(arg, scratch1);
+  __ mov(Operand(esp, stack_offset), arg);
+  __ jmp(&done);
+
+  // Check if the argument is a safe string wrapper.
+  __ bind(&not_cached);
+  __ test(arg, Immediate(kSmiTagMask));
+  __ j(zero, slow);
+  __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1);  // map -> scratch1.
+  __ j(not_equal, slow);
+  __ test_b(FieldOperand(scratch1, Map::kBitField2Offset),
+            1 << Map::kStringWrapperSafeForDefaultValueOf);
+  __ j(zero, slow);
+  __ mov(arg, FieldOperand(arg, JSValue::kValueOffset));
+  __ mov(Operand(esp, stack_offset), arg);
+
+  __ bind(&done);
 }
 
 
@@ -4092,6 +4162,7 @@
                                                         Register scratch1,
                                                         Register scratch2,
                                                         Register scratch3,
+                                                        Label* not_probed,
                                                         Label* not_found) {
   // Register scratch3 is the general scratch register in this function.
   Register scratch = scratch3;
@@ -4106,7 +4177,7 @@
   __ mov(scratch, c2);
   __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
   __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
-  __ j(below_equal, not_found);
+  __ j(below_equal, not_probed);
 
   __ bind(&not_array_index);
   // Calculate the two character string hash.
@@ -4320,7 +4391,8 @@
   // Try to lookup two character string in symbol table.
   Label make_two_character_string;
   StringHelper::GenerateTwoCharacterSymbolTableProbe(
-      masm, ebx, ecx, eax, edx, edi, &make_two_character_string);
+      masm, ebx, ecx, eax, edx, edi,
+      &make_two_character_string, &make_two_character_string);
   __ ret(3 * kPointerSize);
 
   __ bind(&make_two_character_string);
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index acf4a6f..351636f 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -234,16 +234,21 @@
                                         Register scratch,  // Neither of above.
                                         bool ascii);
 
-  // Probe the symbol table for a two character string. If the string is
-  // not found by probing a jump to the label not_found is performed. This jump
-  // does not guarantee that the string is not in the symbol table. If the
-  // string is found the code falls through with the string in register eax.
+  // Probe the symbol table for a two character string. If the string
+  // requires non-standard hashing a jump to the label not_probed is
+  // performed and registers c1 and c2 are preserved. In all other
+  // cases they are clobbered. If the string is not found by probing a
+  // jump to the label not_found is performed. This jump does not
+  // guarantee that the string is not in the symbol table. If the
+  // string is found the code falls through with the string in
+  // register eax.
   static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
                                                    Register c1,
                                                    Register c2,
                                                    Register scratch1,
                                                    Register scratch2,
                                                    Register scratch3,
+                                                   Label* not_probed,
                                                    Label* not_found);
 
   // Generate string hash.
@@ -267,24 +272,35 @@
 // Flag that indicates how to generate code for the stub StringAddStub.
 enum StringAddFlags {
   NO_STRING_ADD_FLAGS = 0,
-  NO_STRING_CHECK_IN_STUB = 1 << 0  // Omit string check in stub.
+  // Omit left string check in stub (left is definitely a string).
+  NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
+  // Omit right string check in stub (right is definitely a string).
+  NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
+  // Omit both string checks in stub.
+  NO_STRING_CHECK_IN_STUB =
+      NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
 };
 
 
 class StringAddStub: public CodeStub {
  public:
-  explicit StringAddStub(StringAddFlags flags) {
-    string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
-  }
+  explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
 
  private:
   Major MajorKey() { return StringAdd; }
-  int MinorKey() { return string_check_ ? 0 : 1; }
+  int MinorKey() { return flags_; }
 
   void Generate(MacroAssembler* masm);
 
-  // Should the stub check whether arguments are strings?
-  bool string_check_;
+  void GenerateConvertArgument(MacroAssembler* masm,
+                               int stack_offset,
+                               Register arg,
+                               Register scratch1,
+                               Register scratch2,
+                               Register scratch3,
+                               Label* slow);
+
+  const StringAddFlags flags_;
 };
 
 
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index d399c35..854052a 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -1411,12 +1411,12 @@
           StringAddStub stub(NO_STRING_CHECK_IN_STUB);
           answer = frame_->CallStub(&stub, 2);
         } else {
-          answer =
-            frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
+          StringAddStub stub(NO_STRING_CHECK_LEFT_IN_STUB);
+          answer = frame_->CallStub(&stub, 2);
         }
       } else if (right_is_string) {
-        answer =
-          frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
+        StringAddStub stub(NO_STRING_CHECK_RIGHT_IN_STUB);
+        answer = frame_->CallStub(&stub, 2);
       }
       answer.set_type_info(TypeInfo::String());
       frame_->Push(&answer);
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index 2a8d313..adc0005 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -632,9 +632,6 @@
 
   static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
   bool CheckForInlineRuntimeCall(CallRuntime* node);
-  static bool PatchInlineRuntimeEntry(Handle<String> name,
-                                      const InlineRuntimeLUT& new_entry,
-                                      InlineRuntimeLUT* old_entry);
 
   void ProcessDeclarations(ZoneList<Declaration*>* declarations);
 
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index c215142..87e25d7 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -782,6 +782,31 @@
 }
 
 
+void MacroAssembler::AllocateAsciiString(Register result,
+                                         int length,
+                                         Register scratch1,
+                                         Register scratch2,
+                                         Label* gc_required) {
+  ASSERT(length > 0);
+
+  // Allocate ascii string in new space.
+  AllocateInNewSpace(SeqAsciiString::SizeFor(length),
+                     result,
+                     scratch1,
+                     scratch2,
+                     gc_required,
+                     TAG_OBJECT);
+
+  // Set the map, length and hash field.
+  mov(FieldOperand(result, HeapObject::kMapOffset),
+      Immediate(Factory::ascii_string_map()));
+  mov(FieldOperand(result, String::kLengthOffset),
+      Immediate(Smi::FromInt(length)));
+  mov(FieldOperand(result, String::kHashFieldOffset),
+      Immediate(String::kEmptyHashField));
+}
+
+
 void MacroAssembler::AllocateConsString(Register result,
                                         Register scratch1,
                                         Register scratch2,
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 5e850c0..a7534cb 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -356,6 +356,11 @@
                            Register scratch2,
                            Register scratch3,
                            Label* gc_required);
+  void AllocateAsciiString(Register result,
+                           int length,
+                           Register scratch1,
+                           Register scratch2,
+                           Label* gc_required);
 
   // Allocate a raw cons string object. Only the map field of the result is
   // initialized.
diff --git a/src/json.js b/src/json.js
index e7ec610..a39d7c4 100644
--- a/src/json.js
+++ b/src/json.js
@@ -68,15 +68,13 @@
 }
 
 var characterQuoteCache = {
+  '\b': '\\b',  // ASCII 8, Backspace
+  '\t': '\\t',  // ASCII 9, Tab
+  '\n': '\\n',  // ASCII 10, Newline
+  '\f': '\\f',  // ASCII 12, Formfeed
+  '\r': '\\r',  // ASCII 13, Carriage Return
   '\"': '\\"',
-  '\\': '\\\\',
-  '/': '\\/',
-  '\b': '\\b',
-  '\f': '\\f',
-  '\n': '\\n',
-  '\r': '\\r',
-  '\t': '\\t',
-  '\x0B': '\\u000b'
+  '\\': '\\\\'
 };
 
 function QuoteSingleJSONCharacter(c) {
@@ -95,7 +93,7 @@
 }
 
 function QuoteJSONString(str) {
-  var quotable = /[\\\"\x00-\x1f\x80-\uffff]/g;
+  var quotable = /[\\\"\x00-\x1f]/g;
   return '"' + str.replace(quotable, QuoteSingleJSONCharacter) + '"';
 }
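
The table above now matches the ES5 Quote algorithm: '"' and '\' are
backslash-escaped, the five named control characters get their short
escapes, any other code unit below 0x20 falls through to the generic
\u00xx path, and (with the narrowed regexp above) code units of 0x80 and
up are no longer escaped at all. A standalone C++ sketch of the same
per-code-unit rule, for illustration only (not V8 code; ASCII-only for
simplicity):

    #include <cstdio>
    #include <string>

    // Escape one code unit the way the ES5 Quote abstraction does.
    static std::string QuoteChar(unsigned c) {
      switch (c) {
        case '"':  return "\\\"";
        case '\\': return "\\\\";
        case '\b': return "\\b";
        case '\t': return "\\t";
        case '\n': return "\\n";
        case '\f': return "\\f";
        case '\r': return "\\r";
        default:
          if (c < 0x20) {  // remaining control characters
            char buf[8];
            std::snprintf(buf, sizeof(buf), "\\u%04x", c);
            return buf;
          }
          return std::string(1, static_cast<char>(c));  // pass through
      }
    }
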
 
diff --git a/src/messages.js b/src/messages.js
index 0375e8a..f26c3b5 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -831,11 +831,11 @@
   }
   var line = "";
   var functionName = frame.getFunction().name;
-  var methodName = frame.getMethodName();
   var addPrefix = true;
   var isConstructor = frame.isConstructor();
   var isMethodCall = !(frame.isToplevel() || isConstructor);
   if (isMethodCall) {
+    var methodName = frame.getMethodName();
     line += frame.getTypeName() + ".";
     if (functionName) {
       line += functionName;
diff --git a/src/mips/codegen-mips.h b/src/mips/codegen-mips.h
index 3ad94e8..75e7a29 100644
--- a/src/mips/codegen-mips.h
+++ b/src/mips/codegen-mips.h
@@ -309,9 +309,6 @@
 
   static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
   bool CheckForInlineRuntimeCall(CallRuntime* node);
-  static bool PatchInlineRuntimeEntry(Handle<String> name,
-                                      const InlineRuntimeLUT& new_entry,
-                                      InlineRuntimeLUT* old_entry);
 
   static Handle<Code> ComputeLazyCompile(int argc);
   void ProcessDeclarations(ZoneList<Declaration*>* declarations);
diff --git a/src/platform.h b/src/platform.h
index e0dd66e..e9e7c22 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -302,7 +302,7 @@
   void* address() {
     ASSERT(IsReserved());
     return address_;
-  };
+  }
 
   // Returns the size of the reserved memory.
   size_t size() { return size_; }
diff --git a/src/scanner.cc b/src/scanner.cc
index 1a8d721..15b1d44 100755
--- a/src/scanner.cc
+++ b/src/scanner.cc
@@ -430,6 +430,7 @@
     stack_overflow_ = true;
     next_.token = Token::ILLEGAL;
   } else {
+    has_line_terminator_before_next_ = false;
     Scan();
   }
   return current_.token;
@@ -742,7 +743,7 @@
       AddCharAdvance();
     } while (c0_ >= '0' && c0_ <= '9');
   }
-  if ((c0_ | 0x20) == 'e') {
+  if (AsciiAlphaToLower(c0_) == 'e') {
     AddCharAdvance();
     if (c0_ == '-' || c0_ == '+') AddCharAdvance();
     if (c0_ < '0' || c0_ > '9') return Token::ILLEGAL;
@@ -772,7 +773,6 @@
 void Scanner::ScanJavaScript() {
   next_.literal_chars = Vector<const char>();
   Token::Value token;
-  has_line_terminator_before_next_ = false;
   do {
     // Remember the position of the next token
     next_.location.beg_pos = source_pos();
@@ -1013,6 +1013,10 @@
 void Scanner::SeekForward(int pos) {
   source_->SeekForward(pos - 1);
   Advance();
+  // This function is only called to seek to the location
+  // of the end of a function (at the "}" token). It doesn't matter
+  // whether there was a line terminator in the part we skip.
+  has_line_terminator_before_next_ = false;
   Scan();
 }
 
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index 3b4718b..fbb2673 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -303,6 +303,14 @@
 // -----------------------------------------------------------------------------
 // MemoryAllocator
 
+void MemoryAllocator::ChunkInfo::init(Address a, size_t s, PagedSpace* o) {
+  address_ = a;
+  size_ = s;
+  owner_ = o;
+  executable_ = (o == NULL) ? NOT_EXECUTABLE : o->executable();
+}
+
+
 bool MemoryAllocator::IsValidChunk(int chunk_id) {
   if (!IsValidChunkId(chunk_id)) return false;
 
diff --git a/src/spaces.cc b/src/spaces.cc
index 50afd03..3d2d42f 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -274,6 +274,9 @@
 int MemoryAllocator::size_       = 0;
 int MemoryAllocator::size_executable_ = 0;
 
+List<MemoryAllocator::MemoryAllocationCallbackRegistration>
+  MemoryAllocator::memory_allocation_callbacks_;
+
 VirtualMemory* MemoryAllocator::initial_chunk_ = NULL;
 
 // 270 is an estimate based on the static default heap size of a pair of 256K
@@ -299,8 +302,6 @@
 }
 
 
-void *executable_memory_histogram = NULL;
-
 bool MemoryAllocator::Setup(int capacity) {
   capacity_ = RoundUp(capacity, Page::kPageSize);
 
@@ -318,8 +319,6 @@
 
   size_ = 0;
   size_executable_ = 0;
-  executable_memory_histogram =
-      StatsTable::CreateHistogram("V8.ExecutableMemoryMax", 0, MB * 512, 50);
   ChunkInfo info;  // uninitialized element.
   for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
     chunks_.Add(info);
@@ -366,15 +365,7 @@
   int alloced = static_cast<int>(*allocated);
   size_ += alloced;
 
-  if (executable == EXECUTABLE) {
-    size_executable_ += alloced;
-    static int size_executable_max_observed_ = 0;
-    if (size_executable_max_observed_ < size_executable_) {
-      size_executable_max_observed_ = size_executable_;
-      StatsTable::AddHistogramSample(executable_memory_histogram,
-          size_executable_);
-    }
-  }
+  if (executable == EXECUTABLE) size_executable_ += alloced;
 #ifdef DEBUG
   ZapBlock(reinterpret_cast<Address>(mem), alloced);
 #endif
@@ -397,10 +388,56 @@
   Counters::memory_allocated.Decrement(static_cast<int>(length));
   size_ -= static_cast<int>(length);
   if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length);
+
   ASSERT(size_ >= 0);
 }
 
 
+void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
+                                                AllocationAction action,
+                                                size_t size) {
+  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
+    MemoryAllocationCallbackRegistration registration =
+      memory_allocation_callbacks_[i];
+    if ((registration.space & space) == space &&
+        (registration.action & action) == action)
+      registration.callback(space, action, static_cast<int>(size));
+  }
+}
+
+
+bool MemoryAllocator::MemoryAllocationCallbackRegistered(
+    MemoryAllocationCallback callback) {
+  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
+    if (memory_allocation_callbacks_[i].callback == callback) return true;
+  }
+  return false;
+}
+
+
+void MemoryAllocator::AddMemoryAllocationCallback(
+    MemoryAllocationCallback callback,
+    ObjectSpace space,
+    AllocationAction action) {
+  ASSERT(callback != NULL);
+  MemoryAllocationCallbackRegistration registration(callback, space, action);
+  ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
+  memory_allocation_callbacks_.Add(registration);
+}
+
+
+void MemoryAllocator::RemoveMemoryAllocationCallback(
+    MemoryAllocationCallback callback) {
+  ASSERT(callback != NULL);
+  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
+    if (memory_allocation_callbacks_[i].callback == callback) {
+      memory_allocation_callbacks_.Remove(i);
+      return;
+    }
+  }
+  UNREACHABLE();
+}
+
+
 void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
   ASSERT(initial_chunk_ == NULL);
 
@@ -458,6 +495,8 @@
   int chunk_id = Pop();
   chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);
 
+  ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
+  PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
   return InitializePagesInChunk(chunk_id, *allocated_pages, owner);
 }
 
@@ -616,7 +655,10 @@
     Counters::memory_allocated.Decrement(static_cast<int>(c.size()));
   } else {
     LOG(DeleteEvent("PagedChunk", c.address()));
-    FreeRawMemory(c.address(), c.size(), c.owner()->executable());
+    ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner()->identity());
+    size_t size = c.size();
+    FreeRawMemory(c.address(), size, c.executable());
+    PerformAllocationCallback(space, kAllocationActionFree, size);
   }
   c.init(NULL, 0, NULL);
   Push(chunk_id);
@@ -2614,6 +2656,11 @@
     LOG(DeleteEvent("LargeObjectChunk", mem));
     return NULL;
   }
+  ObjectSpace space =
+      (executable == EXECUTABLE) ? kObjectSpaceCodeSpace : kObjectSpaceLoSpace;
+  MemoryAllocator::PerformAllocationCallback(space,
+                                             kAllocationActionAllocate,
+                                             *chunk_size);
   return reinterpret_cast<LargeObjectChunk*>(mem);
 }
 
@@ -2651,9 +2698,12 @@
     Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
     Executability executable =
         page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
-    MemoryAllocator::FreeRawMemory(chunk->address(),
-                                   chunk->size(),
-                                   executable);
+    ObjectSpace space = kObjectSpaceLoSpace;
+    if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
+    size_t size = chunk->size();
+    MemoryAllocator::FreeRawMemory(chunk->address(), size, executable);
+    MemoryAllocator::PerformAllocationCallback(
+        space, kAllocationActionFree, size);
   }
 
   size_ = 0;
@@ -2867,7 +2917,11 @@
       MarkCompactCollector::ReportDeleteIfNeeded(object);
       size_ -= static_cast<int>(chunk_size);
       page_count_--;
+      ObjectSpace space = kObjectSpaceLoSpace;
+      if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
       MemoryAllocator::FreeRawMemory(chunk_address, chunk_size, executable);
+      MemoryAllocator::PerformAllocationCallback(space, kAllocationActionFree,
+                                                 chunk_size);
       LOG(DeleteEvent("LargeObjectChunk", chunk_address));
     }
   }
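
For embedders, the corresponding public entry points are
V8::AddMemoryAllocationCallback and V8::RemoveMemoryAllocationCallback in
include/v8.h. A minimal usage sketch follows; the callback body and the
counter name are illustrative, only the v8:: names come from this patch:

    #include <v8.h>

    static int code_space_bytes = 0;  // hypothetical embedder-side counter

    // Invoked by V8 whenever it maps or unmaps OS memory for code space.
    static void OnCodeMemory(v8::ObjectSpace space,
                             v8::AllocationAction action,
                             int size) {
      if (action == v8::kAllocationActionAllocate) {
        code_space_bytes += size;
      } else {
        code_space_bytes -= size;
      }
    }

    // At startup:
    //   v8::V8::AddMemoryAllocationCallback(OnCodeMemory,
    //                                       v8::kObjectSpaceCodeSpace,
    //                                       v8::kAllocationActionAll);
    // At teardown, before the callback goes out of scope:
    //   v8::V8::RemoveMemoryAllocationCallback(OnCodeMemory);
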
diff --git a/src/spaces.h b/src/spaces.h
index 04e0c79..9ffa940 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -567,6 +567,17 @@
   static void FreeRawMemory(void* buf,
                             size_t length,
                             Executability executable);
+  static void PerformAllocationCallback(ObjectSpace space,
+                                        AllocationAction action,
+                                        size_t size);
+
+  static void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
+                                          ObjectSpace space,
+                                          AllocationAction action);
+  static void RemoveMemoryAllocationCallback(
+      MemoryAllocationCallback callback);
+  static bool MemoryAllocationCallbackRegistered(
+      MemoryAllocationCallback callback);
 
   // Returns the maximum available bytes of heaps.
   static int Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
@@ -643,26 +654,43 @@
   // Allocated executable space size in bytes.
   static int size_executable_;
 
+  struct MemoryAllocationCallbackRegistration {
+    MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
+                                         ObjectSpace space,
+                                         AllocationAction action)
+        : callback(callback), space(space), action(action) {
+    }
+    MemoryAllocationCallback callback;
+    ObjectSpace space;
+    AllocationAction action;
+  };
+  // A list of callbacks that are triggered when memory is allocated or freed.
+  static List<MemoryAllocationCallbackRegistration>
+      memory_allocation_callbacks_;
+
   // The initial chunk of virtual memory.
   static VirtualMemory* initial_chunk_;
 
   // Allocated chunk info: chunk start address, chunk size, and owning space.
   class ChunkInfo BASE_EMBEDDED {
    public:
-    ChunkInfo() : address_(NULL), size_(0), owner_(NULL) {}
-    void init(Address a, size_t s, PagedSpace* o) {
-      address_ = a;
-      size_ = s;
-      owner_ = o;
-    }
+    ChunkInfo() : address_(NULL),
+                  size_(0),
+                  owner_(NULL),
+                  executable_(NOT_EXECUTABLE) {}
+    inline void init(Address a, size_t s, PagedSpace* o);
     Address address() { return address_; }
     size_t size() { return size_; }
     PagedSpace* owner() { return owner_; }
+    // We save the executability of the owner to allow using it
+    // when collecting stats after the owner has been destroyed.
+    Executability executable() const { return executable_; }
 
    private:
     Address address_;
     size_t size_;
     PagedSpace* owner_;
+    Executability executable_;
   };
 
   // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
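
Note that the space and action fields of a registration are bitmasks: a
callback fires only when the registered masks contain the event's space
and action bits. A compile-time illustration using the enumerators from
include/v8.h (a sketch, not part of this patch):

    #include <v8.h>

    // Mirrors the check in MemoryAllocator::PerformAllocationCallback.
    // A registration for all spaces matches a code-space event ...
    static const bool kAllMatchesCode =
        (v8::kObjectSpaceAll & v8::kObjectSpaceCodeSpace) ==
        v8::kObjectSpaceCodeSpace;  // true
    // ... while a registration for new space only does not.
    static const bool kNewMatchesCode =
        (v8::kObjectSpaceNewSpace & v8::kObjectSpaceCodeSpace) ==
        v8::kObjectSpaceCodeSpace;  // false
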
diff --git a/src/v8-counters.h b/src/v8-counters.h
index b8b3d9f..af657f1 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -203,6 +203,7 @@
   SC(string_add_runtime_ext_to_ascii, V8.StringAddRuntimeExtToAscii)  \
   SC(sub_string_runtime, V8.SubStringRuntime)                         \
   SC(sub_string_native, V8.SubStringNative)                           \
+  SC(string_add_make_two_char, V8.StringAddMakeTwoChar)               \
   SC(string_compare_native, V8.StringCompareNative)                   \
   SC(string_compare_runtime, V8.StringCompareRuntime)                 \
   SC(regexp_entry_runtime, V8.RegExpEntryRuntime)                     \
diff --git a/src/version.cc b/src/version.cc
index 4ad62cf..3ba41b7 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     2
 #define MINOR_VERSION     4
-#define BUILD_NUMBER      0
+#define BUILD_NUMBER      1
 #define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false
 
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 911ca16..07bdadf 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -591,9 +591,7 @@
   };
   static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
   bool CheckForInlineRuntimeCall(CallRuntime* node);
-  static bool PatchInlineRuntimeEntry(Handle<String> name,
-                                      const InlineRuntimeLUT& new_entry,
-                                      InlineRuntimeLUT* old_entry);
+
   void ProcessDeclarations(ZoneList<Declaration*>* declarations);
 
   static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
diff --git a/test/cctest/test-ast.cc b/test/cctest/test-ast.cc
index 9931f56..9c292bc 100644
--- a/test/cctest/test-ast.cc
+++ b/test/cctest/test-ast.cc
@@ -57,35 +57,6 @@
 }
 
 
-TEST(RemoveLast) {
-  List<int> list(4);
-  CHECK_EQ(0, list.length());
-  list.Add(1);
-  CHECK_EQ(1, list.length());
-  CHECK_EQ(1, list.last());
-  list.RemoveLast();
-  CHECK_EQ(0, list.length());
-  list.Add(2);
-  list.Add(3);
-  CHECK_EQ(2, list.length());
-  CHECK_EQ(3, list.last());
-  list.RemoveLast();
-  CHECK_EQ(1, list.length());
-  CHECK_EQ(2, list.last());
-  list.RemoveLast();
-  CHECK_EQ(0, list.length());
-
-  const int kElements = 100;
-  for (int i = 0; i < kElements; i++) list.Add(i);
-  for (int j = kElements - 1; j >= 0; j--) {
-    CHECK_EQ(j + 1, list.length());
-    CHECK_EQ(j, list.last());
-    list.RemoveLast();
-    CHECK_EQ(j, list.length());
-  }
-}
-
-
 TEST(DeleteEmpty) {
   {
     List<int>* list = new List<int>(0);
diff --git a/test/cctest/test-list.cc b/test/cctest/test-list.cc
index 624b6e9..e20ee8a 100644
--- a/test/cctest/test-list.cc
+++ b/test/cctest/test-list.cc
@@ -99,3 +99,42 @@
     CHECK_EQ(i % 3, list[i]);
   }
 }
+
+
+TEST(RemoveLast) {
+  List<int> list(4);
+  CHECK_EQ(0, list.length());
+  list.Add(1);
+  CHECK_EQ(1, list.length());
+  CHECK_EQ(1, list.last());
+  list.RemoveLast();
+  CHECK_EQ(0, list.length());
+  list.Add(2);
+  list.Add(3);
+  CHECK_EQ(2, list.length());
+  CHECK_EQ(3, list.last());
+  list.RemoveLast();
+  CHECK_EQ(1, list.length());
+  CHECK_EQ(2, list.last());
+  list.RemoveLast();
+  CHECK_EQ(0, list.length());
+
+  const int kElements = 100;
+  for (int i = 0; i < kElements; i++) list.Add(i);
+  for (int j = kElements - 1; j >= 0; j--) {
+    CHECK_EQ(j + 1, list.length());
+    CHECK_EQ(j, list.last());
+    list.RemoveLast();
+    CHECK_EQ(j, list.length());
+  }
+}
+
+
+TEST(Clear) {
+  List<int> list(4);
+  CHECK_EQ(0, list.length());
+  for (int i = 0; i < 4; ++i) list.Add(i);
+  CHECK_EQ(4, list.length());
+  list.Clear();
+  CHECK_EQ(0, list.length());
+}
diff --git a/test/cctest/test-log-stack-tracer.cc b/test/cctest/test-log-stack-tracer.cc
index 312a443..c921176 100644
--- a/test/cctest/test-log-stack-tracer.cc
+++ b/test/cctest/test-log-stack-tracer.cc
@@ -1,4 +1,29 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 //
 // Tests of profiler-related functions from log.h
 
@@ -107,11 +132,17 @@
 
 
 Address TraceExtension::GetFP(const v8::Arguments& args) {
-  CHECK_EQ(1, args.Length());
-  // CodeGenerator::GenerateGetFramePointer pushes EBP / RBP value
-  // on stack. In 64-bit mode we can't use Smi operations code because
-  // they check that value is within Smi bounds.
+  // Decode the frame pointer, passed from JS as one or two smis.
+  CHECK_EQ(2, args.Length());  // Second argument is unused on 32-bit.
+#if defined(V8_HOST_ARCH_32_BIT)
   Address fp = *reinterpret_cast<Address*>(*args[0]);
+#elif defined(V8_HOST_ARCH_64_BIT)
+  int64_t low_bits = *reinterpret_cast<uint64_t*>(*args[0]) >> 32;
+  int64_t high_bits = *reinterpret_cast<uint64_t*>(*args[1]);
+  Address fp = reinterpret_cast<Address>(high_bits | low_bits);
+#else
+#error Host architecture is neither 32-bit nor 64-bit.
+#endif
   printf("Trace: %p\n", fp);
   return fp;
 }
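
Why the 32-bit read above round-trips: a smi on ia32 stores its value as
value << 1 with a zero low tag bit. The FPGrabber constructor further down
stores fp >> 1 as a smi, so the raw heap word it occupies is
(fp >> 1) << 1, which equals fp because frame pointers are word-aligned.
A standalone illustration of the arithmetic, using a hypothetical frame
pointer value:

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t fp = 0x0012ff70;            // hypothetical aligned frame pointer
      int32_t smi_value = fp >> 1;        // what the test stores in JS
      int32_t raw_word = smi_value << 1;  // how V8 lays out the smi in memory
      assert(raw_word == fp);             // GetFP reads this word directly
      return 0;
    }
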
@@ -210,51 +241,60 @@
 }
 
 
-namespace v8 {
-namespace internal {
+// This C++ function is called as a constructor, to grab the frame pointer
+// from the calling function.  When this function runs, the stack contains
+// a C_Entry frame and a Construct frame above the calling function's frame.
+static v8::Handle<Value> construct_call(const v8::Arguments& args) {
+  i::StackFrameIterator frame_iterator;
+  CHECK(frame_iterator.frame()->is_exit());
+  frame_iterator.Advance();
+  CHECK(frame_iterator.frame()->is_construct());
+  frame_iterator.Advance();
+  i::StackFrame* calling_frame = frame_iterator.frame();
+  CHECK(calling_frame->is_java_script());
 
-class CodeGeneratorPatcher {
- public:
-  CodeGeneratorPatcher() {
-    CodeGenerator::InlineRuntimeLUT gen_get_frame_pointer =
-        {&CodeGenerator::GenerateGetFramePointer, "_GetFramePointer", 0};
-    // _RandomHeapNumber is just used as a dummy function that has zero
-    // arguments, the same as the _GetFramePointer function we actually patch
-    // in.
-    bool result = CodeGenerator::PatchInlineRuntimeEntry(
-        NewString("_RandomHeapNumber"),
-        gen_get_frame_pointer, &old_inline_entry);
-    CHECK(result);
-  }
+#if defined(V8_HOST_ARCH_32_BIT)
+  int32_t low_bits = reinterpret_cast<intptr_t>(calling_frame->fp());
+  args.This()->Set(v8_str("low_bits"), v8_num(low_bits >> 1));
+#elif defined(V8_HOST_ARCH_64_BIT)
+  int32_t low_bits = reinterpret_cast<uintptr_t>(calling_frame->fp());
+  int32_t high_bits = reinterpret_cast<uintptr_t>(calling_frame->fp()) >> 32;
+  args.This()->Set(v8_str("low_bits"), v8_num(low_bits));
+  args.This()->Set(v8_str("high_bits"), v8_num(high_bits));
+#else
+#error Host architecture is neither 32-bit nor 64-bit.
+#endif
+  return args.This();
+}
 
-  ~CodeGeneratorPatcher() {
-    CHECK(CodeGenerator::PatchInlineRuntimeEntry(
-        NewString("_GetFramePointer"),
-        old_inline_entry, NULL));
-  }
 
- private:
-  CodeGenerator::InlineRuntimeLUT old_inline_entry;
-};
-
-} }  // namespace v8::internal
+// Use the API to create a JSFunction object that calls the above C++ function.
+void CreateFramePointerGrabberConstructor(const char* constructor_name) {
+  Local<v8::FunctionTemplate> constructor_template =
+      v8::FunctionTemplate::New(construct_call);
+  constructor_template->SetClassName(v8_str("FPGrabber"));
+  Local<Function> fun = constructor_template->GetFunction();
+  env->Global()->Set(v8_str(constructor_name), fun);
+}
 
 
 // Creates a global function named 'func_name' that calls the tracing
 // function 'trace_func_name' with an actual EBP register value,
-// shifted right to be presented as Smi.
+// encoded as one or two Smis.
 static void CreateTraceCallerFunction(const char* func_name,
                                       const char* trace_func_name) {
   i::EmbeddedVector<char, 256> trace_call_buf;
-  i::OS::SNPrintF(trace_call_buf, "%s(%%_GetFramePointer());", trace_func_name);
+  i::OS::SNPrintF(trace_call_buf,
+                  "fp = new FPGrabber(); %s(fp.low_bits, fp.high_bits);",
+                  trace_func_name);
+
+  // Create the FPGrabber function, which grabs the caller's frame pointer
+  // when called as a constructor.
+  CreateFramePointerGrabberConstructor("FPGrabber");
 
   // Compile the script.
-  i::CodeGeneratorPatcher patcher;
-  bool allow_natives_syntax = i::FLAG_allow_natives_syntax;
-  i::FLAG_allow_natives_syntax = true;
   Handle<JSFunction> func = CompileFunction(trace_call_buf.start());
   CHECK(!func.is_null());
-  i::FLAG_allow_natives_syntax = allow_natives_syntax;
   func->shared()->set_name(*NewString(func_name));
 
 #ifdef DEBUG
@@ -273,12 +313,6 @@
 // StackTracer uses Top::c_entry_fp as a starting point for stack
 // walking.
 TEST(CFromJSStackTrace) {
-  // TODO(711): The hack of replacing the inline runtime function
-  // RandomHeapNumber with GetFrameNumber does not work with the way
-  // the full compiler generates inline runtime calls.
-  i::FLAG_full_compiler = false;
-  i::FLAG_always_full_compiler = false;
-
   TickSample sample;
   InitTraceEnv(&sample);
 
@@ -314,12 +348,6 @@
 // Top::c_entry_fp value. In this case, StackTracer uses passed frame
 // pointer value as a starting point for stack walking.
 TEST(PureJSStackTrace) {
-  // TODO(711): The hack of replacing the inline runtime function
-  // RandomHeapNumber with GetFrameNumber does not work with the way
-  // the full compiler generates inline runtime calls.
-  i::FLAG_full_compiler = false;
-  i::FLAG_always_full_compiler = false;
-
   TickSample sample;
   InitTraceEnv(&sample);
 
diff --git a/test/cctest/test-parsing.cc b/test/cctest/test-parsing.cc
index d62b6a5..ed0c8b5 100755
--- a/test/cctest/test-parsing.cc
+++ b/test/cctest/test-parsing.cc
@@ -32,6 +32,7 @@
 #include "token.h"
 #include "scanner.h"
 #include "utils.h"
+#include "execution.h"
 
 #include "cctest.h"
 
@@ -127,3 +128,35 @@
   CHECK_EQ(i::Token::IDENTIFIER, full_stop.token());
 }
 
+
+TEST(ScanHTMLEndComments) {
+  // Regression test. See:
+  //    http://code.google.com/p/chromium/issues/detail?id=53548
+  // Tests that --> is correctly interpreted as a comment-to-end-of-line if
+  // there is only whitespace before it on the line, even after a multiline
+  // comment. This was not the case when it occurred before the first real
+  // token in the input.
+  const char* tests[] = {
+      // Before first real token.
+      "--> is eol-comment\nvar y = 37;\n",
+      "\n --> is eol-comment\nvar y = 37;\n",
+      "/* precomment */ --> is eol-comment\nvar y = 37;\n",
+      "\n/* precomment */ --> is eol-comment\nvar y = 37;\n",
+      // After first real token.
+      "var x = 42;\n--> is eol-comment\nvar y = 37;\n",
+      "var x = 42;\n/* precomment */ --> is eol-comment\nvar y = 37;\n",
+      NULL
+  };
+
+  // Parser needs a stack limit.
+  int marker;
+  i::StackGuard::SetStackLimit(
+      reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
+
+  for (int i = 0; tests[i]; i++) {
+    v8::ScriptData* data =
+        v8::ScriptData::PreCompile(tests[i], strlen(tests[i]));
+    CHECK(data != NULL && !data->HasError());
+    delete data;
+  }
+}
diff --git a/test/mjsunit/binary-op-newspace.js b/test/mjsunit/binary-op-newspace.js
index 8034209..40d53b9 100644
--- a/test/mjsunit/binary-op-newspace.js
+++ b/test/mjsunit/binary-op-newspace.js
@@ -25,21 +25,38 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-/**
- * @fileoverview Check that a mod where the stub code hits a failure
- * in heap number allocation still works.
- */
-
 // Flags: --max-new-space-size=262144
 
+
+// Check that a mod where the stub code hits a failure in heap number
+// allocation still works.
+
 function f(x) {
   return x % 3;
 }
 
-function test() {
+function testMod() {
   for (var i = 0; i < 40000; i++) {
     assertEquals(-1 / 0, 1 / f(-3));
   }
 }
 
-test();
+testMod();
+
+
+// Check that an add where the stub code hits a failure in heap number
+// allocation still works.
+
+function g(x, y) {
+  return x + y;
+}
+
+function testAdd() {
+  var lhs = 17.42;
+  var rhs = 42.17;
+  for (var i = 0; i < 40000; i++) {
+    assertEquals(59.59, g(lhs, rhs));
+  }
+}
+
+testAdd();
diff --git a/test/mjsunit/json.js b/test/mjsunit/json.js
index 945b662..5353d6c 100644
--- a/test/mjsunit/json.js
+++ b/test/mjsunit/json.js
@@ -317,3 +317,32 @@
 // Test string conversion of argument.
 var o = { toString: function() { return "42"; } };
 assertEquals(42, JSON.parse(o));
+
+
+for (var i = 0; i < 65536; i++) {
+  var string = String.fromCharCode(i);
+  var encoded = JSON.stringify(string);
+  var expected = "uninitialized";
+  // Following the ES5 specification of the abstraction function Quote.
+  if (string == '"' || string == '\\') {
+    // Step 2.a
+    expected = '\\' + string;
+  } else if ("\b\t\n\r\f".indexOf(string) >= 0) {
+    // Step 2.b
+    if (string == '\b') expected = '\\b';
+    else if (string == '\t') expected = '\\t';
+    else if (string == '\n') expected = '\\n';
+    else if (string == '\f') expected = '\\f';
+    else if (string == '\r') expected = '\\r';
+  } else if (i < 32) {
+    // Step 2.c
+    if (i < 16) {
+      expected = "\\u000" + i.toString(16);
+    } else {
+      expected = "\\u00" + i.toString(16);
+    }
+  } else {
+    expected = string;
+  }
+  assertEquals('"' + expected + '"', encoded, "Codepoint " + i);
+}