Push version 1.2.8 to trunk.

Optimized math on ARM platforms.

Fixed two crash bugs in the handling of getters and setters.

Improved the debugger support by adding scope chain information.

Improved the profiler support by compressing log data transmitted to clients.

Improved overall performance.



git-svn-id: http://v8.googlecode.com/svn/trunk@2181 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 3df6885..41b3234 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,17 @@
+2009-06-16: Version 1.2.8
+
+        Optimized math on ARM platforms.
+
+        Fixed two crash bugs in the handling of getters and setters.
+
+        Improved the debugger support by adding scope chain information.
+
+        Improved the profiler support by compressing log data transmitted
+        to clients.
+
+        Improved overall performance.
+
+
 2009-06-08: Version 1.2.7
 
         Improved debugger and profiler support.
diff --git a/include/v8.h b/include/v8.h
index 87ce2a2..e7b2677 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -212,9 +212,9 @@
    */
   bool IsEmpty() const { return val_ == 0; }
 
-  T* operator->() const;
+  T* operator->() const { return val_; }
 
-  T* operator*() const;
+  T* operator*() const { return val_; }
 
   /**
    * Sets the handle to be empty. IsEmpty() will then return true.
@@ -2509,18 +2509,6 @@
   V8::ClearWeak(reinterpret_cast<void**>(**this));
 }
 
-template <class T>
-T* Handle<T>::operator->() const {
-  return val_;
-}
-
-
-template <class T>
-T* Handle<T>::operator*() const {
-  return val_;
-}
-
-
 Local<Value> Arguments::operator[](int i) const {
   if (i < 0 || length_ <= i) return Local<Value>(*Undefined());
   return Local<Value>(reinterpret_cast<Value*>(values_ - i));
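
The two v8.h hunks work together: the accessors gain in-class bodies, so the
out-of-line template definitions further down the header can be deleted.  A
simplified standalone sketch of the resulting pattern (not V8's full class):

    template <class T>
    class Handle {
     public:
      explicit Handle(T* val) : val_(val) {}
      bool IsEmpty() const { return val_ == 0; }
      // Defined inside the class body, so implicitly inline; no separate
      // template definitions are needed elsewhere in the header.
      T* operator->() const { return val_; }
      T* operator*() const { return val_; }
     private:
      T* val_;
    };
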
diff --git a/src/SConscript b/src/SConscript
index 64d2063..f1ca875 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -77,7 +77,8 @@
     'x64/debug-x64.cc', 'x64/frames-x64.cc', 'x64/ic-x64.cc',
     'x64/jump-target-x64.cc', 'x64/macro-assembler-x64.cc',
     # 'x64/regexp-macro-assembler-x64.cc',
-    'x64/stub-cache-x64.cc'
+    'x64/register-allocator-x64.cc',
+    'x64/stub-cache-x64.cc', 'x64/virtual-frame-x64.cc'
   ],
   'simulator:arm': ['arm/simulator-arm.cc'],
   'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'],
diff --git a/src/api.cc b/src/api.cc
index 7b7f290..097c2ea 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -2124,7 +2124,9 @@
   } else {
     int attempts = 0;
     do {
-      hash_value = random() & i::Smi::kMaxValue;  // Limit range to fit a smi.
+      // Generate a random 32-bit hash value but limit range to fit
+      // within a smi.
+      hash_value = i::V8::Random() & i::Smi::kMaxValue;
       attempts++;
     } while (hash_value == 0 && attempts < 30);
     hash_value = hash_value != 0 ? hash_value : 1;  // never return 0
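
Schematically, the loop above retries until it draws a nonzero Smi-range hash,
then falls back to 1.  A sketch, where random32() and kSmiMaxValue stand in
for i::V8::Random() and i::Smi::kMaxValue:

    uint32_t hash_value;
    int attempts = 0;
    do {
      hash_value = random32() & kSmiMaxValue;  // mask value into Smi range
      attempts++;
    } while (hash_value == 0 && attempts < 30);
    if (hash_value == 0) hash_value = 1;  // an identity hash is never 0
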
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 588798b..6d23a19 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -64,9 +64,7 @@
   __ tst(r1, Operand(kSmiTagMask));
   __ b(eq, &non_function_call);
   // Check that the function is a JSFunction.
-  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-  __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
-  __ cmp(r2, Operand(JS_FUNCTION_TYPE));
+  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
   __ b(ne, &non_function_call);
 
   // Enter a construct frame.
@@ -159,9 +157,7 @@
 
   // If the type of the result (stored in its map) is less than
   // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
-  __ ldr(r3, FieldMemOperand(r0, HeapObject::kMapOffset));
-  __ ldrb(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
-  __ cmp(r3, Operand(FIRST_JS_OBJECT_TYPE));
+  __ CompareObjectType(r0, r3, r3, FIRST_JS_OBJECT_TYPE);
   __ b(ge, &exit);
 
   // Throw away the result of the constructor invocation and use the
@@ -290,9 +286,7 @@
     __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
     __ tst(r1, Operand(kSmiTagMask));
     __ b(eq, &non_function);
-    __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-    __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
-    __ cmp(r2, Operand(JS_FUNCTION_TYPE));
+    __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
     __ b(eq, &function);
 
     // Non-function called: Clear the function to force exception.
@@ -328,9 +322,7 @@
     __ cmp(r2, r3);
     __ b(eq, &use_global_receiver);
 
-    __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
-    __ ldrb(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
-    __ cmp(r3, Operand(FIRST_JS_OBJECT_TYPE));
+    __ CompareObjectType(r2, r3, r3, FIRST_JS_OBJECT_TYPE);
     __ b(lt, &call_to_object);
     __ cmp(r3, Operand(LAST_JS_OBJECT_TYPE));
     __ b(le, &done);
@@ -501,9 +493,7 @@
 
   // Check if the receiver is already a JavaScript object.
   // r0: receiver
-  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
-  __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
-  __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
+  __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
   __ b(lt, &call_to_object);
   __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
   __ b(le, &push_receiver);
diff --git a/src/arm/codegen-arm-inl.h b/src/arm/codegen-arm-inl.h
index 544331a..5a29a45 100644
--- a/src/arm/codegen-arm-inl.h
+++ b/src/arm/codegen-arm-inl.h
@@ -39,6 +39,16 @@
 void DeferredCode::Jump() { __ jmp(&entry_label_); }
 void DeferredCode::Branch(Condition cc) { __ b(cc, &entry_label_); }
 
+void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
+  GenerateFastMathOp(SIN, args);
+}
+
+
+void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
+  GenerateFastMathOp(COS, args);
+}
+
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 7428d3b..8c28b24 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -289,7 +289,6 @@
     // r0: result
     // sp: stack pointer
     // fp: frame pointer
-    // pp: parameter pointer
     // cp: callee's context
     __ mov(r0, Operand(Factory::undefined_value()));
 
@@ -703,6 +702,7 @@
   }
 
   void Generate(MacroAssembler* masm);
+  void HandleNonSmiBitwiseOp(MacroAssembler* masm);
 
   const char* GetName() {
     switch (op_) {
@@ -1503,9 +1503,7 @@
   // Test for a Smi value in a HeapNumber.
   __ tst(r0, Operand(kSmiTagMask));
   is_smi.Branch(eq);
-  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
-  __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
-  __ cmp(r1, Operand(HEAP_NUMBER_TYPE));
+  __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
   default_target->Branch(ne);
   frame_->EmitPush(r0);
   frame_->CallRuntime(Runtime::kNumberToSmi, 1);
@@ -1872,9 +1870,7 @@
   // Check if enumerable is already a JSObject
   __ tst(r0, Operand(kSmiTagMask));
   primitive.Branch(eq);
-  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
-  __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
-  __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
+  __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
   jsobject.Branch(hs);
 
   primitive.Bind();
@@ -2107,14 +2103,16 @@
   // Get an external reference to the handler address.
   ExternalReference handler_address(Top::k_handler_address);
 
-  // The next handler address is at kNextIndex in the stack.
-  const int kNextIndex = StackHandlerConstants::kNextOffset / kPointerSize;
   // If we can fall off the end of the try block, unlink from try chain.
   if (has_valid_frame()) {
-    __ ldr(r1, frame_->ElementAt(kNextIndex));
+    // The next handler address is on top of the frame.  Unlink from
+    // the handler list and drop the rest of this handler from the
+    // frame.
+    ASSERT(StackHandlerConstants::kNextOffset == 0);
+    frame_->EmitPop(r1);
     __ mov(r3, Operand(handler_address));
     __ str(r1, MemOperand(r3));
-    frame_->Drop(StackHandlerConstants::kSize / kPointerSize);
+    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
     if (has_unlinks) {
       exit.Jump();
     }
@@ -2134,15 +2132,11 @@
       // break from (eg, for...in) may have left stuff on the stack.
       __ mov(r3, Operand(handler_address));
       __ ldr(sp, MemOperand(r3));
-      // The stack pointer was restored to just below the code slot
-      // (the topmost slot) in the handler.
-      frame_->Forget(frame_->height() - handler_height + 1);
+      frame_->Forget(frame_->height() - handler_height);
 
-      // kNextIndex is off by one because the code slot has already
-      // been dropped.
-      __ ldr(r1, frame_->ElementAt(kNextIndex - 1));
+      ASSERT(StackHandlerConstants::kNextOffset == 0);
+      frame_->EmitPop(r1);
       __ str(r1, MemOperand(r3));
-      // The code slot has already been dropped from the handler.
       frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
 
       if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
@@ -2223,15 +2217,15 @@
   // Get an external reference to the handler address.
   ExternalReference handler_address(Top::k_handler_address);
 
-  // The next handler address is at kNextIndex in the stack.
-  const int kNextIndex = StackHandlerConstants::kNextOffset / kPointerSize;
   // If we can fall off the end of the try block, unlink from the try
   // chain and set the state on the frame to FALLING.
   if (has_valid_frame()) {
-    __ ldr(r1, frame_->ElementAt(kNextIndex));
+    // The next handler address is on top of the frame.
+    ASSERT(StackHandlerConstants::kNextOffset == 0);
+    frame_->EmitPop(r1);
     __ mov(r3, Operand(handler_address));
     __ str(r1, MemOperand(r3));
-    frame_->Drop(StackHandlerConstants::kSize / kPointerSize);
+    frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
 
     // Fake a top of stack value (unneeded when FALLING) and set the
     // state in r2, then jump around the unlink blocks if any.
@@ -2262,17 +2256,14 @@
       // stack.
       __ mov(r3, Operand(handler_address));
       __ ldr(sp, MemOperand(r3));
-      // The stack pointer was restored to the address slot in the handler.
-      ASSERT(StackHandlerConstants::kNextOffset == 1 * kPointerSize);
-      frame_->Forget(frame_->height() - handler_height + 1);
+      frame_->Forget(frame_->height() - handler_height);
 
       // Unlink this handler and drop it from the frame.  The next
-      // handler address is now on top of the frame.
+      // handler address is currently on top of the frame.
+      ASSERT(StackHandlerConstants::kNextOffset == 0);
       frame_->EmitPop(r1);
       __ str(r1, MemOperand(r3));
-      // The top (code) and the second (handler) slot have both been
-      // dropped already.
-      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 2);
+      frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
 
       if (i == kReturnShadowIndex) {
         // If this label shadowed the function return, materialize the
@@ -3281,11 +3272,8 @@
   // if (object->IsSmi()) return the object.
   __ tst(r0, Operand(kSmiTagMask));
   leave.Branch(eq);
-  // It is a heap object - get map.
-  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
-  __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
-  // if (!object->IsJSValue()) return the object.
-  __ cmp(r1, Operand(JS_VALUE_TYPE));
+  // It is a heap object - get map. If (!object->IsJSValue()) return the object.
+  __ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
   leave.Branch(ne);
   // Load the value.
   __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
@@ -3305,11 +3293,8 @@
   // if (object->IsSmi()) return object.
   __ tst(r1, Operand(kSmiTagMask));
   leave.Branch(eq);
-  // It is a heap object - get map.
-  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-  __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
-  // if (!object->IsJSValue()) return object.
-  __ cmp(r2, Operand(JS_VALUE_TYPE));
+  // It is a heap object - get map. If (!object->IsJSValue()) return the object.
+  __ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
   leave.Branch(ne);
   // Store the value.
   __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
@@ -3381,11 +3366,8 @@
   __ and_(r1, r0, Operand(kSmiTagMask));
   __ eor(r1, r1, Operand(kSmiTagMask), SetCC);
   answer.Branch(ne);
-  // It is a heap object - get the map.
-  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
-  __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
-  // Check if the object is a JS array or not.
-  __ cmp(r1, Operand(JS_ARRAY_TYPE));
+  // It is a heap object - get the map. Check if the object is a JS array.
+  __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
   answer.Bind();
   cc_reg_ = eq;
 }
@@ -3423,6 +3405,30 @@
 }
 
 
+void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
+  VirtualFrame::SpilledScope spilled_scope;
+  ASSERT(args->length() == 0);
+  __ Call(ExternalReference::random_positive_smi_function().address(),
+          RelocInfo::RUNTIME_ENTRY);
+  frame_->EmitPush(r0);
+}
+
+
+void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
+  VirtualFrame::SpilledScope spilled_scope;
+  LoadAndSpill(args->at(0));
+  switch (op) {
+    case SIN:
+      frame_->CallRuntime(Runtime::kMath_sin, 1);
+      break;
+    case COS:
+      frame_->CallRuntime(Runtime::kMath_cos, 1);
+      break;
+  }
+  frame_->EmitPush(r0);
+}
+
+
 void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
   VirtualFrame::SpilledScope spilled_scope;
   ASSERT(args->length() == 2);
@@ -3571,7 +3577,10 @@
         break;
 
       case Token::SUB: {
-        UnarySubStub stub;
+        bool overwrite =
+            (node->AsBinaryOperation() != NULL &&
+             node->AsBinaryOperation()->ResultOverwriteAllowed());
+        UnarySubStub stub(overwrite);
         frame_->CallStub(&stub, 0);
         break;
       }
@@ -4001,9 +4010,7 @@
     } else if (check->Equals(Heap::function_symbol())) {
       __ tst(r1, Operand(kSmiTagMask));
       false_target()->Branch(eq);
-      __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
-      __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
-      __ cmp(r1, Operand(JS_FUNCTION_TYPE));
+      __ CompareObjectType(r1, r1, r1, JS_FUNCTION_TYPE);
       cc_reg_ = eq;
 
     } else if (check->Equals(Heap::object_symbol())) {
@@ -4076,13 +4083,9 @@
     }
 
     case Token::INSTANCEOF: {
-      Result arg_count = allocator_->Allocate(r0);
-      ASSERT(arg_count.is_valid());
-      __ mov(arg_count.reg(), Operand(1));  // not counting receiver
-      Result result = frame_->InvokeBuiltin(Builtins::INSTANCE_OF,
-                                            CALL_JS,
-                                            &arg_count,
-                                            2);
+      InstanceofStub stub;
+      Result result = frame_->CallStub(&stub, 2);
+      // At this point if instanceof succeeded then r0 == 0.
       __ tst(result.reg(), Operand(result.reg()));
       cc_reg_ = eq;
       break;
@@ -4341,6 +4344,229 @@
 }
 
 
+// Count leading zeros in a 32 bit word.  On ARM5 and later it uses the clz
+// instruction.  On pre-ARM5 hardware this routine gives the wrong answer for 0
+// (31 instead of 32).
+static void CountLeadingZeros(
+    MacroAssembler* masm,
+    Register source,
+    Register scratch,
+    Register zeros) {
+#ifdef __ARM_ARCH_5__
+  __ clz(zeros, source);  // This instruction is only supported on ARM5 and later.
+#else
+  __ mov(zeros, Operand(0));
+  __ mov(scratch, source);
+  // Top 16.
+  __ tst(scratch, Operand(0xffff0000));
+  __ add(zeros, zeros, Operand(16), LeaveCC, eq);
+  __ mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
+  // Top 8.
+  __ tst(scratch, Operand(0xff000000));
+  __ add(zeros, zeros, Operand(8), LeaveCC, eq);
+  __ mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
+  // Top 4.
+  __ tst(scratch, Operand(0xf0000000));
+  __ add(zeros, zeros, Operand(4), LeaveCC, eq);
+  __ mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
+  // Top 2.
+  __ tst(scratch, Operand(0xc0000000));
+  __ add(zeros, zeros, Operand(2), LeaveCC, eq);
+  __ mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
+  // Top bit.
+  __ tst(scratch, Operand(0x80000000));
+  __ add(zeros, zeros, Operand(1), LeaveCC, eq);
+#endif
+}
+
+
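
The fallback branch is the classic binary-search count-leading-zeros, written
with ARM conditional execution (the eq-conditioned add/mov pairs) instead of
branches.  The same algorithm in plain C, as an illustrative sketch that
matches the pre-ARM5 behaviour of returning 31 rather than 32 for zero:

    #include <stdint.h>

    static int count_leading_zeros(uint32_t x) {
      int zeros = 0;
      if ((x & 0xffff0000u) == 0) { zeros += 16; x <<= 16; }  // top 16
      if ((x & 0xff000000u) == 0) { zeros += 8;  x <<= 8;  }  // top 8
      if ((x & 0xf0000000u) == 0) { zeros += 4;  x <<= 4;  }  // top 4
      if ((x & 0xc0000000u) == 0) { zeros += 2;  x <<= 2;  }  // top 2
      if ((x & 0x80000000u) == 0) { zeros += 1; }             // top bit
      return zeros;  // returns 31, not 32, for x == 0
    }
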
+// Takes a Smi and converts to an IEEE 64 bit floating point value in two
+// registers.  The format is 1 sign bit, 11 exponent bits (biased 1023) and
+// 52 fraction bits (20 in the first word, 32 in the second).  Zeros is a
+// scratch register.  Destroys the source register.  No GC occurs during this
+// stub so you don't have to set up the frame.
+class ConvertToDoubleStub : public CodeStub {
+ public:
+  ConvertToDoubleStub(Register result_reg_1,
+                      Register result_reg_2,
+                      Register source_reg,
+                      Register scratch_reg)
+      : result1_(result_reg_1),
+        result2_(result_reg_2),
+        source_(source_reg),
+        zeros_(scratch_reg) { }
+
+ private:
+  Register result1_;
+  Register result2_;
+  Register source_;
+  Register zeros_;
+
+  // Minor key encoding in 16 bits.
+  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+  class OpBits: public BitField<Token::Value, 2, 14> {};
+
+  Major MajorKey() { return ConvertToDouble; }
+  int MinorKey() {
+    // Encode the parameters in a unique 16 bit value.
+    return  result1_.code() +
+           (result2_.code() << 4) +
+           (source_.code() << 8) +
+           (zeros_.code() << 12);
+  }
+
+  void Generate(MacroAssembler* masm);
+
+  const char* GetName() { return "ConvertToDoubleStub"; }
+
+#ifdef DEBUG
+  void Print() { PrintF("ConvertToDoubleStub\n"); }
+#endif
+};
+
+
+void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
+#ifndef BIG_ENDIAN_FLOATING_POINT
+  Register exponent = result1_;
+  Register mantissa = result2_;
+#else
+  Register exponent = result2_;
+  Register mantissa = result1_;
+#endif
+  Label not_special;
+  // Convert from Smi to integer.
+  __ mov(source_, Operand(source_, ASR, kSmiTagSize));
+  // Move sign bit from source to destination.  This works because the sign bit
+  // in the exponent word of the double has the same position and polarity as
+  // the 2's complement sign bit in a Smi.
+  ASSERT(HeapNumber::kSignMask == 0x80000000u);
+  __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
+  // Subtract from 0 if source was negative.
+  __ rsb(source_, source_, Operand(0), LeaveCC, ne);
+  __ cmp(source_, Operand(1));
+  __ b(gt, &not_special);
+
+  // We have -1, 0 or 1, which we treat specially.
+  __ cmp(source_, Operand(0));
+  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
+  static const uint32_t exponent_word_for_1 =
+      HeapNumber::kExponentBias << HeapNumber::kExponentShift;
+  __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, ne);
+  // 1, 0 and -1 all have 0 for the second word.
+  __ mov(mantissa, Operand(0));
+  __ Ret();
+
+  __ bind(&not_special);
+  // Count leading zeros.  Uses result2 for a scratch register on pre-ARM5.
+  // Gets the wrong answer for 0, but we already checked for that case above.
+  CountLeadingZeros(masm, source_, mantissa, zeros_);
+  // Compute exponent and or it into the exponent register.
+  // We use result2 as a scratch register here.
+  __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias));
+  __ orr(exponent,
+         exponent,
+         Operand(mantissa, LSL, HeapNumber::kExponentShift));
+  // Shift up the source chopping the top bit off.
+  __ add(zeros_, zeros_, Operand(1));
+  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
+  __ mov(source_, Operand(source_, LSL, zeros_));
+  // Compute lower part of fraction (last 12 bits).
+  __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
+  // And the top (top 20 bits).
+  __ orr(exponent,
+         exponent,
+         Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
+  __ Ret();
+}
+
+
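
The bit layout ConvertToDoubleStub produces is easier to check in C.  A hedged
sketch of the same conversion, using GCC's __builtin_clz in place of the
CountLeadingZeros helper and assuming the input is in Smi range:

    #include <stdint.h>

    // hi = sign | biased exponent | top 20 fraction bits; lo = low 32 bits.
    static void smi_to_double_words(int32_t n, uint32_t* hi, uint32_t* lo) {
      uint32_t sign = (uint32_t)n & 0x80000000u;
      uint32_t mag = (uint32_t)(n < 0 ? -(int64_t)n : n);
      if (mag <= 1) {  // -1, 0 and 1 are treated specially, as in the stub
        *hi = sign | (mag ? 1023u << 20 : 0);  // exponent 0, biased to 1023
        *lo = 0;
        return;
      }
      int zeros = __builtin_clz(mag);
      uint32_t frac = mag << (zeros + 1);  // chop off the implicit leading 1
      *hi = sign | ((uint32_t)(31 - zeros + 1023) << 20) | (frac >> 12);
      *lo = frac << 20;
    }
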
+// This stub can convert a signed int32 to a heap number (double).  It does
+// not work for int32s that are in Smi range!  No GC occurs during this stub
+// so you don't have to set up the frame.
+class WriteInt32ToHeapNumberStub : public CodeStub {
+ public:
+  WriteInt32ToHeapNumberStub(Register the_int,
+                             Register the_heap_number,
+                             Register scratch)
+      : the_int_(the_int),
+        the_heap_number_(the_heap_number),
+        scratch_(scratch) { }
+
+ private:
+  Register the_int_;
+  Register the_heap_number_;
+  Register scratch_;
+
+  // Minor key encoding in 16 bits.
+  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+  class OpBits: public BitField<Token::Value, 2, 14> {};
+
+  Major MajorKey() { return WriteInt32ToHeapNumber; }
+  int MinorKey() {
+    // Encode the parameters in a unique 16 bit value.
+    return  the_int_.code() +
+           (the_heap_number_.code() << 4) +
+           (scratch_.code() << 8);
+  }
+
+  void Generate(MacroAssembler* masm);
+
+  const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
+
+#ifdef DEBUG
+  void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
+#endif
+};
+
+
+// See comment for class.
+void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
+  Label max_negative_int;
+  // the_int_ has the answer which is a signed int32 but not a Smi.
+  // We test for the special value that has a different exponent.  This test
+  // has the neat side effect of setting the flags according to the sign.
+  ASSERT(HeapNumber::kSignMask == 0x80000000u);
+  __ cmp(the_int_, Operand(0x80000000));
+  __ b(eq, &max_negative_int);
+  // Set up the correct exponent in scratch_.  All non-Smi int32s have the same.
+  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
+  uint32_t non_smi_exponent =
+      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+  __ mov(scratch_, Operand(non_smi_exponent));
+  // Set the sign bit in scratch_ if the value was negative.
+  __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
+  // Subtract from 0 if the value was negative.
+  __ rsb(the_int_, the_int_, Operand(0), LeaveCC, cs);
+  // We should be masking the implicit first digit of the mantissa away here,
+  // but it just ends up combining harmlessly with the last digit of the
+  // exponent that happens to be 1.  The sign bit is 0 so we shift 10 to get
+  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
+  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
+  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+  __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
+  __ str(scratch_, FieldMemOperand(the_heap_number_,
+                                   HeapNumber::kExponentOffset));
+  __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
+  __ str(scratch_, FieldMemOperand(the_heap_number_,
+                                   HeapNumber::kMantissaOffset));
+  __ Ret();
+
+  __ bind(&max_negative_int);
+  // The max negative int32 is stored as a positive number in the mantissa of
+  // a double because it uses a sign bit instead of using two's complement.
+  // The actual mantissa bits stored are all 0 because the implicit most
+  // significant 1 bit is not stored.
+  non_smi_exponent += 1 << HeapNumber::kExponentShift;
+  __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
+  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
+  __ mov(ip, Operand(0));
+  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
+  __ Ret();
+}
+
+
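
WriteInt32ToHeapNumberStub can skip the leading-zero count entirely because
every int32 outside Smi range has magnitude in [2^30, 2^31], which fixes the
exponent.  The same logic as a C sketch under that precondition:

    #include <stdint.h>

    static void int32_to_double_words(int32_t n, uint32_t* hi, uint32_t* lo) {
      if (n == (int32_t)0x80000000) {  // -2^31: sign set, all-zero fraction
        *hi = 0x80000000u | ((1023u + 31u) << 20);
        *lo = 0;
        return;
      }
      uint32_t sign = n < 0 ? 0x80000000u : 0;
      uint32_t mag = n < 0 ? (uint32_t)-n : (uint32_t)n;  // in [2^30, 2^31)
      // Shifting right by 10 drops the implicit leading 1 onto the lowest
      // exponent bit, which is already 1, so no masking is needed.
      *hi = sign | ((1023u + 30u) << 20) | (mag >> 10);
      *lo = mag << 22;
    }
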
+// Allocates a heap number or jumps to the label if the young space is full and
+// a scavenge is needed.
 static void AllocateHeapNumber(
     MacroAssembler* masm,
     Label* need_gc,       // Jump here if young space is full.
@@ -4379,78 +4605,121 @@
 
 // We fall into this code if the operands were Smis, but the result was
 // not (e.g. overflow).  We branch into this code (to the not_smi label) if
-// the operands were not both Smi.
+// the operands were not both Smi.  The operands are in r0 and r1.  In order
+// to call the C-implemented binary fp operation routines we need to end up
+// with the double precision floating point operands in r0 and r1 (for the
+// value in r1) and r2 and r3 (for the value in r0).
 static void HandleBinaryOpSlowCases(MacroAssembler* masm,
                                     Label* not_smi,
                                     const Builtins::JavaScript& builtin,
                                     Token::Value operation,
-                                    int swi_number,
                                     OverwriteMode mode) {
-  Label slow;
+  Label slow, slow_pop_2_first, do_the_call;
+  Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
+  // Smi-smi case (overflow).
+  // Since both are Smis there is no heap number to overwrite, so allocate.
+  // The new heap number is in r5.  r6 and r7 are scratch.
+  AllocateHeapNumber(masm, &slow, r5, r6, r7);
+  // Write Smi from r0 to r3 and r2 in double format.  r6 is scratch.
+  ConvertToDoubleStub stub1(r3, r2, r0, r6);
+  __ push(lr);
+  __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+  // Write Smi from r1 to r1 and r0 in double format.  r6 is scratch.
+  __ mov(r7, Operand(r1));
+  ConvertToDoubleStub stub2(r1, r0, r7, r6);
+  __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+  __ pop(lr);
+  __ jmp(&do_the_call);  // Tail call.  No return.
+
+  // We jump to here if something goes wrong (one param is not a number of any
+  // sort or new-space allocation fails).
   __ bind(&slow);
   __ push(r1);
   __ push(r0);
   __ mov(r0, Operand(1));  // Set number of arguments.
-  __ InvokeBuiltin(builtin, JUMP_JS);  // Tail call.
+  __ InvokeBuiltin(builtin, JUMP_JS);  // Tail call.  No return.
 
+  // We branch here if at least one of r0 and r1 is not a Smi.
   __ bind(not_smi);
+  if (mode == NO_OVERWRITE) {
+    // In the case where there is no chance of an overwritable float we may as
+    // well do the allocation immediately while r0 and r1 are untouched.
+    AllocateHeapNumber(masm, &slow, r5, r6, r7);
+  }
+
+  // Move r0 to a double in r2-r3.
   __ tst(r0, Operand(kSmiTagMask));
-  __ b(eq, &slow);  // We can't handle a Smi-double combination yet.
-  __ tst(r1, Operand(kSmiTagMask));
-  __ b(eq, &slow);  // We can't handle a Smi-double combination yet.
-  // Get map of r0 into r2.
-  __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
-  // Get type of r0 into r3.
-  __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceTypeOffset));
-  __ cmp(r3, Operand(HEAP_NUMBER_TYPE));
+  __ b(eq, &r0_is_smi);  // It's a Smi so don't check it's a heap number.
+  __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
   __ b(ne, &slow);
-  // Get type of r1 into r3.
-  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
-  // Check they are both the same map (heap number map).
-  __ cmp(r2, r3);
-  __ b(ne, &slow);
-  // Both are doubles.
+  if (mode == OVERWRITE_RIGHT) {
+    __ mov(r5, Operand(r0));  // Overwrite this heap number.
+  }
   // Calling convention says that second double is in r2 and r3.
   __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
-  __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
-
-  if (mode == NO_OVERWRITE) {
-    // Get address of new heap number into r5.
+  __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4));
+  __ jmp(&finished_loading_r0);
+  __ bind(&r0_is_smi);
+  if (mode == OVERWRITE_RIGHT) {
+    // We can't overwrite a Smi so get address of new heap number into r5.
     AllocateHeapNumber(masm, &slow, r5, r6, r7);
-    __ push(lr);
-    __ push(r5);
-  } else if (mode == OVERWRITE_LEFT) {
-    __ push(lr);
-    __ push(r1);
-  } else {
-    ASSERT(mode == OVERWRITE_RIGHT);
-    __ push(lr);
-    __ push(r0);
+  }
+  // Write Smi from r0 to r3 and r2 in double format.
+  __ mov(r7, Operand(r0));
+  ConvertToDoubleStub stub3(r3, r2, r7, r6);
+  __ push(lr);
+  __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
+  __ pop(lr);
+  __ bind(&finished_loading_r0);
+
+  // Move r1 to a double in r0-r1.
+  __ tst(r1, Operand(kSmiTagMask));
+  __ b(eq, &r1_is_smi);  // It's a Smi so don't check it's a heap number.
+  __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
+  __ b(ne, &slow);
+  if (mode == OVERWRITE_LEFT) {
+    __ mov(r5, Operand(r1));  // Overwrite this heap number.
   }
   // Calling convention says that first double is in r0 and r1.
   __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
-  __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
+  __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4));
+  __ jmp(&finished_loading_r1);
+  __ bind(&r1_is_smi);
+  if (mode == OVERWRITE_LEFT) {
+    // We can't overwrite a Smi so get address of new heap number into r5.
+    AllocateHeapNumber(masm, &slow, r5, r6, r7);
+  }
+  // Write Smi from r1 to r1 and r0 in double format.
+  __ mov(r7, Operand(r1));
+  ConvertToDoubleStub stub4(r1, r0, r7, r6);
+  __ push(lr);
+  __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
+  __ pop(lr);
+  __ bind(&finished_loading_r1);
+
+  __ bind(&do_the_call);
+  // r0: Left value (least significant part of mantissa).
+  // r1: Left value (sign, exponent, top of mantissa).
+  // r2: Right value (least significant part of mantissa).
+  // r3: Right value (sign, exponent, top of mantissa).
+  // r5: Address of heap number for result.
+  __ push(lr);   // For later.
+  __ push(r5);   // Address of heap number that is the answer.
   // Call C routine that may not cause GC or other trouble.
   __ mov(r5, Operand(ExternalReference::double_fp_operation(operation)));
-#if !defined(__arm__)
-  // Notify the simulator that we are calling an add routine in C.
-  __ swi(swi_number);
-#else
-  // Actually call the add routine written in C.
   __ Call(r5);
-#endif
   // Store answer in the overwritable heap number.
   __ pop(r4);
-#if !defined(__ARM_EABI__) && defined(__arm__)
+#if !defined(USE_ARM_EABI)
   // Double returned in fp coprocessor register 0 and 1, encoded as register
   // cr8.  Offsets must be divisible by 4 for coprocessor so we need to
+  // subtract the tag from r4.
   __ sub(r5, r4, Operand(kHeapObjectTag));
   __ stc(p1, cr8, MemOperand(r5, HeapNumber::kValueOffset));
 #else
-  // Double returned in fp coprocessor register 0 and 1.
+  // Double returned in core registers r0 and r1.
   __ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset));
-  __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + kPointerSize));
+  __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + 4));
 #endif
   __ mov(r0, Operand(r4));
   // And we are done.
@@ -4458,6 +4727,185 @@
 }
 
 
+// Tries to get a signed int32 out of a double precision floating point heap
+// number.  Rounds towards 0.  Only succeeds for doubles that are in the ranges
+// -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff.  This is almost
+// exactly the range of signed int32 values that are not Smis.  Jumps to the
+// label if the double isn't in the range it can cope with.
+static void GetInt32(MacroAssembler* masm,
+                     Register source,
+                     Register dest,
+                     Register scratch,
+                     Label* slow) {
+  Register scratch2 = dest;
+  // Get exponent word.
+  __ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
+  // Get exponent alone in scratch2.
+  __ and_(scratch2, scratch, Operand(HeapNumber::kExponentMask));
+  // Check whether the exponent matches a 32 bit signed int that is not a Smi.
+  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
+  const uint32_t non_smi_exponent =
+      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+  __ cmp(scratch2, Operand(non_smi_exponent));
+  // If not, then we go slow.
+  __ b(ne, slow);
+  // Get the top bits of the mantissa.
+  __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
+  // Put back the implicit 1.
+  __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
+  // Shift up the mantissa bits to take up the space the exponent used to take.
+  // We just orred in the implicit bit so that took care of one and we want to
+  // leave the sign bit 0 so we subtract 2 bits from the shift distance.
+  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+  __ mov(scratch2, Operand(scratch2, LSL, shift_distance));
+  // Put sign in zero flag.
+  __ tst(scratch, Operand(HeapNumber::kSignMask));
+  // Get the second half of the double.
+  __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
+  // Shift down 22 bits to get the last 10 bits.
+  __ orr(dest, scratch2, Operand(scratch, LSR, 32 - shift_distance));
+  // Fix sign if sign bit was set.
+  __ rsb(dest, dest, Operand(0), LeaveCC, ne);
+}
+
+
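
GetInt32 is the inverse under the same precondition: accept only doubles whose
biased exponent is exactly 1023 + 30 and reassemble the 31-bit magnitude from
the two mantissa words, discarding the low 22 fraction bits (truncation toward
zero).  A sketch; the real helper works on registers and branches to a
slow-case label instead of returning a flag:

    #include <stdint.h>

    // Returns 1 and stores the value in *out on success, 0 for the slow case.
    static int double_words_to_int32(uint32_t hi, uint32_t lo, int32_t* out) {
      if ((hi & 0x7ff00000u) != ((1023u + 30u) << 20)) return 0;
      uint32_t top = (hi & 0x000fffffu) | (1u << 20);  // put back implicit 1
      uint32_t mag = (top << 10) | (lo >> 22);         // 21 + 10 = 31 bits
      *out = (hi & 0x80000000u) ? -(int32_t)mag : (int32_t)mag;
      return 1;
    }
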
+// For bitwise ops where the inputs are not both Smis, we try to determine
+// whether both inputs are either Smis or at least heap numbers that can be
+// represented by a 32 bit signed value.  We truncate towards zero as required
+// by the ES spec.  If this is the case we do the bitwise op and see if the
+// result is a Smi.  If so, great, otherwise we try to find a heap number to
+// write the answer into (either by allocating or by overwriting).
+// On entry the operands are in r0 and r1.  On exit the answer is in r0.
+void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) {
+  Label slow, result_not_a_smi;
+  Label r0_is_smi, r1_is_smi;
+  Label done_checking_r0, done_checking_r1;
+
+  __ tst(r1, Operand(kSmiTagMask));
+  __ b(eq, &r1_is_smi);  // It's a Smi so don't check it's a heap number.
+  __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
+  __ b(ne, &slow);
+  GetInt32(masm, r1, r3, r4, &slow);
+  __ jmp(&done_checking_r1);
+  __ bind(&r1_is_smi);
+  __ mov(r3, Operand(r1, ASR, 1));
+  __ bind(&done_checking_r1);
+
+  __ tst(r0, Operand(kSmiTagMask));
+  __ b(eq, &r0_is_smi);  // It's a Smi so don't check it's a heap number.
+  __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+  __ b(ne, &slow);
+  GetInt32(masm, r0, r2, r4, &slow);
+  __ jmp(&done_checking_r0);
+  __ bind(&r0_is_smi);
+  __ mov(r2, Operand(r0, ASR, 1));
+  __ bind(&done_checking_r0);
+
+  // r0 and r1: Original operands (Smi or heap numbers).
+  // r2 and r3: Signed int32 operands.
+  switch (op_) {
+    case Token::BIT_OR:  __ orr(r2, r2, Operand(r3)); break;
+    case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break;
+    case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break;
+    case Token::SAR:
+      // Use only the 5 least significant bits of the shift count.
+      __ and_(r2, r2, Operand(0x1f));
+      __ mov(r2, Operand(r3, ASR, r2));
+      break;
+    case Token::SHR:
+      // Use only the 5 least significant bits of the shift count.
+      __ and_(r2, r2, Operand(0x1f));
+      __ mov(r2, Operand(r3, LSR, r2), SetCC);
+      // SHR is special because it is required to produce a positive answer.
+      // The code below for writing into heap numbers isn't capable of writing
+      // the register as an unsigned int so we go to slow case if we hit this
+      // case.
+      __ b(mi, &slow);
+      break;
+    case Token::SHL:
+      // Use only the 5 least significant bits of the shift count.
+      __ and_(r2, r2, Operand(0x1f));
+      __ mov(r2, Operand(r3, LSL, r2));
+      break;
+    default: UNREACHABLE();
+  }
+  // Check that the *signed* result fits in a Smi.
+  __ add(r3, r2, Operand(0x40000000), SetCC);
+  __ b(mi, &result_not_a_smi);
+  __ mov(r0, Operand(r2, LSL, kSmiTagSize));
+  __ Ret();
+
+  Label have_to_allocate, got_a_heap_number;
+  __ bind(&result_not_a_smi);
+  switch (mode_) {
+    case OVERWRITE_RIGHT: {
+      __ tst(r0, Operand(kSmiTagMask));
+      __ b(eq, &have_to_allocate);
+      __ mov(r5, Operand(r0));
+      break;
+    }
+    case OVERWRITE_LEFT: {
+      __ tst(r1, Operand(kSmiTagMask));
+      __ b(eq, &have_to_allocate);
+      __ mov(r5, Operand(r1));
+      break;
+    }
+    case NO_OVERWRITE: {
+      // Get a new heap number in r5.  r6 and r7 are scratch.
+      AllocateHeapNumber(masm, &slow, r5, r6, r7);
+    }
+    default: break;
+  }
+  __ bind(&got_a_heap_number);
+  // r2: Answer as signed int32.
+  // r5: Heap number to write answer into.
+
+  // Nothing can go wrong now, so move the heap number to r0, which is the
+  // result.
+  __ mov(r0, Operand(r5));
+
+  // Tail call that writes the int32 in r2 to the heap number in r0, using
+  // r3 as scratch.  r0 is preserved and returned.
+  WriteInt32ToHeapNumberStub stub(r2, r0, r3);
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+
+  if (mode_ != NO_OVERWRITE) {
+    __ bind(&have_to_allocate);
+    // Get a new heap number in r5.  r6 and r7 are scratch.
+    AllocateHeapNumber(masm, &slow, r5, r6, r7);
+    __ jmp(&got_a_heap_number);
+  }
+
+  // If all else failed then we go to the runtime system.
+  __ bind(&slow);
+  __ push(r1);  // restore stack
+  __ push(r0);
+  __ mov(r0, Operand(1));  // 1 argument (not counting receiver).
+  switch (op_) {
+    case Token::BIT_OR:
+      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
+      break;
+    case Token::BIT_AND:
+      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
+      break;
+    case Token::BIT_XOR:
+      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
+      break;
+    case Token::SAR:
+      __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
+      break;
+    case Token::SHR:
+      __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
+      break;
+    case Token::SHL:
+      __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
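
The Smi-range test used above ("add 0x40000000 and branch on minus") works
because a Smi payload is a signed 31-bit value: adding 2^30 maps exactly the
representable range onto non-negative 32-bit results.  In C terms (sketch):

    #include <stdint.h>

    // A 32-bit value fits in a 31-bit Smi iff value + 2^30 is non-negative.
    static int fits_in_smi(int32_t value) {
      return (int32_t)((uint32_t)value + 0x40000000u) >= 0;
    }
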
 void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
   // r1 : x
   // r0 : y
@@ -4483,7 +4931,6 @@
                               &not_smi,
                               Builtins::ADD,
                               Token::ADD,
-                              assembler::arm::simulator_fp_add,
                               mode_);
       break;
     }
@@ -4503,7 +4950,6 @@
                               &not_smi,
                               Builtins::SUB,
                               Token::SUB,
-                              assembler::arm::simulator_fp_sub,
                               mode_);
       break;
     }
@@ -4532,14 +4978,16 @@
                               &not_smi,
                               Builtins::MUL,
                               Token::MUL,
-                              assembler::arm::simulator_fp_mul,
-                              mode_);
+                              mode_);
       break;
     }
 
     case Token::BIT_OR:
     case Token::BIT_AND:
-    case Token::BIT_XOR: {
+    case Token::BIT_XOR:
+    case Token::SAR:
+    case Token::SHR:
+    case Token::SHL: {
       Label slow;
       ASSERT(kSmiTag == 0);  // adjust code below
       __ tst(r2, Operand(kSmiTagMask));
@@ -4548,84 +4996,47 @@
         case Token::BIT_OR:  __ orr(r0, r0, Operand(r1)); break;
         case Token::BIT_AND: __ and_(r0, r0, Operand(r1)); break;
         case Token::BIT_XOR: __ eor(r0, r0, Operand(r1)); break;
-        default: UNREACHABLE();
-      }
-      __ Ret();
-      __ bind(&slow);
-      __ push(r1);  // restore stack
-      __ push(r0);
-      __ mov(r0, Operand(1));  // 1 argument (not counting receiver).
-      switch (op_) {
-        case Token::BIT_OR:
-          __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
-          break;
-        case Token::BIT_AND:
-          __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
-          break;
-        case Token::BIT_XOR:
-          __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
-          break;
-        default:
-          UNREACHABLE();
-      }
-      break;
-    }
-
-    case Token::SHL:
-    case Token::SHR:
-    case Token::SAR: {
-      Label slow;
-      ASSERT(kSmiTag == 0);  // adjust code below
-      __ tst(r2, Operand(kSmiTagMask));
-      __ b(ne, &slow);
-      // remove tags from operands (but keep sign)
-      __ mov(r3, Operand(r1, ASR, kSmiTagSize));  // x
-      __ mov(r2, Operand(r0, ASR, kSmiTagSize));  // y
-      // use only the 5 least significant bits of the shift count
-      __ and_(r2, r2, Operand(0x1f));
-      // perform operation
-      switch (op_) {
         case Token::SAR:
-          __ mov(r3, Operand(r3, ASR, r2));
-          // no checks of result necessary
+          // Remove tags from right operand.
+          __ mov(r2, Operand(r0, ASR, kSmiTagSize));  // y
+          // Use only the 5 least significant bits of the shift count.
+          __ and_(r2, r2, Operand(0x1f));
+          __ mov(r0, Operand(r1, ASR, r2));
+          // Smi tag result.
+          __ and_(r0, r0, Operand(~kSmiTagMask));
           break;
-
         case Token::SHR:
+          // Remove tags from operands.  We can't do this on a 31 bit number
+          // because then the 0s get shifted into bit 30 instead of bit 31.
+          __ mov(r3, Operand(r1, ASR, kSmiTagSize));  // x
+          __ mov(r2, Operand(r0, ASR, kSmiTagSize));  // y
+          // Use only the 5 least significant bits of the shift count.
+          __ and_(r2, r2, Operand(0x1f));
           __ mov(r3, Operand(r3, LSR, r2));
-          // check that the *unsigned* result fits in a smi
-          // neither of the two high-order bits can be set:
-          // - 0x80000000: high bit would be lost when smi tagging
-          // - 0x40000000: this number would convert to negative when
-          // smi tagging these two cases can only happen with shifts
-          // by 0 or 1 when handed a valid smi
-          __ and_(r2, r3, Operand(0xc0000000), SetCC);
+          // Unsigned shift is not allowed to produce a negative number, so
+          // check the sign bit and the sign bit after Smi tagging.
+          __ tst(r3, Operand(0xc0000000));
           __ b(ne, &slow);
+          // Smi tag result.
+          __ mov(r0, Operand(r3, LSL, kSmiTagSize));
           break;
-
         case Token::SHL:
+          // Remove tags from operands.
+          __ mov(r3, Operand(r1, ASR, kSmiTagSize));  // x
+          __ mov(r2, Operand(r0, ASR, kSmiTagSize));  // y
+          // Use only the 5 least significant bits of the shift count.
+          __ and_(r2, r2, Operand(0x1f));
           __ mov(r3, Operand(r3, LSL, r2));
-          // check that the *signed* result fits in a smi
+          // Check that the signed result fits in a Smi.
           __ add(r2, r3, Operand(0x40000000), SetCC);
           __ b(mi, &slow);
+          __ mov(r0, Operand(r3, LSL, kSmiTagSize));
           break;
-
         default: UNREACHABLE();
       }
-      // tag result and store it in r0
-      ASSERT(kSmiTag == 0);  // adjust code below
-      __ mov(r0, Operand(r3, LSL, kSmiTagSize));
       __ Ret();
-      // slow case
       __ bind(&slow);
-      __ push(r1);  // restore stack
-      __ push(r0);
-      __ mov(r0, Operand(1));  // 1 argument (not counting receiver).
-      switch (op_) {
-        case Token::SAR: __ InvokeBuiltin(Builtins::SAR, JUMP_JS); break;
-        case Token::SHR: __ InvokeBuiltin(Builtins::SHR, JUMP_JS); break;
-        case Token::SHL: __ InvokeBuiltin(Builtins::SHL, JUMP_JS); break;
-        default: UNREACHABLE();
-      }
+      HandleNonSmiBitwiseOp(masm);
       break;
     }
 
@@ -4657,10 +5068,11 @@
   Label undo;
   Label slow;
   Label done;
+  Label not_smi;
 
   // Enter runtime system if the value is not a smi.
   __ tst(r0, Operand(kSmiTagMask));
-  __ b(ne, &slow);
+  __ b(ne, &not_smi);
 
   // Enter runtime system if the value of the expression is zero
   // to make sure that we switch between 0 and -0.
@@ -4672,33 +5084,59 @@
   __ rsb(r1, r0, Operand(0), SetCC);
   __ b(vs, &slow);
 
-  // If result is a smi we are done.
-  __ tst(r1, Operand(kSmiTagMask));
-  __ mov(r0, Operand(r1), LeaveCC, eq);  // conditionally set r0 to result
-  __ b(eq, &done);
+  __ mov(r0, Operand(r1));  // Set r0 to result.
+  __ StubReturn(1);
 
   // Enter runtime system.
   __ bind(&slow);
   __ push(r0);
-  __ mov(r0, Operand(0));  // set number of arguments
+  __ mov(r0, Operand(0));  // Set number of arguments.
   __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
 
   __ bind(&done);
   __ StubReturn(1);
+
+  __ bind(&not_smi);
+  __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
+  __ b(ne, &slow);
+  // r0 is a heap number.  Negate it in place, or into a new heap number in r1.
+  if (overwrite_) {
+    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+    __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
+    __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+  } else {
+    AllocateHeapNumber(masm, &slow, r1, r2, r3);
+    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+    __ str(r2, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
+    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+    __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
+    __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
+    __ mov(r0, Operand(r1));
+  }
+  __ StubReturn(1);
 }
 
 
 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
-  // r0 holds exception
-  ASSERT(StackHandlerConstants::kSize == 6 * kPointerSize);  // adjust this code
+  // r0 holds the exception.
+
+  // Adjust this code if not the case.
+  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+  // Drop the sp to the top of the handler.
   __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
   __ ldr(sp, MemOperand(r3));
-  __ pop(r2);  // pop next in chain
+
+  // Restore the next handler and frame pointer, discard handler state.
+  ASSERT(StackHandlerConstants::kNextOffset == 0);
+  __ pop(r2);
   __ str(r2, MemOperand(r3));
-  // restore parameter- and frame-pointer and pop state.
-  __ ldm(ia_w, sp, r3.bit() | pp.bit() | fp.bit());
-  // Before returning we restore the context from the frame pointer if not NULL.
-  // The frame pointer is NULL in the exception handler of a JS entry frame.
+  ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+  __ ldm(ia_w, sp, r3.bit() | fp.bit());  // r3: discarded state.
+
+  // Before returning we restore the context from the frame pointer if
+  // not NULL.  The frame pointer is NULL in the exception handler of a
+  // JS entry frame.
   __ cmp(fp, Operand(0));
   // Set cp to NULL if fp is NULL.
   __ mov(cp, Operand(0), LeaveCC, eq);
@@ -4709,39 +5147,41 @@
     __ mov(lr, Operand(pc));
   }
 #endif
+  ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
   __ pop(pc);
 }
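
The asserts in this function and the next pin down the new four-word handler
layout (the old six-word layout also stored pp).  As a struct sketch for
illustration; the state offset at word 1 is inferred from the remaining slot,
the others are asserted explicitly:

    struct StackHandlerLayout {  // hypothetical, for illustration only
      void* next;   // kNextOffset  == 0 * kPointerSize, popped first
      int   state;  // e.g. StackHandler::ENTRY, at 1 * kPointerSize
      void* fp;     // kFPOffset    == 2 * kPointerSize
      void* pc;     // kPCOffset    == 3 * kPointerSize
    };              // kSize == 4 * kPointerSize
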
 
 
 void CEntryStub::GenerateThrowOutOfMemory(MacroAssembler* masm) {
-  // Fetch top stack handler.
+  // Adjust this code if not the case.
+  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+  // Drop sp to the top stack handler.
   __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
-  __ ldr(r3, MemOperand(r3));
+  __ ldr(sp, MemOperand(r3));
 
   // Unwind the handlers until the ENTRY handler is found.
   Label loop, done;
   __ bind(&loop);
   // Load the type of the current stack handler.
-  const int kStateOffset = StackHandlerConstants::kAddressDisplacement +
-      StackHandlerConstants::kStateOffset;
-  __ ldr(r2, MemOperand(r3, kStateOffset));
+  const int kStateOffset = StackHandlerConstants::kStateOffset;
+  __ ldr(r2, MemOperand(sp, kStateOffset));
   __ cmp(r2, Operand(StackHandler::ENTRY));
   __ b(eq, &done);
   // Fetch the next handler in the list.
-  const int kNextOffset =  StackHandlerConstants::kAddressDisplacement +
-      StackHandlerConstants::kNextOffset;
-  __ ldr(r3, MemOperand(r3, kNextOffset));
+  const int kNextOffset = StackHandlerConstants::kNextOffset;
+  __ ldr(sp, MemOperand(sp, kNextOffset));
   __ jmp(&loop);
   __ bind(&done);
 
   // Set the top handler address to next handler past the current ENTRY handler.
-  __ ldr(r0, MemOperand(r3, kNextOffset));
-  __ mov(r2, Operand(ExternalReference(Top::k_handler_address)));
-  __ str(r0, MemOperand(r2));
+  ASSERT(StackHandlerConstants::kNextOffset == 0);
+  __ pop(r0);
+  __ str(r0, MemOperand(r3));
 
   // Set external caught exception to false.
-  __ mov(r0, Operand(false));
   ExternalReference external_caught(Top::k_external_caught_exception_address);
+  __ mov(r0, Operand(false));
   __ mov(r2, Operand(external_caught));
   __ str(r0, MemOperand(r2));
 
@@ -4751,21 +5191,17 @@
   __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
   __ str(r0, MemOperand(r2));
 
-  // Restore the stack to the address of the ENTRY handler
-  __ mov(sp, Operand(r3));
+  // Stack layout at this point. See also StackHandlerConstants.
+  // sp ->   state (ENTRY)
+  //         fp
+  //         lr
 
-  // Stack layout at this point. See also PushTryHandler
-  // r3, sp ->   next handler
-  //             state (ENTRY)
-  //             pp
-  //             fp
-  //             lr
-
-  // Discard ENTRY state (r2 is not used), and restore parameter-
-  // and frame-pointer and pop state.
-  __ ldm(ia_w, sp, r2.bit() | r3.bit() | pp.bit() | fp.bit());
-  // Before returning we restore the context from the frame pointer if not NULL.
-  // The frame pointer is NULL in the exception handler of a JS entry frame.
+  // Discard handler state (r2 is not used) and restore frame pointer.
+  ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+  __ ldm(ia_w, sp, r2.bit() | fp.bit());  // r2: discarded state.
+  // Before returning we restore the context from the frame pointer if
+  // not NULL.  The frame pointer is NULL in the exception handler of a
+  // JS entry frame.
   __ cmp(fp, Operand(0));
   // Set cp to NULL if fp is NULL.
   __ mov(cp, Operand(0), LeaveCC, eq);
@@ -4776,6 +5212,7 @@
     __ mov(lr, Operand(pc));
   }
 #endif
+  ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
   __ pop(pc);
 }
 
@@ -4793,7 +5230,8 @@
 
   if (do_gc) {
     // Passing r0.
-    __ Call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
+    ExternalReference gc_reference = ExternalReference::perform_gc_function();
+    __ Call(gc_reference.address(), RelocInfo::RUNTIME_ENTRY);
   }
 
   ExternalReference scope_depth =
@@ -4819,12 +5257,7 @@
   // sequence that it is not moving ever.
   __ add(lr, pc, Operand(4));  // compute return address: (pc + 8) + 4
   __ push(lr);
-#if !defined(__arm__)
-  // Notify the simulator of the transition to C code.
-  __ swi(assembler::arm::call_rt_r5);
-#else /* !defined(__arm__) */
   __ Jump(r5);
-#endif /* !defined(__arm__) */
 
   if (always_allocate) {
     // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
@@ -4847,7 +5280,6 @@
   // r0:r1: result
   // sp: stack pointer
   // fp: frame pointer
-  // pp: caller's parameter pointer pp  (restored as C callee-saved)
   __ LeaveExitFrame(frame_type);
 
   // check if we should retry or throw exception
@@ -4887,9 +5319,8 @@
   // r0: number of arguments including receiver
   // r1: pointer to builtin function
   // fp: frame pointer  (restored after C call)
-  // sp: stack pointer  (restored as callee's pp after C call)
+  // sp: stack pointer  (restored as callee's sp after C call)
   // cp: current context  (C callee-saved)
-  // pp: caller's parameter pointer pp  (C callee-saved)
 
   // NOTE: Invocations of builtins may return failure objects
   // instead of a proper result. The builtin entry handles
@@ -4960,7 +5391,7 @@
 
   // Called from C, so do not pop argc and args on exit (preserve sp)
   // No need to save register-passed args
-  // Save callee-saved registers (incl. cp, pp, and fp), sp, and lr
+  // Save callee-saved registers (incl. cp and fp), sp, and lr
   __ stm(db_w, sp, kCalleeSaved | lr.bit());
 
   // Get address of argv, see stm above.
@@ -5004,10 +5435,10 @@
   __ bind(&invoke);
   // Must preserve r0-r4, r5-r7 are available.
   __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
-  // If an exception not caught by another handler occurs, this handler returns
-  // control to the code after the bl(&invoke) above, which restores all
-  // kCalleeSaved registers (including cp, pp and fp) to their saved values
-  // before returning a failure to C.
+  // If an exception not caught by another handler occurs, this handler
+  // returns control to the code after the bl(&invoke) above, which
+  // restores all kCalleeSaved registers (including cp and fp) to their
+  // saved values before returning a failure to C.
 
   // Clear any pending exceptions.
   __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
@@ -5070,6 +5501,66 @@
 }
 
 
+// This stub performs an instanceof, calling the builtin function if
+// necessary.  Uses r1 for the object, r0 for the function that it may
+// be an instance of (these are fetched from the stack).
+void InstanceofStub::Generate(MacroAssembler* masm) {
+  // Get the object - slow case for smis (we may need to throw an exception
+  // depending on the rhs).
+  Label slow, loop, is_instance, is_not_instance;
+  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
+  __ BranchOnSmi(r0, &slow);
+
+  // Check that the left hand side is a JS object and put its map in r3.
+  __ CompareObjectType(r0, r3, r2, FIRST_JS_OBJECT_TYPE);
+  __ b(lt, &slow);
+  __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE));
+  __ b(gt, &slow);
+
+  // Get the prototype of the function (r4 is result, r2 is scratch).
+  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
+  __ TryGetFunctionPrototype(r1, r4, r2, &slow);
+
+  // Check that the function prototype is a JS object.
+  __ BranchOnSmi(r4, &slow);
+  __ CompareObjectType(r4, r5, r5, FIRST_JS_OBJECT_TYPE);
+  __ b(lt, &slow);
+  __ cmp(r5, Operand(LAST_JS_OBJECT_TYPE));
+  __ b(gt, &slow);
+
+  // Register mapping: r3 is object map and r4 is function prototype.
+  // Get prototype of object into r2.
+  __ ldr(r2, FieldMemOperand(r3, Map::kPrototypeOffset));
+
+  // Loop through the prototype chain looking for the function prototype.
+  __ bind(&loop);
+  __ cmp(r2, Operand(r4));
+  __ b(eq, &is_instance);
+  __ cmp(r2, Operand(Factory::null_value()));
+  __ b(eq, &is_not_instance);
+  __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+  __ ldr(r2, FieldMemOperand(r2, Map::kPrototypeOffset));
+  __ jmp(&loop);
+
+  __ bind(&is_instance);
+  __ mov(r0, Operand(Smi::FromInt(0)));
+  __ pop();
+  __ pop();
+  __ mov(pc, Operand(lr));  // Return.
+
+  __ bind(&is_not_instance);
+  __ mov(r0, Operand(Smi::FromInt(1)));
+  __ pop();
+  __ pop();
+  __ mov(pc, Operand(lr));  // Return.
+
+  // Slow-case.  Tail call builtin.
+  __ bind(&slow);
+  __ mov(r0, Operand(1));  // Arg count without receiver.
+  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
+}
+
+
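
The loop in InstanceofStub is the standard prototype-chain walk.  In C++-style
pseudocode with hypothetical accessors standing in for the FieldMemOperand
loads (0 means "is an instance", mirroring the Smi the stub returns in r0):

    int IsInstanceOf(JSObject* object, Object* expected_prototype) {
      for (Object* proto = object->map()->prototype();
           !proto->IsNull();
           proto = HeapObject::cast(proto)->map()->prototype()) {
        if (proto == expected_prototype) return 0;  // is_instance
      }
      return 1;  // is_not_instance
    }
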
 void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
   // Check if the calling frame is an arguments adaptor frame.
   Label adaptor;
@@ -5098,8 +5589,7 @@
 
   // Check that the key is a smi.
   Label slow;
-  __ tst(r1, Operand(kSmiTagMask));
-  __ b(ne, &slow);
+  __ BranchOnNotSmi(r1, &slow);
 
   // Check if the calling frame is an arguments adaptor frame.
   Label adaptor;
@@ -5171,12 +5661,9 @@
 
   // Check that the function is really a JavaScript function.
   // r1: pushed function (to be verified)
-  __ tst(r1, Operand(kSmiTagMask));
-  __ b(eq, &slow);
+  __ BranchOnSmi(r1, &slow);
   // Get the map of the function object.
-  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-  __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
-  __ cmp(r2, Operand(JS_FUNCTION_TYPE));
+  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
   __ b(ne, &slow);
 
   // Fast-case: Invoke the function now.
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index a8cb777..0df793a 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -349,6 +349,15 @@
 
   void GenerateLog(ZoneList<Expression*>* args);
 
+  // Fast support for Math.random().
+  void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
+
+  // Fast support for Math.sin and Math.cos.
+  enum MathOp { SIN, COS };
+  void GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args);
+  inline void GenerateMathSin(ZoneList<Expression*>* args);
+  inline void GenerateMathCos(ZoneList<Expression*>* args);
+
   // Methods and constants for fast case switch statement support.
   //
   // Only allow fast-case switch if the range of labels is at most
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index 99eab23..d5f967f 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,6 +28,14 @@
 #ifndef V8_ARM_CONSTANTS_ARM_H_
 #define V8_ARM_CONSTANTS_ARM_H_
 
+// The simulator emulates the EABI so we define the USE_ARM_EABI macro if we
+// are not running on real ARM hardware.  One reason for this is that the
+// old ABI uses fp registers in the calling convention and the simulator does
+// not simulate fp registers or coprocessor instructions.
+#if defined(__ARM_EABI__) || !defined(__arm__)
+# define USE_ARM_EABI 1
+#endif
+
 namespace assembler {
 namespace arm {
 
@@ -104,15 +112,9 @@
 // simulator.
 enum SoftwareInterruptCodes {
   // transition to C code
-  call_rt_r5 = 0x10,
-  call_rt_r2 = 0x11,
+  call_rt_redirected = 0x10,
   // break point
-  break_point = 0x20,
-  // FP operations.  These simulate calling into C for a moment to do fp ops.
-  // They should trash all caller-save registers.
-  simulator_fp_add = 0x21,
-  simulator_fp_sub = 0x22,
-  simulator_fp_mul = 0x23
+  break_point = 0x20
 };
 
 
diff --git a/src/arm/cpu-arm.cc b/src/arm/cpu-arm.cc
index 71da1ec..cafefce 100644
--- a/src/arm/cpu-arm.cc
+++ b/src/arm/cpu-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -46,6 +46,8 @@
 #if !defined (__arm__)
   // Not generating ARM instructions for C-code. This means that we are
   // building an ARM emulator based target. No I$ flushes are necessary.
+  // None of this code ends up in the snapshot so there are no issues
+  // around whether or not to generate the code when building snapshots.
 #else
   // Ideally, we would call
   //   syscall(__ARM_NR_cacheflush, start,
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index f56a599..8083ce3 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Copyright 2007-2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -253,24 +253,12 @@
 // the FormatOption method.
 void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes swi) {
   switch (swi) {
-    case call_rt_r5:
-      Print("call_rt_r5");
-      return;
-    case call_rt_r2:
-      Print("call_rt_r2");
+    case call_rt_redirected:
+      Print("call_rt_redirected");
       return;
     case break_point:
       Print("break_point");
       return;
-    case simulator_fp_add:
-      Print("simulator_fp_add");
-      return;
-    case simulator_fp_mul:
-      Print("simulator_fp_mul");
-      return;
-    case simulator_fp_sub:
-      Print("simulator_fp_sub");
-      return;
     default:
       out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
                                            "%d",
diff --git a/src/arm/frames-arm.h b/src/arm/frames-arm.h
index a67b18a..0874c09 100644
--- a/src/arm/frames-arm.h
+++ b/src/arm/frames-arm.h
@@ -68,7 +68,7 @@
   1 <<  8 |  //  r8 v5 (cp in JavaScript code)
   kR9Available
     <<  9 |  //  r9 v6
-  1 << 10 |  // r10 v7 (pp in JavaScript code)
+  1 << 10 |  // r10 v7
   1 << 11;   // r11 v8 (fp in JavaScript code)
 
 static const int kNumCalleeSaved = 7 + kR9Available;
@@ -79,15 +79,11 @@
 
 class StackHandlerConstants : public AllStatic {
  public:
-  // TODO(1233780): Get rid of the code slot in stack handlers.
-  static const int kCodeOffset  = 0 * kPointerSize;
-  static const int kNextOffset  = 1 * kPointerSize;
-  static const int kStateOffset = 2 * kPointerSize;
-  static const int kPPOffset    = 3 * kPointerSize;
-  static const int kFPOffset    = 4 * kPointerSize;
-  static const int kPCOffset    = 5 * kPointerSize;
+  static const int kNextOffset  = 0 * kPointerSize;
+  static const int kStateOffset = 1 * kPointerSize;
+  static const int kFPOffset    = 2 * kPointerSize;
+  static const int kPCOffset    = 3 * kPointerSize;
 
-  static const int kAddressDisplacement = -1 * kPointerSize;
   static const int kSize = kPCOffset + kPointerSize;
 };
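Read back as a record, the slimmed-down handler is four words with the next-handler link at the lowest address. A hedged illustration using stand-in types, not actual V8 declarations:

#include <cstdint>

struct StackHandlerRecord {
  uintptr_t next;   // kNextOffset  == 0 * kPointerSize
  uintptr_t state;  // kStateOffset == 1 * kPointerSize
  uintptr_t fp;     // kFPOffset    == 2 * kPointerSize
  uintptr_t pc;     // kPCOffset    == 3 * kPointerSize
};                  // sizeof(StackHandlerRecord) == kSize

Dropping the code slot and the parameter pointer is what lets PushTryHandler (later in this patch) emit a single stm for state, fp and lr.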
 
@@ -108,14 +104,14 @@
 
   static const int kSavedRegistersOffset = 0 * kPointerSize;
 
-  // Let the parameters pointer for exit frames point just below the
-  // frame structure on the stack.
-  static const int kPPDisplacement = 3 * kPointerSize;
-
   // The caller fields are below the frame pointer on the stack.
   static const int kCallerFPOffset = +0 * kPointerSize;
-  static const int kCallerPPOffset = +1 * kPointerSize;
+  // The calling JS function is between FP and PC.
   static const int kCallerPCOffset = +2 * kPointerSize;
+
+  // FP-relative displacement of the caller's SP.  It points just
+  // below the saved PC.
+  static const int kCallerSPDisplacement = +3 * kPointerSize;
 };
 
 
@@ -137,7 +133,7 @@
   static const int kSavedRegistersOffset = +2 * kPointerSize;
   static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
 
-  // PP-relative.
+  // Caller SP-relative.
   static const int kParam0Offset   = -2 * kPointerSize;
   static const int kReceiverOffset = -1 * kPointerSize;
 };
@@ -161,220 +157,6 @@
 }
 
 
-// ----------------------------------------------------
-
-
-
-
-  //    lower    |    Stack    |
-  //  addresses  |      ^      |
-  //             |      |      |
-  //             |             |
-  //             |  JS frame   |
-  //             |             |
-  //             |             |
-  // ----------- +=============+ <--- sp (stack pointer)
-  //             |  function   |
-  //             +-------------+
-  //             +-------------+
-  //             |             |
-  //             | expressions |
-  //             |             |
-  //             +-------------+
-  //             |             |
-  //      a      |   locals    |
-  //      c      |             |
-  //      t      +- - - - - - -+ <---
-  //      i   -4 |   local0    |   ^
-  //      v      +-------------+   |
-  //      a   -3 |    code     |   |
-  //      t      +-------------+   | kLocal0Offset
-  //      i   -2 |   context   |   |
-  //      o      +-------------+   |
-  //      n   -1 | args_length |   v
-  //             +-------------+ <--- fp (frame pointer)
-  //           0 |  caller_pp  |
-  //      f      +-------------+
-  //      r    1 |  caller_fp  |
-  //      a      +-------------+
-  //      m    2 |  sp_on_exit |  (pp if return, caller_sp if no return)
-  //      e      +-------------+
-  //           3 |  caller_pc  |
-  //             +-------------+ <--- caller_sp (incl. parameters)
-  //             |             |
-  //             | parameters  |
-  //             |             |
-  //             +- - - - - - -+ <---
-  //          -2 | parameter0  |   ^
-  //             +-------------+   | kParam0Offset
-  //          -1 |  receiver   |   v
-  // ----------- +=============+ <--- pp (parameter pointer, r10)
-  //           0 |  function   |
-  //             +-------------+
-  //             |             |
-  //             |caller-saved |  (must be valid JS values, traversed during GC)
-  //             |    regs     |
-  //             |             |
-  //             +-------------+
-  //             |             |
-  //             |   caller    |
-  //   higher    | expressions |
-  //  addresses  |             |
-  //             |             |
-  //             |  JS frame   |
-
-
-
-  // Handler frames (part of expressions of JS frames):
-
-  //    lower    |    Stack    |
-  //  addresses  |      ^      |
-  //             |      |      |
-  //             |             |
-  //      h      | expressions |
-  //      a      |             |
-  //      n      +-------------+
-  //      d   -1 |    code     |
-  //      l      +-------------+ <--- handler sp
-  //      e    0 |   next_sp   |  link to next handler (next handler's sp)
-  //      r      +-------------+
-  //           1 |    state    |
-  //      f      +-------------+
-  //      r    2 |     pp      |
-  //      a      +-------------+
-  //      m    3 |     fp      |
-  //      e      +-------------+
-  //           4 |     pc      |
-  //             +-------------+
-  //             |             |
-  //   higher    | expressions |
-  //  addresses  |             |
-
-
-
-  // JS entry frames: When calling from C to JS, we construct two extra
-  // frames: An entry frame (C) and a trampoline frame (JS). The
-  // following pictures shows the two frames:
-
-  //    lower    |    Stack    |
-  //  addresses  |      ^      |
-  //             |      |      |
-  //             |             |
-  //             |  JS frame   |
-  //             |             |
-  //             |             |
-  // ----------- +=============+ <--- sp (stack pointer)
-  //             |             |
-  //             | parameters  |
-  //      t      |             |
-  //      r      +- - - - - - -+
-  //      a      | parameter0  |
-  //      m      +-------------+
-  //      p      |  receiver   |
-  //      o      +-------------+
-  //      l      |  function   |
-  //      i      +-------------+
-  //      n   -3 |    code     |
-  //      e      +-------------+
-  //          -2 |    NULL     |  context is always NULL
-  //             +-------------+
-  //      f   -1 |      0      |  args_length is always zero
-  //      r      +-------------+ <--- fp (frame pointer)
-  //      a    0 |    NULL     |  caller pp is always NULL for entries
-  //      m      +-------------+
-  //      e    1 |  caller_fp  |
-  //             +-------------+
-  //           2 |  sp_on_exit |  (caller_sp)
-  //             +-------------+
-  //           3 |  caller_pc  |
-  // ----------- +=============+ <--- caller_sp == pp
-  //                    .          ^
-  //                    .          |  try-handler, fake, not GC'ed
-  //                    .          v
-  //             +-------------+ <---
-  //          -2 | next top pp |
-  //             +-------------+
-  //          -1 | next top fp |
-  //             +-------------+ <--- fp
-  //             |     r4      |  r4-r9 holding non-JS values must be preserved
-  //             +-------------+
-  //      J      |     r5      |  before being initialized not to confuse GC
-  //      S      +-------------+
-  //             |     r6      |
-  //             +-------------+
-  //      e      |     r7      |
-  //      n      +-------------+
-  //      t      |     r8      |
-  //      r      +-------------+
-  //      y    [ |     r9      | ]  only if r9 available
-  //             +-------------+
-  //             |     r10     |
-  //      f      +-------------+
-  //      r      |     r11     |
-  //      a      +-------------+
-  //      m      |  caller_sp  |
-  //      e      +-------------+
-  //             |  caller_pc  |
-  //             +-------------+ <--- caller_sp
-  //             |    argv     |    passed on stack from C code
-  //             +-------------+
-  //             |             |
-  //   higher    |             |
-  //  addresses  |   C frame   |
-
-
-  // The first 4 args are passed from C in r0-r3 and are not spilled on entry:
-  // r0: code entry
-  // r1: function
-  // r2: receiver
-  // r3: argc
-  // [sp+0]: argv
-
-
-  // C entry frames: When calling from JS to C, we construct one extra
-  // frame:
-
-  //    lower    |    Stack    |
-  //  addresses  |      ^      |
-  //             |      |      |
-  //             |             |
-  //             |   C frame   |
-  //             |             |
-  //             |             |
-  // ----------- +=============+ <--- sp (stack pointer)
-  //             |             |
-  //             | parameters  |  (first 4 args are passed in r0-r3)
-  //             |             |
-  //             +-------------+ <--- fp (frame pointer)
-  //      f  4/5 |  caller_fp  |
-  //      r      +-------------+
-  //      a  5/6 |  sp_on_exit |  (pp)
-  //      m      +-------------+
-  //      e  6/7 |  caller_pc  |
-  //             +-------------+ <--- caller_sp (incl. parameters)
-  //         7/8 |             |
-  //             | parameters  |
-  //             |             |
-  //             +- - - - - - -+ <---
-  //          -2 | parameter0  |   ^
-  //             +-------------+   | kParam0Offset
-  //          -1 |  receiver   |   v
-  // ----------- +=============+ <--- pp (parameter pointer, r10)
-  //           0 |  function   |
-  //             +-------------+
-  //             |             |
-  //             |caller-saved |
-  //             |    regs     |
-  //             |             |
-  //             +-------------+
-  //             |             |
-  //             |   caller    |
-  //             | expressions |
-  //             |             |
-  //   higher    |             |
-  //  addresses  |  JS frame   |
-
-
 } }  // namespace v8::internal
 
 #endif  // V8_ARM_FRAMES_ARM_H_
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 9b45c46..8b4e087 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -223,9 +223,7 @@
   // Check for number.
   __ tst(r1, Operand(kSmiTagMask));
   __ b(eq, &number);
-  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
-  __ ldrb(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
-  __ cmp(r3, Operand(HEAP_NUMBER_TYPE));
+  __ CompareObjectType(r1, r3, r3, HEAP_NUMBER_TYPE);
   __ b(ne, &non_number);
   __ bind(&number);
   StubCompiler::GenerateLoadGlobalFunctionPrototype(
@@ -272,9 +270,7 @@
   __ b(eq, miss);
 
   // Check that the value is a JSFunction.
-  __ ldr(r0, FieldMemOperand(r1, HeapObject::kMapOffset));
-  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
-  __ cmp(r0, Operand(JS_FUNCTION_TYPE));
+  __ CompareObjectType(r1, r0, r0, JS_FUNCTION_TYPE);
   __ b(ne, miss);
 
   // Check that the function has been loaded.
@@ -312,10 +308,8 @@
   __ tst(r1, Operand(kSmiTagMask));
   __ b(eq, &miss);
 
-  // Check that the receiver is a valid JS object.
-  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
-  __ ldrb(r0, FieldMemOperand(r3, Map::kInstanceTypeOffset));
-  __ cmp(r0, Operand(FIRST_JS_OBJECT_TYPE));
+  // Check that the receiver is a valid JS object.  Put the map in r3.
+  __ CompareObjectType(r1, r3, r0, FIRST_JS_OBJECT_TYPE);
   __ b(lt, &miss);
 
   // If this assert fails, we have to check upper bound too.
@@ -392,9 +386,7 @@
   __ ldr(r2, MemOperand(sp, argc * kPointerSize));  // receiver
   __ tst(r2, Operand(kSmiTagMask));
   __ b(eq, &invoke);
-  __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
-  __ ldrb(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
-  __ cmp(r3, Operand(JS_GLOBAL_OBJECT_TYPE));
+  __ CompareObjectType(r2, r3, r3, JS_GLOBAL_OBJECT_TYPE);
   __ b(eq, &global);
   __ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE));
   __ b(ne, &invoke);
@@ -447,10 +439,8 @@
   __ tst(r0, Operand(kSmiTagMask));
   __ b(eq, &miss);
 
-  // Check that the receiver is a valid JS object.
-  __ ldr(r3, FieldMemOperand(r0, HeapObject::kMapOffset));
-  __ ldrb(r1, FieldMemOperand(r3, Map::kInstanceTypeOffset));
-  __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
+  // Check that the receiver is a valid JS object.  Put the map in r3.
+  __ CompareObjectType(r0, r3, r1, FIRST_JS_OBJECT_TYPE);
   __ b(lt, &miss);
   // If this assert fails, we have to check upper bound too.
   ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
@@ -513,6 +503,12 @@
   return false;
 }
 
+void KeyedStoreIC::ClearInlinedVersion(Address address) {}
+void KeyedStoreIC::RestoreInlinedVersion(Address address) {}
+bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
+  return false;
+}
+
 
 Object* KeyedLoadIC_Miss(Arguments args);
 
diff --git a/src/arm/jump-target-arm.cc b/src/arm/jump-target-arm.cc
index 65e7eaf..a925c51 100644
--- a/src/arm/jump-target-arm.cc
+++ b/src/arm/jump-target-arm.cc
@@ -149,7 +149,7 @@
 }
 
 
-void JumpTarget::DoBind(int mergable_elements) {
+void JumpTarget::DoBind() {
   ASSERT(!is_bound());
 
   // Live non-frame registers are not allowed at the start of a basic
@@ -207,7 +207,7 @@
 
   // Compute the frame to use for entry to the block.
   if (entry_frame_ == NULL) {
-    ComputeEntryFrame(mergable_elements);
+    ComputeEntryFrame();
   }
 
   // Some moves required to merge to an expected frame require purely
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 4e24063..897b5a7 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -35,11 +35,6 @@
 namespace v8 {
 namespace internal {
 
-// Give alias names to registers
-Register cp = {  8 };  // JavaScript context pointer
-Register pp = { 10 };  // parameter pointer
-
-
 MacroAssembler::MacroAssembler(void* buffer, int size)
     : Assembler(buffer, size),
       unresolved_(0),
@@ -128,26 +123,10 @@
 
 void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
                           Condition cond) {
-#if !defined(__arm__)
-  if (rmode == RelocInfo::RUNTIME_ENTRY) {
-    mov(r2, Operand(target, rmode), LeaveCC, cond);
-    // Set lr for return at current pc + 8.
-    mov(lr, Operand(pc), LeaveCC, cond);
-    // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
-    // Notify the simulator of the transition to C code.
-    swi(assembler::arm::call_rt_r2);
-  } else {
-    // set lr for return at current pc + 8
-    mov(lr, Operand(pc), LeaveCC, cond);
-    // emit a ldr<cond> pc, [pc + offset of target in constant pool]
-    mov(pc, Operand(target, rmode), LeaveCC, cond);
-  }
-#else
   // Set lr for return at current pc + 8.
   mov(lr, Operand(pc), LeaveCC, cond);
   // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
   mov(pc, Operand(target, rmode), LeaveCC, cond);
-#endif  // !defined(__arm__)
   // If USE_BLX is defined, we could emit a 'mov ip, target', followed by a
   // 'blx ip'; however, the code would not be shorter than the above sequence
   // and the target address of the call would be referenced by the first
@@ -301,8 +280,8 @@
   add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
   sub(r6, r6, Operand(kPointerSize));
 
-  // Compute parameter pointer before making changes and save it as ip
-  // register so that it is restored as sp register on exit, thereby
+  // Compute callee's stack pointer before making changes and save it as
+  // ip register so that it is restored as sp register on exit, thereby
   // popping the args.
 
   // ip = sp + kPointerSize * #args;
@@ -573,41 +552,48 @@
 }
 #endif
 
+
 void MacroAssembler::PushTryHandler(CodeLocation try_location,
                                     HandlerType type) {
-  ASSERT(StackHandlerConstants::kSize == 6 * kPointerSize);  // adjust this code
+  // Adjust this code if not the case.
+  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
   // The pc (return address) is passed in register lr.
   if (try_location == IN_JAVASCRIPT) {
-    stm(db_w, sp, pp.bit() | fp.bit() | lr.bit());
     if (type == TRY_CATCH_HANDLER) {
       mov(r3, Operand(StackHandler::TRY_CATCH));
     } else {
       mov(r3, Operand(StackHandler::TRY_FINALLY));
     }
-    push(r3);  // state
+    ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
+           && StackHandlerConstants::kFPOffset == 2 * kPointerSize
+           && StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+    stm(db_w, sp, r3.bit() | fp.bit() | lr.bit());
+    // Save the current handler as the next handler.
     mov(r3, Operand(ExternalReference(Top::k_handler_address)));
     ldr(r1, MemOperand(r3));
-    push(r1);  // next sp
-    str(sp, MemOperand(r3));  // chain handler
-    mov(r0, Operand(Smi::FromInt(StackHandler::kCodeNotPresent)));  // new TOS
-    push(r0);
+    ASSERT(StackHandlerConstants::kNextOffset == 0);
+    push(r1);
+    // Link this handler as the new current one.
+    str(sp, MemOperand(r3));
   } else {
     // Must preserve r0-r4, r5-r7 are available.
     ASSERT(try_location == IN_JS_ENTRY);
-    // The parameter pointer is meaningless here and fp does not point to a JS
-    // frame. So we save NULL for both pp and fp. We expect the code throwing an
-    // exception to check fp before dereferencing it to restore the context.
-    mov(pp, Operand(0));  // set pp to NULL
-    mov(ip, Operand(0));  // to save a NULL fp
-    stm(db_w, sp, pp.bit() | ip.bit() | lr.bit());
+    // The frame pointer does not point to a JS frame so we save NULL
+    // for fp. We expect the code throwing an exception to check fp
+    // before dereferencing it to restore the context.
+    mov(ip, Operand(0));  // To save a NULL frame pointer.
     mov(r6, Operand(StackHandler::ENTRY));
-    push(r6);  // state
+    ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize
+           && StackHandlerConstants::kFPOffset == 2 * kPointerSize
+           && StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+    stm(db_w, sp, r6.bit() | ip.bit() | lr.bit());
+    // Save the current handler as the next handler.
     mov(r7, Operand(ExternalReference(Top::k_handler_address)));
     ldr(r6, MemOperand(r7));
-    push(r6);  // next sp
-    str(sp, MemOperand(r7));  // chain handler
-    mov(r5, Operand(Smi::FromInt(StackHandler::kCodeNotPresent)));  // new TOS
-    push(r5);  // flush TOS
+    ASSERT(StackHandlerConstants::kNextOffset == 0);
+    push(r6);
+    // Link this handler as the new current one.
+    str(sp, MemOperand(r7));
   }
 }
 
@@ -759,6 +745,62 @@
 }
 
 
+void MacroAssembler::CompareObjectType(Register function,
+                                       Register map,
+                                       Register type_reg,
+                                       InstanceType type) {
+  ldr(map, FieldMemOperand(function, HeapObject::kMapOffset));
+  ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  cmp(type_reg, Operand(type));
+}
+
+
+void MacroAssembler::TryGetFunctionPrototype(Register function,
+                                             Register result,
+                                             Register scratch,
+                                             Label* miss) {
+  // Check that the receiver isn't a smi.
+  BranchOnSmi(function, miss);
+
+  // Check that the function really is a function.  Load map into result reg.
+  CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
+  b(ne, miss);
+
+  // Make sure that the function has an instance prototype.
+  Label non_instance;
+  ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
+  tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
+  b(ne, &non_instance);
+
+  // Get the prototype or initial map from the function.
+  ldr(result,
+      FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+  // If the prototype or initial map is the hole, don't return it and
+  // simply miss the cache instead. This will allow us to allocate a
+  // prototype object on-demand in the runtime system.
+  cmp(result, Operand(Factory::the_hole_value()));
+  b(eq, miss);
+
+  // If the function does not have an initial map, we're done.
+  Label done;
+  CompareObjectType(result, scratch, scratch, MAP_TYPE);
+  b(ne, &done);
+
+  // Get the prototype from the initial map.
+  ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
+  jmp(&done);
+
+  // Non-instance prototype: Fetch prototype from constructor field
+  // in initial map.
+  bind(&non_instance);
+  ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
+
+  // All done.
+  bind(&done);
+}
+
+
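The lookup order above has three outcomes: the non-instance case reads the constructor out of the map; otherwise the prototype-or-initial-map slot either is the prototype itself or is an initial map whose prototype field holds it (the hole check simply misses). A hedged C++ sketch with illustrative field names:

struct Map {
  bool has_non_instance_prototype;  // Map::kBitFieldOffset bit
  void* constructor;                // Map::kConstructorOffset
  void* prototype;                  // Map::kPrototypeOffset
};
struct Function {
  Map* map;
  void* prototype_or_initial_map;   // JSFunction::kPrototypeOrInitialMapOffset
  bool slot_holds_initial_map;      // stands in for the MAP_TYPE check
};

void* GetFunctionPrototype(Function* f) {
  if (f->map->has_non_instance_prototype) return f->map->constructor;
  if (f->slot_holds_initial_map)
    return static_cast<Map*>(f->prototype_or_initial_map)->prototype;
  return f->prototype_or_initial_map;
}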
 void MacroAssembler::CallStub(CodeStub* stub) {
   ASSERT(allow_stub_calls());  // stub calls are not allowed in some stubs
   Call(stub->GetCode(), RelocInfo::CODE_TARGET);
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 27eeab2..ab74805 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -35,8 +35,7 @@
 
 
 // Give alias names to registers
-extern Register cp;  // JavaScript context pointer
-extern Register pp;  // parameter pointer
+const Register cp = { 8 };  // JavaScript context pointer
 
 
 // Helper types to make boolean flag easier to read at call-site.
@@ -187,6 +186,38 @@
   // ---------------------------------------------------------------------------
   // Support functions.
 
+  // Try to get function prototype of a function and puts the value in
+  // the result register. Checks that the function really is a
+  // function and jumps to the miss label if the fast checks fail. The
+  // function register will be untouched; the other registers may be
+  // clobbered.
+  void TryGetFunctionPrototype(Register function,
+                               Register result,
+                               Register scratch,
+                               Label* miss);
+
+  // Compare object type for heap object.  heap_object contains a non-Smi
+  // whose object type should be compared with the given type.  This both
+  // sets the flags and leaves the object type in the type_reg register.
+  // It leaves the map in the map register (unless the type_reg and map register
+  // are the same register).  It leaves the heap object in the heap_object
+  // register unless the heap_object register is the same register as one of the
+  // other registers.
+  void CompareObjectType(Register heap_object,
+                         Register map,
+                         Register type_reg,
+                         InstanceType type);
+
+  inline void BranchOnSmi(Register value, Label* smi_label) {
+    tst(value, Operand(kSmiTagMask));
+    b(eq, smi_label);
+  }
+
+  inline void BranchOnNotSmi(Register value, Label* not_smi_label) {
+    tst(value, Operand(kSmiTagMask));
+    b(ne, not_smi_label);
+  }
+
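Both helpers test the smi tag: V8 smis keep the low bit clear, so tst against kSmiTagMask sets the Z flag exactly when the value is a smi. In plain C++, assuming the usual tag values (kSmiTag == 0, kSmiTagMask == 1):

#include <cstdint>

inline bool IsSmi(intptr_t value) { return (value & 1) == 0; }
// BranchOnSmi(v, L)    behaves like:  if (IsSmi(v)) goto L;
// BranchOnNotSmi(v, L) behaves like:  if (!IsSmi(v)) goto L;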
   // Generates code for reporting that an illegal operation has
   // occurred.
   void IllegalOperation(int num_arguments);
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index b8b6663..af4f28e 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -30,6 +30,7 @@
 #include "v8.h"
 
 #include "disasm.h"
+#include "assembler.h"
 #include "arm/constants-arm.h"
 #include "arm/simulator-arm.h"
 
@@ -380,7 +381,23 @@
 }
 
 
+// Create one simulator per thread and keep it in thread local storage.
+static v8::internal::Thread::LocalStorageKey simulator_key;
+
+
+bool Simulator::initialized_ = false;
+
+
+void Simulator::Initialize() {
+  if (initialized_) return;
+  simulator_key = v8::internal::Thread::CreateThreadLocalKey();
+  initialized_ = true;
+  ::v8::internal::ExternalReference::set_redirector(&RedirectExternalReference);
+}
+
+
 Simulator::Simulator() {
+  ASSERT(initialized_);
   // Setup simulator support first. Some of this information is needed to
   // setup the architecture state.
   size_t stack_size = 1 * 1024*1024;  // allocate 1MB for stack
@@ -412,9 +429,63 @@
 }
 
 
-// Create one simulator per thread and keep it in thread local storage.
-static v8::internal::Thread::LocalStorageKey simulator_key =
-    v8::internal::Thread::CreateThreadLocalKey();
+// When the generated code calls an external reference we need to catch that in
+// the simulator.  The external reference will be a function compiled for the
+// host architecture.  We need to call that function instead of trying to
+// execute it with the simulator.  We do that by redirecting the external
+// reference to a swi (software-interrupt) instruction that is handled by
+// the simulator.  We write the original destination of the jump just at a known
+// offset from the swi instruction so the simulator knows what to call.
+class Redirection {
+ public:
+  Redirection(void* external_function, bool fp_return)
+      : external_function_(external_function),
+        swi_instruction_((AL << 28) | (0xf << 24) | call_rt_redirected),
+        fp_return_(fp_return),
+        next_(list_) {
+    list_ = this;
+  }
+
+  void* address_of_swi_instruction() {
+    return reinterpret_cast<void*>(&swi_instruction_);
+  }
+
+  void* external_function() { return external_function_; }
+  bool fp_return() { return fp_return_; }
+
+  static Redirection* Get(void* external_function, bool fp_return) {
+    Redirection* current;
+    for (current = list_; current != NULL; current = current->next_) {
+      if (current->external_function_ == external_function) return current;
+    }
+    return new Redirection(external_function, fp_return);
+  }
+
+  static Redirection* FromSwiInstruction(Instr* swi_instruction) {
+    char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
+    char* addr_of_redirection =
+        addr_of_swi - OFFSET_OF(Redirection, swi_instruction_);
+    return reinterpret_cast<Redirection*>(addr_of_redirection);
+  }
+
+ private:
+  void* external_function_;
+  uint32_t swi_instruction_;
+  bool fp_return_;
+  Redirection* next_;
+  static Redirection* list_;
+};
+
+
+Redirection* Redirection::list_ = NULL;
+
+
+void* Simulator::RedirectExternalReference(void* external_function,
+                                           bool fp_return) {
+  Redirection* redirection = Redirection::Get(external_function, fp_return);
+  return redirection->address_of_swi_instruction();
+}
+
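FromSwiInstruction works because the swi cell is itself a member of the Redirection record: subtracting the member's offset from the member's address recovers the enclosing object. A self-contained sketch of that pointer arithmetic, with illustrative types rather than the simulator's:

#include <cstddef>
#include <cstdint>

struct Redir {
  void* external_function;
  uint32_t swi_instruction;  // generated code jumps to this field's address
};

Redir* FromSwi(uint32_t* swi_cell) {
  char* p = reinterpret_cast<char*>(swi_cell);
  return reinterpret_cast<Redir*>(p - offsetof(Redir, swi_instruction));
}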
 
 // Get the active Simulator for the current thread.
 Simulator* Simulator::current() {
@@ -921,7 +992,14 @@
 // 64-bit value. With the code below we assume that all runtime calls return
 // 64 bits of result. If they don't, the r1 result register contains a bogus
 // value, which is fine because it is caller-saved.
-typedef int64_t (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1);
+typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
+                                        int32_t arg1,
+                                        int32_t arg2,
+                                        int32_t arg3);
+typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
+                                         int32_t arg1,
+                                         int32_t arg2,
+                                         int32_t arg3);
 
 
 // Software interrupt instructions are used by the simulator to call into the
@@ -929,30 +1007,51 @@
 void Simulator::SoftwareInterrupt(Instr* instr) {
   int swi = instr->SwiField();
   switch (swi) {
-    case call_rt_r5: {
-      SimulatorRuntimeCall target =
-          reinterpret_cast<SimulatorRuntimeCall>(get_register(r5));
-      intptr_t arg0 = get_register(r0);
-      intptr_t arg1 = get_register(r1);
-      int64_t result = target(arg0, arg1);
-      int32_t lo_res = static_cast<int32_t>(result);
-      int32_t hi_res = static_cast<int32_t>(result >> 32);
-      set_register(r0, lo_res);
-      set_register(r1, hi_res);
-      set_pc(reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
-      break;
-    }
-    case call_rt_r2: {
-      SimulatorRuntimeCall target =
-          reinterpret_cast<SimulatorRuntimeCall>(get_register(r2));
-      intptr_t arg0 = get_register(r0);
-      intptr_t arg1 = get_register(r1);
-      int64_t result = target(arg0, arg1);
-      int32_t lo_res = static_cast<int32_t>(result);
-      int32_t hi_res = static_cast<int32_t>(result >> 32);
-      set_register(r0, lo_res);
-      set_register(r1, hi_res);
-      set_pc(reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
+    case call_rt_redirected: {
+      Redirection* redirection = Redirection::FromSwiInstruction(instr);
+      int32_t arg0 = get_register(r0);
+      int32_t arg1 = get_register(r1);
+      int32_t arg2 = get_register(r2);
+      int32_t arg3 = get_register(r3);
+      // This is dodgy but it works because the C entry stubs are never moved.
+      // See comment in codegen-arm.cc and bug 1242173.
+      int32_t saved_lr = get_register(lr);
+      if (redirection->fp_return()) {
+        intptr_t external =
+            reinterpret_cast<intptr_t>(redirection->external_function());
+        SimulatorRuntimeFPCall target =
+            reinterpret_cast<SimulatorRuntimeFPCall>(external);
+        if (::v8::internal::FLAG_trace_sim) {
+          double x, y;
+          GetFpArgs(&x, &y);
+          PrintF("Call to host function at %p with args %f, %f\n",
+                 FUNCTION_ADDR(target), x, y);
+        }
+        double result = target(arg0, arg1, arg2, arg3);
+        SetFpResult(result);
+      } else {
+        intptr_t external =
+            reinterpret_cast<intptr_t>(redirection->external_function());
+        SimulatorRuntimeCall target =
+            reinterpret_cast<SimulatorRuntimeCall>(external);
+        if (::v8::internal::FLAG_trace_sim) {
+          PrintF(
+              "Call to host function at %p with args %08x, %08x, %08x, %08x\n",
+              FUNCTION_ADDR(target),
+              arg0,
+              arg1,
+              arg2,
+              arg3);
+        }
+        int64_t result = target(arg0, arg1, arg2, arg3);
+        int32_t lo_res = static_cast<int32_t>(result);
+        int32_t hi_res = static_cast<int32_t>(result >> 32);
+        set_register(r0, lo_res);
+        set_register(r1, hi_res);
+      }
+      set_register(lr, saved_lr);
+      set_pc(get_register(lr));
       break;
     }
     case break_point: {
@@ -960,30 +1059,6 @@
       dbg.Debug();
       break;
     }
-    {
-      double x, y, z;
-    case simulator_fp_add:
-      GetFpArgs(&x, &y);
-      z = x + y;
-      SetFpResult(z);
-      TrashCallerSaveRegisters();
-      set_pc(reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
-      break;
-    case simulator_fp_sub:
-      GetFpArgs(&x, &y);
-      z = x - y;
-      SetFpResult(z);
-      TrashCallerSaveRegisters();
-      set_pc(reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
-      break;
-    case simulator_fp_mul:
-      GetFpArgs(&x, &y);
-      z = x * y;
-      SetFpResult(z);
-      TrashCallerSaveRegisters();
-      set_pc(reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
-      break;
-    }
     default: {
       UNREACHABLE();
       break;
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index d4a395a..15b92a5 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -106,6 +106,9 @@
   // Executes ARM instructions until the PC reaches end_sim_pc.
   void Execute();
 
+  // Call on program start.
+  static void Initialize();
+
   // V8 generally calls into generated code with 5 parameters. This is a
   // convenience function, which sets up the simulator state and grabs the
   // result on return.
@@ -175,6 +178,10 @@
   // Executes one instruction.
   void InstructionDecode(Instr* instr);
 
+  // Runtime call support.
+  static void* RedirectExternalReference(void* external_function,
+                                         bool fp_return);
+
   // For use in calls that take two double values, constructed from r0, r1, r2
   // and r3.
   void GetFpArgs(double* x, double* y);
@@ -192,6 +199,7 @@
   char* stack_;
   bool pc_modified_;
   int icount_;
+  static bool initialized_;
 
   // registered breakpoints
   Instr* break_pc_;
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index c09f9e3..7824557 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -283,9 +283,7 @@
   __ b(eq, miss_label);
 
   // Check that the object is a JS array.
-  __ ldr(scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
-  __ cmp(scratch, Operand(JS_ARRAY_TYPE));
+  __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
   __ b(ne, miss_label);
 
   // Load length directly from the JS array.
@@ -523,9 +521,7 @@
   __ tst(r1, Operand(kSmiTagMask));
   __ b(eq, &miss);
   // Get the map.
-  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-  __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
-  __ cmp(r2, Operand(JS_FUNCTION_TYPE));
+  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
   __ b(ne, &miss);
 
   // Patch the receiver on the stack with the global proxy if
@@ -588,9 +584,7 @@
 
     case STRING_CHECK:
       // Check that the object is a two-byte string or a symbol.
-      __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-      __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
-      __ cmp(r2, Operand(FIRST_NONSTRING_TYPE));
+      __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
       __ b(hs, &miss);
       // Check that the maps starting from the prototype haven't changed.
       GenerateLoadGlobalFunctionPrototype(masm(),
@@ -605,9 +599,7 @@
       // Check that the object is a smi or a heap number.
       __ tst(r1, Operand(kSmiTagMask));
       __ b(eq, &fast);
-      __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-      __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
-      __ cmp(r2, Operand(HEAP_NUMBER_TYPE));
+      __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
       __ b(ne, &miss);
       __ bind(&fast);
       // Check that the maps starting from the prototype haven't changed.
diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc
index 9527383..3d0ada7 100644
--- a/src/arm/virtual-frame-arm.cc
+++ b/src/arm/virtual-frame-arm.cc
@@ -156,9 +156,7 @@
     __ b(ne, &map_check);
     __ stop("VirtualFrame::Enter - r1 is not a function (smi check).");
     __ bind(&map_check);
-    __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-    __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
-    __ cmp(r2, Operand(JS_FUNCTION_TYPE));
+    __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
     __ b(eq, &done);
     __ stop("VirtualFrame::Enter - r1 is not a function (map check).");
     __ bind(&done);
@@ -230,8 +228,8 @@
 
 
 void VirtualFrame::PushTryHandler(HandlerType type) {
-  // Grow the expression stack by handler size less one (the return address
-  // is already pushed by a call instruction).
+  // Grow the expression stack by handler size less one (the return
+  // address in lr is already counted by a call instruction).
   Adjust(kHandlerSize - 1);
   __ PushTryHandler(IN_JAVASCRIPT, type);
 }
diff --git a/src/array.js b/src/array.js
index ed84b5f..eb69f97 100644
--- a/src/array.js
+++ b/src/array.js
@@ -769,6 +769,63 @@
     }
   }
 
+  function SafeRemoveArrayHoles(obj) {
+    // Copy defined elements from the end to fill in all holes and undefineds
+    // in the beginning of the array.  Write undefineds and holes at the end
+    // after the loop is finished.
+    var first_undefined = 0;
+    var last_defined = length - 1;
+    var num_holes = 0;
+    while (first_undefined < last_defined) {
+      // Find first undefined element.
+      while (first_undefined < last_defined &&
+             !IS_UNDEFINED(obj[first_undefined])) {
+        first_undefined++;
+      }
+      // Maintain the invariant num_holes = the number of holes in the original
+      // array with indices <= first_undefined or > last_defined.
+      if (!obj.hasOwnProperty(first_undefined)) {
+        num_holes++;
+      }
+
+      // Find last defined element.
+      while (first_undefined < last_defined &&
+             IS_UNDEFINED(obj[last_defined])) {
+        if (!obj.hasOwnProperty(last_defined)) {
+          num_holes++;
+        }
+        last_defined--;
+      }
+      if (first_undefined < last_defined) {
+        // Fill in hole or undefined.
+        obj[first_undefined] = obj[last_defined];
+        obj[last_defined] = void 0;
+      }
+    }
+    // If there were any undefineds in the entire array, first_undefined
+    // points one past the last defined element.  Make this hold when there
+    // were no undefineds as well, so that first_undefined == number of
+    // defined elements.
+    if (!IS_UNDEFINED(obj[first_undefined])) first_undefined++;
+    // Fill in the undefineds and the holes.  There may be a hole where
+    // an undefined should be and vice versa.
+    var i;
+    for (i = first_undefined; i < length - num_holes; i++) {
+      obj[i] = void 0;
+    }
+    for (i = length - num_holes; i < length; i++) {
+      // For compatibility with WebKit, do not expose elements in the prototype.
+      if (i in obj.__proto__) {
+        obj[i] = void 0;
+      } else {
+        delete obj[i];
+      }
+    }
+
+    // Return the number of defined elements.
+    return first_undefined;
+  }
+
   var length = ToUint32(this.length);
   if (length < 2) return this;
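Stripped of the hole and accessor bookkeeping, the compaction in SafeRemoveArrayHoles is a two-pointer sweep. A hedged C++ sketch over a plain vector, where a caller-chosen sentinel plays the role of undefined:

#include <cstddef>
#include <vector>

// Fill leading 'undef' slots with defined values taken from the back and
// return how many defined elements end up at the front.
size_t CompactUndefined(std::vector<int>* a, int undef) {
  if (a->empty()) return 0;
  size_t first_undef = 0;
  size_t last_def = a->size() - 1;
  while (first_undef < last_def) {
    while (first_undef < last_def && (*a)[first_undef] != undef) first_undef++;
    while (first_undef < last_def && (*a)[last_def] == undef) last_def--;
    if (first_undef < last_def) {
      (*a)[first_undef] = (*a)[last_def];
      (*a)[last_def] = undef;
    }
  }
  if ((*a)[first_undef] != undef) first_undef++;
  return first_undef;  // e.g. {7, U, 3, U, 9} -> {7, 9, 3, U, U}, returns 3
}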
 
@@ -787,6 +844,12 @@
   }
 
   var num_non_undefined = %RemoveArrayHoles(this, length);
+  if (num_non_undefined == -1) {
+    // There were indexed accessors in the array.  Move array holes and
+    // undefineds to the end using a JavaScript function that is safe
+    // in the presence of accessors.
+    num_non_undefined = SafeRemoveArrayHoles(this);
+  }
 
   QuickSort(this, 0, num_non_undefined);
 
diff --git a/src/assembler.cc b/src/assembler.cc
index 5dba75d..7b7778c 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -30,7 +30,7 @@
 
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
 
 #include "v8.h"
 
@@ -363,7 +363,7 @@
           if (SetMode(DebugInfoModeFromTag(top_tag))) return;
         } else {
           // Otherwise, just skip over the data.
-          Advance(kIntSize);
+          Advance(kIntptrSize);
         }
       } else {
         AdvanceReadPC();
@@ -508,7 +508,7 @@
 // Implementation of ExternalReference
 
 ExternalReference::ExternalReference(Builtins::CFunctionId id)
-  : address_(Builtins::c_function_address(id)) {}
+  : address_(Redirect(Builtins::c_function_address(id))) {}
 
 
 ExternalReference::ExternalReference(Builtins::Name name)
@@ -516,15 +516,15 @@
 
 
 ExternalReference::ExternalReference(Runtime::FunctionId id)
-  : address_(Runtime::FunctionForId(id)->entry) {}
+  : address_(Redirect(Runtime::FunctionForId(id)->entry)) {}
 
 
 ExternalReference::ExternalReference(Runtime::Function* f)
-  : address_(f->entry) {}
+  : address_(Redirect(f->entry)) {}
 
 
 ExternalReference::ExternalReference(const IC_Utility& ic_utility)
-  : address_(ic_utility.address()) {}
+  : address_(Redirect(ic_utility.address())) {}
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 ExternalReference::ExternalReference(const Debug_Address& debug_address)
@@ -543,10 +543,21 @@
   : address_(table_ref.address()) {}
 
 
+ExternalReference ExternalReference::perform_gc_function() {
+  return ExternalReference(Redirect(FUNCTION_ADDR(Runtime::PerformGC)));
+}
+
+
 ExternalReference ExternalReference::builtin_passed_function() {
   return ExternalReference(&Builtins::builtin_passed_function);
 }
 
+
+ExternalReference ExternalReference::random_positive_smi_function() {
+  return ExternalReference(Redirect(FUNCTION_ADDR(V8::RandomPositiveSmi)));
+}
+
+
 ExternalReference ExternalReference::the_hole_value_location() {
   return ExternalReference(Factory::the_hole_value().location());
 }
@@ -614,13 +625,17 @@
     default:
       UNREACHABLE();
   }
-  return ExternalReference(FUNCTION_ADDR(function));
+  // Passing true as 2nd parameter indicates that they return an fp value.
+  return ExternalReference(Redirect(FUNCTION_ADDR(function), true));
 }
 
 
+ExternalReferenceRedirector* ExternalReference::redirector_ = NULL;
+
+
 #ifdef ENABLE_DEBUGGER_SUPPORT
 ExternalReference ExternalReference::debug_break() {
-  return ExternalReference(FUNCTION_ADDR(Debug::Break));
+  return ExternalReference(Redirect(FUNCTION_ADDR(Debug::Break)));
 }
 
 
diff --git a/src/assembler.h b/src/assembler.h
index 66f952a..0abd852 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -30,7 +30,7 @@
 
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
 
 #ifndef V8_ASSEMBLER_H_
 #define V8_ASSEMBLER_H_
@@ -352,10 +352,15 @@
 class Debug_Address;
 #endif
 
-// An ExternalReference represents a C++ address called from the generated
-// code. All references to C++ functions and must be encapsulated in an
-// ExternalReference instance. This is done in order to track the origin of
-// all external references in the code.
+
+typedef void* ExternalReferenceRedirector(void* original, bool fp_return);
+
+
+// An ExternalReference represents a C++ address used in the generated
+// code. All references to C++ functions and variables must be encapsulated in
+// an ExternalReference instance. This is done in order to track the origin of
+// all external references in the code so that they can be bound to the correct
+// addresses when deserializing a heap.
 class ExternalReference BASE_EMBEDDED {
  public:
   explicit ExternalReference(Builtins::CFunctionId id);
@@ -382,7 +387,9 @@
   // pattern. This means that they have to be added to the
   // ExternalReferenceTable in serialize.cc manually.
 
+  static ExternalReference perform_gc_function();
   static ExternalReference builtin_passed_function();
+  static ExternalReference random_positive_smi_function();
 
   // Static variable Factory::the_hole_value.location()
   static ExternalReference the_hole_value_location();
@@ -403,7 +410,7 @@
 
   static ExternalReference double_fp_operation(Token::Value operation);
 
-  Address address() const {return address_;}
+  Address address() const {return reinterpret_cast<Address>(address_);}
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Function Debug::Break()
@@ -413,11 +420,30 @@
   static ExternalReference debug_step_in_fp_address();
 #endif
 
+  // This lets you register a function that rewrites all external references.
+  // Used by the ARM simulator to catch calls to external references.
+  static void set_redirector(ExternalReferenceRedirector* redirector) {
+    ASSERT(redirector_ == NULL);  // We can't stack them.
+    redirector_ = redirector;
+  }
+
  private:
   explicit ExternalReference(void* address)
-    : address_(reinterpret_cast<Address>(address)) {}
+      : address_(address) {}
 
-  Address address_;
+  static ExternalReferenceRedirector* redirector_;
+
+  static void* Redirect(void* address, bool fp_return = false) {
+    if (redirector_ == NULL) return address;
+    return (*redirector_)(address, fp_return);
+  }
+
+  static void* Redirect(Address address_arg, bool fp_return = false) {
+    void* address = reinterpret_cast<void*>(address_arg);
+    return redirector_ == NULL ? address : (*redirector_)(address, fp_return);
+  }
+
+  void* address_;
 };
 
 
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 89c92b0..2dbc030 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -1113,6 +1113,9 @@
   }
 
 #ifdef V8_HOST_ARCH_64_BIT
+  // TODO(X64): Remove these tests when code generation works and is stable.
+  MacroAssembler::ConstructAndTestJSFunction();
+  CodeGenerator::TestCodeGenerator();
   // TODO(X64): Reenable remaining initialization when code generation works.
   return true;
 #endif  // V8_HOST_ARCH_64_BIT
diff --git a/src/builtins.cc b/src/builtins.cc
index 1c43f7a..0648e54 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -720,7 +720,8 @@
       // bootstrapper.
       Bootstrapper::AddFixup(Code::cast(code), &masm);
       // Log the event and add the code to the builtins array.
-      LOG(CodeCreateEvent("Builtin", Code::cast(code), functions[i].s_name));
+      LOG(CodeCreateEvent(Logger::BUILTIN_TAG,
+                          Code::cast(code), functions[i].s_name));
       builtins_[i] = code;
 #ifdef ENABLE_DISASSEMBLER
       if (FLAG_print_builtin_code) {
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index b14ede1..f4d8ce8 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -66,7 +66,7 @@
     // Add unresolved entries in the code to the fixup list.
     Bootstrapper::AddFixup(*code, &masm);
 
-    LOG(CodeCreateEvent("Stub", *code, GetName()));
+    LOG(CodeCreateEvent(Logger::STUB_TAG, *code, GetName()));
     Counters::total_stubs_code_size.Increment(code->instruction_size());
 
 #ifdef ENABLE_DISASSEMBLER
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 183a64a..76ec787 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -41,6 +41,8 @@
     SmiOp,
     Compare,
     RecordWrite,  // Last stub that allows stub calls inside.
+    ConvertToDouble,
+    WriteInt32ToHeapNumber,
     StackCheck,
     UnarySub,
     RevertToNumber,
diff --git a/src/codegen.cc b/src/codegen.cc
index f46269f..e359c34 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -302,12 +302,12 @@
     }
 
     // Function compilation complete.
-    LOG(CodeCreateEvent("Function", *code, *node->name()));
+    LOG(CodeCreateEvent(Logger::FUNCTION_TAG, *code, *node->name()));
 
 #ifdef ENABLE_OPROFILE_AGENT
     OProfileAgent::CreateNativeCodeRegion(*node->name(),
-                                          code->address(),
-                                          code->ExecutableSize());
+                                          code->instruction_start(),
+                                          code->instruction_size());
 #endif
   }
 
@@ -422,7 +422,10 @@
   {&CodeGenerator::GenerateSetValueOf, "_SetValueOf"},
   {&CodeGenerator::GenerateFastCharCodeAt, "_FastCharCodeAt"},
   {&CodeGenerator::GenerateObjectEquals, "_ObjectEquals"},
-  {&CodeGenerator::GenerateLog, "_Log"}
+  {&CodeGenerator::GenerateLog, "_Log"},
+  {&CodeGenerator::GenerateRandomPositiveSmi, "_RandomPositiveSmi"},
+  {&CodeGenerator::GenerateMathSin, "_Math_sin"},
+  {&CodeGenerator::GenerateMathCos, "_Math_cos"}
 };
 
 
diff --git a/src/codegen.h b/src/codegen.h
index e1758e1..0b42935 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -228,13 +228,27 @@
 };
 
 
-class UnarySubStub : public CodeStub {
+class InstanceofStub: public CodeStub {
  public:
-  UnarySubStub() { }
+  InstanceofStub() { }
+
+  void Generate(MacroAssembler* masm);
 
  private:
-  Major MajorKey() { return UnarySub; }
+  Major MajorKey() { return Instanceof; }
   int MinorKey() { return 0; }
+};
+
+
+class UnarySubStub : public CodeStub {
+ public:
+  explicit UnarySubStub(bool overwrite)
+      : overwrite_(overwrite) { }
+
+ private:
+  bool overwrite_;
+  Major MajorKey() { return UnarySub; }
+  int MinorKey() { return overwrite_ ? 1 : 0; }
   void Generate(MacroAssembler* masm);
 
   const char* GetName() { return "UnarySubStub"; }
diff --git a/src/compiler.cc b/src/compiler.cc
index ea7c134..73d2002 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -179,13 +179,17 @@
     if (script->name()->IsString()) {
       SmartPointer<char> data =
           String::cast(script->name())->ToCString(DISALLOW_NULLS);
-      LOG(CodeCreateEvent(is_eval ? "Eval" : "Script", *code, *data));
-      OProfileAgent::CreateNativeCodeRegion(*data, code->address(),
-                                            code->ExecutableSize());
+      LOG(CodeCreateEvent(is_eval ? Logger::EVAL_TAG : Logger::SCRIPT_TAG,
+                          *code, *data));
+      OProfileAgent::CreateNativeCodeRegion(*data,
+                                            code->instruction_start(),
+                                            code->instruction_size());
     } else {
-      LOG(CodeCreateEvent(is_eval ? "Eval" : "Script", *code, ""));
+      LOG(CodeCreateEvent(is_eval ? Logger::EVAL_TAG : Logger::SCRIPT_TAG,
+                          *code, ""));
       OProfileAgent::CreateNativeCodeRegion(is_eval ? "Eval" : "Script",
-          code->address(), code->ExecutableSize());
+                                            code->instruction_start(),
+                                            code->instruction_size());
     }
   }
 #endif
@@ -380,16 +384,18 @@
       if (line_num > 0) {
         line_num += script->line_offset()->value() + 1;
       }
-      LOG(CodeCreateEvent("LazyCompile", *code, *func_name,
+      LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG, *code, *func_name,
                           String::cast(script->name()), line_num));
       OProfileAgent::CreateNativeCodeRegion(*func_name,
                                             String::cast(script->name()),
-                                            line_num, code->address(),
-                                            code->ExecutableSize());
+                                            line_num,
+                                            code->instruction_start(),
+                                            code->instruction_size());
     } else {
-      LOG(CodeCreateEvent("LazyCompile", *code, *func_name));
-      OProfileAgent::CreateNativeCodeRegion(*func_name, code->address(),
-                                            code->ExecutableSize());
+      LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG, *code, *func_name));
+      OProfileAgent::CreateNativeCodeRegion(*func_name,
+                                            code->instruction_start(),
+                                            code->instruction_size());
     }
   }
 #endif
diff --git a/src/d8.cc b/src/d8.cc
index ee845ee..e02c80a 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -460,6 +460,16 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Set the security token of the debug context to allow access.
   i::Debug::debug_context()->set_security_token(i::Heap::undefined_value());
+
+  // Start the debugger agent if requested.
+  if (i::FLAG_debugger_agent) {
+    v8::Debug::EnableAgent("d8 shell", i::FLAG_debugger_port);
+  }
+
+  // Start the in-process debugger if requested.
+  if (i::FLAG_debugger && !i::FLAG_debugger_agent) {
+    v8::Debug::SetDebugEventListener(HandleDebugEvent);
+  }
 #endif
 }
 
@@ -721,16 +731,6 @@
       RunRemoteDebugger(i::FLAG_debugger_port);
       return 0;
     }
-
-    // Start the debugger agent if requested.
-    if (i::FLAG_debugger_agent) {
-      v8::Debug::EnableAgent("d8 shell", i::FLAG_debugger_port);
-    }
-
-    // Start the in-process debugger if requested.
-    if (i::FLAG_debugger && !i::FLAG_debugger_agent) {
-      v8::Debug::SetDebugEventListener(HandleDebugEvent);
-    }
 #endif
   }
   if (run_shell)
diff --git a/src/d8.js b/src/d8.js
index a8db9e1..2d52170 100644
--- a/src/d8.js
+++ b/src/d8.js
@@ -25,8 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// How crappy is it that I have to implement completely basic stuff
-// like this myself?  Answer: very.
 String.prototype.startsWith = function (str) {
   if (str.length > this.length)
     return false;
@@ -100,6 +98,13 @@
                                 JSON: 2 };
 
 
+// The different types of scopes, matching the constants in runtime.cc.
+Debug.ScopeType = { Global: 0,
+                    Local: 1,
+                    With: 2,
+                    Closure: 3 };
+
+
 // Current debug state.
 const kNoFrame = -1;
 Debug.State = {
@@ -297,6 +302,14 @@
       this.request_ = this.frameCommandToJSONRequest_(args);
       break;
       
+    case 'scopes':
+      this.request_ = this.scopesCommandToJSONRequest_(args);
+      break;
+      
+    case 'scope':
+      this.request_ = this.scopeCommandToJSONRequest_(args);
+      break;
+      
     case 'print':
     case 'p':
       this.request_ = this.printCommandToJSONRequest_(args);
@@ -396,13 +409,17 @@
 
 // Create a JSON request for the evaluation command.
 DebugRequest.prototype.makeEvaluateJSONRequest_ = function(expression) {
+  // Global variable used to store the handle id of a lookup request, if any.
+  lookup_handle = null;
   // Check if the expression is a handle id in the form #<handle>#.
   var handle_match = expression.match(/^#([0-9]*)#$/);
   if (handle_match) {
+    // Remember the handle requested in a global variable.
+    lookup_handle = parseInt(handle_match[1]);
     // Build a lookup request.
     var request = this.createRequest('lookup');
     request.arguments = {};
-    request.arguments.handle = parseInt(handle_match[1]);
+    request.arguments.handles = [ lookup_handle ];
     return request.toJSONProtocol();
   } else {
     // Build an evaluate request.
@@ -561,6 +578,27 @@
 };
 
 
+// Create a JSON request for the scopes command.
+DebugRequest.prototype.scopesCommandToJSONRequest_ = function(args) {
+  // Build a scopes request from the text command.
+  var request = this.createRequest('scopes');
+  return request.toJSONProtocol();
+};
+
+
+// Create a JSON request for the scope command.
+DebugRequest.prototype.scopeCommandToJSONRequest_ = function(args) {
+  // Build a scope request from the text command.
+  var request = this.createRequest('scope');
+  args = args.split(/\s*[ ]+\s*/g);
+  if (args.length > 0 && args[0].length > 0) {
+    request.arguments = {};
+    request.arguments.number = args[0];
+  }
+  return request.toJSONProtocol();
+};
+
+
 // Create a JSON request for the print command.
 DebugRequest.prototype.printCommandToJSONRequest_ = function(args) {
   // Build an evaluate request from the text command.
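
For illustration, a minimal sketch of the request the new scope command
produces (the seq value is invented; note the scope number is passed
through as the string parsed from the command line):

    // Hypothetical JSON request for the text command "scope 1".
    var request = {
      seq: 1,
      type: 'request',
      command: 'scope',
      arguments: { number: '1' }
    };
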
@@ -785,8 +823,11 @@
   print('clear <breakpoint #>');
   print('backtrace [n] | [-n] | [from to]');
   print('frame <frame #>');
+  print('scopes');
+  print('scope <scope #>');
   print('step [in | next | out| min [step count]]');
   print('print <expression>');
+  print('dir <expression>');
   print('source [from line [num lines]]');
   print('scripts');
   print('continue');
@@ -796,7 +837,11 @@
 
 
 function formatHandleReference_(value) {
-  return '#' + value.handle() + '#';
+  if (value.handle() >= 0) {
+    return '#' + value.handle() + '#';
+  } else {
+    return '#Transient#';
+  }
 }
 
 
@@ -820,10 +865,14 @@
       result += value.propertyName(i);
       result += ': ';
       var property_value = value.propertyValue(i);
-      if (property_value && property_value.type()) {
-        result += property_value.type();
-      } else {
+      if (property_value instanceof ProtocolReference) {
         result += '<no type>';
+      } else {
+        if (property_value && property_value.type()) {
+          result += property_value.type();
+        } else {
+          result += '<no type>';
+        }
       }
       result += ' ';
       result += formatHandleReference_(property_value);
@@ -834,6 +883,33 @@
 }
 
 
+function formatScope_(scope) {
+  var result = '';
+  var index = scope.index;
+  result += '#' + (index <= 9 ? '0' : '') + index;
+  result += ' ';
+  switch (scope.type) {
+    case Debug.ScopeType.Global:
+      result += 'Global, ';
+      result += '#' + scope.object.ref + '#';
+      break;
+    case Debug.ScopeType.Local:
+      result += 'Local';
+      break;
+    case Debug.ScopeType.With:
+      result += 'With, ';
+      result += '#' + scope.object.ref + '#';
+      break;
+    case Debug.ScopeType.Closure:
+      result += 'Closure';
+      break;
+    default:
+      result += 'UNKNOWN';
+  }
+  return result;
+}
+
+
 // Convert a JSON response to text for display in a text based debugger.
 function DebugResponseDetails(response) {
   details = {text:'', running:false}
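
As a sketch, formatScope_ would render a frame with three scopes along
these lines (the handle number is invented):

    #00 Local
    #01 Closure
    #02 Global, #4#
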
@@ -883,12 +959,41 @@
         Debug.State.currentFrame = body.index;
         break;
         
+      case 'scopes':
+        if (body.totalScopes == 0) {
+          result = '(no scopes)';
+        } else {
+          result = 'Scopes #' + body.fromScope + ' to #' +
+                   (body.toScope - 1) + ' of ' + body.totalScopes + '\n';
+          for (i = 0; i < body.scopes.length; i++) {
+            if (i != 0) {
+              result += '\n';
+            }
+            result += formatScope_(body.scopes[i]);
+          }
+        }
+        details.text = result;
+        break;
+
+      case 'scope':
+        result += formatScope_(body);
+        result += '\n';
+        var scope_object_value = response.lookup(body.object.ref);
+        result += formatObject_(scope_object_value, true);
+        details.text = result;
+        break;
+      
       case 'evaluate':
       case 'lookup':
         if (last_cmd == 'p' || last_cmd == 'print') {
           result = body.text;
         } else {
-          var value = response.bodyValue();
+          var value;
+          if (lookup_handle) {
+            value = response.bodyValue(lookup_handle);
+          } else {
+            value = response.bodyValue();
+          }
           if (value.isObject()) {
             result += formatObject_(value, true);
           } else {
@@ -1105,7 +1210,7 @@
 
 
 ProtocolPackage.prototype.bodyValue = function(index) {
-  if (index) {
+  if (index != null) {
     return new ProtocolValue(this.packet_.body[index], this);
   } else {
     return new ProtocolValue(this.packet_.body, this);
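
The bodyValue guard change matters for index 0, which is falsy; a small
JavaScript illustration:

    // `if (index)` is false for 0, so bodyValue(0) used to return the
    // whole body.  `index != null` accepts 0 but still rejects both
    // null and undefined.
    0 != null          // true  -> use this.packet_.body[0]
    undefined != null  // false -> use this.packet_.body
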
diff --git a/src/date-delay.js b/src/date-delay.js
index f06e8b7..9aecadb 100644
--- a/src/date-delay.js
+++ b/src/date-delay.js
@@ -115,7 +115,7 @@
   // - leap year.
   // - week day of first day.
   var time = TimeFromYear(year);
-  var recent_year = (InLeapYear(time) == 0 ? 1967 : 1956) + 
+  var recent_year = (InLeapYear(time) == 0 ? 1967 : 1956) +
       (WeekDay(time) * 12) % 28;
   // Find the year in the range 2008..2037 that is equivalent mod 28.
   // Add 3*28 to give a positive argument to the modulus operator.
@@ -129,23 +129,82 @@
   // (measured in whole seconds based on the 1970 epoch).
   // We solve this by mapping the time to a year with same leap-year-ness
   // and same starting day for the year.  The ECMAscript specification says
-  // we must do this, but for compatability with other browsers, we use
+  // we must do this, but for compatibility with other browsers, we use
   // the actual year if it is in the range 1970..2037
   if (t >= 0 && t <= 2.1e12) return t;
   var day = MakeDay(EquivalentYear(YearFromTime(t)), MonthFromTime(t), DateFromTime(t));
   return TimeClip(MakeDate(day, TimeWithinDay(t)));
 }
 
-var daylight_cache_time = $NaN;
-var daylight_cache_offset;
+
+// Because computing the DST offset is a pretty expensive operation,
+// we keep a cache of the last computed offset along with a time
+// interval where we know the cache is valid.
+var DST_offset_cache = {
+  // Cached DST offset.
+  offset: 0,
+  // Time interval where the cached offset is valid.
+  start: 0, end: -1,
+  // Size of next interval expansion.
+  increment: 0
+};
+
 
 function DaylightSavingsOffset(t) {
-  if (t == daylight_cache_time) {
-    return daylight_cache_offset;
+  // Load the cache object from the builtins object.
+  var cache = DST_offset_cache;
+
+  // Cache the start and the end in local variables for fast access.
+  var start = cache.start;
+  var end = cache.end;
+
+  if (start <= t) {
+    // If the time fits in the cached interval, return the cached offset.
+    if (t <= end) return cache.offset;
+
+    // Compute a possible new interval end.
+    var new_end = end + cache.increment;
+
+    if (t <= new_end) {
+      var end_offset = %DateDaylightSavingsOffset(EquivalentTime(new_end));
+      if (cache.offset == end_offset) {
+        // If the offset at the end of the new interval still matches
+        // the offset in the cache, we grow the cached time interval
+        // and return the offset.
+        cache.end = new_end;
+        cache.increment = msPerMonth;
+        return end_offset;
+      } else {
+        var offset = %DateDaylightSavingsOffset(EquivalentTime(t));
+        if (offset == end_offset) {
+          // The offset at the given time is equal to the offset at the
+          // new end of the interval, so that means that we've just skipped
+          // the point in time where the DST offset change occurred. Update
+          // the interval to reflect this and reset the increment.
+          cache.start = t;
+          cache.end = new_end;
+          cache.increment = msPerMonth;
+        } else {
+          // The interval contains a DST offset change and the given time is
+          // before it. Adjust the increment to avoid a linear search for
+          // the offset change point and change the end of the interval.
+          cache.increment /= 3;
+          cache.end = t;
+        }
+        // Update the offset in the cache and return it.
+        cache.offset = offset;
+        return offset;
+      }
+    }
   }
+
+  // Compute the DST offset for the time and shrink the cache interval
+  // to only contain the time. This allows fast repeated DST offset
+  // computations for the same time.
   var offset = %DateDaylightSavingsOffset(EquivalentTime(t));
-  daylight_cache_time = t;
-  daylight_cache_offset = offset;
+  cache.offset = offset;
+  cache.start = cache.end = t;
+  cache.increment = msPerMonth;
   return offset;
 }
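
A minimal standalone sketch of the caching idea (names are hypothetical
and computeOffset stands in for %DateDaylightSavingsOffset; the real
code also grows the interval by cache.increment on nearby hits, as
shown above):

    var msPerMonth = 30 * 24 * 3600 * 1000;  // approximation
    var cache = { offset: 0, start: 0, end: -1, increment: 0 };

    function cachedOffset(t, computeOffset) {
      // Fast path: t lies inside the interval where the offset is known.
      if (cache.start <= t && t <= cache.end) return cache.offset;
      // Slow path: recompute and shrink the interval to the point t.
      cache.offset = computeOffset(t);
      cache.start = cache.end = t;
      cache.increment = msPerMonth;
      return cache.offset;
    }
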
 
@@ -154,7 +213,7 @@
 var timezone_cache_timezone;
 
 function LocalTimezone(t) {
-  if(t == timezone_cache_time) {
+  if (t == timezone_cache_time) {
     return timezone_cache_timezone;
   }
   var timezone = %DateLocalTimezone(EquivalentTime(t));
diff --git a/src/debug-delay.js b/src/debug-delay.js
index 0b0501f..21cd68a 100644
--- a/src/debug-delay.js
+++ b/src/debug-delay.js
@@ -1208,6 +1208,10 @@
         this.backtraceRequest_(request, response);
       } else if (request.command == 'frame') {
         this.frameRequest_(request, response);
+      } else if (request.command == 'scopes') {
+        this.scopesRequest_(request, response);
+      } else if (request.command == 'scope') {
+        this.scopeRequest_(request, response);
       } else if (request.command == 'evaluate') {
         this.evaluateRequest_(request, response);
       } else if (request.command == 'lookup') {
@@ -1540,7 +1544,7 @@
 
   // With no arguments just keep the selected frame.
   if (request.arguments) {
-    index = request.arguments.number;
+    var index = request.arguments.number;
     if (index < 0 || this.exec_state_.frameCount() <= index) {
       return response.failed('Invalid frame number');
     }
@@ -1551,6 +1555,67 @@
 };
 
 
+DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request,
+                                                                 response) {
+  // Get the frame for which the scope or scopes are requested.
+  // With no frameNumber argument use the currently selected frame.
+  if (request.arguments && !IS_UNDEFINED(request.arguments.frameNumber)) {
+    var frame_index = request.arguments.frameNumber;
+    if (frame_index < 0 || this.exec_state_.frameCount() <= frame_index) {
+      return response.failed('Invalid frame number');
+    }
+    return this.exec_state_.frame(frame_index);
+  } else {
+    return this.exec_state_.frame();
+  }
+};
+
+
+DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
+  // No frames, no scopes.
+  if (this.exec_state_.frameCount() == 0) {
+    return response.failed('No scopes');
+  }
+
+  // Get the frame for which the scopes are requested.
+  var frame = this.frameForScopeRequest_(request, response);
+  
+  // Fill all scopes for this frame.
+  var total_scopes = frame.scopeCount();
+  var scopes = [];
+  for (var i = 0; i < total_scopes; i++) {
+    scopes.push(frame.scope(i));
+  }
+  response.body = {
+    fromScope: 0,
+    toScope: total_scopes,
+    totalScopes: total_scopes,
+    scopes: scopes
+  };
+};
+
+
+DebugCommandProcessor.prototype.scopeRequest_ = function(request, response) {
+  // No frames, no scopes.
+  if (this.exec_state_.frameCount() == 0) {
+    return response.failed('No scopes');
+  }
+
+  // Get the frame for which the scope is requested.
+  var frame = this.frameForScopeRequest_(request, response);
+
+  // With no scope argument just return top scope.
+  var scope_index = 0;
+  if (request.arguments && !IS_UNDEFINED(request.arguments.number)) {
+    scope_index = %ToNumber(request.arguments.number);
+    if (scope_index < 0 || frame.scopeCount() <= scope_index) {
+      return response.failed('Invalid scope number');
+    }
+  }
+
+  response.body = frame.scope(scope_index);
+};
+
+
 DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
   if (!request.arguments) {
     return response.failed('Missing arguments');
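
For reference, a sketch of the response body scopesRequest_ builds for a
frame with three scopes (ref value invented; the type values match the
Debug.ScopeType constants mirrored in d8.js):

    response.body = {
      fromScope: 0,
      toScope: 3,
      totalScopes: 3,
      scopes: [ { index: 0, type: 1 },                       // Local
                { index: 1, type: 3 },                       // Closure
                { index: 2, type: 0, object: { ref: 4 } } ]  // Global
    };
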
diff --git a/src/debug.cc b/src/debug.cc
index 0daf564..e37bfb7 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -382,6 +382,7 @@
     // the code copy and will therefore have no effect on the running code
     // keeping it from using the inlined code.
     if (code->is_keyed_load_stub()) KeyedLoadIC::ClearInlinedVersion(pc());
+    if (code->is_keyed_store_stub()) KeyedStoreIC::ClearInlinedVersion(pc());
   }
 }
 
@@ -389,6 +390,19 @@
 void BreakLocationIterator::ClearDebugBreakAtIC() {
   // Patch the code to the original invoke.
   rinfo()->set_target_address(original_rinfo()->target_address());
+
+  RelocInfo::Mode mode = rmode();
+  if (RelocInfo::IsCodeTarget(mode)) {
+    Address target = original_rinfo()->target_address();
+    Handle<Code> code(Code::GetCodeFromTargetAddress(target));
+
+    // Restore the inlined version of keyed stores to get back to the
+    // fast case.  We need to patch back the keyed store because no
+    // patching happens when running normally.  For keyed loads, the
+    // map check will get patched back when running normally after ICs
+    // have been cleared at GC.
+    if (code->is_keyed_store_stub()) KeyedStoreIC::RestoreInlinedVersion(pc());
+  }
 }
 
 
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 13e41e3..8110e12 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -332,6 +332,8 @@
 DEFINE_bool(log_handles, false, "Log global handle events.")
 DEFINE_bool(log_state_changes, false, "Log state changes.")
 DEFINE_bool(log_suspect, false, "Log suspect operations.")
+DEFINE_bool(compress_log, false,
+            "Compress log to save space (makes log less human-readable).")
 DEFINE_bool(prof, false,
             "Log statistical profiling information (implies --log-code).")
 DEFINE_bool(prof_auto, true,
diff --git a/src/frames-inl.h b/src/frames-inl.h
index 28be430..0e2adb9 100644
--- a/src/frames-inl.h
+++ b/src/frames-inl.h
@@ -43,13 +43,7 @@
 
 
 inline Address StackHandler::address() const {
-  // NOTE: There's an obvious problem with the address of the NULL
-  // stack handler. Right now, it benefits us that the subtraction
-  // leads to a very high address (above everything else on the
-  // stack), but maybe we should stop relying on it?
-  const int displacement = StackHandlerConstants::kAddressDisplacement;
-  Address address = reinterpret_cast<Address>(const_cast<StackHandler*>(this));
-  return address + displacement;
+  return reinterpret_cast<Address>(const_cast<StackHandler*>(this));
 }
 
 
@@ -68,13 +62,7 @@
 
 inline void StackHandler::Iterate(ObjectVisitor* v) const {
   // Stack handlers do not contain any pointers that need to be
-  // traversed. The only field that have to worry about is the code
-  // field which is unused and should always be uninitialized.
-#ifdef DEBUG
-  const int offset = StackHandlerConstants::kCodeOffset;
-  Object* code = Memory::Object_at(address() + offset);
-  ASSERT(Smi::cast(code)->value() == StackHandler::kCodeNotPresent);
-#endif
+  // traversed.
 }
 
 
@@ -122,11 +110,6 @@
 }
 
 
-inline Address StandardFrame::caller_sp() const {
-  return pp();
-}
-
-
 inline Address StandardFrame::caller_fp() const {
   return Memory::Address_at(fp() + StandardFrameConstants::kCallerFPOffset);
 }
@@ -157,13 +140,13 @@
 
 inline Object* JavaScriptFrame::receiver() const {
   const int offset = JavaScriptFrameConstants::kReceiverOffset;
-  return Memory::Object_at(pp() + offset);
+  return Memory::Object_at(caller_sp() + offset);
 }
 
 
 inline void JavaScriptFrame::set_receiver(Object* value) {
   const int offset = JavaScriptFrameConstants::kReceiverOffset;
-  Memory::Object_at(pp() + offset) = value;
+  Memory::Object_at(caller_sp() + offset) = value;
 }
 
 
diff --git a/src/frames.cc b/src/frames.cc
index dd0ea00..5cd8332 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -49,7 +49,9 @@
 
   StackHandler* handler() const { return handler_; }
 
-  bool done() { return handler_->address() > limit_; }
+  bool done() {
+    return handler_ == NULL || handler_->address() > limit_;
+  }
   void Advance() {
     ASSERT(!done());
     handler_ = handler_->next();
@@ -398,7 +400,7 @@
 
 void ExitFrame::ComputeCallerState(State* state) const {
   // Setup the caller state.
-  state->sp = pp();
+  state->sp = caller_sp();
   state->fp = Memory::Address_at(fp() + ExitFrameConstants::kCallerFPOffset);
   state->pc_address
       = reinterpret_cast<Address*>(fp() + ExitFrameConstants::kCallerPCOffset);
@@ -406,7 +408,7 @@
 
 
 Address ExitFrame::GetCallerStackPointer() const {
-  return fp() + ExitFrameConstants::kPPDisplacement;
+  return fp() + ExitFrameConstants::kCallerSPDisplacement;
 }
 
 
@@ -451,12 +453,12 @@
 Object* JavaScriptFrame::GetParameter(int index) const {
   ASSERT(index >= 0 && index < ComputeParametersCount());
   const int offset = JavaScriptFrameConstants::kParam0Offset;
-  return Memory::Object_at(pp() + offset - (index * kPointerSize));
+  return Memory::Object_at(caller_sp() + offset - (index * kPointerSize));
 }
 
 
 int JavaScriptFrame::ComputeParametersCount() const {
-  Address base  = pp() + JavaScriptFrameConstants::kReceiverOffset;
+  Address base  = caller_sp() + JavaScriptFrameConstants::kReceiverOffset;
   Address limit = fp() + JavaScriptFrameConstants::kSavedRegistersOffset;
   return (base - limit) / kPointerSize;
 }
@@ -681,7 +683,7 @@
   const int kBaseOffset = JavaScriptFrameConstants::kSavedRegistersOffset;
   const int kLimitOffset = JavaScriptFrameConstants::kReceiverOffset;
   Object** base = &Memory::Object_at(fp() + kBaseOffset);
-  Object** limit = &Memory::Object_at(pp() + kLimitOffset) + 1;
+  Object** limit = &Memory::Object_at(caller_sp() + kLimitOffset) + 1;
   v->VisitPointers(base, limit);
 }
 
diff --git a/src/frames.h b/src/frames.h
index e250609..f002e12 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -78,9 +78,6 @@
   void Cook(Code* code);
   void Uncook(Code* code);
 
-  // TODO(1233780): Get rid of the code slot in stack handlers.
-  static const int kCodeNotPresent = 0;
-
  private:
   // Accessors.
   inline State state() const;
@@ -132,7 +129,7 @@
   // Accessors.
   Address sp() const { return state_.sp; }
   Address fp() const { return state_.fp; }
-  Address pp() const { return GetCallerStackPointer(); }
+  Address caller_sp() const { return GetCallerStackPointer(); }
 
   Address pc() const { return *pc_address(); }
   void set_pc(Address pc) { *pc_address() = pc; }
@@ -140,7 +137,7 @@
   Address* pc_address() const { return state_.pc_address; }
 
   // Get the id of this stack frame.
-  Id id() const { return static_cast<Id>(OffsetFrom(pp())); }
+  Id id() const { return static_cast<Id>(OffsetFrom(caller_sp())); }
 
   // Checks if this frame includes any stack handlers.
   bool HasHandler() const;
@@ -337,7 +334,6 @@
   virtual void ComputeCallerState(State* state) const;
 
   // Accessors.
-  inline Address caller_sp() const;
   inline Address caller_fp() const;
   inline Address caller_pc() const;
 
diff --git a/src/heap.cc b/src/heap.cc
index 772cf32..eb70f21 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -79,9 +79,15 @@
 
 // semispace_size_ should be a power of 2 and old_generation_size_ should be
 // a multiple of Page::kPageSize.
-int Heap::semispace_size_  = 2*MB;
+#if V8_HOST_ARCH_ARM
+int Heap::semispace_size_  = 512*KB;
+int Heap::old_generation_size_ = 128*MB;
+int Heap::initial_semispace_size_ = 128*KB;
+#else
+int Heap::semispace_size_  = 8*MB;
 int Heap::old_generation_size_ = 512*MB;
-int Heap::initial_semispace_size_ = 256*KB;
+int Heap::initial_semispace_size_ = 512*KB;
+#endif
 
 GCCallback Heap::global_gc_prologue_callback_ = NULL;
 GCCallback Heap::global_gc_epilogue_callback_ = NULL;
@@ -90,9 +96,8 @@
 // ConfigureHeap.
 int Heap::young_generation_size_ = 0;  // Will be 2 * semispace_size_.
 
-// Double the new space after this many scavenge collections.
-int Heap::new_space_growth_limit_ = 8;
-int Heap::scavenge_count_ = 0;
+int Heap::survived_since_last_expansion_ = 0;
+
 Heap::HeapState Heap::gc_state_ = NOT_IN_GC;
 
 int Heap::mc_count_ = 0;
@@ -421,7 +426,7 @@
     old_gen_promotion_limit_ =
         old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
     old_gen_allocation_limit_ =
-        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 3);
+        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
     old_gen_exhausted_ = false;
 
     // If we have used the mark-compact collector to collect the new
@@ -624,16 +629,17 @@
   // Implements Cheney's copying algorithm
   LOG(ResourceEvent("scavenge", "begin"));
 
-  scavenge_count_++;
+  // Used for updating survived_since_last_expansion_ at function end.
+  int survived_watermark = PromotedSpaceSize();
+
   if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
-      scavenge_count_ > new_space_growth_limit_) {
-    // Double the size of the new space, and double the limit.  The next
-    // doubling attempt will occur after the current new_space_growth_limit_
-    // more collections.
+      survived_since_last_expansion_ > new_space_.Capacity()) {
+    // Double the size of new space if there is room to grow and enough
+    // data has survived scavenge since the last expansion.
     // TODO(1240712): NewSpace::Double has a return value which is
     // ignored here.
     new_space_.Double();
-    new_space_growth_limit_ *= 2;
+    survived_since_last_expansion_ = 0;
   }
 
   // Flip the semispaces.  After flipping, to space is empty, from space has
@@ -737,6 +743,10 @@
   // Set age mark.
   new_space_.set_age_mark(new_space_.top());
 
+  // Update how much has survived scavenge.
+  survived_since_last_expansion_ +=
+      (PromotedSpaceSize() - survived_watermark) + new_space_.Size();
+
   LOG(ResourceEvent("scavenge", "end"));
 
   gc_state_ = NOT_IN_GC;
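
The new growth policy expressed as a standalone sketch (JavaScript,
names hypothetical):

    // Double the new space only when there is room to grow and at least
    // a full capacity's worth of data has survived scavenges since the
    // last expansion.
    function shouldDoubleNewSpace(survivedSinceLastExpansion,
                                  capacity, maximumCapacity) {
      return capacity < maximumCapacity &&
             survivedSinceLastExpansion > capacity;
    }
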
@@ -1766,7 +1776,6 @@
   // through the self_reference parameter.
   code->CopyFrom(desc);
   if (sinfo != NULL) sinfo->Serialize(code);  // write scope info
-  LOG(CodeAllocateEvent(code, desc.origin));
 
 #ifdef DEBUG
   code->Verify();
diff --git a/src/heap.h b/src/heap.h
index d8080b6..08b2a99 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -827,8 +827,9 @@
   static int young_generation_size_;
   static int old_generation_size_;
 
-  static int new_space_growth_limit_;
-  static int scavenge_count_;
+  // For keeping track of how much data has survived
+  // scavenge since the last new space expansion.
+  static int survived_since_last_expansion_;
 
   static int always_allocate_scope_depth_;
   static bool context_disposed_pending_;
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 434bf07..b5efe9e 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -117,7 +117,8 @@
   Object* code =
       Heap::CreateCode(desc, NULL, Code::ComputeFlags(Code::STUB), NULL);
   if (!code->IsCode()) return;
-  LOG(CodeCreateEvent("Builtin", Code::cast(code), "CpuFeatures::Probe"));
+  LOG(CodeCreateEvent(Logger::BUILTIN_TAG,
+                      Code::cast(code), "CpuFeatures::Probe"));
   typedef uint64_t (*F0)();
   F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
   supported_ = probe();
@@ -1655,6 +1656,22 @@
 }
 
 
+void Assembler::fcos() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xFF);
+}
+
+
+void Assembler::fsin() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xD9);
+  EMIT(0xFE);
+}
+
+
 void Assembler::fadd(int i) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 79f239d..ae16e70 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -658,6 +658,8 @@
 
   void fabs();
   void fchs();
+  void fcos();
+  void fsin();
 
   void fadd(int i);
   void fsub(int i);
diff --git a/src/ia32/codegen-ia32-inl.h b/src/ia32/codegen-ia32-inl.h
index 49c706d..44e937a 100644
--- a/src/ia32/codegen-ia32-inl.h
+++ b/src/ia32/codegen-ia32-inl.h
@@ -39,6 +39,16 @@
 void DeferredCode::Jump() { __ jmp(&entry_label_); }
 void DeferredCode::Branch(Condition cc) { __ j(cc, &entry_label_); }
 
+void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
+  GenerateFastMathOp(SIN, args);
+}
+
+
+void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
+  GenerateFastMathOp(COS, args);
+}
+
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 61651a9..3357f57 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -317,9 +317,7 @@
         if (function_return_.is_bound()) {
           function_return_.Jump(&undefined);
         } else {
-          // Though this is a (possibly) backward block, the frames
-          // can only differ on their top element.
-          function_return_.Bind(&undefined, 1);
+          function_return_.Bind(&undefined);
           GenerateReturnSequence(&undefined);
         }
       } else if (function_return_.is_linked()) {
@@ -329,9 +327,7 @@
         // compile an artificial return statement just above, and (b) there
         // are return statements in the body but (c) they are all shadowed.
         Result return_value;
-        // Though this is a (possibly) backward block, the frames can
-        // only differ on their top element.
-        function_return_.Bind(&return_value, 1);
+        function_return_.Bind(&return_value);
         GenerateReturnSequence(&return_value);
       }
     }
@@ -718,6 +714,11 @@
 
 class FloatingPointHelper : public AllStatic {
  public:
+  // Code pattern for loading a floating point value. Input value must
+  // be either a smi or a heap number object (fp value). Requirements:
+  // operand in register scratch. Returns operand as floating point
+  // number on FPU stack.
+  static void LoadFloatOperand(MacroAssembler* masm, Register scratch);
   // Code pattern for loading floating point values. Input values must
   // be either smi or heap number objects (fp values). Requirements:
   // operand_1 on TOS+1 , operand_2 on TOS+2; Returns operands as
@@ -734,7 +735,8 @@
   static void AllocateHeapNumber(MacroAssembler* masm,
                                  Label* need_gc,
                                  Register scratch1,
-                                 Register scratch2);
+                                 Register scratch2,
+                                 Register result);
 };
 
 
@@ -1875,13 +1877,19 @@
       // Implement comparison against a constant Smi, inlining the case
       // where both sides are Smis.
       left_side.ToRegister();
-      ASSERT(left_side.is_valid());
-      JumpTarget is_smi;
-      __ test(left_side.reg(), Immediate(kSmiTagMask));
-      is_smi.Branch(zero, &left_side, &right_side, taken);
 
-      // Setup and call the compare stub, which expects its arguments
-      // in registers.
+      // Here we split control flow to the stub call and inlined cases
+      // before finally splitting it to the control destination.  We use
+      // a jump target and branching to duplicate the virtual frame at
+      // the first split.  We manually handle the off-frame references
+      // by reconstituting them on the non-fall-through path.
+      JumpTarget is_smi;
+      Register left_reg = left_side.reg();
+      Handle<Object> right_val = right_side.handle();
+      __ test(left_side.reg(), Immediate(kSmiTagMask));
+      is_smi.Branch(zero, taken);
+
+      // Setup and call the compare stub.
       CompareStub stub(cc, strict);
       Result result = frame_->CallStub(&stub, &left_side, &right_side);
       result.ToRegister();
@@ -1890,12 +1898,12 @@
       dest->true_target()->Branch(cc);
       dest->false_target()->Jump();
 
-      is_smi.Bind(&left_side, &right_side);
-      left_side.ToRegister();
+      is_smi.Bind();
+      left_side = Result(left_reg);
+      right_side = Result(right_val);
       // Test smi equality and comparison by signed int comparison.
       if (IsUnsafeSmi(right_side.handle())) {
         right_side.ToRegister();
-        ASSERT(right_side.is_valid());
         __ cmp(left_side.reg(), Operand(right_side.reg()));
       } else {
         __ cmp(Operand(left_side.reg()), Immediate(right_side.handle()));
@@ -1947,35 +1955,50 @@
         (right_side.is_constant() && !right_side.handle()->IsSmi());
     left_side.ToRegister();
     right_side.ToRegister();
-    JumpTarget is_smi;
-    if (!known_non_smi) {
-      // Check for the smi case.
+
+    if (known_non_smi) {
+      // When non-smi, call out to the compare stub.
+      CompareStub stub(cc, strict);
+      Result answer = frame_->CallStub(&stub, &left_side, &right_side);
+      if (cc == equal) {
+        __ test(answer.reg(), Operand(answer.reg()));
+      } else {
+        __ cmp(answer.reg(), 0);
+      }
+      answer.Unuse();
+      dest->Split(cc);
+    } else {
+      // Here we split control flow to the stub call and inlined cases
+      // before finally splitting it to the control destination.  We use
+      // a jump target and branching to duplicate the virtual frame at
+      // the first split.  We manually handle the off-frame references
+      // by reconstituting them on the non-fall-through path.
+      JumpTarget is_smi;
+      Register left_reg = left_side.reg();
+      Register right_reg = right_side.reg();
+
       Result temp = allocator_->Allocate();
       ASSERT(temp.is_valid());
       __ mov(temp.reg(), left_side.reg());
       __ or_(temp.reg(), Operand(right_side.reg()));
       __ test(temp.reg(), Immediate(kSmiTagMask));
       temp.Unuse();
-      is_smi.Branch(zero, &left_side, &right_side, taken);
-    }
-    // When non-smi, call out to the compare stub, which expects its
-    // arguments in registers.
-    CompareStub stub(cc, strict);
-    Result answer = frame_->CallStub(&stub, &left_side, &right_side);
-    if (cc == equal) {
-      __ test(answer.reg(), Operand(answer.reg()));
-    } else {
-      __ cmp(answer.reg(), 0);
-    }
-    answer.Unuse();
-    if (known_non_smi) {
-      dest->Split(cc);
-    } else {
+      is_smi.Branch(zero, taken);
+      // When non-smi, call out to the compare stub.
+      CompareStub stub(cc, strict);
+      Result answer = frame_->CallStub(&stub, &left_side, &right_side);
+      if (cc == equal) {
+        __ test(answer.reg(), Operand(answer.reg()));
+      } else {
+        __ cmp(answer.reg(), 0);
+      }
+      answer.Unuse();
       dest->true_target()->Branch(cc);
       dest->false_target()->Jump();
-      is_smi.Bind(&left_side, &right_side);
-      left_side.ToRegister();
-      right_side.ToRegister();
+
+      is_smi.Bind();
+      left_side = Result(left_reg);
+      right_side = Result(right_reg);
       __ cmp(left_side.reg(), Operand(right_side.reg()));
       right_side.Unuse();
       left_side.Unuse();
@@ -2328,9 +2351,7 @@
       // code by jumping to the return site.
       function_return_.Jump(&return_value);
     } else {
-      // Though this is a (possibly) backward block, the frames can
-      // only differ on their top element.
-      function_return_.Bind(&return_value, 1);
+      function_return_.Bind(&return_value);
       GenerateReturnSequence(&return_value);
     }
   }
@@ -3255,7 +3276,6 @@
   // handler structure.
   if (FLAG_debug_code) {
     __ mov(eax, Operand::StaticVariable(handler_address));
-    __ lea(eax, Operand(eax, StackHandlerConstants::kAddressDisplacement));
     __ cmp(esp, Operand(eax));
     __ Assert(equal, "stack pointer should point to top handler");
   }
@@ -3265,6 +3285,7 @@
     // The next handler address is on top of the frame.  Unlink from
     // the handler list and drop the rest of this handler from the
     // frame.
+    ASSERT(StackHandlerConstants::kNextOffset == 0);
     frame_->EmitPop(Operand::StaticVariable(handler_address));
     frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
     if (has_unlinks) {
@@ -3292,15 +3313,12 @@
 
       // Reload sp from the top handler, because some statements that we
       // break from (eg, for...in) may have left stuff on the stack.
-      __ mov(edx, Operand::StaticVariable(handler_address));
-      const int kNextOffset = StackHandlerConstants::kNextOffset +
-          StackHandlerConstants::kAddressDisplacement;
-      __ lea(esp, Operand(edx, kNextOffset));
+      __ mov(esp, Operand::StaticVariable(handler_address));
       frame_->Forget(frame_->height() - handler_height);
 
+      ASSERT(StackHandlerConstants::kNextOffset == 0);
       frame_->EmitPop(Operand::StaticVariable(handler_address));
       frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
-      // next_sp popped.
 
       if (i == kReturnShadowIndex) {
         if (!function_return_is_shadowed_) frame_->PrepareForReturn();
@@ -3385,8 +3403,7 @@
   if (has_valid_frame()) {
     // The next handler address is on top of the frame.
     ASSERT(StackHandlerConstants::kNextOffset == 0);
-    frame_->EmitPop(eax);
-    __ mov(Operand::StaticVariable(handler_address), eax);
+    frame_->EmitPop(Operand::StaticVariable(handler_address));
     frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
 
     // Fake a top of stack value (unneeded when FALLING) and set the
@@ -3420,13 +3437,11 @@
       // Reload sp from the top handler, because some statements that
       // we break from (eg, for...in) may have left stuff on the
       // stack.
-      __ mov(edx, Operand::StaticVariable(handler_address));
-      const int kNextOffset = StackHandlerConstants::kNextOffset +
-          StackHandlerConstants::kAddressDisplacement;
-      __ lea(esp, Operand(edx, kNextOffset));
+      __ mov(esp, Operand::StaticVariable(handler_address));
       frame_->Forget(frame_->height() - handler_height);
 
       // Unlink this handler and drop it from the frame.
+      ASSERT(StackHandlerConstants::kNextOffset == 0);
       frame_->EmitPop(Operand::StaticVariable(handler_address));
       frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
 
@@ -4627,48 +4642,82 @@
 // cons.  The slow case will flatten the string, which will ensure that
 // the answer is in the left hand side the next time around.
 void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
+  Comment(masm_, "[ GenerateFastCharCodeAt");
   ASSERT(args->length() == 2);
 
-  JumpTarget slow_case;
-  JumpTarget end;
-  JumpTarget not_a_flat_string;
-  JumpTarget a_cons_string;
-  JumpTarget try_again_with_new_string(JumpTarget::BIDIRECTIONAL);
-  JumpTarget ascii_string;
-  JumpTarget got_char_code;
+  Label slow_case;
+  Label end;
+  Label not_a_flat_string;
+  Label a_cons_string;
+  Label try_again_with_new_string;
+  Label ascii_string;
+  Label got_char_code;
 
   Load(args->at(0));
   Load(args->at(1));
-  // Reserve register ecx, to use as shift amount later
-  Result shift_amount = allocator()->Allocate(ecx);
-  ASSERT(shift_amount.is_valid());
   Result index = frame_->Pop();
-  index.ToRegister();
   Result object = frame_->Pop();
+
+  // Get register ecx to use as shift amount later.
+  Result shift_amount;
+  if (object.is_register() && object.reg().is(ecx)) {
+    Result fresh = allocator_->Allocate();
+    shift_amount = object;
+    object = fresh;
+    __ mov(object.reg(), ecx);
+  }
+  if (index.is_register() && index.reg().is(ecx)) {
+    Result fresh = allocator_->Allocate();
+    shift_amount = index;
+    index = fresh;
+    __ mov(index.reg(), ecx);
+  }
+  // There could be references to ecx in the frame. Allocating will
+  // spill them, otherwise spill explicitly.
+  if (shift_amount.is_valid()) {
+    frame_->Spill(ecx);
+  } else {
+    shift_amount = allocator()->Allocate(ecx);
+  }
+  ASSERT(shift_amount.is_register());
+  ASSERT(shift_amount.reg().is(ecx));
+  ASSERT(allocator_->count(ecx) == 1);
+
+  // We will mutate the index register and possibly the object register.
+  // The case where they are somehow the same register is handled
+  // because we only mutate them in the case where the receiver is a
+  // heap object and the index is not.
   object.ToRegister();
-  // If the receiver is a smi return undefined.
+  index.ToRegister();
+  frame_->Spill(object.reg());
+  frame_->Spill(index.reg());
+
+  // We need a single extra temporary register.
+  Result temp = allocator()->Allocate();
+  ASSERT(temp.is_valid());
+
+  // There is no virtual frame effect from here up to the final result
+  // push.
+
+  // If the receiver is a smi, trigger the slow case.
   ASSERT(kSmiTag == 0);
   __ test(object.reg(), Immediate(kSmiTagMask));
-  slow_case.Branch(zero, not_taken);
+  __ j(zero, &slow_case);
 
-  // Check for negative or non-smi index.
+  // If the index is negative or not a smi, trigger the slow case.
   ASSERT(kSmiTag == 0);
   __ test(index.reg(), Immediate(kSmiTagMask | 0x80000000));
-  slow_case.Branch(not_zero, not_taken);
-  // Get rid of the smi tag on the index.
-  frame_->Spill(index.reg());
+  __ j(not_zero, &slow_case);
+  // Untag the index.
   __ sar(index.reg(), kSmiTagSize);
 
-  try_again_with_new_string.Bind(&object, &index, &shift_amount);
-  // Get the type of the heap object.
-  Result object_type = allocator()->Allocate();
-  ASSERT(object_type.is_valid());
-  __ mov(object_type.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
-  __ movzx_b(object_type.reg(),
-             FieldOperand(object_type.reg(), Map::kInstanceTypeOffset));
-  // We don't handle non-strings.
-  __ test(object_type.reg(), Immediate(kIsNotStringMask));
-  slow_case.Branch(not_zero, not_taken);
+  __ bind(&try_again_with_new_string);
+  // Fetch the instance type of the receiver into ecx.
+  __ mov(ecx, FieldOperand(object.reg(), HeapObject::kMapOffset));
+  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+  // If the receiver is not a string trigger the slow case.
+  __ test(ecx, Immediate(kIsNotStringMask));
+  __ j(not_zero, &slow_case);
 
   // Here we make assumptions about the tag values and the shifts needed.
   // See the comment in objects.h.
@@ -4677,86 +4726,75 @@
          String::kMediumLengthShift);
   ASSERT(kShortStringTag + String::kLongLengthShift ==
          String::kShortLengthShift);
-  __ mov(shift_amount.reg(), Operand(object_type.reg()));
-  __ and_(shift_amount.reg(), kStringSizeMask);
-  __ add(Operand(shift_amount.reg()), Immediate(String::kLongLengthShift));
-  // Get the length field. Temporary register now used for length.
-  Result length = object_type;
-  __ mov(length.reg(), FieldOperand(object.reg(), String::kLengthOffset));
-  __ shr(length.reg());  // shift_amount, in ecx, is implicit operand.
+  __ and_(ecx, kStringSizeMask);
+  __ add(Operand(ecx), Immediate(String::kLongLengthShift));
+  // Fetch the length field into the temporary register.
+  __ mov(temp.reg(), FieldOperand(object.reg(), String::kLengthOffset));
+  __ shr(temp.reg());  // The shift amount in ecx is an implicit operand.
   // Check for index out of range.
-  __ cmp(index.reg(), Operand(length.reg()));
-  slow_case.Branch(greater_equal, not_taken);
-  length.Unuse();
-  // Load the object type into object_type again.
-  // These two instructions are duplicated from above, to save a register.
-  __ mov(object_type.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
-  __ movzx_b(object_type.reg(),
-             FieldOperand(object_type.reg(), Map::kInstanceTypeOffset));
+  __ cmp(index.reg(), Operand(temp.reg()));
+  __ j(greater_equal, &slow_case);
+  // Reload the instance type (into the temp register this time).
+  __ mov(temp.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
+  __ movzx_b(temp.reg(), FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
 
   // We need special handling for non-flat strings.
   ASSERT(kSeqStringTag == 0);
-  __ test(object_type.reg(), Immediate(kStringRepresentationMask));
-  not_a_flat_string.Branch(not_zero, &object, &index, &object_type,
-                           &shift_amount, not_taken);
-  shift_amount.Unuse();
+  __ test(temp.reg(), Immediate(kStringRepresentationMask));
+  __ j(not_zero, &not_a_flat_string);
   // Check for 1-byte or 2-byte string.
-  __ test(object_type.reg(), Immediate(kStringEncodingMask));
-  ascii_string.Branch(not_zero, &object, &index, &object_type, taken);
+  __ test(temp.reg(), Immediate(kStringEncodingMask));
+  __ j(not_zero, &ascii_string);
 
   // 2-byte string.
-  // Load the 2-byte character code.
-  __ movzx_w(object_type.reg(), FieldOperand(object.reg(),
-                                             index.reg(),
-                                             times_2,
-                                             SeqTwoByteString::kHeaderSize));
-  object.Unuse();
-  index.Unuse();
-  got_char_code.Jump(&object_type);
+  // Load the 2-byte character code into the temp register.
+  __ movzx_w(temp.reg(), FieldOperand(object.reg(),
+                                      index.reg(),
+                                      times_2,
+                                      SeqTwoByteString::kHeaderSize));
+  __ jmp(&got_char_code);
 
   // ASCII string.
-  ascii_string.Bind(&object, &index, &object_type);
-  // Load the byte.
-  __ movzx_b(object_type.reg(), FieldOperand(object.reg(),
-                                             index.reg(),
-                                             times_1,
-                                             SeqAsciiString::kHeaderSize));
-  object.Unuse();
-  index.Unuse();
-  got_char_code.Bind(&object_type);
+  __ bind(&ascii_string);
+  // Load the byte into the temp register.
+  __ movzx_b(temp.reg(), FieldOperand(object.reg(),
+                                      index.reg(),
+                                      times_1,
+                                      SeqAsciiString::kHeaderSize));
+  __ bind(&got_char_code);
   ASSERT(kSmiTag == 0);
-  __ shl(object_type.reg(), kSmiTagSize);
-  frame_->Push(&object_type);
-  end.Jump();
+  __ shl(temp.reg(), kSmiTagSize);
+  __ jmp(&end);
 
   // Handle non-flat strings.
-  not_a_flat_string.Bind(&object, &index, &object_type, &shift_amount);
-  __ and_(object_type.reg(), kStringRepresentationMask);
-  __ cmp(object_type.reg(), kConsStringTag);
-  a_cons_string.Branch(equal, &object, &index, &shift_amount, taken);
-  __ cmp(object_type.reg(), kSlicedStringTag);
-  slow_case.Branch(not_equal, not_taken);
-  object_type.Unuse();
+  __ bind(&not_a_flat_string);
+  __ and_(temp.reg(), kStringRepresentationMask);
+  __ cmp(temp.reg(), kConsStringTag);
+  __ j(equal, &a_cons_string);
+  __ cmp(temp.reg(), kSlicedStringTag);
+  __ j(not_equal, &slow_case);
 
   // SlicedString.
-  // Add the offset to the index.
+  // Add the offset to the index and trigger the slow case on overflow.
   __ add(index.reg(), FieldOperand(object.reg(), SlicedString::kStartOffset));
-  slow_case.Branch(overflow);
+  __ j(overflow, &slow_case);
   // Getting the underlying string is done by running the cons string code.
 
   // ConsString.
-  a_cons_string.Bind(&object, &index, &shift_amount);
-  // Get the first of the two strings.
-  frame_->Spill(object.reg());
-  // Both sliced and cons strings store their source string at the same place.
+  __ bind(&a_cons_string);
+  // Get the first of the two strings.  Both sliced and cons strings
+  // store their source string at the same offset.
   ASSERT(SlicedString::kBufferOffset == ConsString::kFirstOffset);
   __ mov(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset));
-  try_again_with_new_string.Jump(&object, &index, &shift_amount);
+  __ jmp(&try_again_with_new_string);
 
-  // No results live at this point.
-  slow_case.Bind();
-  frame_->Push(Factory::undefined_value());
-  end.Bind();
+  __ bind(&slow_case);
+  // Move the undefined value into the result register, which will
+  // trigger the slow case.
+  __ Set(temp.reg(), Immediate(Factory::undefined_value()));
+
+  __ bind(&end);
+  frame_->Push(&temp);
 }
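
Behaviorally, the rewritten fast path follows this sketch (JavaScript;
the helper names are hypothetical, and returning undefined signals the
caller to take the flattening slow case):

    function fastCharCodeAt(obj, index) {
      if (!isString(obj) || !isNonNegativeSmi(index)) return undefined;
      for (;;) {
        if (index >= stringLength(obj)) return undefined;    // slow case
        if (isFlatString(obj)) return charCode(obj, index);  // ascii or 2-byte
        if (isSlicedString(obj)) index += startOffset(obj);  // adjust index
        obj = underlyingString(obj);  // cons: first; sliced: buffer
      }
    }
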
 
 
@@ -4902,6 +4940,98 @@
 }
 
 
+void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+  frame_->SpillAll();
+
+  // Make sure the frame is aligned like the OS expects.
+  static const int kFrameAlignment = OS::ActivationFrameAlignment();
+  if (kFrameAlignment > 0) {
+    ASSERT(IsPowerOf2(kFrameAlignment));
+    __ mov(edi, Operand(esp));  // Save in callee-saved register.
+    __ and_(esp, -kFrameAlignment);
+  }
+
+  // Call V8::RandomPositiveSmi().
+  __ call(FUNCTION_ADDR(V8::RandomPositiveSmi), RelocInfo::RUNTIME_ENTRY);
+
+  // Restore stack pointer from callee-saved register edi.
+  if (kFrameAlignment > 0) {
+    __ mov(esp, Operand(edi));
+  }
+
+  Result result = allocator_->Allocate(eax);
+  frame_->Push(&result);
+}
+
+
+void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
+  JumpTarget done;
+  JumpTarget call_runtime;
+  ASSERT(args->length() == 1);
+
+  // Load number and duplicate it.
+  Load(args->at(0));
+  frame_->Dup();
+
+  // Get the number into an unaliased register and load it onto the
+  // floating point stack still leaving one copy on the frame.
+  Result number = frame_->Pop();
+  number.ToRegister();
+  frame_->Spill(number.reg());
+  FloatingPointHelper::LoadFloatOperand(masm_, number.reg());
+  number.Unuse();
+
+  // Perform the operation on the number.
+  switch (op) {
+    case SIN:
+      __ fsin();
+      break;
+    case COS:
+      __ fcos();
+      break;
+  }
+
+  // Go to the slow case if the argument to the operation is out of range.
+  __ fnstsw_ax();
+  __ sahf();
+  call_runtime.Branch(parity_even, not_taken);
+
+  // Allocate heap number for result if possible.
+  Result scratch1 = allocator()->Allocate();
+  Result scratch2 = allocator()->Allocate();
+  Result heap_number = allocator()->Allocate();
+  FloatingPointHelper::AllocateHeapNumber(masm_,
+                                          call_runtime.entry_label(),
+                                          scratch1.reg(),
+                                          scratch2.reg(),
+                                          heap_number.reg());
+  scratch1.Unuse();
+  scratch2.Unuse();
+
+  // Store the result in the allocated heap number.
+  __ fstp_d(FieldOperand(heap_number.reg(), HeapNumber::kValueOffset));
+  // Replace the extra copy of the argument with the result.
+  frame_->SetElementAt(0, &heap_number);
+  done.Jump();
+
+  call_runtime.Bind();
+  // Free ST(0) which was not popped before calling into the runtime.
+  __ ffree(0);
+  Result answer;
+  switch (op) {
+    case SIN:
+      answer = frame_->CallRuntime(Runtime::kMath_sin, 1);
+      break;
+    case COS:
+      answer = frame_->CallRuntime(Runtime::kMath_cos, 1);
+      break;
+  }
+  frame_->Push(&answer);
+  done.Bind();
+}
+
+
 void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
   if (CheckForInlineRuntimeCall(node)) {
     return;
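
Behaviorally, the fast math path above amounts to the following sketch
(JavaScript; the 2^63 bound is the documented x87 fsin/fcos domain,
signalled through C2 and tested via the parity flag after fnstsw/sahf):

    // Sketch only: use the fast instruction when the argument is in
    // range, otherwise fall back to the runtime implementation.
    function fastSin(x, runtimeSin) {
      if (Math.abs(x) < Math.pow(2, 63)) return Math.sin(x);
      return runtimeSin(x);  // the call_runtime path (Runtime::kMath_sin)
    }
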
@@ -5042,7 +5172,10 @@
         break;
 
       case Token::SUB: {
-        UnarySubStub stub;
+        bool overwrite =
+            (node->AsBinaryOperation() != NULL &&
+             node->AsBinaryOperation()->ResultOverwriteAllowed());
+        UnarySubStub stub(overwrite);
         // TODO(1222589): remove dependency of TOS being cached inside stub
         Result operand = frame_->Pop();
         Result answer = frame_->CallStub(&stub, &operand);
@@ -5448,18 +5581,6 @@
 }
 
 
-class InstanceofStub: public CodeStub {
- public:
-  InstanceofStub() { }
-
-  void Generate(MacroAssembler* masm);
-
- private:
-  Major MajorKey() { return Instanceof; }
-  int MinorKey() { return 0; }
-};
-
-
 void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
   Comment cmnt(masm_, "[ CompareOperation");
 
@@ -5730,9 +5851,58 @@
 }
 
 
+class DeferredReferenceSetKeyedValue: public DeferredCode {
+ public:
+  DeferredReferenceSetKeyedValue(Register value,
+                                 Register key,
+                                 Register receiver)
+      : value_(value), key_(key), receiver_(receiver) {
+    set_comment("[ DeferredReferenceSetKeyedValue");
+  }
+
+  virtual void Generate();
+
+  Label* patch_site() { return &patch_site_; }
+
+ private:
+  Register value_;
+  Register key_;
+  Register receiver_;
+  Label patch_site_;
+};
+
+
+void DeferredReferenceSetKeyedValue::Generate() {
+  __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
+  // Push receiver and key arguments on the stack.
+  __ push(receiver_);
+  __ push(key_);
+  // Move value argument to eax as expected by the IC stub.
+  if (!value_.is(eax)) __ mov(eax, value_);
+  // Call the IC stub.
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+  __ call(ic, RelocInfo::CODE_TARGET);
+  // The delta from the start of the map-compare instruction to the
+  // test instruction.  We use masm_-> directly here instead of the
+  // __ macro because the macro can expand into something that cannot
+  // be used in an expression context.  This is encountered when
+  // doing generated code coverage tests.
+  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
+  // Here we use masm_-> instead of the __ macro because this is the
+  // instruction that gets patched and coverage code gets in the way.
+  masm_->test(eax, Immediate(-delta_to_patch_site));
+  // Restore value (returned from store IC), key and receiver
+  // registers.
+  if (!value_.is(eax)) __ mov(value_, eax);
+  __ pop(key_);
+  __ pop(receiver_);
+}
+
+
 #undef __
 #define __ ACCESS_MASM(masm)
 
+
 Handle<String> Reference::GetName() {
   ASSERT(type_ == NAMED);
   Property* property = expression_->AsProperty();
@@ -5851,7 +6021,7 @@
       // a check against an invalid map.  In the inline cache code, we
       // patch the map check if appropriate.
       if (cgen_->loop_nesting() > 0) {
-        Comment cmnt(masm, "[ Inlined array index load");
+        Comment cmnt(masm, "[ Inlined load from keyed Property");
 
         Result key = cgen_->frame()->Pop();
         Result receiver = cgen_->frame()->Pop();
@@ -5992,9 +6162,10 @@
 void Reference::SetValue(InitState init_state) {
   ASSERT(cgen_->HasValidEntryRegisters());
   ASSERT(!is_illegal());
+  MacroAssembler* masm = cgen_->masm();
   switch (type_) {
     case SLOT: {
-      Comment cmnt(cgen_->masm(), "[ Store to Slot");
+      Comment cmnt(masm, "[ Store to Slot");
       Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
       ASSERT(slot != NULL);
       cgen_->StoreToSlot(slot, init_state);
@@ -6002,7 +6173,7 @@
     }
 
     case NAMED: {
-      Comment cmnt(cgen_->masm(), "[ Store to named Property");
+      Comment cmnt(masm, "[ Store to named Property");
       cgen_->frame()->Push(GetName());
       Result answer = cgen_->frame()->CallStoreIC();
       cgen_->frame()->Push(&answer);
@@ -6010,9 +6181,104 @@
     }
 
     case KEYED: {
-      Comment cmnt(cgen_->masm(), "[ Store to keyed Property");
-      Result answer = cgen_->frame()->CallKeyedStoreIC();
-      cgen_->frame()->Push(&answer);
+      Comment cmnt(masm, "[ Store to keyed Property");
+
+      // Generate inlined version of the keyed store if the code is in
+      // a loop and the key is likely to be a smi.
+      Property* property = expression()->AsProperty();
+      ASSERT(property != NULL);
+      SmiAnalysis* key_smi_analysis = property->key()->type();
+
+      if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
+        Comment cmnt(masm, "[ Inlined store to keyed Property");
+
+        // Get the receiver, key and value into registers.
+        Result value = cgen_->frame()->Pop();
+        Result key = cgen_->frame()->Pop();
+        Result receiver = cgen_->frame()->Pop();
+
+        Result tmp = cgen_->allocator_->Allocate();
+        ASSERT(tmp.is_valid());
+
+        // Determine whether the value is a constant before putting it
+        // in a register.
+        bool value_is_constant = value.is_constant();
+
+        // Make sure that value, key and receiver are in registers.
+        value.ToRegister();
+        key.ToRegister();
+        receiver.ToRegister();
+
+        DeferredReferenceSetKeyedValue* deferred =
+            new DeferredReferenceSetKeyedValue(value.reg(),
+                                               key.reg(),
+                                               receiver.reg());
+
+        // Check that the value is a smi if it is not a constant.  We
+        // can skip the write barrier for smis and constants.
+        if (!value_is_constant) {
+          __ test(value.reg(), Immediate(kSmiTagMask));
+          deferred->Branch(not_zero);
+        }
+
+        // Check that the key is a non-negative smi.
+        __ test(key.reg(), Immediate(kSmiTagMask | 0x80000000));
+        deferred->Branch(not_zero);
+
+        // Check that the receiver is not a smi.
+        __ test(receiver.reg(), Immediate(kSmiTagMask));
+        deferred->Branch(zero);
+
+        // Check that the receiver is a JSArray.
+        __ mov(tmp.reg(),
+               FieldOperand(receiver.reg(), HeapObject::kMapOffset));
+        __ movzx_b(tmp.reg(),
+                   FieldOperand(tmp.reg(), Map::kInstanceTypeOffset));
+        __ cmp(tmp.reg(), JS_ARRAY_TYPE);
+        deferred->Branch(not_equal);
+
+        // Check that the key is within bounds.  Both the key and the
+        // length of the JSArray are smis.
+        __ cmp(key.reg(),
+               FieldOperand(receiver.reg(), JSArray::kLengthOffset));
+        deferred->Branch(greater_equal);
+
+        // Get the elements array from the receiver and check that it
+        // is not a dictionary.
+        __ mov(tmp.reg(),
+               FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+        // Bind the deferred code patch site to be able to locate the
+        // fixed array map comparison.  When debugging, we patch this
+        // comparison to always fail so that we will hit the IC call
+        // in the deferred code which will allow the debugger to
+        // break for fast case stores.
+        __ bind(deferred->patch_site());
+        __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
+               Immediate(Factory::fixed_array_map()));
+        deferred->Branch(not_equal);
+
+        // Store the value.
+        __ mov(Operand(tmp.reg(),
+                       key.reg(),
+                       times_2,
+                       Array::kHeaderSize - kHeapObjectTag),
+               value.reg());
+        __ IncrementCounter(&Counters::keyed_store_inline, 1);
+
+        deferred->BindExit();
+
+        cgen_->frame()->Push(&receiver);
+        cgen_->frame()->Push(&key);
+        cgen_->frame()->Push(&value);
+      } else {
+        Result answer = cgen_->frame()->CallKeyedStoreIC();
+        // Make sure that we do not have a test instruction after the
+        // call.  A test instruction after the call is used to
+        // indicate that we have generated an inline version of the
+        // keyed store.
+        __ nop();
+        cgen_->frame()->Push(&answer);
+      }
       break;
     }
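
The inlined store only applies under a set of guards; as a sketch
(helper names hypothetical), the deferred IC call is taken unless all
of these hold:

    // All guards must pass; otherwise the deferred code calls the
    // KeyedStoreIC (see DeferredReferenceSetKeyedValue above).
    function canStoreInline(receiver, key, value, valueIsConstant) {
      return (valueIsConstant || isSmi(value)) &&  // no write barrier needed
             isSmi(key) && key >= 0 &&             // non-negative smi key
             !isSmi(receiver) && isJSArray(receiver) &&
             key < receiver.length &&              // within array bounds
             hasFastElements(receiver);            // elements not a dictionary
    }
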
 
@@ -6269,7 +6535,8 @@
           FloatingPointHelper::AllocateHeapNumber(masm,
                                                   &call_runtime,
                                                   ecx,
-                                                  edx);
+                                                  edx,
+                                                  eax);
           __ bind(&skip_allocation);
           break;
         default: UNREACHABLE();
@@ -6377,7 +6644,7 @@
             // Fall through!
           case NO_OVERWRITE:
             FloatingPointHelper::AllocateHeapNumber(masm, &call_runtime,
-                                                    ecx, edx);
+                                                    ecx, edx, eax);
             __ bind(&skip_allocation);
             break;
           default: UNREACHABLE();
@@ -6462,22 +6729,42 @@
 void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
                                              Label* need_gc,
                                              Register scratch1,
-                                             Register scratch2) {
+                                             Register scratch2,
+                                             Register result) {
   ExternalReference allocation_top =
       ExternalReference::new_space_allocation_top_address();
   ExternalReference allocation_limit =
       ExternalReference::new_space_allocation_limit_address();
   __ mov(Operand(scratch1), Immediate(allocation_top));
-  __ mov(eax, Operand(scratch1, 0));
-  __ lea(scratch2, Operand(eax, HeapNumber::kSize));  // scratch2: new top
+  __ mov(result, Operand(scratch1, 0));
+  __ lea(scratch2, Operand(result, HeapNumber::kSize));  // scratch2: new top
   __ cmp(scratch2, Operand::StaticVariable(allocation_limit));
   __ j(above, need_gc, not_taken);
 
   __ mov(Operand(scratch1, 0), scratch2);  // store new top
-  __ mov(Operand(eax, HeapObject::kMapOffset),
+  __ mov(Operand(result, HeapObject::kMapOffset),
          Immediate(Factory::heap_number_map()));
   // Tag old top and use as result.
-  __ add(Operand(eax), Immediate(kHeapObjectTag));
+  __ add(Operand(result), Immediate(kHeapObjectTag));
+}
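
The rewritten helper is a bump-pointer allocation in new space, now
parameterized by a result register instead of hard-coding eax. A minimal
C++ sketch of the same logic (the names, the 12-byte heap-number size and
the tag value are illustrative assumptions, not taken from this diff):

    #include <cstddef>

    static const int kHeapNumberSizeSketch = 12;  // 4-byte map + 8-byte value
    static const int kHeapObjectTagSketch = 1;

    // Sketch only: 'top' and 'limit' stand in for the new-space
    // allocation_top/allocation_limit external references.
    char* AllocateHeapNumberSketch(char** top, char* limit, bool* need_gc) {
      char* result = *top;
      char* new_top = result + kHeapNumberSizeSketch;
      if (new_top > limit) {           // new space exhausted: caller must GC
        *need_gc = true;
        return NULL;
      }
      *top = new_top;                  // commit the new allocation top
      // The real stub also stores the heap-number map at kMapOffset before
      // tagging the pointer.
      return result + kHeapObjectTagSketch;
    }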
+
+
+void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
+                                           Register scratch) {
+  Label load_smi, done;
+
+  __ test(scratch, Immediate(kSmiTagMask));
+  __ j(zero, &load_smi, not_taken);
+  __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
+  __ jmp(&done);
+
+  __ bind(&load_smi);
+  __ sar(scratch, kSmiTagSize);
+  __ push(scratch);
+  __ fild_s(Operand(esp, 0));
+  __ pop(scratch);
+
+  __ bind(&done);
 }
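
LoadFloatOperand dispatches on the smi tag: an untagged smi is pushed and
loaded with fild_s, while a heap number's double payload is loaded directly
with fld_d. The classification it performs looks roughly like this in C++
(the 32-bit tagging constants are assumptions matching the ia32 scheme):

    #include <stdint.h>

    static const intptr_t kSmiTagMaskSketch = 1;  // low bit clear => smi
    static const int kSmiTagSizeSketch = 1;

    // Sketch: recover the integer from a tagged value, or report that the
    // value is a heap object pointer instead.
    bool UntagSmi(intptr_t tagged, int32_t* out) {
      if ((tagged & kSmiTagMaskSketch) != 0) return false;  // heap object
      *out = static_cast<int32_t>(tagged >> kSmiTagSizeSketch);
      return true;
    }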
 
 
@@ -6579,13 +6866,21 @@
   __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
   __ cmp(edx, Factory::heap_number_map());
   __ j(not_equal, &slow);
-  __ mov(edx, Operand(eax));
-  // edx: operand
-  FloatingPointHelper::AllocateHeapNumber(masm, &undo, ebx, ecx);
-  // eax: allocated 'empty' number
-  __ fld_d(FieldOperand(edx, HeapNumber::kValueOffset));
-  __ fchs();
-  __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+  if (overwrite_) {
+    __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
+    __ xor_(edx, HeapNumber::kSignMask);  // Flip sign.
+    __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
+  } else {
+    __ mov(edx, Operand(eax));
+    // edx: operand
+    FloatingPointHelper::AllocateHeapNumber(masm, &undo, ebx, ecx, eax);
+    // eax: allocated 'empty' number
+    __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
+    __ xor_(ecx, HeapNumber::kSignMask);  // Flip sign.
+    __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
+    __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
+    __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
+  }
 
   __ bind(&done);
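
The negation no longer needs the FPU: an IEEE-754 double keeps its sign in
the top bit of the high (exponent) word, so XOR-ing HeapNumber::kSignMask
into that word negates the value, and the overwrite_ case can do it in
place. A hedged C++ equivalent of the bit trick:

    #include <stdint.h>
    #include <string.h>

    // Sketch: negate a double by flipping bit 63, as the stub does by
    // XOR-ing the sign mask into the exponent word of the heap number.
    double NegateByBitFlip(double value) {
      uint64_t bits;
      memcpy(&bits, &value, sizeof(bits));
      bits ^= static_cast<uint64_t>(1) << 63;  // the sign bit
      memcpy(&value, &bits, sizeof(bits));
      return value;
    }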
 
@@ -6729,7 +7024,7 @@
       // The representation of NaN values has all exponent bits (52..62) set,
       // and not all mantissa bits (0..51) clear.
       // Read top bits of double representation (second word of value).
-      __ mov(eax, FieldOperand(edx, HeapNumber::kValueOffset + kPointerSize));
+      __ mov(eax, FieldOperand(edx, HeapNumber::kExponentOffset));
       // Test that exponent bits are all set.
       __ not_(eax);
       __ test(eax, Immediate(0x7ff00000));
@@ -6739,7 +7034,7 @@
       // Shift out flag and all exponent bits, retaining only mantissa.
       __ shl(eax, 12);
       // Or with all low-bits of mantissa.
-      __ or_(eax, FieldOperand(edx, HeapNumber::kValueOffset));
+      __ or_(eax, FieldOperand(edx, HeapNumber::kMantissaOffset));
       // Return the equal condition (zero) if all bits in the mantissa are
       // zero (it's an Infinity) and not-equal (non-zero) if not (it's a NaN).
       __ ret(0);
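
Switching from kValueOffset arithmetic to the named kExponentOffset and
kMantissaOffset makes the test above easier to follow: a double is
non-finite when all eleven exponent bits are set, and it is a NaN (rather
than an Infinity) when the mantissa is additionally non-zero. The same
test in portable C++ (a sketch, not V8 code):

    #include <stdint.h>
    #include <string.h>

    bool IsNaNSketch(double value) {
      uint64_t bits;
      memcpy(&bits, &value, sizeof(bits));
      uint32_t exponent_word = static_cast<uint32_t>(bits >> 32);
      uint32_t mantissa_word = static_cast<uint32_t>(bits);
      if ((exponent_word & 0x7FF00000u) != 0x7FF00000u) return false;
      // Shift out sign and exponent bits, then OR in the low mantissa word.
      return ((exponent_word << 12) | mantissa_word) != 0;  // zero => Infinity
    }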
@@ -6937,26 +7232,33 @@
 
 
 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
-  ASSERT(StackHandlerConstants::kSize == 6 * kPointerSize);  // adjust this code
-  ExternalReference handler_address(Top::k_handler_address);
-  __ mov(edx, Operand::StaticVariable(handler_address));
-  __ mov(ecx, Operand(edx, -1 * kPointerSize));  // get next in chain
-  __ mov(Operand::StaticVariable(handler_address), ecx);
-  __ mov(esp, Operand(edx));
-  __ pop(edi);
-  __ pop(ebp);
-  __ pop(edx);  // remove code pointer
-  __ pop(edx);  // remove state
+  // eax holds the exception.
 
-  // Before returning we restore the context from the frame pointer if not NULL.
-  // The frame pointer is NULL in the exception handler of a JS entry frame.
-  __ xor_(esi, Operand(esi));  // tentatively set context pointer to NULL
+  // Adjust this code if not the case.
+  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+  // Drop the sp to the top of the handler.
+  ExternalReference handler_address(Top::k_handler_address);
+  __ mov(esp, Operand::StaticVariable(handler_address));
+
+  // Restore next handler and frame pointer, discard handler state.
+  ASSERT(StackHandlerConstants::kNextOffset == 0);
+  __ pop(Operand::StaticVariable(handler_address));
+  ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
+  __ pop(ebp);
+  __ pop(edx);  // Remove state.
+
+  // Before returning we restore the context from the frame pointer if
+  // not NULL.  The frame pointer is NULL in the exception handler of
+  // a JS entry frame.
+  __ xor_(esi, Operand(esi));  // Tentatively set context pointer to NULL.
   Label skip;
   __ cmp(ebp, 0);
   __ j(equal, &skip, not_taken);
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   __ bind(&skip);
 
+  ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
   __ ret(0);
 }
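
After this change a stack handler occupies four words: the link to the next
handler, the saved frame pointer, the handler state, and the return address,
in that order. Sketched as a struct whose field order mirrors the
StackHandlerConstants offsets (illustrative only):

    // Field order matches kNextOffset (0), kFPOffset (1), kStateOffset (2)
    // and kPCOffset (3), each in units of kPointerSize.
    struct StackHandlerSketch {
      StackHandlerSketch* next;  // link to the enclosing handler
      void* fp;                  // saved ebp (NULL for JS entry frames)
      int state;                 // TRY_CATCH, TRY_FINALLY or ENTRY
      void* pc;                  // return address, consumed by ret(0)
    };

Pointing esp directly at the handler lets the code pop the fields in exactly
this order, which is why the ASSERTs pin each offset.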
 
@@ -7042,51 +7344,49 @@
 
 
 void CEntryStub::GenerateThrowOutOfMemory(MacroAssembler* masm) {
-  // Fetch top stack handler.
+  // Adjust this code if not the case.
+  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+  // Drop sp to the top stack handler.
   ExternalReference handler_address(Top::k_handler_address);
-  __ mov(edx, Operand::StaticVariable(handler_address));
+  __ mov(esp, Operand::StaticVariable(handler_address));
 
   // Unwind the handlers until the ENTRY handler is found.
   Label loop, done;
   __ bind(&loop);
   // Load the type of the current stack handler.
-  const int kStateOffset = StackHandlerConstants::kAddressDisplacement +
-      StackHandlerConstants::kStateOffset;
-  __ cmp(Operand(edx, kStateOffset), Immediate(StackHandler::ENTRY));
+  const int kStateOffset = StackHandlerConstants::kStateOffset;
+  __ cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY));
   __ j(equal, &done);
   // Fetch the next handler in the list.
-  const int kNextOffset = StackHandlerConstants::kAddressDisplacement +
-      StackHandlerConstants::kNextOffset;
-  __ mov(edx, Operand(edx, kNextOffset));
+  const int kNextOffset = StackHandlerConstants::kNextOffset;
+  __ mov(esp, Operand(esp, kNextOffset));
   __ jmp(&loop);
   __ bind(&done);
 
   // Set the top handler address to next handler past the current ENTRY handler.
-  __ mov(eax, Operand(edx, kNextOffset));
-  __ mov(Operand::StaticVariable(handler_address), eax);
+  ASSERT(StackHandlerConstants::kNextOffset == 0);
+  __ pop(Operand::StaticVariable(handler_address));
 
   // Set external caught exception to false.
-  __ mov(eax, false);
   ExternalReference external_caught(Top::k_external_caught_exception_address);
+  __ mov(eax, false);
   __ mov(Operand::StaticVariable(external_caught), eax);
 
   // Set pending exception and eax to out of memory exception.
-  __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
   ExternalReference pending_exception(Top::k_pending_exception_address);
+  __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
   __ mov(Operand::StaticVariable(pending_exception), eax);
 
-  // Restore the stack to the address of the ENTRY handler
-  __ mov(esp, Operand(edx));
-
   // Clear the context pointer.
   __ xor_(esi, Operand(esi));
 
-  // Restore registers from handler.
-  __ pop(edi);  // PP
-  __ pop(ebp);  // FP
-  __ pop(edx);  // Code
-  __ pop(edx);  // State
+  // Restore fp from handler and discard handler state.
+  ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
+  __ pop(ebp);
+  __ pop(edx);  // State.
 
+  ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
   __ ret(0);
 }
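
The out-of-memory path now unwinds with esp itself as the cursor: it walks
the next links until it reaches the ENTRY handler and then pops that
handler's next field back into the chain head. In C++ terms (the types and
the ENTRY value are placeholders, mirroring the sketch above):

    struct HandlerSketch { HandlerSketch* next; void* fp; int state; void* pc; };
    static const int kEntryStateSketch = 2;  // stands in for StackHandler::ENTRY

    // Sketch of the unwinding loop in GenerateThrowOutOfMemory.
    HandlerSketch* UnwindToEntry(HandlerSketch* top) {
      while (top->state != kEntryStateSketch) top = top->next;
      return top->next;  // becomes the new top handler
    }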
 
@@ -7097,12 +7397,11 @@
   // ebp: frame pointer  (restored after C call)
   // esp: stack pointer  (restored after C call)
   // esi: current context (C callee-saved)
-  // edi: caller's parameter pointer pp  (C callee-saved)
+  // edi: JS function of the caller (C callee-saved)
 
-  // NOTE: Invocations of builtins may return failure objects
-  // instead of a proper result. The builtin entry handles
-  // this by performing a garbage collection and retrying the
-  // builtin once.
+  // NOTE: Invocations of builtins may return failure objects instead
+  // of a proper result. The builtin entry handles this by performing
+  // a garbage collection and retrying the builtin (twice).
 
   StackFrame::Type frame_type = is_debug_break ?
       StackFrame::EXIT_DEBUG :
@@ -7205,7 +7504,6 @@
   // Invoke: Link this frame into the handler chain.
   __ bind(&invoke);
   __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
-  __ push(eax);  // flush TOS
 
   // Clear any pending exceptions.
   __ mov(edx,
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index 9b609a1..e409513 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -518,6 +518,15 @@
 
   void GenerateGetFramePointer(ZoneList<Expression*>* args);
 
+  // Fast support for Math.random().
+  void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
+
+  // Fast support for Math.sin and Math.cos.
+  enum MathOp { SIN, COS };
+  void GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args);
+  inline void GenerateMathSin(ZoneList<Expression*>* args);
+  inline void GenerateMathCos(ZoneList<Expression*>* args);
+
   // Methods and constants for fast case switch statement support.
   //
   // Only allow fast-case switch if the range of labels is at most
diff --git a/src/ia32/frames-ia32.h b/src/ia32/frames-ia32.h
index aec1f48..3a7c86b 100644
--- a/src/ia32/frames-ia32.h
+++ b/src/ia32/frames-ia32.h
@@ -55,16 +55,10 @@
 class StackHandlerConstants : public AllStatic {
  public:
   static const int kNextOffset  = 0 * kPointerSize;
-  static const int kPPOffset    = 1 * kPointerSize;
-  static const int kFPOffset    = 2 * kPointerSize;
+  static const int kFPOffset    = 1 * kPointerSize;
+  static const int kStateOffset = 2 * kPointerSize;
+  static const int kPCOffset    = 3 * kPointerSize;
 
-  // TODO(1233780): Get rid of the code slot in stack handlers.
-  static const int kCodeOffset  = 3 * kPointerSize;
-
-  static const int kStateOffset = 4 * kPointerSize;
-  static const int kPCOffset    = 5 * kPointerSize;
-
-  static const int kAddressDisplacement = -1 * kPointerSize;
   static const int kSize = kPCOffset + kPointerSize;
 };
 
@@ -85,12 +79,12 @@
   static const int kDebugMarkOffset = -2 * kPointerSize;
   static const int kSPOffset        = -1 * kPointerSize;
 
-  // Let the parameters pointer for exit frames point just below the
-  // frame structure on the stack (frame pointer and return address).
-  static const int kPPDisplacement = +2 * kPointerSize;
-
   static const int kCallerFPOffset =  0 * kPointerSize;
   static const int kCallerPCOffset = +1 * kPointerSize;
+
+  // FP-relative displacement of the caller's SP.  It points just
+  // below the saved PC.
+  static const int kCallerSPDisplacement = +2 * kPointerSize;
 };
 
 
@@ -112,7 +106,7 @@
   static const int kSavedRegistersOffset = +2 * kPointerSize;
   static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
 
-  // CallerSP-relative (aka PP-relative)
+  // Caller SP-relative.
   static const int kParam0Offset   = -2 * kPointerSize;
   static const int kReceiverOffset = -1 * kPointerSize;
 };
@@ -136,157 +130,6 @@
 }
 
 
-// ----------------------------------------------------
-
-
-
-
-  // C Entry frames:
-
-  //    lower    |    Stack    |
-  //  addresses  |      ^      |
-  //             |      |      |
-  //             |             |
-  //             +-------------+
-  //             |  entry_pc   |
-  //             +-------------+ <--+ entry_sp
-  //                    .           |
-  //                    .           |
-  //                    .           |
-  //             +-------------+    |
-  //          -3 |  entry_sp --+----+
-  //      e      +-------------+
-  //      n   -2 | C function  |
-  //      t      +-------------+
-  //      r   -1 |  caller_pp  |
-  //      y      +-------------+ <--- fp (frame pointer, ebp)
-  //           0 |  caller_fp  |
-  //      f      +-------------+
-  //      r    1 |  caller_pc  |
-  //      a      +-------------+ <--- caller_sp (stack pointer, esp)
-  //      m    2 |             |
-  //      e      |  arguments  |
-  //             |             |
-  //             +- - - - - - -+
-  //             |  argument0  |
-  //             +=============+
-  //             |             |
-  //             |   caller    |
-  //   higher    | expressions |
-  //  addresses  |             |
-
-
-  // Proper JS frames:
-
-  //    lower    |    Stack    |
-  //  addresses  |      ^      |
-  //             |      |      |
-  //             |             |
-  // ----------- +=============+ <--- sp (stack pointer, esp)
-  //             |  function   |
-  //             +-------------+
-  //             |             |
-  //             | expressions |
-  //             |             |
-  //             +-------------+
-  //      a      |             |
-  //      c      |   locals    |
-  //      t      |             |
-  //      i      +- - - - - - -+ <---
-  //      v   -4 |   local0    |   ^
-  //      a      +-------------+   |
-  //      t   -3 |    code     |   |
-  //      i      +-------------+   |
-  //      o   -2 |   context   |   | kLocal0Offset
-  //      n      +-------------+   |
-  //          -1 |  caller_pp  |   v
-  //      f      +-------------+ <--- fp (frame pointer, ebp)
-  //      r    0 |  caller_fp  |
-  //      a      +-------------+
-  //      m    1 |  caller_pc  |
-  //      e      +-------------+ <--- caller_sp (incl. parameters)
-  //           2 |             |
-  //             | parameters  |
-  //             |             |
-  //             +- - - - - - -+ <---
-  //          -2 | parameter0  |   ^
-  //             +-------------+   | kParam0Offset
-  //          -1 |  receiver   |   v
-  // ----------- +=============+ <--- pp (parameter pointer, edi)
-  //           0 |  function   |
-  //             +-------------+
-  //             |             |
-  //             |   caller    |
-  //   higher    | expressions |
-  //  addresses  |             |
-
-
-  // JS entry frames: When calling from C to JS, we construct two extra
-  // frames: An entry frame (C) and a trampoline frame (JS). The
-  // following pictures shows the two frames:
-
-  //    lower    |    Stack    |
-  //  addresses  |      ^      |
-  //             |      |      |
-  //             |             |
-  // ----------- +=============+ <--- sp (stack pointer, esp)
-  //             |             |
-  //             | parameters  |
-  //      t      |             |
-  //      r      +- - - - - - -+
-  //      a      | parameter0  |
-  //      m      +-------------+
-  //      p      |  receiver   |
-  //      o      +-------------+ <---
-  //      l      |  function   |   ^
-  //      i      +-------------+   |
-  //      n   -3 |    code     |   | kLocal0Offset
-  //      e      +-------------+
-  //          -2 |    NULL     | context is always NULL
-  //             +-------------+
-  //      f   -1 |    NULL     | caller pp is always NULL for entry frames
-  //      r      +-------------+ <--- fp (frame pointer, ebp)
-  //      a    0 |  caller fp  |
-  //      m      +-------------+
-  //      e    1 |  caller pc  |
-  //             +-------------+ <--- caller_sp (incl. parameters)
-  //             |      0      |
-  // ----------- +=============+ <--- pp (parameter pointer, edi)
-  //             |      0      |
-  //             +-------------+ <---
-  //                    .          ^
-  //                    .          |  try-handler (HandlerOffsets::kSize)
-  //                    .          v
-  //             +-------------+ <---
-  //          -5 | next top pp |
-  //             +-------------+
-  //      e   -4 | next top fp |
-  //      n      +-------------+ <---
-  //      t   -3 |     ebx     |   ^
-  //      r      +-------------+   |
-  //      y   -2 |     esi     |   |  callee-saved registers
-  //             +-------------+   |
-  //          -1 |     edi     |   v
-  //      f      +-------------+ <--- fp
-  //      r    0 |  caller fp  |
-  //      a      +-------------+      pp == NULL (parameter pointer)
-  //      m    1 |  caller pc  |
-  //      e      +-------------+ <--- caller sp
-  //           2 | code  entry |   ^
-  //             +-------------+   |
-  //           3 |  function   |   |
-  //             +-------------+   |  arguments passed from C code
-  //           4 |  receiver   |   |
-  //             +-------------+   |
-  //           5 |    argc     |   |
-  //             +-------------+   |
-  //           6 |    argv     |   v
-  //             +-------------+ <---
-  //             |             |
-  //   higher    |             |
-  //  addresses  |             |
-
-
 } }  // namespace v8::internal
 
 #endif  // V8_IA32_FRAMES_IA32_H_
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index d7f264d..5da9b2f 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -747,6 +747,21 @@
 }
 
 
+void KeyedStoreIC::ClearInlinedVersion(Address address) {
+  // Insert null as the elements map to check for.  This will make
+  // sure that the elements fast-case map check fails so that control
+  // flows to the IC instead of the inlined version.
+  PatchInlinedStore(address, Heap::null_value());
+}
+
+
+void KeyedStoreIC::RestoreInlinedVersion(Address address) {
+  // Restore the fast-case elements map check so that the inlined
+  // version can be used again.
+  PatchInlinedStore(address, Heap::fixed_array_map());
+}
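
This pair gives the debugger a switch for the inlined fast path: patching
the map check to null guarantees a mismatch, so every keyed store at that
site falls through to the IC stub, where a break can be taken. A
hypothetical call sequence (the wrapper names are made up; Address and
KeyedStoreIC come from the declarations above):

    // Sketch: toggling the inlined keyed store around a debug session.
    void DisableFastKeyedStores(Address call_site) {
      KeyedStoreIC::ClearInlinedVersion(call_site);    // map check now fails
    }

    void EnableFastKeyedStores(Address call_site) {
      KeyedStoreIC::RestoreInlinedVersion(call_site);  // fast path re-enabled
    }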
+
+
 bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
   // The address of the instruction following the call.
   Address test_instruction_address = address + 4;
@@ -774,7 +789,7 @@
 }
 
 
-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
+static bool PatchInlinedMapCheck(Address address, Object* map) {
   Address test_instruction_address = address + 4;  // 4 = stub address
   // The keyed load has a fast inlined case if the IC call instruction
   // is immediately followed by a test instruction.
@@ -795,6 +810,16 @@
 }
 
 
+bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
+  return PatchInlinedMapCheck(address, map);
+}
+
+
+bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
+  return PatchInlinedMapCheck(address, map);
+}
+
+
 // Defined in ic.cc.
 Object* KeyedLoadIC_Miss(Arguments args);
 
diff --git a/src/ia32/jump-target-ia32.cc b/src/ia32/jump-target-ia32.cc
index 9644a16..587fb2d 100644
--- a/src/ia32/jump-target-ia32.cc
+++ b/src/ia32/jump-target-ia32.cc
@@ -164,7 +164,7 @@
 }
 
 
-void JumpTarget::DoBind(int mergable_elements) {
+void JumpTarget::DoBind() {
   ASSERT(cgen() != NULL);
   ASSERT(!is_bound());
 
@@ -210,7 +210,7 @@
       // Fast case: no forward jumps, possible backward ones.  Remove
       // constants and copies above the watermark on the fall-through
       // frame and use it as the entry frame.
-      cgen()->frame()->MakeMergable(mergable_elements);
+      cgen()->frame()->MakeMergable();
       entry_frame_ = new VirtualFrame(cgen()->frame());
     }
     __ bind(&entry_label_);
@@ -252,7 +252,7 @@
   }
 
   // Compute the frame to use for entry to the block.
-  ComputeEntryFrame(mergable_elements);
+  ComputeEntryFrame();
 
   // Some moves required to merge to an expected frame require purely
   // frame state changes, and do not require any code generation.
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 7636c4e..479b8ca 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -358,7 +358,7 @@
   ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
 
   // Setup the frame structure on the stack.
-  ASSERT(ExitFrameConstants::kPPDisplacement == +2 * kPointerSize);
+  ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
   ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
   ASSERT(ExitFrameConstants::kCallerFPOffset ==  0 * kPointerSize);
   push(ebp);
@@ -448,7 +448,8 @@
 
 void MacroAssembler::PushTryHandler(CodeLocation try_location,
                                     HandlerType type) {
-  ASSERT(StackHandlerConstants::kSize == 6 * kPointerSize);  // adjust this code
+  // Adjust this code if not the case.
+  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
   // The pc (return address) is already on TOS.
   if (try_location == IN_JAVASCRIPT) {
     if (type == TRY_CATCH_HANDLER) {
@@ -456,23 +457,18 @@
     } else {
       push(Immediate(StackHandler::TRY_FINALLY));
     }
-    push(Immediate(Smi::FromInt(StackHandler::kCodeNotPresent)));
     push(ebp);
-    push(edi);
   } else {
     ASSERT(try_location == IN_JS_ENTRY);
-    // The parameter pointer is meaningless here and ebp does not
-    // point to a JS frame. So we save NULL for both pp and ebp. We
-    // expect the code throwing an exception to check ebp before
-    // dereferencing it to restore the context.
+    // The frame pointer does not point to a JS frame so we save NULL
+    // for ebp. We expect the code throwing an exception to check ebp
+    // before dereferencing it to restore the context.
     push(Immediate(StackHandler::ENTRY));
-    push(Immediate(Smi::FromInt(StackHandler::kCodeNotPresent)));
-    push(Immediate(0));  // NULL frame pointer
-    push(Immediate(0));  // NULL parameter pointer
+    push(Immediate(0));  // NULL frame pointer.
   }
-  // Cached TOS.
-  mov(eax, Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
-  // Link this handler.
+  // Save the current handler as the next handler.
+  push(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
+  // Link this handler as the new current one.
   mov(Operand::StaticVariable(ExternalReference(Top::k_handler_address)), esp);
 }
 
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 940a8b4..42620dd 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -154,9 +154,8 @@
   // ---------------------------------------------------------------------------
   // Exception handling
 
-  // Push a new try handler and link into try handler chain.
-  // The return address must be pushed before calling this helper.
-  // On exit, eax contains TOS (next_sp).
+  // Push a new try handler and link into try handler chain.  The return
+  // address must be pushed before calling this helper.
   void PushTryHandler(CodeLocation try_location, HandlerType type);
 
 
@@ -286,7 +285,7 @@
   List<Unresolved> unresolved_;
   bool generating_stub_;
   bool allow_stub_calls_;
-  Handle<Object> code_object_;  // This handle will be patched with the code
+  Handle<Object> code_object_;  // This handle will be patched with the
                                 // code object on installation.
 
   // Helper functions for generating invokes.
diff --git a/src/ia32/virtual-frame-ia32.cc b/src/ia32/virtual-frame-ia32.cc
index 5f85de7..3d97a66 100644
--- a/src/ia32/virtual-frame-ia32.cc
+++ b/src/ia32/virtual-frame-ia32.cc
@@ -174,14 +174,8 @@
 }
 
 
-void VirtualFrame::MakeMergable(int mergable_elements) {
-  if (mergable_elements == JumpTarget::kAllElements) {
-    mergable_elements = element_count();
-  }
-  ASSERT(mergable_elements <= element_count());
-
-  int start_index = element_count() - mergable_elements;
-  for (int i = start_index; i < element_count(); i++) {
+void VirtualFrame::MakeMergable() {
+  for (int i = 0; i < element_count(); i++) {
     FrameElement element = elements_[i];
 
     if (element.is_constant() || element.is_copy()) {
@@ -775,14 +769,10 @@
 
 void VirtualFrame::PushTryHandler(HandlerType type) {
   ASSERT(cgen()->HasValidEntryRegisters());
-  // Grow the expression stack by handler size less two (the return address
-  // is already pushed by a call instruction, and PushTryHandler from the
-  // macro assembler will leave the top of stack in the eax register to be
-  // pushed separately).
-  Adjust(kHandlerSize - 2);
+  // Grow the expression stack by handler size less one (the return
+  // address is already pushed by a call instruction).
+  Adjust(kHandlerSize - 1);
   __ PushTryHandler(IN_JAVASCRIPT, type);
-  // TODO(1222589): remove the reliance of PushTryHandler on a cached TOS
-  EmitPush(eax);
 }
 
 
diff --git a/src/ia32/virtual-frame-ia32.h b/src/ia32/virtual-frame-ia32.h
index 6e6ebd5..b69b800 100644
--- a/src/ia32/virtual-frame-ia32.h
+++ b/src/ia32/virtual-frame-ia32.h
@@ -153,11 +153,8 @@
   void SyncRange(int begin, int end);
 
   // Make this frame so that an arbitrary frame of the same height can
-  // be merged to it.  Copies and constants are removed from the
-  // topmost mergable_elements elements of the frame.  A
-  // mergable_elements of JumpTarget::kAllElements indicates constants
-  // and copies are should be removed from the entire frame.
-  void MakeMergable(int mergable_elements);
+  // be merged to it.  Copies and constants are removed from the frame.
+  void MakeMergable();
 
   // Prepare this virtual frame for merging to an expected frame by
   // performing some state changes that do not require generating
diff --git a/src/ic.cc b/src/ic.cc
index 657614a..16235db 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -849,6 +849,20 @@
 }
 
 
+static bool StoreICableLookup(LookupResult* lookup) {
+  // Bail out if we didn't find a result.
+  if (!lookup->IsValid() || !lookup->IsCacheable()) return false;
+
+  // If the property is read-only, we leave the IC in its current
+  // state.
+  if (lookup->IsReadOnly()) return false;
+
+  if (!lookup->IsLoaded()) return false;
+
+  return true;
+}
+
+
 Object* StoreIC::Store(State state,
                        Handle<Object> object,
                        Handle<String> name,
@@ -873,12 +887,12 @@
   }
 
   // Lookup the property locally in the receiver.
-  LookupResult lookup;
-  receiver->LocalLookup(*name, &lookup);
-
-  // Update inline cache and stub cache.
-  if (FLAG_use_ic && lookup.IsLoaded()) {
-    UpdateCaches(&lookup, state, receiver, name, value);
+  if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
+    LookupResult lookup;
+    receiver->LocalLookup(*name, &lookup);
+    if (StoreICableLookup(&lookup)) {
+      UpdateCaches(&lookup, state, receiver, name, value);
+    }
   }
 
   // Set the property.
@@ -893,14 +907,9 @@
                            Handle<Object> value) {
   ASSERT(lookup->IsLoaded());
   // Skip JSGlobalProxy.
-  if (receiver->IsJSGlobalProxy()) return;
+  ASSERT(!receiver->IsJSGlobalProxy());
 
-  // Bail out if we didn't find a result.
-  if (!lookup->IsValid() || !lookup->IsCacheable()) return;
-
-  // If the property is read-only, we leave the IC in its current
-  // state.
-  if (lookup->IsReadOnly()) return;
+  ASSERT(StoreICableLookup(lookup));
 
   // If the property has a non-field type allowing map transitions
   // where there is extra room in the object, we leave the IC in its
diff --git a/src/ic.h b/src/ic.h
index bd94fd8..9c96ba2 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -356,6 +356,12 @@
   static void GenerateGeneric(MacroAssembler* masm);
   static void GenerateExtendStorage(MacroAssembler* masm);
 
+  // Clear the inlined version so the IC is always hit.
+  static void ClearInlinedVersion(Address address);
+
+  // Restore the inlined version so the fast case can get hit.
+  static void RestoreInlinedVersion(Address address);
+
  private:
   static void Generate(MacroAssembler* masm, const ExternalReference& f);
 
@@ -378,6 +384,11 @@
   }
 
   static void Clear(Address address, Code* target);
+
+  // Support for patching the map that is checked in an inlined
+  // version of keyed store.
+  static bool PatchInlinedStore(Address address, Object* map);
+
   friend class IC;
 };
 
diff --git a/src/jump-target.cc b/src/jump-target.cc
index a8eda6b..a9d7770 100644
--- a/src/jump-target.cc
+++ b/src/jump-target.cc
@@ -48,7 +48,7 @@
 }
 
 
-void JumpTarget::ComputeEntryFrame(int mergable_elements) {
+void JumpTarget::ComputeEntryFrame() {
   // Given: a collection of frames reaching by forward CFG edges and
   // the directionality of the block.  Compute: an entry frame for the
   // block.
@@ -77,14 +77,6 @@
   int length = initial_frame->element_count();
   ZoneList<FrameElement*> elements(length);
 
-  // Convert the number of mergable elements (counted from the top
-  // down) to a frame high-water mark (counted from the bottom up).
-  // Elements strictly above the high-water index will be mergable in
-  // entry frames for bidirectional jump targets.
-  int high_water_mark = (mergable_elements == kAllElements)
-      ? VirtualFrame::kIllegalIndex  // All frame indices are above this.
-      : length - mergable_elements - 1;  // Top index if m_e == 0.
-
   // Initially populate the list of elements based on the initial
   // frame.
   for (int i = 0; i < length; i++) {
@@ -92,7 +84,7 @@
     // We do not allow copies or constants in bidirectional frames.  All
     // elements above the water mark on bidirectional frames have
     // unknown static types.
-    if (direction_ == BIDIRECTIONAL && i > high_water_mark) {
+    if (direction_ == BIDIRECTIONAL) {
       if (element.is_constant() || element.is_copy()) {
         elements.Add(NULL);
         continue;
@@ -158,7 +150,7 @@
       int best_reg_num = RegisterAllocator::kInvalidRegister;
 
       StaticType type;  // Initially invalid.
-      if (direction_ != BIDIRECTIONAL || i < high_water_mark) {
+      if (direction_ != BIDIRECTIONAL) {
         type = reaching_frames_[0]->elements_[i].static_type();
       }
 
@@ -241,25 +233,6 @@
 }
 
 
-void JumpTarget::Jump(Result* arg0, Result* arg1) {
-  ASSERT(cgen()->has_valid_frame());
-
-  cgen()->frame()->Push(arg0);
-  cgen()->frame()->Push(arg1);
-  DoJump();
-}
-
-
-void JumpTarget::Jump(Result* arg0, Result* arg1, Result* arg2) {
-  ASSERT(cgen()->has_valid_frame());
-
-  cgen()->frame()->Push(arg0);
-  cgen()->frame()->Push(arg1);
-  cgen()->frame()->Push(arg2);
-  DoJump();
-}
-
-
 void JumpTarget::Branch(Condition cc, Hint hint) {
   DoBranch(cc, hint);
 }
@@ -295,84 +268,6 @@
 }
 
 
-void JumpTarget::Branch(Condition cc, Result* arg0, Result* arg1, Hint hint) {
-  ASSERT(cgen()->frame() != NULL);
-
-  // We want to check that non-frame registers at the call site stay in
-  // the same registers on the fall-through branch.
-  DECLARE_ARGCHECK_VARS(arg0);
-  DECLARE_ARGCHECK_VARS(arg1);
-
-  cgen()->frame()->Push(arg0);
-  cgen()->frame()->Push(arg1);
-  DoBranch(cc, hint);
-  *arg1 = cgen()->frame()->Pop();
-  *arg0 = cgen()->frame()->Pop();
-
-  ASSERT_ARGCHECK(arg0);
-  ASSERT_ARGCHECK(arg1);
-}
-
-
-void JumpTarget::Branch(Condition cc,
-                        Result* arg0,
-                        Result* arg1,
-                        Result* arg2,
-                        Hint hint) {
-  ASSERT(cgen()->frame() != NULL);
-
-  // We want to check that non-frame registers at the call site stay in
-  // the same registers on the fall-through branch.
-  DECLARE_ARGCHECK_VARS(arg0);
-  DECLARE_ARGCHECK_VARS(arg1);
-  DECLARE_ARGCHECK_VARS(arg2);
-
-  cgen()->frame()->Push(arg0);
-  cgen()->frame()->Push(arg1);
-  cgen()->frame()->Push(arg2);
-  DoBranch(cc, hint);
-  *arg2 = cgen()->frame()->Pop();
-  *arg1 = cgen()->frame()->Pop();
-  *arg0 = cgen()->frame()->Pop();
-
-  ASSERT_ARGCHECK(arg0);
-  ASSERT_ARGCHECK(arg1);
-  ASSERT_ARGCHECK(arg2);
-}
-
-
-void JumpTarget::Branch(Condition cc,
-                        Result* arg0,
-                        Result* arg1,
-                        Result* arg2,
-                        Result* arg3,
-                        Hint hint) {
-  ASSERT(cgen()->frame() != NULL);
-
-  // We want to check that non-frame registers at the call site stay in
-  // the same registers on the fall-through branch.
-  DECLARE_ARGCHECK_VARS(arg0);
-  DECLARE_ARGCHECK_VARS(arg1);
-  DECLARE_ARGCHECK_VARS(arg2);
-  DECLARE_ARGCHECK_VARS(arg3);
-
-  cgen()->frame()->Push(arg0);
-  cgen()->frame()->Push(arg1);
-  cgen()->frame()->Push(arg2);
-  cgen()->frame()->Push(arg3);
-  DoBranch(cc, hint);
-  *arg3 = cgen()->frame()->Pop();
-  *arg2 = cgen()->frame()->Pop();
-  *arg1 = cgen()->frame()->Pop();
-  *arg0 = cgen()->frame()->Pop();
-
-  ASSERT_ARGCHECK(arg0);
-  ASSERT_ARGCHECK(arg1);
-  ASSERT_ARGCHECK(arg2);
-  ASSERT_ARGCHECK(arg3);
-}
-
-
 void BreakTarget::Branch(Condition cc, Result* arg, Hint hint) {
   ASSERT(cgen()->has_valid_frame());
 
@@ -400,66 +295,20 @@
 #undef ASSERT_ARGCHECK
 
 
-void JumpTarget::Bind(int mergable_elements) {
-  DoBind(mergable_elements);
+void JumpTarget::Bind() {
+  DoBind();
 }
 
 
-void JumpTarget::Bind(Result* arg, int mergable_elements) {
+void JumpTarget::Bind(Result* arg) {
   if (cgen()->has_valid_frame()) {
     cgen()->frame()->Push(arg);
   }
-  DoBind(mergable_elements);
+  DoBind();
   *arg = cgen()->frame()->Pop();
 }
 
 
-void JumpTarget::Bind(Result* arg0, Result* arg1, int mergable_elements) {
-  if (cgen()->has_valid_frame()) {
-    cgen()->frame()->Push(arg0);
-    cgen()->frame()->Push(arg1);
-  }
-  DoBind(mergable_elements);
-  *arg1 = cgen()->frame()->Pop();
-  *arg0 = cgen()->frame()->Pop();
-}
-
-
-void JumpTarget::Bind(Result* arg0,
-                      Result* arg1,
-                      Result* arg2,
-                      int mergable_elements) {
-  if (cgen()->has_valid_frame()) {
-    cgen()->frame()->Push(arg0);
-    cgen()->frame()->Push(arg1);
-    cgen()->frame()->Push(arg2);
-  }
-  DoBind(mergable_elements);
-  *arg2 = cgen()->frame()->Pop();
-  *arg1 = cgen()->frame()->Pop();
-  *arg0 = cgen()->frame()->Pop();
-}
-
-
-void JumpTarget::Bind(Result* arg0,
-                      Result* arg1,
-                      Result* arg2,
-                      Result* arg3,
-                      int mergable_elements) {
-  if (cgen()->has_valid_frame()) {
-    cgen()->frame()->Push(arg0);
-    cgen()->frame()->Push(arg1);
-    cgen()->frame()->Push(arg2);
-    cgen()->frame()->Push(arg3);
-  }
-  DoBind(mergable_elements);
-  *arg3 = cgen()->frame()->Pop();
-  *arg2 = cgen()->frame()->Pop();
-  *arg1 = cgen()->frame()->Pop();
-  *arg0 = cgen()->frame()->Pop();
-}
-
-
 void JumpTarget::AddReachingFrame(VirtualFrame* frame) {
   ASSERT(reaching_frames_.length() == merge_labels_.length());
   ASSERT(entry_frame_ == NULL);
@@ -531,7 +380,7 @@
 }
 
 
-void BreakTarget::Bind(int mergable_elements) {
+void BreakTarget::Bind() {
 #ifdef DEBUG
   // All the forward-reaching frames should have been adjusted at the
   // jumps to this target.
@@ -547,11 +396,11 @@
     int count = cgen()->frame()->height() - expected_height_;
     cgen()->frame()->ForgetElements(count);
   }
-  DoBind(mergable_elements);
+  DoBind();
 }
 
 
-void BreakTarget::Bind(Result* arg, int mergable_elements) {
+void BreakTarget::Bind(Result* arg) {
 #ifdef DEBUG
   // All the forward-reaching frames should have been adjusted at the
   // jumps to this target.
@@ -568,7 +417,7 @@
     cgen()->frame()->ForgetElements(count);
     cgen()->frame()->Push(arg);
   }
-  DoBind(mergable_elements);
+  DoBind();
   *arg = cgen()->frame()->Pop();
 }
 
diff --git a/src/jump-target.h b/src/jump-target.h
index 7585faf..0c42f1b 100644
--- a/src/jump-target.h
+++ b/src/jump-target.h
@@ -107,52 +107,18 @@
   // jump and there will be no current frame after the jump.
   virtual void Jump();
   virtual void Jump(Result* arg);
-  void Jump(Result* arg0, Result* arg1);
-  void Jump(Result* arg0, Result* arg1, Result* arg2);
 
   // Emit a conditional branch to the target.  There must be a current
   // frame at the branch.  The current frame will fall through to the
   // code after the branch.
   virtual void Branch(Condition cc, Hint hint = no_hint);
   virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
-  void Branch(Condition cc, Result* arg0, Result* arg1, Hint hint = no_hint);
-  void Branch(Condition cc,
-              Result* arg0,
-              Result* arg1,
-              Result* arg2,
-              Hint hint = no_hint);
-  void Branch(Condition cc,
-              Result* arg0,
-              Result* arg1,
-              Result* arg2,
-              Result* arg3,
-              Hint hint = no_hint);
 
   // Bind a jump target.  If there is no current frame at the binding
   // site, there must be at least one frame reaching via a forward
   // jump.
-  //
-  // The number of mergable elements is a number of frame elements
-  // counting from the top down which must be "mergable" (not
-  // constants or copies) in the entry frame at the jump target.
-  // Backward jumps to the target must contain the same constants and
-  // sharing as the entry frame, except for the mergable elements.
-  //
-  // A mergable elements argument of kAllElements indicates that all
-  // frame elements must be mergable.  Mergable elements are ignored
-  // completely for forward-only jump targets.
-  virtual void Bind(int mergable_elements = kAllElements);
-  virtual void Bind(Result* arg, int mergable_elements = kAllElements);
-  void Bind(Result* arg0, Result* arg1, int mergable_elements = kAllElements);
-  void Bind(Result* arg0,
-            Result* arg1,
-            Result* arg2,
-            int mergable_elements = kAllElements);
-  void Bind(Result* arg0,
-            Result* arg1,
-            Result* arg2,
-            Result* arg3,
-            int mergable_elements = kAllElements);
+  virtual void Bind();
+  virtual void Bind(Result* arg);
 
   // Emit a call to a jump target.  There must be a current frame at
   // the call.  The frame at the target is the same as the current
@@ -160,8 +126,6 @@
   // after the call is the same as the frame before the call.
   void Call();
 
-  static const int kAllElements = -1;  // Not a valid number of elements.
-
   static void set_compiling_deferred_code(bool flag) {
     compiling_deferred_code_ = flag;
   }
@@ -188,7 +152,7 @@
   // return values using the virtual frame.
   void DoJump();
   void DoBranch(Condition cc, Hint hint);
-  void DoBind(int mergable_elements);
+  void DoBind();
 
  private:
   static bool compiling_deferred_code_;
@@ -202,9 +166,8 @@
   // target.
   inline void InitializeEntryElement(int index, FrameElement* target);
 
-  // Compute a frame to use for entry to this block.  Mergable
-  // elements is as described for the Bind function.
-  void ComputeEntryFrame(int mergable_elements);
+  // Compute a frame to use for entry to this block.
+  void ComputeEntryFrame();
 
   DISALLOW_COPY_AND_ASSIGN(JumpTarget);
 };
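
With the mergable_elements parameter gone, MakeMergable and
ComputeEntryFrame treat the whole frame uniformly: at a bidirectional jump
target no element may remain a constant or a copy. The invariant being
enforced, sketched in C++ (SpillElementAt is a hypothetical stand-in for
however the frame materializes an element):

    // Sketch: materialize every constant or copy so that an arbitrary
    // frame of the same height can merge to this one.
    for (int i = 0; i < element_count(); i++) {
      if (elements_[i].is_constant() || elements_[i].is_copy()) {
        SpillElementAt(i);  // hypothetical materialization helper
      }
    }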
@@ -251,8 +214,8 @@
   // Bind a break target.  If there is no current frame at the binding
   // site, there must be at least one frame reaching via a forward
   // jump.
-  virtual void Bind(int mergable_elements = kAllElements);
-  virtual void Bind(Result* arg, int mergable_elements = kAllElements);
+  virtual void Bind();
+  virtual void Bind(Result* arg);
 
   // Setter for expected height.
   void set_expected_height(int expected) { expected_height_ = expected; }
diff --git a/src/log-utils.cc b/src/log-utils.cc
index 4361049..028eb3a 100644
--- a/src/log-utils.cc
+++ b/src/log-utils.cc
@@ -123,7 +123,7 @@
 Log::WritePtr Log::Write = NULL;
 FILE* Log::output_handle_ = NULL;
 LogDynamicBuffer* Log::output_buffer_ = NULL;
-// Must be the same message as in Logger::PauseProfiler
+// Must be the same message as in Logger::PauseProfiler.
 const char* Log::kDynamicBufferSeal = "profiler,\"pause\"\n";
 Mutex* Log::mutex_ = NULL;
 char* Log::message_buffer_ = NULL;
@@ -173,6 +173,9 @@
   }
   Write = NULL;
 
+  DeleteArray(message_buffer_);
+  message_buffer_ = NULL;
+
   delete mutex_;
   mutex_ = NULL;
 
@@ -212,13 +215,13 @@
                    Log::kMessageBufferSize - pos_);
   va_list args;
   va_start(args, format);
-  Append(format, args);
+  AppendVA(format, args);
   va_end(args);
   ASSERT(pos_ <= Log::kMessageBufferSize);
 }
 
 
-void LogMessageBuilder::Append(const char* format, va_list args) {
+void LogMessageBuilder::AppendVA(const char* format, va_list args) {
   Vector<char> buf(Log::message_buffer_ + pos_,
                    Log::kMessageBufferSize - pos_);
   int result = v8::internal::OS::VSNPrintF(buf, format, args);
@@ -250,6 +253,27 @@
 }
 
 
+void LogMessageBuilder::AppendAddress(Address addr) {
+  static Address last_address_ = NULL;
+  AppendAddress(addr, last_address_);
+  last_address_ = addr;
+}
+
+
+void LogMessageBuilder::AppendAddress(Address addr, Address bias) {
+  if (!FLAG_compress_log || bias == NULL) {
+    Append("0x%" V8PRIxPTR, addr);
+  } else {
+    intptr_t delta = addr - bias;
+    // To avoid printing negative offsets in an unsigned form,
+    // we print the absolute value with an explicit sign.
+    const char sign = delta >= 0 ? '+' : '-';
+    if (sign == '-') { delta = -delta; }
+    Append("%c%" V8PRIxPTR, sign, delta);
+  }
+}
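
A worked example of the compressed form (addresses invented): with
FLAG_compress_log set and a bias of 0x290400, an address is written as a
signed hex offset from the bias, while a NULL bias or disabled compression
falls back to the full form:

    AppendAddress(0x2905a0, 0x290400)  ->  "+1a0"      (0x2905a0 - 0x290400)
    AppendAddress(0x290300, 0x290400)  ->  "-100"      (0x290400 - 0x290300)
    AppendAddress(0x2905a0, NULL)      ->  "0x2905a0"  (uncompressed)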
+
+
 void LogMessageBuilder::AppendDetailed(String* str, bool show_impl_info) {
   AssertNoAllocation no_heap_allocation;  // Ensure the string stays valid.
   int len = str->length();
@@ -280,6 +304,24 @@
 }
 
 
+bool LogMessageBuilder::StoreInCompressor(LogRecordCompressor* compressor) {
+  return compressor->Store(Vector<const char>(Log::message_buffer_, pos_));
+}
+
+
+bool LogMessageBuilder::RetrieveCompressedPrevious(
+    LogRecordCompressor* compressor, const char* prefix) {
+  pos_ = 0;
+  if (prefix[0] != '\0') Append(prefix);
+  Vector<char> prev_record(Log::message_buffer_ + pos_,
+                           Log::kMessageBufferSize - pos_);
+  const bool has_prev = compressor->RetrievePreviousCompressed(&prev_record);
+  if (!has_prev) return false;
+  pos_ += prev_record.length();
+  return true;
+}
+
+
 void LogMessageBuilder::WriteToLogFile() {
   ASSERT(pos_ <= Log::kMessageBufferSize);
   const int written = Log::Write(Log::message_buffer_, pos_);
@@ -297,6 +339,145 @@
   }
 }
 
+
+// Formatting string for back references to the whole line. E.g. "#2" means
+// "the second line above".
+const char* LogRecordCompressor::kLineBackwardReferenceFormat = "#%d";
+
+// Formatting string for back references. E.g. "#2:10" means
+// "the second line above, start from char 10 (0-based)".
+const char* LogRecordCompressor::kBackwardReferenceFormat = "#%d:%d";
+
+
+LogRecordCompressor::~LogRecordCompressor() {
+  for (int i = 0; i < buffer_.length(); ++i) {
+    buffer_[i].Dispose();
+  }
+}
+
+
+static int GetNumberLength(int number) {
+  ASSERT(number >= 0);
+  ASSERT(number < 10000);
+  if (number < 10) return 1;
+  if (number < 100) return 2;
+  if (number < 1000) return 3;
+  return 4;
+}
+
+
+int LogRecordCompressor::GetBackwardReferenceSize(int distance, int pos) {
+  // See kLineBackwardReferenceFormat and kBackwardReferenceFormat.
+  return pos == 0 ? GetNumberLength(distance) + 1
+      : GetNumberLength(distance) + GetNumberLength(pos) + 2;
+}
+
+
+void LogRecordCompressor::PrintBackwardReference(Vector<char> dest,
+                                                 int distance,
+                                                 int pos) {
+  if (pos == 0) {
+    OS::SNPrintF(dest, kLineBackwardReferenceFormat, distance);
+  } else {
+    OS::SNPrintF(dest, kBackwardReferenceFormat, distance, pos);
+  }
+}
+
+
+bool LogRecordCompressor::Store(const Vector<const char>& record) {
+  // Check if the record is the same as the last stored one.
+  if (curr_ != -1) {
+    Vector<const char>& curr = buffer_[curr_];
+    if (record.length() == curr.length()
+        && strncmp(record.start(), curr.start(), record.length()) == 0) {
+      return false;
+    }
+  }
+  // buffer_ is circular.
+  prev_ = curr_++;
+  curr_ %= buffer_.length();
+  Vector<char> record_copy = Vector<char>::New(record.length());
+  memcpy(record_copy.start(), record.start(), record.length());
+  buffer_[curr_].Dispose();
+  buffer_[curr_] =
+      Vector<const char>(record_copy.start(), record_copy.length());
+  return true;
+}
+
+
+bool LogRecordCompressor::RetrievePreviousCompressed(
+    Vector<char>* prev_record) {
+  if (prev_ == -1) return false;
+
+  int index = prev_;
+  // Distance from prev_.
+  int distance = 0;
+  // Best compression result among records in the buffer.
+  struct {
+    intptr_t truncated_len;
+    int distance;
+    int copy_from_pos;
+    int backref_size;
+  } best = {-1, 0, 0, 0};
+  Vector<const char>& prev = buffer_[prev_];
+  const char* const prev_start = prev.start();
+  const char* const prev_end = prev.start() + prev.length();
+  do {
+    // We're moving backwards until we reach the current record.
+    // Remember that buffer_ is circular.
+    if (--index == -1) index = buffer_.length() - 1;
+    ++distance;
+    if (index == curr_) break;
+
+    Vector<const char>& data = buffer_[index];
+    if (data.start() == NULL) break;
+    const char* const data_end = data.start() + data.length();
+    const char* prev_ptr = prev_end;
+    const char* data_ptr = data_end;
+    // Compare strings backwards, stop on the last matching character.
+    while (prev_ptr != prev_start && data_ptr != data.start()
+          && *(prev_ptr - 1) == *(data_ptr - 1)) {
+      --prev_ptr;
+      --data_ptr;
+    }
+    const intptr_t truncated_len = prev_end - prev_ptr;
+    const int copy_from_pos = data_ptr - data.start();
+    // Skip if the matched tail is too short to pay for a backward reference.
+    if (truncated_len <= kMaxBackwardReferenceSize
+        && truncated_len <= GetBackwardReferenceSize(distance, copy_from_pos)) {
+      continue;
+    }
+
+    // Record compression results.
+    if (truncated_len > best.truncated_len) {
+      best.truncated_len = truncated_len;
+      best.distance = distance;
+      best.copy_from_pos = copy_from_pos;
+      best.backref_size = GetBackwardReferenceSize(distance, copy_from_pos);
+    }
+  } while (true);
+
+  if (best.distance == 0) {
+    // Can't compress the previous record. Return as is.
+    ASSERT(prev_record->length() >= prev.length());
+    memcpy(prev_record->start(), prev.start(), prev.length());
+    prev_record->Truncate(prev.length());
+  } else {
+    // Copy the incompressible part unchanged.
+    const intptr_t unchanged_len = prev.length() - best.truncated_len;
+    // + 1 for '\0'.
+    ASSERT(prev_record->length() >= unchanged_len + best.backref_size + 1);
+    memcpy(prev_record->start(), prev.start(), unchanged_len);
+    // Append the backward reference.
+    Vector<char> backref(
+        prev_record->start() + unchanged_len, best.backref_size + 1);
+    PrintBackwardReference(backref, best.distance, best.copy_from_pos);
+    ASSERT(strlen(backref.start()) - best.backref_size == 0);
+    prev_record->Truncate(unchanged_len + best.backref_size);
+  }
+  return true;
+}
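
A worked example of the backward-reference scheme (record contents are
invented): suppose the record two slots back is "t,0x1000,0x2000,0" and the
previous record is "t,0x10f8,0x2000,0". Comparing backwards, the two share
the 9-character tail ",0x2000,0", which starts at position 8 of the older
record, so the previous record compresses to:

    t,0x10f8#2:8

i.e. "append the tail of the record 2 lines above, starting at character 8".
The reference is only emitted when it is shorter than the tail it replaces,
which is what the GetBackwardReferenceSize comparison above checks (here a
4-character reference replaces a 9-character tail).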
+
 #endif  // ENABLE_LOGGING_AND_PROFILING
 
 } }  // namespace v8::internal
diff --git a/src/log-utils.h b/src/log-utils.h
index 2e8b3a3..ad669d5 100644
--- a/src/log-utils.h
+++ b/src/log-utils.h
@@ -170,6 +170,50 @@
   static char* message_buffer_;
 
   friend class LogMessageBuilder;
+  friend class LogRecordCompressor;
+};
+
+
+// A utility class for performing backward-reference compression
+// of string tails. It operates over a window of previous strings.
+class LogRecordCompressor {
+ public:
+  // 'window_size' is the size of the backward lookup window.
+  explicit LogRecordCompressor(int window_size)
+      : buffer_(window_size + kNoCompressionWindowSize),
+        kMaxBackwardReferenceSize(
+            GetBackwardReferenceSize(window_size, Log::kMessageBufferSize)),
+        curr_(-1), prev_(-1) {
+  }
+
+  ~LogRecordCompressor();
+
+  // Fills the vector with a compressed version of the previous record.
+  // Returns false if there is no previous record.
+  bool RetrievePreviousCompressed(Vector<char>* prev_record);
+
+  // Stores a record if it differs from the previous one (or there is no
+  // previous record). Returns true if the record has been stored.
+  bool Store(const Vector<const char>& record);
+
+ private:
+  // The minimum buffer size: room for the current and the previous
+  // record. Since there is no room for predecessors of the previous
+  // record, it can't be compressed at all.
+  static const int kNoCompressionWindowSize = 2;
+
+  // Formatting strings for back references.
+  static const char* kLineBackwardReferenceFormat;
+  static const char* kBackwardReferenceFormat;
+
+  static int GetBackwardReferenceSize(int distance, int pos);
+
+  static void PrintBackwardReference(Vector<char> dest, int distance, int pos);
+
+  ScopedVector< Vector<const char> > buffer_;
+  const int kMaxBackwardReferenceSize;
+  int curr_;
+  int prev_;
 };
 
 
@@ -186,7 +230,7 @@
   void Append(const char* format, ...);
 
   // Append string data to the log message.
-  void Append(const char* format, va_list args);
+  void AppendVA(const char* format, va_list args);
 
   // Append a character to the log message.
   void Append(const char c);
@@ -194,8 +238,29 @@
   // Append a heap string.
   void Append(String* str);
 
+  // Appends an address, compressing it if needed by offsetting it
+  // from the previously appended address.
+  void AppendAddress(Address addr);
+
+  // Appends an address, compressing it if needed.
+  void AppendAddress(Address addr, Address bias);
+
   void AppendDetailed(String* str, bool show_impl_info);
 
+  // Stores the log message in the compressor; returns true if the message
+  // was stored (i.e. it doesn't repeat the previous one).
+  bool StoreInCompressor(LogRecordCompressor* compressor);
+
+  // Sets the log message to the compressed version of the previous message.
+  // Returns false if there is no previous message.
+  bool RetrieveCompressedPrevious(LogRecordCompressor* compressor) {
+    return RetrieveCompressedPrevious(compressor, "");
+  }
+
+  // Does the same as the version without arguments, and sets a prefix.
+  bool RetrieveCompressedPrevious(LogRecordCompressor* compressor,
+                                  const char* prefix);
+
   // Write the log message to the log file currently opened.
   void WriteToLogFile();
 
diff --git a/src/log.cc b/src/log.cc
index c1edf4d..af49128 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -262,6 +262,7 @@
   Logger::ticker_->SetProfiler(this);
 
   Logger::ProfilerBeginEvent();
+  Logger::LogAliases();
 }
 
 
@@ -301,6 +302,20 @@
 VMState* Logger::current_state_ = NULL;
 VMState Logger::bottom_state_(EXTERNAL);
 SlidingStateWindow* Logger::sliding_state_window_ = NULL;
+const char** Logger::log_events_ = NULL;
+CompressionHelper* Logger::compression_helper_ = NULL;
+
+#define DECLARE_LONG_EVENT(ignore1, long_name, ignore2) long_name,
+const char* kLongLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
+  LOG_EVENTS_AND_TAGS_LIST(DECLARE_LONG_EVENT)
+};
+#undef DECLARE_LONG_EVENT
+
+#define DECLARE_SHORT_EVENT(ignore1, ignore2, short_name) short_name,
+const char* kCompressedLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
+  LOG_EVENTS_AND_TAGS_LIST(DECLARE_SHORT_EVENT)
+};
+#undef DECLARE_SHORT_EVENT
 
 
 bool Logger::IsEnabled() {
@@ -312,6 +327,20 @@
   if (!Log::IsEnabled()) return;
   LogMessageBuilder msg;
   msg.Append("profiler,\"begin\",%d\n", kSamplingIntervalMs);
+  if (FLAG_compress_log) {
+    msg.Append("profiler,\"compression\",%d\n", kCompressionWindowSize);
+  }
+  msg.WriteToLogFile();
+}
+
+
+void Logger::LogAliases() {
+  if (!Log::IsEnabled() || !FLAG_compress_log) return;
+  LogMessageBuilder msg;
+  for (int i = 0; i < NUMBER_OF_LOG_EVENTS; ++i) {
+    msg.Append("alias,%s,%s\n",
+               kCompressedLogEventsNames[i], kLongLogEventsNames[i]);
+  }
   msg.WriteToLogFile();
 }
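
With compression enabled the log therefore starts with a preamble mapping
the short event names to their long forms, so post-processing tools can
decode the stream. A hypothetical excerpt (the actual short names and the
window size come from LOG_EVENTS_AND_TAGS_LIST and kCompressionWindowSize,
neither of which appears in this diff):

    profiler,"begin",1
    profiler,"compression",128
    alias,cc,code-creation
    alias,cm,code-move
    alias,cd,code-delete
    alias,t,tick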
 
@@ -373,7 +402,7 @@
   LogMessageBuilder msg;
   va_list ap;
   va_start(ap, format);
-  msg.Append(format, ap);
+  msg.AppendVA(format, ap);
   va_end(ap);
   msg.WriteToLogFile();
 }
@@ -594,12 +623,15 @@
 }
 
 
-void Logger::CodeCreateEvent(const char* tag, Code* code, const char* comment) {
+void Logger::CodeCreateEvent(LogEventsAndTags tag,
+                             Code* code,
+                             const char* comment) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (!Log::IsEnabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
-  msg.Append("code-creation,%s,0x%" V8PRIxPTR ",%d,\"", tag, code->address(),
-             code->ExecutableSize());
+  msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
+  msg.AppendAddress(code->address());
+  msg.Append(",%d,\"", code->ExecutableSize());
   for (const char* p = comment; *p != '\0'; p++) {
     if (*p == '"') {
       msg.Append('\\');
@@ -613,20 +645,22 @@
 }
 
 
-void Logger::CodeCreateEvent(const char* tag, Code* code, String* name) {
+void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (!Log::IsEnabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
   SmartPointer<char> str =
       name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-  msg.Append("code-creation,%s,0x%" V8PRIxPTR ",%d,\"%s\"\n",
-             tag, code->address(), code->ExecutableSize(), *str);
+  msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
+  msg.AppendAddress(code->address());
+  msg.Append(",%d,\"%s\"\n", code->ExecutableSize(), *str);
   msg.WriteToLogFile();
 #endif
 }
 
 
-void Logger::CodeCreateEvent(const char* tag, Code* code, String* name,
+void Logger::CodeCreateEvent(LogEventsAndTags tag,
+                             Code* code, String* name,
                              String* source, int line) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (!Log::IsEnabled() || !FLAG_log_code) return;
@@ -635,23 +669,22 @@
       name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
   SmartPointer<char> sourcestr =
       source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-  msg.Append("code-creation,%s,0x%" V8PRIxPTR ",%d,\"%s %s:%d\"\n",
-             tag, code->address(),
-             code->ExecutableSize(),
-             *str, *sourcestr, line);
+  msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
+  msg.AppendAddress(code->address());
+  msg.Append(",%d,\"%s %s:%d\"\n",
+             code->ExecutableSize(), *str, *sourcestr, line);
   msg.WriteToLogFile();
 #endif
 }
 
 
-void Logger::CodeCreateEvent(const char* tag, Code* code, int args_count) {
+void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (!Log::IsEnabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
-  msg.Append("code-creation,%s,0x%" V8PRIxPTR ",%d,\"args_count: %d\"\n", tag,
-             code->address(),
-             code->ExecutableSize(),
-             args_count);
+  msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
+  msg.AppendAddress(code->address());
+  msg.Append(",%d,\"args_count: %d\"\n", code->ExecutableSize(), args_count);
   msg.WriteToLogFile();
 #endif
 }
@@ -661,9 +694,10 @@
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (!Log::IsEnabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
-  msg.Append("code-creation,%s,0x%" V8PRIxPTR ",%d,\"", "RegExp",
-             code->address(),
-             code->ExecutableSize());
+  msg.Append("%s,%s,",
+             log_events_[CODE_CREATION_EVENT], log_events_[REG_EXP_TAG]);
+  msg.AppendAddress(code->address());
+  msg.Append(",%d,\"", code->ExecutableSize());
   msg.AppendDetailed(source, false);
   msg.Append("\"\n");
   msg.WriteToLogFile();
@@ -671,23 +705,57 @@
 }
 
 
-void Logger::CodeAllocateEvent(Code* code, Assembler* assem) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::IsEnabled() || !FLAG_log_code) return;
-  LogMessageBuilder msg;
-  msg.Append("code-allocate,0x%" V8PRIxPTR ",0x%" V8PRIxPTR "\n",
-             code->address(),
-             assem);
-  msg.WriteToLogFile();
-#endif
-}
+
+// A class that contains all common code dealing with record compression.
+class CompressionHelper {
+ public:
+  explicit CompressionHelper(int window_size)
+      : compressor_(window_size), repeat_count_(0) { }
+
+  // Handles storing the message in the compressor, retrieving the previous
+  // one and prefixing it with a repeat count, if needed.
+  // Returns true if the message needs to be written to the log.
+  bool HandleMessage(LogMessageBuilder* msg) {
+    if (!msg->StoreInCompressor(&compressor_)) {
+      // Current message repeats the previous one, don't write it.
+      ++repeat_count_;
+      return false;
+    }
+    if (repeat_count_ == 0) {
+      return msg->RetrieveCompressedPrevious(&compressor_);
+    }
+    OS::SNPrintF(prefix_, "%s,%d,",
+                 Logger::log_events_[Logger::REPEAT_META_EVENT],
+                 repeat_count_ + 1);
+    repeat_count_ = 0;
+    return msg->RetrieveCompressedPrevious(&compressor_, prefix_.start());
+  }
+
+ private:
+  LogRecordCompressor compressor_;
+  int repeat_count_;
+  EmbeddedVector<char, 20> prefix_;
+};
+
+#endif  // ENABLE_LOGGING_AND_PROFILING
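
The repeat meta event above lets a run of identical records collapse into a single "r,<count>,<record>" line (or "repeat,..." when long names are in use). A minimal consumer-side sketch of undoing just the repetition, leaving record decompression aside; the line format is inferred from the prefix built in HandleMessage:

  // Expand "r,<count>,<record>" lines into <count> copies of <record>.
  function expandRepeats(lines) {
    var result = [];
    for (var i = 0; i < lines.length; i++) {
      var line = lines[i];
      if (line.indexOf('r,') == 0) {
        var countEnd = line.indexOf(',', 2);
        var count = parseInt(line.substring(2, countEnd), 10);
        var record = line.substring(countEnd + 1);
        for (var j = 0; j < count; j++) result.push(record);
      } else {
        result.push(line);
      }
    }
    return result;
  }
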
 
 
 void Logger::CodeMoveEvent(Address from, Address to) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
+  static Address prev_to_ = NULL;
   if (!Log::IsEnabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
-  msg.Append("code-move,0x%" V8PRIxPTR ",0x%" V8PRIxPTR "\n", from, to);
+  msg.Append("%s,", log_events_[CODE_MOVE_EVENT]);
+  msg.AppendAddress(from);
+  msg.Append(',');
+  msg.AppendAddress(to, prev_to_);
+  prev_to_ = to;
+  if (FLAG_compress_log) {
+    ASSERT(compression_helper_ != NULL);
+    if (!compression_helper_->HandleMessage(&msg)) return;
+  }
+  msg.Append('\n');
   msg.WriteToLogFile();
 #endif
 }
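
CodeMoveEvent (and TickEvent below) now write most addresses through AppendAddress with a previous value, so consecutive records can carry small deltas instead of full pointers. Assuming -- this is an assumption, not visible in this hunk -- that a relative address is printed as "+hex"/"-hex" and an absolute one as "0x" followed by hex digits, a consumer resolves the field like this:

  // Resolve an address field against the previously seen value.
  function resolveAddress(field, prev) {
    if (field.charAt(0) == '+') return prev + parseInt(field.substring(1), 16);
    if (field.charAt(0) == '-') return prev - parseInt(field.substring(1), 16);
    return parseInt(field, 16);  // absolute "0x..." form
  }
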
@@ -697,7 +765,13 @@
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (!Log::IsEnabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
-  msg.Append("code-delete,0x%" V8PRIxPTR "\n", from);
+  msg.Append("%s,", log_events_[CODE_DELETE_EVENT]);
+  msg.AppendAddress(from);
+  if (FLAG_compress_log) {
+    ASSERT(compression_helper_ != NULL);
+    if (!compression_helper_->HandleMessage(&msg)) return;
+  }
+  msg.Append('\n');
   msg.WriteToLogFile();
 #endif
 }
@@ -802,14 +876,26 @@
 #ifdef ENABLE_LOGGING_AND_PROFILING
 void Logger::TickEvent(TickSample* sample, bool overflow) {
   if (!Log::IsEnabled() || !FLAG_prof) return;
+  static Address prev_sp = NULL;
   LogMessageBuilder msg;
-  msg.Append("tick,0x%" V8PRIxPTR ",0x%" V8PRIxPTR ",%d",
-             sample->pc, sample->sp, static_cast<int>(sample->state));
+  msg.Append("%s,", log_events_[TICK_EVENT]);
+  Address prev_addr = reinterpret_cast<Address>(sample->pc);
+  msg.AppendAddress(prev_addr);
+  msg.Append(',');
+  msg.AppendAddress(reinterpret_cast<Address>(sample->sp), prev_sp);
+  prev_sp = reinterpret_cast<Address>(sample->sp);
+  msg.Append(",%d", static_cast<int>(sample->state));
   if (overflow) {
     msg.Append(",overflow");
   }
   for (int i = 0; i < sample->frames_count; ++i) {
-    msg.Append(",0x%" V8PRIxPTR, sample->stack[i]);
+    msg.Append(',');
+    msg.AppendAddress(sample->stack[i], prev_addr);
+    prev_addr = sample->stack[i];
+  }
+  if (FLAG_compress_log) {
+    ASSERT(compression_helper_ != NULL);
+    if (!compression_helper_->HandleMessage(&msg)) return;
   }
   msg.Append('\n');
   msg.WriteToLogFile();
@@ -913,17 +999,19 @@
         int line_num = GetScriptLineNumber(script, shared->start_position());
         if (line_num > 0) {
           line_num += script->line_offset()->value() + 1;
-          LOG(CodeCreateEvent("LazyCompile", shared->code(), *func_name,
+          LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG,
+                              shared->code(), *func_name,
                               *script_name, line_num));
         } else {
           // Can't distinguish eval and script here, so always use Script.
-          LOG(CodeCreateEvent("Script", shared->code(), *script_name));
+          LOG(CodeCreateEvent(Logger::SCRIPT_TAG,
+                              shared->code(), *script_name));
         }
         continue;
       }
     }
     // If no script or script has no name.
-    LOG(CodeCreateEvent("LazyCompile", shared->code(), *func_name));
+    LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG, shared->code(), *func_name));
   }
 
   DeleteArray(sfis);
@@ -1013,6 +1101,12 @@
     sliding_state_window_ = new SlidingStateWindow();
   }
 
+  log_events_ = FLAG_compress_log ?
+      kCompressedLogEventsNames : kLongLogEventsNames;
+  if (FLAG_compress_log) {
+    compression_helper_ = new CompressionHelper(kCompressionWindowSize);
+  }
+
   if (FLAG_prof) {
     profiler_ = new Profiler();
     if (!FLAG_prof_auto)
@@ -1041,6 +1135,9 @@
     profiler_ = NULL;
   }
 
+  delete compression_helper_;
+  compression_helper_ = NULL;
+
   delete sliding_state_window_;
   sliding_state_window_ = NULL;
 
diff --git a/src/log.h b/src/log.h
index 2f8f81c..08e957a 100644
--- a/src/log.h
+++ b/src/log.h
@@ -71,6 +71,7 @@
 class Semaphore;
 class SlidingStateWindow;
 class LogMessageBuilder;
+class CompressionHelper;
 
 #undef LOG
 #ifdef ENABLE_LOGGING_AND_PROFILING
@@ -102,8 +103,41 @@
 };
 
 
+#define LOG_EVENTS_AND_TAGS_LIST(V) \
+  V(CODE_CREATION_EVENT,            "code-creation",          "cc")       \
+  V(CODE_MOVE_EVENT,                "code-move",              "cm")       \
+  V(CODE_DELETE_EVENT,              "code-delete",            "cd")       \
+  V(TICK_EVENT,                     "tick",                   "t")        \
+  V(REPEAT_META_EVENT,              "repeat",                 "r")        \
+  V(BUILTIN_TAG,                    "Builtin",                "bi")       \
+  V(CALL_DEBUG_BREAK_TAG,           "CallDebugBreak",         "cdb")      \
+  V(CALL_DEBUG_PREPARE_STEP_IN_TAG, "CallDebugPrepareStepIn", "cdbsi")    \
+  V(CALL_IC_TAG,                    "CallIC",                 "cic")      \
+  V(CALL_INITIALIZE_TAG,            "CallInitialize",         "ci")       \
+  V(CALL_MEGAMORPHIC_TAG,           "CallMegamorphic",        "cmm")      \
+  V(CALL_MISS_TAG,                  "CallMiss",               "cm")       \
+  V(CALL_NORMAL_TAG,                "CallNormal",             "cn")       \
+  V(CALL_PRE_MONOMORPHIC_TAG,       "CallPreMonomorphic",     "cpm")      \
+  V(EVAL_TAG,                       "Eval",                   "e")        \
+  V(FUNCTION_TAG,                   "Function",               "f")        \
+  V(KEYED_LOAD_IC_TAG,              "KeyedLoadIC",            "klic")     \
+  V(KEYED_STORE_IC_TAG,             "KeyedStoreIC",           "ksic")     \
+  V(LAZY_COMPILE_TAG,               "LazyCompile",            "lc")       \
+  V(LOAD_IC_TAG,                    "LoadIC",                 "lic")      \
+  V(REG_EXP_TAG,                    "RegExp",                 "re")       \
+  V(SCRIPT_TAG,                     "Script",                 "sc")       \
+  V(STORE_IC_TAG,                   "StoreIC",                "sic")      \
+  V(STUB_TAG,                       "Stub",                   "s")
+
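
Each entry pairs a long name (readable logs) with a short one (compressed logs). Note that "cm" appears twice, for code-move and CallMiss; the two never occupy the same field of a record, so the reuse is presumably harmless. A log consumer can translate short names back using a table built from the same pairs, e.g. a partial sketch:

  // Short-to-long names, as paired in LOG_EVENTS_AND_TAGS_LIST above.
  var kLongNames = {
    'cc': 'code-creation', 'cd': 'code-delete', 't': 'tick', 'r': 'repeat',
    'lc': 'LazyCompile', 're': 'RegExp', 'sc': 'Script', 's': 'Stub'
  };
  kLongNames['lc'];  // 'LazyCompile'
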
 class Logger {
  public:
+#define DECLARE_ENUM(enum_item, ignore1, ignore2) enum_item,
+  enum LogEventsAndTags {
+    LOG_EVENTS_AND_TAGS_LIST(DECLARE_ENUM)
+    NUMBER_OF_LOG_EVENTS
+  };
+#undef DECLARE_ENUM
+
   // Acquires resources for logging if the right flags are set.
   static bool Setup();
 
@@ -163,14 +197,14 @@
 
   // ==== Events logged by --log-code. ====
   // Emits a code create event.
-  static void CodeCreateEvent(const char* tag, Code* code, const char* source);
-  static void CodeCreateEvent(const char* tag, Code* code, String* name);
-  static void CodeCreateEvent(const char* tag, Code* code, String* name,
+  static void CodeCreateEvent(LogEventsAndTags tag,
+                              Code* code, const char* source);
+  static void CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name);
+  static void CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name,
                               String* source, int line);
-  static void CodeCreateEvent(const char* tag, Code* code, int args_count);
+  static void CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count);
   // Emits a code create event for a RegExp.
   static void RegExpCodeCreateEvent(Code* code, String* source);
-  static void CodeAllocateEvent(Code* code, Assembler* assem);
   // Emits a code move event.
   static void CodeMoveEvent(Address from, Address to);
   // Emits a code delete event.
@@ -223,9 +257,15 @@
   // Profiler's sampling interval (in milliseconds).
   static const int kSamplingIntervalMs = 1;
 
+  // Size of window used for log records compression.
+  static const int kCompressionWindowSize = 4;
+
   // Emits the profiler's first message.
   static void ProfilerBeginEvent();
 
+  // Emits aliases for compressed messages.
+  static void LogAliases();
+
   // Emits the source code of a regexp. Used by regexp events.
   static void LogRegExpSource(Handle<JSRegExp> regexp);
 
@@ -261,8 +301,15 @@
   // recent VM states.
   static SlidingStateWindow* sliding_state_window_;
 
+  // An array of log event names.
+  static const char** log_events_;
+
+  // A helper instance, created only when log compression is enabled.
+  static CompressionHelper* compression_helper_;
+
   // Internal implementation classes with access to
   // private members.
+  friend class CompressionHelper;
   friend class EventLog;
   friend class TimeLog;
   friend class Profiler;
diff --git a/src/macros.py b/src/macros.py
index ebfd816..fdbdb58 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -60,6 +60,7 @@
 const msPerMinute      = 60000;
 const msPerHour        = 3600000;
 const msPerDay         = 86400000;
+const msPerMonth       = 2592000000;
 
 # For apinatives.js
 const kUninitialized = -1;
diff --git a/src/math.js b/src/math.js
index 86d6dd1..1f5ce87 100644
--- a/src/math.js
+++ b/src/math.js
@@ -44,39 +44,73 @@
 
 // ECMA 262 - 15.8.2.1
 function MathAbs(x) {
-  if (%_IsSmi(x)) {
-    return x >= 0 ? x : -x;
-  } else {
-    return %Math_abs(ToNumber(x));
-  }
+  if (%_IsSmi(x)) return x >= 0 ? x : -x;
+  if (!IS_NUMBER(x)) x = ToNumber(x);
+  return %Math_abs(x);
 }
 
 // ECMA 262 - 15.8.2.2
-function MathAcos(x) { return %Math_acos(ToNumber(x)); }
+function MathAcos(x) {
+  if (!IS_NUMBER(x)) x = ToNumber(x);
+  return %Math_acos(x);
+}
 
 // ECMA 262 - 15.8.2.3
-function MathAsin(x) { return %Math_asin(ToNumber(x)); }
+function MathAsin(x) {
+  if (!IS_NUMBER(x)) x = ToNumber(x);
+  return %Math_asin(x);
+}
 
 // ECMA 262 - 15.8.2.4
-function MathAtan(x) { return %Math_atan(ToNumber(x)); }
+function MathAtan(x) {
+  if (!IS_NUMBER(x)) x = ToNumber(x);
+  return %Math_atan(x);
+}
 
 // ECMA 262 - 15.8.2.5
-function MathAtan2(x, y) { return %Math_atan2(ToNumber(x), ToNumber(y)); }
+function MathAtan2(x, y) {
+  if (!IS_NUMBER(x)) x = ToNumber(x);
+  if (!IS_NUMBER(y)) y = ToNumber(y);
+  return %Math_atan2(x, y);
+}
 
 // ECMA 262 - 15.8.2.6
-function MathCeil(x) { return %Math_ceil(ToNumber(x)); }
+function MathCeil(x) {
+  if (!IS_NUMBER(x)) x = ToNumber(x);
+  return %Math_ceil(x);
+}
 
 // ECMA 262 - 15.8.2.7
-function MathCos(x) { return %Math_cos(ToNumber(x)); }
+function MathCos(x) {
+  if (!IS_NUMBER(x)) x = ToNumber(x);
+  return %_Math_cos(x);
+}
 
 // ECMA 262 - 15.8.2.8
-function MathExp(x) { return %Math_exp(ToNumber(x)); }
+function MathExp(x) {
+  if (!IS_NUMBER(x)) x = ToNumber(x);
+  return %Math_exp(x);
+}
 
 // ECMA 262 - 15.8.2.9
-function MathFloor(x) { return %Math_floor(ToNumber(x)); }
+function MathFloor(x) {
+  if (!IS_NUMBER(x)) x = ToNumber(x);
+  if (0 < x && x <= 0x7FFFFFFF) {
+    // Numbers in the range [0, 2^31) can be floored by converting
+    // them to an unsigned 32-bit value using the shift operator.
+    // We avoid doing so for -0, because the result of Math.floor(-0)
+    // has to be -0, which wouldn't be the case with the shift.
+    return x << 0;
+  } else {
+    return %Math_floor(x);
+  }
+}
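
The shift relies on the << operator truncating via ToInt32, which coincides with floor for values in (0, 2^31); the strict 0 < x guard is what keeps -0 (and negatives) on the slow path:

  5.7 << 0            // 5, same as Math.floor(5.7)
  -0 << 0             // 0 -- but Math.floor(-0) must be -0
  1 / Math.floor(-0)  // -Infinity, which is how the -0 requirement shows
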
 
 // ECMA 262 - 15.8.2.10
-function MathLog(x) { return %Math_log(ToNumber(x)); }
+function MathLog(x) {
+  if (!IS_NUMBER(x)) x = ToNumber(x);
+  return %Math_log(x);
+}
 
 // ECMA 262 - 15.8.2.11
 function MathMax(arg1, arg2) {  // length == 2
@@ -103,22 +137,40 @@
 }
 
 // ECMA 262 - 15.8.2.13
-function MathPow(x, y) { return %Math_pow(ToNumber(x), ToNumber(y)); }
+function MathPow(x, y) {
+  if (!IS_NUMBER(x)) x = ToNumber(x);
+  if (!IS_NUMBER(y)) y = ToNumber(y);
+  return %Math_pow(x, y);
+}
 
 // ECMA 262 - 15.8.2.14
-function MathRandom() { return %Math_random(); }
+function MathRandom() {
+  return %_RandomPositiveSmi() / 0x40000000;
+}
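
0x40000000 is 2^30; the largest positive smi is 2^30 - 1, so dividing by 2^30 maps the intrinsic's result into [0, 1). A stand-in sketch of the same shape (%_RandomPositiveSmi is an internal intrinsic; the function below is a hypothetical substitute):

  // Hypothetical substitute for the %_RandomPositiveSmi intrinsic.
  function RandomPositiveSmi() {
    return (Math.random() * 0x40000000) | 0;  // integer in [0, 2^30)
  }
  RandomPositiveSmi() / 0x40000000;  // in [0, 1)
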
 
 // ECMA 262 - 15.8.2.15
-function MathRound(x) { return %Math_round(ToNumber(x)); }
+function MathRound(x) {
+  if (!IS_NUMBER(x)) x = ToNumber(x);
+  return %Math_round(x);
+}
 
 // ECMA 262 - 15.8.2.16
-function MathSin(x) { return %Math_sin(ToNumber(x)); }
+function MathSin(x) {
+  if (!IS_NUMBER(x)) x = ToNumber(x);
+  return %_Math_sin(x);
+}
 
 // ECMA 262 - 15.8.2.17
-function MathSqrt(x) { return %Math_sqrt(ToNumber(x)); }
+function MathSqrt(x) {
+  if (!IS_NUMBER(x)) x = ToNumber(x);
+  return %Math_sqrt(x);
+}
 
 // ECMA 262 - 15.8.2.18
-function MathTan(x) { return %Math_tan(ToNumber(x)); }
+function MathTan(x) {
+  if (!IS_NUMBER(x)) x = ToNumber(x);
+  return %Math_tan(x);
+}
 
 
 // -------------------------------------------------------------------
diff --git a/src/messages.js b/src/messages.js
index df8a2d1..7805d47 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -230,6 +230,40 @@
   return MakeGenericError($Error, type, args);
 }
 
+/**
+ * Find a line number given a specific source position.
+ * @param {number} position The source position.
+ * @return {number} 0 if the input is too small, -1 if it is too large,
+ *     else the line number.
+ */
+Script.prototype.lineFromPosition = function(position) {
+  var lower = 0;
+  var upper = this.lineCount() - 1;
+
+  // Positions beyond the last line end are invalid, so bail out right away.
+  if (position > this.line_ends[upper]) {
+    return -1;
+  }
+
+  // This means we don't have to safeguard indexing line_ends[i - 1] below.
+  if (position <= this.line_ends[0]) {
+    return 0;
+  }
+
+  // Binary search to find line # from position range.
+  while (upper >= 1) {
+    var i = (lower + upper) >> 1;
+
+    if (position > this.line_ends[i]) {
+      lower = i + 1;
+    } else if (position <= this.line_ends[i - 1]) {
+      upper = i - 1;
+    } else {
+      return i;
+    }
+  }
+  return -1;
+}
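
A worked example of the search, assuming line_ends[i] holds the position of the character that ends line i -- say a three-line script with line_ends == [1, 4, 8]:

  script.lineFromPosition(0)  // 0: position <= line_ends[0]
  script.lineFromPosition(3)  // 1: line_ends[0] < 3 <= line_ends[1]
  script.lineFromPosition(8)  // 2
  script.lineFromPosition(9)  // -1: past the last line end
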
 
 /**
  * Get information on a specific source position.
@@ -241,19 +275,7 @@
  */
 Script.prototype.locationFromPosition = function (position,
                                                   include_resource_offset) {
-  var lineCount = this.lineCount();
-  var line = -1;
-  if (position <= this.line_ends[0]) {
-    line = 0;
-  } else {
-    for (var i = 1; i < lineCount; i++) {
-      if (this.line_ends[i - 1] < position && position <= this.line_ends[i]) {
-        line = i;
-        break;
-      }
-    }
-  }
-
+  var line = this.lineFromPosition(position);
   if (line == -1) return null;
 
   // Determine start, end and column.
@@ -308,16 +330,13 @@
   if (line == 0) {
     return this.locationFromPosition(offset_position + column, false);
   } else {
-    // Find the line where the offset position is located
-    var lineCount = this.lineCount();
-    var offset_line;
-    for (var i = 0; i < lineCount; i++) {
-      if (offset_position <= this.line_ends[i]) {
-        offset_line = i;
-        break;
-      }
+    // Find the line where the offset position is located.
+    var offset_line = this.lineFromPosition(offset_position);
+
+    if (offset_line == -1 || offset_line + line >= this.lineCount()) {
+      return null;
     }
-    if (offset_line + line >= lineCount) return null;
+
     return this.locationFromPosition(this.line_ends[offset_line + line - 1] + 1 + column);  // line > 0 here.
   }
 }
diff --git a/src/mirror-delay.js b/src/mirror-delay.js
index f5a12c7..060586d 100644
--- a/src/mirror-delay.js
+++ b/src/mirror-delay.js
@@ -34,9 +34,14 @@
 Date;
 
 
+// Handle id counters.
 var next_handle_ = 0;
+var next_transient_handle_ = -1;
+
+// Mirror cache.
 var mirror_cache_ = [];
 
+
 /**
  * Clear the mirror handle cache.
  */
@@ -50,19 +55,25 @@
  * Returns the mirror for a specified value or object.
  *
 * @param {value or Object} value the value or object to retrieve the mirror for
+ * @param {boolean} opt_transient indicates whether this object is transient
+ *    and should not be added to the mirror cache. The default is
+ *    non-transient.
 * @returns {Mirror} the mirror that reflects the passed value or object
  */
-function MakeMirror(value) {
+function MakeMirror(value, opt_transient) {
   var mirror;
-  for (id in mirror_cache_) {
-    mirror = mirror_cache_[id];
-    if (mirror.value() === value) {
-      return mirror;
-    }
-    // Special check for NaN as NaN == NaN is false.
-    if (mirror.isNumber() && isNaN(mirror.value()) &&
-        typeof value == 'number' && isNaN(value)) {
-      return mirror;
+
+  // Look for non-transient mirrors in the mirror cache.
+  if (!opt_transient) {
+    for (id in mirror_cache_) {
+      mirror = mirror_cache_[id];
+      if (mirror.value() === value) {
+        return mirror;
+      }
+      // Special check for NaN as NaN == NaN is false.
+      if (mirror.isNumber() && isNaN(mirror.value()) &&
+          typeof value == 'number' && isNaN(value)) {
+        return mirror;
+      }
     }
   }
   
@@ -89,7 +100,7 @@
   } else if (IS_SCRIPT(value)) {
     mirror = new ScriptMirror(value);
   } else {
-    mirror = new ObjectMirror(value);
+    mirror = new ObjectMirror(value, OBJECT_TYPE, opt_transient);
   }
 
   mirror_cache_[mirror.handle()] = mirror;
@@ -155,6 +166,7 @@
 const FRAME_TYPE = 'frame';
 const SCRIPT_TYPE = 'script';
 const CONTEXT_TYPE = 'context';
+const SCOPE_TYPE = 'scope';
 
 // Maximum length when sending strings through the JSON protocol.
 const kMaxProtocolStringLength = 80;
@@ -185,6 +197,13 @@
 PropertyAttribute.DontDelete = DONT_DELETE;
 
 
+// A copy of the scope types from runtime.cc.
+ScopeType = { Global: 0,
+              Local: 1,
+              With: 2,
+              Closure: 3 };
+
+
 // Mirror hierarchy:
 //   - Mirror
 //     - ValueMirror
@@ -373,6 +392,15 @@
 
 
 /**
+ * Check whether the mirror reflects a scope.
+ * @returns {boolean} True if the mirror reflects a scope
+ */
+Mirror.prototype.isScope = function() {
+  return this instanceof ScopeMirror;
+}
+
+
+/**
  * Allocate a handle id for this object.
  */
 Mirror.prototype.allocateHandle_ = function() {
@@ -380,6 +408,15 @@
 }
 
 
+/**
+ * Allocate a transient handle id for this object. Transient handles are
+ * negative.
+ */
+Mirror.prototype.allocateTransientHandle_ = function() {
+  this.handle_ = next_transient_handle_--;
+}
+
+
 Mirror.prototype.toText = function() {
   // Simple to-text fallback which is used when there is no specialization
   // in a subclass.
   return "#<" + builtins.GetInstanceName(this.constructor.name) + ">";
@@ -390,13 +427,19 @@
  * Base class for all value mirror objects.
  * @param {string} type The type of the mirror
  * @param {value} value The value reflected by this mirror
+ * @param {boolean} transient indicates whether this object is transient and
+ *    should be given a transient handle
  * @constructor
  * @extends Mirror
  */
-function ValueMirror(type, value) {
+function ValueMirror(type, value, transient) {
   Mirror.call(this, type);
   this.value_ = value;
-  this.allocateHandle_();
+  if (!transient) {
+    this.allocateHandle_();
+  } else {
+    this.allocateTransientHandle_();
+  }
 }
 inherits(ValueMirror, Mirror);
 
@@ -525,11 +568,13 @@
 /**
  * Mirror object for objects.
  * @param {object} value The object reflected by this mirror
+ * @param {boolean} transient indicates whether this object is transient and
+ *    should be given a transient handle
  * @constructor
  * @extends ValueMirror
  */
-function ObjectMirror(value, type) {
-  ValueMirror.call(this, type || OBJECT_TYPE, value);
+function ObjectMirror(value, type, transient) {
+  ValueMirror.call(this, type || OBJECT_TYPE, value, transient);
 }
 inherits(ObjectMirror, ValueMirror);
 
@@ -1080,7 +1125,7 @@
 
 
 PropertyMirror.prototype.value = function() {
-  return MakeMirror(this.value_);
+  return MakeMirror(this.value_, false);
 }
 
 
@@ -1135,7 +1180,7 @@
   if (this.hasGetter()) {
     return MakeMirror(this.getter_);
   } else {
-    return new UndefinedMirror();
+    return GetUndefinedMirror();
   }
 }
 
@@ -1149,7 +1194,7 @@
   if (this.hasSetter()) {
     return MakeMirror(this.setter_);
   } else {
-    return new UndefinedMirror();
+    return GetUndefinedMirror();
   }
 }
 
@@ -1294,6 +1339,11 @@
 }
 
 
+FrameDetails.prototype.scopeCount = function() {
+  return %GetScopeCount(this.break_id_, this.frameId());
+}
+
+
 /**
  * Mirror object for stack frames.
  * @param {number} break_id The break id in the VM for which this frame is
@@ -1419,6 +1469,16 @@
 };
 
 
+FrameMirror.prototype.scopeCount = function() {
+  return this.details_.scopeCount();
+};
+
+
+FrameMirror.prototype.scope = function(index) {
+  return new ScopeMirror(this, index);
+};
+
+
 FrameMirror.prototype.evaluate = function(source, disable_break) {
   var result = %DebugEvaluate(this.break_id_, this.details_.frameId(),
                               source, Boolean(disable_break));
@@ -1562,6 +1622,70 @@
 }
 
 
+const kScopeDetailsTypeIndex = 0;
+const kScopeDetailsObjectIndex = 1;
+
+function ScopeDetails(frame, index) {
+  this.break_id_ = frame.break_id_;
+  this.details_ = %GetScopeDetails(frame.break_id_,
+                                   frame.details_.frameId(),
+                                   index);
+}
+
+
+ScopeDetails.prototype.type = function() {
+  %CheckExecutionState(this.break_id_);
+  return this.details_[kScopeDetailsTypeIndex];
+}
+
+
+ScopeDetails.prototype.object = function() {
+  %CheckExecutionState(this.break_id_);
+  return this.details_[kScopeDetailsObjectIndex];
+}
+
+
+/**
+ * Mirror object for scope.
+ * @param {FrameMirror} frame The frame this scope is a part of
+ * @param {number} index The scope index in the frame
+ * @constructor
+ * @extends Mirror
+ */
+function ScopeMirror(frame, index) {
+  Mirror.call(this, SCOPE_TYPE);
+  this.frame_index_ = frame.index_;
+  this.scope_index_ = index;
+  this.details_ = new ScopeDetails(frame, index);
+}
+inherits(ScopeMirror, Mirror);
+
+
+ScopeMirror.prototype.frameIndex = function() {
+  return this.frame_index_;
+};
+
+
+ScopeMirror.prototype.scopeIndex = function() {
+  return this.scope_index_;
+};
+
+
+ScopeMirror.prototype.scopeType = function() {
+  return this.details_.type();
+};
+
+
+ScopeMirror.prototype.scopeObject = function() {
+  // For local and closure scopes create a transient mirror, as the scope
+  // objects are materialized on the fly when the scope is queried and
+  // therefore do not preserve identity.
+  var transient = this.scopeType() == ScopeType.Local ||
+                  this.scopeType() == ScopeType.Closure;
+  return MakeMirror(this.details_.object(), transient);
+};
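
Together with the new FrameMirror.scopeCount/scope methods this gives debugger code a way to walk a frame's scope chain, innermost first; a minimal sketch (frame is a FrameMirror, print is a hypothetical output helper):

  for (var i = 0; i < frame.scopeCount(); i++) {
    var scope = frame.scope(i);
    // scopeType() is one of ScopeType.Global/Local/With/Closure.
    print(scope.scopeType() + ': ' + scope.scopeObject().toText());
  }
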
+
+
 /**
  * Mirror object for script source.
  * @param {Script} script The script object
@@ -1829,6 +1953,7 @@
   return o;
 };
 
+
 JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
                                                        details) {
   // If serializing a reference to a mirror just return the reference and add
@@ -1900,6 +2025,11 @@
       this.serializeFrame_(mirror, content);
       break;
 
+    case SCOPE_TYPE:
+      // Add object representation.
+      this.serializeScope_(mirror, content);
+      break;
+
     case SCRIPT_TYPE:
       // Script is represented by id, name and source attributes.
       if (mirror.name()) {
@@ -2102,6 +2232,14 @@
 }
 
 
+JSONProtocolSerializer.prototype.serializeScope_ = function(mirror, content) {
+  content.index = mirror.scopeIndex();
+  content.frameIndex = mirror.frameIndex();
+  content.type = mirror.scopeType();
+  content.object = this.serializeReference(mirror.scopeObject());
+}
+
+
 /**
  * Convert a number to a protocol value. For all finite numbers the number
  * itself is returned. For non finite numbers NaN, Infinite and
diff --git a/src/objects.cc b/src/objects.cc
index 0546578..cbd36e0 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -1695,7 +1695,7 @@
 
   // Check access rights if needed.
   if (IsAccessCheckNeeded()
-    && !Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
+      && !Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
     return SetPropertyWithFailedAccessCheck(result, name, value);
   }
 
@@ -5203,27 +5203,6 @@
 }
 
 
-Object* JSObject::SetElementPostInterceptor(uint32_t index, Object* value) {
-  if (HasFastElements()) return SetFastElement(index, value);
-
-  // Dictionary case.
-  ASSERT(!HasFastElements());
-
-  FixedArray* elms = FixedArray::cast(elements());
-  Object* result = Dictionary::cast(elms)->AtNumberPut(index, value);
-  if (result->IsFailure()) return result;
-  if (elms != FixedArray::cast(result)) {
-    set_elements(FixedArray::cast(result));
-  }
-
-  if (IsJSArray()) {
-    return JSArray::cast(this)->JSArrayUpdateLengthFromIndex(index, value);
-  }
-
-  return value;
-}
-
-
 Object* JSObject::SetElementWithInterceptor(uint32_t index, Object* value) {
   // Make sure that the top context does not change when doing
   // callbacks or interceptor calls.
@@ -5250,7 +5229,7 @@
     if (!result.IsEmpty()) return *value_handle;
   }
   Object* raw_result =
-      this_handle->SetElementPostInterceptor(index, *value_handle);
+      this_handle->SetElementWithoutInterceptor(index, *value_handle);
   RETURN_IF_SCHEDULED_EXCEPTION();
   return raw_result;
 }
@@ -5332,6 +5311,11 @@
     return SetElementWithInterceptor(index, value);
   }
 
+  return SetElementWithoutInterceptor(index, value);
+}
+
+
+Object* JSObject::SetElementWithoutInterceptor(uint32_t index, Object* value) {
   // Fast case.
   if (HasFastElements()) return SetFastElement(index, value);
 
@@ -5438,7 +5422,21 @@
     Dictionary* dictionary = element_dictionary();
     int entry = dictionary->FindNumberEntry(index);
     if (entry != -1) {
-      return dictionary->ValueAt(entry);
+      Object* element = dictionary->ValueAt(entry);
+      PropertyDetails details = dictionary->DetailsAt(entry);
+      if (details.type() == CALLBACKS) {
+        // Only accessors allowed as elements.
+        FixedArray* structure = FixedArray::cast(element);
+        Object* getter = structure->get(kGetterIndex);
+        if (getter->IsJSFunction()) {
+          return GetPropertyWithDefinedGetter(receiver,
+                                              JSFunction::cast(getter));
+        } else {
+          // Getter is not a function.
+          return Heap::undefined_value();
+        }
+      }
+      return element;
     }
   }
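
This is the dictionary-elements half of the getter fixes noted in the ChangeLog: an accessor pair stored under a numeric key is now honored on element reads. A small illustration:

  var a = {};
  a.__defineGetter__(7, function() { return 'from getter'; });
  a[7];  // 'from getter' -- the CALLBACKS case above calls the getter
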
 
@@ -6436,10 +6434,6 @@
 
   AssertNoAllocation no_alloc;
 
-  // Loose all details on properties when moving them around.
-  // Elements do not have special details like properties.
-  PropertyDetails no_details = PropertyDetails(NONE, NORMAL);
-
   uint32_t pos = 0;
   uint32_t undefs = 0;
   for (int i = 0; i < capacity; i++) {
@@ -6450,21 +6444,27 @@
       ASSERT(!k->IsHeapNumber() || HeapNumber::cast(k)->value() >= 0);
       ASSERT(!k->IsHeapNumber() || HeapNumber::cast(k)->value() <= kMaxUInt32);
       Object* value = dict->ValueAt(i);
+      PropertyDetails details = dict->DetailsAt(i);
+      if (details.type() == CALLBACKS) {
+        // Bail out and do the sorting of undefineds and array holes in JS.
+        return Smi::FromInt(-1);
+      }
       uint32_t key = NumberToUint32(k);
       if (key < limit) {
         if (value->IsUndefined()) {
           undefs++;
         } else {
-          new_dict->AddNumberEntry(pos, value, no_details);
+          new_dict->AddNumberEntry(pos, value, details);
           pos++;
         }
       } else {
-        new_dict->AddNumberEntry(key, value, no_details);
+        new_dict->AddNumberEntry(key, value, details);
       }
     }
   }
 
   uint32_t result = pos;
+  PropertyDetails no_details = PropertyDetails(NONE, NORMAL);
   while (undefs > 0) {
     new_dict->AddNumberEntry(pos, Heap::undefined_value(), no_details);
     pos++;
diff --git a/src/objects.h b/src/objects.h
index 493d22b..21907f8 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1162,8 +1162,28 @@
 
   // Layout description.
   static const int kValueOffset = HeapObject::kHeaderSize;
+  // IEEE doubles are two 32 bit words.  The first is just mantissa, the second
+  // is a mixture of sign, exponent and mantissa.  Our current platforms are all
+  // little endian apart from non-EABI arm which is little endian with big
+  // endian floating point word ordering!
+#if !defined(V8_HOST_ARCH_ARM) || __ARM_EABI__
+  static const int kMantissaOffset = kValueOffset;
+  static const int kExponentOffset = kValueOffset + 4;
+#else
+  static const int kMantissaOffset = kValueOffset + 4;
+  static const int kExponentOffset = kValueOffset;
+# define BIG_ENDIAN_FLOATING_POINT 1
+#endif
   static const int kSize = kValueOffset + kDoubleSize;
 
+  static const uint32_t kSignMask = 0x80000000u;
+  static const uint32_t kExponentMask = 0x7ff00000u;
+  static const uint32_t kMantissaMask = 0xfffffu;
+  static const int kExponentBias = 1023;
+  static const int kExponentShift = 20;
+  static const int kMantissaBitsInTopWord = 20;
+  static const int kNonMantissaBitsInTopWord = 12;
+
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(HeapNumber);
 };
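
The new constants spell out the IEEE 754 double layout as seen through the two 32-bit words; for the sign/exponent word:

  bit 31      : sign                 (kSignMask     0x80000000)
  bits 30..20 : biased exponent      (kExponentMask 0x7ff00000, shift 20)
  bits 19..0  : top 20 mantissa bits (kMantissaMask 0x000fffff)

For example, 1.0 has exponent kExponentBias (1023) and a zero mantissa, so its top word is 1023 << 20 == 0x3ff00000.
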
@@ -1518,7 +1538,7 @@
 
  private:
   Object* SetElementWithInterceptor(uint32_t index, Object* value);
-  Object* SetElementPostInterceptor(uint32_t index, Object* value);
+  Object* SetElementWithoutInterceptor(uint32_t index, Object* value);
 
   Object* GetElementPostInterceptor(JSObject* receiver, uint32_t index);
 
@@ -2470,7 +2490,7 @@
     return ((1 << kIsHiddenPrototype) & bit_field()) != 0;
   }
 
-  // Tells whether the instance has a named interceptor.
+  // Records and queries whether the instance has a named interceptor.
   inline void set_has_named_interceptor() {
     set_bit_field(bit_field() | (1 << kHasNamedInterceptor));
   }
@@ -2479,7 +2499,7 @@
     return ((1 << kHasNamedInterceptor) & bit_field()) != 0;
   }
 
-  // Tells whether the instance has a named interceptor.
+  // Records and queries whether the instance has an indexed interceptor.
   inline void set_has_indexed_interceptor() {
     set_bit_field(bit_field() | (1 << kHasIndexedInterceptor));
   }
@@ -4008,10 +4028,9 @@
 // If an accessor was found and it does not have a setter,
 // the request is ignored.
 //
-// To allow shadow an accessor property, the accessor can
-// have READ_ONLY property attribute so that a new value
-// is added to the local object to shadow the accessor
-// in prototypes.
+// If the accessor in the prototype has the READ_ONLY property attribute, then
+// a new value is added to the local object when the property is set.
+// This shadows the accessor in the prototype.
 class AccessorInfo: public Struct {
  public:
   DECL_ACCESSORS(getter, Object)
diff --git a/src/parser.cc b/src/parser.cc
index 271c3fd..a9a5e32 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -2647,6 +2647,21 @@
         }
       }
 
+      // Convert constant divisions to multiplications for speed.
+      if (op == Token::DIV &&
+          y && y->AsLiteral() && y->AsLiteral()->handle()->IsNumber()) {
+        double y_val = y->AsLiteral()->handle()->Number();
+        int64_t y_int = static_cast<int64_t>(y_val);
+        // There are rounding issues with this optimization, but they don't
+        // apply if the divisor has a reciprocal that can be precisely
+        // represented as a floating point number, which is the case when
+        // the divisor is an integer power of 2.
+        if (static_cast<double>(y_int) == y_val && IsPowerOf2(y_int)) {
+          y = NewNumberLiteral(1 / y_val);
+          op = Token::MUL;
+        }
+      }
+
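
For example (illustration, not part of the patch): x / 8 can become x * 0.125 because 1/8 is exactly representable, whereas 1/10 is not:

  3 / 8  === 3 * 0.125  // true, for any value in place of 3
  3 / 10 === 3 * 0.1    // false: 3 * 0.1 is 0.30000000000000004
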
       // For now we distinguish between comparisons and other binary
       // operations.  (We could combine the two and get rid of this
       // code and AST node eventually.)
diff --git a/src/regexp-macro-assembler-irregexp.cc b/src/regexp-macro-assembler-irregexp.cc
index b87c51f..eea3c23 100644
--- a/src/regexp-macro-assembler-irregexp.cc
+++ b/src/regexp-macro-assembler-irregexp.cc
@@ -47,6 +47,7 @@
 
 RegExpMacroAssemblerIrregexp::~RegExpMacroAssemblerIrregexp() {
   if (backtrack_.is_linked()) backtrack_.Unuse();
+  if (own_buffer_) buffer_.Dispose();
 }
 
 
diff --git a/src/runtime.cc b/src/runtime.cc
index 78be512..d1c9162 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -2416,6 +2416,19 @@
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
 
+  // Fast case where the result is a one character string.
+  if (args[0]->IsSmi() && args[1]->IsSmi()) {
+    int value = Smi::cast(args[0])->value();
+    int radix = Smi::cast(args[1])->value();
+    if (value >= 0 && value < radix) {
+      RUNTIME_ASSERT(radix <= 36);
+      // Character array used for conversion.
+      static const char kCharTable[] = "0123456789abcdefghijklmnopqrstuvwxyz";
+      return Heap::LookupSingleCharacterStringFromCode(kCharTable[value]);
+    }
+  }
+
+  // Slow case.
   CONVERT_DOUBLE_CHECKED(value, args[0]);
   if (isnan(value)) {
     return Heap::AllocateStringFromAscii(CStrVector("NaN"));
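
The fast case covers exactly the calls whose result is a single digit, for instance:

  (5).toString(8);    // "5"
  (10).toString(16);  // "a" -- kCharTable[10]
  (35).toString(36);  // "z" -- the last entry the table can produce
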
@@ -4168,24 +4181,6 @@
   }
 }
 
-// Returns a number value with positive sign, greater than or equal to
-// 0 but less than 1, chosen randomly.
-static Object* Runtime_Math_random(Arguments args) {
-  NoHandleAllocation ha;
-  ASSERT(args.length() == 0);
-
-  // To get much better precision, we combine the results of two
-  // invocations of random(). The result is computed by normalizing a
-  // double in the range [0, RAND_MAX + 1) obtained by adding the
-  // high-order bits in the range [0, RAND_MAX] with the low-order
-  // bits in the range [0, 1).
-  double lo = static_cast<double>(random()) * (1.0 / (RAND_MAX + 1.0));
-  double hi = static_cast<double>(random());
-  double result = (hi + lo) * (1.0 / (RAND_MAX + 1.0));
-  ASSERT(result >= 0 && result < 1);
-  return Heap::AllocateHeapNumber(result);
-}
-
 
 static Object* Runtime_Math_round(Arguments args) {
   NoHandleAllocation ha;
@@ -4821,8 +4816,8 @@
     // and print some interesting cpu debugging info.
     JavaScriptFrameIterator it;
     JavaScriptFrame* frame = it.frame();
-    PrintF("fp = %p, sp = %p, pp = %p: ",
-           frame->fp(), frame->sp(), frame->pp());
+    PrintF("fp = %p, sp = %p, caller_sp = %p: ",
+           frame->fp(), frame->sp(), frame->caller_sp());
   } else {
     PrintF("DebugPrint: ");
   }
@@ -6106,6 +6101,405 @@
 }
 
 
+// Copy all the context locals into an object used to materialize a scope.
+static void CopyContextLocalsToScopeObject(Handle<Code> code,
+                                           ScopeInfo<>& scope_info,
+                                           Handle<Context> context,
+                                           Handle<JSObject> scope_object) {
+  // Copy all context locals into the scope object.
+  for (int i = Context::MIN_CONTEXT_SLOTS;
+       i < scope_info.number_of_context_slots();
+       i++) {
+    int context_index =
+        ScopeInfo<>::ContextSlotIndex(*code,
+                                      *scope_info.context_slot_name(i),
+                                      NULL);
+
+    // Don't include the arguments shadow (.arguments) context variable.
+    if (*scope_info.context_slot_name(i) != Heap::arguments_shadow_symbol()) {
+      SetProperty(scope_object,
+                  scope_info.context_slot_name(i),
+                  Handle<Object>(context->get(context_index)), NONE);
+    }
+  }
+}
+
+
+// Create a plain JSObject which materializes the local scope for the specified
+// frame.
+static Handle<JSObject> MaterializeLocalScope(JavaScriptFrame* frame) {
+  Handle<JSFunction> function(JSFunction::cast(frame->function()));
+  Handle<Code> code(function->code());
+  ScopeInfo<> scope_info(*code);
+
+  // Allocate and initialize a JSObject with all the arguments, stack locals,
+  // heap locals and extension properties of the debugged function.
+  Handle<JSObject> local_scope = Factory::NewJSObject(Top::object_function());
+
+  // First fill all parameters.
+  for (int i = 0; i < scope_info.number_of_parameters(); ++i) {
+    SetProperty(local_scope,
+                scope_info.parameter_name(i),
+                Handle<Object>(frame->GetParameter(i)), NONE);
+  }
+
+  // Second fill all stack locals.
+  for (int i = 0; i < scope_info.number_of_stack_slots(); i++) {
+    SetProperty(local_scope,
+                scope_info.stack_slot_name(i),
+                Handle<Object>(frame->GetExpression(i)), NONE);
+  }
+
+  // Third fill all context locals.
+  Handle<Context> frame_context(Context::cast(frame->context()));
+  Handle<Context> function_context(frame_context->fcontext());
+  CopyContextLocalsToScopeObject(code, scope_info,
+                                 function_context, local_scope);
+
+  // Finally copy any properties from the function context extension. These
+  // will be variables introduced by eval.
+  if (function_context->closure() == *function) {
+    if (function_context->has_extension() &&
+        !function_context->IsGlobalContext()) {
+      Handle<JSObject> ext(JSObject::cast(function_context->extension()));
+      Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext);
+      for (int i = 0; i < keys->length(); i++) {
+        // Names of variables introduced by eval are strings.
+        ASSERT(keys->get(i)->IsString());
+        Handle<String> key(String::cast(keys->get(i)));
+        SetProperty(local_scope, key, GetProperty(ext, key), NONE);
+      }
+    }
+  }
+  return local_scope;
+}
+
+
+// Create a plain JSObject which materializes the closure content for the
+// context.
+static Handle<JSObject> MaterializeClosure(Handle<Context> context) {
+  ASSERT(context->is_function_context());
+
+  Handle<Code> code(context->closure()->code());
+  ScopeInfo<> scope_info(*code);
+
+  // Allocate and initialize a JSObject with all the content of this
+  // function's closure.
+  Handle<JSObject> closure_scope = Factory::NewJSObject(Top::object_function());
+
+  // Check whether the arguments shadow object exists.
+  int arguments_shadow_index =
+      ScopeInfo<>::ContextSlotIndex(*code,
+                                    Heap::arguments_shadow_symbol(),
+                                    NULL);
+  if (arguments_shadow_index >= 0) {
+    // In this case all the arguments are available in the arguments shadow
+    // object.
+    Handle<JSObject> arguments_shadow(
+        JSObject::cast(context->get(arguments_shadow_index)));
+    for (int i = 0; i < scope_info.number_of_parameters(); ++i) {
+      SetProperty(closure_scope,
+                  scope_info.parameter_name(i),
+                  Handle<Object>(arguments_shadow->GetElement(i)), NONE);
+    }
+  }
+
+  // Copy all context locals into the scope object.
+  CopyContextLocalsToScopeObject(code, scope_info, context, closure_scope);
+
+  // Finally copy any properties from the function context extension. These
+  // will be variables introduced by eval.
+  if (context->has_extension()) {
+    Handle<JSObject> ext(JSObject::cast(context->extension()));
+    Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext);
+    for (int i = 0; i < keys->length(); i++) {
+      // Names of variables introduced by eval are strings.
+      ASSERT(keys->get(i)->IsString());
+      Handle<String> key(String::cast(keys->get(i)));
+      SetProperty(closure_scope, key, GetProperty(ext, key), NONE);
+    }
+  }
+
+  return closure_scope;
+}
+
+
+// Iterate over the actual scopes visible from a stack frame. All scopes are
+// backed by an actual context except the local scope, which is inserted
+// "artifically" in the context chain.
+class ScopeIterator {
+ public:
+  enum ScopeType {
+    ScopeTypeGlobal = 0,
+    ScopeTypeLocal,
+    ScopeTypeWith,
+    ScopeTypeClosure
+  };
+
+  explicit ScopeIterator(JavaScriptFrame* frame)
+    : frame_(frame),
+      function_(JSFunction::cast(frame->function())),
+      context_(Context::cast(frame->context())),
+      local_done_(false),
+      at_local_(false) {
+
+    // Check whether the first scope is actually a local scope.
+    if (context_->IsGlobalContext()) {
+      // If there is a stack slot for .result then this local scope has been
+      // created for evaluating top level code and it is not a real local scope.
+      // Checking for the existence of .result seems fragile, but the scope info
+      // saved with the code object does not otherwise have that information.
+      Handle<Code> code(function_->code());
+      int index = ScopeInfo<>::StackSlotIndex(*code, Heap::result_symbol());
+      at_local_ = index < 0;
+    } else if (context_->is_function_context()) {
+      at_local_ = true;
+    }
+  }
+
+  // More scopes?
+  bool Done() { return context_.is_null(); }
+
+  // Move to the next scope.
+  void Next() {
+    // If at a local scope mark the local scope as passed.
+    if (at_local_) {
+      at_local_ = false;
+      local_done_ = true;
+
+      // If the current context is not associated with the local scope, it is
+      // already the next real scope, so don't move to the next context in
+      // this case.
+      if (context_->closure() != *function_) {
+        return;
+      }
+    }
+
+    // The global scope is always the last in the chain.
+    if (context_->IsGlobalContext()) {
+      context_ = Handle<Context>();
+      return;
+    }
+
+    // Move to the next context.
+    if (context_->is_function_context()) {
+      context_ = Handle<Context>(Context::cast(context_->closure()->context()));
+    } else {
+      context_ = Handle<Context>(context_->previous());
+    }
+
+    // If the local scope has not been seen yet, the current scope is now the
+    // local scope.
+    if (!local_done_ &&
+        (context_->IsGlobalContext() || (context_->is_function_context()))) {
+      at_local_ = true;
+    }
+  }
+
+  // Return the type of the current scope.
+  int Type() {
+    if (at_local_) {
+      return ScopeTypeLocal;
+    }
+    if (context_->IsGlobalContext()) {
+      ASSERT(context_->global()->IsGlobalObject());
+      return ScopeTypeGlobal;
+    }
+    if (context_->is_function_context()) {
+      return ScopeTypeClosure;
+    }
+    ASSERT(context_->has_extension());
+    ASSERT(!context_->extension()->IsJSContextExtensionObject());
+    return ScopeTypeWith;
+  }
+
+  // Return the JavaScript object with the content of the current scope.
+  Handle<JSObject> ScopeObject() {
+    switch (Type()) {
+      case ScopeIterator::ScopeTypeGlobal:
+        return Handle<JSObject>(CurrentContext()->global());
+        break;
+      case ScopeIterator::ScopeTypeLocal:
+        // Materialize the content of the local scope into a JSObject.
+        return MaterializeLocalScope(frame_);
+        break;
+      case ScopeIterator::ScopeTypeWith:
+        // Return the with object.
+        return Handle<JSObject>(CurrentContext()->extension());
+        break;
+      case ScopeIterator::ScopeTypeClosure:
+        // Materialize the content of the closure scope into a JSObject.
+        return MaterializeClosure(CurrentContext());
+        break;
+    }
+    UNREACHABLE();
+    return Handle<JSObject>();
+  }
+
+  // Return the context for this scope. For the local scope there might not
+  // be an actual context.
+  Handle<Context> CurrentContext() {
+    if (at_local_ && context_->closure() != *function_) {
+      return Handle<Context>();
+    }
+    return context_;
+  }
+
+#ifdef DEBUG
+  // Debug print of the content of the current scope.
+  void DebugPrint() {
+    switch (Type()) {
+      case ScopeIterator::ScopeTypeGlobal:
+        PrintF("Global:\n");
+        CurrentContext()->Print();
+        break;
+
+      case ScopeIterator::ScopeTypeLocal: {
+        PrintF("Local:\n");
+        Handle<Code> code(function_->code());
+        ScopeInfo<> scope_info(*code);
+        scope_info.Print();
+        if (!CurrentContext().is_null()) {
+          CurrentContext()->Print();
+          if (CurrentContext()->has_extension()) {
+            Handle<JSObject> extension =
+                Handle<JSObject>(CurrentContext()->extension());
+            if (extension->IsJSContextExtensionObject()) {
+              extension->Print();
+            }
+          }
+        }
+        break;
+      }
+
+      case ScopeIterator::ScopeTypeWith: {
+        PrintF("With:\n");
+        Handle<JSObject> extension =
+            Handle<JSObject>(CurrentContext()->extension());
+        extension->Print();
+        break;
+      }
+
+      case ScopeIterator::ScopeTypeClosure: {
+        PrintF("Closure:\n");
+        CurrentContext()->Print();
+        if (CurrentContext()->has_extension()) {
+          Handle<JSObject> extension =
+              Handle<JSObject>(CurrentContext()->extension());
+          if (extension->IsJSContextExtensionObject()) {
+            extension->Print();
+          }
+        }
+        break;
+      }
+
+      default:
+        UNREACHABLE();
+    }
+    PrintF("\n");
+  }
+#endif
+
+ private:
+  JavaScriptFrame* frame_;
+  Handle<JSFunction> function_;
+  Handle<Context> context_;
+  bool local_done_;
+  bool at_local_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ScopeIterator);
+};
+
+
+static Object* Runtime_GetScopeCount(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 2);
+
+  // Check arguments.
+  Object* check = Runtime_CheckExecutionState(args);
+  if (check->IsFailure()) return check;
+  CONVERT_CHECKED(Smi, wrapped_id, args[1]);
+
+  // Get the frame where the debugging is performed.
+  StackFrame::Id id = UnwrapFrameId(wrapped_id);
+  JavaScriptFrameIterator it(id);
+  JavaScriptFrame* frame = it.frame();
+
+  // Count the visible scopes.
+  int n = 0;
+  for (ScopeIterator it(frame); !it.Done(); it.Next()) {
+    n++;
+  }
+
+  return Smi::FromInt(n);
+}
+
+
+static const int kScopeDetailsTypeIndex = 0;
+static const int kScopeDetailsObjectIndex = 1;
+static const int kScopeDetailsSize = 2;
+
+// Return an array with scope details
+// args[0]: number: break id
+// args[1]: number: frame index
+// args[2]: number: scope index
+//
+// The array returned contains the following information:
+// 0: Scope type
+// 1: Scope object
+static Object* Runtime_GetScopeDetails(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 3);
+
+  // Check arguments.
+  Object* check = Runtime_CheckExecutionState(args);
+  if (check->IsFailure()) return check;
+  CONVERT_CHECKED(Smi, wrapped_id, args[1]);
+  CONVERT_NUMBER_CHECKED(int, index, Int32, args[2]);
+
+  // Get the frame where the debugging is performed.
+  StackFrame::Id id = UnwrapFrameId(wrapped_id);
+  JavaScriptFrameIterator frame_it(id);
+  JavaScriptFrame* frame = frame_it.frame();
+
+  // Find the requested scope.
+  int n = 0;
+  ScopeIterator it(frame);
+  for (; !it.Done() && n < index; it.Next()) {
+    n++;
+  }
+  if (it.Done()) {
+    return Heap::undefined_value();
+  }
+
+  // Calculate the size of the result.
+  int details_size = kScopeDetailsSize;
+  Handle<FixedArray> details = Factory::NewFixedArray(details_size);
+
+  // Fill in scope details.
+  details->set(kScopeDetailsTypeIndex, Smi::FromInt(it.Type()));
+  details->set(kScopeDetailsObjectIndex, *it.ScopeObject());
+
+  return *Factory::NewJSArrayWithElements(details);
+}
+
+
+static Object* Runtime_DebugPrintScopes(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 0);
+
+#ifdef DEBUG
+  // Print the scopes for the top frame.
+  StackFrameLocator locator;
+  JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
+  for (ScopeIterator it(frame); !it.Done(); it.Next()) {
+    it.DebugPrint();
+  }
+#endif
+  return Heap::undefined_value();
+}
+
+
 static Object* Runtime_GetCFrames(Arguments args) {
   HandleScope scope;
   ASSERT(args.length() == 1);
@@ -6568,54 +6962,17 @@
   ASSERT(go_between_sinfo.number_of_context_slots() == 0);
 #endif
 
-  // Allocate and initialize a context extension object with all the
-  // arguments, stack locals heap locals and extension properties of the
-  // debugged function.
-  Handle<JSObject> context_ext = Factory::NewJSObject(Top::object_function());
-  // First fill all parameters to the context extension.
-  for (int i = 0; i < sinfo.number_of_parameters(); ++i) {
-    SetProperty(context_ext,
-                sinfo.parameter_name(i),
-                Handle<Object>(frame->GetParameter(i)), NONE);
-  }
-  // Second fill all stack locals to the context extension.
-  for (int i = 0; i < sinfo.number_of_stack_slots(); i++) {
-    SetProperty(context_ext,
-                sinfo.stack_slot_name(i),
-                Handle<Object>(frame->GetExpression(i)), NONE);
-  }
-  // Third fill all context locals to the context extension.
-  Handle<Context> frame_context(Context::cast(frame->context()));
-  Handle<Context> function_context(frame_context->fcontext());
-  for (int i = Context::MIN_CONTEXT_SLOTS;
-       i < sinfo.number_of_context_slots();
-       ++i) {
-    int context_index =
-        ScopeInfo<>::ContextSlotIndex(*code, *sinfo.context_slot_name(i), NULL);
-    SetProperty(context_ext,
-                sinfo.context_slot_name(i),
-                Handle<Object>(function_context->get(context_index)), NONE);
-  }
-  // Finally copy any properties from the function context extension. This will
-  // be variables introduced by eval.
-  if (function_context->has_extension() &&
-      !function_context->IsGlobalContext()) {
-    Handle<JSObject> ext(JSObject::cast(function_context->extension()));
-    Handle<FixedArray> keys = GetKeysInFixedArrayFor(ext);
-    for (int i = 0; i < keys->length(); i++) {
-      // Names of variables introduced by eval are strings.
-      ASSERT(keys->get(i)->IsString());
-      Handle<String> key(String::cast(keys->get(i)));
-      SetProperty(context_ext, key, GetProperty(ext, key), NONE);
-    }
-  }
+  // Materialize the content of the local scope into a JSObject.
+  Handle<JSObject> local_scope = MaterializeLocalScope(frame);
 
   // Allocate a new context for the debug evaluation and set the extension
   // object build.
   Handle<Context> context =
       Factory::NewFunctionContext(Context::MIN_CONTEXT_SLOTS, go_between);
-  context->set_extension(*context_ext);
+  context->set_extension(*local_scope);
   // Copy any with contexts present and chain them in front of this context.
+  Handle<Context> frame_context(Context::cast(frame->context()));
+  Handle<Context> function_context(frame_context->fcontext());
   context = CopyWithContextChain(frame_context, context);
 
   // Wrap the evaluation statement in a new function compiled in the newly
@@ -6657,6 +7014,13 @@
       Execution::Call(Handle<JSFunction>::cast(evaluation_function), receiver,
                       argc, argv, &has_pending_exception);
   if (has_pending_exception) return Failure::Exception();
+
+  // Skip the global proxy as it has no properties and always delegates to the
+  // real global object.
+  if (result->IsJSGlobalProxy()) {
+    result = Handle<JSObject>(JSObject::cast(result->GetPrototype()));
+  }
+
   return *result;
 }
 
diff --git a/src/runtime.h b/src/runtime.h
index 30bb7c5..15dd9b4 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -135,7 +135,6 @@
   F(Math_floor, 1) \
   F(Math_log, 1) \
   F(Math_pow, 2) \
-  F(Math_random, 0) \
   F(Math_round, 1) \
   F(Math_sin, 1) \
   F(Math_sqrt, 1) \
@@ -288,6 +287,9 @@
   F(CheckExecutionState, 1) \
   F(GetFrameCount, 1) \
   F(GetFrameDetails, 2) \
+  F(GetScopeCount, 2) \
+  F(GetScopeDetails, 3) \
+  F(DebugPrintScopes, 0) \
   F(GetCFrames, 1) \
   F(GetThreadCount, 1) \
   F(GetThreadDetails, 2) \
diff --git a/src/runtime.js b/src/runtime.js
index c8ccf9f..d4b4970 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -97,12 +97,12 @@
   if (IS_STRING(this)) {
     if (!IS_STRING(x)) return 1;  // not equal
     return %StringEquals(this, x);
-  } 
+  }
 
   if (IS_NUMBER(this)) {
     if (!IS_NUMBER(x)) return 1;  // not equal
     return %NumberEquals(this, x);
-  } 
+  }
 
   // If anything else gets here, we just do simple identity check.
   // Objects (including functions), null, undefined and booleans were
@@ -148,7 +148,7 @@
   // Default implementation.
   var a = %ToPrimitive(this, NO_HINT);
   var b = %ToPrimitive(x, NO_HINT);
-  
+
   if (IS_STRING(a)) {
     return %StringAdd(a, %ToString(b));
   } else if (IS_STRING(b)) {
@@ -160,40 +160,48 @@
 
 
 // Left operand (this) is already a string.
-function STRING_ADD_LEFT(x) {
-  x = %ToString(%ToPrimitive(x, NO_HINT));
-  return %StringAdd(this, x);
+function STRING_ADD_LEFT(y) {
+  if (!IS_STRING(y)) y = %ToString(%ToPrimitive(y, NO_HINT));
+  return %StringAdd(this, y);
 }
 
 
-// Right operand (x) is already a string.
-function STRING_ADD_RIGHT(x) {
-  var a = %ToString(%ToPrimitive(this, NO_HINT));
-  return %StringAdd(a, x);
+// Right operand (y) is already a string.
+function STRING_ADD_RIGHT(y) {
+  var x = IS_STRING(this) ? this : %ToString(%ToPrimitive(this, NO_HINT));
+  return %StringAdd(x, y);
 }
 
 
 // ECMA-262, section 11.6.2, page 50.
-function SUB(x) {
-  return %NumberSub(%ToNumber(this), %ToNumber(x));
+function SUB(y) {
+  var x = IS_NUMBER(this) ? this : %ToNumber(this);
+  if (!IS_NUMBER(y)) y = %ToNumber(y);
+  return %NumberSub(x, y);
 }
 
 
 // ECMA-262, section 11.5.1, page 48.
-function MUL(x) {
-  return %NumberMul(%ToNumber(this), %ToNumber(x));
+function MUL(y) {
+  var x = IS_NUMBER(this) ? this : %ToNumber(this);
+  if (!IS_NUMBER(y)) y = %ToNumber(y);
+  return %NumberMul(x, y);
 }
 
 
 // ECMA-262, section 11.5.2, page 49.
-function DIV(x) {
-  return %NumberDiv(%ToNumber(this), %ToNumber(x));
+function DIV(y) {
+  var x = IS_NUMBER(this) ? this : %ToNumber(this);
+  if (!IS_NUMBER(y)) y = %ToNumber(y);
+  return %NumberDiv(x, y);
 }
 
 
 // ECMA-262, section 11.5.3, page 49.
-function MOD(x) {
-  return %NumberMod(%ToNumber(this), %ToNumber(x));
+function MOD(y) {
+  var x = IS_NUMBER(this) ? this : %ToNumber(this);
+  if (!IS_NUMBER(y)) y = %ToNumber(y);
+  return %NumberMod(x, y);
 }
 
 
@@ -204,50 +212,92 @@
 */
 
 // ECMA-262, section 11.10, page 57.
-function BIT_OR(x) {
-  return %NumberOr(%ToNumber(this), %ToNumber(x));
+function BIT_OR(y) {
+  var x = IS_NUMBER(this) ? this : %ToNumber(this);
+  if (!IS_NUMBER(y)) y = %ToNumber(y);
+  return %NumberOr(x, y);
 }
 
 
 // ECMA-262, section 11.10, page 57.
-function BIT_AND(x) {
-  return %NumberAnd(%ToNumber(this), %ToNumber(x));
+function BIT_AND(y) {
+  var x;
+  if (IS_NUMBER(this)) {
+    x = this;
+    if (!IS_NUMBER(y)) y = %ToNumber(y);
+  } else {
+    x = %ToNumber(this);
+    // Make sure to convert the right operand to a number before
+    // bailing out in the fast case, but after converting the
+    // left operand. This ensures that valueOf methods on the right
+    // operand are always executed.
+    if (!IS_NUMBER(y)) y = %ToNumber(y);
+    // Optimize for the case where we end up AND'ing a value
+    // that doesn't convert to a number. This is common in
+    // certain benchmarks.
+    if (NUMBER_IS_NAN(x)) return 0;
+  }
+  return %NumberAnd(x, y);
 }
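
The conversion-order comment is observable from user code, and so is the NaN bail-out; a small demonstration (the objects are illustrative):

  var order = [];
  var left  = { valueOf: function() { order.push('left');  return NaN; } };
  var right = { valueOf: function() { order.push('right'); return 1; } };
  left & right;  // 0: the left side is NaN, but only after right is converted
  order;         // ['left', 'right'] -- right's valueOf still ran
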
 
 
 // ECMA-262, section 11.10, page 57.
-function BIT_XOR(x) {
-  return %NumberXor(%ToNumber(this), %ToNumber(x));
+function BIT_XOR(y) {
+  var x = IS_NUMBER(this) ? this : %ToNumber(this);
+  if (!IS_NUMBER(y)) y = %ToNumber(y);
+  return %NumberXor(x, y);
 }
 
 
 // ECMA-262, section 11.4.7, page 47.
 function UNARY_MINUS() {
-  return %NumberUnaryMinus(%ToNumber(this));
+  var x = IS_NUMBER(this) ? this : %ToNumber(this);
+  return %NumberUnaryMinus(x);
 }
 
 
 // ECMA-262, section 11.4.8, page 48.
 function BIT_NOT() {
-  return %NumberNot(%ToNumber(this));
+  var x = IS_NUMBER(this) ? this : %ToNumber(this);
+  return %NumberNot(x);
 }
 
 
 // ECMA-262, section 11.7.1, page 51.
-function SHL(x) {
-  return %NumberShl(%ToNumber(this), %ToNumber(x));
+function SHL(y) {
+  var x = IS_NUMBER(this) ? this : %ToNumber(this);
+  if (!IS_NUMBER(y)) y = %ToNumber(y);
+  return %NumberShl(x, y);
 }
 
 
 // ECMA-262, section 11.7.2, page 51.
-function SAR(x) {
-  return %NumberSar(%ToNumber(this), %ToNumber(x));
+function SAR(y) {
+  var x;
+  if (IS_NUMBER(this)) {
+    x = this;
+    if (!IS_NUMBER(y)) y = %ToNumber(y);
+  } else {
+    x = %ToNumber(this);
+    // Make sure to convert the right operand to a number before
+    // bailing out in the fast case, but after converting the
+    // left operand. This ensures that valueOf methods on the right
+    // operand are always executed.
+    if (!IS_NUMBER(y)) y = %ToNumber(y);
+    // Optimize for the case where we end up shifting a value
+    // that doesn't convert to a number. This is common in
+    // certain benchmarks.
+    if (NUMBER_IS_NAN(x)) return 0;
+  }
+  return %NumberSar(x, y);
 }
 
 
 // ECMA-262, section 11.7.3, page 52.
-function SHR(x) {
-  return %NumberShr(%ToNumber(this), %ToNumber(x));
+function SHR(y) {
+  var x = IS_NUMBER(this) ? this : %ToNumber(this);
+  if (!IS_NUMBER(y)) y = %ToNumber(y);
+  return %NumberShr(x, y);
 }
 
 
diff --git a/src/serialize.cc b/src/serialize.cc
index fb66d27..eb497fb 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -450,20 +450,26 @@
                                        const char* name) {
   Address address;
   switch (type) {
-    case C_BUILTIN:
-      address = Builtins::c_function_address(
-          static_cast<Builtins::CFunctionId>(id));
+    case C_BUILTIN: {
+      ExternalReference ref(static_cast<Builtins::CFunctionId>(id));
+      address = ref.address();
       break;
-    case BUILTIN:
-      address = Builtins::builtin_address(static_cast<Builtins::Name>(id));
+    }
+    case BUILTIN: {
+      ExternalReference ref(static_cast<Builtins::Name>(id));
+      address = ref.address();
       break;
-    case RUNTIME_FUNCTION:
-      address = Runtime::FunctionForId(
-          static_cast<Runtime::FunctionId>(id))->entry;
+    }
+    case RUNTIME_FUNCTION: {
+      ExternalReference ref(static_cast<Runtime::FunctionId>(id));
+      address = ref.address();
       break;
-    case IC_UTILITY:
-      address = IC::AddressFromUtilityId(static_cast<IC::UtilityId>(id));
+    }
+    case IC_UTILITY: {
+      ExternalReference ref(IC_Utility(static_cast<IC::UtilityId>(id)));
+      address = ref.address();
       break;
+    }
     default:
       UNREACHABLE();
       return;
@@ -642,10 +648,14 @@
       "StubCache::secondary_->value");
 
   // Runtime entries
-  Add(FUNCTION_ADDR(Runtime::PerformGC),
+  Add(ExternalReference::perform_gc_function().address(),
       RUNTIME_ENTRY,
       1,
       "Runtime::PerformGC");
+  Add(ExternalReference::random_positive_smi_function().address(),
+      RUNTIME_ENTRY,
+      2,
+      "V8::RandomPositiveSmi");
 
   // Miscellaneous
   Add(ExternalReference::builtin_passed_function().address(),
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index f7e5456..0c80378 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -103,7 +103,7 @@
     LoadStubCompiler compiler;
     code = compiler.CompileLoadField(receiver, holder, field_index, name);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent("LoadIC", Code::cast(code), name));
+    LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return code;
   }
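Every CodeCreateEvent call site in this file changes the same way: the ad-hoc string tag becomes a value from a shared Logger enum, which is what lets the profiler ship a compact integer id instead of the full name (the changelog's "compressing log data" item). A rough sketch of the mechanism, with hypothetical names:

    #include <cstdio>

    // Hypothetical sketch: a tag enum plus a parallel name table lets the
    // log stream carry a small integer; clients expand it back to a name.
    enum CodeTag { LOAD_IC_TAG, KEYED_LOAD_IC_TAG, STORE_IC_TAG, CALL_IC_TAG };
    static const char* kTagNames[] =
        { "LoadIC", "KeyedLoadIC", "StoreIC", "CallIC" };

    void LogCodeCreate(CodeTag tag, const char* name) {
      // Compressed form: the numeric tag stands in for kTagNames[tag].
      std::printf("code-creation,%d,%s\n", tag, name);
    }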
@@ -122,7 +122,7 @@
     LoadStubCompiler compiler;
     code = compiler.CompileLoadCallback(receiver, holder, callback, name);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent("LoadIC", Code::cast(code), name));
+    LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return code;
   }
@@ -141,7 +141,7 @@
     LoadStubCompiler compiler;
     code = compiler.CompileLoadConstant(receiver, holder, value, name);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent("LoadIC", Code::cast(code), name));
+    LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return code;
   }
@@ -158,7 +158,7 @@
     LoadStubCompiler compiler;
     code = compiler.CompileLoadInterceptor(receiver, holder, name);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent("LoadIC", Code::cast(code), name));
+    LOG(CodeCreateEvent(Logger::LOAD_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return code;
   }
@@ -182,7 +182,7 @@
     KeyedLoadStubCompiler compiler;
     code = compiler.CompileLoadField(name, receiver, holder, field_index);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent("KeyedLoadIC", Code::cast(code), name));
+    LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -201,7 +201,7 @@
     KeyedLoadStubCompiler compiler;
     code = compiler.CompileLoadConstant(name, receiver, holder, value);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent("KeyedLoadIC", Code::cast(code), name));
+    LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -219,7 +219,7 @@
     KeyedLoadStubCompiler compiler;
     code = compiler.CompileLoadInterceptor(receiver, holder, name);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent("KeyedLoadIC", Code::cast(code), name));
+    LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -238,7 +238,7 @@
     KeyedLoadStubCompiler compiler;
     code = compiler.CompileLoadCallback(name, receiver, holder, callback);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent("KeyedLoadIC", Code::cast(code), name));
+    LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -256,7 +256,7 @@
     KeyedLoadStubCompiler compiler;
     code = compiler.CompileLoadArrayLength(name);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent("KeyedLoadIC", Code::cast(code), name));
+    LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -273,7 +273,7 @@
     KeyedLoadStubCompiler compiler;
     code = compiler.CompileLoadStringLength(name);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent("KeyedLoadIC", Code::cast(code), name));
+    LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -290,7 +290,7 @@
     KeyedLoadStubCompiler compiler;
     code = compiler.CompileLoadFunctionPrototype(name);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent("KeyedLoadIC", Code::cast(code), name));
+    LOG(CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -309,7 +309,7 @@
     StoreStubCompiler compiler;
     code = compiler.CompileStoreField(receiver, field_index, transition, name);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent("StoreIC", Code::cast(code), name));
+    LOG(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -327,7 +327,7 @@
     StoreStubCompiler compiler;
     code = compiler.CompileStoreCallback(receiver, callback, name);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent("StoreIC", Code::cast(code), name));
+    LOG(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -344,7 +344,7 @@
     StoreStubCompiler compiler;
     code = compiler.CompileStoreInterceptor(receiver, name);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent("StoreIC", Code::cast(code), name));
+    LOG(CodeCreateEvent(Logger::STORE_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -361,7 +361,7 @@
     KeyedStoreStubCompiler compiler;
     code = compiler.CompileStoreField(receiver, field_index, transition, name);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent("KeyedStoreIC", Code::cast(code), name));
+    LOG(CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, Code::cast(code), name));
     Object* result = receiver->map()->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -412,7 +412,7 @@
     CallStubCompiler compiler(argc);
     code = compiler.CompileCallConstant(object, holder, function, check, flags);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent("CallIC", Code::cast(code), name));
+    LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
     Object* result = map->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -445,7 +445,7 @@
     CallStubCompiler compiler(argc);
     code = compiler.CompileCallField(object, holder, index, name, flags);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent("CallIC", Code::cast(code), name));
+    LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
     Object* result = map->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -478,7 +478,7 @@
     CallStubCompiler compiler(argc);
     code = compiler.CompileCallInterceptor(object, holder, name);
     if (code->IsFailure()) return code;
-    LOG(CodeCreateEvent("CallIC", Code::cast(code), name));
+    LOG(CodeCreateEvent(Logger::CALL_IC_TAG, Code::cast(code), name));
     Object* result = map->UpdateCodeCache(name, Code::cast(code));
     if (result->IsFailure()) return result;
   }
@@ -632,7 +632,8 @@
   if (result->IsCode()) {
     Code* code = Code::cast(result);
     USE(code);
-    LOG(CodeCreateEvent("LazyCompile", code, code->arguments_count()));
+    LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG,
+                        code, code->arguments_count()));
   }
   return result;
 }
@@ -780,7 +781,8 @@
     Counters::call_initialize_stubs.Increment();
     Code* code = Code::cast(result);
     USE(code);
-    LOG(CodeCreateEvent("CallInitialize", code, code->arguments_count()));
+    LOG(CodeCreateEvent(Logger::CALL_INITIALIZE_TAG,
+                        code, code->arguments_count()));
   }
   return result;
 }
@@ -795,7 +797,8 @@
     Counters::call_premonomorphic_stubs.Increment();
     Code* code = Code::cast(result);
     USE(code);
-    LOG(CodeCreateEvent("CallPreMonomorphic", code, code->arguments_count()));
+    LOG(CodeCreateEvent(Logger::CALL_PRE_MONOMORPHIC_TAG,
+                        code, code->arguments_count()));
   }
   return result;
 }
@@ -810,7 +813,8 @@
     Counters::call_normal_stubs.Increment();
     Code* code = Code::cast(result);
     USE(code);
-    LOG(CodeCreateEvent("CallNormal", code, code->arguments_count()));
+    LOG(CodeCreateEvent(Logger::CALL_NORMAL_TAG,
+                        code, code->arguments_count()));
   }
   return result;
 }
@@ -825,7 +829,8 @@
     Counters::call_megamorphic_stubs.Increment();
     Code* code = Code::cast(result);
     USE(code);
-    LOG(CodeCreateEvent("CallMegamorphic", code, code->arguments_count()));
+    LOG(CodeCreateEvent(Logger::CALL_MEGAMORPHIC_TAG,
+                        code, code->arguments_count()));
   }
   return result;
 }
@@ -840,7 +845,7 @@
     Counters::call_megamorphic_stubs.Increment();
     Code* code = Code::cast(result);
     USE(code);
-    LOG(CodeCreateEvent("CallMiss", code, code->arguments_count()));
+    LOG(CodeCreateEvent(Logger::CALL_MISS_TAG, code, code->arguments_count()));
   }
   return result;
 }
@@ -854,7 +859,8 @@
   if (!result->IsFailure()) {
     Code* code = Code::cast(result);
     USE(code);
-    LOG(CodeCreateEvent("CallDebugBreak", code, code->arguments_count()));
+    LOG(CodeCreateEvent(Logger::CALL_DEBUG_BREAK_TAG,
+                        code, code->arguments_count()));
   }
   return result;
 }
@@ -870,8 +876,8 @@
   if (!result->IsFailure()) {
     Code* code = Code::cast(result);
     USE(code);
-    LOG(CodeCreateEvent("CallDebugPrepareStepIn", code,
-                        code->arguments_count()));
+    LOG(CodeCreateEvent(Logger::CALL_DEBUG_PREPARE_STEP_IN_TAG,
+                        code, code->arguments_count()));
   }
   return result;
 }
diff --git a/src/utils.h b/src/utils.h
index 137e2c4..91662ee 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -362,6 +362,11 @@
     Sort(PointerValueCompare<T>);
   }
 
+  void Truncate(int length) {
+    ASSERT(length <= length_);
+    length_ = length;
+  }
+
   // Releases the array underlying this vector. Once disposed the
   // vector is empty.
   void Dispose() {
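One plausible use of the new Truncate, sketched with helpers declared elsewhere in this tree (Vector<char>::New, OS::SNPrintF); treat the exact calls as assumptions. The point is to shrink a scratch buffer's view to the length actually produced, without reallocating:

    // Sketch only: shrink the vector's view after a formatted write.
    Vector<char> buffer = Vector<char>::New(128);
    int written = OS::SNPrintF(buffer, "version %d.%d.%d", 1, 2, 8);
    if (written >= 0) buffer.Truncate(written);  // keep the used prefix
    // ... use buffer ...
    buffer.Dispose();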
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 4111312..06f116e 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -131,6 +131,8 @@
   SC(named_load_inline, V8.NamedLoadInline)                         \
   SC(named_load_inline_miss, V8.NamedLoadInlineMiss)                \
   SC(keyed_store_field, V8.KeyedStoreField)                         \
+  SC(keyed_store_inline, V8.KeyedStoreInline)                       \
+  SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss)              \
   SC(for_in, V8.ForIn)                                              \
   SC(enum_cache_hits, V8.EnumCacheHits)                             \
   SC(enum_cache_misses, V8.EnumCacheMisses)                         \
diff --git a/src/v8.cc b/src/v8.cc
index 17cb2df..72f74aa 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -33,6 +33,10 @@
 #include "stub-cache.h"
 #include "oprofile-agent.h"
 
+#if V8_TARGET_ARCH_ARM
+#include "arm/simulator-arm.h"
+#endif
+
 namespace v8 {
 namespace internal {
 
@@ -62,6 +66,11 @@
   // Setup the platform OS support.
   OS::Setup();
 
+  // Initialize other runtime facilities
+#if !V8_HOST_ARCH_ARM && V8_TARGET_ARCH_ARM
+  ::assembler::arm::Simulator::Initialize();
+#endif
+
   // Setup the object heap
   ASSERT(!Heap::HasBeenSetup());
   if (!Heap::Setup(create_heap_objects)) {
@@ -69,7 +78,6 @@
     return false;
   }
 
-  // Initialize other runtime facilities
   Bootstrapper::Initialize(create_heap_objects);
   Builtins::Setup(create_heap_objects);
   Top::Initialize();
@@ -130,4 +138,29 @@
 }
 
 
+uint32_t V8::Random() {
+  // Random number generator using George Marsaglia's MWC algorithm.
+  static uint32_t hi = 0;
+  static uint32_t lo = 0;
+
+  // Initialize the seeds using the system random(). If either seed (or
+  // the value random() returns for it) is ever zero, it is re-seeded on
+  // demand: a zero state word would otherwise keep the generator stuck
+  // at zero.
+  if (hi == 0) hi = random();
+  if (lo == 0) lo = random();
+
+  // Mix the bits.
+  hi = 36969 * (hi & 0xFFFF) + (hi >> 16);
+  lo = 18273 * (lo & 0xFFFF) + (lo >> 16);
+  return (hi << 16) + (lo & 0xFFFF);
+}
+
+
+Smi* V8::RandomPositiveSmi() {
+  uint32_t random = Random();
+  ASSERT(IsPowerOf2(Smi::kMaxValue + 1));
+  return Smi::FromInt(random & Smi::kMaxValue);
+}
+
 } }  // namespace v8::internal
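The generator above is George Marsaglia's multiply-with-carry construction: each state word keeps a 16-bit value in its low half and the previous carry in its high half, and the two streams are combined into one 32-bit result. A standalone sketch with fixed, illustrative-only seeds:

    #include <cstdint>
    #include <cstdio>

    // Standalone MWC sketch mirroring V8::Random; the real code seeds from
    // the system random() and reseeds whenever a state word reaches zero.
    static uint32_t hi = 0xDEAD, lo = 0xBEEF;  // arbitrary nonzero seeds

    uint32_t MwcRandom() {
      hi = 36969 * (hi & 0xFFFF) + (hi >> 16);  // low 16 bits plus carry
      lo = 18273 * (lo & 0xFFFF) + (lo >> 16);
      return (hi << 16) + (lo & 0xFFFF);
    }

    int main() {
      // RandomPositiveSmi masks with Smi::kMaxValue; the result is uniform
      // only because kMaxValue + 1 is a power of two (2^30 in this era),
      // which is exactly what the IsPowerOf2 ASSERT above pins down.
      const uint32_t kSmiMax = (1u << 30) - 1;
      std::printf("%u\n", MwcRandom() & kSmiMax);
      return 0;
    }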
diff --git a/src/v8.h b/src/v8.h
index 8cb3c7d..4e906df 100644
--- a/src/v8.h
+++ b/src/v8.h
@@ -80,10 +80,10 @@
  public:
   // Global actions.
 
-  // If Initialize is called with des == NULL, the
-  // initial state is created from scratch. If a non-null Deserializer
-  // is given, the initial state is created by reading the
-  // deserialized data into an empty heap.
+  // If Initialize is called with des == NULL, the initial state is
+  // created from scratch. If a non-null Deserializer is given, the
+  // initial state is created by reading the deserialized data into an
+  // empty heap.
   static bool Initialize(Deserializer* des);
   static void TearDown();
   static bool IsRunning() { return is_running_; }
@@ -93,6 +93,11 @@
 
   // Report process out of memory. Implementation found in api.cc.
   static void FatalProcessOutOfMemory(const char* location);
+
+  // Random number generation support. Not cryptographically safe.
+  static uint32_t Random();
+  static Smi* RandomPositiveSmi();
+
  private:
   // True if engine is currently running
   static bool is_running_;
diff --git a/src/version.cc b/src/version.cc
index 3acc0f9..caf033b 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     1
 #define MINOR_VERSION     2
-#define BUILD_NUMBER      7
-#define PATCH_LEVEL       1
+#define BUILD_NUMBER      8
+#define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false
 
 // Define SONAME to have the SCons build the put a specific SONAME into the
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index 1822568..ec27983 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -151,11 +151,6 @@
   if (rmode_ == RUNTIME_ENTRY || IsCodeTarget(rmode_)) {
     intptr_t* p = reinterpret_cast<intptr_t*>(pc_);
     *p -= delta;  // relocate entry
-  } else if (rmode_ == JS_RETURN && IsCallInstruction()) {
-    // Special handling of js_return when a break point is set (call
-    // instruction has been inserted).
-    intptr_t* p = reinterpret_cast<intptr_t*>(pc_ + 1);
-    *p -= delta;  // relocate entry
   } else if (IsInternalReference(rmode_)) {
     // absolute code pointer inside code object moves with the code object.
     intptr_t* p = reinterpret_cast<intptr_t*>(pc_);
@@ -249,27 +244,9 @@
 // -----------------------------------------------------------------------------
 // Implementation of Operand
 
-Operand::Operand(Register base, int32_t disp) {
-  len_ = 1;
-  if (base.is(rsp) || base.is(r12)) {
-    // SIB byte is needed to encode (rsp + offset) or (r12 + offset).
-    set_sib(kTimes1, rsp, base);
-  }
-
-  if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
-    set_modrm(0, rsp);
-  } else if (is_int8(disp)) {
-    set_modrm(1, base);
-    set_disp8(disp);
-  } else {
-    set_modrm(2, base);
-    set_disp32(disp);
-  }
-}
-
 void Operand::set_modrm(int mod, Register rm) {
   ASSERT((mod & -4) == 0);
-  buf_[0] = mod << 6 | (rm.code() & 0x7);
+  buf_[0] = (mod << 6) | (rm.code() & 0x7);
   // Set REX.B to the high bit of rm.code().
   rex_ |= (rm.code() >> 3);
 }
@@ -278,7 +255,8 @@
 void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
   ASSERT(len_ == 1);
   ASSERT(is_uint2(scale));
-  // Use SIB with no index register only for base rsp or r12.
+  // Use SIB with no index register only for base rsp or r12. Otherwise we
+  // would skip the SIB byte entirely.
   ASSERT(!index.is(rsp) || base.is(rsp) || base.is(r12));
   buf_[1] = scale << 6 | (index.code() & 0x7) << 3 | (base.code() & 0x7);
   rex_ |= (index.code() >> 3) << 1 | base.code() >> 3;
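These helpers encode the two quirky corners of x64 addressing: in the ModRM byte, r/m = 0b100 (the rsp code) does not name a register but announces a SIB byte, and mod = 0b00 with base rbp (code 0b101) means a bare disp32 rather than [rbp]; r12 and r13 need the same treatment because their low three bits match rsp and rbp. A self-contained re-derivation of the [base + disp] case handled by the Operand constructor in the next file (hypothetical helper; low eight registers only, REX omitted):

    #include <cstdint>

    enum Reg { RAX, RCX, RDX, RBX, RSP, RBP, RSI, RDI };

    // Encodes [base + disp]; returns the number of bytes written to buf.
    int EncodeBaseDisp(Reg base, int32_t disp, uint8_t* buf) {
      int len = 0;
      bool need_sib = (base == RSP);                  // r/m == 100 => SIB
      bool need_disp = (disp != 0) || (base == RBP);  // mod 00 + rbp => disp32
      uint8_t mod = !need_disp ? 0 : (-128 <= disp && disp <= 127) ? 1 : 2;
      buf[len++] = (mod << 6) | (need_sib ? RSP : base);
      if (need_sib) buf[len++] = (RSP << 3) | base;   // scale 1, no index
      if (mod == 1) buf[len++] = static_cast<uint8_t>(disp);
      if (mod == 2) {
        for (int i = 0; i < 4; i++) {
          buf[len++] = static_cast<uint8_t>(disp >> (8 * i));
        }
      }
      return len;
    }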
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 77bbf52..cc64471 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -72,7 +72,49 @@
 XMMRegister xmm14 = { 14 };
 XMMRegister xmm15 = { 15 };
 
+
+Operand::Operand(Register base, int32_t disp): rex_(0) {
+  len_ = 1;
+  if (base.is(rsp) || base.is(r12)) {
+    // SIB byte is needed to encode (rsp + offset) or (r12 + offset).
+    set_sib(kTimes1, rsp, base);
+  }
+
+  if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
+    set_modrm(0, base);
+  } else if (is_int8(disp)) {
+    set_modrm(1, base);
+    set_disp8(disp);
+  } else {
+    set_modrm(2, base);
+    set_disp32(disp);
+  }
+}
+
+
+Operand::Operand(Register base,
+                 Register index,
+                 ScaleFactor scale,
+                 int32_t disp): rex_(0) {
+  ASSERT(!index.is(rsp));
+  len_ = 1;
+  set_sib(scale, index, base);
+  if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
+    // This call to set_modrm doesn't overwrite the REX.B (or REX.X) bits
+    // possibly set by set_sib.
+    set_modrm(0, rsp);
+  } else if (is_int8(disp)) {
+    set_modrm(1, rsp);
+    set_disp8(disp);
+  } else {
+    set_modrm(2, rsp);
+    set_disp32(disp);
+  }
+}
+
+
 // Safe default is no features.
+// TODO(X64): Safe defaults include SSE2 for X64.
 uint64_t CpuFeatures::supported_ = 0;
 uint64_t CpuFeatures::enabled_ = 0;
 
@@ -140,7 +182,8 @@
   Object* code =
       Heap::CreateCode(desc, NULL, Code::ComputeFlags(Code::STUB), NULL);
   if (!code->IsCode()) return;
-  LOG(CodeCreateEvent("Builtin", Code::cast(code), "CpuFeatures::Probe"));
+  LOG(CodeCreateEvent(Logger::BUILTIN_TAG,
+                      Code::cast(code), "CpuFeatures::Probe"));
   typedef uint64_t (*F0)();
   F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
   supported_ = probe();
@@ -398,16 +441,47 @@
   emit_rex_64(dst);
   if (is_int8(src.value_)) {
     emit(0x83);
-    emit_operand(Register::toRegister(subcode), dst);
+    emit_operand(subcode, dst);
     emit(src.value_);
   } else {
     emit(0x81);
-    emit_operand(Register::toRegister(subcode), dst);
+    emit_operand(subcode, dst);
     emitl(src.value_);
   }
 }
 
 
+void Assembler::immediate_arithmetic_op_32(byte subcode,
+                                           const Operand& dst,
+                                           Immediate src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst);
+  if (is_int8(src.value_)) {
+    emit(0x83);
+    emit_operand(subcode, dst);
+    emit(src.value_);
+  } else {
+    emit(0x81);
+    emit_operand(subcode, dst);
+    emitl(src.value_);
+  }
+}
+
+
+void Assembler::immediate_arithmetic_op_8(byte subcode,
+                                          const Operand& dst,
+                                          Immediate src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst);
+  ASSERT(is_int8(src.value_));
+  emit(0x80);
+  emit_operand(subcode, dst);
+  emit(src.value_);
+}
+
+
 void Assembler::shift(Register dst, Immediate shift_amount, int subcode) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -486,14 +560,6 @@
   emit_modrm(0x2, adr);
 }
 
-void Assembler::cpuid() {
-  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CPUID));
-  EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
-  emit(0x0F);
-  emit(0xA2);
-}
-
 
 void Assembler::call(const Operand& op) {
   EnsureSpace ensure_space(this);
@@ -505,6 +571,15 @@
 }
 
 
+void Assembler::cpuid() {
+  ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CPUID));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x0F);
+  emit(0xA2);
+}
+
+
 void Assembler::cqo() {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -513,7 +588,7 @@
 }
 
 
-void Assembler::dec(Register dst) {
+void Assembler::decq(Register dst) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   emit_rex_64(dst);
@@ -522,7 +597,7 @@
 }
 
 
-void Assembler::dec(const Operand& dst) {
+void Assembler::decq(const Operand& dst) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   emit_rex_64(dst);
@@ -531,6 +606,15 @@
 }
 
 
+void Assembler::decl(const Operand& dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst);
+  emit(0xFF);
+  emit_operand(1, dst);
+}
+
+
 void Assembler::enter(Immediate size) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -582,7 +666,7 @@
 }
 
 
-void Assembler::inc(Register dst) {
+void Assembler::incq(Register dst) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   emit_rex_64(dst);
@@ -591,7 +675,7 @@
 }
 
 
-void Assembler::inc(const Operand& dst) {
+void Assembler::incq(const Operand& dst) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   emit_rex_64(dst);
@@ -600,6 +684,15 @@
 }
 
 
+void Assembler::incl(const Operand& dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst);
+  emit(0xFF);
+  emit_operand(0, dst);
+}
+
+
 void Assembler::int3() {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -768,6 +861,16 @@
 }
 
 
+void Assembler::movl(const Operand& dst, Immediate value) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst);
+  emit(0xC7);
+  emit_operand(0x0, dst);
+  emit(value);  // Only 32-bit immediates are possible, not 8-bit immediates.
+}
+
+
 void Assembler::movl(Register dst, Immediate value) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -843,6 +946,31 @@
 }
 
 
+void Assembler::movq(const Operand& dst, Immediate value) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst);
+  emit(0xC7);
+  emit_operand(0, dst);
+  emit(value);
+}
+
+
+void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(!Heap::InNewSpace(*value));
+  emit_rex_64(dst);
+  emit(0xB8 | (dst.code() & 0x7));
+  if (value->IsHeapObject()) {
+    emitq(reinterpret_cast<uintptr_t>(value.location()), mode);
+  } else {
+    ASSERT_EQ(RelocInfo::NONE, mode);
+    emitq(reinterpret_cast<uintptr_t>(*value), RelocInfo::NONE);
+  }
+}
+
+
 void Assembler::mul(Register src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1063,6 +1191,13 @@
   }
 }
 
+void Assembler::rdtsc() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x0F);
+  emit(0x31);
+}
+
 
 void Assembler::ret(int imm16) {
   EnsureSpace ensure_space(this);
@@ -1078,6 +1213,19 @@
 }
 
 
+void Assembler::setcc(Condition cc, Register reg) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(0 <= cc && cc < 16);
+  if (reg.code() > 3) {  // Codes 4-7 need REX to select x64 low-byte registers.
+    emit_rex_32(reg);
+  }
+  emit(0x0F);
+  emit(0x90 | cc);
+  emit_modrm(0x0, reg);
+}
+
+
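The conditional REX in setcc exists because byte-register codes 4 through 7 are ambiguous: with no REX prefix they select the legacy high bytes ah/ch/dh/bh, while any REX prefix, even an empty one, remaps them to spl/bpl/sil/dil. A toy encoder making that visible (codes 0-7 only; illustrative, not the assembler's API):

    #include <cstdint>

    // Emits setcc targeting the low byte of a register; an empty REX
    // (0x40) forces codes 4-7 to mean sil-style low bytes, not dh-style.
    int EncodeSetcc(uint8_t cc, int reg_code, uint8_t* buf) {
      int len = 0;
      if (reg_code > 3) buf[len++] = 0x40;   // empty REX prefix
      buf[len++] = 0x0F;
      buf[len++] = 0x90 | cc;
      buf[len++] = 0xC0 | (reg_code & 0x7);  // ModRM: mod=11, reg=0, r/m
      return len;
    }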
 void Assembler::shld(Register dst, Register src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1128,6 +1276,7 @@
 
 
 void Assembler::testb(Register reg, Immediate mask) {
+  ASSERT(is_int8(mask.value_));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   if (reg.is(rax)) {
@@ -1146,6 +1295,7 @@
 
 
 void Assembler::testb(const Operand& op, Immediate mask) {
+  ASSERT(is_int8(mask.value_));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   emit_optional_rex_32(rax, op);
@@ -1198,6 +1348,22 @@
 }
 
 
+void Assembler::testq(Register dst, Immediate mask) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (dst.is(rax)) {
+    emit_rex_64();
+    emit(0xA9);
+    emit(mask);
+  } else {
+    emit_rex_64(dst);
+    emit(0xF7);
+    emit_modrm(0, dst);
+    emit(mask);
+  }
+}
+
+
 // Relocation information implementations
 
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
@@ -1360,19 +1526,7 @@
   return NULL;
 }
 
-
-StackFrame::Type ExitFrame::GetStateForFramePointer(unsigned char* a,
-                                                    StackFrame::State* b) {
-  // TODO(X64): UNIMPLEMENTED
-  return NONE;
-}
-
-int JavaScriptFrame::GetProvidedParametersCount() const {
-  UNIMPLEMENTED();
-  return 0;
-}
-
-void JumpTarget::DoBind(int a) {
+void JumpTarget::DoBind() {
   UNIMPLEMENTED();
 }
 
@@ -1384,7 +1538,6 @@
   UNIMPLEMENTED();
 }
 
-
 Object* LoadStubCompiler::CompileLoadCallback(JSObject* a,
                                               JSObject* b,
                                               AccessorInfo* c,
@@ -1416,11 +1569,6 @@
   return NULL;
 }
 
-StackFrame::Type StackFrame::ComputeType(StackFrame::State* a) {
-  UNIMPLEMENTED();
-  return NONE;
-}
-
 Object* StoreStubCompiler::CompileStoreCallback(JSObject* a,
                                                 AccessorInfo* b,
                                                 String* c) {
@@ -1446,102 +1594,4 @@
   return NULL;
 }
 
-void VirtualFrame::Drop(int a) {
-  UNIMPLEMENTED();
-}
-
-int VirtualFrame::InvalidateFrameSlotAt(int a) {
-  UNIMPLEMENTED();
-  return -1;
-}
-
-void VirtualFrame::MergeTo(VirtualFrame* a) {
-  UNIMPLEMENTED();
-}
-
-Result VirtualFrame::Pop() {
-  UNIMPLEMENTED();
-  return Result(NULL);
-}
-
-Result VirtualFrame::RawCallStub(CodeStub* a) {
-  UNIMPLEMENTED();
-  return Result(NULL);
-}
-
-void VirtualFrame::SyncElementBelowStackPointer(int a) {
-  UNIMPLEMENTED();
-}
-
-void VirtualFrame::SyncElementByPushing(int a) {
-  UNIMPLEMENTED();
-}
-
-void VirtualFrame::SyncRange(int a, int b) {
-  UNIMPLEMENTED();
-}
-
-VirtualFrame::VirtualFrame() : elements_(0) {
-  UNIMPLEMENTED();
-}
-
-byte* ArgumentsAdaptorFrame::GetCallerStackPointer() const {
-  UNIMPLEMENTED();
-  return NULL;
-}
-
-void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::GenerateLog(ZoneList<Expression*>* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* a) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* a) {
-  UNIMPLEMENTED();
-}
-
-void ExitFrame::Iterate(ObjectVisitor* a) const {
-  UNIMPLEMENTED();
-}
-
-byte* InternalFrame::GetCallerStackPointer() const {
-  UNIMPLEMENTED();
-  return NULL;
-}
-
-byte* JavaScriptFrame::GetCallerStackPointer() const {
-  UNIMPLEMENTED();
-  return NULL;
-}
-
 } }  // namespace v8::internal
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index b488257..650c218 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -77,7 +77,7 @@
 
 struct Register {
   static Register toRegister(int code) {
-    Register r = {code};
+    Register r = { code };
     return r;
   }
   bool is_valid() const  { return 0 <= code_ && code_ < 16; }
@@ -89,11 +89,11 @@
     return code_;
   }
   int bit() const  {
-    UNIMPLEMENTED();
-    return 0;
+    return 1 << code_;
   }
 
-  // (unfortunately we can't make this private in a struct)
+  // (unfortunately we can't make this private in a struct when initializing
+  // by assignment.)
   int code_;
 };
 
@@ -250,7 +250,7 @@
 class Operand BASE_EMBEDDED {
  public:
   // [base + disp/r]
-  INLINE(Operand(Register base, int32_t disp));
+  Operand(Register base, int32_t disp);
 
   // [base + index*scale + disp/r]
   Operand(Register base,
@@ -385,7 +385,8 @@
   //
   // If we need versions of an assembly instruction that operate on different
   // width arguments, we add a single-letter suffix specifying the width.
-  // This is done for the following instructions: mov, cmp.
+  // This is done for the following instructions: mov, cmp, inc, dec,
+  // add, sub, and test.
   // There are no versions of these instructions without the suffix.
   // - Instructions on 8-bit (byte) operands/registers have a trailing 'b'.
   // - Instructions on 16-bit (word) operands/registers have a trailing 'w'.
@@ -423,10 +424,10 @@
   void movl(Register dst, Register src);
   void movl(Register dst, const Operand& src);
   void movl(const Operand& dst, Register src);
+  void movl(const Operand& dst, Immediate imm);
   // Load a 32-bit immediate value, zero-extended to 64 bits.
   void movl(Register dst, Immediate imm32);
 
-  void movq(Register dst, int32_t imm32);
   void movq(Register dst, const Operand& src);
   // Sign extends immediate 32-bit value to 64 bits.
   void movq(Register dst, Immediate x);
@@ -434,7 +435,8 @@
 
   // Move 64 bit register value to 64-bit memory location.
   void movq(const Operand& dst, Register src);
-
+  // Move sign extended immediate to memory location.
+  void movq(const Operand& dst, Immediate value);
   // New x64 instructions to load a 64-bit immediate into a register.
   // All 64-bit immediates must have a relocation mode.
   void movq(Register dst, void* ptr, RelocInfo::Mode rmode);
@@ -444,66 +446,63 @@
   void movq(Register dst, ExternalReference ext);
   void movq(Register dst, Handle<Object> handle, RelocInfo::Mode rmode);
 
-
   // New x64 instruction to load from an immediate 64-bit pointer into RAX.
   void load_rax(void* ptr, RelocInfo::Mode rmode);
   void load_rax(ExternalReference ext);
 
-  void movsx_b(Register dst, const Operand& src);
-
-  void movsx_w(Register dst, const Operand& src);
-
-  void movzx_b(Register dst, const Operand& src);
-
-  void movzx_w(Register dst, const Operand& src);
-
   // Conditional moves
-  void cmov(Condition cc, Register dst, int32_t imm32);
-  void cmov(Condition cc, Register dst, Handle<Object> handle);
-  void cmov(Condition cc, Register dst, const Operand& src);
+  // TODO(X64): Implement conditional moves (cmov) here.
 
   // Exchange two registers
   void xchg(Register dst, Register src);
 
   // Arithmetics
-  void add(Register dst, Register src) {
+  void addq(Register dst, Register src) {
     arithmetic_op(0x03, dst, src);
   }
 
-  void add(Register dst, const Operand& src) {
+  void addq(Register dst, const Operand& src) {
     arithmetic_op(0x03, dst, src);
   }
 
 
-  void add(const Operand& dst, Register src) {
+  void addq(const Operand& dst, Register src) {
     arithmetic_op(0x01, src, dst);
   }
 
-  void add(Register dst, Immediate src) {
+  void addq(Register dst, Immediate src) {
     immediate_arithmetic_op(0x0, dst, src);
   }
 
-  void add(const Operand& dst, Immediate src) {
+  void addq(const Operand& dst, Immediate src) {
     immediate_arithmetic_op(0x0, dst, src);
   }
 
-  void cmp(Register dst, Register src) {
+  void addl(const Operand& dst, Immediate src) {
+    immediate_arithmetic_op_32(0x0, dst, src);
+  }
+
+  void cmpb(const Operand& dst, Immediate src) {
+    immediate_arithmetic_op_8(0x7, dst, src);
+  }
+
+  void cmpq(Register dst, Register src) {
     arithmetic_op(0x3B, dst, src);
   }
 
-  void cmp(Register dst, const Operand& src) {
+  void cmpq(Register dst, const Operand& src) {
     arithmetic_op(0x3B, dst, src);
   }
 
-  void cmp(const Operand& dst, Register src) {
+  void cmpq(const Operand& dst, Register src) {
     arithmetic_op(0x39, src, dst);
   }
 
-  void cmp(Register dst, Immediate src) {
+  void cmpq(Register dst, Immediate src) {
     immediate_arithmetic_op(0x7, dst, src);
   }
 
-  void cmp(const Operand& dst, Immediate src) {
+  void cmpq(const Operand& dst, Immediate src) {
     immediate_arithmetic_op(0x7, dst, src);
   }
 
@@ -527,15 +526,9 @@
     immediate_arithmetic_op(0x4, dst, src);
   }
 
-  void cmpb(const Operand& op, int8_t imm8);
-  void cmpb_al(const Operand& op);
-  void cmpw_ax(const Operand& op);
-  void cmpw(const Operand& op, Immediate imm16);
-
-  void dec_b(Register dst);
-
-  void dec(Register dst);
-  void dec(const Operand& dst);
+  void decq(Register dst);
+  void decq(const Operand& dst);
+  void decl(const Operand& dst);
 
   // Sign-extends rax into rdx:rax.
   void cqo();
@@ -548,8 +541,9 @@
   // Performs the operation dst = src * imm.
   void imul(Register dst, Register src, Immediate imm);
 
-  void inc(Register dst);
-  void inc(const Operand& dst);
+  void incq(Register dst);
+  void incq(const Operand& dst);
+  void incl(const Operand& dst);
 
   void lea(Register dst, const Operand& src);
 
@@ -621,32 +615,37 @@
   void store_rax(void* dst, RelocInfo::Mode mode);
   void store_rax(ExternalReference ref);
 
-  void sub(Register dst, Register src) {
+  void subq(Register dst, Register src) {
     arithmetic_op(0x2B, dst, src);
   }
 
-  void sub(Register dst, const Operand& src) {
+  void subq(Register dst, const Operand& src) {
     arithmetic_op(0x2B, dst, src);
   }
 
-  void sub(const Operand& dst, Register src) {
+  void subq(const Operand& dst, Register src) {
     arithmetic_op(0x29, src, dst);
   }
 
-  void sub(Register dst, Immediate src) {
+  void subq(Register dst, Immediate src) {
     immediate_arithmetic_op(0x5, dst, src);
   }
 
-  void sub(const Operand& dst, Immediate src) {
+  void subq(const Operand& dst, Immediate src) {
     immediate_arithmetic_op(0x5, dst, src);
   }
 
+  void subl(const Operand& dst, Immediate src) {
+    immediate_arithmetic_op_32(0x5, dst, src);
+  }
+
   void testb(Register reg, Immediate mask);
   void testb(const Operand& op, Immediate mask);
   void testl(Register reg, Immediate mask);
   void testl(const Operand& op, Immediate mask);
   void testq(const Operand& op, Register reg);
   void testq(Register dst, Register src);
+  void testq(Register dst, Immediate mask);
 
   void xor_(Register dst, Register src) {
     arithmetic_op(0x33, dst, src);
@@ -668,18 +667,19 @@
     immediate_arithmetic_op(0x6, dst, src);
   }
 
-
   // Bit operations.
   void bt(const Operand& dst, Register src);
   void bts(const Operand& dst, Register src);
 
   // Miscellaneous
+  void cpuid();
   void hlt();
   void int3();
   void nop();
   void nop(int n);
   void rdtsc();
   void ret(int imm16);
+  void setcc(Condition cc, Register reg);
 
   // Label operations & relative jumps (PPUM Appendix D)
   //
@@ -717,8 +717,6 @@
 
   // Conditional jumps
   void j(Condition cc, Label* L);
-  void j(Condition cc, byte* entry, RelocInfo::Mode rmode);
-  void j(Condition cc, Handle<Code> code);
 
   // Floating-point operations
   void fld(int i);
@@ -774,11 +772,6 @@
 
   void frndint();
 
-  void sahf();
-  void setcc(Condition cc, Register reg);
-
-  void cpuid();
-
   // SSE2 instructions
   void cvttss2si(Register dst, const Operand& src);
   void cvttsd2si(Register dst, const Operand& src);
@@ -791,8 +784,8 @@
   void divsd(XMMRegister dst, XMMRegister src);
 
   // Use either movsd or movlpd.
-  void movdbl(XMMRegister dst, const Operand& src);
-  void movdbl(const Operand& dst, XMMRegister src);
+  // void movdbl(XMMRegister dst, const Operand& src);
+  // void movdbl(const Operand& dst, XMMRegister src);
 
   // Debugging
   void Print();
@@ -813,11 +806,11 @@
 
   // Writes a doubleword of data in the code stream.
   // Used for inline tables, e.g., jump-tables.
-  void dd(uint32_t data);
+  // void dd(uint32_t data);
 
   // Writes a quadword of data in the code stream.
   // Used for inline tables, e.g., jump-tables.
-  void dd(uint64_t data, RelocInfo::Mode reloc_info);
+  // void dd(uint64_t data, RelocInfo::Mode reloc_info);
 
   // Writes the absolute address of a bound label at the given position in
   // the generated code. That positions should have the relocation mode
@@ -841,11 +834,11 @@
   static const int kMinimalBufferSize = 4*KB;
 
  protected:
-  void movsd(XMMRegister dst, const Operand& src);
-  void movsd(const Operand& dst, XMMRegister src);
+  // void movsd(XMMRegister dst, const Operand& src);
+  // void movsd(const Operand& dst, XMMRegister src);
 
-  void emit_sse_operand(XMMRegister reg, const Operand& adr);
-  void emit_sse_operand(XMMRegister dst, XMMRegister src);
+  // void emit_sse_operand(XMMRegister reg, const Operand& adr);
+  // void emit_sse_operand(XMMRegister dst, XMMRegister src);
 
 
  private:
@@ -969,15 +962,23 @@
   void arithmetic_op(byte opcode, Register reg, const Operand& op);
   void immediate_arithmetic_op(byte subcode, Register dst, Immediate src);
   void immediate_arithmetic_op(byte subcode, const Operand& dst, Immediate src);
+  // Operate on a 32-bit word in memory.
+  void immediate_arithmetic_op_32(byte subcode,
+                                  const Operand& dst,
+                                  Immediate src);
+  // Operate on a byte in memory.
+  void immediate_arithmetic_op_8(byte subcode,
+                                  const Operand& dst,
+                                  Immediate src);
   // Emit machine code for a shift operation.
   void shift(Register dst, Immediate shift_amount, int subcode);
   // Shift dst by cl % 64 bits.
   void shift(Register dst, int subcode);
 
-  void emit_farith(int b1, int b2, int i);
+  // void emit_farith(int b1, int b2, int i);
 
   // labels
-  void print(Label* L);
+  // void print(Label* L);
   void bind_to(Label* L, int pos);
   void link_to(Label* L, Label* appendix);
 
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 3f1cd9f..eb9c43f 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -27,19 +27,138 @@
 
 #include "v8.h"
 #include "codegen-inl.h"
+#include "macro-assembler.h"
 
 namespace v8 {
 namespace internal {
 
+#define __ ACCESS_MASM(masm)
+
 void Builtins::Generate_Adaptor(MacroAssembler* masm,
                                 Builtins::CFunctionId id) {
   masm->int3();  // UNIMPLEMENTED.
 }
 
-void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
-  masm->int3();  // UNIMPLEMENTED.
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+  __ push(rbp);
+  __ movq(rbp, rsp);
+
+  // Store the arguments adaptor context sentinel.
+  __ push(Immediate(ArgumentsAdaptorFrame::SENTINEL));
+
+  // Push the function on the stack.
+  __ push(rdi);
+
+  // Preserve the number of arguments on the stack. Must preserve both
+  // rax and rbx because these registers are used when copying the
+  // arguments and the receiver.
+  ASSERT(kSmiTagSize == 1);
+  __ lea(rcx, Operand(rax, rax, kTimes1, kSmiTag));
+  __ push(rcx);
 }
 
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+  // Retrieve the number of arguments from the stack. Number is a Smi.
+  __ movq(rbx, Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+  // Leave the frame.
+  __ movq(rsp, rbp);
+  __ pop(rbp);
+
+  // Remove caller arguments from the stack.
+  // rbx holds a Smi, i.e. the argument count shifted left by one bit,
+  // so scaling it by 4 gives the count times kPointerSize (8 bytes).
+  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  ASSERT_EQ(kPointerSize, (1 << kSmiTagSize) * 4);
+  __ pop(rcx);
+  __ lea(rsp, Operand(rsp, rbx, kTimes4, 1 * kPointerSize));  // 1 ~ receiver
+  __ push(rcx);
+}
+
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax : actual number of arguments
+  //  -- rbx : expected number of arguments
+  //  -- rdx : code entry to call
+  // -----------------------------------
+
+  Label invoke, dont_adapt_arguments;
+  __ IncrementCounter(&Counters::arguments_adaptors, 1);
+
+  Label enough, too_few;
+  __ cmpq(rax, rbx);
+  __ j(less, &too_few);
+  __ cmpq(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+  __ j(equal, &dont_adapt_arguments);
+
+  {  // Enough parameters: Actual >= expected.
+    __ bind(&enough);
+    EnterArgumentsAdaptorFrame(masm);
+
+    // Copy receiver and all expected arguments.
+    const int offset = StandardFrameConstants::kCallerSPOffset;
+    __ lea(rax, Operand(rbp, rax, kTimesPointerSize, offset));
+    __ movq(rcx, Immediate(-1));  // account for receiver
+
+    Label copy;
+    __ bind(&copy);
+    __ incq(rcx);
+    __ push(Operand(rax, 0));
+    __ subq(rax, Immediate(kPointerSize));
+    __ cmpq(rcx, rbx);
+    __ j(less, &copy);
+    __ jmp(&invoke);
+  }
+
+  {  // Too few parameters: Actual < expected.
+    __ bind(&too_few);
+    EnterArgumentsAdaptorFrame(masm);
+
+    // Copy receiver and all actual arguments.
+    const int offset = StandardFrameConstants::kCallerSPOffset;
+    __ lea(rdi, Operand(rbp, rax, kTimesPointerSize, offset));
+    __ movq(rcx, Immediate(-1));  // account for receiver
+
+    Label copy;
+    __ bind(&copy);
+    __ incq(rcx);
+    __ push(Operand(rdi, 0));
+    __ subq(rdi, Immediate(kPointerSize));
+    __ cmpq(rcx, rax);
+    __ j(less, &copy);
+
+    // Fill remaining expected arguments with undefined values.
+    Label fill;
+    __ movq(kScratchRegister,
+            Factory::undefined_value(),
+            RelocInfo::EMBEDDED_OBJECT);
+    __ bind(&fill);
+    __ incq(rcx);
+    __ push(kScratchRegister);
+    __ cmpq(rcx, rbx);
+    __ j(less, &fill);
+
+    // Restore function pointer.
+    __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+  }
+
+  // Call the entry point.
+  __ bind(&invoke);
+  __ call(rdx);
+
+  // Leave frame and return.
+  LeaveArgumentsAdaptorFrame(masm);
+  __ ret(0);
+
+  // -------------------------------------------
+  // Don't adapt arguments.
+  // -------------------------------------------
+  __ bind(&dont_adapt_arguments);
+  __ jmp(rdx);
+}
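In outline, the trampoline rebuilds the argument area so its length always matches what the callee expects; in the "too few" case the receiver and actual arguments are copied and the tail is padded with undefined. An illustrative C++ model of that path (stand-in types, not the real stack layout):

    #include <vector>

    typedef void* Value;  // stand-in for a tagged V8 value

    // Model of the "too few" path: receiver plus actuals, padded with
    // undefined until the expected argument count is reached.
    std::vector<Value> AdaptArguments(Value receiver, const Value* args,
                                      int argc, int expected, Value undef) {
      std::vector<Value> adapted;
      adapted.push_back(receiver);
      for (int i = 0; i < argc; i++) adapted.push_back(args[i]);
      for (int i = argc; i < expected; i++) adapted.push_back(undef);
      return adapted;
    }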
+
+
 void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
   masm->int3();  // UNIMPLEMENTED.
 }
@@ -52,14 +171,125 @@
   masm->int3();  // UNIMPLEMENTED.
 }
 
-void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
-  masm->int3();  // UNIMPLEMENTED.
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+                                             bool is_construct) {
+  // Expects five C++ function parameters.
+  // - Address entry (ignored)
+  // - JSFunction* function
+  // - Object* receiver
+  // - int argc
+  // - Object*** argv
+  // (see Invoke in execution.cc).
+
+  // Platform specific argument handling. After this, the stack contains
+  // an internal frame and the pushed function and receiver, and
+  // register rax and rbx holds the argument count and argument array,
+  // while rdi holds the function pointer and rsi the context.
+#ifdef __MSVC__
+  // MSVC parameters in:
+  // rcx : entry (ignored)
+  // rdx : function
+  // r8 : receiver
+  // r9 : argc
+  // [rsp+0x20] : argv
+
+  // Clear the context before we push it when entering the JS frame.
+  __ xor_(rsi, rsi);
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Load the function context into rsi.
+  __ movq(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
+
+  // Push the function and the receiver onto the stack.
+  __ push(rdx);
+  __ push(r8);
+
+  // Load the number of arguments and setup pointer to the arguments.
+  __ movq(rax, r9);
+  // Load the previous frame pointer to access C argument on stack
+  __ movq(kScratchRegister, Operand(rbp, 0));
+  __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
+  // Load the function pointer into rdi.
+  __ movq(rdi, rdx);
+#else  // !defined(__MSVC__)
+  // GCC parameters in:
+  // rdi : entry (ignored)
+  // rsi : function
+  // rdx : receiver
+  // rcx : argc
+  // r8  : argv
+
+  __ movq(rdi, rsi);
+  // rdi : function
+
+  // Clear the context before we push it when entering the JS frame.
+  __ xor_(rsi, rsi);
+  // Enter an internal frame.
+  __ EnterInternalFrame();
+
+  // Push the function and receiver and setup the context.
+  __ push(rdi);
+  __ push(rdx);
+  __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+  // Load the number of arguments and setup pointer to the arguments.
+  __ movq(rax, rcx);
+  __ movq(rbx, r8);
+#endif  // __MSVC__
+  // Current stack contents:
+  // [rsp + 2 * kPointerSize ... ]: Internal frame
+  // [rsp + kPointerSize]         : function
+  // [rsp]                        : receiver
+  // Current register contents:
+  // rax : argc
+  // rbx : argv
+  // rsi : context
+  // rdi : function
+
+  // Copy arguments to the stack in a loop.
+  // Register rbx points to array of pointers to handle locations.
+  // Push the values of these handles.
+  Label loop, entry;
+  __ xor_(rcx, rcx);  // Set loop variable to 0.
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ movq(kScratchRegister, Operand(rbx, rcx, kTimesPointerSize, 0));
+  __ push(Operand(kScratchRegister, 0));  // dereference handle
+  __ addq(rcx, Immediate(1));
+  __ bind(&entry);
+  __ cmpq(rcx, rax);
+  __ j(not_equal, &loop);
+
+  // Invoke the code.
+  if (is_construct) {
+    // Expects rdi to hold function pointer.
+    __ movq(kScratchRegister,
+            Handle<Code>(Builtins::builtin(Builtins::JSConstructCall)),
+            RelocInfo::CODE_TARGET);
+    __ call(kScratchRegister);
+  } else {
+    ParameterCount actual(rax);
+    // Function must be in rdi.
+    __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+  }
+
+  // Exit the JS frame. Notice that this also removes the empty
+  // context and the function left on the stack by the code
+  // invocation.
+  __ LeaveInternalFrame();
+  // TODO(X64): Is argument correct? Is there a receiver to remove?
+  __ ret(1 * kPointerSize);  // remove receiver
 }
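Both trampolines are entered from C++ with the five parameters listed at the top of the helper; the caller in execution.cc reaches them through a function-pointer type roughly like the following (treat the exact typedef as an assumption, not a quote from that file):

    // Assumed shape of the C++-side entry point (cf. execution.cc).
    typedef Object* (*JSEntryFunction)(byte* entry,
                                       Object* function,
                                       Object* receiver,
                                       int argc,
                                       Object*** args);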
 
+
 void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
-  masm->int3();  // UNIMPLEMENTED.
+  Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, true);
 }
 
 } }  // namespace v8::internal
-
-
diff --git a/src/x64/codegen-x64-inl.h b/src/x64/codegen-x64-inl.h
index 0d5b0e2..733378d 100644
--- a/src/x64/codegen-x64-inl.h
+++ b/src/x64/codegen-x64-inl.h
@@ -37,6 +37,17 @@
 void DeferredCode::Jump() { UNIMPLEMENTED(); }
 void DeferredCode::Branch(Condition cc) { UNIMPLEMENTED(); }
 
+
+void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
+  GenerateFastMathOp(SIN, args);
+}
+
+
+void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
+  GenerateFastMathOp(COS, args);
+}
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_X64_CODEGEN_X64_INL_H_
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index ca58e09..dc32227 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -30,6 +30,8 @@
 #include "macro-assembler.h"
 #include "register-allocator-inl.h"
 #include "codegen.h"
+// TEST: compiler.h is included only for TestCodeGenerator() below.
+#include "compiler.h"
 
 namespace v8 {
 namespace internal {
@@ -41,6 +43,37 @@
 
 void DeferredCode::RestoreRegisters() { UNIMPLEMENTED(); }
 
+// -------------------------------------------------------------------------
+// CodeGenState implementation.
+
+CodeGenState::CodeGenState(CodeGenerator* owner)
+    : owner_(owner),
+      typeof_state_(NOT_INSIDE_TYPEOF),
+      destination_(NULL),
+      previous_(NULL) {
+  owner_->set_state(this);
+}
+
+
+CodeGenState::CodeGenState(CodeGenerator* owner,
+                           TypeofState typeof_state,
+                           ControlDestination* destination)
+    : owner_(owner),
+      typeof_state_(typeof_state),
+      destination_(destination),
+      previous_(owner->state()) {
+  owner_->set_state(this);
+}
+
+
+CodeGenState::~CodeGenState() {
+  ASSERT(owner_->state() == this);
+  owner_->set_state(previous_);
+}
+
+
+// -----------------------------------------------------------------------------
+// CodeGenerator implementation.
 
 CodeGenerator::CodeGenerator(int buffer_size,
                              Handle<Script> script,
@@ -58,17 +91,127 @@
       in_spilled_code_(false) {
 }
 
-#define __ masm->
+#define __ ACCESS_MASM(masm_)
 
 
 void CodeGenerator::DeclareGlobals(Handle<FixedArray> a) {
   UNIMPLEMENTED();
 }
 
-void CodeGenerator::GenCode(FunctionLiteral* a) {
-  masm_->int3();  // UNIMPLEMENTED
+void CodeGenerator::TestCodeGenerator() {
+  // Compile a function from a string, and run it.
+  Handle<JSFunction> test_function = Compiler::Compile(
+      Factory::NewStringFromAscii(CStrVector("42")),
+      Factory::NewStringFromAscii(CStrVector("CodeGeneratorTestScript")),
+      0,
+      0,
+      NULL,
+      NULL);
+
+  Code* code_object = test_function->code();  // Local for debugging ease.
+  USE(code_object);
+
+  // Create a dummy function and context.
+  Handle<JSFunction> bridge =
+      Factory::NewFunction(Factory::empty_symbol(), Factory::undefined_value());
+  Handle<Context> context =
+      Factory::NewFunctionContext(Context::MIN_CONTEXT_SLOTS, bridge);
+
+  test_function = Factory::NewFunctionFromBoilerplate(
+      test_function,
+      context);
+
+  bool pending_exceptions;
+  Handle<Object> result =
+      Execution::Call(test_function,
+                      Handle<Object>::cast(test_function),
+                      0,
+                      NULL,
+                      &pending_exceptions);
+  CHECK(result->IsSmi());
+  CHECK_EQ(42, Smi::cast(*result)->value());
 }
 
+
+void CodeGenerator::GenCode(FunctionLiteral* function) {
+  // Record the position for debugging purposes.
+  CodeForFunctionPosition(function);
+  // ZoneList<Statement*>* body = fun->body();
+
+  // Initialize state.
+  ASSERT(scope_ == NULL);
+  scope_ = function->scope();
+  ASSERT(allocator_ == NULL);
+  RegisterAllocator register_allocator(this);
+  allocator_ = &register_allocator;
+  ASSERT(frame_ == NULL);
+  frame_ = new VirtualFrame();
+  set_in_spilled_code(false);
+
+  // Adjust for function-level loop nesting.
+  loop_nesting_ += function->loop_nesting();
+
+  JumpTarget::set_compiling_deferred_code(false);
+
+#ifdef DEBUG
+  if (strlen(FLAG_stop_at) > 0 &&
+      //    fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+      false) {
+    frame_->SpillAll();
+    __ int3();
+  }
+#endif
+
+  // New scope to get automatic timing calculation.
+  {  // NOLINT
+    HistogramTimerScope codegen_timer(&Counters::code_generation);
+    CodeGenState state(this);
+
+    // Entry:
+    // Stack: receiver, arguments, return address.
+    // rbp: caller's frame pointer
+    // rsp: stack pointer
+    // rdi: called JS function
+    // rsi: callee's context
+    allocator_->Initialize();
+    frame_->Enter();
+
+    Result return_register = allocator_->Allocate(rax);
+
+    __ movq(return_register.reg(), Immediate(0x54));  // Smi 42
+
+    GenerateReturnSequence(&return_register);
+  }
+}
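The Immediate(0x54) above is the Smi encoding of 42: with kSmiTagSize == 1 and a zero tag bit, a small integer v is stored as v << 1, so 42 becomes 84, i.e. 0x54. As a one-line check:

    // Smi encoding sketch: tag bit 0 is zero, payload shifted left by one.
    inline intptr_t SmiEncode(int value) {
      return static_cast<intptr_t>(value) << 1;  // SmiEncode(42) == 0x54
    }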
+
+void CodeGenerator::GenerateReturnSequence(Result* return_value) {
+  // The return value is a live (but not currently reference counted)
+  // reference to rax.  This is safe because the current frame does not
+  // contain a reference to rax (it is prepared for the return by spilling
+  // all registers).
+  if (FLAG_trace) {
+    frame_->Push(return_value);
+    // *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
+  }
+  return_value->ToRegister(rax);
+
+  // Add a label for checking the size of the code used for returning.
+  Label check_exit_codesize;
+  masm_->bind(&check_exit_codesize);
+
+  // Leave the frame and return popping the arguments and the
+  // receiver.
+  frame_->Exit();
+  masm_->ret((scope_->num_parameters() + 1) * kPointerSize);
+  DeleteFrame();
+
+  // Check that the size of the code used for returning matches what is
+  // expected by the debugger.
+  // ASSERT_EQ(Debug::kIa32JSReturnSequenceLength,
+  //          masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
+}
+
+
 void CodeGenerator::GenerateFastCaseSwitchJumpTable(SwitchStatement* a,
                                                     int b,
                                                     int c,
@@ -235,9 +378,316 @@
   UNIMPLEMENTED();
 }
 
+void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::GenerateLog(ZoneList<Expression*>* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* a) {
+  UNIMPLEMENTED();
+}
+
+void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* a) {
+  UNIMPLEMENTED();
+}
+
+#undef __
+// End of CodeGenerator implementation.
+
+// -----------------------------------------------------------------------------
+// Implementation of stubs.
+
+//  Stub classes have public member named masm, not masm_.
+#define __ ACCESS_MASM(masm)
+
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+  // Check that stack should contain frame pointer, code pointer, state and
+  // return address in that order.
+  ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
+            StackHandlerConstants::kStateOffset);
+  ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
+            StackHandlerConstants::kPCOffset);
+
+  ExternalReference handler_address(Top::k_handler_address);
+  __ movq(kScratchRegister, handler_address);
+  __ movq(rdx, Operand(kScratchRegister, 0));
+  // get next in chain
+  __ movq(rcx, Operand(rdx, 0));
+  __ movq(Operand(kScratchRegister, 0), rcx);
+  __ movq(rsp, rdx);
+  __ pop(rbp);  // pop frame pointer
+  __ pop(rdx);  // remove code pointer
+  __ pop(rdx);  // remove state
+
+  // Before returning we restore the context from the frame pointer if not NULL.
+  // The frame pointer is NULL in the exception handler of a JS entry frame.
+  __ xor_(rsi, rsi);  // tentatively set context pointer to NULL
+  Label skip;
+  __ cmpq(rbp, Immediate(0));
+  __ j(equal, &skip);
+  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  __ bind(&skip);
+
+  __ ret(0);
+}
+
+
+
+void CEntryStub::GenerateCore(MacroAssembler* masm,
+                              Label* throw_normal_exception,
+                              Label* throw_out_of_memory_exception,
+                              StackFrame::Type frame_type,
+                              bool do_gc,
+                              bool always_allocate_scope) {
+  // rax: result parameter for PerformGC, if any.
+  // rbx: pointer to C function  (C callee-saved).
+  // rbp: frame pointer  (restored after C call).
+  // rsp: stack pointer  (restored after C call).
+  // rdi: number of arguments including receiver.
+  // r15: pointer to the first argument (C callee-saved).
+  //      This pointer is reused in LeaveExitFrame(), so it is stored in a
+  //      callee-saved register.
+
+  if (do_gc) {
+    __ movq(Operand(rsp, 0), rax);  // Result.
+    __ movq(kScratchRegister,
+            FUNCTION_ADDR(Runtime::PerformGC),
+            RelocInfo::RUNTIME_ENTRY);
+    __ call(kScratchRegister);
+  }
+
+  ExternalReference scope_depth =
+      ExternalReference::heap_always_allocate_scope_depth();
+  if (always_allocate_scope) {
+    __ movq(kScratchRegister, scope_depth);
+    __ incl(Operand(kScratchRegister, 0));
+  }
+
+  // Call C function.
+#ifdef __MSVC__
+  // MSVC passes arguments in rcx, rdx, r8, r9
+  __ movq(rcx, rdi);  // argc.
+  __ movq(rdx, r15);  // argv.
+#else  // ! defined(__MSVC__)
+  // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
+  // First argument is already in rdi.
+  __ movq(rsi, r15);  // argv.
+#endif
+  __ call(rbx);
+  // Result is in rax - do not destroy this register!
+
+  if (always_allocate_scope) {
+    __ movq(kScratchRegister, scope_depth);
+    __ decl(Operand(kScratchRegister, 0));
+  }
+
+  // Check for failure result.
+  Label failure_returned;
+  ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
+  __ lea(rcx, Operand(rax, 1));
+  // Lower 2 bits of rcx are 0 iff rax has failure tag.
+  __ testl(rcx, Immediate(kFailureTagMask));
+  __ j(zero, &failure_returned);
+
+  // Exit the JavaScript to C++ exit frame.
+  __ LeaveExitFrame(frame_type);
+  __ ret(0);
+
+  // Handling of failure.
+  __ bind(&failure_returned);
+
+  Label retry;
+  // If the returned failure is RETRY_AFTER_GC, continue at the retry label.
+  ASSERT(Failure::RETRY_AFTER_GC == 0);
+  __ testq(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
+  __ j(zero, &retry);
+
+  Label continue_exception;
+  // If the returned failure is EXCEPTION then promote Top::pending_exception().
+  __ movq(kScratchRegister, Failure::Exception(), RelocInfo::NONE);
+  __ cmpq(rax, kScratchRegister);
+  __ j(not_equal, &continue_exception);
+
+  // Retrieve the pending exception and clear the variable.
+  ExternalReference pending_exception_address(Top::k_pending_exception_address);
+  __ movq(kScratchRegister, pending_exception_address);
+  __ movq(rax, Operand(kScratchRegister, 0));
+  __ movq(rdx, ExternalReference::the_hole_value_location());
+  __ movq(rdx, Operand(rdx, 0));
+  __ movq(Operand(kScratchRegister, 0), rdx);
+
+  __ bind(&continue_exception);
+  // Special handling of out of memory exception.
+  __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE);
+  __ cmpq(rax, kScratchRegister);
+  __ j(equal, throw_out_of_memory_exception);
+
+  // Handle normal exception.
+  __ jmp(throw_normal_exception);
+
+  // Retry.
+  __ bind(&retry);
+}
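
The lea/testl pair above performs the failure-tag check without disturbing the
result in rax. A standalone C++ sketch of the same predicate, assuming V8's
tagging constants at this revision (kFailureTag == 3, hence
kFailureTagMask == 3):

    #include <cassert>
    #include <cstdint>

    static const intptr_t kFailureTagMask = 3;  // (1 << kFailureTagSize) - 1

    // Failure objects have the low two bits set, so value + 1 clears them.
    static bool HasFailureTag(intptr_t value) {
      return ((value + 1) & kFailureTagMask) == 0;
    }

    int main() {
      assert(HasFailureTag(0x1003));   // failure-tagged word
      assert(!HasFailureTag(0x1000));  // smi-tagged word
      return 0;
    }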
+
+
+void CEntryStub::GenerateThrowOutOfMemory(MacroAssembler* masm) {
+  // Fetch top stack handler.
+  ExternalReference handler_address(Top::k_handler_address);
+  __ movq(kScratchRegister, handler_address);
+  __ movq(rdx, Operand(kScratchRegister, 0));
+
+  // Unwind the handlers until the ENTRY handler is found.
+  Label loop, done;
+  __ bind(&loop);
+  // Load the type of the current stack handler.
+  __ cmpq(Operand(rdx, StackHandlerConstants::kStateOffset),
+         Immediate(StackHandler::ENTRY));
+  __ j(equal, &done);
+  // Fetch the next handler in the list.
+  __ movq(rdx, Operand(rdx, StackHandlerConstants::kNextOffset));
+  __ jmp(&loop);
+  __ bind(&done);
+
+  // Set the top handler address to the next handler past the current
+  // ENTRY handler.
+  __ movq(rax, Operand(rdx, StackHandlerConstants::kNextOffset));
+  __ store_rax(handler_address);
+
+  // Set external caught exception to false.
+  __ movq(rax, Immediate(false));
+  ExternalReference external_caught(Top::k_external_caught_exception_address);
+  __ store_rax(external_caught);
+
+  // Set pending exception and rax to out of memory exception.
+  __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
+  ExternalReference pending_exception(Top::k_pending_exception_address);
+  __ store_rax(pending_exception);
+
+  // Restore the stack to the address of the ENTRY handler.
+  __ movq(rsp, rdx);
+
+  // Clear the context pointer.
+  __ xor_(rsi, rsi);
+
+  // Restore registers from handler.
+
+  __ pop(rbp);  // FP
+  ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
+            StackHandlerConstants::kStateOffset);
+  __ pop(rdx);  // State
+
+  ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
+            StackHandlerConstants::kPCOffset);
+  __ ret(0);
+}
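
The unwinding loop above is a plain linked-list walk over the handler chain.
A hedged sketch in C++ (the struct and the kEntry constant are illustrative
stand-ins, not V8 declarations):

    #include <cassert>

    struct Handler {
      Handler* next;  // outer handler in the chain
      int state;      // e.g. TRY_CATCH, TRY_FINALLY, or ENTRY
    };

    // Walk the chain until a handler in the ENTRY state is found.
    static Handler* UnwindToEntry(Handler* top, int kEntry) {
      while (top->state != kEntry) top = top->next;
      return top;
    }

    int main() {
      Handler entry = { 0, 2 };       // state 2 stands in for ENTRY
      Handler inner = { &entry, 1 };  // a nested TRY handler
      assert(UnwindToEntry(&inner, 2) == &entry);
      return 0;
    }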
+
 
 void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
-  masm->int3();  // TODO(X64): UNIMPLEMENTED.
+  // rax: number of arguments including receiver
+  // rbx: pointer to C function  (C callee-saved)
+  // rbp: frame pointer  (restored after C call)
+  // rsp: stack pointer  (restored after C call)
+  // rsi: current context (C callee-saved)
+  // rdi: caller's parameter pointer pp  (C callee-saved)
+
+  // NOTE: Invocations of builtins may return failure objects
+  // instead of a proper result. The builtin entry handles
+  // this by performing a garbage collection and retrying the
+  // builtin once.
+
+  StackFrame::Type frame_type = is_debug_break ?
+      StackFrame::EXIT_DEBUG :
+      StackFrame::EXIT;
+
+  // Enter the exit frame that transitions from JavaScript to C++.
+  __ EnterExitFrame(frame_type);
+
+  // rax: result parameter for PerformGC, if any (setup below).
+  //      Holds the result of a previous call to GenerateCore that
+  //      returned a failure. On next call, it's used as parameter
+  //      to Runtime::PerformGC.
+  // rbx: pointer to builtin function  (C callee-saved).
+  // rbp: frame pointer  (restored after C call).
+  // rsp: stack pointer  (restored after C call).
+  // rdi: number of arguments including receiver (destroyed by C call).
+  //      The rdi register is not callee-saved in the Unix 64-bit ABI, so
+  //      we must treat it as volatile.
+  // r15: argv pointer (C callee-saved).
+
+  Label throw_out_of_memory_exception;
+  Label throw_normal_exception;
+
+  // Call into the runtime system. Collect garbage before the call if
+  // running with --gc-greedy set.
+  if (FLAG_gc_greedy) {
+    Failure* failure = Failure::RetryAfterGC(0);
+    __ movq(rax, failure, RelocInfo::NONE);
+  }
+  GenerateCore(masm,
+               &throw_normal_exception,
+               &throw_out_of_memory_exception,
+               frame_type,
+               FLAG_gc_greedy,
+               false);
+
+  // Do space-specific GC and retry runtime call.
+  GenerateCore(masm,
+               &throw_normal_exception,
+               &throw_out_of_memory_exception,
+               frame_type,
+               true,
+               false);
+
+  // Do full GC and retry runtime call one final time.
+  Failure* failure = Failure::InternalError();
+  __ movq(rax, failure, RelocInfo::NONE);
+  GenerateCore(masm,
+               &throw_normal_exception,
+               &throw_out_of_memory_exception,
+               frame_type,
+               true,
+               true);
+
+  __ bind(&throw_out_of_memory_exception);
+  GenerateThrowOutOfMemory(masm);
+  // Control flow from the generated code will not return here.
+
+  __ bind(&throw_normal_exception);
+  GenerateThrowTOS(masm);
 }
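
GenerateBody emits three calls to GenerateCore, escalating the GC effort on
each retry. A schematic of the policy in ordinary C++ (the simulation below
only illustrates the ladder; nothing in it is a V8 API):

    #include <cstdio>

    // Stand-in for one generated runtime call; returns true on success.
    static bool CallRuntimeOnce(int attempt) {
      // Attempt 0: no GC first (unless --gc-greedy is set).
      // Attempt 1: space-specific GC before the call.
      // Attempt 2: full GC inside an always-allocate scope, final try.
      return attempt == 2;  // simulate success on the last attempt
    }

    int main() {
      for (int attempt = 0; attempt < 3; attempt++) {
        if (CallRuntimeOnce(attempt)) {
          std::printf("succeeded on attempt %d\n", attempt + 1);
          break;
        }
      }
      return 0;
    }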
 
 
@@ -281,7 +731,6 @@
   // Invoke: Link this frame into the handler chain.
   __ bind(&invoke);
   __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
-  __ push(rax);  // flush TOS
 
   // Clear any pending exceptions.
   __ load_rax(ExternalReference::the_hole_value_location());
@@ -302,13 +751,14 @@
     ExternalReference entry(Builtins::JSEntryTrampoline);
     __ load_rax(entry);
   }
-  __ call(FieldOperand(rax, Code::kHeaderSize));
+  __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
+  __ call(kScratchRegister);
 
   // Unlink this frame from the handler chain.
   __ movq(kScratchRegister, ExternalReference(Top::k_handler_address));
   __ pop(Operand(kScratchRegister, 0));
   // Pop next_sp.
-  __ add(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+  __ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
 
   // Restore the top frame descriptor from the stack.
   __ bind(&exit);
@@ -323,7 +773,7 @@
   __ pop(r14);
   __ pop(r13);
   __ pop(r12);
-  __ add(rsp, Immediate(2 * kPointerSize));  // remove markers
+  __ addq(rsp, Immediate(2 * kPointerSize));  // remove markers
 
   // Restore frame pointer and return.
   __ pop(rbp);
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 5f5daa4..19ad8a3 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -286,6 +286,15 @@
                                Handle<Script> script,
                                bool is_eval);
 
+  // During implementation of CodeGenerator, this call creates a
+  // CodeGenerator instance, and calls GenCode on it with a null
+  // function literal.  CodeGenerator will then construct and return
+  // a simple dummy function.  Call this during bootstrapping before
+  // trying to compile any real functions, to get CodeGenerator up
+  // and running.
+  // TODO(X64): Remove once we can get through the bootstrapping process.
+  static void TestCodeGenerator();
+
 #ifdef ENABLE_LOGGING_AND_PROFILING
   static bool ShouldGenerateLog(Expression* type);
 #endif
@@ -515,6 +524,14 @@
 
   void GenerateLog(ZoneList<Expression*>* args);
 
+  // Fast support for Math.random().
+  void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
+
+  // Fast support for Math.sin and Math.cos.
+  enum MathOp { SIN, COS };
+  void GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args);
+  inline void GenerateMathSin(ZoneList<Expression*>* args);
+  inline void GenerateMathCos(ZoneList<Expression*>* args);
 
   // Methods and constants for fast case switch statement support.
   //
diff --git a/src/x64/frames-x64.cc b/src/x64/frames-x64.cc
index 209aa2d..cb97ff6 100644
--- a/src/x64/frames-x64.cc
+++ b/src/x64/frames-x64.cc
@@ -25,3 +25,48 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#include "v8.h"
+
+#include "frames-inl.h"
+
+namespace v8 {
+namespace internal {
+
+StackFrame::Type ExitFrame::GetStateForFramePointer(unsigned char* a,
+                                                    StackFrame::State* b) {
+  // TODO(X64): UNIMPLEMENTED
+  return NONE;
+}
+
+int JavaScriptFrame::GetProvidedParametersCount() const {
+  UNIMPLEMENTED();
+  return 0;
+}
+
+StackFrame::Type StackFrame::ComputeType(StackFrame::State* a) {
+  UNIMPLEMENTED();
+  return NONE;
+}
+
+byte* ArgumentsAdaptorFrame::GetCallerStackPointer() const {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+
+void ExitFrame::Iterate(ObjectVisitor* a) const {
+  UNIMPLEMENTED();
+}
+
+byte* InternalFrame::GetCallerStackPointer() const {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+byte* JavaScriptFrame::GetCallerStackPointer() const {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
index 3416f51..31d8a2d 100644
--- a/src/x64/frames-x64.h
+++ b/src/x64/frames-x64.h
@@ -32,70 +32,74 @@
 namespace internal {
 
 // TODO(x64): This is a stub, mostly just a copy of the 32-bit ia32 version.
-// This will all need to change to be correct for x64.
+// This might all need to change to be correct for x64.
 
 static const int kNumRegs = 8;
-static const RegList kJSCallerSaved = 0;
+static const RegList kJSCallerSaved =
+    1 << 0 |  // rax
+    1 << 1 |  // rcx
+    1 << 2 |  // rdx
+    1 << 3 |  // rbx - used as a caller-saved register in JavaScript code
+    1 << 7;   // rdi - callee function
+
 static const int kNumJSCallerSaved = 5;
+
 typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
 
 class StackHandlerConstants : public AllStatic {
  public:
   static const int kNextOffset  = 0 * kPointerSize;
-  static const int kPPOffset    = 1 * kPointerSize;
-  static const int kFPOffset    = 2 * kPointerSize;
+  static const int kFPOffset    = 1 * kPointerSize;
+  static const int kStateOffset = 2 * kPointerSize;
+  static const int kPCOffset    = 3 * kPointerSize;
 
-  static const int kCodeOffset  = 3 * kPointerSize;
-
-  static const int kStateOffset = 4 * kPointerSize;
-  static const int kPCOffset    = 5 * kPointerSize;
-
-  static const int kAddressDisplacement = -1 * kPointerSize;
-  static const int kSize = 6 * kPointerSize;
+  static const int kSize = 4 * kPointerSize;
 };
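
The new handler record is four words, matching the push order used by
PushTryHandler in macro-assembler-x64.cc below. A hypothetical struct
mirroring the layout (illustrative only; V8 addresses these slots via the
offsets above, never through a struct):

    #include <cstdint>

    struct StackHandlerLayout {
      void*    next;   // kNextOffset  = 0 * kPointerSize: outer handler
      void*    fp;     // kFPOffset    = 1 * kPointerSize: saved frame pointer
      intptr_t state;  // kStateOffset = 2 * kPointerSize: TRY_CATCH etc.
      void*    pc;     // kPCOffset    = 3 * kPointerSize: return address
    };
    // sizeof(StackHandlerLayout) == 4 * kPointerSize == kSize on x64.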
 
 
 class EntryFrameConstants : public AllStatic {
  public:
-  static const int kCallerFPOffset      = -1 * kPointerSize;
+  static const int kCallerFPOffset      = 0 * kPointerSize;
 
-  static const int kFunctionArgOffset   = -1 * kPointerSize;
-  static const int kReceiverArgOffset   = -1 * kPointerSize;
-  static const int kArgcOffset          = -1 * kPointerSize;
-  static const int kArgvOffset          = -1 * kPointerSize;
+  static const int kFunctionArgOffset   = 1 * kPointerSize;
+  static const int kReceiverArgOffset   = 2 * kPointerSize;
+  static const int kArgcOffset          = 3 * kPointerSize;
+  static const int kArgvOffset          = 4 * kPointerSize;
 };
 
 
 class ExitFrameConstants : public AllStatic {
  public:
-  static const int kDebugMarkOffset = -1 * kPointerSize;
+  static const int kDebugMarkOffset = -2 * kPointerSize;
   static const int kSPOffset        = -1 * kPointerSize;
 
-  static const int kPPDisplacement = -1 * kPointerSize;
+  static const int kCallerFPOffset  = +0 * kPointerSize;
+  static const int kCallerPCOffset  = +1 * kPointerSize;
 
-  static const int kCallerFPOffset = -1 * kPointerSize;
-  static const int kCallerPCOffset = -1 * kPointerSize;
+  // FP-relative displacement of the caller's SP.  It points just
+  // below the saved PC.
+  static const int kCallerSPDisplacement = +2 * kPointerSize;
 };
 
 
 class StandardFrameConstants : public AllStatic {
  public:
-  static const int kExpressionsOffset = -1 * kPointerSize;
-  static const int kMarkerOffset      = -1 * kPointerSize;
+  static const int kExpressionsOffset = -3 * kPointerSize;
+  static const int kMarkerOffset      = -2 * kPointerSize;
   static const int kContextOffset     = -1 * kPointerSize;
-  static const int kCallerFPOffset    = -1 * kPointerSize;
-  static const int kCallerPCOffset    = -1 * kPointerSize;
-  static const int kCallerSPOffset    = -1 * kPointerSize;
+  static const int kCallerFPOffset    =  0 * kPointerSize;
+  static const int kCallerPCOffset    = +1 * kPointerSize;
+  static const int kCallerSPOffset    = +2 * kPointerSize;
 };
 
 
 class JavaScriptFrameConstants : public AllStatic {
  public:
   static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
-  static const int kSavedRegistersOffset = -1 * kPointerSize;
+  static const int kSavedRegistersOffset = +2 * kPointerSize;
   static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
 
-  static const int kParam0Offset   = -1 * kPointerSize;
+  static const int kParam0Offset   = -2 * kPointerSize;
   static const int kReceiverOffset = -1 * kPointerSize;
 };
 
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 71a3a9a..858f3a2 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -40,6 +40,14 @@
   UNIMPLEMENTED();
 }
 
+void KeyedStoreIC::ClearInlinedVersion(Address address) {
+  UNIMPLEMENTED();
+}
+
+void KeyedStoreIC::RestoreInlinedVersion(Address address) {
+  UNIMPLEMENTED();
+}
+
 void KeyedLoadIC::Generate(MacroAssembler* masm,
                            ExternalReference const& f) {
   masm->int3();  // UNIMPLEMENTED.
@@ -58,6 +66,11 @@
   return false;
 }
 
+bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
+  UNIMPLEMENTED();
+  return false;
+}
+
 Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
   UNIMPLEMENTED();
   return NULL;
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 54c299d..c47bfcc 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -29,7 +29,9 @@
 
 #include "bootstrapper.h"
 #include "codegen-inl.h"
+#include "assembler-x64.h"
 #include "macro-assembler-x64.h"
+#include "debug.h"
 
 namespace v8 {
 namespace internal {
@@ -43,8 +45,152 @@
 }
 
 
-void MacroAssembler::TailCallRuntime(ExternalReference const& a, int b) {
-  UNIMPLEMENTED();
+void MacroAssembler::Assert(Condition cc, const char* msg) {
+  if (FLAG_debug_code) Check(cc, msg);
+}
+
+
+void MacroAssembler::Check(Condition cc, const char* msg) {
+  Label L;
+  j(cc, &L);
+  Abort(msg);
+  // Control will not return here.
+  bind(&L);
+}
+
+
+void MacroAssembler::ConstructAndTestJSFunction() {
+  const int initial_buffer_size = 4 * KB;
+  char* buffer = new char[initial_buffer_size];
+  MacroAssembler masm(buffer, initial_buffer_size);
+
+  const uint64_t secret = V8_INT64_C(0xdeadbeefcafebabe);
+  Handle<String> constant =
+      Factory::NewStringFromAscii(Vector<const char>("451", 3), TENURED);
+#define __ ACCESS_MASM((&masm))
+  // Construct a simple JSFunction here, using Assembler and MacroAssembler
+  // commands.
+  __ movq(rax, constant, RelocInfo::EMBEDDED_OBJECT);
+  __ push(rax);
+  __ CallRuntime(Runtime::kStringParseFloat, 1);
+  __ movq(kScratchRegister, secret, RelocInfo::NONE);
+  __ addq(rax, kScratchRegister);
+  __ ret(0);
+#undef __
+  CodeDesc desc;
+  masm.GetCode(&desc);
+  Code::Flags flags = Code::ComputeFlags(Code::FUNCTION);
+  Object* code = Heap::CreateCode(desc, NULL, flags, Handle<Object>::null());
+  if (!code->IsFailure()) {
+    Handle<Code> code_handle(Code::cast(code));
+    Handle<String> name =
+        Factory::NewStringFromAscii(Vector<const char>("foo", 3), NOT_TENURED);
+    Handle<JSFunction> function =
+        Factory::NewFunction(name,
+                             JS_FUNCTION_TYPE,
+                             JSObject::kHeaderSize,
+                             code_handle,
+                             true);
+    bool pending_exceptions;
+    Handle<Object> result =
+        Execution::Call(function,
+                        Handle<Object>::cast(function),
+                        0,
+                        NULL,
+                        &pending_exceptions);
+    CHECK(result->IsSmi());
+    CHECK(secret + (451 << kSmiTagSize) == reinterpret_cast<uint64_t>(*result));
+  }
+}
+
+
+void MacroAssembler::Abort(const char* msg) {
+  // We want to pass the msg string like a smi to avoid GC
+  // problems, however msg is not guaranteed to be aligned
+  // properly. Instead, we pass an aligned pointer that is
+  // a proper v8 smi, but also pass the alignment difference
+  // from the real pointer as a smi.
+  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
+  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
+  // Note: p0 might not be a valid Smi *value*, but it has a valid Smi tag.
+  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
+#ifdef DEBUG
+  if (msg != NULL) {
+    RecordComment("Abort message: ");
+    RecordComment(msg);
+  }
+#endif
+  push(rax);
+  movq(kScratchRegister, p0, RelocInfo::NONE);
+  push(kScratchRegister);
+  movq(kScratchRegister,
+       reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0)),
+       RelocInfo::NONE);
+  push(kScratchRegister);
+  CallRuntime(Runtime::kAbort, 2);
+  // Control will not return here.
+}
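
Abort smuggles a raw char* through the GC-safe runtime interface by splitting
it into two smi-encodable pieces. A standalone sketch of the encoding,
assuming the usual kSmiTag == 0 and kSmiTagMask == 1:

    #include <cassert>
    #include <cstdint>

    int main() {
      const intptr_t kSmiTagMask = 1;
      const intptr_t kSmiTag = 0;
      intptr_t p1 = 0x100001;                       // msg pointer, maybe unaligned
      intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;  // smi-tagged base, low bit clear
      intptr_t delta = p1 - p0;                     // 0 or 1, itself smi-encodable
      assert(p0 + delta == p1);                     // the runtime rebuilds the pointer
      return 0;
    }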
+
+
+void MacroAssembler::CallStub(CodeStub* stub) {
+  ASSERT(allow_stub_calls());  // calls are not allowed in some stubs
+  movq(kScratchRegister, stub->GetCode(), RelocInfo::CODE_TARGET);
+  call(kScratchRegister);
+}
+
+
+void MacroAssembler::StubReturn(int argc) {
+  ASSERT(argc >= 1 && generating_stub());
+  ret((argc - 1) * kPointerSize);
+}
+
+
+void MacroAssembler::IllegalOperation(int num_arguments) {
+  if (num_arguments > 0) {
+    addq(rsp, Immediate(num_arguments * kPointerSize));
+  }
+  movq(rax, Factory::undefined_value(), RelocInfo::EMBEDDED_OBJECT);
+}
+
+
+void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
+  CallRuntime(Runtime::FunctionForId(id), num_arguments);
+}
+
+
+void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
+  // If the expected number of arguments of the runtime function is
+  // constant, we check that the actual number of arguments match the
+  // expectation.
+  if (f->nargs >= 0 && f->nargs != num_arguments) {
+    IllegalOperation(num_arguments);
+    return;
+  }
+
+  Runtime::FunctionId function_id =
+      static_cast<Runtime::FunctionId>(f->stub_id);
+  RuntimeStub stub(function_id, num_arguments);
+  CallStub(&stub);
+}
+
+
+void MacroAssembler::TailCallRuntime(ExternalReference const& ext,
+                                     int num_arguments) {
+  // TODO(1236192): Most runtime routines don't need the number of
+  // arguments passed in because it is constant. At some point we
+  // should remove this need and make the runtime routine entry code
+  // smarter.
+  movq(rax, Immediate(num_arguments));
+  JumpToBuiltin(ext);
+}
+
+
+void MacroAssembler::JumpToBuiltin(const ExternalReference& ext) {
+  // Set the entry point and jump to the C entry runtime stub.
+  movq(rbx, ext);
+  CEntryStub ces;
+  movq(kScratchRegister, ces.GetCode(), RelocInfo::CODE_TARGET);
+  jmp(kScratchRegister);
 }
 
 
@@ -71,18 +217,43 @@
 }
 
 
+void MacroAssembler::Jump(ExternalReference ext) {
+  movq(kScratchRegister, ext);
+  jmp(kScratchRegister);
+}
+
+
+void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
+  movq(kScratchRegister, destination, rmode);
+  jmp(kScratchRegister);
+}
+
+
+void MacroAssembler::Call(ExternalReference ext) {
+  movq(kScratchRegister, ext);
+  call(kScratchRegister);
+}
+
+
+void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
+  movq(kScratchRegister, destination, rmode);
+  call(kScratchRegister);
+}
+
+
 void MacroAssembler::PushTryHandler(CodeLocation try_location,
                                     HandlerType type) {
-  // The pc (return address) is already on TOS.
-  // This code pushes state, code, frame pointer and parameter pointer.
-  // Check that they are expected next on the stack, int that order.
+  // Adjust this code if not the case.
+  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+
+  // The pc (return address) is already on TOS.  This code pushes state,
+  // frame pointer and current handler.  Check that they are expected
+  // next on the stack, in that order.
   ASSERT_EQ(StackHandlerConstants::kStateOffset,
             StackHandlerConstants::kPCOffset - kPointerSize);
-  ASSERT_EQ(StackHandlerConstants::kCodeOffset,
-            StackHandlerConstants::kStateOffset - kPointerSize);
   ASSERT_EQ(StackHandlerConstants::kFPOffset,
-            StackHandlerConstants::kCodeOffset - kPointerSize);
-  ASSERT_EQ(StackHandlerConstants::kPPOffset,
+            StackHandlerConstants::kStateOffset - kPointerSize);
+  ASSERT_EQ(StackHandlerConstants::kNextOffset,
             StackHandlerConstants::kFPOffset - kPointerSize);
 
   if (try_location == IN_JAVASCRIPT) {
@@ -91,26 +262,414 @@
     } else {
       push(Immediate(StackHandler::TRY_FINALLY));
     }
-    push(Immediate(Smi::FromInt(StackHandler::kCodeNotPresent)));
     push(rbp);
-    push(rdi);
   } else {
     ASSERT(try_location == IN_JS_ENTRY);
-    // The parameter pointer is meaningless here and ebp does not
-    // point to a JS frame. So we save NULL for both pp and ebp. We
-    // expect the code throwing an exception to check ebp before
-    // dereferencing it to restore the context.
+    // The frame pointer does not point to a JS frame so we save NULL
+    // for rbp. We expect the code throwing an exception to check rbp
+    // before dereferencing it to restore the context.
     push(Immediate(StackHandler::ENTRY));
-    push(Immediate(Smi::FromInt(StackHandler::kCodeNotPresent)));
-    push(Immediate(0));  // NULL frame pointer
-    push(Immediate(0));  // NULL parameter pointer
+    push(Immediate(0));  // NULL frame pointer.
   }
+  // Save the current handler.
   movq(kScratchRegister, ExternalReference(Top::k_handler_address));
-  // Cached TOS.
-  movq(rax, Operand(kScratchRegister, 0));
+  push(Operand(kScratchRegister, 0));
   // Link this handler.
   movq(Operand(kScratchRegister, 0), rsp);
 }
 
 
+void MacroAssembler::Ret() {
+  ret(0);
+}
+
+
+void MacroAssembler::CmpObjectType(Register heap_object,
+                                   InstanceType type,
+                                   Register map) {
+  movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+  CmpInstanceType(map, type);
+}
+
+
+void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
+  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
+       Immediate(static_cast<int8_t>(type)));
+}
+
+
+
+void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    movq(kScratchRegister, ExternalReference(counter));
+    movl(Operand(kScratchRegister, 0), Immediate(value));
+  }
+}
+
+
+void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
+  ASSERT(value > 0);
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    movq(kScratchRegister, ExternalReference(counter));
+    Operand operand(kScratchRegister, 0);
+    if (value == 1) {
+      incl(operand);
+    } else {
+      addl(operand, Immediate(value));
+    }
+  }
+}
+
+
+void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
+  ASSERT(value > 0);
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    movq(kScratchRegister, ExternalReference(counter));
+    Operand operand(kScratchRegister, 0);
+    if (value == 1) {
+      decl(operand);
+    } else {
+      subl(operand, Immediate(value));
+    }
+  }
+}
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+
+void MacroAssembler::PushRegistersFromMemory(RegList regs) {
+  ASSERT((regs & ~kJSCallerSaved) == 0);
+  // Push the content of the memory location to the stack.
+  for (int i = 0; i < kNumJSCallerSaved; i++) {
+    int r = JSCallerSavedCode(i);
+    if ((regs & (1 << r)) != 0) {
+      ExternalReference reg_addr =
+          ExternalReference(Debug_Address::Register(i));
+      movq(kScratchRegister, reg_addr);
+      push(Operand(kScratchRegister, 0));
+    }
+  }
+}
+
+void MacroAssembler::SaveRegistersToMemory(RegList regs) {
+  ASSERT((regs & ~kJSCallerSaved) == 0);
+  // Copy the content of registers to memory location.
+  for (int i = 0; i < kNumJSCallerSaved; i++) {
+    int r = JSCallerSavedCode(i);
+    if ((regs & (1 << r)) != 0) {
+      Register reg = { r };
+      ExternalReference reg_addr =
+          ExternalReference(Debug_Address::Register(i));
+      movq(kScratchRegister, reg_addr);
+      movq(Operand(kScratchRegister, 0), reg);
+    }
+  }
+}
+
+
+void MacroAssembler::RestoreRegistersFromMemory(RegList regs) {
+  ASSERT((regs & ~kJSCallerSaved) == 0);
+  // Copy the content of memory location to registers.
+  for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
+    int r = JSCallerSavedCode(i);
+    if ((regs & (1 << r)) != 0) {
+      Register reg = { r };
+      ExternalReference reg_addr =
+          ExternalReference(Debug_Address::Register(i));
+      movq(kScratchRegister, reg_addr);
+      movq(reg, Operand(kScratchRegister, 0));
+    }
+  }
+}
+
+
+void MacroAssembler::PopRegistersToMemory(RegList regs) {
+  ASSERT((regs & ~kJSCallerSaved) == 0);
+  // Pop the content from the stack to the memory location.
+  for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
+    int r = JSCallerSavedCode(i);
+    if ((regs & (1 << r)) != 0) {
+      ExternalReference reg_addr =
+          ExternalReference(Debug_Address::Register(i));
+      movq(kScratchRegister, reg_addr);
+      pop(Operand(kScratchRegister, 0));
+    }
+  }
+}
+
+
+void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
+                                                    Register scratch,
+                                                    RegList regs) {
+  ASSERT(!scratch.is(kScratchRegister));
+  ASSERT(!base.is(kScratchRegister));
+  ASSERT(!base.is(scratch));
+  ASSERT((regs & ~kJSCallerSaved) == 0);
+  // Copy the content of the stack to the memory location and adjust base.
+  for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
+    int r = JSCallerSavedCode(i);
+    if ((regs & (1 << r)) != 0) {
+      movq(scratch, Operand(base, 0));
+      ExternalReference reg_addr =
+          ExternalReference(Debug_Address::Register(i));
+      movq(kScratchRegister, reg_addr);
+      movq(Operand(kScratchRegister, 0), scratch);
+      lea(base, Operand(base, kPointerSize));
+    }
+  }
+}
+
+#endif  // ENABLE_DEBUGGER_SUPPORT
+
+
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+                                    const ParameterCount& actual,
+                                    Handle<Code> code_constant,
+                                    Register code_register,
+                                    Label* done,
+                                    InvokeFlag flag) {
+  bool definitely_matches = false;
+  Label invoke;
+  if (expected.is_immediate()) {
+    ASSERT(actual.is_immediate());
+    if (expected.immediate() == actual.immediate()) {
+      definitely_matches = true;
+    } else {
+      movq(rax, Immediate(actual.immediate()));
+      if (expected.immediate() ==
+          SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
+        // Don't worry about adapting arguments for built-ins that
+        // don't want that done. Skip the adaptation code by making it
+        // look like we have a match between the expected and actual
+        // number of arguments.
+        definitely_matches = true;
+      } else {
+        movq(rbx, Immediate(expected.immediate()));
+      }
+    }
+  } else {
+    if (actual.is_immediate()) {
+      // Expected is in register, actual is immediate. This is the
+      // case when we invoke function values without going through the
+      // IC mechanism.
+      cmpq(expected.reg(), Immediate(actual.immediate()));
+      j(equal, &invoke);
+      ASSERT(expected.reg().is(rbx));
+      movq(rax, Immediate(actual.immediate()));
+    } else if (!expected.reg().is(actual.reg())) {
+      // Both expected and actual are in (different) registers. This
+      // is the case when we invoke functions using call and apply.
+      cmpq(expected.reg(), actual.reg());
+      j(equal, &invoke);
+      ASSERT(actual.reg().is(rax));
+      ASSERT(expected.reg().is(rbx));
+    }
+  }
+
+  if (!definitely_matches) {
+    Handle<Code> adaptor =
+        Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+    if (!code_constant.is_null()) {
+      movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
+      addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+    } else if (!code_register.is(rdx)) {
+      movq(rdx, code_register);
+    }
+
+    movq(kScratchRegister, adaptor, RelocInfo::CODE_TARGET);
+    if (flag == CALL_FUNCTION) {
+      call(kScratchRegister);
+      jmp(done);
+    } else {
+      jmp(kScratchRegister);
+    }
+    bind(&invoke);
+  }
+}
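
The prologue's job reduces to one decision: call the code directly or route
the call through the ArgumentsAdaptorTrampoline. A compact sketch of that
predicate (the helper and the sentinel value are illustrative; the real
sentinel is SharedFunctionInfo::kDontAdaptArgumentsSentinel):

    #include <cassert>

    static bool NeedsArgumentsAdaptor(int expected, int actual,
                                      int dont_adapt_sentinel) {
      if (expected == actual) return false;  // exact match: call directly
      if (expected == dont_adapt_sentinel) return false;  // builtin opts out
      return true;  // mismatch: go through the adaptor trampoline
    }

    int main() {
      const int kSentinel = -1;  // illustrative stand-in for the sentinel
      assert(!NeedsArgumentsAdaptor(2, 2, kSentinel));
      assert(!NeedsArgumentsAdaptor(kSentinel, 5, kSentinel));
      assert(NeedsArgumentsAdaptor(2, 3, kSentinel));
      return 0;
    }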
+
+
+
+
+void MacroAssembler::InvokeCode(Register code,
+                                const ParameterCount& expected,
+                                const ParameterCount& actual,
+                                InvokeFlag flag) {
+  Label done;
+  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
+  if (flag == CALL_FUNCTION) {
+    call(code);
+  } else {
+    ASSERT(flag == JUMP_FUNCTION);
+    jmp(code);
+  }
+  bind(&done);
+}
+
+
+void MacroAssembler::InvokeCode(Handle<Code> code,
+                                const ParameterCount& expected,
+                                const ParameterCount& actual,
+                                RelocInfo::Mode rmode,
+                                InvokeFlag flag) {
+  Label done;
+  Register dummy = rax;
+  InvokePrologue(expected, actual, code, dummy, &done, flag);
+  movq(kScratchRegister, code, rmode);
+  if (flag == CALL_FUNCTION) {
+    call(kScratchRegister);
+  } else {
+    ASSERT(flag == JUMP_FUNCTION);
+    jmp(kScratchRegister);
+  }
+  bind(&done);
+}
+
+
+void MacroAssembler::InvokeFunction(Register function,
+                                    const ParameterCount& actual,
+                                    InvokeFlag flag) {
+  ASSERT(function.is(rdi));
+  movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+  movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
+  movl(rbx, FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
+  movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
+  // Advances rdx to the end of the Code object headers, to the start of
+  // the executable code.
+  lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
+
+  ParameterCount expected(rbx);
+  InvokeCode(rdx, expected, actual, flag);
+}
+
+
+void MacroAssembler::EnterFrame(StackFrame::Type type) {
+  push(rbp);
+  movq(rbp, rsp);
+  push(rsi);  // Context.
+  push(Immediate(Smi::FromInt(type)));
+  movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
+  push(kScratchRegister);
+  if (FLAG_debug_code) {
+    movq(kScratchRegister,
+         Factory::undefined_value(),
+         RelocInfo::EMBEDDED_OBJECT);
+    cmpq(Operand(rsp, 0), kScratchRegister);
+    Check(not_equal, "code object not properly patched");
+  }
+}
+
+
+void MacroAssembler::LeaveFrame(StackFrame::Type type) {
+  if (FLAG_debug_code) {
+    movq(kScratchRegister, Immediate(Smi::FromInt(type)));
+    cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
+    Check(equal, "stack frame types must match");
+  }
+  movq(rsp, rbp);
+  pop(rbp);
+}
+
+
+
+void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
+  ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
+
+  // Set up the frame structure on the stack.
+  ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
+  ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
+  ASSERT(ExitFrameConstants::kCallerFPOffset ==  0 * kPointerSize);
+  push(rbp);
+  movq(rbp, rsp);
+
+  // Reserve room for entry stack pointer and push the debug marker.
+  ASSERT(ExitFrameConstants::kSPOffset  == -1 * kPointerSize);
+  push(Immediate(0));  // saved entry sp, patched before call
+  push(Immediate(type == StackFrame::EXIT_DEBUG ? 1 : 0));
+
+  // Save the frame pointer and the context in top.
+  ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
+  ExternalReference context_address(Top::k_context_address);
+  movq(rdi, rax);  // Backup rax before we use it.
+
+  movq(rax, rbp);
+  store_rax(c_entry_fp_address);
+  movq(rax, rsi);
+  store_rax(context_address);
+
+  // Set up argv in the callee-saved register r15. It is reused in
+  // LeaveExitFrame, so it must be retained across the C call.
+  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
+  lea(r15, Operand(rbp, rdi, kTimesPointerSize, offset));
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Save the state of all registers to the stack from the memory
+  // location. This is needed to allow nested break points.
+  if (type == StackFrame::EXIT_DEBUG) {
+    // TODO(1243899): This should be symmetric to
+    // CopyRegistersFromStackToMemory() but it isn't! rsp is assumed
+    // correct here, but computed for the other call. Very error
+    // prone! FIX THIS.  Actually there are deeper problems with
+    // register saving than this asymmetry (see the bug report
+    // associated with this issue).
+    PushRegistersFromMemory(kJSCallerSaved);
+  }
+#endif
+
+  // Reserve space for two arguments: argc and argv
+  subq(rsp, Immediate(2 * kPointerSize));
+
+  // Get the required frame alignment for the OS.
+  static const int kFrameAlignment = OS::ActivationFrameAlignment();
+  if (kFrameAlignment > 0) {
+    ASSERT(IsPowerOf2(kFrameAlignment));
+    movq(kScratchRegister, Immediate(-kFrameAlignment));
+    and_(rsp, kScratchRegister);
+  }
+
+  // Patch the saved entry sp.
+  movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
+}
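
The and_ with -kFrameAlignment above rounds rsp down to the OS-required
multiple. A quick sketch of why the mask works for power-of-two alignments:

    #include <cassert>
    #include <cstdint>

    int main() {
      const intptr_t kFrameAlignment = 16;  // typical x64 ABI requirement
      intptr_t rsp = 0x7fff1238;
      rsp &= -kFrameAlignment;  // -16 == ~15: clears the low four bits
      assert(rsp == 0x7fff1230 && rsp % kFrameAlignment == 0);
      return 0;
    }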
+
+
+void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
+  // Registers:
+  // r15 : argv
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Restore the memory copy of the registers by digging them out from
+  // the stack. This is needed to allow nested break points.
+  if (type == StackFrame::EXIT_DEBUG) {
+    // It's okay to clobber register rbx below because we don't need
+    // the function pointer after this.
+    const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
+    int kOffset = ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize;
+    lea(rbx, Operand(rbp, kOffset));
+    CopyRegistersFromStackToMemory(rbx, rcx, kJSCallerSaved);
+  }
+#endif
+
+  // Get the return address from the stack and restore the frame pointer.
+  movq(rcx, Operand(rbp, 1 * kPointerSize));
+  movq(rbp, Operand(rbp, 0 * kPointerSize));
+
+  // Pop the arguments and the receiver from the caller stack.
+  lea(rsp, Operand(r15, 1 * kPointerSize));
+
+  // Restore current context from top and clear it in debug mode.
+  ExternalReference context_address(Top::k_context_address);
+  movq(kScratchRegister, context_address);
+  movq(rsi, Operand(kScratchRegister, 0));
+#ifdef DEBUG
+  movq(Operand(kScratchRegister, 0), Immediate(0));
+#endif
+
+  // Push the return address to get ready to return.
+  push(rcx);
+
+  // Clear the top frame.
+  ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
+  movq(kScratchRegister, c_entry_fp_address);
+  movq(Operand(kScratchRegister, 0), Immediate(0));
+}
+
+
 } }  // namespace v8::internal
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 4af372a..51e998c 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -67,6 +67,16 @@
   MacroAssembler(void* buffer, int size);
 
   // ---------------------------------------------------------------------------
+  // x64 Implementation Support
+
+  // Test the MacroAssembler by constructing and calling a simple JSFunction.
+  // Cannot be done using API because this must be done in the middle of the
+  // bootstrapping process.
+  // TODO(X64): Remove once we can get through the bootstrapping process.
+
+  static void ConstructAndTestJSFunction();
+
+  // ---------------------------------------------------------------------------
   // GC Support
 
   // Set the remembered set bit for [object+offset].
@@ -117,7 +127,7 @@
   // JavaScript invokes
 
   // Invoke the JavaScript function code by either calling or jumping.
-  void InvokeCode(const Operand& code,
+  void InvokeCode(Register code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
                   InvokeFlag flag);
@@ -141,10 +151,19 @@
   // Store the code object for the given builtin in the target register.
   void GetBuiltinEntry(Register target, Builtins::JavaScript id);
 
+  // ---------------------------------------------------------------------------
+  // Macro instructions
+
   // Expression support
   void Set(Register dst, int64_t x);
   void Set(const Operand& dst, int64_t x);
 
+  // Control Flow
+  void Jump(Address destination, RelocInfo::Mode rmode);
+  void Jump(ExternalReference ext);
+  void Call(Address destination, RelocInfo::Mode rmode);
+  void Call(ExternalReference ext);
+
   // Compare object type for heap object.
   // Incoming register is heap_object and outgoing register is map.
   void CmpObjectType(Register heap_object, InstanceType type, Register map);
@@ -159,9 +178,8 @@
   // ---------------------------------------------------------------------------
   // Exception handling
 
-  // Push a new try handler and link into try handler chain.
-  // The return address must be pushed before calling this helper.
-  // On exit, rax contains TOS (next_sp).
+  // Push a new try handler and link into try handler chain.  The return
+  // address must be pushed before calling this helper.
   void PushTryHandler(CodeLocation try_location, HandlerType type);
 
 
@@ -292,13 +310,13 @@
   bool generating_stub_;
   bool allow_stub_calls_;
   Handle<Object> code_object_;  // This handle will be patched with the code
-                                // code object on installation.
+                                // object on installation.
 
   // Helper functions for generating invokes.
   void InvokePrologue(const ParameterCount& expected,
                       const ParameterCount& actual,
                       Handle<Code> code_constant,
-                      const Operand& code_operand,
+                      Register code_register,
                       Label* done,
                       InvokeFlag flag);
 
diff --git a/src/x64/register-allocator-x64-inl.h b/src/x64/register-allocator-x64-inl.h
index f369d7d..926dd64 100644
--- a/src/x64/register-allocator-x64-inl.h
+++ b/src/x64/register-allocator-x64-inl.h
@@ -37,33 +37,50 @@
 // RegisterAllocator implementation.
 
 bool RegisterAllocator::IsReserved(Register reg) {
-  // All registers are reserved for now.
-  return true;
+  return reg.is(rsp) || reg.is(rbp) || reg.is(rsi) ||
+      reg.is(kScratchRegister);
 }
 
 
 // The register allocator uses small integers to represent the
 // non-reserved assembler registers.
-
 int RegisterAllocator::ToNumber(Register reg) {
   ASSERT(reg.is_valid() && !IsReserved(reg));
-  UNIMPLEMENTED();
-  return -1;
+  static const int numbers[] = {
+    0,   // rax
+    2,   // rcx
+    3,   // rdx
+    1,   // rbx
+    -1,  // rsp
+    -1,  // rbp
+    -1,  // rsi
+    4,   // rdi
+    5,   // r8
+    6,   // r9
+    -1,  // r10
+    7,   // r11
+    11,  // r12
+    10,  // r13
+    8,   // r14
+    9    // r15
+  };
+  return numbers[reg.code()];
 }
 
 
 Register RegisterAllocator::ToRegister(int num) {
   ASSERT(num >= 0 && num < kNumRegisters);
-  UNIMPLEMENTED();
-  return no_reg;
+  static Register registers[] =
+      { rax, rbx, rcx, rdx, rdi, r8, r9, r11, r14, r15, r13, r12 };
+  return registers[num];
 }
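
The two tables are meant to be mutual inverses over the twelve allocatable
registers. A standalone consistency check, with the x64 hardware register
codes written out as plain integers (rax=0, rcx=1, rdx=2, rbx=3, rdi=7,
r8..r15=8..15):

    #include <cassert>

    int main() {
      // numbers[code] from ToNumber, indexed by hardware register code.
      const int numbers[16] =
          { 0, 2, 3, 1, -1, -1, -1, 4, 5, 6, -1, 7, 11, 10, 8, 9 };
      // Register codes in allocator order, mirroring ToRegister's table
      // { rax, rbx, rcx, rdx, rdi, r8, r9, r11, r14, r15, r13, r12 }.
      const int codes[12] = { 0, 3, 1, 2, 7, 8, 9, 11, 14, 15, 13, 12 };
      for (int n = 0; n < 12; n++) assert(numbers[codes[n]] == n);
      return 0;
    }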
 
 
 void RegisterAllocator::Initialize() {
-  UNIMPLEMENTED();
+  Reset();
+  // The non-reserved rdi register is live on JS function entry.
+  Use(rdi);  // JS function.
 }
-
-
 } }  // namespace v8::internal
 
 #endif  // V8_X64_REGISTER_ALLOCATOR_X64_INL_H_
diff --git a/src/x64/register-allocator-x64.cc b/src/x64/register-allocator-x64.cc
index 209aa2d..3aba60f 100644
--- a/src/x64/register-allocator-x64.cc
+++ b/src/x64/register-allocator-x64.cc
@@ -25,3 +25,66 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "register-allocator-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// Result implementation.
+
+void Result::ToRegister() {
+  ASSERT(is_valid());
+  if (is_constant()) {
+    // TODO(X64): Handle constant results.
+    /*
+    Result fresh = CodeGeneratorScope::Current()->allocator()->Allocate();
+    ASSERT(fresh.is_valid());
+    if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) {
+      CodeGeneratorScope::Current()->LoadUnsafeSmi(fresh.reg(), handle());
+    } else {
+      CodeGeneratorScope::Current()->masm()->Set(fresh.reg(),
+                                                 Immediate(handle()));
+    }
+    // This result becomes a copy of the fresh one.
+    *this = fresh;
+    */
+  }
+  ASSERT(is_register());
+}
+
+
+void Result::ToRegister(Register target) {
+  ASSERT(is_valid());
+  if (!is_register() || !reg().is(target)) {
+    Result fresh = CodeGeneratorScope::Current()->allocator()->Allocate(target);
+    ASSERT(fresh.is_valid());
+    if (is_register()) {
+      CodeGeneratorScope::Current()->masm()->movq(fresh.reg(), reg());
+    } else {
+      ASSERT(is_constant());
+      /*
+      TODO(X64): Handle constant results.
+      if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) {
+        CodeGeneratorScope::Current()->LoadUnsafeSmi(fresh.reg(), handle());
+      } else {
+        CodeGeneratorScope::Current()->masm()->Set(fresh.reg(),
+                                                   Immediate(handle()));
+      }
+      */
+    }
+    *this = fresh;
+  } else if (is_register() && reg().is(target)) {
+    ASSERT(CodeGeneratorScope::Current()->has_valid_frame());
+    CodeGeneratorScope::Current()->frame()->Spill(target);
+    ASSERT(CodeGeneratorScope::Current()->allocator()->count(target) == 1);
+  }
+  ASSERT(is_register());
+  ASSERT(reg().is(target));
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/x64/register-allocator-x64.h b/src/x64/register-allocator-x64.h
index bc08112..8672796 100644
--- a/src/x64/register-allocator-x64.h
+++ b/src/x64/register-allocator-x64.h
@@ -35,7 +35,7 @@
  public:
   // Register allocation is not yet implemented on x64, but C++
   // forbids 0-length arrays so we use 1 as the number of registers.
-  static const int kNumRegisters = 1;
+  static const int kNumRegisters = 12;
   static const int kInvalidRegister = -1;
 };
 
diff --git a/src/x64/simulator-x64.h b/src/x64/simulator-x64.h
index 8160e53..6b4d718 100644
--- a/src/x64/simulator-x64.h
+++ b/src/x64/simulator-x64.h
@@ -31,6 +31,7 @@
 
 // Since there is no simulator for the ia32 architecture the only thing we can
 // do is to call the entry directly.
+// TODO(X64): Don't pass p0, since it isn't used?
 #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
   entry(p0, p1, p2, p3, p4);
 
diff --git a/src/x64/virtual-frame-x64.cc b/src/x64/virtual-frame-x64.cc
index 209aa2d..e6975fa 100644
--- a/src/x64/virtual-frame-x64.cc
+++ b/src/x64/virtual-frame-x64.cc
@@ -25,3 +25,172 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "register-allocator-inl.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm())
+
+// -------------------------------------------------------------------------
+// VirtualFrame implementation.
+
+// On entry to a function, the virtual frame already contains the receiver,
+// the parameters, and a return address.  All frame elements are in memory.
+VirtualFrame::VirtualFrame()
+    : elements_(parameter_count() + local_count() + kPreallocatedElements),
+      stack_pointer_(parameter_count() + 1) {  // 0-based index of TOS.
+  for (int i = 0; i <= stack_pointer_; i++) {
+    elements_.Add(FrameElement::MemoryElement());
+  }
+  for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+    register_locations_[i] = kIllegalIndex;
+  }
+}
+
+
+void VirtualFrame::Enter() {
+  // Registers live on entry to a JS frame:
+  //   rsp: stack pointer, points to return address from this function.
+  //   rbp: base pointer, points to previous JS, ArgumentsAdaptor, or
+  //        Trampoline frame.
+  //   rsi: context of this function call.
+  //   rdi: pointer to this function object.
+  Comment cmnt(masm(), "[ Enter JS frame");
+
+#ifdef DEBUG
+  // Verify that rdi contains a JS function.  The following code
+  // relies on rax being available for use.
+  __ testq(rdi, Immediate(kSmiTagMask));
+  __ Check(not_zero,
+           "VirtualFrame::Enter - rdi is not a function (smi check).");
+  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
+  __ Check(equal,
+           "VirtualFrame::Enter - rdi is not a function (map check).");
+#endif
+
+  EmitPush(rbp);
+
+  __ movq(rbp, rsp);
+
+  // Store the context in the frame.  The context is kept in rsi and a
+  // copy is stored in the frame.  The external reference to rsi
+  // remains.
+  EmitPush(rsi);
+
+  // Store the function in the frame.  The frame owns the register
+  // reference now (i.e., it can keep it in rdi or spill it later).
+  Push(rdi);
+  // SyncElementAt(element_count() - 1);
+  cgen()->allocator()->Unuse(rdi);
+}
+
+
+void VirtualFrame::Exit() {
+  Comment cmnt(masm(), "[ Exit JS frame");
+  // Record the location of the JS exit code for patching when setting
+  // break point.
+  __ RecordJSReturn();
+
+  // Avoid using the leave instruction here, because it is too
+  // short. We need the return sequence to be at least the size of a
+  // call instruction to support patching the exit code in the
+  // debugger. See VisitReturnStatement for the full return sequence.
+  // TODO(X64): A patched call will be very long now.  Make sure we
+  // have enough room.
+  __ movq(rsp, rbp);
+  stack_pointer_ = frame_pointer();
+  for (int i = element_count() - 1; i > stack_pointer_; i--) {
+    FrameElement last = elements_.RemoveLast();
+    if (last.is_register()) {
+      Unuse(last.reg());
+    }
+  }
+
+  EmitPop(rbp);
+}
+
+
+void VirtualFrame::EmitPop(Register reg) {
+  ASSERT(stack_pointer_ == element_count() - 1);
+  stack_pointer_--;
+  elements_.RemoveLast();
+  __ pop(reg);
+}
+
+
+void VirtualFrame::EmitPop(const Operand& operand) {
+  ASSERT(stack_pointer_ == element_count() - 1);
+  stack_pointer_--;
+  elements_.RemoveLast();
+  __ pop(operand);
+}
+
+
+void VirtualFrame::EmitPush(Register reg) {
+  ASSERT(stack_pointer_ == element_count() - 1);
+  elements_.Add(FrameElement::MemoryElement());
+  stack_pointer_++;
+  __ push(reg);
+}
+
+
+void VirtualFrame::EmitPush(const Operand& operand) {
+  ASSERT(stack_pointer_ == element_count() - 1);
+  elements_.Add(FrameElement::MemoryElement());
+  stack_pointer_++;
+  __ push(operand);
+}
+
+
+void VirtualFrame::EmitPush(Immediate immediate) {
+  ASSERT(stack_pointer_ == element_count() - 1);
+  elements_.Add(FrameElement::MemoryElement());
+  stack_pointer_++;
+  __ push(immediate);
+}
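
Each EmitPush/EmitPop pair preserves the invariant that stack_pointer_ indexes
the last element actually materialized in memory. A toy model of the
bookkeeping (std::vector stands in for the internal element list):

    #include <cassert>
    #include <vector>

    int main() {
      std::vector<int> elements;  // stand-in for elements_
      int stack_pointer = -1;     // 0-based index of TOS, -1 when empty

      // EmitPush: record a memory element, then emit the hardware push.
      elements.push_back(0);
      stack_pointer++;
      assert(stack_pointer == static_cast<int>(elements.size()) - 1);

      // EmitPop: emit the hardware pop, then drop the element record.
      stack_pointer--;
      elements.pop_back();
      assert(stack_pointer == static_cast<int>(elements.size()) - 1);
      return 0;
    }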
+
+
+void VirtualFrame::Drop(int a) {
+  UNIMPLEMENTED();
+}
+
+int VirtualFrame::InvalidateFrameSlotAt(int a) {
+  UNIMPLEMENTED();
+  return -1;
+}
+
+void VirtualFrame::MergeTo(VirtualFrame* a) {
+  UNIMPLEMENTED();
+}
+
+Result VirtualFrame::Pop() {
+  UNIMPLEMENTED();
+  return Result(NULL);
+}
+
+Result VirtualFrame::RawCallStub(CodeStub* a) {
+  UNIMPLEMENTED();
+  return Result(NULL);
+}
+
+void VirtualFrame::SyncElementBelowStackPointer(int a) {
+  UNIMPLEMENTED();
+}
+
+void VirtualFrame::SyncElementByPushing(int a) {
+  UNIMPLEMENTED();
+}
+
+void VirtualFrame::SyncRange(int a, int b) {
+  UNIMPLEMENTED();
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/x64/virtual-frame-x64.h b/src/x64/virtual-frame-x64.h
index d341a1e..2d3bf30 100644
--- a/src/x64/virtual-frame-x64.h
+++ b/src/x64/virtual-frame-x64.h
@@ -372,12 +372,12 @@
   // Pop and save an element from the top of the expression stack and
   // emit a corresponding pop instruction.
   void EmitPop(Register reg);
-  void EmitPop(Operand operand);
+  void EmitPop(const Operand& operand);
 
   // Push an element on top of the expression stack and emit a
   // corresponding push instruction.
   void EmitPush(Register reg);
-  void EmitPush(Operand operand);
+  void EmitPush(const Operand& operand);
   void EmitPush(Immediate immediate);
 
   // Push an element on the virtual frame.
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index 48157d8..88fe00e 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -551,6 +551,7 @@
     CHECK(isymbol->IsSymbol());
   }
   i::Heap::CollectAllGarbage();
+  i::Heap::CollectAllGarbage();
 }
 
 
@@ -568,6 +569,7 @@
     CHECK(isymbol->IsSymbol());
   }
   i::Heap::CollectAllGarbage();
+  i::Heap::CollectAllGarbage();
 }
 
 
@@ -2281,7 +2283,7 @@
 }
 
 
-THREADED_TEST(NamedInterceporPropertyRead) {
+THREADED_TEST(NamedInterceptorPropertyRead) {
   v8::HandleScope scope;
   Local<ObjectTemplate> templ = ObjectTemplate::New();
   templ->SetNamedPropertyHandler(XPropertyGetter);
@@ -2294,6 +2296,58 @@
   }
 }
 
+
+static v8::Handle<Value> IndexedPropertyGetter(uint32_t index,
+                                               const AccessorInfo& info) {
+  ApiTestFuzzer::Fuzz();
+  if (index == 37) {
+    return v8::Handle<Value>(v8_num(625));
+  }
+  return v8::Handle<Value>();
+}
+
+
+static v8::Handle<Value> IndexedPropertySetter(uint32_t index,
+                                               Local<Value> value,
+                                               const AccessorInfo& info) {
+  ApiTestFuzzer::Fuzz();
+  if (index == 39) {
+    return value;
+  }
+  return v8::Handle<Value>();
+}
+
+
+THREADED_TEST(IndexedInterceptorWithIndexedAccessor) {
+  v8::HandleScope scope;
+  Local<ObjectTemplate> templ = ObjectTemplate::New();
+  templ->SetIndexedPropertyHandler(IndexedPropertyGetter,
+                                   IndexedPropertySetter);
+  LocalContext context;
+  context->Global()->Set(v8_str("obj"), templ->NewInstance());
+  Local<Script> getter_script = Script::Compile(v8_str(
+      "obj.__defineGetter__(\"3\", function(){return 5;});obj[3];"));
+  Local<Script> setter_script = Script::Compile(v8_str(
+      "obj.__defineSetter__(\"17\", function(val){this.foo = val;});"
+      "obj[17] = 23;"
+      "obj.foo;"));
+  Local<Script> interceptor_setter_script = Script::Compile(v8_str(
+      "obj.__defineSetter__(\"39\", function(val){this.foo = \"hit\";});"
+      "obj[39] = 47;"
+      "obj.foo;"));  // This setter should not run, due to the interceptor.
+  Local<Script> interceptor_getter_script = Script::Compile(v8_str(
+      "obj[37];"));
+  Local<Value> result = getter_script->Run();
+  CHECK_EQ(v8_num(5), result);
+  result = setter_script->Run();
+  CHECK_EQ(v8_num(23), result);
+  result = interceptor_setter_script->Run();
+  CHECK_EQ(v8_num(23), result);
+  result = interceptor_getter_script->Run();
+  CHECK_EQ(v8_num(625), result);
+}
+
+
 THREADED_TEST(MultiContexts) {
   v8::HandleScope scope;
   v8::Handle<ObjectTemplate> templ = ObjectTemplate::New();
@@ -5008,6 +5062,22 @@
 }
 
 
+THREADED_TEST(InterceptorStoreICWithNoSetter) {
+  v8::HandleScope scope;
+  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+  templ->SetNamedPropertyHandler(InterceptorLoadXICGetter);
+  LocalContext context;
+  context->Global()->Set(v8_str("o"), templ->NewInstance());
+  v8::Handle<Value> value = CompileRun(
+    "for (var i = 0; i < 1000; i++) {"
+    "  o.y = 239;"
+    "}"
+    "42 + o.y");
+  CHECK_EQ(239 + 42, value->Int32Value());
+}
+
+
+
 
 v8::Handle<Value> call_ic_function;
 v8::Handle<Value> call_ic_function2;
diff --git a/test/cctest/test-assembler-x64.cc b/test/cctest/test-assembler-x64.cc
index 43ba4e9..9e95f32 100644
--- a/test/cctest/test-assembler-x64.cc
+++ b/test/cctest/test-assembler-x64.cc
@@ -132,7 +132,7 @@
 
   // Assemble a simple function that copies argument 2 and returns it.
   __ movq(rax, rsi);
-  __ add(rax, rdi);
+  __ addq(rax, rdi);
   __ ret(0);
 
   CodeDesc desc;
@@ -215,12 +215,12 @@
   Label Loop1_body;
   __ jmp(&Loop1_test);
   __ bind(&Loop1_body);
-  __ add(rax, Immediate(7));
+  __ addq(rax, Immediate(7));
   __ bind(&Loop1_test);
-  __ cmp(rax, Immediate(20));
+  __ cmpq(rax, Immediate(20));
   __ j(less_equal, &Loop1_body);
   // Did the loop terminate with the expected value?
-  __ cmp(rax, Immediate(25));
+  __ cmpq(rax, Immediate(25));
   __ j(not_equal, &Fail);
 
   Label Loop2_test;
@@ -228,12 +228,12 @@
   __ movq(rax, Immediate(0x11FEED00));
   __ jmp(&Loop2_test);
   __ bind(&Loop2_body);
-  __ add(rax, Immediate(-0x1100));
+  __ addq(rax, Immediate(-0x1100));
   __ bind(&Loop2_test);
-  __ cmp(rax, Immediate(0x11FE8000));
+  __ cmpq(rax, Immediate(0x11FE8000));
   __ j(greater, &Loop2_body);
   // Did the loop terminate with the expected value?
-  __ cmp(rax, Immediate(0x11FE7600));
+  __ cmpq(rax, Immediate(0x11FE7600));
   __ j(not_equal, &Fail);
 
   __ movq(rax, Immediate(1));
@@ -248,4 +248,5 @@
   int result =  FUNCTION_CAST<F0>(buffer)();
   CHECK_EQ(1, result);
 }
+
 #undef __
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index 92f48e1..7669b43 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -2237,6 +2237,52 @@
 }
 
 
+// Test of the stepping mechanism for keyed store in a loop.
+TEST(DebugStepKeyedStoreLoop) {
+  v8::HandleScope scope;
+  DebugLocalContext env;
+
+  // Create a function for testing stepping of keyed store. The statement
+  // 'y=1' gives the loop more than one breakable statement; see TODO(315).
+  v8::Local<v8::Function> foo = CompileFunction(
+      &env,
+      "function foo(a) {\n"
+      "  var len = a.length;\n"
+      "  for (var i = 0; i < len; i++) {\n"
+      "    y = 1;\n"
+      "    a[i] = 42;\n"
+      "  }\n"
+      "}\n",
+      "foo");
+
+  // Create the array [0,1,2,3,4,5,6,7,8,9].
+  v8::Local<v8::Array> a = v8::Array::New(10);
+  for (int i = 0; i < 10; i++) {
+    a->Set(v8::Number::New(i), v8::Number::New(i));
+  }
+
+  // Call function without any break points to ensure inlining is in place.
+  const int kArgc = 1;
+  v8::Handle<v8::Value> args[kArgc] = { a };
+  foo->Call(env->Global(), kArgc, args);
+
+  // Register a debug event listener which steps and counts.
+  v8::Debug::SetDebugEventListener(DebugEventStep);
+
+  // Set up a break point and step through the function.
+  SetBreakPoint(foo, 3);
+  step_action = StepNext;
+  break_point_hit_count = 0;
+  foo->Call(env->Global(), kArgc, args);
+
+  // With stepping all break locations are hit.
+  CHECK_EQ(22, break_point_hit_count);
+
+  v8::Debug::SetDebugEventListener(NULL);
+  CheckDebuggerUnloaded();
+}
+
+
 // Test the stepping mechanism with different ICs.
 TEST(DebugStepLinearMixedICs) {
   v8::HandleScope scope;
diff --git a/test/cctest/test-log-utils.cc b/test/cctest/test-log-utils.cc
index 64e5900..a08a0a1 100644
--- a/test/cctest/test-log-utils.cc
+++ b/test/cctest/test-log-utils.cc
@@ -9,8 +9,12 @@
 #include "log-utils.h"
 #include "cctest.h"
 
+using v8::internal::CStrVector;
 using v8::internal::EmbeddedVector;
 using v8::internal::LogDynamicBuffer;
+using v8::internal::LogRecordCompressor;
+using v8::internal::MutableCStrVector;
+using v8::internal::ScopedVector;
 using v8::internal::Vector;
 
 // Fills 'ref_buffer' with test data: a sequence of two-digit
@@ -47,9 +51,13 @@
                                      const Vector<V>& value) {
   if (expected.length() != value.length()) {
     V8_Fatal(file, line, "CHECK_EQ(%s, %s) failed\n"
-             "#   Vectors lengths differ: %d expected, %d found",
+             "#   Vector lengths differ: %d expected, %d found\n"
+             "#   Expected: %.*s\n"
+             "#   Found: %.*s",
              expected_source, value_source,
-             expected.length(), value.length());
+             expected.length(), value.length(),
+             expected.length(), expected.start(),
+             value.length(), value.start());
   }
   if (strncmp(expected.start(), value.start(), expected.length()) != 0) {
     V8_Fatal(file, line, "CHECK_EQ(%s, %s) failed\n"
@@ -124,9 +132,178 @@
   // Check the seal.
   EmbeddedVector<char, 50> seal_buf;
   CHECK_EQ(seal_size, ReadData(&dynabuf, 100, &seal_buf));
-  CHECK_EQ(v8::internal::CStrVector(seal), seal_buf.SubVector(0, seal_size));
+  CHECK_EQ(CStrVector(seal), seal_buf.SubVector(0, seal_size));
   // Verify that there's no data beyond the seal.
   CHECK_EQ(0, ReadData(&dynabuf, 100 + seal_size, &buf));
 }
 
+
+TEST(CompressorStore) {
+  LogRecordCompressor comp(2);
+  const Vector<const char> empty = CStrVector("");
+  CHECK(comp.Store(empty));
+  CHECK(!comp.Store(empty));
+  CHECK(!comp.Store(empty));
+  const Vector<const char> aaa = CStrVector("aaa");
+  CHECK(comp.Store(aaa));
+  CHECK(!comp.Store(aaa));
+  CHECK(!comp.Store(aaa));
+  CHECK(comp.Store(empty));
+  CHECK(!comp.Store(empty));
+  CHECK(!comp.Store(empty));
+}
+
+
+void CheckCompression(LogRecordCompressor* comp,
+                      const Vector<const char>& after) {
+  EmbeddedVector<char, 100> result;
+  CHECK(comp->RetrievePreviousCompressed(&result));
+  CHECK_EQ(after, result);
+}
+
+
+void CheckCompression(LogRecordCompressor* comp,
+                      const char* after) {
+  CheckCompression(comp, CStrVector(after));
+}
+
+
+TEST(CompressorNonCompressed) {
+  LogRecordCompressor comp(0);
+  CHECK(!comp.RetrievePreviousCompressed(NULL));
+  const Vector<const char> empty = CStrVector("");
+  CHECK(comp.Store(empty));
+  CHECK(!comp.RetrievePreviousCompressed(NULL));
+  const Vector<const char> a_x_20 = CStrVector("aaaaaaaaaaaaaaaaaaaa");
+  CHECK(comp.Store(a_x_20));
+  CheckCompression(&comp, empty);
+  CheckCompression(&comp, empty);
+  CHECK(comp.Store(empty));
+  CheckCompression(&comp, a_x_20);
+  CheckCompression(&comp, a_x_20);
+}
+
+
+TEST(CompressorSingleLine) {
+  LogRecordCompressor comp(1);
+  const Vector<const char> string_1 = CStrVector("eee,ddd,ccc,bbb,aaa");
+  CHECK(comp.Store(string_1));
+  const Vector<const char> string_2 = CStrVector("fff,ddd,ccc,bbb,aaa");
+  CHECK(comp.Store(string_2));
+  // string_1 hasn't been compressed.
+  CheckCompression(&comp, string_1);
+  CheckCompression(&comp, string_1);
+  const Vector<const char> string_3 = CStrVector("hhh,ggg,ccc,bbb,aaa");
+  CHECK(comp.Store(string_3));
+  // string_2 compressed using string_1.
+  CheckCompression(&comp, "fff#1:3");
+  CheckCompression(&comp, "fff#1:3");
+  CHECK(!comp.Store(string_3));
+  // Expecting no changes.
+  CheckCompression(&comp, "fff#1:3");
+  CHECK(!comp.Store(string_3));
+  // Expecting no changes.
+  CheckCompression(&comp, "fff#1:3");
+  const Vector<const char> string_4 = CStrVector("iii,hhh,ggg,ccc,bbb,aaa");
+  CHECK(comp.Store(string_4));
+  // string_3 compressed using string_2.
+  CheckCompression(&comp, "hhh,ggg#1:7");
+  const Vector<const char> string_5 = CStrVector("nnn,mmm,lll,kkk,jjj");
+  CHECK(comp.Store(string_5));
+  // string_4 compressed using string_3.
+  CheckCompression(&comp, "iii,#1");
+  const Vector<const char> string_6 = CStrVector("nnn,mmmmmm,lll,kkk,jjj");
+  CHECK(comp.Store(string_6));
+  // string_5 hasn't been compressed.
+  CheckCompression(&comp, string_5);
+  CHECK(comp.Store(string_5));
+  // string_6 compressed using string_5.
+  CheckCompression(&comp, "nnn,mmm#1:4");
+  const Vector<const char> string_7 = CStrVector("nnnnnn,mmm,lll,kkk,jjj");
+  CHECK(comp.Store(string_7));
+  // string_5 compressed using string_6.
+  CheckCompression(&comp, "nnn,#1:7");
+  const Vector<const char> string_8 = CStrVector("xxn,mmm,lll,kkk,jjj");
+  CHECK(comp.Store(string_8));
+  // string_7 compressed using string_5.
+  CheckCompression(&comp, "nnn#1");
+  const Vector<const char> string_9 =
+      CStrVector("aaaaaaaaaaaaa,bbbbbbbbbbbbbbbbb");
+  CHECK(comp.Store(string_9));
+  // string_8 compressed using string_7.
+  CheckCompression(&comp, "xx#1:5");
+  const Vector<const char> string_10 =
+      CStrVector("aaaaaaaaaaaaa,cccccccbbbbbbbbbb");
+  CHECK(comp.Store(string_10));
+  // string_9 hasn't been compressed.
+  CheckCompression(&comp, string_9);
+  CHECK(comp.Store(string_1));
+  // string_10 compressed using string_9.
+  CheckCompression(&comp, "aaaaaaaaaaaaa,ccccccc#1:21");
+}
+
+
+TEST(CompressorMultiLines) {
+  const int kWindowSize = 3;
+  LogRecordCompressor comp(kWindowSize);
+  const Vector<const char> string_1 = CStrVector("eee,ddd,ccc,bbb,aaa");
+  CHECK(comp.Store(string_1));
+  const Vector<const char> string_2 = CStrVector("iii,hhh,ggg,fff,aaa");
+  CHECK(comp.Store(string_2));
+  const Vector<const char> string_3 = CStrVector("mmm,lll,kkk,jjj,aaa");
+  CHECK(comp.Store(string_3));
+  const Vector<const char> string_4 = CStrVector("nnn,hhh,ggg,fff,aaa");
+  CHECK(comp.Store(string_4));
+  const Vector<const char> string_5 = CStrVector("ooo,lll,kkk,jjj,aaa");
+  CHECK(comp.Store(string_5));
+  // string_4 compressed using string_2.
+  CheckCompression(&comp, "nnn#2:3");
+  CHECK(comp.Store(string_1));
+  // string_5 compressed using string_3.
+  CheckCompression(&comp, "ooo#2:3");
+  CHECK(comp.Store(string_4));
+  // string_1 is out of buffer by now, so it shouldn't be compressed.
+  CHECK_GE(3, kWindowSize);
+  CheckCompression(&comp, string_1);
+  CHECK(comp.Store(string_2));
+  // string_4 compressed using itself.
+  CheckCompression(&comp, "#3");
+}
+
+
+TEST(CompressorBestSelection) {
+  LogRecordCompressor comp(3);
+  const Vector<const char> string_1 = CStrVector("eee,ddd,ccc,bbb,aaa");
+  CHECK(comp.Store(string_1));
+  const Vector<const char> string_2 = CStrVector("ddd,ccc,bbb,aaa");
+  CHECK(comp.Store(string_2));
+  const Vector<const char> string_3 = CStrVector("fff,eee,ddd,ccc,bbb,aaa");
+  CHECK(comp.Store(string_3));
+  // string_2 compressed using string_1.
+  CheckCompression(&comp, "#1:4");
+  const Vector<const char> string_4 = CStrVector("nnn,hhh,ggg,fff,aaa");
+  CHECK(comp.Store(string_4));
+  // Compressing string_3 using string_1 gives a better compression than
+  // using string_2.
+  CheckCompression(&comp, "fff,#2");
+}
+
+
+TEST(CompressorCompressibility) {
+  LogRecordCompressor comp(2);
+  const Vector<const char> string_1 = CStrVector("eee,ddd,ccc,bbb,aaa");
+  CHECK(comp.Store(string_1));
+  const Vector<const char> string_2 = CStrVector("ccc,bbb,aaa");
+  CHECK(comp.Store(string_2));
+  const Vector<const char> string_3 = CStrVector("aaa");
+  CHECK(comp.Store(string_3));
+  // string_2 compressed using string_1.
+  CheckCompression(&comp, "#1:8");
+  const Vector<const char> string_4 = CStrVector("xxx");
+  CHECK(comp.Store(string_4));
+  // string_3 can't be compressed using string_2; it is too short.
+  CheckCompression(&comp, string_3);
+}
+
 #endif  // ENABLE_LOGGING_AND_PROFILING
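
Judging from the expected strings in the tests above, a compressed record is
an uncompressed prefix followed by "#<d>" or "#<d>:<p>": append the tail of
the record stored d positions back in the window, starting at character p
(0 when omitted). A hedged JavaScript sketch of a decoder for that inferred
format (the authoritative definition lives in src/log-utils.cc; escaping of
literal '#' characters is ignored here):

  // Decodes a sequence of possibly-compressed records, e.g.
  // decodeRecords(["eee,ddd,ccc,bbb,aaa", "fff#1:3"]) returns
  // ["eee,ddd,ccc,bbb,aaa", "fff,ddd,ccc,bbb,aaa"].
  function decodeRecords(records) {
    var window = [];
    return records.map(function(record) {
      var match = /^(.*)#(\d+)(?::(\d+))?$/.exec(record);
      var line = record;  // Uncompressed by default.
      if (match) {
        var referenced = window[window.length - Number(match[2])];
        line = match[1] +
               referenced.substring(match[3] ? Number(match[3]) : 0);
      }
      window.push(line);
      return line;
    });
  }
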
diff --git a/test/mjsunit/array-sort.js b/test/mjsunit/array-sort.js
index ef75dcc..a082abc 100644
--- a/test/mjsunit/array-sort.js
+++ b/test/mjsunit/array-sort.js
@@ -214,6 +214,30 @@
 TestNonArrayLongerLength(Math.pow(2,32) - 1);
 
 
+function TestNonArrayWithAccessors() {
+  // Regression test for issue 346; for details see
+  // http://code.google.com/p/v8/issues/detail?id=346
+  // Reported by nth10sd; the test is based on that report.
+  var x = {};
+  x[0] = 42;
+  x.__defineGetter__("1", function(){return this.foo;});
+  x.__defineSetter__("1", function(val){this.foo = val;});
+  x[1] = 49;
+  x[3] = 37;
+  x.length = 4;
+  Array.prototype.sort.call(x);
+  // Behavior of sort with accessors is undefined.  This accessor is
+  // well-behaved (acts like a normal property), so it should work.
+  assertEquals(4, x.length, "sortaccessors length");
+  assertEquals(37, x[0], "sortaccessors first");
+  assertEquals(42, x[1], "sortaccessors second");
+  assertEquals(49, x[2], "sortaccessors third");
+  assertFalse(3 in x, "sortaccessors fourth");
+}
+
+TestNonArrayWithAccessors();
+
+
 function TestInheritedElementSort(depth) {
   var length = depth * 2 + 3;
   var obj = {length: length};
@@ -268,7 +292,7 @@
     assertEquals(i, y[i], name + "value" + i);
   }
   for (var i = 10; i < length; i++) {
-    assertEquals(x.hasOwnProperty(i), y.hasOwnProperty(i), 
+    assertEquals(x.hasOwnProperty(i), y.hasOwnProperty(i),
                  name + "hasundef" + i);
     assertEquals(undefined, y[i], name+"undefined"+i);
     if (x.hasOwnProperty(i)) {
@@ -282,7 +306,7 @@
 TestSparseInheritedElementSort(1000);
 
 function TestSpecialCasesInheritedElementSort() {
-  
+
   var x = {
     1:"d1",
     2:"c1",
@@ -309,11 +333,11 @@
     }
   };
   Array.prototype.sort.call(x);
-  
+
   var name = "SpecialInherit-";
-  
+
   assertEquals(10000, x.length, name + "length");
-  var sorted = ["a2", "a3", "b1", "b2", "c1", "c2", "d1", "d2", "e3", 
+  var sorted = ["a2", "a3", "b1", "b2", "c1", "c2", "d1", "d2", "e3",
                 undefined, undefined, undefined];
   for (var i = 0; i < sorted.length; i++) {
     assertTrue(x.hasOwnProperty(i), name + "has" + i)
@@ -321,7 +345,6 @@
   }
   assertFalse(x.hasOwnProperty(sorted.length), name + "haspost");
   assertFalse(sorted.length in x, name + "haspost2");
-  
   assertTrue(x.hasOwnProperty(10), name + "hasundefined10");
   assertEquals(undefined, x[10], name + "undefined10");
   assertTrue(x.hasOwnProperty(100), name + "hasundefined100");
@@ -332,11 +355,8 @@
   assertEquals(undefined, x[2000], name + "undefined2000");
   assertTrue(x.hasOwnProperty(8000), name + "hasundefined8000");
   assertEquals(undefined, x[8000], name + "undefined8000");
-  
   assertFalse(x.hasOwnProperty(12000), name + "has12000");
   assertEquals("XX", x[12000], name + "XX12000");
-
 }
 
 TestSpecialCasesInheritedElementSort();
-
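
The new TestNonArrayWithAccessors case above leans on Array.prototype.sort
being generic: it only requires a numeric length property and indexed
elements, not an actual Array. A minimal sketch of that pattern without the
accessors (names are illustrative):

  var array_like = {0: "b", 2: "a", length: 3};
  Array.prototype.sort.call(array_like);
  // Defined elements are sorted to the front and the hole ends up last:
  // array_like[0] == "a", array_like[1] == "b", and 2 is no longer an own
  // property of array_like.
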
diff --git a/test/mjsunit/big-object-literal.js b/test/mjsunit/big-object-literal.js
index 0099ce9..8417951 100644
--- a/test/mjsunit/big-object-literal.js
+++ b/test/mjsunit/big-object-literal.js
@@ -84,7 +84,7 @@
 }
 
 // The sizes to test.
-var sizes = [1, 2, 100, 200, 400];
+var sizes = [1, 2, 100, 200, 350];
 
 // Run the test.
 for (var i = 0; i < sizes.length; i++) {
diff --git a/test/mjsunit/debug-scopes.js b/test/mjsunit/debug-scopes.js
new file mode 100644
index 0000000..7b477e1
--- /dev/null
+++ b/test/mjsunit/debug-scopes.js
@@ -0,0 +1,660 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// The tests below check that the debugger reports the expected scope chain
+// and scope contents for various function, closure and 'with' nestings.
+
+
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug;
+
+var test_name;
+var listener_delegate;
+var listener_called;
+var exception;
+var begin_test_count = 0;
+var end_test_count = 0;
+var break_count = 0;
+
+
+// Debug event listener which delegates.
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Break) {
+      break_count++;
+      listener_called = true;
+      listener_delegate(exec_state);
+    }
+  } catch (e) {
+    exception = e;
+  }
+}
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+
+// Initialize for a new test.
+function BeginTest(name) {
+  test_name = name;
+  listener_delegate = null;
+  listener_called = false;
+  exception = null;
+  begin_test_count++;
+}
+
+
+// Check result of a test.
+function EndTest() {
+  assertTrue(listener_called, "listener not called for " + test_name);
+  assertNull(exception, test_name);
+  end_test_count++;
+}
+
+
+// Check that the scope chain contains the expected types of scopes.
+function CheckScopeChain(scopes, exec_state) {
+  assertEquals(scopes.length, exec_state.frame().scopeCount());
+  for (var i = 0; i < scopes.length; i++) {
+    var scope = exec_state.frame().scope(i);
+    assertTrue(scope.isScope());
+    assertEquals(scopes[i], scope.scopeType());
+
+    // Check the global object when hitting the global scope.
+    if (scopes[i] == debug.ScopeType.Global) {
+      assertEquals(this, scope.scopeObject().value());
+    }
+  }
+
+  // Get the debug command processor.
+  var dcp = exec_state.debugCommandProcessor();
+
+  // Send a scopes request and check the result.
+  var request_json = '{"seq":0,"type":"request","command":"scopes"}';
+  var response_json = dcp.processDebugJSONRequest(request_json);
+  var response = JSON.parse(response_json);
+  assertEquals(scopes.length, response.body.scopes.length);
+  for (var i = 0; i < scopes.length; i++) {
+    assertEquals(i, response.body.scopes[i].index);
+    assertEquals(scopes[i], response.body.scopes[i].type);
+    if (scopes[i] == debug.ScopeType.Local ||
+        scopes[i] == debug.ScopeType.Closure) {
+      assertTrue(response.body.scopes[i].object.ref < 0);
+    } else {
+      assertTrue(response.body.scopes[i].object.ref >= 0);
+    }
+    var found = false;
+    for (var j = 0; j < response.refs.length && !found; j++) {
+      found = response.refs[j].handle == response.body.scopes[i].object.ref;
+    }
+    assertTrue(found, "Scope object " + response.body.scopes[i].object.ref + " not found");
+  }
+}
+
+
+// Check that the content of the scope is as expected. For functions just check
+// that there is a function.
+function CheckScopeContent(content, number, exec_state) {
+  var scope = exec_state.frame().scope(number);
+  var count = 0;
+  for (var p in content) {
+    var property_mirror = scope.scopeObject().property(p);
+    assertFalse(property_mirror.isUndefined(), 'property ' + p + ' not found in scope');
+    if (typeof(content[p]) === 'function') {
+      assertTrue(property_mirror.value().isFunction());
+    } else {
+      assertEquals(content[p], property_mirror.value().value(), 'property ' + p + ' has unexpected value');
+    }
+    count++;
+  }
+
+  // 'arguments' might be exposed in the local and closure scope. Just
+  // ignore this.
+  var scope_size = scope.scopeObject().properties().length;
+  if (!scope.scopeObject().property('arguments').isUndefined()) {
+    scope_size--;
+  }
+  if (count != scope_size) {
+    print('Names found in scope:');
+    var names = scope.scopeObject().propertyNames();
+    for (var i = 0; i < names.length; i++) {
+      print(names[i]);
+    }
+  }
+  assertEquals(count, scope_size);
+
+  // Get the debug command processor.
+  var dcp = exec_state.debugCommandProcessor();
+
+  // Send a scope request for information on a single scope and check the
+  // result.
+  var request_json =
+      '{"seq":0,"type":"request","command":"scope","arguments":{"number":';
+  request_json += scope.scopeIndex();
+  request_json += '}}';
+  var response_json = dcp.processDebugJSONRequest(request_json);
+  var response = JSON.parse(response_json);
+  assertEquals(scope.scopeType(), response.body.type);
+  assertEquals(number, response.body.index);
+  if (scope.scopeType() == debug.ScopeType.Local ||
+      scope.scopeType() == debug.ScopeType.Closure) {
+    assertTrue(response.body.object.ref < 0);
+  } else {
+    assertTrue(response.body.object.ref >= 0);
+  }
+  var found = false;
+  for (var i = 0; i < response.refs.length && !found; i++) {
+    found = response.refs[i].handle == response.body.object.ref;
+  }
+  assertTrue(found, "Scope object " + response.body.object.ref + " not found");
+}
+
+
+// Simple empty local scope.
+BeginTest("Local 1");
+
+function local_1() {
+  debugger;
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({}, 0, exec_state);
+}
+local_1()
+EndTest();
+
+
+// Local scope with a parameter.
+BeginTest("Local 2");
+
+function local_2(a) {
+  debugger;
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({a:1}, 0, exec_state);
+}
+local_2(1)
+EndTest();
+
+
+// Local scope with a parameter and a local variable.
+BeginTest("Local 3");
+
+function local_3(a) {
+  var x = 3;
+  debugger;
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({a:1,x:3}, 0, exec_state);
+}
+local_3(1)
+EndTest();
+
+
+// Local scope with parameters and local variables.
+BeginTest("Local 4");
+
+function local_4(a, b) {
+  var x = 3;
+  var y = 4;
+  debugger;
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({a:1,b:2,x:3,y:4}, 0, exec_state);
+}
+local_4(1, 2)
+EndTest();
+
+
+// Empty local scope with use of eval.
+BeginTest("Local 5");
+
+function local_5() {
+  eval('');
+  debugger;
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({}, 0, exec_state);
+}
+local_5()
+EndTest();
+
+
+// Local introducing local variable using eval.
+BeginTest("Local 6");
+
+function local_6() {
+  eval('var i = 5');
+  debugger;
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({i:5}, 0, exec_state);
+}
+local_6()
+EndTest();
+
+
+// Local scope with parameters, local variables and local variable introduced
+// using eval.
+BeginTest("Local 7");
+
+function local_7(a, b) {
+  var x = 3;
+  var y = 4;
+  eval('var i = 5');
+  eval('var j = 6');
+  debugger;
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({a:1,b:2,x:3,y:4,i:5,j:6}, 0, exec_state);
+}
+local_7(1, 2)
+EndTest();
+
+
+// Single empty with block.
+BeginTest("With 1");
+
+function with_1() {
+  with({}) {
+    debugger;
+  }
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.With,
+                   debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({}, 0, exec_state);
+}
+with_1()
+EndTest();
+
+
+// Nested empty with blocks.
+BeginTest("With 2");
+
+function with_2() {
+  with({}) {
+    with({}) {
+      debugger;
+    }
+  }
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.With,
+                   debug.ScopeType.With,
+                   debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({}, 0, exec_state);
+  CheckScopeContent({}, 1, exec_state);
+}
+with_2()
+EndTest();
+
+
+// With block using an in-place object literal.
+BeginTest("With 3");
+
+function with_3() {
+  with({a:1,b:2}) {
+    debugger;
+  }
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.With,
+                   debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({a:1,b:2}, 0, exec_state);
+}
+with_3()
+EndTest();
+
+
+// Nested with blocks using in-place object literals.
+BeginTest("With 4");
+
+function with_4() {
+  with({a:1,b:2}) {
+    with({a:2,b:1}) {
+      debugger;
+    }
+  }
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.With,
+                   debug.ScopeType.With,
+                   debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({a:2,b:1}, 0, exec_state);
+  CheckScopeContent({a:1,b:2}, 1, exec_state);
+}
+with_4()
+EndTest();
+
+
+// Nested with blocks using existing object.
+BeginTest("With 5");
+
+var with_object = {c:3,d:4};
+function with_5() {
+  with(with_object) {
+    with(with_object) {
+      debugger;
+    }
+  }
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.With,
+                   debug.ScopeType.With,
+                   debug.ScopeType.Local,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent(with_object, 0, exec_state);
+  CheckScopeContent(with_object, 1, exec_state);
+  assertEquals(exec_state.frame().scope(0).scopeObject(), exec_state.frame().scope(1).scopeObject());
+  assertEquals(with_object, exec_state.frame().scope(1).scopeObject().value());
+}
+with_5()
+EndTest();
+
+
+// Simple closure formed by returning an inner function referring to the outer
+// function's arguments.
+BeginTest("Closure 1");
+
+function closure_1(a) {
+  function f() {
+    debugger;
+    return a;
+  };
+  return f;
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Closure,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({a:1}, 1, exec_state);
+}
+closure_1(1)()
+EndTest();
+
+
+// Simple closure formed by returning an inner function referring to the outer
+// function's arguments. Due to VM optimizations parts of the actual closure
+// are missing from the debugger information.
+BeginTest("Closure 2");
+
+function closure_2(a, b) {
+  var x = a + 2;
+  var y = b + 2;
+  function f() {
+    debugger;
+    return a + x;
+  };
+  return f;
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Closure,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({a:1,x:3}, 1, exec_state);
+}
+closure_2(1, 2)()
+EndTest();
+
+
+// Simple closure formed by returning an inner function referring to the outer
+// function's arguments. Using all arguments and locals from the outer function
+// in the inner function makes these part of the debugger information on the
+// closure.
+BeginTest("Closure 3");
+
+function closure_3(a, b) {
+  var x = a + 2;
+  var y = b + 2;
+  function f() {
+    debugger;
+    return a + b + x + y;
+  };
+  return f;
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Closure,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({a:1,b:2,x:3,y:4}, 1, exec_state);
+}
+closure_3(1, 2)()
+EndTest();
+
+
+// Simple closure formed by returning an inner function referring to the outer
+// function's arguments. Using all arguments and locals from the outer function
+// in the inner function makes these part of the debugger information on the
+// closure. Use the inner function as well...
+BeginTest("Closure 4");
+
+function closure_4(a, b) {
+  var x = a + 2;
+  var y = b + 2;
+  function f() {
+    debugger;
+    if (f) {
+      return a + b + x + y;
+    }
+  };
+  return f;
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Closure,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({a:1,b:2,x:3,y:4,f:function(){}}, 1, exec_state);
+}
+closure_4(1, 2)()
+EndTest();
+
+
+// Simple closure formed by returning an inner function referring to the outer
+// function's arguments. In the presence of eval all arguments and locals
+// (including the inner function itself) from the outer function become part
+// of the debugger information on the closure.
+BeginTest("Closure 5");
+
+function closure_5(a, b) {
+  var x = 3;
+  var y = 4;
+  function f() {
+    eval('');
+    debugger;
+    return 1;
+  };
+  return f;
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Closure,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({a:1,b:2,x:3,y:4,f:function(){}}, 1, exec_state);
+}
+closure_5(1, 2)()
+EndTest();
+
+
+// Two closures. Due to optimizations only the parts actually used are provided
+// through the debugger information.
+BeginTest("Closure 6");
+function closure_6(a, b) {
+  function f(a, b) {
+    var x = 3;
+    var y = 4;
+    return function() {
+      var x = 3;
+      var y = 4;
+      debugger;
+      some_global = a;
+      return f;
+    }
+  }
+  return f(a, b);
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Closure,
+                   debug.ScopeType.Closure,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({a:1}, 1, exec_state);
+  CheckScopeContent({f:function(){}}, 2, exec_state);
+}
+closure_6(1, 2)()
+EndTest();
+
+
+// Two closures. In the presence of eval all information is provided as the
+// compiler cannot determine which parts are used.
+BeginTest("Closure 7");
+function closure_7(a, b) {
+  var x = 3;
+  var y = 4;
+  eval('var i = 5');
+  eval('var j = 6');
+  function f(a, b) {
+    var x = 3;
+    var y = 4;
+    eval('var i = 5');
+    eval('var j = 6');
+    return function() {
+      debugger;
+      some_global = a;
+      return f;
+    }
+  }
+  return f(a, b);
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Closure,
+                   debug.ScopeType.Closure,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({}, 0, exec_state);
+  CheckScopeContent({a:1,b:2,x:3,y:4,i:5,j:6}, 1, exec_state);
+  CheckScopeContent({a:1,b:2,x:3,y:4,i:5,j:6,f:function(){}}, 2, exec_state);
+}
+closure_7(1, 2)()
+EndTest();
+
+
+// Test a mixture of scopes.
+BeginTest("The full monty");
+function the_full_monty(a, b) {
+  var x = 3;
+  var y = 4;
+  eval('var i = 5');
+  eval('var j = 6');
+  function f(a, b) {
+    var x = 9;
+    var y = 10;
+    eval('var i = 11');
+    eval('var j = 12');
+    with ({j:13}){
+      return function() {
+        var x = 14;
+        with ({a:15}) {
+          with ({b:16}) {
+            debugger;
+            some_global = a;
+            return f;
+          }
+        }
+      }
+    }
+  }
+  return f(a, b);
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.With,
+                   debug.ScopeType.With,
+                   debug.ScopeType.Local,
+                   debug.ScopeType.With,
+                   debug.ScopeType.Closure,
+                   debug.ScopeType.Closure,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({b:16}, 0, exec_state);
+  CheckScopeContent({a:15}, 1, exec_state);
+  CheckScopeContent({x:14}, 2, exec_state);
+  CheckScopeContent({j:13}, 3, exec_state);
+  CheckScopeContent({a:1,b:2,x:9,y:10,i:11,j:12}, 4, exec_state);
+  CheckScopeContent({a:1,b:2,x:3,y:4,i:5,j:6,f:function(){}}, 5, exec_state);
+}
+the_full_monty(1, 2)()
+EndTest();
+
+// Test global scope.
+BeginTest("Global");
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Global], exec_state);
+}
+debugger;
+EndTest();
+
+assertEquals(begin_test_count, break_count, 'one or more tests did not enter the debugger');
+assertEquals(begin_test_count, end_test_count, 'one or more tests did not have their results checked');
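
For reference, the protocol messages exercised by CheckScopeChain and
CheckScopeContent above, as implied by the assertions (the concrete ref and
handle values below are made up; only the structure is pinned down by the
test):

  // Request all scopes of the selected frame:
  var scopes_request = '{"seq":0,"type":"request","command":"scopes"}';
  // Request a single scope by its index:
  var scope_request =
      '{"seq":1,"type":"request","command":"scope","arguments":{"number":0}}';
  // Response shape checked by the test: each scope carries an index, a type
  // matching a debug.ScopeType value, and an object ref resolvable in the
  // refs table; local and closure scope objects use negative (transient)
  // handles, all other scopes non-negative ones.
  var example_response = {
    body: {scopes: [{index: 0, type: 1, object: {ref: -1}},
                    {index: 1, type: 0, object: {ref: 7}}]},
    refs: [{handle: -1}, {handle: 7}]
  };
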
diff --git a/test/mjsunit/debug-sourceinfo.js b/test/mjsunit/debug-sourceinfo.js
index 36e9f03..b62a742 100644
--- a/test/mjsunit/debug-sourceinfo.js
+++ b/test/mjsunit/debug-sourceinfo.js
@@ -38,6 +38,23 @@
       return 1;
     }
   };
+function d(x) {
+  x = 1 ;
+  x = 2 ;
+  x = 3 ;
+  x = 4 ;
+  x = 5 ;
+  x = 6 ;
+  x = 7 ;
+  x = 8 ;
+  x = 9 ;
+  x = 10;
+  x = 11;
+  x = 12;
+  x = 13;
+  x = 14;
+  x = 15;
+}
 
 // Get the Debug object exposed from the debug context global object.
 Debug = debug.Debug
@@ -45,22 +62,42 @@
 // This is the number of comment lines above the first test function.
 var comment_lines = 29;
 
+// This is the last position in the entire file (note: this equals
+// file size of <debug-sourceinfo.js> - 1, since starting at 0).
+var last_position = 14072;
+// This is the last line of entire file (note: starting at 0).
+var last_line = 345;
+// This is the last column of last line (note: starting at 0 and +2, due
+// to trailing <CR><LF>).
+var last_column = 48;
+
 // This magic number is the length or the first line comment (actually number
 // of characters before 'function a(...'.
 var comment_line_length = 1726;
 var start_a = 10 + comment_line_length;
 var start_b = 37 + comment_line_length;
 var start_c = 71 + comment_line_length;
+var start_d = 163 + comment_line_length;
+
+// The position of the first line of d(), i.e. "x = 1 ;".
+var start_code_d = start_d + 7;
+// The line # of the first line of d() (note: starting at 0).
+var start_line_d = 41;
+var line_length_d = 11;
+var num_lines_d = 15;
 
 assertEquals(start_a, Debug.sourcePosition(a));
 assertEquals(start_b, Debug.sourcePosition(b));
 assertEquals(start_c, Debug.sourcePosition(c));
+assertEquals(start_d, Debug.sourcePosition(d));
 
 var script = Debug.findScript(a);
 assertTrue(script.data === Debug.findScript(b).data);
 assertTrue(script.data === Debug.findScript(c).data);
+assertTrue(script.data === Debug.findScript(d).data);
 assertTrue(script.source === Debug.findScript(b).source);
 assertTrue(script.source === Debug.findScript(c).source);
+assertTrue(script.source === Debug.findScript(d).source);
 
 // Test that when running through source positions the position, line and
 // column progresses as expected.
@@ -89,6 +126,19 @@
   column = location.column;
 }
 
+// Every line of d() is the same length.  Verify we can loop through all
+// positions and find the right line # for each.
+var p = start_code_d;
+for (line = 0; line < num_lines_d; line++) {
+  for (column = 0; column < line_length_d; column++) {
+    var location = script.locationFromPosition(p);
+    assertEquals(p, location.position);
+    assertEquals(start_line_d + line, location.line);
+    assertEquals(column, location.column);
+    p++;
+  }
+}
+
 // Test first position.
 assertEquals(0, script.locationFromPosition(0).position);
 assertEquals(0, script.locationFromPosition(0).line);
@@ -99,21 +149,26 @@
 assertEquals(0, script.locationFromPosition(1).line);
 assertEquals(1, script.locationFromPosition(1).column);
 
-// Test first position in finction a.
+// Test first position in function a().
 assertEquals(start_a, script.locationFromPosition(start_a).position);
 assertEquals(0, script.locationFromPosition(start_a).line - comment_lines);
 assertEquals(10, script.locationFromPosition(start_a).column);
 
-// Test first position in finction b.
+// Test first position in function b().
 assertEquals(start_b, script.locationFromPosition(start_b).position);
 assertEquals(1, script.locationFromPosition(start_b).line - comment_lines);
 assertEquals(13, script.locationFromPosition(start_b).column);
 
-// Test first position in finction b.
+// Test first position in function c().
 assertEquals(start_c, script.locationFromPosition(start_c).position);
 assertEquals(4, script.locationFromPosition(start_c).line - comment_lines);
 assertEquals(12, script.locationFromPosition(start_c).column);
 
+// Test first position in function d().
+assertEquals(start_d, script.locationFromPosition(start_d).position);
+assertEquals(11, script.locationFromPosition(start_d).line - comment_lines);
+assertEquals(10, script.locationFromPosition(start_d).column);
+
 // Test first line.
 assertEquals(0, script.locationFromLine().position);
 assertEquals(0, script.locationFromLine().line);
@@ -122,17 +177,17 @@
 assertEquals(0, script.locationFromLine(0).line);
 assertEquals(0, script.locationFromLine(0).column);
 
-// Test first line column 1
+// Test first line column 1.
 assertEquals(1, script.locationFromLine(0, 1).position);
 assertEquals(0, script.locationFromLine(0, 1).line);
 assertEquals(1, script.locationFromLine(0, 1).column);
 
-// Test first line offset 1
+// Test first line offset 1.
 assertEquals(1, script.locationFromLine(0, 0, 1).position);
 assertEquals(0, script.locationFromLine(0, 0, 1).line);
 assertEquals(1, script.locationFromLine(0, 0, 1).column);
 
-// Test offset function a
+// Test offset function a().
 assertEquals(start_a, script.locationFromLine(void 0, void 0, start_a).position);
 assertEquals(0, script.locationFromLine(void 0, void 0, start_a).line - comment_lines);
 assertEquals(10, script.locationFromLine(void 0, void 0, start_a).column);
@@ -143,27 +198,27 @@
 assertEquals(0, script.locationFromLine(0, 0, start_a).line - comment_lines);
 assertEquals(10, script.locationFromLine(0, 0, start_a).column);
 
-// Test second line offset function a
+// Test second line offset function a().
 assertEquals(start_a + 14, script.locationFromLine(1, 0, start_a).position);
 assertEquals(1, script.locationFromLine(1, 0, start_a).line - comment_lines);
 assertEquals(0, script.locationFromLine(1, 0, start_a).column);
 
-// Test second line column 2 offset function a
+// Test second line column 2 offset function a().
 assertEquals(start_a + 14 + 2, script.locationFromLine(1, 2, start_a).position);
 assertEquals(1, script.locationFromLine(1, 2, start_a).line - comment_lines);
 assertEquals(2, script.locationFromLine(1, 2, start_a).column);
 
-// Test offset function b
+// Test offset function b().
 assertEquals(start_b, script.locationFromLine(0, 0, start_b).position);
 assertEquals(1, script.locationFromLine(0, 0, start_b).line - comment_lines);
 assertEquals(13, script.locationFromLine(0, 0, start_b).column);
 
-// Test second line offset function b
+// Test second line offset function b().
 assertEquals(start_b + 6, script.locationFromLine(1, 0, start_b).position);
 assertEquals(2, script.locationFromLine(1, 0, start_b).line - comment_lines);
 assertEquals(0, script.locationFromLine(1, 0, start_b).column);
 
-// Test second line column 11 offset function b
+// Test second line column 11 offset function b().
 assertEquals(start_b + 6 + 11, script.locationFromLine(1, 11, start_b).position);
 assertEquals(2, script.locationFromLine(1, 11, start_b).line - comment_lines);
 assertEquals(11, script.locationFromLine(1, 11, start_b).column);
@@ -187,6 +242,21 @@
 assertEquals(52 + start_c, Debug.findFunctionSourceLocation(c, 4, 0).position);
 assertEquals(69 + start_c, Debug.findFunctionSourceLocation(c, 5, 0).position);
 assertEquals(76 + start_c, Debug.findFunctionSourceLocation(c, 6, 0).position);
+assertEquals(0 + start_d, Debug.findFunctionSourceLocation(d, 0, 0).position);
+assertEquals(7 + start_d, Debug.findFunctionSourceLocation(d, 1, 0).position);
+for (i = 1; i <= num_lines_d; i++) {
+  assertEquals(7 + (i * line_length_d) + start_d, Debug.findFunctionSourceLocation(d, (i + 1), 0).position);
+}
+assertEquals(175 + start_d, Debug.findFunctionSourceLocation(d, 17, 0).position);
+
+// Make sure invalid inputs work properly.
+assertEquals(0, script.locationFromPosition(-1).line);
+assertEquals(null, script.locationFromPosition(last_position + 1));
+
+// Test last position.
+assertEquals(last_position, script.locationFromPosition(last_position).position);
+assertEquals(last_line, script.locationFromPosition(last_position).line);
+assertEquals(last_column, script.locationFromPosition(last_position).column);
 
 // Test source line and restriction. All the following tests start from line 1
 // column 2 in function b, which is the call to c.
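
Since every line in the body of d() has the same length (line_length_d = 11
characters, counting the trailing <CR><LF>), the nested verification loop
above amounts to simple arithmetic: a position inside the body maps to a line
by division and to a column by remainder. A sketch of the mapping the loop
checks, using the test's own constants:

  // The location the loop expects for a position p inside the body of d().
  function expectedLocationInD(p) {
    var offset = p - start_code_d;
    return {position: p,
            line: start_line_d + Math.floor(offset / line_length_d),
            column: offset % line_length_d};
  }
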
diff --git a/test/mjsunit/sin-cos.js b/test/mjsunit/sin-cos.js
new file mode 100644
index 0000000..ae02451
--- /dev/null
+++ b/test/mjsunit/sin-cos.js
@@ -0,0 +1,45 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test Math.sin and Math.cos.
+
+var input_sin = [0, Math.PI / 2];
+var input_cos = [0, Math.PI];
+
+var output_sin = input_sin.map(Math.sin);
+var output_cos = input_cos.map(Math.cos);
+
+var expected_sin = [0, 1];
+var expected_cos = [1, -1];
+
+assertArrayEquals(expected_sin, output_sin, "sine");
+assertArrayEquals(expected_cos, output_cos, "cosine");
+
+// At some point the slow cases for sine and cosine were, by accident,
+// both computing sine.  This is a regression test for that issue.
+var x = Math.pow(2, 70);
+assertTrue(Math.sin(x) != Math.cos(x));
diff --git a/tools/tickprocessor.js b/tools/tickprocessor.js
index 477ab26..cce579b 100644
--- a/tools/tickprocessor.js
+++ b/tools/tickprocessor.js
@@ -86,6 +86,7 @@
   // Count each tick as a time unit.
   this.viewBuilder_ = new devtools.profiler.ViewBuilder(1);
   this.lastLogFileName_ = null;
+  this.aliases_ = {};
 };
 
 
@@ -116,6 +117,7 @@
   'code-delete': { parsers: [parseInt], processor: 'processCodeDelete' },
   'tick': { parsers: [parseInt, parseInt, parseInt, 'var-args'],
             processor: 'processTick' },
+  'alias': { parsers: [null, null], processor: 'processAlias' },
   'profiler': null,
   // Obsolete row types.
   'code-allocate': null,
@@ -123,7 +125,6 @@
   'end-code-region': null
 };
 
-
 TickProcessor.CALL_PROFILE_CUTOFF_PCT = 2.0;
 
 
@@ -218,8 +219,21 @@
 };
 
 
+TickProcessor.prototype.processAlias = function(symbol, expansion) {
+  if (expansion in TickProcessor.RecordsDispatch) {
+    TickProcessor.RecordsDispatch[symbol] =
+      TickProcessor.RecordsDispatch[expansion];
+  } else {
+    this.aliases_[symbol] = expansion;
+  }
+};
+
+
 TickProcessor.prototype.processCodeCreation = function(
     type, start, size, name) {
+  if (type in this.aliases_) {
+    type = this.aliases_[type];
+  }
   var entry = this.profile_.addCode(type, name, start, size);
 };
 
@@ -248,11 +262,17 @@
   }
 
   var fullStack = [pc];
+  var prevFrame = pc;
   for (var i = 0, n = stack.length; i < n; ++i) {
     var frame = stack[i];
+    var firstChar = frame.charAt(0);
     // Leave only numbers starting with 0x. Filter possible 'overflow' string.
-    if (frame.charAt(0) == '0') {
+    if (firstChar == '0') {
       fullStack.push(parseInt(frame, 16));
+    } else if (firstChar == '+' || firstChar == '-') {
+      // An offset from the previous frame.
+      prevFrame += parseInt(frame, 16);
+      fullStack.push(prevFrame);
     }
   }
   this.profile_.recordTick(fullStack);
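
The '+'/'-' branch above implements the compressed tick format introduced in
this release: a stack frame may be logged as a hex offset relative to the
previous relative frame instead of as a full address. A worked example of the
expansion performed by processTick (the addresses are made up):

  var pc = 0x100;
  var stack = ['0x250', '+a', '-4'];
  // '0x250' is absolute; '+a' adds 0xa to prevFrame (still pc here, since
  // absolute frames do not update prevFrame); '-4' then subtracts 0x4.
  // fullStack becomes [0x100, 0x250, 0x10a, 0x106].

The 'alias' rows serve the same goal for names: a row alias,<short>,<long>
either rebinds a dispatch entry (when <long> is a known record type) or, for
code types, records an expansion that processCodeCreation applies later. The
actual short names are chosen by the logger, not fixed by this note.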