Version 2.2.20

Fix bug with for-in on x64 platform (issue 748).

Fix crash bug on x64 platform (issue 756).
        
Fix bug in Object.getOwnPropertyNames (Chromium issue 41243).

Fix a bug on ARM that caused the result of 1 << x to be miscalculated for some inputs.
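
The log does not say which inputs were affected, so the following is only a
rough way to exercise the expression shape named above: a minimal embedder
sketch against the 2.2-era C++ API in include/v8.h (the main() scaffolding
and the value x = 4 are illustrative, not taken from the issue).

    // Hypothetical reproduction sketch: evaluate a constant << variable shift.
    #include <v8.h>
    #include <cstdio>

    int main() {
      v8::HandleScope handle_scope;
      v8::Persistent<v8::Context> context = v8::Context::New();
      v8::Context::Scope context_scope(context);

      // The entry above names expressions of the form 1 << x.
      v8::Handle<v8::Script> script =
          v8::Script::Compile(v8::String::New("var x = 4; 1 << x"));
      v8::Handle<v8::Value> result = script->Run();

      // A correct build prints 16 here.
      std::printf("1 << x = %d\n", static_cast<int>(result->Int32Value()));

      context.Dispose();
      return 0;
    }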

Performance improvements on all platforms.


git-svn-id: http://v8.googlecode.com/svn/trunk@4962 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
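
Context for the first two hunks in src/api.cc below: they sit inside what
appear to be the implementations of v8::Object::SetIndexedPropertiesToPixelData
and v8::Object::SetIndexedPropertiesToExternalArrayData, and they now switch
the receiver to a slow-elements map before installing the external backing
store. A minimal sketch of the embedder call that reaches this code, assuming
the 2.2-era declaration SetIndexedPropertiesToPixelData(uint8_t* data,
int length) in include/v8.h (the buffer and function names are illustrative
only):

    #include <stdint.h>
    #include <v8.h>

    // Embedder-owned backing store; V8 does not take ownership of it.
    static uint8_t pixel_buffer[256];

    // Hypothetical helper: route indexed properties of obj to pixel_buffer.
    void InstallPixelBacking(v8::Handle<v8::Object> obj) {
      // After this call, indexed reads/writes on obj go straight to
      // pixel_buffer; the hunk below additionally installs a slow-elements
      // map on obj before the pixel array is attached.
      obj->SetIndexedPropertiesToPixelData(pixel_buffer, 256);
    }
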
diff --git a/ChangeLog b/ChangeLog
index 95c1133..a8c6186 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,16 @@
+2010-06-28: Version 2.2.20
+
+        Fix bug with for-in on x64 platform (issue 748).
+
+        Fix crash bug on x64 platform (issue 756).
+
+        Fix bug in Object.getOwnPropertyNames (Chromium issue 41243).
+
+        Fix a bug on ARM that caused the result of 1 << x to be
+        miscalculated for some inputs.
+
+        Performance improvements on all platforms.
+
 2010-06-23: Version 2.2.19
 
         Fix bug that causes the build to break when profillingsupport=off
diff --git a/src/api.cc b/src/api.cc
index 464ca54..0f64dd4 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -2606,6 +2606,8 @@
     return;
   }
   i::Handle<i::PixelArray> pixels = i::Factory::NewPixelArray(length, data);
+  self->set_map(
+      *i::Factory::GetSlowElementsMap(i::Handle<i::Map>(self->map())));
   self->set_elements(*pixels);
 }
 
@@ -2659,6 +2661,8 @@
   }
   i::Handle<i::ExternalArray> array =
       i::Factory::NewExternalArray(length, array_type, data);
+  self->set_map(
+      *i::Factory::GetSlowElementsMap(i::Handle<i::Map>(self->map())));
   self->set_elements(*array);
 }
 
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 869227a..54b584a 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -1110,6 +1110,7 @@
   void EndBlockConstPool() {
     const_pool_blocked_nesting_--;
   }
+  bool is_const_pool_blocked() const { return const_pool_blocked_nesting_ > 0; }
 
  private:
   // Code buffer:
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 8e87614..b6639ae 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -157,6 +157,7 @@
       state_(NULL),
       loop_nesting_(0),
       type_info_(NULL),
+      function_return_(JumpTarget::BIDIRECTIONAL),
       function_return_is_shadowed_(false) {
 }
 
@@ -218,7 +219,7 @@
       // for stack overflow.
       frame_->AllocateStackSlots();
 
-      VirtualFrame::SpilledScope spilled_scope(frame_);
+      frame_->AssertIsSpilled();
       int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
       if (heap_slots > 0) {
         // Allocate local context.
@@ -257,6 +258,7 @@
         // order: such a parameter is copied repeatedly into the same
         // context location and thus the last value is what is seen inside
         // the function.
+        frame_->AssertIsSpilled();
         for (int i = 0; i < scope()->num_parameters(); i++) {
           Variable* par = scope()->parameter(i);
           Slot* slot = par->slot();
@@ -282,8 +284,7 @@
 
       // Initialize ThisFunction reference if present.
       if (scope()->is_function_scope() && scope()->function() != NULL) {
-        __ mov(ip, Operand(Factory::the_hole_value()));
-        frame_->EmitPush(ip);
+        frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
         StoreToSlot(scope()->function()->slot(), NOT_CONST_INIT);
       }
     } else {
@@ -510,7 +511,6 @@
         has_valid_frame() &&
         !has_cc() &&
         frame_->height() == original_height) {
-      frame_->SpillAll();
       true_target->Jump();
     }
   }
@@ -535,22 +535,18 @@
 
   if (has_cc()) {
     // Convert cc_reg_ into a boolean value.
-    VirtualFrame::SpilledScope scope(frame_);
     JumpTarget loaded;
     JumpTarget materialize_true;
     materialize_true.Branch(cc_reg_);
-    __ LoadRoot(r0, Heap::kFalseValueRootIndex);
-    frame_->EmitPush(r0);
+    frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
     loaded.Jump();
     materialize_true.Bind();
-    __ LoadRoot(r0, Heap::kTrueValueRootIndex);
-    frame_->EmitPush(r0);
+    frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
     loaded.Bind();
     cc_reg_ = al;
   }
 
   if (true_target.is_linked() || false_target.is_linked()) {
-    VirtualFrame::SpilledScope scope(frame_);
     // We have at least one condition value that has been "translated"
     // into a branch, thus it needs to be loaded explicitly.
     JumpTarget loaded;
@@ -561,8 +557,7 @@
     // Load "true" if necessary.
     if (true_target.is_linked()) {
       true_target.Bind();
-      __ LoadRoot(r0, Heap::kTrueValueRootIndex);
-      frame_->EmitPush(r0);
+      frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
     }
     // If both "true" and "false" need to be loaded jump across the code for
     // "false".
@@ -572,8 +567,7 @@
     // Load "false" if necessary.
     if (false_target.is_linked()) {
       false_target.Bind();
-      __ LoadRoot(r0, Heap::kFalseValueRootIndex);
-      frame_->EmitPush(r0);
+      frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
     }
     // A value is loaded on all paths reaching this point.
     loaded.Bind();
@@ -592,11 +586,11 @@
 
 
 void CodeGenerator::LoadGlobalReceiver(Register scratch) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
-  __ ldr(scratch, ContextOperand(cp, Context::GLOBAL_INDEX));
-  __ ldr(scratch,
-         FieldMemOperand(scratch, GlobalObject::kGlobalReceiverOffset));
-  frame_->EmitPush(scratch);
+  Register reg = frame_->GetTOSRegister();
+  __ ldr(reg, ContextOperand(cp, Context::GLOBAL_INDEX));
+  __ ldr(reg,
+         FieldMemOperand(reg, GlobalObject::kGlobalReceiverOffset));
+  frame_->EmitPush(reg);
 }
 
 
@@ -613,8 +607,6 @@
 
 
 void CodeGenerator::StoreArgumentsObject(bool initial) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
-
   ArgumentsAllocationMode mode = ArgumentsMode();
   ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
 
@@ -623,9 +615,9 @@
     // When using lazy arguments allocation, we store the hole value
     // as a sentinel indicating that the arguments object hasn't been
     // allocated yet.
-    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-    frame_->EmitPush(ip);
+    frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
   } else {
+    frame_->SpillAll();
     ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
     __ ldr(r2, frame_->Function());
     // The receiver is below the arguments, the return address, and the
@@ -649,9 +641,9 @@
     // already been written to. This can happen if the a function
     // has a local variable named 'arguments'.
     LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
-    frame_->EmitPop(r0);
+    Register arguments = frame_->PopToRegister();
     __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-    __ cmp(r0, ip);
+    __ cmp(arguments, ip);
     done.Branch(ne);
   }
   StoreToSlot(arguments->slot(), NOT_CONST_INIT);
@@ -754,36 +746,35 @@
 // may jump to 'false_target' in case the register converts to 'false'.
 void CodeGenerator::ToBoolean(JumpTarget* true_target,
                               JumpTarget* false_target) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   // Note: The generated code snippet does not change stack variables.
   //       Only the condition code should be set.
-  frame_->EmitPop(r0);
+  Register tos = frame_->PopToRegister();
 
   // Fast case checks
 
   // Check if the value is 'false'.
   __ LoadRoot(ip, Heap::kFalseValueRootIndex);
-  __ cmp(r0, ip);
+  __ cmp(tos, ip);
   false_target->Branch(eq);
 
   // Check if the value is 'true'.
   __ LoadRoot(ip, Heap::kTrueValueRootIndex);
-  __ cmp(r0, ip);
+  __ cmp(tos, ip);
   true_target->Branch(eq);
 
   // Check if the value is 'undefined'.
   __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-  __ cmp(r0, ip);
+  __ cmp(tos, ip);
   false_target->Branch(eq);
 
   // Check if the value is a smi.
-  __ cmp(r0, Operand(Smi::FromInt(0)));
+  __ cmp(tos, Operand(Smi::FromInt(0)));
   false_target->Branch(eq);
-  __ tst(r0, Operand(kSmiTagMask));
+  __ tst(tos, Operand(kSmiTagMask));
   true_target->Branch(eq);
 
   // Slow case: call the runtime.
-  frame_->EmitPush(r0);
+  frame_->EmitPush(tos);
   frame_->CallRuntime(Runtime::kToBool, 1);
   // Convert the result (r0) to a condition code.
   __ LoadRoot(ip, Heap::kFalseValueRootIndex);
@@ -935,7 +926,15 @@
 };
 
 
+
+// On entry the non-constant side of the binary operation is in tos_register_
+// and the constant smi side is not in a register.  The tos_register_ is not
+// used by the virtual frame.  On exit the answer is in the tos_register_ and
+// the virtual frame is unchanged.
 void DeferredInlineSmiOperation::Generate() {
+  VirtualFrame copied_frame(*frame_state()->frame());
+  copied_frame.SpillAll();
+
   Register lhs = r1;
   Register rhs = r0;
   switch (op_) {
@@ -969,44 +968,19 @@
     case Token::MOD:
     case Token::BIT_OR:
     case Token::BIT_XOR:
-    case Token::BIT_AND: {
-      if (reversed_) {
-        if (tos_register_.is(r0)) {
-          __ mov(r1, Operand(Smi::FromInt(value_)));
-        } else {
-          ASSERT(tos_register_.is(r1));
-          __ mov(r0, Operand(Smi::FromInt(value_)));
-          lhs = r0;
-          rhs = r1;
-        }
-      } else {
-        if (tos_register_.is(r1)) {
-          __ mov(r0, Operand(Smi::FromInt(value_)));
-        } else {
-          ASSERT(tos_register_.is(r0));
-          __ mov(r1, Operand(Smi::FromInt(value_)));
-          lhs = r0;
-          rhs = r1;
-        }
-      }
-      break;
-    }
-
+    case Token::BIT_AND:
     case Token::SHL:
     case Token::SHR:
     case Token::SAR: {
-      if (!reversed_) {
-        if (tos_register_.is(r1)) {
-          __ mov(r0, Operand(Smi::FromInt(value_)));
-        } else {
-          ASSERT(tos_register_.is(r0));
-          __ mov(r1, Operand(Smi::FromInt(value_)));
+      if (tos_register_.is(r1)) {
+        __ mov(r0, Operand(Smi::FromInt(value_)));
+      } else {
+        ASSERT(tos_register_.is(r0));
+        __ mov(r1, Operand(Smi::FromInt(value_)));
+      }
+      if (reversed_ == tos_register_.is(r1)) {
           lhs = r0;
           rhs = r1;
-        }
-      } else {
-        ASSERT(op_ == Token::SHL);
-        __ mov(r1, Operand(Smi::FromInt(value_)));
       }
       break;
     }
@@ -1019,11 +993,17 @@
 
   GenericBinaryOpStub stub(op_, overwrite_mode_, lhs, rhs, value_);
   __ CallStub(&stub);
+
   // The generic stub returns its value in r0, but that's not
   // necessarily what we want.  We want whatever the inlined code
   // expected, which is that the answer is in the same register as
   // the operand was.
   __ Move(tos_register_, r0);
+
+  // The tos register was not in use for the virtual frame that we
+  // came into this function with, so we can merge back to that frame
+  // without trashing it.
+  copied_frame.MergeTo(frame_state()->frame());
 }
 
 
@@ -1124,12 +1104,6 @@
 
   // We move the top of stack to a register (normally no move is invoved).
   Register tos = frame_->PopToRegister();
-  // All other registers are spilled.  The deferred code expects one argument
-  // in a register and all other values are flushed to the stack.  The
-  // answer is returned in the same register that the top of stack argument was
-  // in.
-  frame_->SpillAll();
-
   switch (op) {
     case Token::ADD: {
       DeferredCode* deferred =
@@ -1448,8 +1422,6 @@
 void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
                                       CallFunctionFlags flags,
                                       int position) {
-  frame_->AssertIsSpilled();
-
   // Push the arguments ("left-to-right") on the stack.
   int arg_count = args->length();
   for (int i = 0; i < arg_count; i++) {
@@ -1482,7 +1454,6 @@
   // stack, as receiver and arguments, and calls x.
   // In the implementation comments, we call x the applicand
   // and y the receiver.
-  VirtualFrame::SpilledScope spilled_scope(frame_);
 
   ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
   ASSERT(arguments->IsArguments());
@@ -1500,6 +1471,15 @@
   Load(receiver);
   LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
 
+  // At this point the top two stack elements are probably in registers
+  // since they were just loaded.  Ensure they are in regs and get the
+  // regs.
+  Register receiver_reg = frame_->Peek2();
+  Register arguments_reg = frame_->Peek();
+
+  // From now on the frame is spilled.
+  frame_->SpillAll();
+
   // Emit the source position information after having loaded the
   // receiver and the arguments.
   CodeForSourcePosition(position);
@@ -1513,32 +1493,30 @@
   // already. If so, just use that instead of copying the arguments
   // from the stack. This also deals with cases where a local variable
   // named 'arguments' has been introduced.
-  __ ldr(r0, MemOperand(sp, 0));
-
-  Label slow, done;
+  JumpTarget slow;
+  Label done;
   __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-  __ cmp(ip, r0);
-  __ b(ne, &slow);
+  __ cmp(ip, arguments_reg);
+  slow.Branch(ne);
 
   Label build_args;
   // Get rid of the arguments object probe.
   frame_->Drop();
   // Stack now has 3 elements on it.
   // Contents of stack at this point:
-  //   sp[0]: receiver
+  //   sp[0]: receiver - in the receiver_reg register.
   //   sp[1]: applicand.apply
   //   sp[2]: applicand.
 
   // Check that the receiver really is a JavaScript object.
-  __ ldr(r0, MemOperand(sp, 0));
-  __ BranchOnSmi(r0, &build_args);
+  __ BranchOnSmi(receiver_reg, &build_args);
   // We allow all JSObjects including JSFunctions.  As long as
   // JS_FUNCTION_TYPE is the last instance type and it is right
   // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
   // bound.
   ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
   ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
-  __ CompareObjectType(r0, r1, r2, FIRST_JS_OBJECT_TYPE);
+  __ CompareObjectType(receiver_reg, r2, r3, FIRST_JS_OBJECT_TYPE);
   __ b(lt, &build_args);
 
   // Check that applicand.apply is Function.prototype.apply.
@@ -1627,7 +1605,7 @@
   StoreArgumentsObject(false);
 
   // Stack and frame now have 4 elements.
-  __ bind(&slow);
+  slow.Bind();
 
   // Generic computation of x.apply(y, args) with no special optimization.
   // Flip applicand.apply and applicand on the stack, so
@@ -1652,7 +1630,6 @@
 
 
 void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(has_cc());
   Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
   target->Branch(cc);
@@ -1661,7 +1638,7 @@
 
 
 void CodeGenerator::CheckStack() {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
+  frame_->SpillAll();
   Comment cmnt(masm_, "[ check stack");
   __ LoadRoot(ip, Heap::kStackLimitRootIndex);
   // Put the lr setup instruction in the delay slot.  kInstrSize is added to
@@ -1683,7 +1660,6 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
     Visit(statements->at(i));
   }
@@ -1695,7 +1671,6 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ Block");
   CodeForStatementPosition(node);
   node->break_target()->SetExpectedHeight();
@@ -1713,7 +1688,6 @@
   frame_->EmitPush(Operand(pairs));
   frame_->EmitPush(Operand(Smi::FromInt(is_eval() ? 1 : 0)));
 
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
   // The result is discarded.
 }
@@ -1754,7 +1728,6 @@
       frame_->EmitPush(Operand(0));
     }
 
-    VirtualFrame::SpilledScope spilled_scope(frame_);
     frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
     // Ignore the return value (declarations are statements).
 
@@ -1899,7 +1872,6 @@
 
 
 void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ ContinueStatement");
   CodeForStatementPosition(node);
   node->target()->continue_target()->Jump();
@@ -1907,7 +1879,6 @@
 
 
 void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ BreakStatement");
   CodeForStatementPosition(node);
   node->target()->break_target()->Jump();
@@ -1915,7 +1886,7 @@
 
 
 void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
+  frame_->SpillAll();
   Comment cmnt(masm_, "[ ReturnStatement");
 
   CodeForStatementPosition(node);
@@ -1926,7 +1897,7 @@
   } else {
     // Pop the result from the frame and prepare the frame for
     // returning thus making it easier to merge.
-    frame_->EmitPop(r0);
+    frame_->PopToR0();
     frame_->PrepareForReturn();
     if (function_return_.is_bound()) {
       // If the function return label is already bound we reuse the
@@ -1986,7 +1957,6 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ WithEnterStatement");
   CodeForStatementPosition(node);
   Load(node->expression());
@@ -2012,7 +1982,6 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ WithExitStatement");
   CodeForStatementPosition(node);
   // Pop context.
@@ -2027,7 +1996,6 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ SwitchStatement");
   CodeForStatementPosition(node);
   node->break_target()->SetExpectedHeight();
@@ -2055,8 +2023,7 @@
     next_test.Bind();
     next_test.Unuse();
     // Duplicate TOS.
-    __ ldr(r0, frame_->Top());
-    frame_->EmitPush(r0);
+    frame_->Dup();
     Comparison(eq, NULL, clause->label(), true);
     Branch(false, &next_test);
 
@@ -2094,7 +2061,7 @@
     default_entry.Bind();
     VisitStatements(default_clause->statements());
     // If control flow can fall out of the default and there is a case after
-    // it, jup to that case's body.
+    // it, jump to that case's body.
     if (frame_ != NULL && default_exit.is_bound()) {
       default_exit.Jump();
     }
@@ -2116,7 +2083,6 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ DoWhileStatement");
   CodeForStatementPosition(node);
   node->break_target()->SetExpectedHeight();
@@ -2191,7 +2157,6 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ WhileStatement");
   CodeForStatementPosition(node);
 
@@ -2209,7 +2174,7 @@
   node->continue_target()->Bind();
 
   if (info == DONT_KNOW) {
-    JumpTarget body;
+    JumpTarget body(JumpTarget::BIDIRECTIONAL);
     LoadCondition(node->cond(), &body, node->break_target(), true);
     if (has_valid_frame()) {
       // A NULL frame indicates that control did not fall out of the
@@ -2242,7 +2207,6 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ ForStatement");
   CodeForStatementPosition(node);
   if (node->init() != NULL) {
@@ -2931,7 +2895,6 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ Conditional");
   JumpTarget then;
   JumpTarget else_;
@@ -2972,10 +2935,8 @@
                                     &done);
 
     slow.Bind();
-    VirtualFrame::SpilledScope spilled_scope(frame_);
     frame_->EmitPush(cp);
-    __ mov(r0, Operand(slot->var()->name()));
-    frame_->EmitPush(r0);
+    frame_->EmitPush(Operand(slot->var()->name()));
 
     if (typeof_state == INSIDE_TYPEOF) {
       frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
@@ -2990,16 +2951,17 @@
     Register scratch = VirtualFrame::scratch0();
     TypeInfo info = type_info(slot);
     frame_->EmitPush(SlotOperand(slot, scratch), info);
+
     if (slot->var()->mode() == Variable::CONST) {
       // Const slots may contain 'the hole' value (the constant hasn't been
       // initialized yet) which needs to be converted into the 'undefined'
       // value.
       Comment cmnt(masm_, "[ Unhole const");
-      frame_->EmitPop(scratch);
+      Register tos = frame_->PopToRegister();
       __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-      __ cmp(scratch, ip);
-      __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex, eq);
-      frame_->EmitPush(scratch);
+      __ cmp(tos, ip);
+      __ LoadRoot(tos, Heap::kUndefinedValueRootIndex, eq);
+      frame_->EmitPush(tos);
     }
   }
 }
@@ -3007,6 +2969,7 @@
 
 void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
                                                   TypeofState state) {
+  VirtualFrame::RegisterAllocationScope scope(this);
   LoadFromSlot(slot, state);
 
   // Bail out quickly if we're not using lazy arguments allocation.
@@ -3015,17 +2978,15 @@
   // ... or if the slot isn't a non-parameter arguments slot.
   if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
 
-  VirtualFrame::SpilledScope spilled_scope(frame_);
-
-  // Load the loaded value from the stack into r0 but leave it on the
+  // Load the loaded value from the stack into a register but leave it on the
   // stack.
-  __ ldr(r0, MemOperand(sp, 0));
+  Register tos = frame_->Peek();
 
   // If the loaded value is the sentinel that indicates that we
   // haven't loaded the arguments object yet, we need to do it now.
   JumpTarget exit;
   __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-  __ cmp(r0, ip);
+  __ cmp(tos, ip);
   exit.Branch(ne);
   frame_->Drop();
   StoreArgumentsObject(false);
@@ -3035,14 +2996,13 @@
 
 void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
   ASSERT(slot != NULL);
+  VirtualFrame::RegisterAllocationScope scope(this);
   if (slot->type() == Slot::LOOKUP) {
-    VirtualFrame::SpilledScope spilled_scope(frame_);
     ASSERT(slot->var()->is_dynamic());
 
     // For now, just do a runtime call.
     frame_->EmitPush(cp);
-    __ mov(r0, Operand(slot->var()->name()));
-    frame_->EmitPush(r0);
+    frame_->EmitPush(Operand(slot->var()->name()));
 
     if (init_state == CONST_INIT) {
       // Same as the case for a normal store, but ignores attribute
@@ -3071,7 +3031,7 @@
   } else {
     ASSERT(!slot->var()->is_dynamic());
     Register scratch = VirtualFrame::scratch0();
-    VirtualFrame::RegisterAllocationScope scope(this);
+    Register scratch2 = VirtualFrame::scratch1();
 
     // The frame must be spilled when branching to this target.
     JumpTarget exit;
@@ -3085,7 +3045,6 @@
       __ ldr(scratch, SlotOperand(slot, scratch));
       __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
       __ cmp(scratch, ip);
-      frame_->SpillAll();
       exit.Branch(ne);
     }
 
@@ -3104,18 +3063,18 @@
       // Skip write barrier if the written value is a smi.
       __ tst(tos, Operand(kSmiTagMask));
       // We don't use tos any more after here.
-      VirtualFrame::SpilledScope spilled_scope(frame_);
       exit.Branch(eq);
       // scratch is loaded with context when calling SlotOperand above.
       int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
-      // r1 could be identical with tos, but that doesn't matter.
-      __ RecordWrite(scratch, Operand(offset), r3, r1);
+      // We need an extra register.  Until we have a way to do that in the
+      // virtual frame we will cheat and ask for a free TOS register.
+      Register scratch3 = frame_->GetTOSRegister();
+      __ RecordWrite(scratch, Operand(offset), scratch2, scratch3);
     }
     // If we definitely did not jump over the assignment, we do not need
     // to bind the exit label.  Doing so can defeat peephole
     // optimization.
     if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
-      frame_->SpillAll();
       exit.Bind();
     }
   }
@@ -3289,42 +3248,51 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ RexExp Literal");
 
+  Register tmp = VirtualFrame::scratch0();
+  // Free up a TOS register that can be used to push the literal.
+  Register literal = frame_->GetTOSRegister();
+
   // Retrieve the literal array and check the allocated entry.
 
   // Load the function of this activation.
-  __ ldr(r1, frame_->Function());
+  __ ldr(tmp, frame_->Function());
 
   // Load the literals array of the function.
-  __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
+  __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kLiteralsOffset));
 
   // Load the literal at the ast saved index.
   int literal_offset =
       FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
-  __ ldr(r2, FieldMemOperand(r1, literal_offset));
+  __ ldr(literal, FieldMemOperand(tmp, literal_offset));
 
   JumpTarget done;
   __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-  __ cmp(r2, ip);
+  __ cmp(literal, ip);
+  // This branch locks the virtual frame at the done label to match the
+  // one we have here, where the literal register is not on the stack and
+  // nothing is spilled.
   done.Branch(ne);
 
-  // If the entry is undefined we call the runtime system to computed
+  // If the entry is undefined we call the runtime system to compute
   // the literal.
-  frame_->EmitPush(r1);  // literal array  (0)
-  __ mov(r0, Operand(Smi::FromInt(node->literal_index())));
-  frame_->EmitPush(r0);  // literal index  (1)
-  __ mov(r0, Operand(node->pattern()));  // RegExp pattern (2)
-  frame_->EmitPush(r0);
-  __ mov(r0, Operand(node->flags()));  // RegExp flags   (3)
-  frame_->EmitPush(r0);
+  // literal array  (0)
+  frame_->EmitPush(tmp);
+  // literal index  (1)
+  frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
+  // RegExp pattern (2)
+  frame_->EmitPush(Operand(node->pattern()));
+  // RegExp flags   (3)
+  frame_->EmitPush(Operand(node->flags()));
   frame_->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
-  __ mov(r2, Operand(r0));
+  __ Move(literal, r0);
 
+  // This call to bind will get us back to the virtual frame we had before
+  // where things are not spilled and the literal register is not on the stack.
   done.Bind();
   // Push the literal.
-  frame_->EmitPush(r2);
+  frame_->EmitPush(literal);
   ASSERT_EQ(original_height + 1, frame_->height());
 }
 
@@ -3333,20 +3301,20 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ ObjectLiteral");
 
+  Register literal = frame_->GetTOSRegister();
   // Load the function of this activation.
-  __ ldr(r3, frame_->Function());
+  __ ldr(literal, frame_->Function());
   // Literal array.
-  __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
+  __ ldr(literal, FieldMemOperand(literal, JSFunction::kLiteralsOffset));
+  frame_->EmitPush(literal);
   // Literal index.
-  __ mov(r2, Operand(Smi::FromInt(node->literal_index())));
+  frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
   // Constant properties.
-  __ mov(r1, Operand(node->constant_properties()));
+  frame_->EmitPush(Operand(node->constant_properties()));
   // Should the object literal have fast elements?
-  __ mov(r0, Operand(Smi::FromInt(node->fast_elements() ? 1 : 0)));
-  frame_->EmitPushMultiple(4, r3.bit() | r2.bit() | r1.bit() | r0.bit());
+  frame_->EmitPush(Operand(Smi::FromInt(node->fast_elements() ? 1 : 0)));
   if (node->depth() > 1) {
     frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4);
   } else {
@@ -3369,37 +3337,33 @@
         if (key->handle()->IsSymbol()) {
           Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
           Load(value);
-          frame_->EmitPop(r0);
+          frame_->PopToR0();
+          // Fetch the object literal.
+          frame_->SpillAllButCopyTOSToR1();
           __ mov(r2, Operand(key->handle()));
-          __ ldr(r1, frame_->Top());  // Load the receiver.
           frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
           break;
         }
         // else fall through
       case ObjectLiteral::Property::PROTOTYPE: {
-        __ ldr(r0, frame_->Top());
-        frame_->EmitPush(r0);  // dup the result
+        frame_->Dup();
         Load(key);
         Load(value);
         frame_->CallRuntime(Runtime::kSetProperty, 3);
         break;
       }
       case ObjectLiteral::Property::SETTER: {
-        __ ldr(r0, frame_->Top());
-        frame_->EmitPush(r0);
+        frame_->Dup();
         Load(key);
-        __ mov(r0, Operand(Smi::FromInt(1)));
-        frame_->EmitPush(r0);
+        frame_->EmitPush(Operand(Smi::FromInt(1)));
         Load(value);
         frame_->CallRuntime(Runtime::kDefineAccessor, 4);
         break;
       }
       case ObjectLiteral::Property::GETTER: {
-        __ ldr(r0, frame_->Top());
-        frame_->EmitPush(r0);
+        frame_->Dup();
         Load(key);
-        __ mov(r0, Operand(Smi::FromInt(0)));
-        frame_->EmitPush(r0);
+        frame_->EmitPush(Operand(Smi::FromInt(0)));
         Load(value);
         frame_->CallRuntime(Runtime::kDefineAccessor, 4);
         break;
@@ -3414,16 +3378,16 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ ArrayLiteral");
 
+  Register tos = frame_->GetTOSRegister();
   // Load the function of this activation.
-  __ ldr(r2, frame_->Function());
+  __ ldr(tos, frame_->Function());
   // Load the literals array of the function.
-  __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
-  __ mov(r1, Operand(Smi::FromInt(node->literal_index())));
-  __ mov(r0, Operand(node->constant_elements()));
-  frame_->EmitPushMultiple(3, r2.bit() | r1.bit() | r0.bit());
+  __ ldr(tos, FieldMemOperand(tos, JSFunction::kLiteralsOffset));
+  frame_->EmitPush(tos);
+  frame_->EmitPush(Operand(Smi::FromInt(node->literal_index())));
+  frame_->EmitPush(Operand(node->constant_elements()));
   int length = node->values()->length();
   if (node->depth() > 1) {
     frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
@@ -3450,10 +3414,10 @@
 
     // The property must be set by generated code.
     Load(value);
-    frame_->EmitPop(r0);
-
+    frame_->PopToR0();
     // Fetch the object literal.
-    __ ldr(r1, frame_->Top());
+    frame_->SpillAllButCopyTOSToR1();
+
     // Get the elements array.
     __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
 
@@ -3863,7 +3827,6 @@
   // ------------------------------------------------------------------------
 
   if (var != NULL && var->is_possibly_eval()) {
-    VirtualFrame::SpilledScope spilled_scope(frame_);
     // ----------------------------------
     // JavaScript example: 'eval(arg)'  // eval is not known to be shadowed
     // ----------------------------------
@@ -3877,8 +3840,7 @@
     Load(function);
 
     // Allocate a frame slot for the receiver.
-    __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
-    frame_->EmitPush(r2);
+    frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
 
     // Load the arguments.
     int arg_count = args->length();
@@ -3886,6 +3848,8 @@
       Load(args->at(i));
     }
 
+    VirtualFrame::SpilledScope spilled_scope(frame_);
+
     // If we know that eval can only be shadowed by eval-introduced
     // variables we attempt to load the global eval function directly
     // in generated code. If we succeed, there is no need to perform a
@@ -5201,7 +5165,6 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ UnaryOperation");
 
   Token::Value op = node->op();
@@ -5273,8 +5236,7 @@
         break;
 
       case Token::SUB: {
-        VirtualFrame::SpilledScope spilled(frame_);
-        frame_->EmitPop(r0);
+        frame_->PopToR0();
         GenericUnaryOpStub stub(Token::SUB, overwrite);
         frame_->CallStub(&stub, 0);
         frame_->EmitPush(r0);  // r0 has result
@@ -5282,23 +5244,28 @@
       }
 
       case Token::BIT_NOT: {
-        // smi check
-        VirtualFrame::SpilledScope spilled(frame_);
-        frame_->EmitPop(r0);
-        JumpTarget smi_label;
+        Register tos = frame_->PopToRegister();
+        JumpTarget not_smi_label;
         JumpTarget continue_label;
-        __ tst(r0, Operand(kSmiTagMask));
-        smi_label.Branch(eq);
+        // Smi check.
+        __ tst(tos, Operand(kSmiTagMask));
+        not_smi_label.Branch(ne);
 
-        GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
-        frame_->CallStub(&stub, 0);
+        __ mvn(tos, Operand(tos));
+        __ bic(tos, tos, Operand(kSmiTagMask));  // Bit-clear inverted smi-tag.
+        frame_->EmitPush(tos);
+        // The fast case is the first to jump to the continue label, so it gets
+        // to decide the virtual frame layout.
         continue_label.Jump();
 
-        smi_label.Bind();
-        __ mvn(r0, Operand(r0));
-        __ bic(r0, r0, Operand(kSmiTagMask));  // bit-clear inverted smi-tag
+        not_smi_label.Bind();
+        frame_->SpillAll();
+        __ Move(r0, tos);
+        GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
+        frame_->CallStub(&stub, 0);
+        frame_->EmitPush(r0);
+
         continue_label.Bind();
-        frame_->EmitPush(r0);  // r0 has result
         break;
       }
 
@@ -5308,16 +5275,16 @@
         break;
 
       case Token::ADD: {
-        VirtualFrame::SpilledScope spilled(frame_);
-        frame_->EmitPop(r0);
+        Register tos = frame_->Peek();
         // Smi check.
         JumpTarget continue_label;
-        __ tst(r0, Operand(kSmiTagMask));
+        __ tst(tos, Operand(kSmiTagMask));
         continue_label.Branch(eq);
-        frame_->EmitPush(r0);
+
         frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
+        frame_->EmitPush(r0);
+
         continue_label.Bind();
-        frame_->EmitPush(r0);  // r0 has result
         break;
       }
       default:
@@ -5335,6 +5302,7 @@
   int original_height = frame_->height();
 #endif
   Comment cmnt(masm_, "[ CountOperation");
+  VirtualFrame::RegisterAllocationScope scope(this);
 
   bool is_postfix = node->is_postfix();
   bool is_increment = node->op() == Token::INC;
@@ -5478,7 +5446,6 @@
   // after evaluating the left hand side (due to the shortcut
   // semantics), but the compiler must (statically) know if the result
   // of compiling the binary operation is materialized or not.
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   if (node->op() == Token::AND) {
     JumpTarget is_true;
     LoadCondition(node->left(), &is_true, false_target(), false);
@@ -5663,8 +5630,6 @@
     if (left_is_null || right_is_null) {
       Load(left_is_null ? right : left);
       Register tos = frame_->PopToRegister();
-      // JumpTargets can't cope with register allocation yet.
-      frame_->SpillAll();
       __ LoadRoot(ip, Heap::kNullValueRootIndex);
       __ cmp(tos, ip);
 
@@ -5707,9 +5672,6 @@
     LoadTypeofExpression(operation->expression());
     Register tos = frame_->PopToRegister();
 
-    // JumpTargets can't cope with register allocation yet.
-    frame_->SpillAll();
-
     Register scratch = VirtualFrame::scratch0();
 
     if (check->Equals(Heap::number_symbol())) {
@@ -5830,7 +5792,6 @@
       break;
 
     case Token::IN: {
-      VirtualFrame::SpilledScope scope(frame_);
       Load(left);
       Load(right);
       frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
@@ -5839,7 +5800,6 @@
     }
 
     case Token::INSTANCEOF: {
-      VirtualFrame::SpilledScope scope(frame_);
       Load(left);
       Load(right);
       InstanceofStub stub;
@@ -5937,10 +5897,15 @@
 };
 
 
+// Takes key and receiver in r0 and r1 or vice versa.  Returns result
+// in r0.
 void DeferredReferenceGetKeyedValue::Generate() {
   ASSERT((key_.is(r0) && receiver_.is(r1)) ||
          (key_.is(r1) && receiver_.is(r0)));
 
+  VirtualFrame copied_frame(*frame_state()->frame());
+  copied_frame.SpillAll();
+
   Register scratch1 = VirtualFrame::scratch0();
   Register scratch2 = VirtualFrame::scratch1();
   __ DecrementCounter(&Counters::keyed_load_inline, 1, scratch1, scratch2);
@@ -5961,6 +5926,13 @@
     // keyed load has been inlined.
     __ nop(PROPERTY_ACCESS_INLINED);
 
+    // Now go back to the frame that we entered with.  This will not overwrite
+    // the receiver or key registers since they were not in use when we came
+    // in.  The instructions emitted by this merge are skipped over by the
+    // inline load patching mechanism when looking for the branch instruction
+    // that tells it where the code to patch is.
+    copied_frame.MergeTo(frame_state()->frame());
+
     // Block the constant pool for one more instruction after leaving this
     // constant pool block scope to include the branch instruction ending the
     // deferred code.
@@ -6114,7 +6086,6 @@
     bool key_is_known_smi = frame_->KnownSmiAt(0);
     Register key = frame_->PopToRegister();
     Register receiver = frame_->PopToRegister(key);
-    VirtualFrame::SpilledScope spilled(frame_);
 
     // The deferred code expects key and receiver in registers.
     DeferredReferenceGetKeyedValue* deferred =
@@ -6152,10 +6123,12 @@
       // Get the elements array from the receiver and check that it
       // is not a dictionary.
       __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
-      __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
-      __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
-      __ cmp(scratch2, ip);
-      deferred->Branch(ne);
+      if (FLAG_debug_code) {
+        __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
+        __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+        __ cmp(scratch2, ip);
+        __ Assert(eq, "JSObject with fast elements map has slow elements");
+      }
 
       // Check that key is within bounds. Use unsigned comparison to handle
       // negative keys.
@@ -6176,7 +6149,7 @@
 
       __ mov(r0, scratch1);
       // Make sure that the expected number of instructions are generated.
-      ASSERT_EQ(kInlinedKeyedLoadInstructionsAfterPatch,
+      ASSERT_EQ(GetInlinedKeyedLoadInstructionsAfterPatch(),
                 masm_->InstructionsGeneratedSince(&check_inlined_codesize));
     }
 
@@ -6204,9 +6177,9 @@
     // Load the value, key and receiver from the stack.
     Register value = frame_->PopToRegister();
     Register key = frame_->PopToRegister(value);
+    VirtualFrame::SpilledScope spilled(frame_);
     Register receiver = r2;
     frame_->EmitPop(receiver);
-    VirtualFrame::SpilledScope spilled(frame_);
 
     // The deferred code expects value, key and receiver in registers.
     DeferredReferenceSetKeyedValue* deferred =
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index be4d556..925d267 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -276,7 +276,9 @@
   static int InlineRuntimeCallArgumentsCount(Handle<String> name);
 
   // Constants related to patching of inlined load/store.
-  static const int kInlinedKeyedLoadInstructionsAfterPatch = 17;
+  static int GetInlinedKeyedLoadInstructionsAfterPatch() {
+    return FLAG_debug_code ? 27 : 13;
+  }
   static const int kInlinedKeyedStoreInstructionsAfterPatch = 5;
 
  private:
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index c6de4d8..134dfa3 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -47,71 +47,97 @@
 
 #define __ ACCESS_MASM(masm)
 
+
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
+                                            Register type,
+                                            Label* global_object) {
+  // Register usage:
+  //   type: holds the receiver instance type on entry.
+  __ cmp(type, Operand(JS_GLOBAL_OBJECT_TYPE));
+  __ b(eq, global_object);
+  __ cmp(type, Operand(JS_BUILTINS_OBJECT_TYPE));
+  __ b(eq, global_object);
+  __ cmp(type, Operand(JS_GLOBAL_PROXY_TYPE));
+  __ b(eq, global_object);
+}
+
+
+// Generated code falls through if the receiver is a regular non-global
+// JS object with slow properties and no interceptors.
+static void GenerateDictionaryLoadReceiverCheck(MacroAssembler* masm,
+                                                Register receiver,
+                                                Register elements,
+                                                Register t0,
+                                                Register t1,
+                                                Label* miss) {
+  // Register usage:
+  //   receiver: holds the receiver on entry and is unchanged.
+  //   elements: holds the property dictionary on fall through.
+  // Scratch registers:
+  //   t0: used to hold the receiver map.
+  //   t1: used to hold the receiver instance type, receiver bit mask and
+  //       elements map.
+
+  // Check that the receiver isn't a smi.
+  __ tst(receiver, Operand(kSmiTagMask));
+  __ b(eq, miss);
+
+  // Check that the receiver is a valid JS object.
+  __ CompareObjectType(receiver, t0, t1, FIRST_JS_OBJECT_TYPE);
+  __ b(lt, miss);
+
+  // If this assert fails, we have to check upper bound too.
+  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+
+  GenerateGlobalInstanceTypeCheck(masm, t1, miss);
+
+  // Check that the receiver does not require access checks or interceptors.
+  __ ldrb(t1, FieldMemOperand(t0, Map::kBitFieldOffset));
+  __ tst(t1, Operand((1 << Map::kIsAccessCheckNeeded) |
+                     (1 << Map::kHasNamedInterceptor)));
+  __ b(nz, miss);
+
+  __ ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  __ ldr(t1, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+  __ cmp(t1, ip);
+  __ b(nz, miss);
+}
+
+
 // Helper function used from LoadIC/CallIC GenerateNormal.
-// receiver: Receiver. It is not clobbered if a jump to the miss label is
-//           done
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+//           label is done.
 // name:     Property name. It is not clobbered if a jump to the miss label is
 //           done
 // result:   Register for the result. It is only updated if a jump to the miss
-//           label is not done. Can be the same as receiver or name clobbering
+//           label is not done. Can be the same as elements or name clobbering
 //           one of these in the case of not jumping to the miss label.
-// The three scratch registers need to be different from the receiver, name and
+// The two scratch registers need to be different from elements, name and
 // result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
 static void GenerateDictionaryLoad(MacroAssembler* masm,
                                    Label* miss,
-                                   Register receiver,
+                                   Register elements,
                                    Register name,
                                    Register result,
                                    Register scratch1,
-                                   Register scratch2,
-                                   Register scratch3,
-                                   DictionaryCheck check_dictionary) {
+                                   Register scratch2) {
   // Main use of the scratch registers.
-  // scratch1: Used to hold the property dictionary.
-  // scratch2: Used as temporary and to hold the capacity of the property
+  // scratch1: Used as temporary and to hold the capacity of the property
   //           dictionary.
-  // scratch3: Used as temporary.
+  // scratch2: Used as temporary.
 
   Label done;
 
-  // Check for the absence of an interceptor.
-  // Load the map into scratch1.
-  __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kMapOffset));
-
-  // Bail out if the receiver has a named interceptor.
-  __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
-  __ tst(scratch2, Operand(1 << Map::kHasNamedInterceptor));
-  __ b(nz, miss);
-
-  // Bail out if we have a JS global proxy object.
-  __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
-  __ cmp(scratch2, Operand(JS_GLOBAL_PROXY_TYPE));
-  __ b(eq, miss);
-
-  // Possible work-around for http://crbug.com/16276.
-  // See also: http://codereview.chromium.org/155418.
-  __ cmp(scratch2, Operand(JS_GLOBAL_OBJECT_TYPE));
-  __ b(eq, miss);
-  __ cmp(scratch2, Operand(JS_BUILTINS_OBJECT_TYPE));
-  __ b(eq, miss);
-
-  // Load the properties array.
-  __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
-  // Check that the properties array is a dictionary.
-  if (check_dictionary == CHECK_DICTIONARY) {
-    __ ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset));
-    __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
-    __ cmp(scratch2, ip);
-    __ b(ne, miss);
-  }
-
   // Compute the capacity mask.
   const int kCapacityOffset = StringDictionary::kHeaderSize +
       StringDictionary::kCapacityIndex * kPointerSize;
-  __ ldr(scratch2, FieldMemOperand(scratch1, kCapacityOffset));
-  __ mov(scratch2, Operand(scratch2, ASR, kSmiTagSize));  // convert smi to int
-  __ sub(scratch2, scratch2, Operand(1));
+  __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
+  __ mov(scratch1, Operand(scratch1, ASR, kSmiTagSize));  // convert smi to int
+  __ sub(scratch1, scratch1, Operand(1));
 
   const int kElementsStartOffset = StringDictionary::kHeaderSize +
       StringDictionary::kElementsStartIndex * kPointerSize;
@@ -122,26 +148,26 @@
   static const int kProbes = 4;
   for (int i = 0; i < kProbes; i++) {
     // Compute the masked index: (hash + i + i * i) & mask.
-    __ ldr(scratch3, FieldMemOperand(name, String::kHashFieldOffset));
+    __ ldr(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
     if (i > 0) {
       // Add the probe offset (i + i * i) left shifted to avoid right shifting
       // the hash in a separate instruction. The value hash + i + i * i is right
       // shifted in the following and instruction.
       ASSERT(StringDictionary::GetProbeOffset(i) <
              1 << (32 - String::kHashFieldOffset));
-      __ add(scratch3, scratch3, Operand(
+      __ add(scratch2, scratch2, Operand(
           StringDictionary::GetProbeOffset(i) << String::kHashShift));
     }
-    __ and_(scratch3, scratch2, Operand(scratch3, LSR, String::kHashShift));
+    __ and_(scratch2, scratch1, Operand(scratch2, LSR, String::kHashShift));
 
     // Scale the index by multiplying by the element size.
     ASSERT(StringDictionary::kEntrySize == 3);
-    // scratch3 = scratch3 * 3.
-    __ add(scratch3, scratch3, Operand(scratch3, LSL, 1));
+    // scratch2 = scratch2 * 3.
+    __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
 
     // Check if the key is identical to the name.
-    __ add(scratch3, scratch1, Operand(scratch3, LSL, 2));
-    __ ldr(ip, FieldMemOperand(scratch3, kElementsStartOffset));
+    __ add(scratch2, elements, Operand(scratch2, LSL, 2));
+    __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
     __ cmp(name, Operand(ip));
     if (i != kProbes - 1) {
       __ b(eq, &done);
@@ -151,15 +177,15 @@
   }
 
   // Check that the value is a normal property.
-  __ bind(&done);  // scratch3 == scratch1 + 4 * index
-  __ ldr(scratch2,
-         FieldMemOperand(scratch3, kElementsStartOffset + 2 * kPointerSize));
-  __ tst(scratch2, Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
+  __ bind(&done);  // scratch2 == elements + 4 * index
+  __ ldr(scratch1,
+         FieldMemOperand(scratch2, kElementsStartOffset + 2 * kPointerSize));
+  __ tst(scratch1, Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
   __ b(ne, miss);
 
   // Get the value at the masked, scaled index and return.
   __ ldr(result,
-         FieldMemOperand(scratch3, kElementsStartOffset + 1 * kPointerSize));
+         FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
 }
 
 
@@ -310,6 +336,7 @@
                                            Register receiver,
                                            Register scratch1,
                                            Register scratch2,
+                                           int interceptor_bit,
                                            Label* slow) {
   // Check that the object isn't a smi.
   __ BranchOnSmi(receiver, slow);
@@ -317,8 +344,9 @@
   __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
   // Check bit field.
   __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
-  __ tst(scratch2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
-  __ b(ne, slow);
+  __ tst(scratch2,
+         Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
+  __ b(nz, slow);
   // Check that the object is some kind of JS object EXCEPT JS Value type.
   // In the case that the object is a value-wrapper object,
   // we enter the runtime system to make sure that indexing into string
@@ -502,13 +530,11 @@
 }
 
 
-static void GenerateNormalHelper(MacroAssembler* masm,
-                                 int argc,
-                                 bool is_global_object,
-                                 Label* miss,
-                                 Register scratch) {
-  // Search dictionary - put result in register r1.
-  GenerateDictionaryLoad(masm, miss, r1, r2, r1, r0, r3, r4, CHECK_DICTIONARY);
+static void GenerateFunctionTailCall(MacroAssembler* masm,
+                                     int argc,
+                                     Label* miss,
+                                     Register scratch) {
+  // r1: function
 
   // Check that the value isn't a smi.
   __ tst(r1, Operand(kSmiTagMask));
@@ -518,13 +544,6 @@
   __ CompareObjectType(r1, scratch, scratch, JS_FUNCTION_TYPE);
   __ b(ne, miss);
 
-  // Patch the receiver with the global proxy if necessary.
-  if (is_global_object) {
-    __ ldr(r0, MemOperand(sp, argc * kPointerSize));
-    __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
-    __ str(r0, MemOperand(sp, argc * kPointerSize));
-  }
-
   // Invoke the function.
   ParameterCount actual(argc);
   __ InvokeFunction(r1, actual, JUMP_FUNCTION);
@@ -536,53 +555,18 @@
   //  -- r2    : name
   //  -- lr    : return address
   // -----------------------------------
-  Label miss, global_object, non_global_object;
+  Label miss;
 
   // Get the receiver of the function from the stack into r1.
   __ ldr(r1, MemOperand(sp, argc * kPointerSize));
 
-  // Check that the receiver isn't a smi.
-  __ tst(r1, Operand(kSmiTagMask));
-  __ b(eq, &miss);
+  GenerateDictionaryLoadReceiverCheck(masm, r1, r0, r3, r4, &miss);
 
-  // Check that the receiver is a valid JS object.  Put the map in r3.
-  __ CompareObjectType(r1, r3, r0, FIRST_JS_OBJECT_TYPE);
-  __ b(lt, &miss);
+  // r0: elements
+  // Search the dictionary - put result in register r1.
+  GenerateDictionaryLoad(masm, &miss, r0, r2, r1, r3, r4);
 
-  // If this assert fails, we have to check upper bound too.
-  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-
-  // Check for access to global object.
-  __ cmp(r0, Operand(JS_GLOBAL_OBJECT_TYPE));
-  __ b(eq, &global_object);
-  __ cmp(r0, Operand(JS_BUILTINS_OBJECT_TYPE));
-  __ b(ne, &non_global_object);
-
-  // Accessing global object: Load and invoke.
-  __ bind(&global_object);
-  // Check that the global object does not require access checks.
-  __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
-  __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
-  __ b(ne, &miss);
-  GenerateNormalHelper(masm, argc, true, &miss, r4);
-
-  // Accessing non-global object: Check for access to global proxy.
-  Label global_proxy, invoke;
-  __ bind(&non_global_object);
-  __ cmp(r0, Operand(JS_GLOBAL_PROXY_TYPE));
-  __ b(eq, &global_proxy);
-  // Check that the non-global, non-global-proxy object does not
-  // require access checks.
-  __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
-  __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
-  __ b(ne, &miss);
-  __ bind(&invoke);
-  GenerateNormalHelper(masm, argc, false, &miss, r4);
-
-  // Global object access: Check access rights.
-  __ bind(&global_proxy);
-  __ CheckAccessGlobalProxy(r1, r0, &miss);
-  __ b(&invoke);
+  GenerateFunctionTailCall(masm, argc, &miss, r4);
 
   __ bind(&miss);
 }
@@ -594,6 +578,12 @@
   //  -- lr    : return address
   // -----------------------------------
 
+  if (id == IC::kCallIC_Miss) {
+    __ IncrementCounter(&Counters::call_miss, 1, r3, r4);
+  } else {
+    __ IncrementCounter(&Counters::keyed_call_miss, 1, r3, r4);
+  }
+
   // Get the receiver of the function from the stack.
   __ ldr(r3, MemOperand(sp, argc * kPointerSize));
 
@@ -614,23 +604,26 @@
   __ LeaveInternalFrame();
 
   // Check if the receiver is a global object of some sort.
-  Label invoke, global;
-  __ ldr(r2, MemOperand(sp, argc * kPointerSize));  // receiver
-  __ tst(r2, Operand(kSmiTagMask));
-  __ b(eq, &invoke);
-  __ CompareObjectType(r2, r3, r3, JS_GLOBAL_OBJECT_TYPE);
-  __ b(eq, &global);
-  __ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE));
-  __ b(ne, &invoke);
+  // This can happen only for regular CallIC but not KeyedCallIC.
+  if (id == IC::kCallIC_Miss) {
+    Label invoke, global;
+    __ ldr(r2, MemOperand(sp, argc * kPointerSize));  // receiver
+    __ tst(r2, Operand(kSmiTagMask));
+    __ b(eq, &invoke);
+    __ CompareObjectType(r2, r3, r3, JS_GLOBAL_OBJECT_TYPE);
+    __ b(eq, &global);
+    __ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE));
+    __ b(ne, &invoke);
 
-  // Patch the receiver on the stack.
-  __ bind(&global);
-  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
-  __ str(r2, MemOperand(sp, argc * kPointerSize));
+    // Patch the receiver on the stack.
+    __ bind(&global);
+    __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
+    __ str(r2, MemOperand(sp, argc * kPointerSize));
+    __ bind(&invoke);
+  }
 
   // Invoke the function.
   ParameterCount actual(argc);
-  __ bind(&invoke);
   __ InvokeFunction(r1, actual, JUMP_FUNCTION);
 }
 
@@ -698,7 +691,8 @@
   // Now the key is known to be a smi. This place is also jumped to from below
   // where a numeric string is converted to a smi.
 
-  GenerateKeyedLoadReceiverCheck(masm, r1, r0, r3, &slow_call);
+  GenerateKeyedLoadReceiverCheck(
+      masm, r1, r0, r3, Map::kHasIndexedInterceptor, &slow_call);
 
   GenerateFastArrayLoad(
       masm, r1, r2, r4, r3, r0, r1, &check_number_dictionary, &slow_load);
@@ -708,14 +702,7 @@
   // receiver in r1 is not used after this point.
   // r2: key
   // r1: function
-
-  // Check that the value in r1 is a JSFunction.
-  __ BranchOnSmi(r1, &slow_call);
-  __ CompareObjectType(r1, r0, r0, JS_FUNCTION_TYPE);
-  __ b(ne, &slow_call);
-  // Invoke the function.
-  ParameterCount actual(argc);
-  __ InvokeFunction(r1, actual, JUMP_FUNCTION);
+  GenerateFunctionTailCall(masm, argc, &slow_call, r0);
 
   __ bind(&check_number_dictionary);
   // r2: key
@@ -751,16 +738,16 @@
   // If the receiver is a regular JS object with slow properties then do
   // a quick inline probe of the receiver's dictionary.
   // Otherwise do the monomorphic cache probe.
-  GenerateKeyedLoadReceiverCheck(masm, r1, r0, r3, &lookup_monomorphic_cache);
+  GenerateKeyedLoadReceiverCheck(
+      masm, r1, r0, r3, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
 
-  __ ldr(r3, FieldMemOperand(r1, JSObject::kPropertiesOffset));
-  __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ ldr(r0, FieldMemOperand(r1, JSObject::kPropertiesOffset));
+  __ ldr(r3, FieldMemOperand(r0, HeapObject::kMapOffset));
   __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
   __ cmp(r3, ip);
   __ b(ne, &lookup_monomorphic_cache);
 
-  GenerateDictionaryLoad(
-      masm, &slow_load, r1, r2, r1, r0, r3, r4, DICTIONARY_CHECK_DONE);
+  GenerateDictionaryLoad(masm, &slow_load, r0, r2, r1, r3, r4);
   __ IncrementCounter(&Counters::keyed_call_generic_lookup_dict, 1, r0, r3);
   __ jmp(&do_call);
 
@@ -826,36 +813,14 @@
   //  -- r0    : receiver
   //  -- sp[0] : receiver
   // -----------------------------------
-  Label miss, probe, global;
+  Label miss;
 
-  // Check that the receiver isn't a smi.
-  __ tst(r0, Operand(kSmiTagMask));
-  __ b(eq, &miss);
+  GenerateDictionaryLoadReceiverCheck(masm, r0, r1, r3, r4, &miss);
 
-  // Check that the receiver is a valid JS object.  Put the map in r3.
-  __ CompareObjectType(r0, r3, r1, FIRST_JS_OBJECT_TYPE);
-  __ b(lt, &miss);
-  // If this assert fails, we have to check upper bound too.
-  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-
-  // Check for access to global object (unlikely).
-  __ cmp(r1, Operand(JS_GLOBAL_PROXY_TYPE));
-  __ b(eq, &global);
-
-  // Check for non-global object that requires access check.
-  __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
-  __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
-  __ b(ne, &miss);
-
-  __ bind(&probe);
-  GenerateDictionaryLoad(masm, &miss, r0, r2, r0, r1, r3, r4, CHECK_DICTIONARY);
+  // r1: elements
+  GenerateDictionaryLoad(masm, &miss, r1, r2, r0, r3, r4);
   __ Ret();
 
-  // Global object access: Check access rights.
-  __ bind(&global);
-  __ CheckAccessGlobalProxy(r0, r1, &miss);
-  __ b(&probe);
-
   // Cache miss: Jump to runtime.
   __ bind(&miss);
   GenerateMiss(masm);
@@ -870,6 +835,8 @@
   //  -- sp[0] : receiver
   // -----------------------------------
 
+  __ IncrementCounter(&Counters::load_miss, 1, r3, r4);
+
   __ mov(r3, r0);
   __ Push(r3, r2);
 
@@ -963,7 +930,7 @@
   // Patch the map check.
   Address ldr_map_instr_address =
       inline_end_address -
-      (CodeGenerator::kInlinedKeyedLoadInstructionsAfterPatch *
+      (CodeGenerator::GetInlinedKeyedLoadInstructionsAfterPatch() *
       Assembler::kInstrSize);
   Assembler::set_target_address_at(ldr_map_instr_address,
                                    reinterpret_cast<Address>(map));
@@ -1013,6 +980,8 @@
   //  -- r1     : receiver
   // -----------------------------------
 
+  __ IncrementCounter(&Counters::keyed_load_miss, 1, r3, r4);
+
   __ Push(r1, r0);
 
   ExternalReference ref = ExternalReference(IC_Utility(kKeyedLoadIC_Miss));
@@ -1045,14 +1014,15 @@
   Register key = r0;
   Register receiver = r1;
 
-  GenerateKeyedLoadReceiverCheck(masm, receiver, r2, r3, &slow);
-
   // Check that the key is a smi.
   __ BranchOnNotSmi(key, &check_string);
   __ bind(&index_smi);
   // Now the key is known to be a smi. This place is also jumped to from below
   // where a numeric string is converted to a smi.
 
+  GenerateKeyedLoadReceiverCheck(
+      masm, receiver, r2, r3, Map::kHasIndexedInterceptor, &slow);
+
   GenerateFastArrayLoad(
       masm, receiver, key, r4, r3, r2, r0, &check_pixel_array, &slow);
   __ IncrementCounter(&Counters::keyed_load_generic_smi, 1, r2, r3);
@@ -1095,12 +1065,15 @@
   __ bind(&check_string);
   GenerateKeyStringCheck(masm, key, r2, r3, &index_string, &slow);
 
+  GenerateKeyedLoadReceiverCheck(
+      masm, receiver, r2, r3, Map::kHasNamedInterceptor, &slow);
+
   // If the receiver is a fast-case object, check the keyed lookup
   // cache. Otherwise probe the dictionary.
   __ ldr(r3, FieldMemOperand(r1, JSObject::kPropertiesOffset));
-  __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
   __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
-  __ cmp(r3, ip);
+  __ cmp(r4, ip);
   __ b(eq, &probe_dictionary);
 
   // Load the map of the receiver, compute the keyed lookup cache hash
@@ -1148,9 +1121,14 @@
   // Do a quick inline probe of the receiver's dictionary, if it
   // exists.
   __ bind(&probe_dictionary);
+  // r1: receiver
+  // r0: key
+  // r3: elements
+  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+  __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+  GenerateGlobalInstanceTypeCheck(masm, r2, &slow);
   // Load the property to r0.
-  GenerateDictionaryLoad(
-      masm, &slow, r1, r0, r0, r2, r3, r4, DICTIONARY_CHECK_DONE);
+  GenerateDictionaryLoad(masm, &slow, r3, r0, r0, r2, r4);
   __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1, r2, r3);
   __ Ret();
 
diff --git a/src/arm/jump-target-arm.cc b/src/arm/jump-target-arm.cc
index 86198fb..c6eb628 100644
--- a/src/arm/jump-target-arm.cc
+++ b/src/arm/jump-target-arm.cc
@@ -61,9 +61,17 @@
   } else {
     // Clone the current frame to use as the expected one at the target.
     set_entry_frame(cgen()->frame());
+    // Zap the fall-through frame since the jump was unconditional.
     RegisterFile empty;
     cgen()->SetFrame(NULL, &empty);
   }
+  if (entry_label_.is_bound()) {
+    // You can't jump backwards to an already bound label unless you admitted
+    // up front that this was a bidirectional jump target.  Bidirectional jump
+    // targets will zap their type info when bound in case some later virtual
+    // frame with less precise type info branches to them.
+    ASSERT(direction_ != FORWARD_ONLY);
+  }
   __ jmp(&entry_label_);
 }
 
@@ -83,6 +91,13 @@
     // Clone the current frame to use as the expected one at the target.
     set_entry_frame(cgen()->frame());
   }
+  if (entry_label_.is_bound()) {
+    // You can't branch backwards to an already bound label unless you admitted
+    // up front that this was a bidirectional jump target.  Bidirectional jump
+    // targets will zap their type info when bound in case some later virtual
+    // frame with less precise type info branches to them.
+    ASSERT(direction_ != FORWARD_ONLY);
+  }
   __ b(cc, &entry_label_);
   if (cc == al) {
     cgen()->DeleteFrame();
@@ -121,6 +136,7 @@
   ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
 
   if (cgen()->has_valid_frame()) {
+    if (direction_ != FORWARD_ONLY) cgen()->frame()->ForgetTypeInfo();
     // If there is a current frame we can use it on the fall through.
     if (!entry_frame_set_) {
       entry_frame_ = *cgen()->frame();
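
The comments added in this file encode an invariant worth stating once in plain code: a label may only be reached by a backward jump if it was declared bidirectional up front, and a bidirectional target has to drop its cached type information when bound, because later jumps may arrive with less precise frames. A minimal standalone sketch of that rule (illustrative names only, not the real JumpTarget/VirtualFrame classes):

    #include <cassert>

    // Illustrative only: a jump target that remembers its declared
    // directionality and forgets type knowledge whenever backward jumps
    // to it are possible.
    class SketchJumpTarget {
     public:
      enum Directionality { FORWARD_ONLY, BIDIRECTIONAL };

      explicit SketchJumpTarget(Directionality direction)
          : direction_(direction), bound_(false), tos_known_smi_(false) {}

      void Bind(bool tos_known_smi) {
        bound_ = true;
        // A later backward branch may carry less precise type info, so only
        // a forward-only target may keep what it knows at bind time.
        tos_known_smi_ = (direction_ == FORWARD_ONLY) && tos_known_smi;
      }

      void Jump() {
        // Jumping to an already bound label is a backward jump; that is only
        // legal if the target admitted up front that it is bidirectional.
        if (bound_) assert(direction_ == BIDIRECTIONAL);
      }

     private:
      Directionality direction_;
      bool bound_;
      bool tos_known_smi_;  // stand-in for the frame's cached type info
    };
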
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 29e168c..df7565f 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1548,6 +1548,8 @@
 
 
 void MacroAssembler::Abort(const char* msg) {
+  Label abort_start;
+  bind(&abort_start);
   // We want to pass the msg string like a smi to avoid GC
   // problems, however msg is not guaranteed to be aligned
   // properly. Instead, we pass an aligned pointer that is
@@ -1571,6 +1573,17 @@
   push(r0);
   CallRuntime(Runtime::kAbort, 2);
   // will not return here
+  if (is_const_pool_blocked()) {
+    // If the calling code cares about the exact number of
+    // instructions generated, we insert padding here to keep the size
+    // of the Abort macro constant.
+    static const int kExpectedAbortInstructions = 10;
+    int abort_instructions = InstructionsGeneratedSince(&abort_start);
+    ASSERT(abort_instructions <= kExpectedAbortInstructions);
+    while (abort_instructions++ < kExpectedAbortInstructions) {
+      nop();
+    }
+  }
 }
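
The padding added to Abort only matters while the constant pool is blocked, i.e. when a caller is counting emitted instructions so it can patch them later. A rough sketch of the idea, detached from the MacroAssembler API (the names and the instruction budget here are made up):

    #include <cassert>
    #include <vector>

    // Illustrative only: emit a body, then pad with nops so the total number
    // of "instructions" is always the same, as code-patching callers expect.
    void EmitFixedSize(std::vector<int>* code, const std::vector<int>& body) {
      static const size_t kExpectedInstructions = 10;
      const size_t start = code->size();
      code->insert(code->end(), body.begin(), body.end());
      size_t emitted = code->size() - start;
      assert(emitted <= kExpectedInstructions);
      while (emitted++ < kExpectedInstructions) {
        code->push_back(0);  // stand-in for a nop
      }
    }
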
 
 
diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc
index 8b90f42..2ddfd0f 100644
--- a/src/arm/virtual-frame-arm.cc
+++ b/src/arm/virtual-frame-arm.cc
@@ -482,6 +482,32 @@
 }
 
 
+void VirtualFrame::SpillAllButCopyTOSToR1() {
+  switch (top_of_stack_state_) {
+    case NO_TOS_REGISTERS:
+      __ ldr(r1, MemOperand(sp, 0));
+      break;
+    case R0_TOS:
+      __ push(r0);
+      __ mov(r1, r0);
+      break;
+    case R1_TOS:
+      __ push(r1);
+      break;
+    case R0_R1_TOS:
+      __ Push(r1, r0);
+      __ mov(r1, r0);
+      break;
+    case R1_R0_TOS:
+      __ Push(r0, r1);
+      break;
+    default:
+      UNREACHABLE();
+  }
+  top_of_stack_state_ = NO_TOS_REGISTERS;
+}
+
+
 void VirtualFrame::SpillAllButCopyTOSToR1R0() {
   switch (top_of_stack_state_) {
     case NO_TOS_REGISTERS:
@@ -524,6 +550,24 @@
 }
 
 
+Register VirtualFrame::Peek2() {
+  AssertIsNotSpilled();
+  switch (top_of_stack_state_) {
+    case NO_TOS_REGISTERS:
+    case R0_TOS:
+    case R0_R1_TOS:
+      MergeTOSTo(R0_R1_TOS);
+      return r1;
+    case R1_TOS:
+    case R1_R0_TOS:
+      MergeTOSTo(R1_R0_TOS);
+      return r0;
+  }
+  UNREACHABLE();
+  return no_reg;
+}
+
+
 void VirtualFrame::Dup() {
   if (SpilledScope::is_spilled()) {
     __ ldr(ip, MemOperand(sp, 0));
diff --git a/src/arm/virtual-frame-arm.h b/src/arm/virtual-frame-arm.h
index d8dc5c6..8eedf22 100644
--- a/src/arm/virtual-frame-arm.h
+++ b/src/arm/virtual-frame-arm.h
@@ -189,12 +189,15 @@
     return (tos_known_smi_map_ & (~other->tos_known_smi_map_)) == 0;
   }
 
+  inline void ForgetTypeInfo() {
+    tos_known_smi_map_ = 0;
+  }
+
   // Detach a frame from its code generator, perhaps temporarily.  This
   // tells the register allocator that it is free to use frame-internal
   // registers.  Used when the code generator's frame is switched from this
   // one to NULL by an unconditional jump.
   void DetachFromCodeGenerator() {
-    AssertIsSpilled();
   }
 
   // (Re)attach a frame to its code generator.  This informs the register
@@ -202,7 +205,6 @@
   // Used when a code generator's frame is switched from NULL to this one by
   // binding a label.
   void AttachToCodeGenerator() {
-    AssertIsSpilled();
   }
 
   // Emit code for the physical JS entry and exit frame sequences.  After
@@ -330,6 +332,10 @@
   // must be copied to a scratch register before modification.
   Register Peek();
 
+  // Look at the value beneath the top of the stack.  The register returned is
+  // aliased and must be copied to a scratch register before modification.
+  Register Peek2();
+
   // Duplicate the top of stack.
   void Dup();
 
@@ -339,6 +345,9 @@
   // Flushes all registers, but it puts a copy of the top-of-stack in r0.
   void SpillAllButCopyTOSToR0();
 
+  // Flushes all registers, but it puts a copy of the top-of-stack in r1.
+  void SpillAllButCopyTOSToR1();
+
   // Flushes all registers, but it puts a copy of the top-of-stack in r1
   // and the next value on the stack in r0.
   void SpillAllButCopyTOSToR1R0();
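
The new SpillAllButCopyTOSToR1 and Peek2 lean on the frame's five top-of-stack states (nothing cached, r0, r1, r0-over-r1, r1-over-r0). A small standalone model of the spill operation may make the switch in virtual-frame-arm.cc easier to follow (illustrative, not the VirtualFrame interface; memory.back() plays the role of the in-memory top and is assumed to exist):

    #include <vector>

    // Illustrative only: a frame that may cache the top one or two stack
    // values in registers; spilling flushes them to memory but keeps a copy
    // of the top value in r1.
    struct SketchFrame {
      enum TosState { NO_TOS_REGISTERS, R0_TOS, R1_TOS, R0_R1_TOS, R1_R0_TOS };

      std::vector<int> memory;  // slots already in memory, back() is the top
      int r0 = 0, r1 = 0;       // register-cached stack slots
      TosState state = NO_TOS_REGISTERS;

      void SpillAllButCopyTosToR1() {
        switch (state) {
          case NO_TOS_REGISTERS:           // top already in memory
            r1 = memory.back();
            break;
          case R0_TOS:                     // r0 holds the top
            memory.push_back(r0);
            r1 = r0;
            break;
          case R1_TOS:                     // r1 holds the top
            memory.push_back(r1);
            break;
          case R0_R1_TOS:                  // r0 on top, r1 beneath
            memory.push_back(r1);
            memory.push_back(r0);
            r1 = r0;
            break;
          case R1_R0_TOS:                  // r1 on top, r0 beneath
            memory.push_back(r0);
            memory.push_back(r1);
            break;
        }
        state = NO_TOS_REGISTERS;
      }
    };
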
diff --git a/src/array.js b/src/array.js
index 216c03b..f3c0697 100644
--- a/src/array.js
+++ b/src/array.js
@@ -954,7 +954,7 @@
 
 function ArrayIndexOf(element, index) {
   var length = this.length;
-  if (index == null) {
+  if (IS_UNDEFINED(index)) {
     index = 0;
   } else {
     index = TO_INTEGER(index);
@@ -981,7 +981,7 @@
 
 function ArrayLastIndexOf(element, index) {
   var length = this.length;
-  if (index == null) {
+  if (%_ArgumentsLength() < 2) {
     index = length - 1;
   } else {
     index = TO_INTEGER(index);
diff --git a/src/ast-inl.h b/src/ast-inl.h
index 2b5d7c4..717f68d 100644
--- a/src/ast-inl.h
+++ b/src/ast-inl.h
@@ -45,7 +45,9 @@
 
 
 IterationStatement::IterationStatement(ZoneStringList* labels)
-    : BreakableStatement(labels, TARGET_FOR_ANONYMOUS), body_(NULL) {
+    : BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
+      body_(NULL),
+      continue_target_(JumpTarget::BIDIRECTIONAL) {
 }
 
 
diff --git a/src/builtins.cc b/src/builtins.cc
index 7116dc9..c8d4e09 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -195,6 +195,7 @@
   }
 
   // 'array' now contains the JSArray we should initialize.
+  ASSERT(array->HasFastElements());
 
   // Optimize the case where there is one argument and the argument is a
   // small smi.
diff --git a/src/factory.cc b/src/factory.cc
index 35d3c54..f6b93b0 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -274,11 +274,22 @@
   return copy;
 }
 
+
 Handle<Map> Factory::CopyMapDropTransitions(Handle<Map> src) {
   CALL_HEAP_FUNCTION(src->CopyDropTransitions(), Map);
 }
 
 
+Handle<Map> Factory::GetFastElementsMap(Handle<Map> src) {
+  CALL_HEAP_FUNCTION(src->GetFastElementsMap(), Map);
+}
+
+
+Handle<Map> Factory::GetSlowElementsMap(Handle<Map> src) {
+  CALL_HEAP_FUNCTION(src->GetSlowElementsMap(), Map);
+}
+
+
 Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
   CALL_HEAP_FUNCTION(array->Copy(), FixedArray);
 }
diff --git a/src/factory.h b/src/factory.h
index 8a190fa..b0a0571 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -180,6 +180,10 @@
 
   static Handle<Map> CopyMapDropTransitions(Handle<Map> map);
 
+  static Handle<Map> GetFastElementsMap(Handle<Map> map);
+
+  static Handle<Map> GetSlowElementsMap(Handle<Map> map);
+
   static Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
 
   // Numbers (eg, literals) are pretenured by the parser.
diff --git a/src/heap.cc b/src/heap.cc
index f1ec56c..6ae46f2 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -126,6 +126,12 @@
 int Heap::linear_allocation_scope_depth_ = 0;
 int Heap::contexts_disposed_ = 0;
 
+int Heap::young_survivors_after_last_gc_ = 0;
+int Heap::high_survival_rate_period_length_ = 0;
+double Heap::survival_rate_ = 0;
+Heap::SurvivalRateTrend Heap::previous_survival_rate_trend_ = Heap::STABLE;
+Heap::SurvivalRateTrend Heap::survival_rate_trend_ = Heap::STABLE;
+
 #ifdef DEBUG
 bool Heap::allocation_allowed_ = true;
 
@@ -582,6 +588,29 @@
 }
 #endif
 
+void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
+  double survival_rate =
+      (static_cast<double>(young_survivors_after_last_gc_) * 100) /
+      start_new_space_size;
+
+  if (survival_rate > kYoungSurvivalRateThreshold) {
+    high_survival_rate_period_length_++;
+  } else {
+    high_survival_rate_period_length_ = 0;
+  }
+
+  double survival_rate_diff = survival_rate_ - survival_rate;
+
+  if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
+    set_survival_rate_trend(DECREASING);
+  } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
+    set_survival_rate_trend(INCREASING);
+  } else {
+    set_survival_rate_trend(STABLE);
+  }
+
+  survival_rate_ = survival_rate;
+}
 
 void Heap::PerformGarbageCollection(AllocationSpace space,
                                     GarbageCollector collector,
@@ -604,6 +633,8 @@
 
   EnsureFromSpaceIsCommitted();
 
+  int start_new_space_size = Heap::new_space()->Size();
+
   if (collector == MARK_COMPACTOR) {
     if (FLAG_flush_code) {
       // Flush all potentially unused code.
@@ -613,16 +644,36 @@
     // Perform mark-sweep with optional compaction.
     MarkCompact(tracer);
 
+    bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
+        IsStableOrIncreasingSurvivalTrend();
+
+    UpdateSurvivalRateTrend(start_new_space_size);
+
     int old_gen_size = PromotedSpaceSize();
     old_gen_promotion_limit_ =
         old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
     old_gen_allocation_limit_ =
         old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
+
+    if (high_survival_rate_during_scavenges &&
+        IsStableOrIncreasingSurvivalTrend()) {
+        // Stable high survival rates of young objects both during partial
+        // and full collection indicate that the mutator is either building
+        // or modifying a structure with a long lifetime.
+      // In this case we aggressively raise old generation memory limits to
+      // postpone subsequent mark-sweep collection and thus trade memory
+      // space for the mutation speed.
+      old_gen_promotion_limit_ *= 2;
+      old_gen_allocation_limit_ *= 2;
+    }
+
     old_gen_exhausted_ = false;
   } else {
     tracer_ = tracer;
     Scavenge();
     tracer_ = NULL;
+
+    UpdateSurvivalRateTrend(start_new_space_size);
   }
 
   Counters::objs_since_last_young.Set(0);
@@ -1217,7 +1268,7 @@
   map->set_code_cache(empty_fixed_array());
   map->set_unused_property_fields(0);
   map->set_bit_field(0);
-  map->set_bit_field2(1 << Map::kIsExtensible);
+  map->set_bit_field2((1 << Map::kIsExtensible) | (1 << Map::kHasFastElements));
 
   // If the map object is aligned fill the padding area with Smi 0 objects.
   if (Map::kPadStart < Map::kSize) {
@@ -2545,6 +2596,7 @@
   map->set_inobject_properties(in_object_properties);
   map->set_unused_property_fields(in_object_properties);
   map->set_prototype(prototype);
+  ASSERT(map->has_fast_elements());
 
   // If the function has only simple this property assignments add
   // field descriptors for these to the initial map as the object
@@ -2598,8 +2650,8 @@
   // properly initialized.
   ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
 
-  // Both types of globla objects should be allocated using
-  // AllocateGloblaObject to be properly initialized.
+  // Both types of global objects should be allocated using
+  // AllocateGlobalObject to be properly initialized.
   ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
   ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
 
@@ -2623,6 +2675,7 @@
   InitializeJSObjectFromMap(JSObject::cast(obj),
                             FixedArray::cast(properties),
                             map);
+  ASSERT(JSObject::cast(obj)->HasFastElements());
   return obj;
 }
 
diff --git a/src/heap.h b/src/heap.h
index a8f8c34..df3ba0e 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1005,6 +1005,7 @@
   static void CheckNewSpaceExpansionCriteria();
 
   static inline void IncrementYoungSurvivorsCounter(int survived) {
+    young_survivors_after_last_gc_ = survived;
     survived_since_last_expansion_ += survived;
   }
 
@@ -1272,6 +1273,55 @@
   // be replaced with a lazy compilable version.
   static void FlushCode();
 
+  static void UpdateSurvivalRateTrend(int start_new_space_size);
+
+  enum SurvivalRateTrend { INCREASING, STABLE, DECREASING, FLUCTUATING };
+
+  static const int kYoungSurvivalRateThreshold = 90;
+  static const int kYoungSurvivalRateAllowedDeviation = 15;
+
+  static int young_survivors_after_last_gc_;
+  static int high_survival_rate_period_length_;
+  static double survival_rate_;
+  static SurvivalRateTrend previous_survival_rate_trend_;
+  static SurvivalRateTrend survival_rate_trend_;
+
+  static void set_survival_rate_trend(SurvivalRateTrend survival_rate_trend) {
+    ASSERT(survival_rate_trend != FLUCTUATING);
+    previous_survival_rate_trend_ = survival_rate_trend_;
+    survival_rate_trend_ = survival_rate_trend;
+  }
+
+  static SurvivalRateTrend survival_rate_trend() {
+    if (survival_rate_trend_ == STABLE) {
+      return STABLE;
+    } else if (previous_survival_rate_trend_ == STABLE) {
+      return survival_rate_trend_;
+    } else if (survival_rate_trend_ != previous_survival_rate_trend_) {
+      return FLUCTUATING;
+    } else {
+      return survival_rate_trend_;
+    }
+  }
+
+  static bool IsStableOrIncreasingSurvivalTrend() {
+    switch (survival_rate_trend()) {
+      case STABLE:
+      case INCREASING:
+        return true;
+      default:
+        return false;
+    }
+  }
+
+  static bool IsIncreasingSurvivalTrend() {
+    return survival_rate_trend() == INCREASING;
+  }
+
+  static bool IsHighSurvivalRate() {
+    return high_survival_rate_period_length_ > 0;
+  }
+
   static const int kInitialSymbolTableSize = 2048;
   static const int kInitialEvalCacheSize = 64;
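
Read on its own, the heuristic is: after every collection compute what fraction of new space survived, compare it with the previous rate to classify the trend, and when scavenges keep a high, non-decreasing survival rate across a full collection, double the old-generation limits so the next mark-sweep is postponed. A small self-contained sketch of the classification step follows; it is simplified (the real code also tracks how long the rate has stayed high and whether the trend fluctuates), and only the two numeric constants mirror the patch:

    // Illustrative only: classify the young-generation survival rate trend
    // using the same threshold and allowed deviation the patch introduces.
    enum Trend { INCREASING, STABLE, DECREASING };

    static const double kHighSurvivalRate = 90.0;   // percent of new space
    static const double kAllowedDeviation = 15.0;   // percentage points

    Trend ClassifyTrend(double previous_rate, double current_rate) {
      const double diff = previous_rate - current_rate;
      if (diff > kAllowedDeviation) return DECREASING;
      if (diff < -kAllowedDeviation) return INCREASING;
      return STABLE;
    }

    // Simplified version of the limit-raising decision: survival is high and
    // the trend is stable or increasing, so old-generation promotion and
    // allocation limits would be doubled to delay the next mark-sweep.
    bool ShouldRaiseOldGenLimits(double previous_rate, double current_rate) {
      const Trend trend = ClassifyTrend(previous_rate, current_rate);
      return current_rate > kHighSurvivalRate &&
             (trend == STABLE || trend == INCREASING);
    }
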
 
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 6b07472..c0c5442 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -8853,7 +8853,7 @@
     // Use masm-> here instead of the double underscore macro since extra
     // coverage code can interfere with the patching.
     masm_->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
-              Immediate(Factory::null_value()));
+               Immediate(Factory::null_value()));
     deferred->Branch(not_equal);
 
     // Check that the key is a smi.
@@ -8868,9 +8868,11 @@
     // is not a dictionary.
     __ mov(elements.reg(),
            FieldOperand(receiver.reg(), JSObject::kElementsOffset));
-    __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
-           Immediate(Factory::fixed_array_map()));
-    deferred->Branch(not_equal);
+    if (FLAG_debug_code) {
+      __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
+             Immediate(Factory::fixed_array_map()));
+      __ Assert(equal, "JSObject with fast elements map has slow elements");
+    }
 
     // Check that the key is within bounds.
     __ cmp(key.reg(),
@@ -13293,6 +13295,9 @@
   __ test(edx, Immediate(kSmiTagMask));
   __ j(not_zero, &runtime);
   __ sub(ecx, Operand(edx));
+  __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
+  Label return_eax;
+  __ j(equal, &return_eax);
   // Special handling of sub-strings of length 1 and 2. One character strings
   // are handled in the runtime system (looked up in the single character
   // cache). Two character strings are looked for in the symbol cache.
@@ -13397,6 +13402,8 @@
   // esi: character of sub string start
   StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
   __ mov(esi, edx);  // Restore esi.
+
+  __ bind(&return_eax);
   __ IncrementCounter(&Counters::sub_string_native, 1);
   __ ret(3 * kPointerSize);
 
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index c750444..13173e2 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -2175,7 +2175,7 @@
   // LAST_JS_OBJECT_TYPE.
   ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
   ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
-  __ cmp(ebx, JS_FUNCTION_TYPE);
+  __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
   __ j(equal, &function);
 
   // Check if the constructor in the map is a function.
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index b0c07b7..62f878c 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -45,72 +45,96 @@
 #define __ ACCESS_MASM(masm)
 
 
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
+                                            Register type,
+                                            Label* global_object) {
+  // Register usage:
+  //   type: holds the receiver instance type on entry.
+  __ cmp(type, JS_GLOBAL_OBJECT_TYPE);
+  __ j(equal, global_object, not_taken);
+  __ cmp(type, JS_BUILTINS_OBJECT_TYPE);
+  __ j(equal, global_object, not_taken);
+  __ cmp(type, JS_GLOBAL_PROXY_TYPE);
+  __ j(equal, global_object, not_taken);
+}
+
+
+// Generated code falls through if the receiver is a regular non-global
+// JS object with slow properties and no interceptors.
+static void GenerateDictionaryLoadReceiverCheck(MacroAssembler* masm,
+                                                Register receiver,
+                                                Register r0,
+                                                Register r1,
+                                                Label* miss) {
+  // Register usage:
+  //   receiver: holds the receiver on entry and is unchanged.
+  //   r0: used to hold receiver instance type.
+  //       Holds the property dictionary on fall through.
+  //   r1: used to hold the receiver's map.
+
+  // Check that the receiver isn't a smi.
+  __ test(receiver, Immediate(kSmiTagMask));
+  __ j(zero, miss, not_taken);
+
+  // Check that the receiver is a valid JS object.
+  __ mov(r1, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ movzx_b(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
+  __ cmp(r0, FIRST_JS_OBJECT_TYPE);
+  __ j(below, miss, not_taken);
+
+  // If this assert fails, we have to check upper bound too.
+  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+
+  GenerateGlobalInstanceTypeCheck(masm, r0, miss);
+
+  // Check for non-global object that requires access check.
+  __ test_b(FieldOperand(r1, Map::kBitFieldOffset),
+            (1 << Map::kIsAccessCheckNeeded) |
+            (1 << Map::kHasNamedInterceptor));
+  __ j(not_zero, miss, not_taken);
+
+  __ mov(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
+  __ CheckMap(r0, Factory::hash_table_map(), miss, true);
+}
+
+
 // Helper function used to load a property from a dictionary backing storage.
 // This function may return false negatives, so miss_label
 // must always call a backup property load that is complete.
-// This function is safe to call if the receiver has fast properties,
-// or if name is not a symbol, and will jump to the miss_label in that case.
+// This function is safe to call if name is not a symbol, and will jump to
+// the miss_label in that case.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
 static void GenerateDictionaryLoad(MacroAssembler* masm,
                                    Label* miss_label,
-                                   Register receiver,
+                                   Register elements,
                                    Register name,
                                    Register r0,
                                    Register r1,
-                                   Register r2,
-                                   Register result,
-                                   DictionaryCheck check_dictionary) {
+                                   Register result) {
   // Register use:
   //
-  // name - holds the name of the property and is unchanged.
-  // receiver - holds the receiver and is unchanged.
+  // elements - holds the property dictionary on entry and is unchanged.
+  //
+  // name - holds the name of the property on entry and is unchanged.
+  //
   // Scratch registers:
-  // r0   - used to hold the property dictionary.
   //
-  // r1   - used for the index into the property dictionary
+  // r0   - used for the index into the property dictionary
   //
-  // r2   - used to hold the capacity of the property dictionary.
+  // r1   - used to hold the capacity of the property dictionary.
   //
   // result - holds the result on exit.
 
   Label done;
 
-  // Check for the absence of an interceptor.
-  // Load the map into r0.
-  __ mov(r0, FieldOperand(receiver, JSObject::kMapOffset));
-
-  // Bail out if the receiver has a named interceptor.
-  __ test(FieldOperand(r0, Map::kBitFieldOffset),
-          Immediate(1 << Map::kHasNamedInterceptor));
-  __ j(not_zero, miss_label, not_taken);
-
-  // Bail out if we have a JS global proxy object.
-  __ movzx_b(r0, FieldOperand(r0, Map::kInstanceTypeOffset));
-  __ cmp(r0, JS_GLOBAL_PROXY_TYPE);
-  __ j(equal, miss_label, not_taken);
-
-  // Possible work-around for http://crbug.com/16276.
-  __ cmp(r0, JS_GLOBAL_OBJECT_TYPE);
-  __ j(equal, miss_label, not_taken);
-  __ cmp(r0, JS_BUILTINS_OBJECT_TYPE);
-  __ j(equal, miss_label, not_taken);
-
-  // Load properties array.
-  __ mov(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
-
-  // Check that the properties array is a dictionary.
-  if (check_dictionary == CHECK_DICTIONARY) {
-    __ cmp(FieldOperand(r0, HeapObject::kMapOffset),
-           Immediate(Factory::hash_table_map()));
-    __ j(not_equal, miss_label);
-  }
-
   // Compute the capacity mask.
   const int kCapacityOffset =
       StringDictionary::kHeaderSize +
       StringDictionary::kCapacityIndex * kPointerSize;
-  __ mov(r2, FieldOperand(r0, kCapacityOffset));
-  __ shr(r2, kSmiTagSize);  // convert smi to int
-  __ dec(r2);
+  __ mov(r1, FieldOperand(elements, kCapacityOffset));
+  __ shr(r1, kSmiTagSize);  // convert smi to int
+  __ dec(r1);
 
   // Generate an unrolled loop that performs a few probes before
   // giving up. Measurements done on Gmail indicate that 2 probes
@@ -121,20 +145,20 @@
       StringDictionary::kElementsStartIndex * kPointerSize;
   for (int i = 0; i < kProbes; i++) {
     // Compute the masked index: (hash + i + i * i) & mask.
-    __ mov(r1, FieldOperand(name, String::kHashFieldOffset));
-    __ shr(r1, String::kHashShift);
+    __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
+    __ shr(r0, String::kHashShift);
     if (i > 0) {
-      __ add(Operand(r1), Immediate(StringDictionary::GetProbeOffset(i)));
+      __ add(Operand(r0), Immediate(StringDictionary::GetProbeOffset(i)));
     }
-    __ and_(r1, Operand(r2));
+    __ and_(r0, Operand(r1));
 
     // Scale the index by multiplying by the entry size.
     ASSERT(StringDictionary::kEntrySize == 3);
-    __ lea(r1, Operand(r1, r1, times_2, 0));  // r1 = r1 * 3
+    __ lea(r0, Operand(r0, r0, times_2, 0));  // r0 = r0 * 3
 
     // Check if the key is identical to the name.
-    __ cmp(name,
-           Operand(r0, r1, times_4, kElementsStartOffset - kHeapObjectTag));
+    __ cmp(name, Operand(elements, r0, times_4,
+                         kElementsStartOffset - kHeapObjectTag));
     if (i != kProbes - 1) {
       __ j(equal, &done, taken);
     } else {
@@ -145,13 +169,13 @@
   // Check that the value is a normal property.
   __ bind(&done);
   const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  __ test(Operand(r0, r1, times_4, kDetailsOffset - kHeapObjectTag),
+  __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
           Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
   __ j(not_zero, miss_label, not_taken);
 
   // Get the value at the masked, scaled index.
   const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ mov(result, Operand(r0, r1, times_4, kValueOffset - kHeapObjectTag));
+  __ mov(result, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
 }
 
 
@@ -307,6 +331,7 @@
 static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                            Register receiver,
                                            Register map,
+                                           int interceptor_bit,
                                            Label* slow) {
   // Register use:
   //   receiver - holds the receiver and is unchanged.
@@ -322,7 +347,7 @@
 
   // Check bit field.
   __ test_b(FieldOperand(map, Map::kBitFieldOffset),
-            KeyedLoadIC::kSlowCaseBitFieldMask);
+            (1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit));
   __ j(not_zero, slow, not_taken);
   // Check that the object is some kind of JS object EXCEPT JS Value type.
   // In the case that the object is a value-wrapper object,
@@ -432,8 +457,6 @@
   Label slow, check_string, index_smi, index_string;
   Label check_pixel_array, probe_dictionary, check_number_dictionary;
 
-  GenerateKeyedLoadReceiverCheck(masm, edx, ecx, &slow);
-
   // Check that the key is a smi.
   __ test(eax, Immediate(kSmiTagMask));
   __ j(not_zero, &check_string, not_taken);
@@ -441,6 +464,9 @@
   // Now the key is known to be a smi. This place is also jumped to from
   // where a numeric string is converted to a smi.
 
+  GenerateKeyedLoadReceiverCheck(
+      masm, edx, ecx, Map::kHasIndexedInterceptor, &slow);
+
   GenerateFastArrayLoad(masm,
                         edx,
                         eax,
@@ -503,6 +529,9 @@
   __ bind(&check_string);
   GenerateKeyStringCheck(masm, eax, ecx, ebx, &index_string, &slow);
 
+  GenerateKeyedLoadReceiverCheck(
+      masm, edx, ecx, Map::kHasNamedInterceptor, &slow);
+
   // If the receiver is a fast-case object, check the keyed lookup
   // cache. Otherwise probe the dictionary.
   __ mov(ebx, FieldOperand(edx, JSObject::kPropertiesOffset));
@@ -555,15 +584,12 @@
   // Do a quick inline probe of the receiver's dictionary, if it
   // exists.
   __ bind(&probe_dictionary);
-  GenerateDictionaryLoad(masm,
-                         &slow,
-                         edx,
-                         eax,
-                         ebx,
-                         ecx,
-                         edi,
-                         eax,
-                         DICTIONARY_CHECK_DONE);
+
+  __ mov(ecx, FieldOperand(edx, JSObject::kMapOffset));
+  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+  GenerateGlobalInstanceTypeCheck(masm, ecx, &slow);
+
+  GenerateDictionaryLoad(masm, &slow, ebx, eax, ecx, edi, eax);
   __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
   __ ret(0);
 
@@ -1173,24 +1199,18 @@
 }
 
 
-static void GenerateNormalHelper(MacroAssembler* masm,
-                                 int argc,
-                                 bool is_global_object,
-                                 Label* miss) {
+static void GenerateFunctionTailCall(MacroAssembler* masm,
+                                     int argc,
+                                     Label* miss) {
   // ----------- S t a t e -------------
   //  -- ecx                 : name
-  //  -- edx                 : receiver
+  //  -- edi                 : function
   //  -- esp[0]              : return address
   //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
   //  -- ...
   //  -- esp[(argc + 1) * 4] : receiver
   // -----------------------------------
 
-  // Search dictionary - put result in register edi.
-  __ mov(edi, edx);
-  GenerateDictionaryLoad(
-      masm, miss, edx, ecx, eax, edi, ebx, edi, CHECK_DICTIONARY);
-
   // Check that the result is not a smi.
   __ test(edi, Immediate(kSmiTagMask));
   __ j(zero, miss, not_taken);
@@ -1199,12 +1219,6 @@
   __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
   __ j(not_equal, miss, not_taken);
 
-  // Patch the receiver on stack with the global proxy if necessary.
-  if (is_global_object) {
-    __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
-    __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
-  }
-
   // Invoke the function.
   ParameterCount actual(argc);
   __ InvokeFunction(edi, actual, JUMP_FUNCTION);
@@ -1219,55 +1233,17 @@
   //  -- ...
   //  -- esp[(argc + 1) * 4] : receiver
   // -----------------------------------
-  Label miss, global_object, non_global_object;
+  Label miss;
 
   // Get the receiver of the function from the stack; 1 ~ return address.
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
 
-  // Check that the receiver isn't a smi.
-  __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &miss, not_taken);
+  GenerateDictionaryLoadReceiverCheck(masm, edx, eax, ebx, &miss);
 
-  // Check that the receiver is a valid JS object.
-  __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
-  __ movzx_b(eax, FieldOperand(ebx, Map::kInstanceTypeOffset));
-  __ cmp(eax, FIRST_JS_OBJECT_TYPE);
-  __ j(below, &miss, not_taken);
-
-  // If this assert fails, we have to check upper bound too.
-  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-
-  // Check for access to global object.
-  __ cmp(eax, JS_GLOBAL_OBJECT_TYPE);
-  __ j(equal, &global_object);
-  __ cmp(eax, JS_BUILTINS_OBJECT_TYPE);
-  __ j(not_equal, &non_global_object);
-
-  // Accessing global object: Load and invoke.
-  __ bind(&global_object);
-  // Check that the global object does not require access checks.
-  __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
-            1 << Map::kIsAccessCheckNeeded);
-  __ j(not_equal, &miss, not_taken);
-  GenerateNormalHelper(masm, argc, true, &miss);
-
-  // Accessing non-global object: Check for access to global proxy.
-  Label global_proxy, invoke;
-  __ bind(&non_global_object);
-  __ cmp(eax, JS_GLOBAL_PROXY_TYPE);
-  __ j(equal, &global_proxy, not_taken);
-  // Check that the non-global, non-global-proxy object does not
-  // require access checks.
-  __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
-            1 << Map::kIsAccessCheckNeeded);
-  __ j(not_equal, &miss, not_taken);
-  __ bind(&invoke);
-  GenerateNormalHelper(masm, argc, false, &miss);
-
-  // Global object proxy access: Check access rights.
-  __ bind(&global_proxy);
-  __ CheckAccessGlobalProxy(edx, eax, &miss);
-  __ jmp(&invoke);
+  // eax: elements
+  // Search the dictionary placing the result in edi.
+  GenerateDictionaryLoad(masm, &miss, eax, ecx, edi, ebx, edi);
+  GenerateFunctionTailCall(masm, argc, &miss);
 
   __ bind(&miss);
 }
@@ -1282,6 +1258,12 @@
   //  -- esp[(argc + 1) * 4] : receiver
   // -----------------------------------
 
+  if (id == IC::kCallIC_Miss) {
+    __ IncrementCounter(&Counters::call_miss, 1);
+  } else {
+    __ IncrementCounter(&Counters::keyed_call_miss, 1);
+  }
+
   // Get the receiver of the function from the stack; 1 ~ return address.
   __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
 
@@ -1303,25 +1285,28 @@
   __ LeaveInternalFrame();
 
   // Check if the receiver is a global object of some sort.
-  Label invoke, global;
-  __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));  // receiver
-  __ test(edx, Immediate(kSmiTagMask));
-  __ j(zero, &invoke, not_taken);
-  __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
-  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-  __ cmp(ebx, JS_GLOBAL_OBJECT_TYPE);
-  __ j(equal, &global);
-  __ cmp(ebx, JS_BUILTINS_OBJECT_TYPE);
-  __ j(not_equal, &invoke);
+  // This can happen only for regular CallIC but not KeyedCallIC.
+  if (id == IC::kCallIC_Miss) {
+    Label invoke, global;
+    __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));  // receiver
+    __ test(edx, Immediate(kSmiTagMask));
+    __ j(zero, &invoke, not_taken);
+    __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+    __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+    __ cmp(ebx, JS_GLOBAL_OBJECT_TYPE);
+    __ j(equal, &global);
+    __ cmp(ebx, JS_BUILTINS_OBJECT_TYPE);
+    __ j(not_equal, &invoke);
 
-  // Patch the receiver on the stack.
-  __ bind(&global);
-  __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
-  __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
+    // Patch the receiver on the stack.
+    __ bind(&global);
+    __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
+    __ mov(Operand(esp, (argc + 1) * kPointerSize), edx);
+    __ bind(&invoke);
+  }
 
   // Invoke the function.
   ParameterCount actual(argc);
-  __ bind(&invoke);
   __ InvokeFunction(edi, actual, JUMP_FUNCTION);
 }
 
@@ -1393,7 +1378,8 @@
   // Now the key is known to be a smi. This place is also jumped to from
   // where a numeric string is converted to a smi.
 
-  GenerateKeyedLoadReceiverCheck(masm, edx, eax, &slow_call);
+  GenerateKeyedLoadReceiverCheck(
+      masm, edx, eax, Map::kHasIndexedInterceptor, &slow_call);
 
   GenerateFastArrayLoad(
       masm, edx, ecx, eax, edi, &check_number_dictionary, &slow_load);
@@ -1403,15 +1389,7 @@
   // receiver in edx is not used after this point.
   // ecx: key
   // edi: function
-
-  // Check that the value in edi is a JavaScript function.
-  __ test(edi, Immediate(kSmiTagMask));
-  __ j(zero, &slow_call, not_taken);
-  __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
-  __ j(not_equal, &slow_call, not_taken);
-  // Invoke the function.
-  ParameterCount actual(argc);
-  __ InvokeFunction(edi, actual, JUMP_FUNCTION);
+  GenerateFunctionTailCall(masm, argc, &slow_call);
 
   __ bind(&check_number_dictionary);
   // eax: elements
@@ -1451,15 +1429,13 @@
   // If the receiver is a regular JS object with slow properties then do
   // a quick inline probe of the receiver's dictionary.
   // Otherwise do the monomorphic cache probe.
-  GenerateKeyedLoadReceiverCheck(masm, edx, eax, &lookup_monomorphic_cache);
+  GenerateKeyedLoadReceiverCheck(
+      masm, edx, eax, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
 
   __ mov(ebx, FieldOperand(edx, JSObject::kPropertiesOffset));
-  __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
-         Immediate(Factory::hash_table_map()));
-  __ j(not_equal, &lookup_monomorphic_cache, not_taken);
+  __ CheckMap(ebx, Factory::hash_table_map(), &lookup_monomorphic_cache, true);
 
-  GenerateDictionaryLoad(
-      masm, &slow_load, edx, ecx, ebx, eax, edi, edi, DICTIONARY_CHECK_DONE);
+  GenerateDictionaryLoad(masm, &slow_load, ebx, ecx, eax, edi, edi);
   __ IncrementCounter(&Counters::keyed_call_generic_lookup_dict, 1);
   __ jmp(&do_call);
 
@@ -1539,49 +1515,15 @@
   //  -- ecx    : name
   //  -- esp[0] : return address
   // -----------------------------------
-  Label miss, probe, global;
+  Label miss;
 
-  // Check that the receiver isn't a smi.
-  __ test(eax, Immediate(kSmiTagMask));
-  __ j(zero, &miss, not_taken);
+  GenerateDictionaryLoadReceiverCheck(masm, eax, edx, ebx, &miss);
 
-  // Check that the receiver is a valid JS object.
-  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
-  __ movzx_b(edx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-  __ cmp(edx, FIRST_JS_OBJECT_TYPE);
-  __ j(less, &miss, not_taken);
-
-  // If this assert fails, we have to check upper bound too.
-  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-
-  // Check for access to global object (unlikely).
-  __ cmp(edx, JS_GLOBAL_PROXY_TYPE);
-  __ j(equal, &global, not_taken);
-
-  // Check for non-global object that requires access check.
-  __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
-            1 << Map::kIsAccessCheckNeeded);
-  __ j(not_zero, &miss, not_taken);
-
+  // edx: elements
   // Search the dictionary placing the result in eax.
-  __ bind(&probe);
-  GenerateDictionaryLoad(masm,
-                         &miss,
-                         eax,
-                         ecx,
-                         edx,
-                         edi,
-                         ebx,
-                         edi,
-                         CHECK_DICTIONARY);
-  __ mov(eax, edi);
+  GenerateDictionaryLoad(masm, &miss, edx, ecx, edi, ebx, eax);
   __ ret(0);
 
-  // Global object access: Check access rights.
-  __ bind(&global);
-  __ CheckAccessGlobalProxy(eax, edx, &miss);
-  __ jmp(&probe);
-
   // Cache miss: Jump to runtime.
   __ bind(&miss);
   GenerateMiss(masm);
@@ -1595,6 +1537,8 @@
   //  -- esp[0] : return address
   // -----------------------------------
 
+  __ IncrementCounter(&Counters::load_miss, 1);
+
   __ pop(ebx);
   __ push(eax);  // receiver
   __ push(ecx);  // name
@@ -1711,6 +1655,8 @@
   //  -- esp[0] : return address
   // -----------------------------------
 
+  __ IncrementCounter(&Counters::keyed_load_miss, 1);
+
   __ pop(ebx);
   __ push(edx);  // receiver
   __ push(eax);  // name
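
GenerateDictionaryLoad now probes the property dictionary directly, taking the elements register instead of re-deriving it from the receiver. The probe sequence it unrolls is easier to see in scalar code: each attempt masks a quadratically growing offset onto the name's hash (the emitted comment states the index as (hash + i + i*i) & mask) and each entry spans three slots: key, value, details. A rough standalone sketch of that lookup (hypothetical layout, not the StringDictionary class):

    #include <cstdint>
    #include <string>
    #include <vector>

    // Illustrative only: an open-addressed dictionary probed a few times
    // inline, with each logical entry occupying three consecutive slots.
    struct Entry { std::string key; int value; int details; };

    bool InlineProbe(const std::vector<Entry>& elements, uint32_t hash,
                     const std::string& name, int* value_out) {
      // Capacity is assumed to be a power of two so & works as a modulus.
      const uint32_t mask = static_cast<uint32_t>(elements.size()) - 1;
      const int kProbes = 4;  // a handful of probes cover almost all hits
      for (int i = 0; i < kProbes; i++) {
        // Compute the masked index: (hash + i + i * i) & mask.
        const uint32_t index = (hash + i + i * i) & mask;
        if (elements[index].key == name) {
          *value_out = elements[index].value;
          return true;
        }
      }
      return false;  // the caller falls back to the full (slow) lookup
    }
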
diff --git a/src/ic.cc b/src/ic.cc
index 475f161..4b77d92 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -992,12 +992,14 @@
       }
     }
     set_target(stub);
-    // For JSObjects that are not value wrappers and that do not have
-    // indexed interceptors, we initialize the inlined fast case (if
-    // present) by patching the inlined map check.
+    // For JSObjects with fast elements that are not value wrappers
+    // and that do not have indexed interceptors, we initialize the
+    // inlined fast case (if present) by patching the inlined map
+    // check.
     if (object->IsJSObject() &&
         !object->IsJSValue() &&
-        !JSObject::cast(*object)->HasIndexedInterceptor()) {
+        !JSObject::cast(*object)->HasIndexedInterceptor() &&
+        JSObject::cast(*object)->HasFastElements()) {
       Map* map = JSObject::cast(*object)->map();
       PatchInlinedLoad(address(), map);
     }
diff --git a/src/ic.h b/src/ic.h
index 5fd5078..738b6f4 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -33,10 +33,6 @@
 namespace v8 {
 namespace internal {
 
-// Flag indicating whether an IC stub needs to check that a backing
-// store is in dictionary case.
-enum DictionaryCheck { CHECK_DICTIONARY, DICTIONARY_CHECK_DONE };
-
 
 // IC_UTIL_LIST defines all utility functions called from generated
 // inline caching code. The argument for the macro, ICU, is the function name.
diff --git a/src/json.js b/src/json.js
index 3e42d36..e5ee03d 100644
--- a/src/json.js
+++ b/src/json.js
@@ -207,7 +207,7 @@
     } else if (IS_STRING_WRAPPER(value)) {
       value = $String(value);
     } else if (IS_BOOLEAN_WRAPPER(value)) {
-      value = $Boolean(value);
+      value = %_ValueOf(value);
     }
   }
   switch (typeof value) {
diff --git a/src/jump-target-heavy.h b/src/jump-target-heavy.h
index b923fe5..b2113a5 100644
--- a/src/jump-target-heavy.h
+++ b/src/jump-target-heavy.h
@@ -196,6 +196,8 @@
  public:
   // Construct a break target.
   BreakTarget() {}
+  explicit BreakTarget(JumpTarget::Directionality direction)
+    : JumpTarget(direction) { }
 
   virtual ~BreakTarget() {}
 
diff --git a/src/jump-target-light-inl.h b/src/jump-target-light-inl.h
index 0b4eee4..e8f1a5f 100644
--- a/src/jump-target-light-inl.h
+++ b/src/jump-target-light-inl.h
@@ -36,16 +36,20 @@
 // Construct a jump target.
 JumpTarget::JumpTarget(Directionality direction)
     : entry_frame_set_(false),
+      direction_(direction),
       entry_frame_(kInvalidVirtualFrameInitializer) {
 }
 
 JumpTarget::JumpTarget()
     : entry_frame_set_(false),
+      direction_(FORWARD_ONLY),
       entry_frame_(kInvalidVirtualFrameInitializer) {
 }
 
 
 BreakTarget::BreakTarget() { }
+BreakTarget::BreakTarget(JumpTarget::Directionality direction)
+  : JumpTarget(direction) { }
 
 } }  // namespace v8::internal
 
diff --git a/src/jump-target-light.h b/src/jump-target-light.h
index 084bd58..91b7266 100644
--- a/src/jump-target-light.h
+++ b/src/jump-target-light.h
@@ -120,6 +120,9 @@
   // Has an entry frame been found?
   bool entry_frame_set_;
 
+  // Can we branch backwards to this label?
+  Directionality direction_;
+
   // The frame used on entry to the block and expected at backward
   // jumps to the block.  Set the first time something branches to this
   // jump target.
@@ -150,6 +153,7 @@
  public:
   // Construct a break target.
   inline BreakTarget();
+  inline BreakTarget(JumpTarget::Directionality direction);
 
   virtual ~BreakTarget() {}
 
diff --git a/src/log.cc b/src/log.cc
index ada73cb..e083f01 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -309,10 +309,10 @@
 
 void Profiler::Run() {
   TickSample sample;
-  bool overflow = Logger::profiler_->Remove(&sample);
+  bool overflow = Remove(&sample);
   while (running_) {
     LOG(TickEvent(&sample, overflow));
-    overflow = Logger::profiler_->Remove(&sample);
+    overflow = Remove(&sample);
   }
 }
 
@@ -1150,7 +1150,7 @@
 
 int Logger::GetActiveProfilerModules() {
   int result = PROFILER_MODULE_NONE;
-  if (!profiler_->paused()) {
+  if (profiler_ != NULL && !profiler_->paused()) {
     result |= PROFILER_MODULE_CPU;
   }
   if (FLAG_log_gc) {
@@ -1162,7 +1162,7 @@
 
 void Logger::PauseProfiler(int flags, int tag) {
   if (!Log::IsEnabled()) return;
-  if (flags & PROFILER_MODULE_CPU) {
+  if (profiler_ != NULL && (flags & PROFILER_MODULE_CPU)) {
     // It is OK to have negative nesting.
     if (--cpu_profiler_nesting_ == 0) {
       profiler_->pause();
@@ -1193,7 +1193,7 @@
   if (tag != 0) {
     UncheckedIntEvent("open-tag", tag);
   }
-  if (flags & PROFILER_MODULE_CPU) {
+  if (profiler_ != NULL && (flags & PROFILER_MODULE_CPU)) {
     if (cpu_profiler_nesting_++ == 0) {
       ++logging_nesting_;
       if (FLAG_prof_lazy) {
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index b60e54d..0b5ff99 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -539,6 +539,9 @@
              (map()->inobject_properties() + properties()->length() -
               map()->NextFreePropertyIndex()));
   }
+  ASSERT(map()->has_fast_elements() ==
+         (elements()->map() == Heap::fixed_array_map()));
+  ASSERT(map()->has_fast_elements() == HasFastElements());
 }
 
 
diff --git a/src/objects-inl.h b/src/objects-inl.h
index d6571bf..f9def82 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -1166,6 +1166,8 @@
 
 
 void JSObject::set_elements(HeapObject* value, WriteBarrierMode mode) {
+  ASSERT(map()->has_fast_elements() ==
+         (value->map() == Heap::fixed_array_map()));
   // In the assert below Dictionary is covered under FixedArray.
   ASSERT(value->IsFixedArray() || value->IsPixelArray() ||
          value->IsExternalArray());
@@ -1181,11 +1183,21 @@
 
 
 void JSObject::initialize_elements() {
+  ASSERT(map()->has_fast_elements());
   ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
   WRITE_FIELD(this, kElementsOffset, Heap::empty_fixed_array());
 }
 
 
+Object* JSObject::ResetElements() {
+  Object* obj = map()->GetFastElementsMap();
+  if (obj->IsFailure()) return obj;
+  set_map(Map::cast(obj));
+  initialize_elements();
+  return this;
+}
+
+
 ACCESSORS(Oddball, to_string, String, kToStringOffset)
 ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
 
@@ -2335,6 +2347,26 @@
 }
 
 
+Object* Map::GetFastElementsMap() {
+  if (has_fast_elements()) return this;
+  Object* obj = CopyDropTransitions();
+  if (obj->IsFailure()) return obj;
+  Map* new_map = Map::cast(obj);
+  new_map->set_has_fast_elements(true);
+  return new_map;
+}
+
+
+Object* Map::GetSlowElementsMap() {
+  if (!has_fast_elements()) return this;
+  Object* obj = CopyDropTransitions();
+  if (obj->IsFailure()) return obj;
+  Map* new_map = Map::cast(obj);
+  new_map->set_has_fast_elements(false);
+  return new_map;
+}
+
+
 ACCESSORS(Map, instance_descriptors, DescriptorArray,
           kInstanceDescriptorsOffset)
 ACCESSORS(Map, code_cache, Object, kCodeCacheOffset)
@@ -2838,11 +2870,14 @@
   if (array->IsFixedArray()) {
     // FAST_ELEMENTS or DICTIONARY_ELEMENTS are both stored in a FixedArray.
     if (array->map() == Heap::fixed_array_map()) {
+      ASSERT(map()->has_fast_elements());
       return FAST_ELEMENTS;
     }
     ASSERT(array->IsDictionary());
+    ASSERT(!map()->has_fast_elements());
     return DICTIONARY_ELEMENTS;
   }
+  ASSERT(!map()->has_fast_elements());
   if (array->IsExternalArray()) {
     switch (array->map()->instance_type()) {
       case EXTERNAL_BYTE_ARRAY_TYPE:
diff --git a/src/objects.cc b/src/objects.cc
index 63b77b7..5a057e1 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -2222,6 +2222,11 @@
 Object* JSObject::NormalizeElements() {
   ASSERT(!HasPixelElements() && !HasExternalArrayElements());
   if (HasDictionaryElements()) return this;
+  ASSERT(map()->has_fast_elements());
+
+  Object* obj = map()->GetSlowElementsMap();
+  if (obj->IsFailure()) return obj;
+  Map* new_map = Map::cast(obj);
 
   // Get number of entries.
   FixedArray* array = FixedArray::cast(elements());
@@ -2230,7 +2235,7 @@
   int length = IsJSArray() ?
                Smi::cast(JSArray::cast(this)->length())->value() :
                array->length();
-  Object* obj = NumberDictionary::Allocate(length);
+  obj = NumberDictionary::Allocate(length);
   if (obj->IsFailure()) return obj;
   NumberDictionary* dictionary = NumberDictionary::cast(obj);
   // Copy entries.
@@ -2243,7 +2248,10 @@
       dictionary = NumberDictionary::cast(result);
     }
   }
-  // Switch to using the dictionary as the backing storage for elements.
+  // Switch to using the dictionary as the backing storage for
+  // elements. Set the new map first to satisfy the elements type
+  // assert in set_elements().
+  set_map(new_map);
   set_elements(dictionary);
 
   Counters::elements_to_dictionary.Increment();
@@ -5473,14 +5481,18 @@
 #endif  // ENABLE_DISASSEMBLER
 
 
-void JSObject::SetFastElements(FixedArray* elems) {
+Object* JSObject::SetFastElementsCapacityAndLength(int capacity, int length) {
   // We should never end in here with a pixel or external array.
   ASSERT(!HasPixelElements() && !HasExternalArrayElements());
-#ifdef DEBUG
-  // Check the provided array is filled with the_hole.
-  uint32_t len = static_cast<uint32_t>(elems->length());
-  for (uint32_t i = 0; i < len; i++) ASSERT(elems->get(i)->IsTheHole());
-#endif
+
+  Object* obj = Heap::AllocateFixedArrayWithHoles(capacity);
+  if (obj->IsFailure()) return obj;
+  FixedArray* elems = FixedArray::cast(obj);
+
+  obj = map()->GetFastElementsMap();
+  if (obj->IsFailure()) return obj;
+  Map* new_map = Map::cast(obj);
+
   AssertNoAllocation no_gc;
   WriteBarrierMode mode = elems->GetWriteBarrierMode(no_gc);
   switch (GetElementsKind()) {
@@ -5508,7 +5520,15 @@
       UNREACHABLE();
       break;
   }
+
+  set_map(new_map);
   set_elements(elems);
+
+  if (IsJSArray()) {
+    JSArray::cast(this)->set_length(Smi::FromInt(length));
+  }
+
+  return this;
 }
 
 
@@ -5595,7 +5615,7 @@
 
   Object* smi_length = len->ToSmi();
   if (smi_length->IsSmi()) {
-    int value = Smi::cast(smi_length)->value();
+    const int value = Smi::cast(smi_length)->value();
     if (value < 0) return ArrayLengthRangeError();
     switch (GetElementsKind()) {
       case FAST_ELEMENTS: {
@@ -5617,12 +5637,8 @@
         int new_capacity = value > min ? value : min;
         if (new_capacity <= kMaxFastElementsLength ||
             !ShouldConvertToSlowElements(new_capacity)) {
-          Object* obj = Heap::AllocateFixedArrayWithHoles(new_capacity);
+          Object* obj = SetFastElementsCapacityAndLength(new_capacity, value);
           if (obj->IsFailure()) return obj;
-          if (IsJSArray()) {
-            JSArray::cast(this)->set_length(Smi::cast(smi_length));
-          }
-          SetFastElements(FixedArray::cast(obj));
           return this;
         }
         break;
@@ -5633,7 +5649,8 @@
             // If the length of a slow array is reset to zero, we clear
             // the array and flush backing storage. This has the added
             // benefit that the array returns to fast mode.
-            initialize_elements();
+            Object* obj = ResetElements();
+            if (obj->IsFailure()) return obj;
           } else {
             // Remove deleted elements.
             uint32_t old_length =
@@ -6092,12 +6109,8 @@
     if (new_capacity <= kMaxFastElementsLength ||
         !ShouldConvertToSlowElements(new_capacity)) {
       ASSERT(static_cast<uint32_t>(new_capacity) > index);
-      Object* obj = Heap::AllocateFixedArrayWithHoles(new_capacity);
+      Object* obj = SetFastElementsCapacityAndLength(new_capacity, index + 1);
       if (obj->IsFailure()) return obj;
-      SetFastElements(FixedArray::cast(obj));
-      if (IsJSArray()) {
-        JSArray::cast(this)->set_length(Smi::FromInt(index + 1));
-      }
       FixedArray::cast(elements())->set(index, value);
       return value;
     }
@@ -6216,13 +6229,11 @@
         uint32_t new_length = 0;
         if (IsJSArray()) {
           CHECK(JSArray::cast(this)->length()->ToArrayIndex(&new_length));
-          JSArray::cast(this)->set_length(Smi::FromInt(new_length));
         } else {
           new_length = NumberDictionary::cast(elements())->max_number_key() + 1;
         }
-        Object* obj = Heap::AllocateFixedArrayWithHoles(new_length);
+        Object* obj = SetFastElementsCapacityAndLength(new_length, new_length);
         if (obj->IsFailure()) return obj;
-        SetFastElements(FixedArray::cast(obj));
 #ifdef DEBUG
         if (FLAG_trace_normalization) {
           PrintF("Object elements are fast case again:\n");
@@ -7526,14 +7537,18 @@
     }
     // Convert to fast elements.
 
+    Object* obj = map()->GetFastElementsMap();
+    if (obj->IsFailure()) return obj;
+    Map* new_map = Map::cast(obj);
+
     PretenureFlag tenure = Heap::InNewSpace(this) ? NOT_TENURED: TENURED;
     Object* new_array =
         Heap::AllocateFixedArray(dict->NumberOfElements(), tenure);
-    if (new_array->IsFailure()) {
-      return new_array;
-    }
+    if (new_array->IsFailure()) return new_array;
     FixedArray* fast_elements = FixedArray::cast(new_array);
     dict->CopyValuesTo(fast_elements);
+
+    set_map(new_map);
     set_elements(fast_elements);
   }
   ASSERT(HasFastElements());
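
The reordering in NormalizeElements (and in SetFastElementsCapacityAndLength) exists to keep the new invariant checkable: set_elements() now asserts that the map's fast-elements bit agrees with the kind of backing store being installed, so the map has to be swapped before the store. A toy sketch of why the order matters (illustrative types only, not the JSObject interface):

    #include <cassert>
    #include <cstdint>
    #include <map>
    #include <vector>

    // Illustrative only: a setter that asserts the object's "fast elements"
    // flag matches the backing store being installed forces callers to flip
    // the flag (i.e. install the new map) before swapping the store.
    struct SketchObject {
      bool has_fast_elements = true;
      std::vector<int> fast_store;
      std::map<uint32_t, int> dict_store;

      void set_dictionary_store(const std::map<uint32_t, int>& dict) {
        assert(!has_fast_elements);  // same spirit as the set_elements() assert
        dict_store = dict;
      }

      void Normalize() {
        std::map<uint32_t, int> dict;
        for (size_t i = 0; i < fast_store.size(); i++)
          dict[static_cast<uint32_t>(i)] = fast_store[i];
        has_fast_elements = false;   // switch the "map" first ...
        set_dictionary_store(dict);  // ... so the setter's assert holds
      }
    };
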
diff --git a/src/objects.h b/src/objects.h
index 0c14665..0ad6f14 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1191,6 +1191,7 @@
   // case, and a PixelArray or ExternalArray in special cases.
   DECL_ACCESSORS(elements, HeapObject)
   inline void initialize_elements();
+  inline Object* ResetElements();
   inline ElementsKind GetElementsKind();
   inline bool HasFastElements();
   inline bool HasDictionaryElements();
@@ -1367,7 +1368,7 @@
   // The undefined object if index is out of bounds.
   Object* GetElementWithReceiver(JSObject* receiver, uint32_t index);
 
-  void SetFastElements(FixedArray* elements);
+  Object* SetFastElementsCapacityAndLength(int capacity, int length);
   Object* SetSlowElements(Object* length);
 
   // Lookup interceptors are used for handling properties controlled by host
@@ -2987,6 +2988,19 @@
     return ((1 << kIsExtensible) & bit_field2()) != 0;
   }
 
+  // Tells whether the instance has fast elements.
+  void set_has_fast_elements(bool value) {
+    if (value) {
+      set_bit_field2(bit_field2() | (1 << kHasFastElements));
+    } else {
+      set_bit_field2(bit_field2() & ~(1 << kHasFastElements));
+    }
+  }
+
+  bool has_fast_elements() {
+    return ((1 << kHasFastElements) & bit_field2()) != 0;
+  }
+
   // Tells whether the instance needs security checks when accessing its
   // properties.
   inline void set_is_access_check_needed(bool access_check_needed);
@@ -3010,6 +3024,16 @@
   // instance descriptors.
   Object* CopyDropTransitions();
 
+  // Returns this map if it has the fast elements bit set, otherwise
+  // returns a copy of the map, with all transitions dropped from the
+  // descriptors and the fast elements bit set.
+  inline Object* GetFastElementsMap();
+
+  // Returns this map if it has the fast elements bit cleared,
+  // otherwise returns a copy of the map, with all transitions dropped
+  // from the descriptors and the fast elements bit cleared.
+  inline Object* GetSlowElementsMap();
+
   // Returns the property index for name (only valid for FAST MODE).
   int PropertyIndexFor(String* name);
 
@@ -3111,6 +3135,7 @@
   // Bit positions for bit field 2
   static const int kIsExtensible = 0;
   static const int kFunctionWithPrototype = 1;
+  static const int kHasFastElements = 2;
 
   // Layout of the default cache. It holds alternating name and code objects.
   static const int kCodeCacheEntrySize = 2;
diff --git a/src/regexp.js b/src/regexp.js
index 9367f15..f0945b3 100644
--- a/src/regexp.js
+++ b/src/regexp.js
@@ -230,7 +230,10 @@
   var matchIndices = %_RegExpExec(this, s, i, lastMatchInfo);
 
   if (matchIndices == null) {
-    if (this.global) this.lastIndex = 0;
+    if (this.global) {
+      this.lastIndex = 0;
+      if (lastIndex != 0) return matchIndices;
+    }
     cache.lastIndex = lastIndex;
     cache.regExp = this;
     cache.subject = s;
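
The early return added above keeps a failed global match at a nonzero lastIndex out
of the RegExp result cache, since the same regexp and subject can still match once
lastIndex wraps back to 0. A minimal illustration of the intended behavior
(illustrative sketch only, not part of the patch):

    var re = /x/g;
    re.exec("xx");  // ["x"], lastIndex becomes 1
    re.exec("xx");  // ["x"], lastIndex becomes 2
    re.exec("xx");  // null; lastIndex resets to 0 and the failure is not cached
    re.exec("xx");  // ["x"] again -- the match/fail cycle repeats correctly
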
diff --git a/src/runtime.cc b/src/runtime.cc
index 71148e6..8625053 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -7449,7 +7449,7 @@
   uint32_t index_limit_;
   // Index after last seen index. Always less than or equal to index_limit_.
   uint32_t index_offset_;
-  bool fast_elements_;
+  const bool fast_elements_;
 };
 
 
@@ -7766,13 +7766,14 @@
     // The backing storage array must have non-existing elements to
     // preserve holes across concat operations.
     storage = Factory::NewFixedArrayWithHoles(result_length);
-
+    result->set_map(*Factory::GetFastElementsMap(Handle<Map>(result->map())));
   } else {
     // TODO(126): move 25% pre-allocation logic into Dictionary::Allocate
     uint32_t at_least_space_for = estimate_nof_elements +
                                   (estimate_nof_elements >> 2);
     storage = Handle<FixedArray>::cast(
                   Factory::NewNumberDictionary(at_least_space_for));
+    result->set_map(*Factory::GetSlowElementsMap(Handle<Map>(result->map())));
   }
 
   Handle<Object> len = Factory::NewNumber(static_cast<double>(result_length));
@@ -7822,9 +7823,19 @@
   ASSERT(args.length() == 2);
   CONVERT_CHECKED(JSArray, from, args[0]);
   CONVERT_CHECKED(JSArray, to, args[1]);
-  to->SetContent(FixedArray::cast(from->elements()));
+  HeapObject* new_elements = from->elements();
+  Object* new_map;
+  if (new_elements->map() == Heap::fixed_array_map()) {
+    new_map = to->map()->GetFastElementsMap();
+  } else {
+    new_map = to->map()->GetSlowElementsMap();
+  }
+  if (new_map->IsFailure()) return new_map;
+  to->set_map(Map::cast(new_map));
+  to->set_elements(new_elements);
   to->set_length(from->length());
-  from->SetContent(Heap::empty_fixed_array());
+  Object* obj = from->ResetElements();
+  if (obj->IsFailure()) return obj;
   from->set_length(Smi::FromInt(0));
   return to;
 }
diff --git a/src/utils.h b/src/utils.h
index d7c5b70..236b85e 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -587,7 +587,7 @@
 // Limit below which the extra overhead of the MemCopy function is likely
 // to outweigh the benefits of faster copying.
 // TODO(lrn): Try to find a more precise value.
-static const int kMinComplexMemCopy = 256;
+static const int kMinComplexMemCopy = 64;
 
 #else  // V8_TARGET_ARCH_IA32
 
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 10b8102..93fecd1 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -153,6 +153,10 @@
   SC(keyed_store_inline_miss, V8.KeyedStoreInlineMiss)                \
   SC(named_store_global_inline, V8.NamedStoreGlobalInline)            \
   SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss)   \
+  SC(call_miss, V8.CallMiss)                                          \
+  SC(keyed_call_miss, V8.KeyedCallMiss)                               \
+  SC(load_miss, V8.LoadMiss)                                          \
+  SC(keyed_load_miss, V8.KeyedLoadMiss)                               \
   SC(call_const, V8.CallConst)                                        \
   SC(call_const_fast_api, V8.CallConstFastApi)                        \
   SC(call_const_interceptor, V8.CallConstInterceptor)                 \
diff --git a/src/v8natives.js b/src/v8natives.js
index 1d47eb7..24d5e7c 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -677,9 +677,20 @@
     }
   }
 
-  // Property names are expected to be strings.
-  for (var i = 0; i < propertyNames.length; ++i)
-    propertyNames[i] = ToString(propertyNames[i]);
+  // Property names are expected to be unique strings.
+  var propertySet = {};
+  var j = 0;
+  for (var i = 0; i < propertyNames.length; ++i) {
+    var name = ToString(propertyNames[i]);
+    // We need to check for the exact property value since, for properties
+    // inherited from Object.prototype such as toString, a bare
+    // if (propertySet["toString"]) test would always succeed.
+    if (propertySet[name] === true)
+      continue;
+    propertySet[name] = true;
+    propertyNames[j++] = name;
+  }
+  propertyNames.length = j;
 
   return propertyNames;
 }
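
The rewritten loop de-duplicates property names using a plain object as a set; the
strict === true test matters because keys such as "toString" already resolve to
functions inherited from Object.prototype, so a bare truthiness check would drop
them the first time they are seen. A standalone sketch of the same pattern
(illustrative only, not part of the patch):

    var names = ["toString", "x", "x", "constructor", "toString"];
    var propertySet = {};
    var j = 0;
    for (var i = 0; i < names.length; i++) {
      var name = String(names[i]);
      // if (propertySet[name]) would wrongly skip "toString" and "constructor"
      // on their first occurrence, since those keys hit Object.prototype methods.
      if (propertySet[name] === true) continue;
      propertySet[name] = true;
      names[j++] = name;
    }
    names.length = j;  // names is now ["toString", "x", "constructor"]
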
diff --git a/src/version.cc b/src/version.cc
index c9e8411..4cc7e1b 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     2
 #define MINOR_VERSION     2
-#define BUILD_NUMBER      19
+#define BUILD_NUMBER      20
 #define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false
 
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index e665385..2bb92d7 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -376,8 +376,13 @@
 
 void Assembler::Align(int m) {
   ASSERT(IsPowerOf2(m));
-  while ((pc_offset() & (m - 1)) != 0) {
-    nop();
+  int delta = (m - (pc_offset() & (m - 1))) & (m - 1);
+  while (delta >= 9) {
+    nop(9);
+    delta -= 9;
+  }
+  if (delta > 0) {
+    nop(delta);
   }
 }
 
@@ -837,9 +842,7 @@
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   // Opcode: FF /2 r64.
-  if (adr.high_bit()) {
-    emit_rex_64(adr);
-  }
+  emit_optional_rex_32(adr);
   emit(0xFF);
   emit_modrm(0x2, adr);
 }
@@ -849,9 +852,9 @@
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   // Opcode: FF /2 m64.
-  emit_rex_64(op);
+  emit_optional_rex_32(op);
   emit(0xFF);
-  emit_operand(2, op);
+  emit_operand(0x2, op);
 }
 
 
@@ -1270,9 +1273,7 @@
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   // Opcode FF/4 r64.
-  if (target.high_bit()) {
-    emit_rex_64(target);
-  }
+  emit_optional_rex_32(target);
   emit(0xFF);
   emit_modrm(0x4, target);
 }
@@ -1831,9 +1832,7 @@
 void Assembler::pop(Register dst) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  if (dst.high_bit()) {
-    emit_rex_64(dst);
-  }
+  emit_optional_rex_32(dst);
   emit(0x58 | dst.low_bits());
 }
 
@@ -1841,7 +1840,7 @@
 void Assembler::pop(const Operand& dst) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  emit_rex_64(dst);  // Could be omitted in some cases.
+  emit_optional_rex_32(dst);
   emit(0x8F);
   emit_operand(0, dst);
 }
@@ -1857,9 +1856,7 @@
 void Assembler::push(Register src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  if (src.high_bit()) {
-    emit_rex_64(src);
-  }
+  emit_optional_rex_32(src);
   emit(0x50 | src.low_bits());
 }
 
@@ -1867,7 +1864,7 @@
 void Assembler::push(const Operand& src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  emit_rex_64(src);  // Could be omitted in some cases.
+  emit_optional_rex_32(src);
   emit(0xFF);
   emit_operand(6, src);
 }
@@ -2609,6 +2606,28 @@
 }
 
 
+void Assembler::movss(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xF3);  // single
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x10);  // load
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::movss(const Operand& src, XMMRegister dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xF3);  // single
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x11);  // store
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::cvttss2si(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -2664,6 +2683,17 @@
 }
 
 
+void Assembler::cvtlsi2ss(XMMRegister dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xF3);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x2A);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::cvtqsi2sd(XMMRegister dst, Register src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -2686,6 +2716,50 @@
 }
 
 
+void Assembler::cvtss2sd(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xF3);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x5A);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xF2);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x5A);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::cvtsd2si(Register dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xF2);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x2D);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::cvtsd2siq(Register dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xF2);
+  emit_rex_64(dst, src);
+  emit(0x0F);
+  emit(0x2D);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::addsd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -2763,6 +2837,18 @@
 }
 
 
+void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0f);
+  emit(0x2e);
+  emit_sse_operand(dst, src);
+}
+
+
+
 void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
   Register ireg = { reg.code() };
   emit_operand(ireg, adr);
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index f195439..213db2c 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -46,23 +46,23 @@
 
 // Test whether a 64-bit value is in a specific range.
 static inline bool is_uint32(int64_t x) {
-  static const int64_t kUInt32Mask = V8_INT64_C(0xffffffff);
-  return x == (x & kUInt32Mask);
+  static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
+  return static_cast<uint64_t>(x) <= kMaxUInt32;
 }
 
 static inline bool is_int32(int64_t x) {
-  static const int64_t kMinIntValue = V8_INT64_C(-0x80000000);
-  return is_uint32(x - kMinIntValue);
+  static const int64_t kMinInt32 = -V8_INT64_C(0x80000000);
+  return is_uint32(x - kMinInt32);
 }
 
 static inline bool uint_is_int32(uint64_t x) {
-  static const uint64_t kMaxIntValue = V8_UINT64_C(0x80000000);
-  return x < kMaxIntValue;
+  static const uint64_t kMaxInt32 = V8_UINT64_C(0x7fffffff);
+  return x <= kMaxInt32;
 }
 
 static inline bool is_uint32(uint64_t x) {
-  static const uint64_t kMaxUIntValue = V8_UINT64_C(0x100000000);
-  return x < kMaxUIntValue;
+  static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
+  return x <= kMaxUInt32;
 }
 
 // CPU Registers.
@@ -1110,6 +1110,9 @@
   void movsd(XMMRegister dst, XMMRegister src);
   void movsd(XMMRegister dst, const Operand& src);
 
+  void movss(XMMRegister dst, const Operand& src);
+  void movss(const Operand& dst, XMMRegister src);
+
   void cvttss2si(Register dst, const Operand& src);
   void cvttsd2si(Register dst, const Operand& src);
   void cvttsd2siq(Register dst, XMMRegister src);
@@ -1119,7 +1122,14 @@
   void cvtqsi2sd(XMMRegister dst, const Operand& src);
   void cvtqsi2sd(XMMRegister dst, Register src);
 
+  void cvtlsi2ss(XMMRegister dst, Register src);
+
   void cvtss2sd(XMMRegister dst, XMMRegister src);
+  void cvtss2sd(XMMRegister dst, const Operand& src);
+  void cvtsd2ss(XMMRegister dst, XMMRegister src);
+
+  void cvtsd2si(Register dst, XMMRegister src);
+  void cvtsd2siq(Register dst, XMMRegister src);
 
   void addsd(XMMRegister dst, XMMRegister src);
   void subsd(XMMRegister dst, XMMRegister src);
@@ -1130,6 +1140,7 @@
   void sqrtsd(XMMRegister dst, XMMRegister src);
 
   void ucomisd(XMMRegister dst, XMMRegister src);
+  void ucomisd(XMMRegister dst, const Operand& src);
 
   // The first argument is the reg field, the second argument is the r/m field.
   void emit_sse_operand(XMMRegister dst, XMMRegister src);
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 3ba8906..0c4cd16 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -2641,7 +2641,7 @@
 
   // Generate code to set the elements in the array that are not
   // literals.
-  for (int i = 0; i < node->values()->length(); i++) {
+  for (int i = 0; i < length; i++) {
     Expression* value = node->values()->at(i);
 
     // If value is a literal the property value is already set in the
@@ -3855,8 +3855,17 @@
     default:
       UNREACHABLE();
   }
-  Load(left);
-  Load(right);
+
+  if (left->IsTrivial()) {
+    Load(right);
+    Result right_result = frame_->Pop();
+    frame_->Push(left);
+    frame_->Push(&right_result);
+  } else {
+    Load(left);
+    Load(right);
+  }
+
   Comparison(node, cc, strict, destination());
 }
 
@@ -5336,9 +5345,8 @@
     dest->false_target()->Branch(equal);
     Condition is_smi = masm_->CheckSmi(value.reg());
     dest->true_target()->Branch(is_smi);
-    __ fldz();
-    __ fld_d(FieldOperand(value.reg(), HeapNumber::kValueOffset));
-    __ FCmp();
+    __ xorpd(xmm0, xmm0);
+    __ ucomisd(xmm0, FieldOperand(value.reg(), HeapNumber::kValueOffset));
     value.Unuse();
     dest->Split(not_zero);
   } else {
@@ -6511,7 +6519,7 @@
 void DeferredInlineBinaryOperation::Generate() {
   Label done;
   if ((op_ == Token::ADD)
-      || (op_ ==Token::SUB)
+      || (op_ == Token::SUB)
       || (op_ == Token::MUL)
       || (op_ == Token::DIV)) {
     Label call_runtime;
@@ -7530,9 +7538,11 @@
     // is not a dictionary.
     __ movq(elements.reg(),
             FieldOperand(receiver.reg(), JSObject::kElementsOffset));
-    __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
-           Factory::fixed_array_map());
-    deferred->Branch(not_equal);
+    if (FLAG_debug_code) {
+      __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
+             Factory::fixed_array_map());
+      __ Assert(equal, "JSObject with fast elements map has slow elements");
+    }
 
     // Check that key is within bounds.
     __ SmiCompare(key.reg(),
@@ -8000,14 +8010,12 @@
   __ jmp(&true_result);
 
   __ bind(&not_string);
-  // HeapNumber => false iff +0, -0, or NaN.
-  // These three cases set C3 when compared to zero in the FPU.
   __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
   __ j(not_equal, &true_result);
-  __ fldz();  // Load zero onto fp stack
-  // Load heap-number double value onto fp stack
-  __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
-  __ FCmp();
+  // HeapNumber => false iff +0, -0, or NaN.
+  // These three cases set the zero flag when compared to zero using ucomisd.
+  __ xorpd(xmm0, xmm0);
+  __ ucomisd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
   __ j(zero, &false_result);
   // Fall through to |true_result|.
 
@@ -8951,48 +8959,31 @@
     // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
     // so we do the second best thing - test it ourselves.
     // Note: if cc_ != equal, never_nan_nan_ is not used.
+    __ Set(rax, EQUAL);
     if (never_nan_nan_ && (cc_ == equal)) {
-      __ Set(rax, EQUAL);
       __ ret(0);
     } else {
-      Label return_equal;
       Label heap_number;
       // If it's not a heap number, then return equal.
       __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
              Factory::heap_number_map());
       __ j(equal, &heap_number);
-      __ bind(&return_equal);
-      __ Set(rax, EQUAL);
       __ ret(0);
 
       __ bind(&heap_number);
-      // It is a heap number, so return non-equal if it's NaN and equal if
-      // it's not NaN.
-      // The representation of NaN values has all exponent bits (52..62) set,
-      // and not all mantissa bits (0..51) clear.
-      // We only allow QNaNs, which have bit 51 set (which also rules out
-      // the value being Infinity).
+      // It is a heap number, so return EQUAL if it's not NaN.
+      // For NaN, return 1 for every condition except greater and
+      // greater-equal.  Return -1 for them, so the comparison yields
+      // false for all conditions except not-equal.
 
-      // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
-      // all bits in the mask are set. We only need to check the word
-      // that contains the exponent and high bit of the mantissa.
-      ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
-      __ movl(rdx, FieldOperand(rdx, HeapNumber::kExponentOffset));
-      __ xorl(rax, rax);
-      __ addl(rdx, rdx);  // Shift value and mask so mask applies to top bits.
-      __ cmpl(rdx, Immediate(kQuietNaNHighBitsMask << 1));
-      if (cc_ == equal) {
-        __ setcc(above_equal, rax);
-        __ ret(0);
-      } else {
-        Label nan;
-        __ j(above_equal, &nan);
-        __ Set(rax, EQUAL);
-        __ ret(0);
-        __ bind(&nan);
-        __ Set(rax, NegativeComparisonResult(cc_));
-        __ ret(0);
+      __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+      __ ucomisd(xmm0, xmm0);
+      __ setcc(parity_even, rax);
+      // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
+      if (cc_ == greater_equal || cc_ == greater) {
+        __ neg(rax);
       }
+      __ ret(0);
     }
 
     __ bind(&not_identical);
@@ -10040,20 +10031,15 @@
 // Input: rdx, rax are the left and right objects of a bit op.
 // Output: rax, rcx are left and right integers for a bit op.
 void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
-  if (FLAG_debug_code) {
-    // Both arguments can not be smis. That case is handled by smi-only code.
-    Label ok;
-    __ JumpIfNotBothSmi(rax, rdx, &ok);
-    __ Abort("Both arguments smi but not handled by smi-code.");
-    __ bind(&ok);
-  }
   // Check float operands.
   Label done;
+  Label rax_is_smi;
   Label rax_is_object;
   Label rdx_is_object;
 
   __ JumpIfNotSmi(rdx, &rdx_is_object);
   __ SmiToInteger32(rdx, rdx);
+  __ JumpIfSmi(rax, &rax_is_smi);
 
   __ bind(&rax_is_object);
   IntegerConvert(masm, rcx, rax);  // Uses rdi, rcx and rbx.
@@ -10062,6 +10048,7 @@
   __ bind(&rdx_is_object);
   IntegerConvert(masm, rdx, rdx);  // Uses rdi, rcx and rbx.
   __ JumpIfNotSmi(rax, &rax_is_object);
+  __ bind(&rax_is_smi);
   __ SmiToInteger32(rcx, rax);
 
   __ bind(&done);
@@ -10446,7 +10433,6 @@
         Label not_floats;
         // rax: y
         // rdx: x
-        ASSERT(!static_operands_type_.IsSmi());
         if (static_operands_type_.IsNumber()) {
           if (FLAG_debug_code) {
             // Assert at runtime that inputs are only numbers.
@@ -11583,7 +11569,9 @@
   __ JumpIfNotBothPositiveSmi(rcx, rdx, &runtime);
 
   __ SmiSub(rcx, rcx, rdx, NULL);  // Overflow doesn't happen.
-  __ j(negative, &runtime);
+  __ cmpq(FieldOperand(rax, String::kLengthOffset), rcx);
+  Label return_rax;
+  __ j(equal, &return_rax);
   // Special handling of sub-strings of length 1 and 2. One character strings
   // are handled in the runtime system (looked up in the single character
   // cache). Two character strings are looked for in the symbol cache.
@@ -11686,6 +11674,8 @@
   // rsi: character of sub string start
   StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
   __ movq(rsi, rdx);  // Restore esi.
+
+  __ bind(&return_rax);
   __ IncrementCounter(&Counters::sub_string_native, 1);
   __ ret(kArgumentsSize);
 
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 44ffe5f..002a5eb 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -1028,9 +1028,9 @@
         if (opcode == 0x57) {
           mnemonic = "xorpd";
         } else if (opcode == 0x2E) {
-          mnemonic = "comisd";
-        } else if (opcode == 0x2F) {
           mnemonic = "ucomisd";
+        } else if (opcode == 0x2F) {
+          mnemonic = "comisd";
         } else {
           UnimplementedInstruction();
         }
@@ -1057,7 +1057,7 @@
       // CVTSI2SD: integer to XMM double conversion.
       int mod, regop, rm;
       get_modrm(*current, &mod, &regop, &rm);
-      AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
+      AppendToBuffer("%sd %s,", mnemonic, NameOfXMMRegister(regop));
       current += PrintRightOperand(current);
     } else if ((opcode & 0xF8) == 0x58 || opcode == 0x51) {
       // XMM arithmetic. Mnemonic was retrieved at the start of this function.
@@ -1070,7 +1070,25 @@
     }
   } else if (group_1_prefix_ == 0xF3) {
     // Instructions with prefix 0xF3.
-    if (opcode == 0x2C) {
+    if (opcode == 0x11 || opcode == 0x10) {
+      // MOVSS: Move scalar single-precision fp to/from/between XMM registers.
+      AppendToBuffer("movss ");
+      int mod, regop, rm;
+      get_modrm(*current, &mod, &regop, &rm);
+      if (opcode == 0x11) {
+        current += PrintRightOperand(current);
+        AppendToBuffer(",%s", NameOfXMMRegister(regop));
+      } else {
+        AppendToBuffer("%s,", NameOfXMMRegister(regop));
+        current += PrintRightOperand(current);
+      }
+    } else if (opcode == 0x2A) {
+      // CVTSI2SS: integer to XMM single conversion.
+      int mod, regop, rm;
+      get_modrm(*current, &mod, &regop, &rm);
+      AppendToBuffer("%ss %s,", mnemonic, NameOfXMMRegister(regop));
+      current += PrintRightOperand(current);
+    } else if (opcode == 0x2C) {
       // CVTTSS2SI: Convert scalar single-precision FP to dword integer.
       // Assert that mod is not 3, so source is memory, not an XMM register.
       ASSERT_NE(0xC0, *current & 0xC0);
@@ -1146,8 +1164,8 @@
   switch (opcode) {
     case 0x1F:
       return "nop";
-    case 0x2A:  // F2 prefix.
-      return "cvtsi2sd";
+    case 0x2A:  // F2/F3 prefix.
+      return "cvtsi2s";
     case 0x31:
       return "rdtsc";
     case 0x51:  // F2 prefix.
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 1df1de3..e3f74f6 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -1518,12 +1518,13 @@
     case KEYED_PROPERTY: {
       __ push(rax);  // Preserve value.
       VisitForValue(prop->obj(), kStack);
-      VisitForValue(prop->key(), kStack);
-      __ movq(rax, Operand(rsp, 2 * kPointerSize));
+      VisitForValue(prop->key(), kAccumulator);
+      __ movq(rcx, rax);
+      __ pop(rdx);
+      __ pop(rax);
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
       __ call(ic, RelocInfo::CODE_TARGET);
       __ nop();  // Signal no inlined code.
-      __ Drop(3);  // Receiver, key, and extra copy of value.
       break;
     }
   }
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 6e77c89..31a806a 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -45,71 +45,93 @@
 #define __ ACCESS_MASM(masm)
 
 
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
+                                            Register type,
+                                            Label* global_object) {
+  // Register usage:
+  //   type: holds the receiver instance type on entry.
+  __ cmpb(type, Immediate(JS_GLOBAL_OBJECT_TYPE));
+  __ j(equal, global_object);
+  __ cmpb(type, Immediate(JS_BUILTINS_OBJECT_TYPE));
+  __ j(equal, global_object);
+  __ cmpb(type, Immediate(JS_GLOBAL_PROXY_TYPE));
+  __ j(equal, global_object);
+}
+
+
+// Generated code falls through if the receiver is a regular non-global
+// JS object with slow properties and no interceptors.
+static void GenerateDictionaryLoadReceiverCheck(MacroAssembler* masm,
+                                                Register receiver,
+                                                Register r0,
+                                                Register r1,
+                                                Label* miss) {
+  // Register usage:
+  //   receiver: holds the receiver on entry and is unchanged.
+  //   r0: used to hold receiver instance type.
+  //       Holds the property dictionary on fall through.
+  //   r1: used to hold the receiver's map.
+
+  __ JumpIfSmi(receiver, miss);
+
+  // Check that the receiver is a valid JS object.
+  __ movq(r1, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ movb(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
+  __ cmpb(r0, Immediate(FIRST_JS_OBJECT_TYPE));
+  __ j(below, miss);
+
+  // If this assert fails, we have to check upper bound too.
+  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+
+  GenerateGlobalInstanceTypeCheck(masm, r0, miss);
+
+  // Check for non-global object that requires access check.
+  __ testb(FieldOperand(r1, Map::kBitFieldOffset),
+           Immediate((1 << Map::kIsAccessCheckNeeded) |
+                     (1 << Map::kHasNamedInterceptor)));
+  __ j(not_zero, miss);
+
+  __ movq(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
+  __ CompareRoot(FieldOperand(r0, HeapObject::kMapOffset),
+                 Heap::kHashTableMapRootIndex);
+  __ j(not_equal, miss);
+}
+
+
 // Helper function used to load a property from a dictionary backing storage.
 // This function may return false negatives, so miss_label
 // must always call a backup property load that is complete.
-// This function is safe to call if the receiver has fast properties,
-// or if name is not a symbol, and will jump to the miss_label in that case.
+// This function is safe to call if name is not a symbol, and will jump to
+// the miss_label in that case.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
 static void GenerateDictionaryLoad(MacroAssembler* masm,
                                    Label* miss_label,
+                                   Register elements,
+                                   Register name,
                                    Register r0,
                                    Register r1,
-                                   Register r2,
-                                   Register name,
-                                   Register r4,
-                                   Register result,
-                                   DictionaryCheck check_dictionary) {
+                                   Register result) {
   // Register use:
   //
-  // r0   - used to hold the property dictionary and is unchanged.
+  // elements - holds the property dictionary on entry and is unchanged.
   //
-  // r1   - used to hold the receiver and is unchanged.
+  // name - holds the name of the property on entry and is unchanged.
   //
-  // r2   - used to hold the capacity of the property dictionary.
+  // r0   - used to hold the capacity of the property dictionary.
   //
-  // name - holds the name of the property and is unchanged.
-  //
-  // r4   - used to hold the index into the property dictionary.
+  // r1   - used to hold the index into the property dictionary.
   //
   // result - holds the result on exit if the load succeeded.
 
   Label done;
 
-  // Check for the absence of an interceptor.
-  // Load the map into r0.
-  __ movq(r0, FieldOperand(r1, JSObject::kMapOffset));
-
-  // Bail out if the receiver has a named interceptor.
-  __ testl(FieldOperand(r0, Map::kBitFieldOffset),
-           Immediate(1 << Map::kHasNamedInterceptor));
-  __ j(not_zero, miss_label);
-
-  // Bail out if we have a JS global proxy object.
-  __ movzxbq(r0, FieldOperand(r0, Map::kInstanceTypeOffset));
-  __ cmpb(r0, Immediate(JS_GLOBAL_PROXY_TYPE));
-  __ j(equal, miss_label);
-
-  // Possible work-around for http://crbug.com/16276.
-  __ cmpb(r0, Immediate(JS_GLOBAL_OBJECT_TYPE));
-  __ j(equal, miss_label);
-  __ cmpb(r0, Immediate(JS_BUILTINS_OBJECT_TYPE));
-  __ j(equal, miss_label);
-
-  // Load properties array.
-  __ movq(r0, FieldOperand(r1, JSObject::kPropertiesOffset));
-
-  if (check_dictionary == CHECK_DICTIONARY) {
-    // Check that the properties array is a dictionary.
-    __ Cmp(FieldOperand(r0, HeapObject::kMapOffset), Factory::hash_table_map());
-    __ j(not_equal, miss_label);
-  }
-
   // Compute the capacity mask.
   const int kCapacityOffset =
       StringDictionary::kHeaderSize +
       StringDictionary::kCapacityIndex * kPointerSize;
-  __ SmiToInteger32(r2, FieldOperand(r0, kCapacityOffset));
-  __ decl(r2);
+  __ SmiToInteger32(r0, FieldOperand(elements, kCapacityOffset));
+  __ decl(r0);
 
   // Generate an unrolled loop that performs a few probes before
   // giving up. Measurements done on Gmail indicate that 2 probes
@@ -120,19 +142,19 @@
       StringDictionary::kElementsStartIndex * kPointerSize;
   for (int i = 0; i < kProbes; i++) {
     // Compute the masked index: (hash + i + i * i) & mask.
-    __ movl(r4, FieldOperand(name, String::kHashFieldOffset));
-    __ shrl(r4, Immediate(String::kHashShift));
+    __ movl(r1, FieldOperand(name, String::kHashFieldOffset));
+    __ shrl(r1, Immediate(String::kHashShift));
     if (i > 0) {
-      __ addl(r4, Immediate(StringDictionary::GetProbeOffset(i)));
+      __ addl(r1, Immediate(StringDictionary::GetProbeOffset(i)));
     }
-    __ and_(r4, r2);
+    __ and_(r1, r0);
 
     // Scale the index by multiplying by the entry size.
     ASSERT(StringDictionary::kEntrySize == 3);
-    __ lea(r4, Operand(r4, r4, times_2, 0));  // r4 = r4 * 3
+    __ lea(r1, Operand(r1, r1, times_2, 0));  // r1 = r1 * 3
 
     // Check if the key is identical to the name.
-    __ cmpq(name, Operand(r0, r4, times_pointer_size,
+    __ cmpq(name, Operand(elements, r1, times_pointer_size,
                           kElementsStartOffset - kHeapObjectTag));
     if (i != kProbes - 1) {
       __ j(equal, &done);
@@ -144,14 +166,16 @@
   // Check that the value is a normal property.
   __ bind(&done);
   const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  __ Test(Operand(r0, r4, times_pointer_size, kDetailsOffset - kHeapObjectTag),
+  __ Test(Operand(elements, r1, times_pointer_size,
+                  kDetailsOffset - kHeapObjectTag),
           Smi::FromInt(PropertyDetails::TypeField::mask()));
   __ j(not_zero, miss_label);
 
   // Get the value at the masked, scaled index.
   const int kValueOffset = kElementsStartOffset + kPointerSize;
   __ movq(result,
-          Operand(r0, r4, times_pointer_size, kValueOffset - kHeapObjectTag));
+          Operand(elements, r1, times_pointer_size,
+                  kValueOffset - kHeapObjectTag));
 }
 
 
@@ -327,6 +351,8 @@
   //  -- rsp[0]  : return address
   // -----------------------------------
 
+  __ IncrementCounter(&Counters::keyed_load_miss, 1);
+
   __ pop(rbx);
   __ push(rdx);  // receiver
   __ push(rax);  // name
@@ -360,6 +386,7 @@
 static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                            Register receiver,
                                            Register map,
+                                           int interceptor_bit,
                                            Label* slow) {
   // Register use:
   //   receiver - holds the receiver and is unchanged.
@@ -379,7 +406,8 @@
 
   // Check bit field.
   __ testb(FieldOperand(map, Map::kBitFieldOffset),
-           Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
+           Immediate((1 << Map::kIsAccessCheckNeeded) |
+                     (1 << interceptor_bit)));
   __ j(not_zero, slow);
 }
 
@@ -500,14 +528,15 @@
   Label slow, check_string, index_smi, index_string;
   Label check_pixel_array, probe_dictionary, check_number_dictionary;
 
-  GenerateKeyedLoadReceiverCheck(masm, rdx, rcx, &slow);
-
   // Check that the key is a smi.
   __ JumpIfNotSmi(rax, &check_string);
   __ bind(&index_smi);
   // Now the key is known to be a smi. This place is also jumped to from below
   // where a numeric string is converted to a smi.
 
+  GenerateKeyedLoadReceiverCheck(
+      masm, rdx, rcx, Map::kHasIndexedInterceptor, &slow);
+
   GenerateFastArrayLoad(masm,
                         rdx,
                         rax,
@@ -557,6 +586,9 @@
   __ bind(&check_string);
   GenerateKeyStringCheck(masm, rax, rcx, rbx, &index_string, &slow);
 
+  GenerateKeyedLoadReceiverCheck(
+      masm, rdx, rcx, Map::kHasNamedInterceptor, &slow);
+
   // If the receiver is a fast-case object, check the keyed lookup
   // cache. Otherwise probe the dictionary leaving result in rcx.
   __ movq(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
@@ -608,15 +640,13 @@
   __ bind(&probe_dictionary);
   // rdx: receiver
   // rax: key
-  GenerateDictionaryLoad(masm,
-                         &slow,
-                         rbx,
-                         rdx,
-                         rcx,
-                         rax,
-                         rdi,
-                         rax,
-                         DICTIONARY_CHECK_DONE);
+  // rbx: elements
+
+  __ movq(rcx, FieldOperand(rdx, JSObject::kMapOffset));
+  __ movb(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
+  GenerateGlobalInstanceTypeCheck(masm, rcx, &slow);
+
+  GenerateDictionaryLoad(masm, &slow, rbx, rax, rcx, rdi, rax);
   __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
   __ ret(0);
 
@@ -672,7 +702,7 @@
   //  -- rdx    : receiver
   //  -- rsp[0] : return address
   // -----------------------------------
-  Label slow, failed_allocation;
+  Label slow;
 
   // Check that the object isn't a smi.
   __ JumpIfSmi(rdx, &slow);
@@ -731,7 +761,7 @@
       __ movl(rcx, Operand(rbx, rcx, times_4, 0));
       break;
     case kExternalFloatArray:
-      __ fld_s(Operand(rbx, rcx, times_4, 0));
+      __ cvtss2sd(xmm0, Operand(rbx, rcx, times_4, 0));
       break;
     default:
       UNREACHABLE();
@@ -743,20 +773,16 @@
   // For integer array types:
   // rcx: value
   // For floating-point array type:
-  // FP(0): value
+  // xmm0: value as double.
 
-  if (array_type == kExternalIntArray ||
-      array_type == kExternalUnsignedIntArray) {
-    // For the Int and UnsignedInt array types, we need to see whether
+  ASSERT(kSmiValueSize == 32);
+  if (array_type == kExternalUnsignedIntArray) {
+    // For the UnsignedInt array type, we need to see whether
     // the value can be represented in a Smi. If not, we need to convert
     // it to a HeapNumber.
     Label box_int;
-    if (array_type == kExternalIntArray) {
-      __ JumpIfNotValidSmiValue(rcx, &box_int);
-    } else {
-      ASSERT_EQ(array_type, kExternalUnsignedIntArray);
-      __ JumpIfUIntNotValidSmiValue(rcx, &box_int);
-    }
+
+    __ JumpIfUIntNotValidSmiValue(rcx, &box_int);
 
     __ Integer32ToSmi(rax, rcx);
     __ ret(0);
@@ -765,42 +791,28 @@
 
     // Allocate a HeapNumber for the int and perform int-to-double
     // conversion.
-    __ push(rcx);
-    if (array_type == kExternalIntArray) {
-      __ fild_s(Operand(rsp, 0));
-    } else {
-      ASSERT(array_type == kExternalUnsignedIntArray);
-      // The value is zero-extended on the stack, because all pushes are
-      // 64-bit and we loaded the value from memory with movl.
-      __ fild_d(Operand(rsp, 0));
-    }
-    __ pop(rcx);
-    // FP(0): value
-    __ AllocateHeapNumber(rcx, rbx, &failed_allocation);
+    // The value is zero-extended since we loaded the value from memory
+    // with movl.
+    __ cvtqsi2sd(xmm0, rcx);
+
+    __ AllocateHeapNumber(rcx, rbx, &slow);
     // Set the value.
+    __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
     __ movq(rax, rcx);
-    __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
     __ ret(0);
   } else if (array_type == kExternalFloatArray) {
     // For the floating-point array type, we need to always allocate a
     // HeapNumber.
-    __ AllocateHeapNumber(rcx, rbx, &failed_allocation);
+    __ AllocateHeapNumber(rcx, rbx, &slow);
     // Set the value.
+    __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
     __ movq(rax, rcx);
-    __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
     __ ret(0);
   } else {
     __ Integer32ToSmi(rax, rcx);
     __ ret(0);
   }
 
-  // If we fail allocation of the HeapNumber, we still have a value on
-  // top of the FPU stack. Remove it.
-  __ bind(&failed_allocation);
-  __ ffree();
-  __ fincstp();
-  // Fall through to slow case.
-
   // Slow case: Jump to runtime.
   __ bind(&slow);
   __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
@@ -1086,10 +1098,8 @@
       break;
     case kExternalFloatArray:
       // Need to perform int-to-float conversion.
-      __ push(rdx);
-      __ fild_s(Operand(rsp, 0));
-      __ pop(rdx);
-      __ fstp_s(Operand(rbx, rdi, times_4, 0));
+      __ cvtlsi2ss(xmm0, rdx);
+      __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
       break;
     default:
       UNREACHABLE();
@@ -1110,53 +1120,41 @@
   // The WebGL specification leaves the behavior of storing NaN and
   // +/-Infinity into integer arrays basically undefined. For more
   // reproducible behavior, convert these to zero.
-  __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
+  __ movsd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
   __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
   // rdi: untagged index
   // rbx: base pointer of external storage
-  // top of FPU stack: value
+  // xmm0: value as double.
   if (array_type == kExternalFloatArray) {
-    __ fstp_s(Operand(rbx, rdi, times_4, 0));
+    __ cvtsd2ss(xmm0, xmm0);
+    __ movss(Operand(rbx, rdi, times_4, 0), xmm0);
     __ ret(0);
   } else {
     // Need to perform float-to-int conversion.
-    // Test the top of the FP stack for NaN.
-    Label is_nan;
-    __ fucomi(0);
-    __ j(parity_even, &is_nan);
+    // Test the value for NaN.
 
-    __ push(rdx);  // Make room on the stack.  Receiver is no longer needed.
-    __ fistp_d(Operand(rsp, 0));
-    __ pop(rdx);
+    // Convert to int32 and store the low byte/word.
+    // If the value is NaN or +/-infinity, the result is 0x80000000,
+    // which is automatically zero when taken mod 2^n, n < 32.
     // rdx: value (converted to an untagged integer)
     // rdi: untagged index
     // rbx: base pointer of external storage
     switch (array_type) {
       case kExternalByteArray:
       case kExternalUnsignedByteArray:
+        __ cvtsd2si(rdx, xmm0);
         __ movb(Operand(rbx, rdi, times_1, 0), rdx);
         break;
       case kExternalShortArray:
       case kExternalUnsignedShortArray:
+        __ cvtsd2si(rdx, xmm0);
         __ movw(Operand(rbx, rdi, times_2, 0), rdx);
         break;
       case kExternalIntArray:
       case kExternalUnsignedIntArray: {
-        // We also need to explicitly check for +/-Infinity. These are
-        // converted to MIN_INT, but we need to be careful not to
-        // confuse with legal uses of MIN_INT.  Since MIN_INT truncated
-        // to 8 or 16 bits is zero, we only perform this test when storing
-        // 32-bit ints.
-        Label not_infinity;
-        // This test would apparently detect both NaN and Infinity,
-        // but we've already checked for NaN using the FPU hardware
-        // above.
-        __ movzxwq(rcx, FieldOperand(rax, HeapNumber::kValueOffset + 6));
-        __ and_(rcx, Immediate(0x7FF0));
-        __ cmpw(rcx, Immediate(0x7FF0));
-        __ j(not_equal, &not_infinity);
-        __ movq(rdx, Immediate(0));
-        __ bind(&not_infinity);
+        // Convert to int64, so that NaN and infinities become
+        // 0x8000000000000000, which is zero mod 2^32.
+        __ cvtsd2siq(rdx, xmm0);
         __ movl(Operand(rbx, rdi, times_4, 0), rdx);
         break;
       }
@@ -1165,31 +1163,6 @@
         break;
     }
     __ ret(0);
-
-    __ bind(&is_nan);
-    // rdi: untagged index
-    // rbx: base pointer of external storage
-    __ ffree();
-    __ fincstp();
-    __ movq(rdx, Immediate(0));
-    switch (array_type) {
-      case kExternalByteArray:
-      case kExternalUnsignedByteArray:
-        __ movb(Operand(rbx, rdi, times_1, 0), rdx);
-        break;
-      case kExternalShortArray:
-      case kExternalUnsignedShortArray:
-        __ movw(Operand(rbx, rdi, times_2, 0), rdx);
-        break;
-      case kExternalIntArray:
-      case kExternalUnsignedIntArray:
-        __ movl(Operand(rbx, rdi, times_4, 0), rdx);
-        break;
-      default:
-        UNREACHABLE();
-        break;
-    }
-    __ ret(0);
   }
 
   // Slow case: call runtime.
@@ -1212,6 +1185,13 @@
   // rsp[argc * 8]            : argument 1
   // rsp[(argc + 1) * 8]      : argument 0 = receiver
   // -----------------------------------
+
+  if (id == IC::kCallIC_Miss) {
+    __ IncrementCounter(&Counters::call_miss, 1);
+  } else {
+    __ IncrementCounter(&Counters::keyed_call_miss, 1);
+  }
+
   // Get the receiver of the function from the stack; 1 ~ return address.
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
 
@@ -1233,22 +1213,25 @@
   __ LeaveInternalFrame();
 
   // Check if the receiver is a global object of some sort.
-  Label invoke, global;
-  __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));  // receiver
-  __ JumpIfSmi(rdx, &invoke);
-  __ CmpObjectType(rdx, JS_GLOBAL_OBJECT_TYPE, rcx);
-  __ j(equal, &global);
-  __ CmpInstanceType(rcx, JS_BUILTINS_OBJECT_TYPE);
-  __ j(not_equal, &invoke);
+  // This can happen only for regular CallIC but not KeyedCallIC.
+  if (id == IC::kCallIC_Miss) {
+    Label invoke, global;
+    __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));  // receiver
+    __ JumpIfSmi(rdx, &invoke);
+    __ CmpObjectType(rdx, JS_GLOBAL_OBJECT_TYPE, rcx);
+    __ j(equal, &global);
+    __ CmpInstanceType(rcx, JS_BUILTINS_OBJECT_TYPE);
+    __ j(not_equal, &invoke);
 
-  // Patch the receiver on the stack.
-  __ bind(&global);
-  __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
-  __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+    // Patch the receiver on the stack.
+    __ bind(&global);
+    __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
+    __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+    __ bind(&invoke);
+  }
 
   // Invoke the function.
   ParameterCount actual(argc);
-  __ bind(&invoke);
   __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
 }
 
@@ -1309,13 +1292,12 @@
 }
 
 
-static void GenerateNormalHelper(MacroAssembler* masm,
-                                 int argc,
-                                 bool is_global_object,
-                                 Label* miss) {
+static void GenerateFunctionTailCall(MacroAssembler* masm,
+                                     int argc,
+                                     Label* miss) {
   // ----------- S t a t e -------------
   // rcx                    : function name
-  // rdx                    : receiver
+  // rdi                    : function
   // rsp[0]                 : return address
   // rsp[8]                 : argument argc
   // rsp[16]                : argument argc - 1
@@ -1323,21 +1305,11 @@
   // rsp[argc * 8]          : argument 1
   // rsp[(argc + 1) * 8]    : argument 0 = receiver
   // -----------------------------------
-  // Search dictionary - put result in register rdx.
-  GenerateDictionaryLoad(
-     masm, miss, rax, rdx, rbx, rcx, rdi, rdi, CHECK_DICTIONARY);
-
   __ JumpIfSmi(rdi, miss);
   // Check that the value is a JavaScript function.
   __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rdx);
   __ j(not_equal, miss);
 
-  // Patch the receiver with the global proxy if necessary.
-  if (is_global_object) {
-    __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
-    __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
-  }
-
   // Invoke the function.
   ParameterCount actual(argc);
   __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
@@ -1355,56 +1327,18 @@
   // rsp[argc * 8]          : argument 1
   // rsp[(argc + 1) * 8]    : argument 0 = receiver
   // -----------------------------------
-  Label miss, global_object, non_global_object;
+  Label miss;
 
   // Get the receiver of the function from the stack.
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
 
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(rdx, &miss);
+  GenerateDictionaryLoadReceiverCheck(masm, rdx, rax, rbx, &miss);
 
-  // Check that the receiver is a valid JS object.
-  // Because there are so many map checks and type checks, do not
-  // use CmpObjectType, but load map and type into registers.
-  __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
-  __ movb(rax, FieldOperand(rbx, Map::kInstanceTypeOffset));
-  __ cmpb(rax, Immediate(FIRST_JS_OBJECT_TYPE));
-  __ j(below, &miss);
+  // rax: elements
+  // Search the dictionary placing the result in rdi.
+  GenerateDictionaryLoad(masm, &miss, rax, rcx, rbx, rdi, rdi);
 
-  // If this assert fails, we have to check upper bound too.
-  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-
-  // Check for access to global object.
-  __ cmpb(rax, Immediate(JS_GLOBAL_OBJECT_TYPE));
-  __ j(equal, &global_object);
-  __ cmpb(rax, Immediate(JS_BUILTINS_OBJECT_TYPE));
-  __ j(not_equal, &non_global_object);
-
-  // Accessing global object: Load and invoke.
-  __ bind(&global_object);
-  // Check that the global object does not require access checks.
-  __ movb(rbx, FieldOperand(rbx, Map::kBitFieldOffset));
-  __ testb(rbx, Immediate(1 << Map::kIsAccessCheckNeeded));
-  __ j(not_equal, &miss);
-  GenerateNormalHelper(masm, argc, true, &miss);
-
-  // Accessing non-global object: Check for access to global proxy.
-  Label global_proxy, invoke;
-  __ bind(&non_global_object);
-  __ cmpb(rax, Immediate(JS_GLOBAL_PROXY_TYPE));
-  __ j(equal, &global_proxy);
-  // Check that the non-global, non-global-proxy object does not
-  // require access checks.
-  __ movb(rbx, FieldOperand(rbx, Map::kBitFieldOffset));
-  __ testb(rbx, Immediate(1 << Map::kIsAccessCheckNeeded));
-  __ j(not_equal, &miss);
-  __ bind(&invoke);
-  GenerateNormalHelper(masm, argc, false, &miss);
-
-  // Global object proxy access: Check access rights.
-  __ bind(&global_proxy);
-  __ CheckAccessGlobalProxy(rdx, rax, &miss);
-  __ jmp(&invoke);
+  GenerateFunctionTailCall(masm, argc, &miss);
 
   __ bind(&miss);
 }
@@ -1498,7 +1432,8 @@
   // Now the key is known to be a smi. This place is also jumped to from below
   // where a numeric string is converted to a smi.
 
-  GenerateKeyedLoadReceiverCheck(masm, rdx, rax, &slow_call);
+  GenerateKeyedLoadReceiverCheck(
+      masm, rdx, rax, Map::kHasIndexedInterceptor, &slow_call);
 
   GenerateFastArrayLoad(
       masm, rdx, rcx, rax, rbx, rdi, &check_number_dictionary, &slow_load);
@@ -1508,14 +1443,7 @@
   // receiver in rdx is not used after this point.
   // rcx: key
   // rdi: function
-
-  // Check that the value in edi is a JavaScript function.
-  __ JumpIfSmi(rdi, &slow_call);
-  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
-  __ j(not_equal, &slow_call);
-  // Invoke the function.
-  ParameterCount actual(argc);
-  __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
+  GenerateFunctionTailCall(masm, argc, &slow_call);
 
   __ bind(&check_number_dictionary);
   // eax: elements
@@ -1523,6 +1451,7 @@
   // Check whether the elements is a number dictionary.
   __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                  Heap::kHashTableMapRootIndex);
+  __ j(not_equal, &slow_load);
   __ SmiToInteger32(rbx, rcx);
   // ebx: untagged index
   GenerateNumberDictionaryLoad(masm, &slow_load, rax, rcx, rbx, r9, rdi, rdi);
@@ -1550,15 +1479,15 @@
   // If the receiver is a regular JS object with slow properties then do
   // a quick inline probe of the receiver's dictionary.
   // Otherwise do the monomorphic cache probe.
-  GenerateKeyedLoadReceiverCheck(masm, rdx, rax, &lookup_monomorphic_cache);
+  GenerateKeyedLoadReceiverCheck(
+      masm, rdx, rax, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);
 
   __ movq(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
   __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
                  Heap::kHashTableMapRootIndex);
   __ j(not_equal, &lookup_monomorphic_cache);
 
-  GenerateDictionaryLoad(
-      masm, &slow_load, rbx, rdx, rax, rcx, rdi, rdi, DICTIONARY_CHECK_DONE);
+  GenerateDictionaryLoad(masm, &slow_load, rbx, rcx, rax, rdi, rdi);
   __ IncrementCounter(&Counters::keyed_call_generic_lookup_dict, 1);
   __ jmp(&do_call);
 
@@ -1620,6 +1549,8 @@
   //  -- rsp[0] : return address
   // -----------------------------------
 
+  __ IncrementCounter(&Counters::load_miss, 1);
+
   __ pop(rbx);
   __ push(rax);  // receiver
   __ push(rcx);  // name
@@ -1683,38 +1614,15 @@
   //  -- rcx    : name
   //  -- rsp[0] : return address
   // -----------------------------------
-  Label miss, probe, global;
+  Label miss;
 
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(rax, &miss);
+  GenerateDictionaryLoadReceiverCheck(masm, rax, rdx, rbx, &miss);
 
-  // Check that the receiver is a valid JS object.
-  __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
-  __ j(below, &miss);
-
-  // If this assert fails, we have to check upper bound too.
-  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-
-  // Check for access to global object (unlikely).
-  __ CmpInstanceType(rbx, JS_GLOBAL_PROXY_TYPE);
-  __ j(equal, &global);
-
-  // Check for non-global object that requires access check.
-  __ testl(FieldOperand(rbx, Map::kBitFieldOffset),
-           Immediate(1 << Map::kIsAccessCheckNeeded));
-  __ j(not_zero, &miss);
-
+  //  rdx: elements
   // Search the dictionary placing the result in rax.
-  __ bind(&probe);
-  GenerateDictionaryLoad(masm, &miss, rdx, rax, rbx,
-                         rcx, rdi, rax, CHECK_DICTIONARY);
+  GenerateDictionaryLoad(masm, &miss, rdx, rcx, rbx, rdi, rax);
   __ ret(0);
 
-  // Global object access: Check access rights.
-  __ bind(&global);
-  __ CheckAccessGlobalProxy(rax, rdx, &miss);
-  __ jmp(&probe);
-
   // Cache miss: Jump to runtime.
   __ bind(&miss);
   GenerateMiss(masm);
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 24bac7d..32cd2db 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -652,8 +652,8 @@
   if (first.is(second)) {
     return CheckPositiveSmi(first);
   }
-  movl(kScratchRegister, first);
-  orl(kScratchRegister, second);
+  movq(kScratchRegister, first);
+  or_(kScratchRegister, second);
   rol(kScratchRegister, Immediate(1));
   testl(kScratchRegister, Immediate(0x03));
   return zero;
@@ -1678,8 +1678,7 @@
 
 void MacroAssembler::FCmp() {
   fucomip();
-  ffree(0);
-  fincstp();
+  fstp(0);
 }
 
 
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index bb0b681..44573f3 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -546,7 +546,8 @@
                                Register map,
                                Register instance_type);
 
-  // FCmp is similar to integer cmp, but requires unsigned
+  // FCmp compares and pops the two values on top of the FPU stack.
+  // The flag results are similar to integer cmp, but require unsigned
   // jcc instructions (je, ja, jae, jb, jbe, je, and jz).
   void FCmp();
 
diff --git a/src/x64/virtual-frame-x64.cc b/src/x64/virtual-frame-x64.cc
index e65378d..f5e17fd 100644
--- a/src/x64/virtual-frame-x64.cc
+++ b/src/x64/virtual-frame-x64.cc
@@ -115,25 +115,45 @@
     Handle<Object> undefined = Factory::undefined_value();
     FrameElement initial_value =
         FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
-    if (count == 1) {
-      __ Push(undefined);
-    } else if (count < kLocalVarBound) {
-      // For less locals the unrolled loop is more compact.
-      __ movq(kScratchRegister, undefined, RelocInfo::EMBEDDED_OBJECT);
+    if (count < kLocalVarBound) {
+      // For fewer locals the unrolled loop is more compact.
+
+      // Hope for one of the first eight registers, where the push operation
+      // takes only one byte (kScratchRegister is r10 and needs a REX prefix).
+      Result tmp = cgen()->allocator()->Allocate();
+      ASSERT(tmp.is_valid());
+      __ movq(tmp.reg(), undefined, RelocInfo::EMBEDDED_OBJECT);
       for (int i = 0; i < count; i++) {
-        __ push(kScratchRegister);
+        __ push(tmp.reg());
       }
     } else {
       // For more locals a loop in generated code is more compact.
       Label alloc_locals_loop;
       Result cnt = cgen()->allocator()->Allocate();
       ASSERT(cnt.is_valid());
-      __ movq(cnt.reg(), Immediate(count));
       __ movq(kScratchRegister, undefined, RelocInfo::EMBEDDED_OBJECT);
+#ifdef DEBUG
+      Label loop_size;
+      __ bind(&loop_size);
+#endif
+      if (is_uint8(count)) {
+        // Loading imm8 is shorter than loading imm32.
+        // Loading only partial byte register, and using decb below.
+        __ movb(cnt.reg(), Immediate(count));
+      } else {
+        __ movl(cnt.reg(), Immediate(count));
+      }
       __ bind(&alloc_locals_loop);
       __ push(kScratchRegister);
-      __ decl(cnt.reg());
+      if (is_uint8(count)) {
+        __ decb(cnt.reg());
+      } else {
+        __ decl(cnt.reg());
+      }
       __ j(not_zero, &alloc_locals_loop);
+#ifdef DEBUG
+      CHECK(masm()->SizeOfCodeGeneratedSince(&loop_size) < kLocalVarBound);
+#endif
     }
     for (int i = 0; i < count; i++) {
       elements_.Add(initial_value);
diff --git a/src/x64/virtual-frame-x64.h b/src/x64/virtual-frame-x64.h
index dc270fe..0549e3c 100644
--- a/src/x64/virtual-frame-x64.h
+++ b/src/x64/virtual-frame-x64.h
@@ -200,7 +200,7 @@
   inline void PrepareForReturn();
 
-  // Number of local variables after when we use a loop for allocating.
+  // Number of local variables at or above which a loop is used for allocating.
-  static const int kLocalVarBound = 7;
+  static const int kLocalVarBound = 14;
 
   // Allocate and initialize the frame-allocated locals.
   void AllocateStackSlots();
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index c426db4..0cf3f7b 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -5035,6 +5035,31 @@
 }
 
 
+static v8::Handle<v8::Array> NamedPropertyEnumerator(const AccessorInfo& info) {
+  v8::Handle<v8::Array> result = v8::Array::New(1);
+  result->Set(0, v8_str("x"));
+  return result;
+}
+
+
+THREADED_TEST(GetOwnPropertyNamesWithInterceptor) {
+  v8::HandleScope handle_scope;
+  v8::Handle<v8::ObjectTemplate> obj_template = v8::ObjectTemplate::New();
+
+  obj_template->Set(v8_str("x"), v8::Integer::New(42));
+  obj_template->SetNamedPropertyHandler(NULL, NULL, NULL, NULL,
+                                        NamedPropertyEnumerator);
+
+  LocalContext context;
+  v8::Handle<v8::Object> global = context->Global();
+  global->Set(v8_str("object"), obj_template->NewInstance());
+
+  v8::Handle<Value> value =
+      CompileRun("Object.getOwnPropertyNames(object).join(',')");
+  CHECK_EQ(v8_str("x"), value);
+}
+
+
 static v8::Handle<Value> ConstTenGetter(Local<String> name,
                                         const AccessorInfo& info) {
   return v8_num(10);
diff --git a/test/cctest/test-profile-generator.cc b/test/cctest/test-profile-generator.cc
index 308f764..ea477de 100644
--- a/test/cctest/test-profile-generator.cc
+++ b/test/cctest/test-profile-generator.cc
@@ -653,7 +653,7 @@
   time += SampleRateCalculator::kWallTimeQueryIntervalMs * 0.75;
   calc2.UpdateMeasurements(time);
   // (1.0 + 2.0 + 2.0) / 3
-  CHECK_EQ(kSamplingIntervalMs * 1.66666, calc2.ticks_per_ms());
+  CHECK_EQ(kSamplingIntervalMs * 5.0, floor(calc2.ticks_per_ms() * 3.0 + 0.5));
 
   SampleRateCalculator calc3;
   time = 0.0;
@@ -667,7 +667,7 @@
   time += SampleRateCalculator::kWallTimeQueryIntervalMs * 1.5;
   calc3.UpdateMeasurements(time);
   // (1.0 + 0.5 + 0.5) / 3
-  CHECK_EQ(kSamplingIntervalMs * 0.66666, calc3.ticks_per_ms());
+  CHECK_EQ(kSamplingIntervalMs * 2.0, floor(calc3.ticks_per_ms() * 3.0 + 0.5));
 }
 
 
diff --git a/test/mjsunit/for-in.js b/test/mjsunit/for-in.js
index e3436ff..ab35e95 100644
--- a/test/mjsunit/for-in.js
+++ b/test/mjsunit/for-in.js
@@ -84,3 +84,38 @@
 for (var p in { get a() {}, set a(x) {}, b : 1}) { result += p; }
 assertEquals('ab', result, "abgetset");
 
+
+// Test that for-in in the global scope works with a keyed property as "each".
+// Test outside a loop and in a loop for multiple iterations.
+a = [1,2,3,4];
+x = {foo:5, bar:6, zip:7, glep:9, 10:11};
+delete x.bar;
+y = {}
+
+for (a[2] in x) {
+  y[a[2]] = x[a[2]];
+}
+
+assertEquals(5, y.foo, "y.foo");
+assertEquals("undefined", typeof y.bar, "y.bar");
+assertEquals(7, y.zip, "y.zip");
+assertEquals(9, y.glep, "y.glep");
+assertEquals(11, y[10], "y[10]");
+assertEquals("undefined", typeof y[2], "y[2]");
+assertEquals("undefined", typeof y[0], "y[0]");
+
+for (i=0 ; i < 3; ++i) {
+  y = {}
+
+  for (a[2] in x) {
+    y[a[2]] = x[a[2]];
+  }
+
+  assertEquals(5, y.foo, "y.foo");
+  assertEquals("undefined", typeof y.bar, "y.bar");
+  assertEquals(7, y.zip, "y.zip");
+  assertEquals(9, y.glep, "y.glep");
+  assertEquals(11, y[10], "y[10]");
+  assertEquals("undefined", typeof y[2], "y[2]");
+  assertEquals("undefined", typeof y[0], "y[0]");
+}
diff --git a/test/mjsunit/regress/regress-45469.js b/test/mjsunit/regress/regress-45469.js
new file mode 100644
index 0000000..832a73f
--- /dev/null
+++ b/test/mjsunit/regress/regress-45469.js
@@ -0,0 +1,46 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that global regexps capture and fail in the correct cyclic way.
+
+var re = /x/g;
+
+for (var i = 0; i < 15; i++) {
+  assertEquals(i % 3, re.lastIndex, "preindex" + i);
+  var res = re.exec("xx");
+  assertEquals(i % 3 == 2 ? null : ["x"], res, "res" + i);
+}
+
+re = /x/g;
+
+for (var i = 0; i < 15; i++) {
+  assertEquals(i % 3, re.lastIndex, "testpreindex" + i);
+  var res = re.test("xx");
+  assertEquals(i % 3 != 2, res, "testres" + i);
+}
+
+
diff --git a/test/mjsunit/regress/regress-752.js b/test/mjsunit/regress/regress-752.js
new file mode 100644
index 0000000..1142a1f
--- /dev/null
+++ b/test/mjsunit/regress/regress-752.js
@@ -0,0 +1,36 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that JSON.stringify correctly unwraps Boolean objects.
+
+// See: http://code.google.com/p/v8/issues/detail?id=752
+
+function replacer(key, value) {
+  return value === 42 ? new Boolean(false) : value;
+}
+
+assertEquals(JSON.stringify([42], replacer), "[false]");
diff --git a/test/mjsunit/regress/regress-754.js b/test/mjsunit/regress/regress-754.js
new file mode 100644
index 0000000..0b84416
--- /dev/null
+++ b/test/mjsunit/regress/regress-754.js
@@ -0,0 +1,39 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that Array.prototype.lastIndexOf correctly handles null and undefined
+// as fromIndex argument.
+
+// See: http://code.google.com/p/v8/issues/detail?id=754
+
+var a = new Array(1,2,1);
+assertEquals(1, a.lastIndexOf(2));
+assertEquals(2, a.lastIndexOf(1));
+assertEquals(0, a.lastIndexOf(1, undefined));
+assertEquals(0, a.lastIndexOf(1, null));
+assertEquals(-1, a.lastIndexOf(2, undefined));
+assertEquals(-1, a.lastIndexOf(2, null));
diff --git a/test/mjsunit/smi-ops.js b/test/mjsunit/smi-ops.js
index d5bd214..499535c 100644
--- a/test/mjsunit/smi-ops.js
+++ b/test/mjsunit/smi-ops.js
@@ -685,3 +685,8 @@
 assertEquals(24, LeftShiftThreeBy(35));
 assertEquals(24, LeftShiftThreeBy(67));
 assertEquals(24, LeftShiftThreeBy(-29));
+
+// Regression test for a bug in the ARM code generator.  For some register
+// allocations we got the Smi overflow case wrong.
+function f(x, y) { return y + (1 << (x & 31)); }
+assertEquals(-2147483647, f(31, 1));