update V8 to TOT snapshot branch
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index d0fbabb..ecb4c49 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -125,7 +125,7 @@
 // edi: called JS function
 // esi: callee's context
 
-void CodeGenerator::Generate(CompilationInfo* info, Mode mode) {
+void CodeGenerator::Generate(CompilationInfo* info) {
   // Record the position for debugging purposes.
   CodeForFunctionPosition(info->function());
 
@@ -164,7 +164,7 @@
     // esi: callee's context
     allocator_->Initialize();
 
-    if (mode == PRIMARY) {
+    if (info->mode() == CompilationInfo::PRIMARY) {
       frame_->Enter();
 
       // Allocate space for locals and initialize them.
@@ -255,6 +255,12 @@
       // frame to match this state.
       frame_->Adjust(3);
       allocator_->Unuse(edi);
+
+      // Bind all the bailout labels to the beginning of the function.
+      List<CompilationInfo::Bailout*>* bailouts = info->bailouts();
+      for (int i = 0; i < bailouts->length(); i++) {
+        __ bind(bailouts->at(i)->label());
+      }
     }
 
     // Initialize the function return target after the locals are set
@@ -574,7 +580,9 @@
   } else if (variable != NULL && variable->slot() != NULL) {
     // For a variable that rewrites to a slot, we signal it is the immediate
     // subexpression of a typeof.
-    LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
+    Result result =
+        LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
+    frame()->Push(&result);
   } else {
     // Anything else can be handled normally.
     Load(expr);
@@ -623,8 +631,7 @@
     // We have to skip storing into the arguments slot if it has already
     // been written to. This can happen if a function has a local
     // variable named 'arguments'.
-    LoadFromSlot(arguments->slot(), NOT_INSIDE_TYPEOF);
-    Result probe = frame_->Pop();
+    Result probe = LoadFromSlot(arguments->slot(), NOT_INSIDE_TYPEOF);
     if (probe.is_constant()) {
       // We have to skip updating the arguments object if it has
       // been assigned a proper value.
@@ -688,6 +695,11 @@
     // The expression is a variable proxy that does not rewrite to a
     // property.  Global variables are treated as named property references.
     if (var->is_global()) {
+      // If eax is free, the register allocator prefers it.  Thus the code
+      // generator will load the global object into eax, which is where
+      // LoadIC wants it.  Most uses of Reference call LoadIC directly
+      // after the reference is created.
+      frame_->Spill(eax);
       LoadGlobal();
       ref->set_type(Reference::NAMED);
     } else {
@@ -721,35 +733,54 @@
   // The value to convert should be popped from the frame.
   Result value = frame_->Pop();
   value.ToRegister();
-  // Fast case checks.
 
-  // 'false' => false.
-  __ cmp(value.reg(), Factory::false_value());
-  dest->false_target()->Branch(equal);
+  if (value.is_number()) {
+    Comment cmnt(masm_, "ONLY_NUMBER");
+    // Fast case if NumberInfo indicates only numbers.
+    if (FLAG_debug_code) {
+      __ AbortIfNotNumber(value.reg(), "ToBoolean operand is not a number.");
+    }
+    // Smi => false iff zero.
+    ASSERT(kSmiTag == 0);
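+    // With a zero smi tag, smi zero is exactly the machine word zero, so a
+    // plain register test detects it.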
+    __ test(value.reg(), Operand(value.reg()));
+    dest->false_target()->Branch(zero);
+    __ test(value.reg(), Immediate(kSmiTagMask));
+    dest->true_target()->Branch(zero);
+    __ fldz();
+    __ fld_d(FieldOperand(value.reg(), HeapNumber::kValueOffset));
+    __ FCmp();
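+    // FCmp sets the zero flag both when the value equals 0.0 and when it
+    // is NaN (unordered), so both fall through to the false target.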
+    value.Unuse();
+    dest->Split(not_zero);
+  } else {
+    // Fast case checks.
+    // 'false' => false.
+    __ cmp(value.reg(), Factory::false_value());
+    dest->false_target()->Branch(equal);
 
-  // 'true' => true.
-  __ cmp(value.reg(), Factory::true_value());
-  dest->true_target()->Branch(equal);
+    // 'true' => true.
+    __ cmp(value.reg(), Factory::true_value());
+    dest->true_target()->Branch(equal);
 
-  // 'undefined' => false.
-  __ cmp(value.reg(), Factory::undefined_value());
-  dest->false_target()->Branch(equal);
+    // 'undefined' => false.
+    __ cmp(value.reg(), Factory::undefined_value());
+    dest->false_target()->Branch(equal);
 
-  // Smi => false iff zero.
-  ASSERT(kSmiTag == 0);
-  __ test(value.reg(), Operand(value.reg()));
-  dest->false_target()->Branch(zero);
-  __ test(value.reg(), Immediate(kSmiTagMask));
-  dest->true_target()->Branch(zero);
+    // Smi => false iff zero.
+    ASSERT(kSmiTag == 0);
+    __ test(value.reg(), Operand(value.reg()));
+    dest->false_target()->Branch(zero);
+    __ test(value.reg(), Immediate(kSmiTagMask));
+    dest->true_target()->Branch(zero);
 
-  // Call the stub for all other cases.
-  frame_->Push(&value);  // Undo the Pop() from above.
-  ToBooleanStub stub;
-  Result temp = frame_->CallStub(&stub, 1);
-  // Convert the result to a condition code.
-  __ test(temp.reg(), Operand(temp.reg()));
-  temp.Unuse();
-  dest->Split(not_equal);
+    // Call the stub for all other cases.
+    frame_->Push(&value);  // Undo the Pop() from above.
+    ToBooleanStub stub;
+    Result temp = frame_->CallStub(&stub, 1);
+    // Convert the result to a condition code.
+    __ test(temp.reg(), Operand(temp.reg()));
+    temp.Unuse();
+    dest->Split(not_equal);
+  }
 }
 
 
@@ -789,6 +820,10 @@
   static void LoadAsIntegers(MacroAssembler* masm,
                              bool use_sse3,
                              Label* operand_conversion_failure);
+  // Test if operands are smis or heap numbers and load them
+  // into xmm0 and xmm1 if they are. Operands are in edx and eax.
+  // Leaves operands unchanged.
+  static void LoadSSE2Operands(MacroAssembler* masm);
   // Test if operands are numbers (smi or HeapNumber objects), and load
   // them into xmm0 and xmm1 if they are.  Jump to label not_numbers if
   // either operand is not a number.  Operands are in edx and eax.
@@ -816,12 +851,13 @@
   }
 
   OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
-               "GenericBinaryOpStub_%s_%s%s_%s%s",
+               "GenericBinaryOpStub_%s_%s%s_%s%s_%s",
                op_name,
                overwrite_name,
                (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
                args_in_registers_ ? "RegArgs" : "StackArgs",
-               args_reversed_ ? "_R" : "");
+               args_reversed_ ? "_R" : "",
+               NumberInfo::ToString(operands_type_));
   return name_;
 }
 
@@ -971,27 +1007,35 @@
     // Neither operand is known to be a string.
   }
 
-  bool left_is_smi = left.is_constant() && left.handle()->IsSmi();
-  bool left_is_non_smi = left.is_constant() && !left.handle()->IsSmi();
-  bool right_is_smi = right.is_constant() && right.handle()->IsSmi();
-  bool right_is_non_smi = right.is_constant() && !right.handle()->IsSmi();
+  bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi();
+  bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi();
+  bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi();
+  bool right_is_non_smi_constant =
+      right.is_constant() && !right.handle()->IsSmi();
 
-  if (left_is_smi && right_is_smi) {
+  if (left_is_smi_constant && right_is_smi_constant) {
     // Compute the constant result at compile time, and leave it on the frame.
     int left_int = Smi::cast(*left.handle())->value();
     int right_int = Smi::cast(*right.handle())->value();
     if (FoldConstantSmis(op, left_int, right_int)) return;
   }
 
+  // Get number type of left and right sub-expressions.
+  NumberInfo::Type operands_type =
+      NumberInfo::Combine(left.number_info(), right.number_info());
+
   Result answer;
-  if (left_is_non_smi || right_is_non_smi) {
+  if (left_is_non_smi_constant || right_is_non_smi_constant) {
     // Go straight to the slow case, with no smi code.
-    GenericBinaryOpStub stub(op, overwrite_mode, NO_SMI_CODE_IN_STUB);
+    GenericBinaryOpStub stub(op,
+                             overwrite_mode,
+                             NO_SMI_CODE_IN_STUB,
+                             operands_type);
     answer = stub.GenerateCall(masm_, frame_, &left, &right);
-  } else if (right_is_smi) {
+  } else if (right_is_smi_constant) {
     answer = ConstantSmiBinaryOperation(op, &left, right.handle(),
                                         type, false, overwrite_mode);
-  } else if (left_is_smi) {
+  } else if (left_is_smi_constant) {
     answer = ConstantSmiBinaryOperation(op, &right, left.handle(),
                                         type, true, overwrite_mode);
   } else {
@@ -1003,10 +1047,67 @@
     if (loop_nesting() > 0 && (Token::IsBitOp(op) || type->IsLikelySmi())) {
       answer = LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
     } else {
-      GenericBinaryOpStub stub(op, overwrite_mode, NO_GENERIC_BINARY_FLAGS);
+      GenericBinaryOpStub stub(op,
+                               overwrite_mode,
+                               NO_GENERIC_BINARY_FLAGS,
+                               operands_type);
       answer = stub.GenerateCall(masm_, frame_, &left, &right);
     }
   }
+
+  // Set NumberInfo of result according to the operation performed.
+  // Rely on the fact that smis have a 31-bit payload on ia32.
+  ASSERT(kSmiValueSize == 31);
+  NumberInfo::Type result_type = NumberInfo::kUnknown;
+  switch (op) {
+    case Token::COMMA:
+      result_type = right.number_info();
+      break;
+    case Token::OR:
+    case Token::AND:
+      // Result type can be either of the two input types.
+      result_type = operands_type;
+      break;
+    case Token::BIT_OR:
+    case Token::BIT_XOR:
+    case Token::BIT_AND:
+      // Result is always a number. Smi property of inputs is preserved.
+      result_type = (operands_type == NumberInfo::kSmi)
+          ? NumberInfo::kSmi
+          : NumberInfo::kNumber;
+      break;
+    case Token::SAR:
+      // Result is a smi if we shift by a constant >= 1, otherwise a number.
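+      // (an arithmetic right shift of a 32-bit value by at least one bit
+      // always fits in the 31-bit smi payload)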
+      result_type = (right.is_constant() && right.handle()->IsSmi()
+                     && Smi::cast(*right.handle())->value() >= 1)
+          ? NumberInfo::kSmi
+          : NumberInfo::kNumber;
+      break;
+    case Token::SHR:
+      // Result is a smi if we shift by a constant >= 2, otherwise a number.
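+      // (an unsigned result only fits in a smi once it is below 2^30, so
+      // at least two bits must be shifted out)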
+      result_type = (right.is_constant() && right.handle()->IsSmi()
+                     && Smi::cast(*right.handle())->value() >= 2)
+          ? NumberInfo::kSmi
+          : NumberInfo::kNumber;
+      break;
+    case Token::ADD:
+      // Result could be a string or a number. Check types of inputs.
+      result_type = NumberInfo::IsNumber(operands_type)
+          ? NumberInfo::kNumber
+          : NumberInfo::kUnknown;
+      break;
+    case Token::SHL:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+    case Token::MOD:
+      // Result is always a number.
+      result_type = NumberInfo::kNumber;
+      break;
+    default:
+      UNREACHABLE();
+  }
+  answer.set_number_info(result_type);
   frame_->Push(&answer);
 }
 
@@ -1848,6 +1949,39 @@
       break;
     }
 
+    case Token::DIV:
+      if (!reversed && int_value == 2) {
+        operand->ToRegister();
+        frame_->Spill(operand->reg());
+
+        DeferredInlineSmiOperation* deferred =
+            new DeferredInlineSmiOperation(op,
+                                           operand->reg(),
+                                           operand->reg(),
+                                           smi_value,
+                                           overwrite_mode);
+        // Check that lowest log2(value) bits of operand are zero, and test
+        // smi tag at the same time.
+        ASSERT_EQ(0, kSmiTag);
+        ASSERT_EQ(1, kSmiTagSize);
+        __ test(operand->reg(), Immediate(3));
+        deferred->Branch(not_zero);  // Branch if non-smi or odd smi.
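+        // A smi v is encoded as v << 1, so for even v an arithmetic shift
+        // of the encoding by one yields (v / 2) << 1, the encoding of v / 2.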
+        __ sar(operand->reg(), 1);
+        deferred->BindExit();
+        answer = *operand;
+      } else {
+        // This DIV case cannot fall through past the MOD case below to the
+        // default case, so the default case is duplicated here.
+        Result constant_operand(value);
+        if (reversed) {
+          answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+                                            overwrite_mode);
+        } else {
+          answer = LikelySmiBinaryOperation(op, operand, &constant_operand,
+                                            overwrite_mode);
+        }
+      }
+      break;
     // Generate inline code for mod of powers of 2 and negative powers of 2.
     case Token::MOD:
       if (!reversed &&
@@ -2327,6 +2461,7 @@
   // Load applicand.apply onto the stack. This will usually
   // give us a megamorphic load site. Not super, but it works.
   Load(applicand);
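+  // Duplicate the applicand; the load IC below consumes one copy and the
+  // original is left on the frame.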
+  frame()->Dup();
   Handle<String> name = Factory::LookupAsciiSymbol("apply");
   frame()->Push(name);
   Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
@@ -2336,7 +2471,9 @@
   // Load the receiver and the existing arguments object onto the
   // expression stack. Avoid allocating the arguments object here.
   Load(receiver);
-  LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+  Result existing_args =
+      LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+  frame()->Push(&existing_args);
 
   // Emit the source position information after having loaded the
   // receiver and the arguments.
@@ -3906,35 +4043,32 @@
   // Spill everything, even constants, to the frame.
   frame_->SpillAll();
 
-  DebuggerStatementStub ces;
-  frame_->CallStub(&ces, 0);
+  frame_->DebugBreak();
   // Ignore the return value.
 #endif
 }
 
 
-void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
+Result CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
   ASSERT(boilerplate->IsBoilerplate());
 
   // The inevitable call will sync frame elements to memory anyway, so
   // we do it eagerly to allow us to push the arguments directly into
   // place.
-  frame_->SyncRange(0, frame_->element_count() - 1);
+  frame()->SyncRange(0, frame()->element_count() - 1);
 
   // Use the fast case closure allocation code that allocates in new
   // space for nested functions that don't need literals cloning.
   if (scope()->is_function_scope() && boilerplate->NumberOfLiterals() == 0) {
     FastNewClosureStub stub;
-    frame_->EmitPush(Immediate(boilerplate));
-    Result answer = frame_->CallStub(&stub, 1);
-    frame_->Push(&answer);
+    frame()->EmitPush(Immediate(boilerplate));
+    return frame()->CallStub(&stub, 1);
   } else {
     // Call the runtime to instantiate the function boilerplate
     // object.
-    frame_->EmitPush(esi);
-    frame_->EmitPush(Immediate(boilerplate));
-    Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
-    frame_->Push(&result);
+    frame()->EmitPush(esi);
+    frame()->EmitPush(Immediate(boilerplate));
+    return frame()->CallRuntime(Runtime::kNewClosure, 2);
   }
 }
 
@@ -3947,14 +4081,16 @@
       Compiler::BuildBoilerplate(node, script(), this);
   // Check for stack-overflow exception.
   if (HasStackOverflow()) return;
-  InstantiateBoilerplate(boilerplate);
+  Result result = InstantiateBoilerplate(boilerplate);
+  frame()->Push(&result);
 }
 
 
 void CodeGenerator::VisitFunctionBoilerplateLiteral(
     FunctionBoilerplateLiteral* node) {
   Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
-  InstantiateBoilerplate(node->boilerplate());
+  Result result = InstantiateBoilerplate(node->boilerplate());
+  frame()->Push(&result);
 }
 
 
@@ -3990,13 +4126,12 @@
 }
 
 
-void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
+Result CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
+  Result result;
   if (slot->type() == Slot::LOOKUP) {
     ASSERT(slot->var()->is_dynamic());
-
     JumpTarget slow;
     JumpTarget done;
-    Result value;
 
     // Generate fast-case code for variables that might be shadowed by
     // eval-introduced variables.  Eval is used a lot without
@@ -4004,14 +4139,10 @@
     // perform a runtime call for all variables in the scope
     // containing the eval.
     if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
-      value = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
+      result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
       // If there was no control flow to slow, we can exit early.
-      if (!slow.is_linked()) {
-        frame_->Push(&value);
-        return;
-      }
-
-      done.Jump(&value);
+      if (!slow.is_linked()) return result;
+      done.Jump(&result);
 
     } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
       Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
@@ -4021,21 +4152,21 @@
         // Allocate a fresh register to use as a temp in
         // ContextSlotOperandCheckExtensions and to hold the result
         // value.
-        value = allocator_->Allocate();
-        ASSERT(value.is_valid());
-        __ mov(value.reg(),
+        result = allocator()->Allocate();
+        ASSERT(result.is_valid());
+        __ mov(result.reg(),
                ContextSlotOperandCheckExtensions(potential_slot,
-                                                 value,
+                                                 result,
                                                  &slow));
         if (potential_slot->var()->mode() == Variable::CONST) {
-          __ cmp(value.reg(), Factory::the_hole_value());
-          done.Branch(not_equal, &value);
-          __ mov(value.reg(), Factory::undefined_value());
+          __ cmp(result.reg(), Factory::the_hole_value());
+          done.Branch(not_equal, &result);
+          __ mov(result.reg(), Factory::undefined_value());
         }
         // There is always control flow to slow from
         // ContextSlotOperandCheckExtensions so we have to jump around
         // it.
-        done.Jump(&value);
+        done.Jump(&result);
       }
     }
 
@@ -4043,18 +4174,18 @@
     // A runtime call is inevitable.  We eagerly sync frame elements
     // to memory so that we can push the arguments directly into place
     // on top of the frame.
-    frame_->SyncRange(0, frame_->element_count() - 1);
-    frame_->EmitPush(esi);
-    frame_->EmitPush(Immediate(slot->var()->name()));
+    frame()->SyncRange(0, frame()->element_count() - 1);
+    frame()->EmitPush(esi);
+    frame()->EmitPush(Immediate(slot->var()->name()));
     if (typeof_state == INSIDE_TYPEOF) {
-      value =
-          frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+      result =
+          frame()->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
     } else {
-      value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
+      result = frame()->CallRuntime(Runtime::kLoadContextSlot, 2);
     }
 
-    done.Bind(&value);
-    frame_->Push(&value);
+    done.Bind(&result);
+    return result;
 
   } else if (slot->var()->mode() == Variable::CONST) {
     // Const slots may contain 'the hole' value (the constant hasn't been
@@ -4065,19 +4196,21 @@
     // potentially unsafe direct-frame access of SlotOperand.
     VirtualFrame::SpilledScope spilled_scope;
     Comment cmnt(masm_, "[ Load const");
-    JumpTarget exit;
+    Label exit;
     __ mov(ecx, SlotOperand(slot, ecx));
     __ cmp(ecx, Factory::the_hole_value());
-    exit.Branch(not_equal);
+    __ j(not_equal, &exit);
     __ mov(ecx, Factory::undefined_value());
-    exit.Bind();
-    frame_->EmitPush(ecx);
+    __ bind(&exit);
+    return Result(ecx);
 
   } else if (slot->type() == Slot::PARAMETER) {
-    frame_->PushParameterAt(slot->index());
+    frame()->PushParameterAt(slot->index());
+    return frame()->Pop();
 
   } else if (slot->type() == Slot::LOCAL) {
-    frame_->PushLocalAt(slot->index());
+    frame()->PushLocalAt(slot->index());
+    return frame()->Pop();
 
   } else {
     // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
@@ -4086,49 +4219,46 @@
     // The use of SlotOperand below is safe for an unspilled frame
     // because it will always be a context slot.
     ASSERT(slot->type() == Slot::CONTEXT);
-    Result temp = allocator_->Allocate();
-    ASSERT(temp.is_valid());
-    __ mov(temp.reg(), SlotOperand(slot, temp.reg()));
-    frame_->Push(&temp);
+    result = allocator()->Allocate();
+    ASSERT(result.is_valid());
+    __ mov(result.reg(), SlotOperand(slot, result.reg()));
+    return result;
   }
 }
 
 
-void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
-                                                  TypeofState state) {
-  LoadFromSlot(slot, state);
+Result CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
+                                                    TypeofState state) {
+  Result result = LoadFromSlot(slot, state);
 
   // Bail out quickly if we're not using lazy arguments allocation.
-  if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
+  if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return result;
 
   // ... or if the slot isn't a non-parameter arguments slot.
-  if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
-
-  // Pop the loaded value from the stack.
-  Result value = frame_->Pop();
+  if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return result;
 
   // If the loaded value is a constant, we know whether the arguments
   // object has been lazily allocated yet.
-  if (value.is_constant()) {
-    if (value.handle()->IsTheHole()) {
-      Result arguments = StoreArgumentsObject(false);
-      frame_->Push(&arguments);
+  if (result.is_constant()) {
+    if (result.handle()->IsTheHole()) {
+      result.Unuse();
+      return StoreArgumentsObject(false);
     } else {
-      frame_->Push(&value);
+      return result;
     }
-    return;
   }
 
   // The loaded value is in a register. If it is the sentinel that
   // indicates that we haven't loaded the arguments object yet, we
   // need to do it now.
   JumpTarget exit;
-  __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
-  frame_->Push(&value);
-  exit.Branch(not_equal);
-  Result arguments = StoreArgumentsObject(false);
-  frame_->SetElementAt(0, &arguments);
-  exit.Bind();
+  __ cmp(Operand(result.reg()), Immediate(Factory::the_hole_value()));
+  exit.Branch(not_equal, &result);
+
+  result.Unuse();
+  result = StoreArgumentsObject(false);
+  exit.Bind(&result);
+  return result;
 }
 
 
@@ -4188,6 +4318,10 @@
 
   // All extension objects were empty and it is safe to use a global
   // load IC call.
+  // The register allocator prefers eax if it is free, so the code generator
+  // will load the global object directly into eax, which is where the LoadIC
+  // expects it.
+  frame_->Spill(eax);
   LoadGlobal();
   frame_->Push(slot->var()->name());
   RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
@@ -4198,8 +4332,6 @@
   // property case was inlined.  Ensure that there is not a test eax
   // instruction here.
   __ nop();
-  // Discard the global object. The result is in answer.
-  frame_->Drop();
   return answer;
 }
 
@@ -4304,7 +4436,8 @@
 
 void CodeGenerator::VisitSlot(Slot* node) {
   Comment cmnt(masm_, "[ Slot");
-  LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
+  Result result = LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
+  frame()->Push(&result);
 }
 
 
@@ -4474,8 +4607,8 @@
           // Duplicate the object as the IC receiver.
           frame_->Dup();
           Load(property->value());
-          frame_->Push(key);
-          Result ignored = frame_->CallStoreIC();
+          Result dummy = frame_->CallStoreIC(Handle<String>::cast(key), false);
+          dummy.Unuse();
           break;
         }
         // Fall through
@@ -4599,106 +4732,241 @@
 }
 
 
+void CodeGenerator::EmitSlotAssignment(Assignment* node) {
+#ifdef DEBUG
+  int original_height = frame()->height();
+#endif
+  Comment cmnt(masm(), "[ Variable Assignment");
+  Variable* var = node->target()->AsVariableProxy()->AsVariable();
+  ASSERT(var != NULL);
+  Slot* slot = var->slot();
+  ASSERT(slot != NULL);
+
+  // Evaluate the right-hand side.
+  if (node->is_compound()) {
+    Result result = LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
+    frame()->Push(&result);
+    Load(node->value());
+
+    bool overwrite_value =
+        (node->value()->AsBinaryOperation() != NULL &&
+         node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+    GenericBinaryOperation(node->binary_op(),
+                           node->type(),
+                           overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+  } else {
+    Load(node->value());
+  }
+
+  // Perform the assignment.
+  if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) {
+    CodeForSourcePosition(node->position());
+    StoreToSlot(slot,
+                node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT);
+  }
+  ASSERT(frame()->height() == original_height + 1);
+}
+
+
+void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
+#ifdef DEBUG
+  int original_height = frame()->height();
+#endif
+  Comment cmnt(masm(), "[ Named Property Assignment");
+  Variable* var = node->target()->AsVariableProxy()->AsVariable();
+  Property* prop = node->target()->AsProperty();
+  ASSERT(var == NULL || (prop == NULL && var->is_global()));
+
+  // Initialize name and evaluate the receiver subexpression if necessary.
+  Handle<String> name;
+  bool is_trivial_receiver = false;
+  if (var != NULL) {
+    name = var->name();
+  } else {
+    Literal* lit = prop->key()->AsLiteral();
+    ASSERT_NOT_NULL(lit);
+    name = Handle<String>::cast(lit->handle());
+    // Do not materialize the receiver on the frame if it is trivial.
+    is_trivial_receiver = prop->obj()->IsTrivial();
+    if (!is_trivial_receiver) Load(prop->obj());
+  }
+
+  if (node->starts_initialization_block()) {
+    ASSERT_EQ(NULL, var);
+    // Change to slow case in the beginning of an initialization block to
+    // avoid the quadratic behavior of repeatedly adding fast properties.
+    if (is_trivial_receiver) {
+      frame()->Push(prop->obj());
+    } else {
+      frame()->Dup();
+    }
+    Result ignored = frame()->CallRuntime(Runtime::kToSlowProperties, 1);
+  }
+
+  if (node->ends_initialization_block() && !is_trivial_receiver) {
+    // Add an extra copy of the receiver to the frame, so that it can be
+    // converted back to fast case after the assignment.
+    frame()->Dup();
+  }
+
+  // Evaluate the right-hand side.
+  if (node->is_compound()) {
+    if (is_trivial_receiver) {
+      frame()->Push(prop->obj());
+    } else if (var != NULL) {
+      // The LoadIC stub expects the receiver in eax.  Spilling eax frees
+      // it, and the register allocator prefers eax, so LoadGlobal will put
+      // the global object there.
+      frame_->Spill(eax);
+      LoadGlobal();
+    } else {
+      frame()->Dup();
+    }
+    Result value = EmitNamedLoad(name, var != NULL);
+    frame()->Push(&value);
+    Load(node->value());
+
+    bool overwrite_value =
+        (node->value()->AsBinaryOperation() != NULL &&
+         node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+    GenericBinaryOperation(node->binary_op(),
+                           node->type(),
+                           overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+  } else {
+    Load(node->value());
+  }
+
+  // Perform the assignment.  It is safe to ignore constants here.
+  ASSERT(var == NULL || var->mode() != Variable::CONST);
+  ASSERT_NE(Token::INIT_CONST, node->op());
+  if (is_trivial_receiver) {
+    Result value = frame()->Pop();
+    frame()->Push(prop->obj());
+    frame()->Push(&value);
+  }
+  CodeForSourcePosition(node->position());
+  bool is_contextual = (var != NULL);
+  Result answer = EmitNamedStore(name, is_contextual);
+  frame()->Push(&answer);
+
+  if (node->ends_initialization_block()) {
+    ASSERT_EQ(NULL, var);
+    // The argument to the runtime call is the receiver.
+    if (is_trivial_receiver) {
+      frame()->Push(prop->obj());
+    } else {
+      // A copy of the receiver is below the value of the assignment.  Swap
+      // the receiver and the value of the assignment expression.
+      Result result = frame()->Pop();
+      Result receiver = frame()->Pop();
+      frame()->Push(&result);
+      frame()->Push(&receiver);
+    }
+    Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
+  }
+
+  ASSERT_EQ(frame()->height(), original_height + 1);
+}
+
+
+void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
+#ifdef DEBUG
+  int original_height = frame()->height();
+#endif
+  Comment cmnt(masm_, "[ Keyed Property Assignment");
+  Property* prop = node->target()->AsProperty();
+  ASSERT_NOT_NULL(prop);
+
+  // Evaluate the receiver subexpression.
+  Load(prop->obj());
+
+  if (node->starts_initialization_block()) {
+    // Change to slow case in the beginning of an initialization block to
+    // avoid the quadratic behavior of repeatedly adding fast properties.
+    frame_->Dup();
+    Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
+  }
+
+  if (node->ends_initialization_block()) {
+    // Add an extra copy of the receiver to the frame, so that it can be
+    // converted back to fast case after the assignment.
+    frame_->Dup();
+  }
+
+  // Evaluate the key subexpression.
+  Load(prop->key());
+
+  // Evaluate the right-hand side.
+  if (node->is_compound()) {
+    // Duplicate receiver and key.
+    frame()->PushElementAt(1);
+    frame()->PushElementAt(1);
+    Result value = EmitKeyedLoad();
+    frame()->Push(&value);
+    Load(node->value());
+
+    bool overwrite_value =
+        (node->value()->AsBinaryOperation() != NULL &&
+         node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+    GenericBinaryOperation(node->binary_op(),
+                           node->type(),
+                           overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+  } else {
+    Load(node->value());
+  }
+
+  // Perform the assignment.  It is safe to ignore constants here.
+  ASSERT(node->op() != Token::INIT_CONST);
+  CodeForSourcePosition(node->position());
+  Result answer = EmitKeyedStore(prop->key()->type());
+  frame()->Push(&answer);
+
+  if (node->ends_initialization_block()) {
+    // The argument to the runtime call is the extra copy of the receiver,
+    // which is below the value of the assignment.  Swap the receiver and
+    // the value of the assignment expression.
+    Result result = frame()->Pop();
+    Result receiver = frame()->Pop();
+    frame()->Push(&result);
+    frame()->Push(&receiver);
+    Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
+  }
+
+  ASSERT(frame()->height() == original_height + 1);
+}
+
+
 void CodeGenerator::VisitAssignment(Assignment* node) {
 #ifdef DEBUG
-  int original_height = frame_->height();
+  int original_height = frame()->height();
 #endif
-  Comment cmnt(masm_, "[ Assignment");
+  Variable* var = node->target()->AsVariableProxy()->AsVariable();
+  Property* prop = node->target()->AsProperty();
 
-  { Reference target(this, node->target(), node->is_compound());
-    if (target.is_illegal()) {
-      // Fool the virtual frame into thinking that we left the assignment's
-      // value on the frame.
-      frame_->Push(Smi::FromInt(0));
-      return;
-    }
-    Variable* var = node->target()->AsVariableProxy()->AsVariable();
+  if (var != NULL && !var->is_global()) {
+    EmitSlotAssignment(node);
 
-    if (node->starts_initialization_block()) {
-      ASSERT(target.type() == Reference::NAMED ||
-             target.type() == Reference::KEYED);
-      // Change to slow case in the beginning of an initialization
-      // block to avoid the quadratic behavior of repeatedly adding
-      // fast properties.
+  } else if ((prop != NULL && prop->key()->IsPropertyName()) ||
+             (var != NULL && var->is_global())) {
+    // Properties whose keys are property names and global variables are
+    // treated as named property references.  We do not need to consider
+    // global 'this' because it is not a valid left-hand side.
+    EmitNamedPropertyAssignment(node);
 
-      // The receiver is the argument to the runtime call.  It is the
-      // first value pushed when the reference was loaded to the
-      // frame.
-      frame_->PushElementAt(target.size() - 1);
-      Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
-    }
-    if (node->ends_initialization_block()) {
-      // Add an extra copy of the receiver to the frame, so that it can be
-      // converted back to fast case after the assignment.
-      ASSERT(target.type() == Reference::NAMED ||
-             target.type() == Reference::KEYED);
-      if (target.type() == Reference::NAMED) {
-        frame_->Dup();
-        // Dup target receiver on stack.
-      } else {
-        ASSERT(target.type() == Reference::KEYED);
-        Result temp = frame_->Pop();
-        frame_->Dup();
-        frame_->Push(&temp);
-      }
-    }
-    if (node->op() == Token::ASSIGN ||
-        node->op() == Token::INIT_VAR ||
-        node->op() == Token::INIT_CONST) {
-      Load(node->value());
+  } else if (prop != NULL) {
+    // Other properties (including rewritten parameters for a function that
+    // uses arguments) are keyed property assignments.
+    EmitKeyedPropertyAssignment(node);
 
-    } else {  // Assignment is a compound assignment.
-      Literal* literal = node->value()->AsLiteral();
-      bool overwrite_value =
-          (node->value()->AsBinaryOperation() != NULL &&
-           node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
-      Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
-      // There are two cases where the target is not read in the right hand
-      // side, that are easy to test for: the right hand side is a literal,
-      // or the right hand side is a different variable.  TakeValue invalidates
-      // the target, with an implicit promise that it will be written to again
-      // before it is read.
-      if (literal != NULL || (right_var != NULL && right_var != var)) {
-        target.TakeValue();
-      } else {
-        target.GetValue();
-      }
-      Load(node->value());
-      GenericBinaryOperation(node->binary_op(),
-                             node->type(),
-                             overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
-    }
-
-    if (var != NULL &&
-        var->mode() == Variable::CONST &&
-        node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
-      // Assignment ignored - leave the value on the stack.
-      UnloadReference(&target);
-    } else {
-      CodeForSourcePosition(node->position());
-      if (node->op() == Token::INIT_CONST) {
-        // Dynamic constant initializations must use the function context
-        // and initialize the actual constant declared. Dynamic variable
-        // initializations are simply assignments and use SetValue.
-        target.SetValue(CONST_INIT);
-      } else {
-        target.SetValue(NOT_CONST_INIT);
-      }
-      if (node->ends_initialization_block()) {
-        ASSERT(target.type() == Reference::UNLOADED);
-        // End of initialization block. Revert to fast case.  The
-        // argument to the runtime call is the extra copy of the receiver,
-        // which is below the value of the assignment.
-        // Swap the receiver and the value of the assignment expression.
-        Result lhs = frame_->Pop();
-        Result receiver = frame_->Pop();
-        frame_->Push(&lhs);
-        frame_->Push(&receiver);
-        Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
-      }
-    }
+  } else {
+    // Invalid left-hand side.
+    Load(node->target());
+    Result result = frame()->CallRuntime(Runtime::kThrowReferenceError, 1);
+    // The runtime call never actually returns, but the code generator
+    // still emits the code that follows and expects the frame height to
+    // be balanced.
+    frame()->Push(&result);
   }
-  ASSERT(frame_->height() == original_height + 1);
+
+  ASSERT(frame()->height() == original_height + 1);
 }
 
 
@@ -4903,9 +5171,9 @@
         LoadGlobalReceiver();
       } else {
         Load(property->obj());
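+        // Duplicate the receiver; EmitKeyedLoad consumes a receiver/key
+        // pair, and the original receiver is still needed for the call.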
+        frame()->Dup();
         Load(property->key());
-        Result function = EmitKeyedLoad(false);
-        frame_->Drop();  // Key.
+        Result function = EmitKeyedLoad();
         Result receiver = frame_->Pop();
         frame_->Push(&function);
         frame_->Push(&receiver);
@@ -5173,6 +5441,25 @@
 }
 
 
+void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+  Load(args->at(0));
+  Result value = frame_->Pop();
+  value.ToRegister();
+  ASSERT(value.is_valid());
+  __ test(value.reg(), Immediate(kSmiTagMask));
+  destination()->false_target()->Branch(equal);
+  // It is a heap object - get map.
+  Result temp = allocator()->Allocate();
+  ASSERT(temp.is_valid());
+  // Check if the object is a regexp.
+  __ CmpObjectType(value.reg(), JS_REGEXP_TYPE, temp.reg());
+  value.Unuse();
+  temp.Unuse();
+  destination()->Split(equal);
+}
+
+
 void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
   // This generates a fast version of:
   // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
@@ -5527,6 +5814,35 @@
 }
 
 
+void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
+  ASSERT_EQ(args->length(), 1);
+
+  // Load the argument on the stack and call the stub.
+  Load(args->at(0));
+  NumberToStringStub stub;
+  Result result = frame_->CallStub(&stub, 1);
+  frame_->Push(&result);
+}
+
+
+void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
+  ASSERT_EQ(args->length(), 1);
+  Load(args->at(0));
+  TranscendentalCacheStub stub(TranscendentalCache::SIN);
+  Result result = frame_->CallStub(&stub, 1);
+  frame_->Push(&result);
+}
+
+
+void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
+  ASSERT_EQ(args->length(), 1);
+  Load(args->at(0));
+  TranscendentalCacheStub stub(TranscendentalCache::COS);
+  Result result = frame_->CallStub(&stub, 1);
+  frame_->Push(&result);
+}
+
+
 void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
   if (CheckForInlineRuntimeCall(node)) {
     return;
@@ -5661,7 +5977,6 @@
     switch (op) {
       case Token::SUB: {
         GenericUnaryOpStub stub(Token::SUB, overwrite);
-        // TODO(1222589): remove dependency of TOS being cached inside stub
         Result operand = frame_->Pop();
         Result answer = frame_->CallStub(&stub, &operand);
         frame_->Push(&answer);
@@ -6111,13 +6426,10 @@
       __ movzx_b(temp.reg(), FieldOperand(temp.reg(), Map::kBitFieldOffset));
       __ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
       destination()->false_target()->Branch(not_zero);
-      __ mov(temp.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
-      __ movzx_b(temp.reg(),
-                 FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
-      __ cmp(temp.reg(), FIRST_NONSTRING_TYPE);
+      __ CmpObjectType(answer.reg(), FIRST_NONSTRING_TYPE, temp.reg());
       temp.Unuse();
       answer.Unuse();
-      destination()->Split(less);
+      destination()->Split(below);
 
     } else if (check->Equals(Heap::boolean_symbol())) {
       __ cmp(answer.reg(), Factory::true_value());
@@ -6277,7 +6589,7 @@
 
 
 // Emit a LoadIC call to get the value from receiver and leave it in
-// dst.  The receiver register is restored after the call.
+// dst.
 class DeferredReferenceGetNamedValue: public DeferredCode {
  public:
   DeferredReferenceGetNamedValue(Register dst,
@@ -6300,7 +6612,9 @@
 
 
 void DeferredReferenceGetNamedValue::Generate() {
-  __ push(receiver_);
+  if (!receiver_.is(eax)) {
+    __ mov(eax, receiver_);
+  }
   __ Set(ecx, Immediate(name_));
   Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
   __ call(ic, RelocInfo::CODE_TARGET);
@@ -6317,7 +6631,6 @@
   __ IncrementCounter(&Counters::named_load_inline_miss, 1);
 
   if (!dst_.is(eax)) __ mov(dst_, eax);
-  __ pop(receiver_);
 }
 
 
@@ -6325,9 +6638,8 @@
  public:
   explicit DeferredReferenceGetKeyedValue(Register dst,
                                           Register receiver,
-                                          Register key,
-                                          bool is_global)
-      : dst_(dst), receiver_(receiver), key_(key), is_global_(is_global) {
+                                          Register key)
+      : dst_(dst), receiver_(receiver), key_(key) {
     set_comment("[ DeferredReferenceGetKeyedValue");
   }
 
@@ -6340,14 +6652,29 @@
   Register dst_;
   Register receiver_;
   Register key_;
-  bool is_global_;
 };
 
 
 void DeferredReferenceGetKeyedValue::Generate() {
-  __ push(receiver_);  // First IC argument.
-  __ push(key_);       // Second IC argument.
-
+  if (!receiver_.is(eax)) {
+    // Register eax is available for key.
+    if (!key_.is(eax)) {
+      __ mov(eax, key_);
+    }
+    if (!receiver_.is(edx)) {
+      __ mov(edx, receiver_);
+    }
+  } else if (!key_.is(edx)) {
+    // Register edx is available for receiver.
+    if (!receiver_.is(edx)) {
+      __ mov(edx, receiver_);
+    }
+    if (!key_.is(eax)) {
+      __ mov(eax, key_);
+    }
+  } else {
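+    // Here the receiver is in eax and the key is in edx, so a single
+    // exchange puts both into the registers the IC expects.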
+    __ xchg(edx, eax);
+  }
   // Calculate the delta from the IC call instruction to the map check
   // cmp instruction in the inlined version.  This delta is stored in
   // a test(eax, delta) instruction after the call so that we can find
@@ -6355,10 +6682,7 @@
   // This means that we cannot allow test instructions after calls to
   // KeyedLoadIC stubs in other places.
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-  RelocInfo::Mode mode = is_global_
-                         ? RelocInfo::CODE_TARGET_CONTEXT
-                         : RelocInfo::CODE_TARGET;
-  __ call(ic, mode);
+  __ call(ic, RelocInfo::CODE_TARGET);
   // The delta from the start of the map-compare instruction to the
   // test instruction.  We use masm_-> directly here instead of the __
   // macro because the macro sometimes uses macro expansion to turn
@@ -6371,8 +6695,6 @@
   __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
 
   if (!dst_.is(eax)) __ mov(dst_, eax);
-  __ pop(key_);
-  __ pop(receiver_);
 }
 
 
@@ -6424,12 +6746,90 @@
 }
 
 
-Result CodeGenerator::EmitKeyedLoad(bool is_global) {
-  Comment cmnt(masm_, "[ Load from keyed Property");
-  // Inline array load code if inside of a loop.  We do not know
-  // the receiver map yet, so we initially generate the code with
-  // a check against an invalid map.  In the inline cache code, we
-  // patch the map check if appropriate.
+Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
+#ifdef DEBUG
+  int original_height = frame()->height();
+#endif
+  Result result;
+  // Do not inline the inobject property case for loads from the global
+  // object.  Also do not inline for unoptimized code.  This saves time in
+  // the code generator.  Unoptimized code is toplevel code or code that is
+  // not in a loop.
+  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
+    Comment cmnt(masm(), "[ Load from named Property");
+    frame()->Push(name);
+
+    RelocInfo::Mode mode = is_contextual
+        ? RelocInfo::CODE_TARGET_CONTEXT
+        : RelocInfo::CODE_TARGET;
+    result = frame()->CallLoadIC(mode);
+    // A test eax instruction following the call signals that the inobject
+    // property case was inlined.  Ensure that there is not a test eax
+    // instruction here.
+    __ nop();
+  } else {
+    // Inline the inobject property case.
+    Comment cmnt(masm(), "[ Inlined named property load");
+    Result receiver = frame()->Pop();
+    receiver.ToRegister();
+
+    result = allocator()->Allocate();
+    ASSERT(result.is_valid());
+    DeferredReferenceGetNamedValue* deferred =
+        new DeferredReferenceGetNamedValue(result.reg(), receiver.reg(), name);
+
+    // Check that the receiver is a heap object.
+    __ test(receiver.reg(), Immediate(kSmiTagMask));
+    deferred->Branch(zero);
+
+    __ bind(deferred->patch_site());
+    // This is the map check instruction that will be patched (so we can't
+    // use the double underscore macro that may insert instructions).
+    // Initially use an invalid map to force a failure.
+    masm()->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+                Immediate(Factory::null_value()));
+    // This branch is always a forwards branch so it's always a fixed size
+    // which allows the assert below to succeed and patching to work.
+    deferred->Branch(not_equal);
+
+    // The delta from the patch label to the load offset must be statically
+    // known.
+    ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
+           LoadIC::kOffsetToLoadInstruction);
+    // The initial (invalid) offset has to be large enough to force a 32-bit
+    // instruction encoding to allow patching with an arbitrary offset.  Use
+    // kMaxInt (minus kHeapObjectTag).
+    int offset = kMaxInt;
+    masm()->mov(result.reg(), FieldOperand(receiver.reg(), offset));
+
+    __ IncrementCounter(&Counters::named_load_inline, 1);
+    deferred->BindExit();
+  }
+  ASSERT(frame()->height() == original_height - 1);
+  return result;
+}
+
+
+Result CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
+#ifdef DEBUG
+  int expected_height = frame()->height() - (is_contextual ? 1 : 2);
+#endif
+  Result result = frame()->CallStoreIC(name, is_contextual);
+
+  ASSERT_EQ(expected_height, frame()->height());
+  return result;
+}
+
+
+Result CodeGenerator::EmitKeyedLoad() {
+#ifdef DEBUG
+  int original_height = frame()->height();
+#endif
+  Result result;
+  // Inline array load code if inside of a loop.  We do not know the
+  // receiver map yet, so we initially generate the code with a check
+  // against an invalid map.  In the inline cache code, we patch the map
+  // check if appropriate.
   if (loop_nesting() > 0) {
     Comment cmnt(masm_, "[ Inlined load from keyed Property");
 
@@ -6445,22 +6845,16 @@
 
     // Use a fresh temporary for the index and later the loaded
     // value.
-    Result index = allocator()->Allocate();
-    ASSERT(index.is_valid());
+    result = allocator()->Allocate();
+    ASSERT(result.is_valid());
 
     DeferredReferenceGetKeyedValue* deferred =
-        new DeferredReferenceGetKeyedValue(index.reg(),
+        new DeferredReferenceGetKeyedValue(result.reg(),
                                            receiver.reg(),
-                                           key.reg(),
-                                           is_global);
+                                           key.reg());
 
-    // Check that the receiver is not a smi (only needed if this
-    // is not a load from the global context) and that it has the
-    // expected map.
-    if (!is_global) {
-      __ test(receiver.reg(), Immediate(kSmiTagMask));
-      deferred->Branch(zero);
-    }
+    __ test(receiver.reg(), Immediate(kSmiTagMask));
+    deferred->Branch(zero);
 
     // Initially, use an invalid map. The map is patched in the IC
     // initialization code.
@@ -6485,50 +6879,132 @@
 
     // Shift the key to get the actual index value and check that
     // it is within bounds.
-    __ mov(index.reg(), key.reg());
-    __ SmiUntag(index.reg());
-    __ cmp(index.reg(),
+    __ mov(result.reg(), key.reg());
+    __ SmiUntag(result.reg());
+    __ cmp(result.reg(),
            FieldOperand(elements.reg(), FixedArray::kLengthOffset));
     deferred->Branch(above_equal);
 
-    // Load and check that the result is not the hole.  We could
-    // reuse the index or elements register for the value.
-    //
-    // TODO(206): Consider whether it makes sense to try some
-    // heuristic about which register to reuse.  For example, if
-    // one is eax, the we can reuse that one because the value
-    // coming from the deferred code will be in eax.
-    Result value = index;
-    __ mov(value.reg(), Operand(elements.reg(),
-                                index.reg(),
-                                times_4,
-                                FixedArray::kHeaderSize - kHeapObjectTag));
+    // Load and check that the result is not the hole.
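+    // (a hole means the element is not present and may require a lookup
+    // in the prototype chain, which the IC in the deferred code handles)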
+    __ mov(result.reg(), Operand(elements.reg(),
+                                 result.reg(),
+                                 times_4,
+                                 FixedArray::kHeaderSize - kHeapObjectTag));
     elements.Unuse();
-    index.Unuse();
-    __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
+    __ cmp(Operand(result.reg()), Immediate(Factory::the_hole_value()));
     deferred->Branch(equal);
     __ IncrementCounter(&Counters::keyed_load_inline, 1);
 
     deferred->BindExit();
-    // Restore the receiver and key to the frame and push the
-    // result on top of it.
-    frame_->Push(&receiver);
-    frame_->Push(&key);
-    return value;
   } else {
     Comment cmnt(masm_, "[ Load from keyed Property");
-    RelocInfo::Mode mode = is_global
-        ? RelocInfo::CODE_TARGET_CONTEXT
-        : RelocInfo::CODE_TARGET;
-    Result answer = frame_->CallKeyedLoadIC(mode);
+    result = frame_->CallKeyedLoadIC(RelocInfo::CODE_TARGET);
     // Make sure that we do not have a test instruction after the
     // call.  A test instruction after the call is used to
     // indicate that we have generated an inline version of the
     // keyed load.  The explicit nop instruction is here because
     // the push that follows might be peep-hole optimized away.
     __ nop();
-    return answer;
   }
+  ASSERT(frame()->height() == original_height - 2);
+  return result;
+}
+
+
+Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
+#ifdef DEBUG
+  int original_height = frame()->height();
+#endif
+  Result result;
+  // Generate inlined version of the keyed store if the code is in a loop
+  // and the key is likely to be a smi.
+  if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
+    Comment cmnt(masm(), "[ Inlined store to keyed Property");
+
+    // Get the receiver, key and value into registers.
+    result = frame()->Pop();
+    Result key = frame()->Pop();
+    Result receiver = frame()->Pop();
+
+    Result tmp = allocator_->Allocate();
+    ASSERT(tmp.is_valid());
+
+    // Determine whether the value is a constant before putting it in a
+    // register.
+    bool value_is_constant = result.is_constant();
+
+    // Make sure that value, key and receiver are in registers.
+    result.ToRegister();
+    key.ToRegister();
+    receiver.ToRegister();
+
+    DeferredReferenceSetKeyedValue* deferred =
+        new DeferredReferenceSetKeyedValue(result.reg(),
+                                           key.reg(),
+                                           receiver.reg());
+
+    // Check that the value is a smi if it is not a constant.  We can skip
+    // the write barrier for smis and constants.
+    if (!value_is_constant) {
+      __ test(result.reg(), Immediate(kSmiTagMask));
+      deferred->Branch(not_zero);
+    }
+
+    // Check that the key is a non-negative smi.
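+    // Bit 31 is the sign bit of the smi encoding, so a single test rejects
+    // both non-smis and negative keys.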
+    __ test(key.reg(), Immediate(kSmiTagMask | 0x80000000));
+    deferred->Branch(not_zero);
+
+    // Check that the receiver is not a smi.
+    __ test(receiver.reg(), Immediate(kSmiTagMask));
+    deferred->Branch(zero);
+
+    // Check that the receiver is a JSArray.
+    __ mov(tmp.reg(),
+           FieldOperand(receiver.reg(), HeapObject::kMapOffset));
+    __ movzx_b(tmp.reg(),
+               FieldOperand(tmp.reg(), Map::kInstanceTypeOffset));
+    __ cmp(tmp.reg(), JS_ARRAY_TYPE);
+    deferred->Branch(not_equal);
+
+    // Check that the key is within bounds.  Both the key and the length of
+    // the JSArray are smis.
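+    // (the smi encoding is order-preserving, so the tagged values can be
+    // compared directly)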
+    __ cmp(key.reg(),
+           FieldOperand(receiver.reg(), JSArray::kLengthOffset));
+    deferred->Branch(greater_equal);
+
+    // Get the elements array from the receiver and check that it is not a
+    // dictionary.
+    __ mov(tmp.reg(),
+           FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+    // Bind the deferred code patch site to be able to locate the fixed
+    // array map comparison.  When debugging, we patch this comparison to
+    // always fail so that we will hit the IC call in the deferred code
+    // which will allow the debugger to break for fast case stores.
+    __ bind(deferred->patch_site());
+    __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
+           Immediate(Factory::fixed_array_map()));
+    deferred->Branch(not_equal);
+
+    // Store the value.
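+    // The key is a smi, i.e. the element index shifted left by one, so
+    // scaling it by two addresses 4-byte fixed array elements.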
+    __ mov(Operand(tmp.reg(),
+                   key.reg(),
+                   times_2,
+                   FixedArray::kHeaderSize - kHeapObjectTag),
+           result.reg());
+    __ IncrementCounter(&Counters::keyed_store_inline, 1);
+
+    deferred->BindExit();
+  } else {
+    result = frame()->CallKeyedStoreIC();
+    // Make sure that we do not have a test instruction after the
+    // call.  A test instruction after the call is used to
+    // indicate that we have generated an inline version of the
+    // keyed store.
+    __ nop();
+    frame()->Drop(2);
+  }
+  ASSERT(frame()->height() == original_height - 3);
+  return result;
 }
 
 
@@ -6548,7 +7024,7 @@
   } else {
     Literal* raw_name = property->key()->AsLiteral();
     ASSERT(raw_name != NULL);
-    return Handle<String>(String::cast(*raw_name->handle()));
+    return Handle<String>::cast(raw_name->handle());
   }
 }
 
@@ -6570,7 +7046,10 @@
       Comment cmnt(masm, "[ Load from Slot");
       Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
       ASSERT(slot != NULL);
-      cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
+      Result result =
+          cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
+      if (!persist_after_get_) set_unloaded();
+      cgen_->frame()->Push(&result);
       break;
     }
 
@@ -6578,87 +7057,27 @@
       Variable* var = expression_->AsVariableProxy()->AsVariable();
       bool is_global = var != NULL;
       ASSERT(!is_global || var->is_global());
-
-      // Do not inline the inobject property case for loads from the global
-      // object.  Also do not inline for unoptimized code.  This saves time
-      // in the code generator.  Unoptimized code is toplevel code or code
-      // that is not in a loop.
-      if (is_global ||
-          cgen_->scope()->is_global_scope() ||
-          cgen_->loop_nesting() == 0) {
-        Comment cmnt(masm, "[ Load from named Property");
-        cgen_->frame()->Push(GetName());
-
-        RelocInfo::Mode mode = is_global
-                               ? RelocInfo::CODE_TARGET_CONTEXT
-                               : RelocInfo::CODE_TARGET;
-        Result answer = cgen_->frame()->CallLoadIC(mode);
-        // A test eax instruction following the call signals that the
-        // inobject property case was inlined.  Ensure that there is not
-        // a test eax instruction here.
-        __ nop();
-        cgen_->frame()->Push(&answer);
-      } else {
-        // Inline the inobject property case.
-        Comment cmnt(masm, "[ Inlined named property load");
-        Result receiver = cgen_->frame()->Pop();
-        receiver.ToRegister();
-
-        Result value = cgen_->allocator()->Allocate();
-        ASSERT(value.is_valid());
-        DeferredReferenceGetNamedValue* deferred =
-            new DeferredReferenceGetNamedValue(value.reg(),
-                                               receiver.reg(),
-                                               GetName());
-
-        // Check that the receiver is a heap object.
-        __ test(receiver.reg(), Immediate(kSmiTagMask));
-        deferred->Branch(zero);
-
-        __ bind(deferred->patch_site());
-        // This is the map check instruction that will be patched (so we can't
-        // use the double underscore macro that may insert instructions).
-        // Initially use an invalid map to force a failure.
-        masm->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
-                  Immediate(Factory::null_value()));
-        // This branch is always a forwards branch so it's always a fixed
-        // size which allows the assert below to succeed and patching to work.
-        deferred->Branch(not_equal);
-
-        // The delta from the patch label to the load offset must be
-        // statically known.
-        ASSERT(masm->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
-               LoadIC::kOffsetToLoadInstruction);
-        // The initial (invalid) offset has to be large enough to force
-        // a 32-bit instruction encoding to allow patching with an
-        // arbitrary offset.  Use kMaxInt (minus kHeapObjectTag).
-        int offset = kMaxInt;
-        masm->mov(value.reg(), FieldOperand(receiver.reg(), offset));
-
-        __ IncrementCounter(&Counters::named_load_inline, 1);
-        deferred->BindExit();
-        cgen_->frame()->Push(&receiver);
-        cgen_->frame()->Push(&value);
-      }
+      if (persist_after_get_) cgen_->frame()->Dup();
+      Result result = cgen_->EmitNamedLoad(GetName(), is_global);
+      if (!persist_after_get_) set_unloaded();
+      cgen_->frame()->Push(&result);
       break;
     }
 
     case KEYED: {
-      Variable* var = expression_->AsVariableProxy()->AsVariable();
-      bool is_global = var != NULL;
-      ASSERT(!is_global || var->is_global());
-      Result value = cgen_->EmitKeyedLoad(is_global);
+      if (persist_after_get_) {
+        cgen_->frame()->PushElementAt(1);
+        cgen_->frame()->PushElementAt(1);
+      }
+      Result value = cgen_->EmitKeyedLoad();
       cgen_->frame()->Push(&value);
+      if (!persist_after_get_) set_unloaded();
       break;
     }
 
     default:
       UNREACHABLE();
   }
-
-  if (!persist_after_get_) {
-    cgen_->UnloadReference(this);
-  }
 }
 
 
@@ -6708,14 +7127,13 @@
       Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
       ASSERT(slot != NULL);
       cgen_->StoreToSlot(slot, init_state);
-      cgen_->UnloadReference(this);
+      set_unloaded();
       break;
     }
 
     case NAMED: {
       Comment cmnt(masm, "[ Store to named Property");
-      cgen_->frame()->Push(GetName());
-      Result answer = cgen_->frame()->CallStoreIC();
+      Result answer = cgen_->EmitNamedStore(GetName(), false);
       cgen_->frame()->Push(&answer);
       set_unloaded();
       break;
@@ -6723,108 +7141,16 @@
 
     case KEYED: {
       Comment cmnt(masm, "[ Store to keyed Property");
-
-      // Generate inlined version of the keyed store if the code is in
-      // a loop and the key is likely to be a smi.
       Property* property = expression()->AsProperty();
       ASSERT(property != NULL);
-      StaticType* key_smi_analysis = property->key()->type();
-
-      if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
-        Comment cmnt(masm, "[ Inlined store to keyed Property");
-
-        // Get the receiver, key and value into registers.
-        Result value = cgen_->frame()->Pop();
-        Result key = cgen_->frame()->Pop();
-        Result receiver = cgen_->frame()->Pop();
-
-        Result tmp = cgen_->allocator_->Allocate();
-        ASSERT(tmp.is_valid());
-
-        // Determine whether the value is a constant before putting it
-        // in a register.
-        bool value_is_constant = value.is_constant();
-
-        // Make sure that value, key and receiver are in registers.
-        value.ToRegister();
-        key.ToRegister();
-        receiver.ToRegister();
-
-        DeferredReferenceSetKeyedValue* deferred =
-            new DeferredReferenceSetKeyedValue(value.reg(),
-                                               key.reg(),
-                                               receiver.reg());
-
-        // Check that the value is a smi if it is not a constant.  We
-        // can skip the write barrier for smis and constants.
-        if (!value_is_constant) {
-          __ test(value.reg(), Immediate(kSmiTagMask));
-          deferred->Branch(not_zero);
-        }
-
-        // Check that the key is a non-negative smi.
-        __ test(key.reg(), Immediate(kSmiTagMask | 0x80000000));
-        deferred->Branch(not_zero);
-
-        // Check that the receiver is not a smi.
-        __ test(receiver.reg(), Immediate(kSmiTagMask));
-        deferred->Branch(zero);
-
-        // Check that the receiver is a JSArray.
-        __ mov(tmp.reg(),
-               FieldOperand(receiver.reg(), HeapObject::kMapOffset));
-        __ movzx_b(tmp.reg(),
-                   FieldOperand(tmp.reg(), Map::kInstanceTypeOffset));
-        __ cmp(tmp.reg(), JS_ARRAY_TYPE);
-        deferred->Branch(not_equal);
-
-        // Check that the key is within bounds.  Both the key and the
-        // length of the JSArray are smis.
-        __ cmp(key.reg(),
-               FieldOperand(receiver.reg(), JSArray::kLengthOffset));
-        deferred->Branch(greater_equal);
-
-        // Get the elements array from the receiver and check that it
-        // is not a dictionary.
-        __ mov(tmp.reg(),
-               FieldOperand(receiver.reg(), JSObject::kElementsOffset));
-        // Bind the deferred code patch site to be able to locate the
-        // fixed array map comparison.  When debugging, we patch this
-        // comparison to always fail so that we will hit the IC call
-        // in the deferred code which will allow the debugger to
-        // break for fast case stores.
-        __ bind(deferred->patch_site());
-        __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
-               Immediate(Factory::fixed_array_map()));
-        deferred->Branch(not_equal);
-
-        // Store the value.
-        __ mov(Operand(tmp.reg(),
-                       key.reg(),
-                       times_2,
-                       FixedArray::kHeaderSize - kHeapObjectTag),
-               value.reg());
-        __ IncrementCounter(&Counters::keyed_store_inline, 1);
-
-        deferred->BindExit();
-
-        cgen_->frame()->Push(&receiver);
-        cgen_->frame()->Push(&key);
-        cgen_->frame()->Push(&value);
-      } else {
-        Result answer = cgen_->frame()->CallKeyedStoreIC();
-        // Make sure that we do not have a test instruction after the
-        // call.  A test instruction after the call is used to
-        // indicate that we have generated an inline version of the
-        // keyed store.
-        __ nop();
-        cgen_->frame()->Push(&answer);
-      }
-      cgen_->UnloadReference(this);
+      Result answer = cgen_->EmitKeyedStore(property->key()->type());
+      cgen_->frame()->Push(&answer);
+      set_unloaded();
       break;
     }
 
-    default:
+    case UNLOADED:
+    case ILLEGAL:
       UNREACHABLE();
   }
 }
@@ -6918,6 +7244,13 @@
 
 
 void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
+  // Stack layout on entry:
+  //
+  // [esp + kPointerSize]: constant elements.
+  // [esp + (2 * kPointerSize)]: literal index.
+  // [esp + (3 * kPointerSize)]: literals array.
+
+  // All sizes here are multiples of kPointerSize.
   int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
   int size = JSArray::kSize + elements_size;
 
@@ -7037,6 +7370,8 @@
         }
       } else if (left.is(left_arg)) {
         __ mov(right_arg, right);
+      } else if (right.is(right_arg)) {
+        __ mov(left_arg, left);
       } else if (left.is(right_arg)) {
         if (IsOperationCommutative()) {
           __ mov(left_arg, right);
@@ -7055,8 +7390,6 @@
           __ mov(right_arg, right);
           __ mov(left_arg, left);
         }
-      } else if (right.is(right_arg)) {
-        __ mov(left_arg, left);
       } else {
         // Order of moves is not important.
         __ mov(left_arg, left);
@@ -7092,6 +7425,10 @@
       __ mov(left_arg, Immediate(right));
       SetArgsReversed();
     } else {
+      // For non-commutative operations, left and right_arg might be
+      // the same register.  The order of the moves therefore matters:
+      // left must be copied to left_arg before right_arg is written,
+      // so that left is not overwritten.
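+      // Example (illustrative): if left and right_arg are both eax,
+      // writing the immediate into right_arg first would destroy left.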
       __ mov(left_arg, left);
       __ mov(right_arg, Immediate(right));
     }
@@ -7124,8 +7461,12 @@
       __ mov(right_arg, Immediate(left));
       SetArgsReversed();
     } else {
-      __ mov(left_arg, Immediate(left));
+      // For non-commutative operations, right and left_arg might be
+      // the same register.  The order of the moves therefore matters:
+      // right must be copied to right_arg before left_arg is written,
+      // so that right is not overwritten.
       __ mov(right_arg, right);
+      __ mov(left_arg, Immediate(left));
     }
     // Update flags to indicate that arguments are in registers.
     SetArgsInRegisters();
@@ -7493,7 +7834,18 @@
     case Token::DIV: {
       if (CpuFeatures::IsSupported(SSE2)) {
         CpuFeatures::Scope use_sse2(SSE2);
-        FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime);
+        if (NumberInfo::IsNumber(operands_type_)) {
+          if (FLAG_debug_code) {
+            // Assert at runtime that inputs are only numbers.
+            __ AbortIfNotNumber(edx,
+                                "GenericBinaryOpStub operand not a number.");
+            __ AbortIfNotNumber(eax,
+                                "GenericBinaryOpStub operand not a number.");
+          }
+          FloatingPointHelper::LoadSSE2Operands(masm);
+        } else {
+          FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime);
+        }
 
         switch (op_) {
           case Token::ADD: __ addsd(xmm0, xmm1); break;
@@ -7506,7 +7858,17 @@
         __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
         GenerateReturn(masm);
       } else {  // SSE2 not available, use FPU.
-        FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
+        if (NumberInfo::IsNumber(operands_type_)) {
+          if (FLAG_debug_code) {
+            // Assert at runtime that inputs are only numbers.
+            __ AbortIfNotNumber(edx,
+                                "GenericBinaryOpStub operand not a number.");
+            __ AbortIfNotNumber(eax,
+                                "GenericBinaryOpStub operand not a number.");
+          }
+        } else {
+          FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
+        }
         FloatingPointHelper::LoadFloatOperands(
             masm,
             ecx,
@@ -7618,7 +7980,7 @@
   switch (op_) {
     case Token::ADD: {
       // Test for string arguments before calling runtime.
-      Label not_strings, not_string1, string1;
+      Label not_strings, not_string1, string1, string1_smi2;
       Result answer;
       __ test(edx, Immediate(kSmiTagMask));
       __ j(zero, &not_string1);
@@ -7627,15 +7989,28 @@
 
       // First argument is a string, test second.
       __ test(eax, Immediate(kSmiTagMask));
-      __ j(zero, &string1);
+      __ j(zero, &string1_smi2);
       __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
       __ j(above_equal, &string1);
 
       // First and second argument are strings. Jump to the string add stub.
-      StringAddStub stub(NO_STRING_CHECK_IN_STUB);
-      __ TailCallStub(&stub);
+      StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
+      __ TailCallStub(&string_add_stub);
 
-      // Only first argument is a string.
+      __ bind(&string1_smi2);
+      // First argument is a string, second is a smi. Try to look up the
+      // number string for the smi in the number string cache.
+      NumberToStringStub::GenerateLookupNumberStringCache(
+          masm, eax, edi, ebx, ecx, true, &string1);
+
+      // Call the string add stub to make the result.
+      __ EnterInternalFrame();
+      __ push(edx);  // Original first argument.
+      __ push(edi);  // Number to string result for second argument.
+      __ CallStub(&string_add_stub);
+      __ LeaveInternalFrame();
+      __ ret(2 * kPointerSize);
+
       __ bind(&string1);
       __ InvokeBuiltin(
           HasArgsReversed() ?
@@ -7766,6 +8141,212 @@
 }
 
 
+void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
+  // Input on stack:
+  // esp[4]: argument (should be number).
+  // esp[0]: return address.
+  // Test that eax is a number.
+  Label runtime_call;
+  Label runtime_call_clear_stack;
+  Label input_not_smi;
+  Label loaded;
+  __ mov(eax, Operand(esp, kPointerSize));
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(not_zero, &input_not_smi);
+  // Input is a smi. Untag and load it onto the FPU stack.
+  // Then load the low and high words of the double into ebx, edx.
+  ASSERT_EQ(1, kSmiTagSize);
+  __ sar(eax, 1);
+  __ sub(Operand(esp), Immediate(2 * kPointerSize));
+  __ mov(Operand(esp, 0), eax);
+  __ fild_s(Operand(esp, 0));
+  __ fst_d(Operand(esp, 0));
+  __ pop(edx);
+  __ pop(ebx);
+  __ jmp(&loaded);
+  __ bind(&input_not_smi);
+  // Check if input is a HeapNumber.
+  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+  __ cmp(Operand(ebx), Immediate(Factory::heap_number_map()));
+  __ j(not_equal, &runtime_call);
+  // Input is a HeapNumber. Push it on the FPU stack and load its
+  // low and high words into ebx, edx.
+  __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+  __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
+  __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset));
+
+  __ bind(&loaded);
+  // ST[0] == double value
+  // ebx = low 32 bits of double value
+  // edx = high 32 bits of double value
+  // Compute hash:
+  //   h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
+  __ mov(ecx, ebx);
+  __ xor_(ecx, Operand(edx));
+  __ mov(eax, ecx);
+  __ sar(eax, 16);
+  __ xor_(ecx, Operand(eax));
+  __ mov(eax, ecx);
+  __ sar(eax, 8);
+  __ xor_(ecx, Operand(eax));
+  ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
+  __ and_(Operand(ecx), Immediate(TranscendentalCache::kCacheSize - 1));
+  // ST[0] == double value.
+  // ebx = low 32 bits of double value.
+  // edx = high 32 bits of double value.
+  // ecx = TranscendentalCache::hash(double value).
+  __ mov(eax,
+         Immediate(ExternalReference::transcendental_cache_array_address()));
+  // Eax points to cache array.
+  __ mov(eax, Operand(eax, type_ * sizeof(TranscendentalCache::caches_[0])));
+  // Eax points to the cache for the type type_.
+  // If NULL, the cache hasn't been initialized yet, so go through runtime.
+  __ test(eax, Operand(eax));
+  __ j(zero, &runtime_call_clear_stack);
+#ifdef DEBUG
+  // Check that the layout of cache elements matches expectations.
+  {  // NOLINT - doesn't like a single brace on a line.
+    TranscendentalCache::Element test_elem[2];
+    char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
+    char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
+    char* elem_in0  = reinterpret_cast<char*>(&(test_elem[0].in[0]));
+    char* elem_in1  = reinterpret_cast<char*>(&(test_elem[0].in[1]));
+    char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
+    CHECK_EQ(12, elem2_start - elem_start);  // Two uint32_t's and a pointer.
+    CHECK_EQ(0, elem_in0 - elem_start);
+    CHECK_EQ(kIntSize, elem_in1 - elem_start);
+    CHECK_EQ(2 * kIntSize, elem_out - elem_start);
+  }
+#endif
+  // Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12].
+  __ lea(ecx, Operand(ecx, ecx, times_2, 0));
+  __ lea(ecx, Operand(eax, ecx, times_4, 0));
+  // Check if cache matches: Double value is stored in uint32_t[2] array.
+  Label cache_miss;
+  __ cmp(ebx, Operand(ecx, 0));
+  __ j(not_equal, &cache_miss);
+  __ cmp(edx, Operand(ecx, kIntSize));
+  __ j(not_equal, &cache_miss);
+  // Cache hit!
+  __ mov(eax, Operand(ecx, 2 * kIntSize));
+  __ fstp(0);
+  __ ret(kPointerSize);
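+
+  // For reference, a rough C++ sketch of the probe above (Element as in
+  // the DEBUG layout check above; cache indexing illustrative only):
+  //   Element& e = cache[hash];  // hash folds (low ^ high) and masks it.
+  //   if (e.in[0] == low && e.in[1] == high) return e.output;  // Hit.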
+
+  __ bind(&cache_miss);
+  // Update cache with new value.
+  // We are short on registers, so use no_reg as scratch.
+  // This gives slightly larger code.
+  __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
+  GenerateOperation(masm);
+  __ mov(Operand(ecx, 0), ebx);
+  __ mov(Operand(ecx, kIntSize), edx);
+  __ mov(Operand(ecx, 2 * kIntSize), eax);
+  __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+  __ ret(kPointerSize);
+
+  __ bind(&runtime_call_clear_stack);
+  __ fstp(0);
+  __ bind(&runtime_call);
+  __ TailCallRuntime(ExternalReference(RuntimeFunction()), 1, 1);
+}
+
+
+Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
+  switch (type_) {
+    // Add more cases when necessary.
+    case TranscendentalCache::SIN: return Runtime::kMath_sin;
+    case TranscendentalCache::COS: return Runtime::kMath_cos;
+    default:
+      UNIMPLEMENTED();
+      return Runtime::kAbort;
+  }
+}
+
+
+void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
+  // Only free register is edi.
+  Label done;
+  ASSERT(type_ == TranscendentalCache::SIN ||
+         type_ == TranscendentalCache::COS);
+  // More transcendental types can be added later.
+
+  // Both fsin and fcos require arguments in the range +/-2^63 and
+  // return NaN for infinities and NaN. They can share all code except
+  // the actual fsin/fcos operation.
+  Label in_range;
+  // If the argument is outside the range -2^63..2^63, fsin/fcos doesn't
+  // work. We must reduce it to the appropriate range first.
+  __ mov(edi, edx);
+  __ and_(Operand(edi), Immediate(0x7ff00000));  // Exponent only.
+  int supported_exponent_limit =
+      (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
+  __ cmp(Operand(edi), Immediate(supported_exponent_limit));
+  __ j(below, &in_range, taken);
+  // Check for infinity and NaN. Both produce NaN for sin and cos.
+  __ cmp(Operand(edi), Immediate(0x7ff00000));
+  Label non_nan_result;
+  __ j(not_equal, &non_nan_result, taken);
+  // Input is +/-Infinity or NaN. Result is NaN.
+  __ fstp(0);
+  // NaN is represented by 0x7ff8000000000000.
+  __ push(Immediate(0x7ff80000));
+  __ push(Immediate(0));
+  __ fld_d(Operand(esp, 0));
+  __ add(Operand(esp), Immediate(2 * kPointerSize));
+  __ jmp(&done);
+
+  __ bind(&non_nan_result);
+
+  // Use fprem1 to restrict the argument to the range +/-2*PI.
+  __ mov(edi, eax);  // Save eax before using fnstsw_ax.
+  __ fldpi();
+  __ fadd(0);
+  __ fld(1);
+  // FPU Stack: input, 2*pi, input.
+  {
+    Label no_exceptions;
+    __ fwait();
+    __ fnstsw_ax();
+    // Clear the exception flags if Invalid Operation or Zero Divide is set.
+    __ test(Operand(eax), Immediate(5));
+    __ j(zero, &no_exceptions);
+    __ fnclex();
+    __ bind(&no_exceptions);
+  }
+
+  // Compute st(0) % st(1)
+  {
+    Label partial_remainder_loop;
+    __ bind(&partial_remainder_loop);
+    __ fprem1();
+    __ fwait();
+    __ fnstsw_ax();
+    __ test(Operand(eax), Immediate(0x400 /* C2 */));
+    // If C2 is set, computation only has partial result. Loop to
+    // continue computation.
+    __ j(not_zero, &partial_remainder_loop);
+  }
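+  // Roughly, the loop above computes the IEEE remainder (a sketch;
+  // fprem1 delivers it in partial steps until C2 is clear):
+  //   input = remainder(input, 2.0 * M_PI);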
+  // FPU Stack: input, 2*pi, input % 2*pi
+  __ fstp(2);
+  __ fstp(0);
+  __ mov(eax, edi);  // Restore eax (allocated HeapNumber pointer).
+
+  // FPU Stack: input % 2*pi
+  __ bind(&in_range);
+  switch (type_) {
+    case TranscendentalCache::SIN:
+      __ fsin();
+      break;
+    case TranscendentalCache::COS:
+      __ fcos();
+      break;
+    default:
+      UNREACHABLE();
+  }
+  __ bind(&done);
+}
+
+
 // Get the integer part of a heap number.  Surprisingly, all this bit twiddling
 // is faster than using the built-in instructions on floating point registers.
 // Trashes edi and ebx.  Dest is ecx.  Source cannot be ecx or one of the
@@ -7977,6 +8558,35 @@
 }
 
 
+void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
+  Label load_smi_edx, load_eax, load_smi_eax, done;
+  // Load operand in edx into xmm0.
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, &load_smi_edx, not_taken);  // Argument in edx is a smi.
+  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+
+  __ bind(&load_eax);
+  // Load operand in eax into xmm1.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, &load_smi_eax, not_taken);  // Argument in eax is a smi.
+  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+  __ jmp(&done);
+
+  __ bind(&load_smi_edx);
+  __ SmiUntag(edx);  // Untag smi before converting to float.
+  __ cvtsi2sd(xmm0, Operand(edx));
+  __ SmiTag(edx);  // Retag smi for heap number overwriting test.
+  __ jmp(&load_eax);
+
+  __ bind(&load_smi_eax);
+  __ SmiUntag(eax);  // Untag smi before converting to float.
+  __ cvtsi2sd(xmm1, Operand(eax));
+  __ SmiTag(eax);  // Retag smi for heap number overwriting test.
+
+  __ bind(&done);
+}
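+
+// In effect, LoadSSE2Operands above performs (a sketch, assuming both
+// arguments are numbers, as the caller asserts in debug mode):
+//   xmm0 = (edx is a smi) ? static_cast<double>(untagged edx)
+//                         : the double in HeapNumber edx;
+//   xmm1 = likewise for eax;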
+
+
 void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
                                            Label* not_numbers) {
   Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
@@ -8306,6 +8916,11 @@
 
 
 void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+  // esp[0] : return address
+  // esp[4] : number of parameters
+  // esp[8] : receiver displacement
+  // esp[12] : function
+
   // The displacement is used for skipping the return address and the
   // frame pointer on the stack. It is the offset of the last
   // parameter (if any) relative to the frame pointer.
@@ -8389,7 +9004,6 @@
   __ add(Operand(edi), Immediate(kPointerSize));
   __ sub(Operand(edx), Immediate(kPointerSize));
   __ dec(ecx);
-  __ test(ecx, Operand(ecx));
   __ j(not_zero, &loop);
 
   // Return and remove the on-stack parameters.
@@ -8737,6 +9351,74 @@
 }
 
 
+void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
+                                                         Register object,
+                                                         Register result,
+                                                         Register scratch1,
+                                                         Register scratch2,
+                                                         bool object_is_smi,
+                                                         Label* not_found) {
+  // Currently only smi keys are looked up in the cache. Check for a smi
+  // if the object is not known to be one.
+  if (!object_is_smi) {
+    ASSERT(kSmiTag == 0);
+    __ test(object, Immediate(kSmiTagMask));
+    __ j(not_zero, not_found);
+  }
+
+  // Use of registers. Register result is used as a temporary.
+  Register number_string_cache = result;
+  Register mask = scratch1;
+  Register scratch = scratch2;
+
+  // Load the number string cache.
+  ExternalReference roots_address = ExternalReference::roots_address();
+  __ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex));
+  __ mov(number_string_cache,
+         Operand::StaticArray(scratch, times_pointer_size, roots_address));
+  // Make the hash mask from the length of the number string cache. It
+  // contains two elements (number and string) for each cache entry.
+  __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+  __ shr(mask, 1);  // Divide length by two (length is not a smi).
+  __ sub(Operand(mask), Immediate(1));  // Make mask.
+  // Calculate the entry in the number string cache. The hash value in the
+  // number string cache for smis is just the smi value.
+  __ mov(scratch, object);
+  __ SmiUntag(scratch);
+  __ and_(scratch, Operand(mask));
+  // Check if the entry is the smi we are looking for.
+  __ cmp(object,
+         FieldOperand(number_string_cache,
+                      scratch,
+                      times_twice_pointer_size,
+                      FixedArray::kHeaderSize));
+  __ j(not_equal, not_found);
+
+  // Get the result from the cache.
+  __ mov(result,
+         FieldOperand(number_string_cache,
+                      scratch,
+                      times_twice_pointer_size,
+                      FixedArray::kHeaderSize + kPointerSize));
+  __ IncrementCounter(&Counters::number_to_string_native, 1);
+}
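+
+// A rough C++ sketch of the lookup above (accessors hypothetical):
+//   int mask = cache->length() / 2 - 1;
+//   int entry = untag(object) & mask;      // A smi hashes to its value.
+//   if (cache->get(2 * entry) != object) goto not_found;
+//   result = cache->get(2 * entry + 1);    // The cached string.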
+
+
+void NumberToStringStub::Generate(MacroAssembler* masm) {
+  Label runtime;
+
+  __ mov(ebx, Operand(esp, kPointerSize));
+
+  // Generate code to lookup number in the number string cache.
+  GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, false, &runtime);
+  __ ret(1 * kPointerSize);
+
+  __ bind(&runtime);
+  // Handle number to string in the runtime system if not found in the cache.
+  __ TailCallRuntime(ExternalReference(Runtime::kNumberToString), 1, 1);
+}
+
+
 void CompareStub::Generate(MacroAssembler* masm) {
   Label call_builtin, done;
 
@@ -9069,6 +9751,9 @@
 
   // Slow-case: Non-function called.
   __ bind(&slow);
+  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+  // of the original receiver from the call site).
+  __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi);
   __ Set(eax, Immediate(argc_));
   __ Set(ebx, Immediate(0));
   __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
@@ -9642,13 +10327,34 @@
   // ecx: length of second string
   // edx: second string
   // Look at the length of the result of adding the two strings.
-  Label string_add_flat_result;
+  Label string_add_flat_result, longer_than_two;
   __ bind(&both_not_zero_length);
   __ add(ebx, Operand(ecx));
   // Use the runtime system when adding two one character strings, as it
   // contains optimizations for this specific case using the symbol table.
   __ cmp(ebx, 2);
-  __ j(equal, &string_add_runtime);
+  __ j(not_equal, &longer_than_two);
+
+  // Check that both strings are non-external ascii strings.
+  __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx,
+                                         &string_add_runtime);
+
+  // Get the two characters forming the new string.
+  __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
+  __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
+
+  // Try to look up the two character string in the symbol table. If it
+  // is not found, just allocate a new one.
+  Label make_two_character_string, make_flat_ascii_string;
+  GenerateTwoCharacterSymbolTableProbe(masm, ebx, ecx, eax, edx, edi,
+                                       &make_two_character_string);
+  __ ret(2 * kPointerSize);
+
+  __ bind(&make_two_character_string);
+  __ Set(ebx, Immediate(2));
+  __ jmp(&make_flat_ascii_string);
+
+  __ bind(&longer_than_two);
   // Check if resulting string will be flat.
   __ cmp(ebx, String::kMinNonFlatLength);
   __ j(below, &string_add_flat_result);
@@ -9715,7 +10421,10 @@
   __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
   __ test(ecx, Immediate(kAsciiStringTag));
   __ j(zero, &string_add_runtime);
+
+  __ bind(&make_flat_ascii_string);
   // Both strings are ascii strings. As they are short they are both flat.
+  // ebx: length of resulting flat string
   __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &string_add_runtime);
   // eax: result string
   __ mov(ecx, eax);
@@ -9872,6 +10581,190 @@
 }
 
 
+void StringStubBase::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+                                                          Register c1,
+                                                          Register c2,
+                                                          Register scratch1,
+                                                          Register scratch2,
+                                                          Register scratch3,
+                                                          Label* not_found) {
+  // Register scratch3 is the general scratch register in this function.
+  Register scratch = scratch3;
+
+  // Make sure that both characters are not digits, as two-character
+  // strings of digits are array indices and use a different hash
+  // algorithm. Don't try to look these up in the symbol table.
+  Label not_array_index;
+  __ mov(scratch, c1);
+  __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
+  __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
+  __ j(above, &not_array_index);
+  __ mov(scratch, c2);
+  __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
+  __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
+  __ j(below_equal, not_found);
+
+  __ bind(&not_array_index);
+  // Calculate the two character string hash.
+  Register hash = scratch1;
+  GenerateHashInit(masm, hash, c1, scratch);
+  GenerateHashAddCharacter(masm, hash, c2, scratch);
+  GenerateHashGetHash(masm, hash, scratch);
+
+  // Collect the two characters in a register.
+  Register chars = c1;
+  __ shl(c2, kBitsPerByte);
+  __ or_(chars, Operand(c2));
+
+  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
+  // hash:  hash of two character string.
+
+  // Load the symbol table.
+  Register symbol_table = c2;
+  ExternalReference roots_address = ExternalReference::roots_address();
+  __ mov(scratch, Immediate(Heap::kSymbolTableRootIndex));
+  __ mov(symbol_table,
+         Operand::StaticArray(scratch, times_pointer_size, roots_address));
+
+  // Calculate capacity mask from the symbol table capacity.
+  Register mask = scratch2;
+  static const int kCapacityOffset =
+      FixedArray::kHeaderSize +
+      SymbolTable::kCapacityIndex * kPointerSize;
+  __ mov(mask, FieldOperand(symbol_table, kCapacityOffset));
+  __ SmiUntag(mask);
+  __ sub(Operand(mask), Immediate(1));
+
+  // Registers
+  // chars:        two character string, char 1 in byte 0 and char 2 in byte 1.
+  // hash:         hash of two character string
+  // symbol_table: symbol table
+  // mask:         capacity mask
+  // scratch:      -
+
+  // Perform a number of probes in the symbol table.
+  static const int kProbes = 4;
+  Label found_in_symbol_table;
+  Label next_probe[kProbes], next_probe_pop_mask[kProbes];
+  for (int i = 0; i < kProbes; i++) {
+    // Calculate entry in symbol table.
+    __ mov(scratch, hash);
+    if (i > 0) {
+      __ add(Operand(scratch), Immediate(SymbolTable::GetProbeOffset(i)));
+    }
+    __ and_(scratch, Operand(mask));
+
+    // Load the entry from the symbol table.
+    Register candidate = scratch;  // Scratch register contains candidate.
+    ASSERT_EQ(1, SymbolTableShape::kEntrySize);
+    static const int kFirstElementOffset =
+        FixedArray::kHeaderSize +
+        SymbolTable::kPrefixStartIndex * kPointerSize +
+        SymbolTableShape::kPrefixSize * kPointerSize;
+    __ mov(candidate,
+           FieldOperand(symbol_table,
+                        scratch,
+                        times_pointer_size,
+                        kFirstElementOffset));
+
+    // If entry is undefined no string with this hash can be found.
+    __ cmp(candidate, Factory::undefined_value());
+    __ j(equal, not_found);
+
+    // If length is not 2 the string is not a candidate.
+    __ cmp(FieldOperand(candidate, String::kLengthOffset), Immediate(2));
+    __ j(not_equal, &next_probe[i]);
+
+    // As we are out of registers, save the mask on the stack and reuse
+    // that register as a temporary.
+    __ push(mask);
+    Register temp = mask;
+
+    // Check that the candidate is a non-external ascii string.
+    __ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset));
+    __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
+    __ JumpIfInstanceTypeIsNotSequentialAscii(
+        temp, temp, &next_probe_pop_mask[i]);
+
+    // Check if the two characters match.
+    __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
+    __ and_(temp, 0x0000ffff);
+    __ cmp(chars, Operand(temp));
+    __ j(equal, &found_in_symbol_table);
+    __ bind(&next_probe_pop_mask[i]);
+    __ pop(mask);
+    __ bind(&next_probe[i]);
+  }
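+  // The probe loop above is, roughly (a sketch; probe_offset(0) == 0):
+  //   for (int i = 0; i < kProbes; i++) {
+  //     int entry = (hash + probe_offset(i)) & mask;
+  //     candidate = table[entry];
+  //     if (candidate is a two character sequential ascii string whose
+  //         characters equal chars) goto found_in_symbol_table;
+  //   }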
+
+  // No matching 2 character string found by probing.
+  __ jmp(not_found);
+
+  // Scratch register contains result when we fall through to here.
+  Register result = scratch;
+  __ bind(&found_in_symbol_table);
+  __ pop(mask);  // Pop the temporarily saved mask from the stack.
+  if (!result.is(eax)) {
+    __ mov(eax, result);
+  }
+}
+
+
+void StringStubBase::GenerateHashInit(MacroAssembler* masm,
+                                      Register hash,
+                                      Register character,
+                                      Register scratch) {
+  // hash = character + (character << 10);
+  __ mov(hash, character);
+  __ shl(hash, 10);
+  __ add(hash, Operand(character));
+  // hash ^= hash >> 6;
+  __ mov(scratch, hash);
+  __ sar(scratch, 6);
+  __ xor_(hash, Operand(scratch));
+}
+
+
+void StringStubBase::GenerateHashAddCharacter(MacroAssembler* masm,
+                                              Register hash,
+                                              Register character,
+                                              Register scratch) {
+  // hash += character;
+  __ add(hash, Operand(character));
+  // hash += hash << 10;
+  __ mov(scratch, hash);
+  __ shl(scratch, 10);
+  __ add(hash, Operand(scratch));
+  // hash ^= hash >> 6;
+  __ mov(scratch, hash);
+  __ sar(scratch, 6);
+  __ xor_(hash, Operand(scratch));
+}
+
+
+void StringStubBase::GenerateHashGetHash(MacroAssembler* masm,
+                                         Register hash,
+                                         Register scratch) {
+  // hash += hash << 3;
+  __ mov(scratch, hash);
+  __ shl(scratch, 3);
+  __ add(hash, Operand(scratch));
+  // hash ^= hash >> 11;
+  __ mov(scratch, hash);
+  __ sar(scratch, 11);
+  __ xor_(hash, Operand(scratch));
+  // hash += hash << 15;
+  __ mov(scratch, hash);
+  __ shl(scratch, 15);
+  __ add(hash, Operand(scratch));
+
+  // if (hash == 0) hash = 27;
+  Label hash_not_zero;
+  __ test(hash, Operand(hash));
+  __ j(not_zero, &hash_not_zero);
+  __ mov(hash, Immediate(27));
+  __ bind(&hash_not_zero);
+}
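+
+// Taken together, the three hash helpers above compute (a C++ sketch of
+// the generated code, for illustration only):
+//   uint32_t hash = c1 + (c1 << 10);  hash ^= hash >> 6;          // Init.
+//   hash += c2;  hash += hash << 10;  hash ^= hash >> 6;          // Add c2.
+//   hash += hash << 3;  hash ^= hash >> 11;  hash += hash << 15;  // Final.
+//   if (hash == 0) hash = 27;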
+
+
 void SubStringStub::Generate(MacroAssembler* masm) {
   Label runtime;
 
@@ -9892,26 +10785,55 @@
   // eax: string
   // ebx: instance type
   // Calculate length of sub string using the smi values.
-  __ mov(ecx, Operand(esp, 1 * kPointerSize));  // to
+  Label result_longer_than_two;
+  __ mov(ecx, Operand(esp, 1 * kPointerSize));  // To index.
   __ test(ecx, Immediate(kSmiTagMask));
   __ j(not_zero, &runtime);
-  __ mov(edx, Operand(esp, 2 * kPointerSize));  // from
+  __ mov(edx, Operand(esp, 2 * kPointerSize));  // From index.
   __ test(edx, Immediate(kSmiTagMask));
   __ j(not_zero, &runtime);
   __ sub(ecx, Operand(edx));
-  // Handle sub-strings of length 2 and less in the runtime system.
+  // Special handling of sub-strings of length 1 and 2. One character
+  // strings are handled in the runtime system (looked up in the single
+  // character cache). Two character strings are looked up in the symbol
+  // table.
   __ SmiUntag(ecx);  // Result length is no longer smi.
   __ cmp(ecx, 2);
-  __ j(below_equal, &runtime);
+  __ j(greater, &result_longer_than_two);
+  __ j(less, &runtime);
 
+  // Sub string of length 2 requested.
+  // eax: string
+  // ebx: instance type
+  // ecx: sub string length (value is 2)
+  // edx: from index (smi)
+  __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &runtime);
+
+  // Get the two characters forming the sub string.
+  __ SmiUntag(edx);  // From index is no longer smi.
+  __ movzx_b(ebx, FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize));
+  __ movzx_b(ecx,
+             FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize + 1));
+
+  // Try to look up the two character string in the symbol table.
+  Label make_two_character_string;
+  GenerateTwoCharacterSymbolTableProbe(masm, ebx, ecx, eax, edx, edi,
+                                       &make_two_character_string);
+  __ ret(2 * kPointerSize);
+
+  __ bind(&make_two_character_string);
+  // Set up registers for allocating the two character string.
+  __ mov(eax, Operand(esp, 3 * kPointerSize));
+  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+  __ Set(ecx, Immediate(2));
+
+  __ bind(&result_longer_than_two);
   // eax: string
   // ebx: instance type
   // ecx: result string length
   // Check for flat ascii string
   Label non_ascii_flat;
-  __ and_(ebx, kStringRepresentationMask | kStringEncodingMask);
-  __ cmp(ebx, kSeqStringTag | kAsciiStringTag);
-  __ j(not_equal, &non_ascii_flat);
+  __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &non_ascii_flat);
 
   // Allocate the result.
   __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime);