Update V8 to r4924 as required by WebKit r61871

Change-Id: Ic819dad0c1c9e035b8ffd306c96656ba87c5e85a
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index 70bcdb1..01c60aa 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -35,16 +35,11 @@
 namespace v8 {
 namespace internal {
 
-inline Condition NegateCondition(Condition cc) {
-  return static_cast<Condition>(cc ^ 1);
-}
-
 
 // -----------------------------------------------------------------------------
 // Implementation of Assembler
 
 
-
 void Assembler::emitl(uint32_t x) {
   Memory::uint32_at(pc_) = x;
   pc_ += sizeof(uint32_t);
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index d77c09f..e665385 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -382,6 +382,11 @@
 }
 
 
+void Assembler::CodeTargetAlign() {
+  Align(16);  // Preferred alignment of jump targets on x64.
+}
+
+
 void Assembler::bind_to(Label* L, int pos) {
   ASSERT(!L->is_bound());  // Label may only be bound once.
   last_pc_ = NULL;
@@ -1148,6 +1153,15 @@
 }
 
 
+void Assembler::incl(Register dst) {
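+  // "inc r/m32" is opcode 0xFF with extension /0 in the ModR/M reg field;
+  // a REX prefix is emitted only when dst is an extended register.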
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst);
+  emit(0xFF);
+  emit_modrm(0, dst);
+}
+
+
 void Assembler::int3() {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -2738,17 +2752,6 @@
 }
 
 
-void Assembler::comisd(XMMRegister dst, XMMRegister src) {
-  EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
-  emit(0x66);
-  emit_optional_rex_32(dst, src);
-  emit(0x0f);
-  emit(0x2f);
-  emit_sse_operand(dst, src);
-}
-
-
 void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index c7e737c..f195439 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -215,7 +215,10 @@
 // Negation of the default no_condition (-1) results in a non-default
 // no_condition value (-2). As long as tests for no_condition check
 // for condition < 0, this will work as expected.
-inline Condition NegateCondition(Condition cc);
+inline Condition NegateCondition(Condition cc) {
+  return static_cast<Condition>(cc ^ 1);
+}
+
 
 // Corresponds to transposing the operands of a comparison.
 inline Condition ReverseCondition(Condition cc) {
@@ -241,6 +244,7 @@
   };
 }
 
+
 enum Hint {
   no_hint = 0,
   not_taken = 0x2e,
@@ -495,6 +499,8 @@
   // possible to align the pc offset to a multiple
   // of m. m must be a power of 2.
   void Align(int m);
+  // Aligns code to the preferred alignment for jump targets on this platform.
+  void CodeTargetAlign();
 
   // Stack
   void pushfq();
@@ -761,6 +767,7 @@
 
   void incq(Register dst);
   void incq(const Operand& dst);
+  void incl(Register dst);
   void incl(const Operand& dst);
 
   void lea(Register dst, const Operand& src);
@@ -1122,7 +1129,6 @@
   void xorpd(XMMRegister dst, XMMRegister src);
   void sqrtsd(XMMRegister dst, XMMRegister src);
 
-  void comisd(XMMRegister dst, XMMRegister src);
   void ucomisd(XMMRegister dst, XMMRegister src);
 
   // The first argument is the reg field, the second argument is the r/m field.
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index f9692ce..3ba8906 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -262,63 +262,23 @@
 
 class FloatingPointHelper : public AllStatic {
  public:
-  // Code pattern for loading a floating point value. Input value must
-  // be either a smi or a heap number object (fp value). Requirements:
-  // operand on TOS+1. Returns operand as floating point number on FPU
-  // stack.
-  static void LoadFloatOperand(MacroAssembler* masm, Register scratch);
-
-  // Code pattern for loading a floating point value. Input value must
-  // be either a smi or a heap number object (fp value). Requirements:
-  // operand in src register. Returns operand as floating point number
-  // in XMM register.  May destroy src register.
-  static void LoadFloatOperand(MacroAssembler* masm,
-                               Register src,
-                               XMMRegister dst);
-
-  // Code pattern for loading a possible number into a XMM register.
-  // If the contents of src is not a number, control branches to
-  // the Label not_number.  If contents of src is a smi or a heap number
-  // object (fp value), it is loaded into the XMM register as a double.
-  // The register src is not changed, and src may not be kScratchRegister.
-  static void LoadFloatOperand(MacroAssembler* masm,
-                               Register src,
-                               XMMRegister dst,
-                               Label *not_number);
-
-  // Code pattern for loading floating point values. Input values must
-  // be either smi or heap number objects (fp values). Requirements:
-  // operand_1 in rdx, operand_2 in rax; Returns operands as
-  // floating point numbers in XMM registers.
-  static void LoadFloatOperands(MacroAssembler* masm,
-                                XMMRegister dst1,
-                                XMMRegister dst2);
-
-  // Similar to LoadFloatOperands, assumes that the operands are smis.
-  static void LoadFloatOperandsFromSmis(MacroAssembler* masm,
-                                        XMMRegister dst1,
-                                        XMMRegister dst2);
-
-  // Code pattern for loading floating point values onto the fp stack.
-  // Input values must be either smi or heap number objects (fp values).
-  // Requirements:
-  // Register version: operands in registers lhs and rhs.
-  // Stack version: operands on TOS+1 and TOS+2.
-  // Returns operands as floating point numbers on fp stack.
-  static void LoadFloatOperands(MacroAssembler* masm,
-                                Register lhs,
-                                Register rhs);
-
-  // Test if operands are smi or number objects (fp). Requirements:
-  // operand_1 in rax, operand_2 in rdx; falls through on float or smi
-  // operands, jumps to the non_float label otherwise.
-  static void CheckNumberOperands(MacroAssembler* masm,
-                                  Label* non_float);
+  // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
+  // LoadSSE2SmiOperands assumes both operands are smis.
+  // LoadSSE2NumberOperands assumes both operands are smis or heap numbers.
+  // LoadSSE2UnknownOperands jumps to not_numbers if the operands are not
+  // both numbers. All three leave rdx and rax unchanged.
+  static void LoadSSE2SmiOperands(MacroAssembler* masm);
+  static void LoadSSE2NumberOperands(MacroAssembler* masm);
+  static void LoadSSE2UnknownOperands(MacroAssembler* masm,
+                                      Label* not_numbers);
 
   // Takes the operands in rdx and rax and loads them as integers in rax
   // and rcx.
   static void LoadAsIntegers(MacroAssembler* masm,
-                             Label* operand_conversion_failure);
+                             Label* operand_conversion_failure,
+                             Register heap_number_map);
+  // As above, but we know the operands to be numbers. In that case,
+  // conversion can't fail.
+  static void LoadNumbersAsIntegers(MacroAssembler* masm);
 };
 
 
@@ -3108,25 +3068,31 @@
         ref.GetValue();
         // Use global object as receiver.
         LoadGlobalReceiver();
+        // Call the function.
+        CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
       } else {
-        Reference ref(this, property, false);
-        ASSERT(ref.size() == 2);
-        Result key = frame_->Pop();
-        frame_->Dup();  // Duplicate the receiver.
-        frame_->Push(&key);
-        ref.GetValue();
-        // Top of frame contains function to call, with duplicate copy of
-        // receiver below it.  Swap them.
-        Result function = frame_->Pop();
-        Result receiver = frame_->Pop();
-        frame_->Push(&function);
-        frame_->Push(&receiver);
+        // Push the receiver onto the frame.
+        Load(property->obj());
+
+        // Load the arguments.
+        int arg_count = args->length();
+        for (int i = 0; i < arg_count; i++) {
+          Load(args->at(i));
+          frame_->SpillTop();
+        }
+
+        // Load the name of the function.
+        Load(property->key());
+
+        // Call the IC initialization code.
+        CodeForSourcePosition(node->position());
+        Result result = frame_->CallKeyedCallIC(RelocInfo::CODE_TARGET,
+                                                arg_count,
+                                                loop_nesting());
+        frame_->RestoreContextRegister();
+        frame_->Push(&result);
       }
-
-      // Call the function.
-      CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position());
     }
-
   } else {
     // ----------------------------------
     // JavaScript example: 'foo(1, 2, 3)'  // foo is not global
@@ -4423,7 +4389,7 @@
   // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
   __ addsd(xmm2, xmm3);
   // xmm2 now has 0.5.
-  __ comisd(xmm2, xmm1);
+  __ ucomisd(xmm2, xmm1);
   call_runtime.Branch(not_equal);
 
   // Calculates square root.
@@ -4763,8 +4729,8 @@
   __ cmpq(ArrayElement(cache_, dst_), key_);
   __ j(not_equal, &first_loop);
 
-  __ Integer32ToSmi(scratch_, dst_);
-  __ movq(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), scratch_);
+  __ Integer32ToSmiField(
+      FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
   __ movq(dst_, ArrayElement(cache_, dst_, 1));
   __ jmp(exit_label());
 
@@ -4785,16 +4751,15 @@
   __ cmpq(ArrayElement(cache_, dst_), key_);
   __ j(not_equal, &second_loop);
 
-  __ Integer32ToSmi(scratch_, dst_);
-  __ movq(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), scratch_);
+  __ Integer32ToSmiField(
+      FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
   __ movq(dst_, ArrayElement(cache_, dst_, 1));
   __ jmp(exit_label());
 
   __ bind(&cache_miss);
   __ push(cache_);  // store a reference to cache
   __ push(key_);  // store a key
-  Handle<Object> receiver(Top::global_context()->global());
-  __ Push(receiver);
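+  // rsi holds the current context; push the global object from it as the
+  // receiver.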
+  __ push(Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
   __ push(key_);
   // On x64 function must be in rdi.
   __ movq(rdi, FieldOperand(cache_, JSFunctionResultCache::kFactoryOffset));
@@ -4809,50 +4774,50 @@
   // cache miss this optimization would hardly matter much.
 
   // Check if we could add new entry to cache.
-  __ movq(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
-  __ movq(r9, FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
-  __ SmiCompare(rbx, r9);
+  __ SmiToInteger32(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+  __ SmiToInteger32(r9,
+                    FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
+  __ cmpl(rbx, r9);
   __ j(greater, &add_new_entry);
 
   // Check if we could evict entry after finger.
-  __ movq(rdx, FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
-  __ SmiToInteger32(rdx, rdx);
-  __ SmiToInteger32(rbx, rbx);
-  __ addq(rdx, kEntrySizeImm);
+  __ SmiToInteger32(rdx,
+                    FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
+  __ addl(rdx, kEntrySizeImm);
   Label forward;
-  __ cmpq(rbx, rdx);
+  __ cmpl(rbx, rdx);
   __ j(greater, &forward);
   // Need to wrap over the cache.
   __ movl(rdx, kEntriesIndexImm);
   __ bind(&forward);
-  __ Integer32ToSmi(r9, rdx);
+  __ movl(r9, rdx);
   __ jmp(&update_cache);
 
   __ bind(&add_new_entry);
-  // r9 holds cache size as smi.
-  __ SmiToInteger32(rdx, r9);
-  __ SmiAddConstant(rbx, r9, Smi::FromInt(JSFunctionResultCache::kEntrySize));
-  __ movq(FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);
+  // r9 holds cache size as int32.
+  __ leal(rbx, Operand(r9, JSFunctionResultCache::kEntrySize));
+  __ Integer32ToSmiField(
+      FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);
 
   // Update the cache itself.
-  // rdx holds the index as int.
-  // r9 holds the index as smi.
+  // r9 holds the index as int32.
   __ bind(&update_cache);
   __ pop(rbx);  // restore the key
-  __ movq(FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9);
+  __ Integer32ToSmiField(
+      FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9);
   // Store key.
-  __ movq(ArrayElement(rcx, rdx), rbx);
+  __ movq(ArrayElement(rcx, r9), rbx);
   __ RecordWrite(rcx, 0, rbx, r9);
 
   // Store value.
   __ pop(rcx);  // restore the cache.
-  __ movq(rdx, FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
-  __ SmiAddConstant(rdx, rdx, Smi::FromInt(1));
-  __ movq(r9, rdx);
-  __ SmiToInteger32(rdx, rdx);
+  __ SmiToInteger32(rdx,
+                    FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
+  __ incl(rdx);
+  // Back up rax, because the RecordWrite macro clobbers its arguments.
   __ movq(rbx, rax);
-  __ movq(ArrayElement(rcx, rdx), rbx);
-  __ RecordWrite(rcx, 0, rbx, r9);
+  __ movq(ArrayElement(rcx, rdx), rax);
+  __ RecordWrite(rcx, 0, rbx, rdx);
 
   if (!dst_.is(rax)) {
     __ movq(dst_, rax);
@@ -6507,7 +6472,7 @@
                         &not_numbers);
   LoadComparisonOperand(masm_, right_side, xmm1, left_side, right_side,
                         &not_numbers);
-  __ comisd(xmm0, xmm1);
+  __ ucomisd(xmm0, xmm1);
   // Bail out if a NaN is involved.
   not_numbers.Branch(parity_even, left_side, right_side);
 
@@ -8178,7 +8143,7 @@
   // ST[0] == double value
   // rbx = bits of double value.
   // rdx = also bits of double value.
-  // Compute hash (h is 32 bits, bits are 64):
+  // Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
   //   h = h0 = bits ^ (bits >> 32);
   //   h ^= h >> 16;
   //   h ^= h >> 8;
@@ -8189,9 +8154,9 @@
   __ movl(rcx, rdx);
   __ movl(rax, rdx);
   __ movl(rdi, rdx);
-  __ shrl(rdx, Immediate(8));
-  __ shrl(rcx, Immediate(16));
-  __ shrl(rax, Immediate(24));
+  __ sarl(rdx, Immediate(8));
+  __ sarl(rcx, Immediate(16));
+  __ sarl(rax, Immediate(24));
   __ xorl(rcx, rdx);
   __ xorl(rax, rdi);
   __ xorl(rcx, rax);
@@ -8293,7 +8258,7 @@
   // Move exponent and sign bits to low bits.
   __ shr(rdi, Immediate(HeapNumber::kMantissaBits));
   // Remove sign bit.
-  __ andl(rdi, Immediate((1 << HeapNumber::KExponentBits) - 1));
+  __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1));
   int supported_exponent_limit = (63 + HeapNumber::kExponentBias);
   __ cmpl(rdi, Immediate(supported_exponent_limit));
   __ j(below, &in_range);
@@ -8370,7 +8335,7 @@
   // Double to remove sign bit, shift exponent down to least significant bits.
   // and subtract bias to get the unshifted, unbiased exponent.
   __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
-  __ shr(double_exponent, Immediate(64 - HeapNumber::KExponentBits));
+  __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
   __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
   // Check whether the exponent is too big for a 63 bit unsigned integer.
   __ cmpl(double_exponent, Immediate(63));
@@ -8546,18 +8511,18 @@
 
   // rcx: RegExp data (FixedArray)
   // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
-  __ movq(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset));
-  __ SmiCompare(rbx, Smi::FromInt(JSRegExp::IRREGEXP));
+  __ SmiToInteger32(rbx, FieldOperand(rcx, JSRegExp::kDataTagOffset));
+  __ cmpl(rbx, Immediate(JSRegExp::IRREGEXP));
   __ j(not_equal, &runtime);
 
   // rcx: RegExp data (FixedArray)
   // Check that the number of captures fit in the static offsets vector buffer.
-  __ movq(rdx, FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
+  __ SmiToInteger32(rdx,
+                    FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
   // Calculate number of capture registers (number_of_captures + 1) * 2.
-  __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rdx, 1);
-  __ addq(rdx, Immediate(2));  // rdx was number_of_captures * 2.
+  __ leal(rdx, Operand(rdx, rdx, times_1, 2));
   // Check that the static offsets vector buffer is large enough.
-  __ cmpq(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
+  __ cmpl(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
   __ j(above, &runtime);
 
   // rcx: RegExp data (FixedArray)
@@ -8567,17 +8532,15 @@
   __ JumpIfSmi(rax, &runtime);
   Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
   __ j(NegateCondition(is_string), &runtime);
-  // Get the length of the string to rbx.
-  __ movq(rbx, FieldOperand(rax, String::kLengthOffset));
 
-  // rbx: Length of subject string as smi
-  // rcx: RegExp data (FixedArray)
-  // rdx: Number of capture registers
+  // rax: Subject string.
+  // rcx: RegExp data (FixedArray).
+  // rdx: Number of capture registers.
   // Check that the third argument is a positive smi less than the string
   // length. A negative value will be greater (unsigned comparison).
-  __ movq(rax, Operand(rsp, kPreviousIndexOffset));
-  __ JumpIfNotSmi(rax, &runtime);
-  __ SmiCompare(rax, rbx);
+  __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
+  __ JumpIfNotSmi(rbx, &runtime);
+  __ SmiCompare(rbx, FieldOperand(rax, String::kLengthOffset));
   __ j(above_equal, &runtime);
 
   // rcx: RegExp data (FixedArray)
@@ -8595,65 +8558,63 @@
   // Check that the last match info has space for the capture registers and the
   // additional information. Ensure no overflow in add.
   ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
-  __ movq(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
-  __ SmiToInteger32(rax, rax);
+  __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
   __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
   __ cmpl(rdx, rax);
   __ j(greater, &runtime);
 
-  // ecx: RegExp data (FixedArray)
+  // rcx: RegExp data (FixedArray)
   // Check the representation and encoding of the subject string.
-  Label seq_string, seq_two_byte_string, check_code;
-  const int kStringRepresentationEncodingMask =
-      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+  Label seq_ascii_string, seq_two_byte_string, check_code;
   __ movq(rax, Operand(rsp, kSubjectOffset));
   __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
   __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
-  __ andb(rbx, Immediate(kStringRepresentationEncodingMask));
-  // First check for sequential string.
-  ASSERT_EQ(0, kStringTag);
-  ASSERT_EQ(0, kSeqStringTag);
+  // First check for flat two byte string.
+  __ andb(rbx, Immediate(
+      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask));
+  ASSERT_EQ(0, kStringTag | kSeqStringTag | kTwoByteStringTag);
+  __ j(zero, &seq_two_byte_string);
+  // Any other flat string must be a flat ascii string.
   __ testb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
-  __ j(zero, &seq_string);
+  __ j(zero, &seq_ascii_string);
 
   // Check for flat cons string.
   // A flat cons string is a cons string where the second part is the empty
   // string. In that case the subject string is just the first part of the cons
   // string. Also in this case the first part of the cons string is known to be
   // a sequential string or an external string.
-  __ andb(rbx, Immediate(kStringRepresentationMask));
-  __ cmpb(rbx, Immediate(kConsStringTag));
-  __ j(not_equal, &runtime);
+  ASSERT(kExternalStringTag != 0);
+  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+  __ testb(rbx, Immediate(kIsNotStringMask | kExternalStringTag));
+  __ j(not_zero, &runtime);
+  // String is a cons string.
   __ movq(rdx, FieldOperand(rax, ConsString::kSecondOffset));
   __ Cmp(rdx, Factory::empty_string());
   __ j(not_equal, &runtime);
   __ movq(rax, FieldOperand(rax, ConsString::kFirstOffset));
   __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
-  __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
-  ASSERT_EQ(0, kSeqStringTag);
-  __ testb(rbx, Immediate(kStringRepresentationMask));
+  // String is a cons string with empty second part.
+  // rax: first part of cons string.
+  // rbx: map of first part of cons string.
+  // Is first part a flat two byte string?
+  __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
+           Immediate(kStringRepresentationMask | kStringEncodingMask));
+  ASSERT_EQ(0, kSeqStringTag | kTwoByteStringTag);
+  __ j(zero, &seq_two_byte_string);
+  // Any other flat string must be ascii.
+  __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
+           Immediate(kStringRepresentationMask));
   __ j(not_zero, &runtime);
-  __ andb(rbx, Immediate(kStringRepresentationEncodingMask));
 
-  __ bind(&seq_string);
-  // rax: subject string (sequential either ascii to two byte)
-  // rbx: suject string type & kStringRepresentationEncodingMask
+  __ bind(&seq_ascii_string);
+  // rax: subject string (sequential ascii)
   // rcx: RegExp data (FixedArray)
-  // Check that the irregexp code has been generated for an ascii string. If
-  // it has, the field contains a code object otherwise it contains the hole.
-  const int kSeqTwoByteString = kStringTag | kSeqStringTag | kTwoByteStringTag;
-  __ cmpb(rbx, Immediate(kSeqTwoByteString));
-  __ j(equal, &seq_two_byte_string);
-  if (FLAG_debug_code) {
-    __ cmpb(rbx, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
-    __ Check(equal, "Expected sequential ascii string");
-  }
   __ movq(r12, FieldOperand(rcx, JSRegExp::kDataAsciiCodeOffset));
   __ Set(rdi, 1);  // Type is ascii.
   __ jmp(&check_code);
 
   __ bind(&seq_two_byte_string);
-  // rax: subject string
+  // rax: subject string (flat two-byte)
   // rcx: RegExp data (FixedArray)
   __ movq(r12, FieldOperand(rcx, JSRegExp::kDataUC16CodeOffset));
   __ Set(rdi, 0);  // Type is two byte.
@@ -8670,8 +8631,7 @@
   // r12: code
   // Load used arguments before starting to push arguments for call to native
   // RegExp code to avoid handling changing stack height.
-  __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
-  __ SmiToInteger64(rbx, rbx);  // Previous index from smi.
+  __ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));
 
   // rax: subject string
   // rbx: previous index
@@ -8783,10 +8743,10 @@
   __ bind(&success);
   __ movq(rax, Operand(rsp, kJSRegExpOffset));
   __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
-  __ movq(rdx, FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
+  __ SmiToInteger32(rax,
+                    FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
   // Calculate number of capture registers (number_of_captures + 1) * 2.
-  __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rdx, 1);
-  __ addq(rdx, Immediate(2));  // rdx was number_of_captures * 2.
+  __ leal(rdx, Operand(rax, rax, times_1, 2));
 
   // rdx: Number of capture registers
   // Load last_match_info which is still known to be a fast case JSArray.
@@ -8829,7 +8789,7 @@
                        rdx,
                        times_pointer_size,
                        RegExpImpl::kFirstCaptureOffset),
-                       rdi);
+          rdi);
   __ jmp(&next_capture);
   __ bind(&done);
 
@@ -8873,9 +8833,9 @@
 
   // Make the hash mask from the length of the number string cache. It
   // contains two elements (number and string) for each cache entry.
-  __ movq(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
-  // Divide smi tagged length by two.
-  __ PositiveSmiDivPowerOfTwoToInteger32(mask, mask, 1);
+  __ SmiToInteger32(
+      mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+  __ shrl(mask, Immediate(1));
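+  // mask now holds the number of cache entries, a power of two, so
+  // subtracting one below yields the hash mask.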
   __ subq(mask, Immediate(1));  // Make mask.
 
   // Calculate the entry in the number string cache. The hash value in the
@@ -8905,15 +8865,14 @@
     CpuFeatures::Scope fscope(SSE2);
     __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
     __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
-    __ comisd(xmm0, xmm1);
+    __ ucomisd(xmm0, xmm1);
     __ j(parity_even, not_found);  // Bail out if NaN is involved.
     __ j(not_equal, not_found);  // The cache did not contain this value.
     __ jmp(&load_result_from_cache);
   }
 
   __ bind(&is_smi);
-  __ movq(scratch, object);
-  __ SmiToInteger32(scratch, scratch);
+  __ SmiToInteger32(scratch, object);
   GenerateConvertHashCodeToIndex(masm, scratch, mask);
 
   Register index = scratch;
@@ -9107,12 +9066,8 @@
   if (include_number_compare_) {
     Label non_number_comparison;
     Label unordered;
-    FloatingPointHelper::LoadFloatOperand(masm, rdx, xmm0,
-                                          &non_number_comparison);
-    FloatingPointHelper::LoadFloatOperand(masm, rax, xmm1,
-                                          &non_number_comparison);
-
-    __ comisd(xmm0, xmm1);
+    FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
+    __ ucomisd(xmm0, xmm1);
 
     // Don't base result on EFLAGS when a NaN is involved.
     __ j(parity_even, &unordered);
@@ -9340,29 +9295,30 @@
   __ j(equal, &adaptor_frame);
 
   // Get the length from the frame.
-  __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+  __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
   __ jmp(&try_allocate);
 
   // Patch the arguments.length and the parameters pointer.
   __ bind(&adaptor_frame);
-  __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ movq(Operand(rsp, 1 * kPointerSize), rcx);
+  __ SmiToInteger32(rcx,
+                    Operand(rdx,
+                            ArgumentsAdaptorFrameConstants::kLengthOffset));
+  // Space on stack must already hold a smi.
+  __ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx);
   // Do not clobber the length index for the indexing operation since
   // it is used compute the size for allocation later.
-  SmiIndex index = masm->SmiToIndex(rbx, rcx, kPointerSizeLog2);
-  __ lea(rdx, Operand(rdx, index.reg, index.scale, kDisplacement));
+  __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement));
   __ movq(Operand(rsp, 2 * kPointerSize), rdx);
 
   // Try the new space allocation. Start out with computing the size of
   // the arguments object and the elements array.
   Label add_arguments_object;
   __ bind(&try_allocate);
-  __ testq(rcx, rcx);
+  __ testl(rcx, rcx);
   __ j(zero, &add_arguments_object);
-  index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
-  __ lea(rcx, Operand(index.reg, index.scale, FixedArray::kHeaderSize));
+  __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
   __ bind(&add_arguments_object);
-  __ addq(rcx, Immediate(Heap::kArgumentsObjectSize));
+  __ addl(rcx, Immediate(Heap::kArgumentsObjectSize));
 
   // Do the allocation of both objects in one go.
   __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
@@ -9374,10 +9330,13 @@
   __ movq(rdi, Operand(rdi, offset));
 
   // Copy the JS object part.
-  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
-    __ movq(kScratchRegister, FieldOperand(rdi, i));
-    __ movq(FieldOperand(rax, i), kScratchRegister);
-  }
+  STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
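+  // The three header words are the map, properties, and elements fields.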
+  __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize));
+  __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize));
+  __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize));
+  __ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister);
+  __ movq(FieldOperand(rax, 1 * kPointerSize), rdx);
+  __ movq(FieldOperand(rax, 2 * kPointerSize), rbx);
 
   // Setup the callee in-object property.
   ASSERT(Heap::arguments_callee_index == 0);
@@ -9391,7 +9350,7 @@
 
   // If there are no actual arguments, we're done.
   Label done;
-  __ testq(rcx, rcx);
+  __ SmiTest(rcx);
   __ j(zero, &done);
 
   // Get the parameters pointer from the stack and untag the length.
@@ -9413,7 +9372,7 @@
   __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
   __ addq(rdi, Immediate(kPointerSize));
   __ subq(rdx, Immediate(kPointerSize));
-  __ decq(rcx);
+  __ decl(rcx);
   __ j(not_zero, &loop);
 
   // Return and remove the on-stack parameters.
@@ -9964,86 +9923,73 @@
 }
 
 
-void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
-                                           Register number) {
-  Label load_smi, done;
-
-  __ JumpIfSmi(number, &load_smi);
-  __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
-  __ jmp(&done);
-
-  __ bind(&load_smi);
-  __ SmiToInteger32(number, number);
-  __ push(number);
-  __ fild_s(Operand(rsp, 0));
-  __ pop(number);
-
-  __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
-                                           Register src,
-                                           XMMRegister dst) {
-  Label load_smi, done;
-
-  __ JumpIfSmi(src, &load_smi);
-  __ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset));
-  __ jmp(&done);
-
-  __ bind(&load_smi);
-  __ SmiToInteger32(src, src);
-  __ cvtlsi2sd(dst, src);
-
-  __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
-                                           Register src,
-                                           XMMRegister dst,
-                                           Label* not_number) {
-  Label load_smi, done;
-  ASSERT(!src.is(kScratchRegister));
-  __ JumpIfSmi(src, &load_smi);
-  __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
-  __ cmpq(FieldOperand(src, HeapObject::kMapOffset), kScratchRegister);
-  __ j(not_equal, not_number);
-  __ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset));
-  __ jmp(&done);
-
-  __ bind(&load_smi);
-  __ SmiToInteger32(kScratchRegister, src);
-  __ cvtlsi2sd(dst, kScratchRegister);
-
-  __ bind(&done);
-}
-
-
-void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
-                                            XMMRegister dst1,
-                                            XMMRegister dst2) {
-  __ movq(kScratchRegister, rdx);
-  LoadFloatOperand(masm, kScratchRegister, dst1);
-  __ movq(kScratchRegister, rax);
-  LoadFloatOperand(masm, kScratchRegister, dst2);
-}
-
-
-void FloatingPointHelper::LoadFloatOperandsFromSmis(MacroAssembler* masm,
-                                                    XMMRegister dst1,
-                                                    XMMRegister dst2) {
+void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
   __ SmiToInteger32(kScratchRegister, rdx);
-  __ cvtlsi2sd(dst1, kScratchRegister);
+  __ cvtlsi2sd(xmm0, kScratchRegister);
   __ SmiToInteger32(kScratchRegister, rax);
-  __ cvtlsi2sd(dst2, kScratchRegister);
+  __ cvtlsi2sd(xmm1, kScratchRegister);
+}
+
+
+void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
+  Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
+  // Load operand in rdx into xmm0.
+  __ JumpIfSmi(rdx, &load_smi_rdx);
+  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+  // Load operand in rax into xmm1.
+  __ JumpIfSmi(rax, &load_smi_rax);
+  __ bind(&load_nonsmi_rax);
+  __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+  __ jmp(&done);
+
+  __ bind(&load_smi_rdx);
+  __ SmiToInteger32(kScratchRegister, rdx);
+  __ cvtlsi2sd(xmm0, kScratchRegister);
+  __ JumpIfNotSmi(rax, &load_nonsmi_rax);
+
+  __ bind(&load_smi_rax);
+  __ SmiToInteger32(kScratchRegister, rax);
+  __ cvtlsi2sd(xmm1, kScratchRegister);
+
+  __ bind(&done);
+}
+
+
+void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
+                                                  Label* not_numbers) {
+  Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
+  // Load operand in rdx into xmm0, or branch to not_numbers.
+  __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
+  __ JumpIfSmi(rdx, &load_smi_rdx);
+  __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
+  __ j(not_equal, not_numbers);  // Argument in rdx is not a number.
+  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+  // Load operand in rax into xmm1, or branch to not_numbers.
+  __ JumpIfSmi(rax, &load_smi_rax);
+
+  __ bind(&load_nonsmi_rax);
+  __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
+  __ j(not_equal, not_numbers);
+  __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+  __ jmp(&done);
+
+  __ bind(&load_smi_rdx);
+  __ SmiToInteger32(kScratchRegister, rdx);
+  __ cvtlsi2sd(xmm0, kScratchRegister);
+  __ JumpIfNotSmi(rax, &load_nonsmi_rax);
+
+  __ bind(&load_smi_rax);
+  __ SmiToInteger32(kScratchRegister, rax);
+  __ cvtlsi2sd(xmm1, kScratchRegister);
+  __ bind(&done);
 }
 
 
 // Input: rdx, rax are the left and right objects of a bit op.
 // Output: rax, rcx are left and right integers for a bit op.
 void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
-                                         Label* conversion_failure) {
+                                         Label* conversion_failure,
+                                         Register heap_number_map) {
   // Check float operands.
   Label arg1_is_object, check_undefined_arg1;
   Label arg2_is_object, check_undefined_arg2;
@@ -10061,8 +10007,7 @@
   __ jmp(&load_arg2);
 
   __ bind(&arg1_is_object);
-  __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
-  __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
+  __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
   __ j(not_equal, &check_undefined_arg1);
   // Get the untagged integer version of the edx heap number in rcx.
   IntegerConvert(masm, rdx, rdx);
@@ -10083,8 +10028,7 @@
   __ jmp(&done);
 
   __ bind(&arg2_is_object);
-  __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
-  __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
+  __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
   __ j(not_equal, &check_undefined_arg2);
   // Get the untagged integer version of the eax heap number in ecx.
   IntegerConvert(masm, rcx, rax);
@@ -10093,51 +10037,35 @@
 }
 
 
-void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
-                                            Register lhs,
-                                            Register rhs) {
-  Label load_smi_lhs, load_smi_rhs, done_load_lhs, done;
-  __ JumpIfSmi(lhs, &load_smi_lhs);
-  __ fld_d(FieldOperand(lhs, HeapNumber::kValueOffset));
-  __ bind(&done_load_lhs);
+// Input: rdx, rax are the left and right objects of a bit op.
+// Output: rax, rcx are left and right integers for a bit op.
+void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
+  if (FLAG_debug_code) {
+    // The arguments cannot both be smis; that case is handled by the
+    // smi-only code.
+    Label ok;
+    __ JumpIfNotBothSmi(rax, rdx, &ok);
+    __ Abort("Both arguments smi but not handled by smi-code.");
+    __ bind(&ok);
+  }
+  // Check float operands.
+  Label done;
+  Label rax_is_object;
+  Label rdx_is_object;
 
-  __ JumpIfSmi(rhs, &load_smi_rhs);
-  __ fld_d(FieldOperand(rhs, HeapNumber::kValueOffset));
+  __ JumpIfNotSmi(rdx, &rdx_is_object);
+  __ SmiToInteger32(rdx, rdx);
+
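+  // The operands are never both smis (asserted above in debug mode), so
+  // when rdx is a smi we fall through to convert the heap number in rax.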
+  __ bind(&rax_is_object);
+  IntegerConvert(masm, rcx, rax);  // Uses rdi, rcx and rbx.
   __ jmp(&done);
 
-  __ bind(&load_smi_lhs);
-  __ SmiToInteger64(kScratchRegister, lhs);
-  __ push(kScratchRegister);
-  __ fild_d(Operand(rsp, 0));
-  __ pop(kScratchRegister);
-  __ jmp(&done_load_lhs);
-
-  __ bind(&load_smi_rhs);
-  __ SmiToInteger64(kScratchRegister, rhs);
-  __ push(kScratchRegister);
-  __ fild_d(Operand(rsp, 0));
-  __ pop(kScratchRegister);
+  __ bind(&rdx_is_object);
+  IntegerConvert(masm, rdx, rdx);  // Uses rdi, rcx and rbx.
+  __ JumpIfNotSmi(rax, &rax_is_object);
+  __ SmiToInteger32(rcx, rax);
 
   __ bind(&done);
-}
-
-
-void FloatingPointHelper::CheckNumberOperands(MacroAssembler* masm,
-                                              Label* non_float) {
-  Label test_other, done;
-  // Test if both operands are numbers (heap_numbers or smis).
-  // If not, jump to label non_float.
-  __ JumpIfSmi(rdx, &test_other);  // argument in rdx is OK
-  __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), Factory::heap_number_map());
-  __ j(not_equal, non_float);  // The argument in rdx is not a number.
-
-  __ bind(&test_other);
-  __ JumpIfSmi(rax, &done);  // argument in rax is OK
-  __ Cmp(FieldOperand(rax, HeapObject::kMapOffset), Factory::heap_number_map());
-  __ j(not_equal, non_float);  // The argument in rax is not a number.
-
-  // Fall-through: Both operands are numbers.
-  __ bind(&done);
+  __ movl(rax, rdx);
 }
 
 
@@ -10447,15 +10375,15 @@
       }
       // left is rdx, right is rax.
       __ AllocateHeapNumber(rbx, rcx, slow);
-      FloatingPointHelper::LoadFloatOperandsFromSmis(masm, xmm4, xmm5);
+      FloatingPointHelper::LoadSSE2SmiOperands(masm);
       switch (op_) {
-        case Token::ADD: __ addsd(xmm4, xmm5); break;
-        case Token::SUB: __ subsd(xmm4, xmm5); break;
-        case Token::MUL: __ mulsd(xmm4, xmm5); break;
-        case Token::DIV: __ divsd(xmm4, xmm5); break;
+        case Token::ADD: __ addsd(xmm0, xmm1); break;
+        case Token::SUB: __ subsd(xmm0, xmm1); break;
+        case Token::MUL: __ mulsd(xmm0, xmm1); break;
+        case Token::DIV: __ divsd(xmm0, xmm1); break;
         default: UNREACHABLE();
       }
-      __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm4);
+      __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
       __ movq(rax, rbx);
       GenerateReturn(masm);
     }
@@ -10518,22 +10446,23 @@
         Label not_floats;
         // rax: y
         // rdx: x
-      if (static_operands_type_.IsNumber() && FLAG_debug_code) {
-        // Assert at runtime that inputs are only numbers.
-        __ AbortIfNotNumber(rdx);
-        __ AbortIfNotNumber(rax);
-      } else {
-        FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
-      }
-        // Fast-case: Both operands are numbers.
-        // xmm4 and xmm5 are volatile XMM registers.
-        FloatingPointHelper::LoadFloatOperands(masm, xmm4, xmm5);
+        ASSERT(!static_operands_type_.IsSmi());
+        if (static_operands_type_.IsNumber()) {
+          if (FLAG_debug_code) {
+            // Assert at runtime that inputs are only numbers.
+            __ AbortIfNotNumber(rdx);
+            __ AbortIfNotNumber(rax);
+          }
+          FloatingPointHelper::LoadSSE2NumberOperands(masm);
+        } else {
+          FloatingPointHelper::LoadSSE2UnknownOperands(masm, &call_runtime);
+        }
 
         switch (op_) {
-          case Token::ADD: __ addsd(xmm4, xmm5); break;
-          case Token::SUB: __ subsd(xmm4, xmm5); break;
-          case Token::MUL: __ mulsd(xmm4, xmm5); break;
-          case Token::DIV: __ divsd(xmm4, xmm5); break;
+          case Token::ADD: __ addsd(xmm0, xmm1); break;
+          case Token::SUB: __ subsd(xmm0, xmm1); break;
+          case Token::MUL: __ mulsd(xmm0, xmm1); break;
+          case Token::DIV: __ divsd(xmm0, xmm1); break;
           default: UNREACHABLE();
         }
         // Allocate a heap number, if needed.
@@ -10568,7 +10497,7 @@
             break;
           default: UNREACHABLE();
         }
-        __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4);
+        __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
         GenerateReturn(masm);
         __ bind(&not_floats);
         if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
@@ -10593,34 +10522,52 @@
       case Token::SAR:
       case Token::SHL:
       case Token::SHR: {
-        Label skip_allocation, non_smi_result;
-        FloatingPointHelper::LoadAsIntegers(masm, &call_runtime);
+        Label skip_allocation, non_smi_shr_result;
+        Register heap_number_map = r9;
+        __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+        if (static_operands_type_.IsNumber()) {
+          if (FLAG_debug_code) {
+            // Assert at runtime that inputs are only numbers.
+            __ AbortIfNotNumber(rdx);
+            __ AbortIfNotNumber(rax);
+          }
+          FloatingPointHelper::LoadNumbersAsIntegers(masm);
+        } else {
+          FloatingPointHelper::LoadAsIntegers(masm,
+                                              &call_runtime,
+                                              heap_number_map);
+        }
         switch (op_) {
           case Token::BIT_OR:  __ orl(rax, rcx); break;
           case Token::BIT_AND: __ andl(rax, rcx); break;
           case Token::BIT_XOR: __ xorl(rax, rcx); break;
           case Token::SAR: __ sarl_cl(rax); break;
           case Token::SHL: __ shll_cl(rax); break;
-          case Token::SHR: __ shrl_cl(rax); break;
+          case Token::SHR: {
+            __ shrl_cl(rax);
+            // Check if result is negative. This can only happen for a shift
+            // by zero, which also does not update the sign flag.
+            __ testl(rax, rax);
+            __ j(negative, &non_smi_shr_result);
+            break;
+          }
           default: UNREACHABLE();
         }
-        if (op_ == Token::SHR) {
-          // Check if result is negative. This can only happen for a shift
-          // by zero, which also doesn't update the sign flag.
-          __ testl(rax, rax);
-          __ j(negative, &non_smi_result);
-        }
-        __ JumpIfNotValidSmiValue(rax, &non_smi_result);
-        // Tag smi result, if possible, and return.
+
+        STATIC_ASSERT(kSmiValueSize == 32);
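+        // With 32-bit smi values any int32 result fits in a smi, so no
+        // range check is needed before tagging.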
+        // Tag smi result and return.
         __ Integer32ToSmi(rax, rax);
         GenerateReturn(masm);
 
-        // All ops except SHR return a signed int32 that we load in
-        // a HeapNumber.
-        if (op_ != Token::SHR && non_smi_result.is_linked()) {
-          __ bind(&non_smi_result);
+        // All bit-ops except SHR return a signed int32 that can be
+        // returned immediately as a smi.
+        // We might need to allocate a HeapNumber if we shift a negative
+        // number right by zero (i.e., convert to UInt32).
+        if (op_ == Token::SHR) {
+          ASSERT(non_smi_shr_result.is_linked());
+          __ bind(&non_smi_shr_result);
           // Allocate a heap number if needed.
-          __ movsxlq(rbx, rax);  // rbx: sign extended 32-bit result
+          __ movl(rbx, rax);  // rbx holds result value (uint32 value as int64).
           switch (mode_) {
             case OVERWRITE_LEFT:
             case OVERWRITE_RIGHT:
@@ -10631,22 +10578,33 @@
               __ JumpIfNotSmi(rax, &skip_allocation);
               // Fall through!
             case NO_OVERWRITE:
-              __ AllocateHeapNumber(rax, rcx, &call_runtime);
+              // Allocate heap number in new space.
+              // Not using AllocateHeapNumber macro in order to reuse
+              // already loaded heap_number_map.
+              __ AllocateInNewSpace(HeapNumber::kSize,
+                                    rax,
+                                    rcx,
+                                    no_reg,
+                                    &call_runtime,
+                                    TAG_OBJECT);
+              // Set the map.
+              if (FLAG_debug_code) {
+                __ AbortIfNotRootValue(heap_number_map,
+                                       Heap::kHeapNumberMapRootIndex,
+                                       "HeapNumberMap register clobbered.");
+              }
+              __ movq(FieldOperand(rax, HeapObject::kMapOffset),
+                      heap_number_map);
               __ bind(&skip_allocation);
               break;
             default: UNREACHABLE();
           }
           // Store the result in the HeapNumber and return.
-          __ movq(Operand(rsp, 1 * kPointerSize), rbx);
-          __ fild_s(Operand(rsp, 1 * kPointerSize));
-          __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
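+          // rbx was zero-extended by the movl above, so it holds the uint32
+          // result as a non-negative int64 and the conversion is exact.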
+          __ cvtqsi2sd(xmm0, rbx);
+          __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
           GenerateReturn(masm);
         }
 
-        // SHR should return uint32 - go to runtime for non-smi/negative result.
-        if (op_ == Token::SHR) {
-          __ bind(&non_smi_result);
-        }
         break;
       }
       default: UNREACHABLE(); break;
@@ -10679,7 +10637,7 @@
       Label not_strings, both_strings, not_string1, string1, string1_smi2;
 
       // If this stub has already generated FP-specific code then the arguments
-      // are already in rdx, rax
+      // are already in rdx and rax.
       if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
         GenerateLoadArguments(masm);
       }
@@ -10828,19 +10786,13 @@
   __ push(rax);
 
   // Push this stub's key.
-  __ movq(rax, Immediate(MinorKey()));
-  __ Integer32ToSmi(rax, rax);
-  __ push(rax);
+  __ Push(Smi::FromInt(MinorKey()));
 
   // Although the operation and the type info are encoded into the key,
   // the encoding is opaque, so push them too.
-  __ movq(rax, Immediate(op_));
-  __ Integer32ToSmi(rax, rax);
-  __ push(rax);
+  __ Push(Smi::FromInt(op_));
 
-  __ movq(rax, Immediate(runtime_operands_type_));
-  __ Integer32ToSmi(rax, rax);
-  __ push(rax);
+  __ Push(Smi::FromInt(runtime_operands_type_));
 
   __ push(rcx);
 
@@ -11208,16 +11160,17 @@
   // If result is not supposed to be flat, allocate a cons string object. If
   // both strings are ascii the result is an ascii cons string.
   // rax: first string
-  // ebx: length of resulting flat string
+  // rbx: length of resulting flat string
   // rdx: second string
   // r8: instance type of first string
   // r9: instance type of second string
-  Label non_ascii, allocated;
+  Label non_ascii, allocated, ascii_data;
   __ movl(rcx, r8);
   __ and_(rcx, r9);
   ASSERT(kStringEncodingMask == kAsciiStringTag);
   __ testl(rcx, Immediate(kAsciiStringTag));
   __ j(zero, &non_ascii);
+  __ bind(&ascii_data);
  // Allocate an ascii cons string.
   __ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime);
   __ bind(&allocated);
@@ -11231,6 +11184,18 @@
   __ IncrementCounter(&Counters::string_add_native, 1);
   __ ret(2 * kPointerSize);
   __ bind(&non_ascii);
+  // At least one of the strings is two-byte. Check whether it happens
+  // to contain only ascii characters.
+  // rcx: first instance type AND second instance type.
+  // r8: first instance type.
+  // r9: second instance type.
+  __ testb(rcx, Immediate(kAsciiDataHintMask));
+  __ j(not_zero, &ascii_data);
+  __ xor_(r8, r9);
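+  // After the xor a bit is set iff the instance types differ in it. Both
+  // bits set means one string is ascii and the other is the two-byte string
+  // carrying the ascii data hint, so the combined data is ascii as well.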
+  ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+  __ andb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
+  __ cmpb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
+  __ j(equal, &ascii_data);
   // Allocate a two byte cons string.
   __ AllocateConsString(rcx, rdi, no_reg, &string_add_runtime);
   __ jmp(&allocated);
@@ -11238,7 +11203,7 @@
   // Handle creating a flat result. First check that both strings are not
   // external strings.
   // rax: first string
-  // ebx: length of resulting flat string as smi
+  // rbx: length of resulting flat string as smi
   // rdx: second string
   // r8: instance type of first string
  // r9: instance type of second string
@@ -11254,7 +11219,7 @@
   __ j(equal, &string_add_runtime);
   // Now check if both strings are ascii strings.
   // rax: first string
-  // ebx: length of resulting flat string
+  // rbx: length of resulting flat string
   // rdx: second string
   // r8: instance type of first string
   // r9: instance type of second string
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index d99ea84..1df1de3 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -1729,6 +1729,30 @@
 }
 
 
+void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
+                                            Expression* key,
+                                            RelocInfo::Mode mode) {
+  // Code common for calls using the IC.
+  ZoneList<Expression*>* args = expr->arguments();
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    VisitForValue(args->at(i), kStack);
+  }
+  VisitForValue(key, kAccumulator);
+  __ movq(rcx, rax);
+  // Record source position for debugger.
+  SetSourcePosition(expr->position());
+  // Call the IC initialization code.
+  InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+  Handle<Code> ic = CodeGenerator::ComputeKeyedCallInitialize(arg_count,
+                                                              in_loop);
+  __ Call(ic, mode);
+  // Restore context register.
+  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  Apply(context_, rax);
+}
+
+
 void FullCodeGenerator::EmitCallWithStub(Call* expr) {
   // Code common for calls using the call stub.
   ZoneList<Expression*>* args = expr->arguments();
@@ -1820,30 +1844,32 @@
       VisitForValue(prop->obj(), kStack);
       EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
     } else {
-      // Call to a keyed property, use keyed load IC followed by function
-      // call.
+      // Call to a keyed property.
+      // For a synthetic property use keyed load IC followed by function call,
+      // for a regular property use KeyedCallIC.
       VisitForValue(prop->obj(), kStack);
-      VisitForValue(prop->key(), kAccumulator);
-      __ movq(rdx, Operand(rsp, 0));
-      // Record source code position for IC call.
-      SetSourcePosition(prop->position());
-      Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-      __ call(ic, RelocInfo::CODE_TARGET);
-      // By emitting a nop we make sure that we do not have a "test rax,..."
-      // instruction after the call it is treated specially by the LoadIC code.
-      __ nop();
-      // Pop receiver.
-      __ pop(rbx);
-      // Push result (function).
-      __ push(rax);
-      // Push receiver object on stack.
       if (prop->is_synthetic()) {
+        VisitForValue(prop->key(), kAccumulator);
+        __ movq(rdx, Operand(rsp, 0));
+        // Record source code position for IC call.
+        SetSourcePosition(prop->position());
+        Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+        __ call(ic, RelocInfo::CODE_TARGET);
+        // By emitting a nop we make sure that we do not have a "test rax,..."
+        // instruction after the call as it is treated specially
+        // by the LoadIC code.
+        __ nop();
+        // Pop receiver.
+        __ pop(rbx);
+        // Push result (function).
+        __ push(rax);
+        // Push receiver object on stack.
         __ movq(rcx, CodeGenerator::GlobalObject());
         __ push(FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
+        EmitCallWithStub(expr);
       } else {
-        __ push(rbx);
+        EmitKeyedCallWithIC(expr, prop->key(), RelocInfo::CODE_TARGET);
       }
-      EmitCallWithStub(expr);
     }
   } else {
     // Call to some other expression.  If the expression is an anonymous
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 89c21cb..6e77c89 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -57,19 +57,21 @@
                                    Register r2,
                                    Register name,
                                    Register r4,
+                                   Register result,
                                    DictionaryCheck check_dictionary) {
   // Register use:
   //
-  // r0   - used to hold the property dictionary.
+  // r0   - used to hold the property dictionary and is unchanged.
   //
-  // r1   - initially the receiver.
-  //      - unchanged on any jump to miss_label.
-  //      - holds the result on exit.
+  // r1   - used to hold the receiver and is unchanged.
   //
   // r2   - used to hold the capacity of the property dictionary.
   //
   // name - holds the name of the property and is unchanged.
+  //
   // r4   - used to hold the index into the property dictionary.
+  //
+  // result - holds the result on exit if the load succeeded.
 
   Label done;
 
@@ -148,7 +150,7 @@
 
   // Get the value at the masked, scaled index.
   const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ movq(r1,
+  __ movq(result,
           Operand(r0, r4, times_pointer_size, kValueOffset - kHeapObjectTag));
 }
 
@@ -159,14 +161,15 @@
                                          Register key,
                                          Register r0,
                                          Register r1,
-                                         Register r2) {
+                                         Register r2,
+                                         Register result) {
   // Register use:
   //
-  // elements - holds the slow-case elements of the receiver and is unchanged.
+  // elements - holds the slow-case elements of the receiver on entry.
+  //            Unchanged unless 'result' is the same register.
   //
-  // key      - holds the smi key on entry and is unchanged if a branch is
-  //            performed to the miss label.
-  //            Holds the result on exit if the load succeeded.
+  // key      - holds the smi key on entry.
+  //            Unchanged unless 'result' is the same register.
   //
   // Scratch registers:
   //
@@ -175,6 +178,12 @@
   // r1 - used to hold the capacity mask of the dictionary
   //
   // r2 - used for the index into the dictionary.
+  //
+  // result - holds the result on exit if the load succeeded.
+  //          Allowed to be the same as 'key' or 'elements'.
+  //          Unchanged on bailout so 'key' and 'elements' can be used
+  //          in further computation.
+
   Label done;
 
   // Compute the hash code from the untagged key.  This must be kept in sync
@@ -246,7 +255,7 @@
   // Get the value at the masked, scaled index.
   const int kValueOffset =
       NumberDictionary::kElementsStartOffset + kPointerSize;
-  __ movq(key, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
+  __ movq(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
 }
 
 
@@ -346,6 +355,142 @@
 }
 
 
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for regular JS object.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+                                           Register receiver,
+                                           Register map,
+                                           Label* slow) {
+  // Register use:
+  //   receiver - holds the receiver and is unchanged.
+  // Scratch registers:
+  //   map - used to hold the map of the receiver.
+
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(receiver, slow);
+
+  // Check that the object is some kind of JS object EXCEPT JS Value type.
+  // In the case that the object is a value-wrapper object,
+  // we enter the runtime system to make sure that indexing
+  // into string objects work as intended.
+  ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+  __ CmpObjectType(receiver, JS_OBJECT_TYPE, map);
+  __ j(below, slow);
+
+  // Check bit field.
+  __ testb(FieldOperand(map, Map::kBitFieldOffset),
+           Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
+  __ j(not_zero, slow);
+}
+
+
+// Loads an indexed element from a fast case array.
+static void GenerateFastArrayLoad(MacroAssembler* masm,
+                                  Register receiver,
+                                  Register key,
+                                  Register elements,
+                                  Register scratch,
+                                  Register result,
+                                  Label* not_fast_array,
+                                  Label* out_of_range) {
+  // Register use:
+  //
+  // receiver - holds the receiver on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // key      - holds the smi key on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // elements - holds the elements of the receiver on exit.
+  //
+  // result   - holds the result on exit if the load succeeded.
+  //            Allowed to be the same as 'receiver' or 'key'.
+  //            Unchanged on bailout so 'receiver' and 'key' can be safely
+  //            used in further computation.
+  //
+  // Scratch registers:
+  //
+  //   scratch - used to hold elements of the receiver and the loaded value.
+
+  __ movq(elements, FieldOperand(receiver, JSObject::kElementsOffset));
+  // Check that the object is in fast mode (not dictionary).
+  __ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
+                 Heap::kFixedArrayMapRootIndex);
+  __ j(not_equal, not_fast_array);
+  // Check that the key (index) is within bounds.
+  __ SmiCompare(key, FieldOperand(elements, FixedArray::kLengthOffset));
+  // Unsigned comparison rejects negative indices.
+  __ j(above_equal, out_of_range);
+  // Fast case: Do the load.
+  SmiIndex index = masm->SmiToIndex(scratch, key, kPointerSizeLog2);
+  __ movq(scratch, FieldOperand(elements,
+                                index.reg,
+                                index.scale,
+                                FixedArray::kHeaderSize));
+  __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
+  // In case the loaded value is the_hole we have to consult GetProperty
+  // to ensure the prototype chain is searched.
+  __ j(equal, out_of_range);
+  if (!result.is(scratch)) {
+    __ movq(result, scratch);
+  }
+}
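+
+// The single unsigned comparison above folds both bounds checks into one;
+// as a C++ sketch (illustrative only):
+//
+//   bool InBounds(int32_t index, int32_t length) {
+//     return static_cast<uint32_t>(index) < static_cast<uint32_t>(length);
+//   }
+//
+// A negative index wraps to a large unsigned value and is rejected by the
+// same branch that catches out-of-range positive indices.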
+
+
+// Checks whether a key is an array index string or a symbol string.
+// Falls through if the key is a symbol.
+static void GenerateKeyStringCheck(MacroAssembler* masm,
+                                   Register key,
+                                   Register map,
+                                   Register hash,
+                                   Label* index_string,
+                                   Label* not_symbol) {
+  // Register use:
+  //   key - holds the key and is unchanged. Assumed to be non-smi.
+  // Scratch registers:
+  //   map - used to hold the map of the key.
+  //   hash - used to hold the hash of the key.
+  __ CmpObjectType(key, FIRST_NONSTRING_TYPE, map);
+  __ j(above_equal, not_symbol);
+  // Is the string an array index, with cached numeric value?
+  __ movl(hash, FieldOperand(key, String::kHashFieldOffset));
+  __ testl(hash, Immediate(String::kContainsCachedArrayIndexMask));
+  __ j(zero, index_string);  // The value in hash is used at jump target.
+
+  // Is the string a symbol?
+  ASSERT(kSymbolTag != 0);
+  __ testb(FieldOperand(map, Map::kInstanceTypeOffset),
+           Immediate(kIsSymbolMask));
+  __ j(zero, not_symbol);
+}
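+
+// A sketch of the dispatch above on the key's hash field, assuming the
+// String::hash_field() accessor from the V8 sources (illustrative only):
+//
+//   uint32_t field = key->hash_field();
+//   if ((field & String::kContainsCachedArrayIndexMask) == 0) {
+//     // The field caches the numeric value of an index string.
+//     goto index_string;
+//   }
+//   if (!key->IsSymbol()) goto not_symbol;
+//   // Otherwise fall through: the key is a symbol.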
+
+
+// Picks out an array index from the hash field.
+static void GenerateIndexFromHash(MacroAssembler* masm,
+                                  Register key,
+                                  Register hash) {
+  // Register use:
+  //   key - holds the overwritten key on exit.
+  //   hash - holds the key's hash. Clobbered.
+
+  // The assert checks that the constants for the maximum number of digits
+  // for an array index cached in the hash field and the number of bits
+  // reserved for it do not conflict.
+  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+         (1 << String::kArrayIndexValueBits));
+  // We want the smi-tagged index in key. Even if we subsequently go to
+  // the slow case, converting the key to a smi is always valid.
+  // key: string key
+  // hash: key's hash field, including its array index value.
+  __ and_(hash, Immediate(String::kArrayIndexValueMask));
+  __ shr(hash, Immediate(String::kHashShift));
+  // Here we actually clobber the key which will be used if calling into
+  // runtime later. However, as the new key is the numeric value of a string
+  // key, there is no difference in using either key.
+  __ Integer32ToSmi(key, hash);
+}
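+
+// The extraction above as a C++ expression (illustrative only):
+//
+//   uint32_t index =
+//       (hash_field & String::kArrayIndexValueMask) >> String::kHashShift;
+//
+// The result is then smi-tagged so that both the fast path and a later
+// runtime call can use it as the key.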
+
+
 void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax    : key
@@ -355,46 +500,22 @@
   Label slow, check_string, index_smi, index_string;
   Label check_pixel_array, probe_dictionary, check_number_dictionary;
 
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(rdx, &slow);
-
-  // Check that the object is some kind of JS object EXCEPT JS Value type.
-  // In the case that the object is a value-wrapper object,
-  // we enter the runtime system to make sure that indexing
-  // into string objects work as intended.
-  ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
-  __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
-  __ j(below, &slow);
-
-  // Check bit field.
-  __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
-           Immediate(kSlowCaseBitFieldMask));
-  __ j(not_zero, &slow);
+  GenerateKeyedLoadReceiverCheck(masm, rdx, rcx, &slow);
 
   // Check that the key is a smi.
   __ JumpIfNotSmi(rax, &check_string);
   __ bind(&index_smi);
   // Now the key is known to be a smi. This place is also jumped to from below
   // where a numeric string is converted to a smi.
-  __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
-  // Check that the object is in fast mode (not dictionary).
-  __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
-                 Heap::kFixedArrayMapRootIndex);
-  __ j(not_equal, &check_pixel_array);
-  // Check that the key (index) is within bounds.
-  __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
-  __ j(above_equal, &slow);  // Unsigned comparison rejects negative indices.
-  // Fast case: Do the load.
-  SmiIndex index = masm->SmiToIndex(rbx, rax, kPointerSizeLog2);
-  __ movq(rbx, FieldOperand(rcx,
-                            index.reg,
-                            index.scale,
-                            FixedArray::kHeaderSize));
-  __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
-  // In case the loaded value is the_hole we have to consult GetProperty
-  // to ensure the prototype chain is searched.
-  __ j(equal, &slow);
-  __ movq(rax, rbx);
+
+  GenerateFastArrayLoad(masm,
+                        rdx,
+                        rax,
+                        rcx,
+                        rbx,
+                        rax,
+                        &check_pixel_array,
+                        &slow);
   __ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
   __ ret(0);
 
@@ -423,7 +544,7 @@
   __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
                  Heap::kHashTableMapRootIndex);
   __ j(not_equal, &slow);
-  GenerateNumberDictionaryLoad(masm, &slow, rcx, rax, rbx, r9, rdi);
+  GenerateNumberDictionaryLoad(masm, &slow, rcx, rax, rbx, r9, rdi, rax);
   __ ret(0);
 
   __ bind(&slow);
@@ -434,22 +555,7 @@
   GenerateRuntimeGetProperty(masm);
 
   __ bind(&check_string);
-  // The key is not a smi.
-  // Is it a string?
-  // rdx: receiver
-  // rax: key
-  __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rcx);
-  __ j(above_equal, &slow);
-  // Is the string an array index, with cached numeric value?
-  __ movl(rbx, FieldOperand(rax, String::kHashFieldOffset));
-  __ testl(rbx, Immediate(String::kContainsCachedArrayIndexMask));
-  __ j(zero, &index_string);  // The value in rbx is used at jump target.
-
-  // Is the string a symbol?
-  ASSERT(kSymbolTag != 0);
-  __ testb(FieldOperand(rcx, Map::kInstanceTypeOffset),
-           Immediate(kIsSymbolMask));
-  __ j(zero, &slow);
+  GenerateKeyStringCheck(masm, rax, rcx, rbx, &index_string, &slow);
 
   // If the receiver is a fast-case object, check the keyed lookup
   // cache. Otherwise probe the dictionary leaving result in rcx.
@@ -509,29 +615,13 @@
                          rcx,
                          rax,
                          rdi,
+                         rax,
                          DICTIONARY_CHECK_DONE);
-  __ movq(rax, rdx);
   __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
   __ ret(0);
-  // If the hash field contains an array index pick it out. The assert checks
-  // that the constants for the maximum number of digits for an array index
-  // cached in the hash field and the number of bits reserved for it does not
-  // conflict.
-  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
-         (1 << String::kArrayIndexValueBits));
+
   __ bind(&index_string);
-  // We want the smi-tagged index in rax. Even if we subsequently go to
-  // the slow case, converting the key to a smi is always valid.
-  // rdx: receiver
-  // rax: key (a string)
-  // rbx: key's hash field, including its array index value.
-  __ and_(rbx, Immediate(String::kArrayIndexValueMask));
-  __ shr(rbx, Immediate(String::kHashShift));
-  // Here we actually clobber the key (rax) which will be used if calling into
-  // runtime later. However as the new key is the numeric value of a string key
-  // there is no difference in using either key.
-  __ Integer32ToSmi(rax, rbx);
-  // Now jump to the place where smi keys are handled.
+  GenerateIndexFromHash(masm, rax, rbx);
   __ jmp(&index_smi);
 }
 
@@ -803,19 +893,20 @@
   //  -- rdx     : receiver
   //  -- rsp[0]  : return address
   // -----------------------------------
-  Label slow, fast, array, extra, check_pixel_array;
+  Label slow, slow_with_tagged_index, fast, array, extra, check_pixel_array;
 
   // Check that the object isn't a smi.
-  __ JumpIfSmi(rdx, &slow);
+  __ JumpIfSmi(rdx, &slow_with_tagged_index);
   // Get the map from the receiver.
   __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
   // Check that the receiver does not require access checks.  We need
   // to do this because this generic stub does not perform map checks.
   __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
            Immediate(1 << Map::kIsAccessCheckNeeded));
-  __ j(not_zero, &slow);
+  __ j(not_zero, &slow_with_tagged_index);
   // Check that the key is a smi.
-  __ JumpIfNotSmi(rcx, &slow);
+  __ JumpIfNotSmi(rcx, &slow_with_tagged_index);
+  __ SmiToInteger32(rcx, rcx);
 
   __ CmpInstanceType(rbx, JS_ARRAY_TYPE);
   __ j(equal, &array);
@@ -826,27 +917,30 @@
   // Object case: Check key against length in the elements array.
   // rax: value
   // rdx: JSObject
-  // rcx: index (as a smi)
+  // rcx: index
   __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
   // Check that the object is in fast mode (not dictionary).
   __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
                  Heap::kFixedArrayMapRootIndex);
   __ j(not_equal, &check_pixel_array);
-  __ SmiCompare(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
+  __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
   // rax: value
   // rbx: FixedArray
-  // rcx: index (as a smi)
-  __ j(below, &fast);
+  // rcx: index
+  __ j(above, &fast);
 
   // Slow case: call runtime.
   __ bind(&slow);
+  __ Integer32ToSmi(rcx, rcx);
+  __ bind(&slow_with_tagged_index);
   GenerateRuntimeSetProperty(masm);
+  // Control never returns to this point.
 
   // Check whether the elements is a pixel array.
   // rax: value
   // rdx: receiver
   // rbx: receiver's elements array
-  // rcx: index (as a smi), zero-extended.
+  // rcx: index, zero-extended.
   __ bind(&check_pixel_array);
   __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
                  Heap::kPixelArrayMapRootIndex);
@@ -854,21 +948,20 @@
   // Check that the value is a smi. If a conversion is needed call into the
   // runtime to convert and clamp.
   __ JumpIfNotSmi(rax, &slow);
-  __ SmiToInteger32(rdi, rcx);
-  __ cmpl(rdi, FieldOperand(rbx, PixelArray::kLengthOffset));
+  __ cmpl(rcx, FieldOperand(rbx, PixelArray::kLengthOffset));
   __ j(above_equal, &slow);
   // No more bailouts to slow case on this path, so key not needed.
-  __ SmiToInteger32(rcx, rax);
+  __ SmiToInteger32(rdi, rax);
   {  // Clamp the value to [0..255].
     Label done;
-    __ testl(rcx, Immediate(0xFFFFFF00));
+    __ testl(rdi, Immediate(0xFFFFFF00));
     __ j(zero, &done);
-    __ setcc(negative, rcx);  // 1 if negative, 0 if positive.
-    __ decb(rcx);  // 0 if negative, 255 if positive.
+    __ setcc(negative, rdi);  // 1 if negative, 0 if positive.
+    __ decb(rdi);  // 0 if negative, 255 if positive.
     __ bind(&done);
   }
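+  // setcc(negative, rdi) writes 1 when the value was negative and 0
+  // otherwise; decb then yields 0 (negative) or 255 (too large), giving a
+  // branch-free saturation. A C++ sketch of the whole clamp (illustrative):
+  //
+  //   uint8_t Clamp(int32_t v) {
+  //     if ((v & ~0xFF) == 0) return static_cast<uint8_t>(v);
+  //     return v < 0 ? 0 : 255;
+  //   }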
   __ movq(rbx, FieldOperand(rbx, PixelArray::kExternalPointerOffset));
-  __ movb(Operand(rbx, rdi, times_1, 0), rcx);
+  __ movb(Operand(rbx, rcx, times_1, 0), rdi);
   __ ret(0);
 
   // Extra capacity case: Check if there is extra capacity to
@@ -878,14 +971,14 @@
   // rax: value
   // rdx: receiver (a JSArray)
   // rbx: receiver's elements array (a FixedArray)
-  // rcx: index (as a smi)
+  // rcx: index
  // flags: compare (rdx.length(), rcx)
   __ j(not_equal, &slow);  // do not leave holes in the array
-  __ SmiCompare(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
-  __ j(above_equal, &slow);
+  __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
+  __ j(below_equal, &slow);
   // Increment index to get new length.
-  __ SmiAddConstant(rdi, rcx, Smi::FromInt(1));
-  __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
+  __ leal(rdi, Operand(rcx, 1));
+  __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
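+  // leal computes rcx + 1 into rdi without modifying rcx, which is still
+  // needed as the element index after the jump to &fast.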
   __ jmp(&fast);
 
   // Array case: Get the length and the elements array from the JS
@@ -894,7 +987,7 @@
   __ bind(&array);
   // rax: value
   // rdx: receiver (a JSArray)
-  // rcx: index (as a smi)
+  // rcx: index
   __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
   __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
                  Heap::kFixedArrayMapRootIndex);
@@ -902,26 +995,22 @@
 
   // Check the key against the length in the array, compute the
   // address to store into and fall through to fast case.
-  __ SmiCompare(FieldOperand(rdx, JSArray::kLengthOffset), rcx);
+  __ SmiCompareInteger32(FieldOperand(rdx, JSArray::kLengthOffset), rcx);
   __ j(below_equal, &extra);
 
   // Fast case: Do the store.
   __ bind(&fast);
   // rax: value
   // rbx: receiver's elements array (a FixedArray)
-  // rcx: index (as a smi)
+  // rcx: index
   Label non_smi_value;
-  __ JumpIfNotSmi(rax, &non_smi_value);
-  SmiIndex index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
-  __ movq(FieldOperand(rbx, index.reg, index.scale, FixedArray::kHeaderSize),
+  __ movq(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
           rax);
+  __ JumpIfNotSmi(rax, &non_smi_value);
   __ ret(0);
   __ bind(&non_smi_value);
   // Slow case that needs to retain rcx for use by RecordWrite.
   // Update write barrier for the elements array address.
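+  // The value itself was already stored above; smi values never point into
+  // the heap, so they skipped the barrier and returned directly.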
-  SmiIndex index2 = masm->SmiToIndex(kScratchRegister, rcx, kPointerSizeLog2);
-  __ movq(FieldOperand(rbx, index2.reg, index2.scale, FixedArray::kHeaderSize),
-          rax);
   __ movq(rdx, rax);
   __ RecordWriteNonSmi(rbx, 0, rdx, rcx);
   __ ret(0);
@@ -1109,7 +1198,11 @@
 }
 
 
-void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+// Defined in ic.cc.
+Object* CallIC_Miss(Arguments args);
+
+
+static void GenerateCallMiss(MacroAssembler* masm, int argc, IC::UtilityId id) {
   // ----------- S t a t e -------------
   // rcx                      : function name
   // rsp[0]                   : return address
@@ -1132,7 +1225,7 @@
   // Call the entry.
   CEntryStub stub(1);
   __ movq(rax, Immediate(2));
-  __ movq(rbx, ExternalReference(IC_Utility(kCallIC_Miss)));
+  __ movq(rbx, ExternalReference(IC_Utility(id)));
   __ CallStub(&stub);
 
   // Move result to rdi and exit the internal frame.
@@ -1160,27 +1253,20 @@
 }
 
 
-// Defined in ic.cc.
-Object* CallIC_Miss(Arguments args);
-
-void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+// The generated code does not accept smi keys.
+// The generated code falls through if both probes miss.
+static void GenerateMonomorphicCacheProbe(MacroAssembler* masm,
+                                          int argc,
+                                          Code::Kind kind) {
   // ----------- S t a t e -------------
   // rcx                      : function name
-  // rsp[0]                   : return address
-  // rsp[8]                   : argument argc
-  // rsp[16]                  : argument argc - 1
-  // ...
-  // rsp[argc * 8]            : argument 1
-  // rsp[(argc + 1) * 8]      : argument 0 = receiver
+  // rdx                      : receiver
   // -----------------------------------
   Label number, non_number, non_string, boolean, probe, miss;
 
-  // Get the receiver of the function from the stack; 1 ~ return address.
-  __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
-
   // Probe the stub cache.
   Code::Flags flags =
-      Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
+      Code::ComputeFlags(kind, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
   StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, rax);
 
   // If the stub cache probing failed, the receiver might be a value.
@@ -1219,9 +1305,7 @@
   __ bind(&probe);
   StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, no_reg);
 
-  // Cache miss: Jump to runtime.
   __ bind(&miss);
-  GenerateMiss(masm, argc);
 }
 
 
@@ -1240,19 +1324,16 @@
   // rsp[(argc + 1) * 8]    : argument 0 = receiver
   // -----------------------------------
  // Search dictionary - put result in register rdi.
-  GenerateDictionaryLoad(masm, miss, rax, rdx, rbx, rcx, rdi, CHECK_DICTIONARY);
+  GenerateDictionaryLoad(
+      masm, miss, rax, rdx, rbx, rcx, rdi, rdi, CHECK_DICTIONARY);
 
-  // Move the result to register rdi and check that it isn't a smi.
-  __ movq(rdi, rdx);
-  __ JumpIfSmi(rdx, miss);
-
+  __ JumpIfSmi(rdi, miss);
   // Check that the value is a JavaScript function.
-  __ CmpObjectType(rdx, JS_FUNCTION_TYPE, rdx);
+  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rdx);
   __ j(not_equal, miss);
 
   // Patch the receiver with the global proxy if necessary.
   if (is_global_object) {
-    __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
     __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
     __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
   }
@@ -1263,7 +1344,8 @@
 }
 
 
-void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+// The generated code falls through if the call should be handled by the
+// runtime.
+static void GenerateCallNormal(MacroAssembler* masm, int argc) {
   // ----------- S t a t e -------------
   // rcx                    : function name
   // rsp[0]                 : return address
@@ -1324,24 +1406,197 @@
   __ CheckAccessGlobalProxy(rdx, rax, &miss);
   __ jmp(&invoke);
 
-  // Cache miss: Jump to runtime.
   __ bind(&miss);
+}
+
+
+void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+  // ----------- S t a t e -------------
+  // rcx                      : function name
+  // rsp[0]                   : return address
+  // rsp[8]                   : argument argc
+  // rsp[16]                  : argument argc - 1
+  // ...
+  // rsp[argc * 8]            : argument 1
+  // rsp[(argc + 1) * 8]      : argument 0 = receiver
+  // -----------------------------------
+  GenerateCallMiss(masm, argc, IC::kCallIC_Miss);
+}
+
+
+void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+  // ----------- S t a t e -------------
+  // rcx                      : function name
+  // rsp[0]                   : return address
+  // rsp[8]                   : argument argc
+  // rsp[16]                  : argument argc - 1
+  // ...
+  // rsp[argc * 8]            : argument 1
+  // rsp[(argc + 1) * 8]      : argument 0 = receiver
+  // -----------------------------------
+
+  // Get the receiver of the function from the stack; 1 ~ return address.
+  __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+  GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC);
+  GenerateMiss(masm, argc);
+}
+
+
+void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+  // ----------- S t a t e -------------
+  // rcx                      : function name
+  // rsp[0]                   : return address
+  // rsp[8]                   : argument argc
+  // rsp[16]                  : argument argc - 1
+  // ...
+  // rsp[argc * 8]            : argument 1
+  // rsp[(argc + 1) * 8]      : argument 0 = receiver
+  // -----------------------------------
+
+  GenerateCallNormal(masm, argc);
   GenerateMiss(masm, argc);
 }
 
 
 void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
-  UNREACHABLE();
+  // ----------- S t a t e -------------
+  // rcx                      : function name
+  // rsp[0]                   : return address
+  // rsp[8]                   : argument argc
+  // rsp[16]                  : argument argc - 1
+  // ...
+  // rsp[argc * 8]            : argument 1
+  // rsp[(argc + 1) * 8]      : argument 0 = receiver
+  // -----------------------------------
+
+  GenerateCallMiss(masm, argc, IC::kKeyedCallIC_Miss);
 }
 
 
 void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
-  UNREACHABLE();
+  // ----------- S t a t e -------------
+  // rcx                      : function name
+  // rsp[0]                   : return address
+  // rsp[8]                   : argument argc
+  // rsp[16]                  : argument argc - 1
+  // ...
+  // rsp[argc * 8]            : argument 1
+  // rsp[(argc + 1) * 8]      : argument 0 = receiver
+  // -----------------------------------
+
+  // Get the receiver of the function from the stack; 1 ~ return address.
+  __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+
+  Label do_call, slow_call, slow_load, slow_reload_receiver;
+  Label check_number_dictionary, check_string, lookup_monomorphic_cache;
+  Label index_smi, index_string;
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(rcx, &check_string);
+
+  __ bind(&index_smi);
+  // Now the key is known to be a smi. This place is also jumped to from below
+  // where a numeric string is converted to a smi.
+
+  GenerateKeyedLoadReceiverCheck(masm, rdx, rax, &slow_call);
+
+  GenerateFastArrayLoad(
+      masm, rdx, rcx, rax, rbx, rdi, &check_number_dictionary, &slow_load);
+  __ IncrementCounter(&Counters::keyed_call_generic_smi_fast, 1);
+
+  __ bind(&do_call);
+  // The receiver in rdx is not used after this point.
+  // rcx: key
+  // rdi: function
+
+  // Check that the value in rdi is a JavaScript function.
+  __ JumpIfSmi(rdi, &slow_call);
+  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
+  __ j(not_equal, &slow_call);
+  // Invoke the function.
+  ParameterCount actual(argc);
+  __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
+
+  __ bind(&check_number_dictionary);
+  // rax: elements
+  // rcx: smi key
+  // Check whether the elements object is a number dictionary.
+  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+                 Heap::kHashTableMapRootIndex);
+  __ j(not_equal, &slow_load);
+  __ SmiToInteger32(rbx, rcx);
+  // rbx: untagged index
+  GenerateNumberDictionaryLoad(masm, &slow_load, rax, rcx, rbx, r9, rdi, rdi);
+  __ IncrementCounter(&Counters::keyed_call_generic_smi_dict, 1);
+  __ jmp(&do_call);
+
+  __ bind(&slow_load);
+  // This branch is taken when calling KeyedCallIC_Miss is neither required
+  // nor beneficial.
+  __ IncrementCounter(&Counters::keyed_call_generic_slow_load, 1);
+  __ EnterInternalFrame();
+  __ push(rcx);  // save the key
+  __ push(rdx);  // pass the receiver
+  __ push(rcx);  // pass the key
+  __ CallRuntime(Runtime::kKeyedGetProperty, 2);
+  __ pop(rcx);  // restore the key
+  __ LeaveInternalFrame();
+  __ movq(rdi, rax);
+  __ jmp(&do_call);
+
+  __ bind(&check_string);
+  GenerateKeyStringCheck(masm, rcx, rax, rbx, &index_string, &slow_call);
+
+  // The key is known to be a symbol.
+  // If the receiver is a regular JS object with slow properties then do
+  // a quick inline probe of the receiver's dictionary.
+  // Otherwise do the monomorphic cache probe.
+  GenerateKeyedLoadReceiverCheck(masm, rdx, rax, &lookup_monomorphic_cache);
+
+  __ movq(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
+  __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
+                 Heap::kHashTableMapRootIndex);
+  __ j(not_equal, &lookup_monomorphic_cache);
+
+  GenerateDictionaryLoad(
+      masm, &slow_load, rbx, rdx, rax, rcx, rdi, rdi, DICTIONARY_CHECK_DONE);
+  __ IncrementCounter(&Counters::keyed_call_generic_lookup_dict, 1);
+  __ jmp(&do_call);
+
+  __ bind(&lookup_monomorphic_cache);
+  __ IncrementCounter(&Counters::keyed_call_generic_lookup_cache, 1);
+  GenerateMonomorphicCacheProbe(masm, argc, Code::KEYED_CALL_IC);
+  // Fall through on miss.
+
+  __ bind(&slow_call);
+  // This branch is taken if:
+  // - the receiver requires boxing or an access check,
+  // - the key is neither a smi nor a symbol,
+  // - the value loaded is not a function,
+  // - there is hope that the runtime will create a monomorphic call stub
+  //   that will get fetched next time.
+  __ IncrementCounter(&Counters::keyed_call_generic_slow, 1);
+  GenerateMiss(masm, argc);
+
+  __ bind(&index_string);
+  GenerateIndexFromHash(masm, rcx, rbx);
+  // Now jump to the place where smi keys are handled.
+  __ jmp(&index_smi);
 }
 
 
 void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
-  UNREACHABLE();
+  // ----------- S t a t e -------------
+  // rcx                      : function name
+  // rsp[0]                   : return address
+  // rsp[8]                   : argument argc
+  // rsp[16]                  : argument argc - 1
+  // ...
+  // rsp[argc * 8]            : argument 1
+  // rsp[(argc + 1) * 8]      : argument 0 = receiver
+  // -----------------------------------
+
+  GenerateCallNormal(masm, argc);
+  GenerateMiss(masm, argc);
 }
 
 
@@ -1452,7 +1707,7 @@
   // Search the dictionary placing the result in rax.
   __ bind(&probe);
   GenerateDictionaryLoad(masm, &miss, rdx, rax, rbx,
-                         rcx, rdi, CHECK_DICTIONARY);
+                         rcx, rdi, rax, CHECK_DICTIONARY);
   __ ret(0);
 
   // Global object access: Check access rights.
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 3823cad..24bac7d 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -35,6 +35,7 @@
 #include "macro-assembler-x64.h"
 #include "serialize.h"
 #include "debug.h"
+#include "heap.h"
 
 namespace v8 {
 namespace internal {
@@ -96,8 +97,8 @@
 
   // Compute number of region covering addr. See Page::GetRegionNumberForAddress
   // method for more details.
-  and_(addr, Immediate(Page::kPageAlignmentMask));
   shrl(addr, Immediate(Page::kRegionSizeLog2));
+  andl(addr, Immediate(Page::kPageAlignmentMask >> Page::kRegionSizeLog2));
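+  // Shift-then-mask computes the same region number as mask-then-shift,
+  // since for any mask m and shift s:
+  //   ((addr & m) >> s) == ((addr >> s) & (m >> s))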
 
   // Set dirty mark for region.
   bts(Operand(object, Page::kDirtyFlagOffset), addr);
@@ -106,25 +107,25 @@
 
 // For page containing |object| mark region covering [object+offset] dirty.
 // object is the object being stored into, value is the object being stored.
-// If offset is zero, then the smi_index register contains the array index into
-// the elements array represented as a smi. Otherwise it can be used as a
-// scratch register.
+// If offset is zero, then the index register contains the array index into
+// the elements array represented as a zero-extended int32. Otherwise it can
+// be used as a scratch register.
 // All registers are clobbered by the operation.
 void MacroAssembler::RecordWrite(Register object,
                                  int offset,
                                  Register value,
-                                 Register smi_index) {
+                                 Register index) {
   // The compiled code assumes that record write doesn't change the
   // context register, so we check that none of the clobbered
   // registers are rsi.
-  ASSERT(!object.is(rsi) && !value.is(rsi) && !smi_index.is(rsi));
+  ASSERT(!object.is(rsi) && !value.is(rsi) && !index.is(rsi));
 
   // First, check if a write barrier is even needed. The tests below
   // catch stores of Smis and stores into young gen.
   Label done;
   JumpIfSmi(value, &done);
 
-  RecordWriteNonSmi(object, offset, value, smi_index);
+  RecordWriteNonSmi(object, offset, value, index);
   bind(&done);
 
   // Clobber all input registers when running with the debug-code flag
@@ -135,7 +136,7 @@
   if (FLAG_debug_code) {
     movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-    movq(smi_index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
   }
 }
 
@@ -143,7 +144,7 @@
 void MacroAssembler::RecordWriteNonSmi(Register object,
                                        int offset,
                                        Register scratch,
-                                       Register smi_index) {
+                                       Register index) {
   Label done;
 
   if (FLAG_debug_code) {
@@ -151,6 +152,16 @@
     JumpIfNotSmi(object, &okay);
     Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
     bind(&okay);
+
+    if (offset == 0) {
+      // index must be int32.
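+      // movl zero-extends its 32-bit result to 64 bits, so the cmpq below
+      // compares equal exactly when the upper 32 bits of 'index' are zero,
+      // i.e. when the register holds a zero-extended int32.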
+      Register tmp = index.is(rax) ? rbx : rax;
+      push(tmp);
+      movl(tmp, index);
+      cmpq(tmp, index);
+      Check(equal, "Index register for RecordWrite must be untagged int32.");
+      pop(tmp);
+    }
   }
 
   // Test that the object address is not in the new space. We cannot
@@ -163,16 +174,15 @@
   ASSERT(IsAligned(offset, kPointerSize) ||
          IsAligned(offset + kHeapObjectTag, kPointerSize));
 
-  Register dst = smi_index;
+  Register dst = index;
   if (offset != 0) {
     lea(dst, Operand(object, offset));
   } else {
     // array access: calculate the destination address in the same manner as
     // KeyedStoreIC::GenerateGeneric.
-    SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
     lea(dst, FieldOperand(object,
-                          index.reg,
-                          index.scale,
+                          index,
+                          times_pointer_size,
                           FixedArray::kHeaderSize));
   }
   RecordWriteHelper(object, dst, scratch);
@@ -184,7 +194,7 @@
   if (FLAG_debug_code) {
     movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
-    movq(smi_index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
   }
 }
 
@@ -446,13 +456,8 @@
 
 
 void MacroAssembler::Set(const Operand& dst, int64_t x) {
-  if (x == 0) {
-    xor_(kScratchRegister, kScratchRegister);
-    movq(dst, kScratchRegister);
-  } else if (is_int32(x)) {
+  if (is_int32(x)) {
     movq(dst, Immediate(static_cast<int32_t>(x)));
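+    // movq sign-extends a 32-bit immediate to 64 bits, so one instruction
+    // suffices only when x survives the int32 round-trip:
+    //   x == static_cast<int64_t>(static_cast<int32_t>(x))
+    // (A movl to a memory operand would write just 4 bytes and leave the
+    // upper half of a 64-bit slot stale.)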
-  } else if (is_uint32(x)) {
-    movl(dst, Immediate(static_cast<uint32_t>(x)));
   } else {
     movq(kScratchRegister, x, RelocInfo::NONE);
     movq(dst, kScratchRegister);
@@ -485,6 +490,23 @@
 }
 
 
+void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
+  if (FLAG_debug_code) {
+    testb(dst, Immediate(0x01));
+    Label ok;
+    j(zero, &ok);
+    if (allow_stub_calls()) {
+      Abort("Integer32ToSmiField writing to non-smi location");
+    } else {
+      int3();
+    }
+    bind(&ok);
+  }
+  ASSERT(kSmiShift % kBitsPerByte == 0);
+  movl(Operand(dst, kSmiShift / kBitsPerByte), src);
+}
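+
+// On x64 kSmiShift is 32: a smi keeps its 32-bit payload in the upper half
+// of the tagged word, i.e. (illustrative only)
+//
+//   int64_t tagged = static_cast<int64_t>(value) << 32;
+//
+// so on a little-endian machine the payload can be read or written as an
+// int32 at byte offset kSmiShift / kBitsPerByte == 4, as above and in
+// SmiToInteger64 and SmiCompareInteger32 below.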
+
+
 void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
                                                 Register src,
                                                 int constant) {
@@ -520,6 +542,11 @@
 }
 
 
+void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
+  movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
+}
+
+
 void MacroAssembler::SmiTest(Register src) {
   testq(src, src);
 }
@@ -556,6 +583,11 @@
 }
 
 
+void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
+  cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
+}
+
+
 void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
                                                            Register src,
                                                            int power) {
@@ -696,15 +728,12 @@
       movq(dst, src1);
       addq(dst, src2);
     }
-    Assert(no_overflow, "Smi addition onverflow");
+    Assert(no_overflow, "Smi addition overflow");
   } else if (dst.is(src1)) {
-    addq(dst, src2);
-    Label smi_result;
-    j(no_overflow, &smi_result);
-    // Restore src1.
-    subq(src1, src2);
-    jmp(on_not_smi_result);
-    bind(&smi_result);
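+    // Compute into the scratch register so that src1 (== dst) stays intact
+    // when the addition overflows; the bailout no longer has to undo it.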
+    movq(kScratchRegister, src1);
+    addq(kScratchRegister, src2);
+    j(overflow, on_not_smi_result);
+    movq(dst, kScratchRegister);
   } else {
     movq(dst, src1);
     addq(dst, src2);
@@ -727,15 +756,11 @@
       movq(dst, src1);
       subq(dst, src2);
     }
-    Assert(no_overflow, "Smi substraction onverflow");
+    Assert(no_overflow, "Smi subtraction overflow");
   } else if (dst.is(src1)) {
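+    // cmpq performs the subtraction for its flag effects only, so overflow
+    // is detected before the destructive subq modifies dst.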
+    cmpq(dst, src2);
+    j(overflow, on_not_smi_result);
     subq(dst, src2);
-    Label smi_result;
-    j(no_overflow, &smi_result);
-    // Restore src1.
-    addq(src1, src2);
-    jmp(on_not_smi_result);
-    bind(&smi_result);
   } else {
     movq(dst, src1);
     subq(dst, src2);
@@ -757,15 +782,12 @@
       movq(dst, src1);
       subq(dst, src2);
     }
-    Assert(no_overflow, "Smi substraction onverflow");
+    Assert(no_overflow, "Smi subtraction overflow");
   } else if (dst.is(src1)) {
-    subq(dst, src2);
-    Label smi_result;
-    j(no_overflow, &smi_result);
-    // Restore src1.
-    addq(src1, src2);
-    jmp(on_not_smi_result);
-    bind(&smi_result);
+    movq(kScratchRegister, src1);
+    subq(kScratchRegister, src2);
+    j(overflow, on_not_smi_result);
+    movq(src1, kScratchRegister);
   } else {
     movq(dst, src1);
     subq(dst, src2);
@@ -883,12 +905,9 @@
     ASSERT(!dst.is(kScratchRegister));
 
     Move(kScratchRegister, constant);
-    addq(dst, kScratchRegister);
-    Label result_ok;
-    j(no_overflow, &result_ok);
-    subq(dst, kScratchRegister);
-    jmp(on_not_smi_result);
-    bind(&result_ok);
+    addq(kScratchRegister, dst);
+    j(overflow, on_not_smi_result);
+    movq(dst, kScratchRegister);
   } else {
     Move(dst, constant);
     addq(dst, src);
@@ -910,10 +929,12 @@
   } else {
     // Subtract by adding the negative, to do it in two operations.
     if (constant->value() == Smi::kMinValue) {
-      Move(kScratchRegister, constant);
-      movq(dst, src);
-      subq(dst, kScratchRegister);
+      Move(dst, constant);
+      // Adding the min-value gives the same result as subtracting it; the
+      // two differ only in the overflow flag, which we don't check here.
+      addq(dst, src);
     } else {
+      // Subtract by adding the negation.
       Move(dst, Smi::FromInt(-constant->value()));
       addq(dst, src);
     }
@@ -931,21 +952,32 @@
     }
   } else if (dst.is(src)) {
     ASSERT(!dst.is(kScratchRegister));
-
-    Move(kScratchRegister, constant);
-    subq(dst, kScratchRegister);
-    Label sub_success;
-    j(no_overflow, &sub_success);
-    addq(src, kScratchRegister);
-    jmp(on_not_smi_result);
-    bind(&sub_success);
+    if (constant->value() == Smi::kMinValue) {
+      // Subtracting min-value from any non-negative value will overflow.
+      // We test for non-negativity before doing the subtraction.
+      testq(src, src);
+      j(not_sign, on_not_smi_result);
+      Move(kScratchRegister, constant);
+      subq(dst, kScratchRegister);
+    } else {
+      // Subtract by adding the negation.
+      Move(kScratchRegister, Smi::FromInt(-constant->value()));
+      addq(kScratchRegister, dst);
+      j(overflow, on_not_smi_result);
+      movq(dst, kScratchRegister);
+    }
   } else {
     if (constant->value() == Smi::kMinValue) {
-      Move(kScratchRegister, constant);
-      movq(dst, src);
-      subq(dst, kScratchRegister);
-      j(overflow, on_not_smi_result);
+      // Subtracting min-value from any non-negative value will overflow.
+      // We test for non-negativity before doing the subtraction.
+      testq(src, src);
+      j(not_sign, on_not_smi_result);
+      Move(dst, constant);
+      // Adding the min-value gives the same result as subtracting it; the
+      // two differ only in the overflow flag, which we don't check here.
+      addq(dst, src);
     } else {
+      // Subtract by adding the negation.
       Move(dst, Smi::FromInt(-(constant->value())));
       addq(dst, src);
       j(overflow, on_not_smi_result);
@@ -1695,6 +1727,17 @@
 }
 
 
+void MacroAssembler::AbortIfNotRootValue(Register src,
+                                         Heap::RootListIndex root_value_index,
+                                         const char* message) {
+  ASSERT(!src.is(kScratchRegister));
+  LoadRoot(kScratchRegister, root_value_index);
+  cmpq(src, kScratchRegister);
+  Check(equal, message);
+}
+
+
 Condition MacroAssembler::IsObjectStringType(Register heap_object,
                                              Register map,
                                              Register instance_type) {
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 0acce05..bb0b681 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -203,6 +203,9 @@
   // NOTICE: Destroys the dst register even if unsuccessful!
   void Integer32ToSmi(Register dst, Register src, Label* on_overflow);
 
+  // Stores an integer32 value into a memory field that already holds a smi.
+  void Integer32ToSmiField(const Operand& dst, Register src);
+
   // Adds constant to src and tags the result as a smi.
   // Result must be a valid smi.
   void Integer64PlusConstantToSmi(Register dst, Register src, int constant);
@@ -214,6 +217,7 @@
 
   // Convert smi to 64-bit integer (sign extended if necessary).
   void SmiToInteger64(Register dst, Register src);
+  void SmiToInteger64(Register dst, const Operand& src);
 
   // Multiply a positive smi's integer value by a power of two.
   // Provides result as 64-bit integer value.
@@ -234,6 +238,8 @@
   void SmiCompare(Register dst, const Operand& src);
   void SmiCompare(const Operand& dst, Register src);
   void SmiCompare(const Operand& dst, Smi* src);
+  // Compare the int32 in the src register to the smi stored at dst.
+  void SmiCompareInteger32(const Operand& dst, Register src);
   // Sets sign and zero flags depending on value of smi in register.
   void SmiTest(Register src);
 
@@ -550,6 +556,11 @@
   // Abort execution if argument is not a smi. Used in debug code.
   void AbortIfNotSmi(Register object);
 
+  // Abort execution if argument is not the root value with the given index.
+  void AbortIfNotRootValue(Register src,
+                           Heap::RootListIndex root_value_index,
+                           const char* message);
+
   // ---------------------------------------------------------------------------
   // Exception handling
 
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index cc54470..1e103ac 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -706,6 +706,15 @@
 
 #define __ ACCESS_MASM((masm()))
 
+
+void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
+  if (kind_ == Code::KEYED_CALL_IC) {
+    __ Cmp(rcx, Handle<String>(name));
+    __ j(not_equal, miss);
+  }
+}
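+
+// Keyed call stubs are reached with the property name as a dynamic value in
+// rcx rather than a constant baked in at compile time, so each compiled
+// stub verifies it was entered with the name it was specialized for.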
+
+
 void CallStubCompiler::GenerateMissBranch() {
   Handle<Code> ic = ComputeCallMiss(arguments().immediate(), kind_);
   __ Jump(ic, RelocInfo::CODE_TARGET);
@@ -740,6 +749,8 @@
 
   Label miss_in_smi_check;
 
+  GenerateNameCheck(name, &miss_in_smi_check);
+
   // Get the receiver from the stack.
   const int argc = arguments().immediate();
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
@@ -881,6 +892,8 @@
   // -----------------------------------
   Label miss;
 
+  GenerateNameCheck(name, &miss);
+
   // Get the receiver from the stack.
   const int argc = arguments().immediate();
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
@@ -938,6 +951,8 @@
 
   Label miss;
 
+  GenerateNameCheck(name, &miss);
+
   // Get the receiver from the stack.
   const int argc = arguments().immediate();
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
@@ -970,30 +985,30 @@
       Label call_builtin, exit, with_write_barrier, attempt_to_grow_elements;
 
       // Get the array's length into rax and calculate new length.
-      __ movq(rax, FieldOperand(rdx, JSArray::kLengthOffset));
+      __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
       STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
-      __ SmiAddConstant(rax, rax, Smi::FromInt(argc));
+      __ addl(rax, Immediate(argc));
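+      // The length is kept as an untagged int32 through the arithmetic
+      // and indexing below; it is retagged with Integer32ToSmi before
+      // being returned as the JS-visible result.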
 
       // Get the element's length into rcx.
-      __ movq(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
+      __ SmiToInteger32(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
 
       // Check if we could survive without allocation.
-      __ SmiCompare(rax, rcx);
+      __ cmpl(rax, rcx);
       __ j(greater, &attempt_to_grow_elements);
 
       // Save new length.
-      __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
+      __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
 
       // Push the element.
       __ movq(rcx, Operand(rsp, argc * kPointerSize));
-      SmiIndex index =
-          masm()->SmiToIndex(kScratchRegister, rax, times_pointer_size);
       __ lea(rdx, FieldOperand(rbx,
-                               index.reg, index.scale,
+                               rax, times_pointer_size,
                                FixedArray::kHeaderSize - argc * kPointerSize));
       __ movq(Operand(rdx, 0), rcx);
 
       // Check if value is a smi.
+      __ Integer32ToSmi(rax, rax);  // Return new length as smi.
+
       __ JumpIfNotSmi(rcx, &with_write_barrier);
 
       __ bind(&exit);
@@ -1005,6 +1020,7 @@
 
       RecordWriteStub stub(rbx, rdx, rcx);
       __ CallStub(&stub);
+
       __ ret((argc + 1) * kPointerSize);
 
       __ bind(&attempt_to_grow_elements);
@@ -1019,9 +1035,8 @@
       __ movq(rcx, Operand(rcx, 0));
 
       // Check if it's the end of elements.
-      index = masm()->SmiToIndex(kScratchRegister, rax, times_pointer_size);
       __ lea(rdx, FieldOperand(rbx,
-                               index.reg, index.scale,
+                               rax, times_pointer_size,
                                FixedArray::kHeaderSize - argc * kPointerSize));
       __ cmpq(rdx, rcx);
       __ j(not_equal, &call_builtin);
@@ -1049,8 +1064,9 @@
       // Increment element's and array's sizes.
       __ SmiAddConstant(FieldOperand(rbx, FixedArray::kLengthOffset),
                         Smi::FromInt(kAllocationDelta));
+      // Make new length a smi before returning it.
+      __ Integer32ToSmi(rax, rax);
       __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
-
       // Elements are in new space, so write barrier is not required.
       __ ret((argc + 1) * kPointerSize);
 
@@ -1092,6 +1108,8 @@
 
   Label miss, return_undefined, call_builtin;
 
+  GenerateNameCheck(name, &miss);
+
   // Get the receiver from the stack.
   const int argc = arguments().immediate();
   __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
@@ -1111,28 +1129,26 @@
   __ j(not_equal, &miss);
 
   // Get the array's length into rcx and calculate new length.
-  __ movq(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
-  __ SmiSubConstant(rcx, rcx, Smi::FromInt(1));
-  __ SmiTest(rcx);
+  __ SmiToInteger32(rcx, FieldOperand(rdx, JSArray::kLengthOffset));
+  __ subl(rcx, Immediate(1));
   __ j(negative, &return_undefined);
 
   // Get the last element.
   __ Move(r9, Factory::the_hole_value());
-  SmiIndex index =
-      masm()->SmiToIndex(r8, rcx, times_pointer_size);
   __ movq(rax, FieldOperand(rbx,
-                            index.reg, index.scale,
+                            rcx, times_pointer_size,
                             FixedArray::kHeaderSize));
   // Check if element is already the hole.
   __ cmpq(rax, r9);
+  // If so, call the slow case to also check the prototypes for the value.
   __ j(equal, &call_builtin);
 
   // Set the array's length.
-  __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rcx);
+  __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rcx);
 
-  // Fill with the hole and return original value..
+  // Fill with the hole and return original value.
   __ movq(FieldOperand(rbx,
-                       index.reg, index.scale,
+                       rcx, times_pointer_size,
                        FixedArray::kHeaderSize),
           r9);
   __ ret((argc + 1) * kPointerSize);
@@ -1190,6 +1206,8 @@
   // -----------------------------------
   Label miss;
 
+  GenerateNameCheck(name, &miss);
+
   // Get the number of arguments.
   const int argc = arguments().immediate();
 
@@ -1254,6 +1272,8 @@
   // rsp[(argc + 1) * 8] : argument 0 = receiver
   Label miss;
 
+  GenerateNameCheck(name, &miss);
+
   // Get the number of arguments.
   const int argc = arguments().immediate();
 
diff --git a/src/x64/virtual-frame-x64.cc b/src/x64/virtual-frame-x64.cc
index a0acd6a..e65378d 100644
--- a/src/x64/virtual-frame-x64.cc
+++ b/src/x64/virtual-frame-x64.cc
@@ -961,16 +961,18 @@
   // Sync elements below the range if they have not been materialized
   // on the stack.
   int start = Min(begin, stack_pointer_ + 1);
+  int end_or_stack_pointer = Min(stack_pointer_, end);
+  // Emit normal push instructions for elements above the stack pointer
+  // and use mov instructions for elements at or below it.
+  int i = start;
 
-  // If positive we have to adjust the stack pointer.
-  int delta = end - stack_pointer_;
-  if (delta > 0) {
-    stack_pointer_ = end;
-    __ subq(rsp, Immediate(delta * kPointerSize));
-  }
-
-  for (int i = start; i <= end; i++) {
+  while (i <= end_or_stack_pointer) {
     if (!elements_[i].is_synced()) SyncElementBelowStackPointer(i);
+    i++;
+  }
+  while (i <= end) {
+    SyncElementByPushing(i);
+    i++;
   }
 }
 
@@ -1164,6 +1166,25 @@
 }
 
 
+Result VirtualFrame::CallKeyedCallIC(RelocInfo::Mode mode,
+                                     int arg_count,
+                                     int loop_nesting) {
+  // Function name, arguments, and receiver are found on top of the frame
+  // and dropped by the call.  The IC expects the name in rcx and the rest
+  // on the stack, and drops them all.
+  InLoopFlag in_loop = loop_nesting > 0 ? IN_LOOP : NOT_IN_LOOP;
+  Handle<Code> ic =
+      cgen()->ComputeKeyedCallInitialize(arg_count, in_loop);
+  Result name = Pop();
+  // Spill args, receiver, and function.  The call will drop args and
+  // receiver.
+  PrepareForCall(arg_count + 1, arg_count + 1);
+  name.ToRegister(rcx);
+  name.Unuse();
+  return RawCallCodeObject(ic, mode);
+}
+
+
 Result VirtualFrame::CallConstructor(int arg_count) {
   // Arguments, receiver, and function are on top of the frame.  The
   // IC expects arg count in rax, function in rdi, and the arguments
diff --git a/src/x64/virtual-frame-x64.h b/src/x64/virtual-frame-x64.h
index affe18f..dc270fe 100644
--- a/src/x64/virtual-frame-x64.h
+++ b/src/x64/virtual-frame-x64.h
@@ -369,6 +369,8 @@
   // The argument count does not include the receiver.
   Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
 
+  Result CallKeyedCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
+
   // Allocate and call JS function as constructor.  Arguments,
   // receiver (global object), and function are found on top of the
   // frame.  Function is not dropped.  The argument count does not