Version 3.15.4

Fixed Array.prototype.join evaluation order. (issue 2263)

Perform CPU sampling on the CPU sampling thread only when the processing thread is not running. (issue 2364)

When using an Object as a set in Object.getOwnPropertyNames, null out its prototype. (issue 2410)

Disabled EXTRA_CHECKS in Release build.

Heap explorer: Show representation of strings.

Removed the 'type' and 'arguments' properties from the Error object. (issue 2397)

Added atomics implementation for ThreadSanitizer v2. (Chromium issue 128314)

Fixed LiveEdit crashes when an object/array literal is added. (issue 2368)

Performance and stability improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@12994 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 7ea71e4..42086c8 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -735,6 +735,12 @@
   static void CheckSSE2OperandsAreInt32(MacroAssembler* masm,
                                         Label* non_int32,
                                         Register scratch);
+
+  static void CheckSSE2OperandIsInt32(MacroAssembler* masm,
+                                      Label* non_int32,
+                                      XMMRegister operand,
+                                      Register scratch,
+                                      XMMRegister xmm_scratch);
 };
 
 
@@ -755,11 +761,20 @@
   // Get exponent alone in scratch2.
   __ mov(scratch2, scratch);
   __ and_(scratch2, HeapNumber::kExponentMask);
+  __ shr(scratch2, HeapNumber::kExponentShift);
+  __ sub(scratch2, Immediate(HeapNumber::kExponentBias));
+  // Load ecx with zero.  We use this either for the final shift or
+  // for the answer.
+  __ xor_(ecx, ecx);
+  // If the exponent is above 83, the number contains no significant
+  // bits in the range 0..2^31, so the result is zero.
+  static const uint32_t kResultIsZeroExponent = 83;
+  __ cmp(scratch2, Immediate(kResultIsZeroExponent));
+  __ j(above, &done);
   if (use_sse3) {
     CpuFeatures::Scope scope(SSE3);
     // Check whether the exponent is too big for a 64 bit signed integer.
-    static const uint32_t kTooBigExponent =
-        (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
+    static const uint32_t kTooBigExponent = 63;
     __ cmp(scratch2, Immediate(kTooBigExponent));
     __ j(greater_equal, conversion_failure);
     // Load x87 register with heap number.
@@ -771,15 +786,11 @@
     __ mov(ecx, Operand(esp, 0));  // Load low word of answer into ecx.
     __ add(esp, Immediate(sizeof(uint64_t)));  // Nolint.
   } else {
-    // Load ecx with zero.  We use this either for the final shift or
-    // for the answer.
-    __ xor_(ecx, ecx);
     // Check whether the exponent matches a 32 bit signed int that cannot be
     // represented by a Smi.  A non-smi 32 bit integer is 1.xxx * 2^30 so the
     // exponent is 30 (biased).  This is the exponent that we are fastest at and
     // also the highest exponent we can handle here.
-    const uint32_t non_smi_exponent =
-        (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+    const uint32_t non_smi_exponent = 30;
     __ cmp(scratch2, Immediate(non_smi_exponent));
     // If we have a match of the int32-but-not-Smi exponent then skip some
     // logic.
@@ -791,8 +802,7 @@
     {
       // Handle a big exponent.  The only reason we have this code is that the
       // >>> operator has a tendency to generate numbers with an exponent of 31.
-      const uint32_t big_non_smi_exponent =
-          (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
+      const uint32_t big_non_smi_exponent = 31;
       __ cmp(scratch2, Immediate(big_non_smi_exponent));
       __ j(not_equal, conversion_failure);
       // We have the big exponent, typically from >>>.  This means the number is
@@ -821,19 +831,8 @@
     }
 
     __ bind(&normal_exponent);
-    // Exponent word in scratch, exponent part of exponent word in scratch2.
-    // Zero in ecx.
-    // We know the exponent is smaller than 30 (biased).  If it is less than
-    // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
-    // it rounds to zero.
-    const uint32_t zero_exponent =
-        (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
-    __ sub(scratch2, Immediate(zero_exponent));
-    // ecx already has a Smi zero.
-    __ j(less, &done, Label::kNear);
-
-    // We have a shifted exponent between 0 and 30 in scratch2.
-    __ shr(scratch2, HeapNumber::kExponentShift);
+    // Exponent word in scratch, exponent in scratch2. Zero in ecx.
+    // We know that 0 <= exponent < 30.
     __ mov(ecx, Immediate(30));
     __ sub(ecx, scratch2);
 
@@ -868,8 +867,8 @@
     __ jmp(&done, Label::kNear);
     __ bind(&negative);
     __ sub(ecx, scratch2);
-    __ bind(&done);
   }
+  __ bind(&done);
 }
 
 
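The hunks above rewrite the truncation path to keep the unbiased exponent in scratch2 and to take an early zero-result exit. As an illustrative sketch only (not part of the patch), the same double-to-int32 truncation can be written in portable C++ as below; TruncateDoubleToInt32 is a hypothetical name, and the sketch assumes the usual IEEE-754 double layout and two's-complement conversion to int32_t.

#include <cstdint>
#include <cstring>

// Sketch of the truncation the stub performs; not part of the generated code.
static int32_t TruncateDoubleToInt32(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  int32_t exponent = static_cast<int32_t>((bits >> 52) & 0x7FF) - 1023;
  // Exponents above 83 leave no significant bits in positions 0..31, so the
  // truncated 32-bit result is zero; in this sketch, negative exponents
  // (|value| < 1) and NaN/Infinity (biased exponent 0x7FF) take the same
  // early exit.
  if (exponent < 0 || exponent > 83) return 0;
  uint64_t mantissa = (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
  // Align the 53-bit significand so that its binary point sits at bit 0.
  uint64_t integer_bits = exponent <= 52 ? mantissa >> (52 - exponent)
                                         : mantissa << (exponent - 52);
  uint32_t low = static_cast<uint32_t>(integer_bits);   // result modulo 2^32
  return (bits >> 63) ? static_cast<int32_t>(0u - low)  // negate modulo 2^32
                      : static_cast<int32_t>(low);
}

The 83 cut-off is 52 + 31: for larger exponents even the lowest mantissa bit lands above bit 31, so the low 32 bits of the integer part are all zero.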
@@ -1192,16 +1191,17 @@
 }
 
 
+void BinaryOpStub::Initialize() {
+  platform_specific_bit_ = CpuFeatures::IsSupported(SSE3);
+}
+
+
 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
   __ pop(ecx);  // Save return address.
   __ push(edx);
   __ push(eax);
   // Left and right arguments are now on top.
-  // Push this stub's key. Although the operation and the type info are
-  // encoded into the key, the encoding is opaque, so push them too.
   __ push(Immediate(Smi::FromInt(MinorKey())));
-  __ push(Immediate(Smi::FromInt(op_)));
-  __ push(Immediate(Smi::FromInt(operands_type_)));
 
   __ push(ecx);  // Push return address.
 
@@ -1210,7 +1210,7 @@
   __ TailCallExternalReference(
       ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
                         masm->isolate()),
-      5,
+      3,
       1);
 }
 
@@ -1220,11 +1220,7 @@
 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
   __ pop(ecx);  // Save return address.
   // Left and right arguments are already on top of the stack.
-  // Push this stub's key. Although the operation and the type info are
-  // encoded into the key, the encoding is opaque, so push them too.
   __ push(Immediate(Smi::FromInt(MinorKey())));
-  __ push(Immediate(Smi::FromInt(op_)));
-  __ push(Immediate(Smi::FromInt(operands_type_)));
 
   __ push(ecx);  // Push return address.
 
@@ -1233,73 +1229,22 @@
   __ TailCallExternalReference(
       ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
                         masm->isolate()),
-      5,
+      3,
       1);
 }
 
 
-void BinaryOpStub::Generate(MacroAssembler* masm) {
-  // Explicitly allow generation of nested stubs. It is safe here because
-  // generation code does not use any raw pointers.
-  AllowStubCallsScope allow_stub_calls(masm, true);
-
-  switch (operands_type_) {
-    case BinaryOpIC::UNINITIALIZED:
-      GenerateTypeTransition(masm);
-      break;
-    case BinaryOpIC::SMI:
-      GenerateSmiStub(masm);
-      break;
-    case BinaryOpIC::INT32:
-      GenerateInt32Stub(masm);
-      break;
-    case BinaryOpIC::HEAP_NUMBER:
-      GenerateHeapNumberStub(masm);
-      break;
-    case BinaryOpIC::ODDBALL:
-      GenerateOddballStub(masm);
-      break;
-    case BinaryOpIC::BOTH_STRING:
-      GenerateBothStringStub(masm);
-      break;
-    case BinaryOpIC::STRING:
-      GenerateStringStub(masm);
-      break;
-    case BinaryOpIC::GENERIC:
-      GenerateGeneric(masm);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-void BinaryOpStub::PrintName(StringStream* stream) {
-  const char* op_name = Token::Name(op_);
-  const char* overwrite_name;
-  switch (mode_) {
-    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
-    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
-    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
-    default: overwrite_name = "UnknownOverwrite"; break;
-  }
-  stream->Add("BinaryOpStub_%s_%s_%s",
-              op_name,
-              overwrite_name,
-              BinaryOpIC::GetName(operands_type_));
-}
-
-
-void BinaryOpStub::GenerateSmiCode(
+static void BinaryOpStub_GenerateSmiCode(
     MacroAssembler* masm,
     Label* slow,
-    SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
+    BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
+    Token::Value op) {
   // 1. Move arguments into edx, eax except for DIV and MOD, which need the
   // dividend in eax and edx free for the division.  Use eax, ebx for those.
   Comment load_comment(masm, "-- Load arguments");
   Register left = edx;
   Register right = eax;
-  if (op_ == Token::DIV || op_ == Token::MOD) {
+  if (op == Token::DIV || op == Token::MOD) {
     left = eax;
     right = ebx;
     __ mov(ebx, eax);
@@ -1312,7 +1257,7 @@
   Label not_smis;
   Register combined = ecx;
   ASSERT(!left.is(combined) && !right.is(combined));
-  switch (op_) {
+  switch (op) {
     case Token::BIT_OR:
       // Perform the operation into eax and smi check the result.  Preserve
       // eax in case the result is not a smi.
@@ -1356,7 +1301,7 @@
   // eax and check the result if necessary.
   Comment perform_smi(masm, "-- Perform smi operation");
   Label use_fp_on_smis;
-  switch (op_) {
+  switch (op) {
     case Token::BIT_OR:
       // Nothing to do.
       break;
@@ -1490,7 +1435,7 @@
   }
 
   // 5. Emit return of result in eax.  Some operations have registers pushed.
-  switch (op_) {
+  switch (op) {
     case Token::ADD:
     case Token::SUB:
     case Token::MUL:
@@ -1513,9 +1458,9 @@
   // 6. For some operations emit inline code to perform floating point
   // operations on known smis (e.g., if the result of the operation
   // overflowed the smi range).
-  if (allow_heapnumber_results == NO_HEAPNUMBER_RESULTS) {
+  if (allow_heapnumber_results == BinaryOpStub::NO_HEAPNUMBER_RESULTS) {
     __ bind(&use_fp_on_smis);
-    switch (op_) {
+    switch (op) {
       // Undo the effects of some operations, and some register moves.
       case Token::SHL:
         // The arguments are saved on the stack, and only used from there.
@@ -1543,8 +1488,8 @@
     }
     __ jmp(&not_smis);
   } else {
-    ASSERT(allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS);
-    switch (op_) {
+    ASSERT(allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS);
+    switch (op) {
       case Token::SHL:
       case Token::SHR: {
         Comment perform_float(masm, "-- Perform float operation on smis");
@@ -1555,13 +1500,13 @@
         // Store the result in the HeapNumber and return.
         // It's OK to overwrite the arguments on the stack because we
         // are about to return.
-        if (op_ == Token::SHR) {
+        if (op == Token::SHR) {
           __ mov(Operand(esp, 1 * kPointerSize), left);
           __ mov(Operand(esp, 2 * kPointerSize), Immediate(0));
           __ fild_d(Operand(esp, 1 * kPointerSize));
           __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
         } else {
-          ASSERT_EQ(Token::SHL, op_);
+          ASSERT_EQ(Token::SHL, op);
           if (CpuFeatures::IsSupported(SSE2)) {
             CpuFeatures::Scope use_sse2(SSE2);
             __ cvtsi2sd(xmm0, left);
@@ -1583,7 +1528,7 @@
         Comment perform_float(masm, "-- Perform float operation on smis");
         __ bind(&use_fp_on_smis);
         // Restore arguments to edx, eax.
-        switch (op_) {
+        switch (op) {
           case Token::ADD:
             // Revert right = right + left.
             __ sub(right, left);
@@ -1609,7 +1554,7 @@
         if (CpuFeatures::IsSupported(SSE2)) {
           CpuFeatures::Scope use_sse2(SSE2);
           FloatingPointHelper::LoadSSE2Smis(masm, ebx);
-          switch (op_) {
+          switch (op) {
             case Token::ADD: __ addsd(xmm0, xmm1); break;
             case Token::SUB: __ subsd(xmm0, xmm1); break;
             case Token::MUL: __ mulsd(xmm0, xmm1); break;
@@ -1619,7 +1564,7 @@
           __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
         } else {  // SSE2 not available, use FPU.
           FloatingPointHelper::LoadFloatSmis(masm, ebx);
-          switch (op_) {
+          switch (op) {
             case Token::ADD: __ faddp(1); break;
             case Token::SUB: __ fsubp(1); break;
             case Token::MUL: __ fmulp(1); break;
@@ -1642,7 +1587,7 @@
   // edx and eax.
   Comment done_comment(masm, "-- Enter non-smi code");
   __ bind(&not_smis);
-  switch (op_) {
+  switch (op) {
     case Token::BIT_OR:
     case Token::SHL:
     case Token::SAR:
@@ -1689,9 +1634,11 @@
 
   if (result_type_ == BinaryOpIC::UNINITIALIZED ||
       result_type_ == BinaryOpIC::SMI) {
-    GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS);
+    BinaryOpStub_GenerateSmiCode(
+        masm, &call_runtime, NO_HEAPNUMBER_RESULTS, op_);
   } else {
-    GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+    BinaryOpStub_GenerateSmiCode(
+        masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
   }
   __ bind(&call_runtime);
   switch (op_) {
@@ -1716,19 +1663,9 @@
 }
 
 
-void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
-  ASSERT(operands_type_ == BinaryOpIC::STRING);
-  ASSERT(op_ == Token::ADD);
-  // Try to add arguments as strings, otherwise, transition to the generic
-  // BinaryOpIC type.
-  GenerateAddStrings(masm);
-  GenerateTypeTransition(masm);
-}
-
-
 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
   Label call_runtime;
-  ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
+  ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
   ASSERT(op_ == Token::ADD);
   // If both arguments are strings, call the string add stub.
   // Otherwise, do a transition.
@@ -1756,6 +1693,11 @@
 }
 
 
+static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+                                                      Label* alloc_failure,
+                                                      OverwriteMode mode);
+
+
 // Input:
 //    edx: left operand (tagged)
 //    eax: right operand (tagged)
@@ -1763,7 +1705,7 @@
 //    eax: result (tagged)
 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
   Label call_runtime;
-  ASSERT(operands_type_ == BinaryOpIC::INT32);
+  ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
 
   // Floating point case.
   switch (op_) {
@@ -1776,6 +1718,18 @@
       Label not_int32;
       if (CpuFeatures::IsSupported(SSE2)) {
         CpuFeatures::Scope use_sse2(SSE2);
+        // It could be that only SMIs have been seen at either the left
+        // or the right operand. For precise type feedback, patch the IC
+        // again if this changes.
+        // In theory, we would need the same check in the non-SSE2 case,
+        // but since we don't support Crankshaft on such hardware we can
+        // afford not to care about precise type feedback.
+        if (left_type_ == BinaryOpIC::SMI) {
+          __ JumpIfNotSmi(edx, &not_int32);
+        }
+        if (right_type_ == BinaryOpIC::SMI) {
+          __ JumpIfNotSmi(eax, &not_int32);
+        }
         FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
         FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
         if (op_ == Token::MOD) {
@@ -1798,7 +1752,7 @@
             __ test(ecx, Immediate(1));
             __ j(zero, &not_int32);
           }
-          GenerateHeapResultAllocation(masm, &call_runtime);
+          BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
           __ ret(0);
         }
@@ -1824,7 +1778,8 @@
             default: UNREACHABLE();
           }
           Label after_alloc_failure;
-          GenerateHeapResultAllocation(masm, &after_alloc_failure);
+          BinaryOpStub_GenerateHeapResultAllocation(
+              masm, &after_alloc_failure, mode_);
           __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
           __ ret(0);
           __ bind(&after_alloc_failure);
@@ -1849,10 +1804,14 @@
       Label not_floats;
       Label not_int32;
       Label non_smi_result;
+      // We do not check the input arguments here, as any value is
+      // unconditionally truncated to an int32 anyway. To get the
+      // right optimized code, int32 type feedback is just right.
+      bool use_sse3 = platform_specific_bit_;
       FloatingPointHelper::LoadUnknownsAsIntegers(masm,
-                                                  use_sse3_,
+                                                  use_sse3,
                                                   &not_floats);
-      FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_,
+      FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3,
                                                         &not_int32);
       switch (op_) {
         case Token::BIT_OR:  __ or_(eax, ecx); break;
@@ -1925,44 +1884,24 @@
 
   switch (op_) {
     case Token::ADD:
-      GenerateRegisterArgsPush(masm);
-      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
-      break;
     case Token::SUB:
-      GenerateRegisterArgsPush(masm);
-      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
-      break;
     case Token::MUL:
-      GenerateRegisterArgsPush(masm);
-      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
-      break;
     case Token::DIV:
       GenerateRegisterArgsPush(masm);
-      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
       break;
     case Token::MOD:
-      break;
+      return;  // Handled above.
     case Token::BIT_OR:
-      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
-      break;
     case Token::BIT_AND:
-      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
-      break;
     case Token::BIT_XOR:
-      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
-      break;
     case Token::SAR:
-      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
-      break;
     case Token::SHL:
-      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
-      break;
     case Token::SHR:
-      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
       break;
     default:
       UNREACHABLE();
   }
+  GenerateCallRuntime(masm);
 }
 
 
@@ -2011,7 +1950,28 @@
       Label not_floats;
       if (CpuFeatures::IsSupported(SSE2)) {
         CpuFeatures::Scope use_sse2(SSE2);
+
+        // It could be that only SMIs have been seen at either the left
+        // or the right operand. For precise type feedback, patch the IC
+        // again if this changes.
+        // In theory, we would need the same check in the non-SSE2 case,
+        // but since we don't support Crankshaft on such hardware we can
+        // afford not to care about precise type feedback.
+        if (left_type_ == BinaryOpIC::SMI) {
+          __ JumpIfNotSmi(edx, &not_floats);
+        }
+        if (right_type_ == BinaryOpIC::SMI) {
+          __ JumpIfNotSmi(eax, &not_floats);
+        }
         FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
+        if (left_type_ == BinaryOpIC::INT32) {
+          FloatingPointHelper::CheckSSE2OperandIsInt32(
+              masm, &not_floats, xmm0, ecx, xmm2);
+        }
+        if (right_type_ == BinaryOpIC::INT32) {
+          FloatingPointHelper::CheckSSE2OperandIsInt32(
+              masm, &not_floats, xmm1, ecx, xmm2);
+        }
 
         switch (op_) {
           case Token::ADD: __ addsd(xmm0, xmm1); break;
@@ -2020,7 +1980,7 @@
           case Token::DIV: __ divsd(xmm0, xmm1); break;
           default: UNREACHABLE();
         }
-        GenerateHeapResultAllocation(masm, &call_runtime);
+        BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
         __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
         __ ret(0);
       } else {  // SSE2 not available, use FPU.
@@ -2037,7 +1997,8 @@
           default: UNREACHABLE();
         }
         Label after_alloc_failure;
-        GenerateHeapResultAllocation(masm, &after_alloc_failure);
+        BinaryOpStub_GenerateHeapResultAllocation(
+            masm, &after_alloc_failure, mode_);
         __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
         __ ret(0);
         __ bind(&after_alloc_failure);
@@ -2063,8 +2024,12 @@
       GenerateRegisterArgsPush(masm);
       Label not_floats;
       Label non_smi_result;
+      // We do not check the input arguments here, as any value is
+      // unconditionally truncated to an int32 anyway. To get the
+      // right optimized code, int32 type feedback is just right.
+      bool use_sse3 = platform_specific_bit_;
       FloatingPointHelper::LoadUnknownsAsIntegers(masm,
-                                                  use_sse3_,
+                                                  use_sse3,
                                                   &not_floats);
       switch (op_) {
         case Token::BIT_OR:  __ or_(eax, ecx); break;
@@ -2136,46 +2101,23 @@
 
   switch (op_) {
     case Token::ADD:
-      GenerateRegisterArgsPush(masm);
-      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
-      break;
     case Token::SUB:
-      GenerateRegisterArgsPush(masm);
-      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
-      break;
     case Token::MUL:
-      GenerateRegisterArgsPush(masm);
-      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
-      break;
     case Token::DIV:
-      GenerateRegisterArgsPush(masm);
-      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
-      break;
     case Token::MOD:
       GenerateRegisterArgsPush(masm);
-      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
       break;
     case Token::BIT_OR:
-      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
-      break;
     case Token::BIT_AND:
-      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
-      break;
     case Token::BIT_XOR:
-      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
-      break;
     case Token::SAR:
-      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
-      break;
     case Token::SHL:
-      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
-      break;
     case Token::SHR:
-      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
       break;
     default:
       UNREACHABLE();
   }
+  GenerateCallRuntime(masm);
 }
 
 
@@ -2204,7 +2146,8 @@
       UNREACHABLE();
   }
 
-  GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+  BinaryOpStub_GenerateSmiCode(
+      masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
 
   // Floating point case.
   switch (op_) {
@@ -2224,7 +2167,7 @@
           case Token::DIV: __ divsd(xmm0, xmm1); break;
           default: UNREACHABLE();
         }
-        GenerateHeapResultAllocation(masm, &call_runtime);
+        BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
         __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
         __ ret(0);
       } else {  // SSE2 not available, use FPU.
@@ -2241,7 +2184,8 @@
           default: UNREACHABLE();
         }
         Label after_alloc_failure;
-        GenerateHeapResultAllocation(masm, &after_alloc_failure);
+        BinaryOpStub_GenerateHeapResultAllocation(
+            masm, &after_alloc_failure, mode_);
         __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
         __ ret(0);
         __ bind(&after_alloc_failure);
@@ -2262,8 +2206,9 @@
     case Token::SHL:
     case Token::SHR: {
       Label non_smi_result;
+      bool use_sse3 = platform_specific_bit_;
       FloatingPointHelper::LoadUnknownsAsIntegers(masm,
-                                                  use_sse3_,
+                                                  use_sse3,
                                                   &call_runtime);
       switch (op_) {
         case Token::BIT_OR:  __ or_(eax, ecx); break;
@@ -2330,48 +2275,26 @@
   // result.
   __ bind(&call_runtime);
   switch (op_) {
-    case Token::ADD: {
+    case Token::ADD:
       GenerateAddStrings(masm);
-      GenerateRegisterArgsPush(masm);
-      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
-      break;
-    }
+      // Fall through.
     case Token::SUB:
-      GenerateRegisterArgsPush(masm);
-      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
-      break;
     case Token::MUL:
-      GenerateRegisterArgsPush(masm);
-      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
-      break;
     case Token::DIV:
       GenerateRegisterArgsPush(masm);
-      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
       break;
     case Token::MOD:
-      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
-      break;
     case Token::BIT_OR:
-      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
-      break;
     case Token::BIT_AND:
-      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
-      break;
     case Token::BIT_XOR:
-      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
-      break;
     case Token::SAR:
-      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
-      break;
     case Token::SHL:
-      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
-      break;
     case Token::SHR:
-      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
       break;
     default:
       UNREACHABLE();
   }
+  GenerateCallRuntime(masm);
 }
 
 
@@ -2407,11 +2330,10 @@
 }
 
 
-void BinaryOpStub::GenerateHeapResultAllocation(
-    MacroAssembler* masm,
-    Label* alloc_failure) {
+static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+                                                      Label* alloc_failure,
+                                                      OverwriteMode mode) {
   Label skip_allocation;
-  OverwriteMode mode = mode_;
   switch (mode) {
     case OVERWRITE_LEFT: {
       // If the argument in edx is already an object, we skip the
@@ -2923,16 +2845,22 @@
 void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm,
                                                     Label* non_int32,
                                                     Register scratch) {
-  __ cvttsd2si(scratch, Operand(xmm0));
-  __ cvtsi2sd(xmm2, scratch);
-  __ ucomisd(xmm0, xmm2);
-  __ j(not_zero, non_int32);
-  __ j(carry, non_int32);
-  __ cvttsd2si(scratch, Operand(xmm1));
-  __ cvtsi2sd(xmm2, scratch);
-  __ ucomisd(xmm1, xmm2);
-  __ j(not_zero, non_int32);
-  __ j(carry, non_int32);
+  CheckSSE2OperandIsInt32(masm, non_int32, xmm0, scratch, xmm2);
+  CheckSSE2OperandIsInt32(masm, non_int32, xmm1, scratch, xmm2);
+}
+
+
+void FloatingPointHelper::CheckSSE2OperandIsInt32(MacroAssembler* masm,
+                                                  Label* non_int32,
+                                                  XMMRegister operand,
+                                                  Register scratch,
+                                                  XMMRegister xmm_scratch) {
+  __ cvttsd2si(scratch, Operand(operand));
+  __ cvtsi2sd(xmm_scratch, scratch);
+  __ pcmpeqd(xmm_scratch, operand);
+  __ movmskpd(scratch, xmm_scratch);
+  __ test(scratch, Immediate(1));
+  __ j(zero, non_int32);
 }
 
 
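The new CheckSSE2OperandIsInt32 boils down to a round-trip test: truncate the operand to int32, convert back to double, and require bit-for-bit equality with the original. A portable C++ sketch of that predicate follows (illustrative only; IsInt32RoundTrip is a hypothetical name, and the explicit range guard stands in for cvttsd2si's defined behavior on out-of-range inputs, which a plain C++ cast does not have).

#include <cstdint>
#include <cstring>

// Sketch of the predicate encoded by CheckSSE2OperandIsInt32: a double is an
// int32 exactly when truncating it to int32 and converting back reproduces
// the original bit pattern.  This rejects fractional values, -0.0, NaN and
// anything outside the int32 range.
static bool IsInt32RoundTrip(double operand) {
  if (!(operand >= -2147483648.0 && operand <= 2147483647.0)) return false;
  int32_t truncated = static_cast<int32_t>(operand);       // like cvttsd2si
  double round_tripped = static_cast<double>(truncated);   // like cvtsi2sd
  uint64_t a, b;
  std::memcpy(&a, &operand, sizeof(a));
  std::memcpy(&b, &round_tripped, sizeof(b));
  return a == b;
}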
@@ -3960,9 +3888,9 @@
   __ test(ecx, ecx);
   __ j(zero, &setup_two_byte, Label::kNear);
   __ SmiUntag(esi);
-  __ lea(ecx, FieldOperand(eax, esi, times_1, SeqAsciiString::kHeaderSize));
+  __ lea(ecx, FieldOperand(eax, esi, times_1, SeqOneByteString::kHeaderSize));
   __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Argument 4.
-  __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
+  __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqOneByteString::kHeaderSize));
   __ mov(Operand(esp, 2 * kPointerSize), ecx);  // Argument 3.
   __ jmp(&setup_rest, Label::kNear);
 
@@ -4108,7 +4036,7 @@
   }
   __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
   // Move the pointer so that offset-wise, it looks like a sequential string.
-  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
   __ sub(eax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
   STATIC_ASSERT(kTwoByteStringTag == 0);
   __ test_b(ebx, kStringEncodingMask);
@@ -4321,30 +4249,59 @@
   return (cc == greater || cc == greater_equal) ? LESS : GREATER;
 }
 
-void CompareStub::Generate(MacroAssembler* masm) {
-  ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
 
-  Label check_unequal_objects;
-
-  // Compare two smis if required.
-  if (include_smi_compare_) {
-    Label non_smi, smi_done;
-    __ mov(ecx, edx);
-    __ or_(ecx, eax);
-    __ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
-    __ sub(edx, eax);  // Return on the result of the subtraction.
-    __ j(no_overflow, &smi_done, Label::kNear);
-    __ not_(edx);  // Correct sign in case of overflow. edx is never 0 here.
-    __ bind(&smi_done);
-    __ mov(eax, edx);
-    __ ret(0);
-    __ bind(&non_smi);
-  } else if (FLAG_debug_code) {
-    __ mov(ecx, edx);
-    __ or_(ecx, eax);
-    __ test(ecx, Immediate(kSmiTagMask));
-    __ Assert(not_zero, "Unexpected smi operands.");
+static void CheckInputType(MacroAssembler* masm,
+                           Register input,
+                           CompareIC::State expected,
+                           Label* fail) {
+  Label ok;
+  if (expected == CompareIC::SMI) {
+    __ JumpIfNotSmi(input, fail);
+  } else if (expected == CompareIC::HEAP_NUMBER) {
+    __ JumpIfSmi(input, &ok);
+    __ cmp(FieldOperand(input, HeapObject::kMapOffset),
+           Immediate(masm->isolate()->factory()->heap_number_map()));
+    __ j(not_equal, fail);
   }
+  // We could be strict about symbol/string here, but as long as
+  // hydrogen doesn't care, the stub doesn't have to care either.
+  __ bind(&ok);
+}
+
+
+static void BranchIfNonSymbol(MacroAssembler* masm,
+                              Label* label,
+                              Register object,
+                              Register scratch) {
+  __ JumpIfSmi(object, label);
+  __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
+  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+  __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
+  __ cmp(scratch, kSymbolTag | kStringTag);
+  __ j(not_equal, label);
+}
+
+
+void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+  Label check_unequal_objects;
+  Condition cc = GetCondition();
+
+  Label miss;
+  CheckInputType(masm, edx, left_, &miss);
+  CheckInputType(masm, eax, right_, &miss);
+
+  // Compare two smis.
+  Label non_smi, smi_done;
+  __ mov(ecx, edx);
+  __ or_(ecx, eax);
+  __ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
+  __ sub(edx, eax);  // Return on the result of the subtraction.
+  __ j(no_overflow, &smi_done, Label::kNear);
+  __ not_(edx);  // Correct sign in case of overflow. edx is never 0 here.
+  __ bind(&smi_done);
+  __ mov(eax, edx);
+  __ ret(0);
+  __ bind(&non_smi);
 
   // NOTICE! This code is only reached after a smi-fast-case check, so
   // it is certain that at least one operand isn't a smi.
@@ -4356,67 +4313,61 @@
     __ cmp(eax, edx);
     __ j(not_equal, &not_identical);
 
-    if (cc_ != equal) {
+    if (cc != equal) {
       // Check for undefined.  undefined OP undefined is false even though
       // undefined == undefined.
       Label check_for_nan;
       __ cmp(edx, masm->isolate()->factory()->undefined_value());
       __ j(not_equal, &check_for_nan, Label::kNear);
-      __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
+      __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
       __ ret(0);
       __ bind(&check_for_nan);
     }
 
     // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
     // so we do the second best thing - test it ourselves.
-    // Note: if cc_ != equal, never_nan_nan_ is not used.
-    if (never_nan_nan_ && (cc_ == equal)) {
-      __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+    Label heap_number;
+    __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+           Immediate(masm->isolate()->factory()->heap_number_map()));
+    __ j(equal, &heap_number, Label::kNear);
+    if (cc != equal) {
+      // Call runtime on identical JSObjects.  Otherwise return equal.
+      __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+      __ j(above_equal, &not_identical);
+    }
+    __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+    __ ret(0);
+
+    __ bind(&heap_number);
+    // It is a heap number, so return non-equal if it's NaN and equal if
+    // it's not NaN.
+    // The representation of NaN values has all exponent bits (52..62) set,
+    // and not all mantissa bits (0..51) clear.
+    // We only accept QNaNs, which have bit 51 set.
+    // Read top bits of double representation (second word of value).
+
+    // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
+    // all bits in the mask are set. We only need to check the word
+    // that contains the exponent and high bit of the mantissa.
+    STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
+    __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
+    __ Set(eax, Immediate(0));
+    // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
+    // bits.
+    __ add(edx, edx);
+    __ cmp(edx, kQuietNaNHighBitsMask << 1);
+    if (cc == equal) {
+      STATIC_ASSERT(EQUAL != 1);
+      __ setcc(above_equal, eax);
       __ ret(0);
     } else {
-      Label heap_number;
-      __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
-             Immediate(masm->isolate()->factory()->heap_number_map()));
-      __ j(equal, &heap_number, Label::kNear);
-      if (cc_ != equal) {
-        // Call runtime on identical JSObjects.  Otherwise return equal.
-        __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
-        __ j(above_equal, &not_identical);
-      }
+      Label nan;
+      __ j(above_equal, &nan, Label::kNear);
       __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
       __ ret(0);
-
-      __ bind(&heap_number);
-      // It is a heap number, so return non-equal if it's NaN and equal if
-      // it's not NaN.
-      // The representation of NaN values has all exponent bits (52..62) set,
-      // and not all mantissa bits (0..51) clear.
-      // We only accept QNaNs, which have bit 51 set.
-      // Read top bits of double representation (second word of value).
-
-      // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
-      // all bits in the mask are set. We only need to check the word
-      // that contains the exponent and high bit of the mantissa.
-      STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
-      __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
-      __ Set(eax, Immediate(0));
-      // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
-      // bits.
-      __ add(edx, edx);
-      __ cmp(edx, kQuietNaNHighBitsMask << 1);
-      if (cc_ == equal) {
-        STATIC_ASSERT(EQUAL != 1);
-        __ setcc(above_equal, eax);
-        __ ret(0);
-      } else {
-        Label nan;
-        __ j(above_equal, &nan, Label::kNear);
-        __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
-        __ ret(0);
-        __ bind(&nan);
-        __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
-        __ ret(0);
-      }
+      __ bind(&nan);
+      __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
+      __ ret(0);
     }
 
     __ bind(&not_identical);
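The identical-operands path above distinguishes NaN from every other heap number using only the word that holds the exponent and the top mantissa bit. The underlying IEEE-754 rule is written out below as a portable C++ sketch (illustrative only; IsNaNBits is a hypothetical name).

#include <cstdint>
#include <cstring>

// A double is NaN when all eleven exponent bits are set and the mantissa is
// nonzero.  The stub above only has to recognize quiet NaNs (top mantissa bit
// set), which is why inspecting a single 32-bit word is enough for its check.
static bool IsNaNBits(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  uint64_t exponent = (bits >> 52) & 0x7FF;
  uint64_t mantissa = bits & ((uint64_t{1} << 52) - 1);
  return exponent == 0x7FF && mantissa != 0;
}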
@@ -4424,7 +4375,7 @@
 
   // Strict equality can quickly decide whether objects are equal.
   // Non-strict object equality is slower, so it is handled later in the stub.
-  if (cc_ == equal && strict_) {
+  if (cc == equal && strict()) {
     Label slow;  // Fallthrough label.
     Label not_smis;
     // If we're doing a strict equality comparison, we don't have to do
@@ -4495,70 +4446,68 @@
   }
 
   // Generate the number comparison code.
-  if (include_number_compare_) {
-    Label non_number_comparison;
-    Label unordered;
-    if (CpuFeatures::IsSupported(SSE2)) {
-      CpuFeatures::Scope use_sse2(SSE2);
-      CpuFeatures::Scope use_cmov(CMOV);
+  Label non_number_comparison;
+  Label unordered;
+  if (CpuFeatures::IsSupported(SSE2)) {
+    CpuFeatures::Scope use_sse2(SSE2);
+    CpuFeatures::Scope use_cmov(CMOV);
 
-      FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
-      __ ucomisd(xmm0, xmm1);
+    FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
+    __ ucomisd(xmm0, xmm1);
 
-      // Don't base result on EFLAGS when a NaN is involved.
-      __ j(parity_even, &unordered, Label::kNear);
-      // Return a result of -1, 0, or 1, based on EFLAGS.
-      __ mov(eax, 0);  // equal
-      __ mov(ecx, Immediate(Smi::FromInt(1)));
-      __ cmov(above, eax, ecx);
-      __ mov(ecx, Immediate(Smi::FromInt(-1)));
-      __ cmov(below, eax, ecx);
-      __ ret(0);
-    } else {
-      FloatingPointHelper::CheckFloatOperands(
-          masm, &non_number_comparison, ebx);
-      FloatingPointHelper::LoadFloatOperand(masm, eax);
-      FloatingPointHelper::LoadFloatOperand(masm, edx);
-      __ FCmp();
+    // Don't base result on EFLAGS when a NaN is involved.
+    __ j(parity_even, &unordered, Label::kNear);
+    // Return a result of -1, 0, or 1, based on EFLAGS.
+    __ mov(eax, 0);  // equal
+    __ mov(ecx, Immediate(Smi::FromInt(1)));
+    __ cmov(above, eax, ecx);
+    __ mov(ecx, Immediate(Smi::FromInt(-1)));
+    __ cmov(below, eax, ecx);
+    __ ret(0);
+  } else {
+    FloatingPointHelper::CheckFloatOperands(
+        masm, &non_number_comparison, ebx);
+    FloatingPointHelper::LoadFloatOperand(masm, eax);
+    FloatingPointHelper::LoadFloatOperand(masm, edx);
+    __ FCmp();
 
-      // Don't base result on EFLAGS when a NaN is involved.
-      __ j(parity_even, &unordered, Label::kNear);
+    // Don't base result on EFLAGS when a NaN is involved.
+    __ j(parity_even, &unordered, Label::kNear);
 
-      Label below_label, above_label;
-      // Return a result of -1, 0, or 1, based on EFLAGS.
-      __ j(below, &below_label, Label::kNear);
-      __ j(above, &above_label, Label::kNear);
+    Label below_label, above_label;
+    // Return a result of -1, 0, or 1, based on EFLAGS.
+    __ j(below, &below_label, Label::kNear);
+    __ j(above, &above_label, Label::kNear);
 
-      __ Set(eax, Immediate(0));
-      __ ret(0);
-
-      __ bind(&below_label);
-      __ mov(eax, Immediate(Smi::FromInt(-1)));
-      __ ret(0);
-
-      __ bind(&above_label);
-      __ mov(eax, Immediate(Smi::FromInt(1)));
-      __ ret(0);
-    }
-
-    // If one of the numbers was NaN, then the result is always false.
-    // The cc is never not-equal.
-    __ bind(&unordered);
-    ASSERT(cc_ != not_equal);
-    if (cc_ == less || cc_ == less_equal) {
-      __ mov(eax, Immediate(Smi::FromInt(1)));
-    } else {
-      __ mov(eax, Immediate(Smi::FromInt(-1)));
-    }
+    __ Set(eax, Immediate(0));
     __ ret(0);
 
-    // The number comparison code did not provide a valid result.
-    __ bind(&non_number_comparison);
+    __ bind(&below_label);
+    __ mov(eax, Immediate(Smi::FromInt(-1)));
+    __ ret(0);
+
+    __ bind(&above_label);
+    __ mov(eax, Immediate(Smi::FromInt(1)));
+    __ ret(0);
   }
 
+  // If one of the numbers was NaN, then the result is always false.
+  // The cc is never not-equal.
+  __ bind(&unordered);
+  ASSERT(cc != not_equal);
+  if (cc == less || cc == less_equal) {
+    __ mov(eax, Immediate(Smi::FromInt(1)));
+  } else {
+    __ mov(eax, Immediate(Smi::FromInt(-1)));
+  }
+  __ ret(0);
+
+  // The number comparison code did not provide a valid result.
+  __ bind(&non_number_comparison);
+
   // Fast negative check for symbol-to-symbol equality.
   Label check_for_strings;
-  if (cc_ == equal) {
+  if (cc == equal) {
     BranchIfNonSymbol(masm, &check_for_strings, eax, ecx);
     BranchIfNonSymbol(masm, &check_for_strings, edx, ecx);
 
@@ -4574,7 +4523,7 @@
                                          &check_unequal_objects);
 
   // Inline comparison of ASCII strings.
-  if (cc_ == equal) {
+  if (cc == equal) {
     StringCompareStub::GenerateFlatAsciiStringEquals(masm,
                                                      edx,
                                                      eax,
@@ -4593,7 +4542,7 @@
 #endif
 
   __ bind(&check_unequal_objects);
-  if (cc_ == equal && !strict_) {
+  if (cc == equal && !strict()) {
     // Non-strict equality.  Objects are unequal if
     // they are both JSObjects and not undetectable,
     // and their pointers are different.
@@ -4637,11 +4586,11 @@
 
   // Figure out which native to call and setup the arguments.
   Builtins::JavaScript builtin;
-  if (cc_ == equal) {
-    builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+  if (cc == equal) {
+    builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
   } else {
     builtin = Builtins::COMPARE;
-    __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
+    __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
   }
 
   // Restore return address on the stack.
@@ -4650,19 +4599,9 @@
   // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
   // tagged as a small integer.
   __ InvokeBuiltin(builtin, JUMP_FUNCTION);
-}
 
-
-void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
-                                    Label* label,
-                                    Register object,
-                                    Register scratch) {
-  __ JumpIfSmi(object, label);
-  __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
-  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
-  __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
-  __ cmp(scratch, kSymbolTag | kStringTag);
-  __ j(not_equal, label);
+  __ bind(&miss);
+  GenerateMiss(masm);
 }
 
 
@@ -5407,44 +5346,6 @@
 Register InstanceofStub::right() { return edx; }
 
 
-int CompareStub::MinorKey() {
-  // Encode the three parameters in a unique 16 bit value. To avoid duplicate
-  // stubs the never NaN NaN condition is only taken into account if the
-  // condition is equals.
-  ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
-  ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
-  return ConditionField::encode(static_cast<unsigned>(cc_))
-         | RegisterField::encode(false)   // lhs_ and rhs_ are not used
-         | StrictField::encode(strict_)
-         | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
-         | IncludeNumberCompareField::encode(include_number_compare_)
-         | IncludeSmiCompareField::encode(include_smi_compare_);
-}
-
-
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-void CompareStub::PrintName(StringStream* stream) {
-  ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
-  const char* cc_name;
-  switch (cc_) {
-    case less: cc_name = "LT"; break;
-    case greater: cc_name = "GT"; break;
-    case less_equal: cc_name = "LE"; break;
-    case greater_equal: cc_name = "GE"; break;
-    case equal: cc_name = "EQ"; break;
-    case not_equal: cc_name = "NE"; break;
-    default: cc_name = "UnknownCondition"; break;
-  }
-  bool is_equality = cc_ == equal || cc_ == not_equal;
-  stream->Add("CompareStub_%s", cc_name);
-  if (strict_ && is_equality) stream->Add("_STRICT");
-  if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
-  if (!include_number_compare_) stream->Add("_NO_NUMBER");
-  if (!include_smi_compare_) stream->Add("_NO_SMI");
-}
-
-
 // -------------------------------------------------------------------------
 // StringCharCodeAtGenerator
 
@@ -5679,8 +5580,8 @@
   __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx, &call_runtime);
 
   // Get the two characters forming the new string.
-  __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
-  __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
+  __ movzx_b(ebx, FieldOperand(eax, SeqOneByteString::kHeaderSize));
+  __ movzx_b(ecx, FieldOperand(edx, SeqOneByteString::kHeaderSize));
 
   // Try to lookup two character string in symbol table. If it is not found
   // just allocate a new one.
@@ -5697,8 +5598,8 @@
   __ mov(eax, Operand(esp, 2 * kPointerSize));  // First argument.
   __ mov(edx, Operand(esp, 1 * kPointerSize));  // Second argument.
   // Get the two characters forming the new string.
-  __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
-  __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
+  __ movzx_b(ebx, FieldOperand(eax, SeqOneByteString::kHeaderSize));
+  __ movzx_b(ecx, FieldOperand(edx, SeqOneByteString::kHeaderSize));
   __ bind(&make_two_character_string_no_reload);
   __ IncrementCounter(counters->string_add_make_two_char(), 1);
   __ AllocateAsciiString(eax, 2, edi, edx, &call_runtime);
@@ -5706,7 +5607,7 @@
   __ shl(ecx, kBitsPerByte);
   __ or_(ebx, ecx);
   // Set the characters in the new string.
-  __ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx);
+  __ mov_w(FieldOperand(eax, SeqOneByteString::kHeaderSize), ebx);
   __ IncrementCounter(counters->string_add_native(), 1);
   __ ret(2 * kPointerSize);
 
@@ -5748,13 +5649,6 @@
   // edi: second instance type.
   __ test(ecx, Immediate(kAsciiDataHintMask));
   __ j(not_zero, &ascii_data);
-  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
-  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-  __ xor_(edi, ecx);
-  STATIC_ASSERT(kOneByteStringTag != 0 && kAsciiDataHintTag != 0);
-  __ and_(edi, kOneByteStringTag | kAsciiDataHintTag);
-  __ cmp(edi, kOneByteStringTag | kAsciiDataHintTag);
-  __ j(equal, &ascii_data);
   // Allocate a two byte cons string.
   __ AllocateTwoByteConsString(ecx, edi, no_reg, &call_runtime);
   __ jmp(&allocated);
@@ -5780,10 +5674,10 @@
   __ test_b(ecx, kShortExternalStringMask);
   __ j(not_zero, &call_runtime);
   __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
-  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+  STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
   __ jmp(&first_prepared, Label::kNear);
   __ bind(&first_is_sequential);
-  __ add(eax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(eax, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
   __ bind(&first_prepared);
 
   __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
@@ -5801,10 +5695,10 @@
   __ test_b(edi, kShortExternalStringMask);
   __ j(not_zero, &call_runtime);
   __ mov(edx, FieldOperand(edx, ExternalString::kResourceDataOffset));
-  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+  STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
   __ jmp(&second_prepared, Label::kNear);
   __ bind(&second_is_sequential);
-  __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(edx, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
   __ bind(&second_prepared);
 
   // Push the addresses of both strings' first characters onto the stack.
@@ -5825,7 +5719,7 @@
   // eax: result string
   __ mov(ecx, eax);
   // Locate first character of result.
-  __ add(ecx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(ecx, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
   // Load first argument's length and first character location.  Account for
   // values currently on the stack when fetching arguments from it.
   __ mov(edx, Operand(esp, 4 * kPointerSize));
@@ -6135,7 +6029,7 @@
         temp, temp, &next_probe_pop_mask[i]);
 
     // Check if the two characters match.
-    __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
+    __ mov(temp, FieldOperand(candidate, SeqOneByteString::kHeaderSize));
     __ and_(temp, 0x0000ffff);
     __ cmp(chars, temp);
     __ j(equal, &found_in_symbol_table);
@@ -6363,7 +6257,7 @@
   __ j(not_zero, &runtime);
   __ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset));
   // Move the pointer so that offset-wise, it looks like a sequential string.
-  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
   __ sub(edi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
 
   __ bind(&sequential_string);
@@ -6383,12 +6277,12 @@
   __ mov(edx, esi);  // esi used by following code.
   // Locate first character of result.
   __ mov(edi, eax);
-  __ add(edi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(edi, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
   // Load string argument and locate character of sub string start.
   __ pop(esi);
   __ pop(ebx);
   __ SmiUntag(ebx);
-  __ lea(esi, FieldOperand(esi, ebx, times_1, SeqAsciiString::kHeaderSize));
+  __ lea(esi, FieldOperand(esi, ebx, times_1, SeqOneByteString::kHeaderSize));
 
   // eax: result string
   // ecx: result length
@@ -6549,9 +6443,9 @@
   // doesn't need an additional compare.
   __ SmiUntag(length);
   __ lea(left,
-         FieldOperand(left, length, times_1, SeqAsciiString::kHeaderSize));
+         FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
   __ lea(right,
-         FieldOperand(right, length, times_1, SeqAsciiString::kHeaderSize));
+         FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
   __ neg(length);
   Register index = length;  // index = -length;
 
@@ -6606,7 +6500,7 @@
 
 
 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::SMIS);
+  ASSERT(state_ == CompareIC::SMI);
   Label miss;
   __ mov(ecx, edx);
   __ or_(ecx, eax);
@@ -6632,31 +6526,52 @@
 
 
 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+  ASSERT(state_ == CompareIC::HEAP_NUMBER);
 
   Label generic_stub;
   Label unordered, maybe_undefined1, maybe_undefined2;
   Label miss;
-  __ mov(ecx, edx);
-  __ and_(ecx, eax);
-  __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
 
-  __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
-  __ j(not_equal, &maybe_undefined1, Label::kNear);
-  __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
-  __ j(not_equal, &maybe_undefined2, Label::kNear);
+  if (left_ == CompareIC::SMI) {
+    __ JumpIfNotSmi(edx, &miss);
+  }
+  if (right_ == CompareIC::SMI) {
+    __ JumpIfNotSmi(eax, &miss);
+  }
 
   // Inlining the double comparison and falling back to the general compare
-  // stub if NaN is involved or SS2 or CMOV is unsupported.
+  // stub if NaN is involved or SSE2 or CMOV is unsupported.
   if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) {
     CpuFeatures::Scope scope1(SSE2);
     CpuFeatures::Scope scope2(CMOV);
 
-    // Load left and right operand
-    __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+    // Load left and right operand.
+    Label done, left, left_smi, right_smi;
+    __ JumpIfSmi(eax, &right_smi, Label::kNear);
+    __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+           masm->isolate()->factory()->heap_number_map());
+    __ j(not_equal, &maybe_undefined1, Label::kNear);
     __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+    __ jmp(&left, Label::kNear);
+    __ bind(&right_smi);
+    __ mov(ecx, eax);  // Can't clobber eax because we can still jump away.
+    __ SmiUntag(ecx);
+    __ cvtsi2sd(xmm1, ecx);
 
-    // Compare operands
+    __ bind(&left);
+    __ JumpIfSmi(edx, &left_smi, Label::kNear);
+    __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+           masm->isolate()->factory()->heap_number_map());
+    __ j(not_equal, &maybe_undefined2, Label::kNear);
+    __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+    __ jmp(&done);
+    __ bind(&left_smi);
+    __ mov(ecx, edx);  // Can't clobber edx because we can still jump away.
+    __ SmiUntag(ecx);
+    __ cvtsi2sd(xmm0, ecx);
+
+    __ bind(&done);
+    // Compare operands.
     __ ucomisd(xmm0, xmm1);
 
     // Don't base result on EFLAGS when a NaN is involved.
@@ -6670,17 +6585,30 @@
     __ mov(ecx, Immediate(Smi::FromInt(-1)));
     __ cmov(below, eax, ecx);
     __ ret(0);
+  } else {
+    __ mov(ecx, edx);
+    __ and_(ecx, eax);
+    __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
+
+    __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+           masm->isolate()->factory()->heap_number_map());
+    __ j(not_equal, &maybe_undefined1, Label::kNear);
+    __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+           masm->isolate()->factory()->heap_number_map());
+    __ j(not_equal, &maybe_undefined2, Label::kNear);
   }
 
   __ bind(&unordered);
-  CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
   __ bind(&generic_stub);
+  ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
+                     CompareIC::GENERIC);
   __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
 
   __ bind(&maybe_undefined1);
   if (Token::IsOrderedRelationalCompareOp(op_)) {
     __ cmp(eax, Immediate(masm->isolate()->factory()->undefined_value()));
     __ j(not_equal, &miss);
+    __ JumpIfSmi(edx, &unordered);
     __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
     __ j(not_equal, &maybe_undefined2, Label::kNear);
     __ jmp(&unordered);
@@ -6698,7 +6626,7 @@
 
 
 void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::SYMBOLS);
+  ASSERT(state_ == CompareIC::SYMBOL);
   ASSERT(GetCondition() == equal);
 
   // Registers containing left and right operands respectively.
@@ -6743,7 +6671,7 @@
 
 
 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::STRINGS);
+  ASSERT(state_ == CompareIC::STRING);
   Label miss;
 
   bool equality = Token::IsEqualityOp(op_);
@@ -6832,7 +6760,7 @@
 
 
 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::OBJECTS);
+  ASSERT(state_ == CompareIC::OBJECT);
   Label miss;
   __ mov(ecx, edx);
   __ and_(ecx, eax);