Version 3.23.7

Bugfix: dependent code field in AllocationSite was keeping code objects alive even after context death. (Chromium issue 320532)

Fixed data view accessors to throw exceptions on offsets bigger than size_t. (issue 3013)

Performance and stability improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@17859 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index dedd3fd..45c82ae 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,16 @@
+2013-11-19: Version 3.23.7
+
+        Bugfix: dependent code field in AllocationSite was keeping code objects
+        alive even after context death.
+        (Chromium issue 320532)
+
+        Fixed data view accessors to throw exceptions on offsets bigger than
+        size_t.
+        (issue 3013)
+
+        Performance and stability improvements on all platforms.
+
+
 2013-11-18: Version 3.23.6
 
         Limit size of dehoistable array indices.
diff --git a/build/toolchain.gypi b/build/toolchain.gypi
index de41fe0..99f357a 100644
--- a/build/toolchain.gypi
+++ b/build/toolchain.gypi
@@ -376,7 +376,7 @@
         'target_conditions': [
           ['_toolset=="host"', {
             'variables': {
-              'm32flag': '<!((echo | $(echo ${CXX_host:-$(which g++)}) -m32 -E - > /dev/null 2>&1) && echo "-m32" || true)',
+              'm32flag': '<!(($(echo ${CXX_host:-$(which g++)}) -m32 -E - > /dev/null 2>&1 < /dev/null) && echo "-m32" || true)',
             },
             'cflags': [ '<(m32flag)' ],
             'ldflags': [ '<(m32flag)' ],
@@ -386,7 +386,7 @@
           }],
           ['_toolset=="target"', {
             'variables': {
-              'm32flag': '<!((echo | $(echo ${CXX_target:-<(CXX)}) -m32 -E - > /dev/null 2>&1) && echo "-m32" || true)',
+              'm32flag': '<!(($(echo ${CXX_target:-<(CXX)}) -m32 -E - > /dev/null 2>&1 < /dev/null) && echo "-m32" || true)',
               'clang%': 0,
             },
             'conditions': [
@@ -408,14 +408,14 @@
         'target_conditions': [
           ['_toolset=="host"', {
             'variables': {
-              'm64flag': '<!((echo | $(echo ${CXX_host:-$(which g++)}) -m64 -E - > /dev/null 2>&1) && echo "-m64" || true)',
+              'm64flag': '<!(($(echo ${CXX_host:-$(which g++)}) -m64 -E - > /dev/null 2>&1 < /dev/null) && echo "-m64" || true)',
             },
             'cflags': [ '<(m64flag)' ],
             'ldflags': [ '<(m64flag)' ],
           }],
           ['_toolset=="target"', {
             'variables': {
-              'm64flag': '<!((echo | $(echo ${CXX_target:-<(CXX)}) -m64 -E - > /dev/null 2>&1) && echo "-m64" || true)',
+              'm64flag': '<!(($(echo ${CXX_target:-<(CXX)}) -m64 -E - > /dev/null 2>&1 < /dev/null) && echo "-m64" || true)',
             },
             'cflags': [ '<(m64flag)' ],
             'ldflags': [ '<(m64flag)' ],
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index f93a9a6..24c5cca 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -57,6 +57,11 @@
 }
 
 
+int DwVfpRegister::NumReservedRegisters() {
+  return kNumReservedRegisters;
+}
+
+
 int DwVfpRegister::NumAllocatableRegisters() {
   return NumRegisters() - kNumReservedRegisters;
 }
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 55e6f9a..137dd73 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -285,6 +285,7 @@
   // Any code included in the snapshot must be able to run both with 16 or 32
   // registers.
   inline static int NumRegisters();
+  inline static int NumReservedRegisters();
   inline static int NumAllocatableRegisters();
 
   inline static int ToAllocationIndex(DwVfpRegister reg);
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index a3b2a6e..f93b04c 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -827,7 +827,7 @@
   __ mov(r0, r2);
   __ Ret();
   __ bind(&slow);
-  GenerateMiss(masm, MISS);
+  GenerateMiss(masm);
 }
 
 
@@ -856,7 +856,7 @@
   __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
   __ Ret();
   __ bind(&slow);
-  GenerateMiss(masm, MISS);
+  GenerateMiss(masm);
 }
 
 
@@ -887,7 +887,7 @@
 }
 
 
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
   // ---------- S t a t e --------------
   //  -- lr     : return address
   //  -- r0     : key
@@ -900,9 +900,8 @@
   __ Push(r1, r0);
 
   // Perform tail call to the entry.
-  ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
-      ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric), isolate)
-      : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
 
   __ TailCallExternalReference(ref, 2, 1);
 }
@@ -1120,7 +1119,7 @@
   char_at_generator.GenerateSlow(masm, call_helper);
 
   __ bind(&miss);
-  GenerateMiss(masm, MISS);
+  GenerateMiss(masm);
 }
 
 
@@ -1160,11 +1159,11 @@
       1);
 
   __ bind(&slow);
-  GenerateMiss(masm, MISS);
+  GenerateMiss(masm);
 }
 
 
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
   // ---------- S t a t e --------------
   //  -- r0     : value
   //  -- r1     : key
@@ -1175,10 +1174,8 @@
   // Push receiver, key and value for runtime call.
   __ Push(r2, r1, r0);
 
-  ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
-      ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
-                          masm->isolate())
-      : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
   __ TailCallExternalReference(ref, 3, 1);
 }
 
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 53ba309..71ce8f1 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -2579,6 +2579,9 @@
 
 
 LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+  LInstruction* goto_instr = CheckElideControlInstruction(instr);
+  if (goto_instr != NULL) return goto_instr;
+
   return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
 }
 
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index ad5bd76..9060195 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -620,22 +620,26 @@
   // Number of d-regs not known at snapshot time.
   ASSERT(!Serializer::enabled());
   PushSafepointRegisters();
-  sub(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
-                      kDoubleSize));
-  for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
-    vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
+  // Only save allocatable registers.
+  ASSERT(kScratchDoubleReg.is(d15) && kDoubleRegZero.is(d14));
+  ASSERT(DwVfpRegister::NumReservedRegisters() == 2);
+  if (CpuFeatures::IsSupported(VFP32DREGS)) {
+    vstm(db_w, sp, d16, d31);
   }
+  vstm(db_w, sp, d0, d13);
 }
 
 
 void MacroAssembler::PopSafepointRegistersAndDoubles() {
   // Number of d-regs not known at snapshot time.
   ASSERT(!Serializer::enabled());
-  for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
-    vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
+  // Only save allocatable registers.
+  ASSERT(kScratchDoubleReg.is(d15) && kDoubleRegZero.is(d14));
+  ASSERT(DwVfpRegister::NumReservedRegisters() == 2);
+  vldm(ia_w, sp, d0, d13);
+  if (CpuFeatures::IsSupported(VFP32DREGS)) {
+    vldm(ia_w, sp, d16, d31);
   }
-  add(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
-                      kDoubleSize));
   PopSafepointRegisters();
 }
 
@@ -3800,7 +3804,7 @@
   ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
 
   EnumLength(r3, r1);
-  cmp(r3, Operand(Smi::FromInt(Map::kInvalidEnumCache)));
+  cmp(r3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
   b(eq, call_runtime);
 
   jmp(&start);
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 88e220e..2e3a0f8 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -3051,7 +3051,7 @@
 
 
 Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
-    MapHandleList* receiver_maps,
+    TypeHandleList* types,
     CodeHandleList* handlers,
     Handle<Name> name,
     Code::StubType type,
@@ -3063,22 +3063,22 @@
   }
 
   Label number_case;
-  Label* smi_target = HasHeapNumberMap(receiver_maps) ? &number_case : &miss;
+  Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
   __ JumpIfSmi(receiver(), smi_target);
 
   Register map_reg = scratch1();
 
-  int receiver_count = receiver_maps->length();
+  int receiver_count = types->length();
   int number_of_handled_maps = 0;
   __ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
-  Handle<Map> heap_number_map = isolate()->factory()->heap_number_map();
   for (int current = 0; current < receiver_count; ++current) {
-    Handle<Map> map = receiver_maps->at(current);
+    Handle<Type> type = types->at(current);
+    Handle<Map> map = IC::TypeToMap(*type, isolate());
     if (!map->is_deprecated()) {
       number_of_handled_maps++;
-      __ mov(ip, Operand(receiver_maps->at(current)));
+      __ mov(ip, Operand(map));
       __ cmp(map_reg, ip);
-      if (map.is_identical_to(heap_number_map)) {
+      if (type->Is(Type::Number())) {
         ASSERT(!number_case.is_unused());
         __ bind(&number_case);
       }
@@ -3140,12 +3140,12 @@
   //  -- r0     : key
   //  -- r1     : receiver
   // -----------------------------------
-  Label slow, miss_force_generic;
+  Label slow, miss;
 
   Register key = r0;
   Register receiver = r1;
 
-  __ UntagAndJumpIfNotSmi(r2, key, &miss_force_generic);
+  __ UntagAndJumpIfNotSmi(r2, key, &miss);
   __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ LoadFromNumberDictionary(&slow, r4, key, r0, r2, r3, r5);
   __ Ret();
@@ -3163,14 +3163,14 @@
   TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
 
   // Miss case, call the runtime.
-  __ bind(&miss_force_generic);
+  __ bind(&miss);
 
   // ---------- S t a t e --------------
   //  -- lr     : return address
   //  -- r0     : key
   //  -- r1     : receiver
   // -----------------------------------
-  TailCallBuiltin(masm, Builtins::kKeyedLoadIC_MissForceGeneric);
+  TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
 }
 
 
diff --git a/src/builtins.cc b/src/builtins.cc
index 4077272..655b808 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -1351,12 +1351,7 @@
 
 
 static void Generate_KeyedLoadIC_Miss(MacroAssembler* masm) {
-  KeyedLoadIC::GenerateMiss(masm, MISS);
-}
-
-
-static void Generate_KeyedLoadIC_MissForceGeneric(MacroAssembler* masm) {
-  KeyedLoadIC::GenerateMiss(masm, MISS_FORCE_GENERIC);
+  KeyedLoadIC::GenerateMiss(masm);
 }
 
 
@@ -1476,12 +1471,7 @@
 
 
 static void Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
-  KeyedStoreIC::GenerateMiss(masm, MISS);
-}
-
-
-static void Generate_KeyedStoreIC_MissForceGeneric(MacroAssembler* masm) {
-  KeyedStoreIC::GenerateMiss(masm, MISS_FORCE_GENERIC);
+  KeyedStoreIC::GenerateMiss(masm);
 }
 
 
diff --git a/src/builtins.h b/src/builtins.h
index 4f85c38..ec78782 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -119,14 +119,10 @@
                                     Code::kNoExtraICState)              \
   V(KeyedLoadIC_Miss,               BUILTIN, UNINITIALIZED,             \
                                     Code::kNoExtraICState)              \
-  V(KeyedLoadIC_MissForceGeneric,   BUILTIN, UNINITIALIZED,             \
-                                    Code::kNoExtraICState)              \
   V(StoreIC_Miss,                   BUILTIN, UNINITIALIZED,             \
                                     Code::kNoExtraICState)              \
   V(KeyedStoreIC_Miss,              BUILTIN, UNINITIALIZED,             \
                                     Code::kNoExtraICState)              \
-  V(KeyedStoreIC_MissForceGeneric,  BUILTIN, UNINITIALIZED,             \
-                                    Code::kNoExtraICState)              \
   V(LoadIC_Initialize,              LOAD_IC, UNINITIALIZED,             \
                                     Code::kNoExtraICState)              \
   V(LoadIC_PreMonomorphic,          LOAD_IC, PREMONOMORPHIC,            \
diff --git a/src/d8.cc b/src/d8.cc
index d227ac3..eaec7d3 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -1364,6 +1364,9 @@
     } else if (strcmp(argv[i], "--stress-deopt") == 0) {
       options.stress_deopt = true;
       argv[i] = NULL;
+    } else if (strcmp(argv[i], "--mock-arraybuffer-allocator") == 0) {
+      options.mock_arraybuffer_allocator = true;
+      argv[i] = NULL;
     } else if (strcmp(argv[i], "--noalways-opt") == 0) {
       // No support for stressing if we can't use --always-opt.
       options.stress_opt = false;
@@ -1673,6 +1676,19 @@
 };
 
 
+class MockArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
+ public:
+  virtual void* Allocate(size_t) V8_OVERRIDE {
+    return malloc(0);
+  }
+  virtual void* AllocateUninitialized(size_t length) V8_OVERRIDE {
+    return malloc(0);
+  }
+  virtual void Free(void*, size_t) V8_OVERRIDE {
+  }
+};
+
+
 int Shell::Main(int argc, char* argv[]) {
   if (!SetOptions(argc, argv)) return 1;
   v8::V8::InitializeICU();
@@ -1683,7 +1699,12 @@
   SetStandaloneFlagsViaCommandLine();
 #endif
   ShellArrayBufferAllocator array_buffer_allocator;
-  v8::V8::SetArrayBufferAllocator(&array_buffer_allocator);
+  MockArrayBufferAllocator mock_arraybuffer_allocator;
+  if (options.mock_arraybuffer_allocator) {
+    v8::V8::SetArrayBufferAllocator(&mock_arraybuffer_allocator);
+  } else {
+    v8::V8::SetArrayBufferAllocator(&array_buffer_allocator);
+  }
   int result = 0;
   Isolate* isolate = Isolate::GetCurrent();
 #ifndef V8_SHARED
diff --git a/src/d8.h b/src/d8.h
index 411dfdd..8c1687e 100644
--- a/src/d8.h
+++ b/src/d8.h
@@ -233,6 +233,7 @@
      test_shell(false),
      dump_heap_constants(false),
      expected_to_throw(false),
+     mock_arraybuffer_allocator(false),
      num_isolates(1),
      isolate_sources(NULL) { }
 
@@ -258,6 +259,7 @@
   bool test_shell;
   bool dump_heap_constants;
   bool expected_to_throw;
+  bool mock_arraybuffer_allocator;
   int num_isolates;
   SourceGroup* isolate_sources;
 };
diff --git a/src/handles.cc b/src/handles.cc
index b86f19a..2d41402 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -637,7 +637,7 @@
       // present enum cache. The first step to using the cache is to set the
       // enum length of the map by counting the number of own descriptors that
       // are not DONT_ENUM or SYMBOLIC.
-      if (own_property_count == Map::kInvalidEnumCache) {
+      if (own_property_count == kInvalidEnumCacheSentinel) {
         own_property_count = object->map()->NumberOfDescribedProperties(
             OWN_DESCRIPTORS, DONT_SHOW);
 
diff --git a/src/heap.cc b/src/heap.cc
index dfb2478..90bb2cd 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -1784,6 +1784,8 @@
       mark_compact_collector()->is_compacting();
   ProcessArrayBuffers(retainer, record_slots);
   ProcessNativeContexts(retainer, record_slots);
+  // TODO(mvstanton): AllocationSites only need to be processed during
+  // MARK_COMPACT, as they live in old space. Verify and address.
   ProcessAllocationSites(retainer, record_slots);
 }
 
@@ -1889,7 +1891,7 @@
   }
 
   static void VisitLiveObject(Heap* heap,
-                              AllocationSite* array_buffer,
+                              AllocationSite* site,
                               WeakObjectRetainer* retainer,
                               bool record_slots) {}
 
@@ -2482,7 +2484,7 @@
   reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
   reinterpret_cast<Map*>(result)->set_bit_field(0);
   reinterpret_cast<Map*>(result)->set_bit_field2(0);
-  int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
+  int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
                    Map::OwnsDescriptors::encode(true);
   reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
   return result;
@@ -2514,7 +2516,7 @@
   map->set_instance_descriptors(empty_descriptor_array());
   map->set_bit_field(0);
   map->set_bit_field2(1 << Map::kIsExtensible);
-  int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
+  int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
                    Map::OwnsDescriptors::encode(true);
   map->set_bit_field3(bit_field3);
   map->set_elements_kind(elements_kind);
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 38eabda..c47351d 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -1184,6 +1184,20 @@
 }
 
 
+bool HTypeofIsAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+  if (value()->representation().IsSpecialization()) {
+    if (compares_number_type()) {
+      *block = FirstSuccessor();
+    } else {
+      *block = SecondSuccessor();
+    }
+    return true;
+  }
+  *block = NULL;
+  return false;
+}
+
+
 void HCheckMapValue::PrintDataTo(StringStream* stream) {
   value()->PrintNameTo(stream);
   stream->Add(" ");
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 6b92d5b..756ae0d 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -642,6 +642,7 @@
   virtual ~HValue() {}
 
   virtual int position() const { return RelocInfo::kNoPosition; }
+  virtual int operand_position(int index) const { return position(); }
 
   HBasicBlock* block() const { return block_; }
   void SetBlock(HBasicBlock* block);
@@ -1105,6 +1106,102 @@
   }
 
 
+// A helper class to represent per-operand position information attached to
+// the HInstruction in the compact form. Uses tagging to distinguish between
+// case when only instruction's position is available and case when operands'
+// positions are also available.
+// In the first case it contains instruction's position as a tagged value.
+// In the second case it points to an array which contains instruction's
+// position and operands' positions.
+// TODO(vegorov): what we really want to track here is a combination of
+// source position and a script id because cross script inlining can easily
+// result in optimized functions composed of several scripts.
+class HPositionInfo {
+ public:
+  explicit HPositionInfo(int pos) : data_(TagPosition(pos)) { }
+
+  int position() const {
+    if (has_operand_positions()) {
+      return static_cast<int>(operand_positions()[kInstructionPosIndex]);
+    }
+    return static_cast<int>(UntagPosition(data_));
+  }
+
+  void set_position(int pos) {
+    if (has_operand_positions()) {
+      operand_positions()[kInstructionPosIndex] = pos;
+    } else {
+      data_ = TagPosition(pos);
+    }
+  }
+
+  void ensure_storage_for_operand_positions(Zone* zone, int operand_count) {
+    if (has_operand_positions()) {
+      return;
+    }
+
+    const int length = kFirstOperandPosIndex + operand_count;
+    intptr_t* positions =
+        zone->NewArray<intptr_t>(length);
+    for (int i = 0; i < length; i++) {
+      positions[i] = RelocInfo::kNoPosition;
+    }
+
+    const int pos = position();
+    data_ = reinterpret_cast<intptr_t>(positions);
+    set_position(pos);
+
+    ASSERT(has_operand_positions());
+  }
+
+  int operand_position(int idx) const {
+    if (!has_operand_positions()) {
+      return position();
+    }
+    return static_cast<int>(*operand_position_slot(idx));
+  }
+
+  void set_operand_position(int idx, int pos) {
+    *operand_position_slot(idx) = pos;
+  }
+
+ private:
+  static const intptr_t kInstructionPosIndex = 0;
+  static const intptr_t kFirstOperandPosIndex = 1;
+
+  intptr_t* operand_position_slot(int idx) const {
+    ASSERT(has_operand_positions());
+    return &(operand_positions()[kFirstOperandPosIndex + idx]);
+  }
+
+  bool has_operand_positions() const {
+    return !IsTaggedPosition(data_);
+  }
+
+  intptr_t* operand_positions() const {
+    ASSERT(has_operand_positions());
+    return reinterpret_cast<intptr_t*>(data_);
+  }
+
+  static const intptr_t kPositionTag = 1;
+  static const intptr_t kPositionShift = 1;
+  static bool IsTaggedPosition(intptr_t val) {
+    return (val & kPositionTag) != 0;
+  }
+  static intptr_t UntagPosition(intptr_t val) {
+    ASSERT(IsTaggedPosition(val));
+    return val >> kPositionShift;
+  }
+  static intptr_t TagPosition(intptr_t val) {
+    const intptr_t result = (val << kPositionShift) | kPositionTag;
+    ASSERT(UntagPosition(result) == val);
+    return result;
+  }
+
+  intptr_t data_;
+};
+
+
 class HInstruction : public HValue {
  public:
   HInstruction* next() const { return next_; }
@@ -1119,12 +1216,26 @@
   void InsertAfter(HInstruction* previous);
 
   // The position is a write-once variable.
-  virtual int position() const V8_OVERRIDE { return position_; }
-  bool has_position() const { return position_ != RelocInfo::kNoPosition; }
+  virtual int position() const V8_OVERRIDE {
+    return position_.position();
+  }
+  bool has_position() const {
+    return position_.position() != RelocInfo::kNoPosition;
+  }
   void set_position(int position) {
     ASSERT(!has_position());
     ASSERT(position != RelocInfo::kNoPosition);
-    position_ = position;
+    position_.set_position(position);
+  }
+
+  virtual int operand_position(int index) const V8_OVERRIDE {
+    const int pos = position_.operand_position(index);
+    return (pos != RelocInfo::kNoPosition) ? pos : position();
+  }
+  void set_operand_position(Zone* zone, int index, int pos) {
+    ASSERT(0 <= index && index < OperandCount());
+    position_.ensure_storage_for_operand_positions(zone, OperandCount());
+    position_.set_operand_position(index, pos);
   }
 
   bool CanTruncateToInt32() const { return CheckFlag(kTruncatingToInt32); }
@@ -1160,7 +1271,7 @@
 
   HInstruction* next_;
   HInstruction* previous_;
-  int position_;
+  HPositionInfo position_;
 
   friend class HBasicBlock;
 };
@@ -3704,6 +3815,11 @@
     return representation();
   }
 
+  void SetOperandPositions(Zone* zone, int left_pos, int right_pos) {
+    set_operand_position(zone, 1, left_pos);
+    set_operand_position(zone, 2, right_pos);
+  }
+
   DECLARE_ABSTRACT_INSTRUCTION(BinaryOperation)
 
  private:
@@ -4137,6 +4253,11 @@
   }
   virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
 
+  void SetOperandPositions(Zone* zone, int left_pos, int right_pos) {
+    set_operand_position(zone, 0, left_pos);
+    set_operand_position(zone, 1, right_pos);
+  }
+
   DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch)
 
  private:
@@ -4487,20 +4608,27 @@
   DECLARE_INSTRUCTION_FACTORY_P2(HTypeofIsAndBranch, HValue*, Handle<String>);
 
   Handle<String> type_literal() { return type_literal_; }
+  bool compares_number_type() { return compares_number_type_; }
   virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch)
 
   virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
-    return Representation::Tagged();
+    return Representation::None();
   }
 
+  virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+
  private:
   HTypeofIsAndBranch(HValue* value, Handle<String> type_literal)
       : HUnaryControlInstruction(value, NULL, NULL),
-        type_literal_(type_literal) { }
+        type_literal_(type_literal) {
+    Heap* heap = type_literal->GetHeap();
+    compares_number_type_ = type_literal->Equals(heap->number_string());
+  }
 
   Handle<String> type_literal_;
+  bool compares_number_type_ : 1;
 };
 
 
diff --git a/src/hydrogen-representation-changes.cc b/src/hydrogen-representation-changes.cc
index d0c9b58..07fc8be 100644
--- a/src/hydrogen-representation-changes.cc
+++ b/src/hydrogen-representation-changes.cc
@@ -61,8 +61,8 @@
   if (new_value == NULL) {
     new_value = new(graph()->zone()) HChange(
         value, to, is_truncating_to_smi, is_truncating_to_int);
-    if (use_value->position() != RelocInfo::kNoPosition) {
-      new_value->set_position(use_value->position());
+    if (use_value->operand_position(use_index) != RelocInfo::kNoPosition) {
+      new_value->set_position(use_value->operand_position(use_index));
     } else {
       ASSERT(!FLAG_emit_opt_code_positions || !graph()->info()->IsOptimizing());
     }
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index b544904..431242f 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -8052,6 +8052,52 @@
 #undef INLINE_FUNCTION_GENERATOR_ADDRESS
 
 
+void HOptimizedGraphBuilder::VisitDataViewInitialize(
+    CallRuntime* expr) {
+  ZoneList<Expression*>* arguments = expr->arguments();
+
+  NoObservableSideEffectsScope scope(this);
+  ASSERT(arguments->length()== 4);
+  CHECK_ALIVE(VisitForValue(arguments->at(0)));
+  HValue* obj = Pop();
+
+  CHECK_ALIVE(VisitForValue(arguments->at(1)));
+  HValue* buffer = Pop();
+
+  CHECK_ALIVE(VisitForValue(arguments->at(2)));
+  HValue* byte_offset = Pop();
+
+  CHECK_ALIVE(VisitForValue(arguments->at(3)));
+  HValue* byte_length = Pop();
+
+  for (int offset = JSDataView::kSize;
+        offset < JSDataView::kSizeWithInternalFields;
+        offset += kPointerSize) {
+    Add<HStoreNamedField>(obj,
+        HObjectAccess::ForJSObjectOffset(offset),
+        Add<HConstant>(static_cast<int32_t>(0)));
+  }
+
+  Add<HStoreNamedField>(obj,
+      HObjectAccess::ForJSObjectOffset(JSDataView::kBufferOffset), buffer);
+  Add<HStoreNamedField>(obj,
+      HObjectAccess::ForJSObjectOffset(JSDataView::kByteOffsetOffset),
+      byte_offset);
+  Add<HStoreNamedField>(obj,
+      HObjectAccess::ForJSObjectOffset(JSDataView::kByteLengthOffset),
+      byte_length);
+
+  Add<HStoreNamedField>(obj,
+      HObjectAccess::ForJSObjectOffset(JSDataView::kWeakNextOffset),
+      Add<HLoadNamedField>(buffer,
+          HObjectAccess::ForJSObjectOffset(
+            JSArrayBuffer::kWeakFirstViewOffset)));
+  Add<HStoreNamedField>(buffer,
+      HObjectAccess::ForJSObjectOffset(JSArrayBuffer::kWeakFirstViewOffset),
+      obj);
+}
+
+
 void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
@@ -8062,6 +8108,11 @@
 
   const Runtime::Function* function = expr->function();
   ASSERT(function != NULL);
+
+  if (function->function_id == Runtime::kDataViewInitialize) {
+      return VisitDataViewInitialize(expr);
+  }
+
   if (function->intrinsic_type == Runtime::INLINE) {
     ASSERT(expr->name()->length() > 0);
     ASSERT(expr->name()->Get(0) == '_');
@@ -8845,10 +8896,14 @@
 void HOptimizedGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
   CHECK_ALIVE(VisitForValue(expr->left()));
   CHECK_ALIVE(VisitForValue(expr->right()));
-  if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+  SetSourcePosition(expr->position());
   HValue* right = Pop();
   HValue* left = Pop();
   HInstruction* instr = BuildBinaryOperation(expr, left, right);
+  if (FLAG_emit_opt_code_positions && instr->IsBinaryOperation()) {
+    HBinaryOperation::cast(instr)->SetOperandPositions(
+        zone(), expr->left()->position(), expr->right()->position());
+  }
   return ast_context()->ReturnInstruction(instr, expr->id());
 }
 
@@ -8857,7 +8912,7 @@
                                                         Expression* sub_expr,
                                                         Handle<String> check) {
   CHECK_ALIVE(VisitForTypeOf(sub_expr));
-  if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+  SetSourcePosition(expr->position());
   HValue* value = Pop();
   HTypeofIsAndBranch* instr = New<HTypeofIsAndBranch>(value, check);
   return ast_context()->ReturnControl(instr, expr->id());
@@ -8919,6 +8974,8 @@
   CHECK_ALIVE(VisitForValue(expr->left()));
   CHECK_ALIVE(VisitForValue(expr->right()));
 
+  if (FLAG_emit_opt_code_positions) SetSourcePosition(expr->position());
+
   HValue* right = Pop();
   HValue* left = Pop();
   Token::Value op = expr->op();
@@ -8997,6 +9054,10 @@
           AddCheckMap(right, map);
           HCompareObjectEqAndBranch* result =
               New<HCompareObjectEqAndBranch>(left, right);
+          if (FLAG_emit_opt_code_positions) {
+            result->set_operand_position(zone(), 0, expr->left()->position());
+            result->set_operand_position(zone(), 1, expr->right()->position());
+          }
           return ast_context()->ReturnControl(result, expr->id());
         } else {
           BuildCheckHeapObject(left);
@@ -9038,6 +9099,11 @@
       HCompareNumericAndBranch* result =
           New<HCompareNumericAndBranch>(left, right, op);
       result->set_observed_input_representation(left_rep, right_rep);
+      if (FLAG_emit_opt_code_positions) {
+        result->SetOperandPositions(zone(),
+                                    expr->left()->position(),
+                                    expr->right()->position());
+      }
       return ast_context()->ReturnControl(result, expr->id());
     }
   }
@@ -10354,7 +10420,8 @@
       Tag HIR_tag(this, "HIR");
       for (HInstructionIterator it(current); !it.Done(); it.Advance()) {
         HInstruction* instruction = it.Current();
-        int bci = 0;
+        int bci = FLAG_emit_opt_code_positions && instruction->has_position() ?
+            instruction->position() : 0;
         int uses = instruction->UseCount();
         PrintIndent();
         trace_.Add("%d %d ", bci, uses);
@@ -10379,6 +10446,9 @@
             trace_.Add("%d ",
                        LifetimePosition::FromInstructionIndex(i).Value());
             linstr->PrintTo(&trace_);
+            trace_.Add(" [hir:");
+            linstr->hydrogen_value()->PrintNameTo(&trace_);
+            trace_.Add("]");
             trace_.Add(" <|@\n");
           }
         }
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 0ea3b4e..9aa9489 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -2154,6 +2154,8 @@
   bool IsCallNewArrayInlineable(CallNew* expr);
   void BuildInlinedCallNewArray(CallNew* expr);
 
+  void VisitDataViewInitialize(CallRuntime* expr);
+
   class PropertyAccessInfo {
    public:
     PropertyAccessInfo(Isolate* isolate, Handle<Map> map, Handle<String> name)
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 24c6b92..7334320 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -89,8 +89,6 @@
 }
 
 
-// The Probe method needs executable memory, so it uses Heap::CreateCode.
-// Allocation failure is silent and leads to safe default.
 void CpuFeatures::Probe() {
   ASSERT(!initialized_);
   ASSERT(supported_ == 0);
@@ -2069,7 +2067,8 @@
 }
 
 
-void Assembler::andps(XMMRegister dst, XMMRegister src) {
+void Assembler::andps(XMMRegister dst, const Operand& src) {
+  ASSERT(IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   EMIT(0x0F);
   EMIT(0x54);
@@ -2077,7 +2076,8 @@
 }
 
 
-void Assembler::orps(XMMRegister dst, XMMRegister src) {
+void Assembler::orps(XMMRegister dst, const Operand& src) {
+  ASSERT(IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   EMIT(0x0F);
   EMIT(0x56);
@@ -2085,7 +2085,8 @@
 }
 
 
-void Assembler::xorps(XMMRegister dst, XMMRegister src) {
+void Assembler::xorps(XMMRegister dst, const Operand& src) {
+  ASSERT(IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
   EMIT(0x0F);
   EMIT(0x57);
@@ -2093,6 +2094,42 @@
 }
 
 
+void Assembler::addps(XMMRegister dst, const Operand& src) {
+  ASSERT(IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  EMIT(0x0F);
+  EMIT(0x58);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::subps(XMMRegister dst, const Operand& src) {
+  ASSERT(IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  EMIT(0x0F);
+  EMIT(0x5C);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::mulps(XMMRegister dst, const Operand& src) {
+  ASSERT(IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  EMIT(0x0F);
+  EMIT(0x59);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::divps(XMMRegister dst, const Operand& src) {
+  ASSERT(IsEnabled(SSE2));
+  EnsureSpace ensure_space(this);
+  EMIT(0x0F);
+  EMIT(0x5E);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
   ASSERT(IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
@@ -2123,16 +2160,6 @@
 }
 
 
-void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
-  ASSERT(IsEnabled(SSE2));
-  EnsureSpace ensure_space(this);
-  EMIT(0x66);
-  EMIT(0x0F);
-  EMIT(0x2E);
-  emit_sse_operand(dst, src);
-}
-
-
 void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
   ASSERT(IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
@@ -2205,6 +2232,17 @@
 }
 
 
+void Assembler::shufps(XMMRegister dst, XMMRegister src, byte imm8) {
+  ASSERT(IsEnabled(SSE2));
+  ASSERT(is_uint8(imm8));
+  EnsureSpace ensure_space(this);
+  EMIT(0x0F);
+  EMIT(0xC6);
+  emit_sse_operand(dst, src);
+  EMIT(imm8);
+}
+
+
 void Assembler::movdqa(const Operand& dst, XMMRegister src) {
   ASSERT(IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
@@ -2297,16 +2335,6 @@
 }
 
 
-void Assembler::movsd(XMMRegister dst, XMMRegister src) {
-  ASSERT(IsEnabled(SSE2));
-  EnsureSpace ensure_space(this);
-  EMIT(0xF2);
-  EMIT(0x0F);
-  EMIT(0x10);
-  emit_sse_operand(dst, src);
-}
-
-
 void Assembler::movss(const Operand& dst, XMMRegister src ) {
   ASSERT(IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
@@ -2327,16 +2355,6 @@
 }
 
 
-void Assembler::movss(XMMRegister dst, XMMRegister src) {
-  ASSERT(IsEnabled(SSE2));
-  EnsureSpace ensure_space(this);
-  EMIT(0xF3);
-  EMIT(0x0F);
-  EMIT(0x10);
-  emit_sse_operand(dst, src);
-}
-
-
 void Assembler::movd(XMMRegister dst, const Operand& src) {
   ASSERT(IsEnabled(SSE2));
   EnsureSpace ensure_space(this);
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index a82b517..ed7b71f 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -1019,12 +1019,30 @@
   void cpuid();
 
   // SSE instructions
-  void andps(XMMRegister dst, XMMRegister src);
-  void xorps(XMMRegister dst, XMMRegister src);
-  void orps(XMMRegister dst, XMMRegister src);
+  void movaps(XMMRegister dst, XMMRegister src);
+  void shufps(XMMRegister dst, XMMRegister src, byte imm8);
+
+  void andps(XMMRegister dst, const Operand& src);
+  void andps(XMMRegister dst, XMMRegister src) { andps(dst, Operand(src)); }
+  void xorps(XMMRegister dst, const Operand& src);
+  void xorps(XMMRegister dst, XMMRegister src) { xorps(dst, Operand(src)); }
+  void orps(XMMRegister dst, const Operand& src);
+  void orps(XMMRegister dst, XMMRegister src) { orps(dst, Operand(src)); }
+
+  void addps(XMMRegister dst, const Operand& src);
+  void addps(XMMRegister dst, XMMRegister src) { addps(dst, Operand(src)); }
+  void subps(XMMRegister dst, const Operand& src);
+  void subps(XMMRegister dst, XMMRegister src) { subps(dst, Operand(src)); }
+  void mulps(XMMRegister dst, const Operand& src);
+  void mulps(XMMRegister dst, XMMRegister src) { mulps(dst, Operand(src)); }
+  void divps(XMMRegister dst, const Operand& src);
+  void divps(XMMRegister dst, XMMRegister src) { divps(dst, Operand(src)); }
 
   // SSE2 instructions
   void cvttss2si(Register dst, const Operand& src);
+  void cvttss2si(Register dst, XMMRegister src) {
+    cvttss2si(dst, Operand(src));
+  }
   void cvttsd2si(Register dst, const Operand& src);
   void cvtsd2si(Register dst, XMMRegister src);
 
@@ -1045,7 +1063,7 @@
   void andpd(XMMRegister dst, XMMRegister src);
   void orpd(XMMRegister dst, XMMRegister src);
 
-  void ucomisd(XMMRegister dst, XMMRegister src);
+  void ucomisd(XMMRegister dst, XMMRegister src) { ucomisd(dst, Operand(src)); }
   void ucomisd(XMMRegister dst, const Operand& src);
 
   enum RoundingMode {
@@ -1063,8 +1081,6 @@
   void cmpltsd(XMMRegister dst, XMMRegister src);
   void pcmpeqd(XMMRegister dst, XMMRegister src);
 
-  void movaps(XMMRegister dst, XMMRegister src);
-
   void movdqa(XMMRegister dst, const Operand& src);
   void movdqa(const Operand& dst, XMMRegister src);
   void movdqu(XMMRegister dst, const Operand& src);
@@ -1081,14 +1097,14 @@
   void movd(XMMRegister dst, const Operand& src);
   void movd(Register dst, XMMRegister src) { movd(Operand(dst), src); }
   void movd(const Operand& dst, XMMRegister src);
-  void movsd(XMMRegister dst, XMMRegister src);
+  void movsd(XMMRegister dst, XMMRegister src) { movsd(dst, Operand(src)); }
   void movsd(XMMRegister dst, const Operand& src);
   void movsd(const Operand& dst, XMMRegister src);
 
 
   void movss(XMMRegister dst, const Operand& src);
   void movss(const Operand& dst, XMMRegister src);
-  void movss(XMMRegister dst, XMMRegister src);
+  void movss(XMMRegister dst, XMMRegister src) { movss(dst, Operand(src)); }
   void extractps(Register dst, XMMRegister src, byte imm8);
 
   void pand(XMMRegister dst, XMMRegister src);
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index d7b28d5..057a558 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -1042,30 +1042,30 @@
                            NameOfXMMRegister(regop),
                            NameOfXMMRegister(rm));
             data++;
-          } else if (f0byte == 0x54) {
+          } else if (f0byte >= 0x53 && f0byte <= 0x5F) {
+            const char* const pseudo_op[] = {
+              "rcpps",
+              "andps",
+              "andnps",
+              "orps",
+              "xorps",
+              "addps",
+              "mulps",
+              "cvtps2pd",
+              "cvtdq2ps",
+              "subps",
+              "minps",
+              "divps",
+              "maxps",
+            };
+
             data += 2;
             int mod, regop, rm;
             get_modrm(*data, &mod, &regop, &rm);
-            AppendToBuffer("andps %s,%s",
-                           NameOfXMMRegister(regop),
-                           NameOfXMMRegister(rm));
-            data++;
-          } else if (f0byte == 0x56) {
-            data += 2;
-            int mod, regop, rm;
-            get_modrm(*data, &mod, &regop, &rm);
-            AppendToBuffer("orps %s,%s",
-                           NameOfXMMRegister(regop),
-                           NameOfXMMRegister(rm));
-            data++;
-          } else if (f0byte == 0x57) {
-            data += 2;
-            int mod, regop, rm;
-            get_modrm(*data, &mod, &regop, &rm);
-            AppendToBuffer("xorps %s,%s",
-                           NameOfXMMRegister(regop),
-                           NameOfXMMRegister(rm));
-            data++;
+            AppendToBuffer("%s %s,",
+                           pseudo_op[f0byte - 0x53],
+                           NameOfXMMRegister(regop));
+            data += PrintRightXMMOperand(data);
           } else if (f0byte == 0x50) {
             data += 2;
             int mod, regop, rm;
@@ -1074,6 +1074,17 @@
                            NameOfCPURegister(regop),
                            NameOfXMMRegister(rm));
             data++;
+          } else if (f0byte== 0xC6) {
+            // shufps xmm, xmm/m128, imm8
+            data += 2;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            int8_t imm8 = static_cast<int8_t>(data[1]);
+            AppendToBuffer("shufps %s,%s,%d",
+                            NameOfXMMRegister(rm),
+                            NameOfXMMRegister(regop),
+                            static_cast<int>(imm8));
+            data += 2;
           } else if ((f0byte & 0xF0) == 0x80) {
             data += JumpConditional(data, branch_hint);
           } else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 ||
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 8a51fbc..24cb13f 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -1114,7 +1114,7 @@
   STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
   __ CmpObjectType(ecx, LAST_JS_PROXY_TYPE, ecx);
   __ j(above, &non_proxy);
-  __ mov(ebx, Immediate(Smi::FromInt(0)));  // Zero indicates proxy
+  __ Set(ebx, Immediate(Smi::FromInt(0)));  // Zero indicates proxy
   __ bind(&non_proxy);
   __ push(ebx);  // Smi
   __ push(eax);  // Array
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index dab9dd7..53742dc 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -611,7 +611,7 @@
   char_at_generator.GenerateSlow(masm, call_helper);
 
   __ bind(&miss);
-  GenerateMiss(masm, MISS);
+  GenerateMiss(masm);
 }
 
 
@@ -653,7 +653,7 @@
   __ TailCallExternalReference(ref, 2, 1);
 
   __ bind(&slow);
-  GenerateMiss(masm, MISS);
+  GenerateMiss(masm);
 }
 
 
@@ -678,7 +678,7 @@
   __ mov(eax, unmapped_location);
   __ Ret();
   __ bind(&slow);
-  GenerateMiss(masm, MISS);
+  GenerateMiss(masm);
 }
 
 
@@ -707,7 +707,7 @@
   __ RecordWrite(ebx, edi, edx, kDontSaveFPRegs);
   __ Ret();
   __ bind(&slow);
-  GenerateMiss(masm, MISS);
+  GenerateMiss(masm);
 }
 
 
@@ -1396,7 +1396,7 @@
 }
 
 
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- ecx    : key
   //  -- edx    : receiver
@@ -1411,10 +1411,8 @@
   __ push(ebx);  // return address
 
   // Perform tail call to the entry.
-  ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
-      ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric),
-                          masm->isolate())
-      : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
   __ TailCallExternalReference(ref, 2, 1);
 }
 
@@ -1551,7 +1549,7 @@
 }
 
 
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax    : value
   //  -- ecx    : key
@@ -1566,10 +1564,8 @@
   __ push(ebx);
 
   // Do tail-call to runtime routine.
-  ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
-      ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
-                          masm->isolate())
-      : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
   __ TailCallExternalReference(ref, 3, 1);
 }
 
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index fd70b77..2a118a9 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -160,7 +160,7 @@
 
     if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
       // Move state of dynamic frame alignment into edx.
-      __ mov(edx, Immediate(kNoAlignmentPadding));
+      __ Set(edx, Immediate(kNoAlignmentPadding));
 
       Label do_not_pad, align_loop;
       STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
@@ -316,7 +316,7 @@
   osr_pc_offset_ = masm()->pc_offset();
 
     // Move state of dynamic frame alignment into edx.
-  __ mov(edx, Immediate(kNoAlignmentPadding));
+  __ Set(edx, Immediate(kNoAlignmentPadding));
 
   if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
     Label do_not_pad, align_loop;
@@ -6056,7 +6056,7 @@
   // TODO(3095996): Get rid of this. For now, we need to make the
   // result register contain a valid pointer because it is already
   // contained in the register pointer map.
-  __ mov(result, Immediate(Smi::FromInt(0)));
+  __ Set(result, Immediate(Smi::FromInt(0)));
 
   PushSafepointRegistersScope scope(this);
   if (instr->size()->IsRegister()) {
@@ -6178,43 +6178,48 @@
 
 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
   Register input = ToRegister(instr->value());
-
-  Condition final_branch_condition =
-      EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
-          input, instr->type_literal());
+  Condition final_branch_condition = EmitTypeofIs(instr, input);
   if (final_branch_condition != no_condition) {
     EmitBranch(instr, final_branch_condition);
   }
 }
 
 
-Condition LCodeGen::EmitTypeofIs(Label* true_label,
-                                 Label* false_label,
-                                 Register input,
-                                 Handle<String> type_name) {
+Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
+  Label* true_label = instr->TrueLabel(chunk_);
+  Label* false_label = instr->FalseLabel(chunk_);
+  Handle<String> type_name = instr->type_literal();
+  int left_block = instr->TrueDestination(chunk_);
+  int right_block = instr->FalseDestination(chunk_);
+  int next_block = GetNextEmittedBlock();
+
+  Label::Distance true_distance = left_block == next_block ? Label::kNear
+                                                           : Label::kFar;
+  Label::Distance false_distance = right_block == next_block ? Label::kNear
+                                                             : Label::kFar;
   Condition final_branch_condition = no_condition;
   if (type_name->Equals(heap()->number_string())) {
-    __ JumpIfSmi(input, true_label);
+    __ JumpIfSmi(input, true_label, true_distance);
     __ cmp(FieldOperand(input, HeapObject::kMapOffset),
            factory()->heap_number_map());
     final_branch_condition = equal;
 
   } else if (type_name->Equals(heap()->string_string())) {
-    __ JumpIfSmi(input, false_label);
+    __ JumpIfSmi(input, false_label, false_distance);
     __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
-    __ j(above_equal, false_label);
+    __ j(above_equal, false_label, false_distance);
     __ test_b(FieldOperand(input, Map::kBitFieldOffset),
               1 << Map::kIsUndetectable);
     final_branch_condition = zero;
 
   } else if (type_name->Equals(heap()->symbol_string())) {
-    __ JumpIfSmi(input, false_label);
+    __ JumpIfSmi(input, false_label, false_distance);
     __ CmpObjectType(input, SYMBOL_TYPE, input);
     final_branch_condition = equal;
 
   } else if (type_name->Equals(heap()->boolean_string())) {
     __ cmp(input, factory()->true_value());
-    __ j(equal, true_label);
+    __ j(equal, true_label, true_distance);
     __ cmp(input, factory()->false_value());
     final_branch_condition = equal;
 
@@ -6224,8 +6229,8 @@
 
   } else if (type_name->Equals(heap()->undefined_string())) {
     __ cmp(input, factory()->undefined_value());
-    __ j(equal, true_label);
-    __ JumpIfSmi(input, false_label);
+    __ j(equal, true_label, true_distance);
+    __ JumpIfSmi(input, false_label, false_distance);
     // Check for undetectable objects => true.
     __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
     __ test_b(FieldOperand(input, Map::kBitFieldOffset),
@@ -6234,29 +6239,29 @@
 
   } else if (type_name->Equals(heap()->function_string())) {
     STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
-    __ JumpIfSmi(input, false_label);
+    __ JumpIfSmi(input, false_label, false_distance);
     __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
-    __ j(equal, true_label);
+    __ j(equal, true_label, true_distance);
     __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
     final_branch_condition = equal;
 
   } else if (type_name->Equals(heap()->object_string())) {
-    __ JumpIfSmi(input, false_label);
+    __ JumpIfSmi(input, false_label, false_distance);
     if (!FLAG_harmony_typeof) {
       __ cmp(input, factory()->null_value());
-      __ j(equal, true_label);
+      __ j(equal, true_label, true_distance);
     }
     __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
-    __ j(below, false_label);
+    __ j(below, false_label, false_distance);
     __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
-    __ j(above, false_label);
+    __ j(above, false_label, false_distance);
     // Check for undetectable objects => false.
     __ test_b(FieldOperand(input, Map::kBitFieldOffset),
               1 << Map::kIsUndetectable);
     final_branch_condition = zero;
 
   } else {
-    __ jmp(false_label);
+    __ jmp(false_label, false_distance);
   }
   return final_branch_condition;
 }
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 514b42e..52ca07a 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -347,10 +347,7 @@
   // Emits optimized code for typeof x == "y".  Modifies input register.
   // Returns the condition on which a final split to
   // true and false label should be made, to optimize fallthrough.
-  Condition EmitTypeofIs(Label* true_label,
-                         Label* false_label,
-                         Register input,
-                         Handle<String> type_name);
+  Condition EmitTypeofIs(LTypeofIsAndBranch* instr, Register input);
 
   // Emits optimized code for %_IsObject(x).  Preserves input register.
   // Returns the condition on which a final split to
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 278d600..5a4a3bc 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -2656,6 +2656,8 @@
 
 
 LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+  LInstruction* goto_instr = CheckElideControlInstruction(instr);
+  if (goto_instr != NULL) return goto_instr;
   return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
 }
 
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index a46f8d9..914a4c2 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -3546,7 +3546,7 @@
   mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
 
   EnumLength(edx, ebx);
-  cmp(edx, Immediate(Smi::FromInt(Map::kInvalidEnumCache)));
+  cmp(edx, Immediate(Smi::FromInt(kInvalidEnumCacheSentinel)));
   j(equal, call_runtime);
 
   jmp(&start);
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index b839333..a3be0a7 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -3156,7 +3156,7 @@
 
 
 Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
-    MapHandleList* receiver_maps,
+    TypeHandleList* types,
     CodeHandleList* handlers,
     Handle<Name> name,
     Code::StubType type,
@@ -3168,20 +3168,20 @@
   }
 
   Label number_case;
-  Label* smi_target = HasHeapNumberMap(receiver_maps) ? &number_case : &miss;
+  Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
   __ JumpIfSmi(receiver(), smi_target);
 
   Register map_reg = scratch1();
   __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
-  int receiver_count = receiver_maps->length();
+  int receiver_count = types->length();
   int number_of_handled_maps = 0;
-  Handle<Map> heap_number_map = isolate()->factory()->heap_number_map();
   for (int current = 0; current < receiver_count; ++current) {
-    Handle<Map> map = receiver_maps->at(current);
+    Handle<Type> type = types->at(current);
+    Handle<Map> map = IC::TypeToMap(*type, isolate());
     if (!map->is_deprecated()) {
       number_of_handled_maps++;
       __ cmp(map_reg, map);
-      if (map.is_identical_to(heap_number_map)) {
+      if (type->Is(Type::Number())) {
         ASSERT(!number_case.is_unused());
         __ bind(&number_case);
       }
@@ -3211,11 +3211,11 @@
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
-  Label slow, miss_force_generic;
+  Label slow, miss;
 
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
-  __ JumpIfNotSmi(ecx, &miss_force_generic);
+  __ JumpIfNotSmi(ecx, &miss);
   __ mov(ebx, ecx);
   __ SmiUntag(ebx);
   __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
@@ -3238,13 +3238,13 @@
   // -----------------------------------
   TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
 
-  __ bind(&miss_force_generic);
+  __ bind(&miss);
   // ----------- S t a t e -------------
   //  -- ecx    : key
   //  -- edx    : receiver
   //  -- esp[0] : return address
   // -----------------------------------
-  TailCallBuiltin(masm, Builtins::kKeyedLoadIC_MissForceGeneric);
+  TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
 }
 
 
diff --git a/src/ic-inl.h b/src/ic-inl.h
index d1c31c0..bd45c3e 100644
--- a/src/ic-inl.h
+++ b/src/ic-inl.h
@@ -120,6 +120,39 @@
 }
 
 
+InlineCacheHolderFlag IC::GetCodeCacheFlag(Type* type) {
+  if (type->Is(Type::Boolean()) ||
+      type->Is(Type::Number()) ||
+      type->Is(Type::String()) ||
+      type->Is(Type::Symbol())) {
+    return PROTOTYPE_MAP;
+  }
+  return OWN_MAP;
+}
+
+
+Handle<Map> IC::GetCodeCacheHolder(InlineCacheHolderFlag flag,
+                                   Type* type,
+                                   Isolate* isolate) {
+  if (flag == PROTOTYPE_MAP) {
+    Context* context = isolate->context()->native_context();
+    JSFunction* constructor;
+    if (type->Is(Type::Boolean())) {
+      constructor = context->boolean_function();
+    } else if (type->Is(Type::Number())) {
+      constructor = context->number_function();
+    } else if (type->Is(Type::String())) {
+      constructor = context->string_function();
+    } else {
+      ASSERT(type->Is(Type::Symbol()));
+      constructor = context->symbol_function();
+    }
+    return handle(JSObject::cast(constructor->instance_prototype())->map());
+  }
+  return type->AsClass();
+}
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_IC_INL_H_
diff --git a/src/ic.cc b/src/ic.cc
index 2f7db26..9b30405 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -783,7 +783,7 @@
       : Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate())),
                          isolate());
 
-  PatchCache(cache_object, name, code);
+  PatchCache(handle(Type::CurrentOf(cache_object), isolate()), name, code);
   TRACE_IC("CallIC", name);
 }
 
@@ -967,79 +967,94 @@
 }
 
 
-bool IC::UpdatePolymorphicIC(Handle<Object> receiver,
+bool IC::UpdatePolymorphicIC(Handle<Type> type,
                              Handle<String> name,
                              Handle<Code> code) {
   if (!code->is_handler()) return false;
-  MapHandleList receiver_maps;
+  TypeHandleList types;
   CodeHandleList handlers;
 
-  int number_of_valid_maps;
+  int number_of_valid_types;
   int handler_to_overwrite = -1;
-  Handle<Map> new_receiver_map(receiver->GetMarkerMap(isolate()));
 
-  target()->FindAllMaps(&receiver_maps);
-  int number_of_maps = receiver_maps.length();
-  number_of_valid_maps = number_of_maps;
+  target()->FindAllTypes(&types);
+  int number_of_types = types.length();
+  number_of_valid_types = number_of_types;
 
-  for (int i = 0; i < number_of_maps; i++) {
-    Handle<Map> map = receiver_maps.at(i);
-    // Filter out deprecated maps to ensure its instances get migrated.
-    if (map->is_deprecated()) {
-      number_of_valid_maps--;
-    // If the receiver map is already in the polymorphic IC, this indicates
+  for (int i = 0; i < number_of_types; i++) {
+    Handle<Type> current_type = types.at(i);
+    // Filter out deprecated maps to ensure their instances get migrated.
+    if (current_type->IsClass() && current_type->AsClass()->is_deprecated()) {
+      number_of_valid_types--;
+    // If the receiver type is already in the polymorphic IC, this indicates
     // there was a prototoype chain failure. In that case, just overwrite the
     // handler.
-    } else if (map.is_identical_to(new_receiver_map)) {
-      number_of_valid_maps--;
+    } else if (type->Is(current_type)) {
+      ASSERT(handler_to_overwrite == -1);
+      number_of_valid_types--;
       handler_to_overwrite = i;
     }
   }
 
-  if (number_of_valid_maps >= 4) return false;
-  if (number_of_maps == 0) return false;
+  if (number_of_valid_types >= 4) return false;
+  if (number_of_types == 0) return false;
+  if (!target()->FindHandlers(&handlers, types.length())) return false;
 
-  if (!target()->FindHandlers(&handlers, receiver_maps.length())) {
-    return false;
-  }
-
-  number_of_valid_maps++;
+  number_of_valid_types++;
   if (handler_to_overwrite >= 0) {
     handlers.Set(handler_to_overwrite, code);
   } else {
-    receiver_maps.Add(new_receiver_map);
+    types.Add(type);
     handlers.Add(code);
   }
 
   Handle<Code> ic = isolate()->stub_cache()->ComputePolymorphicIC(
-      &receiver_maps, &handlers, number_of_valid_maps, name, strict_mode());
+      &types, &handlers, number_of_valid_types, name, strict_mode());
   set_target(*ic);
   return true;
 }
 
 
-void IC::UpdateMonomorphicIC(Handle<Object> receiver,
+Handle<Map> IC::TypeToMap(Type* type, Isolate* isolate) {
+  if (type->Is(Type::Number())) return isolate->factory()->heap_number_map();
+  if (type->Is(Type::Boolean())) return isolate->factory()->oddball_map();
+  ASSERT(type->IsClass());
+  return type->AsClass();
+}
+
+
+Type* IC::MapToType(Handle<Map> map) {
+  if (map->instance_type() == HEAP_NUMBER_TYPE) return Type::Number();
+  // The only oddballs that can be recorded in ICs are booleans.
+  if (map->instance_type() == ODDBALL_TYPE) return Type::Boolean();
+  return Type::Class(map);
+}
+
+
+void IC::UpdateMonomorphicIC(Handle<Type> type,
                              Handle<Code> handler,
                              Handle<String> name) {
   if (!handler->is_handler()) return set_target(*handler);
   Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicIC(
-      name, receiver, handler, strict_mode());
+      name, type, handler, strict_mode());
   set_target(*ic);
 }
 
 
 void IC::CopyICToMegamorphicCache(Handle<String> name) {
-  MapHandleList receiver_maps;
+  TypeHandleList types;
   CodeHandleList handlers;
-  target()->FindAllMaps(&receiver_maps);
-  if (!target()->FindHandlers(&handlers, receiver_maps.length())) return;
-  for (int i = 0; i < receiver_maps.length(); i++) {
-    UpdateMegamorphicCache(*receiver_maps.at(i), *name, *handlers.at(i));
+  target()->FindAllTypes(&types);
+  if (!target()->FindHandlers(&handlers, types.length())) return;
+  for (int i = 0; i < types.length(); i++) {
+    UpdateMegamorphicCache(*types.at(i), *name, *handlers.at(i));
   }
 }
 
 
-bool IC::IsTransitionedMapOfMonomorphicTarget(Map* receiver_map) {
+bool IC::IsTransitionOfMonomorphicTarget(Type* type) {
+  if (!type->IsClass()) return false;
+  Map* receiver_map = *type->AsClass();
   Map* current_map = target()->FindFirstMap();
   ElementsKind receiver_elements_kind = receiver_map->elements_kind();
   bool more_general_transition =
@@ -1053,14 +1068,14 @@
 }
 
 
-void IC::PatchCache(Handle<Object> object,
+void IC::PatchCache(Handle<Type> type,
                     Handle<String> name,
                     Handle<Code> code) {
   switch (state()) {
     case UNINITIALIZED:
     case PREMONOMORPHIC:
     case MONOMORPHIC_PROTOTYPE_FAILURE:
-      UpdateMonomorphicIC(object, code, name);
+      UpdateMonomorphicIC(type, code, name);
       break;
     case MONOMORPHIC: {
       // For now, call stubs are allowed to rewrite to the same stub. This
@@ -1069,23 +1084,21 @@
              target()->is_keyed_call_stub() ||
              !target().is_identical_to(code));
       Code* old_handler = target()->FindFirstHandler();
-      if (old_handler == *code &&
-          IsTransitionedMapOfMonomorphicTarget(
-              object->GetMarkerMap(isolate()))) {
-        UpdateMonomorphicIC(object, code, name);
+      if (old_handler == *code && IsTransitionOfMonomorphicTarget(*type)) {
+        UpdateMonomorphicIC(type, code, name);
         break;
       }
       // Fall through.
     }
     case POLYMORPHIC:
       if (!target()->is_keyed_stub()) {
-        if (UpdatePolymorphicIC(object, name, code)) break;
+        if (UpdatePolymorphicIC(type, name, code)) break;
         CopyICToMegamorphicCache(name);
       }
       set_target(*megamorphic_stub());
       // Fall through.
     case MEGAMORPHIC:
-      UpdateMegamorphicCache(object->GetMarkerMap(isolate()), *name, *code);
+      UpdateMegamorphicCache(*type, *name, *code);
       break;
     case DEBUG_STUB:
       break;
@@ -1135,14 +1148,15 @@
     code = ComputeHandler(lookup, object, name);
   }
 
-  PatchCache(object, name, code);
+  PatchCache(handle(Type::CurrentOf(object), isolate()), name, code);
   TRACE_IC("LoadIC", name);
 }
 
 
-void IC::UpdateMegamorphicCache(Map* map, Name* name, Code* code) {
+void IC::UpdateMegamorphicCache(Type* type, Name* name, Code* code) {
   // Cache code holding map should be consistent with
   // GenerateMonomorphicCacheProbe.
+  Map* map = *TypeToMap(type, isolate());
   isolate()->stub_cache()->Set(name, map, code);
 }
 
@@ -1364,13 +1378,6 @@
 }
 
 
-MaybeObject* KeyedLoadIC::LoadForceGeneric(Handle<Object> object,
-                                           Handle<Object> key) {
-  set_target(*generic_stub());
-  return Runtime::GetObjectPropertyOrFail(isolate(), object, key);
-}
-
-
 MaybeObject* KeyedLoadIC::Load(Handle<Object> object, Handle<Object> key) {
   if (MigrateDeprecated(object)) {
     return Runtime::GetObjectPropertyOrFail(isolate(), object, key);
@@ -1605,7 +1612,7 @@
 
   Handle<Code> code = ComputeHandler(lookup, receiver, name, value);
 
-  PatchCache(receiver, name, code);
+  PatchCache(handle(Type::CurrentOf(receiver), isolate()), name, code);
   TRACE_IC("StoreIC", name);
 }
 
@@ -1751,7 +1758,7 @@
       transitioned_receiver_map =
           ComputeTransitionedMap(receiver, store_mode);
     }
-    if (IsTransitionedMapOfMonomorphicTarget(*transitioned_receiver_map)) {
+    if (IsTransitionOfMonomorphicTarget(MapToType(transitioned_receiver_map))) {
       // Element family is the same, use the "worst" case map.
       store_mode = GetNonTransitioningStoreMode(store_mode);
       return isolate()->stub_cache()->ComputeKeyedStoreElement(
@@ -1949,20 +1956,6 @@
 }
 
 
-MaybeObject* KeyedStoreIC::StoreForceGeneric(Handle<Object> object,
-                                             Handle<Object> key,
-                                             Handle<Object> value) {
-  set_target(*generic_stub());
-  Handle<Object> result = Runtime::SetObjectProperty(isolate(), object,
-                                                     key,
-                                                     value,
-                                                     NONE,
-                                                     strict_mode());
-  RETURN_IF_EMPTY_HANDLE(isolate(), result);
-  return *result;
-}
-
-
 MaybeObject* KeyedStoreIC::Store(Handle<Object> object,
                                  Handle<Object> key,
                                  Handle<Object> value) {
@@ -2133,17 +2126,6 @@
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissForceGeneric) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
-  KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate);
-  Handle<Object> receiver = args.at<Object>(0);
-  Handle<Object> key = args.at<Object>(1);
-  ic.UpdateState(receiver, key);
-  return ic.LoadForceGeneric(receiver, key);
-}
-
-
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(MaybeObject*, StoreIC_Miss) {
   HandleScope scope(isolate);
@@ -2320,17 +2302,6 @@
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissForceGeneric) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
-  KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
-  Handle<Object> receiver = args.at<Object>(0);
-  Handle<Object> key = args.at<Object>(1);
-  ic.UpdateState(receiver, key);
-  return ic.StoreForceGeneric(receiver, key, args.at<Object>(2));
-}
-
-
 RUNTIME_FUNCTION(MaybeObject*, ElementsTransitionAndStoreIC_Miss) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 4);
diff --git a/src/ic.h b/src/ic.h
index cc63767..7113b0b 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -40,7 +40,6 @@
 #define IC_UTIL_LIST(ICU)                             \
   ICU(LoadIC_Miss)                                    \
   ICU(KeyedLoadIC_Miss)                               \
-  ICU(KeyedLoadIC_MissForceGeneric)                   \
   ICU(CallIC_Miss)                                    \
   ICU(KeyedCallIC_Miss)                               \
   ICU(StoreIC_Miss)                                   \
@@ -48,7 +47,6 @@
   ICU(StoreIC_Slow)                                   \
   ICU(SharedStoreIC_ExtendStorage)                    \
   ICU(KeyedStoreIC_Miss)                              \
-  ICU(KeyedStoreIC_MissForceGeneric)                  \
   ICU(KeyedStoreIC_Slow)                              \
   /* Utilities for IC stubs. */                       \
   ICU(StoreCallbackProperty)                          \
@@ -150,11 +148,24 @@
                                                Object* object,
                                                InlineCacheHolderFlag holder);
 
+  static inline InlineCacheHolderFlag GetCodeCacheFlag(Type* type);
+  static inline Handle<Map> GetCodeCacheHolder(InlineCacheHolderFlag flag,
+                                               Type* type,
+                                               Isolate* isolate);
+
   static bool IsCleared(Code* code) {
     InlineCacheState state = code->ic_state();
     return state == UNINITIALIZED || state == PREMONOMORPHIC;
   }
 
+  // Utility functions to convert maps to types and back. There are two special
+  // cases:
+  // - The heap_number_map is used as a marker which includes heap numbers as
+  //   well as smis.
+  // - The oddball map is only used for booleans.
+  static Handle<Map> TypeToMap(Type* type, Isolate* isolate);
+  static Type* MapToType(Handle<Map> type);
+
  protected:
   // Get the call-site target; used for determining the state.
   Handle<Code> target() const { return target_; }
@@ -206,20 +217,22 @@
     UNREACHABLE();
     return Handle<Code>::null();
   }
-  void UpdateMonomorphicIC(Handle<Object> receiver,
+
+  void UpdateMonomorphicIC(Handle<Type> type,
                            Handle<Code> handler,
                            Handle<String> name);
 
-  bool UpdatePolymorphicIC(Handle<Object> receiver,
+  bool UpdatePolymorphicIC(Handle<Type> type,
                            Handle<String> name,
                            Handle<Code> code);
 
+  virtual void UpdateMegamorphicCache(Type* type, Name* name, Code* code);
+
   void CopyICToMegamorphicCache(Handle<String> name);
-  bool IsTransitionedMapOfMonomorphicTarget(Map* receiver_map);
-  void PatchCache(Handle<Object> object,
+  bool IsTransitionOfMonomorphicTarget(Type* type);
+  void PatchCache(Handle<Type> type,
                   Handle<String> name,
                   Handle<Code> code);
-  virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code);
   virtual Code::Kind kind() const {
     UNREACHABLE();
     return Code::STUB;
@@ -470,12 +483,6 @@
 };
 
 
-enum ICMissMode {
-  MISS_FORCE_GENERIC,
-  MISS
-};
-
-
 class KeyedLoadIC: public LoadIC {
  public:
   explicit KeyedLoadIC(FrameDepth depth, Isolate* isolate)
@@ -483,20 +490,15 @@
     ASSERT(target()->is_keyed_load_stub());
   }
 
-  MUST_USE_RESULT MaybeObject* LoadForceGeneric(Handle<Object> object,
-                                                Handle<Object> key);
-
   MUST_USE_RESULT MaybeObject* Load(Handle<Object> object,
                                     Handle<Object> key);
 
   // Code generator routines.
-  static void GenerateMiss(MacroAssembler* masm, ICMissMode force_generic);
+  static void GenerateMiss(MacroAssembler* masm);
   static void GenerateRuntimeGetProperty(MacroAssembler* masm);
-  static void GenerateInitialize(MacroAssembler* masm) {
-    GenerateMiss(masm, MISS);
-  }
+  static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
   static void GeneratePreMonomorphic(MacroAssembler* masm) {
-    GenerateMiss(masm, MISS);
+    GenerateMiss(masm);
   }
   static void GenerateGeneric(MacroAssembler* masm);
   static void GenerateString(MacroAssembler* masm);
@@ -525,7 +527,7 @@
     return isolate()->builtins()->KeyedLoadIC_Slow();
   }
 
-  virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code) { }
+  virtual void UpdateMegamorphicCache(Type* type, Name* name, Code* code) { }
 
  private:
   // Stub accessors.
@@ -687,21 +689,16 @@
     ASSERT(target()->is_keyed_store_stub());
   }
 
-  MUST_USE_RESULT MaybeObject* StoreForceGeneric(Handle<Object> object,
-                                                 Handle<Object> name,
-                                                 Handle<Object> value);
   MUST_USE_RESULT MaybeObject* Store(Handle<Object> object,
                                      Handle<Object> name,
                                      Handle<Object> value);
 
   // Code generators for stub routines.  Only called once at startup.
-  static void GenerateInitialize(MacroAssembler* masm) {
-    GenerateMiss(masm, MISS);
-  }
+  static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
   static void GeneratePreMonomorphic(MacroAssembler* masm) {
-    GenerateMiss(masm, MISS);
+    GenerateMiss(masm);
   }
-  static void GenerateMiss(MacroAssembler* masm, ICMissMode force_generic);
+  static void GenerateMiss(MacroAssembler* masm);
   static void GenerateSlow(MacroAssembler* masm);
   static void GenerateRuntimeSetProperty(MacroAssembler* masm,
                                          StrictModeFlag strict_mode);
@@ -711,7 +708,7 @@
  protected:
   virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; }
 
-  virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code) { }
+  virtual void UpdateMegamorphicCache(Type* type, Name* name, Code* code) { }
 
   virtual Handle<Code> pre_monomorphic_stub() {
     return pre_monomorphic_stub(isolate(), strict_mode());
diff --git a/src/list.h b/src/list.h
index 41666de..ea67b8b 100644
--- a/src/list.h
+++ b/src/list.h
@@ -197,11 +197,13 @@
 };
 
 class Map;
+class Type;
 class Code;
 template<typename T> class Handle;
 typedef List<Map*> MapList;
 typedef List<Code*> CodeList;
 typedef List<Handle<Map> > MapHandleList;
+typedef List<Handle<Type> > TypeHandleList;
 typedef List<Handle<Code> > CodeHandleList;
 
 // Perform binary search for an element in an already sorted
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index b82149e..6d03d43 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -2541,6 +2541,17 @@
     }
   }
 
+  // Iterate over allocation sites, removing dependent code that is not
+  // otherwise kept alive by strong references.
+  Object* undefined = heap()->undefined_value();
+  for (Object* site = heap()->allocation_sites_list();
+       site != undefined;
+       site = AllocationSite::cast(site)->weak_next()) {
+    if (IsMarked(site)) {
+      ClearNonLiveDependentCode(AllocationSite::cast(site)->dependent_code());
+    }
+  }
+
   if (heap_->weak_object_to_code_table()->IsHashTable()) {
     WeakHashTable* table =
         WeakHashTable::cast(heap_->weak_object_to_code_table());
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
index c7e1a2a..98fb2f7 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips/ic-mips.cc
@@ -826,7 +826,7 @@
   __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a2);
   __ bind(&slow);
-  GenerateMiss(masm, MISS);
+  GenerateMiss(masm);
 }
 
 
@@ -861,7 +861,7 @@
   __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a0);  // (In delay slot) return the value stored in v0.
   __ bind(&slow);
-  GenerateMiss(masm, MISS);
+  GenerateMiss(masm);
 }
 
 
@@ -891,7 +891,7 @@
 }
 
 
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
   // ---------- S t a t e --------------
   //  -- ra     : return address
   //  -- a0     : key
@@ -904,9 +904,8 @@
   __ Push(a1, a0);
 
   // Perform tail call to the entry.
-  ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
-      ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric), isolate)
-      : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
 
   __ TailCallExternalReference(ref, 2, 1);
 }
@@ -1131,7 +1130,7 @@
   char_at_generator.GenerateSlow(masm, call_helper);
 
   __ bind(&miss);
-  GenerateMiss(masm, MISS);
+  GenerateMiss(masm);
 }
 
 
@@ -1453,11 +1452,11 @@
        IC_Utility(kKeyedLoadPropertyWithInterceptor), masm->isolate()), 2, 1);
 
   __ bind(&slow);
-  GenerateMiss(masm, MISS);
+  GenerateMiss(masm);
 }
 
 
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
   // ---------- S t a t e --------------
   //  -- a0     : value
   //  -- a1     : key
@@ -1468,10 +1467,8 @@
   // Push receiver, key and value for runtime call.
   __ Push(a2, a1, a0);
 
-  ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
-      ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
-                          masm->isolate())
-      : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
   __ TailCallExternalReference(ref, 3, 1);
 }
 
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index 1635b77..09bae59 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -2501,6 +2501,9 @@
 
 
 LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+  LInstruction* goto_instr = CheckElideControlInstruction(instr);
+  if (goto_instr != NULL) return goto_instr;
+
   return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
 }
 
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index 2fdfb67..5519110 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -5461,7 +5461,8 @@
   lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
 
   EnumLength(a3, a1);
-  Branch(call_runtime, eq, a3, Operand(Smi::FromInt(Map::kInvalidEnumCache)));
+  Branch(
+      call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
 
   jmp(&start);
 
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 25edc6d..437d769 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -3062,7 +3062,7 @@
 
 
 Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
-    MapHandleList* receiver_maps,
+    TypeHandleList* types,
     CodeHandleList* handlers,
     Handle<Name> name,
     Code::StubType type,
@@ -3074,25 +3074,25 @@
   }
 
   Label number_case;
-  Label* smi_target = HasHeapNumberMap(receiver_maps) ? &number_case : &miss;
+  Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
   __ JumpIfSmi(receiver(), smi_target);
 
   Register map_reg = scratch1();
 
-  int receiver_count = receiver_maps->length();
+  int receiver_count = types->length();
   int number_of_handled_maps = 0;
   __ lw(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
-  Handle<Map> heap_number_map = isolate()->factory()->heap_number_map();
   for (int current = 0; current < receiver_count; ++current) {
-    Handle<Map> map = receiver_maps->at(current);
+    Handle<Type> type = types->at(current);
+    Handle<Map> map = IC::TypeToMap(*type, isolate());
     if (!map->is_deprecated()) {
       number_of_handled_maps++;
-      if (map.is_identical_to(heap_number_map)) {
+      if (type->Is(Type::Number())) {
         ASSERT(!number_case.is_unused());
         __ bind(&number_case);
       }
       __ Jump(handlers->at(current), RelocInfo::CODE_TARGET,
-          eq, map_reg, Operand(receiver_maps->at(current)));
+          eq, map_reg, Operand(map));
     }
   }
   ASSERT(number_of_handled_maps != 0);
@@ -3149,12 +3149,12 @@
   //  -- a0     : key
   //  -- a1     : receiver
   // -----------------------------------
-  Label slow, miss_force_generic;
+  Label slow, miss;
 
   Register key = a0;
   Register receiver = a1;
 
-  __ JumpIfNotSmi(key, &miss_force_generic);
+  __ JumpIfNotSmi(key, &miss);
   __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ sra(a2, a0, kSmiTagSize);
   __ LoadFromNumberDictionary(&slow, t0, a0, v0, a2, a3, t1);
@@ -3174,14 +3174,14 @@
   TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
 
   // Miss case, call the runtime.
-  __ bind(&miss_force_generic);
+  __ bind(&miss);
 
   // ---------- S t a t e --------------
   //  -- ra     : return address
   //  -- a0     : key
   //  -- a1     : receiver
   // -----------------------------------
-  TailCallBuiltin(masm, Builtins::kKeyedLoadIC_MissForceGeneric);
+  TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
 }
 
 
diff --git a/src/objects-visiting-inl.h b/src/objects-visiting-inl.h
index 93b7cb9..1a68344 100644
--- a/src/objects-visiting-inl.h
+++ b/src/objects-visiting-inl.h
@@ -189,10 +189,7 @@
 
   table_.Register(kVisitNativeContext, &VisitNativeContext);
 
-  table_.Register(kVisitAllocationSite,
-                  &FixedBodyVisitor<StaticVisitor,
-                  AllocationSite::BodyDescriptor,
-                  void>::Visit);
+  table_.Register(kVisitAllocationSite, &VisitAllocationSite);
 
   table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
 
@@ -389,6 +386,31 @@
 
 
 template<typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitAllocationSite(
+    Map* map, HeapObject* object) {
+  Heap* heap = map->GetHeap();
+
+  Object** slot =
+      HeapObject::RawField(object, AllocationSite::kDependentCodeOffset);
+  if (FLAG_collect_maps) {
+    // Mark the allocation site's dependent code array, but do not push it
+    // onto the marking stack; this makes references from it weak. Dead code
+    // objects are cleaned up when we iterate over allocation sites in
+    // ClearNonLiveReferences.
+    HeapObject* obj = HeapObject::cast(*slot);
+    heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
+    StaticVisitor::MarkObjectWithoutPush(heap, obj);
+  } else {
+    StaticVisitor::VisitPointer(heap, slot);
+  }
+
+  StaticVisitor::VisitPointers(heap,
+      HeapObject::RawField(object, AllocationSite::kPointerFieldsBeginOffset),
+      HeapObject::RawField(object, AllocationSite::kPointerFieldsEndOffset));
+}
+
+
+template<typename StaticVisitor>
 void StaticMarkingVisitor<StaticVisitor>::VisitCode(
     Map* map, HeapObject* object) {
   Heap* heap = map->GetHeap();
diff --git a/src/objects-visiting.h b/src/objects-visiting.h
index 60e6f67..f7758fd 100644
--- a/src/objects-visiting.h
+++ b/src/objects-visiting.h
@@ -399,6 +399,7 @@
   }
 
   INLINE(static void VisitPropertyCell(Map* map, HeapObject* object));
+  INLINE(static void VisitAllocationSite(Map* map, HeapObject* object));
   INLINE(static void VisitCodeEntry(Heap* heap, Address entry_address));
   INLINE(static void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo));
   INLINE(static void VisitCell(Heap* heap, RelocInfo* rinfo));
diff --git a/src/objects.cc b/src/objects.cc
index 671d06f..6cb5e21 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -2137,8 +2137,7 @@
 
   if (object->HasFastProperties()) {
     // Ensure the descriptor array does not get too big.
-    if (object->map()->NumberOfOwnDescriptors() <
-        DescriptorArray::kMaxNumberOfDescriptors) {
+    if (object->map()->NumberOfOwnDescriptors() <= kMaxNumberOfDescriptors) {
       // TODO(verwaest): Support other constants.
       // if (mode == ALLOW_AS_CONSTANT &&
       //     !value->IsTheHole() &&
@@ -2560,7 +2559,7 @@
   DescriptorArray* to_replace = instance_descriptors();
   Map* current = this;
   while (current->instance_descriptors() == to_replace) {
-    current->SetEnumLength(Map::kInvalidEnumCache);
+    current->SetEnumLength(kInvalidEnumCacheSentinel);
     current->set_instance_descriptors(new_descriptors);
     Object* next = current->GetBackPointer();
     if (next->IsUndefined()) break;
@@ -5915,7 +5914,7 @@
     if (!o->IsJSObject()) return false;
     JSObject* curr = JSObject::cast(o);
     int enum_length = curr->map()->EnumLength();
-    if (enum_length == Map::kInvalidEnumCache) return false;
+    if (enum_length == kInvalidEnumCacheSentinel) return false;
     ASSERT(!curr->HasNamedInterceptor());
     ASSERT(!curr->HasIndexedInterceptor());
     ASSERT(!curr->IsAccessCheckNeeded());
@@ -6169,8 +6168,7 @@
   bool only_attribute_changes = getter->IsNull() && setter->IsNull();
   if (object->HasFastProperties() && !only_attribute_changes &&
       access_control == v8::DEFAULT &&
-      (object->map()->NumberOfOwnDescriptors() <
-       DescriptorArray::kMaxNumberOfDescriptors)) {
+      (object->map()->NumberOfOwnDescriptors() <= kMaxNumberOfDescriptors)) {
     bool getterOk = getter->IsNull() ||
         DefineFastAccessor(object, name, ACCESSOR_GETTER, getter, attributes);
     bool setterOk = !getterOk || setter->IsNull() ||
@@ -6687,7 +6685,8 @@
   int new_bit_field3 = bit_field3();
   new_bit_field3 = OwnsDescriptors::update(new_bit_field3, true);
   new_bit_field3 = NumberOfOwnDescriptorsBits::update(new_bit_field3, 0);
-  new_bit_field3 = EnumLengthBits::update(new_bit_field3, kInvalidEnumCache);
+  new_bit_field3 = EnumLengthBits::update(new_bit_field3,
+                                          kInvalidEnumCacheSentinel);
   new_bit_field3 = Deprecated::update(new_bit_field3, false);
   new_bit_field3 = IsUnstable::update(new_bit_field3, false);
   result->set_bit_field3(new_bit_field3);
@@ -9315,7 +9314,7 @@
 
 static void TrimEnumCache(Heap* heap, Map* map, DescriptorArray* descriptors) {
   int live_enum = map->EnumLength();
-  if (live_enum == Map::kInvalidEnumCache) {
+  if (live_enum == kInvalidEnumCacheSentinel) {
     live_enum = map->NumberOfDescribedProperties(OWN_DESCRIPTORS, DONT_ENUM);
   }
   if (live_enum == 0) return descriptors->ClearEnumCache();
@@ -10551,7 +10550,23 @@
   for (RelocIterator it(this, mask); !it.done(); it.next()) {
     RelocInfo* info = it.rinfo();
     Object* object = info->target_object();
-    if (object->IsMap()) maps->Add(Handle<Map>(Map::cast(object)));
+    if (object->IsMap()) maps->Add(handle(Map::cast(object)));
+  }
+}
+
+
+void Code::FindAllTypes(TypeHandleList* types) {
+  ASSERT(is_inline_cache_stub());
+  DisallowHeapAllocation no_allocation;
+  int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+  Isolate* isolate = GetIsolate();
+  for (RelocIterator it(this, mask); !it.done(); it.next()) {
+    RelocInfo* info = it.rinfo();
+    Object* object = info->target_object();
+    if (object->IsMap()) {
+      Handle<Map> map(Map::cast(object));
+      types->Add(handle(IC::MapToType(map), isolate));
+    }
   }
 }
 
@@ -13401,7 +13416,7 @@
     if (filter == NONE) return map->NumberOfOwnDescriptors();
     if (filter & DONT_ENUM) {
       int result = map->EnumLength();
-      if (result != Map::kInvalidEnumCache) return result;
+      if (result != kInvalidEnumCacheSentinel) return result;
     }
     return map->NumberOfDescribedProperties(OWN_DESCRIPTORS, filter);
   }
@@ -15768,7 +15783,7 @@
   // Make sure we preserve dictionary representation if there are too many
   // descriptors.
   int number_of_elements = NumberOfElements();
-  if (number_of_elements > DescriptorArray::kMaxNumberOfDescriptors) return obj;
+  if (number_of_elements > kMaxNumberOfDescriptors) return obj;
 
   if (number_of_elements != NextEnumerationIndex()) {
     MaybeObject* maybe_result = GenerateNewEnumerationIndices();
diff --git a/src/objects.h b/src/objects.h
index 25c2210..c720c44 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -3370,10 +3370,6 @@
   bool IsEqualTo(DescriptorArray* other);
 #endif
 
-  // The maximum number of descriptors we want in a descriptor array (should
-  // fit in a page).
-  static const int kMaxNumberOfDescriptors = 1024 + 512;
-
   // Returns the fixed array length required to hold number_of_descriptors
   // descriptors.
   static int LengthFor(int number_of_descriptors) {
@@ -5242,6 +5238,7 @@
   // Find the first map in an IC stub.
   Map* FindFirstMap();
   void FindAllMaps(MapHandleList* maps);
+  void FindAllTypes(TypeHandleList* types);
   void ReplaceFirstMap(Map* replace);
 
   // Find the first handler in an IC stub.
@@ -5709,17 +5706,20 @@
   inline uint32_t bit_field3();
   inline void set_bit_field3(uint32_t bits);
 
-  class EnumLengthBits:             public BitField<int,   0, 11> {};
-  class NumberOfOwnDescriptorsBits: public BitField<int,  11, 11> {};
-  class IsShared:                   public BitField<bool, 22,  1> {};
-  class FunctionWithPrototype:      public BitField<bool, 23,  1> {};
-  class DictionaryMap:              public BitField<bool, 24,  1> {};
-  class OwnsDescriptors:            public BitField<bool, 25,  1> {};
-  class HasInstanceCallHandler:     public BitField<bool, 26,  1> {};
-  class Deprecated:                 public BitField<bool, 27,  1> {};
-  class IsFrozen:                   public BitField<bool, 28,  1> {};
-  class IsUnstable:                 public BitField<bool, 29,  1> {};
-  class IsMigrationTarget:          public BitField<bool, 30,  1> {};
+  class EnumLengthBits:             public BitField<int,
+      0, kDescriptorIndexBitCount> {};  // NOLINT
+  class NumberOfOwnDescriptorsBits: public BitField<int,
+      kDescriptorIndexBitCount, kDescriptorIndexBitCount> {};  // NOLINT
+  STATIC_ASSERT(kDescriptorIndexBitCount + kDescriptorIndexBitCount == 20);
+  class IsShared:                   public BitField<bool, 20,  1> {};
+  class FunctionWithPrototype:      public BitField<bool, 21,  1> {};
+  class DictionaryMap:              public BitField<bool, 22,  1> {};
+  class OwnsDescriptors:            public BitField<bool, 23,  1> {};
+  class HasInstanceCallHandler:     public BitField<bool, 24,  1> {};
+  class Deprecated:                 public BitField<bool, 25,  1> {};
+  class IsFrozen:                   public BitField<bool, 26,  1> {};
+  class IsUnstable:                 public BitField<bool, 27,  1> {};
+  class IsMigrationTarget:          public BitField<bool, 28,  1> {};
 
   // Tells whether the object in the prototype property will be used
   // for instances created from this function.  If the prototype
@@ -6034,7 +6034,7 @@
   }
 
   void SetEnumLength(int length) {
-    if (length != kInvalidEnumCache) {
+    if (length != kInvalidEnumCacheSentinel) {
       ASSERT(length >= 0);
       ASSERT(length == 0 || instance_descriptors()->HasEnumCache());
       ASSERT(length <= NumberOfOwnDescriptors());
@@ -6252,9 +6252,6 @@
 
   static const int kMaxPreAllocatedPropertyFields = 255;
 
-  // Constant for denoting that the enum cache is not yet initialized.
-  static const int kInvalidEnumCache = EnumLengthBits::kMax;
-
   // Layout description.
   static const int kInstanceSizesOffset = HeapObject::kHeaderSize;
   static const int kInstanceAttributesOffset = kInstanceSizesOffset + kIntSize;
@@ -8192,6 +8189,12 @@
   static const int kWeakNextOffset = kDependentCodeOffset + kPointerSize;
   static const int kSize = kWeakNextOffset + kPointerSize;
 
+  // During mark-compact we need to take special care of the dependent code
+  // field.
+  static const int kPointerFieldsBeginOffset = kTransitionInfoOffset;
+  static const int kPointerFieldsEndOffset = kDependentCodeOffset;
+
+  // For other visitors, use the fixed body descriptor below.
   typedef FixedBodyDescriptor<HeapObject::kHeaderSize,
                               kDependentCodeOffset + kPointerSize,
                               kSize> BodyDescriptor;
diff --git a/src/property-details.h b/src/property-details.h
index 92e4f81..36f1406 100644
--- a/src/property-details.h
+++ b/src/property-details.h
@@ -194,6 +194,15 @@
 };
 
 
+static const int kDescriptorIndexBitCount = 10;
+// The maximum number of descriptors we want in a descriptor array (should
+// fit in a page).
+static const int kMaxNumberOfDescriptors =
+    (1 << kDescriptorIndexBitCount) - 2;
+static const int kInvalidEnumCacheSentinel =
+    (1 << kDescriptorIndexBitCount) - 1;
+
+
 // PropertyDetails captures type and attributes for a property.
 // They are used both in property dictionaries and instance descriptors.
 class PropertyDetails BASE_EMBEDDED {
@@ -284,9 +293,14 @@
   class DictionaryStorageField:   public BitField<uint32_t,           7, 24> {};
 
   // Bit fields for fast objects.
-  class DescriptorPointer:        public BitField<uint32_t,           6, 11> {};
-  class RepresentationField:      public BitField<uint32_t,          17,  4> {};
-  class FieldIndexField:          public BitField<uint32_t,          21, 10> {};
+  class RepresentationField:      public BitField<uint32_t,           6,  4> {};
+  class DescriptorPointer:        public BitField<uint32_t, 10,
+      kDescriptorIndexBitCount> {};  // NOLINT
+  class FieldIndexField:          public BitField<uint32_t,
+      10 + kDescriptorIndexBitCount,
+      kDescriptorIndexBitCount> {};  // NOLINT
+  // All bits for fast objects must fit in a smi.
+  STATIC_ASSERT(10 + kDescriptorIndexBitCount + kDescriptorIndexBitCount <= 31);
 
   static const int kInitialIndex = 1;
 
diff --git a/src/runtime.cc b/src/runtime.cc
index 28506dd..449d8a3 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -1212,7 +1212,10 @@
     Handle<Object> byte_offset_obj,
     bool is_little_endian,
     T* result) {
-  size_t byte_offset = NumberToSize(isolate, *byte_offset_obj);
+  size_t byte_offset = 0;
+  if (!TryNumberToSize(isolate, *byte_offset_obj, &byte_offset)) {
+    return false;
+  }
   Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(data_view->buffer()));
 
   size_t data_view_byte_offset =
@@ -1253,7 +1256,10 @@
     Handle<Object> byte_offset_obj,
     bool is_little_endian,
     T data) {
-  size_t byte_offset = NumberToSize(isolate, *byte_offset_obj);
+  size_t byte_offset = 0;
+  if (!TryNumberToSize(isolate, *byte_offset_obj, &byte_offset)) {
+    return false;
+  }
   Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(data_view->buffer()));
 
   size_t data_view_byte_offset =
diff --git a/src/runtime.js b/src/runtime.js
index 19b858b..35bc07a 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -665,7 +665,7 @@
 
 function ToPositiveInteger(x, rangeErrorName) {
   var i = TO_INTEGER(x);
-  if (i < 0) throw %MakeRangeError(rangeErrorName);
+  if (i < 0) throw MakeRangeError(rangeErrorName);
   return i;
 }
 
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index af1f3a1..f9918ed 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -134,37 +134,40 @@
 
 
 Handle<Code> StubCache::ComputeMonomorphicIC(Handle<Name> name,
-                                             Handle<Object> object,
+                                             Handle<Type> type,
                                              Handle<Code> handler,
                                              StrictModeFlag strict_mode) {
   Code::Kind kind = handler->handler_kind();
-  // Use the same cache holder for the IC as for the handler.
-  InlineCacheHolderFlag cache_holder =
-      Code::ExtractCacheHolderFromFlags(handler->flags());
-  Handle<HeapObject> stub_holder(IC::GetCodeCacheHolder(
-      isolate(), *object, cache_holder));
-  Handle<Map> stub_holder_map(stub_holder->map());
-  Handle<Code> ic = FindIC(
-      name, stub_holder_map, kind, strict_mode, cache_holder);
-  if (!ic.is_null()) return ic;
+  InlineCacheHolderFlag flag = IC::GetCodeCacheFlag(*type);
 
-  Handle<Map> map(object->GetMarkerMap(isolate()));
+  Handle<Map> stub_holder;
+  Handle<Code> ic;
+  // There are multiple string maps that all use the same prototype. That
+  // prototype cannot hold multiple handlers, one for each of the string maps,
+  // for a single name. Hence, turn off caching of the IC.
+  bool can_be_cached = !type->Is(Type::String());
+  if (can_be_cached) {
+    stub_holder = IC::GetCodeCacheHolder(flag, *type, isolate());
+    ic = FindIC(name, stub_holder, kind, strict_mode, flag);
+    if (!ic.is_null()) return ic;
+  }
+
   if (kind == Code::LOAD_IC) {
-    LoadStubCompiler ic_compiler(isolate(), cache_holder);
-    ic = ic_compiler.CompileMonomorphicIC(map, handler, name);
+    LoadStubCompiler ic_compiler(isolate(), flag);
+    ic = ic_compiler.CompileMonomorphicIC(type, handler, name);
   } else if (kind == Code::KEYED_LOAD_IC) {
-    KeyedLoadStubCompiler ic_compiler(isolate(), cache_holder);
-    ic = ic_compiler.CompileMonomorphicIC(map, handler, name);
+    KeyedLoadStubCompiler ic_compiler(isolate(), flag);
+    ic = ic_compiler.CompileMonomorphicIC(type, handler, name);
   } else if (kind == Code::STORE_IC) {
     StoreStubCompiler ic_compiler(isolate(), strict_mode);
-    ic = ic_compiler.CompileMonomorphicIC(map, handler, name);
+    ic = ic_compiler.CompileMonomorphicIC(type, handler, name);
   } else {
     ASSERT(kind == Code::KEYED_STORE_IC);
     KeyedStoreStubCompiler ic_compiler(isolate(), strict_mode, STANDARD_STORE);
-    ic = ic_compiler.CompileMonomorphicIC(map, handler, name);
+    ic = ic_compiler.CompileMonomorphicIC(type, handler, name);
   }
 
-  HeapObject::UpdateMapCodeCache(stub_holder, name, ic);
+  if (can_be_cached) Map::UpdateCodeCache(stub_holder, name, ic);
   return ic;
 }
 
@@ -584,6 +587,7 @@
 }
 
 
+// TODO(verwaest): Change this method so it takes in a TypeHandleList.
 Handle<Code> StubCache::ComputeLoadElementPolymorphic(
     MapHandleList* receiver_maps) {
   Code::Flags flags = Code::ComputeFlags(Code::KEYED_LOAD_IC, POLYMORPHIC);
@@ -592,12 +596,15 @@
   Handle<Object> probe = cache->Lookup(receiver_maps, flags);
   if (probe->IsCode()) return Handle<Code>::cast(probe);
 
+  TypeHandleList types(receiver_maps->length());
+  for (int i = 0; i < receiver_maps->length(); i++) {
+    types.Add(handle(Type::Class(receiver_maps->at(i)), isolate()));
+  }
   CodeHandleList handlers(receiver_maps->length());
   KeyedLoadStubCompiler compiler(isolate_);
   compiler.CompileElementHandlers(receiver_maps, &handlers);
   Handle<Code> code = compiler.CompilePolymorphicIC(
-      receiver_maps, &handlers, factory()->empty_string(),
-      Code::NORMAL, ELEMENT);
+      &types, &handlers, factory()->empty_string(), Code::NORMAL, ELEMENT);
 
   isolate()->counters()->keyed_load_polymorphic_stubs()->Increment();
 
@@ -606,24 +613,24 @@
 }
 
 
-Handle<Code> StubCache::ComputePolymorphicIC(MapHandleList* receiver_maps,
+Handle<Code> StubCache::ComputePolymorphicIC(TypeHandleList* types,
                                              CodeHandleList* handlers,
-                                             int number_of_valid_maps,
+                                             int number_of_valid_types,
                                              Handle<Name> name,
                                              StrictModeFlag strict_mode) {
   Handle<Code> handler = handlers->at(0);
   Code::Kind kind = handler->handler_kind();
-  Code::StubType type = number_of_valid_maps == 1 ? handler->type()
-                                                  : Code::NORMAL;
+  Code::StubType type = number_of_valid_types == 1 ? handler->type()
+                                                   : Code::NORMAL;
   if (kind == Code::LOAD_IC) {
     LoadStubCompiler ic_compiler(isolate_);
     return ic_compiler.CompilePolymorphicIC(
-        receiver_maps, handlers, name, type, PROPERTY);
+        types, handlers, name, type, PROPERTY);
   } else {
     ASSERT(kind == Code::STORE_IC);
     StoreStubCompiler ic_compiler(isolate_, strict_mode);
     return ic_compiler.CompilePolymorphicIC(
-        receiver_maps, handlers, name, type, PROPERTY);
+        types, handlers, name, type, PROPERTY);
   }
 }
 
@@ -1181,12 +1188,9 @@
 }
 
 
-bool BaseLoadStoreStubCompiler::HasHeapNumberMap(MapHandleList* receiver_maps) {
-  for (int i = 0; i < receiver_maps->length(); ++i) {
-    Handle<Map> map = receiver_maps->at(i);
-    if (map.is_identical_to(isolate()->factory()->heap_number_map())) {
-      return true;
-    }
+bool BaseLoadStoreStubCompiler::IncludesNumberType(TypeHandleList* types) {
+  for (int i = 0; i < types->length(); ++i) {
+    if (types->at(i)->Is(Type::Number())) return true;
   }
   return false;
 }
@@ -1353,15 +1357,15 @@
 
 
 Handle<Code> BaseLoadStoreStubCompiler::CompileMonomorphicIC(
-    Handle<Map> receiver_map,
+    Handle<Type> type,
     Handle<Code> handler,
     Handle<Name> name) {
-  MapHandleList receiver_maps(1);
-  receiver_maps.Add(receiver_map);
+  TypeHandleList types(1);
   CodeHandleList handlers(1);
+  types.Add(type);
   handlers.Add(handler);
-  Code::StubType type = handler->type();
-  return CompilePolymorphicIC(&receiver_maps, &handlers, name, type, PROPERTY);
+  Code::StubType stub_type = handler->type();
+  return CompilePolymorphicIC(&types, &handlers, name, stub_type, PROPERTY);
 }
 
 
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 1daffff..adfa44a 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -93,7 +93,7 @@
                            StrictModeFlag strict_mode = kNonStrictMode);
 
   Handle<Code> ComputeMonomorphicIC(Handle<Name> name,
-                                    Handle<Object> receiver,
+                                    Handle<Type> type,
                                     Handle<Code> handler,
                                     StrictModeFlag strict_mode);
 
@@ -173,7 +173,7 @@
                                               KeyedAccessStoreMode store_mode,
                                               StrictModeFlag strict_mode);
 
-  Handle<Code> ComputePolymorphicIC(MapHandleList* receiver_maps,
+  Handle<Code> ComputePolymorphicIC(TypeHandleList* types,
                                     CodeHandleList* handlers,
                                     int number_of_valid_maps,
                                     Handle<Name> name,
@@ -532,11 +532,11 @@
   }
   virtual ~BaseLoadStoreStubCompiler() { }
 
-  Handle<Code> CompileMonomorphicIC(Handle<Map> receiver_map,
+  Handle<Code> CompileMonomorphicIC(Handle<Type> type,
                                     Handle<Code> handler,
                                     Handle<Name> name);
 
-  Handle<Code> CompilePolymorphicIC(MapHandleList* receiver_maps,
+  Handle<Code> CompilePolymorphicIC(TypeHandleList* types,
                                     CodeHandleList* handlers,
                                     Handle<Name> name,
                                     Code::StubType type,
@@ -608,7 +608,7 @@
 
   void InitializeRegisters();
 
-  bool HasHeapNumberMap(MapHandleList* receiver_maps);
+  bool IncludesNumberType(TypeHandleList* types);
 
   Code::Kind kind_;
   InlineCacheHolderFlag cache_holder_;
diff --git a/src/typedarray.js b/src/typedarray.js
index ca87f8b..fc3a608 100644
--- a/src/typedarray.js
+++ b/src/typedarray.js
@@ -309,7 +309,7 @@
     if (!IS_ARRAYBUFFER(buffer)) {
       throw MakeTypeError('data_view_not_array_buffer', []);
     }
-    var bufferByteLength = %ArrayBufferGetByteLength(buffer);
+    var bufferByteLength = buffer.byteLength;
     var offset = IS_UNDEFINED(byteOffset) ?
       0 : ToPositiveInteger(byteOffset, 'invalid_data_view_offset');
     if (offset > bufferByteLength) {
diff --git a/src/v8conversions.h b/src/v8conversions.h
index d3da9f8..68107de 100644
--- a/src/v8conversions.h
+++ b/src/v8conversions.h
@@ -55,26 +55,41 @@
 // Converts a string into an integer.
 double StringToInt(UnicodeCache* unicode_cache, String* str, int radix);
 
-// Converts a number into size_t.
-inline size_t NumberToSize(Isolate* isolate,
-                           Object* number) {
+inline bool TryNumberToSize(Isolate* isolate,
+                            Object* number, size_t* result) {
   SealHandleScope shs(isolate);
   if (number->IsSmi()) {
     int value = Smi::cast(number)->value();
-    CHECK_GE(value, 0);
     ASSERT(
-      static_cast<unsigned>(Smi::kMaxValue)
-        <= std::numeric_limits<size_t>::max());
-    return static_cast<size_t>(value);
+        static_cast<unsigned>(Smi::kMaxValue)
+          <= std::numeric_limits<size_t>::max());
+    if (value >= 0) {
+      *result = static_cast<size_t>(value);
+      return true;
+    }
+    return false;
   } else {
     ASSERT(number->IsHeapNumber());
     double value = HeapNumber::cast(number)->value();
-    CHECK(value >= 0 &&
-          value <= std::numeric_limits<size_t>::max());
-    return static_cast<size_t>(value);
+    if (value >= 0 &&
+        value <= std::numeric_limits<size_t>::max()) {
+      *result = static_cast<size_t>(value);
+      return true;
+    } else {
+      return false;
+    }
   }
 }
 
+// Converts a number into size_t.
+inline size_t NumberToSize(Isolate* isolate,
+                           Object* number) {
+  size_t result = 0;
+  bool is_valid = TryNumberToSize(isolate, number, &result);
+  CHECK(is_valid);
+  return result;
+}
+
 } }  // namespace v8::internal
 
 #endif  // V8_V8CONVERSIONS_H_
diff --git a/src/version.cc b/src/version.cc
index daa5e63..b48219d 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // system so their names cannot be changed without changing the scripts.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     23
-#define BUILD_NUMBER      6
+#define BUILD_NUMBER      7
 #define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index a1b2a0e..49bd78d 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -76,7 +76,7 @@
   ASSERT(cpu.has_sse2());
   probed_features |= static_cast<uint64_t>(1) << SSE2;
 
-  // CMOD must be available on every x64 CPU.
+  // CMOV must be available on every x64 CPU.
   ASSERT(cpu.has_cmov());
   probed_features |= static_cast<uint64_t>(1) << CMOV;
 
@@ -2496,6 +2496,15 @@
 }
 
 
+void Assembler::andps(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x54);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::orps(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
   emit_optional_rex_32(dst, src);
@@ -2505,6 +2514,15 @@
 }
 
 
+void Assembler::orps(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x56);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::xorps(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
   emit_optional_rex_32(dst, src);
@@ -2514,6 +2532,87 @@
 }
 
 
+void Assembler::xorps(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x57);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::addps(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x58);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::addps(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x58);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::subps(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x5C);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::subps(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x5C);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::mulps(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x59);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::mulps(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x59);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::divps(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x5E);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::divps(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x5E);
+  emit_sse_operand(dst, src);
+}
+
+
 // SSE 2 operations.
 
 void Assembler::movd(XMMRegister dst, Register src) {
@@ -2676,6 +2775,17 @@
 }
 
 
+void Assembler::shufps(XMMRegister dst, XMMRegister src, byte imm8) {
+  ASSERT(is_uint8(imm8));
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(src, dst);
+  emit(0x0F);
+  emit(0xC6);
+  emit_sse_operand(dst, src);
+  emit(imm8);
+}
+
+
 void Assembler::movapd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
   if (src.low_bits() == 4) {
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 1423b77..ac69a24 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -1351,14 +1351,27 @@
   void movaps(XMMRegister dst, XMMRegister src);
   void movss(XMMRegister dst, const Operand& src);
   void movss(const Operand& dst, XMMRegister src);
+  void shufps(XMMRegister dst, XMMRegister src, byte imm8);
 
   void cvttss2si(Register dst, const Operand& src);
   void cvttss2si(Register dst, XMMRegister src);
   void cvtlsi2ss(XMMRegister dst, Register src);
 
   void andps(XMMRegister dst, XMMRegister src);
+  void andps(XMMRegister dst, const Operand& src);
   void orps(XMMRegister dst, XMMRegister src);
+  void orps(XMMRegister dst, const Operand& src);
   void xorps(XMMRegister dst, XMMRegister src);
+  void xorps(XMMRegister dst, const Operand& src);
+
+  void addps(XMMRegister dst, XMMRegister src);
+  void addps(XMMRegister dst, const Operand& src);
+  void subps(XMMRegister dst, XMMRegister src);
+  void subps(XMMRegister dst, const Operand& src);
+  void mulps(XMMRegister dst, XMMRegister src);
+  void mulps(XMMRegister dst, const Operand& src);
+  void divps(XMMRegister dst, XMMRegister src);
+  void divps(XMMRegister dst, const Operand& src);
 
   void movmskps(Register dst, XMMRegister src);
 
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 667561b..76b541c 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -1260,26 +1260,37 @@
     byte_size_operand_ = idesc.byte_size_operation;
     current += PrintOperands(idesc.mnem, idesc.op_order_, current);
 
-  } else if (opcode == 0x54) {
-    // andps xmm, xmm/m128
+  } else if (opcode >= 0x53 && opcode <= 0x5F) {
+    const char* const pseudo_op[] = {
+      "rcpps",
+      "andps",
+      "andnps",
+      "orps",
+      "xorps",
+      "addps",
+      "mulps",
+      "cvtps2pd",
+      "cvtdq2ps",
+      "subps",
+      "minps",
+      "divps",
+      "maxps",
+    };
     int mod, regop, rm;
     get_modrm(*current, &mod, &regop, &rm);
-    AppendToBuffer("andps %s,", NameOfXMMRegister(regop));
+    AppendToBuffer("%s %s,",
+                   pseudo_op[opcode - 0x53],
+                   NameOfXMMRegister(regop));
     current += PrintRightXMMOperand(current);
 
-  } else if (opcode == 0x56) {
-    // orps xmm, xmm/m128
+  } else if (opcode == 0xC6) {
+    // shufps xmm, xmm/m128, imm8
     int mod, regop, rm;
     get_modrm(*current, &mod, &regop, &rm);
-    AppendToBuffer("orps %s,", NameOfXMMRegister(regop));
+    AppendToBuffer("shufps %s, ", NameOfXMMRegister(regop));
     current += PrintRightXMMOperand(current);
-
-  } else if (opcode == 0x57) {
-    // xorps xmm, xmm/m128
-    int mod, regop, rm;
-    get_modrm(*current, &mod, &regop, &rm);
-    AppendToBuffer("xorps %s,", NameOfXMMRegister(regop));
-    current += PrintRightXMMOperand(current);
+    AppendToBuffer(", %d", (*current) & 3);
+    current += 1;
 
   } else if (opcode == 0x50) {
     // movmskps reg, xmm
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index fe8734c..667a523 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -540,7 +540,7 @@
   char_at_generator.GenerateSlow(masm, call_helper);
 
   __ bind(&miss);
-  GenerateMiss(masm, MISS);
+  GenerateMiss(masm);
 }
 
 
@@ -583,7 +583,7 @@
       1);
 
   __ bind(&slow);
-  GenerateMiss(masm, MISS);
+  GenerateMiss(masm);
 }
 
 
@@ -1271,7 +1271,7 @@
   __ movq(rax, unmapped_location);
   __ Ret();
   __ bind(&slow);
-  GenerateMiss(masm, MISS);
+  GenerateMiss(masm);
 }
 
 
@@ -1310,7 +1310,7 @@
                  INLINE_SMI_CHECK);
   __ Ret();
   __ bind(&slow);
-  GenerateMiss(masm, MISS);
+  GenerateMiss(masm);
 }
 
 
@@ -1423,7 +1423,7 @@
 }
 
 
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax    : key
   //  -- rdx    : receiver
@@ -1439,10 +1439,8 @@
   __ PushReturnAddressFrom(rbx);
 
   // Perform tail call to the entry.
-  ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
-      ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric),
-                          masm->isolate())
-      : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
   __ TailCallExternalReference(ref, 2, 1);
 }
 
@@ -1612,7 +1610,7 @@
 }
 
 
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : key
@@ -1627,10 +1625,8 @@
   __ PushReturnAddressFrom(rbx);
 
   // Do tail-call to runtime routine.
-  ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
-    ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
-                        masm->isolate())
-    : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
   __ TailCallExternalReference(ref, 3, 1);
 }
 
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index c45f91e..cafde51 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -5367,44 +5367,49 @@
 
 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
   Register input = ToRegister(instr->value());
-
-  Condition final_branch_condition =
-      EmitTypeofIs(instr->TrueLabel(chunk_),
-          instr->FalseLabel(chunk_), input, instr->type_literal());
+  Condition final_branch_condition = EmitTypeofIs(instr, input);
   if (final_branch_condition != no_condition) {
     EmitBranch(instr, final_branch_condition);
   }
 }
 
 
-Condition LCodeGen::EmitTypeofIs(Label* true_label,
-                                 Label* false_label,
-                                 Register input,
-                                 Handle<String> type_name) {
+Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
+  Label* true_label = instr->TrueLabel(chunk_);
+  Label* false_label = instr->FalseLabel(chunk_);
+  Handle<String> type_name = instr->type_literal();
+  int left_block = instr->TrueDestination(chunk_);
+  int right_block = instr->FalseDestination(chunk_);
+  int next_block = GetNextEmittedBlock();
+
+  Label::Distance true_distance = left_block == next_block ? Label::kNear
+                                                           : Label::kFar;
+  Label::Distance false_distance = right_block == next_block ? Label::kNear
+                                                             : Label::kFar;
   Condition final_branch_condition = no_condition;
   if (type_name->Equals(heap()->number_string())) {
-    __ JumpIfSmi(input, true_label);
+    __ JumpIfSmi(input, true_label, true_distance);
     __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
                    Heap::kHeapNumberMapRootIndex);
 
     final_branch_condition = equal;
 
   } else if (type_name->Equals(heap()->string_string())) {
-    __ JumpIfSmi(input, false_label);
+    __ JumpIfSmi(input, false_label, false_distance);
     __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
-    __ j(above_equal, false_label);
+    __ j(above_equal, false_label, false_distance);
     __ testb(FieldOperand(input, Map::kBitFieldOffset),
              Immediate(1 << Map::kIsUndetectable));
     final_branch_condition = zero;
 
   } else if (type_name->Equals(heap()->symbol_string())) {
-    __ JumpIfSmi(input, false_label);
+    __ JumpIfSmi(input, false_label, false_distance);
     __ CmpObjectType(input, SYMBOL_TYPE, input);
     final_branch_condition = equal;
 
   } else if (type_name->Equals(heap()->boolean_string())) {
     __ CompareRoot(input, Heap::kTrueValueRootIndex);
-    __ j(equal, true_label);
+    __ j(equal, true_label, true_distance);
     __ CompareRoot(input, Heap::kFalseValueRootIndex);
     final_branch_condition = equal;
 
@@ -5414,8 +5419,8 @@
 
   } else if (type_name->Equals(heap()->undefined_string())) {
     __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
-    __ j(equal, true_label);
-    __ JumpIfSmi(input, false_label);
+    __ j(equal, true_label, true_distance);
+    __ JumpIfSmi(input, false_label, false_distance);
     // Check for undetectable objects => true.
     __ movq(input, FieldOperand(input, HeapObject::kMapOffset));
     __ testb(FieldOperand(input, Map::kBitFieldOffset),
@@ -5424,29 +5429,29 @@
 
   } else if (type_name->Equals(heap()->function_string())) {
     STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
-    __ JumpIfSmi(input, false_label);
+    __ JumpIfSmi(input, false_label, false_distance);
     __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
-    __ j(equal, true_label);
+    __ j(equal, true_label, true_distance);
     __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
     final_branch_condition = equal;
 
   } else if (type_name->Equals(heap()->object_string())) {
-    __ JumpIfSmi(input, false_label);
+    __ JumpIfSmi(input, false_label, false_distance);
     if (!FLAG_harmony_typeof) {
       __ CompareRoot(input, Heap::kNullValueRootIndex);
-      __ j(equal, true_label);
+      __ j(equal, true_label, true_distance);
     }
     __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
-    __ j(below, false_label);
+    __ j(below, false_label, false_distance);
     __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
-    __ j(above, false_label);
+    __ j(above, false_label, false_distance);
     // Check for undetectable objects => false.
     __ testb(FieldOperand(input, Map::kBitFieldOffset),
              Immediate(1 << Map::kIsUndetectable));
     final_branch_condition = zero;
 
   } else {
-    __ jmp(false_label);
+    __ jmp(false_label, false_distance);
   }
 
   return final_branch_condition;
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index ac238d0..51cfe08 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -287,10 +287,7 @@
   // Emits optimized code for typeof x == "y".  Modifies input register.
   // Returns the condition on which a final split to
   // true and false label should be made, to optimize fallthrough.
-  Condition EmitTypeofIs(Label* true_label,
-                         Label* false_label,
-                         Register input,
-                         Handle<String> type_name);
+  Condition EmitTypeofIs(LTypeofIsAndBranch* instr, Register input);
 
   // Emits optimized code for %_IsObject(x).  Preserves input register.
   // Returns the condition on which a final split to
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index c4cb642..b911c48 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -2504,6 +2504,9 @@
 
 
 LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+  LInstruction* goto_instr = CheckElideControlInstruction(instr);
+  if (goto_instr != NULL) return goto_instr;
+
   return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
 }
 
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index fd857d3..5127ddf 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -4915,7 +4915,7 @@
   movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
 
   EnumLength(rdx, rbx);
-  Cmp(rdx, Smi::FromInt(Map::kInvalidEnumCache));
+  Cmp(rdx, Smi::FromInt(kInvalidEnumCacheSentinel));
   j(equal, call_runtime);
 
   jmp(&start);
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index edfb47b..a1e8f21 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -3068,7 +3068,7 @@
 
 
 Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
-    MapHandleList* receiver_maps,
+    TypeHandleList* types,
     CodeHandleList* handlers,
     Handle<Name> name,
     Code::StubType type,
@@ -3080,21 +3080,21 @@
   }
 
   Label number_case;
-  Label* smi_target = HasHeapNumberMap(receiver_maps) ? &number_case : &miss;
+  Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
   __ JumpIfSmi(receiver(), smi_target);
 
   Register map_reg = scratch1();
   __ movq(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
-  int receiver_count = receiver_maps->length();
+  int receiver_count = types->length();
   int number_of_handled_maps = 0;
-  Handle<Map> heap_number_map = isolate()->factory()->heap_number_map();
   for (int current = 0; current < receiver_count; ++current) {
-    Handle<Map> map = receiver_maps->at(current);
+    Handle<Type> type = types->at(current);
+    Handle<Map> map = IC::TypeToMap(*type, isolate());
     if (!map->is_deprecated()) {
       number_of_handled_maps++;
       // Check map and tail call if there's a match
-      __ Cmp(map_reg, receiver_maps->at(current));
-      if (map.is_identical_to(heap_number_map)) {
+      __ Cmp(map_reg, map);
+      if (type->Is(Type::Number())) {
         ASSERT(!number_case.is_unused());
         __ bind(&number_case);
       }
@@ -3124,12 +3124,12 @@
   //  -- rdx    : receiver
   //  -- rsp[0] : return address
   // -----------------------------------
-  Label slow, miss_force_generic;
+  Label slow, miss;
 
   // This stub is meant to be tail-jumped to, the receiver must already
   // have been verified by the caller to not be a smi.
 
-  __ JumpIfNotSmi(rax, &miss_force_generic);
+  __ JumpIfNotSmi(rax, &miss);
   __ SmiToInteger32(rbx, rax);
   __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
 
@@ -3149,13 +3149,13 @@
   // -----------------------------------
   TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
 
-  __ bind(&miss_force_generic);
+  __ bind(&miss);
   // ----------- S t a t e -------------
   //  -- rax    : key
   //  -- rdx    : receiver
   //  -- rsp[0] : return address
   // -----------------------------------
-  TailCallBuiltin(masm, Builtins::kKeyedLoadIC_MissForceGeneric);
+  TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
 }
 
 
diff --git a/test/cctest/test-assembler-ia32.cc b/test/cctest/test-assembler-ia32.cc
index d401568..ca788a9 100644
--- a/test/cctest/test-assembler-ia32.cc
+++ b/test/cctest/test-assembler-ia32.cc
@@ -599,4 +599,44 @@
 }
 
 
+typedef int (*F8)(float x, float y);
+TEST(AssemblerIa32SSE) {
+  CcTest::InitializeVM();
+  if (!CpuFeatures::IsSupported(SSE2)) return;
+
+  Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
+  HandleScope scope(isolate);
+  v8::internal::byte buffer[256];
+  MacroAssembler assm(isolate, buffer, sizeof buffer);
+  {
+    CpuFeatureScope fscope(&assm, SSE2);
+    __ movss(xmm0, Operand(esp, kPointerSize));
+    __ movss(xmm1, Operand(esp, 2 * kPointerSize));
+    __ shufps(xmm0, xmm0, 0x0);
+    __ shufps(xmm1, xmm1, 0x0);
+    __ movaps(xmm2, xmm1);
+    __ addps(xmm2, xmm0);
+    __ mulps(xmm2, xmm1);
+    __ subps(xmm2, xmm0);
+    __ divps(xmm2, xmm1);
+    __ cvttss2si(eax, xmm2);
+    __ ret(0);
+  }
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Code* code = Code::cast(isolate->heap()->CreateCode(
+      desc,
+      Code::ComputeFlags(Code::STUB),
+      Handle<Code>())->ToObjectChecked());
+  CHECK(code->IsCode());
+#ifdef OBJECT_PRINT
+  Code::cast(code)->Print();
+#endif
+
+  F8 f = FUNCTION_CAST<F8>(Code::cast(code)->entry());
+  CHECK_EQ(2, f(1.0, 2.0));
+}
+
+
 #undef __
diff --git a/test/cctest/test-assembler-x64.cc b/test/cctest/test-assembler-x64.cc
index cd1ed28..4372d8a 100644
--- a/test/cctest/test-assembler-x64.cc
+++ b/test/cctest/test-assembler-x64.cc
@@ -676,4 +676,38 @@
 }
 
 
+typedef int (*F6)(float x, float y);
+TEST(AssemblerX64SSE) {
+  CcTest::InitializeVM();
+
+  Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
+  HandleScope scope(isolate);
+  v8::internal::byte buffer[256];
+  MacroAssembler assm(isolate, buffer, sizeof buffer);
+  {
+    __ shufps(xmm0, xmm0, 0x0);  // broadcast first argument
+    __ shufps(xmm1, xmm1, 0x0);  // broadcast second argument
+    __ movaps(xmm2, xmm1);
+    __ addps(xmm2, xmm0);
+    __ mulps(xmm2, xmm1);
+    __ subps(xmm2, xmm0);
+    __ divps(xmm2, xmm1);
+    __ cvttss2si(rax, xmm2);
+    __ ret(0);
+  }
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Code* code = Code::cast(isolate->heap()->CreateCode(
+      desc,
+      Code::ComputeFlags(Code::STUB),
+      Handle<Code>())->ToObjectChecked());
+  CHECK(code->IsCode());
+#ifdef OBJECT_PRINT
+  Code::cast(code)->Print();
+#endif
+
+  F6 f = FUNCTION_CAST<F6>(Code::cast(code)->entry());
+  CHECK_EQ(2, f(1.0, 2.0));
+}
 #undef __
diff --git a/test/cctest/test-disasm-ia32.cc b/test/cctest/test-disasm-ia32.cc
index 7f9a633..2b4c82f 100644
--- a/test/cctest/test-disasm-ia32.cc
+++ b/test/cctest/test-disasm-ia32.cc
@@ -349,6 +349,34 @@
   __ fcompp();
   __ fwait();
   __ nop();
+
+  // SSE instruction
+  {
+    if (CpuFeatures::IsSupported(SSE2)) {
+      CpuFeatureScope fscope(&assm, SSE2);
+      // Move operation
+      __ movaps(xmm0, xmm1);
+      __ shufps(xmm0, xmm0, 0x0);
+
+      // logic operation
+      __ andps(xmm0, xmm1);
+      __ andps(xmm0, Operand(ebx, ecx, times_4, 10000));
+      __ orps(xmm0, xmm1);
+      __ orps(xmm0, Operand(ebx, ecx, times_4, 10000));
+      __ xorps(xmm0, xmm1);
+      __ xorps(xmm0, Operand(ebx, ecx, times_4, 10000));
+
+      // Arithmetic operation
+      __ addps(xmm1, xmm0);
+      __ addps(xmm1, Operand(ebx, ecx, times_4, 10000));
+      __ subps(xmm1, xmm0);
+      __ subps(xmm1, Operand(ebx, ecx, times_4, 10000));
+      __ mulps(xmm1, xmm0);
+      __ mulps(xmm1, Operand(ebx, ecx, times_4, 10000));
+      __ divps(xmm1, xmm0);
+      __ divps(xmm1, Operand(ebx, ecx, times_4, 10000));
+    }
+  }
   {
     if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatureScope fscope(&assm, SSE2);
@@ -356,7 +384,6 @@
       __ cvtsi2sd(xmm1, Operand(ebx, ecx, times_4, 10000));
       __ movsd(xmm1, Operand(ebx, ecx, times_4, 10000));
       __ movsd(Operand(ebx, ecx, times_4, 10000), xmm1);
-      __ movaps(xmm0, xmm1);
       // 128 bit move instructions.
       __ movdqa(xmm0, Operand(ebx, ecx, times_4, 10000));
       __ movdqa(Operand(ebx, ecx, times_4, 10000), xmm0);
@@ -370,8 +397,6 @@
       __ ucomisd(xmm0, xmm1);
       __ cmpltsd(xmm0, xmm1);
 
-      __ andps(xmm0, xmm1);
-      __ orps(xmm0, xmm1);
       __ andpd(xmm0, xmm1);
       __ psllq(xmm0, 17);
       __ psllq(xmm0, xmm1);
diff --git a/test/cctest/test-disasm-x64.cc b/test/cctest/test-disasm-x64.cc
index 9fe73de..8806764 100644
--- a/test/cctest/test-disasm-x64.cc
+++ b/test/cctest/test-disasm-x64.cc
@@ -90,11 +90,7 @@
   __ or_(rdx, Immediate(3));
   __ xor_(rdx, Immediate(3));
   __ nop();
-  {
-    CHECK(CpuFeatures::IsSupported(CPUID));
-    CpuFeatures::Scope fscope(CPUID);
-    __ cpuid();
-  }
+  __ cpuid();
   __ movsxbq(rdx, Operand(rcx, 0));
   __ movsxwq(rdx, Operand(rcx, 0));
   __ movzxbl(rdx, Operand(rcx, 0));
@@ -338,13 +334,28 @@
 
   // SSE instruction
   {
+    // Move operation
     __ cvttss2si(rdx, Operand(rbx, rcx, times_4, 10000));
     __ cvttss2si(rdx, xmm1);
     __ movaps(xmm0, xmm1);
 
+    // logic operation
     __ andps(xmm0, xmm1);
+    __ andps(xmm0, Operand(rbx, rcx, times_4, 10000));
     __ orps(xmm0, xmm1);
+    __ orps(xmm0, Operand(rbx, rcx, times_4, 10000));
     __ xorps(xmm0, xmm1);
+    __ xorps(xmm0, Operand(rbx, rcx, times_4, 10000));
+
+    // Arithmetic operation
+    __ addps(xmm1, xmm0);
+    __ addps(xmm1, Operand(rbx, rcx, times_4, 10000));
+    __ subps(xmm1, xmm0);
+    __ subps(xmm1, Operand(rbx, rcx, times_4, 10000));
+    __ mulps(xmm1, xmm0);
+    __ mulps(xmm1, Operand(rbx, rcx, times_4, 10000));
+    __ divps(xmm1, xmm0);
+    __ divps(xmm1, Operand(rbx, rcx, times_4, 10000));
   }
   // SSE 2 instructions
   {
diff --git a/test/mjsunit/fuzz-natives-part1.js b/test/mjsunit/fuzz-natives-part1.js
index e76b9be..ffcbd1d 100644
--- a/test/mjsunit/fuzz-natives-part1.js
+++ b/test/mjsunit/fuzz-natives-part1.js
@@ -208,6 +208,7 @@
   "_GeneratorThrow": true,
 
   // Only applicable to DataViews.
+  "DataViewInitialize": true,
   "DataViewGetBuffer": true,
   "DataViewGetByteLength": true,
   "DataViewGetByteOffset": true
diff --git a/test/mjsunit/fuzz-natives-part2.js b/test/mjsunit/fuzz-natives-part2.js
index 0797deb..f796b80 100644
--- a/test/mjsunit/fuzz-natives-part2.js
+++ b/test/mjsunit/fuzz-natives-part2.js
@@ -209,6 +209,7 @@
   "_GeneratorThrow": true,
 
   // Only applicable to DataViews.
+  "DataViewInitialize": true,
   "DataViewGetBuffer": true,
   "DataViewGetByteLength": true,
   "DataViewGetByteOffset": true
diff --git a/test/mjsunit/fuzz-natives-part3.js b/test/mjsunit/fuzz-natives-part3.js
index 9a3a883..61e0211 100644
--- a/test/mjsunit/fuzz-natives-part3.js
+++ b/test/mjsunit/fuzz-natives-part3.js
@@ -208,6 +208,7 @@
   "_GeneratorThrow": true,
 
   // Only applicable to DataViews.
+  "DataViewInitialize": true,
   "DataViewGetBuffer": true,
   "DataViewGetByteLength": true,
   "DataViewGetByteOffset": true
diff --git a/test/mjsunit/fuzz-natives-part4.js b/test/mjsunit/fuzz-natives-part4.js
index 83e00d2..831722b 100644
--- a/test/mjsunit/fuzz-natives-part4.js
+++ b/test/mjsunit/fuzz-natives-part4.js
@@ -208,6 +208,7 @@
   "_GeneratorThrow": true,
 
   // Only applicable to DataViews.
+  "DataViewInitialize": true,
   "DataViewGetBuffer": true,
   "DataViewGetByteLength": true,
   "DataViewGetByteOffset": true
diff --git a/test/mjsunit/harmony/dataview-accessors.js b/test/mjsunit/harmony/dataview-accessors.js
index 7b03da7..c54f8cc 100644
--- a/test/mjsunit/harmony/dataview-accessors.js
+++ b/test/mjsunit/harmony/dataview-accessors.js
@@ -114,11 +114,13 @@
   test(isTestingGet, "Int8", undefined, 0);
   test(isTestingGet, "Int8", 8, -128);
   test(isTestingGet, "Int8", 15, -1);
+  test(isTestingGet, "Int8", 1e12, undefined);
 
   test(isTestingGet, "Uint8", 0, 0);
   test(isTestingGet, "Uint8", undefined, 0);
   test(isTestingGet, "Uint8", 8, 128);
   test(isTestingGet, "Uint8", 15, 255);
+  test(isTestingGet, "Uint8", 1e12, undefined);
 
   // Little endian.
   test(isTestingGet, "Int16", 0, 256, true);
@@ -126,6 +128,7 @@
   test(isTestingGet, "Int16", 5, 26213, true);
   test(isTestingGet, "Int16", 9, -32127, true);
   test(isTestingGet, "Int16", 14, -2, true);
+  test(isTestingGet, "Int16", 1e12, undefined, true);
 
   // Big endian.
   test(isTestingGet, "Int16", 0, 1);
@@ -133,6 +136,7 @@
   test(isTestingGet, "Int16", 5, 25958);
   test(isTestingGet, "Int16", 9, -32382);
   test(isTestingGet, "Int16", 14, -257);
+  test(isTestingGet, "Int16", 1e12, undefined);
 
   // Little endian.
   test(isTestingGet, "Uint16", 0, 256, true);
@@ -140,6 +144,7 @@
   test(isTestingGet, "Uint16", 5, 26213, true);
   test(isTestingGet, "Uint16", 9, 33409, true);
   test(isTestingGet, "Uint16", 14, 65534, true);
+  test(isTestingGet, "Uint16", 1e12, undefined, true);
 
   // Big endian.
   test(isTestingGet, "Uint16", 0, 1);
@@ -147,6 +152,7 @@
   test(isTestingGet, "Uint16", 5, 25958);
   test(isTestingGet, "Uint16", 9, 33154);
   test(isTestingGet, "Uint16", 14, 65279);
+  test(isTestingGet, "Uint16", 1e12, undefined);
 
   // Little endian.
   test(isTestingGet, "Int32", 0, 50462976, true);
@@ -155,6 +161,7 @@
   test(isTestingGet, "Int32", 6, -2122291354, true);
   test(isTestingGet, "Int32", 9, -58490239, true);
   test(isTestingGet, "Int32", 12,-66052, true);
+  test(isTestingGet, "Int32", 1e12, undefined, true);
 
   // Big endian.
   test(isTestingGet, "Int32", 0, 66051);
@@ -163,6 +170,7 @@
   test(isTestingGet, "Int32", 6, 1718059137);
   test(isTestingGet, "Int32", 9, -2122152964);
   test(isTestingGet, "Int32", 12, -50462977);
+  test(isTestingGet, "Int32", 1e12, undefined);
 
   // Little endian.
   test(isTestingGet, "Uint32", 0, 50462976, true);
@@ -171,6 +179,7 @@
   test(isTestingGet, "Uint32", 6, 2172675942, true);
   test(isTestingGet, "Uint32", 9, 4236477057, true);
   test(isTestingGet, "Uint32", 12,4294901244, true);
+  test(isTestingGet, "Uint32", 1e12, undefined, true);
 
   // Big endian.
   test(isTestingGet, "Uint32", 0, 66051);
@@ -179,6 +188,7 @@
   test(isTestingGet, "Uint32", 6, 1718059137);
   test(isTestingGet, "Uint32", 9, 2172814332);
   test(isTestingGet, "Uint32", 12, 4244504319);
+  test(isTestingGet, "Uint32", 1e12, undefined);
 }
 
 function testFloat(isTestingGet, func, array, start, expected) {
@@ -192,6 +202,7 @@
   test(isTestingGet, func, 7, expected, true);
   createDataView(array, 10, true, start);
   test(isTestingGet, func, 10, expected, true);
+  test(isTestingGet, func, 1e12, undefined, true);
 
   // Big endian.
   createDataView(array, 0, false);
@@ -203,6 +214,7 @@
   test(isTestingGet, func, 7, expected, false);
   createDataView(array, 10, false);
   test(isTestingGet, func, 10, expected, false);
+  test(isTestingGet, func, 1e12, undefined, false);
 }
 
 function runFloatTestCases(isTestingGet, start) {
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index 5134313..d975f2b 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -99,10 +99,6 @@
   ##############################################################################
   # Long running test that reproduces memory leak and should be run manually.
   'regress/regress-2073': [SKIP],
-
-  ##############################################################################
-  # Needs to allocate 1Gb of memory.
-  'regress/regress-319722-ArrayBuffer': [PASS, ['system == windows or system == macos or arch == arm or arch == android_arm or arch == android_ia32', SKIP]],
 }],  # ALWAYS
 
 ##############################################################################
diff --git a/test/mjsunit/regress/regress-3010.js b/test/mjsunit/regress/regress-3010.js
new file mode 100644
index 0000000..7aeec64
--- /dev/null
+++ b/test/mjsunit/regress/regress-3010.js
@@ -0,0 +1,65 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+(function() {
+  function testOneSize(current_size) {
+    var eval_string = 'obj = {';
+    for (var current = 0; current <= current_size; ++current) {
+      eval_string += 'k' + current + ':' + current + ','
+    }
+    eval_string += '};';
+    eval(eval_string);
+    for (var i = 0; i <= current_size; i++) {
+      assertEquals(i, obj['k'+i]);
+    }
+    var current_number = 0;
+    for (var x in obj) {
+      assertEquals(current_number, obj[x]);
+      current_number++;
+    }
+  }
+
+  testOneSize(127);
+  testOneSize(128);
+  testOneSize(129);
+
+  testOneSize(255);
+  testOneSize(256);
+  testOneSize(257);
+
+  testOneSize(511);
+  testOneSize(512);
+  testOneSize(513);
+
+  testOneSize(1023);
+  testOneSize(1024);
+  testOneSize(1025);
+
+  testOneSize(2047);
+  testOneSize(2048);
+  testOneSize(2049);
+}())
diff --git a/test/mjsunit/regress/regress-319722-ArrayBuffer.js b/test/mjsunit/regress/regress-319722-ArrayBuffer.js
index c8aed9e..1849bd2 100644
--- a/test/mjsunit/regress/regress-319722-ArrayBuffer.js
+++ b/test/mjsunit/regress/regress-319722-ArrayBuffer.js
@@ -25,9 +25,20 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --nostress-opt --allow-natives-syntax
+// Flags: --nostress-opt --allow-natives-syntax --mock-arraybuffer-allocator
 var maxSize = %MaxSmi() + 1;
-var ab = new ArrayBuffer(maxSize);
+var ab;
+
+// Allocate the largest ArrayBuffer we can on this architecture.
+for (var k = 8; k >= 1 && ab == null; k = k/2) {
+  try {
+    ab = new ArrayBuffer(maxSize * k);
+  } catch (e) {
+    ab = null;
+  }
+}
+
+assertTrue(ab != null);
 
 function TestArray(constr) {
   assertThrows(function() {
diff --git a/test/mjsunit/regress/regress-320532.js b/test/mjsunit/regress/regress-320532.js
new file mode 100644
index 0000000..0730721
--- /dev/null
+++ b/test/mjsunit/regress/regress-320532.js
@@ -0,0 +1,42 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
+// Flags: --track-allocation-sites --noalways-opt
+// Flags: --stress-runs=8 --send-idle-notification --gc-global
+
+
+function bar() { return new Array(); }
+bar();
+bar();
+%OptimizeFunctionOnNextCall(bar);
+a = bar();
+function foo(len) { return new Array(len); }
+foo(0);
+foo(0);
+%OptimizeFunctionOnNextCall(foo);
+foo(0);
diff --git a/tools/push-to-trunk/auto_roll.py b/tools/push-to-trunk/auto_roll.py
new file mode 100755
index 0000000..9547301
--- /dev/null
+++ b/tools/push-to-trunk/auto_roll.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import optparse
+import re
+import sys
+import urllib2
+
+from common_includes import *
+
+CONFIG = {
+  PERSISTFILE_BASENAME: "/tmp/v8-auto-roll-tempfile",
+  DOT_GIT_LOCATION: ".git",
+}
+
+
+class Preparation(Step):
+  def __init__(self):
+    Step.__init__(self, "Preparation.")
+
+  def RunStep(self):
+    self.InitialEnvironmentChecks()
+    self.CommonPrepare()
+
+
+class FetchLatestRevision(Step):
+  def __init__(self):
+    Step.__init__(self, "Fetching latest V8 revision.")
+
+  def RunStep(self):
+    log = self.Git("svn log -1 --oneline").strip()
+    match = re.match(r"^r(\d+) ", log)
+    if not match:
+      self.Die("Could not extract current svn revision from log.")
+    self.Persist("latest", match.group(1))
+
+
+class FetchLKGR(Step):
+  def __init__(self):
+    Step.__init__(self, "Fetching V8 LKGR.")
+
+  def RunStep(self):
+    lkgr_url = "https://v8-status.appspot.com/lkgr"
+    try:
+      # pylint: disable=E1121
+      url_fh = urllib2.urlopen(lkgr_url, None, 60)
+    except urllib2.URLError:
+      self.Die("URLException while fetching %s" % lkgr_url)
+    try:
+      self.Persist("lkgr", url_fh.read())
+    finally:
+      url_fh.close()
+
+
+class PushToTrunk(Step):
+  def __init__(self):
+    Step.__init__(self, "Pushing to trunk if possible.")
+
+  def RunStep(self):
+    self.RestoreIfUnset("latest")
+    self.RestoreIfUnset("lkgr")
+    latest = int(self._state["latest"])
+    lkgr = int(self._state["lkgr"])
+    if latest == lkgr:
+      print "ToT (r%d) is clean. Pushing to trunk." % latest
+      # TODO(machenbach): Call push to trunk script.
+    else:
+      print("ToT (r%d) is ahead of the LKGR (r%d). Skipping push to trunk."
+            % (latest, lkgr))
+
+
+def BuildOptions():
+  result = optparse.OptionParser()
+  result.add_option("-s", "--step", dest="s",
+                    help="Specify the step where to start work. Default: 0.",
+                    default=0, type="int")
+  return result
+
+
+def Main():
+  parser = BuildOptions()
+  (options, args) = parser.parse_args()
+
+  step_classes = [
+    Preparation,
+    FetchLatestRevision,
+    FetchLKGR,
+    PushToTrunk,
+  ]
+
+  RunScript(step_classes, CONFIG, options, DEFAULT_SIDE_EFFECT_HANDLER)
+
+if __name__ == "__main__":
+  sys.exit(Main())
diff --git a/tools/push-to-trunk/common_includes.py b/tools/push-to-trunk/common_includes.py
index eb2bfb0..06b7ebe 100644
--- a/tools/push-to-trunk/common_includes.py
+++ b/tools/push-to-trunk/common_includes.py
@@ -75,6 +75,34 @@
   return "".join(result)
 
 
+def MakeChangeLogBody(commit_generator):
+  result = ""
+  for (title, body, author) in commit_generator():
+    # Add the commit's title line.
+    result += "%s\n" % title.rstrip()
+
+    # Grep for "BUG=xxxx" lines in the commit message and convert them to
+    # "(issue xxxx)".
+    out = body.splitlines()
+    out = filter(lambda x: re.search(r"^BUG=", x), out)
+    out = filter(lambda x: not re.search(r"BUG=$", x), out)
+    out = filter(lambda x: not re.search(r"BUG=none$", x), out)
+
+    # TODO(machenbach): Handle multiple entries (e.g. BUG=123, 234).
+    def FormatIssue(text):
+      text = re.sub(r"BUG=v8:(.*)$", r"(issue \1)", text)
+      text = re.sub(r"BUG=chromium:(.*)$", r"(Chromium issue \1)", text)
+      text = re.sub(r"BUG=(.*)$", r"(Chromium issue \1)", text)
+      return "        %s\n" % text
+
+    for line in map(FormatIssue, out):
+      result += line
+
+    # Append the commit's author for reference.
+    result += "%s\n\n" % author.rstrip()
+  return result
+
+
 # Some commands don't like the pipe, e.g. calling vi from within the script or
 # from subscripts like git cl upload.
 def Command(cmd, args="", prefix="", pipe=True):
@@ -216,8 +244,10 @@
     if self.Git("svn fetch") is None:
       self.Die("'git svn fetch' failed.")
 
+  def PrepareBranch(self):
     # Get ahold of a safe temporary branch and check it out.
-    if current_branch != self._config[TEMP_BRANCH]:
+    self.RestoreIfUnset("current_branch")
+    if self._state["current_branch"] != self._config[TEMP_BRANCH]:
       self.DeleteBranch(self._config[TEMP_BRANCH])
       self.Git("checkout -b %s" % self._config[TEMP_BRANCH])
 
@@ -295,3 +325,26 @@
     args = "cl upload -r \"%s\" --send-mail" % reviewer
     if self.Git(args,pipe=False) is None:
       self.Die("'git cl upload' failed, please try again.")
+
+
+def RunScript(step_classes,
+              config,
+              options,
+              side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER):
+  state = {}
+  steps = []
+  number = 0
+
+  for step_class in step_classes:
+    # TODO(machenbach): Factory methods.
+    step = step_class()
+    step.SetNumber(number)
+    step.SetConfig(config)
+    step.SetOptions(options)
+    step.SetState(state)
+    step.SetSideEffectHandler(side_effect_handler)
+    steps.append(step)
+    number += 1
+
+  for step in steps[options.s:]:
+    step.Run()
diff --git a/tools/push-to-trunk/push_to_trunk.py b/tools/push-to-trunk/push_to_trunk.py
index 669ba52..2aa32cd 100755
--- a/tools/push-to-trunk/push_to_trunk.py
+++ b/tools/push-to-trunk/push_to_trunk.py
@@ -59,6 +59,7 @@
   def RunStep(self):
     self.InitialEnvironmentChecks()
     self.CommonPrepare()
+    self.PrepareBranch()
     self.DeleteBranch(self.Config(TRUNKBRANCH))
 
 
@@ -110,37 +111,22 @@
 
     args = "log %s..HEAD --format=%%H" % self._state["last_push"]
     commits = self.Git(args).strip()
-    for commit in commits.splitlines():
-      # Get the commit's title line.
-      args = "log -1 %s --format=\"%%w(80,8,8)%%s\"" % commit
-      title = "%s\n" % self.Git(args).rstrip()
-      AppendToFile(title, self.Config(CHANGELOG_ENTRY_FILE))
 
-      # Grep for "BUG=xxxx" lines in the commit message and convert them to
-      # "(issue xxxx)".
-      out = self.Git("log -1 %s --format=\"%%B\"" % commit).splitlines()
-      out = filter(lambda x: re.search(r"^BUG=", x), out)
-      out = filter(lambda x: not re.search(r"BUG=$", x), out)
-      out = filter(lambda x: not re.search(r"BUG=none$", x), out)
+    def GetCommitMessages():
+      for commit in commits.splitlines():
+        yield [
+          self.Git("log -1 %s --format=\"%%w(80,8,8)%%s\"" % commit),
+          self.Git("log -1 %s --format=\"%%B\"" % commit),
+          self.Git("log -1 %s --format=\"%%w(80,8,8)(%%an)\"" % commit),
+        ]
 
-      # TODO(machenbach): Handle multiple entries (e.g. BUG=123, 234).
-      def FormatIssue(text):
-        text = re.sub(r"BUG=v8:(.*)$", r"(issue \1)", text)
-        text = re.sub(r"BUG=chromium:(.*)$", r"(Chromium issue \1)", text)
-        text = re.sub(r"BUG=(.*)$", r"(Chromium issue \1)", text)
-        return "        %s\n" % text
-
-      for line in map(FormatIssue, out):
-        AppendToFile(line, self.Config(CHANGELOG_ENTRY_FILE))
-
-      # Append the commit's author for reference.
-      args = "log -1 %s --format=\"%%w(80,8,8)(%%an)\"" % commit
-      author = self.Git(args).rstrip()
-      AppendToFile("%s\n\n" % author, self.Config(CHANGELOG_ENTRY_FILE))
+    body = MakeChangeLogBody(GetCommitMessages)
+    AppendToFile(body, self.Config(CHANGELOG_ENTRY_FILE))
 
     msg = "        Performance and stability improvements on all platforms.\n"
     AppendToFile(msg, self.Config(CHANGELOG_ENTRY_FILE))
 
+
 class EditChangeLog(Step):
   def __init__(self):
     Step.__init__(self, "Edit ChangeLog entry.")
@@ -502,9 +488,9 @@
       self.Git("branch -D %s" % self.Config(TRUNKBRANCH))
 
 
-def RunScript(config,
-              options,
-              side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER):
+def RunPushToTrunk(config,
+                   options,
+                   side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER):
   step_classes = [
     Preparation,
     FreshBranch,
@@ -532,23 +518,7 @@
     CleanUp,
   ]
 
-  state = {}
-  steps = []
-  number = 0
-
-  for step_class in step_classes:
-    # TODO(machenbach): Factory methods.
-    step = step_class()
-    step.SetNumber(number)
-    step.SetConfig(config)
-    step.SetOptions(options)
-    step.SetState(state)
-    step.SetSideEffectHandler(side_effect_handler)
-    steps.append(step)
-    number += 1
-
-  for step in steps[options.s:]:
-    step.Run()
+  RunScript(step_classes, config, options, side_effect_handler)
 
 
 def BuildOptions():
diff --git a/tools/push-to-trunk/test_scripts.py b/tools/push-to-trunk/test_scripts.py
index 727e93b..b9d762d 100644
--- a/tools/push-to-trunk/test_scripts.py
+++ b/tools/push-to-trunk/test_scripts.py
@@ -160,6 +160,7 @@
     ]
     self._rl_recipe = ["Y"]
     self.MakeStep().CommonPrepare()
+    self.MakeStep().PrepareBranch()
     self.assertEquals("some_branch", self.MakeStep().Restore("current_branch"))
 
   def testCommonPrepareNoConfirm(self):
@@ -170,7 +171,8 @@
       ["branch", "  branch1\n* %s" % TEST_CONFIG[TEMP_BRANCH]],
     ]
     self._rl_recipe = ["n"]
-    self.assertRaises(Exception, self.MakeStep().CommonPrepare)
+    self.MakeStep().CommonPrepare()
+    self.assertRaises(Exception, self.MakeStep().PrepareBranch)
     self.assertEquals("some_branch", self.MakeStep().Restore("current_branch"))
 
   def testCommonPrepareDeleteBranchFailure(self):
@@ -182,7 +184,8 @@
       ["branch -D %s" % TEST_CONFIG[TEMP_BRANCH], None],
     ]
     self._rl_recipe = ["Y"]
-    self.assertRaises(Exception, self.MakeStep().CommonPrepare)
+    self.MakeStep().CommonPrepare()
+    self.assertRaises(Exception, self.MakeStep().PrepareBranch)
     self.assertEquals("some_branch", self.MakeStep().Restore("current_branch"))
 
   def testInitialEnvironmentChecks(self):
@@ -428,7 +431,7 @@
     options.s = 0
     options.l = None
     options.c = TEST_CONFIG[CHROMIUM]
-    RunScript(TEST_CONFIG, options, self)
+    RunPushToTrunk(TEST_CONFIG, options, self)
 
     deps = FileToText(TEST_CONFIG[DEPS_FILE])
     self.assertTrue(re.search("\"v8_revision\": \"123456\"", deps))