Version 3.7.9.

Removed exit-time destructors.

Stability and performance improvements on all platforms.
Review URL: http://codereview.chromium.org/8551006

git-svn-id: http://v8.googlecode.com/svn/trunk@10037 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
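
The pattern applied throughout this change is to stop defining static objects with non-trivial destructors (each of which registers an exit-time destructor) and instead allocate the object lazily on the heap and intentionally leak it, so only a trivially destructible pointer remains static (see RegExpEmpty::GetInstance, HashMap::DefaultAllocator, and GCExtension::Register below). A minimal standalone sketch of that pattern, using a made-up Counter class rather than any V8 type:

    #include <cstdio>

    class Counter {
     public:
      Counter() : value_(0) {}
      void Increment() { ++value_; }
      int value() const { return value_; }
     private:
      int value_;
    };

    // Before: "static Counter counter;" would register an exit-time destructor.
    // After: a lazily allocated, intentionally leaked instance.  Nothing runs
    // at process exit, so static destruction order no longer matters.
    Counter* GetCounter() {
      static Counter* counter = new Counter();
      return counter;
    }

    int main() {
      GetCounter()->Increment();
      std::printf("count = %d\n", GetCounter()->value());
      return 0;
    }
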
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index b37855b..9e6f759 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -5815,15 +5815,12 @@
     // r3: from index (untagged smi)
     // r6 (a.k.a. to): to (smi)
     // r7 (a.k.a. from): from offset (smi)
-    Label allocate_slice, sliced_string, seq_string;
-    STATIC_ASSERT(kSeqStringTag == 0);
-    __ tst(r1, Operand(kStringRepresentationMask));
-    __ b(eq, &seq_string);
+    Label allocate_slice, sliced_string, seq_or_external_string;
+    // If the string is not indirect, it can only be sequential or external.
     STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
     STATIC_ASSERT(kIsIndirectStringMask != 0);
     __ tst(r1, Operand(kIsIndirectStringMask));
-    // External string.  Jump to runtime.
-    __ b(eq, &runtime);
+    __ b(eq, &seq_or_external_string);
 
     __ tst(r1, Operand(kSlicedNotConsMask));
     __ b(ne, &sliced_string);
@@ -5842,8 +5839,8 @@
     __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
     __ jmp(&allocate_slice);
 
-    __ bind(&seq_string);
-    // Sequential string.  Just move string to the right register.
+    __ bind(&seq_or_external_string);
+    // Sequential or external string: just move it to the correct register.
     __ mov(r5, r0);
 
     __ bind(&allocate_slice);
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 62eae3f..cc3f38d 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -228,6 +228,13 @@
 }
 
 
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_string(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
 void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if is_smi(");
   InputAt(0)->PrintTo(stream);
@@ -242,6 +249,14 @@
 }
 
 
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if string_compare(");
+  InputAt(0)->PrintTo(stream);
+  InputAt(1)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
 void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if has_instance_type(");
   InputAt(0)->PrintTo(stream);
@@ -1452,6 +1467,13 @@
 }
 
 
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* temp = TempRegister();
+  return new LIsStringAndBranch(UseRegisterAtStart(instr->value()), temp);
+}
+
+
 LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
   return new LIsSmiAndBranch(Use(instr->value()));
@@ -1466,6 +1488,17 @@
 }
 
 
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+    HStringCompareAndBranch* instr) {
+  ASSERT(instr->left()->representation().IsTagged());
+  ASSERT(instr->right()->representation().IsTagged());
+  LOperand* left = UseFixed(instr->left(), r1);
+  LOperand* right = UseFixed(instr->right(), r0);
+  LStringCompareAndBranch* result = new LStringCompareAndBranch(left, right);
+  return MarkAsCall(result, instr);
+}
+
+
 LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
     HHasInstanceTypeAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 5a73bed..4e1974f 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -109,8 +109,10 @@
   V(IsConstructCallAndBranch)                   \
   V(IsNilAndBranch)                             \
   V(IsObjectAndBranch)                          \
+  V(IsStringAndBranch)                          \
   V(IsSmiAndBranch)                             \
   V(IsUndetectableAndBranch)                    \
+  V(StringCompareAndBranch)                     \
   V(JSArrayLength)                              \
   V(Label)                                      \
   V(LazyBailout)                                \
@@ -658,6 +660,20 @@
 };
 
 
+class LIsStringAndBranch: public LControlInstruction<1, 1> {
+ public:
+  LIsStringAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
 class LIsSmiAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LIsSmiAndBranch(LOperand* value) {
@@ -686,6 +702,23 @@
 };
 
 
+class LStringCompareAndBranch: public LControlInstruction<2, 0> {
+ public:
+  LStringCompareAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+                               "string-compare-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+  Token::Value op() const { return hydrogen()->token(); }
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
 class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LHasInstanceTypeAndBranch(LOperand* value) {
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index db1a365..61bdf68 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -1812,6 +1812,31 @@
 }
 
 
+Condition LCodeGen::EmitIsString(Register input,
+                                 Register temp1,
+                                 Label* is_not_string) {
+  __ JumpIfSmi(input, is_not_string);
+  __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
+
+  return lt;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+  Register reg = ToRegister(instr->InputAt(0));
+  Register temp1 = ToRegister(instr->TempAt(0));
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  Condition true_cond =
+      EmitIsString(reg, temp1, false_label);
+
+  EmitBranch(true_block, false_block, true_cond);
+}
+
+
 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
   int true_block = chunk_->LookupDestination(instr->true_block_id());
   int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1837,6 +1862,41 @@
 }
 
 
+static Condition ComputeCompareCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return eq;
+    case Token::LT:
+      return lt;
+    case Token::GT:
+      return gt;
+    case Token::LTE:
+      return le;
+    case Token::GTE:
+      return ge;
+    default:
+      UNREACHABLE();
+      return kNoCondition;
+  }
+}
+
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+  Token::Value op = instr->op();
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Handle<Code> ic = CompareIC::GetUninitialized(op);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  __ cmp(r0, Operand(0));  // This instruction also signals no smi code inlined.
+
+  Condition condition = ComputeCompareCondition(op);
+
+  EmitBranch(true_block, false_block, condition);
+}
+
+
 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
   InstanceType from = instr->from();
   InstanceType to = instr->to();
@@ -2125,26 +2185,6 @@
 }
 
 
-static Condition ComputeCompareCondition(Token::Value op) {
-  switch (op) {
-    case Token::EQ_STRICT:
-    case Token::EQ:
-      return eq;
-    case Token::LT:
-      return lt;
-    case Token::GT:
-      return gt;
-    case Token::LTE:
-      return le;
-    case Token::GTE:
-      return ge;
-    default:
-      UNREACHABLE();
-      return kNoCondition;
-  }
-}
-
-
 void LCodeGen::DoCmpT(LCmpT* instr) {
   Token::Value op = instr->op();
 
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index 2b49e19..ac757ea 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -284,6 +284,13 @@
                          Label* is_not_object,
                          Label* is_object);
 
+  // Emits optimized code for %_IsString(x).  Preserves input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitIsString(Register input,
+                         Register temp1,
+                         Label* is_not_string);
+
   // Emits optimized code for %_IsConstructCall().
   // Caller should branch on equal condition.
   void EmitIsConstructCall(Register temp1, Register temp2);
diff --git a/src/ast.cc b/src/ast.cc
index 1429e2a..6af1525 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -710,6 +710,10 @@
   TypeInfo info = oracle->SwitchType(this);
   if (info.IsSmi()) {
     compare_type_ = SMI_ONLY;
+  } else if (info.IsSymbol()) {
+    compare_type_ = SYMBOL_ONLY;
+  } else if (info.IsNonSymbol()) {
+    compare_type_ = STRING_ONLY;
   } else if (info.IsNonPrimitive()) {
     compare_type_ = OBJECT_ONLY;
   } else {
@@ -893,8 +897,6 @@
 FOR_EACH_REG_EXP_TREE_TYPE(MAKE_TYPE_CASE)
 #undef MAKE_TYPE_CASE
 
-RegExpEmpty RegExpEmpty::kInstance;
-
 
 static Interval ListCaptureRegisters(ZoneList<RegExpTree*>* children) {
   Interval result = Interval::Empty();
diff --git a/src/ast.h b/src/ast.h
index 71639d5..c2d9412 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -726,6 +726,8 @@
   // Type feedback information.
   void RecordTypeFeedback(TypeFeedbackOracle* oracle);
   bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
+  bool IsSymbolCompare() { return compare_type_ == SYMBOL_ONLY; }
+  bool IsStringCompare() { return compare_type_ == STRING_ONLY; }
   bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
 
  private:
@@ -733,7 +735,13 @@
   Label body_target_;
   ZoneList<Statement*>* statements_;
   int position_;
-  enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY };
+  enum CompareTypeFeedback {
+    NONE,
+    SMI_ONLY,
+    SYMBOL_ONLY,
+    STRING_ONLY,
+    OBJECT_ONLY
+  };
   CompareTypeFeedback compare_type_;
   int compare_id_;
   int entry_id_;
@@ -2123,9 +2131,10 @@
   virtual bool IsEmpty();
   virtual int min_match() { return 0; }
   virtual int max_match() { return 0; }
-  static RegExpEmpty* GetInstance() { return &kInstance; }
- private:
-  static RegExpEmpty kInstance;
+  static RegExpEmpty* GetInstance() {
+    static RegExpEmpty* instance = ::new RegExpEmpty();
+    return instance;
+  }
 };
 
 
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 83b0f49..16826db 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -355,7 +355,8 @@
   enum Mode {
     CLONE_ELEMENTS,
     CLONE_DOUBLE_ELEMENTS,
-    COPY_ON_WRITE_ELEMENTS
+    COPY_ON_WRITE_ELEMENTS,
+    CLONE_ANY_ELEMENTS
   };
 
   FastCloneShallowArrayStub(Mode mode, int length)
@@ -373,8 +374,8 @@
 
   Major MajorKey() { return FastCloneShallowArray; }
   int MinorKey() {
-    ASSERT(mode_ == 0 || mode_ == 1 || mode_ == 2);
-    return length_ * 3 +  mode_;
+    ASSERT(mode_ == 0 || mode_ == 1 || mode_ == 2 || mode_ == 3);
+    return length_ * 4 +  mode_;
   }
 };
 
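
The MinorKey() change above keeps the stub key injective now that there are four Mode values: the multiplier must be at least the number of modes, otherwise distinct (length_, mode_) pairs collide (with the old factor of 3 and the new CLONE_ANY_ELEMENTS mode, length 0 / mode 3 and length 1 / mode 0 would both encode to 3). A small standalone check of that property, assuming small non-negative lengths as the stub uses (the MinorKey free function here is a stand-in, not the stub's member):

    #include <cassert>

    // Mirrors the new FastCloneShallowArrayStub::MinorKey() packing: the
    // factor of 4 leaves room for the added CLONE_ANY_ELEMENTS mode (3).
    static int MinorKey(int length, int mode) { return length * 4 + mode; }

    int main() {
      // No two distinct (length, mode) pairs may share a key for modes 0..3.
      for (int l1 = 0; l1 < 16; ++l1) {
        for (int m1 = 0; m1 < 4; ++m1) {
          for (int l2 = 0; l2 < 16; ++l2) {
            for (int m2 = 0; m2 < 4; ++m2) {
              if (MinorKey(l1, m1) == MinorKey(l2, m2)) {
                assert(l1 == l2 && m1 == m2);
              }
            }
          }
        }
      }
      return 0;
    }
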
diff --git a/src/codegen.h b/src/codegen.h
index 5360d3e..e6d5399 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -94,6 +94,23 @@
   DISALLOW_COPY_AND_ASSIGN(ElementsTransitionGenerator);
 };
 
+
+class StringCharLoadGenerator : public AllStatic {
+ public:
+  // Generates the code for handling different string types and loading the
+  // indexed character into |result|.  We expect |index| as untagged input and
+  // |result| as untagged output.
+  static void Generate(MacroAssembler* masm,
+                       Factory* factory,
+                       Register string,
+                       Register index,
+                       Register result,
+                       Label* call_runtime);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_CODEGEN_H_
diff --git a/src/elements.cc b/src/elements.cc
index 1d50099..ef55d54 100644
--- a/src/elements.cc
+++ b/src/elements.cc
@@ -801,42 +801,45 @@
 
 
 void ElementsAccessor::InitializeOncePerProcess() {
+  // The first argument in the list is the accessor class, the second is an
+  // arbitrary unique identifier, in this case chosen to be the corresponding
+  // ElementsKind enum.  Use the fast element handler for smi-only arrays.
+  // The implementation is currently identical.  Note that the order must match
+  // that of the ElementsKind enum for the |accessor_array[]| below to work.
+#define ELEMENTS_LIST(V)                                                       \
+  V(FastObjectElementsAccessor, FAST_SMI_ONLY_ELEMENTS)                        \
+  V(FastObjectElementsAccessor, FAST_ELEMENTS)                                 \
+  V(FastDoubleElementsAccessor, FAST_DOUBLE_ELEMENTS)                          \
+  V(DictionaryElementsAccessor, DICTIONARY_ELEMENTS)                           \
+  V(NonStrictArgumentsElementsAccessor, NON_STRICT_ARGUMENTS_ELEMENTS)         \
+  V(ExternalByteElementsAccessor, EXTERNAL_BYTE_ELEMENTS)                      \
+  V(ExternalUnsignedByteElementsAccessor, EXTERNAL_UNSIGNED_BYTE_ELEMENTS)     \
+  V(ExternalShortElementsAccessor, EXTERNAL_SHORT_ELEMENTS)                    \
+  V(ExternalUnsignedShortElementsAccessor, EXTERNAL_UNSIGNED_SHORT_ELEMENTS)   \
+  V(ExternalIntElementsAccessor, EXTERNAL_INT_ELEMENTS)                        \
+  V(ExternalUnsignedIntElementsAccessor, EXTERNAL_UNSIGNED_INT_ELEMENTS)       \
+  V(ExternalFloatElementsAccessor, EXTERNAL_FLOAT_ELEMENTS)                    \
+  V(ExternalDoubleElementsAccessor, EXTERNAL_DOUBLE_ELEMENTS)                  \
+  V(PixelElementsAccessor, EXTERNAL_PIXEL_ELEMENTS)
+
   static struct ConcreteElementsAccessors {
-    // Use the fast element handler for smi-only arrays. The implementation is
-    // currently identical.
-    FastObjectElementsAccessor fast_smi_elements_handler;
-    FastObjectElementsAccessor fast_elements_handler;
-    FastDoubleElementsAccessor fast_double_elements_handler;
-    DictionaryElementsAccessor dictionary_elements_handler;
-    NonStrictArgumentsElementsAccessor non_strict_arguments_elements_handler;
-    ExternalByteElementsAccessor byte_elements_handler;
-    ExternalUnsignedByteElementsAccessor unsigned_byte_elements_handler;
-    ExternalShortElementsAccessor short_elements_handler;
-    ExternalUnsignedShortElementsAccessor unsigned_short_elements_handler;
-    ExternalIntElementsAccessor int_elements_handler;
-    ExternalUnsignedIntElementsAccessor unsigned_int_elements_handler;
-    ExternalFloatElementsAccessor float_elements_handler;
-    ExternalDoubleElementsAccessor double_elements_handler;
-    PixelElementsAccessor pixel_elements_handler;
-  } element_accessors;
+#define ACCESSOR_STRUCT(Class, Name) Class* Name##_handler;
+    ELEMENTS_LIST(ACCESSOR_STRUCT)
+#undef ACCESSOR_STRUCT
+  } element_accessors = {
+#define ACCESSOR_INIT(Class, Name) new Class(),
+    ELEMENTS_LIST(ACCESSOR_INIT)
+#undef ACCESSOR_INIT
+  };
 
   static ElementsAccessor* accessor_array[] = {
-    &element_accessors.fast_smi_elements_handler,
-    &element_accessors.fast_elements_handler,
-    &element_accessors.fast_double_elements_handler,
-    &element_accessors.dictionary_elements_handler,
-    &element_accessors.non_strict_arguments_elements_handler,
-    &element_accessors.byte_elements_handler,
-    &element_accessors.unsigned_byte_elements_handler,
-    &element_accessors.short_elements_handler,
-    &element_accessors.unsigned_short_elements_handler,
-    &element_accessors.int_elements_handler,
-    &element_accessors.unsigned_int_elements_handler,
-    &element_accessors.float_elements_handler,
-    &element_accessors.double_elements_handler,
-    &element_accessors.pixel_elements_handler
+#define ACCESSOR_ARRAY(Class, Name) element_accessors.Name##_handler,
+    ELEMENTS_LIST(ACCESSOR_ARRAY)
+#undef ACCESSOR_ARRAY
   };
 
+#undef ELEMENTS_LIST
+
   STATIC_ASSERT((sizeof(accessor_array) / sizeof(*accessor_array)) ==
                 kElementsKindCount);
 
diff --git a/src/extensions/gc-extension.cc b/src/extensions/gc-extension.cc
index 48e8c42..54c8cdc 100644
--- a/src/extensions/gc-extension.cc
+++ b/src/extensions/gc-extension.cc
@@ -46,8 +46,9 @@
 
 
 void GCExtension::Register() {
-  static GCExtension gc_extension;
-  static v8::DeclareExtension gc_extension_declaration(&gc_extension);
+  static GCExtension* gc_extension = NULL;
+  if (gc_extension == NULL) gc_extension = new GCExtension();
+  static v8::DeclareExtension gc_extension_declaration(gc_extension);
 }
 
 } }  // namespace v8::internal
diff --git a/src/factory.cc b/src/factory.cc
index 8344355..16f5fd0 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -546,17 +546,19 @@
 }
 
 
-Handle<Object> Factory::NewNumberFromInt(int value) {
+Handle<Object> Factory::NewNumberFromInt(int32_t value,
+                                         PretenureFlag pretenure) {
   CALL_HEAP_FUNCTION(
       isolate(),
-      isolate()->heap()->NumberFromInt32(value), Object);
+      isolate()->heap()->NumberFromInt32(value, pretenure), Object);
 }
 
 
-Handle<Object> Factory::NewNumberFromUint(uint32_t value) {
+Handle<Object> Factory::NewNumberFromUint(uint32_t value,
+                                          PretenureFlag pretenure) {
   CALL_HEAP_FUNCTION(
       isolate(),
-      isolate()->heap()->NumberFromUint32(value), Object);
+      isolate()->heap()->NumberFromUint32(value, pretenure), Object);
 }
 
 
diff --git a/src/factory.h b/src/factory.h
index 158db5b..27e4501 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -231,8 +231,10 @@
   Handle<Object> NewNumber(double value,
                            PretenureFlag pretenure = NOT_TENURED);
 
-  Handle<Object> NewNumberFromInt(int value);
-  Handle<Object> NewNumberFromUint(uint32_t value);
+  Handle<Object> NewNumberFromInt(int32_t value,
+                                  PretenureFlag pretenure = NOT_TENURED);
+  Handle<Object> NewNumberFromUint(uint32_t value,
+                                   PretenureFlag pretenure = NOT_TENURED);
 
   // These objects are used by the api to create env-independent data
   // structures in the heap.
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 9f11616..566010b 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -128,6 +128,9 @@
 DEFINE_bool(limit_inlining, true, "limit code size growth from inlining")
 DEFINE_bool(eliminate_empty_blocks, true, "eliminate empty blocks")
 DEFINE_bool(loop_invariant_code_motion, true, "loop invariant code motion")
+DEFINE_bool(collect_megamorphic_maps_from_stub_cache,
+            true,
+            "crankshaft harvests type feedback from stub cache")
 DEFINE_bool(hydrogen_stats, false, "print statistics for hydrogen")
 DEFINE_bool(trace_hydrogen, false, "trace generated hydrogen to file")
 DEFINE_bool(trace_inlining, false, "trace inlining decisions")
diff --git a/src/hashmap.cc b/src/hashmap.cc
index 37748a3..0b404a9 100644
--- a/src/hashmap.cc
+++ b/src/hashmap.cc
@@ -36,7 +36,7 @@
 namespace v8 {
 namespace internal {
 
-Allocator HashMap::DefaultAllocator;
+Allocator* HashMap::DefaultAllocator = ::new Allocator();
 
 
 HashMap::HashMap(MatchFun match,
diff --git a/src/hashmap.h b/src/hashmap.h
index 4e6a454..d2d1faf 100644
--- a/src/hashmap.h
+++ b/src/hashmap.h
@@ -46,14 +46,14 @@
 
 class HashMap {
  public:
-  static Allocator DefaultAllocator;
+  static Allocator* DefaultAllocator;
 
   typedef bool (*MatchFun) (void* key1, void* key2);
 
   // initial_capacity is the size of the initial hash map;
   // it must be a power of 2 (and thus must not be 0).
   explicit HashMap(MatchFun match,
-                   Allocator* allocator = &DefaultAllocator,
+                   Allocator* allocator = DefaultAllocator,
                    uint32_t initial_capacity = 8);
 
   ~HashMap();
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 51c037c..396f678 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -229,19 +229,21 @@
 }
 
 
-MaybeObject* Heap::NumberFromInt32(int32_t value) {
+MaybeObject* Heap::NumberFromInt32(
+    int32_t value, PretenureFlag pretenure) {
   if (Smi::IsValid(value)) return Smi::FromInt(value);
   // Bypass NumberFromDouble to avoid various redundant checks.
-  return AllocateHeapNumber(FastI2D(value));
+  return AllocateHeapNumber(FastI2D(value), pretenure);
 }
 
 
-MaybeObject* Heap::NumberFromUint32(uint32_t value) {
+MaybeObject* Heap::NumberFromUint32(
+    uint32_t value, PretenureFlag pretenure) {
   if ((int32_t)value >= 0 && Smi::IsValid((int32_t)value)) {
     return Smi::FromInt((int32_t)value);
   }
   // Bypass NumberFromDouble to avoid various redundant checks.
-  return AllocateHeapNumber(FastUI2D(value));
+  return AllocateHeapNumber(FastUI2D(value), pretenure);
 }
 
 
@@ -253,6 +255,9 @@
           ExternalString::kResourceOffset -
           kHeapObjectTag);
 
+  // Clear pointer cache.
+  ExternalString::cast(string)->clear_data_cache();
+
   // Dispose of the C++ object if it has not already been disposed.
   if (*resource_addr != NULL) {
     (*resource_addr)->Dispose();
diff --git a/src/heap.cc b/src/heap.cc
index cbf0cb3..8aa7580 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -2847,14 +2847,14 @@
       // Copy first part.
       const char* src;
       if (first->IsExternalString()) {
-        src = ExternalAsciiString::cast(first)->resource()->data();
+        src = ExternalAsciiString::cast(first)->GetChars();
       } else {
         src = SeqAsciiString::cast(first)->GetChars();
       }
       for (int i = 0; i < first_length; i++) *dest++ = src[i];
       // Copy second part.
       if (second->IsExternalString()) {
-        src = ExternalAsciiString::cast(second)->resource()->data();
+        src = ExternalAsciiString::cast(second)->GetChars();
       } else {
         src = SeqAsciiString::cast(second)->GetChars();
       }
diff --git a/src/heap.h b/src/heap.h
index 1eb4640..cfd9625 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -837,13 +837,15 @@
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed.
   // Please note this does not perform a garbage collection.
-  MUST_USE_RESULT inline MaybeObject* NumberFromInt32(int32_t value);
+  MUST_USE_RESULT inline MaybeObject* NumberFromInt32(
+      int32_t value, PretenureFlag pretenure = NOT_TENURED);
 
   // Converts an int into either a Smi or a HeapNumber object.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed.
   // Please note this does not perform a garbage collection.
-  MUST_USE_RESULT inline MaybeObject* NumberFromUint32(uint32_t value);
+  MUST_USE_RESULT inline MaybeObject* NumberFromUint32(
+      uint32_t value, PretenureFlag pretenure = NOT_TENURED);
 
   // Allocates a new foreign object.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index ae62367..360384b 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -1325,6 +1325,13 @@
 }
 
 
+void HStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add(Token::Name(token()));
+  stream->Add(" ");
+  HControlInstruction::PrintDataTo(stream);
+}
+
+
 void HCompareIDAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add(Token::Name(token()));
   stream->Add(" ");
@@ -1887,6 +1894,167 @@
 }
 
 
+#define H_CONSTANT_INT32(val)                                                  \
+new(zone) HConstant(FACTORY->NewNumberFromInt(val, TENURED),                   \
+                    Representation::Integer32())
+#define H_CONSTANT_DOUBLE(val)                                                 \
+new(zone) HConstant(FACTORY->NewNumber(val, TENURED),                          \
+                    Representation::Double())
+
+#define DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HInstr, op)                       \
+HInstruction* HInstr::New##HInstr(Zone* zone,                                  \
+                                  HValue* context,                             \
+                                  HValue* left,                                \
+                                  HValue* right) {                             \
+  if (left->IsConstant() && right->IsConstant()) {                             \
+    HConstant* c_left = HConstant::cast(left);                                 \
+    HConstant* c_right = HConstant::cast(right);                               \
+    if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {             \
+      double double_res = c_left->DoubleValue() op c_right->DoubleValue();     \
+      if (TypeInfo::IsInt32Double(double_res)) {                               \
+        return H_CONSTANT_INT32(static_cast<int32_t>(double_res));             \
+      }                                                                        \
+      return H_CONSTANT_DOUBLE(double_res);                                    \
+    }                                                                          \
+  }                                                                            \
+  return new(zone) HInstr(context, left, right);                               \
+}
+
+
+DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HAdd, +)
+DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HMul, *)
+DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HSub, -)
+
+#undef DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR
+
+
+HInstruction* HMod::NewHMod(Zone* zone,
+                            HValue* context,
+                            HValue* left,
+                            HValue* right) {
+  if (left->IsConstant() && right->IsConstant()) {
+    HConstant* c_left = HConstant::cast(left);
+    HConstant* c_right = HConstant::cast(right);
+    if (c_left->HasInteger32Value() && c_right->HasInteger32Value()) {
+      int32_t dividend = c_left->Integer32Value();
+      int32_t divisor = c_right->Integer32Value();
+      if (divisor != 0) {
+        int32_t res = dividend % divisor;
+        if ((res == 0) && (dividend < 0)) {
+          return H_CONSTANT_DOUBLE(-0.0);
+        }
+        return H_CONSTANT_INT32(res);
+      }
+    }
+  }
+  return new(zone) HMod(context, left, right);
+}
+
+
+HInstruction* HDiv::NewHDiv(Zone* zone,
+                            HValue* context,
+                            HValue* left,
+                            HValue* right) {
+  // If left and right are constant values, try to return a constant value.
+  if (left->IsConstant() && right->IsConstant()) {
+    HConstant* c_left = HConstant::cast(left);
+    HConstant* c_right = HConstant::cast(right);
+    if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
+      if (c_right->DoubleValue() != 0) {
+        double double_res = c_left->DoubleValue() / c_right->DoubleValue();
+        if (TypeInfo::IsInt32Double(double_res)) {
+          return H_CONSTANT_INT32(static_cast<int32_t>(double_res));
+        }
+        return H_CONSTANT_DOUBLE(double_res);
+      }
+    }
+  }
+  return new(zone) HDiv(context, left, right);
+}
+
+
+HInstruction* HBitwise::NewHBitwise(Zone* zone,
+                                    Token::Value op,
+                                    HValue* context,
+                                    HValue* left,
+                                    HValue* right) {
+  if (left->IsConstant() && right->IsConstant()) {
+    HConstant* c_left = HConstant::cast(left);
+    HConstant* c_right = HConstant::cast(right);
+    if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
+      int32_t result;
+      int32_t v_left = c_left->NumberValueAsInteger32();
+      int32_t v_right = c_right->NumberValueAsInteger32();
+      switch (op) {
+        case Token::BIT_XOR:
+          result = v_left ^ v_right;
+          break;
+        case Token::BIT_AND:
+          result = v_left & v_right;
+          break;
+        case Token::BIT_OR:
+          result = v_left | v_right;
+          break;
+        default:
+          result = 0;  // Please the compiler.
+          UNREACHABLE();
+      }
+      return H_CONSTANT_INT32(result);
+    }
+  }
+  return new(zone) HBitwise(op, context, left, right);
+}
+
+
+#define DEFINE_NEW_H_BITWISE_INSTR(HInstr, result)                             \
+HInstruction* HInstr::New##HInstr(Zone* zone,                                  \
+                                  HValue* context,                             \
+                                  HValue* left,                                \
+                                  HValue* right) {                             \
+  if (left->IsConstant() && right->IsConstant()) {                             \
+    HConstant* c_left = HConstant::cast(left);                                 \
+    HConstant* c_right = HConstant::cast(right);                               \
+    if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {             \
+      return H_CONSTANT_INT32(result);                                         \
+    }                                                                          \
+  }                                                                            \
+  return new(zone) HInstr(context, left, right);                               \
+}
+
+
+DEFINE_NEW_H_BITWISE_INSTR(HSar,
+c_left->NumberValueAsInteger32() >> (c_right->NumberValueAsInteger32() & 0x1f))
+DEFINE_NEW_H_BITWISE_INSTR(HShl,
+c_left->NumberValueAsInteger32() << (c_right->NumberValueAsInteger32() & 0x1f))
+
+#undef DEFINE_NEW_H_BITWISE_INSTR
+
+
+HInstruction* HShr::NewHShr(Zone* zone,
+                            HValue* context,
+                            HValue* left,
+                            HValue* right) {
+  if (left->IsConstant() && right->IsConstant()) {
+    HConstant* c_left = HConstant::cast(left);
+    HConstant* c_right = HConstant::cast(right);
+    if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
+      int32_t left_val = c_left->NumberValueAsInteger32();
+      int32_t right_val = c_right->NumberValueAsInteger32() & 0x1f;
+      if ((right_val == 0) && (left_val < 0)) {
+        return H_CONSTANT_DOUBLE(
+            static_cast<double>(static_cast<uint32_t>(left_val)));
+      }
+      return H_CONSTANT_INT32(static_cast<uint32_t>(left_val) >> right_val);
+    }
+  }
+  return new(zone) HShr(context, left, right);
+}
+
+
+#undef H_CONSTANT_INT32
+#undef H_CONSTANT_DOUBLE
+
+
 void HIn::PrintDataTo(StringStream* stream) {
   key()->PrintNameTo(stream);
   stream->Add(" ");
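
The New* factory methods added above fold arithmetic at graph-construction time when both operands are constants, while preserving JavaScript semantics: results that still fit in an int32 stay Integer32 constants, -0 and out-of-range results fall back to double constants, and shift counts are masked to five bits. A rough standalone illustration of the unsigned-shift rule from HShr::NewHShr, using plain int32_t/double values instead of HConstant (the FoldShr helper name is made up):

    #include <cassert>
    #include <cstdint>

    // Folds constant "left >>> right" (JavaScript unsigned shift right).
    // Shifting a negative left operand by zero yields a value above
    // INT32_MAX, so the result must be represented as a double; every other
    // case fits in an int32.
    double FoldShr(int32_t left, int32_t right, bool* fits_int32) {
      int32_t shift = right & 0x1f;  // JavaScript masks the shift count.
      uint32_t result = static_cast<uint32_t>(left) >> shift;
      *fits_int32 = result <= static_cast<uint32_t>(INT32_MAX);
      return static_cast<double>(result);
    }

    int main() {
      bool fits_int32 = false;
      assert(FoldShr(-1, 0, &fits_int32) == 4294967295.0 && !fits_int32);
      assert(FoldShr(-8, 1, &fits_int32) == 2147483644.0 && fits_int32);
      assert(FoldShr(16, 2, &fits_int32) == 4.0 && fits_int32);
      return 0;
    }
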
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index fce0e30..5d46794 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -118,8 +118,10 @@
   V(IsConstructCallAndBranch)                  \
   V(IsNilAndBranch)                            \
   V(IsObjectAndBranch)                         \
+  V(IsStringAndBranch)                         \
   V(IsSmiAndBranch)                            \
   V(IsUndetectableAndBranch)                   \
+  V(StringCompareAndBranch)                   \
   V(JSArrayLength)                             \
   V(LeaveInlined)                              \
   V(LoadContextSlot)                           \
@@ -2326,6 +2328,12 @@
     ASSERT(HasDoubleValue());
     return double_value_;
   }
+  bool HasNumberValue() const { return has_int32_value_ || has_double_value_; }
+  int32_t NumberValueAsInteger32() const {
+    ASSERT(HasNumberValue());
+    if (has_int32_value_) return int32_value_;
+    return DoubleToInt32(double_value_);
+  }
   bool HasStringValue() const { return handle_->IsString(); }
 
   bool ToBoolean() const;
@@ -2716,6 +2724,18 @@
   DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch)
 };
 
+class HIsStringAndBranch: public HUnaryControlInstruction {
+ public:
+  explicit HIsStringAndBranch(HValue* value)
+    : HUnaryControlInstruction(value, NULL, NULL) { }
+
+  virtual Representation RequiredInputRepresentation(int index) {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch)
+};
+
 
 class HIsSmiAndBranch: public HUnaryControlInstruction {
  public:
@@ -2746,6 +2766,42 @@
 };
 
 
+class HStringCompareAndBranch: public HTemplateControlInstruction<2, 3> {
+ public:
+  HStringCompareAndBranch(HValue* context,
+                          HValue* left,
+                          HValue* right,
+                          Token::Value token)
+      : token_(token) {
+    ASSERT(Token::IsCompareOp(token));
+    SetOperandAt(0, context);
+    SetOperandAt(1, left);
+    SetOperandAt(2, right);
+    set_representation(Representation::Tagged());
+  }
+
+  HValue* context() { return OperandAt(0); }
+  HValue* left() { return OperandAt(1); }
+  HValue* right() { return OperandAt(2); }
+  Token::Value token() const { return token_; }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  virtual Representation RequiredInputRepresentation(int index) {
+    return Representation::Tagged();
+  }
+
+  Representation GetInputRepresentation() const {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch)
+
+ private:
+  Token::Value token_;
+};
+
+
 class HIsConstructCallAndBranch: public HTemplateControlInstruction<2, 0> {
  public:
   virtual Representation RequiredInputRepresentation(int index) {
@@ -2943,6 +2999,11 @@
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
+  static HInstruction* NewHAdd(Zone* zone,
+                               HValue* context,
+                               HValue* left,
+                               HValue* right);
+
   virtual HType CalculateInferredType();
 
   DECLARE_CONCRETE_INSTRUCTION(Add)
@@ -2963,6 +3024,11 @@
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
+  static HInstruction* NewHSub(Zone* zone,
+                               HValue* context,
+                               HValue* left,
+                               HValue* right);
+
   DECLARE_CONCRETE_INSTRUCTION(Sub)
 
  protected:
@@ -2986,6 +3052,11 @@
     return !representation().IsTagged();
   }
 
+  static HInstruction* NewHMul(Zone* zone,
+                               HValue* context,
+                               HValue* left,
+                               HValue* right);
+
   DECLARE_CONCRETE_INSTRUCTION(Mul)
 
  protected:
@@ -3014,6 +3085,11 @@
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
+  static HInstruction* NewHMod(Zone* zone,
+                               HValue* context,
+                               HValue* left,
+                               HValue* right);
+
   DECLARE_CONCRETE_INSTRUCTION(Mod)
 
  protected:
@@ -3033,6 +3109,12 @@
 
   virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
 
+
+  static HInstruction* NewHDiv(Zone* zone,
+                               HValue* context,
+                               HValue* left,
+                               HValue* right);
+
   DECLARE_CONCRETE_INSTRUCTION(Div)
 
  protected:
@@ -3055,6 +3137,12 @@
 
   virtual bool IsCommutative() const { return true; }
 
+  static HInstruction* NewHBitwise(Zone* zone,
+                                   Token::Value op,
+                                   HValue* context,
+                                   HValue* left,
+                                   HValue* right);
+
   DECLARE_CONCRETE_INSTRUCTION(Bitwise)
 
  protected:
@@ -3076,6 +3164,11 @@
 
   virtual Range* InferRange();
 
+  static HInstruction* NewHShl(Zone* zone,
+                               HValue* context,
+                               HValue* left,
+                               HValue* right);
+
   DECLARE_CONCRETE_INSTRUCTION(Shl)
 
  protected:
@@ -3090,6 +3183,11 @@
 
   virtual Range* InferRange();
 
+  static HInstruction* NewHShr(Zone* zone,
+                               HValue* context,
+                               HValue* left,
+                               HValue* right);
+
   DECLARE_CONCRETE_INSTRUCTION(Shr)
 
  protected:
@@ -3104,6 +3202,11 @@
 
   virtual Range* InferRange();
 
+  static HInstruction* NewHSar(Zone* zone,
+                               HValue* context,
+                               HValue* left,
+                               HValue* right);
+
   DECLARE_CONCRETE_INSTRUCTION(Sar)
 
  protected:
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index d0380c4..39a68cc 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -734,6 +734,7 @@
       Postorder(it.Current(), visited, order, block);
     }
   } else {
+    ASSERT(block->IsFinished());
     for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
       Postorder(it.Current(), visited, order, loop_header);
     }
@@ -2708,43 +2709,95 @@
     return Bailout("SwitchStatement: too many clauses");
   }
 
+  HValue* context = environment()->LookupContext();
+
   CHECK_ALIVE(VisitForValue(stmt->tag()));
   AddSimulate(stmt->EntryId());
   HValue* tag_value = Pop();
   HBasicBlock* first_test_block = current_block();
 
-  // 1. Build all the tests, with dangling true branches.  Unconditionally
-  // deoptimize if we encounter a non-smi comparison.
+  SwitchType switch_type = UNKNOWN_SWITCH;
+
+  // 1. Extract the clause type.
   for (int i = 0; i < clause_count; ++i) {
     CaseClause* clause = clauses->at(i);
     if (clause->is_default()) continue;
-    if (!clause->label()->IsSmiLiteral()) {
-      return Bailout("SwitchStatement: non-literal switch label");
+
+    if (switch_type == UNKNOWN_SWITCH) {
+      if (clause->label()->IsSmiLiteral()) {
+        switch_type = SMI_SWITCH;
+      } else if (clause->label()->IsStringLiteral()) {
+        switch_type = STRING_SWITCH;
+      } else {
+        return Bailout("SwitchStatement: non-literal switch label");
+      }
+    } else if ((switch_type == STRING_SWITCH &&
+                !clause->label()->IsStringLiteral()) ||
+               (switch_type == SMI_SWITCH &&
+                !clause->label()->IsSmiLiteral())) {
+      return Bailout("SwitchStatement: mixed label types are not supported");
+    }
+  }
+
+  HUnaryControlInstruction* string_check = NULL;
+  HBasicBlock* not_string_block = NULL;
+
+  // Test the switch's tag value if all clauses are string literals.
+  if (switch_type == STRING_SWITCH) {
+    string_check = new(zone()) HIsStringAndBranch(tag_value);
+    first_test_block = graph()->CreateBasicBlock();
+    not_string_block = graph()->CreateBasicBlock();
+
+    string_check->SetSuccessorAt(0, first_test_block);
+    string_check->SetSuccessorAt(1, not_string_block);
+    current_block()->Finish(string_check);
+
+    set_current_block(first_test_block);
+  }
+
+  // 2. Build all the tests, with dangling true branches.
+  for (int i = 0; i < clause_count; ++i) {
+    CaseClause* clause = clauses->at(i);
+    if (clause->is_default()) continue;
+
+    if (switch_type == SMI_SWITCH) {
+      clause->RecordTypeFeedback(oracle());
     }
 
-    // Unconditionally deoptimize on the first non-smi compare.
-    clause->RecordTypeFeedback(oracle());
-    if (!clause->IsSmiCompare()) {
-      // Finish with deoptimize and add uses of enviroment values to
-      // account for invisible uses.
-      current_block()->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
-      set_current_block(NULL);
-      break;
-    }
-
-    // Otherwise generate a compare and branch.
+    // Generate a compare and branch.
     CHECK_ALIVE(VisitForValue(clause->label()));
     HValue* label_value = Pop();
-    HCompareIDAndBranch* compare =
-        new(zone()) HCompareIDAndBranch(tag_value,
-                                        label_value,
-                                        Token::EQ_STRICT);
-    compare->SetInputRepresentation(Representation::Integer32());
-    HBasicBlock* body_block = graph()->CreateBasicBlock();
+
     HBasicBlock* next_test_block = graph()->CreateBasicBlock();
+    HBasicBlock* body_block = graph()->CreateBasicBlock();
+
+    HControlInstruction* compare;
+
+    if (switch_type == SMI_SWITCH) {
+      if (!clause->IsSmiCompare()) {
+        // Finish with deoptimize and add uses of environment values to
+        // account for invisible uses.
+        current_block()->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
+        set_current_block(NULL);
+        break;
+      }
+
+      HCompareIDAndBranch* compare_ =
+          new(zone()) HCompareIDAndBranch(tag_value,
+                                          label_value,
+                                          Token::EQ_STRICT);
+      compare_->SetInputRepresentation(Representation::Integer32());
+      compare = compare_;
+    } else {
+      compare = new(zone()) HStringCompareAndBranch(context, tag_value,
+                                                    label_value,
+                                                    Token::EQ_STRICT);
+    }
+
     compare->SetSuccessorAt(0, body_block);
     compare->SetSuccessorAt(1, next_test_block);
     current_block()->Finish(compare);
+
     set_current_block(next_test_block);
   }
 
@@ -2752,10 +2805,15 @@
   // exit.  This block is NULL if we deoptimized.
   HBasicBlock* last_block = current_block();
 
-  // 2. Loop over the clauses and the linked list of tests in lockstep,
+  if (not_string_block != NULL) {
+    last_block = CreateJoin(last_block, not_string_block, stmt->ExitId());
+  }
+
+  // 3. Loop over the clauses and the linked list of tests in lockstep,
   // translating the clause bodies.
   HBasicBlock* curr_test_block = first_test_block;
   HBasicBlock* fall_through_block = NULL;
+
   BreakAndContinueInfo break_info(stmt);
   { BreakAndContinueScope push(&break_info, this);
     for (int i = 0; i < clause_count; ++i) {
@@ -5620,34 +5678,34 @@
         AddInstruction(HCheckInstanceType::NewIsString(right));
         instr = new(zone()) HStringAdd(context, left, right);
       } else {
-        instr = new(zone()) HAdd(context, left, right);
+        instr = HAdd::NewHAdd(zone(), context, left, right);
       }
       break;
     case Token::SUB:
-      instr = new(zone()) HSub(context, left, right);
+      instr = HSub::NewHSub(zone(), context, left, right);
       break;
     case Token::MUL:
-      instr = new(zone()) HMul(context, left, right);
+      instr = HMul::NewHMul(zone(), context, left, right);
       break;
     case Token::MOD:
-      instr = new(zone()) HMod(context, left, right);
+      instr = HMod::NewHMod(zone(), context, left, right);
       break;
     case Token::DIV:
-      instr = new(zone()) HDiv(context, left, right);
+      instr = HDiv::NewHDiv(zone(), context, left, right);
       break;
     case Token::BIT_XOR:
     case Token::BIT_AND:
     case Token::BIT_OR:
-      instr = new(zone()) HBitwise(expr->op(), context, left, right);
+      instr = HBitwise::NewHBitwise(zone(), expr->op(), context, left, right);
       break;
     case Token::SAR:
-      instr = new(zone()) HSar(context, left, right);
+      instr = HSar::NewHSar(zone(), context, left, right);
       break;
     case Token::SHR:
-      instr = new(zone()) HShr(context, left, right);
+      instr = HShr::NewHShr(zone(), context, left, right);
       break;
     case Token::SHL:
-      instr = new(zone()) HShl(context, left, right);
+      instr = HShl::NewHShl(zone(), context, left, right);
       break;
     default:
       UNREACHABLE();
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 2d08dc8..948ea34 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -657,6 +657,7 @@
 class HGraphBuilder: public AstVisitor {
  public:
   enum BreakType { BREAK, CONTINUE };
+  enum SwitchType { UNKNOWN_SWITCH, SMI_SWITCH, STRING_SWITCH };
 
   // A class encapsulating (lazily-allocated) break and continue blocks for
   // a breakable statement.  Separated from BreakAndContinueScope so that it
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 23bea4f..27b127d 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -35,6 +35,7 @@
 #include "jsregexp.h"
 #include "regexp-macro-assembler.h"
 #include "stub-cache.h"
+#include "codegen.h"
 
 namespace v8 {
 namespace internal {
@@ -231,70 +232,38 @@
 }
 
 
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
-  // Stack layout on entry:
+static void GenerateFastCloneShallowArrayCommon(
+    MacroAssembler* masm,
+    int length,
+    FastCloneShallowArrayStub::Mode mode,
+    Label* fail) {
+  // Registers on entry:
   //
-  // [esp + kPointerSize]: constant elements.
-  // [esp + (2 * kPointerSize)]: literal index.
-  // [esp + (3 * kPointerSize)]: literals array.
+  // ecx: boilerplate literal array.
+  ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
 
   // All sizes here are multiples of kPointerSize.
   int elements_size = 0;
-  if (length_ > 0) {
-    elements_size = mode_ == CLONE_DOUBLE_ELEMENTS
-        ? FixedDoubleArray::SizeFor(length_)
-        : FixedArray::SizeFor(length_);
+  if (length > 0) {
+    elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+        ? FixedDoubleArray::SizeFor(length)
+        : FixedArray::SizeFor(length);
   }
   int size = JSArray::kSize + elements_size;
 
-  // Load boilerplate object into ecx and check if we need to create a
-  // boilerplate.
-  Label slow_case;
-  __ mov(ecx, Operand(esp, 3 * kPointerSize));
-  __ mov(eax, Operand(esp, 2 * kPointerSize));
-  STATIC_ASSERT(kPointerSize == 4);
-  STATIC_ASSERT(kSmiTagSize == 1);
-  STATIC_ASSERT(kSmiTag == 0);
-  __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
-                           FixedArray::kHeaderSize));
-  Factory* factory = masm->isolate()->factory();
-  __ cmp(ecx, factory->undefined_value());
-  __ j(equal, &slow_case);
-
-  if (FLAG_debug_code) {
-    const char* message;
-    Handle<Map> expected_map;
-    if (mode_ == CLONE_ELEMENTS) {
-      message = "Expected (writable) fixed array";
-      expected_map = factory->fixed_array_map();
-    } else if (mode_ == CLONE_DOUBLE_ELEMENTS) {
-      message = "Expected (writable) fixed double array";
-      expected_map = factory->fixed_double_array_map();
-    } else {
-      ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
-      message = "Expected copy-on-write fixed array";
-      expected_map = factory->fixed_cow_array_map();
-    }
-    __ push(ecx);
-    __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
-    __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), expected_map);
-    __ Assert(equal, message);
-    __ pop(ecx);
-  }
-
   // Allocate both the JS array and the elements array in one big
   // allocation. This avoids multiple limit checks.
-  __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT);
+  __ AllocateInNewSpace(size, eax, ebx, edx, fail, TAG_OBJECT);
 
   // Copy the JS array part.
   for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
-    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
+    if ((i != JSArray::kElementsOffset) || (length == 0)) {
       __ mov(ebx, FieldOperand(ecx, i));
       __ mov(FieldOperand(eax, i), ebx);
     }
   }
 
-  if (length_ > 0) {
+  if (length > 0) {
     // Get hold of the elements array of the boilerplate and setup the
     // elements pointer in the resulting object.
     __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
@@ -302,13 +271,13 @@
     __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);
 
     // Copy the elements array.
-    if (mode_ == CLONE_ELEMENTS) {
+    if (mode == FastCloneShallowArrayStub::CLONE_ELEMENTS) {
       for (int i = 0; i < elements_size; i += kPointerSize) {
         __ mov(ebx, FieldOperand(ecx, i));
         __ mov(FieldOperand(edx, i), ebx);
       }
     } else {
-      ASSERT(mode_ == CLONE_DOUBLE_ELEMENTS);
+      ASSERT(mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS);
       int i;
       for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) {
         __ mov(ebx, FieldOperand(ecx, i));
@@ -322,7 +291,75 @@
       ASSERT(i == elements_size);
     }
   }
+}
 
+
+void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
+  // Stack layout on entry:
+  //
+  // [esp + kPointerSize]: constant elements.
+  // [esp + (2 * kPointerSize)]: literal index.
+  // [esp + (3 * kPointerSize)]: literals array.
+
+  // Load boilerplate object into ecx and check if we need to create a
+  // boilerplate.
+  __ mov(ecx, Operand(esp, 3 * kPointerSize));
+  __ mov(eax, Operand(esp, 2 * kPointerSize));
+  STATIC_ASSERT(kPointerSize == 4);
+  STATIC_ASSERT(kSmiTagSize == 1);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
+                           FixedArray::kHeaderSize));
+  Factory* factory = masm->isolate()->factory();
+  __ cmp(ecx, factory->undefined_value());
+  Label slow_case;
+  __ j(equal, &slow_case);
+
+  FastCloneShallowArrayStub::Mode mode = mode_;
+  // ecx is boilerplate object.
+  if (mode == CLONE_ANY_ELEMENTS) {
+    Label double_elements, check_fast_elements;
+    __ mov(ebx, FieldOperand(ecx, JSArray::kElementsOffset));
+    __ CheckMap(ebx, factory->fixed_cow_array_map(),
+                &check_fast_elements, DONT_DO_SMI_CHECK);
+    GenerateFastCloneShallowArrayCommon(masm, 0,
+                                        COPY_ON_WRITE_ELEMENTS, &slow_case);
+    __ ret(3 * kPointerSize);
+
+    __ bind(&check_fast_elements);
+    __ CheckMap(ebx, factory->fixed_array_map(),
+                &double_elements, DONT_DO_SMI_CHECK);
+    GenerateFastCloneShallowArrayCommon(masm, length_,
+                                        CLONE_ELEMENTS, &slow_case);
+    __ ret(3 * kPointerSize);
+
+    __ bind(&double_elements);
+    mode = CLONE_DOUBLE_ELEMENTS;
+    // Fall through to generate the code to handle double elements.
+  }
+
+  if (FLAG_debug_code) {
+    const char* message;
+    Handle<Map> expected_map;
+    if (mode == CLONE_ELEMENTS) {
+      message = "Expected (writable) fixed array";
+      expected_map = factory->fixed_array_map();
+    } else if (mode == CLONE_DOUBLE_ELEMENTS) {
+      message = "Expected (writable) fixed double array";
+      expected_map = factory->fixed_double_array_map();
+    } else {
+      ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
+      message = "Expected copy-on-write fixed array";
+      expected_map = factory->fixed_cow_array_map();
+    }
+    __ push(ecx);
+    __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
+    __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), expected_map);
+    __ Assert(equal, message);
+    __ pop(ecx);
+  }
+
+  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
   // Return and remove the on-stack parameters.
   __ ret(3 * kPointerSize);
 
@@ -5089,11 +5126,6 @@
 // StringCharCodeAtGenerator
 
 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
-  Label flat_string;
-  Label ascii_string;
-  Label got_char_code;
-  Label sliced_string;
-
   // If the receiver is a smi trigger the non-string case.
   STATIC_ASSERT(kSmiTag == 0);
   __ JumpIfSmi(object_, receiver_not_string_);
@@ -5114,71 +5146,12 @@
   __ cmp(index_, FieldOperand(object_, String::kLengthOffset));
   __ j(above_equal, index_out_of_range_);
 
-  // We need special handling for non-flat strings.
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ test(result_, Immediate(kStringRepresentationMask));
-  __ j(zero, &flat_string);
-
-  // Handle non-flat strings.
-  __ and_(result_, kStringRepresentationMask);
-  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
-  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
-  __ cmp(result_, kExternalStringTag);
-  __ j(greater, &sliced_string, Label::kNear);
-  __ j(equal, &call_runtime_);
-
-  // ConsString.
-  // Check whether the right hand side is the empty string (i.e. if
-  // this is really a flat string in a cons string). If that is not
-  // the case we would rather go to the runtime system now to flatten
-  // the string.
-  Label assure_seq_string;
-  __ cmp(FieldOperand(object_, ConsString::kSecondOffset),
-         Immediate(masm->isolate()->factory()->empty_string()));
-  __ j(not_equal, &call_runtime_);
-  // Get the first of the two parts.
-  __ mov(object_, FieldOperand(object_, ConsString::kFirstOffset));
-  __ jmp(&assure_seq_string, Label::kNear);
-
-  // SlicedString, unpack and add offset.
-  __ bind(&sliced_string);
-  __ add(index_, FieldOperand(object_, SlicedString::kOffsetOffset));
-  __ mov(object_, FieldOperand(object_, SlicedString::kParentOffset));
-
-  // Assure that we are dealing with a sequential string. Go to runtime if not.
-  // Note that if the original string is a cons or slice with an external
-  // string as underlying string, we pass that unpacked underlying string with
-  // the adjusted index to the runtime function.
-  __ bind(&assure_seq_string);
-  __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
-  __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ test(result_, Immediate(kStringRepresentationMask));
-  __ j(not_zero, &call_runtime_);
-
-  // Check for 1-byte or 2-byte string.
-  __ bind(&flat_string);
-  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
-  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-  __ test(result_, Immediate(kStringEncodingMask));
-  __ j(not_zero, &ascii_string, Label::kNear);
-
-  // 2-byte string.
-  // Load the 2-byte character code into the result register.
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  __ movzx_w(result_, FieldOperand(object_,
-                                   index_, times_1,  // Scratch is smi-tagged.
-                                   SeqTwoByteString::kHeaderSize));
-  __ jmp(&got_char_code, Label::kNear);
-
-  // ASCII string.
-  // Load the byte into the result register.
-  __ bind(&ascii_string);
   __ SmiUntag(index_);
-  __ movzx_b(result_, FieldOperand(object_,
-                                   index_, times_1,
-                                   SeqAsciiString::kHeaderSize));
-  __ bind(&got_char_code);
+
+  Factory* factory = masm->isolate()->factory();
+  StringCharLoadGenerator::Generate(
+      masm, factory, object_, index_, result_, &call_runtime_);
+
   __ SmiTag(result_);
   __ bind(&exit_);
 }
@@ -5228,6 +5201,7 @@
   __ bind(&call_runtime_);
   call_helper.BeforeCall(masm);
   __ push(object_);
+  __ SmiTag(index_);
   __ push(index_);
   __ CallRuntime(Runtime::kStringCharCodeAt, 2);
   if (!result_.is(eax)) {
@@ -5973,18 +5947,15 @@
     // ebx: instance type
     // ecx: sub string length
     // edx: from index (smi)
-    Label allocate_slice, sliced_string, seq_string;
+    Label allocate_slice, sliced_string, seq_or_external_string;
     __ cmp(ecx, SlicedString::kMinLength);
     // Short slice.  Copy instead of slicing.
     __ j(less, &copy_routine);
-    STATIC_ASSERT(kSeqStringTag == 0);
-    __ test(ebx, Immediate(kStringRepresentationMask));
-    __ j(zero, &seq_string, Label::kNear);
+    // If the string is not indirect, it can only be sequential or external.
     STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
     STATIC_ASSERT(kIsIndirectStringMask != 0);
     __ test(ebx, Immediate(kIsIndirectStringMask));
-    // External string.  Jump to runtime.
-    __ j(zero, &runtime);
+    __ j(zero, &seq_or_external_string, Label::kNear);
 
     Factory* factory = masm->isolate()->factory();
     __ test(ebx, Immediate(kSlicedNotConsMask));
@@ -6002,8 +5973,8 @@
     __ mov(edi, FieldOperand(eax, SlicedString::kParentOffset));
     __ jmp(&allocate_slice, Label::kNear);
 
-    __ bind(&seq_string);
-    // Sequential string.  Just move string to the right register.
+    __ bind(&seq_or_external_string);
+    // Sequential or external string: just move it to the correct register.
     __ mov(edi, eax);
 
     __ bind(&allocate_slice);
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index cf52415..58eead7 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -524,6 +524,113 @@
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
 }
 
+
+void StringCharLoadGenerator::Generate(MacroAssembler* masm,
+                                       Factory* factory,
+                                       Register string,
+                                       Register index,
+                                       Register result,
+                                       Label* call_runtime) {
+  // Fetch the instance type of the receiver into result register.
+  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
+  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
+
+  // We need special handling for indirect strings.
+  Label check_sequential;
+  __ test(result, Immediate(kIsIndirectStringMask));
+  __ j(zero, &check_sequential);
+
+  // Dispatch on the indirect string shape: slice or cons.
+  Label cons_string;
+  __ test(result, Immediate(kSlicedNotConsMask));
+  __ j(zero, &cons_string);
+
+  // Handle slices.
+  Label indirect_string_loaded;
+  __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
+  __ SmiUntag(result);
+  __ add(index, result);
+  __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
+  __ jmp(&indirect_string_loaded);
+
+  // Handle external strings.
+  Label external_string, ascii_external, done;
+  __ bind(&external_string);
+  if (FLAG_debug_code) {
+    // Assert that we do not have a cons or slice (indirect strings) here.
+    // Sequential strings have already been ruled out.
+    __ test(result, Immediate(kIsIndirectStringMask));
+    __ Assert(zero, "external string expected, but not found");
+  }
+  __ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset));
+  // Bail out to the runtime if the data pointer has not been cached yet.
+  __ test(result, result);
+  __ j(zero, call_runtime);
+  Register scratch = string;
+  __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
+  __ cmp(scratch, Immediate(factory->external_ascii_string_map()));
+  __ j(equal, &ascii_external, Label::kNear);
+  __ cmp(scratch, Immediate(factory->external_ascii_symbol_map()));
+  __ j(equal, &ascii_external, Label::kNear);
+  // Two-byte string.
+  __ movzx_w(result, Operand(result, index, times_2, 0));
+  __ jmp(&done);
+  __ bind(&ascii_external);
+  // ASCII string.
+  __ movzx_b(result, Operand(result, index, times_1, 0));
+  __ jmp(&done);
+
+  // Handle conses.
+  // Check whether the right hand side is the empty string (i.e. if
+  // this is really a flat string in a cons string). If that is not
+  // the case we would rather go to the runtime system now to flatten
+  // the string.
+  __ bind(&cons_string);
+  __ cmp(FieldOperand(string, ConsString::kSecondOffset),
+         Immediate(factory->empty_string()));
+  __ j(not_equal, call_runtime);
+  __ mov(string, FieldOperand(string, ConsString::kFirstOffset));
+
+  __ bind(&indirect_string_loaded);
+  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
+  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
+
+  // Check whether the string is sequential. The only non-sequential
+  // shapes we support have just been unwrapped above.
+  // Note that if the original string is a cons or slice with an external
+  // string as underlying string, we pass that unpacked underlying string with
+  // the adjusted index to the runtime function.
+  __ bind(&check_sequential);
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ test(result, Immediate(kStringRepresentationMask));
+  __ j(not_zero, &external_string);
+
+  // Dispatch on the encoding: ASCII or two-byte.
+  Label ascii_string;
+  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
+  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+  __ test(result, Immediate(kStringEncodingMask));
+  __ j(not_zero, &ascii_string, Label::kNear);
+
+  // Two-byte string.
+  // Load the two-byte character code into the result register.
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  __ movzx_w(result, FieldOperand(string,
+                                  index,
+                                  times_2,
+                                  SeqTwoByteString::kHeaderSize));
+  __ jmp(&done, Label::kNear);
+
+  // ASCII string.
+  // Load the byte into the result register.
+  __ bind(&ascii_string);
+  __ movzx_b(result, FieldOperand(string,
+                                  index,
+                                  times_1,
+                                  SeqAsciiString::kHeaderSize));
+  __ bind(&done);
+}
+
 #undef __
 
 } }  // namespace v8::internal
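The new StringCharLoadGenerator centralizes the character-load dispatch that the char-code-at stub and the lithium backend previously open-coded. As a rough host-side sketch only, with made-up struct and field names standing in for V8's tagged string objects, the decision tree it emits looks like this: unwrap one level of slice or cons indirection, then split on sequential versus external representation and on the encoding.

  #include <cstdint>
  #include <stdexcept>

  // Illustrative stand-in for a string object; none of these names are V8's.
  struct Str {
    enum Shape { kSeq, kExternal, kSliced, kCons } shape;
    bool is_ascii;
    const void* data;       // Character data (sequential or cached external).
    const Str* first;       // Parent for slices, first part for cons strings.
    bool second_is_empty;   // Cons strings only.
    int offset;             // Slices only.
  };

  uint16_t CharAt(const Str* s, int index) {
    // Unwrap at most one level of indirection, as the generated code does.
    if (s->shape == Str::kSliced) {
      index += s->offset;
      s = s->first;
    } else if (s->shape == Str::kCons) {
      // Only flattened cons strings (empty second part) are handled inline.
      if (!s->second_is_empty) throw std::runtime_error("call runtime");
      s = s->first;
    }
    if (s->shape == Str::kSliced || s->shape == Str::kCons) {
      throw std::runtime_error("call runtime");  // Nested indirection.
    }
    if (s->shape == Str::kExternal && s->data == nullptr) {
      throw std::runtime_error("call runtime");  // Data pointer not cached yet.
    }
    return s->is_ascii ? static_cast<const uint8_t*>(s->data)[index]
                       : static_cast<const uint16_t*>(s->data)[index];
  }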
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 74616d0..c1ef22e 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -1479,6 +1479,7 @@
   ASSERT_EQ(2, constant_elements->length());
   ElementsKind constant_elements_kind =
       static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+  bool has_constant_fast_elements = constant_elements_kind == FAST_ELEMENTS;
   Handle<FixedArrayBase> constant_elements_values(
       FixedArrayBase::cast(constant_elements->get(1)));
 
@@ -1486,13 +1487,16 @@
   __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
   __ push(Immediate(Smi::FromInt(expr->literal_index())));
   __ push(Immediate(constant_elements));
-  if (constant_elements_values->map() ==
-      isolate()->heap()->fixed_cow_array_map()) {
-    ASSERT(expr->depth() == 1);
-    FastCloneShallowArrayStub stub(
-        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
-    __ CallStub(&stub);
+  Heap* heap = isolate()->heap();
+  if (has_constant_fast_elements &&
+      constant_elements_values->map() == heap->fixed_cow_array_map()) {
+    // If the elements are already FAST_ELEMENTS, the boilerplate cannot
+    // change, so it's possible to specialize the stub in advance.
     __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
+    FastCloneShallowArrayStub stub(
+        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
+        length);
+    __ CallStub(&stub);
   } else if (expr->depth() > 1) {
     __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
   } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
@@ -1501,10 +1505,11 @@
     ASSERT(constant_elements_kind == FAST_ELEMENTS ||
            constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
            FLAG_smi_only_arrays);
-    FastCloneShallowArrayStub::Mode mode =
-        constant_elements_kind == FAST_DOUBLE_ELEMENTS
-        ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
-        : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+    // If the elements are already FAST_ELEMENTS, the boilerplate cannot
+    // change, so it's possible to specialize the stub in advance.
+    FastCloneShallowArrayStub::Mode mode = has_constant_fast_elements
+        ? FastCloneShallowArrayStub::CLONE_ELEMENTS
+        : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
     FastCloneShallowArrayStub stub(mode, length);
     __ CallStub(&stub);
   }
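The stub selection above reduces to a small decision tree over the boilerplate's elements kind, depth and length. Sketched in plain C++ with paraphrased names (only COPY_ON_WRITE_ELEMENTS, CLONE_ELEMENTS and CLONE_ANY_ELEMENTS are real stub modes; the rest is illustrative):

  enum Mode {
    COPY_ON_WRITE_ELEMENTS, CLONE_ELEMENTS, CLONE_ANY_ELEMENTS, USE_RUNTIME
  };

  // has_constant_fast_elements: the boilerplate's kind is FAST_ELEMENTS, so it
  // cannot transition and the stub can be specialized in advance.
  Mode ChooseMode(bool has_constant_fast_elements, bool is_cow_map,
                  int depth, int length, int max_cloned_length) {
    if (has_constant_fast_elements && is_cow_map) return COPY_ON_WRITE_ELEMENTS;
    if (depth > 1 || length > max_cloned_length) return USE_RUNTIME;
    return has_constant_fast_elements ? CLONE_ELEMENTS : CLONE_ANY_ELEMENTS;
  }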
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 7f78d03..40d5dec 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -33,6 +33,7 @@
 #include "code-stubs.h"
 #include "deoptimizer.h"
 #include "stub-cache.h"
+#include "codegen.h"
 
 namespace v8 {
 namespace internal {
@@ -1683,6 +1684,31 @@
 }
 
 
+Condition LCodeGen::EmitIsString(Register input,
+                                 Register temp1,
+                                 Label* is_not_string) {
+  __ JumpIfSmi(input, is_not_string);
+
+  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
+
+  return cond;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+  Register reg = ToRegister(instr->InputAt(0));
+  Register temp = ToRegister(instr->TempAt(0));
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  Condition true_cond = EmitIsString(reg, temp, false_label);
+
+  EmitBranch(true_block, false_block, true_cond);
+}
+
+
 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
   Operand input = ToOperand(instr->InputAt(0));
 
@@ -1710,6 +1736,41 @@
 }
 
 
+static Condition ComputeCompareCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return equal;
+    case Token::LT:
+      return less;
+    case Token::GT:
+      return greater;
+    case Token::LTE:
+      return less_equal;
+    case Token::GTE:
+      return greater_equal;
+    default:
+      UNREACHABLE();
+      return no_condition;
+  }
+}
+
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+  Token::Value op = instr->op();
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Handle<Code> ic = CompareIC::GetUninitialized(op);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+  Condition condition = ComputeCompareCondition(op);
+  __ test(eax, Operand(eax));
+
+  EmitBranch(true_block, false_block, condition);
+}
+
+
 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
   InstanceType from = instr->from();
   InstanceType to = instr->to();
@@ -1987,26 +2048,6 @@
 }
 
 
-static Condition ComputeCompareCondition(Token::Value op) {
-  switch (op) {
-    case Token::EQ_STRICT:
-    case Token::EQ:
-      return equal;
-    case Token::LT:
-      return less;
-    case Token::GT:
-      return greater;
-    case Token::LTE:
-      return less_equal;
-    case Token::GTE:
-      return greater_equal;
-    default:
-      UNREACHABLE();
-      return no_condition;
-  }
-}
-
-
 void LCodeGen::DoCmpT(LCmpT* instr) {
   Token::Value op = instr->op();
 
@@ -3371,85 +3412,15 @@
     LStringCharCodeAt* instr_;
   };
 
-  Register string = ToRegister(instr->string());
-  Register index = ToRegister(instr->index());
-  Register result = ToRegister(instr->result());
-
   DeferredStringCharCodeAt* deferred =
       new DeferredStringCharCodeAt(this, instr);
 
-  // Fetch the instance type of the receiver into result register.
-  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
-  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
-  // We need special handling for indirect strings.
-  Label check_sequential;
-  __ test(result, Immediate(kIsIndirectStringMask));
-  __ j(zero, &check_sequential, Label::kNear);
-
-  // Dispatch on the indirect string shape: slice or cons.
-  Label cons_string;
-  __ test(result, Immediate(kSlicedNotConsMask));
-  __ j(zero, &cons_string, Label::kNear);
-
-  // Handle slices.
-  Label indirect_string_loaded;
-  __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
-  __ SmiUntag(result);
-  __ add(index, Operand(result));
-  __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
-  __ jmp(&indirect_string_loaded, Label::kNear);
-
-  // Handle conses.
-  // Check whether the right hand side is the empty string (i.e. if
-  // this is really a flat string in a cons string). If that is not
-  // the case we would rather go to the runtime system now to flatten
-  // the string.
-  __ bind(&cons_string);
-  __ cmp(FieldOperand(string, ConsString::kSecondOffset),
-         Immediate(factory()->empty_string()));
-  __ j(not_equal, deferred->entry());
-  __ mov(string, FieldOperand(string, ConsString::kFirstOffset));
-
-  __ bind(&indirect_string_loaded);
-  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
-  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
-
-  // Check whether the string is sequential. The only non-sequential
-  // shapes we support have just been unwrapped above.
-  // Note that if the original string is a cons or slice with an external
-  // string as underlying string, we pass that unpacked underlying string with
-  // the adjusted index to the runtime function.
-  __ bind(&check_sequential);
-  STATIC_ASSERT(kSeqStringTag == 0);
-  __ test(result, Immediate(kStringRepresentationMask));
-  __ j(not_zero, deferred->entry());
-
-  // Dispatch on the encoding: ASCII or two-byte.
-  Label ascii_string;
-  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
-  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
-  __ test(result, Immediate(kStringEncodingMask));
-  __ j(not_zero, &ascii_string, Label::kNear);
-
-  // Two-byte string.
-  // Load the two-byte character code into the result register.
-  Label done;
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  __ movzx_w(result, FieldOperand(string,
-                                  index,
-                                  times_2,
-                                  SeqTwoByteString::kHeaderSize));
-  __ jmp(&done, Label::kNear);
-
-  // ASCII string.
-  // Load the byte into the result register.
-  __ bind(&ascii_string);
-  __ movzx_b(result, FieldOperand(string,
-                                  index,
-                                  times_1,
-                                  SeqAsciiString::kHeaderSize));
-  __ bind(&done);
+  StringCharLoadGenerator::Generate(masm(),
+                                    factory(),
+                                    ToRegister(instr->string()),
+                                    ToRegister(instr->index()),
+                                    ToRegister(instr->result()),
+                                    deferred->entry());
   __ bind(deferred->exit());
 }
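DoStringCompareAndBranch leans on a simple contract: the compare IC leaves a value in eax whose relation to zero matches the requested comparison, so a single test against zero plus the condition from ComputeCompareCondition decides the branch. A hedged strcmp-based analogue of that contract (names here are illustrative, not V8 API):

  #include <cstring>

  // strcmp stands in for the CompareIC call; like the IC's result, its return
  // value compares against zero the same way the operands compare.
  bool StringCompareAndBranch(const char* left, const char* right, char op) {
    int result = std::strcmp(left, right);
    switch (op) {
      case '<': return result < 0;
      case '>': return result > 0;
      default:  return result == 0;   // '=' (EQ / EQ_STRICT).
    }
  }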
 
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 1354e1d..ccbf048 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -280,6 +280,13 @@
                          Label* is_not_object,
                          Label* is_object);
 
+  // Emits optimized code for %_IsString(x).  Preserves input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitIsString(Register input,
+                         Register temp1,
+                         Label* is_not_string);
+
   // Emits optimized code for %_IsConstructCall().
   // Caller should branch on equal condition.
   void EmitIsConstructCall(Register temp);
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 1f01df6..7b5bf1e 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -225,6 +225,13 @@
 }
 
 
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_string(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
 void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if is_smi(");
   InputAt(0)->PrintTo(stream);
@@ -239,6 +246,14 @@
 }
 
 
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if string_compare(");
+  InputAt(1)->PrintTo(stream);
+  InputAt(2)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
 void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if has_instance_type(");
   InputAt(0)->PrintTo(stream);
@@ -1499,6 +1514,13 @@
 }
 
 
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* temp = TempRegister();
+  return new LIsStringAndBranch(UseRegister(instr->value()), temp);
+}
+
+
 LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
   return new(zone()) LIsSmiAndBranch(Use(instr->value()));
@@ -1513,6 +1535,21 @@
 }
 
 
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+    HStringCompareAndBranch* instr) {
+  ASSERT(instr->left()->representation().IsTagged());
+  ASSERT(instr->right()->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* left = UseFixed(instr->left(), edx);
+  LOperand* right = UseFixed(instr->right(), eax);
+
+  LStringCompareAndBranch* result =
+      new LStringCompareAndBranch(context, left, right);
+
+  return MarkAsCall(result, instr);
+}
+
+
 LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
     HHasInstanceTypeAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index 2fe55fd..74e53ae 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -103,8 +103,10 @@
   V(IsConstructCallAndBranch)                   \
   V(IsNilAndBranch)                             \
   V(IsObjectAndBranch)                          \
+  V(IsStringAndBranch)                          \
   V(IsSmiAndBranch)                             \
   V(IsUndetectableAndBranch)                    \
+  V(StringCompareAndBranch)                     \
   V(JSArrayLength)                              \
   V(Label)                                      \
   V(LazyBailout)                                \
@@ -633,6 +635,19 @@
 };
 
 
+class LIsStringAndBranch: public LControlInstruction<1, 1> {
+ public:
+  LIsStringAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
 class LIsSmiAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LIsSmiAndBranch(LOperand* value) {
@@ -660,6 +675,24 @@
 };
 
 
+class LStringCompareAndBranch: public LControlInstruction<3, 0> {
+ public:
+  LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+                               "string-compare-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  Token::Value op() const { return hydrogen()->token(); }
+};
+
+
 class LHasInstanceTypeAndBranch: public LControlInstruction<1, 1> {
  public:
   LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 8b84507..dd1af02 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -6026,15 +6026,12 @@
     // a3: from index (untagged smi)
     // t2 (a.k.a. to): to (smi)
     // t3 (a.k.a. from): from offset (smi)
-    Label allocate_slice, sliced_string, seq_string;
-    STATIC_ASSERT(kSeqStringTag == 0);
-    __ And(t4, a1, Operand(kStringRepresentationMask));
-    __ Branch(&seq_string, eq, t4, Operand(zero_reg));
+    Label allocate_slice, sliced_string, seq_or_external_string;
+    // If the string is not indirect, it can only be sequential or external.
     STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
     STATIC_ASSERT(kIsIndirectStringMask != 0);
     __ And(t4, a1, Operand(kIsIndirectStringMask));
-    // External string.  Jump to runtime.
-    __ Branch(&sub_string_runtime, eq, t4, Operand(zero_reg));
+    __ Branch(&seq_or_external_string, eq, t4, Operand(zero_reg));
 
     __ And(t4, a1, Operand(kSlicedNotConsMask));
     __ Branch(&sliced_string, ne, t4, Operand(zero_reg));
@@ -6052,8 +6049,8 @@
     __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
     __ jmp(&allocate_slice);
 
-    __ bind(&seq_string);
-    // Sequential string.  Just move string to the right register.
+    __ bind(&seq_or_external_string);
+    // Sequential or external string.  Just move string to the correct register.
     __ mov(t1, v0);
 
     __ bind(&allocate_slice);
@@ -7162,6 +7159,8 @@
   // ElementsTransitionGenerator::GenerateDoubleToObject
   { t2, a2, a0, EMIT_REMEMBERED_SET },
   { a2, t2, t5, EMIT_REMEMBERED_SET },
+  // StoreArrayLiteralElementStub::Generate
+  { t1, a0, t2, EMIT_REMEMBERED_SET },
   // Null termination.
   { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
 };
@@ -7401,6 +7400,66 @@
 }
 
 
+void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0    : element value to store
+  //  -- a1    : array literal
+  //  -- a2    : map of array literal
+  //  -- a3    : element index as smi
+  //  -- t0    : array literal index in function as smi
+  // -----------------------------------
+
+  Label element_done;
+  Label double_elements;
+  Label smi_element;
+  Label slow_elements;
+  Label fast_elements;
+
+  __ CheckFastElements(a2, t1, &double_elements);
+  // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
+  __ JumpIfSmi(a0, &smi_element);
+  __ CheckFastSmiOnlyElements(a2, t1, &fast_elements);
+
+  // Storing into the array literal requires an elements transition. Call into
+  // the runtime.
+  __ bind(&slow_elements);
+  // Set up the arguments for the runtime call.
+  __ Push(a1, a3, a0);
+  __ lw(t1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ lw(t1, FieldMemOperand(t1, JSFunction::kLiteralsOffset));
+  __ Push(t1, t0);
+  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
+
+  // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
+  __ bind(&fast_elements);
+  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
+  __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(t2, t1, t2);
+  __ Addu(t2, t2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sw(a0, MemOperand(t2, 0));
+  // Update the write barrier for the array store.
+  __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ Ret();
+
+  // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
+  // FAST_ELEMENTS, and value is Smi.
+  __ bind(&smi_element);
+  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
+  __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(t2, t1, t2);
+  __ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize));
+  __ Ret();
+
+  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
+  __ bind(&double_elements);
+  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
+  __ StoreNumberToDoubleElements(a0, a3, a1, t1, t2, t3, t5, t6,
+                                 &slow_elements);
+  __ Ret();
+}
+
+
 #undef __
 
 } }  // namespace v8::internal
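StoreArrayLiteralElementStub picks one of three store paths from the literal's elements kind and the value, falling back to the runtime only when an elements transition is required. A condensed C++ sketch of that dispatch (enum and parameter names are paraphrased):

  enum ElementsKind { FAST_SMI_ONLY, FAST, FAST_DOUBLE };

  // Returns true when the stub can complete the store itself; false means it
  // tail-calls Runtime::kStoreArrayLiteralElement, e.g. to transition the
  // elements kind first.
  bool HandledInStub(ElementsKind kind, bool value_is_smi, bool value_is_number) {
    if (kind == FAST_DOUBLE) return value_is_number;  // Store unboxed double.
    if (value_is_smi) return true;   // Smi store, no write barrier needed.
    return kind == FAST;             // Object store plus write barrier;
                                     // FAST_SMI_ONLY needs a transition.
  }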
diff --git a/src/mips/deoptimizer-mips.cc b/src/mips/deoptimizer-mips.cc
index bc735df..a27c61c 100644
--- a/src/mips/deoptimizer-mips.cc
+++ b/src/mips/deoptimizer-mips.cc
@@ -45,12 +45,6 @@
 }
 
 
-void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
-  // Nothing to do. No new relocation information is written for lazy
-  // deoptimization on MIPS.
-}
-
-
 void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
   HandleScope scope;
   AssertNoAllocation no_allocation;
@@ -59,58 +53,38 @@
 
   // Get the optimized code.
   Code* code = function->code();
+  Address code_start_address = code->instruction_start();
 
   // Invalidate the relocation information, as it will become invalid by the
   // code patching below, and is not needed any more.
   code->InvalidateRelocation();
 
-  // For each return after a safepoint insert an absolute call to the
-  // corresponding deoptimization entry.
-  unsigned last_pc_offset = 0;
-  SafepointTable table(function->code());
-  for (unsigned i = 0; i < table.length(); i++) {
-    unsigned pc_offset = table.GetPcOffset(i);
-    SafepointEntry safepoint_entry = table.GetEntry(i);
-    int deoptimization_index = safepoint_entry.deoptimization_index();
-    int gap_code_size = safepoint_entry.gap_code_size();
-    // Check that we did not shoot past next safepoint.
-    CHECK(pc_offset >= last_pc_offset);
+  // For each LLazyBailout instruction, insert a call to the corresponding
+  // deoptimization entry.
+  DeoptimizationInputData* deopt_data =
+      DeoptimizationInputData::cast(code->deoptimization_data());
 #ifdef DEBUG
-    // Destroy the code which is not supposed to be run again.
-    int instructions = (pc_offset - last_pc_offset) / Assembler::kInstrSize;
-    CodePatcher destroyer(code->instruction_start() + last_pc_offset,
-                          instructions);
-    for (int x = 0; x < instructions; x++) {
-      destroyer.masm()->break_(0);
-    }
+  Address prev_call_address = NULL;
 #endif
-    last_pc_offset = pc_offset;
-    if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
-      Address deoptimization_entry = Deoptimizer::GetDeoptimizationEntry(
-          deoptimization_index, Deoptimizer::LAZY);
-      last_pc_offset += gap_code_size;
-      int call_size_in_bytes = MacroAssembler::CallSize(deoptimization_entry,
-                                                        RelocInfo::NONE);
-      int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
-      ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
-      ASSERT(call_size_in_bytes <= patch_size());
-      CodePatcher patcher(code->instruction_start() + last_pc_offset,
-                          call_size_in_words);
-      patcher.masm()->Call(deoptimization_entry, RelocInfo::NONE);
-      last_pc_offset += call_size_in_bytes;
-    }
-  }
+  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
+    if (deopt_data->Pc(i)->value() == -1) continue;
+    Address call_address = code_start_address + deopt_data->Pc(i)->value();
+    Address deopt_entry = GetDeoptimizationEntry(i, LAZY);
+    int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry,
+                                                      RelocInfo::NONE);
+    int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
+    ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
+    ASSERT(call_size_in_bytes <= patch_size());
+    CodePatcher patcher(call_address, call_size_in_words);
+    patcher.masm()->Call(deopt_entry, RelocInfo::NONE);
+    ASSERT(prev_call_address == NULL ||
+           call_address >= prev_call_address + patch_size());
+    ASSERT(call_address + patch_size() <= code->instruction_end());
 
 #ifdef DEBUG
-  // Destroy the code which is not supposed to be run again.
-  int instructions =
-      (code->safepoint_table_offset() - last_pc_offset) / Assembler::kInstrSize;
-  CodePatcher destroyer(code->instruction_start() + last_pc_offset,
-                        instructions);
-  for (int x = 0; x < instructions; x++) {
-    destroyer.masm()->break_(0);
-  }
+    prev_call_address = call_address;
 #endif
+  }
 
   Isolate* isolate = code->GetIsolate();
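With a pc offset recorded per LLazyBailout instruction, lazy deoptimization on MIPS now patches only those call sites instead of rewriting code after every safepoint. A compact sketch of the loop above, with the patching itself abstracted behind a callback (all names here are illustrative):

  #include <cstdint>
  #include <vector>

  // deopt_pcs[i] is the pc offset recorded for deopt entry i, or -1 if that
  // entry can only be reached by eager deoptimization.
  void PatchLazyDeoptSites(uint8_t* code_start,
                           const std::vector<int>& deopt_pcs,
                           void (*patch_call)(uint8_t* site, int deopt_index)) {
    for (size_t i = 0; i < deopt_pcs.size(); ++i) {
      if (deopt_pcs[i] == -1) continue;  // No lazy bailout for this entry.
      patch_call(code_start + deopt_pcs[i], static_cast<int>(i));
    }
  }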
 
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index ca1d2b5..afe8056 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -40,37 +40,22 @@
  public:
   SafepointGenerator(LCodeGen* codegen,
                      LPointerMap* pointers,
-                     int deoptimization_index)
+                     Safepoint::DeoptMode mode)
       : codegen_(codegen),
         pointers_(pointers),
-        deoptimization_index_(deoptimization_index) { }
+        deopt_mode_(mode) { }
   virtual ~SafepointGenerator() { }
 
-  virtual void BeforeCall(int call_size) const {
-    ASSERT(call_size >= 0);
-    // Ensure that we have enough space after the previous safepoint position
-    // for the generated code there.
-    int call_end = codegen_->masm()->pc_offset() + call_size;
-    int prev_jump_end =
-        codegen_->LastSafepointEnd() + Deoptimizer::patch_size();
-    if (call_end < prev_jump_end) {
-      int padding_size = prev_jump_end - call_end;
-      ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
-      while (padding_size > 0) {
-        codegen_->masm()->nop();
-        padding_size -= Assembler::kInstrSize;
-      }
-    }
-  }
+  virtual void BeforeCall(int call_size) const { }
 
   virtual void AfterCall() const {
-    codegen_->RecordSafepoint(pointers_, deoptimization_index_);
+    codegen_->RecordSafepoint(pointers_, deopt_mode_);
   }
 
  private:
   LCodeGen* codegen_;
   LPointerMap* pointers_;
-  int deoptimization_index_;
+  Safepoint::DeoptMode deopt_mode_;
 };
 
 
@@ -101,7 +86,6 @@
   code->set_stack_slots(GetStackSlotCount());
   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   PopulateDeoptimizationData(code);
-  Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
 }
 
 
@@ -198,7 +182,7 @@
     } else {
       __ CallRuntime(Runtime::kNewFunctionContext, 1);
     }
-    RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
+    RecordSafepoint(Safepoint::kNoLazyDeopt);
     // Context is returned in both v0 and cp.  It replaces the context
     // passed to us.  It's saved in the stack and kept live in cp.
     __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -226,6 +210,7 @@
   if (FLAG_trace) {
     __ CallRuntime(Runtime::kTraceEnter, 0);
   }
+  EnsureSpaceForLazyDeopt();
   return !is_aborted();
 }
 
@@ -251,15 +236,6 @@
 }
 
 
-LInstruction* LCodeGen::GetNextInstruction() {
-  if (current_instruction_ < instructions_->length() - 1) {
-    return instructions_->at(current_instruction_ + 1);
-  } else {
-    return NULL;
-  }
-}
-
-
 bool LCodeGen::GenerateDeferredCode() {
   ASSERT(is_generating());
   if (deferred_.length() > 0) {
@@ -272,13 +248,6 @@
       code->Generate();
       __ jmp(code->exit());
     }
-
-    // Pad code to ensure that the last piece of deferred code have
-    // room for lazy bailout.
-    while ((masm()->pc_offset() - LastSafepointEnd())
-           < Deoptimizer::patch_size()) {
-      __ nop();
-    }
   }
   // Deferred code is the last part of the instruction sequence. Mark
   // the generated code as done unless we bailed out.
@@ -534,7 +503,7 @@
   LPointerMap* pointers = instr->pointer_map();
   RecordPosition(pointers->position());
   __ Call(code, mode);
-  RegisterLazyDeoptimization(instr, safepoint_mode);
+  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
 }
 
 
@@ -547,7 +516,7 @@
   RecordPosition(pointers->position());
 
   __ CallRuntime(function, num_arguments);
-  RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
 }
 
 
@@ -556,37 +525,12 @@
                                        LInstruction* instr) {
   __ CallRuntimeSaveDoubles(id);
   RecordSafepointWithRegisters(
-      instr->pointer_map(), argc, Safepoint::kNoDeoptimizationIndex);
+      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
 }
 
 
-void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr,
-                                          SafepointMode safepoint_mode) {
-  // Create the environment to bailout to. If the call has side effects
-  // execution has to continue after the call otherwise execution can continue
-  // from a previous bailout point repeating the call.
-  LEnvironment* deoptimization_environment;
-  if (instr->HasDeoptimizationEnvironment()) {
-    deoptimization_environment = instr->deoptimization_environment();
-  } else {
-    deoptimization_environment = instr->environment();
-  }
-
-  RegisterEnvironmentForDeoptimization(deoptimization_environment);
-  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
-    RecordSafepoint(instr->pointer_map(),
-                    deoptimization_environment->deoptimization_index());
-  } else {
-    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
-    RecordSafepointWithRegisters(
-        instr->pointer_map(),
-        0,
-        deoptimization_environment->deoptimization_index());
-  }
-}
-
-
-void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+                                                    Safepoint::DeoptMode mode) {
   if (!environment->HasBeenRegistered()) {
     // Physical stack frame layout:
     // -x ............. -4  0 ..................................... y
@@ -608,7 +552,10 @@
     Translation translation(&translations_, frame_count);
     WriteTranslation(environment, &translation);
     int deoptimization_index = deoptimizations_.length();
-    environment->Register(deoptimization_index, translation.index());
+    int pc_offset = masm()->pc_offset();
+    environment->Register(deoptimization_index,
+                          translation.index(),
+                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
     deoptimizations_.Add(environment);
   }
 }
@@ -618,7 +565,7 @@
                             LEnvironment* environment,
                             Register src1,
                             const Operand& src2) {
-  RegisterEnvironmentForDeoptimization(environment);
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
   ASSERT(environment->HasBeenRegistered());
   int id = environment->deoptimization_index();
   Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
@@ -683,6 +630,7 @@
     data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
     data->SetArgumentsStackHeight(i,
                                   Smi::FromInt(env->arguments_stack_height()));
+    data->SetPc(i, Smi::FromInt(env->pc_offset()));
   }
   code->set_deoptimization_data(*data);
 }
@@ -714,16 +662,28 @@
 }
 
 
+void LCodeGen::RecordSafepointWithLazyDeopt(
+    LInstruction* instr, SafepointMode safepoint_mode) {
+  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+  } else {
+    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
+  }
+}
+
+
 void LCodeGen::RecordSafepoint(
     LPointerMap* pointers,
     Safepoint::Kind kind,
     int arguments,
-    int deoptimization_index) {
+    Safepoint::DeoptMode deopt_mode) {
   ASSERT(expected_safepoint_kind_ == kind);
 
   const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
-      kind, arguments, deoptimization_index);
+      kind, arguments, deopt_mode);
   for (int i = 0; i < operands->length(); i++) {
     LOperand* pointer = operands->at(i);
     if (pointer->IsStackSlot()) {
@@ -740,31 +700,31 @@
 
 
 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
-                               int deoptimization_index) {
-  RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
+                               Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
 }
 
 
-void LCodeGen::RecordSafepoint(int deoptimization_index) {
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
   LPointerMap empty_pointers(RelocInfo::kNoPosition);
-  RecordSafepoint(&empty_pointers, deoptimization_index);
+  RecordSafepoint(&empty_pointers, deopt_mode);
 }
 
 
 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                             int arguments,
-                                            int deoptimization_index) {
-  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
-      deoptimization_index);
+                                            Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(
+      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
 }
 
 
 void LCodeGen::RecordSafepointWithRegistersAndDoubles(
     LPointerMap* pointers,
     int arguments,
-    int deoptimization_index) {
-  RecordSafepoint(pointers, Safepoint::kWithRegistersAndDoubles, arguments,
-      deoptimization_index);
+    Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(
+      pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
 }
 
 
@@ -799,12 +759,6 @@
     LParallelMove* move = gap->GetParallelMove(inner_pos);
     if (move != NULL) DoParallelMove(move);
   }
-
-  LInstruction* next = GetNextInstruction();
-  if (next != NULL && next->IsLazyBailout()) {
-    int pc = masm()->pc_offset();
-    safepoints_.SetPcAfterGap(pc);
-  }
 }
 
 
@@ -874,52 +828,47 @@
   const Register left = ToRegister(instr->InputAt(0));
   const Register result = ToRegister(instr->result());
 
-  // p2constant holds the right side value if it's a power of 2 constant.
-  // In other cases it is 0.
-  int32_t p2constant = 0;
+  Label done;
 
-  if (instr->InputAt(1)->IsConstantOperand()) {
-      p2constant = ToInteger32(LConstantOperand::cast(instr->InputAt(1)));
-      if (p2constant % 2 != 0) {
-        p2constant = 0;
-      }
-      // Result always takes the sign of the dividend (left).
-      p2constant = abs(p2constant);
-  }
-
-  // div runs in the background while we check for special cases.
-  Register right = EmitLoadRegister(instr->InputAt(1), scratch);
-  __ div(left, right);
-
-  // Check for x % 0.
-  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
-    DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
-  }
-
-  Label skip_div, do_div;
-  if (p2constant != 0) {
-    // Fall back to the result of the div instruction if we could have sign
-    // problems.
-    __ Branch(&do_div, lt, left, Operand(zero_reg));
-    // Modulo by masking.
-    __ And(scratch, left, p2constant - 1);
-    __ Branch(&skip_div);
-  }
-
-  __ bind(&do_div);
-  __ mfhi(scratch);
-  __ bind(&skip_div);
-
-  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+  if (instr->hydrogen()->HasPowerOf2Divisor()) {
+    Register scratch = scratch0();
+    ASSERT(!left.is(scratch));
+    __ mov(scratch, left);
+    int32_t p2constant = HConstant::cast(
+        instr->hydrogen()->right())->Integer32Value();
+    ASSERT(p2constant != 0);
     // Result always takes the sign of the dividend (left).
-    Label done;
-    __ Branch(USE_DELAY_SLOT, &done, ge, left, Operand(zero_reg));
-    __ mov(result, scratch);
-    DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
-    __ bind(&done);
+    p2constant = abs(p2constant);
+
+    Label positive_dividend;
+    __ Branch(USE_DELAY_SLOT, &positive_dividend, ge, left, Operand(zero_reg));
+    __ subu(result, zero_reg, left);
+    __ And(result, result, p2constant - 1);
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
+    }
+    __ Branch(USE_DELAY_SLOT, &done);
+    __ subu(result, zero_reg, result);
+    __ bind(&positive_dividend);
+    __ And(result, scratch, p2constant - 1);
   } else {
-    __ Move(result, scratch);
+    // div runs in the background while we check for special cases.
+    Register right = EmitLoadRegister(instr->InputAt(1), scratch);
+    __ div(left, right);
+
+    // Check for x % 0.
+    if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+      DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
+    }
+
+    __ Branch(USE_DELAY_SLOT, &done, ge, left, Operand(zero_reg));
+    __ mfhi(result);
+
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
+    }
   }
+  __ bind(&done);
 }
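The new power-of-two fast path computes left % 2^k by masking, which is only correct for non-negative dividends; negating before and after the mask keeps the result's sign equal to the dividend's, and a zero result from a negative dividend is the -0 case that may deoptimize. A small self-checking example of the identity (host C++, truncated-division semantics as in JavaScript):

  #include <cassert>

  // dividend % divisor for a positive power-of-two divisor, keeping the sign
  // of the dividend, the same way the generated code does.
  int ModPowerOf2(int dividend, int divisor) {
    assert(divisor > 0 && (divisor & (divisor - 1)) == 0);
    if (dividend < 0) return -(-dividend & (divisor - 1));
    return dividend & (divisor - 1);
  }

  int main() {
    assert(ModPowerOf2(7, 4) == 3);
    assert(ModPowerOf2(-7, 4) == -3);  // Result takes the dividend's sign.
    assert(ModPowerOf2(-8, 4) == 0);   // The "minus zero" case.
    return 0;
  }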
 
 
@@ -1686,9 +1635,9 @@
 
 Condition LCodeGen::EmitIsObject(Register input,
                                  Register temp1,
+                                 Register temp2,
                                  Label* is_not_object,
                                  Label* is_object) {
-  Register temp2 = scratch0();
   __ JumpIfSmi(input, is_not_object);
 
   __ LoadRoot(temp2, Heap::kNullValueRootIndex);
@@ -1721,7 +1670,7 @@
   Label* false_label = chunk_->GetAssemblyLabel(false_block);
 
   Condition true_cond =
-      EmitIsObject(reg, temp1, false_label, true_label);
+      EmitIsObject(reg, temp1, temp2, false_label, true_label);
 
   EmitBranch(true_block, false_block, true_cond, temp2,
              Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
@@ -1941,7 +1890,7 @@
                                   LInstanceOfKnownGlobal* instr)
         : LDeferredCode(codegen), instr_(instr) { }
     virtual void Generate() {
-      codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
+      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
     }
     virtual LInstruction* instr() { return instr_; }
     Label* map_check() { return &map_check_; }
@@ -2009,8 +1958,8 @@
 }
 
 
-void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
-                                                Label* map_check) {
+void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+                                               Label* map_check) {
   Register result = ToRegister(instr->result());
   ASSERT(result.is(v0));
 
@@ -2044,6 +1993,9 @@
                   RelocInfo::CODE_TARGET,
                   instr,
                   RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+  ASSERT(instr->HasDeoptimizationEnvironment());
+  LEnvironment* env = instr->deoptimization_environment();
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
   // Put the result value into the result register slot and
   // restore all registers.
   __ StoreToSafepointRegisterSlot(result, result);
@@ -2670,12 +2622,9 @@
   __ bind(&invoke);
   ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
   LPointerMap* pointers = instr->pointer_map();
-  LEnvironment* env = instr->deoptimization_environment();
   RecordPosition(pointers->position());
-  RegisterEnvironmentForDeoptimization(env);
-  SafepointGenerator safepoint_generator(this,
-                                         pointers,
-                                         env->deoptimization_index());
+  SafepointGenerator safepoint_generator(
+      this, pointers, Safepoint::kLazyDeopt);
   // The number of arguments is stored in receiver which is a0, as expected
   // by InvokeFunction.
   v8::internal::ParameterCount actual(receiver);
@@ -2758,7 +2707,7 @@
   __ Call(at);
 
   // Setup deoptimization.
-  RegisterLazyDeoptimization(instr, RECORD_SIMPLE_SAFEPOINT);
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
 
   // Restore context.
   __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3135,10 +3084,8 @@
   ASSERT(instr->HasPointerMap());
   ASSERT(instr->HasDeoptimizationEnvironment());
   LPointerMap* pointers = instr->pointer_map();
-  LEnvironment* env = instr->deoptimization_environment();
   RecordPosition(pointers->position());
-  RegisterEnvironmentForDeoptimization(env);
-  SafepointGenerator generator(this, pointers, env->deoptimization_index());
+  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   ParameterCount count(instr->arity());
   __ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
   __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -4518,9 +4465,29 @@
 }
 
 
+void LCodeGen::EnsureSpaceForLazyDeopt() {
+  // Ensure that we have enough space after the previous lazy-bailout
+  // instruction for patching the code here.
+  int current_pc = masm()->pc_offset();
+  int patch_size = Deoptimizer::patch_size();
+  if (current_pc < last_lazy_deopt_pc_ + patch_size) {
+    int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
+    ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
+    while (padding_size > 0) {
+      __ nop();
+      padding_size -= Assembler::kInstrSize;
+    }
+  }
+  last_lazy_deopt_pc_ = current_pc;
+}
+
+
 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
-  // No code for lazy bailout instruction. Used to capture environment after a
-  // call for populating the safepoint data with deoptimization data.
+  EnsureSpaceForLazyDeopt();
+  ASSERT(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
 }
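EnsureSpaceForLazyDeopt pads with nops so that consecutive lazy-deopt sites end up at least Deoptimizer::patch_size() bytes apart; that spacing is what lets DeoptimizeFunction later overwrite each recorded site with a call without clobbering its neighbour. The padding arithmetic, as a sketch with made-up values:

  // Bytes of nops to emit at current_pc so the previous lazy-deopt site can be
  // patched with a patch_size-byte call sequence.
  int PaddingForLazyDeopt(int last_lazy_deopt_pc, int current_pc,
                          int patch_size) {
    int needed = last_lazy_deopt_pc + patch_size - current_pc;
    return needed > 0 ? needed : 0;
  }
  // E.g. PaddingForLazyDeopt(100, 104, 16) == 12, i.e. three 4-byte MIPS nops.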
 
 
@@ -4537,12 +4504,9 @@
   __ Push(object, key, strict);
   ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
   LPointerMap* pointers = instr->pointer_map();
-  LEnvironment* env = instr->deoptimization_environment();
   RecordPosition(pointers->position());
-  RegisterEnvironmentForDeoptimization(env);
-  SafepointGenerator safepoint_generator(this,
-                                         pointers,
-                                         env->deoptimization_index());
+  SafepointGenerator safepoint_generator(
+      this, pointers, Safepoint::kLazyDeopt);
   __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
 }
 
@@ -4553,27 +4517,20 @@
   __ Push(key, obj);
   ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
   LPointerMap* pointers = instr->pointer_map();
-  LEnvironment* env = instr->deoptimization_environment();
   RecordPosition(pointers->position());
-  RegisterEnvironmentForDeoptimization(env);
-  SafepointGenerator safepoint_generator(this,
-                                         pointers,
-                                         env->deoptimization_index());
+  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
   __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
 }
 
 
 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
-  {
-    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-    __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
-    RegisterLazyDeoptimization(
-        instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
-  }
-
-  // The gap code includes the restoring of the safepoint registers.
-  int pc = masm()->pc_offset();
-  safepoints_.SetPcAfterGap(pc);
+  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+  RecordSafepointWithLazyDeopt(
+      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+  ASSERT(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
 }
 
 
@@ -4588,6 +4545,10 @@
     LStackCheck* instr_;
   };
 
+  ASSERT(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  // There is no LLazyBailout instruction for stack-checks. We have to
+  // prepare for lazy deoptimization explicitly here.
   if (instr->hydrogen()->is_function_entry()) {
     // Perform stack overflow check.
     Label done;
@@ -4595,7 +4556,10 @@
     __ Branch(&done, hs, sp, Operand(at));
     StackCheckStub stub;
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+    EnsureSpaceForLazyDeopt();
     __ bind(&done);
+    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
   } else {
     ASSERT(instr->hydrogen()->is_backwards_branch());
     // Perform stack overflow check if this goto needs it before jumping.
@@ -4603,8 +4567,13 @@
         new DeferredStackCheck(this, instr);
     __ LoadRoot(at, Heap::kStackLimitRootIndex);
     __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
+    EnsureSpaceForLazyDeopt();
     __ bind(instr->done_label());
     deferred_stack_check->SetExit(instr->done_label());
+    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+    // Don't record a deoptimization index for the safepoint here.
+    // This will be done explicitly when emitting the call and safepoint in
+    // the deferred code.
   }
 }
 
@@ -4620,7 +4589,7 @@
   // If the environment were already registered, we would have no way of
   // backpatching it with the spill slot operands.
   ASSERT(!environment->HasBeenRegistered());
-  RegisterEnvironmentForDeoptimization(environment);
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
   ASSERT(osr_pc_offset_ == -1);
   osr_pc_offset_ = masm()->pc_offset();
 }
diff --git a/src/mips/lithium-codegen-mips.h b/src/mips/lithium-codegen-mips.h
index f9c013c..5d4a057 100644
--- a/src/mips/lithium-codegen-mips.h
+++ b/src/mips/lithium-codegen-mips.h
@@ -58,6 +58,7 @@
         status_(UNUSED),
         deferred_(8),
         osr_pc_offset_(-1),
+        last_lazy_deopt_pc_(0),
         resolver_(this),
         expected_safepoint_kind_(Safepoint::kSimple) {
     PopulateDeoptimizationLiteralsWithInlinedFunctions();
@@ -101,10 +102,6 @@
   // information on it.
   void FinishCode(Handle<Code> code);
 
-  // Deferred code support.
-  template<int T>
-  void DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
-                              Token::Value op);
   void DoDeferredNumberTagD(LNumberTagD* instr);
   void DoDeferredNumberTagI(LNumberTagI* instr);
   void DoDeferredTaggedToI(LTaggedToI* instr);
@@ -112,8 +109,8 @@
   void DoDeferredStackCheck(LStackCheck* instr);
   void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
   void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
-  void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
-                                        Label* map_check);
+  void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+                                       Label* map_check);
 
   // Parallel move support.
   void DoParallelMove(LParallelMove* move);
@@ -216,10 +213,11 @@
 
   void LoadHeapObject(Register result, Handle<HeapObject> object);
 
-  void RegisterLazyDeoptimization(LInstruction* instr,
-                                  SafepointMode safepoint_mode);
+  void RecordSafepointWithLazyDeopt(LInstruction* instr,
+                                    SafepointMode safepoint_mode);
 
-  void RegisterEnvironmentForDeoptimization(LEnvironment* environment);
+  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+                                            Safepoint::DeoptMode mode);
   void DeoptimizeIf(Condition cc,
                     LEnvironment* environment,
                     Register src1,
@@ -251,19 +249,16 @@
   void RecordSafepoint(LPointerMap* pointers,
                        Safepoint::Kind kind,
                        int arguments,
-                       int deoptimization_index);
-  void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
-  void RecordSafepoint(int deoptimization_index);
+                       Safepoint::DeoptMode mode);
+  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+  void RecordSafepoint(Safepoint::DeoptMode mode);
   void RecordSafepointWithRegisters(LPointerMap* pointers,
                                     int arguments,
-                                    int deoptimization_index);
+                                    Safepoint::DeoptMode mode);
   void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
                                               int arguments,
-                                              int deoptimization_index);
+                                              Safepoint::DeoptMode mode);
   void RecordPosition(int position);
-  int LastSafepointEnd() {
-    return static_cast<int>(safepoints_.GetPcAfterGap());
-  }
 
   static Condition TokenToCondition(Token::Value op, bool is_unsigned);
   void EmitGoto(int block);
@@ -300,6 +295,7 @@
   // true and false label should be made, to optimize fallthrough.
   Condition EmitIsObject(Register input,
                          Register temp1,
+                         Register temp2,
                          Label* is_not_object,
                          Label* is_object);
 
@@ -320,6 +316,8 @@
     Address address;
   };
 
+  void EnsureSpaceForLazyDeopt();
+
   LChunk* const chunk_;
   MacroAssembler* const masm_;
   CompilationInfo* const info_;
@@ -336,6 +334,7 @@
   TranslationBuffer translations_;
   ZoneList<LDeferredCode*> deferred_;
   int osr_pc_offset_;
+  int last_lazy_deopt_pc_;
 
   // Builder that keeps track of safepoints in the code. The table
   // itself is emitted at the end of the generated code.
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index ccf0481..fb93208 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -910,34 +910,21 @@
                          uint16_t pos,
                          uint16_t size) {
   ASSERT(pos < 32);
-  ASSERT(pos + size < 32);
+  ASSERT(pos + size <= 32);
+  ASSERT(size != 0);
 
   if (mips32r2) {
     ins_(rt, rs, pos, size);
   } else {
     ASSERT(!rt.is(t8) && !rs.is(t8));
-
-    srl(t8, rt, pos + size);
-    // The left chunk from rt that needs to
-    // be saved is on the right side of t8.
-    sll(at, t8, pos + size);
-    // The 'at' register now contains the left chunk on
-    // the left (proper position) and zeroes.
-    sll(t8, rt, 32 - pos);
-    // t8 now contains the right chunk on the left and zeroes.
-    srl(t8, t8, 32 - pos);
-    // t8 now contains the right chunk on
-    // the right (proper position) and zeroes.
-    or_(rt, at, t8);
-    // rt now contains the left and right chunks from the original rt
-    // in their proper position and zeroes in the middle.
-    sll(t8, rs, 32 - size);
-    // t8 now contains the chunk from rs on the left and zeroes.
-    srl(t8, t8, 32 - size - pos);
-    // t8 now contains the original chunk from rs in
-    // the middle (proper position).
-    or_(rt, rt, t8);
-    // rt now contains the result of the ins instruction in R2 mode.
+    Subu(at, zero_reg, Operand(1));  // at = 0xffffffff.
+    srl(at, at, 32 - size);          // at = mask with low 'size' bits set.
+    and_(t8, rs, at);                // t8 = field to insert, taken from rs.
+    sll(t8, t8, pos);                // Shift the field into position.
+    sll(at, at, pos);                // Shift the mask into position.
+    nor(at, at, zero_reg);           // Invert the mask.
+    and_(at, rt, at);                // Clear the field's bits in rt.
+    or_(rt, t8, at);                 // Insert the field into rt.
   }
 }
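On cores without the R2 ins instruction the insert is emulated with plain shifts and masks: build a size-bit mask, position the source field, clear the destination bits, and merge. A host-side C++ equivalent of the emitted sequence (helper name is illustrative):

  #include <cassert>
  #include <cstdint>

  // Insert the low 'size' bits of rs into rt at bit position 'pos'.
  uint32_t InsEmulated(uint32_t rt, uint32_t rs, unsigned pos, unsigned size) {
    assert(pos < 32 && size != 0 && pos + size <= 32);
    uint32_t mask = 0xFFFFFFFFu >> (32 - size);  // Low 'size' bits set.
    uint32_t field = (rs & mask) << pos;         // Source field in position.
    uint32_t hole = ~(mask << pos);              // Everything but the field.
    return (rt & hole) | field;
  }

  int main() {
    assert(InsEmulated(0xFFFFFFFFu, 0x0u, 4, 8) == 0xFFFFF00Fu);
    assert(InsEmulated(0x0u, 0xABu, 8, 8) == 0x0000AB00u);
    return 0;
  }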
 
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 11819ea..9da480e 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -2297,6 +2297,11 @@
 }
 
 
+void ExternalString::clear_data_cache() {
+  WRITE_INTPTR_FIELD(this, kResourceDataOffset, 0);
+}
+
+
 const ExternalAsciiString::Resource* ExternalAsciiString::resource() {
   return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
 }
@@ -2306,6 +2311,21 @@
     const ExternalAsciiString::Resource* resource) {
   *reinterpret_cast<const Resource**>(
       FIELD_ADDR(this, kResourceOffset)) = resource;
+  clear_data_cache();
+}
+
+
+const char* ExternalAsciiString::GetChars() {
+  const char** data_field =
+      reinterpret_cast<const char**>(FIELD_ADDR(this, kResourceDataOffset));
+  if (*data_field == NULL) *data_field = resource()->data();
+  return *data_field;
+}
+
+
+uint16_t ExternalAsciiString::ExternalAsciiStringGet(int index) {
+  ASSERT(index >= 0 && index < length());
+  return GetChars()[index];
 }
 
 
@@ -2318,6 +2338,27 @@
     const ExternalTwoByteString::Resource* resource) {
   *reinterpret_cast<const Resource**>(
       FIELD_ADDR(this, kResourceOffset)) = resource;
+  clear_data_cache();
+}
+
+
+const uint16_t* ExternalTwoByteString::GetChars() {
+  const uint16_t** data_field =
+      reinterpret_cast<const uint16_t**>(FIELD_ADDR(this, kResourceDataOffset));
+  if (*data_field == NULL) *data_field = resource()->data();
+  return *data_field;
+}
+
+
+uint16_t ExternalTwoByteString::ExternalTwoByteStringGet(int index) {
+  ASSERT(index >= 0 && index < length());
+  return GetChars()[index];
+}
+
+
+const uint16_t* ExternalTwoByteString::ExternalTwoByteStringGetData(
+      unsigned start) {
+  return GetChars() + start;
 }
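The new kResourceDataOffset field caches the resource's character pointer inside the string object itself, so both the C++ accessors and the generated stubs can reach the characters with one load; set_resource clears it and GetChars repopulates it on first use. The pattern, in a hedged sketch with invented names:

  // Illustrative stand-in for ExternalAsciiString's cached-data scheme.
  struct Resource {
    const char* payload;
    const char* data() const { return payload; }
  };

  struct ExternalStr {
    const Resource* resource;
    const char* cached_data;  // Mirrors kResourceDataOffset; null = not cached.

    void set_resource(const Resource* r) {
      resource = r;
      cached_data = nullptr;  // clear_data_cache(): force a re-read on next use.
    }
    const char* GetChars() {
      if (cached_data == nullptr) cached_data = resource->data();
      return cached_data;
    }
  };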
 
 
diff --git a/src/objects.cc b/src/objects.cc
index 854ce68..2ebeb65 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -953,8 +953,6 @@
   Heap* heap = GetHeap();
   int size = this->Size();  // Byte size of the original string.
   if (size < ExternalString::kSize) {
-    // The string is too small to fit an external String in its place. This can
-    // only happen for zero length strings.
     return false;
   }
   ASSERT(size >= ExternalString::kSize);
@@ -1007,8 +1005,6 @@
   Heap* heap = GetHeap();
   int size = this->Size();  // Byte size of the original string.
   if (size < ExternalString::kSize) {
-    // The string is too small to fit an external String in its place. This can
-    // only happen for zero length strings.
     return false;
   }
   ASSERT(size >= ExternalString::kSize);
@@ -5790,7 +5786,7 @@
     if (shape.representation_tag() == kSeqStringTag) {
       start = SeqAsciiString::cast(string)->GetChars();
     } else {
-      start = ExternalAsciiString::cast(string)->resource()->data();
+      start = ExternalAsciiString::cast(string)->GetChars();
     }
     return FlatContent(Vector<const char>(start + offset, length));
   } else {
@@ -5799,7 +5795,7 @@
     if (shape.representation_tag() == kSeqStringTag) {
       start = SeqTwoByteString::cast(string)->GetChars();
     } else {
-      start = ExternalTwoByteString::cast(string)->resource()->data();
+      start = ExternalTwoByteString::cast(string)->GetChars();
     }
     return FlatContent(Vector<const uc16>(start + offset, length));
   }
@@ -6032,44 +6028,26 @@
 }
 
 
-uint16_t ExternalAsciiString::ExternalAsciiStringGet(int index) {
-  ASSERT(index >= 0 && index < length());
-  return resource()->data()[index];
-}
-
-
 const unibrow::byte* ExternalAsciiString::ExternalAsciiStringReadBlock(
       unsigned* remaining,
       unsigned* offset_ptr,
       unsigned max_chars) {
   // Cast const char* to unibrow::byte* (signedness difference).
   const unibrow::byte* b =
-      reinterpret_cast<const unibrow::byte*>(resource()->data()) + *offset_ptr;
+      reinterpret_cast<const unibrow::byte*>(GetChars()) + *offset_ptr;
   *remaining = max_chars;
   *offset_ptr += max_chars;
   return b;
 }
 
 
-const uc16* ExternalTwoByteString::ExternalTwoByteStringGetData(
-      unsigned start) {
-  return resource()->data() + start;
-}
-
-
-uint16_t ExternalTwoByteString::ExternalTwoByteStringGet(int index) {
-  ASSERT(index >= 0 && index < length());
-  return resource()->data()[index];
-}
-
-
 void ExternalTwoByteString::ExternalTwoByteStringReadBlockIntoBuffer(
       ReadBlockBuffer* rbb,
       unsigned* offset_ptr,
       unsigned max_chars) {
   unsigned chars_read = 0;
   unsigned offset = *offset_ptr;
-  const uint16_t* data = resource()->data();
+  const uint16_t* data = GetChars();
   while (chars_read < max_chars) {
     uint16_t c = data[offset];
     if (c <= kMaxAsciiCharCode) {
@@ -6115,9 +6093,7 @@
       unsigned max_chars) {
   unsigned capacity = rbb->capacity - rbb->cursor;
   if (max_chars > capacity) max_chars = capacity;
-  memcpy(rbb->util_buffer + rbb->cursor,
-         resource()->data() + *offset_ptr,
-         max_chars);
+  memcpy(rbb->util_buffer + rbb->cursor, GetChars() + *offset_ptr, max_chars);
   rbb->remaining += max_chars;
   *offset_ptr += max_chars;
   rbb->cursor += max_chars;
@@ -6559,13 +6535,13 @@
     switch (StringShape(source).full_representation_tag()) {
       case kAsciiStringTag | kExternalStringTag: {
         CopyChars(sink,
-                  ExternalAsciiString::cast(source)->resource()->data() + from,
+                  ExternalAsciiString::cast(source)->GetChars() + from,
                   to - from);
         return;
       }
       case kTwoByteStringTag | kExternalStringTag: {
         const uc16* data =
-            ExternalTwoByteString::cast(source)->resource()->data();
+            ExternalTwoByteString::cast(source)->GetChars();
         CopyChars(sink,
                   data + from,
                   to - from);
@@ -9986,8 +9962,8 @@
   set(j, temp);
   if (this != numbers) {
     temp = numbers->get(i);
-    numbers->set(i, numbers->get(j));
-    numbers->set(j, temp);
+    numbers->set(i, Smi::cast(numbers->get(j)));
+    numbers->set(j, Smi::cast(temp));
   }
 }
 
diff --git a/src/objects.h b/src/objects.h
index 6e672e7..fc717b2 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -6758,7 +6758,12 @@
 
   // Layout description.
   static const int kResourceOffset = POINTER_SIZE_ALIGN(String::kSize);
-  static const int kSize = kResourceOffset + kPointerSize;
+  static const int kResourceDataOffset = kResourceOffset + kPointerSize;
+  static const int kSize = kResourceDataOffset + kPointerSize;
+
+  // Clear the cached pointer to the character array provided by the resource.
+  // This cache is updated the first time the character array is accessed.
+  inline void clear_data_cache();
 
   STATIC_CHECK(kResourceOffset == Internals::kStringResourceOffset);
 
@@ -6779,8 +6784,10 @@
   inline const Resource* resource();
   inline void set_resource(const Resource* buffer);
 
+  inline const char* GetChars();
+
   // Dispatched behavior.
-  uint16_t ExternalAsciiStringGet(int index);
+  inline uint16_t ExternalAsciiStringGet(int index);
 
   // Casting.
   static inline ExternalAsciiString* cast(Object* obj);
@@ -6816,11 +6823,13 @@
   inline const Resource* resource();
   inline void set_resource(const Resource* buffer);
 
+  inline const uint16_t* GetChars();
+
   // Dispatched behavior.
-  uint16_t ExternalTwoByteStringGet(int index);
+  inline uint16_t ExternalTwoByteStringGet(int index);
 
   // For regexp code.
-  const uint16_t* ExternalTwoByteStringGetData(unsigned start);
+  inline const uint16_t* ExternalTwoByteStringGetData(unsigned start);
 
   // Casting.
   static inline ExternalTwoByteString* cast(Object* obj);
diff --git a/src/parser.h b/src/parser.h
index 1436bf9..5e2c27a 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -681,8 +681,9 @@
   // Factory methods.
 
   Statement* EmptyStatement() {
-    static v8::internal::EmptyStatement empty;
-    return &empty;
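+    // Intentionally leaked to avoid an exit-time destructor.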
+    static v8::internal::EmptyStatement* empty =
+        ::new v8::internal::EmptyStatement();
+    return empty;
   }
 
   Scope* NewScope(Scope* parent, ScopeType type);
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 0284b98..5626aca 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -2470,10 +2470,10 @@
 
 // Modifies heap. Must not be run during heap traversal.
 void V8HeapExplorer::TagGlobalObjects() {
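+  // The enumerator below allocates handles, so the handle scope must be
+  // entered before it runs.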
+  HandleScope scope;
   Isolate* isolate = Isolate::Current();
   GlobalObjectsEnumerator enumerator;
   isolate->global_handles()->IterateAllRoots(&enumerator);
-  HandleScope scope;
   Handle<String> document_string =
       isolate->factory()->NewStringFromAscii(CStrVector("document"));
   Handle<String> url_string =
diff --git a/src/regexp-macro-assembler.cc b/src/regexp-macro-assembler.cc
index f91ea93..99f3a37 100644
--- a/src/regexp-macro-assembler.cc
+++ b/src/regexp-macro-assembler.cc
@@ -81,7 +81,7 @@
   if (subject->IsAsciiRepresentation()) {
     const byte* address;
     if (StringShape(subject).IsExternal()) {
-      const char* data = ExternalAsciiString::cast(subject)->resource()->data();
+      const char* data = ExternalAsciiString::cast(subject)->GetChars();
       address = reinterpret_cast<const byte*>(data);
     } else {
       ASSERT(subject->IsSeqAsciiString());
@@ -92,7 +92,7 @@
   }
   const uc16* data;
   if (StringShape(subject).IsExternal()) {
-    data = ExternalTwoByteString::cast(subject)->resource()->data();
+    data = ExternalTwoByteString::cast(subject)->GetChars();
   } else {
     ASSERT(subject->IsSeqTwoByteString());
     data = SeqTwoByteString::cast(subject)->GetChars();
diff --git a/src/runtime.cc b/src/runtime.cc
index 62c38aa..c3918e6 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -1794,11 +1794,14 @@
       JSFunction::cast(constructor)->initial_map() == map) {
     // If we still have the original map, set in-object properties directly.
     regexp->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex, source);
-    // TODO(lrn): Consider skipping write barrier on booleans as well.
-    // Both true and false should be in oldspace at all times.
-    regexp->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex, global);
-    regexp->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex, ignoreCase);
-    regexp->InObjectPropertyAtPut(JSRegExp::kMultilineFieldIndex, multiline);
+    // Both true and false are immovable immortal objects, so the write
+    // barrier can be skipped.
+    regexp->InObjectPropertyAtPut(
+        JSRegExp::kGlobalFieldIndex, global, SKIP_WRITE_BARRIER);
+    regexp->InObjectPropertyAtPut(
+        JSRegExp::kIgnoreCaseFieldIndex, ignoreCase, SKIP_WRITE_BARRIER);
+    regexp->InObjectPropertyAtPut(
+        JSRegExp::kMultilineFieldIndex, multiline, SKIP_WRITE_BARRIER);
     regexp->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
                                   Smi::FromInt(0),
                                   SKIP_WRITE_BARRIER);  // It's a Smi.
@@ -11213,48 +11216,28 @@
       inlined_frame_index_(inlined_frame_index),
       function_(JSFunction::cast(frame->function())),
       context_(Context::cast(frame->context())),
-      nested_scope_chain_(4) {
+      local_done_(false),
+      at_local_(false) {
 
-    // Catch the case when the debugger stops in an internal function.
-    Handle<SharedFunctionInfo> shared_info(function_->shared());
-    if (shared_info->script() == isolate->heap()->undefined_value()) {
-      while (context_->closure() == *function_) {
-        context_ = Handle<Context>(context_->previous(), isolate_);
-      }
-      return;
-    }
-
-    // Check whether we are in global code or function code. If there is a stack
-    // slot for .result then this function has been created for evaluating
-    // global code and it is not a real function.
+    // Check whether the first scope is actually a local scope.
+    // If there is a stack slot for .result, then this local scope has been
+    // created for evaluating top-level code and it is not a real local scope.
     // Checking for the existence of .result seems fragile, but the scope info
     // saved with the code object does not otherwise have that information.
-    int index = shared_info->scope_info()->
+    int index = function_->shared()->scope_info()->
         StackSlotIndex(isolate_->heap()->result_symbol());
-
-    // Reparse the code and analyze the scopes.
-    ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
-    Handle<Script> script(Script::cast(shared_info->script()));
-    Scope* scope;
     if (index >= 0) {
-      // Global code
-      CompilationInfo info(script);
-      info.MarkAsGlobal();
-      CHECK(ParserApi::Parse(&info));
-      CHECK(Scope::Analyze(&info));
-      scope = info.function()->scope();
-    } else {
-      // Function code
-      CompilationInfo info(shared_info);
-      CHECK(ParserApi::Parse(&info));
-      CHECK(Scope::Analyze(&info));
-      scope = info.function()->scope();
+      local_done_ = true;
+    } else if (context_->IsGlobalContext() ||
+               context_->IsFunctionContext()) {
+      at_local_ = true;
+    } else if (context_->closure() != *function_) {
+      // context_ is a with, catch or block context from the outer function.
+      ASSERT(context_->IsWithContext() ||
+             context_->IsCatchContext() ||
+             context_->IsBlockContext());
+      at_local_ = true;
     }
-
-    // Retrieve the scope chain for the current position.
-    int statement_position =
-        shared_info->code()->SourceStatementPosition(frame_->pc());
-    scope->GetNestedScopeChain(&nested_scope_chain_, statement_position);
   }
 
   // More scopes?
@@ -11262,48 +11245,40 @@
 
   // Move to the next scope.
   void Next() {
-    ScopeType scope_type = Type();
-    if (scope_type == ScopeTypeGlobal) {
-      // The global scope is always the last in the chain.
-      ASSERT(context_->IsGlobalContext());
+    // If at a local scope, mark the local scope as passed.
+    if (at_local_) {
+      at_local_ = false;
+      local_done_ = true;
+
+      // If the current context is not associated with the local scope, the
+      // current context is the next real scope, so don't move to the next
+      // context in this case.
+      if (context_->closure() != *function_) {
+        return;
+      }
+    }
+
+    // The global scope is always the last in the chain.
+    if (context_->IsGlobalContext()) {
       context_ = Handle<Context>();
       return;
     }
-    if (nested_scope_chain_.is_empty()) {
-      context_ = Handle<Context>(context_->previous(), isolate_);
-    } else {
-      if (nested_scope_chain_.last()->HasContext()) {
-        context_ = Handle<Context>(context_->previous(), isolate_);
-      }
-      nested_scope_chain_.RemoveLast();
+
+    // Move to the next context.
+    context_ = Handle<Context>(context_->previous(), isolate_);
+
+    // If passing the local scope, indicate that the current scope is now the
+    // local scope.
+    if (!local_done_ &&
+        (context_->IsGlobalContext() || context_->IsFunctionContext())) {
+      at_local_ = true;
     }
   }
 
   // Return the type of the current scope.
   ScopeType Type() {
-    if (!nested_scope_chain_.is_empty()) {
-      Handle<ScopeInfo> scope_info = nested_scope_chain_.last();
-      switch (scope_info->Type()) {
-        case FUNCTION_SCOPE:
-          ASSERT(context_->IsFunctionContext() ||
-                 !scope_info->HasContext());
-          return ScopeTypeLocal;
-        case GLOBAL_SCOPE:
-          ASSERT(context_->IsGlobalContext());
-          return ScopeTypeGlobal;
-        case WITH_SCOPE:
-          ASSERT(context_->IsWithContext());
-          return ScopeTypeWith;
-        case CATCH_SCOPE:
-          ASSERT(context_->IsCatchContext());
-          return ScopeTypeCatch;
-        case BLOCK_SCOPE:
-          ASSERT(!scope_info->HasContext() ||
-                 context_->IsBlockContext());
-          return ScopeTypeBlock;
-        case EVAL_SCOPE:
-          UNREACHABLE();
-      }
+    if (at_local_) {
+      return ScopeTypeLocal;
     }
     if (context_->IsGlobalContext()) {
       ASSERT(context_->global()->IsGlobalObject());
@@ -11329,7 +11304,6 @@
         return Handle<JSObject>(CurrentContext()->global());
       case ScopeIterator::ScopeTypeLocal:
         // Materialize the content of the local scope into a JSObject.
-        ASSERT(nested_scope_chain_.length() == 1);
         return MaterializeLocalScope(isolate_, frame_, inlined_frame_index_);
       case ScopeIterator::ScopeTypeWith:
         // Return the with object.
@@ -11346,28 +11320,13 @@
     return Handle<JSObject>();
   }
 
-  Handle<ScopeInfo> CurrentScopeInfo() {
-    if (!nested_scope_chain_.is_empty()) {
-      return nested_scope_chain_.last();
-    } else if (context_->IsBlockContext()) {
-      return Handle<ScopeInfo>(ScopeInfo::cast(context_->extension()));
-    } else if (context_->IsFunctionContext()) {
-      return Handle<ScopeInfo>(context_->closure()->shared()->scope_info());
-    }
-    return Handle<ScopeInfo>::null();
-  }
-
   // Return the context for this scope. For the local context there might not
   // be an actual context.
   Handle<Context> CurrentContext() {
-    if (Type() == ScopeTypeGlobal ||
-        nested_scope_chain_.is_empty()) {
-      return context_;
-    } else if (nested_scope_chain_.last()->HasContext()) {
-      return context_;
-    } else {
+    if (at_local_ && context_->closure() != *function_) {
       return Handle<Context>();
     }
+    return context_;
   }
 
 #ifdef DEBUG
@@ -11429,7 +11388,8 @@
   int inlined_frame_index_;
   Handle<JSFunction> function_;
   Handle<Context> context_;
-  List<Handle<ScopeInfo> > nested_scope_chain_;
+  bool local_done_;
+  bool at_local_;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(ScopeIterator);
 };
@@ -11892,65 +11852,45 @@
 
 // Creates a copy of the with context chain. The copy of the context chain is
 // linked to the function context supplied.
-static Handle<Context> CopyNestedScopeContextChain(Isolate* isolate,
-                                                   Handle<JSFunction> function,
-                                                   Handle<Context> base,
-                                                   JavaScriptFrame* frame,
-                                                   int inlined_frame_index) {
-  HandleScope scope(isolate);
-  List<Handle<ScopeInfo> > scope_chain;
-  List<Handle<Context> > context_chain;
-
-  ScopeIterator it(isolate, frame, inlined_frame_index);
-  for (; it.Type() != ScopeIterator::ScopeTypeGlobal &&
-         it.Type() != ScopeIterator::ScopeTypeLocal ; it.Next()) {
-    ASSERT(!it.Done());
-    scope_chain.Add(it.CurrentScopeInfo());
-    context_chain.Add(it.CurrentContext());
-  }
-
+static Handle<Context> CopyWithContextChain(Isolate* isolate,
+                                            Handle<JSFunction> function,
+                                            Handle<Context> current,
+                                            Handle<Context> base) {
   // At the end of the chain. Return the base context to link to.
-  Handle<Context> context = base;
-
-  // Iteratively copy and or materialize the nested contexts.
-  while (!scope_chain.is_empty()) {
-    Handle<ScopeInfo> scope_info = scope_chain.RemoveLast();
-    Handle<Context> current = context_chain.RemoveLast();
-    ASSERT(!(scope_info->HasContext() & current.is_null()));
-
-    if (scope_info->Type() == CATCH_SCOPE) {
-      Handle<String> name(String::cast(current->extension()));
-      Handle<Object> thrown_object(current->get(Context::THROWN_OBJECT_INDEX));
-      context =
-          isolate->factory()->NewCatchContext(function,
-                                              context,
-                                              name,
-                                              thrown_object);
-    } else if (scope_info->Type() == BLOCK_SCOPE) {
-      // Materialize the contents of the block scope into a JSObject.
-      Handle<JSObject> block_scope_object =
-          MaterializeBlockScope(isolate, current);
-      if (block_scope_object.is_null()) {
-        return Handle<Context>::null();
-      }
-      // Allocate a new function context for the debug evaluation and set the
-      // extension object.
-      Handle<Context> new_context =
-          isolate->factory()->NewFunctionContext(Context::MIN_CONTEXT_SLOTS,
-                                                 function);
-      new_context->set_extension(*block_scope_object);
-      new_context->set_previous(*context);
-      context = new_context;
-    } else {
-      ASSERT(scope_info->Type() == WITH_SCOPE);
-      ASSERT(current->IsWithContext());
-      Handle<JSObject> extension(JSObject::cast(current->extension()));
-      context =
-          isolate->factory()->NewWithContext(function, context, extension);
-    }
+  if (current->IsFunctionContext() || current->IsGlobalContext()) {
+    return base;
   }
 
-  return scope.CloseAndEscape(context);
+  // Recursively copy the with, catch and block contexts.
+  HandleScope scope(isolate);
+  Handle<Context> previous(current->previous());
+  Handle<Context> new_previous =
+      CopyWithContextChain(isolate, function, previous, base);
+  Handle<Context> new_current;
+  if (current->IsCatchContext()) {
+    Handle<String> name(String::cast(current->extension()));
+    Handle<Object> thrown_object(current->get(Context::THROWN_OBJECT_INDEX));
+    new_current =
+        isolate->factory()->NewCatchContext(function,
+                                            new_previous,
+                                            name,
+                                            thrown_object);
+  } else if (current->IsBlockContext()) {
+    Handle<ScopeInfo> scope_info(ScopeInfo::cast(current->extension()));
+    new_current =
+        isolate->factory()->NewBlockContext(function, new_previous, scope_info);
+    // Copy context slots.
+    int num_context_slots = scope_info->ContextLength();
+    for (int i = Context::MIN_CONTEXT_SLOTS; i < num_context_slots; ++i) {
+      new_current->set(i, current->get(i));
+    }
+  } else {
+    ASSERT(current->IsWithContext());
+    Handle<JSObject> extension(JSObject::cast(current->extension()));
+    new_current =
+        isolate->factory()->NewWithContext(function, new_previous, extension);
+  }
+  return scope.CloseAndEscape(new_current);
 }
 
 
@@ -12085,11 +12025,7 @@
   if (scope_info->HasContext()) {
     function_context = Handle<Context>(frame_context->declaration_context());
   }
-  context = CopyNestedScopeContextChain(isolate,
-                                        go_between,
-                                        context,
-                                        frame,
-                                        inlined_frame_index);
+  context = CopyWithContextChain(isolate, go_between, frame_context, context);
 
   if (additional_context->IsJSObject()) {
     Handle<JSObject> extension = Handle<JSObject>::cast(additional_context);
diff --git a/src/scopes.cc b/src/scopes.cc
index f9cd620..60ea20f 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -55,7 +55,7 @@
 };
 
 
-static ZoneAllocator LocalsMapAllocator;
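+// Intentionally leaked to avoid an exit-time destructor.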
+static ZoneAllocator* LocalsMapAllocator = ::new ZoneAllocator();
 
 
 // ----------------------------------------------------------------------------
@@ -76,7 +76,7 @@
 }
 
 
-VariableMap::VariableMap() : HashMap(Match, &LocalsMapAllocator, 8) {}
+VariableMap::VariableMap() : HashMap(Match, LocalsMapAllocator, 8) {}
 VariableMap::~VariableMap() {}
 
 
diff --git a/src/serialize.cc b/src/serialize.cc
index 8c19d99..e3a84c8 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -1564,6 +1564,7 @@
         sink_->Put(kNativesStringResource, "NativesStringResource");
         sink_->PutSection(i, "NativesStringResourceEnd");
         bytes_processed_so_far_ += sizeof(resource);
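+        // The cached data pointer is only valid in this process; clear it so
+        // a stale pointer does not end up in the snapshot.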
+        string->clear_data_cache();
         return;
       }
     }
diff --git a/src/spaces.cc b/src/spaces.cc
index 79a66fa..53f6ff3 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -1658,14 +1658,14 @@
   // field and a next pointer, we give it a filler map that gives it the
   // correct size.
   if (size_in_bytes > FreeSpace::kHeaderSize) {
-    set_map(heap->raw_unchecked_free_space_map());
+    set_map_unsafe(heap->raw_unchecked_free_space_map());
     // Can't use FreeSpace::cast because it fails during deserialization.
     FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
     this_as_free_space->set_size(size_in_bytes);
   } else if (size_in_bytes == kPointerSize) {
-    set_map(heap->raw_unchecked_one_pointer_filler_map());
+    set_map_unsafe(heap->raw_unchecked_one_pointer_filler_map());
   } else if (size_in_bytes == 2 * kPointerSize) {
-    set_map(heap->raw_unchecked_two_pointer_filler_map());
+    set_map_unsafe(heap->raw_unchecked_two_pointer_filler_map());
   } else {
     UNREACHABLE();
   }
diff --git a/src/type-info.cc b/src/type-info.cc
index afec71a..c781c61 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -360,6 +360,10 @@
       return unknown;
     case CompareIC::SMIS:
       return TypeInfo::Smi();
+    case CompareIC::STRINGS:
+      return TypeInfo::String();
+    case CompareIC::SYMBOLS:
+      return TypeInfo::Symbol();
     case CompareIC::HEAP_NUMBERS:
       return TypeInfo::Number();
     case CompareIC::OBJECTS:
@@ -415,7 +419,8 @@
     ASSERT(Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC);
   } else if (object->IsMap()) {
     types->Add(Handle<Map>::cast(object));
-  } else if (Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC) {
+  } else if (FLAG_collect_megamorphic_maps_from_stub_cache &&
+      Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC) {
     types->Reserve(4);
     ASSERT(object->IsCode());
     isolate_->stub_cache()->CollectMatchingMaps(types, *name, flags);
diff --git a/src/type-info.h b/src/type-info.h
index 2c3543e..7c9c05e 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -64,6 +64,8 @@
   static TypeInfo Integer32() { return TypeInfo(kInteger32); }
   // We know it's a Smi.
   static TypeInfo Smi() { return TypeInfo(kSmi); }
+  // We know it's a Symbol.
+  static TypeInfo Symbol() { return TypeInfo(kSymbol); }
   // We know it's a heap number.
   static TypeInfo Double() { return TypeInfo(kDouble); }
   // We know it's a string.
@@ -137,6 +139,16 @@
     return ((type_ & kSmi) == kSmi);
   }
 
+  inline bool IsSymbol() {
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kSymbol) == kSymbol);
+  }
+
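+  // We know it's a string, but it is not known to be a symbol.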
+  inline bool IsNonSymbol() {
+    ASSERT(type_ != kUninitialized);
+    return ((type_ & kSymbol) == kString);
+  }
+
   inline bool IsInteger32() {
     ASSERT(type_ != kUninitialized);
     return ((type_ & kInteger32) == kInteger32);
@@ -168,6 +180,7 @@
       case kNumber: return "Number";
       case kInteger32: return "Integer32";
       case kSmi: return "Smi";
+      case kSymbol: return "Symbol";
       case kDouble: return "Double";
       case kString: return "String";
       case kNonPrimitive: return "Object";
@@ -186,6 +199,7 @@
     kSmi = 0x17,           // 0010111
     kDouble = 0x19,        // 0011001
     kString = 0x30,        // 0110000
+    kSymbol = 0x32,        // 0110010
     kNonPrimitive = 0x40,  // 1000000
     kUninitialized = 0x7f  // 1111111
   };
diff --git a/src/version.cc b/src/version.cc
index e568fd1..5fc89b0 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     7
-#define BUILD_NUMBER      8
+#define BUILD_NUMBER      9
 #define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index bf50869..a30d62f 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -219,68 +219,38 @@
 }
 
 
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
-  // Stack layout on entry:
+static void GenerateFastCloneShallowArrayCommon(
+    MacroAssembler* masm,
+    int length,
+    FastCloneShallowArrayStub::Mode mode,
+    Label* fail) {
+  // Registers on entry:
   //
-  // [rsp + kPointerSize]: constant elements.
-  // [rsp + (2 * kPointerSize)]: literal index.
-  // [rsp + (3 * kPointerSize)]: literals array.
+  // rcx: boilerplate literal array.
+  ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
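+  // The caller has already resolved CLONE_ANY_ELEMENTS into a concrete mode.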
 
   // All sizes here are multiples of kPointerSize.
   int elements_size = 0;
-  if (length_ > 0) {
-    elements_size = mode_ == CLONE_DOUBLE_ELEMENTS
-        ? FixedDoubleArray::SizeFor(length_)
-        : FixedArray::SizeFor(length_);
+  if (length > 0) {
+    elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+        ? FixedDoubleArray::SizeFor(length)
+        : FixedArray::SizeFor(length);
   }
   int size = JSArray::kSize + elements_size;
 
-  // Load boilerplate object into rcx and check if we need to create a
-  // boilerplate.
-  Label slow_case;
-  __ movq(rcx, Operand(rsp, 3 * kPointerSize));
-  __ movq(rax, Operand(rsp, 2 * kPointerSize));
-  SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
-  __ movq(rcx,
-          FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
-  __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
-  __ j(equal, &slow_case);
-
-  if (FLAG_debug_code) {
-    const char* message;
-    Heap::RootListIndex expected_map_index;
-    if (mode_ == CLONE_ELEMENTS) {
-      message = "Expected (writable) fixed array";
-      expected_map_index = Heap::kFixedArrayMapRootIndex;
-    } else if (mode_ == CLONE_DOUBLE_ELEMENTS) {
-      message = "Expected (writable) fixed double array";
-      expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
-    } else {
-      ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
-      message = "Expected copy-on-write fixed array";
-      expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
-    }
-    __ push(rcx);
-    __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
-    __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
-                   expected_map_index);
-    __ Assert(equal, message);
-    __ pop(rcx);
-  }
-
   // Allocate both the JS array and the elements array in one big
   // allocation. This avoids multiple limit checks.
-  __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
+  __ AllocateInNewSpace(size, rax, rbx, rdx, fail, TAG_OBJECT);
 
   // Copy the JS array part.
   for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
-    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
+    if ((i != JSArray::kElementsOffset) || (length == 0)) {
       __ movq(rbx, FieldOperand(rcx, i));
       __ movq(FieldOperand(rax, i), rbx);
     }
   }
 
-  if (length_ > 0) {
+  if (length > 0) {
     // Get hold of the elements array of the boilerplate and setup the
     // elements pointer in the resulting object.
     __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
@@ -288,13 +258,13 @@
     __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
 
     // Copy the elements array.
-    if (mode_ == CLONE_ELEMENTS) {
+    if (mode == FastCloneShallowArrayStub::CLONE_ELEMENTS) {
       for (int i = 0; i < elements_size; i += kPointerSize) {
         __ movq(rbx, FieldOperand(rcx, i));
         __ movq(FieldOperand(rdx, i), rbx);
       }
     } else {
-      ASSERT(mode_ == CLONE_DOUBLE_ELEMENTS);
+      ASSERT(mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS);
       int i;
       for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) {
         __ movq(rbx, FieldOperand(rcx, i));
@@ -308,8 +278,75 @@
       ASSERT(i == elements_size);
     }
   }
+}
 
-  // Return and remove the on-stack parameters.
+void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
+  // Stack layout on entry:
+  //
+  // [rsp + kPointerSize]: constant elements.
+  // [rsp + (2 * kPointerSize)]: literal index.
+  // [rsp + (3 * kPointerSize)]: literals array.
+
+  // Load boilerplate object into rcx and check if we need to create a
+  // boilerplate.
+  __ movq(rcx, Operand(rsp, 3 * kPointerSize));
+  __ movq(rax, Operand(rsp, 2 * kPointerSize));
+  SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
+  __ movq(rcx,
+          FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
+  __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
+  Label slow_case;
+  __ j(equal, &slow_case);
+
+  FastCloneShallowArrayStub::Mode mode = mode_;
+  // rcx is boilerplate object.
+  Factory* factory = masm->isolate()->factory();
+  if (mode == CLONE_ANY_ELEMENTS) {
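+    // The elements kind was not known when the stub was generated; dispatch
+    // on the boilerplate's elements map at runtime.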
+    Label double_elements, check_fast_elements;
+    __ movq(rbx, FieldOperand(rcx, JSArray::kElementsOffset));
+    __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+           factory->fixed_cow_array_map());
+    __ j(not_equal, &check_fast_elements);
+    GenerateFastCloneShallowArrayCommon(masm, 0,
+                                        COPY_ON_WRITE_ELEMENTS, &slow_case);
+    __ ret(3 * kPointerSize);
+
+    __ bind(&check_fast_elements);
+    __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+           factory->fixed_array_map());
+    __ j(not_equal, &double_elements);
+    GenerateFastCloneShallowArrayCommon(masm, length_,
+                                        CLONE_ELEMENTS, &slow_case);
+    __ ret(3 * kPointerSize);
+
+    __ bind(&double_elements);
+    mode = CLONE_DOUBLE_ELEMENTS;
+    // Fall through to generate the code to handle double elements.
+  }
+
+  if (FLAG_debug_code) {
+    const char* message;
+    Heap::RootListIndex expected_map_index;
+    if (mode == CLONE_ELEMENTS) {
+      message = "Expected (writable) fixed array";
+      expected_map_index = Heap::kFixedArrayMapRootIndex;
+    } else if (mode == CLONE_DOUBLE_ELEMENTS) {
+      message = "Expected (writable) fixed double array";
+      expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
+    } else {
+      ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
+      message = "Expected copy-on-write fixed array";
+      expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
+    }
+    __ push(rcx);
+    __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
+    __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+                   expected_map_index);
+    __ Assert(equal, message);
+    __ pop(rcx);
+  }
+
+  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
   __ ret(3 * kPointerSize);
 
   __ bind(&slow_case);
@@ -4912,18 +4949,15 @@
     // rbx: instance type
     // rcx: sub string length
     // rdx: from index (smi)
-    Label allocate_slice, sliced_string, seq_string;
+    Label allocate_slice, sliced_string, seq_or_external_string;
     __ cmpq(rcx, Immediate(SlicedString::kMinLength));
     // Short slice.  Copy instead of slicing.
     __ j(less, &copy_routine);
-    STATIC_ASSERT(kSeqStringTag == 0);
-    __ testb(rbx, Immediate(kStringRepresentationMask));
-    __ j(zero, &seq_string, Label::kNear);
+    // If the string is not indirect, it can only be sequential or external.
     STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
     STATIC_ASSERT(kIsIndirectStringMask != 0);
     __ testb(rbx, Immediate(kIsIndirectStringMask));
-    // External string.  Jump to runtime.
-    __ j(zero, &runtime);
+    __ j(zero, &seq_or_external_string, Label::kNear);
 
     __ testb(rbx, Immediate(kSlicedNotConsMask));
     __ j(not_zero, &sliced_string, Label::kNear);
@@ -4940,8 +4974,8 @@
     __ movq(rdi, FieldOperand(rax, SlicedString::kParentOffset));
     __ jmp(&allocate_slice, Label::kNear);
 
-    __ bind(&seq_string);
-    // Sequential string.  Just move string to the right register.
+    __ bind(&seq_or_external_string);
+    // Sequential or external string.  Just move string to the correct register.
     __ movq(rdi, rax);
 
     __ bind(&allocate_slice);
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 587f73b..35b902d 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -1482,6 +1482,7 @@
   ASSERT_EQ(2, constant_elements->length());
   ElementsKind constant_elements_kind =
       static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+  bool has_constant_fast_elements = constant_elements_kind == FAST_ELEMENTS;
   Handle<FixedArrayBase> constant_elements_values(
       FixedArrayBase::cast(constant_elements->get(1)));
 
@@ -1489,12 +1490,16 @@
   __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
   __ Push(Smi::FromInt(expr->literal_index()));
   __ Push(constant_elements);
-  if (constant_elements_values->map() ==
-      isolate()->heap()->fixed_cow_array_map()) {
-    FastCloneShallowArrayStub stub(
-        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
-    __ CallStub(&stub);
+  Heap* heap = isolate()->heap();
+  if (has_constant_fast_elements &&
+      constant_elements_values->map() == heap->fixed_cow_array_map()) {
+    // If the elements are already FAST_ELEMENTS, the boilerplate cannot
+    // change, so it's possible to specialize the stub in advance.
     __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
+    FastCloneShallowArrayStub stub(
+        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
+        length);
+    __ CallStub(&stub);
   } else if (expr->depth() > 1) {
     __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
   } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
@@ -1503,10 +1508,11 @@
     ASSERT(constant_elements_kind == FAST_ELEMENTS ||
            constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
            FLAG_smi_only_arrays);
-    FastCloneShallowArrayStub::Mode mode =
-        constant_elements_kind == FAST_DOUBLE_ELEMENTS
-        ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
-        : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+    // If the elements are already FAST_ELEMENTS, the boilerplate cannot
+    // change, so it's possible to specialize the stub in advance.
+    FastCloneShallowArrayStub::Mode mode = has_constant_fast_elements
+        ? FastCloneShallowArrayStub::CLONE_ELEMENTS
+        : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
     FastCloneShallowArrayStub stub(mode, length);
     __ CallStub(&stub);
   }
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 36edfb7..fd88b9d 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -1614,6 +1614,30 @@
 }
 
 
+Condition LCodeGen::EmitIsString(Register input,
+                                 Register temp1,
+                                 Label* is_not_string) {
+  __ JumpIfSmi(input, is_not_string);
+  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
+
+  return cond;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+  Register reg = ToRegister(instr->InputAt(0));
+  Register temp = ToRegister(instr->TempAt(0));
+
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+  Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+  Condition true_cond = EmitIsString(reg, temp, false_label);
+
+  EmitBranch(true_block, false_block, true_cond);
+}
+
+
 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
   int true_block = chunk_->LookupDestination(instr->true_block_id());
   int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1645,6 +1669,21 @@
 }
 
 
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+  Token::Value op = instr->op();
+  int true_block = chunk_->LookupDestination(instr->true_block_id());
+  int false_block = chunk_->LookupDestination(instr->false_block_id());
+
+  Handle<Code> ic = CompareIC::GetUninitialized(op);
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+  Condition condition = TokenToCondition(op, false);
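+  // The compare IC returns its result in rax; test it against zero to get
+  // the condition for the branch.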
+  __ testq(rax, rax);
+
+  EmitBranch(true_block, false_block, condition);
+}
+
+
 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
   InstanceType from = instr->from();
   InstanceType to = instr->to();
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index 37e426e..ee54466 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -269,6 +269,13 @@
                          Label* is_not_object,
                          Label* is_object);
 
+  // Emits optimized code for %_IsString(x).  Preserves input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitIsString(Register input,
+                         Register temp1,
+                         Label* is_not_string);
+
   // Emits optimized code for %_IsConstructCall().
   // Caller should branch on equal condition.
   void EmitIsConstructCall(Register temp);
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index e9e7896..a8e29d1 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -230,6 +230,13 @@
 }
 
 
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_string(");
+  InputAt(0)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
 void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if is_smi(");
   InputAt(0)->PrintTo(stream);
@@ -244,6 +251,14 @@
 }
 
 
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if string_compare(");
+  InputAt(0)->PrintTo(stream);
+  InputAt(1)->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
 void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
   stream->Add("if has_instance_type(");
   InputAt(0)->PrintTo(stream);
@@ -1451,6 +1466,13 @@
 }
 
 
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* temp = TempRegister();
+  return new LIsStringAndBranch(UseRegisterAtStart(instr->value()), temp);
+}
+
+
 LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
   return new LIsSmiAndBranch(Use(instr->value()));
@@ -1465,6 +1487,19 @@
 }
 
 
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+    HStringCompareAndBranch* instr) {
+
+  ASSERT(instr->left()->representation().IsTagged());
+  ASSERT(instr->right()->representation().IsTagged());
+  LOperand* left = UseFixed(instr->left(), rdx);
+  LOperand* right = UseFixed(instr->right(), rax);
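+  // These fixed registers match the inputs expected by the compare IC.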
+  LStringCompareAndBranch* result = new LStringCompareAndBranch(left, right);
+
+  return MarkAsCall(result, instr);
+}
+
+
 LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
     HHasInstanceTypeAndBranch* instr) {
   ASSERT(instr->value()->representation().IsTagged());
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index f5edd62..e60e5d6 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -109,8 +109,10 @@
   V(IsConstructCallAndBranch)                   \
   V(IsNilAndBranch)                             \
   V(IsObjectAndBranch)                          \
+  V(IsStringAndBranch)                          \
   V(IsSmiAndBranch)                             \
   V(IsUndetectableAndBranch)                    \
+  V(StringCompareAndBranch)                     \
   V(JSArrayLength)                              \
   V(Label)                                      \
   V(LazyBailout)                                \
@@ -640,6 +642,20 @@
 };
 
 
+class LIsStringAndBranch: public LControlInstruction<1, 1> {
+ public:
+  explicit LIsStringAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
+};
+
+
 class LIsSmiAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LIsSmiAndBranch(LOperand* value) {
@@ -668,6 +684,23 @@
 };
 
 
+class LStringCompareAndBranch: public LControlInstruction<2, 0> {
+ public:
+  explicit LStringCompareAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+                               "string-compare-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  Token::Value op() const { return hydrogen()->token(); }
+};
+
+
 class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
  public:
   explicit LHasInstanceTypeAndBranch(LOperand* value) {