Version 3.19.13

Performance and stability improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@15068 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index fb3c4f9..6117e56 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,8 @@
+2013-06-11: Version 3.19.13
+
+        Performance and stability improvements on all platforms.
+
+
 2013-06-10: Version 3.19.12
 
         Fixed arguments array access. (Chromium issue 247303)
diff --git a/include/v8.h b/include/v8.h
index e99c6d9..c0bec79 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -2370,30 +2370,6 @@
   static void CheckCast(Value* obj);
 };
 
-/**
- * The contents of an |ArrayBuffer|. Externalization of |ArrayBuffer|
- * populates an instance of this class with a pointer to data and byte length.
- *
- * |ArrayBufferContents| is the owner of its data. When an instance of
- * this class is destructed, the |Data| is freed.
- *
- * This API is experimental and may change significantly.
- */
-class V8EXPORT ArrayBufferContents {
- public:
-  ArrayBufferContents() : data_(NULL), byte_length_(0) {}
-  ~ArrayBufferContents();
-
-  void* Data() const { return data_; }
-  size_t ByteLength() const { return byte_length_; }
-
- private:
-  void* data_;
-  size_t byte_length_;
-
-  friend class ArrayBuffer;
-};
-
 #ifndef V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT
 #define V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT 2
 #endif
@@ -2405,6 +2381,53 @@
 class V8EXPORT ArrayBuffer : public Object {
  public:
   /**
+   * Allocator that V8 uses to allocate |ArrayBuffer|'s memory.
+   * The allocator is a global V8 setting. It should be set with
+   * V8::SetArrayBufferAllocator prior to the creation of the first ArrayBuffer.
+   *
+   * This API is experimental and may change significantly.
+   */
+  class V8EXPORT Allocator { // NOLINT
+   public:
+    virtual ~Allocator() {}
+
+    /**
+     * Allocate |length| bytes. Return NULL if allocation is not successful.
+     */
+    virtual void* Allocate(size_t length) = 0;
+    /**
+     * Free the memory pointed to by |data|. That memory is guaranteed to be
+     * previously allocated by |Allocate|.
+     */
+    virtual void Free(void* data) = 0;
+  };
+
+  /**
+   * The contents of an |ArrayBuffer|. Externalization of |ArrayBuffer|
+   * returns an instance of this class, populated with a pointer to the
+   * data and its byte length.
+   *
+   * The Data pointer of ArrayBuffer::Contents is always allocated with the
+   * Allocator::Allocate that was set with V8::SetArrayBufferAllocator.
+   *
+   * This API is experimental and may change significantly.
+   */
+  class V8EXPORT Contents { // NOLINT
+   public:
+    Contents() : data_(NULL), byte_length_(0) {}
+
+    void* Data() const { return data_; }
+    size_t ByteLength() const { return byte_length_; }
+
+   private:
+    void* data_;
+    size_t byte_length_;
+
+    friend class ArrayBuffer;
+  };
+
+
+  /**
    * Data length in bytes.
    */
   size_t ByteLength() const;
@@ -2440,14 +2463,18 @@
   void Neuter();
 
   /**
-   * Pass the ownership of this ArrayBuffer's backing store to
-   * a given ArrayBufferContents.
+   * Make this ArrayBuffer external. The pointer to the underlying memory
+   * block and the byte length are returned as a |Contents| structure. Once
+   * the ArrayBuffer has been externalized, it no longer owns the memory
+   * block. The caller should take steps to free the memory when it is no
+   * longer needed.
+   *
+   * The memory block is guaranteed to have been allocated with the
+   * |Allocator::Allocate| that was set with V8::SetArrayBufferAllocator.
    */
-  void Externalize(ArrayBufferContents* contents);
+  Contents Externalize();
 
   V8_INLINE(static ArrayBuffer* Cast(Value* obj));
 
-
   static const int kInternalFieldCount = V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT;
 
  private:
@@ -2845,6 +2872,7 @@
   // Fast JS primitive setters
   V8_INLINE(void SetNull());
   V8_INLINE(void SetUndefined());
+  V8_INLINE(void SetEmptyString());
   // Convenience getter for Isolate
   V8_INLINE(Isolate* GetIsolate());
 
@@ -4193,6 +4221,14 @@
       AllowCodeGenerationFromStringsCallback that);
 
   /**
+   * Set the allocator to use for ArrayBuffer memory.
+   * The allocator should be set only once, before any code that uses
+   * ArrayBuffers is executed. This allocator is used by all isolates.
+   */
+  static void SetArrayBufferAllocator(ArrayBuffer::Allocator* allocator);
+
+  /**
    * Ignore out-of-memory exceptions.
    *
    * V8 running out of memory is treated as a fatal error by default.
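
Taken together, the v8.h hunks above replace the old self-owning ArrayBufferContents with an embedder-supplied ArrayBuffer::Allocator plus a by-value Contents handle. A minimal sketch of how an embedder wires this up under the new API (MallocAllocator and the surrounding functions are illustrative; the d8.cc change below performs the same registration):

  #include <stdlib.h>
  #include "v8.h"

  // Illustrative allocator backed by malloc/free, matching the contract
  // of the new v8::ArrayBuffer::Allocator interface.
  class MallocAllocator : public v8::ArrayBuffer::Allocator {
   public:
    virtual void* Allocate(size_t length) { return malloc(length); }
    virtual void Free(void* data) { free(data); }
  };

  static MallocAllocator allocator;

  void InitAllocator() {
    // Must run once, before any ArrayBuffer is created.
    v8::V8::SetArrayBufferAllocator(&allocator);
  }

  void TakeOwnership(v8::Local<v8::ArrayBuffer> buffer) {
    // Externalize() now returns Contents by value; afterwards the
    // ArrayBuffer no longer owns the block and the caller must free it
    // through the registered allocator.
    v8::ArrayBuffer::Contents contents = buffer->Externalize();
    // ... use contents.Data() / contents.ByteLength() ...
    allocator.Free(contents.Data());
  }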
@@ -5730,6 +5766,12 @@
 }
 
 template<typename T>
+void ReturnValue<T>::SetEmptyString() {
+  typedef internal::Internals I;
+  *value_ = *I::GetRoot(GetIsolate(), I::kEmptyStringRootIndex);
+}
+
+template<typename T>
 Isolate* ReturnValue<T>::GetIsolate() {
   // Isolate is always the pointer below the default value on the stack.
   return *reinterpret_cast<Isolate**>(&value_[-2]);
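
The inline definition above completes the new ReturnValue fast path: SetEmptyString() writes the empty-string root directly into the return slot, with no handle creation. A sketch of its use in a callback (the FunctionCallbackInfo-style signature is assumed from this API generation; the callback name is hypothetical):

  // Hypothetical callback returning "" via the new root-index fast path;
  // equivalent to Set(String::Empty(...)) without the handle lookup.
  void EmptyStringCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
    info.GetReturnValue().SetEmptyString();
  }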
diff --git a/src/api.cc b/src/api.cc
index 18acc0d..20496fe 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -5160,6 +5160,15 @@
   isolate->logger()->SetCodeEventHandler(options, event_handler);
 }
 
+void v8::V8::SetArrayBufferAllocator(
+    ArrayBuffer::Allocator* allocator) {
+  if (!ApiCheck(i::V8::ArrayBufferAllocator() == NULL,
+                "v8::V8::SetArrayBufferAllocator",
+                "ArrayBufferAllocator might only be set once"))
+    return;
+  i::V8::SetArrayBufferAllocator(allocator);
+}
+
 
 bool v8::V8::Dispose() {
   i::Isolate* isolate = i::Isolate::Current();
@@ -6129,25 +6138,17 @@
   return Utils::OpenHandle(this)->is_external();
 }
 
-v8::ArrayBufferContents::~ArrayBufferContents() {
-  free(data_);
-  data_ = NULL;
-  byte_length_ = 0;
-}
-
-
-void v8::ArrayBuffer::Externalize(ArrayBufferContents* contents) {
+v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() {
   i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
   ApiCheck(!obj->is_external(),
             "v8::ArrayBuffer::Externalize",
             "ArrayBuffer already externalized");
   obj->set_is_external(true);
   size_t byte_length = static_cast<size_t>(obj->byte_length()->Number());
-  ApiCheck(contents->data_ == NULL,
-           "v8::ArrayBuffer::Externalize",
-           "Externalizing into non-empty ArrayBufferContents");
-  contents->data_ = obj->backing_store();
-  contents->byte_length_ = byte_length;
+  Contents contents;
+  contents.data_ = obj->backing_store();
+  contents.byte_length_ = byte_length;
+  return contents;
 }
 
 
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 66c3dbf..c6ea600 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -1368,6 +1368,7 @@
 void Assembler::sdiv(Register dst, Register src1, Register src2,
                      Condition cond) {
   ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
+  ASSERT(IsEnabled(SUDIV));
   emit(cond | B26 | B25| B24 | B20 | dst.code()*B16 | 0xf * B12 |
        src2.code()*B8 | B4 | src1.code());
 }
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 3fbe0c5..b26bf7e 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -1707,6 +1707,7 @@
       __ Ret();
 
       if (CpuFeatures::IsSupported(SUDIV)) {
+        CpuFeatureScope scope(masm, SUDIV);
         Label result_not_zero;
 
         __ bind(&div_with_sdiv);
@@ -1763,6 +1764,7 @@
       __ Ret();
 
       if (CpuFeatures::IsSupported(SUDIV)) {
+        CpuFeatureScope scope(masm, SUDIV);
         __ bind(&modulo_with_sdiv);
         __ mov(scratch2, right);
         // Perform modulus with sdiv and mls.
@@ -2208,42 +2210,25 @@
             UNREACHABLE();
         }
 
-        if (op_ != Token::DIV) {
-          // These operations produce an integer result.
-          // Try to return a smi if we can.
-          // Otherwise return a heap number if allowed, or jump to type
-          // transition.
-
-          if (result_type_ <= BinaryOpIC::INT32) {
-            __ TryDoubleToInt32Exact(scratch1, d5, d8);
-            // If the ne condition is set, result does
-            // not fit in a 32-bit integer.
-            __ b(ne, &transition);
-          } else {
-            __ vcvt_s32_f64(s8, d5);
-            __ vmov(scratch1, s8);
-          }
-
-          // Check if the result fits in a smi.
-          __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
-          // If not try to return a heap number.
-          __ b(mi, &return_heap_number);
-          // Check for minus zero. Return heap number for minus zero if
-          // double results are allowed; otherwise transition.
+        if (result_type_ <= BinaryOpIC::INT32) {
+          __ TryDoubleToInt32Exact(scratch1, d5, d8);
+          // If the ne condition is set, result does
+          // not fit in a 32-bit integer.
+          __ b(ne, &transition);
+          // Try to tag the result as a Smi, return heap number on overflow.
+          __ SmiTag(scratch1, SetCC);
+          __ b(vs, &return_heap_number);
+          // Check for minus zero; transition in that case (because we
+          // need to return a heap number).
           Label not_zero;
-          __ cmp(scratch1, Operand::Zero());
+          ASSERT(kSmiTag == 0);
           __ b(ne, &not_zero);
           __ vmov(scratch2, d5.high());
           __ tst(scratch2, Operand(HeapNumber::kSignMask));
-          __ b(ne, result_type_ <= BinaryOpIC::INT32 ? &transition
-                                                     : &return_heap_number);
+          __ b(ne, &transition);
           __ bind(&not_zero);
-
-          // Tag the result and return.
-          __ SmiTag(r0, scratch1);
+          __ mov(r0, scratch1);
           __ Ret();
-        } else {
-          // DIV just falls through to allocating a heap number.
         }
 
         __ bind(&return_heap_number);
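
The rewritten tail above leans on SmiTag(scratch1, SetCC): on 32-bit ARM a Smi is the payload shifted left by one (kSmiTag == 0), so the shift's overflow flag (the vs branch) fires exactly when the value needs more than 31 bits. A scalar sketch of the same check (illustrative only):

  #include <stdint.h>

  // Smi tagging with an explicit range test, mirroring the vs-branch to
  // &return_heap_number above. Valid 32-bit Smi payloads are
  // [-2^30, 2^30 - 1].
  bool TrySmiTag(int32_t value, int32_t* tagged) {
    if (value > 0x3FFFFFFF || value < -0x40000000) return false;
    *tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
    return true;
  }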
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 8f11769..8b24bf1 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -4765,9 +4765,7 @@
 
   VisitForAccumulatorValue(sub_expr);
   PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  EqualityKind kind = expr->op() == Token::EQ_STRICT
-      ? kStrictEquality : kNonStrictEquality;
-  if (kind == kStrictEquality) {
+  if (expr->op() == Token::EQ_STRICT) {
     Heap::RootListIndex nil_value = nil == kNullValue ?
         Heap::kNullValueRootIndex :
         Heap::kUndefinedValueRootIndex;
@@ -4775,9 +4773,7 @@
     __ cmp(r0, r1);
     Split(eq, if_true, if_false, fall_through);
   } else {
-    Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(),
-                                                         kNonStrictEquality,
-                                                         nil);
+    Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
     CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
     __ cmp(r0, Operand(0));
     Split(ne, if_true, if_false, fall_through);
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index d9f9053..fbb9c6e 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -1345,18 +1345,14 @@
       ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
       LOperand* value = UseRegisterAtStart(instr->left());
       LDivI* div =
-          new(zone()) LDivI(value, UseOrConstant(instr->right()));
+          new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL);
       return AssignEnvironment(DefineSameAsFirst(div));
     }
-    // TODO(1042) The fixed register allocation
-    // is needed because we call TypeRecordingBinaryOpStub from
-    // the generated code, which requires registers r0
-    // and r1 to be used. We should remove that
-    // when we provide a native implementation.
-    LOperand* dividend = UseFixed(instr->left(), r0);
-    LOperand* divisor = UseFixed(instr->right(), r1);
-    return AssignEnvironment(AssignPointerMap(
-             DefineFixed(new(zone()) LDivI(dividend, divisor), r0)));
+    LOperand* dividend = UseRegister(instr->left());
+    LOperand* divisor = UseRegister(instr->right());
+    LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d4);
+    LDivI* div = new(zone()) LDivI(dividend, divisor, temp);
+    return AssignEnvironment(DefineAsRegister(div));
   } else {
     return DoArithmeticT(Token::DIV, instr);
   }
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 0964275..ccfd0db 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -597,15 +597,17 @@
 };
 
 
-class LDivI: public LTemplateInstruction<1, 2, 0> {
+class LDivI: public LTemplateInstruction<1, 2, 1> {
  public:
-  LDivI(LOperand* left, LOperand* right) {
+  LDivI(LOperand* left, LOperand* right, LOperand* temp) {
     inputs_[0] = left;
     inputs_[1] = right;
+    temps_[0] = temp;
   }
 
   LOperand* left() { return inputs_[0]; }
   LOperand* right() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
   DECLARE_HYDROGEN_ACCESSOR(Div)
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index c42e651..96befb0 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -87,7 +87,20 @@
     RegisterDependentCodeForEmbeddedMaps(code);
   }
   PopulateDeoptimizationData(code);
-  info()->CommitDependentMaps(code);
+  for (int i = 0 ; i < prototype_maps_.length(); i++) {
+    prototype_maps_.at(i)->AddDependentCode(
+        DependentCode::kPrototypeCheckGroup, code);
+  }
+  for (int i = 0 ; i < transition_maps_.length(); i++) {
+    transition_maps_.at(i)->AddDependentCode(
+        DependentCode::kTransitionGroup, code);
+  }
+  if (graph()->depends_on_empty_array_proto_elements()) {
+    isolate()->initial_object_prototype()->map()->AddDependentCode(
+        DependentCode::kElementsCantBeAddedGroup, code);
+    isolate()->initial_array_prototype()->map()->AddDependentCode(
+        DependentCode::kElementsCantBeAddedGroup, code);
+  }
 }
 
 
@@ -1417,25 +1430,9 @@
 
 
 void LCodeGen::DoDivI(LDivI* instr) {
-  class DeferredDivI: public LDeferredCode {
-   public:
-    DeferredDivI(LCodeGen* codegen, LDivI* instr)
-        : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() {
-      codegen()->DoDeferredBinaryOpStub(instr_->pointer_map(),
-                                        instr_->left(),
-                                        instr_->right(),
-                                        Token::DIV);
-    }
-    virtual LInstruction* instr() { return instr_; }
-   private:
-    LDivI* instr_;
-  };
-
   if (instr->hydrogen()->HasPowerOf2Divisor()) {
     Register dividend = ToRegister(instr->left());
-    int32_t divisor =
-        HConstant::cast(instr->hydrogen()->right())->Integer32Value();
+    int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant();
     int32_t test_value = 0;
     int32_t power = 0;
 
@@ -1458,10 +1455,19 @@
     }
 
     if (test_value != 0) {
-      // Deoptimize if remainder is not 0.
-      __ tst(dividend, Operand(test_value));
-      DeoptimizeIf(ne, instr->environment());
-      __ mov(dividend, Operand(dividend, ASR, power));
+      if (instr->hydrogen()->CheckFlag(
+          HInstruction::kAllUsesTruncatingToInt32)) {
+        __ cmp(dividend, Operand(0));
+        __ rsb(dividend, dividend, Operand(0), LeaveCC, lt);
+        __ mov(dividend, Operand(dividend, ASR, power));
+        if (divisor > 0) __ rsb(dividend, dividend, Operand(0), LeaveCC, lt);
+        return;  // Don't fall through to "__ rsb" below.
+      } else {
+        // Deoptimize if remainder is not 0.
+        __ tst(dividend, Operand(test_value));
+        DeoptimizeIf(ne, instr->environment());
+        __ mov(dividend, Operand(dividend, ASR, power));
+      }
     }
     if (divisor < 0) __ rsb(dividend, dividend, Operand(0));
 
@@ -1498,40 +1504,38 @@
     __ bind(&left_not_min_int);
   }
 
-  Label done, deoptimize;
-  // Test for a few common cases first.
-  __ cmp(right, Operand(1));
-  __ mov(result, left, LeaveCC, eq);
-  __ b(eq, &done);
+  if (CpuFeatures::IsSupported(SUDIV)) {
+    CpuFeatureScope scope(masm(), SUDIV);
+    __ sdiv(result, left, right);
 
-  __ cmp(right, Operand(2));
-  __ tst(left, Operand(1), eq);
-  __ mov(result, Operand(left, ASR, 1), LeaveCC, eq);
-  __ b(eq, &done);
+    if (!instr->hydrogen()->CheckFlag(
+        HInstruction::kAllUsesTruncatingToInt32)) {
+      // Compute remainder and deopt if it's not zero.
+      const Register remainder = scratch0();
+      __ mls(remainder, result, right, left);
+      __ cmp(remainder, Operand::Zero());
+      DeoptimizeIf(ne, instr->environment());
+    }
+  } else {
+    const DoubleRegister vleft = ToDoubleRegister(instr->temp());
+    const DoubleRegister vright = double_scratch0();
+    __ vmov(vleft.low(), left);
+    __ vmov(vright.low(), right);
+    __ vcvt_f64_s32(vleft, vleft.low());
+    __ vcvt_f64_s32(vright, vright.low());
+    __ vdiv(vleft, vleft, vright);  // vleft now contains the result.
+    __ vcvt_s32_f64(vright.low(), vleft);
+    __ vmov(result, vright.low());
 
-  __ cmp(right, Operand(4));
-  __ tst(left, Operand(3), eq);
-  __ mov(result, Operand(left, ASR, 2), LeaveCC, eq);
-  __ b(eq, &done);
-
-  // Call the stub. The numbers in r0 and r1 have
-  // to be tagged to Smis. If that is not possible, deoptimize.
-  DeferredDivI* deferred = new(zone()) DeferredDivI(this, instr);
-
-  __ TrySmiTag(left, &deoptimize);
-  __ TrySmiTag(right, &deoptimize);
-
-  __ b(al, deferred->entry());
-  __ bind(deferred->exit());
-
-  // If the result in r0 is a Smi, untag it, else deoptimize.
-  __ JumpIfNotSmi(result, &deoptimize);
-  __ SmiUntag(result);
-  __ b(&done);
-
-  __ bind(&deoptimize);
-  DeoptimizeIf(al, instr->environment());
-  __ bind(&done);
+    if (!instr->hydrogen()->CheckFlag(
+        HInstruction::kAllUsesTruncatingToInt32)) {
+      // Deopt if exact conversion to integer was not possible.
+      // Use vright as scratch register.
+      __ vcvt_f64_s32(vright, vright.low());
+      __ VFPCompareAndSetFlags(vleft, vright);
+      DeoptimizeIf(ne, instr->environment());
+    }
+  }
 }
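
On cores without SUDIV, the fallback above performs the division in double precision and, when uses are not all-truncating, deoptimizes if the quotient is not an exact integer. The same check in scalar form (a sketch; divisor-zero and kMinInt handling are assumed to have happened earlier, as in the code above):

  #include <stdint.h>

  // Integer division via doubles, modeling the VFP path: divide, convert
  // back with round-toward-zero (vcvt_s32_f64), then re-widen and compare
  // to detect an inexact quotient -- the deopt condition.
  bool TryExactIntDiv(int32_t left, int32_t right, int32_t* result) {
    double quotient = static_cast<double>(left) / static_cast<double>(right);
    int32_t truncated = static_cast<int32_t>(quotient);
    if (static_cast<double>(truncated) != quotient) return false;  // deopt
    *result = truncated;
    return true;
  }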
 
 
@@ -1630,38 +1634,6 @@
 }
 
 
-void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map,
-                                      LOperand* left_argument,
-                                      LOperand* right_argument,
-                                      Token::Value op) {
-  Register left = ToRegister(left_argument);
-  Register right = ToRegister(right_argument);
-
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegistersAndDoubles);
-  // Move left to r1 and right to r0 for the stub call.
-  if (left.is(r1)) {
-    __ Move(r0, right);
-  } else if (left.is(r0) && right.is(r1)) {
-    __ Swap(r0, r1, r2);
-  } else if (left.is(r0)) {
-    ASSERT(!right.is(r1));
-    __ mov(r1, r0);
-    __ mov(r0, right);
-  } else {
-    ASSERT(!left.is(r0) && !right.is(r0));
-    __ mov(r0, right);
-    __ mov(r1, left);
-  }
-  BinaryOpStub stub(op, OVERWRITE_LEFT);
-  __ CallStub(&stub);
-  RecordSafepointWithRegistersAndDoubles(pointer_map,
-                                         0,
-                                         Safepoint::kNoLazyDeopt);
-  // Overwrite the stored value of r0 with the result of the stub.
-  __ StoreToSafepointRegistersAndDoublesSlot(r0, r0);
-}
-
-
 void LCodeGen::DoMulI(LMulI* instr) {
   Register scratch = scratch0();
   Register result = ToRegister(instr->result());
@@ -4295,6 +4267,9 @@
   }
 
   if (!transition.is_null()) {
+    if (transition->CanBeDeprecated()) {
+      transition_maps_.Add(transition, info()->zone());
+    }
     __ mov(scratch, Operand(transition));
     __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
     if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
@@ -5407,7 +5382,11 @@
 
   ASSERT(prototypes->length() == maps->length());
 
-  if (!instr->hydrogen()->CanOmitPrototypeChecks()) {
+  if (instr->hydrogen()->CanOmitPrototypeChecks()) {
+    for (int i = 0; i < maps->length(); i++) {
+      prototype_maps_.Add(maps->at(i), info()->zone());
+    }
+  } else {
     for (int i = 0; i < prototypes->length(); i++) {
       __ LoadHeapObject(prototype_reg, prototypes->at(i));
       __ ldr(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index a22d192..f264259 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -56,6 +56,8 @@
         deoptimizations_(4, info->zone()),
         deopt_jump_table_(4, info->zone()),
         deoptimization_literals_(8, info->zone()),
+        prototype_maps_(0, info->zone()),
+        transition_maps_(0, info->zone()),
         inlined_function_count_(0),
         scope_(info->scope()),
         status_(UNUSED),
@@ -138,10 +140,6 @@
   void FinishCode(Handle<Code> code);
 
   // Deferred code support.
-  void DoDeferredBinaryOpStub(LPointerMap* pointer_map,
-                              LOperand* left_argument,
-                              LOperand* right_argument,
-                              Token::Value op);
   void DoDeferredNumberTagD(LNumberTagD* instr);
 
   enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
@@ -408,6 +406,8 @@
   ZoneList<LEnvironment*> deoptimizations_;
   ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
   ZoneList<Handle<Object> > deoptimization_literals_;
+  ZoneList<Handle<Map> > prototype_maps_;
+  ZoneList<Handle<Map> > transition_maps_;
   int inlined_function_count_;
   Scope* const scope_;
   Status status_;
diff --git a/src/atomicops_internals_mips_gcc.h b/src/atomicops_internals_mips_gcc.h
index 9498fd7..cb8f8b9 100644
--- a/src/atomicops_internals_mips_gcc.h
+++ b/src/atomicops_internals_mips_gcc.h
@@ -30,8 +30,6 @@
 #ifndef V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
 #define V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
 
-#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
-
 namespace v8 {
 namespace internal {
 
@@ -111,9 +109,9 @@
 
 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                         Atomic32 increment) {
-  ATOMICOPS_COMPILER_BARRIER();
+  MemoryBarrier();
   Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
-  ATOMICOPS_COMPILER_BARRIER();
+  MemoryBarrier();
   return res;
 }
 
@@ -126,19 +124,16 @@
 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
-  ATOMICOPS_COMPILER_BARRIER();
   Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-  ATOMICOPS_COMPILER_BARRIER();
+  MemoryBarrier();
   return res;
 }
 
 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
-  ATOMICOPS_COMPILER_BARRIER();
-  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-  ATOMICOPS_COMPILER_BARRIER();
-  return res;
+  MemoryBarrier();
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
 }
 
 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
@@ -176,6 +171,4 @@
 
 } }  // namespace v8::internal
 
-#undef ATOMICOPS_COMPILER_BARRIER
-
 #endif  // V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index 4be21c3..99c4db5 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -736,7 +736,7 @@
   CompareNilICStub* stub = casted_stub();
   HIfContinuation continuation;
   Handle<Map> sentinel_map(graph()->isolate()->heap()->meta_map());
-  BuildCompareNil(GetParameter(0), stub->GetKind(),
+  BuildCompareNil(GetParameter(0),
                   stub->GetTypes(), sentinel_map,
                   RelocInfo::kNoPosition, &continuation);
   IfBuilder if_nil(this, &continuation);
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index b4479da..6b6e250 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -432,25 +432,18 @@
 
 void CompareNilICStub::Record(Handle<Object> object) {
   ASSERT(types_ != Types::FullCompare());
-  if (equality_kind_ == kStrictEquality) {
-    // When testing for strict equality only one value will evaluate to true
-    types_.RemoveAll();
-    types_.Add((nil_value_ == kNullValue) ? NULL_TYPE:
-                                            UNDEFINED);
+  if (object->IsNull()) {
+    types_.Add(NULL_TYPE);
+  } else if (object->IsUndefined()) {
+    types_.Add(UNDEFINED);
+  } else if (object->IsUndetectableObject() ||
+             object->IsOddball() ||
+             !object->IsHeapObject()) {
+    types_ = Types::FullCompare();
+  } else if (IsMonomorphic()) {
+    types_ = Types::FullCompare();
   } else {
-    if (object->IsNull()) {
-      types_.Add(NULL_TYPE);
-    } else if (object->IsUndefined()) {
-      types_.Add(UNDEFINED);
-    } else if (object->IsUndetectableObject() ||
-               object->IsOddball() ||
-               !object->IsHeapObject()) {
-      types_ = Types::FullCompare();
-    } else if (IsMonomorphic()) {
-      types_ = Types::FullCompare();
-    } else {
-      types_.Add(MONOMORPHIC_MAP);
-    }
+    types_.Add(MONOMORPHIC_MAP);
   }
 }
 
@@ -477,8 +470,6 @@
   types_.Print(stream);
   stream->Add((nil_value_ == kNullValue) ? "(NullValue|":
                                            "(UndefinedValue|");
-  stream->Add((equality_kind_ == kStrictEquality) ? "StrictEquality)":
-                                                    "NonStrictEquality)");
 }
 
 
diff --git a/src/code-stubs.h b/src/code-stubs.h
index f4e2b7d..0ea7ac9 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -1154,24 +1154,21 @@
   // boolean flags we need to store. :-P
   STATIC_ASSERT(NUMBER_OF_TYPES <= 6);
 
-  CompareNilICStub(EqualityKind kind, NilValue nil, Types types = Types())
+  CompareNilICStub(NilValue nil, Types types = Types())
       : types_(types) {
-    equality_kind_ = kind;
     nil_value_ = nil;
   }
 
   CompareNilICStub(Code::ExtraICState ic_state,
                    InitializationState init_state = INITIALIZED)
       : HydrogenCodeStub(init_state) {
-    equality_kind_ = EqualityKindField::decode(ic_state);
     nil_value_ = NilValueField::decode(ic_state);
     types_ = Types(ExtractTypesFromExtraICState(ic_state));
   }
 
   static Handle<Code> GetUninitialized(Isolate* isolate,
-                                       EqualityKind kind,
                                        NilValue nil) {
-    return CompareNilICStub(kind, nil, UNINITIALIZED).GetCode(isolate);
+    return CompareNilICStub(nil, UNINITIALIZED).GetCode(isolate);
   }
 
   virtual void InitializeInterfaceDescriptor(
@@ -1179,7 +1176,7 @@
       CodeStubInterfaceDescriptor* descriptor);
 
   static void InitializeForIsolate(Isolate* isolate) {
-    CompareNilICStub compare_stub(kStrictEquality, kNullValue, UNINITIALIZED);
+    CompareNilICStub compare_stub(kNullValue, UNINITIALIZED);
     compare_stub.InitializeInterfaceDescriptor(
         isolate,
         isolate->code_stub_interface_descriptor(CodeStub::CompareNilIC));
@@ -1199,10 +1196,9 @@
 
   Handle<Code> GenerateCode();
 
-  // extra ic state = nil_value | equality_kind | type_n-1 | ... | type_0
+  // extra ic state = nil_value | type_n-1 | ... | type_0
   virtual Code::ExtraICState GetExtraICState() {
     return NilValueField::encode(nil_value_)         |
-           EqualityKindField::encode(equality_kind_) |
            types_.ToIntegral();
   }
   static byte ExtractTypesFromExtraICState(
@@ -1213,32 +1209,25 @@
   void Record(Handle<Object> object);
 
   bool IsMonomorphic() const { return types_.Contains(MONOMORPHIC_MAP); }
-  EqualityKind GetKind() const { return equality_kind_; }
   NilValue GetNilValue() const { return nil_value_; }
   Types GetTypes() const { return types_; }
   void ClearTypes() { types_.RemoveAll(); }
-  void SetKind(EqualityKind kind) { equality_kind_ = kind; }
 
   virtual void PrintName(StringStream* stream);
 
  private:
   friend class CompareNilIC;
 
-  CompareNilICStub(EqualityKind kind, NilValue nil,
-                   InitializationState init_state)
+  CompareNilICStub(NilValue nil, InitializationState init_state)
       : HydrogenCodeStub(init_state) {
-    equality_kind_ = kind;
     nil_value_ = nil;
   }
 
-  class EqualityKindField : public BitField<EqualityKind, NUMBER_OF_TYPES, 1> {
-  };
-  class NilValueField : public BitField<NilValue, NUMBER_OF_TYPES+1, 1> {};
+  class NilValueField : public BitField<NilValue, NUMBER_OF_TYPES, 1> {};
 
   virtual CodeStub::Major MajorKey() { return CompareNilIC; }
   virtual int NotMissMinorKey() { return GetExtraICState(); }
 
-  EqualityKind equality_kind_;
   NilValue nil_value_;
   Types types_;
 
diff --git a/src/compiler.cc b/src/compiler.cc
index c6b911f..5fc107f 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -106,9 +106,6 @@
   opt_count_ = shared_info().is_null() ? 0 : shared_info()->opt_count();
   no_frame_ranges_ = isolate->cpu_profiler()->is_profiling()
                    ? new List<OffsetRange>(2) : NULL;
-  for (int i = 0; i < DependentCode::kGroupCount; i++) {
-    dependent_maps_[i] = NULL;
-  }
   if (mode == STUB) {
     mode_ = STUB;
     return;
@@ -128,41 +125,6 @@
 CompilationInfo::~CompilationInfo() {
   delete deferred_handles_;
   delete no_frame_ranges_;
-#ifdef DEBUG
-  // Check that no dependent maps have been added or added dependent maps have
-  // been rolled back or committed.
-  for (int i = 0; i < DependentCode::kGroupCount; i++) {
-    ASSERT_EQ(NULL, dependent_maps_[i]);
-  }
-#endif  // DEBUG
-}
-
-
-void CompilationInfo::CommitDependentMaps(Handle<Code> code) {
-  for (int i = 0; i < DependentCode::kGroupCount; i++) {
-    ZoneList<Handle<Map> >* group_maps = dependent_maps_[i];
-    if (group_maps == NULL) continue;
-    ASSERT(!object_wrapper_.is_null());
-    for (int j = 0; j < group_maps->length(); j++) {
-      group_maps->at(j)->dependent_code()->UpdateToFinishedCode(
-          static_cast<DependentCode::DependencyGroup>(i), this, *code);
-    }
-    dependent_maps_[i] = NULL;  // Zone-allocated, no need to delete.
-  }
-}
-
-
-void CompilationInfo::RollbackDependentMaps() {
-  // Unregister from all dependent maps if not yet committed.
-  for (int i = 0; i < DependentCode::kGroupCount; i++) {
-    ZoneList<Handle<Map> >* group_maps = dependent_maps_[i];
-    if (group_maps == NULL) continue;
-    for (int j = 0; j < group_maps->length(); j++) {
-      group_maps->at(j)->dependent_code()->RemoveCompilationInfo(
-          static_cast<DependentCode::DependencyGroup>(i), this);
-    }
-    dependent_maps_[i] = NULL;  // Zone-allocated, no need to delete.
-  }
 }
 
 
@@ -1020,7 +982,7 @@
   // The function may have already been optimized by OSR.  Simply continue.
   // Except when OSR already disabled optimization for some reason.
   if (info->shared_info()->optimization_disabled()) {
-    info->AbortOptimization();
+    info->SetCode(Handle<Code>(info->shared_info()->code()));
     InstallFullCode(*info);
     if (FLAG_trace_parallel_recompilation) {
       PrintF("  ** aborting optimization for ");
@@ -1038,11 +1000,9 @@
   // If crankshaft succeeded, install the optimized code else install
   // the unoptimized code.
   OptimizingCompiler::Status status = optimizing_compiler->last_status();
-  if (info->HasAbortedDueToDependentMap()) {
-    info->set_bailout_reason("bailed out due to dependent map");
-    status = optimizing_compiler->AbortOptimization();
-  } else if (status != OptimizingCompiler::SUCCEEDED) {
-    info->set_bailout_reason("failed/bailed out last time");
+  if (status != OptimizingCompiler::SUCCEEDED) {
+    optimizing_compiler->info()->set_bailout_reason(
+        "failed/bailed out last time");
     status = optimizing_compiler->AbortOptimization();
   } else {
     status = optimizing_compiler->GenerateAndInstallCode();
diff --git a/src/compiler.h b/src/compiler.h
index f53feb9..8e6d295 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -57,8 +57,12 @@
 // is constructed based on the resources available at compile-time.
 class CompilationInfo {
  public:
+  CompilationInfo(Handle<Script> script, Zone* zone);
+  CompilationInfo(Handle<SharedFunctionInfo> shared_info, Zone* zone);
   CompilationInfo(Handle<JSFunction> closure, Zone* zone);
-  virtual ~CompilationInfo();
+  CompilationInfo(HydrogenCodeStub* stub, Isolate* isolate, Zone* zone);
+
+  ~CompilationInfo();
 
   Isolate* isolate() {
     ASSERT(Isolate::Current() == isolate_);
@@ -239,17 +243,6 @@
     deferred_handles_ = deferred_handles;
   }
 
-  ZoneList<Handle<Map> >* dependent_maps(DependentCode::DependencyGroup group) {
-    if (dependent_maps_[group] == NULL) {
-      dependent_maps_[group] = new(zone_) ZoneList<Handle<Map> >(2, zone_);
-    }
-    return dependent_maps_[group];
-  }
-
-  void CommitDependentMaps(Handle<Code> code);
-
-  void RollbackDependentMaps();
-
   void SaveHandles() {
     SaveHandle(&closure_);
     SaveHandle(&shared_info_);
@@ -283,26 +276,6 @@
     return result;
   }
 
-  Handle<Foreign> object_wrapper() {
-    if (object_wrapper_.is_null()) {
-      object_wrapper_ =
-          isolate()->factory()->NewForeign(reinterpret_cast<Address>(this));
-    }
-    return object_wrapper_;
-  }
-
-  void AbortDueToDependentMap() {
-    mode_ = DEPENDENT_MAP_ABORT;
-  }
-
-  bool HasAbortedDueToDependentMap() {
-    return mode_ == DEPENDENT_MAP_ABORT;
-  }
-
- protected:
-  CompilationInfo(Handle<Script> script, Zone* zone);
-  CompilationInfo(Handle<SharedFunctionInfo> shared_info, Zone* zone);
-  CompilationInfo(HydrogenCodeStub* stub, Isolate* isolate, Zone* zone);
 
  private:
   Isolate* isolate_;
@@ -316,8 +289,7 @@
     BASE,
     OPTIMIZE,
     NONOPT,
-    STUB,
-    DEPENDENT_MAP_ABORT
+    STUB
   };
 
   void Initialize(Isolate* isolate, Mode mode, Zone* zone);
@@ -397,8 +369,6 @@
 
   DeferredHandles* deferred_handles_;
 
-  ZoneList<Handle<Map> >* dependent_maps_[DependentCode::kGroupCount];
-
   template<typename T>
   void SaveHandle(Handle<T> *object) {
     if (!object->is_null()) {
@@ -417,8 +387,6 @@
   // during graph optimization.
   int opt_count_;
 
-  Handle<Foreign> object_wrapper_;
-
   DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
 };
 
@@ -439,18 +407,11 @@
       : CompilationInfo(closure, &zone_),
         zone_(closure->GetIsolate()),
         zone_scope_(&zone_, DELETE_ON_EXIT) {}
-  CompilationInfoWithZone(HydrogenCodeStub* stub, Isolate* isolate)
+  explicit CompilationInfoWithZone(HydrogenCodeStub* stub, Isolate* isolate)
       : CompilationInfo(stub, isolate, &zone_),
         zone_(isolate),
         zone_scope_(&zone_, DELETE_ON_EXIT) {}
 
-  // Virtual destructor because a CompilationInfoWithZone has to exit the
-  // zone scope and get rid of dependent maps even when the destructor is
-  // called when cast as a CompilationInfo.
-  virtual ~CompilationInfoWithZone() {
-    RollbackDependentMaps();
-  }
-
  private:
   Zone zone_;
   ZoneScope zone_scope_;
diff --git a/src/d8.cc b/src/d8.cc
index 57e0c04..a917dbd 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -1571,6 +1571,13 @@
 #endif
 
 
+class ShellArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
+ public:
+  virtual void* Allocate(size_t length) { return malloc(length); }
+  virtual void Free(void* data) { free(data); }
+};
+
+
 int Shell::Main(int argc, char* argv[]) {
   if (!SetOptions(argc, argv)) return 1;
 #ifndef V8_SHARED
@@ -1579,6 +1586,8 @@
 #else
   EnableHarmonyTypedArraysViaCommandLine();
 #endif
+  ShellArrayBufferAllocator array_buffer_allocator;
+  v8::V8::SetArrayBufferAllocator(&array_buffer_allocator);
   int result = 0;
   Isolate* isolate = Isolate::GetCurrent();
   DumbLineEditor dumb_line_editor(isolate);
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 49dac4a..b70a532 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -178,7 +178,8 @@
 DEFINE_implication(harmony, harmony_proxies)
 DEFINE_implication(harmony, harmony_collections)
 DEFINE_implication(harmony, harmony_observation)
-DEFINE_implication(harmony, harmony_generators)
+// TODO(wingo): Re-enable when the GC bug that appeared in r15060 is gone.
+// DEFINE_implication(harmony, harmony_generators)
 DEFINE_implication(harmony, harmony_iteration)
 DEFINE_implication(harmony_modules, harmony_scoping)
 DEFINE_implication(harmony_observation, harmony_collections)
diff --git a/src/gdb-jit.cc b/src/gdb-jit.cc
index d08f2fe..5717a96 100644
--- a/src/gdb-jit.cc
+++ b/src/gdb-jit.cc
@@ -2062,7 +2062,7 @@
   if (!FLAG_gdbjit) return;
 
   ScopedLock lock(mutex.Pointer());
-  AssertNoAllocation no_gc;
+  DisallowHeapAllocation no_gc;
 
   HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
   if (e->value != NULL && !IsLineInfoTagged(e->value)) return;
diff --git a/src/handles.cc b/src/handles.cc
index 123fdc5..81828d9 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -566,7 +566,8 @@
 #if ENABLE_EXTRA_CHECKS
   CHECK(result.IsEmpty() || v8::Utils::OpenHandle(*result)->IsJSObject());
 #endif
-  return result;
+  return v8::Local<v8::Array>::New(reinterpret_cast<v8::Isolate*>(isolate),
+                                   result);
 }
 
 
@@ -591,7 +592,8 @@
 #endif
     }
   }
-  return result;
+  return v8::Local<v8::Array>::New(reinterpret_cast<v8::Isolate*>(isolate),
+                                   result);
 }
 
 
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 073f7a1..b36706b 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -528,6 +528,17 @@
 }
 
 
+bool HValue::HasAtLeastOneUseWithFlagAndNoneWithout(Flag f) {
+  bool return_value = false;
+  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+    if (it.value()->IsSimulate()) continue;
+    if (!it.value()->CheckFlag(f)) return false;
+    return_value = true;
+  }
+  return return_value;
+}
+
+
 HUseIterator::HUseIterator(HUseListNode* head) : next_(head) {
   Advance();
 }
@@ -1429,14 +1440,6 @@
 }
 
 
-HValue* HArithmeticBinaryOperation::Canonicalize() {
-  if (representation().IsInteger32() && CheckUsesForFlag(kTruncatingToInt32)) {
-    ClearFlag(kCanOverflow);
-  }
-  return this;
-}
-
-
 static bool IsIdentityOperation(HValue* arg1, HValue* arg2, int32_t identity) {
   return arg1->representation().IsSpecialization() &&
     arg2->EqualsInteger32Constant(identity);
@@ -1446,13 +1449,13 @@
 HValue* HAdd::Canonicalize() {
   if (IsIdentityOperation(left(), right(), 0)) return left();
   if (IsIdentityOperation(right(), left(), 0)) return right();
-  return HArithmeticBinaryOperation::Canonicalize();
+  return this;
 }
 
 
 HValue* HSub::Canonicalize() {
   if (IsIdentityOperation(left(), right(), 0)) return left();
-  return HArithmeticBinaryOperation::Canonicalize();
+  return this;
 }
 
 
@@ -1513,6 +1516,11 @@
     // If the input is integer32 then we replace the floor instruction
     // with its input. This happens before the representation changes are
     // introduced.
+
+    // TODO(2205): The above comment is lying. All of this happens
+    // *after* representation changes are introduced. We should check
+    // for value->IsChange() and react accordingly if yes.
+
     if (value()->representation().IsInteger32()) return value();
 
 #if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_IA32) || \
@@ -1753,11 +1761,13 @@
     Range* a = left()->range();
     Range* b = right()->range();
     Range* res = a->Copy(zone);
-    if (!res->AddAndCheckOverflow(b)) {
+    if (!res->AddAndCheckOverflow(b) ||
+        CheckFlag(kAllUsesTruncatingToInt32)) {
       ClearFlag(kCanOverflow);
     }
-    bool m0 = a->CanBeMinusZero() && b->CanBeMinusZero();
-    res->set_can_be_minus_zero(m0);
+    if (!CheckFlag(kAllUsesTruncatingToInt32)) {
+      res->set_can_be_minus_zero(a->CanBeMinusZero() && b->CanBeMinusZero());
+    }
     return res;
   } else {
     return HValue::InferRange(zone);
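
The new kAllUsesTruncatingToInt32 checks above encode a simple fact: when every consumer of the sum truncates to 32 bits, the wrap-around value produced by the machine add is already the value those consumers observe, so kCanOverflow and the minus-zero bookkeeping can be dropped. In scalar terms (sketch):

  #include <stdint.h>

  // Wrap-around int32 addition: with all-truncating uses this is exactly
  // the observable result, whether or not the signed add overflowed.
  int32_t AddTruncating(int32_t a, int32_t b) {
    return static_cast<int32_t>(static_cast<uint32_t>(a) +
                                static_cast<uint32_t>(b));
  }

The Mul comment further below notes the one exception: truncated integer multiplication is more precise than a double multiply followed by truncation, so kCanOverflow cannot be cleared there.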
@@ -1770,10 +1780,13 @@
     Range* a = left()->range();
     Range* b = right()->range();
     Range* res = a->Copy(zone);
-    if (!res->SubAndCheckOverflow(b)) {
+    if (!res->SubAndCheckOverflow(b) ||
+        CheckFlag(kAllUsesTruncatingToInt32)) {
       ClearFlag(kCanOverflow);
     }
-    res->set_can_be_minus_zero(a->CanBeMinusZero() && b->CanBeZero());
+    if (!CheckFlag(kAllUsesTruncatingToInt32)) {
+      res->set_can_be_minus_zero(a->CanBeMinusZero() && b->CanBeZero());
+    }
     return res;
   } else {
     return HValue::InferRange(zone);
@@ -1787,11 +1800,16 @@
     Range* b = right()->range();
     Range* res = a->Copy(zone);
     if (!res->MulAndCheckOverflow(b)) {
+      // Clearing the kCanOverflow flag when kAllUsesTruncatingToInt32 is set
+      // would be wrong, because truncated integer multiplication is too
+      // precise and therefore not the same as converting to Double and back.
       ClearFlag(kCanOverflow);
     }
-    bool m0 = (a->CanBeZero() && b->CanBeNegative()) ||
-        (a->CanBeNegative() && b->CanBeZero());
-    res->set_can_be_minus_zero(m0);
+    if (!CheckFlag(kAllUsesTruncatingToInt32)) {
+      bool m0 = (a->CanBeZero() && b->CanBeNegative()) ||
+          (a->CanBeNegative() && b->CanBeZero());
+      res->set_can_be_minus_zero(m0);
+    }
     return res;
   } else {
     return HValue::InferRange(zone);
@@ -1804,12 +1822,14 @@
     Range* a = left()->range();
     Range* b = right()->range();
     Range* result = new(zone) Range();
-    if (a->CanBeMinusZero()) {
-      result->set_can_be_minus_zero(true);
-    }
+    if (!CheckFlag(kAllUsesTruncatingToInt32)) {
+      if (a->CanBeMinusZero()) {
+        result->set_can_be_minus_zero(true);
+      }
 
-    if (a->CanBeZero() && b->CanBeNegative()) {
-      result->set_can_be_minus_zero(true);
+      if (a->CanBeZero() && b->CanBeNegative()) {
+        result->set_can_be_minus_zero(true);
+      }
     }
 
     if (!a->Includes(kMinInt) || !b->Includes(-1)) {
@@ -1841,7 +1861,7 @@
     Range* result = new(zone) Range(left_can_be_negative ? -positive_bound : 0,
                                     a->CanBePositive() ? positive_bound : 0);
 
-    if (left_can_be_negative) {
+    if (left_can_be_negative && !CheckFlag(kAllUsesTruncatingToInt32)) {
       result->set_can_be_minus_zero(true);
     }
 
@@ -2293,10 +2313,6 @@
          current_rep.IsInteger32() &&
          // Mul in Integer32 mode would be too precise.
          !this->IsMul() &&
-         // TODO(jkummerow): Remove blacklisting of Div when the Div
-         // instruction has learned not to deopt when the remainder is
-         // non-zero but all uses are truncating.
-         !this->IsDiv() &&
          CheckUsesForFlag(kTruncatingToInt32);
 }
 
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index fc2a200..82ed261 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -54,6 +54,7 @@
 
 
 #define HYDROGEN_ABSTRACT_INSTRUCTION_LIST(V)  \
+  V(ArithmeticBinaryOperation)                 \
   V(BinaryOperation)                           \
   V(BitwiseBinaryOperation)                    \
   V(ControlInstruction)                        \
@@ -797,6 +798,7 @@
     kAllowUndefinedAsNaN,
     kIsArguments,
     kTruncatingToInt32,
+    kAllUsesTruncatingToInt32,
     // Set after an instruction is killed.
     kIsDead,
     // Instructions that are allowed to produce full range unsigned integer
@@ -996,6 +998,9 @@
 
   // Returns true if the flag specified is set for all uses, false otherwise.
   bool CheckUsesForFlag(Flag f);
+  // Returns true if the flag specified is set for all uses, and this set
+  // of uses is non-empty.
+  bool HasAtLeastOneUseWithFlagAndNoneWithout(Flag f);
 
   GVNFlagSet gvn_flags() const { return gvn_flags_; }
   void SetGVNFlag(GVNFlag f) { gvn_flags_.Add(f); }
@@ -2972,33 +2977,21 @@
  public:
   HCheckPrototypeMaps(Handle<JSObject> prototype,
                       Handle<JSObject> holder,
-                      Zone* zone,
-                      CompilationInfo* info)
+                      Zone* zone)
       : prototypes_(2, zone),
         maps_(2, zone),
         first_prototype_unique_id_(),
-        last_prototype_unique_id_(),
-        can_omit_prototype_maps_(true) {
+        last_prototype_unique_id_() {
     SetFlag(kUseGVN);
     SetGVNFlag(kDependsOnMaps);
     // Keep a list of all objects on the prototype chain up to the holder
     // and the expected maps.
     while (true) {
       prototypes_.Add(prototype, zone);
-      Handle<Map> map(prototype->map());
-      maps_.Add(map, zone);
-      can_omit_prototype_maps_ &= map->CanOmitPrototypeChecks();
+      maps_.Add(Handle<Map>(prototype->map()), zone);
       if (prototype.is_identical_to(holder)) break;
       prototype = Handle<JSObject>(JSObject::cast(prototype->GetPrototype()));
     }
-    if (can_omit_prototype_maps_) {
-      // Mark in-flight compilation as dependent on those maps.
-      for (int i = 0; i < maps()->length(); i++) {
-        Handle<Map> map = maps()->at(i);
-        map->AddDependentCompilationInfo(DependentCode::kPrototypeCheckGroup,
-                                         info);
-      }
-    }
   }
 
   ZoneList<Handle<JSObject> >* prototypes() { return &prototypes_; }
@@ -3023,7 +3016,12 @@
     last_prototype_unique_id_ = UniqueValueId(prototypes_.last());
   }
 
-  bool CanOmitPrototypeChecks() { return can_omit_prototype_maps_; }
+  bool CanOmitPrototypeChecks() {
+    for (int i = 0; i < maps()->length(); i++) {
+      if (!maps()->at(i)->CanOmitPrototypeChecks()) return false;
+    }
+    return true;
+  }
 
  protected:
   virtual bool DataEquals(HValue* other) {
@@ -3037,7 +3035,6 @@
   ZoneList<Handle<Map> > maps_;
   UniqueValueId first_prototype_unique_id_;
   UniqueValueId last_prototype_unique_id_;
-  bool can_omit_prototype_maps_;
 };
 
 
@@ -3856,7 +3853,7 @@
         : representation();
   }
 
-  virtual HValue* Canonicalize();
+  DECLARE_ABSTRACT_INSTRUCTION(ArithmeticBinaryOperation)
 
  private:
   virtual bool IsDeletable() const { return true; }
@@ -4488,9 +4485,8 @@
                            HValue* right);
 
   bool HasPowerOf2Divisor() {
-    if (right()->IsConstant() &&
-        HConstant::cast(right())->HasInteger32Value()) {
-      int32_t value = HConstant::cast(right())->Integer32Value();
+    if (right()->IsInteger32Constant()) {
+      int32_t value = right()->GetInteger32Constant();
       return value != 0 && (IsPowerOf2(value) || IsPowerOf2(-value));
     }
 
@@ -5695,7 +5691,6 @@
                        = Representation::Tagged())
       : access_(access),
         field_representation_(field_representation),
-        transition_(),
         transition_unique_id_(),
         new_space_dominator_(NULL) {
     SetOperandAt(0, obj);
@@ -5727,13 +5722,7 @@
   HObjectAccess access() const { return access_; }
   Handle<Map> transition() const { return transition_; }
   UniqueValueId transition_unique_id() const { return transition_unique_id_; }
-  void SetTransition(Handle<Map> map, CompilationInfo* info) {
-    ASSERT(transition_.is_null());  // Only set once.
-    if (map->CanBeDeprecated()) {
-      map->AddDependentCompilationInfo(DependentCode::kTransitionGroup, info);
-    }
-    transition_ = map;
-  }
+  void set_transition(Handle<Map> map) { transition_ = map; }
   HValue* new_space_dominator() const { return new_space_dominator_; }
 
   bool NeedsWriteBarrier() {
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 60edbb7..b2badcd 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -1698,7 +1698,6 @@
 
 void HGraphBuilder::BuildCompareNil(
     HValue* value,
-    EqualityKind kind,
     CompareNilICStub::Types types,
     Handle<Map> map,
     int position,
@@ -1733,9 +1732,7 @@
       // emitted below is the actual monomorphic map.
       BuildCheckMap(value, map);
     } else {
-      if (kind == kNonStrictEquality) {
-        if_nil.Deopt();
-      }
+      if_nil.Deopt();
     }
   }
 
@@ -2094,6 +2091,22 @@
 
 void HGraph::Canonicalize() {
   HPhase phase("H_Canonicalize", this);
+  // Before removing no-op instructions, save their semantic value.
+  // We must be careful not to set the flag unnecessarily, because GVN
+  // cannot identify two instructions when their flag value differs.
+  for (int i = 0; i < blocks()->length(); ++i) {
+    HInstruction* instr = blocks()->at(i)->first();
+    while (instr != NULL) {
+      if (instr->IsArithmeticBinaryOperation() &&
+          instr->representation().IsInteger32() &&
+          instr->HasAtLeastOneUseWithFlagAndNoneWithout(
+              HInstruction::kTruncatingToInt32)) {
+        instr->SetFlag(HInstruction::kAllUsesTruncatingToInt32);
+      }
+      instr = instr->next();
+    }
+  }
+  // Perform the actual canonicalization pass.
   for (int i = 0; i < blocks()->length(); ++i) {
     HInstruction* instr = blocks()->at(i)->first();
     while (instr != NULL) {
@@ -3810,7 +3823,7 @@
 
 
 void HOptimizedGraphBuilder::Bailout(const char* reason) {
-  current_info()->set_bailout_reason(reason);
+  info()->set_bailout_reason(reason);
   SetStackOverflow();
 }
 
@@ -3867,11 +3880,11 @@
 
 
 bool HOptimizedGraphBuilder::BuildGraph() {
-  if (current_info()->function()->is_generator()) {
+  if (info()->function()->is_generator()) {
     Bailout("function is a generator");
     return false;
   }
-  Scope* scope = current_info()->scope();
+  Scope* scope = info()->scope();
   if (scope->HasIllegalRedeclaration()) {
     Bailout("function with illegal redeclaration");
     return false;
@@ -3915,7 +3928,7 @@
   AddInstruction(
       new(zone()) HStackCheck(context, HStackCheck::kFunctionEntry));
 
-  VisitStatements(current_info()->function()->body());
+  VisitStatements(info()->function()->body());
   if (HasStackOverflow()) return false;
 
   if (current_block() != NULL) {
@@ -3927,7 +3940,7 @@
   // last time this function was compiled, then this recompile is likely not
   // due to missing/inadequate type feedback, but rather too aggressive
   // optimization. Disable optimistic LICM in that case.
-  Handle<Code> unoptimized_code(current_info()->shared_info()->code());
+  Handle<Code> unoptimized_code(info()->shared_info()->code());
   ASSERT(unoptimized_code->kind() == Code::FUNCTION);
   Handle<TypeFeedbackInfo> type_info(
       TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
@@ -5116,7 +5129,7 @@
 
 
 bool HOptimizedGraphBuilder::HasOsrEntryAt(IterationStatement* statement) {
-  return statement->OsrEntryId() == current_info()->osr_ast_id();
+  return statement->OsrEntryId() == info()->osr_ast_id();
 }
 
 
@@ -5503,9 +5516,9 @@
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
   Handle<SharedFunctionInfo> shared_info =
-      SearchSharedFunctionInfo(current_info()->shared_info()->code(), expr);
+      SearchSharedFunctionInfo(info()->shared_info()->code(), expr);
   if (shared_info.is_null()) {
-    shared_info = Compiler::BuildFunctionInfo(expr, current_info()->script());
+    shared_info = Compiler::BuildFunctionInfo(expr, info()->script());
   }
   // We also have a stack overflow if the recursive compilation did.
   if (HasStackOverflow()) return;
@@ -5566,10 +5579,10 @@
 HOptimizedGraphBuilder::GlobalPropertyAccess
     HOptimizedGraphBuilder::LookupGlobalProperty(
         Variable* var, LookupResult* lookup, bool is_store) {
-  if (var->is_this() || !current_info()->has_global_object()) {
+  if (var->is_this() || !info()->has_global_object()) {
     return kUseGeneric;
   }
-  Handle<GlobalObject> global(current_info()->global_object());
+  Handle<GlobalObject> global(info()->global_object());
   global->Lookup(*var->name(), lookup);
   if (!lookup->IsNormal() ||
       (is_store && lookup->IsReadOnly()) ||
@@ -5584,7 +5597,7 @@
 HValue* HOptimizedGraphBuilder::BuildContextChainWalk(Variable* var) {
   ASSERT(var->IsContextSlot());
   HValue* context = environment()->LookupContext();
-  int length = current_info()->scope()->ContextChainLength(var->scope());
+  int length = info()->scope()->ContextChainLength(var->scope());
   while (length-- > 0) {
     HInstruction* context_instruction = new(zone()) HOuterContext(context);
     AddInstruction(context_instruction);
@@ -5620,12 +5633,12 @@
           LookupGlobalProperty(variable, &lookup, false);
 
       if (type == kUseCell &&
-          current_info()->global_object()->IsAccessCheckNeeded()) {
+          info()->global_object()->IsAccessCheckNeeded()) {
         type = kUseGeneric;
       }
 
       if (type == kUseCell) {
-        Handle<GlobalObject> global(current_info()->global_object());
+        Handle<GlobalObject> global(info()->global_object());
         Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
         HLoadGlobalCell* instr =
             new(zone()) HLoadGlobalCell(cell, lookup.GetPropertyDetails());
@@ -6216,8 +6229,7 @@
     AddInstruction(new(zone()) HCheckPrototypeMaps(
         Handle<JSObject>(JSObject::cast(map->prototype())),
         Handle<JSObject>(JSObject::cast(proto)),
-        zone(),
-        top_info()));
+        zone()));
   }
 
   HObjectAccess field_access = HObjectAccess::ForField(map, lookup, name);
@@ -6253,7 +6265,7 @@
 
   if (transition_to_field) {
     Handle<Map> transition(lookup->GetTransitionMapFromMap(*map));
-    instr->SetTransition(transition, top_info());
+    instr->set_transition(transition);
     // TODO(fschneider): Record the new map type of the object in the IR to
     // enable elimination of redundant checks after the transition store.
     instr->SetGVNFlag(kChangesMaps);
@@ -6556,7 +6568,7 @@
   LookupResult lookup(isolate());
   GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
   if (type == kUseCell) {
-    Handle<GlobalObject> global(current_info()->global_object());
+    Handle<GlobalObject> global(info()->global_object());
     Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
     HInstruction* instr =
         new(zone()) HStoreGlobalCell(value, cell, lookup.GetPropertyDetails());
@@ -6621,13 +6633,13 @@
         // Bail out if we try to mutate a parameter value in a function
         // using the arguments object.  We do not (yet) correctly handle the
         // arguments property of the function.
-        if (current_info()->scope()->arguments() != NULL) {
+        if (info()->scope()->arguments() != NULL) {
           // Parameters will be allocated to context slots.  We have no
           // direct way to detect that the variable is a parameter so we do
           // a linear search of the parameter variables.
-          int count = current_info()->scope()->num_parameters();
+          int count = info()->scope()->num_parameters();
           for (int i = 0; i < count; ++i) {
-            if (var == current_info()->scope()->parameter(i)) {
+            if (var == info()->scope()->parameter(i)) {
               Bailout(
                   "assignment to parameter, function uses arguments object");
             }
@@ -6847,12 +6859,12 @@
         // Bail out if we try to mutate a parameter value in a function using
         // the arguments object.  We do not (yet) correctly handle the
         // arguments property of the function.
-        if (current_info()->scope()->arguments() != NULL) {
+        if (info()->scope()->arguments() != NULL) {
           // Parameters will rewrite to context slots.  We have no direct way
           // to detect that the variable is a parameter.
-          int count = current_info()->scope()->num_parameters();
+          int count = info()->scope()->num_parameters();
           for (int i = 0; i < count; ++i) {
-            if (var == current_info()->scope()->parameter(i)) {
+            if (var == info()->scope()->parameter(i)) {
               return Bailout("assignment to parameter in arguments object");
             }
           }
@@ -7014,8 +7026,8 @@
     Handle<JSObject> holder(lookup.holder());
     Handle<Map> holder_map(holder->map());
     AddCheckMap(object, map);
-    AddInstruction(new(zone()) HCheckPrototypeMaps(
-        prototype, holder, zone(), top_info()));
+    AddInstruction(
+        new(zone()) HCheckPrototypeMaps(prototype, holder, zone()));
     HValue* holder_value = AddInstruction(new(zone())
         HConstant(holder, Representation::Tagged()));
     return BuildLoadNamedField(holder_value,
@@ -7029,8 +7041,7 @@
     Handle<JSObject> holder(lookup.holder());
     Handle<Map> holder_map(holder->map());
     AddCheckMap(object, map);
-    AddInstruction(new(zone()) HCheckPrototypeMaps(
-        prototype, holder, zone(), top_info()));
+    AddInstruction(new(zone()) HCheckPrototypeMaps(prototype, holder, zone()));
     Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*holder_map));
     return new(zone()) HConstant(function, Representation::Tagged());
   }
@@ -7067,8 +7078,8 @@
       isolate()->IsFastArrayConstructorPrototypeChainIntact()) {
     Handle<JSObject> prototype(JSObject::cast(map->prototype()), isolate());
     Handle<JSObject> object_prototype = isolate()->initial_object_prototype();
-    AddInstruction(new(zone()) HCheckPrototypeMaps(
-        prototype, object_prototype, zone(), top_info()));
+    AddInstruction(
+        new(zone()) HCheckPrototypeMaps(prototype, object_prototype, zone()));
     load_mode = ALLOW_RETURN_HOLE;
     graph()->MarkDependsOnEmptyArrayProtoElements();
   }
@@ -7583,8 +7594,8 @@
                                                    Handle<Map> receiver_map) {
   if (!holder.is_null()) {
     Handle<JSObject> prototype(JSObject::cast(receiver_map->prototype()));
-    AddInstruction(new(zone()) HCheckPrototypeMaps(
-        prototype, holder, zone(), top_info()));
+    AddInstruction(
+        new(zone()) HCheckPrototypeMaps(prototype, holder, zone()));
   }
 }
 
@@ -7729,7 +7740,7 @@
     expr->ComputeTarget(map, name);
     AddCheckPrototypeMaps(expr->holder(), map);
     if (FLAG_trace_inlining && FLAG_polymorphic_inlining) {
-      Handle<JSFunction> caller = current_info()->closure();
+      Handle<JSFunction> caller = info()->closure();
       SmartArrayPointer<char> caller_name =
           caller->shared()->DebugName()->ToCString();
       PrintF("Trying to inline the polymorphic call to %s from %s\n",
@@ -7813,7 +7824,7 @@
 
   // Precondition: call is monomorphic and we have found a target with the
   // appropriate arity.
-  Handle<JSFunction> caller = current_info()->closure();
+  Handle<JSFunction> caller = info()->closure();
   Handle<SharedFunctionInfo> target_shared(target->shared());
 
   // Do a quick check on source code length to avoid parsing large
@@ -7849,7 +7860,7 @@
   int nodes_added = InliningAstSize(target);
   if (nodes_added == kNotInlinable) return false;
 
-  Handle<JSFunction> caller = current_info()->closure();
+  Handle<JSFunction> caller = info()->closure();
 
   if (nodes_added > Min(FLAG_max_inlined_nodes, kUnlimitedMaxInlinedNodes)) {
     TraceInline(target, caller, "target AST is too large [early]");
@@ -7858,7 +7869,7 @@
 
 #if !defined(V8_TARGET_ARCH_IA32)
   // Target must be able to use caller's context.
-  CompilationInfo* outer_info = current_info();
+  CompilationInfo* outer_info = info();
   if (target->context() != outer_info->closure()->context() ||
       outer_info->scope()->contains_with() ||
       outer_info->scope()->num_heap_slots() > 0) {
@@ -8293,8 +8304,7 @@
             Call::GetPrototypeForPrimitiveCheck(STRING_CHECK,
                 expr->holder()->GetIsolate()),
             expr->holder(),
-            zone(),
-            top_info()));
+            zone()));
         HInstruction* char_code =
             BuildStringCharCodeAt(context, string, index);
         if (id == kStringCharCodeAt) {
@@ -8445,7 +8455,7 @@
     return false;
   }
 
-  if (current_info()->scope()->arguments() == NULL) return false;
+  if (info()->scope()->arguments() == NULL) return false;
 
   ZoneList<Expression*>* args = expr->arguments();
   if (args->length() != 2) return false;
@@ -8686,8 +8696,8 @@
       LookupResult lookup(isolate());
       GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, false);
       if (type == kUseCell &&
-          !current_info()->global_object()->IsAccessCheckNeeded()) {
-        Handle<GlobalObject> global(current_info()->global_object());
+          !info()->global_object()->IsAccessCheckNeeded()) {
+        Handle<GlobalObject> global(info()->global_object());
         known_global_function = expr->ComputeGlobalTarget(global, &lookup);
       }
       if (known_global_function) {
@@ -8722,7 +8732,7 @@
         }
         if (TryInlineCall(expr)) return;
 
-        if (expr->target().is_identical_to(current_info()->closure())) {
+        if (expr->target().is_identical_to(info()->closure())) {
           graph()->MarkRecursive();
         }
 
@@ -9221,13 +9231,13 @@
         // Bail out if we try to mutate a parameter value in a function
         // using the arguments object.  We do not (yet) correctly handle the
         // arguments property of the function.
-        if (current_info()->scope()->arguments() != NULL) {
+        if (info()->scope()->arguments() != NULL) {
          // Parameters will be rewritten to context slots.  We have no
          // direct way to detect that the variable is a parameter so we
          // use a linear search of the parameter list.
-          int count = current_info()->scope()->num_parameters();
+          int count = info()->scope()->num_parameters();
           for (int i = 0; i < count; ++i) {
-            if (var == current_info()->scope()->parameter(i)) {
+            if (var == info()->scope()->parameter(i)) {
               return Bailout("assignment to parameter in arguments object");
             }
           }
@@ -9826,10 +9836,10 @@
     VariableProxy* proxy = expr->right()->AsVariableProxy();
     bool global_function = (proxy != NULL) && proxy->var()->IsUnallocated();
     if (global_function &&
-        current_info()->has_global_object() &&
-        !current_info()->global_object()->IsAccessCheckNeeded()) {
+        info()->has_global_object() &&
+        !info()->global_object()->IsAccessCheckNeeded()) {
       Handle<String> name = proxy->name();
-      Handle<GlobalObject> global(current_info()->global_object());
+      Handle<GlobalObject> global(info()->global_object());
       LookupResult lookup(isolate());
       global->Lookup(*name, &lookup);
       if (lookup.IsNormal() && lookup.GetValue()->IsJSFunction()) {
@@ -9921,19 +9931,22 @@
   ASSERT(!HasStackOverflow());
   ASSERT(current_block() != NULL);
   ASSERT(current_block()->HasPredecessor());
-  EqualityKind kind =
-      expr->op() == Token::EQ_STRICT ? kStrictEquality : kNonStrictEquality;
   HIfContinuation continuation;
   CompareNilICStub::Types types;
-  if (kind == kStrictEquality) {
-    types.Add((nil == kNullValue) ? CompareNilICStub::NULL_TYPE :
-                                    CompareNilICStub::UNDEFINED);
-  } else {
-    types = CompareNilICStub::Types(expr->compare_nil_types());
-    if (types.IsEmpty()) types = CompareNilICStub::Types::FullCompare();
+  if (expr->op() == Token::EQ_STRICT) {
+    IfBuilder if_nil(this);
+    if_nil.If<HCompareObjectEqAndBranch>(
+        value, (nil == kNullValue) ? graph()->GetConstantNull()
+                                   : graph()->GetConstantUndefined());
+    if_nil.Then();
+    if_nil.Else();
+    if_nil.CaptureContinuation(&continuation);
+    return ast_context()->ReturnContinuation(&continuation, expr->id());
   }
+  types = CompareNilICStub::Types(expr->compare_nil_types());
+  if (types.IsEmpty()) types = CompareNilICStub::Types::FullCompare();
   Handle<Map> map_handle = expr->map();
-  BuildCompareNil(value, kind, types, map_handle,
+  BuildCompareNil(value, types, map_handle,
                   expr->position(), &continuation);
   return ast_context()->ReturnContinuation(&continuation, expr->id());
 }
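
The strict-equality path above no longer goes through the CompareNil IC at all: `x === null` can only ever match the null oddball itself, so a single object-identity branch against the cached constant suffices. A minimal standalone sketch of the semantics being compiled (types and names are illustrative, not V8's; full JS non-strict comparison additionally matches undetectable objects, elided here):

    #include <cassert>

    enum Nil { kNull, kUndefined };
    struct Value { bool is_null; bool is_undefined; };

    // Strict compare-nil matches exactly one oddball; non-strict treats
    // null and undefined interchangeably.
    static bool CompareNil(const Value& v, Nil nil, bool strict) {
      if (strict) return nil == kNull ? v.is_null : v.is_undefined;
      return v.is_null || v.is_undefined;
    }

    int main() {
      Value undefined_value = { false, true };
      assert(!CompareNil(undefined_value, kNull, true));   // undefined === null
      assert(CompareNil(undefined_value, kNull, false));   // undefined == null
    }
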
@@ -10269,9 +10282,9 @@
     Handle<FixedArray> array =
        isolate()->factory()->NewFixedArray(globals_.length(), TENURED);
     for (int i = 0; i < globals_.length(); ++i) array->set(i, *globals_.at(i));
-    int flags = DeclareGlobalsEvalFlag::encode(current_info()->is_eval()) |
-        DeclareGlobalsNativeFlag::encode(current_info()->is_native()) |
-        DeclareGlobalsLanguageMode::encode(current_info()->language_mode());
+    int flags = DeclareGlobalsEvalFlag::encode(info()->is_eval()) |
+                DeclareGlobalsNativeFlag::encode(info()->is_native()) |
+                DeclareGlobalsLanguageMode::encode(info()->language_mode());
     HInstruction* result = new(zone()) HDeclareGlobals(
         environment()->LookupContext(), array, flags);
     AddInstruction(result);
@@ -10325,8 +10338,8 @@
   switch (variable->location()) {
     case Variable::UNALLOCATED: {
       globals_.Add(variable->name(), zone());
-      Handle<SharedFunctionInfo> function = Compiler::BuildFunctionInfo(
-          declaration->fun(), current_info()->script());
+      Handle<SharedFunctionInfo> function =
+          Compiler::BuildFunctionInfo(declaration->fun(), info()->script());
       // Check for stack-overflow exception.
       if (function.is_null()) return SetStackOverflow();
       globals_.Add(function, zone());
diff --git a/src/hydrogen.h b/src/hydrogen.h
index eb6473a..ad89e50 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -402,15 +402,13 @@
   }
 
   void MarkDependsOnEmptyArrayProtoElements() {
-    // Add map dependency if not already added.
-    if (depends_on_empty_array_proto_elements_) return;
-    isolate()->initial_object_prototype()->map()->AddDependentCompilationInfo(
-        DependentCode::kElementsCantBeAddedGroup, info());
-    isolate()->initial_array_prototype()->map()->AddDependentCompilationInfo(
-        DependentCode::kElementsCantBeAddedGroup, info());
     depends_on_empty_array_proto_elements_ = true;
   }
 
+  bool depends_on_empty_array_proto_elements() {
+    return depends_on_empty_array_proto_elements_;
+  }
+
   void RecordUint32Instruction(HInstruction* instr) {
     if (uint32_instructions_ == NULL) {
       uint32_instructions_ = new(zone()) ZoneList<HInstruction*>(4, zone());
@@ -970,7 +968,6 @@
   Zone* zone() const { return info_->zone(); }
   HGraph* graph() const { return graph_; }
   Isolate* isolate() const { return graph_->isolate(); }
-  CompilationInfo* top_info() { return info_; }
 
   HGraph* CreateGraph();
 
@@ -1353,7 +1350,6 @@
 
   void BuildCompareNil(
       HValue* value,
-      EqualityKind kind,
       CompareNilICStub::Types types,
       Handle<Map> map,
       int position,
@@ -1492,7 +1488,7 @@
   void set_ast_context(AstContext* context) { ast_context_ = context; }
 
   // Accessors forwarded to the function state.
-  CompilationInfo* current_info() const {
+  CompilationInfo* info() const {
     return function_state()->compilation_info();
   }
   AstContext* call_context() const {
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 7b32f1b..c0b2abd 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -2351,7 +2351,7 @@
 
 
 void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
-  ASSERT(CpuFeatures::IsSupported(SSE4_1));
+  ASSERT(IsEnabled(SSE4_1));
   ASSERT(is_uint8(imm8));
   EnsureSpace ensure_space(this);
   EMIT(0x66);
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 7aff6e1..c77faaa 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -4768,18 +4768,14 @@
   VisitForAccumulatorValue(sub_expr);
   PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
 
-  EqualityKind kind = expr->op() == Token::EQ_STRICT
-      ? kStrictEquality : kNonStrictEquality;
   Handle<Object> nil_value = nil == kNullValue
       ? isolate()->factory()->null_value()
       : isolate()->factory()->undefined_value();
-  if (kind == kStrictEquality) {
+  if (expr->op() == Token::EQ_STRICT) {
     __ cmp(eax, nil_value);
     Split(equal, if_true, if_false, fall_through);
   } else {
-    Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(),
-                                                         kNonStrictEquality,
-                                                         nil);
+    Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
     CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
     __ test(eax, eax);
     Split(not_zero, if_true, if_false, fall_through);
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index bbecdcc..7d685bf 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -109,7 +109,20 @@
   if (!info()->IsStub()) {
     Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
   }
-  info()->CommitDependentMaps(code);
+  for (int i = 0; i < prototype_maps_.length(); i++) {
+    prototype_maps_.at(i)->AddDependentCode(
+        DependentCode::kPrototypeCheckGroup, code);
+  }
+  for (int i = 0; i < transition_maps_.length(); i++) {
+    transition_maps_.at(i)->AddDependentCode(
+        DependentCode::kTransitionGroup, code);
+  }
+  if (graph()->depends_on_empty_array_proto_elements()) {
+    isolate()->initial_object_prototype()->map()->AddDependentCode(
+        DependentCode::kElementsCantBeAddedGroup, code);
+    isolate()->initial_array_prototype()->map()->AddDependentCode(
+        DependentCode::kElementsCantBeAddedGroup, code);
+  }
 }
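
With CompilationInfo out of the dependency bookkeeping, each LCodeGen now records the interesting maps in plain ZoneLists while emitting instructions and commits them against the finished Code object in a single pass here; the mips and x64 FinishCode hunks below repeat the same pattern. A minimal standalone model of that record-then-commit shape, with illustrative types:

    #include <utility>
    #include <vector>

    enum DependencyGroup { kPrototypeCheckGroup, kTransitionGroup };
    struct Code {};

    struct Map {
      std::vector<std::pair<DependencyGroup, Code*> > dependents;
      void AddDependentCode(DependencyGroup group, Code* code) {
        dependents.push_back(std::make_pair(group, code));
      }
    };

    struct CodeGen {
      std::vector<Map*> prototype_maps_;   // filled while emitting checks
      std::vector<Map*> transition_maps_;  // filled while emitting stores
      void FinishCode(Code* code) {        // one commit pass at the end
        for (size_t i = 0; i < prototype_maps_.size(); i++)
          prototype_maps_[i]->AddDependentCode(kPrototypeCheckGroup, code);
        for (size_t i = 0; i < transition_maps_.size(); i++)
          transition_maps_[i]->AddDependentCode(kTransitionGroup, code);
      }
    };

    int main() {
      Map map;
      Code code;
      CodeGen cgen;
      cgen.transition_maps_.push_back(&map);
      cgen.FinishCode(&code);
      return map.dependents.size() == 1 ? 0 : 1;
    }
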
 
 
@@ -1338,8 +1351,7 @@
 void LCodeGen::DoDivI(LDivI* instr) {
   if (!instr->is_flooring() && instr->hydrogen()->HasPowerOf2Divisor()) {
     Register dividend = ToRegister(instr->left());
-    int32_t divisor =
-        HConstant::cast(instr->hydrogen()->right())->Integer32Value();
+    int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant();
     int32_t test_value = 0;
     int32_t power = 0;
 
@@ -1362,10 +1374,26 @@
     }
 
     if (test_value != 0) {
-      // Deoptimize if remainder is not 0.
-      __ test(dividend, Immediate(test_value));
-      DeoptimizeIf(not_zero, instr->environment());
-      __ sar(dividend, power);
+      if (instr->hydrogen()->CheckFlag(
+          HInstruction::kAllUsesTruncatingToInt32)) {
+        Label done, negative;
+        __ cmp(dividend, 0);
+        __ j(less, &negative, Label::kNear);
+        __ sar(dividend, power);
+        __ jmp(&done, Label::kNear);
+
+        __ bind(&negative);
+        __ neg(dividend);
+        __ sar(dividend, power);
+        if (divisor > 0) __ neg(dividend);
+        __ bind(&done);
+        return;  // Don't fall through to "__ neg" below.
+      } else {
+        // Deoptimize if remainder is not 0.
+        __ test(dividend, Immediate(test_value));
+        DeoptimizeIf(not_zero, instr->environment());
+        __ sar(dividend, power);
+      }
     }
 
     if (divisor < 0) __ neg(dividend);
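
The new branch handles JavaScript's truncate-toward-zero division when every use of the result truncates anyway: an arithmetic right shift floors, so a bare sar would turn -13 / 4 into -4 instead of -3, and the old code had to deoptimize on any nonzero remainder. Negating around the shift restores truncation. A sketch of the same computation for a positive power-of-two divisor (INT_MIN aside, as in the generated code):

    #include <cassert>

    // dividend / 4 with truncation, using only shift and negate, as in
    // the neg/sar/neg sequence emitted above for negative dividends.
    static int TruncatingShiftDiv4(int dividend) {
      if (dividend >= 0) return dividend >> 2;   // sar alone truncates here
      int quotient = (-dividend) >> 2;           // neg; sar
      return -quotient;                          // neg (divisor > 0)
    }

    int main() {
      assert((-13 >> 2) == -4);                  // plain sar floors
      assert(TruncatingShiftDiv4(-13) == -3);    // matches -13 / 4 in C and JS
      assert(TruncatingShiftDiv4(13) == 3);
    }
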
@@ -1412,11 +1440,7 @@
   __ cdq();
   __ idiv(right_reg);
 
-  if (!instr->is_flooring()) {
-    // Deoptimize if remainder is not 0.
-    __ test(edx, Operand(edx));
-    DeoptimizeIf(not_zero, instr->environment());
-  } else {
+  if (instr->is_flooring()) {
     Label done;
     __ test(edx, edx);
     __ j(zero, &done, Label::kNear);
@@ -1424,6 +1448,11 @@
     __ sar(edx, 31);
     __ add(eax, edx);
     __ bind(&done);
+  } else if (!instr->hydrogen()->CheckFlag(
+      HInstruction::kAllUsesTruncatingToInt32)) {
+    // Deoptimize if remainder is not 0.
+    __ test(edx, Operand(edx));
+    DeoptimizeIf(not_zero, instr->environment());
   }
 }
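
Math.floor-style division (the flooring case above) relates to the truncated quotient that idiv produces by a single conditional decrement: subtract one exactly when the remainder is nonzero and its sign disagrees with the divisor. The fixup above derives that decrement from the remainder left in edx for the cases this instruction handles; a portable sketch of the identity:

    #include <cassert>

    static int FloorDiv(int a, int b) {
      int q = a / b;   // C++ '/' truncates toward zero, like idiv
      int r = a % b;
      if (r != 0 && ((r < 0) != (b < 0))) q--;
      return q;
    }

    int main() {
      assert(-7 / 2 == -3);           // truncated
      assert(FloorDiv(-7, 2) == -4);  // floored
      assert(FloorDiv(7, 2) == 3);    // same when signs agree
    }
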
 
@@ -4302,6 +4331,9 @@
   }
 
   if (!transition.is_null()) {
+    if (transition->CanBeDeprecated()) {
+      transition_maps_.Add(transition, info()->zone());
+    }
     if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
       __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
     } else {
@@ -5977,7 +6009,11 @@
 
   ASSERT(prototypes->length() == maps->length());
 
-  if (!instr->hydrogen()->CanOmitPrototypeChecks()) {
+  if (instr->hydrogen()->CanOmitPrototypeChecks()) {
+    for (int i = 0; i < maps->length(); i++) {
+      prototype_maps_.Add(maps->at(i), info()->zone());
+    }
+  } else {
     for (int i = 0; i < prototypes->length(); i++) {
       __ LoadHeapObject(reg, prototypes->at(i));
       DoCheckMapCommon(reg, maps->at(i), instr);
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 33f0eda..647dd0e 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -58,6 +58,8 @@
         deoptimizations_(4, info->zone()),
         jump_table_(4, info->zone()),
         deoptimization_literals_(8, info->zone()),
+        prototype_maps_(0, info->zone()),
+        transition_maps_(0, info->zone()),
         inlined_function_count_(0),
         scope_(info->scope()),
         status_(UNUSED),
@@ -407,6 +409,8 @@
   ZoneList<LEnvironment*> deoptimizations_;
   ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
   ZoneList<Handle<Object> > deoptimization_literals_;
+  ZoneList<Handle<Map> > prototype_maps_;
+  ZoneList<Handle<Map> > transition_maps_;
   int inlined_function_count_;
   Scope* const scope_;
   Status status_;
diff --git a/src/ic.cc b/src/ic.cc
index db1cb95..94e8773 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -2943,16 +2943,8 @@
 }
 
 
-MaybeObject* CompareNilIC::DoCompareNilSlow(EqualityKind kind,
-                                            NilValue nil,
+MaybeObject* CompareNilIC::DoCompareNilSlow(NilValue nil,
                                             Handle<Object> object) {
-  if (kind == kStrictEquality) {
-    if (nil == kNullValue) {
-      return Smi::FromInt(object->IsNull());
-    } else {
-      return Smi::FromInt(object->IsUndefined());
-    }
-  }
   if (object->IsNull() || object->IsUndefined()) {
     return Smi::FromInt(true);
   }
@@ -2973,7 +2965,6 @@
   stub.Record(object);
   old_types.TraceTransition(stub.GetTypes());
 
-  EqualityKind kind = stub.GetKind();
   NilValue nil = stub.GetNilValue();
 
   // Find or create the specialized stub to support the new set of types.
@@ -2987,7 +2978,7 @@
     code = stub.GetCode(isolate());
   }
   set_target(*code);
-  return DoCompareNilSlow(kind, nil, object);
+  return DoCompareNilSlow(nil, object);
 }
 
 
diff --git a/src/ic.h b/src/ic.h
index dadb743..8c448eb 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -789,8 +789,7 @@
 
   static void Clear(Address address, Code* target);
 
-  static MUST_USE_RESULT MaybeObject* DoCompareNilSlow(EqualityKind kind,
-                                                       NilValue nil,
+  static MUST_USE_RESULT MaybeObject* DoCompareNilSlow(NilValue nil,
                                                        Handle<Object> object);
 };
 
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 99c5b48..dc2db4b 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -1002,6 +1002,10 @@
     Code* code = shared->code();
     MarkBit code_mark = Marking::MarkBitFrom(code);
     if (!code_mark.Get()) {
+      if (FLAG_trace_code_flushing && shared->is_compiled()) {
+        SmartArrayPointer<char> name = shared->DebugName()->ToCString();
+        PrintF("[code-flushing clears: %s]\n", *name);
+      }
       shared->set_code(lazy_compile);
       candidate->set_code(lazy_compile);
     } else {
@@ -1039,6 +1043,10 @@
     Code* code = candidate->code();
     MarkBit code_mark = Marking::MarkBitFrom(code);
     if (!code_mark.Get()) {
+      if (FLAG_trace_code_flushing && candidate->is_compiled()) {
+        SmartArrayPointer<char> name = candidate->DebugName()->ToCString();
+        PrintF("[code-flushing clears: %s]\n", *name);
+      }
       candidate->set_code(lazy_compile);
     }
 
@@ -1122,6 +1130,11 @@
   // Make sure previous flushing decisions are revisited.
   isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
 
+  if (FLAG_trace_code_flushing) {
+    SmartArrayPointer<char> name = shared_info->DebugName()->ToCString();
+    PrintF("[code-flushing abandons function-info: %s]\n", *name);
+  }
+
   SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
   SharedFunctionInfo* next_candidate;
   if (candidate == shared_info) {
@@ -1153,6 +1166,11 @@
   isolate_->heap()->incremental_marking()->RecordWrites(function);
   isolate_->heap()->incremental_marking()->RecordWrites(function->shared());
 
+  if (FLAG_trace_code_flushing) {
+    SmartArrayPointer<char> name = function->shared()->DebugName()->ToCString();
+    PrintF("[code-flushing abandons closure: %s]\n", *name);
+  }
+
   JSFunction* candidate = jsfunction_candidates_head_;
   JSFunction* next_candidate;
   if (candidate == function) {
@@ -1183,6 +1201,11 @@
   // Make sure previous flushing decisions are revisited.
   isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
 
+  if (FLAG_trace_code_flushing) {
+    SmartArrayPointer<char> name = code_map_holder->DebugName()->ToCString();
+    PrintF("[code-flushing abandons code-map: %s]\n", *name);
+  }
+
   SharedFunctionInfo* holder = optimized_code_map_holder_head_;
   SharedFunctionInfo* next_holder;
   if (holder == code_map_holder) {
@@ -2481,12 +2504,11 @@
   int number_of_entries = starts.number_of_entries();
   if (number_of_entries == 0) return;
   for (int i = 0; i < number_of_entries; i++) {
-    if (!entries->is_code_at(i)) continue;
     Code* code = entries->code_at(i);
     if (IsMarked(code) && !code->marked_for_deoptimization()) {
       code->set_marked_for_deoptimization(true);
     }
-    entries->clear_at(i);
+    entries->clear_code_at(i);
   }
   map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
 }
@@ -2503,15 +2525,14 @@
   for (int g = 0; g < DependentCode::kGroupCount; g++) {
     int group_number_of_entries = 0;
     for (int i = starts.at(g); i < starts.at(g + 1); i++) {
-      if (!entries->is_code_at(i)) continue;
       Code* code = entries->code_at(i);
       if (IsMarked(code) && !code->marked_for_deoptimization()) {
         if (new_number_of_entries + group_number_of_entries != i) {
-          entries->set_object_at(
-              new_number_of_entries + group_number_of_entries, code);
+          entries->set_code_at(new_number_of_entries +
+                               group_number_of_entries, code);
         }
-        Object** slot = entries->slot_at(new_number_of_entries +
-                                         group_number_of_entries);
+        Object** slot = entries->code_slot_at(new_number_of_entries +
+                                              group_number_of_entries);
         RecordSlot(slot, slot, code);
         group_number_of_entries++;
       }
@@ -2522,7 +2543,7 @@
     new_number_of_entries += group_number_of_entries;
   }
   for (int i = new_number_of_entries; i < number_of_entries; i++) {
-    entries->clear_at(i);
+    entries->clear_code_at(i);
   }
 }
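
All four trace points above key off FLAG_trace_code_flushing (conventionally spelled --trace-code-flushing on the command line, assuming V8's usual underscore-to-dash flag mapping), so one flushing cycle can be followed end to end. With hypothetical function names, the output takes the forms printed above:

    [code-flushing clears: outer]
    [code-flushing abandons function-info: inner]
    [code-flushing abandons closure: inner]
    [code-flushing abandons code-map: inner]
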
 
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
index 4844263..eee79a2 100644
--- a/src/mips/assembler-mips.cc
+++ b/src/mips/assembler-mips.cc
@@ -2198,7 +2198,7 @@
   bool in_range = (ipc ^ static_cast<uint32_t>(itarget) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
   uint32_t target_field =
-      static_cast<uint32_t>(itarget & kJumpAddrMask) >>kImmFieldShift;
+      static_cast<uint32_t>(itarget & kJumpAddrMask) >> kImmFieldShift;
   bool patched_jump = false;
 
 #ifndef ALLOW_JAL_IN_BOUNDARY_REGION
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 2d8c694..7368ead 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -4799,19 +4799,15 @@
 
   VisitForAccumulatorValue(sub_expr);
   PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  EqualityKind kind = expr->op() == Token::EQ_STRICT
-      ? kStrictEquality : kNonStrictEquality;
   __ mov(a0, result_register());
-  if (kind == kStrictEquality) {
+  if (expr->op() == Token::EQ_STRICT) {
     Heap::RootListIndex nil_value = nil == kNullValue ?
         Heap::kNullValueRootIndex :
         Heap::kUndefinedValueRootIndex;
     __ LoadRoot(a1, nil_value);
     Split(eq, a0, Operand(a1), if_true, if_false, fall_through);
   } else {
-    Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(),
-                                                         kNonStrictEquality,
-                                                         nil);
+    Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
     CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
     Split(ne, v0, Operand(zero_reg), if_true, if_false, fall_through);
   }
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index 76b9663..1c8973f 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -87,7 +87,20 @@
     RegisterDependentCodeForEmbeddedMaps(code);
   }
   PopulateDeoptimizationData(code);
-  info()->CommitDependentMaps(code);
+  for (int i = 0; i < prototype_maps_.length(); i++) {
+    prototype_maps_.at(i)->AddDependentCode(
+        DependentCode::kPrototypeCheckGroup, code);
+  }
+  for (int i = 0; i < transition_maps_.length(); i++) {
+    transition_maps_.at(i)->AddDependentCode(
+        DependentCode::kTransitionGroup, code);
+  }
+  if (graph()->depends_on_empty_array_proto_elements()) {
+    isolate()->initial_object_prototype()->map()->AddDependentCode(
+        DependentCode::kElementsCantBeAddedGroup, code);
+    isolate()->initial_array_prototype()->map()->AddDependentCode(
+        DependentCode::kElementsCantBeAddedGroup, code);
+  }
 }
 
 
@@ -3976,6 +3989,9 @@
   }
 
   if (!transition.is_null()) {
+    if (transition->CanBeDeprecated()) {
+      transition_maps_.Add(transition, info()->zone());
+    }
     __ li(scratch, Operand(transition));
     __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
     if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
@@ -5126,7 +5142,11 @@
 
   ASSERT(prototypes->length() == maps->length());
 
-  if (!instr->hydrogen()->CanOmitPrototypeChecks()) {
+  if (instr->hydrogen()->CanOmitPrototypeChecks()) {
+    for (int i = 0; i < maps->length(); i++) {
+      prototype_maps_.Add(maps->at(i), info()->zone());
+    }
+  } else {
     for (int i = 0; i < prototypes->length(); i++) {
       __ LoadHeapObject(prototype_reg, prototypes->at(i));
       __ lw(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
diff --git a/src/mips/lithium-codegen-mips.h b/src/mips/lithium-codegen-mips.h
index ee01383..a208c40 100644
--- a/src/mips/lithium-codegen-mips.h
+++ b/src/mips/lithium-codegen-mips.h
@@ -55,6 +55,8 @@
         deoptimizations_(4, info->zone()),
         deopt_jump_table_(4, info->zone()),
         deoptimization_literals_(8, info->zone()),
+        prototype_maps_(0, info->zone()),
+        transition_maps_(0, info->zone()),
         inlined_function_count_(0),
         scope_(info->scope()),
         status_(UNUSED),
@@ -410,6 +412,8 @@
   ZoneList<LEnvironment*> deoptimizations_;
   ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
   ZoneList<Handle<Object> > deoptimization_literals_;
+  ZoneList<Handle<Map> > prototype_maps_;
+  ZoneList<Handle<Map> > transition_maps_;
   int inlined_function_count_;
   Scope* const scope_;
   Status status_;
diff --git a/src/mips/simulator-mips.cc b/src/mips/simulator-mips.cc
index baf042c..d8a39ab 100644
--- a/src/mips/simulator-mips.cc
+++ b/src/mips/simulator-mips.cc
@@ -2091,7 +2091,7 @@
               set_fpu_register_double(fd_reg, fs / ft);
               break;
             case ABS_D:
-              set_fpu_register_double(fd_reg, fs < 0 ? -fs : fs);
+              set_fpu_register_double(fd_reg, fabs(fs));
               break;
             case MOV_D:
               set_fpu_register_double(fd_reg, fs);
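
The comparison-based ABS_D being replaced above is subtly wrong for negative zero: IEEE 754 defines -0.0 < 0 as false, so `fs < 0 ? -fs : fs` passes -0.0 through with its sign bit intact, while fabs clears the sign bit unconditionally. A two-line demonstration:

    #include <cmath>
    #include <cstdio>

    int main() {
      double fs = -0.0;
      double ternary = fs < 0 ? -fs : fs;              // branch not taken
      std::printf("%g %g\n", ternary, std::fabs(fs));  // prints: -0 0
    }
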
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 581935f..e60f0f3 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -3659,6 +3659,17 @@
 }
 
 
+void Map::AddDependentCode(DependentCode::DependencyGroup group,
+                           Handle<Code> code) {
+  Handle<DependentCode> codes =
+      DependentCode::Insert(Handle<DependentCode>(dependent_code()),
+                             group, code);
+  if (*codes != dependent_code()) {
+    set_dependent_code(*codes);
+  }
+}
+
+
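
The inline AddDependentCode relies on DependentCode::Insert possibly returning a brand-new, larger FixedArray: the map rewrites its dependent_code field only when the array actually moved, skipping a redundant store (and, presumably, its write barrier) on the common in-place path. A standalone model of that shape with illustrative types; the superseded array would be reclaimed by the GC in V8 and is simply dropped here:

    #include <vector>

    typedef std::vector<int> DependentCodes;

    // May return a different object than it was given.
    static DependentCodes* Insert(DependentCodes* entries, int code) {
      if (entries->size() == entries->capacity()) {
        DependentCodes* grown = new DependentCodes(*entries);
        grown->reserve(entries->capacity() * 2 + 1);  // grow, copying entries
        grown->push_back(code);
        return grown;
      }
      entries->push_back(code);
      return entries;  // same storage, caller writes nothing back
    }

    struct Map {
      DependentCodes* dependent_code_;
      void AddDependentCode(int code) {
        DependentCodes* codes = Insert(dependent_code_, code);
        if (codes != dependent_code_) dependent_code_ = codes;
      }
    };

    int main() {
      Map map;
      map.dependent_code_ = new DependentCodes();
      for (int i = 0; i < 100; i++) map.AddDependentCode(i);
      return map.dependent_code_->size() == 100 ? 0 : 1;
    }
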
 int DependentCode::number_of_entries(DependencyGroup group) {
   if (length() == 0) return 0;
   return Smi::cast(get(group))->value();
@@ -3670,52 +3681,32 @@
 }
 
 
-bool DependentCode::is_code_at(int i) {
-  return get(kCodesStartIndex + i)->IsCode();
-}
-
 Code* DependentCode::code_at(int i) {
   return Code::cast(get(kCodesStartIndex + i));
 }
 
 
-CompilationInfo* DependentCode::compilation_info_at(int i) {
-  return reinterpret_cast<CompilationInfo*>(
-      Foreign::cast(get(kCodesStartIndex + i))->foreign_address());
+void DependentCode::set_code_at(int i, Code* value) {
+  set(kCodesStartIndex + i, value);
 }
 
 
-void DependentCode::set_object_at(int i, Object* object) {
-  set(kCodesStartIndex + i, object);
-}
-
-
-Object* DependentCode::object_at(int i) {
-  return get(kCodesStartIndex + i);
-}
-
-
-Object** DependentCode::slot_at(int i) {
+Object** DependentCode::code_slot_at(int i) {
   return HeapObject::RawField(
       this, FixedArray::OffsetOfElementAt(kCodesStartIndex + i));
 }
 
 
-void DependentCode::clear_at(int i) {
+void DependentCode::clear_code_at(int i) {
   set_undefined(kCodesStartIndex + i);
 }
 
 
-void DependentCode::copy(int from, int to) {
-  set(kCodesStartIndex + to, get(kCodesStartIndex + from));
-}
-
-
 void DependentCode::ExtendGroup(DependencyGroup group) {
   GroupStartIndexes starts(this);
   for (int g = kGroupCount - 1; g > group; g--) {
     if (starts.at(g) < starts.at(g + 1)) {
-      copy(starts.at(g), starts.at(g + 1));
+      set_code_at(starts.at(g + 1), code_at(starts.at(g)));
     }
   }
 }
diff --git a/src/objects.cc b/src/objects.cc
index fc42fc7..6512c60 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -11091,24 +11091,6 @@
 }
 
 
-void Map::AddDependentCompilationInfo(DependentCode::DependencyGroup group,
-                                      CompilationInfo* info) {
-  Handle<DependentCode> dep(dependent_code());
-  Handle<DependentCode> codes =
-      DependentCode::Insert(dep, group, info->object_wrapper());
-  if (*codes != dependent_code()) set_dependent_code(*codes);
-  info->dependent_maps(group)->Add(Handle<Map>(this), info->zone());
-}
-
-
-void Map::AddDependentCode(DependentCode::DependencyGroup group,
-                           Handle<Code> code) {
-  Handle<DependentCode> codes = DependentCode::Insert(
-      Handle<DependentCode>(dependent_code()), group, code);
-  if (*codes != dependent_code()) set_dependent_code(*codes);
-}
-
-
 DependentCode::GroupStartIndexes::GroupStartIndexes(DependentCode* entries) {
   Recompute(entries);
 }
@@ -11125,13 +11107,13 @@
 
 Handle<DependentCode> DependentCode::Insert(Handle<DependentCode> entries,
                                             DependencyGroup group,
-                                            Handle<Object> object) {
+                                            Handle<Code> value) {
   GroupStartIndexes starts(*entries);
   int start = starts.at(group);
   int end = starts.at(group + 1);
   int number_of_entries = starts.number_of_entries();
-  if (start < end && entries->object_at(end - 1) == *object) {
-    // Do not append the compilation info if it is already in the array.
+  if (start < end && entries->code_at(end - 1) == *value) {
+    // Do not append the code if it is already in the array.
     // It is sufficient to just check only the last element because
     // we process embedded maps of an optimized code in one batch.
     return entries;
@@ -11148,7 +11130,7 @@
     end = starts.at(group + 1);
     number_of_entries = starts.number_of_entries();
     for (int i = 0; i < number_of_entries; i++) {
-      entries->clear_at(i);
+      entries->clear_code_at(i);
     }
     // If the old fixed array was empty, we need to reset counters of the
     // new array.
@@ -11160,78 +11142,17 @@
     entries = new_entries;
   }
   entries->ExtendGroup(group);
-  entries->set_object_at(end, *object);
+  entries->set_code_at(end, *value);
   entries->set_number_of_entries(group, end + 1 - start);
   return entries;
 }
 
 
-void DependentCode::UpdateToFinishedCode(DependencyGroup group,
-                                         CompilationInfo* info,
-                                         Code* code) {
-  DisallowHeapAllocation no_gc;
-  AllowDeferredHandleDereference get_object_wrapper;
-  Foreign* info_wrapper = *info->object_wrapper();
-  GroupStartIndexes starts(this);
-  int start = starts.at(group);
-  int end = starts.at(group + 1);
-  for (int i = start; i < end; i++) {
-    if (object_at(i) == info_wrapper) {
-      set_object_at(i, code);
-      break;
-    }
-  }
-
-#ifdef DEBUG
-  for (int i = start; i < end; i++) {
-    ASSERT(is_code_at(i) || compilation_info_at(i) != info);
-  }
-#endif
-}
-
-
-void DependentCode::RemoveCompilationInfo(DependentCode::DependencyGroup group,
-                                          CompilationInfo* info) {
-  DisallowHeapAllocation no_allocation;
-  AllowDeferredHandleDereference get_object_wrapper;
-  Foreign* info_wrapper = *info->object_wrapper();
-  GroupStartIndexes starts(this);
-  int start = starts.at(group);
-  int end = starts.at(group + 1);
-  // Find compilation info wrapper.
-  int info_pos = -1;
-  for (int i = start; i < end; i++) {
-    if (object_at(i) == info_wrapper) {
-      info_pos = i;
-      break;
-    }
-  }
-  if (info_pos == -1) return;  // Not found.
-  int gap = info_pos;
-  // Use the last of each group to fill the gap in the previous group.
-  for (int i = group; i < kGroupCount; i++) {
-    int last_of_group = starts.at(group + 1) - 1;
-    ASSERT(last_of_group >= gap);
-    if (last_of_group == gap) continue;
-    copy(last_of_group, gap);
-    gap = last_of_group;
-  }
-  clear_at(gap);  // Clear last gap.
-  set_number_of_entries(group, end - start - 1);
-
-#ifdef DEBUG
-  for (int i = start; i < end - 1; i++) {
-    ASSERT(is_code_at(i) || compilation_info_at(i) != info);
-  }
-#endif
-}
-
-
 bool DependentCode::Contains(DependencyGroup group, Code* code) {
   GroupStartIndexes starts(this);
-  int number_of_entries = starts.number_of_code_entries();
+  int number_of_entries = starts.at(kGroupCount);
   for (int i = 0; i < number_of_entries; i++) {
-    if (object_at(i) == code) return true;
+    if (code_at(i) == code) return true;
   }
   return false;
 }
@@ -11252,25 +11173,20 @@
   DependentCode::GroupStartIndexes starts(this);
   int start = starts.at(group);
   int end = starts.at(group + 1);
-  int code_entries = starts.number_of_code_entries();
+  int number_of_entries = starts.at(DependentCode::kGroupCount);
   if (start == end) return;
   for (int i = start; i < end; i++) {
-    if (is_code_at(i)) {
-      Code* code = code_at(i);
-      code->set_marked_for_deoptimization(true);
-    } else {
-      CompilationInfo* info = compilation_info_at(i);
-      info->AbortDueToDependentMap();
-    }
+    Code* code = code_at(i);
+    code->set_marked_for_deoptimization(true);
   }
   // Compact the array by moving all subsequent groups to fill in the new holes.
-  for (int src = end, dst = start; src < code_entries; src++, dst++) {
-    copy(src, dst);
+  for (int src = end, dst = start; src < number_of_entries; src++, dst++) {
+    set_code_at(dst, code_at(src));
   }
   // Now the holes are at the array's end; zap them for the heap verifier.
   int removed = end - start;
-  for (int i = code_entries - removed; i < code_entries; i++) {
-    clear_at(i);
+  for (int i = number_of_entries - removed; i < number_of_entries; i++) {
+    clear_code_at(i);
   }
   set_number_of_entries(group, 0);
   DeoptimizeDependentCodeFilter filter;
diff --git a/src/objects.h b/src/objects.h
index 88feac5..1ee31b6 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1433,7 +1433,7 @@
   static inline HeapObject* cast(Object* obj);
 
   // Return the write barrier mode for this. Callers of this function
-  // must be able to present a reference to an AssertNoAllocation
+  // must be able to present a reference to a DisallowHeapAllocation
   // object as a sign that they are not going to use this function
   // from code that allocates and thus invalidates the returned write
   // barrier mode.
@@ -4980,8 +4980,6 @@
 };
 
 
-class CompilationInfo;
-
 // This class describes the layout of dependent codes array of a map. The
 // array is partitioned into several groups of dependent codes. Each group
 // contains codes with the same dependency on the map. The array has the
@@ -5029,23 +5027,14 @@
     void Recompute(DependentCode* entries);
     int at(int i) { return start_indexes_[i]; }
     int number_of_entries() { return start_indexes_[kGroupCount]; }
-    int number_of_code_entries() {
-      return start_indexes_[kGroupCount];
-    }
    private:
     int start_indexes_[kGroupCount + 1];
   };
 
   bool Contains(DependencyGroup group, Code* code);
   static Handle<DependentCode> Insert(Handle<DependentCode> entries,
-                                      DependencyGroup group,
-                                      Handle<Object> object);
-  void UpdateToFinishedCode(DependencyGroup group,
-                            CompilationInfo* info,
-                            Code* code);
-  void RemoveCompilationInfo(DependentCode::DependencyGroup group,
-                             CompilationInfo* info);
-
+                                      DependencyGroup group,
+                                      Handle<Code> value);
   void DeoptimizeDependentCodeGroup(Isolate* isolate,
                                     DependentCode::DependencyGroup group);
 
@@ -5053,14 +5042,10 @@
   // and the mark compact collector.
   inline int number_of_entries(DependencyGroup group);
   inline void set_number_of_entries(DependencyGroup group, int value);
-  inline bool is_code_at(int i);
   inline Code* code_at(int i);
-  inline CompilationInfo* compilation_info_at(int i);
-  inline void set_object_at(int i, Object* object);
-  inline Object** slot_at(int i);
-  inline Object* object_at(int i);
-  inline void clear_at(int i);
-  inline void copy(int from, int to);
+  inline void set_code_at(int i, Code* value);
+  inline Object** code_slot_at(int i);
+  inline void clear_code_at(int i);
   static inline DependentCode* cast(Object* object);
 
  private:
@@ -5569,11 +5554,8 @@
 
   inline bool CanOmitPrototypeChecks();
 
-  void AddDependentCompilationInfo(DependentCode::DependencyGroup group,
-                                        CompilationInfo* info);
-
-  void AddDependentCode(DependentCode::DependencyGroup group,
-                        Handle<Code> code);
+  inline void AddDependentCode(DependentCode::DependencyGroup group,
+                               Handle<Code> code);
 
   bool IsMapInArrayPrototypeChain();
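
After this cleanup a DependentCode array holds only Code entries, partitioned by dependency group: the first kGroupCount slots store per-group counts and the groups sit back-to-back behind them. ExtendGroup makes room by relocating the first element of each later group to just past that group's end, one move per group, which is legal because ordering within a group is irrelevant. A compact standalone model of that insertion scheme (counts held in a plain array rather than the FixedArray header slots):

    #include <cassert>
    #include <vector>

    static const int kGroupCount = 3;

    struct GroupedArray {
      int count[kGroupCount];
      std::vector<int> slots;
      GroupedArray() : slots(16, 0) {
        for (int g = 0; g < kGroupCount; g++) count[g] = 0;
      }
      int start(int g) const {
        int s = 0;
        for (int i = 0; i < g; i++) s += count[i];
        return s;
      }
      void Insert(int group, int value) {
        // ExtendGroup: each later group donates its first slot and regrows
        // at its end; the net effect is a shift right by one slot.
        for (int g = kGroupCount - 1; g > group; g--) {
          int s = start(g);
          int e = s + count[g];
          if (s < e) slots[e] = slots[s];
        }
        slots[start(group) + count[group]] = value;
        count[group]++;
      }
    };

    int main() {
      GroupedArray a;
      a.Insert(1, 7);
      a.Insert(0, 3);
      a.Insert(2, 9);
      a.Insert(0, 4);
      assert(a.count[0] == 2 && a.count[1] == 1 && a.count[2] == 1);
      assert(a.slots[a.start(1)] == 7 && a.slots[a.start(2)] == 9);
    }
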
 
diff --git a/src/preparser.cc b/src/preparser.cc
index 243a3ed..828177a 100644
--- a/src/preparser.cc
+++ b/src/preparser.cc
@@ -662,6 +662,7 @@
 bool PreParser::CheckInOrOf() {
   if (peek() == i::Token::IN ||
       (allow_for_of() &&
+       peek() == i::Token::IDENTIFIER &&
        scanner_->is_next_contextual_keyword(v8::internal::CStrVector("of")))) {
     Next();
     return true;
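
The extra peek() == i::Token::IDENTIFIER test matters because only identifier tokens carry literal characters; asking the scanner to compare the next token's literal against "of" when that token is, say, punctuation would read an unset literal buffer (compare the related current_/next_ fix in scanner.h below). A sketch of the guard with illustrative token types:

    #include <cassert>
    #include <cstring>

    enum TokenType { IDENTIFIER, PUNCTUATION };
    struct Token {
      TokenType type;
      const char* literal;  // only set for identifiers; may be NULL
    };

    static bool IsContextualOf(const Token& next) {
      return next.type == IDENTIFIER &&
             next.literal != NULL &&
             std::strcmp(next.literal, "of") == 0;
    }

    int main() {
      Token of_token = { IDENTIFIER, "of" };
      Token paren = { PUNCTUATION, NULL };
      assert(IsContextualOf(of_token));
      assert(!IsContextualOf(paren));  // no literal to look at
    }
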
diff --git a/src/regexp-macro-assembler.cc b/src/regexp-macro-assembler.cc
index 3ebf5a8..fa79276 100644
--- a/src/regexp-macro-assembler.cc
+++ b/src/regexp-macro-assembler.cc
@@ -113,8 +113,8 @@
   ASSERT(previous_index <= subject->length());
 
   // No allocations before calling the regexp, but we can't use
-  // AssertNoAllocation, since regexps might be preempted, and another thread
-  // might do allocation anyway.
+  // DisallowHeapAllocation, since regexps might be preempted, and another
+  // thread might do allocation anyway.
 
   String* subject_ptr = *subject;
   // Character offsets into string.
diff --git a/src/runtime.cc b/src/runtime.cc
index 074d8dc..eccf6ea 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -663,7 +663,8 @@
         isolate, array_buffer->byte_length());
     isolate->heap()->AdjustAmountOfExternalAllocatedMemory(
         -static_cast<intptr_t>(allocated_length));
-    free(data);
+    CHECK(V8::ArrayBufferAllocator() != NULL);
+    V8::ArrayBufferAllocator()->Free(data);
   }
   object->Dispose(external_isolate);
 }
@@ -699,8 +700,9 @@
     Handle<JSArrayBuffer> array_buffer,
     size_t allocated_length) {
   void* data;
+  CHECK(V8::ArrayBufferAllocator() != NULL);
   if (allocated_length != 0) {
-    data = malloc(allocated_length);
+    data = V8::ArrayBufferAllocator()->Allocate(allocated_length);
     if (data == NULL) return false;
     memset(data, 0, allocated_length);
   } else {
@@ -8095,15 +8097,13 @@
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CompleteOptimization) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_WaitUntilOptimized) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
-  if (FLAG_parallel_recompilation && V8::UseCrankshaft()) {
-    // While function is in optimization pipeline, it is marked with builtins.
-    while (function->code()->kind() == Code::BUILTIN) {
-      isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
-      OS::Sleep(50);
+  if (FLAG_parallel_recompilation) {
+    if (V8::UseCrankshaft() && function->IsOptimizable()) {
+      while (!function->IsOptimized()) OS::Sleep(50);
     }
   }
   return isolate->heap()->undefined_value();
diff --git a/src/runtime.h b/src/runtime.h
index c52bbe7..ef54016 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -96,7 +96,7 @@
   F(ClearFunctionTypeFeedback, 1, 1) \
   F(RunningInSimulator, 0, 1) \
   F(OptimizeFunctionOnNextCall, -1, 1) \
-  F(CompleteOptimization, 1, 1) \
+  F(WaitUntilOptimized, 1, 1) \
   F(GetOptimizationStatus, 1, 1) \
   F(GetOptimizationCount, 1, 1) \
   F(CompileForOnStackReplacement, 1, 1) \
diff --git a/src/scanner.h b/src/scanner.h
index 368ec1b..eb6764e 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -331,7 +331,7 @@
     return current_.literal_chars->is_ascii();
   }
   bool is_literal_contextual_keyword(Vector<const char> keyword) {
-    ASSERT_NOT_NULL(next_.literal_chars);
+    ASSERT_NOT_NULL(current_.literal_chars);
     return current_.literal_chars->is_contextual_keyword(keyword);
   }
   int literal_length() const {
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 69513da..0f81669 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -909,8 +909,6 @@
 
 Handle<Code> StubCache::ComputeCompareNil(Handle<Map> receiver_map,
                                           CompareNilICStub& stub) {
-  stub.SetKind(kNonStrictEquality);
-
   Handle<String> name(isolate_->heap()->empty_string());
   if (!receiver_map->is_shared()) {
     Handle<Code> cached_ic = FindIC(name, receiver_map, Code::COMPARE_NIL_IC,
diff --git a/src/v8.cc b/src/v8.cc
index e21c815..80b12de 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -56,6 +56,7 @@
 bool V8::has_fatal_error_ = false;
 bool V8::use_crankshaft_ = true;
 List<CallCompletedCallback>* V8::call_completed_callbacks_ = NULL;
+v8::ArrayBuffer::Allocator* V8::array_buffer_allocator_ = NULL;
 
 static LazyMutex entropy_mutex = LAZY_MUTEX_INITIALIZER;
 
diff --git a/src/v8.h b/src/v8.h
index cd25dc7..b8a5ae4 100644
--- a/src/v8.h
+++ b/src/v8.h
@@ -121,6 +121,15 @@
   static void RemoveCallCompletedCallback(CallCompletedCallback callback);
   static void FireCallCompletedCallback(Isolate* isolate);
 
+  static v8::ArrayBuffer::Allocator* ArrayBufferAllocator() {
+    return array_buffer_allocator_;
+  }
+
+  static void SetArrayBufferAllocator(v8::ArrayBuffer::Allocator* allocator) {
+    CHECK_EQ(NULL, array_buffer_allocator_);
+    array_buffer_allocator_ = allocator;
+  }
+
  private:
   static void InitializeOncePerProcessImpl();
   static void InitializeOncePerProcess();
@@ -139,6 +148,8 @@
   static bool use_crankshaft_;
   // List of callbacks when a Call completes.
   static List<CallCompletedCallback>* call_completed_callbacks_;
+  // Allocator for external array buffers.
+  static v8::ArrayBuffer::Allocator* array_buffer_allocator_;
 };
 
 
@@ -146,10 +157,6 @@
 enum NilValue { kNullValue, kUndefinedValue };
 
 
-// JavaScript defines two kinds of equality.
-enum EqualityKind { kStrictEquality, kNonStrictEquality };
-
-
 } }  // namespace v8::internal
 
 namespace i = v8::internal;
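
With malloc/free gone from the runtime, every embedder has to install an ArrayBuffer::Allocator before the first ArrayBuffer is created, or the CHECKs in runtime.cc fire. The cctest harness change below does exactly this; a minimal embedder-side sketch of the same registration (malloc-backed, as in the test):

    #include <cstdlib>

    #include "v8.h"

    class MallocArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
     public:
      virtual void* Allocate(size_t length) { return malloc(length); }
      virtual void Free(void* data) { free(data); }
    };

    int main(int argc, char* argv[]) {
      static MallocArrayBufferAllocator allocator;
      v8::V8::SetArrayBufferAllocator(&allocator);  // before any ArrayBuffer
      // ... create isolates and contexts, run scripts as usual ...
      return 0;
    }
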
diff --git a/src/version.cc b/src/version.cc
index 7f424e4..bad15cf 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
 // system so their names cannot be changed without changing the scripts.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     19
-#define BUILD_NUMBER      12
-#define PATCH_LEVEL       1
+#define BUILD_NUMBER      13
+#define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 53d7027..e9fe2a8 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -4754,18 +4754,14 @@
 
   VisitForAccumulatorValue(sub_expr);
   PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  EqualityKind kind = expr->op() == Token::EQ_STRICT
-      ? kStrictEquality : kNonStrictEquality;
-  if (kind == kStrictEquality) {
+  if (expr->op() == Token::EQ_STRICT) {
     Heap::RootListIndex nil_value = nil == kNullValue ?
         Heap::kNullValueRootIndex :
         Heap::kUndefinedValueRootIndex;
     __ CompareRoot(rax, nil_value);
     Split(equal, if_true, if_false, fall_through);
   } else {
-    Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(),
-                                                         kNonStrictEquality,
-                                                         nil);
+    Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
     CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
     __ testq(rax, rax);
     Split(not_zero, if_true, if_false, fall_through);
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 2ca585d..f423133 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -92,7 +92,20 @@
     RegisterDependentCodeForEmbeddedMaps(code);
   }
   PopulateDeoptimizationData(code);
-  info()->CommitDependentMaps(code);
+  for (int i = 0; i < prototype_maps_.length(); i++) {
+    prototype_maps_.at(i)->AddDependentCode(
+        DependentCode::kPrototypeCheckGroup, code);
+  }
+  for (int i = 0; i < transition_maps_.length(); i++) {
+    transition_maps_.at(i)->AddDependentCode(
+        DependentCode::kTransitionGroup, code);
+  }
+  if (graph()->depends_on_empty_array_proto_elements()) {
+    isolate()->initial_object_prototype()->map()->AddDependentCode(
+        DependentCode::kElementsCantBeAddedGroup, code);
+    isolate()->initial_array_prototype()->map()->AddDependentCode(
+        DependentCode::kElementsCantBeAddedGroup, code);
+  }
 }
 
 
@@ -1257,10 +1270,26 @@
     }
 
     if (test_value != 0) {
-      // Deoptimize if remainder is not 0.
-      __ testl(dividend, Immediate(test_value));
-      DeoptimizeIf(not_zero, instr->environment());
-      __ sarl(dividend, Immediate(power));
+      if (instr->hydrogen()->CheckFlag(
+          HInstruction::kAllUsesTruncatingToInt32)) {
+        Label done, negative;
+        __ cmpl(dividend, Immediate(0));
+        __ j(less, &negative, Label::kNear);
+        __ sarl(dividend, Immediate(power));
+        __ jmp(&done, Label::kNear);
+
+        __ bind(&negative);
+        __ negl(dividend);
+        __ sarl(dividend, Immediate(power));
+        if (divisor > 0) __ negl(dividend);
+        __ bind(&done);
+        return;  // Don't fall through to "__ negl" below.
+      } else {
+        // Deoptimize if remainder is not 0.
+        __ testl(dividend, Immediate(test_value));
+        DeoptimizeIf(not_zero, instr->environment());
+        __ sarl(dividend, Immediate(power));
+      }
     }
 
     if (divisor < 0) __ negl(dividend);
@@ -1307,11 +1336,7 @@
   __ cdq();
   __ idivl(right_reg);
 
-  if (!instr->is_flooring()) {
-    // Deoptimize if remainder is not 0.
-    __ testl(rdx, rdx);
-    DeoptimizeIf(not_zero, instr->environment());
-  } else {
+  if (instr->is_flooring()) {
     Label done;
     __ testl(rdx, rdx);
     __ j(zero, &done, Label::kNear);
@@ -1319,6 +1344,11 @@
     __ sarl(rdx, Immediate(31));
     __ addl(rax, rdx);
     __ bind(&done);
+  } else if (!instr->hydrogen()->CheckFlag(
+      HInstruction::kAllUsesTruncatingToInt32)) {
+    // Deoptimize if remainder is not 0.
+    __ testl(rdx, rdx);
+    DeoptimizeIf(not_zero, instr->environment());
   }
 }
 
@@ -3995,6 +4025,9 @@
   }
 
   if (!transition.is_null()) {
+    if (transition->CanBeDeprecated()) {
+      transition_maps_.Add(transition, info()->zone());
+    }
     if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
       __ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
     } else {
@@ -5067,7 +5100,11 @@
 
   ASSERT(prototypes->length() == maps->length());
 
-  if (!instr->hydrogen()->CanOmitPrototypeChecks()) {
+  if (instr->hydrogen()->CanOmitPrototypeChecks()) {
+    for (int i = 0; i < maps->length(); i++) {
+      prototype_maps_.Add(maps->at(i), info()->zone());
+    }
+  } else {
     for (int i = 0; i < prototypes->length(); i++) {
       __ LoadHeapObject(reg, prototypes->at(i));
       DoCheckMapCommon(reg, maps->at(i), instr);
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index c3f99c4..07a948c 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -57,6 +57,8 @@
         deoptimizations_(4, info->zone()),
         jump_table_(4, info->zone()),
         deoptimization_literals_(8, info->zone()),
+        prototype_maps_(0, info->zone()),
+        transition_maps_(0, info->zone()),
         inlined_function_count_(0),
         scope_(info->scope()),
         status_(UNUSED),
@@ -360,6 +362,8 @@
   ZoneList<LEnvironment*> deoptimizations_;
   ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
   ZoneList<Handle<Object> > deoptimization_literals_;
+  ZoneList<Handle<Map> > prototype_maps_;
+  ZoneList<Handle<Map> > transition_maps_;
   int inlined_function_count_;
   Scope* const scope_;
   Status status_;
diff --git a/test/cctest/cctest.cc b/test/cctest/cctest.cc
index b241f32..94dcce1 100644
--- a/test/cctest/cctest.cc
+++ b/test/cctest/cctest.cc
@@ -98,10 +98,21 @@
 v8::Isolate* CcTest::default_isolate_;
 
 
+class CcTestArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
+ public:
+  virtual void* Allocate(size_t length) { return malloc(length); }
+  virtual void Free(void* data) { free(data); }
+};
+
+
 int main(int argc, char* argv[]) {
   v8::internal::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
   v8::internal::FLAG_harmony_array_buffer = true;
   v8::internal::FLAG_harmony_typed_arrays = true;
+
+  CcTestArrayBufferAllocator array_buffer_allocator;
+  v8::V8::SetArrayBufferAllocator(&array_buffer_allocator);
+
   CcTest::set_default_isolate(v8::Isolate::GetCurrent());
   CHECK(CcTest::default_isolate() != NULL);
   int tests_run = 0;
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index 8ce11eb..59b3dc3 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -78,6 +78,9 @@
 # BUG(2628): The test sometimes fails on MIPS simulator.
 test-cpu-profiler/SampleWhenFrameIsNotSetup: PASS || FAIL
 
+# BUG(2657): Test sometimes times out on MIPS simulator.
+test-thread-termination/TerminateMultipleV8ThreadsDefaultIsolate: PASS || TIMEOUT
+
 ##############################################################################
 [ $arch == android_arm || $arch == android_ia32 ]
 
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index a69fe90..5d3a79d 100755
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -1037,7 +1037,12 @@
 static const double kFastReturnValueDouble = 2.7;
 // variable return values
 static bool fast_return_value_bool = false;
-static bool fast_return_value_void_is_null = false;
+enum ReturnValueOddball {
+  kNullReturnValue,
+  kUndefinedReturnValue,
+  kEmptyStringReturnValue
+};
+static ReturnValueOddball fast_return_value_void;
 static bool fast_return_value_object_is_empty = false;
 
 template<>
@@ -1072,10 +1077,16 @@
 void FastReturnValueCallback<void>(
     const v8::FunctionCallbackInfo<v8::Value>& info) {
   CheckReturnValue(info);
-  if (fast_return_value_void_is_null) {
-    info.GetReturnValue().SetNull();
-  } else {
-    info.GetReturnValue().SetUndefined();
+  switch (fast_return_value_void) {
+    case kNullReturnValue:
+      info.GetReturnValue().SetNull();
+      break;
+    case kUndefinedReturnValue:
+      info.GetReturnValue().SetUndefined();
+      break;
+    case kEmptyStringReturnValue:
+      info.GetReturnValue().SetEmptyString();
+      break;
   }
 }
 
@@ -1135,13 +1146,25 @@
     CHECK_EQ(fast_return_value_bool, value->ToBoolean()->Value());
   }
   // check oddballs
-  for (int i = 0; i < 2; i++) {
-    fast_return_value_void_is_null = i == 0;
+  ReturnValueOddball oddballs[] = {
+      kNullReturnValue,
+      kUndefinedReturnValue,
+      kEmptyStringReturnValue
+  };
+  for (size_t i = 0; i < ARRAY_SIZE(oddballs); i++) {
+    fast_return_value_void = oddballs[i];
     value = TestFastReturnValues<void>();
-    if (fast_return_value_void_is_null) {
-      CHECK(value->IsNull());
-    } else {
-      CHECK(value->IsUndefined());
+    switch (fast_return_value_void) {
+      case kNullReturnValue:
+        CHECK(value->IsNull());
+        break;
+      case kUndefinedReturnValue:
+        CHECK(value->IsUndefined());
+        break;
+      case kEmptyStringReturnValue:
+        CHECK(value->IsString());
+        CHECK_EQ(0, v8::String::Cast(*value)->Length());
+        break;
     }
   }
   // check handles
@@ -2547,6 +2570,19 @@
 }
 
 
+class ScopedArrayBufferContents {
+ public:
+  explicit ScopedArrayBufferContents(
+      const v8::ArrayBuffer::Contents& contents)
+    : contents_(contents) {}
+  ~ScopedArrayBufferContents() { free(contents_.Data()); }
+  void* Data() const { return contents_.Data(); }
+  size_t ByteLength() const { return contents_.ByteLength(); }
+ private:
+  const v8::ArrayBuffer::Contents contents_;
+};
+
+
 THREADED_TEST(ArrayBuffer_ApiInternalToExternal) {
   i::FLAG_harmony_array_buffer = true;
   i::FLAG_harmony_typed_arrays = true;
@@ -2560,8 +2596,7 @@
   CHECK(!ab->IsExternal());
   HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
 
-  v8::ArrayBufferContents ab_contents;
-  ab->Externalize(&ab_contents);
+  ScopedArrayBufferContents ab_contents(ab->Externalize());
   CHECK(ab->IsExternal());
 
   CHECK_EQ(1024, static_cast<int>(ab_contents.ByteLength()));
@@ -2603,8 +2638,7 @@
   Local<v8::ArrayBuffer> ab1 = v8::ArrayBuffer::Cast(*result);
   CHECK_EQ(2, static_cast<int>(ab1->ByteLength()));
   CHECK(!ab1->IsExternal());
-  v8::ArrayBufferContents ab1_contents;
-  ab1->Externalize(&ab1_contents);
+  ScopedArrayBufferContents ab1_contents(ab1->Externalize());
   CHECK(ab1->IsExternal());
 
   result = CompileRun("ab1.byteLength");
@@ -2711,8 +2745,7 @@
   v8::Handle<v8::Float64Array> f64a =
     CreateAndCheck<v8::Float64Array, 8>(buffer, 8, 127);
 
-  v8::ArrayBufferContents contents;
-  buffer->Externalize(&contents);
+  ScopedArrayBufferContents contents(buffer->Externalize());
   buffer->Neuter();
   CHECK_EQ(0, static_cast<int>(buffer->ByteLength()));
   CheckIsNeutered(u8a);
@@ -2763,8 +2796,7 @@
   v8::Handle<v8::Float64Array> f64a(
     v8::Float64Array::Cast(*CompileRun("f64a")));
 
-  v8::ArrayBufferContents contents;
-  ab->Externalize(&contents);
+  ScopedArrayBufferContents contents(ab->Externalize());
   ab->Neuter();
   CHECK_EQ(0, static_cast<int>(ab->ByteLength()));
   CheckIsNeutered(u8a);
diff --git a/test/cctest/test-compare-nil-ic-stub.cc b/test/cctest/test-compare-nil-ic-stub.cc
index 6177fde..affb8bd 100644
--- a/test/cctest/test-compare-nil-ic-stub.cc
+++ b/test/cctest/test-compare-nil-ic-stub.cc
@@ -46,9 +46,8 @@
 TEST(ExternalICStateParsing) {
   Types types;
   types.Add(CompareNilICStub::UNDEFINED);
-  CompareNilICStub stub(kNonStrictEquality, kUndefinedValue, types);
+  CompareNilICStub stub(kUndefinedValue, types);
   CompareNilICStub stub2(stub.GetExtraICState());
-  CHECK_EQ(stub.GetKind(), stub2.GetKind());
   CHECK_EQ(stub.GetNilValue(), stub2.GetNilValue());
   CHECK_EQ(stub.GetTypes().ToIntegral(), stub2.GetTypes().ToIntegral());
 }
diff --git a/test/cctest/test-cpu-profiler.cc b/test/cctest/test-cpu-profiler.cc
index 188fbd9..a615fe9 100644
--- a/test/cctest/test-cpu-profiler.cc
+++ b/test/cctest/test-cpu-profiler.cc
@@ -648,56 +648,57 @@
 "}\n";
 
 
-class FooAccessorsData {
+class TestApiCallbacks {
  public:
-  explicit FooAccessorsData(int min_duration_ms)
+  explicit TestApiCallbacks(int min_duration_ms)
       : min_duration_ms_(min_duration_ms),
-        getter_duration_(0),
-        setter_duration_(0),
-        getter_iterations_(0),
-        setter_iterations_(0) {}
+        is_warming_up_(false) {}
 
   static v8::Handle<v8::Value> Getter(v8::Local<v8::String> name,
                                       const v8::AccessorInfo& info) {
-    FooAccessorsData* data = fromInfo(info);
-    data->getter_duration_ = data->Wait(&data->getter_iterations_);
+    TestApiCallbacks* data = fromInfo(info);
+    data->Wait();
     return v8::Int32::New(2013);
   }
 
   static void Setter(v8::Local<v8::String> name,
                      v8::Local<v8::Value> value,
                      const v8::AccessorInfo& info) {
-    FooAccessorsData* data = fromInfo(info);
-    data->setter_duration_ = data->Wait(&data->setter_iterations_);
+    TestApiCallbacks* data = fromInfo(info);
+    data->Wait();
   }
 
-  void PrintAccessorTime() {
-    i::OS::Print("getter: %f ms (%d); setter: %f ms (%d)\n", getter_duration_,
-        getter_iterations_, setter_duration_, setter_iterations_);
+  static void Callback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+    TestApiCallbacks* data = fromInfo(info);
+    data->Wait();
   }
 
+  void set_warming_up(bool value) { is_warming_up_ = value; }
+
  private:
-  double Wait(int* iterations) {
+  void Wait() {
+    if (is_warming_up_) return;
     double start = i::OS::TimeCurrentMillis();
     double duration = 0;
     while (duration < min_duration_ms_) {
       i::OS::Sleep(1);
       duration = i::OS::TimeCurrentMillis() - start;
-      ++*iterations;
     }
-    return duration;
   }
 
-  static FooAccessorsData* fromInfo(const v8::AccessorInfo& info) {
+  static TestApiCallbacks* fromInfo(const v8::AccessorInfo& info) {
     void* data = v8::External::Cast(*info.Data())->Value();
-    return reinterpret_cast<FooAccessorsData*>(data);
+    return reinterpret_cast<TestApiCallbacks*>(data);
+  }
+
+  static TestApiCallbacks* fromInfo(
+      const v8::FunctionCallbackInfo<v8::Value>& info) {
+    void* data = v8::External::Cast(*info.Data())->Value();
+    return reinterpret_cast<TestApiCallbacks*>(data);
   }
 
   int min_duration_ms_;
-  double getter_duration_;
-  double setter_duration_;
-  int getter_iterations_;
-  int setter_iterations_;
+  bool is_warming_up_;
 };
 
 
@@ -705,7 +706,7 @@
 // This test checks the case when the long-running accessors are called
 // only once and the optimizer doesn't have chance to change the invocation
 // code.
-TEST(NativeAccessorNameInProfile1) {
+TEST(NativeAccessorUninitializedIC) {
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
 
@@ -714,11 +715,11 @@
   v8::Local<v8::ObjectTemplate> instance_template =
       func_template->InstanceTemplate();
 
-  FooAccessorsData accessors(100);
+  TestApiCallbacks accessors(100);
   v8::Local<v8::External> data = v8::External::New(&accessors);
   instance_template->SetAccessor(
-      v8::String::New("foo"), &FooAccessorsData::Getter,
-      &FooAccessorsData::Setter, data);
+      v8::String::New("foo"), &TestApiCallbacks::Getter,
+      &TestApiCallbacks::Setter, data);
   v8::Local<v8::Function> func = func_template->GetFunction();
   v8::Local<v8::Object> instance = func->NewInstance();
   env->Global()->Set(v8::String::New("instance"), instance);
@@ -740,7 +741,6 @@
   // Dump collected profile to have a better diagnostic in case of failure.
   reinterpret_cast<i::CpuProfile*>(
       const_cast<v8::CpuProfile*>(profile))->Print();
-  accessors.PrintAccessorTime();
 
   const v8::CpuProfileNode* root = profile->GetTopDownRoot();
   const v8::CpuProfileNode* startNode = GetChild(root, "start");
@@ -754,7 +754,7 @@
 // Test that native accessors are properly reported in the CPU profile.
 // This test makes sure that the accessors are called enough times to become
 // hot and to trigger optimizations.
-TEST(NativeAccessorNameInProfile2) {
+TEST(NativeAccessorMonomorphicIC) {
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
 
@@ -763,11 +763,11 @@
   v8::Local<v8::ObjectTemplate> instance_template =
       func_template->InstanceTemplate();
 
-  FooAccessorsData accessors(1);
+  TestApiCallbacks accessors(1);
   v8::Local<v8::External> data = v8::External::New(&accessors);
   instance_template->SetAccessor(
-      v8::String::New("foo"), &FooAccessorsData::Getter,
-      &FooAccessorsData::Setter, data);
+      v8::String::New("foo"), &TestApiCallbacks::Getter,
+      &TestApiCallbacks::Setter, data);
   v8::Local<v8::Function> func = func_template->GetFunction();
   v8::Local<v8::Object> instance = func->NewInstance();
   env->Global()->Set(v8::String::New("instance"), instance);
@@ -776,6 +776,16 @@
   v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
       env->Global()->Get(v8::String::New("start")));
 
+  {
+    // Make sure accessor ICs are in monomorphic state before starting
+    // profiling.
+    accessors.set_warming_up(true);
+    int32_t warm_up_iterations = 3;
+    v8::Handle<v8::Value> args[] = { v8::Integer::New(warm_up_iterations) };
+    function->Call(env->Global(), ARRAY_SIZE(args), args);
+    accessors.set_warming_up(false);
+  }
+
   v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
   v8::Local<v8::String> profile_name = v8::String::New("my_profile");
 
@@ -799,3 +809,111 @@
 
   cpu_profiler->DeleteAllCpuProfiles();
 }
+
+
+static const char* native_method_test_source = "function start(count) {\n"
+"  for (var i = 0; i < count; i++) {\n"
+"    instance.fooMethod();\n"
+"  }\n"
+"}\n";
+
+
+TEST(NativeMethodUninitializedIC) {
+  LocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+
+  TestApiCallbacks callbacks(100);
+  v8::Local<v8::External> data = v8::External::New(&callbacks);
+
+  v8::Local<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New();
+  func_template->SetClassName(v8::String::New("Test_InstanceConstructor"));
+  v8::Local<v8::ObjectTemplate> proto_template =
+      func_template->PrototypeTemplate();
+  v8::Local<v8::Signature> signature = v8::Signature::New(func_template);
+  proto_template->Set(v8::String::New("fooMethod"), v8::FunctionTemplate::New(
+      &TestApiCallbacks::Callback, data, signature, 0));
+
+  v8::Local<v8::Function> func = func_template->GetFunction();
+  v8::Local<v8::Object> instance = func->NewInstance();
+  env->Global()->Set(v8::String::New("instance"), instance);
+
+  v8::Script::Compile(v8::String::New(native_method_test_source))->Run();
+  v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
+      env->Global()->Get(v8::String::New("start")));
+
+  v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
+  v8::Local<v8::String> profile_name = v8::String::New("my_profile");
+
+  cpu_profiler->StartCpuProfiling(profile_name);
+  int32_t repeat_count = 1;
+  v8::Handle<v8::Value> args[] = { v8::Integer::New(repeat_count) };
+  function->Call(env->Global(), ARRAY_SIZE(args), args);
+  const v8::CpuProfile* profile = cpu_profiler->StopCpuProfiling(profile_name);
+
+  CHECK_NE(NULL, profile);
+  // Dump collected profile to have a better diagnostic in case of failure.
+  reinterpret_cast<i::CpuProfile*>(
+      const_cast<v8::CpuProfile*>(profile))->Print();
+
+  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
+  const v8::CpuProfileNode* startNode = GetChild(root, "start");
+  GetChild(startNode, "fooMethod");
+
+  cpu_profiler->DeleteAllCpuProfiles();
+}
+
+
+TEST(NativeMethodMonomorphicIC) {
+  LocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+
+  TestApiCallbacks callbacks(1);
+  v8::Local<v8::External> data = v8::External::New(&callbacks);
+
+  v8::Local<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New();
+  func_template->SetClassName(v8::String::New("Test_InstanceConstructor"));
+  v8::Local<v8::ObjectTemplate> proto_template =
+      func_template->PrototypeTemplate();
+  v8::Local<v8::Signature> signature = v8::Signature::New(func_template);
+  proto_template->Set(v8::String::New("fooMethod"), v8::FunctionTemplate::New(
+      &TestApiCallbacks::Callback, data, signature, 0));
+
+  v8::Local<v8::Function> func = func_template->GetFunction();
+  v8::Local<v8::Object> instance = func->NewInstance();
+  env->Global()->Set(v8::String::New("instance"), instance);
+
+  v8::Script::Compile(v8::String::New(native_method_test_source))->Run();
+  v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
+      env->Global()->Get(v8::String::New("start")));
+  {
+    // Make sure method ICs are in monomorphic state before starting
+    // profiling.
+    callbacks.set_warming_up(true);
+    int32_t warm_up_iterations = 3;
+    v8::Handle<v8::Value> args[] = { v8::Integer::New(warm_up_iterations) };
+    function->Call(env->Global(), ARRAY_SIZE(args), args);
+    callbacks.set_warming_up(false);
+  }
+
+  v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
+  v8::Local<v8::String> profile_name = v8::String::New("my_profile");
+
+  cpu_profiler->StartCpuProfiling(profile_name);
+  int32_t repeat_count = 100;
+  v8::Handle<v8::Value> args[] = { v8::Integer::New(repeat_count) };
+  function->Call(env->Global(), ARRAY_SIZE(args), args);
+  const v8::CpuProfile* profile = cpu_profiler->StopCpuProfiling(profile_name);
+
+  CHECK_NE(NULL, profile);
+  // Dump collected profile to have a better diagnostic in case of failure.
+  reinterpret_cast<i::CpuProfile*>(
+      const_cast<v8::CpuProfile*>(profile))->Print();
+
+  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
+  GetChild(root, "start");
+  // TODO(yurys): CallIC should be changed to report external callback
+  // invocation.
+  // GetChild(startNode, "fooMethod");
+
+  cpu_profiler->DeleteAllCpuProfiles();
+}
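[Note on the test-cpu-profiler.cc changes above: these tests walk the top-down profile tree with a GetChild helper defined earlier in test-cpu-profiler.cc, which is not part of this diff. A rough sketch of what such a helper has to do, assuming the v8::CpuProfileNode child-iteration API of this release and the test file's existing includes (the CHECK placement in the real helper may differ):

    // Looks for a direct child node whose function name matches |name|
    // and fails the test if none exists.
    static const v8::CpuProfileNode* GetChild(const v8::CpuProfileNode* node,
                                              const char* name) {
      for (int i = 0; i < node->GetChildrenCount(); i++) {
        const v8::CpuProfileNode* child = node->GetChild(i);
        v8::String::Utf8Value function_name(child->GetFunctionName());
        if (strcmp(*function_name, name) == 0) return child;
      }
      CHECK(false);  // Expected child frame was not recorded in the profile.
      return NULL;
    }
]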
diff --git a/test/mjsunit/compiler/parallel-proto-change.js b/test/mjsunit/compiler/parallel-proto-change.js
index aa1ac6d..74e6d86 100644
--- a/test/mjsunit/compiler/parallel-proto-change.js
+++ b/test/mjsunit/compiler/parallel-proto-change.js
@@ -25,12 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax
-// Flags: --parallel-recompilation --parallel-recompilation-delay=50
-
-function assertUnoptimized(fun) {
-  assertTrue(%GetOptimizationStatus(fun) != 1);
-}
+// Flags: --allow-natives-syntax --parallel-recompilation
 
 function f(foo) { return foo.bar(); }
 
@@ -41,10 +36,10 @@
 assertEquals(1, f(o));
 
 %OptimizeFunctionOnNextCall(f, "parallel");
-assertEquals(1, f(o));     // Trigger optimization.
-assertUnoptimized(f);      // Optimization not yet done.
-// Change the prototype chain during optimization to trigger map invalidation.
+assertEquals(1, f(o));
+// Change the prototype chain during optimization.
 o.__proto__.__proto__ = { bar: function() { return 2; } };
-%CompleteOptimization(f);  // Conclude optimization with...
-assertUnoptimized(f);      // ... bailing out due to map dependency.
+
+%WaitUntilOptimized(f);
+
 assertEquals(2, f(o));
diff --git a/test/mjsunit/harmony/iteration-semantics.js b/test/mjsunit/harmony/iteration-semantics.js
index 6215522..96b6d14 100644
--- a/test/mjsunit/harmony/iteration-semantics.js
+++ b/test/mjsunit/harmony/iteration-semantics.js
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --harmony
+// Flags: --harmony --harmony-generators
 
 // Test for-of semantics.
 
diff --git a/test/mjsunit/manual-parallel-recompile.js b/test/mjsunit/manual-parallel-recompile.js
index f090ff4..8d660e0 100644
--- a/test/mjsunit/manual-parallel-recompile.js
+++ b/test/mjsunit/manual-parallel-recompile.js
@@ -25,17 +25,12 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax --expose-gc
-// Flags: --parallel-recompilation --parallel-recompilation-delay=50
+// Flags: --allow-natives-syntax --expose-gc --parallel-recompilation
 
 function assertUnoptimized(fun) {
   assertTrue(%GetOptimizationStatus(fun) != 1);
 }
 
-function assertOptimized(fun) {
-  assertTrue(%GetOptimizationStatus(fun) != 2);
-}
-
 function f(x) {
   var xx = x * x;
   var xxstr = xx.toString();
@@ -58,13 +53,10 @@
 
 %OptimizeFunctionOnNextCall(f, "parallel");
 %OptimizeFunctionOnNextCall(g, "parallel");
-f(g(2));  // Trigger optimization.
+f(g(2));
 
-assertUnoptimized(f);  // Not yet optimized.
+assertUnoptimized(f);
 assertUnoptimized(g);
 
-%CompleteOptimization(f);  // Wait till optimized code is installed.
-%CompleteOptimization(g);
-
-assertOptimized(f);  // Optimized now.
-assertOptimized(g);
+%WaitUntilOptimized(f);
+%WaitUntilOptimized(g);
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index 585d503..8d6274b 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -47,6 +47,10 @@
 stack-traces-gc: PASS || FAIL
 
 ##############################################################################
+# TODO(wingo): Re-enable when GC bug from r15060 is solved.
+harmony/generators-iteration: SKIP
+
+##############################################################################
 # Too slow in debug mode with --stress-opt mode.
 compiler/regress-stacktrace-methods: PASS, SKIP if $mode == debug
 compiler/regress-funcaller: PASS, SKIP if $mode == debug
diff --git a/test/mjsunit/parallel-invalidate-transition-map.js b/test/mjsunit/parallel-invalidate-transition-map.js
deleted file mode 100644
index 42a266f..0000000
--- a/test/mjsunit/parallel-invalidate-transition-map.js
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --track-fields --track-double-fields --allow-natives-syntax
-// Flags: --parallel-recompilation --parallel-recompilation-delay=50
-
-function assertUnoptimized(fun) {
-  assertTrue(%GetOptimizationStatus(fun) != 1);
-}
-
-function new_object() {
-  var o = {};
-  o.a = 1;
-  o.b = 2;
-  return o;
-}
-
-function add_field(obj) {
-  obj.c = 3;
-}
-
-add_field(new_object());
-add_field(new_object());
-%OptimizeFunctionOnNextCall(add_field, "parallel");
-
-var o = new_object();
-add_field(o);                      // Trigger optimization.
-assertUnoptimized(add_field);      // Not yet optimized.
-o.c = 2.2;                         // Invalidate transition map.
-%CompleteOptimization(add_field);  // Conclude optimization with...
-assertUnoptimized(add_field);      // ... bailing out due to map dependency.
-
diff --git a/test/mjsunit/parallel-optimize-disabled.js b/test/mjsunit/parallel-optimize-disabled.js
index 479684d..86b375c 100644
--- a/test/mjsunit/parallel-optimize-disabled.js
+++ b/test/mjsunit/parallel-optimize-disabled.js
@@ -43,4 +43,4 @@
 %OptimizeFunctionOnNextCall(g, "parallel");
 f(0);  // g() is disabled for optimization on inlining attempt.
 // Attempt to optimize g() should not run into any assertion.
-%CompleteOptimization(g);
+%WaitUntilOptimized(g);
diff --git a/test/mjsunit/parallel-initial-prototype-change.js b/test/mjsunit/regress/regress-2132.js
similarity index 69%
rename from test/mjsunit/parallel-initial-prototype-change.js
rename to test/mjsunit/regress/regress-2132.js
index 5544beb..d8987a5 100644
--- a/test/mjsunit/parallel-initial-prototype-change.js
+++ b/test/mjsunit/regress/regress-2132.js
@@ -26,25 +26,23 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // Flags: --allow-natives-syntax
-// Flags: --parallel-recompilation --parallel-recompilation-delay=50
 
-function assertUnoptimized(fun) {
-  assertTrue(%GetOptimizationStatus(fun) != 1);
+function mul(x, y) {
+  return (x * y) | 0;
 }
 
-function f1(a, i) {
-  return a[i] + 0.5;
+mul(0, 0);
+mul(0, 0);
+%OptimizeFunctionOnNextCall(mul);
+assertEquals(0, mul(0, -1));
+assertTrue(%GetOptimizationStatus(mul) != 2);
+
+function div(x, y) {
+  return (x / y) | 0;
 }
 
-var arr = [0.0,,2.5];
-assertEquals(0.5, f1(arr, 0));
-assertEquals(0.5, f1(arr, 0));
-
-// Optimized code of f1 depends on initial object and array maps.
-%OptimizeFunctionOnNextCall(f1, "parallel");
-assertEquals(0.5, f1(arr, 0));
-assertUnoptimized(f1);      // Not yet optimized.
-Object.prototype[1] = 1.5;  // Invalidate current initial object map.
-assertEquals(2, f1(arr, 1));
-%CompleteOptimization(f1);  // Conclude optimization with...
-assertUnoptimized(f1);      // ... bailing out due to map dependency.
+div(4, 2);
+div(4, 2);
+%OptimizeFunctionOnNextCall(div);
+assertEquals(1, div(5, 3));
+assertTrue(%GetOptimizationStatus(div) != 2);
diff --git a/test/mjsunit/parallel-initial-prototype-change.js b/test/mjsunit/regress/regress-crbug-248025.js
similarity index 68%
copy from test/mjsunit/parallel-initial-prototype-change.js
copy to test/mjsunit/regress/regress-crbug-248025.js
index 5544beb..c598859 100644
--- a/test/mjsunit/parallel-initial-prototype-change.js
+++ b/test/mjsunit/regress/regress-crbug-248025.js
@@ -25,26 +25,16 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax
-// Flags: --parallel-recompilation --parallel-recompilation-delay=50
+// Flags: --harmony-iteration
 
-function assertUnoptimized(fun) {
-  assertTrue(%GetOptimizationStatus(fun) != 1);
+// Filler long enough to trigger lazy parsing.
+var filler = "//" + new Array(1024).join('x');
+
+// Test that the pre-parser does not crash when the expected contextual
+// keyword as part of a 'for' statement is not an identifier.
+try {
+  eval(filler + "\nfunction f() { for (x : y) { } }");
+  throw "not reached";
+} catch (e) {
+  if (!(e instanceof SyntaxError)) throw e;
 }
-
-function f1(a, i) {
-  return a[i] + 0.5;
-}
-
-var arr = [0.0,,2.5];
-assertEquals(0.5, f1(arr, 0));
-assertEquals(0.5, f1(arr, 0));
-
-// Optimized code of f1 depends on initial object and array maps.
-%OptimizeFunctionOnNextCall(f1, "parallel");
-assertEquals(0.5, f1(arr, 0));
-assertUnoptimized(f1);      // Not yet optimized.
-Object.prototype[1] = 1.5;  // Invalidate current initial object map.
-assertEquals(2, f1(arr, 1));
-%CompleteOptimization(f1);  // Conclude optimization with...
-assertUnoptimized(f1);      // ... bailing out due to map dependency.