Version 3.2.3

Fixed a number of crash bugs.

Fixed Array::New(length) to return an array with the requested length set (issue 1256); see the sketch after this list.

Fixed FreeBSD build.

Changed __defineGetter__ to not throw (matching the behavior of Safari).

Implemented more of ECMAScript 5 strict mode.

Improved Crankshaft performance on all platforms.
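
A minimal embedder-side sketch of the Array::New fix (the pre-isolate handle
scope setup is assumed; see the api.cc hunk below):

    v8::HandleScope scope;
    v8::Local<v8::Array> array = v8::Array::New(16);
    // Before the fix, array->Length() could report 0; it now reports 16.
    // Negative requested lengths are clamped to 0.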


git-svn-id: http://v8.googlecode.com/svn/trunk@7219 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/api.cc b/src/api.cc
index 26d2246..8a8640d 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -3960,7 +3960,9 @@
   EnsureInitialized("v8::Array::New()");
   LOG_API("Array::New");
   ENTER_V8;
-  i::Handle<i::JSArray> obj = i::Factory::NewJSArray(length);
+  int real_length = length > 0 ? length : 0;
+  i::Handle<i::JSArray> obj = i::Factory::NewJSArray(real_length);
+  obj->set_length(*i::Factory::NewNumberFromInt(real_length));
   return Utils::ToLocal(obj);
 }
 
@@ -4126,11 +4128,22 @@
                         RetainedObjectInfo* info) {
   if (IsDeadCheck("v8::V8::AddObjectGroup()")) return;
   STATIC_ASSERT(sizeof(Persistent<Value>) == sizeof(i::Object**));
-  i::GlobalHandles::AddGroup(
+  i::GlobalHandles::AddObjectGroup(
       reinterpret_cast<i::Object***>(objects), length, info);
 }
 
 
+void V8::AddImplicitReferences(Persistent<Object> parent,
+                               Persistent<Value>* children,
+                               size_t length) {
+  if (IsDeadCheck("v8::V8::AddImplicitReferences()")) return;
+  STATIC_ASSERT(sizeof(Persistent<Value>) == sizeof(i::Object**));
+  i::GlobalHandles::AddImplicitReferences(
+      *Utils::OpenHandle(*parent),
+      reinterpret_cast<i::Object***>(children), length);
+}
+
+
 int V8::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
   if (IsDeadCheck("v8::V8::AdjustAmountOfExternalAllocatedMemory()")) return 0;
   return i::Heap::AdjustAmountOfExternalAllocatedMemory(change_in_bytes);
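
For context, a sketch of how an embedder might use the new
AddImplicitReferences API (the wrapper and its GC-prologue caller are
illustrative, not part of this patch):

    // Declares that 'children' are kept alive by 'parent', so the collector
    // can treat the group as retained together.
    void DeclareImplicitReferences(v8::Persistent<v8::Object> parent,
                                   v8::Persistent<v8::Value>* children,
                                   size_t count) {
      v8::V8::AddImplicitReferences(parent, children, count);
    }
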
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 48b91a6..e58d968 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -271,7 +271,8 @@
 
 Assembler::Assembler(void* buffer, int buffer_size)
     : positions_recorder_(this),
-      allow_peephole_optimization_(false) {
+      allow_peephole_optimization_(false),
+      emit_debug_code_(FLAG_debug_code) {
   allow_peephole_optimization_ = FLAG_peephole_optimization;
   if (buffer == NULL) {
     // Do our own buffer management.
@@ -2384,6 +2385,14 @@
 }
 
 
+void Assembler::vneg(const DwVfpRegister dst,
+                     const DwVfpRegister src,
+                     const Condition cond) {
+  emit(cond | 0xE*B24 | 0xB*B20 | B16 | dst.code()*B12 |
+       0x5*B9 | B8 | B6 | src.code());
+}
+
+
 void Assembler::vabs(const DwVfpRegister dst,
                      const DwVfpRegister src,
                      const Condition cond) {
@@ -2657,7 +2666,7 @@
         Serializer::TooLateToEnableNow();
       }
 #endif
-      if (!Serializer::enabled() && !FLAG_debug_code) {
+      if (!Serializer::enabled() && !emit_debug_code()) {
         return;
       }
     }
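
As a sanity check on the new vneg encoding (a worked example, not part of the
patch; B6..B24 are the usual 1 << n bit constants and al encodes as 0xE << 28):

    // vneg d0, d1, al:
    //   (0xE << 28) | 0xE*B24 | 0xB*B20 | B16 | 0*B12 | 0x5*B9 | B8 | B6 | 1
    //   == 0xEEB10B41, the ARM encoding of VNEG.F64 d0, d1.
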
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 4594ecb..22eee0f 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -554,6 +554,9 @@
   Assembler(void* buffer, int buffer_size);
   ~Assembler();
 
+  // Overrides the default provided by FLAG_debug_code.
+  void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
+
   // GetCode emits any pending (non-emitted) code and fills the descriptor
   // desc. GetCode() is idempotent; it returns the same result if no other
   // Assembler functions are invoked in between GetCode() calls.
@@ -992,6 +995,9 @@
                     VFPConversionMode mode = kDefaultRoundToZero,
                     const Condition cond = al);
 
+  void vneg(const DwVfpRegister dst,
+            const DwVfpRegister src,
+            const Condition cond = al);
   void vabs(const DwVfpRegister dst,
             const DwVfpRegister src,
             const Condition cond = al);
@@ -1151,6 +1157,8 @@
   void CheckConstPool(bool force_emit, bool require_jump);
 
  protected:
+  bool emit_debug_code() const { return emit_debug_code_; }
+
   int buffer_space() const { return reloc_info_writer.pos() - pc_; }
 
   // Read/patch instructions
@@ -1279,6 +1287,7 @@
 
   PositionsRecorder positions_recorder_;
   bool allow_peephole_optimization_;
+  bool emit_debug_code_;
   friend class PositionsRecorder;
   friend class EnsureSpace;
 };
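
A short usage sketch for the new override (the buffer arguments are
illustrative; passing NULL lets the assembler manage its own buffer):

    Assembler masm(NULL, 256);
    // Suppress Assert/Check code in this buffer even if --debug-code is on.
    masm.set_emit_debug_code(false);
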
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 31a7288..f80998a 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -635,64 +635,13 @@
   __ jmp(&done);
 
   __ bind(&not_in_int32_range);
-  __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset));
-  __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+  __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
+  __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
 
-  // Register scratch1 contains mantissa word, scratch2 contains
-  // sign, exponent and mantissa. Extract biased exponent into dst.
-  __ Ubfx(dst,
-          scratch2,
-          HeapNumber::kExponentShift,
-          HeapNumber::kExponentBits);
-
-  // Express exponent as delta to 31.
-  __ sub(dst, dst, Operand(HeapNumber::kExponentBias + 31));
-
-  Label normal_exponent;
-  // If the delta is larger than kMantissaBits plus one, all bits
-  // would be shifted away, which means that we can return 0.
-  __ cmp(dst, Operand(HeapNumber::kMantissaBits + 1));
-  __ b(&normal_exponent, lt);
-  __ mov(dst, Operand(0));
-  __ jmp(&done);
-
-  __ bind(&normal_exponent);
-  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
-  // Calculate shift.
-  __ add(scratch3, dst, Operand(kShiftBase));
-
-  // Put implicit 1 before the mantissa part in scratch2.
-  __ orr(scratch2,
-         scratch2,
-         Operand(1 << HeapNumber::kMantissaBitsInTopWord));
-
-  // Save sign.
-  Register sign = dst;
-  __ and_(sign, scratch2, Operand(HeapNumber::kSignMask));
-
-  // Shift mantisssa bits the correct position in high word.
-  __ mov(scratch2, Operand(scratch2, LSL, scratch3));
-
-  // Replace the shifted bits with bits from the lower mantissa word.
-  Label pos_shift, shift_done;
-  __ rsb(scratch3, scratch3, Operand(32), SetCC);
-  __ b(&pos_shift, ge);
-
-  // Negate scratch3.
-  __ rsb(scratch3, scratch3, Operand(0));
-  __ mov(scratch1, Operand(scratch1, LSL, scratch3));
-  __ jmp(&shift_done);
-
-  __ bind(&pos_shift);
-  __ mov(scratch1, Operand(scratch1, LSR, scratch3));
-
-  __ bind(&shift_done);
-  __ orr(scratch2, scratch2, Operand(scratch1));
-
-  // Restore sign if necessary.
-  __ cmp(sign, Operand(0));
-  __ rsb(dst, scratch2, Operand(0), LeaveCC, ne);
-  __ mov(dst, scratch2, LeaveCC, eq);
+  __ EmitOutOfInt32RangeTruncate(dst,
+                                 scratch1,
+                                 scratch2,
+                                 scratch3);
   __ jmp(&done);
 
   __ bind(&is_smi);
@@ -5086,7 +5035,7 @@
   __ CompareObjectType(r7, r0, r0, CODE_TYPE);
   __ b(ne, &runtime);
 
-  // r3: encoding of subject string (1 if ascii, 0 if two_byte);
+  // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
   // r7: code
   // subject: Subject string
   // regexp_data: RegExp data (FixedArray)
@@ -5096,7 +5045,7 @@
   __ mov(r1, Operand(r1, ASR, kSmiTagSize));
 
   // r1: previous index
-  // r3: encoding of subject string (1 if ascii, 0 if two_byte);
+  // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
   // r7: code
   // subject: Subject string
   // regexp_data: RegExp data (FixedArray)
@@ -5628,7 +5577,7 @@
   __ b(ne, &slow_case_);
 
   __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
-  // At this point code register contains smi tagged ascii char code.
+  // At this point code register contains smi tagged ASCII char code.
   STATIC_ASSERT(kSmiTag == 0);
   __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
   __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
@@ -5960,7 +5909,6 @@
   Register symbol_table = c2;
   __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
 
-  // Load undefined value
   Register undefined = scratch4;
   __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
 
@@ -5981,6 +5929,7 @@
   // mask:  capacity mask
   // first_symbol_table_element: address of the first element of
   //                             the symbol table
+  // undefined: the undefined object
   // scratch: -
 
   // Perform a number of probes in the symbol table.
@@ -6008,20 +5957,32 @@
                       kPointerSizeLog2));
 
     // If entry is undefined no string with this hash can be found.
-    __ cmp(candidate, undefined);
+    Label is_string;
+    __ CompareObjectType(candidate, scratch, scratch, ODDBALL_TYPE);
+    __ b(ne, &is_string);
+
+    __ cmp(undefined, candidate);
     __ b(eq, not_found);
+    // Must be null (deleted entry).
+    if (FLAG_debug_code) {
+      __ LoadRoot(ip, Heap::kNullValueRootIndex);
+      __ cmp(ip, candidate);
+      __ Assert(eq, "oddball in symbol table is not undefined or null");
+    }
+    __ jmp(&next_probe[i]);
+
+    __ bind(&is_string);
+
+    // Check that the candidate is a non-external ASCII string.  The instance
+    // type is still in the scratch register from the CompareObjectType
+    // operation.
+    __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
 
     // If length is not 2 the string is not a candidate.
     __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset));
     __ cmp(scratch, Operand(Smi::FromInt(2)));
     __ b(ne, &next_probe[i]);
 
-    // Check that the candidate is a non-external ascii string.
-    __ ldr(scratch, FieldMemOperand(candidate, HeapObject::kMapOffset));
-    __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
-    __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch,
-                                              &next_probe[i]);
-
     // Check if the two characters match.
     // Assumes that word load is little endian.
     __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
@@ -6177,7 +6138,7 @@
   // r3: from index (untagged smi)
   // r5: string.
   // r7 (a.k.a. from): from offset (smi)
-  // Check for flat ascii string.
+  // Check for flat ASCII string.
   Label non_ascii_flat;
   __ tst(r1, Operand(kStringEncodingMask));
   STATIC_ASSERT(kTwoByteStringTag == 0);
@@ -6353,10 +6314,10 @@
 
   __ bind(&not_same);
 
-  // Check that both objects are sequential ascii strings.
+  // Check that both objects are sequential ASCII strings.
   __ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime);
 
-  // Compare flat ascii strings natively. Remove arguments from stack first.
+  // Compare flat ASCII strings natively. Remove arguments from stack first.
   __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
   __ add(sp, sp, Operand(2 * kPointerSize));
   GenerateCompareFlatAsciiStrings(masm, r1, r0, r2, r3, r4, r5);
@@ -6448,12 +6409,12 @@
   // Adding two lengths can't overflow.
   STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
   __ add(r6, r2, Operand(r3));
-  // Use the runtime system when adding two one character strings, as it
-  // contains optimizations for this specific case using the symbol table.
+  // Use the symbol table when adding two one-character strings, as it
+  // helps later optimizations if a symbol is returned here.
   __ cmp(r6, Operand(2));
   __ b(ne, &longer_than_two);
 
-  // Check that both strings are non-external ascii strings.
+  // Check that both strings are non-external ASCII strings.
   if (flags_ != NO_STRING_ADD_FLAGS) {
     __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
     __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
@@ -6501,7 +6462,7 @@
   __ b(hs, &string_add_runtime);
 
   // If result is not supposed to be flat, allocate a cons string object.
-  // If both strings are ascii the result is an ascii cons string.
+  // If both strings are ASCII the result is an ASCII cons string.
   if (flags_ != NO_STRING_ADD_FLAGS) {
     __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
     __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
@@ -6528,7 +6489,7 @@
 
   __ bind(&non_ascii);
   // At least one of the strings is two-byte. Check whether it happens
-  // to contain only ascii characters.
+  // to contain only ASCII characters.
   // r4: first instance type.
   // r5: second instance type.
   __ tst(r4, Operand(kAsciiDataHintMask));
@@ -6709,56 +6670,6 @@
 }
 
 
-void StringCharAtStub::Generate(MacroAssembler* masm) {
-  // Expects two arguments (object, index) on the stack:
-  //  lr: return address
-  //  sp[0]: index
-  //  sp[4]: object
-  Register object = r1;
-  Register index = r0;
-  Register scratch1 = r2;
-  Register scratch2 = r3;
-  Register result = r0;
-
-  // Get object and index from the stack.
-  __ pop(index);
-  __ pop(object);
-
-  Label need_conversion;
-  Label index_out_of_range;
-  Label done;
-  StringCharAtGenerator generator(object,
-                                  index,
-                                  scratch1,
-                                  scratch2,
-                                  result,
-                                  &need_conversion,
-                                  &need_conversion,
-                                  &index_out_of_range,
-                                  STRING_INDEX_IS_NUMBER);
-  generator.GenerateFast(masm);
-  __ b(&done);
-
-  __ bind(&index_out_of_range);
-  // When the index is out of range, the spec requires us to return
-  // the empty string.
-  __ LoadRoot(result, Heap::kEmptyStringRootIndex);
-  __ jmp(&done);
-
-  __ bind(&need_conversion);
-  // Move smi zero into the result register, which will trigger
-  // conversion.
-  __ mov(result, Operand(Smi::FromInt(0)));
-  __ b(&done);
-
-  StubRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm, call_helper);
-
-  __ bind(&done);
-  __ Ret();
-}
-
-
 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
   ASSERT(state_ == CompareIC::SMIS);
   Label miss;
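
For readability, a C++ model of the revised symbol-table probe loop above
(helper names are illustrative; the ARM assembly is the authoritative version):

    // Each slot holds undefined (never used), null (deleted), or a string.
    for (int i = 0; i < kProbes; i++) {
      Object* candidate = SlotAt(symbol_table, hash, i);
      if (IsOddball(candidate)) {
        if (candidate == undefined_value) return NotFound();  // Chain ends.
        // Otherwise it must be null, a deleted entry: keep probing.
        continue;
      }
      // A string: check encoding, then length == 2, then both characters.
      if (IsSequentialAsciiString(candidate) &&
          LengthOf(candidate) == 2 &&
          FirstTwoCharsOf(candidate) == chars) {
        return candidate;
      }
    }
    return NotFound();
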
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 4f243ea..364f652 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -1159,7 +1159,7 @@
   }
   // Check that the *signed* result fits in a smi. Not necessary for AND
   // with a nonnegative constant, SAR (shift > 0), or SHR (shift > 1).
-  if (!( (op_ == Token::AND) ||
+  if (!( (op_ == Token::AND && value_ >= 0) ||
         ((op_ == Token::SAR) && (shift_value > 0)) ||
         ((op_ == Token::SHR) && (shift_value > 1)))) {
     __ add(r3, int32, Operand(0x40000000), SetCC);
@@ -1420,8 +1420,10 @@
           default: UNREACHABLE();
         }
         deferred->BindExit();
-        TypeInfo result_type =
-            (op == Token::BIT_AND) ? TypeInfo::Smi() : TypeInfo::Integer32();
+        TypeInfo result_type = TypeInfo::Integer32();
+        if (op == Token::BIT_AND && int_value >= 0) {
+          result_type = TypeInfo::Smi();
+        }
         frame_->EmitPush(tos, result_type);
       }
       break;
@@ -5584,8 +5586,8 @@
   // Fetch the map and check if array is in fast case.
   // Check that object doesn't require security checks and
   // has no indexed interceptor.
-  __ CompareObjectType(object, tmp1, tmp2, FIRST_JS_OBJECT_TYPE);
-  deferred->Branch(lt);
+  __ CompareObjectType(object, tmp1, tmp2, JS_ARRAY_TYPE);
+  deferred->Branch(ne);
   __ ldrb(tmp2, FieldMemOperand(tmp1, Map::kBitFieldOffset));
   __ tst(tmp2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
   deferred->Branch(ne);
@@ -7139,7 +7141,6 @@
                         scratch1, scratch2);
 
 
-
     // Load the value, key and receiver from the stack.
     bool value_is_harmless = frame_->KnownSmiAt(0);
     if (wb_info == NEVER_NEWSPACE) value_is_harmless = true;
@@ -7187,12 +7188,6 @@
     __ CompareObjectType(receiver, scratch1, scratch1, JS_ARRAY_TYPE);
     deferred->Branch(ne);
 
-    // Check that the key is within bounds. Both the key and the length of
-    // the JSArray are smis. Use unsigned comparison to handle negative keys.
-    __ ldr(scratch1, FieldMemOperand(receiver, JSArray::kLengthOffset));
-    __ cmp(scratch1, key);
-    deferred->Branch(ls);  // Unsigned less equal.
-
     // Get the elements array from the receiver.
     __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
     if (!value_is_harmless && wb_info != LIKELY_SMI) {
@@ -7207,6 +7202,7 @@
     }
     // Check that the elements array is not a dictionary.
     __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
+
     // The following instructions are the part of the inlined store keyed
     // property code which can be patched. Therefore the exact number of
     // instructions generated need to be fixed, so the constant pool is blocked
@@ -7226,6 +7222,14 @@
       __ cmp(scratch2, scratch3);
       deferred->Branch(ne);
 
+      // Check that the key is within bounds.  Both the key and the length of
+      // the JSArray are smis (because the fixed array check above ensures the
+      // elements are in fast case). Use unsigned comparison to handle negative
+      // keys.
+      __ ldr(scratch3, FieldMemOperand(receiver, JSArray::kLengthOffset));
+      __ cmp(scratch3, key);
+      deferred->Branch(ls);  // Unsigned less equal.
+
       // Store the value.
       __ add(scratch1, scratch1,
              Operand(FixedArray::kHeaderSize - kHeapObjectTag));
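
Why the new value_ >= 0 guard on Token::AND matters (a worked example, not
part of the patch):

    // Nonnegative mask: x & mask <= mask < 2^30, so the result is a smi.
    // Negative mask:    x & -1 == x, which can be any int32, e.g.
    //                   0x40000000 & -1 == 0x40000000, outside smi range.
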
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 8f46256..7d48a12 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -268,7 +268,7 @@
   static int GetInlinedKeyedLoadInstructionsAfterPatch() {
     return FLAG_debug_code ? 32 : 13;
   }
-  static const int kInlinedKeyedStoreInstructionsAfterPatch = 5;
+  static const int kInlinedKeyedStoreInstructionsAfterPatch = 8;
   static int GetInlinedNamedStoreInstructionsAfterPatch() {
     ASSERT(inlined_write_barrier_size_ != -1);
     return inlined_write_barrier_size_ + 4;
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index e6033a8..3adb916 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -388,9 +388,11 @@
 // This mask does not include the "inexact" or "input denormal" cumulative
 // exceptions flags, because we usually don't want to check for it.
 static const uint32_t kVFPExceptionMask = 0xf;
+static const uint32_t kVFPInvalidOpExceptionBit = 1 << 0;
+static const uint32_t kVFPOverflowExceptionBit = 1 << 2;
+static const uint32_t kVFPUnderflowExceptionBit = 1 << 3;
 static const uint32_t kVFPInexactExceptionBit = 1 << 4;
 static const uint32_t kVFPFlushToZeroMask = 1 << 24;
-static const uint32_t kVFPInvalidExceptionBit = 1;
 
 static const uint32_t kVFPNConditionFlagBit = 1 << 31;
 static const uint32_t kVFPZConditionFlagBit = 1 << 30;
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
index f19e693..22640ca 100644
--- a/src/arm/debug-arm.cc
+++ b/src/arm/debug-arm.cc
@@ -115,7 +115,7 @@
   patcher.masm()->mov(v8::internal::lr, v8::internal::pc);
   patcher.masm()->ldr(v8::internal::pc, MemOperand(v8::internal::pc, -4));
 #endif
-  patcher.Emit(Debug::debug_break_return()->entry());
+  patcher.Emit(Debug::debug_break_slot()->entry());
 }
 
 
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 08f605b..f154839 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -1067,6 +1067,9 @@
       } else if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x3)) {
         // vabs
         Format(instr, "vabs'cond 'Dd, 'Dm");
+      } else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
+        // vneg
+        Format(instr, "vneg'cond 'Dd, 'Dm");
       } else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
         DecodeVCVTBetweenDoubleAndSingle(instr);
       } else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 84cca62..369a0b8 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -779,9 +779,9 @@
              prop->key()->AsLiteral()->handle()->IsSmi());
       __ mov(r1, Operand(prop->key()->AsLiteral()->handle()));
 
-      Handle<Code> ic(Builtins::builtin(is_strict()
-          ? Builtins::KeyedStoreIC_Initialize_Strict
-          : Builtins::KeyedStoreIC_Initialize));
+      Handle<Code> ic(Builtins::builtin(
+          is_strict_mode() ? Builtins::KeyedStoreIC_Initialize_Strict
+                           : Builtins::KeyedStoreIC_Initialize));
       EmitCallIC(ic, RelocInfo::CODE_TARGET);
       // Value in r0 is ignored (declarations are statements).
     }
@@ -1604,27 +1604,26 @@
       break;
   }
 
+  // For compound assignments we need another deoptimization point after the
+  // variable/property load.
   if (expr->is_compound()) {
     { AccumulatorValueContext context(this);
       switch (assign_type) {
         case VARIABLE:
           EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+          PrepareForBailout(expr->target(), TOS_REG);
           break;
         case NAMED_PROPERTY:
           EmitNamedPropertyLoad(property);
+          PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
           break;
         case KEYED_PROPERTY:
           EmitKeyedPropertyLoad(property);
+          PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
           break;
       }
     }
 
-    // For property compound assignments we need another deoptimization
-    // point after the property load.
-    if (property != NULL) {
-      PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
-    }
-
     Token::Value op = expr->binary_op();
     __ push(r0);  // Left operand goes on the stack.
     VisitForAccumulatorValue(expr->value());
@@ -1830,8 +1829,8 @@
       __ pop(r0);  // Restore value.
       __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
       Handle<Code> ic(Builtins::builtin(
-          is_strict() ? Builtins::StoreIC_Initialize_Strict
-                      : Builtins::StoreIC_Initialize));
+          is_strict_mode() ? Builtins::StoreIC_Initialize_Strict
+                           : Builtins::StoreIC_Initialize));
       EmitCallIC(ic, RelocInfo::CODE_TARGET);
       break;
     }
@@ -1853,8 +1852,8 @@
       }
       __ pop(r0);  // Restore value.
       Handle<Code> ic(Builtins::builtin(
-          is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
-                      : Builtins::KeyedStoreIC_Initialize));
+          is_strict_mode() ? Builtins::KeyedStoreIC_Initialize_Strict
+                           : Builtins::KeyedStoreIC_Initialize));
       EmitCallIC(ic, RelocInfo::CODE_TARGET);
       break;
     }
@@ -1879,8 +1878,8 @@
     __ mov(r2, Operand(var->name()));
     __ ldr(r1, GlobalObjectOperand());
     Handle<Code> ic(Builtins::builtin(
-        is_strict() ? Builtins::StoreIC_Initialize_Strict
-                    : Builtins::StoreIC_Initialize));
+        is_strict_mode() ? Builtins::StoreIC_Initialize_Strict
+                         : Builtins::StoreIC_Initialize));
     EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
 
   } else if (op == Token::INIT_CONST) {
@@ -1988,8 +1987,8 @@
   }
 
   Handle<Code> ic(Builtins::builtin(
-      is_strict() ? Builtins::StoreIC_Initialize_Strict
-                  : Builtins::StoreIC_Initialize));
+      is_strict_mode() ? Builtins::StoreIC_Initialize_Strict
+                       : Builtins::StoreIC_Initialize));
   EmitCallIC(ic, RelocInfo::CODE_TARGET);
 
   // If the assignment ends an initialization block, revert to fast case.
@@ -2034,8 +2033,8 @@
   }
 
   Handle<Code> ic(Builtins::builtin(
-      is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
-                  : Builtins::KeyedStoreIC_Initialize));
+      is_strict_mode() ? Builtins::KeyedStoreIC_Initialize_Strict
+                       : Builtins::KeyedStoreIC_Initialize));
   EmitCallIC(ic, RelocInfo::CODE_TARGET);
 
   // If the assignment ends an initialization block, revert to fast case.
@@ -3131,8 +3130,8 @@
   // Fetch the map and check if array is in fast case.
   // Check that object doesn't require security checks and
   // has no indexed interceptor.
-  __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_OBJECT_TYPE);
-  __ b(lt, &slow_case);
+  __ CompareObjectType(object, scratch1, scratch2, JS_ARRAY_TYPE);
+  __ b(ne, &slow_case);
   // Map is now in scratch1.
 
   __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
@@ -3805,7 +3804,11 @@
 
   // We need a second deoptimization point after loading the value
   // in case evaluating the property load may have a side effect.
-  PrepareForBailout(expr->increment(), TOS_REG);
+  if (assign_type == VARIABLE) {
+    PrepareForBailout(expr->expression(), TOS_REG);
+  } else {
+    PrepareForBailout(expr->increment(), TOS_REG);
+  }
 
   // Call ToNumber only if operand is not a smi.
   Label no_conversion;
@@ -3886,8 +3889,8 @@
       __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
       __ pop(r1);
       Handle<Code> ic(Builtins::builtin(
-          is_strict() ? Builtins::StoreIC_Initialize_Strict
-                      : Builtins::StoreIC_Initialize));
+          is_strict_mode() ? Builtins::StoreIC_Initialize_Strict
+                           : Builtins::StoreIC_Initialize));
       EmitCallIC(ic, RelocInfo::CODE_TARGET);
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
@@ -3903,8 +3906,8 @@
       __ pop(r1);  // Key.
       __ pop(r2);  // Receiver.
       Handle<Code> ic(Builtins::builtin(
-          is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
-                      : Builtins::KeyedStoreIC_Initialize));
+          is_strict_mode() ? Builtins::KeyedStoreIC_Initialize_Strict
+                           : Builtins::KeyedStoreIC_Initialize));
       EmitCallIC(ic, RelocInfo::CODE_TARGET);
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index a9b33a0..3327cd8 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -1610,12 +1610,15 @@
       LOperand* value = UseRegister(instr->value());
       bool needs_check = !instr->value()->type().IsSmi();
       LInstruction* res = NULL;
-      if (needs_check) {
-        res = DefineSameAsFirst(new LTaggedToI(value, FixedTemp(d1)));
-      } else {
+      if (!needs_check) {
         res = DefineSameAsFirst(new LSmiUntag(value, needs_check));
-      }
-      if (needs_check) {
+      } else {
+        LOperand* temp1 = TempRegister();
+        LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
+                                                      : NULL;
+        LOperand* temp3 = instr->CanTruncateToInt32() ? FixedTemp(d3)
+                                                      : NULL;
+        res = DefineSameAsFirst(new LTaggedToI(value, temp1, temp2, temp3));
         res = AssignEnvironment(res);
       }
       return res;
@@ -1635,7 +1638,10 @@
     } else {
       ASSERT(to.IsInteger32());
       LOperand* value = UseRegister(instr->value());
-      LDoubleToI* res = new LDoubleToI(value, TempRegister());
+      LDoubleToI* res =
+        new LDoubleToI(value,
+                       TempRegister(),
+                       instr->CanTruncateToInt32() ? TempRegister() : NULL);
       return AssignEnvironment(DefineAsRegister(res));
     }
   } else if (from.IsInteger32()) {
@@ -1661,7 +1667,7 @@
 
 LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  return AssignEnvironment(new LCheckSmi(value, eq));
+  return AssignEnvironment(new LCheckNonSmi(value));
 }
 
 
@@ -1682,7 +1688,7 @@
 
 LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  return AssignEnvironment(new LCheckSmi(value, ne));
+  return AssignEnvironment(new LCheckSmi(value));
 }
 
 
@@ -1719,14 +1725,21 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
-  LLoadGlobal* result = new LLoadGlobal();
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+  LLoadGlobalCell* result = new LLoadGlobalCell();
   return instr->check_hole_value()
       ? AssignEnvironment(DefineAsRegister(result))
       : DefineAsRegister(result);
 }
 
 
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+  LOperand* global_object = UseFixed(instr->global_object(), r0);
+  LLoadGlobalGeneric* result = new LLoadGlobalGeneric(global_object);
+  return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
 LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
   if (instr->check_hole_value()) {
     LOperand* temp = TempRegister();
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 54faafd..d5c4051 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -69,6 +69,7 @@
   V(CallStub)                                   \
   V(CheckFunction)                              \
   V(CheckInstanceType)                          \
+  V(CheckNonSmi)                                \
   V(CheckMap)                                   \
   V(CheckPrototypeMaps)                         \
   V(CheckSmi)                                   \
@@ -118,7 +119,8 @@
   V(LoadElements)                               \
   V(LoadExternalArrayPointer)                   \
   V(LoadFunctionPrototype)                      \
-  V(LoadGlobal)                                 \
+  V(LoadGlobalCell)                             \
+  V(LoadGlobalGeneric)                          \
   V(LoadKeyedFastElement)                       \
   V(LoadKeyedGeneric)                           \
   V(LoadNamedField)                             \
@@ -1221,10 +1223,25 @@
 };
 
 
-class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
  public:
-  DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global")
-  DECLARE_HYDROGEN_ACCESSOR(LoadGlobal)
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
+};
+
+
+class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadGlobalGeneric(LOperand* global_object) {
+    inputs_[0] = global_object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+  LOperand* global_object() { return inputs_[0]; }
+  Handle<Object> name() const { return hydrogen()->name(); }
+  bool for_typeof() const { return hydrogen()->for_typeof(); }
 };
 
 
@@ -1458,11 +1475,12 @@
 
 
 // Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI: public LTemplateInstruction<1, 1, 1> {
+class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
  public:
-  explicit LDoubleToI(LOperand* value, LOperand* temp1) {
+  LDoubleToI(LOperand* value, LOperand* temp1, LOperand* temp2) {
     inputs_[0] = value;
     temps_[0] = temp1;
+    temps_[1] = temp2;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
@@ -1473,11 +1491,16 @@
 
 
 // Truncating conversion from a tagged value to an int32.
-class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
+class LTaggedToI: public LTemplateInstruction<1, 1, 3> {
  public:
-  LTaggedToI(LOperand* value, LOperand* temp) {
+  LTaggedToI(LOperand* value,
+             LOperand* temp1,
+             LOperand* temp2,
+             LOperand* temp3) {
     inputs_[0] = value;
-    temps_[0] = temp;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+    temps_[2] = temp3;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
@@ -1712,20 +1735,21 @@
 
 class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
  public:
-  LCheckSmi(LOperand* value, Condition condition)
-      : condition_(condition) {
+  explicit LCheckSmi(LOperand* value) {
     inputs_[0] = value;
   }
 
-  Condition condition() const { return condition_; }
+  DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
 
-  virtual void CompileToNative(LCodeGen* generator);
-  virtual const char* Mnemonic() const {
-    return (condition_ == eq) ? "check-non-smi" : "check-smi";
+
+class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckNonSmi(LOperand* value) {
+    inputs_[0] = value;
   }
 
- private:
-  Condition condition_;
+  DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
 };
 
 
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index f06e7a0..3d8d0ba 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -757,11 +757,6 @@
       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
       break;
     }
-    case CodeStub::StringCharAt: {
-      StringCharAtStub stub;
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-      break;
-    }
     case CodeStub::NumberToString: {
       NumberToStringStub stub;
       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@@ -2111,7 +2106,7 @@
 }
 
 
-void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
   Register result = ToRegister(instr->result());
   __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
   __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
@@ -2123,6 +2118,18 @@
 }
 
 
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+  ASSERT(ToRegister(instr->global_object()).is(r0));
+  ASSERT(ToRegister(instr->result()).is(r0));
+
+  __ mov(r2, Operand(instr->name()));
+  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
+                                               RelocInfo::CODE_TARGET_CONTEXT;
+  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+  CallCode(ic, mode, instr);
+}
+
+
 void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
   Register value = ToRegister(instr->InputAt(0));
   Register scratch = scratch0();
@@ -3385,19 +3392,30 @@
 
 
 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
-  Label done;
   Register input_reg = ToRegister(instr->InputAt(0));
-  Register scratch = scratch0();
-  DoubleRegister dbl_scratch = d0;
-  SwVfpRegister flt_scratch = s0;
-  DoubleRegister dbl_tmp = ToDoubleRegister(instr->TempAt(0));
+  Register scratch1 = scratch0();
+  Register scratch2 = ToRegister(instr->TempAt(0));
+  DwVfpRegister double_scratch = double_scratch0();
+  SwVfpRegister single_scratch = double_scratch.low();
+
+  ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
+  ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
+
+  Label done;
 
   // Heap number map check.
-  __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+  __ ldr(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
   __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
-  __ cmp(scratch, Operand(ip));
+  __ cmp(scratch1, Operand(ip));
 
   if (instr->truncating()) {
+    Register scratch3 = ToRegister(instr->TempAt(1));
+    DwVfpRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2));
+    ASSERT(!scratch3.is(input_reg) &&
+           !scratch3.is(scratch1) &&
+           !scratch3.is(scratch2));
+    // Performs a truncating conversion of a floating point number as used by
+    // the JS bitwise operations.
     Label heap_number;
     __ b(eq, &heap_number);
     // Check for undefined. Undefined is converted to zero for truncating
@@ -3409,36 +3427,38 @@
     __ b(&done);
 
     __ bind(&heap_number);
-    __ sub(ip, input_reg, Operand(kHeapObjectTag));
-    __ vldr(dbl_tmp, ip, HeapNumber::kValueOffset);
-    __ vcmp(dbl_tmp, 0.0);  // Sets overflow bit in FPSCR flags if NaN.
-    __ vcvt_s32_f64(flt_scratch, dbl_tmp);
-    __ vmov(input_reg, flt_scratch);  // 32-bit result of conversion.
-    __ vmrs(pc);  // Move vector status bits to normal status bits.
-    // Overflow bit is set if dbl_tmp is Nan.
-    __ cmn(input_reg, Operand(1), vc);  // 0x7fffffff + 1 -> overflow.
-    __ cmp(input_reg, Operand(1), vc);  // 0x80000000 - 1 -> overflow.
-    DeoptimizeIf(vs, instr->environment());  // Saturation may have occured.
+    __ sub(scratch1, input_reg, Operand(kHeapObjectTag));
+    __ vldr(double_scratch2, scratch1, HeapNumber::kValueOffset);
+
+    __ EmitECMATruncate(input_reg,
+                        double_scratch2,
+                        single_scratch,
+                        scratch1,
+                        scratch2,
+                        scratch3);
 
   } else {
+    CpuFeatures::Scope scope(VFP3);
     // Deoptimize if we don't have a heap number.
     DeoptimizeIf(ne, instr->environment());
 
     __ sub(ip, input_reg, Operand(kHeapObjectTag));
-    __ vldr(dbl_tmp, ip, HeapNumber::kValueOffset);
-    __ vcvt_s32_f64(flt_scratch, dbl_tmp);
-    __ vmov(input_reg, flt_scratch);  // 32-bit result of conversion.
-    // Non-truncating conversion means that we cannot lose bits, so we convert
-    // back to check; note that using non-overlapping s and d regs would be
-    // slightly faster.
-    __ vcvt_f64_s32(dbl_scratch, flt_scratch);
-    __ VFPCompareAndSetFlags(dbl_scratch, dbl_tmp);
-    DeoptimizeIf(ne, instr->environment());  // Not equal or unordered.
+    __ vldr(double_scratch, ip, HeapNumber::kValueOffset);
+    __ EmitVFPTruncate(kRoundToZero,
+                       single_scratch,
+                       double_scratch,
+                       scratch1,
+                       scratch2,
+                       kCheckForInexactConversion);
+    DeoptimizeIf(ne, instr->environment());
+    // Load the result.
+    __ vmov(input_reg, single_scratch);
+
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      __ tst(input_reg, Operand(input_reg));
+      __ cmp(input_reg, Operand(0));
       __ b(ne, &done);
-      __ vmov(lr, ip, dbl_tmp);
-      __ tst(ip, Operand(1 << 31));  // Test sign bit.
+      __ vmov(scratch1, double_scratch.high());
+      __ tst(scratch1, Operand(HeapNumber::kSignMask));
       DeoptimizeIf(ne, instr->environment());
     }
   }
@@ -3480,56 +3500,52 @@
 
 
 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
-  LOperand* input = instr->InputAt(0);
-  ASSERT(input->IsDoubleRegister());
-  LOperand* result = instr->result();
-  ASSERT(result->IsRegister());
-
-  DoubleRegister double_input = ToDoubleRegister(input);
-  Register result_reg = ToRegister(result);
-  SwVfpRegister single_scratch = double_scratch0().low();
+  Register result_reg = ToRegister(instr->result());
   Register scratch1 = scratch0();
   Register scratch2 = ToRegister(instr->TempAt(0));
+  DwVfpRegister double_input = ToDoubleRegister(instr->InputAt(0));
+  DwVfpRegister double_scratch = double_scratch0();
+  SwVfpRegister single_scratch = double_scratch0().low();
 
-  __ EmitVFPTruncate(kRoundToZero,
-                     single_scratch,
-                     double_input,
-                     scratch1,
-                     scratch2);
+  Label done;
 
-  // Deoptimize if we had a vfp invalid exception.
-  DeoptimizeIf(ne, instr->environment());
-
-  // Retrieve the result.
-  __ vmov(result_reg, single_scratch);
-
-  if (!instr->truncating()) {
-    // Convert result back to double and compare with input
-    // to check if the conversion was exact.
-    __ vmov(single_scratch, result_reg);
-    __ vcvt_f64_s32(double_scratch0(), single_scratch);
-    __ VFPCompareAndSetFlags(double_scratch0(), double_input);
+  if (instr->truncating()) {
+    Register scratch3 = ToRegister(instr->TempAt(1));
+    __ EmitECMATruncate(result_reg,
+                        double_input,
+                        single_scratch,
+                        scratch1,
+                        scratch2,
+                        scratch3);
+  } else {
+    VFPRoundingMode rounding_mode = kRoundToMinusInf;
+    __ EmitVFPTruncate(rounding_mode,
+                       single_scratch,
+                       double_input,
+                       scratch1,
+                       scratch2,
+                       kCheckForInexactConversion);
+    // Deoptimize if we had a vfp invalid exception,
+    // including inexact operation.
     DeoptimizeIf(ne, instr->environment());
-    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      Label done;
-      __ cmp(result_reg, Operand(0));
-      __ b(ne, &done);
-      // Check for -0.
-      __ vmov(scratch1, double_input.high());
-      __ tst(scratch1, Operand(HeapNumber::kSignMask));
-      DeoptimizeIf(ne, instr->environment());
-
-      __ bind(&done);
-    }
+    // Retrieve the result.
+    __ vmov(result_reg, single_scratch);
   }
+  __ bind(&done);
 }
 
 
 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
   LOperand* input = instr->InputAt(0);
-  ASSERT(input->IsRegister());
   __ tst(ToRegister(input), Operand(kSmiTagMask));
-  DeoptimizeIf(instr->condition(), instr->environment());
+  DeoptimizeIf(ne, instr->environment());
+}
+
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+  LOperand* input = instr->InputAt(0);
+  __ tst(ToRegister(input), Operand(kSmiTagMask));
+  DeoptimizeIf(eq, instr->environment());
 }
 
 
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 916110a..9e5417f 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -433,7 +433,7 @@
 void MacroAssembler::RecordWriteHelper(Register object,
                                        Register address,
                                        Register scratch) {
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     // Check that the object is not in new space.
     Label not_in_new_space;
     InNewSpace(object, scratch, ne, &not_in_new_space);
@@ -495,7 +495,7 @@
 
   // Clobber all input registers when running with the debug-code flag
   // turned on to provoke errors.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     mov(object, Operand(BitCast<int32_t>(kZapValue)));
     mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
     mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
@@ -527,7 +527,7 @@
 
   // Clobber all input registers when running with the debug-code flag
   // turned on to provoke errors.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     mov(object, Operand(BitCast<int32_t>(kZapValue)));
     mov(address, Operand(BitCast<int32_t>(kZapValue)));
     mov(scratch, Operand(BitCast<int32_t>(kZapValue)));
@@ -731,7 +731,7 @@
   mov(fp, Operand(sp));  // Setup new frame pointer.
   // Reserve room for saved entry sp and code object.
   sub(sp, sp, Operand(2 * kPointerSize));
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     mov(ip, Operand(0));
     str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
   }
@@ -1137,7 +1137,7 @@
   // Restore cp otherwise.
   ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
 #ifdef DEBUG
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     mov(lr, Operand(pc));
   }
 #endif
@@ -1210,7 +1210,7 @@
   // Restore cp otherwise.
   ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
 #ifdef DEBUG
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     mov(lr, Operand(pc));
   }
 #endif
@@ -1242,7 +1242,7 @@
   ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
 
   // Check the context is a global context.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     // TODO(119): avoid push(holder_reg)/pop(holder_reg)
     // Cannot use ip as a temporary in this verification code. Due to the fact
     // that ip is clobbered as part of cmp with an object Operand.
@@ -1261,7 +1261,7 @@
   b(eq, &same_contexts);
 
   // Check the context is a global context.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     // TODO(119): avoid push(holder_reg)/pop(holder_reg)
     // Cannot use ip as a temporary in this verification code. Due to the fact
     // that ip is clobbered as part of cmp with an object Operand.
@@ -1303,7 +1303,7 @@
                                         Label* gc_required,
                                         AllocationFlags flags) {
   if (!FLAG_inline_new) {
-    if (FLAG_debug_code) {
+    if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
       mov(result, Operand(0x7091));
       mov(scratch1, Operand(0x7191));
@@ -1352,7 +1352,7 @@
     // Load allocation top into result and allocation limit into ip.
     ldm(ia, topaddr, result.bit() | ip.bit());
   } else {
-    if (FLAG_debug_code) {
+    if (emit_debug_code()) {
       // Assert that result actually contains top on entry. ip is used
       // immediately below so this use of ip does not cause difference with
       // respect to register content between debug and release mode.
@@ -1386,7 +1386,7 @@
                                         Label* gc_required,
                                         AllocationFlags flags) {
   if (!FLAG_inline_new) {
-    if (FLAG_debug_code) {
+    if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
       mov(result, Operand(0x7091));
       mov(scratch1, Operand(0x7191));
@@ -1430,7 +1430,7 @@
     // Load allocation top into result and allocation limit into ip.
     ldm(ia, topaddr, result.bit() | ip.bit());
   } else {
-    if (FLAG_debug_code) {
+    if (emit_debug_code()) {
       // Assert that result actually contains top on entry. ip is used
       // immediately below so this use of ip does not cause difference with
       // respect to register content between debug and release mode.
@@ -1455,7 +1455,7 @@
   b(hi, gc_required);
 
   // Update allocation top. result temporarily holds the new top.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     tst(scratch2, Operand(kObjectAlignmentMask));
     Check(eq, "Unaligned allocation in new space");
   }
@@ -1759,7 +1759,7 @@
   // No more valid handles (the result handle was the last one). Restore
   // previous handle scope.
   str(r4, MemOperand(r7, kNextOffset));
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     ldr(r1, MemOperand(r7, kLevelOffset));
     cmp(r1, r6);
     Check(eq, "Unexpected level after return from api call");
@@ -2032,6 +2032,121 @@
 }
 
 
+void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
+                                                 Register input_high,
+                                                 Register input_low,
+                                                 Register scratch) {
+  Label done, normal_exponent, restore_sign;
+
+  // Extract the biased exponent in result.
+  Ubfx(result,
+       input_high,
+       HeapNumber::kExponentShift,
+       HeapNumber::kExponentBits);
+
+  // Check for Infinity and NaNs, which should return 0.
+  cmp(result, Operand(HeapNumber::kExponentMask));
+  mov(result, Operand(0), LeaveCC, eq);
+  b(eq, &done);
+
+  // Express exponent as delta to (number of mantissa bits + 31).
+  sub(result,
+      result,
+      Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31),
+      SetCC);
+
+  // If the delta is strictly positive, all bits would be shifted away,
+  // which means that we can return 0.
+  b(le, &normal_exponent);
+  mov(result, Operand(0));
+  b(&done);
+
+  bind(&normal_exponent);
+  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
+  // Calculate shift.
+  add(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits), SetCC);
+
+  // Save the sign.
+  Register sign = result;
+  result = no_reg;
+  and_(sign, input_high, Operand(HeapNumber::kSignMask));
+
+  // Set the implicit 1 before the mantissa part in input_high.
+  orr(input_high,
+      input_high,
+      Operand(1 << HeapNumber::kMantissaBitsInTopWord));
+  // Shift the mantissa bits to the correct position.
+  // We don't need to clear non-mantissa bits as they will be shifted away.
+  // If they weren't, it would mean that the answer is in the 32bit range.
+  mov(input_high, Operand(input_high, LSL, scratch));
+
+  // Replace the shifted bits with bits from the lower mantissa word.
+  Label pos_shift, shift_done;
+  rsb(scratch, scratch, Operand(32), SetCC);
+  b(&pos_shift, ge);
+
+  // Negate scratch.
+  rsb(scratch, scratch, Operand(0));
+  mov(input_low, Operand(input_low, LSL, scratch));
+  b(&shift_done);
+
+  bind(&pos_shift);
+  mov(input_low, Operand(input_low, LSR, scratch));
+
+  bind(&shift_done);
+  orr(input_high, input_high, Operand(input_low));
+  // Restore sign if necessary.
+  cmp(sign, Operand(0));
+  result = sign;
+  sign = no_reg;
+  rsb(result, input_high, Operand(0), LeaveCC, ne);
+  mov(result, input_high, LeaveCC, eq);
+  bind(&done);
+}
+
+
+void MacroAssembler::EmitECMATruncate(Register result,
+                                      DwVfpRegister double_input,
+                                      SwVfpRegister single_scratch,
+                                      Register scratch,
+                                      Register input_high,
+                                      Register input_low) {
+  CpuFeatures::Scope scope(VFP3);
+  ASSERT(!input_high.is(result));
+  ASSERT(!input_low.is(result));
+  ASSERT(!input_low.is(input_high));
+  ASSERT(!scratch.is(result) &&
+         !scratch.is(input_high) &&
+         !scratch.is(input_low));
+  ASSERT(!single_scratch.is(double_input.low()) &&
+         !single_scratch.is(double_input.high()));
+
+  Label done;
+
+  // Clear cumulative exception flags.
+  ClearFPSCRBits(kVFPExceptionMask, scratch);
+  // Try a conversion to a signed integer.
+  vcvt_s32_f64(single_scratch, double_input);
+  vmov(result, single_scratch);
+  // Retrieve the FPSCR.
+  vmrs(scratch);
+  // Check for overflow and NaNs.
+  tst(scratch, Operand(kVFPOverflowExceptionBit |
+                       kVFPUnderflowExceptionBit |
+                       kVFPInvalidOpExceptionBit));
+  // If we had no exceptions we are done.
+  b(eq, &done);
+
+  // Load the double value and perform a manual truncation.
+  vmov(input_low, input_high, double_input);
+  EmitOutOfInt32RangeTruncate(result,
+                              input_high,
+                              input_low,
+                              scratch);
+  bind(&done);
+}
+
+
 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
                                          Register src,
                                          int num_least_bits) {
@@ -2220,14 +2335,14 @@
 
 
 void MacroAssembler::Assert(Condition cond, const char* msg) {
-  if (FLAG_debug_code)
+  if (emit_debug_code())
     Check(cond, msg);
 }
 
 
 void MacroAssembler::AssertRegisterIsRoot(Register reg,
                                           Heap::RootListIndex index) {
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     LoadRoot(ip, index);
     cmp(reg, ip);
     Check(eq, "Register did not match expected root");
@@ -2236,7 +2351,7 @@
 
 
 void MacroAssembler::AssertFastElements(Register elements) {
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     ASSERT(!elements.is(ip));
     Label ok;
     push(elements);
@@ -2324,7 +2439,7 @@
   // (i.e., the static scope chain and runtime context chain do not agree).
   // A variable occurring in such a scope should have slot type LOOKUP and
   // not CONTEXT.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     ldr(ip, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
     cmp(dst, ip);
     Check(eq, "Yo dawg, I heard you liked function contexts "
@@ -2349,7 +2464,7 @@
                                                   Register scratch) {
   // Load the initial map. The global functions all have initial maps.
   ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     Label ok, fail;
     CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, false);
     b(&ok);
@@ -2557,7 +2672,7 @@
 
   // Copy bytes in word size chunks.
   bind(&word_loop);
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     tst(src, Operand(kPointerSize - 1));
     Assert(eq, "Expecting alignment for CopyBytes");
   }
@@ -2687,7 +2802,7 @@
   // running in the simulator. The simulator has its own alignment check which
   // provides more information.
 #if defined(V8_HOST_ARCH_ARM)
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     int frame_alignment = OS::ActivationFrameAlignment();
     int frame_alignment_mask = frame_alignment - 1;
     if (frame_alignment > kPointerSize) {
@@ -2721,7 +2836,7 @@
   const uint32_t kLdrOffsetMask = (1 << 12) - 1;
   const int32_t kPCRegOffset = 2 * kPointerSize;
   ldr(result, MemOperand(ldr_location));
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     // Check that the instruction is a ldr reg, [pc + offset] .
     and_(result, result, Operand(kLdrPCPattern));
     cmp(result, Operand(kLdrPCPattern));
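
The numeric behavior EmitECMATruncate implements is ECMA-262 9.5 ToInt32; a
plain C++ model of that conversion for reference (a sketch, not V8 code):

    #include <stdint.h>
    #include <cmath>

    int32_t ToInt32(double value) {
      if (std::isnan(value) || std::isinf(value)) return 0;
      double t = std::trunc(value);  // Round toward zero.
      // Reduce modulo 2^32, then reinterpret as a signed 32-bit integer.
      uint32_t m =
          static_cast<uint32_t>(std::fmod(std::fabs(t), 4294967296.0));
      if (t < 0) m = ~m + 1;  // Negate modulo 2^32.
      return static_cast<int32_t>(m);
    }
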
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index d79edd7..f72bbed 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -649,11 +649,11 @@
                       DwVfpRegister double_scratch,
                       Label *not_int32);
 
-// Truncates a double using a specific rounding mode.
-// Clears the z flag (ne condition) if an overflow occurs.
-// If exact_conversion is true, the z flag is also cleared if the conversion
-// was inexact, ie. if the double value could not be converted exactly
-// to a 32bit integer.
+  // Truncates a double using a specific rounding mode.
+  // Clears the z flag (ne condition) if an overflow occurs.
+  // If exact_conversion is true, the z flag is also cleared if the conversion
+  // was inexact, i.e. if the double value could not be converted exactly
+  // to a 32bit integer.
   void EmitVFPTruncate(VFPRoundingMode rounding_mode,
                        SwVfpRegister result,
                        DwVfpRegister double_input,
@@ -662,6 +662,27 @@
                        CheckForInexactConversion check
                            = kDontCheckForInexactConversion);
 
+  // Helper for EmitECMATruncate.
+  // This will truncate a floating-point value outside of the signed 32bit
+  // integer range to a 32bit signed integer.
+  // Expects the double value loaded in input_high and input_low.
+  // Exits with the answer in 'result'.
+  // Note that this code does not work for values in the 32bit range!
+  void EmitOutOfInt32RangeTruncate(Register result,
+                                   Register input_high,
+                                   Register input_low,
+                                   Register scratch);
+
+  // Performs a truncating conversion of a floating point number as used by
+  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
+  // Exits with 'result' holding the answer and all other registers clobbered.
+  void EmitECMATruncate(Register result,
+                        DwVfpRegister double_input,
+                        SwVfpRegister single_scratch,
+                        Register scratch,
+                        Register scratch2,
+                        Register scratch3);
+
   // Count leading zeros in a 32 bit word.  On ARM5 and later it uses the clz
   // instruction.  On pre-ARM5 hardware this routine gives the wrong answer
   // for 0 (31 instead of 32).  Source and scratch can be the same in which case
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index f475a18..cedce6d 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -2467,6 +2467,8 @@
 // vmov :Rt = Sn
 // vcvt: Dd = Sm
 // vcvt: Sd = Dm
+// Dd = vabs(Dm)
+// Dd = vneg(Dm)
 // Dd = vadd(Dn, Dm)
 // Dd = vsub(Dn, Dm)
 // Dd = vmul(Dn, Dm)
@@ -2502,6 +2504,11 @@
         double dm_value = get_double_from_d_register(vm);
         double dd_value = fabs(dm_value);
         set_d_register_from_double(vd, dd_value);
+      } else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
+        // vneg
+        double dm_value = get_double_from_d_register(vm);
+        double dd_value = -dm_value;
+        set_d_register_from_double(vd, dd_value);
       } else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
         DecodeVCVTBetweenDoubleAndSingle(instr);
       } else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
diff --git a/src/ast.cc b/src/ast.cc
index 184aaa5..907ed1d 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -621,24 +621,21 @@
 
 
 bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
-                               Handle<String> name) {
+                               LookupResult* lookup) {
   target_ = Handle<JSFunction>::null();
   cell_ = Handle<JSGlobalPropertyCell>::null();
-  LookupResult lookup;
-  global->Lookup(*name, &lookup);
-  if (lookup.IsProperty() &&
-      lookup.type() == NORMAL &&
-      lookup.holder() == *global) {
-    cell_ = Handle<JSGlobalPropertyCell>(global->GetPropertyCell(&lookup));
-    if (cell_->value()->IsJSFunction()) {
-      Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
-      // If the function is in new space we assume it's more likely to
-      // change and thus prefer the general IC code.
-      if (!Heap::InNewSpace(*candidate) &&
-          CanCallWithoutIC(candidate, arguments()->length())) {
-        target_ = candidate;
-        return true;
-      }
+  ASSERT(lookup->IsProperty() &&
+         lookup->type() == NORMAL &&
+         lookup->holder() == *global);
+  cell_ = Handle<JSGlobalPropertyCell>(global->GetPropertyCell(lookup));
+  if (cell_->value()->IsJSFunction()) {
+    Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
+    // If the function is in new space we assume it's more likely to
+    // change and thus prefer the general IC code.
+    if (!Heap::InNewSpace(*candidate) &&
+        CanCallWithoutIC(candidate, arguments()->length())) {
+      target_ = candidate;
+      return true;
     }
   }
   return false;
diff --git a/src/ast.h b/src/ast.h
index b00612d..42a2057 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -1307,7 +1307,7 @@
   Handle<JSGlobalPropertyCell> cell() { return cell_; }
 
   bool ComputeTarget(Handle<Map> type, Handle<String> name);
-  bool ComputeGlobalTarget(Handle<GlobalObject> global, Handle<String> name);
+  bool ComputeGlobalTarget(Handle<GlobalObject> global, LookupResult* lookup);
 
   // Bailout support.
   int ReturnId() const { return return_id_; }
diff --git a/src/code-stubs.h b/src/code-stubs.h
index f9f7b82..6af45d5 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -40,7 +40,6 @@
   V(GenericBinaryOp)                     \
   V(TypeRecordingBinaryOp)               \
   V(StringAdd)                           \
-  V(StringCharAt)                        \
   V(SubString)                           \
   V(StringCompare)                       \
   V(SmiOp)                               \
@@ -438,18 +437,6 @@
 };
 
 
-class StringCharAtStub: public CodeStub {
- public:
-  StringCharAtStub() {}
-
- private:
-  Major MajorKey() { return StringCharAt; }
-  int MinorKey() { return 0; }
-
-  void Generate(MacroAssembler* masm);
-};
-
-
 class ICCompareStub: public CodeStub {
  public:
   ICCompareStub(Token::Value op, CompareIC::State state)
diff --git a/src/compiler.cc b/src/compiler.cc
index 553f486..cfba4b2 100755
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -529,6 +529,7 @@
     info.MarkAsGlobal();
     info.SetExtension(extension);
     info.SetPreParseData(pre_data);
+    if (natives == NATIVES_CODE) info.MarkAsAllowingNativesSyntax();
     result = MakeFunctionInfo(&info);
     if (extension == NULL && !result.is_null()) {
       CompilationCache::PutScript(source, result);
diff --git a/src/compiler.h b/src/compiler.h
index e0a437a..92ec9ed 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -80,6 +80,12 @@
     ASSERT(is_lazy());
     flags_ |= IsInLoop::encode(true);
   }
+  void MarkAsAllowingNativesSyntax() {
+    flags_ |= IsNativesSyntaxAllowed::encode(true);
+  }
+  bool allows_natives_syntax() const {
+    return IsNativesSyntaxAllowed::decode(flags_);
+  }
   void SetFunction(FunctionLiteral* literal) {
     ASSERT(function_ == NULL);
     function_ = literal;
@@ -174,6 +180,8 @@
   class IsInLoop: public BitField<bool, 3, 1> {};
   // Strict mode - used in eager compilation.
   class IsStrict: public BitField<bool, 4, 1> {};
+  // Native syntax (%-stuff) allowed?
+  class IsNativesSyntaxAllowed: public BitField<bool, 5, 1> {};
 
   unsigned flags_;
 
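The new flag occupies bit 5 of flags_ via the BitField helper. A simplified sketch of the encode/decode contract being relied on (the real template in src/utils.h is assumed to have this shape):

    template <class T, int shift, int size>
    class BitFieldSketch {
     public:
      // Shift the value into its bit position within the flag word.
      static unsigned encode(T value) {
        return static_cast<unsigned>(value) << shift;
      }
      // Mask the field back out of the flag word.
      static T decode(unsigned value) {
        return static_cast<T>((value >> shift) & ((1U << size) - 1));
      }
    };
    // flags_ |= IsNativesSyntaxAllowed::encode(true);  // sets bit 5
    // IsNativesSyntaxAllowed::decode(flags_);          // reads it back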
diff --git a/src/d8-posix.cc b/src/d8-posix.cc
index 335bd2b..a7a4049 100644
--- a/src/d8-posix.cc
+++ b/src/d8-posix.cc
@@ -375,8 +375,10 @@
 // a parent process hangs on waiting while a child process is already a zombie.
 // See http://code.google.com/p/v8/issues/detail?id=401.
 #if defined(WNOWAIT) && !defined(ANDROID) && !defined(__APPLE__)
+#if !defined(__FreeBSD__)
 #define HAS_WAITID 1
 #endif
+#endif
 
 
 // Get exit status of child.
diff --git a/src/debug.cc b/src/debug.cc
index cfeb340..8ae47b9 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -764,15 +764,12 @@
   Handle<String> script_name = Factory::NewStringFromAscii(name);
 
   // Compile the script.
-  bool allow_natives_syntax = FLAG_allow_natives_syntax;
-  FLAG_allow_natives_syntax = true;
   Handle<SharedFunctionInfo> function_info;
   function_info = Compiler::Compile(source_code,
                                     script_name,
                                     0, 0, NULL, NULL,
                                     Handle<String>::null(),
                                     NATIVES_CODE);
-  FLAG_allow_natives_syntax = allow_natives_syntax;
 
   // Silently ignore stack overflows during compilation.
   if (function_info.is_null()) {
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index af2f42e..6b51233 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -923,10 +923,9 @@
   // references. This is fine because the deoptimizer's code section
   // isn't meant to be serialized at all.
   ASSERT(!Serializer::enabled());
-  bool old_debug_code = FLAG_debug_code;
-  FLAG_debug_code = false;
 
   MacroAssembler masm(NULL, 16 * KB);
+  masm.set_emit_debug_code(false);
   GenerateDeoptimizationEntries(&masm, kNumberOfEntries, type);
   CodeDesc desc;
   masm.GetCode(&desc);
@@ -935,7 +934,6 @@
   LargeObjectChunk* chunk = LargeObjectChunk::New(desc.instr_size, EXECUTABLE);
   memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size);
   CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size);
-  FLAG_debug_code = old_debug_code;
   return chunk;
 }
 
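The deoptimizer previously had to save, clear and restore the process-global FLAG_debug_code around code generation; the per-assembler override makes the change local to one MacroAssembler. Schematically (both halves taken from the hunk above):

    // Before: mutate a global flag and restore it afterwards.
    //   bool old_debug_code = FLAG_debug_code;
    //   FLAG_debug_code = false;
    //   ... generate deoptimization entries ...
    //   FLAG_debug_code = old_debug_code;
    //
    // After: the override is scoped to a single assembler instance.
    //   MacroAssembler masm(NULL, 16 * KB);
    //   masm.set_emit_debug_code(false);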
diff --git a/src/factory.cc b/src/factory.cc
index 4d2c6b4..e848b57 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -717,10 +717,10 @@
 }
 
 
-Handle<JSArray> Factory::NewJSArray(int length,
+Handle<JSArray> Factory::NewJSArray(int capacity,
                                     PretenureFlag pretenure) {
   Handle<JSObject> obj = NewJSObject(Top::array_function(), pretenure);
-  CALL_HEAP_FUNCTION(Handle<JSArray>::cast(obj)->Initialize(length), JSArray);
+  CALL_HEAP_FUNCTION(Handle<JSArray>::cast(obj)->Initialize(capacity), JSArray);
 }
 
 
diff --git a/src/factory.h b/src/factory.h
index 23e7001..e03e98b 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -221,7 +221,7 @@
   static Handle<JSObject> NewJSObjectFromMap(Handle<Map> map);
 
   // JS arrays are pretenured when allocated by the parser.
-  static Handle<JSArray> NewJSArray(int init_length,
+  static Handle<JSArray> NewJSArray(int capacity,
                                     PretenureFlag pretenure = NOT_TENURED);
 
   static Handle<JSArray> NewJSArrayWithElements(
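The rename makes explicit that the argument only sizes the elements backing store; the observable JavaScript length is set separately by the caller. A hedged embedder-level illustration (assuming the usual v8::Array API):

    // v8::Local<v8::Array> a = v8::Array::New(5);
    // a->Length();  // reports the JS .length; the preallocated capacity
    //               // of the elements FixedArray is an internal detail.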
diff --git a/src/full-codegen.h b/src/full-codegen.h
index 5fb11b4..cc0c206 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -501,9 +501,9 @@
 
   Handle<Script> script() { return info_->script(); }
   bool is_eval() { return info_->is_eval(); }
-  bool is_strict() { return function()->strict_mode(); }
+  bool is_strict_mode() { return function()->strict_mode(); }
   StrictModeFlag strict_mode_flag() {
-    return is_strict() ? kStrictMode : kNonStrictMode;
+    return is_strict_mode() ? kStrictMode : kNonStrictMode;
   }
   FunctionLiteral* function() { return info_->function(); }
   Scope* scope() { return info_->scope(); }
diff --git a/src/global-handles.cc b/src/global-handles.cc
index 6cc0cfb..c75b67c 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -541,16 +541,36 @@
   return &groups;
 }
 
-void GlobalHandles::AddGroup(Object*** handles,
-                             size_t length,
-                             v8::RetainedObjectInfo* info) {
+
+void GlobalHandles::AddObjectGroup(Object*** handles,
+                                   size_t length,
+                                   v8::RetainedObjectInfo* info) {
   ObjectGroup* new_entry = new ObjectGroup(length, info);
-  for (size_t i = 0; i < length; ++i)
+  for (size_t i = 0; i < length; ++i) {
     new_entry->objects_.Add(handles[i]);
+  }
   ObjectGroups()->Add(new_entry);
 }
 
 
+List<ImplicitRefGroup*>* GlobalHandles::ImplicitRefGroups() {
+  // Lazily initialize the list to avoid startup time static constructors.
+  static List<ImplicitRefGroup*> groups(4);
+  return &groups;
+}
+
+
+void GlobalHandles::AddImplicitReferences(HeapObject* parent,
+                                          Object*** children,
+                                          size_t length) {
+  ImplicitRefGroup* new_entry = new ImplicitRefGroup(parent, length);
+  for (size_t i = 0; i < length; ++i) {
+    new_entry->children_.Add(children[i]);
+  }
+  ImplicitRefGroups()->Add(new_entry);
+}
+
+
 void GlobalHandles::RemoveObjectGroups() {
   List<ObjectGroup*>* object_groups = ObjectGroups();
   for (int i = 0; i< object_groups->length(); i++) {
@@ -559,4 +579,14 @@
   object_groups->Clear();
 }
 
+
+void GlobalHandles::RemoveImplicitRefGroups() {
+  List<ImplicitRefGroup*>* ref_groups = ImplicitRefGroups();
+  for (int i = 0; i < ref_groups->length(); i++) {
+    delete ref_groups->at(i);
+  }
+  ref_groups->Clear();
+}
+
+
 } }  // namespace v8::internal
diff --git a/src/global-handles.h b/src/global-handles.h
index 3bab4b8..3559c40 100644
--- a/src/global-handles.h
+++ b/src/global-handles.h
@@ -39,9 +39,6 @@
 // At GC the destroyed global handles are removed from the free list
 // and deallocated.
 
-// Callback function on handling weak global handles.
-// typedef bool (*WeakSlotCallback)(Object** pointer);
-
 // An object group is treated like a single JS object: if one of object in
 // the group is alive, all objects in the same group are considered alive.
 // An object group is used to simulate object relationship in a DOM tree.
@@ -61,6 +58,24 @@
 };
 
 
+// An implicit references group consists of two parts: a parent object and
+// a list of child objects.  If the parent is alive, all the children
+// are alive too.
+class ImplicitRefGroup : public Malloced {
+ public:
+  ImplicitRefGroup() : children_(4) {}
+  ImplicitRefGroup(HeapObject* parent, size_t capacity)
+      : parent_(parent),
+        children_(static_cast<int>(capacity)) { }
+
+  HeapObject* parent_;
+  List<Object**> children_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ImplicitRefGroup);
+};
+
+
 typedef void (*WeakReferenceGuest)(Object* object, void* parameter);
 
 class GlobalHandles : public AllStatic {
@@ -128,17 +143,28 @@
   static void IdentifyWeakHandles(WeakSlotCallback f);
 
   // Add an object group.
-  // Should only used in GC callback function before a collection.
+  // Should only be used in a GC callback function before a collection.
   // All groups are destroyed after a mark-compact collection.
-  static void AddGroup(Object*** handles,
-                       size_t length,
-                       v8::RetainedObjectInfo* info);
+  static void AddObjectGroup(Object*** handles,
+                             size_t length,
+                             v8::RetainedObjectInfo* info);
+
+  // Add an implicit reference group.
+  // Should only be used in a GC callback function before a collection.
+  // All groups are destroyed after a mark-compact collection.
+  static void AddImplicitReferences(HeapObject* parent,
+                                    Object*** children,
+                                    size_t length);
 
   // Returns the object groups.
   static List<ObjectGroup*>* ObjectGroups();
 
+  // Returns the implicit reference groups.
+  static List<ImplicitRefGroup*>* ImplicitRefGroups();
+
   // Remove bags, this should only happen after GC.
   static void RemoveObjectGroups();
+  static void RemoveImplicitRefGroups();
 
   // Tear down the global handle structure.
   static void TearDown();
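A hedged usage sketch of the new group type, as seen from an embedder's GC prologue callback (handle names are hypothetical); the public entry point is V8::AddImplicitReferences:

    // If |parent| survives the collection, |child| must be kept alive
    // too, even though no handle-visible edge connects them.
    v8::Persistent<v8::Value> children[] = { child };
    v8::V8::AddImplicitReferences(parent, children, 1);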
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index fa2deb8..d30a519 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -1230,12 +1230,17 @@
 }
 
 
-void HLoadGlobal::PrintDataTo(StringStream* stream) {
+void HLoadGlobalCell::PrintDataTo(StringStream* stream) {
   stream->Add("[%p]", *cell());
   if (check_hole_value()) stream->Add(" (deleteable/read-only)");
 }
 
 
+void HLoadGlobalGeneric::PrintDataTo(StringStream* stream) {
+  stream->Add("%o ", *name());
+}
+
+
 void HStoreGlobal::PrintDataTo(StringStream* stream) {
   stream->Add("[%p] = ", *cell());
   value()->PrintNameTo(stream);
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index fc376bd..775177a 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -122,7 +122,8 @@
   V(LoadElements)                              \
   V(LoadExternalArrayPointer)                  \
   V(LoadFunctionPrototype)                     \
-  V(LoadGlobal)                                \
+  V(LoadGlobalCell)                            \
+  V(LoadGlobalGeneric)                         \
   V(LoadKeyedFastElement)                      \
   V(LoadKeyedGeneric)                          \
   V(LoadNamedField)                            \
@@ -2785,9 +2786,9 @@
 };
 
 
-class HLoadGlobal: public HTemplateInstruction<0> {
+class HLoadGlobalCell: public HTemplateInstruction<0> {
  public:
-  HLoadGlobal(Handle<JSGlobalPropertyCell> cell, bool check_hole_value)
+  HLoadGlobalCell(Handle<JSGlobalPropertyCell> cell, bool check_hole_value)
       : cell_(cell), check_hole_value_(check_hole_value) {
     set_representation(Representation::Tagged());
     SetFlag(kUseGVN);
@@ -2808,11 +2809,11 @@
     return Representation::None();
   }
 
-  DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load_global")
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load_global_cell")
 
  protected:
   virtual bool DataEquals(HValue* other) {
-    HLoadGlobal* b = HLoadGlobal::cast(other);
+    HLoadGlobalCell* b = HLoadGlobalCell::cast(other);
     return cell_.is_identical_to(b->cell());
   }
 
@@ -2822,6 +2823,38 @@
 };
 
 
+class HLoadGlobalGeneric: public HBinaryOperation {
+ public:
+  HLoadGlobalGeneric(HValue* context,
+                     HValue* global_object,
+                     Handle<Object> name,
+                     bool for_typeof)
+      : HBinaryOperation(context, global_object),
+        name_(name),
+        for_typeof_(for_typeof) {
+    set_representation(Representation::Tagged());
+    SetAllSideEffects();
+  }
+
+  HValue* context() { return OperandAt(0); }
+  HValue* global_object() { return OperandAt(1); }
+  Handle<Object> name() const { return name_; }
+  bool for_typeof() const { return for_typeof_; }
+
+  virtual void PrintDataTo(StringStream* stream);
+
+  virtual Representation RequiredInputRepresentation(int index) const {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load_global_generic")
+
+ private:
+  Handle<Object> name_;
+  bool for_typeof_;
+};
+
+
 class HStoreGlobal: public HUnaryOperation {
  public:
   HStoreGlobal(HValue* value,
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index b50399d..77714f6 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -1961,7 +1961,10 @@
 // Implementation of utility classes to represent an expression's context in
 // the AST.
 AstContext::AstContext(HGraphBuilder* owner, Expression::Context kind)
-    : owner_(owner), kind_(kind), outer_(owner->ast_context()) {
+    : owner_(owner),
+      kind_(kind),
+      outer_(owner->ast_context()),
+      for_typeof_(false) {
   owner->set_ast_context(this);  // Push.
 #ifdef DEBUG
   original_length_ = owner->environment()->length();
@@ -2105,6 +2108,13 @@
 }
 
 
+void HGraphBuilder::VisitForTypeOf(Expression* expr) {
+  ValueContext for_value(this);
+  for_value.set_for_typeof(true);
+  Visit(expr);
+}
+
+
 void HGraphBuilder::VisitForControl(Expression* expr,
                                     HBasicBlock* true_block,
                                     HBasicBlock* false_block) {
@@ -2797,29 +2808,21 @@
 }
 
 
-void HGraphBuilder::LookupGlobalPropertyCell(Variable* var,
-                                             LookupResult* lookup,
-                                             bool is_store) {
-  if (var->is_this()) {
-    BAILOUT("global this reference");
-  }
-  if (!info()->has_global_object()) {
-    BAILOUT("no global object to optimize VariableProxy");
+HGraphBuilder::GlobalPropertyAccess HGraphBuilder::LookupGlobalProperty(
+    Variable* var, LookupResult* lookup, bool is_store) {
+  if (var->is_this() || !info()->has_global_object()) {
+    return kUseGeneric;
   }
   Handle<GlobalObject> global(info()->global_object());
   global->Lookup(*var->name(), lookup);
-  if (!lookup->IsProperty()) {
-    BAILOUT("global variable cell not yet introduced");
+  if (!lookup->IsProperty() ||
+      lookup->type() != NORMAL ||
+      (is_store && lookup->IsReadOnly()) ||
+      lookup->holder() != *global) {
+    return kUseGeneric;
   }
-  if (lookup->type() != NORMAL) {
-    BAILOUT("global variable has accessors");
-  }
-  if (is_store && lookup->IsReadOnly()) {
-    BAILOUT("read-only global variable");
-  }
-  if (lookup->holder() != *global) {
-    BAILOUT("global property on prototype of global object");
-  }
+
+  return kUseCell;
 }
 
 
@@ -2855,19 +2858,31 @@
     ast_context()->ReturnInstruction(instr, expr->id());
   } else if (variable->is_global()) {
     LookupResult lookup;
-    LookupGlobalPropertyCell(variable, &lookup, false);
-    CHECK_BAILOUT;
+    GlobalPropertyAccess type = LookupGlobalProperty(variable, &lookup, false);
 
-    Handle<GlobalObject> global(info()->global_object());
-    // TODO(3039103): Handle global property load through an IC call when access
-    // checks are enabled.
-    if (global->IsAccessCheckNeeded()) {
-      BAILOUT("global object requires access check");
+    if (type == kUseCell &&
+        info()->global_object()->IsAccessCheckNeeded()) {
+      type = kUseGeneric;
     }
-    Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
-    bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
-    HLoadGlobal* instr = new HLoadGlobal(cell, check_hole);
-    ast_context()->ReturnInstruction(instr, expr->id());
+
+    if (type == kUseCell) {
+      Handle<GlobalObject> global(info()->global_object());
+      Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
+      bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
+      HLoadGlobalCell* instr = new HLoadGlobalCell(cell, check_hole);
+      ast_context()->ReturnInstruction(instr, expr->id());
+    } else {
+      HContext* context = new HContext;
+      AddInstruction(context);
+      HGlobalObject* global_object = new HGlobalObject(context);
+      AddInstruction(global_object);
+      HLoadGlobalGeneric* instr =
+          new HLoadGlobalGeneric(context,
+                                 global_object,
+                                 variable->name(),
+                                 ast_context()->is_for_typeof());
+      ast_context()->ReturnInstruction(instr, expr->id());
+    }
   } else {
     BAILOUT("reference to a variable which requires dynamic lookup");
   }
@@ -3224,16 +3239,18 @@
                                                    int position,
                                                    int ast_id) {
   LookupResult lookup;
-  LookupGlobalPropertyCell(var, &lookup, true);
-  CHECK_BAILOUT;
-
-  bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
-  Handle<GlobalObject> global(info()->global_object());
-  Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
-  HInstruction* instr = new HStoreGlobal(value, cell, check_hole);
-  instr->set_position(position);
-  AddInstruction(instr);
-  if (instr->HasSideEffects()) AddSimulate(ast_id);
+  GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
+  if (type == kUseCell) {
+    bool check_hole = !lookup.IsDontDelete() || lookup.IsReadOnly();
+    Handle<GlobalObject> global(info()->global_object());
+    Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
+    HInstruction* instr = new HStoreGlobal(value, cell, check_hole);
+    instr->set_position(position);
+    AddInstruction(instr);
+    if (instr->HasSideEffects()) AddSimulate(ast_id);
+  } else {
+    BAILOUT("global store only supported for cells");
+  }
 }
 
 
@@ -4328,10 +4345,12 @@
       // If there is a global property cell for the name at compile time and
       // access check is not enabled we assume that the function will not change
       // and generate optimized code for calling the function.
-      if (info()->has_global_object() &&
+      LookupResult lookup;
+      GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, false);
+      if (type == kUseCell &&
           !info()->global_object()->IsAccessCheckNeeded()) {
         Handle<GlobalObject> global(info()->global_object());
-        known_global_function = expr->ComputeGlobalTarget(global, var->name());
+        known_global_function = expr->ComputeGlobalTarget(global, &lookup);
       }
       if (known_global_function) {
         // Push the global object instead of the global receiver because
@@ -4533,7 +4552,13 @@
       VisitForEffect(expr->expression());
     }
 
-  } else if (op == Token::BIT_NOT || op == Token::SUB) {
+  } else if (op == Token::TYPEOF) {
+    VisitForTypeOf(expr->expression());
+    if (HasStackOverflow()) return;
+    HValue* value = Pop();
+    ast_context()->ReturnInstruction(new HTypeof(value), expr->id());
+
+  } else {
     VISIT_FOR_VALUE(expr->expression());
     HValue* value = Pop();
     HInstruction* instr = NULL;
@@ -4542,19 +4567,16 @@
         instr = new HBitNot(value);
         break;
       case Token::SUB:
-        instr = new HMul(graph_->GetConstantMinus1(), value);
+        instr = new HMul(value, graph_->GetConstantMinus1());
+        break;
+      case Token::ADD:
+        instr = new HMul(value, graph_->GetConstant1());
         break;
       default:
-        UNREACHABLE();
+        BAILOUT("Value: unsupported unary operation");
         break;
     }
     ast_context()->ReturnInstruction(instr, expr->id());
-  } else if (op == Token::TYPEOF) {
-    VISIT_FOR_VALUE(expr->expression());
-    HValue* value = Pop();
-    ast_context()->ReturnInstruction(new HTypeof(value), expr->id());
-  } else {
-    BAILOUT("Value: unsupported unary operation");
   }
 }
 
@@ -4935,7 +4957,8 @@
   if ((expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT) &&
       left_unary != NULL && left_unary->op() == Token::TYPEOF &&
       right_literal != NULL && right_literal->handle()->IsString()) {
-    VISIT_FOR_VALUE(left_unary->expression());
+    VisitForTypeOf(left_unary->expression());
+    if (HasStackOverflow()) return;
     HValue* left = Pop();
     HInstruction* instr = new HTypeofIs(left,
         Handle<String>::cast(right_literal->handle()));
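The dedicated typeof path exists because loading an undeclared global for typeof must produce undefined rather than throw a ReferenceError. The for_typeof flag therefore selects a non-contextual load IC in the backend (see lithium-codegen-ia32.cc below):

    //   typeof not_declared   // "undefined", must not throw
    //   not_declared          // throws ReferenceError
    RelocInfo::Mode mode = instr->for_typeof()
        ? RelocInfo::CODE_TARGET            // non-contextual load
        : RelocInfo::CODE_TARGET_CONTEXT;   // contextual load, may throw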
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 235b669..53e908c 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -445,6 +445,9 @@
   // the instruction as value.
   virtual void ReturnInstruction(HInstruction* instr, int ast_id) = 0;
 
+  void set_for_typeof(bool for_typeof) { for_typeof_ = for_typeof; }
+  bool is_for_typeof() { return for_typeof_; }
+
  protected:
   AstContext(HGraphBuilder* owner, Expression::Context kind);
   virtual ~AstContext();
@@ -461,6 +464,7 @@
   HGraphBuilder* owner_;
   Expression::Context kind_;
   AstContext* outer_;
+  bool for_typeof_;
 };
 
 
@@ -727,6 +731,7 @@
   void Bind(Variable* var, HValue* value) { environment()->Bind(var, value); }
 
   void VisitForValue(Expression* expr);
+  void VisitForTypeOf(Expression* expr);
   void VisitForEffect(Expression* expr);
   void VisitForControl(Expression* expr,
                        HBasicBlock* true_block,
@@ -762,9 +767,13 @@
   HBasicBlock* CreateLoopHeaderBlock();
 
   // Helpers for flow graph construction.
-  void LookupGlobalPropertyCell(Variable* var,
-                                LookupResult* lookup,
-                                bool is_store);
+  enum GlobalPropertyAccess {
+    kUseCell,
+    kUseGeneric
+  };
+  GlobalPropertyAccess LookupGlobalProperty(Variable* var,
+                                            LookupResult* lookup,
+                                            bool is_store);
 
   bool TryArgumentsAccess(Property* expr);
   bool TryCallApply(Call* expr);
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 6652df2..10364eb 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -299,7 +299,8 @@
 byte* Assembler::spare_buffer_ = NULL;
 
 Assembler::Assembler(void* buffer, int buffer_size)
-    : positions_recorder_(this) {
+    : positions_recorder_(this),
+      emit_debug_code_(FLAG_debug_code) {
   if (buffer == NULL) {
     // Do our own buffer management.
     if (buffer_size <= kMinimalBufferSize) {
@@ -2761,7 +2762,7 @@
       Serializer::TooLateToEnableNow();
     }
 #endif
-    if (!Serializer::enabled() && !FLAG_debug_code) {
+    if (!Serializer::enabled() && !emit_debug_code()) {
       return;
     }
   }
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index b60157c..7d9e374 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -522,6 +522,9 @@
   Assembler(void* buffer, int buffer_size);
   ~Assembler();
 
+  // Overrides the default provided by FLAG_debug_code.
+  void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
+
   // GetCode emits any pending (non-emitted) code and fills the descriptor
   // desc. GetCode() is idempotent; it returns the same result if no other
   // Assembler functions are invoked in between GetCode() calls.
@@ -982,6 +985,8 @@
   static const int kMinimalBufferSize = 4*KB;
 
  protected:
+  bool emit_debug_code() const { return emit_debug_code_; }
+
   void movsd(XMMRegister dst, const Operand& src);
   void movsd(const Operand& dst, XMMRegister src);
 
@@ -1057,6 +1062,8 @@
 
   PositionsRecorder positions_recorder_;
 
+  bool emit_debug_code_;
+
   friend class PositionsRecorder;
 };
 
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 6d7dfe6..01ea60c 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -5509,8 +5509,8 @@
   STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
   // Handle exceptionally long strings in the runtime system.
   __ j(overflow, &string_add_runtime);
-  // Use the runtime system when adding two one character strings, as it
-  // contains optimizations for this specific case using the symbol table.
+  // Use the symbol table when adding two one-character strings, as it
+  // helps later optimizations to return a symbol here.
   __ cmp(Operand(ebx), Immediate(Smi::FromInt(2)));
   __ j(not_equal, &longer_than_two);
 
@@ -5927,6 +5927,8 @@
     // If entry is undefined no string with this hash can be found.
     __ cmp(candidate, Factory::undefined_value());
     __ j(equal, not_found);
+    __ cmp(candidate, Factory::null_value());
+    __ j(equal, &next_probe[i]);
 
     // If length is not 2 the string is not a candidate.
     __ cmp(FieldOperand(candidate, String::kLengthOffset),
@@ -6295,59 +6297,6 @@
 }
 
 
-void StringCharAtStub::Generate(MacroAssembler* masm) {
-  // Expects two arguments (object, index) on the stack:
-
-  // Stack frame on entry.
-  //  esp[0]: return address
-  //  esp[4]: index
-  //  esp[8]: object
-
-  Register object = ebx;
-  Register index = eax;
-  Register scratch1 = ecx;
-  Register scratch2 = edx;
-  Register result = eax;
-
-  __ pop(scratch1);  // Return address.
-  __ pop(index);
-  __ pop(object);
-  __ push(scratch1);
-
-  Label need_conversion;
-  Label index_out_of_range;
-  Label done;
-  StringCharAtGenerator generator(object,
-                                  index,
-                                  scratch1,
-                                  scratch2,
-                                  result,
-                                  &need_conversion,
-                                  &need_conversion,
-                                  &index_out_of_range,
-                                  STRING_INDEX_IS_NUMBER);
-  generator.GenerateFast(masm);
-  __ jmp(&done);
-
-  __ bind(&index_out_of_range);
-  // When the index is out of range, the spec requires us to return
-  // the empty string.
-  __ Set(result, Immediate(Factory::empty_string()));
-  __ jmp(&done);
-
-  __ bind(&need_conversion);
-  // Move smi zero into the result register, which will trigger
-  // conversion.
-  __ Set(result, Immediate(Smi::FromInt(0)));
-  __ jmp(&done);
-
-  StubRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm, call_helper);
-
-  __ bind(&done);
-  __ ret(0);
-}
-
 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
   ASSERT(state_ == CompareIC::SMIS);
   NearLabel miss;
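The added null check matters because the symbol table probe distinguishes two sentinels (conventions assumed here): undefined marks a never-used slot, so probing can stop with a definite miss, while null marks a deleted slot that must be skipped. As a sketch (kProbes, ProbeIndex and KeyAt are illustrative names):

    for (int i = 0; i < kProbes; i++) {
      Object* candidate = KeyAt(ProbeIndex(hash, i));
      if (candidate->IsUndefined()) break;  // empty slot: definite miss
      if (candidate->IsNull()) continue;    // deleted slot: keep probing
      // ... otherwise compare the length and both characters ...
    }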
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index fcb06d2..f1d4cac 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -9943,12 +9943,6 @@
     __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, tmp.reg());
     deferred->Branch(not_equal);
 
-    // Check that the key is within bounds.  Both the key and the length of
-    // the JSArray are smis. Use unsigned comparison to handle negative keys.
-    __ cmp(key.reg(),
-           FieldOperand(receiver.reg(), JSArray::kLengthOffset));
-    deferred->Branch(above_equal);
-
     // Get the elements array from the receiver and check that it is not a
     // dictionary.
     __ mov(tmp.reg(),
@@ -9974,6 +9968,14 @@
            Immediate(Factory::fixed_array_map()));
     deferred->Branch(not_equal);
 
+    // Check that the key is within bounds.  Both the key and the length of
+    // the JSArray are smis (because the fixed array check above ensures the
+    // elements are in fast case). Use unsigned comparison to handle negative
+    // keys.
+    __ cmp(key.reg(),
+           FieldOperand(receiver.reg(), JSArray::kLengthOffset));
+    deferred->Branch(above_equal);
+
     // Store the value.
     __ mov(FixedArrayElementOperand(tmp.reg(), key.reg()), result.reg());
     __ IncrementCounter(&Counters::keyed_store_inline, 1);
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index e0cbe35..a7d38ce 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -331,6 +331,7 @@
   int PrintRightOperandHelper(byte* modrmp, RegisterNameMapping register_name);
   int PrintRightOperand(byte* modrmp);
   int PrintRightByteOperand(byte* modrmp);
+  int PrintRightXMMOperand(byte* modrmp);
   int PrintOperands(const char* mnem, OperandOrder op_order, byte* data);
   int PrintImmediateOp(byte* data);
   int F7Instruction(byte* data);
@@ -367,9 +368,11 @@
 
 int DisassemblerIA32::PrintRightOperandHelper(
     byte* modrmp,
-    RegisterNameMapping register_name) {
+    RegisterNameMapping direct_register_name) {
   int mod, regop, rm;
   get_modrm(*modrmp, &mod, &regop, &rm);
+  RegisterNameMapping register_name = (mod == 3) ? direct_register_name :
+      &DisassemblerIA32::NameOfCPURegister;
   switch (mod) {
     case 0:
       if (rm == ebp) {
@@ -454,6 +457,12 @@
 }
 
 
+int DisassemblerIA32::PrintRightXMMOperand(byte* modrmp) {
+  return PrintRightOperandHelper(modrmp,
+                                 &DisassemblerIA32::NameOfXMMRegister);
+}
+
+
 // Returns number of bytes used including the current *data.
 // Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'.
 int DisassemblerIA32::PrintOperands(const char* mnem,
@@ -937,7 +946,7 @@
           get_modrm(*data, &mod, &regop, &rm);
           if (regop == eax) {
             AppendToBuffer("test_b ");
-            data += PrintRightOperand(data);
+            data += PrintRightByteOperand(data);
             int32_t imm = *data;
             AppendToBuffer(",0x%x", imm);
             data++;
@@ -1035,11 +1044,19 @@
       case 0xC6:  // imm8
         { bool is_byte = *data == 0xC6;
           data++;
-          AppendToBuffer("%s ", is_byte ? "mov_b" : "mov");
-          data += PrintRightOperand(data);
-          int32_t imm = is_byte ? *data : *reinterpret_cast<int32_t*>(data);
-          AppendToBuffer(",0x%x", imm);
-          data += is_byte ? 1 : 4;
+          if (is_byte) {
+            AppendToBuffer("%s ", "mov_b");
+            data += PrintRightByteOperand(data);
+            int32_t imm = *data;
+            AppendToBuffer(",0x%x", imm);
+            data++;
+          } else {
+            AppendToBuffer("%s ", "mov");
+            data += PrintRightOperand(data);
+            int32_t imm = *reinterpret_cast<int32_t*>(data);
+            AppendToBuffer(",0x%x", imm);
+            data += 4;
+          }
         }
         break;
 
@@ -1054,7 +1071,7 @@
             default: UnimplementedInstruction();
           }
           AppendToBuffer("%s ", mnem);
-          data += PrintRightOperand(data);
+          data += PrintRightByteOperand(data);
           int32_t imm = *data;
           AppendToBuffer(",0x%x", imm);
           data++;
@@ -1067,9 +1084,15 @@
           int mod, regop, rm;
           data++;
           get_modrm(*data, &mod, &regop, &rm);
-          AppendToBuffer("%s ", is_byte ? "mov_b" : "mov");
-          data += PrintRightOperand(data);
-          AppendToBuffer(",%s", NameOfCPURegister(regop));
+          if (is_byte) {
+            AppendToBuffer("%s ", "mov_b");
+            data += PrintRightByteOperand(data);
+            AppendToBuffer(",%s", NameOfByteCPURegister(regop));
+          } else {
+            AppendToBuffer("%s ", "mov");
+            data += PrintRightOperand(data);
+            AppendToBuffer(",%s", NameOfCPURegister(regop));
+          }
         }
         break;
 
@@ -1181,7 +1204,7 @@
             int mod, regop, rm;
             get_modrm(*data, &mod, &regop, &rm);
             AppendToBuffer("movdqa %s,", NameOfXMMRegister(regop));
-            data += PrintRightOperand(data);
+            data += PrintRightXMMOperand(data);
           } else if (*data == 0x70) {
             data++;
             int mod, regop, rm;
@@ -1224,7 +1247,7 @@
             data++;
             int mod, regop, rm;
             get_modrm(*data, &mod, &regop, &rm);
-            data += PrintRightOperand(data);
+            data += PrintRightXMMOperand(data);
             AppendToBuffer(",%s", NameOfXMMRegister(regop));
           } else if (*data == 0x7E) {
             data++;
@@ -1242,12 +1265,16 @@
                            NameOfXMMRegister(rm));
             data++;
           } else if (*data == 0xE7) {
-            AppendToBuffer("movntdq ");
             data++;
             int mod, regop, rm;
             get_modrm(*data, &mod, &regop, &rm);
-            data += PrintRightOperand(data);
-            AppendToBuffer(",%s", NameOfXMMRegister(regop));
+            if (mod != 3) {
+              AppendToBuffer("movntdq ");
+              data += PrintRightOperand(data);
+              AppendToBuffer(",%s", NameOfXMMRegister(regop));
+            } else {
+              UnimplementedInstruction();
+            }
           } else if (*data == 0xEF) {
             data++;
             int mod, regop, rm;
@@ -1338,14 +1365,14 @@
             data += 3;
             int mod, regop, rm;
             get_modrm(*data, &mod, &regop, &rm);
-            data += PrintRightOperand(data);
+            data += PrintRightXMMOperand(data);
             AppendToBuffer(",%s", NameOfXMMRegister(regop));
           } else if (b2 == 0x10) {
             data += 3;
             int mod, regop, rm;
             get_modrm(*data, &mod, &regop, &rm);
             AppendToBuffer("movsd %s,", NameOfXMMRegister(regop));
-            data += PrintRightOperand(data);
+            data += PrintRightXMMOperand(data);
           } else {
             const char* mnem = "?";
             switch (b2) {
@@ -1361,27 +1388,11 @@
             int mod, regop, rm;
             get_modrm(*data, &mod, &regop, &rm);
             if (b2 == 0x2A) {
-              if (mod != 0x3) {
-                AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
-                data += PrintRightOperand(data);
-              } else {
-                AppendToBuffer("%s %s,%s",
-                               mnem,
-                               NameOfXMMRegister(regop),
-                               NameOfCPURegister(rm));
-                data++;
-              }
+              AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
+              data += PrintRightOperand(data);
             } else if (b2 == 0x2C) {
-              if (mod != 0x3) {
-                AppendToBuffer("%s %s,", mnem, NameOfCPURegister(regop));
-                data += PrintRightOperand(data);
-              } else {
-                AppendToBuffer("%s %s,%s",
-                               mnem,
-                               NameOfCPURegister(regop),
-                               NameOfXMMRegister(rm));
-                data++;
-              }
+              AppendToBuffer("%s %s,", mnem, NameOfCPURegister(regop));
+              data += PrintRightXMMOperand(data);
             } else if (b2 == 0xC2) {
               // Intel manual 2A, Table 3-18.
               const char* const pseudo_op[] = {
@@ -1400,16 +1411,8 @@
                              NameOfXMMRegister(rm));
               data += 2;
             } else {
-              if (mod != 0x3) {
-                AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
-                data += PrintRightOperand(data);
-              } else {
-                AppendToBuffer("%s %s,%s",
-                               mnem,
-                               NameOfXMMRegister(regop),
-                               NameOfXMMRegister(rm));
-                data++;
-              }
+              AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
+              data += PrintRightXMMOperand(data);
             }
           }
         } else {
@@ -1421,27 +1424,28 @@
         if (*(data+1) == 0x0F) {
           if (*(data+2) == 0x2C) {
             data += 3;
-            data += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, data);
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("cvttss2si %s,", NameOfCPURegister(regop));
+            data += PrintRightXMMOperand(data);
           } else  if (*(data+2) == 0x5A) {
             data += 3;
             int mod, regop, rm;
             get_modrm(*data, &mod, &regop, &rm);
-            AppendToBuffer("cvtss2sd %s,%s",
-                           NameOfXMMRegister(regop),
-                           NameOfXMMRegister(rm));
-            data++;
+            AppendToBuffer("cvtss2sd %s,", NameOfXMMRegister(regop));
+            data += PrintRightXMMOperand(data);
           } else  if (*(data+2) == 0x6F) {
             data += 3;
             int mod, regop, rm;
             get_modrm(*data, &mod, &regop, &rm);
             AppendToBuffer("movdqu %s,", NameOfXMMRegister(regop));
-            data += PrintRightOperand(data);
+            data += PrintRightXMMOperand(data);
           } else  if (*(data+2) == 0x7F) {
             AppendToBuffer("movdqu ");
             data += 3;
             int mod, regop, rm;
             get_modrm(*data, &mod, &regop, &rm);
-            data += PrintRightOperand(data);
+            data += PrintRightXMMOperand(data);
             AppendToBuffer(",%s", NameOfXMMRegister(regop));
           } else {
             UnimplementedInstruction();
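Background for the PrintRightXMMOperand fixes (standard x86 ModR/M facts): only a direct operand names an XMM register; memory operands are always addressed through general-purpose registers.

    // mod == 3 -> rm encodes a register operand (an XMM register for
    //             SSE instructions), so the XMM name table applies;
    // mod != 3 -> rm (+ SIB/displacement) encodes a memory operand whose
    //             base and index are general-purpose registers, so the
    //             CPU register names apply regardless of the instruction.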
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 174a946..c8f5fdf 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -727,9 +727,9 @@
              prop->key()->AsLiteral()->handle()->IsSmi());
       __ Set(ecx, Immediate(prop->key()->AsLiteral()->handle()));
 
-      Handle<Code> ic(Builtins::builtin(is_strict()
-          ? Builtins::KeyedStoreIC_Initialize_Strict
-          : Builtins::KeyedStoreIC_Initialize));
+      Handle<Code> ic(Builtins::builtin(
+          is_strict_mode() ? Builtins::KeyedStoreIC_Initialize_Strict
+                           : Builtins::KeyedStoreIC_Initialize));
       EmitCallIC(ic, RelocInfo::CODE_TARGET);
     }
   }
@@ -1371,8 +1371,8 @@
             __ mov(ecx, Immediate(key->handle()));
             __ mov(edx, Operand(esp, 0));
             Handle<Code> ic(Builtins::builtin(
-                is_strict() ? Builtins::StoreIC_Initialize_Strict
-                            : Builtins::StoreIC_Initialize));
+                is_strict_mode() ? Builtins::StoreIC_Initialize_Strict
+                                 : Builtins::StoreIC_Initialize));
             EmitCallIC(ic, RelocInfo::CODE_TARGET);
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
           } else {
@@ -1543,27 +1543,26 @@
     }
   }
 
+  // For compound assignments we need another deoptimization point after the
+  // variable/property load.
   if (expr->is_compound()) {
     { AccumulatorValueContext context(this);
       switch (assign_type) {
         case VARIABLE:
           EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+          PrepareForBailout(expr->target(), TOS_REG);
           break;
         case NAMED_PROPERTY:
           EmitNamedPropertyLoad(property);
+          PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
           break;
         case KEYED_PROPERTY:
           EmitKeyedPropertyLoad(property);
+          PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
           break;
       }
     }
 
-    // For property compound assignments we need another deoptimization
-    // point after the property load.
-    if (property != NULL) {
-      PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
-    }
-
     Token::Value op = expr->binary_op();
     __ push(eax);  // Left operand goes on the stack.
     VisitForAccumulatorValue(expr->value());
@@ -1763,8 +1762,8 @@
       __ pop(eax);  // Restore value.
       __ mov(ecx, prop->key()->AsLiteral()->handle());
       Handle<Code> ic(Builtins::builtin(
-          is_strict() ? Builtins::StoreIC_Initialize_Strict
-                      : Builtins::StoreIC_Initialize));
+          is_strict_mode() ? Builtins::StoreIC_Initialize_Strict
+                           : Builtins::StoreIC_Initialize));
       EmitCallIC(ic, RelocInfo::CODE_TARGET);
       break;
     }
@@ -1786,8 +1785,8 @@
       }
       __ pop(eax);  // Restore value.
       Handle<Code> ic(Builtins::builtin(
-          is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
-                      : Builtins::KeyedStoreIC_Initialize));
+          is_strict_mode() ? Builtins::KeyedStoreIC_Initialize_Strict
+                           : Builtins::KeyedStoreIC_Initialize));
       EmitCallIC(ic, RelocInfo::CODE_TARGET);
       break;
     }
@@ -1812,8 +1811,8 @@
     __ mov(ecx, var->name());
     __ mov(edx, GlobalObjectOperand());
     Handle<Code> ic(Builtins::builtin(
-        is_strict() ? Builtins::StoreIC_Initialize_Strict
-                    : Builtins::StoreIC_Initialize));
+        is_strict_mode() ? Builtins::StoreIC_Initialize_Strict
+                         : Builtins::StoreIC_Initialize));
     EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
 
   } else if (op == Token::INIT_CONST) {
@@ -1915,8 +1914,8 @@
     __ pop(edx);
   }
   Handle<Code> ic(Builtins::builtin(
-      is_strict() ? Builtins::StoreIC_Initialize_Strict
-                  : Builtins::StoreIC_Initialize));
+      is_strict_mode() ? Builtins::StoreIC_Initialize_Strict
+                       : Builtins::StoreIC_Initialize));
   EmitCallIC(ic, RelocInfo::CODE_TARGET);
 
   // If the assignment ends an initialization block, revert to fast case.
@@ -1955,8 +1954,8 @@
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
   Handle<Code> ic(Builtins::builtin(
-      is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
-                  : Builtins::KeyedStoreIC_Initialize));
+      is_strict_mode() ? Builtins::KeyedStoreIC_Initialize_Strict
+                       : Builtins::KeyedStoreIC_Initialize));
   EmitCallIC(ic, RelocInfo::CODE_TARGET);
 
   // If the assignment ends an initialization block, revert to fast case.
@@ -3049,8 +3048,8 @@
   // Fetch the map and check if array is in fast case.
   // Check that object doesn't require security checks and
   // has no indexed interceptor.
-  __ CmpObjectType(object, FIRST_JS_OBJECT_TYPE, temp);
-  __ j(below, &slow_case);
+  __ CmpObjectType(object, JS_ARRAY_TYPE, temp);
+  __ j(not_equal, &slow_case);
   __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
             KeyedLoadIC::kSlowCaseBitFieldMask);
   __ j(not_zero, &slow_case);
@@ -3748,7 +3747,11 @@
 
   // We need a second deoptimization point after loading the value
   // in case evaluating the property load my have a side effect.
-  PrepareForBailout(expr->increment(), TOS_REG);
+  if (assign_type == VARIABLE) {
+    PrepareForBailout(expr->expression(), TOS_REG);
+  } else {
+    PrepareForBailout(expr->increment(), TOS_REG);
+  }
 
   // Call ToNumber only if operand is not a smi.
   NearLabel no_conversion;
@@ -3842,8 +3845,8 @@
       __ mov(ecx, prop->key()->AsLiteral()->handle());
       __ pop(edx);
       Handle<Code> ic(Builtins::builtin(
-          is_strict() ? Builtins::StoreIC_Initialize_Strict
-                      : Builtins::StoreIC_Initialize));
+          is_strict_mode() ? Builtins::StoreIC_Initialize_Strict
+                           : Builtins::StoreIC_Initialize));
       EmitCallIC(ic, RelocInfo::CODE_TARGET);
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
@@ -3859,8 +3862,8 @@
       __ pop(ecx);
       __ pop(edx);
       Handle<Code> ic(Builtins::builtin(
-          is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
-                      : Builtins::KeyedStoreIC_Initialize));
+          is_strict_mode() ? Builtins::KeyedStoreIC_Initialize_Strict
+                           : Builtins::KeyedStoreIC_Initialize));
       EmitCallIC(ic, RelocInfo::CODE_TARGET);
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
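The extra deoptimization points follow from how compound assignments and count operations execute: optimized code may deoptimize after the load but before the store, and the unoptimized code must then resume with the loaded value on top of the stack (TOS_REG). For example:

    //   x += f();   // 1. load x      <- bailout point, value in TOS_REG
    //               // 2. call f()    (may itself trigger a deopt)
    //               // 3. add and store back into x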
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 269591e..78784b2 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -743,11 +743,6 @@
       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
       break;
     }
-    case CodeStub::StringCharAt: {
-      StringCharAtStub stub;
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-      break;
-    }
     case CodeStub::NumberToString: {
       NumberToStringStub stub;
       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@@ -898,7 +893,49 @@
   }
 
   if (right->IsConstantOperand()) {
-    __ imul(left, left, ToInteger32(LConstantOperand::cast(right)));
+    // Try strength reductions on the multiplication.
+    // All replacement instructions are at most as long as the imul
+    // and have better latency.
+    int constant = ToInteger32(LConstantOperand::cast(right));
+    if (constant == -1) {
+      __ neg(left);
+    } else if (constant == 0) {
+      __ xor_(left, Operand(left));
+    } else if (constant == 2) {
+      __ add(left, Operand(left));
+    } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+      // If we know that the multiplication can't overflow, it's safe to
+      // use instructions that don't set the overflow flag for the
+      // multiplication.
+      switch (constant) {
+        case 1:
+          // Do nothing.
+          break;
+        case 3:
+          __ lea(left, Operand(left, left, times_2, 0));
+          break;
+        case 4:
+          __ shl(left, 2);
+          break;
+        case 5:
+          __ lea(left, Operand(left, left, times_4, 0));
+          break;
+        case 8:
+          __ shl(left, 3);
+          break;
+        case 9:
+          __ lea(left, Operand(left, left, times_8, 0));
+          break;
+        case 16:
+          __ shl(left, 4);
+          break;
+        default:
+          __ imul(left, left, constant);
+          break;
+      }
+    } else {
+      __ imul(left, left, constant);
+    }
   } else {
     __ imul(left, ToOperand(right));
   }
@@ -2026,7 +2063,7 @@
 }
 
 
-void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
   Register result = ToRegister(instr->result());
   __ mov(result, Operand::Cell(instr->hydrogen()->cell()));
   if (instr->hydrogen()->check_hole_value()) {
@@ -2036,6 +2073,19 @@
 }
 
 
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+  ASSERT(ToRegister(instr->context()).is(esi));
+  ASSERT(ToRegister(instr->global_object()).is(eax));
+  ASSERT(ToRegister(instr->result()).is(eax));
+
+  __ mov(ecx, instr->name());
+  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
+                                               RelocInfo::CODE_TARGET_CONTEXT;
+  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+  CallCode(ic, mode, instr);
+}
+
+
 void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
   Register value = ToRegister(instr->InputAt(0));
   Operand cell_operand = Operand::Cell(instr->hydrogen()->cell());
@@ -3502,9 +3552,15 @@
 
 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
   LOperand* input = instr->InputAt(0);
-  ASSERT(input->IsRegister());
   __ test(ToRegister(input), Immediate(kSmiTagMask));
-  DeoptimizeIf(instr->condition(), instr->environment());
+  DeoptimizeIf(not_zero, instr->environment());
+}
+
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+  LOperand* input = instr->InputAt(0);
+  __ test(ToRegister(input), Immediate(kSmiTagMask));
+  DeoptimizeIf(zero, instr->environment());
 }
 
 
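The multiply strength reductions rely on standard ia32 identities; lea computes a multiply-add without touching the overflow flag, which is why the 3/4/5/8/9/16 cases are gated on the multiplication being provably non-overflowing:

    // lea dst, [src + src*2]  ; dst = 3 * src
    // lea dst, [src + src*4]  ; dst = 5 * src
    // lea dst, [src + src*8]  ; dst = 9 * src
    // shl dst, k              ; dst = src << k, for 4, 8 and 16
    // neg, xor and add cover -1, 0 and 2; their flag behavior is
    // compatible with the overflow check, so they need no guard.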
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 3148ae2..5b83e1e 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -1683,7 +1683,7 @@
 
 LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  return AssignEnvironment(new LCheckSmi(value, zero));
+  return AssignEnvironment(new LCheckNonSmi(value));
 }
 
 
@@ -1704,7 +1704,7 @@
 
 LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  return AssignEnvironment(new LCheckSmi(value, not_zero));
+  return AssignEnvironment(new LCheckSmi(value));
 }
 
 
@@ -1745,14 +1745,22 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
-  LLoadGlobal* result = new LLoadGlobal;
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+  LLoadGlobalCell* result = new LLoadGlobalCell;
   return instr->check_hole_value()
       ? AssignEnvironment(DefineAsRegister(result))
       : DefineAsRegister(result);
 }
 
 
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* global_object = UseFixed(instr->global_object(), eax);
+  LLoadGlobalGeneric* result = new LLoadGlobalGeneric(context, global_object);
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
 LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
   LStoreGlobal* result = new LStoreGlobal(UseRegisterAtStart(instr->value()));
   return instr->check_hole_value() ? AssignEnvironment(result) : result;
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index 9a07c6f..c281d64 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -70,6 +70,7 @@
   V(CheckFunction)                              \
   V(CheckInstanceType)                          \
   V(CheckMap)                                   \
+  V(CheckNonSmi)                                \
   V(CheckPrototypeMaps)                         \
   V(CheckSmi)                                   \
   V(ClassOfTest)                                \
@@ -120,7 +121,8 @@
   V(LoadElements)                               \
   V(LoadExternalArrayPointer)                   \
   V(LoadFunctionPrototype)                      \
-  V(LoadGlobal)                                 \
+  V(LoadGlobalCell)                             \
+  V(LoadGlobalGeneric)                          \
   V(LoadKeyedFastElement)                       \
   V(LoadKeyedGeneric)                           \
   V(LoadNamedField)                             \
@@ -1270,10 +1272,27 @@
 };
 
 
-class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
  public:
-  DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global")
-  DECLARE_HYDROGEN_ACCESSOR(LoadGlobal)
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
+};
+
+
+class LLoadGlobalGeneric: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
+    inputs_[0] = context;
+    inputs_[1] = global_object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* global_object() { return inputs_[1]; }
+  Handle<Object> name() const { return hydrogen()->name(); }
+  bool for_typeof() const { return hydrogen()->for_typeof(); }
 };
 
 
@@ -1792,20 +1811,21 @@
 
 class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
  public:
-  LCheckSmi(LOperand* value, Condition condition)
-      : condition_(condition) {
+  explicit LCheckSmi(LOperand* value) {
     inputs_[0] = value;
   }
 
-  Condition condition() const { return condition_; }
+  DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
 
-  virtual void CompileToNative(LCodeGen* generator);
-  virtual const char* Mnemonic() const {
-    return (condition_ == zero) ? "check-non-smi" : "check-smi";
+
+class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckNonSmi(LOperand* value) {
+    inputs_[0] = value;
   }
 
- private:
-  Condition condition_;
+  DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
 };
 
 
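Both checks reduce to testing the low tag bit: on ia32 a smi is an integer shifted left by one, so bit 0 (kSmiTagMask) is clear for smis and set for heap-object pointers:

    // test reg, kSmiTagMask                        ; kSmiTagMask == 1
    // LCheckSmi:    DeoptimizeIf(not_zero, env);   // bit set: not a smi
    // LCheckNonSmi: DeoptimizeIf(zero, env);       // bit clear: a smi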
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 2ba95c4..d5c3f53 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -52,7 +52,7 @@
 void MacroAssembler::RecordWriteHelper(Register object,
                                        Register addr,
                                        Register scratch) {
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     // Check that the object is not in new space.
     Label not_in_new_space;
     InNewSpace(object, scratch, not_equal, &not_in_new_space);
@@ -113,7 +113,7 @@
 
   // Clobber all input registers when running with the debug-code flag
   // turned on to provoke errors.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     mov(object, Immediate(BitCast<int32_t>(kZapValue)));
     mov(value, Immediate(BitCast<int32_t>(kZapValue)));
     mov(scratch, Immediate(BitCast<int32_t>(kZapValue)));
@@ -141,7 +141,7 @@
 
   // Clobber all input registers when running with the debug-code flag
   // turned on to provoke errors.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     mov(object, Immediate(BitCast<int32_t>(kZapValue)));
     mov(address, Immediate(BitCast<int32_t>(kZapValue)));
     mov(value, Immediate(BitCast<int32_t>(kZapValue)));
@@ -285,7 +285,7 @@
   push(esi);
   push(Immediate(Smi::FromInt(type)));
   push(Immediate(CodeObject()));
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     cmp(Operand(esp, 0), Immediate(Factory::undefined_value()));
     Check(not_equal, "code object not properly patched");
   }
@@ -293,7 +293,7 @@
 
 
 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
         Immediate(Smi::FromInt(type)));
     Check(equal, "stack frame types must match");
@@ -550,7 +550,7 @@
   mov(scratch, Operand(ebp, StandardFrameConstants::kContextOffset));
 
   // When generating debug code, make sure the lexical context is set.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     cmp(Operand(scratch), Immediate(0));
     Check(not_equal, "we should not have an empty lexical context");
   }
@@ -560,7 +560,7 @@
   mov(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
 
   // Check the context is a global context.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     push(scratch);
     // Read the first word and compare to global_context_map.
     mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
@@ -584,7 +584,7 @@
   mov(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
 
   // Check the context is a global context.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     cmp(holder_reg, Factory::null_value());
     Check(not_equal, "JSGlobalProxy::context() should not be null.");
 
@@ -637,7 +637,7 @@
 
 void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
                                                Register scratch) {
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     test(result_end, Immediate(kObjectAlignmentMask));
     Check(zero, "Unaligned allocation in new space");
   }
@@ -661,7 +661,7 @@
                                         Label* gc_required,
                                         AllocationFlags flags) {
   if (!FLAG_inline_new) {
-    if (FLAG_debug_code) {
+    if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
       mov(result, Immediate(0x7091));
       if (result_end.is_valid()) {
@@ -718,7 +718,7 @@
                                         Label* gc_required,
                                         AllocationFlags flags) {
   if (!FLAG_inline_new) {
-    if (FLAG_debug_code) {
+    if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
       mov(result, Immediate(0x7091));
       mov(result_end, Immediate(0x7191));
@@ -764,7 +764,7 @@
                                         Label* gc_required,
                                         AllocationFlags flags) {
   if (!FLAG_inline_new) {
-    if (FLAG_debug_code) {
+    if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
       mov(result, Immediate(0x7091));
       mov(result_end, Immediate(0x7191));
@@ -1320,7 +1320,7 @@
     // pointer to out cell.
     lea(scratch, Operand(esp, (argc + 1) * kPointerSize));
     mov(Operand(esp, 0 * kPointerSize), scratch);  // output.
-    if (FLAG_debug_code) {
+    if (emit_debug_code()) {
       mov(Operand(esp, (argc + 1) * kPointerSize), Immediate(0));  // out cell.
     }
   }
@@ -1621,7 +1621,7 @@
   // (i.e., the static scope chain and runtime context chain do not agree).
   // A variable occurring in such a scope should have slot type LOOKUP and
   // not CONTEXT.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     cmp(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
     Check(equal, "Yo dawg, I heard you liked function contexts "
                  "so I put function contexts in all your contexts");
@@ -1643,7 +1643,7 @@
                                                   Register map) {
   // Load the initial map.  The global functions all have initial maps.
   mov(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     Label ok, fail;
     CheckMap(map, Factory::meta_map(), &fail, false);
     jmp(&ok);
@@ -1787,12 +1787,12 @@
 
 
 void MacroAssembler::Assert(Condition cc, const char* msg) {
-  if (FLAG_debug_code) Check(cc, msg);
+  if (emit_debug_code()) Check(cc, msg);
 }
 
 
 void MacroAssembler::AssertFastElements(Register elements) {
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     Label ok;
     cmp(FieldOperand(elements, HeapObject::kMapOffset),
         Immediate(Factory::fixed_array_map()));
@@ -1860,7 +1860,7 @@
 void MacroAssembler::JumpIfNotNumber(Register reg,
                                      TypeInfo info,
                                      Label* on_not_number) {
-  if (FLAG_debug_code) AbortIfSmi(reg);
+  if (emit_debug_code()) AbortIfSmi(reg);
   if (!info.IsNumber()) {
     cmp(FieldOperand(reg, HeapObject::kMapOffset),
         Factory::heap_number_map());
@@ -1874,7 +1874,7 @@
                                     Register scratch,
                                     TypeInfo info,
                                     Label* on_not_int32) {
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     AbortIfSmi(source);
     AbortIfNotNumber(source);
   }
@@ -1994,7 +1994,7 @@
 void MacroAssembler::CallCFunction(Register function,
                                    int num_arguments) {
   // Check stack alignment.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     CheckStackAlignment();
   }
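
Every hunk in this file applies the same mechanical substitution: the global FLAG_debug_code read becomes the per-assembler emit_debug_code() accessor declared in the assembler headers of this patch, so debug checks can be toggled per Assembler instance rather than process-wide. A minimal sketch of the pattern, with hypothetical names standing in for the real classes:

// Sketch: cache the global flag at construction and allow a per-
// instance override, so e.g. snapshot generation can force debug
// checks on or off for one assembler without touching the flag.
struct GlobalFlagsSketch { static bool debug_code; };
bool GlobalFlagsSketch::debug_code = false;

class AssemblerSketch {
 public:
  AssemblerSketch() : emit_debug_code_(GlobalFlagsSketch::debug_code) {}
  void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
  bool emit_debug_code() const { return emit_debug_code_; }
 private:
  bool emit_debug_code_;
};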
 
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 9df9af1..b91f7bc 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -1190,14 +1190,41 @@
         MarkObject(HeapObject::cast(*objects[j]));
       }
     }
+
     // Once the entire group has been colored gray, set the object group
     // to NULL so it won't be processed again.
-    delete object_groups->at(i);
+    delete entry;
     object_groups->at(i) = NULL;
   }
 }
 
 
+void MarkCompactCollector::MarkImplicitRefGroups() {
+  List<ImplicitRefGroup*>* ref_groups = GlobalHandles::ImplicitRefGroups();
+
+  for (int i = 0; i < ref_groups->length(); i++) {
+    ImplicitRefGroup* entry = ref_groups->at(i);
+    if (entry == NULL) continue;
+
+    if (!entry->parent_->IsMarked()) continue;
+
+    List<Object**>& children = entry->children_;
+    // The parent object is marked, so mark all of its white child heap
+    // objects gray.
+    for (int j = 0; j < children.length(); ++j) {
+      if ((*children[j])->IsHeapObject()) {
+        MarkObject(HeapObject::cast(*children[j]));
+      }
+    }
+
+    // Once the entire group has been colored gray, set the group
+    // to NULL so it won't be processed again.
+    delete entry;
+    ref_groups->at(i) = NULL;
+  }
+}
+
+
 // Mark all objects reachable from the objects on the marking stack.
 // Before: the marking stack contains zero or more heap object pointers.
 // After: the marking stack is empty, and all objects reachable from the
@@ -1276,11 +1303,12 @@
 }
 
 
-void MarkCompactCollector::ProcessObjectGroups() {
+void MarkCompactCollector::ProcessExternalMarking() {
   bool work_to_do = true;
   ASSERT(marking_stack.is_empty());
   while (work_to_do) {
     MarkObjectGroups();
+    MarkImplicitRefGroups();
     work_to_do = !marking_stack.is_empty();
     ProcessMarkingStack();
   }
@@ -1311,10 +1339,9 @@
   MarkRoots(&root_visitor);
 
   // The objects reachable from the roots are marked, yet unreachable
-  // objects are unmarked.  Mark objects reachable from object groups
-  // containing at least one marked object, and continue until no new
-  // objects are reachable from the object groups.
-  ProcessObjectGroups();
+  // objects are unmarked.  Mark objects reachable due to
+  // host-application-specific logic.
+  ProcessExternalMarking();
 
   // The objects reachable from the roots or object groups are marked,
   // yet unreachable objects are unmarked.  Mark objects reachable
@@ -1330,9 +1357,9 @@
     EmptyMarkingStack();
   }
 
-  // Repeat the object groups to mark unmarked groups reachable from the
-  // weak roots.
-  ProcessObjectGroups();
+  // Repeat the host-application-specific marking to mark unmarked
+  // objects reachable from the weak roots.
+  ProcessExternalMarking();
 
   // Prune the symbol table removing all symbols only pointed to by the
   // symbol table.  Cannot use symbol_table() here because the symbol
@@ -1350,6 +1377,7 @@
 
   // Remove object groups after marking phase.
   GlobalHandles::RemoveObjectGroups();
+  GlobalHandles::RemoveImplicitRefGroups();
 
   // Flush code from collected candidates.
   FlushCode::ProcessCandidates();
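
The new pass is deliberately simple: a group's children only need marking when the parent is already live, and ProcessExternalMarking iterates MarkObjectGroups and MarkImplicitRefGroups to a fixed point because marking one group can make another eligible. A simplified sketch of the per-group rule, with stand-in types (the real code pushes children onto the marking stack instead of setting a bit):

#include <vector>

// Sketch of the implicit-reference invariant: a live parent keeps all
// registered children alive.  ObjectSketch/GroupSketch are illustrative
// stand-ins for HeapObject and ImplicitRefGroup.
struct ObjectSketch { bool marked = false; };

struct GroupSketch {
  ObjectSketch* parent;
  std::vector<ObjectSketch*> children;
};

void MarkImplicitRefGroupsSketch(std::vector<GroupSketch>* groups) {
  for (GroupSketch& group : *groups) {
    if (!group.parent->marked) continue;  // dead parent: skip for now
    for (ObjectSketch* child : group.children) {
      child->marked = true;
    }
  }
}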
diff --git a/src/mark-compact.h b/src/mark-compact.h
index 1b7e600..9cda31e 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -220,10 +220,13 @@
   // group marked.
   static void MarkObjectGroups();
 
-  // Mark all objects in an object group with at least one marked
-  // object, then all objects reachable from marked objects in object
-  // groups, and repeat.
-  static void ProcessObjectGroups();
+  // Mark objects in implicit reference groups if their parent object
+  // is marked.
+  static void MarkImplicitRefGroups();
+
+  // Mark all objects that are reachable due to host application
+  // logic such as object groups or implicit reference groups.
+  static void ProcessExternalMarking();
 
   // Mark objects reachable (transitively) from objects in the marking stack
   // or overflowed in the heap.
diff --git a/src/parser.cc b/src/parser.cc
index 0725a0a..98dfa2b 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -5133,7 +5133,7 @@
     result = parser.ParseLazy(info);
   } else {
     bool allow_natives_syntax =
-        FLAG_allow_natives_syntax || Bootstrapper::IsActive();
+        info->allows_natives_syntax() || FLAG_allow_natives_syntax;
     ScriptDataImpl* pre_data = info->pre_parse_data();
     Parser parser(script, allow_natives_syntax, info->extension(), pre_data);
     if (pre_data != NULL && pre_data->has_error()) {
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index 21763b5..c2c81dc 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -42,6 +42,7 @@
 #include <sys/stat.h>   // open
 #include <sys/fcntl.h>  // open
 #include <unistd.h>     // getpagesize
+// If you don't have execinfo.h then you need devel/libexecinfo from ports.
 #include <execinfo.h>   // backtrace, backtrace_symbols
 #include <strings.h>    // index
 #include <errno.h>
@@ -526,6 +527,16 @@
     return result;
   }
 
+  virtual bool TryLock() {
+    int result = pthread_mutex_trylock(&mutex_);
+    // Return false if the lock is busy and locking failed.
+    if (result == EBUSY) {
+      return false;
+    }
+    ASSERT(result == 0);  // Verify no other errors.
+    return true;
+  }
+
  private:
   pthread_mutex_t mutex_;   // Pthread mutex for POSIX platforms.
 };
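
TryLock simply maps pthread_mutex_trylock's EBUSY result to false. A short usage sketch of that contract in plain pthreads (outside V8):

#include <pthread.h>
#include <errno.h>
#include <assert.h>

// Sketch of the TryLock contract: true means the lock was acquired and
// must be released; false means another thread currently holds it.
bool TryLockSketch(pthread_mutex_t* mutex) {
  int result = pthread_mutex_trylock(mutex);
  if (result == EBUSY) return false;  // busy is expected, not an error
  assert(result == 0);                // any other failure is a bug
  return true;
}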
@@ -595,60 +606,124 @@
 #ifdef ENABLE_LOGGING_AND_PROFILING
 
 static Sampler* active_sampler_ = NULL;
+static pthread_t vm_tid_ = NULL;
 
-static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
-  USE(info);
-  if (signal != SIGPROF) return;
-  if (active_sampler_ == NULL) return;
 
-  TickSample sample;
-
-  // We always sample the VM state.
-  sample.state = VMState::current_state();
-
-  // If profiling, we extract the current pc and sp.
-  if (active_sampler_->IsProfiling()) {
-    // Extracting the sample from the context is extremely machine dependent.
-    ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
-    mcontext_t& mcontext = ucontext->uc_mcontext;
-#if V8_HOST_ARCH_IA32
-    sample.pc = reinterpret_cast<Address>(mcontext.mc_eip);
-    sample.sp = reinterpret_cast<Address>(mcontext.mc_esp);
-    sample.fp = reinterpret_cast<Address>(mcontext.mc_ebp);
-#elif V8_HOST_ARCH_X64
-    sample.pc = reinterpret_cast<Address>(mcontext.mc_rip);
-    sample.sp = reinterpret_cast<Address>(mcontext.mc_rsp);
-    sample.fp = reinterpret_cast<Address>(mcontext.mc_rbp);
-#elif V8_HOST_ARCH_ARM
-    sample.pc = reinterpret_cast<Address>(mcontext.mc_r15);
-    sample.sp = reinterpret_cast<Address>(mcontext.mc_r13);
-    sample.fp = reinterpret_cast<Address>(mcontext.mc_r11);
-#endif
-    active_sampler_->SampleStack(&sample);
-  }
-
-  active_sampler_->Tick(&sample);
+static pthread_t GetThreadID() {
+  return pthread_self();
 }
 
 
 class Sampler::PlatformData : public Malloced {
  public:
-  PlatformData() {
-    signal_handler_installed_ = false;
+  enum SleepInterval {
+    FULL_INTERVAL,
+    HALF_INTERVAL
+  };
+
+  explicit PlatformData(Sampler* sampler)
+      : sampler_(sampler),
+        signal_handler_installed_(false),
+        signal_sender_launched_(false) {
   }
 
+  void SignalSender() {
+    while (sampler_->IsActive()) {
+      if (rate_limiter_.SuspendIfNecessary()) continue;
+      if (sampler_->IsProfiling() && RuntimeProfiler::IsEnabled()) {
+        Sleep(FULL_INTERVAL);
+        RuntimeProfiler::NotifyTick();
+      } else {
+        if (RuntimeProfiler::IsEnabled()) RuntimeProfiler::NotifyTick();
+        Sleep(FULL_INTERVAL);
+      }
+    }
+  }
+
+  void Sleep(SleepInterval full_or_half) {
+    // Convert ms to us and subtract 100 us to compensate for delays
+    // occurring during signal delivery.
+    useconds_t interval = sampler_->interval_ * 1000 - 100;
+    if (full_or_half == HALF_INTERVAL) interval /= 2;
+    int result = usleep(interval);
+#ifdef DEBUG
+    if (result != 0 && errno != EINTR) {
+      fprintf(stderr,
+              "SignalSender usleep error; interval = %u, errno = %d\n",
+              interval,
+              errno);
+      ASSERT(result == 0 || errno == EINTR);
+    }
+#endif
+    USE(result);
+  }
+
+  Sampler* sampler_;
   bool signal_handler_installed_;
   struct sigaction old_signal_handler_;
   struct itimerval old_timer_value_;
+  bool signal_sender_launched_;
+  pthread_t signal_sender_thread_;
+  RuntimeProfilerRateLimiter rate_limiter_;
 };
 
 
+static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
+  USE(info);
+  if (signal != SIGPROF) return;
+  if (active_sampler_ == NULL) return;
+  if (!active_sampler_->IsActive()) {
+    // Restore old signal handler
+    Sampler::PlatformData* data = active_sampler_->data();
+    if (data->signal_handler_installed_) {
+      sigaction(SIGPROF, &data->old_signal_handler_, 0);
+      data->signal_handler_installed_ = false;
+    }
+    return;
+  }
+
+  if (vm_tid_ != GetThreadID()) return;
+
+  TickSample sample_obj;
+  TickSample* sample = CpuProfiler::TickSampleEvent();
+  if (sample == NULL) sample = &sample_obj;
+
+  // Extracting the sample from the context is extremely machine dependent.
+  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
+  mcontext_t& mcontext = ucontext->uc_mcontext;
+#if V8_HOST_ARCH_IA32
+  sample->pc = reinterpret_cast<Address>(mcontext.mc_eip);
+  sample->sp = reinterpret_cast<Address>(mcontext.mc_esp);
+  sample->fp = reinterpret_cast<Address>(mcontext.mc_ebp);
+#elif V8_HOST_ARCH_X64
+  sample->pc = reinterpret_cast<Address>(mcontext.mc_rip);
+  sample->sp = reinterpret_cast<Address>(mcontext.mc_rsp);
+  sample->fp = reinterpret_cast<Address>(mcontext.mc_rbp);
+#elif V8_HOST_ARCH_ARM
+  sample->pc = reinterpret_cast<Address>(mcontext.mc_r15);
+  sample->sp = reinterpret_cast<Address>(mcontext.mc_r13);
+  sample->fp = reinterpret_cast<Address>(mcontext.mc_r11);
+#endif
+  active_sampler_->SampleStack(sample);
+  active_sampler_->Tick(sample);
+}
+
+
+static void* SenderEntry(void* arg) {
+  Sampler::PlatformData* data =
+      reinterpret_cast<Sampler::PlatformData*>(arg);
+  data->SignalSender();
+  return 0;
+}
+
+
 Sampler::Sampler(int interval)
     : interval_(interval),
       profiling_(false),
       active_(false),
       samples_taken_(0) {
-  data_ = new PlatformData();
+  data_ = new PlatformData(this);
 }
 
 
@@ -660,7 +735,8 @@
 void Sampler::Start() {
   // There can only be one active sampler at the time on POSIX
   // platforms.
-  if (active_sampler_ != NULL) return;
+  ASSERT(!IsActive());
+  vm_tid_ = GetThreadID();
 
   // Request profiling signals.
   struct sigaction sa;
@@ -680,21 +756,29 @@
 
   // Set this sampler as the active sampler.
   active_sampler_ = this;
-  active_ = true;
+  SetActive(true);
+
+  // There's no way to send a signal to a thread on FreeBSD, but we can
+  // start a thread that uses the stack guard to interrupt the JS thread.
+  if (pthread_create(
+          &data_->signal_sender_thread_, NULL, SenderEntry, data_) == 0) {
+    data_->signal_sender_launched_ = true;
+  }
 }
 
 
 void Sampler::Stop() {
-  // Restore old signal handler
-  if (data_->signal_handler_installed_) {
-    setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL);
-    sigaction(SIGPROF, &data_->old_signal_handler_, 0);
-    data_->signal_handler_installed_ = false;
-  }
-
   // This sampler is no longer the active sampler.
   active_sampler_ = NULL;
-  active_ = false;
+  SetActive(false);
+
+  // Wait for the signal sender to terminate (it exits after we set
+  // active_ to false).
+  if (data_->signal_sender_launched_) {
+    Top::WakeUpRuntimeProfilerThreadBeforeShutdown();
+    pthread_join(data_->signal_sender_thread_, NULL);
+    data_->signal_sender_launched_ = false;
+  }
 }
 
 #endif  // ENABLE_LOGGING_AND_PROFILING
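
Two details of the rewritten sampler are worth noting. The signal handler now prefers a preallocated TickSample from CpuProfiler::TickSampleEvent() over a stack-allocated one, keeping work inside the handler minimal, and the new sender thread drives RuntimeProfiler ticks since FreeBSD offers no per-thread signal delivery here. The Sleep helper also tolerates interruption: usleep failing with EINTR just means a signal (typically SIGPROF) arrived mid-sleep. A standalone sketch of that sleep pattern, using the same millisecond-to-microsecond conversion and 100 us compensation as above:

#include <unistd.h>
#include <errno.h>

// Sketch: sleep one sampling interval, treating EINTR as benign.
void SleepOneIntervalSketch(int interval_ms) {
  useconds_t interval_us = interval_ms * 1000 - 100;  // compensate delivery delay
  int result = usleep(interval_us);
  // EINTR just means a signal interrupted the sleep; any other failure
  // would indicate a bad interval value.
  (void)result;
}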
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index 256dc75..9118818 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -139,7 +139,7 @@
 
 
 void OS::VPrint(const char* format, va_list args) {
-#if defined(ANDROID)
+#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
   LOG_PRI_VA(ANDROID_LOG_INFO, LOG_TAG, format, args);
 #else
   vprintf(format, args);
@@ -156,7 +156,7 @@
 
 
 void OS::VFPrint(FILE* out, const char* format, va_list args) {
-#if defined(ANDROID)
+#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
   LOG_PRI_VA(ANDROID_LOG_INFO, LOG_TAG, format, args);
 #else
   vfprintf(out, format, args);
@@ -173,7 +173,7 @@
 
 
 void OS::VPrintError(const char* format, va_list args) {
-#if defined(ANDROID)
+#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
   LOG_PRI_VA(ANDROID_LOG_ERROR, LOG_TAG, format, args);
 #else
   vfprintf(stderr, format, args);
diff --git a/src/platform.h b/src/platform.h
index 88825e6..e2f50a6 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -613,6 +613,7 @@
   void ResetSamplesTaken() { samples_taken_ = 0; }
 
   class PlatformData;
+  PlatformData* data() { return data_; }
 
  protected:
   virtual void DoSampleStack(TickSample* sample) = 0;
diff --git a/src/runtime.cc b/src/runtime.cc
index 048551b..9349dc5 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -7929,14 +7929,14 @@
 
 static ObjectPair CompileGlobalEval(Handle<String> source,
                                     Handle<Object> receiver,
-                                    StrictModeFlag mode) {
+                                    StrictModeFlag strict_mode) {
   // Deal with a normal eval call with a string argument. Compile it
   // and return the compiled function bound in the local context.
   Handle<SharedFunctionInfo> shared = Compiler::CompileEval(
       source,
       Handle<Context>(Top::context()),
       Top::context()->IsGlobalContext(),
-      mode);
+      strict_mode);
   if (shared.is_null()) return MakePair(Failure::Exception(), NULL);
   Handle<JSFunction> compiled = Factory::NewFunctionFromSharedFunctionInfo(
       shared,
diff --git a/src/serialize.cc b/src/serialize.cc
index 3d74ddb..16d2759 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -722,6 +722,11 @@
     LOG(SnapshotPositionEvent(address, source_->position()));
   }
   ReadChunk(current, limit, space_number, address);
+#ifdef DEBUG
+  bool is_codespace = (space == Heap::code_space()) ||
+      ((space == Heap::lo_space()) && (space_number == kLargeCode));
+  ASSERT(HeapObject::FromAddress(address)->IsCode() == is_codespace);
+#endif
 }
 
 
@@ -877,7 +882,7 @@
   CASE_STATEMENT(where, how, within, CODE_SPACE)                               \
   CASE_BODY(where, how, within, CODE_SPACE, kUnknownOffsetFromStart)           \
   CASE_STATEMENT(where, how, within, kLargeCode)                               \
-  CASE_BODY(where, how, within, LO_SPACE, kUnknownOffsetFromStart)
+  CASE_BODY(where, how, within, kLargeCode, kUnknownOffsetFromStart)
 
 #define EMIT_COMMON_REFERENCE_PATTERNS(pseudo_space_number,                    \
                                        space_number,                           \
diff --git a/src/v8natives.js b/src/v8natives.js
index 52ff8d2..4fcf0ac 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -251,7 +251,11 @@
   if (!IS_FUNCTION(fun)) {
     throw new $TypeError('Object.prototype.__defineGetter__: Expecting function');
   }
-  return %DefineAccessor(ToObject(this), ToString(name), GETTER, fun);
+  var desc = new PropertyDescriptor();
+  desc.setGet(fun);
+  desc.setEnumerable(true);
+  desc.setConfigurable(true);
+  DefineOwnProperty(ToObject(this), ToString(name), desc, false);
 }
 
 
@@ -271,7 +275,11 @@
     throw new $TypeError(
         'Object.prototype.__defineSetter__: Expecting function');
   }
-  return %DefineAccessor(ToObject(this), ToString(name), SETTER, fun);
+  var desc = new PropertyDescriptor();
+  desc.setSet(fun);
+  desc.setEnumerable(true);
+  desc.setConfigurable(true);
+  DefineOwnProperty(ToObject(this), ToString(name), desc, false);
 }
 
 
@@ -394,6 +402,10 @@
   this.hasSetter_ = false;
 }
 
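+// Clearing the prototype keeps descriptor objects independent of any
+// user modifications to Object.prototype (assumed intent of this line).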
+PropertyDescriptor.prototype.__proto__ = null;
+PropertyDescriptor.prototype.toString = function() {
+  return "[object PropertyDescriptor]";
+};
 
 PropertyDescriptor.prototype.setValue = function(value) {
   this.value_ = value;
@@ -561,8 +573,13 @@
 
   // Error handling according to spec.
   // Step 3
-  if (IS_UNDEFINED(current) && !extensible)
-    throw MakeTypeError("define_disallowed", ["defineProperty"]);
+  if (IS_UNDEFINED(current) && !extensible) {
+    if (should_throw) {
+      throw MakeTypeError("define_disallowed", ["defineProperty"]);
+    } else {
+      return;
+    }
+  }
 
   if (!IS_UNDEFINED(current)) {
     // Step 5 and 6
@@ -587,31 +604,55 @@
       if (desc.isConfigurable() ||
           (desc.hasEnumerable() &&
            desc.isEnumerable() != current.isEnumerable())) {
-        throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+        if (should_throw) {
+          throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+        } else {
+          return;
+        }
       }
       // Step 8
       if (!IsGenericDescriptor(desc)) {
         // Step 9a
         if (IsDataDescriptor(current) != IsDataDescriptor(desc)) {
-          throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+          if (should_throw) {
+            throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+          } else {
+            return;
+          }
         }
         // Step 10a
         if (IsDataDescriptor(current) && IsDataDescriptor(desc)) {
           if (!current.isWritable() && desc.isWritable()) {
-            throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+            if (should_throw) {
+              throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+            } else {
+              return;
+            }
           }
           if (!current.isWritable() && desc.hasValue() &&
               !SameValue(desc.getValue(), current.getValue())) {
-            throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+            if (should_throw) {
+              throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+            } else {
+              return;
+            }
           }
         }
         // Step 11
         if (IsAccessorDescriptor(desc) && IsAccessorDescriptor(current)) {
           if (desc.hasSetter() && !SameValue(desc.getSet(), current.getSet())) {
-            throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+            if (should_throw) {
+              throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+            } else {
+              return;
+            }
           }
           if (desc.hasGetter() && !SameValue(desc.getGet(),current.getGet())) {
-            throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+            if (should_throw) {
+              throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
+            } else {
+              return;
+            }
           }
         }
       }
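
The repeated throw-or-return branches above implement the ES5 "Reject" step of [[DefineOwnProperty]]: when the Throw argument is false (as in the new __defineGetter__/__defineSetter__ call sites, which pass should_throw == false), a disallowed definition fails silently instead of raising a TypeError. A compact C++ sketch of that pattern (hypothetical helper, not from this patch):

#include <stdexcept>

// Sketch of ES5 "Reject": raise an error or report failure, depending
// on the Throw flag.  std::runtime_error stands in for TypeError.
bool RejectSketch(bool should_throw, const char* message) {
  if (should_throw) throw std::runtime_error(message);
  return false;  // silent failure when Throw is false
}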
diff --git a/src/version.cc b/src/version.cc
index 45823ba..1529f64 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     2
-#define BUILD_NUMBER      2
-#define PATCH_LEVEL       1
+#define BUILD_NUMBER      3
+#define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false
 
 // Define SONAME to have the SCons build put a specific SONAME into the
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index f879ae7..7eaac91 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -338,7 +338,9 @@
 byte* Assembler::spare_buffer_ = NULL;
 
 Assembler::Assembler(void* buffer, int buffer_size)
-    : code_targets_(100), positions_recorder_(this) {
+    : code_targets_(100),
+      positions_recorder_(this),
+      emit_debug_code_(FLAG_debug_code) {
   if (buffer == NULL) {
     // Do our own buffer management.
     if (buffer_size <= kMinimalBufferSize) {
@@ -3114,7 +3116,7 @@
       Serializer::TooLateToEnableNow();
     }
 #endif
-    if (!Serializer::enabled() && !FLAG_debug_code) {
+    if (!Serializer::enabled() && !emit_debug_code()) {
       return;
     }
   }
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 0b7bdc0..9fcb95c 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -511,6 +511,9 @@
   Assembler(void* buffer, int buffer_size);
   ~Assembler();
 
+  // Overrides the default provided by FLAG_debug_code.
+  void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
+
   // GetCode emits any pending (non-emitted) code and fills the descriptor
   // desc. GetCode() is idempotent; it returns the same result if no other
   // Assembler functions are invoked in between GetCode() calls.
@@ -656,7 +659,7 @@
 
   // Move sign extended immediate to memory location.
   void movq(const Operand& dst, Immediate value);
-  // New x64 instructions to load a 64-bit immediate into a register.
+  // Instructions to load a 64-bit immediate into a register.
   // All 64-bit immediates must have a relocation mode.
   void movq(Register dst, void* ptr, RelocInfo::Mode rmode);
   void movq(Register dst, int64_t value, RelocInfo::Mode rmode);
@@ -681,7 +684,7 @@
   void repmovsl();
   void repmovsq();
 
-  // New x64 instruction to load from an immediate 64-bit pointer into RAX.
+  // Instruction to load from an immediate 64-bit pointer into RAX.
   void load_rax(void* ptr, RelocInfo::Mode rmode);
   void load_rax(ExternalReference ext);
 
@@ -1350,6 +1353,9 @@
   static const int kMaximalBufferSize = 512*MB;
   static const int kMinimalBufferSize = 4*KB;
 
+ protected:
+  bool emit_debug_code() const { return emit_debug_code_; }
+
  private:
   byte* addr_at(int pos)  { return buffer_ + pos; }
   byte byte_at(int pos)  { return buffer_[pos]; }
@@ -1555,6 +1561,9 @@
   byte* last_pc_;
 
   PositionsRecorder positions_recorder_;
+
+  bool emit_debug_code_;
+
   friend class PositionsRecorder;
 };
 
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index b11072c..ee5237e 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -2244,11 +2244,14 @@
   Label slow;
   __ JumpIfNotSmi(rdx, &slow);
 
-  // Check if the calling frame is an arguments adaptor frame.
+  // Check if the calling frame is an arguments adaptor frame.  We look at the
+  // context offset, and if the frame is not a regular one, we find a
+  // Smi instead of the context.  We can't use SmiCompare here, because that
+  // only works for comparing two smis.
   Label adaptor;
   __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset),
-                Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
+         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ j(equal, &adaptor);
 
   // Check index against formal parameters count limit passed in
@@ -2303,8 +2306,8 @@
   // Check if the calling frame is an arguments adaptor frame.
   Label adaptor_frame, try_allocate, runtime;
   __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
-                Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ Cmp(Operand(rdx, StandardFrameConstants::kContextOffset),
+         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ j(equal, &adaptor_frame);
 
   // Get the length from the frame.
@@ -4157,8 +4160,8 @@
   // Look at the length of the result of adding the two strings.
   STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
   __ SmiAdd(rbx, rbx, rcx);
-  // Use the runtime system when adding two one character strings, as it
-  // contains optimizations for this specific case using the symbol table.
+  // Use the symbol table when adding two one-character strings, since
+  // returning a symbol here helps later optimizations.
   __ SmiCompare(rbx, Smi::FromInt(2));
   __ j(not_equal, &longer_than_two);
 
@@ -4510,15 +4513,14 @@
                     FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
   __ decl(mask);
 
-  Register undefined = scratch4;
-  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+  Register map = scratch4;
 
   // Registers
   // chars:        two character string, char 1 in byte 0 and char 2 in byte 1.
   // hash:         hash of two character string (32-bit int)
   // symbol_table: symbol table
   // mask:         capacity mask (32-bit int)
-  // undefined:    undefined value
+  // map:          -
   // scratch:      -
 
   // Perform a number of probes in the symbol table.
@@ -4533,7 +4535,7 @@
     }
     __ andl(scratch, mask);
 
-    // Load the entry from the symble table.
+    // Load the entry from the symbol table.
     Register candidate = scratch;  // Scratch register contains candidate.
     STATIC_ASSERT(SymbolTable::kEntrySize == 1);
     __ movq(candidate,
@@ -4543,8 +4545,16 @@
                          SymbolTable::kElementsStartOffset));
 
     // If entry is undefined no string with this hash can be found.
-    __ cmpq(candidate, undefined);
+    NearLabel is_string;
+    __ CmpObjectType(candidate, ODDBALL_TYPE, map);
+    __ j(not_equal, &is_string);
+
+    __ CompareRoot(candidate, Heap::kUndefinedValueRootIndex);
     __ j(equal, not_found);
+    // Must be null (deleted entry).
+    __ jmp(&next_probe[i]);
+
+    __ bind(&is_string);
 
     // If length is not 2 the string is not a candidate.
     __ SmiCompare(FieldOperand(candidate, String::kLengthOffset),
@@ -4556,8 +4566,7 @@
     Register temp = kScratchRegister;
 
     // Check that the candidate is a non-external ascii string.
-    __ movq(temp, FieldOperand(candidate, HeapObject::kMapOffset));
-    __ movzxbl(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
+    __ movzxbl(temp, FieldOperand(map, Map::kInstanceTypeOffset));
     __ JumpIfInstanceTypeIsNotSequentialAscii(
         temp, temp, &next_probe[i]);
 
@@ -4909,60 +4918,6 @@
 }
 
 
-void StringCharAtStub::Generate(MacroAssembler* masm) {
-  // Expects two arguments (object, index) on the stack:
-
-  // Stack frame on entry.
-  //  rsp[0]: return address
-  //  rsp[8]: index
-  //  rsp[16]: object
-
-  Register object = rbx;
-  Register index = rax;
-  Register scratch1 = rcx;
-  Register scratch2 = rdx;
-  Register result = rax;
-
-  __ pop(scratch1);  // Return address.
-  __ pop(index);
-  __ pop(object);
-  __ push(scratch1);
-
-  Label need_conversion;
-  Label index_out_of_range;
-  Label done;
-  StringCharAtGenerator generator(object,
-                                  index,
-                                  scratch1,
-                                  scratch2,
-                                  result,
-                                  &need_conversion,
-                                  &need_conversion,
-                                  &index_out_of_range,
-                                  STRING_INDEX_IS_NUMBER);
-  generator.GenerateFast(masm);
-  __ jmp(&done);
-
-  __ bind(&index_out_of_range);
-  // When the index is out of range, the spec requires us to return
-  // the empty string.
-  __ LoadRoot(result, Heap::kEmptyStringRootIndex);
-  __ jmp(&done);
-
-  __ bind(&need_conversion);
-  // Move smi zero into the result register, which will trigger
-  // conversion.
-  __ Move(result, Smi::FromInt(0));
-  __ jmp(&done);
-
-  StubRuntimeCallHelper call_helper;
-  generator.GenerateSlow(masm, call_helper);
-
-  __ bind(&done);
-  __ ret(0);
-}
-
-
 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
   ASSERT(state_ == CompareIC::SMIS);
   NearLabel miss;
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 3c054cd..21c90a1 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -766,7 +766,7 @@
       __ AbortIfNotNumber(value.reg());
     }
     // Smi => false iff zero.
-    __ SmiCompare(value.reg(), Smi::FromInt(0));
+    __ Cmp(value.reg(), Smi::FromInt(0));
     if (value.is_smi()) {
       value.Unuse();
       dest->Split(not_zero);
@@ -794,7 +794,7 @@
     dest->false_target()->Branch(equal);
 
     // Smi => false iff zero.
-    __ SmiCompare(value.reg(), Smi::FromInt(0));
+    __ Cmp(value.reg(), Smi::FromInt(0));
     dest->false_target()->Branch(equal);
     Condition is_smi = masm_->CheckSmi(value.reg());
     dest->true_target()->Branch(is_smi);
@@ -1036,7 +1036,7 @@
                                         true, overwrite_mode);
   } else {
     // Set the flags based on the operation, type and loop nesting level.
-    // Bit operations always assume they likely operate on Smis. Still only
+    // Bit operations always assume they likely operate on smis. Still only
     // generate the inline Smi check code if this operation is part of a loop.
     // For all other operations only inline the Smi check code for likely smis
     // if the operation is part of a loop.
@@ -2108,7 +2108,7 @@
       if (cc == equal) {
         Label comparison_done;
         __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
-                Smi::FromInt(1));
+                      Smi::FromInt(1));
         __ j(not_equal, &comparison_done);
         uint8_t char_value =
             static_cast<uint8_t>(String::cast(*right_val)->Get(0));
@@ -2294,7 +2294,7 @@
       // CompareStub and the inline code both support all values of cc.
     }
     // Implement comparison against a constant Smi, inlining the case
-    // where both sides are Smis.
+    // where both sides are smis.
     left_side->ToRegister();
     Register left_reg = left_side->reg();
     Smi* constant_smi = Smi::cast(*right_side->handle());
@@ -2304,7 +2304,6 @@
         __ AbortIfNotSmi(left_reg);
       }
       // Test smi equality and comparison by signed int comparison.
-      // Both sides are smis, so we can use an Immediate.
       __ SmiCompare(left_reg, constant_smi);
       left_side->Unuse();
       right_side->Unuse();
@@ -2314,7 +2313,7 @@
       JumpTarget is_smi;
       if (cc == equal) {
         // We can do the equality comparison before the smi check.
-        __ SmiCompare(left_reg, constant_smi);
+        __ Cmp(left_reg, constant_smi);
         dest->true_target()->Branch(equal);
         Condition left_is_smi = masm_->CheckSmi(left_reg);
         dest->false_target()->Branch(left_is_smi);
@@ -2575,8 +2574,8 @@
       // adaptor frame below it.
       Label invoke, adapted;
       __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-      __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
-                    Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+      __ Cmp(Operand(rdx, StandardFrameConstants::kContextOffset),
+             Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
       __ j(equal, &adapted);
 
       // No arguments adaptor frame. Copy fixed number of arguments.
@@ -3857,7 +3856,7 @@
   __ movq(rbx, rax);
 
   // If the property has been removed while iterating, we just skip it.
-  __ SmiCompare(rbx, Smi::FromInt(0));
+  __ Cmp(rbx, Smi::FromInt(0));
   node->continue_target()->Branch(equal);
 
   end_del_check.Bind();
@@ -6198,15 +6197,15 @@
 
   // Skip the arguments adaptor frame if it exists.
   Label check_frame_marker;
-  __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
-                Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ Cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
+         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ j(not_equal, &check_frame_marker);
   __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
 
   // Check the marker in the calling frame.
   __ bind(&check_frame_marker);
-  __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
-                Smi::FromInt(StackFrame::CONSTRUCT));
+  __ Cmp(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
+         Smi::FromInt(StackFrame::CONSTRUCT));
   fp.Unuse();
   destination()->Split(equal);
 }
@@ -6226,8 +6225,8 @@
 
   // Check if the calling frame is an arguments adaptor frame.
   __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
-                Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ Cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
+         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ j(not_equal, &exit);
 
   // Arguments adaptor case: Read the arguments length from the
@@ -6783,8 +6782,8 @@
   // Fetch the map and check if array is in fast case.
   // Check that object doesn't require security checks and
   // has no indexed interceptor.
-  __ CmpObjectType(object.reg(), FIRST_JS_OBJECT_TYPE, tmp1.reg());
-  deferred->Branch(below);
+  __ CmpObjectType(object.reg(), JS_ARRAY_TYPE, tmp1.reg());
+  deferred->Branch(not_equal);
   __ testb(FieldOperand(tmp1.reg(), Map::kBitFieldOffset),
            Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
   deferred->Branch(not_zero);
@@ -6826,7 +6825,7 @@
 
   Label done;
   __ InNewSpace(tmp1.reg(), tmp2.reg(), equal, &done);
-  // Possible optimization: do a check that both values are Smis
+  // Possible optimization: do a check that both values are smis
   // (OR them together and test against the smi mask.)
 
   __ movq(tmp2.reg(), tmp1.reg());
@@ -8516,12 +8515,6 @@
     __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
     deferred->Branch(not_equal);
 
-    // Check that the key is within bounds.  Both the key and the length of
-    // the JSArray are smis. Use unsigned comparison to handle negative keys.
-    __ SmiCompare(FieldOperand(receiver.reg(), JSArray::kLengthOffset),
-                  key.reg());
-    deferred->Branch(below_equal);
-
     // Get the elements array from the receiver and check that it is not a
     // dictionary.
     __ movq(tmp.reg(),
@@ -8550,6 +8543,14 @@
             kScratchRegister);
     deferred->Branch(not_equal);
 
+    // Check that the key is within bounds.  Both the key and the length of
+    // the JSArray are smis (because the fixed array check above ensures the
+    // elements are in fast case). Use unsigned comparison to handle negative
+    // keys.
+    __ SmiCompare(FieldOperand(receiver.reg(), JSArray::kLengthOffset),
+                  key.reg());
+    deferred->Branch(below_equal);
+
     // Store the value.
     SmiIndex index =
         masm()->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 21a100f..80dbfe1 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -451,9 +451,11 @@
 
 int DisassemblerX64::PrintRightOperandHelper(
     byte* modrmp,
-    RegisterNameMapping register_name) {
+    RegisterNameMapping direct_register_name) {
   int mod, regop, rm;
   get_modrm(*modrmp, &mod, &regop, &rm);
+  RegisterNameMapping register_name = (mod == 3) ? direct_register_name :
+      &DisassemblerX64::NameOfCPURegister;
   switch (mod) {
     case 0:
       if ((rm & 7) == 5) {
@@ -1028,7 +1030,7 @@
       } else if (opcode == 0x6F) {
         AppendToBuffer("movdqa %s,",
                        NameOfXMMRegister(regop));
-        current += PrintRightOperand(current);
+        current += PrintRightXMMOperand(current);
       } else if (opcode == 0x7E) {
         AppendToBuffer("mov%c ",
                        rex_w() ? 'q' : 'd');
@@ -1036,7 +1038,7 @@
         AppendToBuffer(", %s", NameOfXMMRegister(regop));
       } else if (opcode == 0x7F) {
         AppendToBuffer("movdqa ");
-        current += PrintRightOperand(current);
+        current += PrintRightXMMOperand(current);
         AppendToBuffer(", %s", NameOfXMMRegister(regop));
       } else {
         const char* mnemonic = "?";
@@ -1068,11 +1070,11 @@
       int mod, regop, rm;
       get_modrm(*current, &mod, &regop, &rm);
       if (opcode == 0x11) {
-        current += PrintRightOperand(current);
+        current += PrintRightXMMOperand(current);
         AppendToBuffer(",%s", NameOfXMMRegister(regop));
       } else {
         AppendToBuffer("%s,", NameOfXMMRegister(regop));
-        current += PrintRightOperand(current);
+        current += PrintRightXMMOperand(current);
       }
     } else if (opcode == 0x2A) {
       // CVTSI2SD: integer to XMM double conversion.
@@ -1435,19 +1437,26 @@
       {
         bool is_byte = *data == 0xC6;
         data++;
-
-        AppendToBuffer("mov%c ", is_byte ? 'b' : operand_size_code());
-        data += PrintRightOperand(data);
-        int32_t imm = is_byte ? *data : *reinterpret_cast<int32_t*>(data);
-        AppendToBuffer(",0x%x", imm);
-        data += is_byte ? 1 : 4;
+        if (is_byte) {
+          AppendToBuffer("movb ");
+          data += PrintRightByteOperand(data);
+          int32_t imm = *data;
+          AppendToBuffer(",0x%x", imm);
+          data++;
+        } else {
+          AppendToBuffer("mov%c ", operand_size_code());
+          data += PrintRightOperand(data);
+          int32_t imm = *reinterpret_cast<int32_t*>(data);
+          AppendToBuffer(",0x%x", imm);
+          data += 4;
+        }
       }
         break;
 
       case 0x80: {
         data++;
         AppendToBuffer("cmpb ");
-        data += PrintRightOperand(data);
+        data += PrintRightByteOperand(data);
         int32_t imm = *data;
         AppendToBuffer(",0x%x", imm);
         data++;
@@ -1461,9 +1470,15 @@
         int mod, regop, rm;
         data++;
         get_modrm(*data, &mod, &regop, &rm);
-        AppendToBuffer("mov%c ", is_byte ? 'b' : operand_size_code());
-        data += PrintRightOperand(data);
-        AppendToBuffer(",%s", NameOfCPURegister(regop));
+        if (is_byte) {
+          AppendToBuffer("movb ");
+          data += PrintRightByteOperand(data);
+          AppendToBuffer(",%s", NameOfByteCPURegister(regop));
+        } else {
+          AppendToBuffer("mov%c ", operand_size_code());
+          data += PrintRightOperand(data);
+          AppendToBuffer(",%s", NameOfCPURegister(regop));
+        }
       }
         break;
 
@@ -1493,7 +1508,7 @@
         get_modrm(*data, &mod, &regop, &rm);
         if (regop == 1) {
           AppendToBuffer("decb ");
-          data += PrintRightOperand(data);
+          data += PrintRightByteOperand(data);
         } else {
           UnimplementedInstruction();
         }
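
All of the disassembler changes above share one theme: byte-sized instructions (movb, cmpb, decb) must decode byte operands and print byte register names, and the movdqa/movsd-style cases must print XMM operands. The register-name half matters because the same ModR/M register number denotes a different register at different operand sizes. A small illustration with hypothetical tables (REX-prefixed byte names; the legacy ah..bh encodings are ignored here):

// Sketch: one ModR/M register code, two names depending on width.
// The real code uses NameOfCPURegister vs. NameOfByteCPURegister.
static const char* const kQuadNamesSketch[] = {
    "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi"};
static const char* const kByteNamesSketch[] = {
    "al", "cl", "dl", "bl", "spl", "bpl", "sil", "dil"};

const char* NameOfRegisterSketch(int code, bool is_byte) {
  return is_byte ? kByteNamesSketch[code] : kQuadNamesSketch[code];
}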
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 0a713ef..6625afc 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -549,7 +549,7 @@
   __ CompareRoot(result_register(), Heap::kFalseValueRootIndex);
   __ j(equal, if_false);
   STATIC_ASSERT(kSmiTag == 0);
-  __ SmiCompare(result_register(), Smi::FromInt(0));
+  __ Cmp(result_register(), Smi::FromInt(0));
   __ j(equal, if_false);
   Condition is_smi = masm_->CheckSmi(result_register());
   __ j(is_smi, if_true);
@@ -738,9 +738,9 @@
              prop->key()->AsLiteral()->handle()->IsSmi());
       __ Move(rcx, prop->key()->AsLiteral()->handle());
 
-      Handle<Code> ic(Builtins::builtin(is_strict()
-          ? Builtins::KeyedStoreIC_Initialize_Strict
-          : Builtins::KeyedStoreIC_Initialize));
+      Handle<Code> ic(Builtins::builtin(
+          is_strict_mode() ? Builtins::KeyedStoreIC_Initialize_Strict
+                           : Builtins::KeyedStoreIC_Initialize));
       EmitCallIC(ic, RelocInfo::CODE_TARGET);
     }
   }
@@ -995,7 +995,7 @@
   __ push(rcx);  // Enumerable.
   __ push(rbx);  // Current entry.
   __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
-  __ SmiCompare(rax, Smi::FromInt(0));
+  __ Cmp(rax, Smi::FromInt(0));
   __ j(equal, loop_statement.continue_target());
   __ movq(rbx, rax);
 
@@ -1558,27 +1558,26 @@
     }
   }
 
+  // For compound assignments we need another deoptimization point after the
+  // variable/property load.
   if (expr->is_compound()) {
     { AccumulatorValueContext context(this);
       switch (assign_type) {
         case VARIABLE:
           EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+          PrepareForBailout(expr->target(), TOS_REG);
           break;
         case NAMED_PROPERTY:
           EmitNamedPropertyLoad(property);
+          PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
           break;
         case KEYED_PROPERTY:
           EmitKeyedPropertyLoad(property);
+          PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
           break;
       }
     }
 
-    // For property compound assignments we need another deoptimization
-    // point after the property load.
-    if (property != NULL) {
-      PrepareForBailoutForId(expr->CompoundLoadId(), TOS_REG);
-    }
-
     Token::Value op = expr->binary_op();
     __ push(rax);  // Left operand goes on the stack.
     VisitForAccumulatorValue(expr->value());
@@ -1742,8 +1741,8 @@
       __ pop(rax);  // Restore value.
       __ Move(rcx, prop->key()->AsLiteral()->handle());
       Handle<Code> ic(Builtins::builtin(
-          is_strict() ? Builtins::StoreIC_Initialize_Strict
-                      : Builtins::StoreIC_Initialize));
+          is_strict_mode() ? Builtins::StoreIC_Initialize_Strict
+                           : Builtins::StoreIC_Initialize));
       EmitCallIC(ic, RelocInfo::CODE_TARGET);
       break;
     }
@@ -1765,8 +1764,8 @@
       }
       __ pop(rax);  // Restore value.
       Handle<Code> ic(Builtins::builtin(
-          is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
-                      : Builtins::KeyedStoreIC_Initialize));
+          is_strict_mode() ? Builtins::KeyedStoreIC_Initialize_Strict
+                           : Builtins::KeyedStoreIC_Initialize));
       EmitCallIC(ic, RelocInfo::CODE_TARGET);
       break;
     }
@@ -1790,9 +1789,9 @@
     // rcx, and the global object on the stack.
     __ Move(rcx, var->name());
     __ movq(rdx, GlobalObjectOperand());
-    Handle<Code> ic(Builtins::builtin(is_strict()
-        ? Builtins::StoreIC_Initialize_Strict
-        : Builtins::StoreIC_Initialize));
+    Handle<Code> ic(Builtins::builtin(
+        is_strict_mode() ? Builtins::StoreIC_Initialize_Strict
+                         : Builtins::StoreIC_Initialize));
     EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
 
   } else if (op == Token::INIT_CONST) {
@@ -1894,8 +1893,8 @@
     __ pop(rdx);
   }
   Handle<Code> ic(Builtins::builtin(
-      is_strict() ? Builtins::StoreIC_Initialize_Strict
-                  : Builtins::StoreIC_Initialize));
+      is_strict_mode() ? Builtins::StoreIC_Initialize_Strict
+                       : Builtins::StoreIC_Initialize));
   EmitCallIC(ic, RelocInfo::CODE_TARGET);
 
   // If the assignment ends an initialization block, revert to fast case.
@@ -1934,8 +1933,8 @@
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
   Handle<Code> ic(Builtins::builtin(
-      is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
-                  : Builtins::KeyedStoreIC_Initialize));
+      is_strict_mode() ? Builtins::KeyedStoreIC_Initialize_Strict
+                       : Builtins::KeyedStoreIC_Initialize));
   EmitCallIC(ic, RelocInfo::CODE_TARGET);
 
   // If the assignment ends an initialization block, revert to fast case.
@@ -2502,15 +2501,15 @@
 
   // Skip the arguments adaptor frame if it exists.
   Label check_frame_marker;
-  __ SmiCompare(Operand(rax, StandardFrameConstants::kContextOffset),
-                Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ Cmp(Operand(rax, StandardFrameConstants::kContextOffset),
+         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ j(not_equal, &check_frame_marker);
   __ movq(rax, Operand(rax, StandardFrameConstants::kCallerFPOffset));
 
   // Check the marker in the calling frame.
   __ bind(&check_frame_marker);
-  __ SmiCompare(Operand(rax, StandardFrameConstants::kMarkerOffset),
-                Smi::FromInt(StackFrame::CONSTRUCT));
+  __ Cmp(Operand(rax, StandardFrameConstants::kMarkerOffset),
+         Smi::FromInt(StackFrame::CONSTRUCT));
   PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
   Split(equal, if_true, if_false, fall_through);
 
@@ -2564,8 +2563,8 @@
 
   // Check if the calling frame is an arguments adaptor frame.
   __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset),
-                Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
+         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ j(not_equal, &exit);
 
   // Arguments adaptor case: Read the arguments length from the
@@ -3010,8 +3009,8 @@
   // Fetch the map and check if array is in fast case.
   // Check that object doesn't require security checks and
   // has no indexed interceptor.
-  __ CmpObjectType(object, FIRST_JS_OBJECT_TYPE, temp);
-  __ j(below, &slow_case);
+  __ CmpObjectType(object, JS_ARRAY_TYPE, temp);
+  __ j(not_equal, &slow_case);
   __ testb(FieldOperand(temp, Map::kBitFieldOffset),
            Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
   __ j(not_zero, &slow_case);
@@ -3450,7 +3449,11 @@
 
   // We need a second deoptimization point after loading the value
   // in case evaluating the property load may have a side effect.
-  PrepareForBailout(expr->increment(), TOS_REG);
+  if (assign_type == VARIABLE) {
+    PrepareForBailout(expr->expression(), TOS_REG);
+  } else {
+    PrepareForBailout(expr->increment(), TOS_REG);
+  }
 
   // Call ToNumber only if operand is not a smi.
   NearLabel no_conversion;
@@ -3547,8 +3550,8 @@
       __ Move(rcx, prop->key()->AsLiteral()->handle());
       __ pop(rdx);
       Handle<Code> ic(Builtins::builtin(
-          is_strict() ? Builtins::StoreIC_Initialize_Strict
-                      : Builtins::StoreIC_Initialize));
+          is_strict_mode() ? Builtins::StoreIC_Initialize_Strict
+                           : Builtins::StoreIC_Initialize));
       EmitCallIC(ic, RelocInfo::CODE_TARGET);
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
@@ -3564,8 +3567,8 @@
       __ pop(rcx);
       __ pop(rdx);
       Handle<Code> ic(Builtins::builtin(
-          is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
-                      : Builtins::KeyedStoreIC_Initialize));
+          is_strict_mode() ? Builtins::KeyedStoreIC_Initialize_Strict
+                           : Builtins::KeyedStoreIC_Initialize));
       EmitCallIC(ic, RelocInfo::CODE_TARGET);
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index dbadcac..9cd0c1c 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -714,11 +714,6 @@
       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
       break;
     }
-    case CodeStub::StringCharAt: {
-      StringCharAtStub stub;
-      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-      break;
-    }
     case CodeStub::NumberToString: {
       NumberToStringStub stub;
       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@@ -1249,7 +1244,7 @@
       __ j(equal, true_label);
       __ CompareRoot(reg, Heap::kFalseValueRootIndex);
       __ j(equal, false_label);
-      __ SmiCompare(reg, Smi::FromInt(0));
+      __ Cmp(reg, Smi::FromInt(0));
       __ j(equal, false_label);
       __ JumpIfSmi(reg, true_label);
 
@@ -1989,7 +1984,7 @@
 }
 
 
-void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
   Register result = ToRegister(instr->result());
   if (result.is(rax)) {
     __ load_rax(instr->hydrogen()->cell().location(),
@@ -2005,6 +2000,18 @@
 }
 
 
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+  ASSERT(ToRegister(instr->global_object()).is(rax));
+  ASSERT(ToRegister(instr->result()).is(rax));
+
+  __ Move(rcx, instr->name());
+  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
+                                               RelocInfo::CODE_TARGET_CONTEXT;
+  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+  CallCode(ic, mode, instr);
+}
+
+
 void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
   Register value = ToRegister(instr->InputAt(0));
   Register temp = ToRegister(instr->TempAt(0));
@@ -2201,8 +2208,8 @@
   // Check for arguments adapter frame.
   NearLabel done, adapted;
   __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ SmiCompare(Operand(result, StandardFrameConstants::kContextOffset),
-                Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
+         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ j(equal, &adapted);
 
   // No arguments adaptor frame.
@@ -3302,11 +3309,14 @@
 
 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
   LOperand* input = instr->InputAt(0);
-  ASSERT(input->IsRegister());
   Condition cc = masm()->CheckSmi(ToRegister(input));
-  if (instr->condition() != equal) {
-    cc = NegateCondition(cc);
-  }
+  DeoptimizeIf(NegateCondition(cc), instr->environment());
+}
+
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+  LOperand* input = instr->InputAt(0);
+  Condition cc = masm()->CheckSmi(ToRegister(input));
   DeoptimizeIf(cc, instr->environment());
 }
 
@@ -3685,15 +3695,15 @@
 
   // Skip the arguments adaptor frame if it exists.
   NearLabel check_frame_marker;
-  __ SmiCompare(Operand(temp, StandardFrameConstants::kContextOffset),
-                Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
+         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ j(not_equal, &check_frame_marker);
   __ movq(temp, Operand(rax, StandardFrameConstants::kCallerFPOffset));
 
   // Check the marker in the calling frame.
   __ bind(&check_frame_marker);
-  __ SmiCompare(Operand(temp, StandardFrameConstants::kMarkerOffset),
-                Smi::FromInt(StackFrame::CONSTRUCT));
+  __ Cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
+         Smi::FromInt(StackFrame::CONSTRUCT));
 }
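
DoLoadGlobalGeneric's relocation-mode choice above encodes a language rule: a contextual global load throws a ReferenceError for an undeclared name, while typeof must observe undefined instead, so loads inside typeof use the non-contextual CODE_TARGET mode. A minimal sketch of that dispatch (enum names are illustrative):

// Sketch of the for_typeof() distinction: mirrors
//   for_typeof ? RelocInfo::CODE_TARGET : RelocInfo::CODE_TARGET_CONTEXT
enum class GlobalLoadModeSketch { kContextual, kNonContextual };

GlobalLoadModeSketch ModeForGlobalLoad(bool for_typeof) {
  return for_typeof ? GlobalLoadModeSketch::kNonContextual
                    : GlobalLoadModeSketch::kContextual;
}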
 
 
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 99a7ace..624d4b5 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -1658,7 +1658,7 @@
 
 LInstruction* LChunkBuilder::DoCheckNonSmi(HCheckNonSmi* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  return AssignEnvironment(new LCheckSmi(value, zero));
+  return AssignEnvironment(new LCheckNonSmi(value));
 }
 
 
@@ -1678,7 +1678,7 @@
 
 LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
-  return AssignEnvironment(new LCheckSmi(value, not_zero));
+  return AssignEnvironment(new LCheckSmi(value));
 }
 
 
@@ -1716,14 +1716,21 @@
 }
 
 
-LInstruction* LChunkBuilder::DoLoadGlobal(HLoadGlobal* instr) {
-  LLoadGlobal* result = new LLoadGlobal;
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+  LLoadGlobalCell* result = new LLoadGlobalCell;
   return instr->check_hole_value()
       ? AssignEnvironment(DefineAsRegister(result))
       : DefineAsRegister(result);
 }
 
 
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+  LOperand* global_object = UseFixed(instr->global_object(), rax);
+  LLoadGlobalGeneric* result = new LLoadGlobalGeneric(global_object);
+  return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
 LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
   LStoreGlobal* result = new LStoreGlobal(UseRegister(instr->value()),
                                           TempRegister());
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index 926924b..e1a9f4e 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -70,6 +70,7 @@
   V(CheckFunction)                              \
   V(CheckInstanceType)                          \
   V(CheckMap)                                   \
+  V(CheckNonSmi)                                \
   V(CheckPrototypeMaps)                         \
   V(CheckSmi)                                   \
   V(ClassOfTest)                                \
@@ -117,7 +118,8 @@
   V(LoadContextSlot)                            \
   V(LoadElements)                               \
   V(LoadExternalArrayPointer)                   \
-  V(LoadGlobal)                                 \
+  V(LoadGlobalCell)                             \
+  V(LoadGlobalGeneric)                          \
   V(LoadKeyedFastElement)                       \
   V(LoadKeyedGeneric)                           \
   V(LoadNamedField)                             \
@@ -1225,10 +1227,25 @@
 };
 
 
-class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
  public:
-  DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global")
-  DECLARE_HYDROGEN_ACCESSOR(LoadGlobal)
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
+};
+
+
+class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadGlobalGeneric(LOperand* global_object) {
+    inputs_[0] = global_object;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+  LOperand* global_object() { return inputs_[0]; }
+  Handle<Object> name() const { return hydrogen()->name(); }
+  bool for_typeof() const { return hydrogen()->for_typeof(); }
 };
 
 
@@ -1707,20 +1724,21 @@
 
 class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
  public:
-  LCheckSmi(LOperand* value, Condition condition)
-      : condition_(condition) {
+  explicit LCheckSmi(LOperand* value) {
     inputs_[0] = value;
   }
 
-  Condition condition() const { return condition_; }
+  DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
 
-  virtual void CompileToNative(LCodeGen* generator);
-  virtual const char* Mnemonic() const {
-    return (condition_ == zero) ? "check-non-smi" : "check-smi";
+
+class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckNonSmi(LOperand* value) {
+    inputs_[0] = value;
   }
 
- private:
-  Condition condition_;
+  DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
 };
 
 
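[Editor's note] With the Condition field gone, each check is its own concrete instruction: DECLARE_CONCRETE_INSTRUCTION supplies the mnemonic per class, and the hand-written CompileToNative and Mnemonic overrides disappear. A hedged miniature of the design change, in plain C++ rather than the lithium template machinery:

    // Before: one class whose behavior depended on a stored condition.
    // After: two classes whose behavior is fixed by their type.
    struct CheckSmiInstr {
      static const char* Mnemonic() { return "check-smi"; }
      // codegen deoptimizes when the smi test fails
    };

    struct CheckNonSmiInstr {
      static const char* Mnemonic() { return "check-non-smi"; }
      // codegen deoptimizes when the smi test succeeds
    };
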
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index f4874ad..c5cddf7 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -92,7 +92,7 @@
 void MacroAssembler::RecordWriteHelper(Register object,
                                        Register addr,
                                        Register scratch) {
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     // Check that the object is not in new space.
     NearLabel not_in_new_space;
     InNewSpace(object, scratch, not_equal, &not_in_new_space);
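[Editor's note] Every FLAG_debug_code test in this file becomes emit_debug_code(), routing the decision through a per-assembler accessor whose default comes from the flag but which callers can override per instance. A hedged sketch of the accessor pattern; the scaffolding names here are invented:

    // Hypothetical scaffolding -- only the accessor pattern mirrors the patch.
    struct Flags { static bool debug_code; };
    bool Flags::debug_code = false;

    class AssemblerBase {
     public:
      AssemblerBase() : emit_debug_code_(Flags::debug_code) {}
      void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
      bool emit_debug_code() const { return emit_debug_code_; }
     private:
      bool emit_debug_code_;
    };
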
@@ -124,7 +124,7 @@
   ASSERT(!object.is(rsi) && !value.is(rsi) && !index.is(rsi));
 
   // First, check if a write barrier is even needed. The tests below
-  // catch stores of Smis and stores into young gen.
+  // catch stores of smis and stores into the young generation.
   Label done;
   JumpIfSmi(value, &done);
 
@@ -136,7 +136,7 @@
   // clobbering done inside RecordWriteNonSmi but it's necessary to
   // avoid having the fast case for smis leave the registers
   // unchanged.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
@@ -153,7 +153,7 @@
   ASSERT(!object.is(rsi) && !value.is(rsi) && !address.is(rsi));
 
   // First, check if a write barrier is even needed. The tests below
-  // catch stores of Smis and stores into young gen.
+  // catch stores of smis and stores into the young generation.
   Label done;
   JumpIfSmi(value, &done);
 
@@ -165,7 +165,7 @@
 
   // Clobber all input registers when running with the debug-code flag
   // turned on to provoke errors.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
@@ -179,7 +179,7 @@
                                        Register index) {
   Label done;
 
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     NearLabel okay;
     JumpIfNotSmi(object, &okay);
     Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
@@ -223,7 +223,7 @@
 
   // Clobber all input registers when running with the debug-code flag
   // turned on to provoke errors.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
     movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
@@ -231,12 +231,12 @@
 }
 
 void MacroAssembler::Assert(Condition cc, const char* msg) {
-  if (FLAG_debug_code) Check(cc, msg);
+  if (emit_debug_code()) Check(cc, msg);
 }
 
 
 void MacroAssembler::AssertFastElements(Register elements) {
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     NearLabel ok;
     CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                 Heap::kFixedArrayMapRootIndex);
@@ -707,7 +707,7 @@
 }
 
 void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     movq(dst,
          reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
          RelocInfo::NONE);
@@ -776,7 +776,7 @@
 
 
 void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     testb(dst, Immediate(0x01));
     NearLabel ok;
     j(zero, &ok);
@@ -837,12 +837,24 @@
 }
 
 
-void MacroAssembler::SmiCompare(Register dst, Register src) {
-  cmpq(dst, src);
+void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
+  if (emit_debug_code()) {
+    AbortIfNotSmi(smi1);
+    AbortIfNotSmi(smi2);
+  }
+  cmpq(smi1, smi2);
 }
 
 
 void MacroAssembler::SmiCompare(Register dst, Smi* src) {
+  if (emit_debug_code()) {
+    AbortIfNotSmi(dst);
+  }
+  Cmp(dst, src);
+}
+
+
+void MacroAssembler::Cmp(Register dst, Smi* src) {
   ASSERT(!dst.is(kScratchRegister));
   if (src->value() == 0) {
     testq(dst, dst);
@@ -854,20 +866,39 @@
 
 
 void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
+  if (emit_debug_code()) {
+    AbortIfNotSmi(dst);
+    AbortIfNotSmi(src);
+  }
   cmpq(dst, src);
 }
 
 
 void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
+  if (emit_debug_code()) {
+    AbortIfNotSmi(dst);
+    AbortIfNotSmi(src);
+  }
   cmpq(dst, src);
 }
 
 
 void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
+  if (emit_debug_code()) {
+    AbortIfNotSmi(dst);
+  }
   cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
 }
 
 
+void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
+  // The operand's address must not use the register that holds the smi constant.
+  Register smi_reg = GetSmiConstant(src);
+  ASSERT(!dst.AddressUsesRegister(smi_reg));
+  cmpq(dst, smi_reg);
+}
+
+
 void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
   cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
 }
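[Editor's note] Cmp on a memory operand must first materialize the smi constant in a register, so the operand's address may not be computed from that same register; the ASSERT states the precondition. A plain-C++ model of the hazard being ruled out (illustrative only, no real register file):

    #include <cassert>

    struct MemOperand { int base_reg; };  // address = regs[base_reg] + ...

    // Writing the constant into the register the address depends on would
    // compare against the wrong memory location; the assert forbids it.
    inline void CmpOperandSmi(MemOperand dst, int scratch_reg,
                              long long regs[], long long smi_constant) {
      regs[scratch_reg] = smi_constant;     // GetSmiConstant's role
      assert(dst.base_reg != scratch_reg);  // mirrors the patch's ASSERT
      // ... emit cmpq(memory at regs[dst.base_reg], regs[scratch_reg]) ...
    }
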
@@ -1352,7 +1383,7 @@
 
 void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
   if (source->IsSmi()) {
-    SmiCompare(dst, Smi::cast(*source));
+    Cmp(dst, Smi::cast(*source));
   } else {
     Move(kScratchRegister, source);
     cmpq(dst, kScratchRegister);
@@ -1362,7 +1393,7 @@
 
 void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
   if (source->IsSmi()) {
-    SmiCompare(dst, Smi::cast(*source));
+    Cmp(dst, Smi::cast(*source));
   } else {
     ASSERT(source->IsHeapObject());
     movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
@@ -1753,7 +1784,12 @@
 
 
 void MacroAssembler::AbortIfNotSmi(Register object) {
-  NearLabel ok;
+  Condition is_smi = CheckSmi(object);
+  Assert(is_smi, "Operand is not a smi");
+}
+
+
+void MacroAssembler::AbortIfNotSmi(const Operand& object) {
   Condition is_smi = CheckSmi(object);
   Assert(is_smi, "Operand is not a smi");
 }
@@ -1991,7 +2027,7 @@
   Push(Smi::FromInt(type));
   movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
   push(kScratchRegister);
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     movq(kScratchRegister,
          Factory::undefined_value(),
          RelocInfo::EMBEDDED_OBJECT);
@@ -2002,7 +2038,7 @@
 
 
 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     Move(kScratchRegister, Smi::FromInt(type));
     cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
     Check(equal, "stack frame types must match");
@@ -2152,7 +2188,7 @@
   movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
 
   // When generating debug code, make sure the lexical context is set.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     cmpq(scratch, Immediate(0));
     Check(not_equal, "we should not have an empty lexical context");
   }
@@ -2162,7 +2198,7 @@
   movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
 
   // Check the context is a global context.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
         Factory::global_context_map());
     Check(equal, "JSGlobalObject::global_context should be a global context.");
@@ -2178,7 +2214,7 @@
   // object.
 
   // Check the context is a global context.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     // Preserve original value of holder_reg.
     push(holder_reg);
     movq(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
@@ -2239,7 +2275,7 @@
 
 void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
                                                Register scratch) {
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     testq(result_end, Immediate(kObjectAlignmentMask));
     Check(zero, "Unaligned allocation in new space");
   }
@@ -2270,7 +2306,7 @@
                                         Label* gc_required,
                                         AllocationFlags flags) {
   if (!FLAG_inline_new) {
-    if (FLAG_debug_code) {
+    if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
       movl(result, Immediate(0x7091));
       if (result_end.is_valid()) {
@@ -2328,7 +2364,7 @@
                                         Label* gc_required,
                                         AllocationFlags flags) {
   if (!FLAG_inline_new) {
-    if (FLAG_debug_code) {
+    if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
       movl(result, Immediate(0x7091));
       movl(result_end, Immediate(0x7191));
@@ -2375,7 +2411,7 @@
                                         Label* gc_required,
                                         AllocationFlags flags) {
   if (!FLAG_inline_new) {
-    if (FLAG_debug_code) {
+    if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
       movl(result, Immediate(0x7091));
       movl(result_end, Immediate(0x7191));
@@ -2582,7 +2618,7 @@
   // (i.e., the static scope chain and runtime context chain do not agree).
   // A variable occurring in such a scope should have slot type LOOKUP and
   // not CONTEXT.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     cmpq(dst, Operand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
     Check(equal, "Yo dawg, I heard you liked function contexts "
                  "so I put function contexts in all your contexts");
@@ -2604,7 +2640,7 @@
                                                   Register map) {
   // Load the initial map.  The global functions all have initial maps.
   movq(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     Label ok, fail;
     CheckMap(map, Factory::meta_map(), &fail, false);
     jmp(&ok);
@@ -2659,7 +2695,7 @@
 
 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
   // Check stack alignment.
-  if (FLAG_debug_code) {
+  if (emit_debug_code()) {
     CheckStackAlignment();
   }
 
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 28f814b..376a597 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -278,8 +278,9 @@
                                            int power);
 
 
-  // Simple comparison of smis.
-  void SmiCompare(Register dst, Register src);
+  // Simple comparison of smis.  Both sides must be known smis to use these;
+  // otherwise, use Cmp.
+  void SmiCompare(Register smi1, Register smi2);
   void SmiCompare(Register dst, Smi* src);
   void SmiCompare(Register dst, const Operand& src);
   void SmiCompare(const Operand& dst, Register src);
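[Editor's note] The sharpened comment encodes a contract: SmiCompare is reserved for values already known to be smis (debug code now asserts this), while the new Cmp overloads accept any tagged value. A hedged plain-C++ model of the difference, again assuming the bit-0 smi tag:

    #include <cassert>
    #include <cstdint>

    const intptr_t kSmiTagMask = 1;  // assumption: smi tag is bit 0 == 0

    // SmiCompare's contract: both inputs are smis; debug builds assert it.
    inline int SmiCompare(intptr_t a, intptr_t b) {
      assert((a & kSmiTagMask) == 0 && (b & kSmiTagMask) == 0);
      return a < b ? -1 : (a > b ? 1 : 0);
    }

    // Cmp's contract: no precondition -- equality against a smi constant
    // is meaningful for any tagged value.
    inline bool CmpEqual(intptr_t tagged, intptr_t smi_constant) {
      return tagged == smi_constant;
    }
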
@@ -609,6 +610,8 @@
   void Move(const Operand& dst, Handle<Object> source);
   void Cmp(Register dst, Handle<Object> source);
   void Cmp(const Operand& dst, Handle<Object> source);
+  void Cmp(Register dst, Smi* src);
+  void Cmp(const Operand& dst, Smi* src);
   void Push(Handle<Object> source);
 
   // Emit code to discard a non-negative number of pointer-sized elements
@@ -702,6 +705,7 @@
 
   // Abort execution if argument is not a smi. Used in debug code.
   void AbortIfNotSmi(Register object);
+  void AbortIfNotSmi(const Operand& object);
 
   // Abort execution if argument is a string. Used in debug code.
   void AbortIfNotString(Register object);