Merge from v8 at revision 3723
diff --git a/src/SConscript b/src/SConscript
index 4eb8722..94428f2 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -60,6 +60,7 @@
     flags.cc
     frame-element.cc
     frames.cc
+    full-codegen.cc
     func-name-inferrer.cc
     global-handles.cc
     handles.cc
@@ -112,8 +113,8 @@
     arm/cpu-arm.cc
     arm/debug-arm.cc
     arm/disasm-arm.cc
-    arm/fast-codegen-arm.cc
     arm/frames-arm.cc
+    arm/full-codegen-arm.cc
     arm/ic-arm.cc
     arm/jump-target-arm.cc
     arm/macro-assembler-arm.cc
@@ -135,8 +136,8 @@
     ia32/cpu-ia32.cc
     ia32/debug-ia32.cc
     ia32/disasm-ia32.cc
-    ia32/fast-codegen-ia32.cc
     ia32/frames-ia32.cc
+    ia32/full-codegen-ia32.cc
     ia32/ic-ia32.cc
     ia32/jump-target-ia32.cc
     ia32/macro-assembler-ia32.cc
@@ -152,8 +153,8 @@
     x64/cpu-x64.cc
     x64/debug-x64.cc
     x64/disasm-x64.cc
-    x64/fast-codegen-x64.cc
     x64/frames-x64.cc
+    x64/full-codegen-x64.cc
     x64/ic-x64.cc
     x64/jump-target-x64.cc
     x64/macro-assembler-x64.cc
@@ -168,6 +169,7 @@
   'os:linux':   ['platform-linux.cc', 'platform-posix.cc'],
   'os:android': ['platform-linux.cc', 'platform-posix.cc'],
   'os:macos':   ['platform-macos.cc', 'platform-posix.cc'],
+  'os:solaris': ['platform-solaris.cc', 'platform-posix.cc'],
   'os:nullos':  ['platform-nullos.cc'],
   'os:win32':   ['platform-win32.cc'],
   'mode:release': [],
@@ -196,6 +198,9 @@
   'os:openbsd': [
     'd8-posix.cc'
   ],
+  'os:solaris': [
+    'd8-posix.cc'
+  ],
   'os:win32': [
     'd8-windows.cc'
   ],
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 07da800..74547be 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -30,9 +30,9 @@
 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 // OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// The original source code covered by the above license above has been modified
-// significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2010 the V8 project authors. All rights reserved.
 
 #include "v8.h"
 
@@ -1371,6 +1371,36 @@
 
 
 // Support for VFP.
+void Assembler::vldr(const DwVfpRegister dst,
+                     const Register base,
+                     int offset,
+                     const Condition cond) {
+  // Ddst = MEM(Rbase + offset).
+  // Instruction details available in ARM DDI 0406A, A8-628.
+  // cond(31-28) | 1101(27-24) | 1001(23-20) | Rbase(19-16) |
+  // Vdst(15-12) | 1011(11-8) | (offset/4)
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  ASSERT(offset % 4 == 0);
+  emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
+       0xB*B8 | ((offset / 4) & 255));
+}
+
+
+void Assembler::vstr(const DwVfpRegister src,
+                     const Register base,
+                     int offset,
+                     const Condition cond) {
+  // MEM(Rbase + offset) = Dsrc.
+  // Instruction details available in ARM DDI 0406A, A8-786.
+  // cond(31-28) | 1101(27-24) | 1000(23-20) | Rbase(19-16) |
+  // Vsrc(15-12) | 1011(11-8) | (offset/4)
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  ASSERT(offset % 4 == 0);
+  emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 |
+       0xB*B8 | ((offset / 4) & 255));
+}
+
+
 void Assembler::vmov(const DwVfpRegister dst,
                      const Register src1,
                      const Register src2,
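
For context: the new vldr/vstr pair is used throughout the rest of this patch to move a HeapNumber's double payload directly between memory and a VFP register. A minimal sketch of the idiom (register choices are illustrative, taken from the codegen hunks below; the base pointer is untagged first because the immediate offset must be a multiple of 4, while tagged pointers are off by kHeapObjectTag):

    __ sub(r7, r0, Operand(kHeapObjectTag));    // r0 holds a tagged HeapNumber
    __ vldr(d7, r7, HeapNumber::kValueOffset);  // d7 = its double value
    // ... compute with d7 ...
    __ vstr(d7, r7, HeapNumber::kValueOffset);  // store the result back
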
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index cd53dd6..8b65b7c 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -30,9 +30,9 @@
 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 // OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// The original source code covered by the above license above has been modified
-// significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2010 the V8 project authors. All rights reserved.
 
 // A light-weight ARM Assembler
 // Generates user mode instructions for the ARM architecture up to version 5
@@ -796,6 +796,14 @@
   // However, some simple modifications can allow
   // these APIs to support D16 to D31.
 
+  void vldr(const DwVfpRegister dst,
+            const Register base,
+            int offset,  // Offset must be a multiple of 4.
+            const Condition cond = al);
+  void vstr(const DwVfpRegister src,
+            const Register base,
+            int offset,  // Offset must be a multiple of 4.
+            const Condition cond = al);
   void vmov(const DwVfpRegister dst,
             const Register src1,
             const Register src2,
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 0c1dbcc..38f08d1 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -605,14 +605,19 @@
 }
 
 
-Reference::Reference(CodeGenerator* cgen, Expression* expression)
-    : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
+Reference::Reference(CodeGenerator* cgen,
+                     Expression* expression,
+                     bool persist_after_get)
+    : cgen_(cgen),
+      expression_(expression),
+      type_(ILLEGAL),
+      persist_after_get_(persist_after_get) {
   cgen->LoadReference(this);
 }
 
 
 Reference::~Reference() {
-  cgen_->UnloadReference(this);
+  ASSERT(is_unloaded() || is_illegal());
 }
 
 
@@ -661,6 +666,7 @@
     frame_->Drop(size);
     frame_->EmitPush(r0);
   }
+  ref->set_unloaded();
 }
 
 
@@ -1244,8 +1250,6 @@
       Reference target(this, node->proxy());
       LoadAndSpill(val);
       target.SetValue(NOT_CONST_INIT);
-      // The reference is removed from the stack (preserving TOS) when
-      // it goes out of scope.
     }
     // Get rid of the assigned value (declarations are statements).
     frame_->Drop();
@@ -1932,25 +1936,17 @@
       if (each.size() > 0) {
         __ ldr(r0, frame_->ElementAt(each.size()));
         frame_->EmitPush(r0);
-      }
-      // If the reference was to a slot we rely on the convenient property
-      // that it doesn't matter whether a value (eg, r3 pushed above) is
-      // right on top of or right underneath a zero-sized reference.
-      each.SetValue(NOT_CONST_INIT);
-      if (each.size() > 0) {
-        // It's safe to pop the value lying on top of the reference before
-        // unloading the reference itself (which preserves the top of stack,
-        // ie, now the topmost value of the non-zero sized reference), since
-        // we will discard the top of stack after unloading the reference
-        // anyway.
-        frame_->EmitPop(r0);
+        each.SetValue(NOT_CONST_INIT);
+        frame_->Drop(2);
+      } else {
+        // If the reference was to a slot we rely on the convenient property
+        // that it doesn't matter whether a value (eg, r3 pushed above) is
+        // right on top of or right underneath a zero-sized reference.
+        each.SetValue(NOT_CONST_INIT);
+        frame_->Drop();
       }
     }
   }
-  // Discard the i'th entry pushed above or else the remainder of the
-  // reference, whichever is currently on top of the stack.
-  frame_->Drop();
-
   // Body.
   CheckStack();  // TODO(1222600): ignore if body contains calls.
   VisitAndSpill(node->body());
@@ -2844,7 +2840,7 @@
   VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ Assignment");
 
-  { Reference target(this, node->target());
+  { Reference target(this, node->target(), node->is_compound());
     if (target.is_illegal()) {
       // Fool the virtual frame into thinking that we left the assignment's
       // value on the frame.
@@ -2859,8 +2855,7 @@
         node->op() == Token::INIT_CONST) {
       LoadAndSpill(node->value());
 
-    } else {
-      // +=, *= and similar binary assignments.
+    } else {  // Assignment is a compound assignment.
       // Get the old value of the lhs.
       target.GetValueAndSpill();
       Literal* literal = node->value()->AsLiteral();
@@ -2881,13 +2876,12 @@
         frame_->EmitPush(r0);
       }
     }
-
     Variable* var = node->target()->AsVariableProxy()->AsVariable();
     if (var != NULL &&
         (var->mode() == Variable::CONST) &&
         node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
       // Assignment ignored - leave the value on the stack.
-
+      UnloadReference(&target);
     } else {
       CodeForSourcePosition(node->position());
       if (node->op() == Token::INIT_CONST) {
@@ -3097,16 +3091,20 @@
       // JavaScript example: 'array[index](1, 2, 3)'
       // -------------------------------------------
 
-      // Load the function to call from the property through a reference.
-      Reference ref(this, property);
-      ref.GetValueAndSpill();  // receiver
-
-      // Pass receiver to called function.
+      LoadAndSpill(property->obj());
+      LoadAndSpill(property->key());
+      EmitKeyedLoad(false);
+      frame_->Drop();  // key
+      // Put the function below the receiver.
       if (property->is_synthetic()) {
+        // Use the global receiver.
+        frame_->Drop();
+        frame_->EmitPush(r0);
         LoadGlobalReceiver(r0);
       } else {
-        __ ldr(r0, frame_->ElementAt(ref.size()));
-        frame_->EmitPush(r0);
+        frame_->EmitPop(r1);  // receiver
+        frame_->EmitPush(r0);  // function
+        frame_->EmitPush(r1);  // receiver
       }
 
       // Call the function.
@@ -3470,6 +3468,20 @@
 }
 
 
+void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
+  VirtualFrame::SpilledScope spilled_scope;
+  ASSERT(args->length() == 1);
+  LoadAndSpill(args->at(0));
+  frame_->EmitPop(r0);
+  __ tst(r0, Operand(kSmiTagMask));
+  false_target()->Branch(eq);
+  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+  __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
+  __ tst(r1, Operand(1 << Map::kIsUndetectable));
+  cc_reg_ = ne;
+}
+
+
 void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
   VirtualFrame::SpilledScope spilled_scope;
   ASSERT(args->length() == 0);
@@ -3562,7 +3574,8 @@
   Load(args->at(0));
   Load(args->at(1));
 
-  frame_->CallRuntime(Runtime::kStringCompare, 2);
+  StringCompareStub stub;
+  frame_->CallStub(&stub, 2);
   frame_->EmitPush(r0);
 }
 
@@ -3792,7 +3805,9 @@
      frame_->EmitPush(r0);
   }
 
-  { Reference target(this, node->expression());
+  // A constant reference is never written to, so it does not need to
+  // persist as a compound assignment reference.
+  { Reference target(this, node->expression(), !is_const);
     if (target.is_illegal()) {
       // Spoof the virtual frame to have the expected height (one higher
       // than on entry).
@@ -4253,6 +4268,16 @@
 }
 
 
+void CodeGenerator::EmitKeyedLoad(bool is_global) {
+  Comment cmnt(masm_, "[ Load from keyed Property");
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+  RelocInfo::Mode rmode = is_global
+                          ? RelocInfo::CODE_TARGET_CONTEXT
+                          : RelocInfo::CODE_TARGET;
+  frame_->CallCodeObject(ic, rmode, 0);
+}
+
+
 #ifdef DEBUG
 bool CodeGenerator::HasValidEntryRegisters() { return true; }
 #endif
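
A sketch of how EmitKeyedLoad is driven by a call site, mirroring the keyed-call hunk above (the receiver and key stay on the frame across the IC call; the result arrives in r0):

    LoadAndSpill(property->obj());   // push the receiver
    LoadAndSpill(property->key());   // push the key
    EmitKeyedLoad(false);            // r0 = receiver[key], stack unchanged
    frame_->Drop();                  // discard the key
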
@@ -4319,23 +4344,21 @@
     case KEYED: {
       // TODO(181): Implement inlined version of array indexing once
       // loop nesting is properly tracked on ARM.
-      VirtualFrame* frame = cgen_->frame();
-      Comment cmnt(masm, "[ Load from keyed Property");
       ASSERT(property != NULL);
-      Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
       Variable* var = expression_->AsVariableProxy()->AsVariable();
       ASSERT(var == NULL || var->is_global());
-      RelocInfo::Mode rmode = (var == NULL)
-                            ? RelocInfo::CODE_TARGET
-                            : RelocInfo::CODE_TARGET_CONTEXT;
-      frame->CallCodeObject(ic, rmode, 0);
-      frame->EmitPush(r0);
+      cgen_->EmitKeyedLoad(var != NULL);
+      cgen_->frame()->EmitPush(r0);
       break;
     }
 
     default:
       UNREACHABLE();
   }
+
+  if (!persist_after_get_) {
+    cgen_->UnloadReference(this);
+  }
 }
 
 
@@ -4397,6 +4420,7 @@
     default:
       UNREACHABLE();
   }
+  cgen_->UnloadReference(this);
 }
 
 
@@ -4832,14 +4856,14 @@
                                     Label* lhs_not_nan,
                                     Label* slow,
                                     bool strict) {
-  Label lhs_is_smi;
+  Label rhs_is_smi;
   __ tst(r0, Operand(kSmiTagMask));
-  __ b(eq, &lhs_is_smi);
+  __ b(eq, &rhs_is_smi);
 
-  // Rhs is a Smi.  Check whether the non-smi is a heap number.
+  // Lhs is a Smi.  Check whether the rhs is a heap number.
   __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
   if (strict) {
-    // If lhs was not a number and rhs was a Smi then strict equality cannot
+    // If rhs is not a number and lhs is a Smi then strict equality cannot
     // succeed.  Return non-equal (r0 is already not zero)
     __ mov(pc, Operand(lr), LeaveCC, ne);  // Return.
   } else {
@@ -4848,57 +4872,67 @@
     __ b(ne, slow);
   }
 
-  // Rhs is a smi, lhs is a number.
-  __ push(lr);
-
+  // Lhs (r1) is a smi, rhs (r0) is a number.
   if (CpuFeatures::IsSupported(VFP3)) {
+    // Convert lhs to a double in d7.
     CpuFeatures::Scope scope(VFP3);
-    __ IntegerToDoubleConversionWithVFP3(r1, r3, r2);
+    __ mov(r7, Operand(r1, ASR, kSmiTagSize));
+    __ vmov(s15, r7);
+    __ vcvt(d7, s15);
+    // Load the double from rhs, tagged HeapNumber r0, to d6.
+    __ sub(r7, r0, Operand(kHeapObjectTag));
+    __ vldr(d6, r7, HeapNumber::kValueOffset);
   } else {
+    __ push(lr);
+    // Convert lhs to a double in r2, r3.
     __ mov(r7, Operand(r1));
     ConvertToDoubleStub stub1(r3, r2, r7, r6);
     __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+    // Load rhs to a double in r0, r1.
+    __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
+    __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+    __ pop(lr);
   }
 
-
-  // r3 and r2 are rhs as double.
-  __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
-  __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
   // We now have both loaded as doubles but we can skip the lhs nan check
-  // since it's a Smi.
-  __ pop(lr);
+  // since it's a smi.
   __ jmp(lhs_not_nan);
 
-  __ bind(&lhs_is_smi);
-  // Lhs is a Smi.  Check whether the non-smi is a heap number.
+  __ bind(&rhs_is_smi);
+  // Rhs is a smi.  Check whether the non-smi lhs is a heap number.
   __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
   if (strict) {
-    // If lhs was not a number and rhs was a Smi then strict equality cannot
+    // If lhs is not a number and rhs is a smi then strict equality cannot
     // succeed.  Return non-equal.
     __ mov(r0, Operand(1), LeaveCC, ne);  // Non-zero indicates not equal.
     __ mov(pc, Operand(lr), LeaveCC, ne);  // Return.
   } else {
-    // Smi compared non-strictly with a non-Smi non-heap-number.  Call
+    // Smi compared non-strictly with a non-smi non-heap-number.  Call
     // the runtime.
     __ b(ne, slow);
   }
 
-  // Lhs is a smi, rhs is a number.
-  // r0 is Smi and r1 is heap number.
-  __ push(lr);
-  __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
-  __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
-
+  // Rhs (r0) is a smi, lhs (r1) is a heap number.
   if (CpuFeatures::IsSupported(VFP3)) {
+    // Convert rhs to a double in d6.
     CpuFeatures::Scope scope(VFP3);
-    __ IntegerToDoubleConversionWithVFP3(r0, r1, r0);
+    // Load the double from lhs, tagged HeapNumber r1, to d7.
+    __ sub(r7, r1, Operand(kHeapObjectTag));
+    __ vldr(d7, r7, HeapNumber::kValueOffset);
+    __ mov(r7, Operand(r0, ASR, kSmiTagSize));
+    __ vmov(s13, r7);
+    __ vcvt(d6, s13);
   } else {
+    __ push(lr);
+    // Load lhs to a double in r2, r3.
+    __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
+    __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
+    // Convert rhs to a double in r0, r1.
     __ mov(r7, Operand(r0));
     ConvertToDoubleStub stub2(r1, r0, r7, r6);
     __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+    __ pop(lr);
   }
-
-  __ pop(lr);
   // Fall through to both_loaded_as_doubles.
 }
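
The smi-to-double conversions above all follow one fixed VFP3 idiom; for reference (d7 aliases the single-precision pair s14/s15 and d6 aliases s12/s13, which is why s15 feeds d7 and s13 feeds d6):

    __ mov(r7, Operand(r1, ASR, kSmiTagSize));  // untag the smi
    __ vmov(s15, r7);                           // move the integer into a VFP register
    __ vcvt(d7, s15);                           // convert integer to double precision
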
 
@@ -5047,10 +5081,18 @@
 
   // Both are heap numbers.  Load them up then jump to the code we have
   // for that.
-  __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
-  __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
-  __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
-  __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
+    __ sub(r7, r0, Operand(kHeapObjectTag));
+    __ vldr(d6, r7, HeapNumber::kValueOffset);
+    __ sub(r7, r1, Operand(kHeapObjectTag));
+    __ vldr(d7, r7, HeapNumber::kValueOffset);
+  } else {
+    __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
+    __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
+    __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
+    __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+  }
   __ jmp(both_loaded_as_doubles);
 }
 
@@ -5075,8 +5117,9 @@
 }
 
 
-// On entry r0 and r1 are the things to be compared.  On exit r0 is 0,
-// positive or negative to indicate the result of the comparison.
+// On entry r0 (rhs) and r1 (lhs) are the values to be compared.
+// On exit r0 is 0, positive or negative to indicate the result of
+// the comparison.
 void CompareStub::Generate(MacroAssembler* masm) {
   Label slow;  // Call builtin.
   Label not_smis, both_loaded_as_doubles, lhs_not_nan;
@@ -5101,21 +5144,19 @@
   // 3) Fall through to both_loaded_as_doubles.
   // 4) Jump to lhs_not_nan.
   // In cases 3 and 4 we have found out we were dealing with a number-number
-  // comparison and the numbers have been loaded into r0, r1, r2, r3 as doubles.
+  // comparison.  If VFP3 is supported the double values of the numbers have
+  // been loaded into d7 and d6.  Otherwise, the double values have been loaded
+  // into r0, r1, r2, and r3.
   EmitSmiNonsmiComparison(masm, &lhs_not_nan, &slow, strict_);
 
   __ bind(&both_loaded_as_doubles);
-  // r0, r1, r2, r3 are the double representations of the right hand side
-  // and the left hand side.
-
+  // The arguments have been converted to doubles and stored in d6 and d7, if
+  // VFP3 is supported, or in r0, r1, r2, and r3.
   if (CpuFeatures::IsSupported(VFP3)) {
     __ bind(&lhs_not_nan);
     CpuFeatures::Scope scope(VFP3);
     Label no_nan;
     // ARMv7 VFP3 instructions to implement double precision comparison.
-    __ vmov(d6, r0, r1);
-    __ vmov(d7, r2, r3);
-
     __ vcmp(d7, d6);
     __ vmrs(pc);  // Move vector status bits to normal status bits.
     Label nan;
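
With both operands now arriving directly in d6 and d7, the comparison reduces to the vcmp/vmrs pair above; a sketch of the complete pattern (the V flag after vmrs signals an unordered result, i.e. at least one NaN operand):

    __ vcmp(d7, d6);  // compare lhs (d7) against rhs (d6)
    __ vmrs(pc);      // copy the FPSCR condition flags to the ARM status register
    __ b(vs, &nan);   // V set: unordered comparison, a NaN was involved
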
@@ -5154,6 +5195,7 @@
   }
 
   Label check_for_symbols;
+  Label flat_string_check;
   // Check for heap-number-heap-number comparison.  Can jump to slow case,
   // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
   // that case.  If the inputs are not doubles then jumps to check_for_symbols.
@@ -5161,7 +5203,7 @@
   EmitCheckForTwoHeapNumbers(masm,
                              &both_loaded_as_doubles,
                              &check_for_symbols,
-                             &slow);
+                             &flat_string_check);
 
   __ bind(&check_for_symbols);
   // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
@@ -5169,10 +5211,27 @@
   if (cc_ == eq && !strict_) {
     // Either jumps to slow or returns the answer.  Assumes that r2 is the type
     // of r0 on entry.
-    EmitCheckForSymbols(masm, &slow);
+    EmitCheckForSymbols(masm, &flat_string_check);
   }
 
+  // Check for both being sequential ASCII strings, and inline if that is the
+  // case.
+  __ bind(&flat_string_check);
+
+  __ JumpIfNonSmisNotBothSequentialAsciiStrings(r0, r1, r2, r3, &slow);
+
+  __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
+  StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+                                                     r1,
+                                                     r0,
+                                                     r2,
+                                                     r3,
+                                                     r4,
+                                                     r5);
+  // Never falls through to here.
+
   __ bind(&slow);
+
   __ push(r1);
   __ push(r0);
   // Figure out which native to call and setup the arguments.
@@ -5239,10 +5298,18 @@
   // The new heap number is in r5.  r6 and r7 are scratch.
   AllocateHeapNumber(masm, &slow, r5, r6, r7);
 
-  if (CpuFeatures::IsSupported(VFP3)) {
+  // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
+  // using registers d7 and d6 for the double values.
+  bool use_fp_registers = CpuFeatures::IsSupported(VFP3) &&
+      Token::MOD != operation;
+  if (use_fp_registers) {
     CpuFeatures::Scope scope(VFP3);
-    __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
-    __ IntegerToDoubleConversionWithVFP3(r1, r1, r0);
+    __ mov(r7, Operand(r0, ASR, kSmiTagSize));
+    __ vmov(s15, r7);
+    __ vcvt(d7, s15);
+    __ mov(r7, Operand(r1, ASR, kSmiTagSize));
+    __ vmov(s13, r7);
+    __ vcvt(d6, s13);
   } else {
     // Write Smi from r0 to r3 and r2 in double format.  r6 is scratch.
     __ mov(r7, Operand(r0));
@@ -5324,9 +5391,16 @@
   if (mode == OVERWRITE_RIGHT) {
     __ mov(r5, Operand(r0));  // Overwrite this heap number.
   }
-  // Calling convention says that second double is in r2 and r3.
-  __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
-  __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4));
+  if (use_fp_registers) {
+    CpuFeatures::Scope scope(VFP3);
+    // Load the double from tagged HeapNumber r0 to d7.
+    __ sub(r7, r0, Operand(kHeapObjectTag));
+    __ vldr(d7, r7, HeapNumber::kValueOffset);
+  } else {
+    // Calling convention says that second double is in r2 and r3.
+    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
+    __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4));
+  }
   __ jmp(&finished_loading_r0);
   __ bind(&r0_is_smi);
   if (mode == OVERWRITE_RIGHT) {
@@ -5334,10 +5408,12 @@
     AllocateHeapNumber(masm, &slow, r5, r6, r7);
   }
 
-
-  if (CpuFeatures::IsSupported(VFP3)) {
+  if (use_fp_registers) {
     CpuFeatures::Scope scope(VFP3);
-    __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
+    // Convert smi in r0 to double in d7.
+    __ mov(r7, Operand(r0, ASR, kSmiTagSize));
+    __ vmov(s15, r7);
+    __ vcvt(d7, s15);
   } else {
     // Write Smi from r0 to r3 and r2 in double format.
     __ mov(r7, Operand(r0));
@@ -5357,9 +5433,16 @@
   if (mode == OVERWRITE_LEFT) {
     __ mov(r5, Operand(r1));  // Overwrite this heap number.
   }
-  // Calling convention says that first double is in r0 and r1.
-  __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
-  __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4));
+  if (use_fp_registers) {
+    CpuFeatures::Scope scope(VFP3);
+    // Load the double from tagged HeapNumber r1 to d6.
+    __ sub(r7, r1, Operand(kHeapObjectTag));
+    __ vldr(d6, r7, HeapNumber::kValueOffset);
+  } else {
+    // Calling convention says that first double is in r0 and r1.
+    __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
+    __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4));
+  }
   __ jmp(&finished_loading_r1);
   __ bind(&r1_is_smi);
   if (mode == OVERWRITE_LEFT) {
@@ -5367,9 +5450,12 @@
     AllocateHeapNumber(masm, &slow, r5, r6, r7);
   }
 
-  if (CpuFeatures::IsSupported(VFP3)) {
+  if (use_fp_registers) {
     CpuFeatures::Scope scope(VFP3);
-    __ IntegerToDoubleConversionWithVFP3(r1, r1, r0);
+    // Convert smi in r1 to double in d6.
+    __ mov(r7, Operand(r1, ASR, kSmiTagSize));
+    __ vmov(s13, r7);
+    __ vcvt(d6, s13);
   } else {
     // Write Smi from r1 to r1 and r0 in double format.
     __ mov(r7, Operand(r1));
@@ -5382,22 +5468,12 @@
   __ bind(&finished_loading_r1);
 
   __ bind(&do_the_call);
-  // r0: Left value (least significant part of mantissa).
-  // r1: Left value (sign, exponent, top of mantissa).
-  // r2: Right value (least significant part of mantissa).
-  // r3: Right value (sign, exponent, top of mantissa).
-  // r5: Address of heap number for result.
-
-  if (CpuFeatures::IsSupported(VFP3) &&
-      ((Token::MUL == operation) ||
-       (Token::DIV == operation) ||
-       (Token::ADD == operation) ||
-       (Token::SUB == operation))) {
+  // If we are inlining the operation using VFP3 instructions for
+  // add, subtract, multiply, or divide, the arguments are in d6 and d7.
+  if (use_fp_registers) {
     CpuFeatures::Scope scope(VFP3);
     // ARMv7 VFP3 instructions to implement
     // double precision, add, subtract, multiply, divide.
-    __ vmov(d6, r0, r1);
-    __ vmov(d7, r2, r3);
 
     if (Token::MUL == operation) {
       __ vmul(d5, d6, d7);
@@ -5410,15 +5486,20 @@
     } else {
       UNREACHABLE();
     }
-
-    __ vmov(r0, r1, d5);
-
-    __ str(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
-    __ str(r1, FieldMemOperand(r5, HeapNumber::kValueOffset + 4));
-    __ mov(r0, Operand(r5));
+    __ sub(r0, r5, Operand(kHeapObjectTag));
+    __ vstr(d5, r0, HeapNumber::kValueOffset);
+    __ add(r0, r0, Operand(kHeapObjectTag));
     __ mov(pc, lr);
     return;
   }
+
+  // If we did not inline the operation, then the arguments are in:
+  // r0: Left value (least significant part of mantissa).
+  // r1: Left value (sign, exponent, top of mantissa).
+  // r2: Right value (least significant part of mantissa).
+  // r3: Right value (sign, exponent, top of mantissa).
+  // r5: Address of heap number for result.
+
   __ push(lr);   // For later.
   __ push(r5);   // Address of heap number that is answer.
   __ AlignStack(0);
@@ -6723,6 +6804,101 @@
 }
 
 
+
+
+void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+                                                        Register left,
+                                                        Register right,
+                                                        Register scratch1,
+                                                        Register scratch2,
+                                                        Register scratch3,
+                                                        Register scratch4) {
+  Label compare_lengths;
+  // Find minimum length and length difference.
+  __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
+  __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
+  __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
+  Register length_delta = scratch3;
+  __ mov(scratch1, scratch2, LeaveCC, gt);
+  Register min_length = scratch1;
+  __ tst(min_length, Operand(min_length));
+  __ b(eq, &compare_lengths);
+
+  // Set up registers so that we only need to increment one register
+  // in the loop.
+  __ add(scratch2, min_length,
+         Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(left, left, Operand(scratch2));
+  __ add(right, right, Operand(scratch2));
+  // Registers left and right point to the min_length character of strings.
+  __ rsb(min_length, min_length, Operand(-1));
+  Register index = min_length;
+  // Index starts at -min_length.
+
+  {
+    // Compare loop.
+    Label loop;
+    __ bind(&loop);
+    // Compare characters.
+    __ add(index, index, Operand(1), SetCC);
+    __ ldrb(scratch2, MemOperand(left, index), ne);
+    __ ldrb(scratch4, MemOperand(right, index), ne);
+    // Skip to compare lengths with eq condition true.
+    __ b(eq, &compare_lengths);
+    __ cmp(scratch2, scratch4);
+    __ b(eq, &loop);
+    // Fallthrough with eq condition false.
+  }
+  // Compare lengths - strings up to min-length are equal.
+  __ bind(&compare_lengths);
+  ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+  // Use zero length_delta as result.
+  __ mov(r0, Operand(length_delta), SetCC, eq);
+  // Fall through to here if characters compare not-equal.
+  __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
+  __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
+  __ Ret();
+}
+
+
+void StringCompareStub::Generate(MacroAssembler* masm) {
+  Label runtime;
+
+  // Stack frame on entry.
+  //  sp[0]: return address
+  //  sp[4]: right string
+  //  sp[8]: left string
+
+  __ ldr(r0, MemOperand(sp, 2 * kPointerSize));  // left
+  __ ldr(r1, MemOperand(sp, 1 * kPointerSize));  // right
+
+  Label not_same;
+  __ cmp(r0, r1);
+  __ b(ne, &not_same);
+  ASSERT_EQ(0, EQUAL);
+  ASSERT_EQ(0, kSmiTag);
+  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
+  __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2);
+  __ add(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  __ bind(&not_same);
+
+  // Check that both objects are sequential ASCII strings.
+  __ JumpIfNotBothSequentialAsciiStrings(r0, r1, r2, r3, &runtime);
+
+  // Compare flat ASCII strings natively. Remove arguments from stack first.
+  __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
+  __ add(sp, sp, Operand(2 * kPointerSize));
+  GenerateCompareFlatAsciiStrings(masm, r0, r1, r2, r3, r4, r5);
+
+  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
+  // tagged as a small integer.
+  __ bind(&runtime);
+  __ TailCallRuntime(ExternalReference(Runtime::kStringCompare), 2, 1);
+}
+
+
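
In C++ terms the new stub implements roughly the following comparison (a sketch only: lengths here are plain ints and the LESS/EQUAL/GREATER constants are returned directly, whereas the generated code works with smi-tagged results):

    int CompareFlatAsciiStrings(const char* left, int left_length,
                                const char* right, int right_length) {
      int min_length = left_length < right_length ? left_length : right_length;
      for (int i = 0; i < min_length; i++) {
        if (left[i] != right[i]) {
          return (left[i] < right[i]) ? LESS : GREATER;
        }
      }
      // Equal up to min_length; the length difference decides.
      if (left_length == right_length) return EQUAL;
      return (left_length < right_length) ? LESS : GREATER;
    }
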
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index f5de0eb..ccca2e9 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -43,57 +43,69 @@
 // -------------------------------------------------------------------------
 // Reference support
 
-// A reference is a C++ stack-allocated object that keeps an ECMA
-// reference on the execution stack while in scope. For variables
-// the reference is empty, indicating that it isn't necessary to
-// store state on the stack for keeping track of references to those.
-// For properties, we keep either one (named) or two (indexed) values
-// on the execution stack to represent the reference.
-
+// A reference is a C++ stack-allocated object that puts a
+// reference on the virtual frame.  The reference may be consumed
+// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
+// When the lifetime (scope) of a valid reference ends, it must have
+// been consumed, and be in state UNLOADED.
 class Reference BASE_EMBEDDED {
  public:
   // The values of the types is important, see size().
-  enum Type { ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
-  Reference(CodeGenerator* cgen, Expression* expression);
+  enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
+  Reference(CodeGenerator* cgen,
+            Expression* expression,
+            bool persist_after_get = false);
   ~Reference();
 
   Expression* expression() const { return expression_; }
   Type type() const { return type_; }
   void set_type(Type value) {
-    ASSERT(type_ == ILLEGAL);
+    ASSERT_EQ(ILLEGAL, type_);
     type_ = value;
   }
 
+  void set_unloaded() {
+    ASSERT_NE(ILLEGAL, type_);
+    ASSERT_NE(UNLOADED, type_);
+    type_ = UNLOADED;
+  }
   // The size the reference takes up on the stack.
-  int size() const { return (type_ == ILLEGAL) ? 0 : type_; }
+  int size() const {
+    return (type_ < SLOT) ? 0 : type_;
+  }
 
   bool is_illegal() const { return type_ == ILLEGAL; }
   bool is_slot() const { return type_ == SLOT; }
   bool is_property() const { return type_ == NAMED || type_ == KEYED; }
+  bool is_unloaded() const { return type_ == UNLOADED; }
 
   // Return the name.  Only valid for named property references.
   Handle<String> GetName();
 
   // Generate code to push the value of the reference on top of the
   // expression stack.  The reference is expected to be already on top of
-  // the expression stack, and it is left in place with its value above it.
+  // the expression stack, and it is consumed by the call unless the
+  // reference is for a compound assignment.
+  // If the reference is not consumed, it is left in place under its value.
   void GetValue();
 
-  // Generate code to push the value of a reference on top of the expression
-  // stack and then spill the stack frame.  This function is used temporarily
-  // while the code generator is being transformed.
+  // Generate code to pop a reference, push the value of the reference,
+  // and then spill the stack frame.
   inline void GetValueAndSpill();
 
   // Generate code to store the value on top of the expression stack in the
   // reference.  The reference is expected to be immediately below the value
-  // on the expression stack.  The stored value is left in place (with the
-  // reference intact below it) to support chained assignments.
+  // on the expression stack.  The value is stored in the location specified
+  // by the reference, and is left on top of the stack, after the reference
+  // is popped from beneath it (unloaded).
   void SetValue(InitState init_state);
 
  private:
   CodeGenerator* cgen_;
   Expression* expression_;
   Type type_;
+  // Keep the reference on the stack after get, so it can be used by set later.
+  bool persist_after_get_;
 };
 
 
@@ -274,6 +286,9 @@
   void LoadFromSlot(Slot* slot, TypeofState typeof_state);
   // Store the value on top of the stack to a slot.
   void StoreToSlot(Slot* slot, InitState init_state);
+  // Load a keyed property, leaving it in r0.  The receiver and key are
+  // passed on the stack, and remain there.
+  void EmitKeyedLoad(bool is_global);
 
   void LoadFromGlobalSlotCheckExtensions(Slot* slot,
                                          TypeofState typeof_state,
@@ -341,6 +356,7 @@
   void GenerateIsArray(ZoneList<Expression*>* args);
   void GenerateIsObject(ZoneList<Expression*>* args);
   void GenerateIsFunction(ZoneList<Expression*>* args);
+  void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
 
   // Support for construct call checks.
   void GenerateIsConstructCall(ZoneList<Expression*>* args);
@@ -427,8 +443,8 @@
   friend class VirtualFrame;
   friend class JumpTarget;
   friend class Reference;
-  friend class FastCodeGenerator;
-  friend class CodeGenSelector;
+  friend class FullCodeGenerator;
+  friend class FullCodeGenSyntaxChecker;
 
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
@@ -511,6 +527,28 @@
 };
 
 
+class StringCompareStub: public CodeStub {
+ public:
+  StringCompareStub() { }
+
+  // Compares two flat ASCII strings and returns the result in r0.
+  // Does not use the stack.
+  static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+                                              Register left,
+                                              Register right,
+                                              Register scratch1,
+                                              Register scratch2,
+                                              Register scratch3,
+                                              Register scratch4);
+
+ private:
+  Major MajorKey() { return StringCompare; }
+  int MinorKey() { return 0; }
+
+  void Generate(MacroAssembler* masm);
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_ARM_CODEGEN_ARM_H_
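
Taken together with the codegen-arm.cc hunks, the new Reference protocol looks like this at a typical call site (a sketch assembled from the assignment code above; persist_after_get keeps the reference on the frame between the read and the write of a compound assignment):

    { Reference target(this, node->target(), node->is_compound());
      target.GetValueAndSpill();        // consumes the reference unless compound
      // ... combine the old value with the right-hand side ...
      target.SetValue(NOT_CONST_INIT);  // stores and unloads the reference
    }                                   // ~Reference() asserts UNLOADED (or ILLEGAL)
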
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index 9432207..8a32c95 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -237,6 +237,7 @@
   inline int RnField() const { return Bits(19, 16); }
   inline int RdField() const { return Bits(15, 12); }
 
+  inline int CoprocessorField() const { return Bits(11, 8); }
   // Support for VFP.
   // Vn(19-16) | Vd(15-12) |  Vm(3-0)
   inline int VnField() const { return Bits(19, 16); }
@@ -246,6 +247,8 @@
   inline int MField() const { return Bit(5); }
   inline int DField() const { return Bit(22); }
   inline int RtField() const { return Bits(15, 12); }
+  inline int PField() const { return Bit(24); }
+  inline int UField() const { return Bit(23); }
 
   // Fields used in Data processing instructions
   inline Opcode OpcodeField() const {
@@ -296,6 +299,7 @@
   inline bool HasB()    const { return BField() == 1; }
   inline bool HasW()    const { return WField() == 1; }
   inline bool HasL()    const { return LField() == 1; }
+  inline bool HasU()    const { return UField() == 1; }
   inline bool HasSign() const { return SignField() == 1; }
   inline bool HasH()    const { return HField() == 1; }
   inline bool HasLink() const { return LinkField() == 1; }
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index afed0fa..5b31455 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2007-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -998,29 +998,43 @@
 // Decode Type 6 coprocessor instructions.
 // Dm = vmov(Rt, Rt2)
 // <Rt, Rt2> = vmov(Dm)
+// Ddst = MEM(Rbase + 4*offset).
+// MEM(Rbase + 4*offset) = Dsrc.
 void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
   ASSERT((instr->TypeField() == 6));
 
-  if (instr->Bit(23) == 1) {
-     Unknown(instr);  // Not used by V8.
-  } else if (instr->Bit(22) == 1) {
-    if ((instr->Bits(27, 24) == 0xC) &&
-        (instr->Bit(22) == 1) &&
-        (instr->Bits(11, 8) == 0xB) &&
-        (instr->Bits(7, 6) == 0x0) &&
-        (instr->Bit(4) == 1)) {
-      if (instr->Bit(20) == 0) {
-        Format(instr, "vmov'cond 'Dm, 'rt, 'rn");
-      } else if (instr->Bit(20) == 1) {
-        Format(instr, "vmov'cond 'rt, 'rn, 'Dm");
-      }
-    } else {
-      Unknown(instr);  // Not used by V8.
-    }
-  } else if (instr->Bit(21) == 1) {
+  if (instr->CoprocessorField() != 0xB) {
     Unknown(instr);  // Not used by V8.
   } else {
-    Unknown(instr);  // Not used by V8.
+    switch (instr->OpcodeField()) {
+      case 0x2:
+        // Load or store a double to/from two GP registers.
+        if (instr->Bits(7, 4) != 0x1) {
+          Unknown(instr);  // Not used by V8.
+        } else if (instr->HasL()) {
+          Format(instr, "vmov'cond 'rt, 'rn, 'Dm");
+        } else {
+          Format(instr, "vmov'cond 'Dm, 'rt, 'rn");
+        }
+        break;
+      case 0x8:
+        if (instr->HasL()) {
+          Format(instr, "vldr'cond 'Dd, ['rn - 4*'off8]");
+        } else {
+          Format(instr, "vstr'cond 'Dd, ['rn - 4*'off8]");
+        }
+        break;
+      case 0xC:
+        if (instr->HasL()) {
+          Format(instr, "vldr'cond 'Dd, ['rn + 4*'off8]");
+        } else {
+          Format(instr, "vstr'cond 'Dd, ['rn + 4*'off8]");
+        }
+        break;
+      default:
+        Unknown(instr);  // Not used by V8.
+        break;
+    }
   }
 }
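
Assuming the 'Dd, 'rn, and 'off8 markers print the destination register, the base register, and the raw 8-bit immediate, the vldr added earlier would disassemble along the lines of (illustrative output for a byte offset of 4, i.e. an immediate of 1):

    vldr d7, [r7 + 4*1]
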
 
diff --git a/src/arm/fast-codegen-arm.cc b/src/arm/full-codegen-arm.cc
similarity index 90%
rename from src/arm/fast-codegen-arm.cc
rename to src/arm/full-codegen-arm.cc
index 4256e47..8d1cfeb 100644
--- a/src/arm/fast-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -30,7 +30,7 @@
 #include "codegen-inl.h"
 #include "compiler.h"
 #include "debug.h"
-#include "fast-codegen.h"
+#include "full-codegen.h"
 #include "parser.h"
 
 namespace v8 {
@@ -52,7 +52,7 @@
 //
 // The function builds a JS frame.  Please see JavaScriptFrameConstants in
 // frames-arm.h for its layout.
-void FastCodeGenerator::Generate(FunctionLiteral* fun) {
+void FullCodeGenerator::Generate(FunctionLiteral* fun) {
   function_ = fun;
   SetFunctionPosition(fun);
   int locals_count = fun->scope()->num_stack_slots();
@@ -167,7 +167,7 @@
 }
 
 
-void FastCodeGenerator::EmitReturnSequence(int position) {
+void FullCodeGenerator::EmitReturnSequence(int position) {
   Comment cmnt(masm_, "[ Return sequence");
   if (return_label_.is_bound()) {
     __ b(&return_label_);
@@ -214,7 +214,7 @@
 }
 
 
-void FastCodeGenerator::Apply(Expression::Context context, Register reg) {
+void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
   switch (context) {
     case Expression::kUninitialized:
       UNREACHABLE();
@@ -250,7 +250,7 @@
 }
 
 
-void FastCodeGenerator::Apply(Expression::Context context, Slot* slot) {
+void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
   switch (context) {
     case Expression::kUninitialized:
       UNREACHABLE();
@@ -270,7 +270,7 @@
 }
 
 
-void FastCodeGenerator::Apply(Expression::Context context, Literal* lit) {
+void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
   switch (context) {
     case Expression::kUninitialized:
       UNREACHABLE();
@@ -290,7 +290,7 @@
 }
 
 
-void FastCodeGenerator::ApplyTOS(Expression::Context context) {
+void FullCodeGenerator::ApplyTOS(Expression::Context context) {
   switch (context) {
     case Expression::kUninitialized:
       UNREACHABLE();
@@ -323,7 +323,7 @@
 }
 
 
-void FastCodeGenerator::DropAndApply(int count,
+void FullCodeGenerator::DropAndApply(int count,
                                      Expression::Context context,
                                      Register reg) {
   ASSERT(count > 0);
@@ -371,7 +371,7 @@
 }
 
 
-void FastCodeGenerator::Apply(Expression::Context context,
+void FullCodeGenerator::Apply(Expression::Context context,
                               Label* materialize_true,
                               Label* materialize_false) {
   switch (context) {
@@ -432,7 +432,7 @@
 }
 
 
-void FastCodeGenerator::DoTest(Expression::Context context) {
+void FullCodeGenerator::DoTest(Expression::Context context) {
   // The value to test is pushed on the stack, and duplicated on the stack
   // if necessary (for value/test and test/value contexts).
   ASSERT_NE(NULL, true_label_);
@@ -495,7 +495,7 @@
 }
 
 
-MemOperand FastCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
+MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
   switch (slot->type()) {
     case Slot::PARAMETER:
     case Slot::LOCAL:
@@ -514,14 +514,14 @@
 }
 
 
-void FastCodeGenerator::Move(Register destination, Slot* source) {
+void FullCodeGenerator::Move(Register destination, Slot* source) {
   // Use destination as scratch.
   MemOperand slot_operand = EmitSlotSearch(source, destination);
   __ ldr(destination, slot_operand);
 }
 
 
-void FastCodeGenerator::Move(Slot* dst,
+void FullCodeGenerator::Move(Slot* dst,
                              Register src,
                              Register scratch1,
                              Register scratch2) {
@@ -537,7 +537,7 @@
 }
 
 
-void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
+void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
   Comment cmnt(masm_, "[ Declaration");
   Variable* var = decl->proxy()->var();
   ASSERT(var != NULL);  // Must have been resolved.
@@ -637,7 +637,7 @@
 }
 
 
-void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.
   // The context is the first argument.
   __ mov(r1, Operand(pairs));
@@ -648,7 +648,7 @@
 }
 
 
-void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
   Comment cmnt(masm_, "[ FunctionLiteral");
 
   // Build the function boilerplate and instantiate it.
@@ -666,17 +666,21 @@
 }
 
 
-void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
   Comment cmnt(masm_, "[ VariableProxy");
   EmitVariableLoad(expr->var(), context_);
 }
 
 
-void FastCodeGenerator::EmitVariableLoad(Variable* var,
+void FullCodeGenerator::EmitVariableLoad(Variable* var,
                                          Expression::Context context) {
-  Expression* rewrite = var->rewrite();
-  if (rewrite == NULL) {
-    ASSERT(var->is_global());
+  // Four cases: non-this global variables, lookup slots, all other
+  // types of slots, and parameters that rewrite to explicit property
+  // accesses on the arguments object.
+  Slot* slot = var->slot();
+  Property* property = var->AsProperty();
+
+  if (var->is_global() && !var->is_this()) {
     Comment cmnt(masm_, "Global variable");
     // Use inline caching. Variable name is passed in r2 and the global
     // object on the stack.
@@ -686,34 +690,24 @@
     Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
     __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
     DropAndApply(1, context, r0);
-  } else if (rewrite->AsSlot() != NULL) {
-    Slot* slot = rewrite->AsSlot();
-    if (FLAG_debug_code) {
-      switch (slot->type()) {
-        case Slot::PARAMETER:
-        case Slot::LOCAL: {
-          Comment cmnt(masm_, "Stack slot");
-          break;
-        }
-        case Slot::CONTEXT: {
-          Comment cmnt(masm_, "Context slot");
-          break;
-        }
-        case Slot::LOOKUP:
-          UNIMPLEMENTED();
-          break;
-      }
-    }
-    Apply(context, slot);
-  } else {
-    Comment cmnt(masm_, "Variable rewritten to property");
-    // A variable has been rewritten into an explicit access to an object
-    // property.
-    Property* property = rewrite->AsProperty();
-    ASSERT_NOT_NULL(property);
 
-    // The only property expressions that can occur are of the form
-    // "slot[literal]".
+  } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+    Comment cmnt(masm_, "Lookup slot");
+    __ mov(r1, Operand(var->name()));
+    __ stm(db_w, sp, cp.bit() | r1.bit());  // Context and name.
+    __ CallRuntime(Runtime::kLoadContextSlot, 2);
+    Apply(context, r0);
+
+  } else if (slot != NULL) {
+    Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
+                            ? "Context slot"
+                            : "Stack slot");
+    Apply(context, slot);
+
+  } else {
+    Comment cmnt(masm_, "Rewritten parameter");
+    ASSERT_NOT_NULL(property);
+    // Rewritten parameter accesses are of the form "slot[literal]".
 
     // Assert that the object is in a slot.
     Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
@@ -745,7 +739,7 @@
 }
 
 
-void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
   Comment cmnt(masm_, "[ RegExpLiteral");
   Label done;
   // Registers will be used as follows:
@@ -772,7 +766,7 @@
 }
 
 
-void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   Comment cmnt(masm_, "[ ObjectLiteral");
   __ ldr(r2, MemOperand(fp,  JavaScriptFrameConstants::kFunctionOffset));
   __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
@@ -847,7 +841,7 @@
 }
 
 
-void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   Comment cmnt(masm_, "[ ArrayLiteral");
   __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
@@ -900,7 +894,7 @@
 }
 
 
-void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Literal* key = prop->key()->AsLiteral();
   __ mov(r2, Operand(key->handle()));
@@ -909,14 +903,14 @@
 }
 
 
-void FastCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
   __ Call(ic, RelocInfo::CODE_TARGET);
 }
 
 
-void FastCodeGenerator::EmitBinaryOp(Token::Value op,
+void FullCodeGenerator::EmitBinaryOp(Token::Value op,
                                      Expression::Context context) {
   __ pop(r1);
   GenericBinaryOpStub stub(op, NO_OVERWRITE);
@@ -925,11 +919,17 @@
 }
 
 
-void FastCodeGenerator::EmitVariableAssignment(Variable* var,
+void FullCodeGenerator::EmitVariableAssignment(Variable* var,
                                                Expression::Context context) {
+  // Three main cases: global variables, lookup slots, and all other
+  // types of slots.  Left-hand-side parameters that rewrite to
+  // explicit property accesses do not reach here.
   ASSERT(var != NULL);
   ASSERT(var->is_global() || var->slot() != NULL);
+
+  Slot* slot = var->slot();
   if (var->is_global()) {
+    ASSERT(!var->is_this());
     // Assignment to a global variable.  Use inline caching for the
     // assignment.  Right-hand-side value is passed in r0, variable name in
     // r2, and the global object on the stack.
@@ -941,6 +941,13 @@
     // Overwrite the global object on the stack with the result if needed.
     DropAndApply(1, context, r0);
 
+  } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+    __ push(result_register());  // Value.
+    __ mov(r1, Operand(var->name()));
+    __ stm(db_w, sp, cp.bit() | r1.bit());  // Context and name.
+    __ CallRuntime(Runtime::kStoreContextSlot, 3);
+    Apply(context, r0);
+
   } else if (var->slot() != NULL) {
     Slot* slot = var->slot();
     switch (slot->type()) {
@@ -967,6 +974,7 @@
         break;
     }
     Apply(context, result_register());
+
   } else {
     // Variables rewritten as properties are not treated as variables in
     // assignments.
@@ -975,7 +983,7 @@
 }
 
 
-void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
   // Assignment to a property, using a named store IC.
   Property* prop = expr->target()->AsProperty();
   ASSERT(prop != NULL);
@@ -1011,7 +1019,7 @@
 }
 
 
-void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
   // Assignment to a property, using a keyed store IC.
 
   // If the assignment starts a block of assignments to the same object,
@@ -1046,7 +1054,7 @@
 }
 
 
-void FastCodeGenerator::VisitProperty(Property* expr) {
+void FullCodeGenerator::VisitProperty(Property* expr) {
   Comment cmnt(masm_, "[ Property");
   Expression* key = expr->key();
 
@@ -1065,7 +1073,7 @@
   }
 }
 
-void FastCodeGenerator::EmitCallWithIC(Call* expr,
+void FullCodeGenerator::EmitCallWithIC(Call* expr,
                                        Handle<Object> ignored,
                                        RelocInfo::Mode mode) {
   // Code common for calls using the IC.
@@ -1087,7 +1095,7 @@
 }
 
 
-void FastCodeGenerator::EmitCallWithStub(Call* expr) {
+void FullCodeGenerator::EmitCallWithStub(Call* expr) {
   // Code common for calls using the call stub.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
@@ -1105,7 +1113,7 @@
 }
 
 
-void FastCodeGenerator::VisitCall(Call* expr) {
+void FullCodeGenerator::VisitCall(Call* expr) {
   Comment cmnt(masm_, "[ Call");
   Expression* fun = expr->expression();
   Variable* var = fun->AsVariableProxy()->AsVariable();
@@ -1163,7 +1171,7 @@
     if (lit != NULL &&
         lit->name()->Equals(Heap::empty_string()) &&
         loop_depth() == 0) {
-      lit->set_try_fast_codegen(true);
+      lit->set_try_full_codegen(true);
     }
     VisitForValue(fun, kStack);
     // Load global receiver object.
@@ -1176,7 +1184,7 @@
 }
 
 
-void FastCodeGenerator::VisitCallNew(CallNew* expr) {
+void FullCodeGenerator::VisitCallNew(CallNew* expr) {
   Comment cmnt(masm_, "[ CallNew");
   // According to ECMA-262, section 11.2.2, page 44, the function
   // expression in new calls must be evaluated before the
@@ -1211,7 +1219,7 @@
 }
 
 
-void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
   Comment cmnt(masm_, "[ CallRuntime");
   ZoneList<Expression*>* args = expr->arguments();
 
@@ -1246,7 +1254,7 @@
 }
 
 
-void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
   switch (expr->op()) {
     case Token::VOID: {
       Comment cmnt(masm_, "[ UnaryOperation (VOID)");
@@ -1351,13 +1359,26 @@
       break;
     }
 
+    case Token::ADD: {
+      Comment cmnt(masm_, "[ UnaryOperation (ADD)");
+      VisitForValue(expr->expression(), kAccumulator);
+      Label no_conversion;
+      __ tst(result_register(), Operand(kSmiTagMask));
+      __ b(eq, &no_conversion);
+      __ push(r0);
+      __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS);
+      __ bind(&no_conversion);
+      Apply(context_, result_register());
+      break;
+    }
+
     default:
       UNREACHABLE();
   }
 }
 
 
-void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
+void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
   Comment cmnt(masm_, "[ CountOperation");
 
   // Expression can only be a property, a global or a (parameter or local)
@@ -1376,7 +1397,7 @@
   if (assign_type == VARIABLE) {
     ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
     Location saved_location = location_;
-    location_ = kStack;
+    location_ = kAccumulator;
     EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
                      Expression::kValue);
     location_ = saved_location;
@@ -1393,11 +1414,15 @@
       VisitForValue(prop->key(), kStack);
       EmitKeyedPropertyLoad(prop);
     }
-    __ push(r0);
   }
 
-  // Convert to number.
+  // Call ToNumber only if operand is not a smi.
+  Label no_conversion;
+  __ tst(r0, Operand(kSmiTagMask));
+  __ b(eq, &no_conversion);
+  __ push(r0);
   __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS);
+  __ bind(&no_conversion);
 
   // Save result for postfix expressions.
   if (expr->is_postfix()) {
@@ -1429,12 +1454,28 @@
     }
   }
 
-  // Call stub for +1/-1.
+
+  // Inline smi case if we are in a loop.
+  Label stub_call, done;
+  if (loop_depth() > 0) {
+    __ add(r0, r0, Operand(expr->op() == Token::INC
+                           ? Smi::FromInt(1)
+                           : Smi::FromInt(-1)));
+    __ b(vs, &stub_call);
+    // We could eliminate this smi check if we split the code at
+    // the first smi check before calling ToNumber.
+    __ tst(r0, Operand(kSmiTagMask));
+    __ b(eq, &done);
+    __ bind(&stub_call);
+    // Call stub. Undo operation first.
+    __ sub(r0, r0, Operand(r1));
+  }
   __ mov(r1, Operand(expr->op() == Token::INC
                      ? Smi::FromInt(1)
                      : Smi::FromInt(-1)));
   GenericBinaryOpStub stub(Token::ADD, NO_OVERWRITE);
   __ CallStub(&stub);
+  __ bind(&done);
 
   // Store the value returned in r0.
   switch (assign_type) {
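
The inlined fast path above relies on the smi representation: the tag bit is zero, so Smi::FromInt(1) can be added to a tagged value with a single add, the V flag catches the out-of-range case, and the tag test catches operands that were never smis. Condensed:

    __ add(r0, r0, Operand(Smi::FromInt(1)));  // tagged increment in one instruction
    __ b(vs, &stub_call);                      // signed overflow: fall back to the stub
    __ tst(r0, Operand(kSmiTagMask));          // operand (and result) still a smi?
    __ b(eq, &done);                           // yes: skip the stub entirely
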
@@ -1483,7 +1524,7 @@
 }
 
 
-void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
   Comment cmnt(masm_, "[ BinaryOperation");
   switch (expr->op()) {
     case Token::COMMA:
@@ -1518,7 +1559,7 @@
 }
 
 
-void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
   Comment cmnt(masm_, "[ CompareOperation");
 
   // Always perform the comparison for its control flow.  Pack the result
@@ -1633,25 +1674,25 @@
 }
 
 
-void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
   __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   Apply(context_, r0);
 }
 
 
-Register FastCodeGenerator::result_register() { return r0; }
+Register FullCodeGenerator::result_register() { return r0; }
 
 
-Register FastCodeGenerator::context_register() { return cp; }
+Register FullCodeGenerator::context_register() { return cp; }
 
 
-void FastCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
   ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
   __ str(value, MemOperand(fp, frame_offset));
 }
 
 
-void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
+void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
   __ ldr(dst, CodeGenerator::ContextOperand(cp, context_index));
 }
 
@@ -1659,7 +1700,7 @@
 // ----------------------------------------------------------------------------
 // Non-local control flow support.
 
-void FastCodeGenerator::EnterFinallyBlock() {
+void FullCodeGenerator::EnterFinallyBlock() {
   ASSERT(!result_register().is(r1));
   // Store result register while executing finally block.
   __ push(result_register());
@@ -1672,7 +1713,7 @@
 }
 
 
-void FastCodeGenerator::ExitFinallyBlock() {
+void FullCodeGenerator::ExitFinallyBlock() {
   ASSERT(!result_register().is(r1));
   // Restore result register from stack.
   __ pop(r1);
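
The inlined count operation in the hunks above leans on V8's smi (small
integer) tagging: a smi stores value << 1 with a zero low bit, so one
machine add on the tagged words yields the correctly tagged sum, and
signed overflow (the "b vs" branch) is the only case that must fall back
to the generic stub. A minimal sketch of that invariant, assuming the
classic 32-bit one-bit-tag encoding (names are illustrative, not the
real V8 declarations):

    #include <cstdint>

    const int32_t kSmiTagMask = 1;  // low bit clear => smi
    inline int32_t SmiFromInt(int32_t value) { return value << 1; }
    inline bool IsSmi(int32_t tagged) { return (tagged & kSmiTagMask) == 0; }

    // The generated "add r0, r0, Operand(Smi::FromInt(1)), SetCC" is this
    // addition; if it overflows the 32-bit register, the stub recomputes.
    inline bool SmiAdd(int32_t a, int32_t b, int32_t* result) {
      return !__builtin_add_overflow(a, b, result);  // true on success
    }
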
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index a1f2613..b59c3f0 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -569,11 +569,10 @@
 
   // Get the map of the receiver.
   __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks.  We need
-  // to check this explicitly since this generic stub does not perform
-  // map checks.
+
+  // Check bit field.
   __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
-  __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
+  __ tst(r3, Operand(kSlowCaseBitFieldMask));
   __ b(ne, &slow);
   // Check that the object is some kind of JS object EXCEPT JS Value type.
   // In the case that the object is a value-wrapper object,
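
The kSlowCaseBitFieldMask change above folds what used to be separate
single-bit checks into one tst, so any map bit that forces the generic
slow path fails the load in a single instruction. A hedged sketch of the
idea, with assumed bit positions (the real mask and bit definitions live
in ic-arm.cc and objects.h):

    #include <cstdint>

    // Illustrative bit positions only.
    const uint8_t kIsAccessCheckNeededBit   = 1 << 3;
    const uint8_t kHasIndexedInterceptorBit = 1 << 4;
    const uint8_t kSlowCaseBitFieldMask =
        kIsAccessCheckNeededBit | kHasIndexedInterceptorBit;

    // One AND replaces several single-bit tests.
    inline bool NeedsSlowCase(uint8_t map_bit_field) {
      return (map_bit_field & kSlowCaseBitFieldMask) != 0;
    }
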
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 18cadac..6c3bbbb 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1221,6 +1221,46 @@
 }
 
 
+void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
+    Register first,
+    Register second,
+    Register scratch1,
+    Register scratch2,
+    Label* failure) {
+  // Test that both first and second are sequential ASCII strings.
+  // Assume that they are non-smis.
+  ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
+  ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
+  ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+  ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
+  int kFlatAsciiStringMask =
+      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+  and_(scratch1, scratch1, Operand(kFlatAsciiStringMask));
+  and_(scratch2, scratch2, Operand(kFlatAsciiStringMask));
+  cmp(scratch1, Operand(kFlatAsciiStringTag));
+  // Ignore second test if first test failed.
+  cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
+  b(ne, failure);
+}
+
+
+void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
+                                                         Register second,
+                                                         Register scratch1,
+                                                         Register scratch2,
+                                                         Label* failure) {
+  // Check that neither is a smi.
+  ASSERT_EQ(0, kSmiTag);
+  and_(scratch1, first, Operand(second));
+  tst(scratch1, Operand(kSmiTagMask));
+  b(eq, failure);
+  JumpIfNonSmisNotBothSequentialAsciiStrings(first,
+                                             second,
+                                             scratch1,
+                                             scratch2,
+                                             failure);
+}
+
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 CodePatcher::CodePatcher(byte* address, int instructions)
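
The two string helpers added above reduce "are both operands flat ASCII
strings?" to a masked compare of each instance type; the conditional
second cmp (executed only when the first compare set eq) then acts as a
branch-free logical AND, so a single b(ne) rejects both failures. The
per-object check, sketched with illustrative constants (the real ones
come from objects.h):

    #include <cstdint>

    // Assumed encodings, for the sketch only.
    const uint32_t kIsNotStringMask          = 0x80;
    const uint32_t kStringRepresentationMask = 0x03;  // seq/cons/external
    const uint32_t kStringEncodingMask       = 0x04;  // ASCII vs two-byte
    const uint32_t kSeqAsciiStringType       = 0x04;  // ASCII_STRING_TYPE

    inline bool IsSequentialAsciiString(uint32_t instance_type) {
      const uint32_t mask =
          kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
      return (instance_type & mask) == kSeqAsciiStringType;
    }
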
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 8f2064a..efc5bfa 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -337,6 +337,25 @@
   void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
   bool allow_stub_calls() { return allow_stub_calls_; }
 
+  // ---------------------------------------------------------------------------
+  // String utilities
+
+  // Checks if both objects are sequential ASCII strings and jumps to label
+  // if either is not. Assumes that neither object is a smi.
+  void JumpIfNonSmisNotBothSequentialAsciiStrings(Register object1,
+                                                  Register object2,
+                                                  Register scratch1,
+                                                  Register scratch2,
+                                                  Label* failure);
+
+  // Checks if both objects are sequential ASCII strings and jumps to label
+  // if either is not.
+  void JumpIfNotBothSequentialAsciiStrings(Register first,
+                                           Register second,
+                                           Register scratch1,
+                                           Register scratch2,
+                                           Label* not_flat_ascii_strings);
+
  private:
   List<Unresolved> unresolved_;
   bool generating_stub_;
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index ed06eb2..9dd3b93 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -63,8 +63,6 @@
  *                             through the runtime system)
  *       - stack_area_base    (High end of the memory area to use as
  *                             backtracking stack)
- *       - at_start           (if 1, we are starting at the start of the
- *                             string, otherwise 0)
  *       - int* capture_array (int[num_saved_registers_], for output).
  *       --- sp when called ---
  *       - link address
@@ -76,6 +74,8 @@
  *       - void* input_string (location of a handle containing the string)
  *       - Offset of location before start of input (effectively character
  *         position -1). Used to initialize capture registers to a non-position.
+ *       - At start (if 1, we are starting at the start of the
+ *         string, otherwise 0)
  *       - register 0         (Only positions must be stored in the first
  *       - register 1          num_saved_registers_ registers)
  *       - ...
@@ -610,6 +610,7 @@
   // Set frame pointer just above the arguments.
   __ add(frame_pointer(), sp, Operand(4 * kPointerSize));
   __ push(r0);  // Make room for "position - 1" constant (value is irrelevant).
+  __ push(r0);  // Make room for "at start" constant (value is irrelevant).
 
   // Check if we have space on the stack for registers.
   Label stack_limit_hit;
@@ -653,6 +654,15 @@
   // Store this value in a local variable, for use when clearing
   // position registers.
   __ str(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
+
+  // Determine whether the start index is zero, that is, whether we are at
+  // the start of the string, and store that value in a local variable.
+  __ ldr(r1, MemOperand(frame_pointer(), kStartIndex));
+  __ tst(r1, Operand(r1));
+  __ mov(r1, Operand(1), LeaveCC, eq);
+  __ mov(r1, Operand(0), LeaveCC, ne);
+  __ str(r1, MemOperand(frame_pointer(), kAtStart));
+
   if (num_saved_registers_ > 0) {  // Always is, if generated from a regexp.
     // Fill saved registers with initial value = start offset - 1
 
diff --git a/src/arm/regexp-macro-assembler-arm.h b/src/arm/regexp-macro-assembler-arm.h
index 4459859..7de5f93 100644
--- a/src/arm/regexp-macro-assembler-arm.h
+++ b/src/arm/regexp-macro-assembler-arm.h
@@ -123,8 +123,7 @@
   static const int kReturnAddress = kStoredRegisters + 8 * kPointerSize;
   // Stack parameters placed by caller.
   static const int kRegisterOutput = kReturnAddress + kPointerSize;
-  static const int kAtStart = kRegisterOutput + kPointerSize;
-  static const int kStackHighEnd = kAtStart + kPointerSize;
+  static const int kStackHighEnd = kRegisterOutput + kPointerSize;
   static const int kDirectCall = kStackHighEnd + kPointerSize;
 
   // Below the frame pointer.
@@ -136,8 +135,9 @@
   // When adding local variables remember to push space for them in
   // the frame in GetCode.
   static const int kInputStartMinusOne = kInputString - kPointerSize;
+  static const int kAtStart = kInputStartMinusOne - kPointerSize;
   // First register address. Following registers are below it on the stack.
-  static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+  static const int kRegisterZero = kAtStart - kPointerSize;
 
   // Initial size of code buffer.
   static const size_t kRegExpCodeSize = 1024;
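
The .cc and .h changes above are two halves of one refactoring: the
"at start" flag stops being an argument pushed by the caller (which is
why the regexp entry macro below shrinks from eight parameters to
seven) and becomes a frame local that the generated code derives from
kStartIndex. The tst plus two conditional movs compute a branch-free
(start_index == 0) test; in portable form, with the new frame slots
chained one pointer apart:

    // Sketch of the stored flag and the new local layout (illustrative).
    inline int ComputeAtStart(int start_index) {
      return start_index == 0 ? 1 : 0;  // tst r1, r1; mov eq / mov ne
    }
    // kInputStartMinusOne = kInputString        - kPointerSize
    // kAtStart            = kInputStartMinusOne - kPointerSize
    // kRegisterZero       = kAtStart            - kPointerSize
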
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index c4b1e00..f543151 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -47,9 +47,9 @@
 using ::v8::internal::DeleteArray;
 
 // This macro provides a platform independent use of sscanf. The reason for
-// SScanF not being implemented in a platform independent was through
-// ::v8::internal::OS in the same way as SNPrintF is that the Windows C Run-Time
-// Library does not provide vsscanf.
+// SScanF not being implemented in a platform independent way through
+// ::v8::internal::OS in the same way as SNPrintF is that the
+// Windows C Run-Time Library does not provide vsscanf.
 #define SScanF sscanf  // NOLINT
 
 // The Debugger class is used by the simulator while debugging simulated ARM
@@ -2033,42 +2033,62 @@
 // Decode Type 6 coprocessor instructions.
 // Dm = vmov(Rt, Rt2)
 // <Rt, Rt2> = vmov(Dm)
+// Ddst = MEM(Rbase + 4*offset).
+// MEM(Rbase + 4*offset) = Dsrc.
 void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
   ASSERT((instr->TypeField() == 6));
 
-  int rt = instr->RtField();
-  int rn = instr->RnField();
-  int vm = instr->VmField();
-
-  if (instr->Bit(23) == 1) {
-    UNIMPLEMENTED();
-  } else if (instr->Bit(22) == 1) {
-    if ((instr->Bits(27, 24) == 0xC) &&
-        (instr->Bit(22) == 1) &&
-        (instr->Bits(11, 8) == 0xB) &&
-        (instr->Bits(7, 6) == 0x0) &&
-        (instr->Bit(4) == 1)) {
-      if (instr->Bit(20) == 0) {
-        int32_t rs_val = get_register(rt);
-        int32_t rn_val = get_register(rn);
-
-        set_s_register_from_sinteger(2*vm, rs_val);
-        set_s_register_from_sinteger((2*vm+1), rn_val);
-
-      } else if (instr->Bit(20) == 1) {
-        int32_t rt_int_value = get_sinteger_from_s_register(2*vm);
-        int32_t rn_int_value = get_sinteger_from_s_register(2*vm+1);
-
-        set_register(rt, rt_int_value);
-        set_register(rn, rn_int_value);
-      }
-    } else {
-      UNIMPLEMENTED();
-    }
-  } else if (instr->Bit(21) == 1) {
-    UNIMPLEMENTED();
+  if (instr->CoprocessorField() != 0xB) {
+    UNIMPLEMENTED();  // Not used by V8.
   } else {
-    UNIMPLEMENTED();
+    switch (instr->OpcodeField()) {
+      case 0x2:
+        // Load and store double to two GP registers (vmov).
+        if (instr->Bits(7, 4) != 0x1) {
+          UNIMPLEMENTED();  // Not used by V8.
+        } else {
+          int rt = instr->RtField();
+          int rn = instr->RnField();
+          int vm = instr->VmField();
+          if (instr->HasL()) {
+            int32_t rt_int_value = get_sinteger_from_s_register(2*vm);
+            int32_t rn_int_value = get_sinteger_from_s_register(2*vm+1);
+
+            set_register(rt, rt_int_value);
+            set_register(rn, rn_int_value);
+          } else {
+            int32_t rs_val = get_register(rt);
+            int32_t rn_val = get_register(rn);
+
+            set_s_register_from_sinteger(2*vm, rs_val);
+            set_s_register_from_sinteger((2*vm+1), rn_val);
+          }
+        }
+        break;
+      case 0x8:
+      case 0xC: {  // Load and store double to memory.
+        int rn = instr->RnField();
+        int vd = instr->VdField();
+        int offset = instr->Immed8Field();
+        if (!instr->HasU()) {
+          offset = -offset;
+        }
+        int32_t address = get_register(rn) + 4 * offset;
+        if (instr->HasL()) {
+          // Load double from memory: vldr.
+          set_s_register_from_sinteger(2*vd, ReadW(address, instr));
+          set_s_register_from_sinteger(2*vd + 1, ReadW(address + 4, instr));
+        } else {
+          // Store double to memory: vstr.
+          WriteW(address, get_sinteger_from_s_register(2*vd), instr);
+          WriteW(address + 4, get_sinteger_from_s_register(2*vd + 1), instr);
+        }
+        break;
+      }
+      default:
+        UNIMPLEMENTED();  // Not used by V8.
+        break;
+    }
   }
 }
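
The rewritten Type 6 decoder mirrors the new vldr/vstr encodings:
coprocessor field 0xB selects VFP double transfers, the 8-bit immediate
is a word offset signed by the U bit, and one double maps onto a pair
of 32-bit S registers. A condensed sketch of that addressing, using an
invented struct rather than the real Instr accessors:

    #include <cstdint>

    struct VfpMemOp {            // decoded fields (illustrative)
      int rn, vd, offset8;
      bool u_bit, l_bit;         // U: add offset; L: load
    };

    void SimulateVfpMem(const VfpMemOp& op, int32_t gp[], int32_t s_regs[],
                        int32_t (*read_w)(int32_t),
                        void (*write_w)(int32_t, int32_t)) {
      int offset = op.u_bit ? op.offset8 : -op.offset8;
      int32_t address = gp[op.rn] + 4 * offset;
      if (op.l_bit) {            // vldr: D register <- memory
        s_regs[2 * op.vd]     = read_w(address);
        s_regs[2 * op.vd + 1] = read_w(address + 4);
      } else {                   // vstr: memory <- D register
        write_w(address,     s_regs[2 * op.vd]);
        write_w(address + 4, s_regs[2 * op.vd + 1]);
      }
    }
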
 
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index 3ce5b7a..1973730 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -63,8 +63,8 @@
 
 // Call the generated regexp code directly. The entry function pointer should
 // expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
-  entry(p0, p1, p2, p3, p4, p5, p6, p7)
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+  entry(p0, p1, p2, p3, p4, p5, p6)
 
 #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
   reinterpret_cast<TryCatch*>(try_catch_address)
@@ -79,9 +79,9 @@
       assembler::arm::Simulator::current()->Call(FUNCTION_ADDR(entry), 5, \
                                                  p0, p1, p2, p3, p4))
 
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
   assembler::arm::Simulator::current()->Call( \
-    FUNCTION_ADDR(entry), 8, p0, p1, p2, p3, p4, p5, p6, p7)
+    FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
 
 #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
   try_catch_address == NULL ? \
diff --git a/src/ast.h b/src/ast.h
index e753a52..22e096f 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -1324,7 +1324,7 @@
         loop_nesting_(0),
         function_token_position_(RelocInfo::kNoPosition),
         inferred_name_(Heap::empty_string()),
-        try_fast_codegen_(false) {
+        try_full_codegen_(false) {
 #ifdef DEBUG
     already_compiled_ = false;
 #endif
@@ -1364,8 +1364,8 @@
     inferred_name_ = inferred_name;
   }
 
-  bool try_fast_codegen() { return try_fast_codegen_; }
-  void set_try_fast_codegen(bool flag) { try_fast_codegen_ = flag; }
+  bool try_full_codegen() { return try_full_codegen_; }
+  void set_try_full_codegen(bool flag) { try_full_codegen_ = flag; }
 
 #ifdef DEBUG
   void mark_as_compiled() {
@@ -1389,7 +1389,7 @@
   int loop_nesting_;
   int function_token_position_;
   Handle<String> inferred_name_;
-  bool try_fast_codegen_;
+  bool try_full_codegen_;
 #ifdef DEBUG
   bool already_compiled_;
 #endif
diff --git a/src/codegen.cc b/src/codegen.cc
index fd7e0e8..aa2a2b8 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -217,6 +217,10 @@
 Handle<Code> CodeGenerator::MakeCode(FunctionLiteral* fun,
                                      Handle<Script> script,
                                      bool is_eval) {
+  if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+    int len = String::cast(script->source())->length();
+    Counters::total_old_codegen_source_size.Increment(len);
+  }
   MakeCodePrologue(fun);
   // Generate code.
   const int kInitialBufferSize = 4 * KB;
@@ -344,6 +348,7 @@
   {&CodeGenerator::GenerateRandomPositiveSmi, "_RandomPositiveSmi"},
   {&CodeGenerator::GenerateIsObject, "_IsObject"},
   {&CodeGenerator::GenerateIsFunction, "_IsFunction"},
+  {&CodeGenerator::GenerateIsUndetectableObject, "_IsUndetectableObject"},
   {&CodeGenerator::GenerateStringAdd, "_StringAdd"},
   {&CodeGenerator::GenerateSubString, "_SubString"},
   {&CodeGenerator::GenerateStringCompare, "_StringCompare"},
diff --git a/src/compilation-cache.cc b/src/compilation-cache.cc
index d8e186a..5427367 100644
--- a/src/compilation-cache.cc
+++ b/src/compilation-cache.cc
@@ -37,9 +37,7 @@
 static const int kSubCacheCount = 4;
 
 // The number of generations for each sub cache.
-// TODO(andreip): remove this #ifdef if the page cycler confirms that all is
-// well and we can cache up to 5 script generations.
-#if 0 // defined(ANDROID)
+#if defined(ANDROID)
 static const int kScriptGenerations = 1;
 static const int kEvalGlobalGenerations = 1;
 static const int kEvalContextualGenerations = 1;
diff --git a/src/compiler.cc b/src/compiler.cc
index b7aaedf..7482ae1 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -33,6 +33,7 @@
 #include "compiler.h"
 #include "debug.h"
 #include "fast-codegen.h"
+#include "full-codegen.h"
 #include "oprofile-agent.h"
 #include "rewriter.h"
 #include "scopes.h"
@@ -42,29 +43,6 @@
 namespace internal {
 
 
-class CodeGenSelector: public AstVisitor {
- public:
-  enum CodeGenTag { NORMAL, FAST };
-
-  CodeGenSelector() : has_supported_syntax_(true) {}
-
-  CodeGenTag Select(FunctionLiteral* fun);
-
- private:
-  void VisitDeclarations(ZoneList<Declaration*>* decls);
-  void VisitStatements(ZoneList<Statement*>* stmts);
-
-  // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
-  AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
-  bool has_supported_syntax_;
-
-  DISALLOW_COPY_AND_ASSIGN(CodeGenSelector);
-};
-
-
 static Handle<Code> MakeCode(FunctionLiteral* literal,
                              Handle<Script> script,
                              Handle<Context> context,
@@ -104,27 +82,37 @@
     return Handle<Code>::null();
   }
 
-  // Generate code and return it.
-  if (FLAG_fast_compiler) {
-    // If there is no shared function info, try the fast code
-    // generator for code in the global scope.  Otherwise obey the
-    // explicit hint in the shared function info.
-    // If always_fast_compiler is true, always try the fast compiler.
-    if (shared.is_null() && !literal->scope()->is_global_scope() &&
-        !FLAG_always_fast_compiler) {
-      if (FLAG_trace_bailout) PrintF("Non-global scope\n");
-    } else if (!shared.is_null() && !shared->try_fast_codegen() &&
-               !FLAG_always_fast_compiler) {
-      if (FLAG_trace_bailout) PrintF("No hint to try fast\n");
-    } else {
-      CodeGenSelector selector;
-      CodeGenSelector::CodeGenTag code_gen = selector.Select(literal);
-      if (code_gen == CodeGenSelector::FAST) {
-        return FastCodeGenerator::MakeCode(literal, script, is_eval);
-      }
-      ASSERT(code_gen == CodeGenSelector::NORMAL);
+  // Generate code and return it.  Code generator selection is governed by
+  // which backends are enabled and whether the function is considered
+  // run-once code or not:
+  //
+  //  --full-compiler enables the dedicated backend for code we expect to be
+  //    run once
+  //  --fast-compiler enables a speculative optimizing backend (for
+  //    non-run-once code)
+  //
+  // The normal choice of backend can be overridden with the flags
+  // --always-full-compiler and --always-fast-compiler, which are mutually
+  // incompatible.
+  CHECK(!FLAG_always_full_compiler || !FLAG_always_fast_compiler);
+
+  bool is_run_once = (shared.is_null())
+      ? literal->scope()->is_global_scope()
+      : (shared->is_toplevel() || shared->try_full_codegen());
+
+  if (FLAG_always_full_compiler || (FLAG_full_compiler && is_run_once)) {
+    FullCodeGenSyntaxChecker checker;
+    checker.Check(literal);
+    if (checker.has_supported_syntax()) {
+      return FullCodeGenerator::MakeCode(literal, script, is_eval);
     }
+  } else if (FLAG_always_fast_compiler ||
+             (FLAG_fast_compiler && !is_run_once)) {
+    FastCodeGenSyntaxChecker checker;
+    checker.Check(literal);
+    // Does not yet generate code.
   }
+
   return CodeGenerator::MakeCode(literal, script, is_eval);
 }
 
@@ -491,21 +479,30 @@
       return Handle<JSFunction>::null();
     }
 
-    // Generate code and return it.
+    // Generate code and return it.  The way that the compilation mode
+    // is controlled by the command-line flags is described in
+    // the static helper function MakeCode.
+    CHECK(!FLAG_always_full_compiler || !FLAG_always_fast_compiler);
+    bool is_run_once = literal->try_full_codegen();
     bool is_compiled = false;
-    if (FLAG_fast_compiler && literal->try_fast_codegen()) {
-      CodeGenSelector selector;
-      CodeGenSelector::CodeGenTag code_gen = selector.Select(literal);
-      if (code_gen == CodeGenSelector::FAST) {
-        code = FastCodeGenerator::MakeCode(literal,
+    if (FLAG_always_full_compiler || (FLAG_full_compiler && is_run_once)) {
+      FullCodeGenSyntaxChecker checker;
+      checker.Check(literal);
+      if (checker.has_supported_syntax()) {
+        code = FullCodeGenerator::MakeCode(literal,
                                            script,
                                            false);  // Not eval.
         is_compiled = true;
       }
+    } else if (FLAG_always_fast_compiler ||
+               (FLAG_fast_compiler && !is_run_once)) {
+      FastCodeGenSyntaxChecker checker;
+      checker.Check(literal);
+      // Generate no code.
     }
 
     if (!is_compiled) {
-      // We didn't try the fast compiler, or we failed to select it.
+      // We fall back to the classic V8 code generator.
       code = CodeGenerator::MakeCode(literal,
                                      script,
                                      false);  // Not eval.
@@ -567,422 +564,8 @@
   fun->shared()->SetThisPropertyAssignmentsInfo(
       lit->has_only_simple_this_property_assignments(),
       *lit->this_property_assignments());
-  fun->shared()->set_try_fast_codegen(lit->try_fast_codegen());
+  fun->shared()->set_try_full_codegen(lit->try_full_codegen());
 }
 
 
-CodeGenSelector::CodeGenTag CodeGenSelector::Select(FunctionLiteral* fun) {
-  Scope* scope = fun->scope();
-
-  if (scope->num_heap_slots() > 0) {
-    // We support functions with a local context if they do not have
-    // parameters that need to be copied into the context.
-    for (int i = 0, len = scope->num_parameters(); i < len; i++) {
-      Slot* slot = scope->parameter(i)->slot();
-      if (slot != NULL && slot->type() == Slot::CONTEXT) {
-        if (FLAG_trace_bailout) {
-          PrintF("Function has context-allocated parameters.\n");
-        }
-        return NORMAL;
-      }
-    }
-  }
-
-  has_supported_syntax_ = true;
-  VisitDeclarations(scope->declarations());
-  if (!has_supported_syntax_) return NORMAL;
-
-  VisitStatements(fun->body());
-  return has_supported_syntax_ ? FAST : NORMAL;
-}
-
-
-#define BAILOUT(reason)                         \
-  do {                                          \
-    if (FLAG_trace_bailout) {                   \
-      PrintF("%s\n", reason);                   \
-    }                                           \
-    has_supported_syntax_ = false;              \
-    return;                                     \
-  } while (false)
-
-
-#define CHECK_BAILOUT                           \
-  do {                                          \
-    if (!has_supported_syntax_) return;         \
-  } while (false)
-
-
-void CodeGenSelector::VisitDeclarations(ZoneList<Declaration*>* decls) {
-  for (int i = 0; i < decls->length(); i++) {
-    Visit(decls->at(i));
-    CHECK_BAILOUT;
-  }
-}
-
-
-void CodeGenSelector::VisitStatements(ZoneList<Statement*>* stmts) {
-  for (int i = 0, len = stmts->length(); i < len; i++) {
-    Visit(stmts->at(i));
-    CHECK_BAILOUT;
-  }
-}
-
-
-void CodeGenSelector::VisitDeclaration(Declaration* decl) {
-  Property* prop = decl->proxy()->AsProperty();
-  if (prop != NULL) {
-    Visit(prop->obj());
-    Visit(prop->key());
-  }
-
-  if (decl->fun() != NULL) {
-    Visit(decl->fun());
-  }
-}
-
-
-void CodeGenSelector::VisitBlock(Block* stmt) {
-  VisitStatements(stmt->statements());
-}
-
-
-void CodeGenSelector::VisitExpressionStatement(ExpressionStatement* stmt) {
-  Visit(stmt->expression());
-}
-
-
-void CodeGenSelector::VisitEmptyStatement(EmptyStatement* stmt) {}
-
-
-void CodeGenSelector::VisitIfStatement(IfStatement* stmt) {
-  Visit(stmt->condition());
-  CHECK_BAILOUT;
-  Visit(stmt->then_statement());
-  CHECK_BAILOUT;
-  Visit(stmt->else_statement());
-}
-
-
-void CodeGenSelector::VisitContinueStatement(ContinueStatement* stmt) {}
-
-
-void CodeGenSelector::VisitBreakStatement(BreakStatement* stmt) {}
-
-
-void CodeGenSelector::VisitReturnStatement(ReturnStatement* stmt) {
-  Visit(stmt->expression());
-}
-
-
-void CodeGenSelector::VisitWithEnterStatement(WithEnterStatement* stmt) {
-  Visit(stmt->expression());
-}
-
-
-void CodeGenSelector::VisitWithExitStatement(WithExitStatement* stmt) {}
-
-
-void CodeGenSelector::VisitSwitchStatement(SwitchStatement* stmt) {
-  BAILOUT("SwitchStatement");
-}
-
-
-void CodeGenSelector::VisitDoWhileStatement(DoWhileStatement* stmt) {
-  Visit(stmt->cond());
-  CHECK_BAILOUT;
-  Visit(stmt->body());
-}
-
-
-void CodeGenSelector::VisitWhileStatement(WhileStatement* stmt) {
-  Visit(stmt->cond());
-  CHECK_BAILOUT;
-  Visit(stmt->body());
-}
-
-
-void CodeGenSelector::VisitForStatement(ForStatement* stmt) {
-  BAILOUT("ForStatement");
-}
-
-
-void CodeGenSelector::VisitForInStatement(ForInStatement* stmt) {
-  BAILOUT("ForInStatement");
-}
-
-
-void CodeGenSelector::VisitTryCatchStatement(TryCatchStatement* stmt) {
-  Visit(stmt->try_block());
-  CHECK_BAILOUT;
-  Visit(stmt->catch_block());
-}
-
-
-void CodeGenSelector::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
-  Visit(stmt->try_block());
-  CHECK_BAILOUT;
-  Visit(stmt->finally_block());
-}
-
-
-void CodeGenSelector::VisitDebuggerStatement(DebuggerStatement* stmt) {}
-
-
-void CodeGenSelector::VisitFunctionLiteral(FunctionLiteral* expr) {}
-
-
-void CodeGenSelector::VisitFunctionBoilerplateLiteral(
-    FunctionBoilerplateLiteral* expr) {
-  BAILOUT("FunctionBoilerplateLiteral");
-}
-
-
-void CodeGenSelector::VisitConditional(Conditional* expr) {
-  Visit(expr->condition());
-  CHECK_BAILOUT;
-  Visit(expr->then_expression());
-  CHECK_BAILOUT;
-  Visit(expr->else_expression());
-}
-
-
-void CodeGenSelector::VisitSlot(Slot* expr) {
-  UNREACHABLE();
-}
-
-
-void CodeGenSelector::VisitVariableProxy(VariableProxy* expr) {
-  Variable* var = expr->var();
-  if (!var->is_global()) {
-    Slot* slot = var->slot();
-    if (slot != NULL) {
-      Slot::Type type = slot->type();
-      // When LOOKUP slots are enabled, some currently dead code
-      // implementing unary typeof will become live.
-      if (type == Slot::LOOKUP) {
-        BAILOUT("Lookup slot");
-      }
-    } else {
-      // If not global or a slot, it is a parameter rewritten to an explicit
-      // property reference on the (shadow) arguments object.
-#ifdef DEBUG
-      Property* property = var->AsProperty();
-      ASSERT_NOT_NULL(property);
-      Variable* object = property->obj()->AsVariableProxy()->AsVariable();
-      ASSERT_NOT_NULL(object);
-      ASSERT_NOT_NULL(object->slot());
-      ASSERT_NOT_NULL(property->key()->AsLiteral());
-      ASSERT(property->key()->AsLiteral()->handle()->IsSmi());
-#endif
-    }
-  }
-}
-
-
-void CodeGenSelector::VisitLiteral(Literal* expr) {}
-
-
-void CodeGenSelector::VisitRegExpLiteral(RegExpLiteral* expr) {}
-
-
-void CodeGenSelector::VisitObjectLiteral(ObjectLiteral* expr) {
-  ZoneList<ObjectLiteral::Property*>* properties = expr->properties();
-
-  for (int i = 0, len = properties->length(); i < len; i++) {
-    ObjectLiteral::Property* property = properties->at(i);
-    if (property->IsCompileTimeValue()) continue;
-    Visit(property->key());
-    CHECK_BAILOUT;
-    Visit(property->value());
-    CHECK_BAILOUT;
-  }
-}
-
-
-void CodeGenSelector::VisitArrayLiteral(ArrayLiteral* expr) {
-  ZoneList<Expression*>* subexprs = expr->values();
-  for (int i = 0, len = subexprs->length(); i < len; i++) {
-    Expression* subexpr = subexprs->at(i);
-    if (subexpr->AsLiteral() != NULL) continue;
-    if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
-    Visit(subexpr);
-    CHECK_BAILOUT;
-  }
-}
-
-
-void CodeGenSelector::VisitCatchExtensionObject(CatchExtensionObject* expr) {
-  Visit(expr->key());
-  CHECK_BAILOUT;
-  Visit(expr->value());
-}
-
-
-void CodeGenSelector::VisitAssignment(Assignment* expr) {
-  // We support plain non-compound assignments to properties, parameters and
-  // non-context (stack-allocated) locals, and global variables.
-  Token::Value op = expr->op();
-  if (op == Token::INIT_CONST) BAILOUT("initialize constant");
-
-  Variable* var = expr->target()->AsVariableProxy()->AsVariable();
-  Property* prop = expr->target()->AsProperty();
-  ASSERT(var == NULL || prop == NULL);
-  if (var != NULL) {
-    if (var->mode() == Variable::CONST) {
-      BAILOUT("Assignment to const");
-    }
-    // All global variables are supported.
-    if (!var->is_global()) {
-      ASSERT(var->slot() != NULL);
-      Slot::Type type = var->slot()->type();
-      if (type == Slot::LOOKUP) {
-        BAILOUT("Lookup slot");
-      }
-    }
-  } else if (prop != NULL) {
-    Visit(prop->obj());
-    CHECK_BAILOUT;
-    Visit(prop->key());
-    CHECK_BAILOUT;
-  } else {
-    // This is a throw reference error.
-    BAILOUT("non-variable/non-property assignment");
-  }
-
-  Visit(expr->value());
-}
-
-
-void CodeGenSelector::VisitThrow(Throw* expr) {
-  Visit(expr->exception());
-}
-
-
-void CodeGenSelector::VisitProperty(Property* expr) {
-  Visit(expr->obj());
-  CHECK_BAILOUT;
-  Visit(expr->key());
-}
-
-
-void CodeGenSelector::VisitCall(Call* expr) {
-  Expression* fun = expr->expression();
-  ZoneList<Expression*>* args = expr->arguments();
-  Variable* var = fun->AsVariableProxy()->AsVariable();
-
-  // Check for supported calls
-  if (var != NULL && var->is_possibly_eval()) {
-    BAILOUT("call to the identifier 'eval'");
-  } else if (var != NULL && !var->is_this() && var->is_global()) {
-    // Calls to global variables are supported.
-  } else if (var != NULL && var->slot() != NULL &&
-             var->slot()->type() == Slot::LOOKUP) {
-    BAILOUT("call to a lookup slot");
-  } else if (fun->AsProperty() != NULL) {
-    Property* prop = fun->AsProperty();
-    Visit(prop->obj());
-    CHECK_BAILOUT;
-    Visit(prop->key());
-    CHECK_BAILOUT;
-  } else {
-    // Otherwise the call is supported if the function expression is.
-    Visit(fun);
-  }
-  // Check all arguments to the call.
-  for (int i = 0; i < args->length(); i++) {
-    Visit(args->at(i));
-    CHECK_BAILOUT;
-  }
-}
-
-
-void CodeGenSelector::VisitCallNew(CallNew* expr) {
-  Visit(expr->expression());
-  CHECK_BAILOUT;
-  ZoneList<Expression*>* args = expr->arguments();
-  // Check all arguments to the call
-  for (int i = 0; i < args->length(); i++) {
-    Visit(args->at(i));
-    CHECK_BAILOUT;
-  }
-}
-
-
-void CodeGenSelector::VisitCallRuntime(CallRuntime* expr) {
-  // Check for inline runtime call
-  if (expr->name()->Get(0) == '_' &&
-      CodeGenerator::FindInlineRuntimeLUT(expr->name()) != NULL) {
-    BAILOUT("inlined runtime call");
-  }
-  // Check all arguments to the call.  (Relies on TEMP meaning STACK.)
-  for (int i = 0; i < expr->arguments()->length(); i++) {
-    Visit(expr->arguments()->at(i));
-    CHECK_BAILOUT;
-  }
-}
-
-
-void CodeGenSelector::VisitUnaryOperation(UnaryOperation* expr) {
-  switch (expr->op()) {
-    case Token::VOID:
-    case Token::NOT:
-    case Token::TYPEOF:
-      Visit(expr->expression());
-      break;
-    case Token::BIT_NOT:
-      BAILOUT("UnaryOperataion: BIT_NOT");
-    case Token::DELETE:
-      BAILOUT("UnaryOperataion: DELETE");
-    default:
-      BAILOUT("UnaryOperataion");
-  }
-}
-
-
-void CodeGenSelector::VisitCountOperation(CountOperation* expr) {
-  Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
-  Property* prop = expr->expression()->AsProperty();
-  ASSERT(var == NULL || prop == NULL);
-  if (var != NULL) {
-    // All global variables are supported.
-    if (!var->is_global()) {
-      ASSERT(var->slot() != NULL);
-      Slot::Type type = var->slot()->type();
-      if (type == Slot::LOOKUP) {
-        BAILOUT("CountOperation with lookup slot");
-      }
-    }
-  } else if (prop != NULL) {
-    Visit(prop->obj());
-    CHECK_BAILOUT;
-    Visit(prop->key());
-    CHECK_BAILOUT;
-  } else {
-    // This is a throw reference error.
-    BAILOUT("CountOperation non-variable/non-property expression");
-  }
-}
-
-
-void CodeGenSelector::VisitBinaryOperation(BinaryOperation* expr) {
-  Visit(expr->left());
-  CHECK_BAILOUT;
-  Visit(expr->right());
-}
-
-
-void CodeGenSelector::VisitCompareOperation(CompareOperation* expr) {
-  Visit(expr->left());
-  CHECK_BAILOUT;
-  Visit(expr->right());
-}
-
-
-void CodeGenSelector::VisitThisFunction(ThisFunction* expr) {}
-
-#undef BAILOUT
-#undef CHECK_BAILOUT
-
-
 } }  // namespace v8::internal
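
With CodeGenSelector gone, backend choice in compiler.cc is a pure
predicate over the flags plus the is_run_once estimate, with the classic
code generator as the universal fallback whenever a syntax checker bails
out. A hypothetical condensation of that dispatch (the real logic is
inline in MakeCode above):

    enum Backend { CLASSIC, FULL, FAST_CHECK_ONLY };

    Backend ChooseBackend(bool always_full, bool always_fast,
                          bool full_flag, bool fast_flag,
                          bool is_run_once, bool syntax_supported) {
      // always_full and always_fast are mutually exclusive (CHECKed above).
      if (always_full || (full_flag && is_run_once)) {
        return syntax_supported ? FULL : CLASSIC;
      }
      if (always_fast || (fast_flag && !is_run_once)) {
        return FAST_CHECK_ONLY;  // checker runs; codegen still classic
      }
      return CLASSIC;
    }
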
diff --git a/src/debug-delay.js b/src/debug-delay.js
index 04fde1f..14d8c88 100644
--- a/src/debug-delay.js
+++ b/src/debug-delay.js
@@ -1704,7 +1704,7 @@
   if (global) {
     // Evaluate in the global context.
     response.body =
-        this.exec_state_.evaluateGlobal(expression), Boolean(disable_break);
+        this.exec_state_.evaluateGlobal(expression, Boolean(disable_break));
     return;
   }
 
diff --git a/src/debug.cc b/src/debug.cc
index 34b3a6d..fc809c5 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -1695,9 +1695,7 @@
   // Scan heap for Script objects.
   int count = 0;
   HeapIterator iterator;
-  while (iterator.has_next()) {
-    HeapObject* obj = iterator.next();
-    ASSERT(obj != NULL);
+  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     if (obj->IsScript() && Script::cast(obj)->HasValidSource()) {
       script_cache_->Add(Handle<Script>(Script::cast(obj)));
       count++;
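
The loop rewrite above is the NULL-terminated iterator idiom: next()
returns NULL once the heap is exhausted, which folds the old has_next()
probe and the ASSERT into the loop header. In general form, assuming any
iterator with that contract:

    for (HeapObject* obj = iterator.next(); obj != NULL;
         obj = iterator.next()) {
      // process obj
    }
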
diff --git a/src/dtoa-config.c b/src/dtoa-config.c
index a1acd2d..9c5ee33 100644
--- a/src/dtoa-config.c
+++ b/src/dtoa-config.c
@@ -38,7 +38,8 @@
  */
 
 #if !(defined(__APPLE__) && defined(__MACH__)) && \
-    !defined(WIN32) && !defined(__FreeBSD__) && !defined(__OpenBSD__)
+    !defined(WIN32) && !defined(__FreeBSD__) && !defined(__OpenBSD__) && \
+    !defined(__sun)
 #include <endian.h>
 #endif
 #include <math.h>
@@ -47,7 +48,7 @@
 /* The floating point word order on ARM is big endian when floating point
  * emulation is used, even if the byte order is little endian */
 #if !(defined(__APPLE__) && defined(__MACH__)) && !defined(WIN32) && \
-    !defined(__FreeBSD__) && !defined(__OpenBSD__) && \
+    !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__sun) && \
     __FLOAT_WORD_ORDER == __BIG_ENDIAN
 #define  IEEE_MC68k
 #else
@@ -56,7 +57,7 @@
 
 #define __MATH_H__
 #if defined(__APPLE__) && defined(__MACH__) || defined(__FreeBSD__) || \
-    defined(__OpenBSD__)
+    defined(__OpenBSD__) || defined(__sun)
 /* stdlib.h on FreeBSD and Apple's 10.5 and later SDKs will mangle the
  * name of strtod.  If it's included after strtod is redefined as
  * gay_strtod, it will mangle the name of gay_strtod, which is
diff --git a/src/fast-codegen.cc b/src/fast-codegen.cc
index e90a44e..4e2df74 100644
--- a/src/fast-codegen.cc
+++ b/src/fast-codegen.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -27,681 +27,280 @@
 
 #include "v8.h"
 
-#include "codegen-inl.h"
-#include "compiler.h"
 #include "fast-codegen.h"
-#include "stub-cache.h"
-#include "debug.h"
+#include "scopes.h"
 
 namespace v8 {
 namespace internal {
 
-#define __ ACCESS_MASM(masm())
+#define BAILOUT(reason)                         \
+  do {                                          \
+    if (FLAG_trace_bailout) {                   \
+      PrintF("%s\n", reason);                   \
+    }                                           \
+    has_supported_syntax_ = false;              \
+    return;                                     \
+  } while (false)
 
-Handle<Code> FastCodeGenerator::MakeCode(FunctionLiteral* fun,
-                                         Handle<Script> script,
-                                         bool is_eval) {
-  CodeGenerator::MakeCodePrologue(fun);
-  const int kInitialBufferSize = 4 * KB;
-  MacroAssembler masm(NULL, kInitialBufferSize);
-  FastCodeGenerator cgen(&masm, script, is_eval);
-  cgen.Generate(fun);
-  if (cgen.HasStackOverflow()) {
-    ASSERT(!Top::has_pending_exception());
-    return Handle<Code>::null();
+
+#define CHECK_BAILOUT                           \
+  do {                                          \
+    if (!has_supported_syntax_) return;         \
+  } while (false)
+
+
+void FastCodeGenSyntaxChecker::Check(FunctionLiteral* fun) {
+  Scope* scope = fun->scope();
+
+  // We do not support stack or heap slots (both of which require
+  // allocation).
+  if (scope->num_stack_slots() > 0) {
+    BAILOUT("Function has stack-allocated locals");
   }
-  Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, NOT_IN_LOOP);
-  return CodeGenerator::MakeCodeEpilogue(fun, &masm, flags, script);
+  if (scope->num_heap_slots() > 0) {
+    BAILOUT("Function has context-allocated locals");
+  }
+
+  VisitDeclarations(scope->declarations());
+  CHECK_BAILOUT;
+
+  // We do not support empty function bodies.
+  if (fun->body()->is_empty()) BAILOUT("Function has an empty body");
+  VisitStatements(fun->body());
 }
 
 
-int FastCodeGenerator::SlotOffset(Slot* slot) {
-  ASSERT(slot != NULL);
-  // Offset is negative because higher indexes are at lower addresses.
-  int offset = -slot->index() * kPointerSize;
-  // Adjust by a (parameter or local) base offset.
-  switch (slot->type()) {
-    case Slot::PARAMETER:
-      offset += (function_->scope()->num_parameters() + 1) * kPointerSize;
-      break;
-    case Slot::LOCAL:
-      offset += JavaScriptFrameConstants::kLocal0Offset;
-      break;
-    case Slot::CONTEXT:
-    case Slot::LOOKUP:
-      UNREACHABLE();
-  }
-  return offset;
+void FastCodeGenSyntaxChecker::VisitDeclarations(
+    ZoneList<Declaration*>* decls) {
+  if (!decls->is_empty()) BAILOUT("Function has declarations");
 }
 
 
-void FastCodeGenerator::VisitDeclarations(
-    ZoneList<Declaration*>* declarations) {
-  int length = declarations->length();
-  int globals = 0;
-  for (int i = 0; i < length; i++) {
-    Declaration* decl = declarations->at(i);
-    Variable* var = decl->proxy()->var();
-    Slot* slot = var->slot();
-
-    // If it was not possible to allocate the variable at compile
-    // time, we need to "declare" it at runtime to make sure it
-    // actually exists in the local context.
-    if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
-      VisitDeclaration(decl);
-    } else {
-      // Count global variables and functions for later processing
-      globals++;
-    }
-  }
-
-  // Compute array of global variable and function declarations.
-  // Do nothing in case of no declared global functions or variables.
-  if (globals > 0) {
-    Handle<FixedArray> array = Factory::NewFixedArray(2 * globals, TENURED);
-    for (int j = 0, i = 0; i < length; i++) {
-      Declaration* decl = declarations->at(i);
-      Variable* var = decl->proxy()->var();
-      Slot* slot = var->slot();
-
-      if ((slot == NULL || slot->type() != Slot::LOOKUP) && var->is_global()) {
-        array->set(j++, *(var->name()));
-        if (decl->fun() == NULL) {
-          if (var->mode() == Variable::CONST) {
-            // In case this is const property use the hole.
-            array->set_the_hole(j++);
-          } else {
-            array->set_undefined(j++);
-          }
-        } else {
-          Handle<JSFunction> function =
-              Compiler::BuildBoilerplate(decl->fun(), script_, this);
-          // Check for stack-overflow exception.
-          if (HasStackOverflow()) return;
-          array->set(j++, *function);
-        }
-      }
-    }
-    // Invoke the platform-dependent code generator to do the actual
-    // declaration the global variables and functions.
-    DeclareGlobals(array);
+void FastCodeGenSyntaxChecker::VisitStatements(ZoneList<Statement*>* stmts) {
+  for (int i = 0, len = stmts->length(); i < len; i++) {
+    Visit(stmts->at(i));
+    CHECK_BAILOUT;
   }
 }
 
 
-void FastCodeGenerator::SetFunctionPosition(FunctionLiteral* fun) {
-  if (FLAG_debug_info) {
-    CodeGenerator::RecordPositions(masm_, fun->start_position());
-  }
+void FastCodeGenSyntaxChecker::VisitDeclaration(Declaration* decl) {
+  UNREACHABLE();
 }
 
 
-void FastCodeGenerator::SetReturnPosition(FunctionLiteral* fun) {
-  if (FLAG_debug_info) {
-    CodeGenerator::RecordPositions(masm_, fun->end_position());
-  }
-}
-
-
-void FastCodeGenerator::SetStatementPosition(Statement* stmt) {
-  if (FLAG_debug_info) {
-    CodeGenerator::RecordPositions(masm_, stmt->statement_pos());
-  }
-}
-
-
-void FastCodeGenerator::SetStatementPosition(int pos) {
-  if (FLAG_debug_info) {
-    CodeGenerator::RecordPositions(masm_, pos);
-  }
-}
-
-
-void FastCodeGenerator::SetSourcePosition(int pos) {
-  if (FLAG_debug_info && pos != RelocInfo::kNoPosition) {
-    masm_->RecordPosition(pos);
-  }
-}
-
-
-void FastCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
-  Label eval_right, done;
-
-  // Set up the appropriate context for the left subexpression based
-  // on the operation and our own context.  Initially assume we can
-  // inherit both true and false labels from our context.
-  if (expr->op() == Token::OR) {
-    switch (context_) {
-      case Expression::kUninitialized:
-        UNREACHABLE();
-      case Expression::kEffect:
-        VisitForControl(expr->left(), &done, &eval_right);
-        break;
-      case Expression::kValue:
-        VisitForValueControl(expr->left(),
-                             location_,
-                             &done,
-                             &eval_right);
-        break;
-      case Expression::kTest:
-        VisitForControl(expr->left(), true_label_, &eval_right);
-        break;
-      case Expression::kValueTest:
-        VisitForValueControl(expr->left(),
-                             location_,
-                             true_label_,
-                             &eval_right);
-        break;
-      case Expression::kTestValue:
-        VisitForControl(expr->left(), true_label_, &eval_right);
-        break;
-    }
-  } else {
-    ASSERT_EQ(Token::AND, expr->op());
-    switch (context_) {
-      case Expression::kUninitialized:
-        UNREACHABLE();
-      case Expression::kEffect:
-        VisitForControl(expr->left(), &eval_right, &done);
-        break;
-      case Expression::kValue:
-        VisitForControlValue(expr->left(),
-                             location_,
-                             &eval_right,
-                             &done);
-        break;
-      case Expression::kTest:
-        VisitForControl(expr->left(), &eval_right, false_label_);
-        break;
-      case Expression::kValueTest:
-        VisitForControl(expr->left(), &eval_right, false_label_);
-        break;
-      case Expression::kTestValue:
-        VisitForControlValue(expr->left(),
-                             location_,
-                             &eval_right,
-                             false_label_);
-        break;
-    }
-  }
-
-  __ bind(&eval_right);
-  Visit(expr->right());
-
-  __ bind(&done);
-}
-
-
-void FastCodeGenerator::VisitBlock(Block* stmt) {
-  Comment cmnt(masm_, "[ Block");
-  Breakable nested_statement(this, stmt);
-  SetStatementPosition(stmt);
+void FastCodeGenSyntaxChecker::VisitBlock(Block* stmt) {
   VisitStatements(stmt->statements());
-  __ bind(nested_statement.break_target());
 }
 
 
-void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
-  Comment cmnt(masm_, "[ ExpressionStatement");
-  SetStatementPosition(stmt);
-  VisitForEffect(stmt->expression());
+void FastCodeGenSyntaxChecker::VisitExpressionStatement(
+    ExpressionStatement* stmt) {
+  Visit(stmt->expression());
 }
 
 
-void FastCodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
-  Comment cmnt(masm_, "[ EmptyStatement");
-  SetStatementPosition(stmt);
+void FastCodeGenSyntaxChecker::VisitEmptyStatement(EmptyStatement* stmt) {
+  // Supported.
 }
 
 
-void FastCodeGenerator::VisitIfStatement(IfStatement* stmt) {
-  Comment cmnt(masm_, "[ IfStatement");
-  SetStatementPosition(stmt);
-  Label then_part, else_part, done;
-
-  // Do not worry about optimizing for empty then or else bodies.
-  VisitForControl(stmt->condition(), &then_part, &else_part);
-
-  __ bind(&then_part);
-  Visit(stmt->then_statement());
-  __ jmp(&done);
-
-  __ bind(&else_part);
-  Visit(stmt->else_statement());
-
-  __ bind(&done);
+void FastCodeGenSyntaxChecker::VisitIfStatement(IfStatement* stmt) {
+  BAILOUT("IfStatement");
 }
 
 
-void FastCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
-  Comment cmnt(masm_,  "[ ContinueStatement");
-  SetStatementPosition(stmt);
-  NestedStatement* current = nesting_stack_;
-  int stack_depth = 0;
-  while (!current->IsContinueTarget(stmt->target())) {
-    stack_depth = current->Exit(stack_depth);
-    current = current->outer();
-  }
-  __ Drop(stack_depth);
-
-  Iteration* loop = current->AsIteration();
-  __ jmp(loop->continue_target());
+void FastCodeGenSyntaxChecker::VisitContinueStatement(ContinueStatement* stmt) {
+  BAILOUT("Continuestatement");
 }
 
 
-void FastCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
-  Comment cmnt(masm_,  "[ BreakStatement");
-  SetStatementPosition(stmt);
-  NestedStatement* current = nesting_stack_;
-  int stack_depth = 0;
-  while (!current->IsBreakTarget(stmt->target())) {
-    stack_depth = current->Exit(stack_depth);
-    current = current->outer();
-  }
-  __ Drop(stack_depth);
-
-  Breakable* target = current->AsBreakable();
-  __ jmp(target->break_target());
+void FastCodeGenSyntaxChecker::VisitBreakStatement(BreakStatement* stmt) {
+  BAILOUT("BreakStatement");
 }
 
 
-void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
-  Comment cmnt(masm_, "[ ReturnStatement");
-  SetStatementPosition(stmt);
-  Expression* expr = stmt->expression();
-  VisitForValue(expr, kAccumulator);
-
-  // Exit all nested statements.
-  NestedStatement* current = nesting_stack_;
-  int stack_depth = 0;
-  while (current != NULL) {
-    stack_depth = current->Exit(stack_depth);
-    current = current->outer();
-  }
-  __ Drop(stack_depth);
-
-  EmitReturnSequence(stmt->statement_pos());
+void FastCodeGenSyntaxChecker::VisitReturnStatement(ReturnStatement* stmt) {
+  BAILOUT("ReturnStatement");
 }
 
 
-void FastCodeGenerator::VisitWithEnterStatement(WithEnterStatement* stmt) {
-  Comment cmnt(masm_, "[ WithEnterStatement");
-  SetStatementPosition(stmt);
-
-  VisitForValue(stmt->expression(), kStack);
-  if (stmt->is_catch_block()) {
-    __ CallRuntime(Runtime::kPushCatchContext, 1);
-  } else {
-    __ CallRuntime(Runtime::kPushContext, 1);
-  }
-  // Both runtime calls return the new context in both the context and the
-  // result registers.
-
-  // Update local stack frame context field.
-  StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
+void FastCodeGenSyntaxChecker::VisitWithEnterStatement(
+    WithEnterStatement* stmt) {
+  BAILOUT("WithEnterStatement");
 }
 
 
-void FastCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
-  Comment cmnt(masm_, "[ WithExitStatement");
-  SetStatementPosition(stmt);
-
-  // Pop context.
-  LoadContextField(context_register(), Context::PREVIOUS_INDEX);
-  // Update local stack frame context field.
-  StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
+void FastCodeGenSyntaxChecker::VisitWithExitStatement(WithExitStatement* stmt) {
+  BAILOUT("WithExitStatement");
 }
 
 
-void FastCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
-  UNREACHABLE();
+void FastCodeGenSyntaxChecker::VisitSwitchStatement(SwitchStatement* stmt) {
+  BAILOUT("SwitchStatement");
 }
 
 
-void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
-  Comment cmnt(masm_, "[ DoWhileStatement");
-  SetStatementPosition(stmt);
-  Label body, stack_limit_hit, stack_check_success;
-
-  Iteration loop_statement(this, stmt);
-  increment_loop_depth();
-
-  __ bind(&body);
-  Visit(stmt->body());
-
-  // Check stack before looping.
-  __ StackLimitCheck(&stack_limit_hit);
-  __ bind(&stack_check_success);
-
-  __ bind(loop_statement.continue_target());
-  SetStatementPosition(stmt->condition_position());
-  VisitForControl(stmt->cond(), &body, loop_statement.break_target());
-
-  __ bind(&stack_limit_hit);
-  StackCheckStub stack_stub;
-  __ CallStub(&stack_stub);
-  __ jmp(&stack_check_success);
-
-  __ bind(loop_statement.break_target());
-
-  decrement_loop_depth();
+void FastCodeGenSyntaxChecker::VisitDoWhileStatement(DoWhileStatement* stmt) {
+  BAILOUT("DoWhileStatement");
 }
 
 
-void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
-  Comment cmnt(masm_, "[ WhileStatement");
-  SetStatementPosition(stmt);
-  Label body, stack_limit_hit, stack_check_success;
-
-  Iteration loop_statement(this, stmt);
-  increment_loop_depth();
-
-  // Emit the test at the bottom of the loop.
-  __ jmp(loop_statement.continue_target());
-
-  __ bind(&body);
-  Visit(stmt->body());
-
-  __ bind(loop_statement.continue_target());
-  // Check stack before looping.
-  __ StackLimitCheck(&stack_limit_hit);
-  __ bind(&stack_check_success);
-
-  VisitForControl(stmt->cond(), &body, loop_statement.break_target());
-
-  __ bind(&stack_limit_hit);
-  StackCheckStub stack_stub;
-  __ CallStub(&stack_stub);
-  __ jmp(&stack_check_success);
-
-  __ bind(loop_statement.break_target());
-  decrement_loop_depth();
+void FastCodeGenSyntaxChecker::VisitWhileStatement(WhileStatement* stmt) {
+  BAILOUT("WhileStatement");
 }
 
 
-void FastCodeGenerator::VisitForStatement(ForStatement* stmt) {
-  UNREACHABLE();
+void FastCodeGenSyntaxChecker::VisitForStatement(ForStatement* stmt) {
+  BAILOUT("ForStatement");
 }
 
 
-void FastCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
-  UNREACHABLE();
+void FastCodeGenSyntaxChecker::VisitForInStatement(ForInStatement* stmt) {
+  BAILOUT("ForInStatement");
 }
 
 
-void FastCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
-  Comment cmnt(masm_, "[ TryCatchStatement");
-  SetStatementPosition(stmt);
-  // The try block adds a handler to the exception handler chain
-  // before entering, and removes it again when exiting normally.
-  // If an exception is thrown during execution of the try block,
-  // control is passed to the handler, which also consumes the handler.
-  // At this point, the exception is in a register, and store it in
-  // the temporary local variable (prints as ".catch-var") before
-  // executing the catch block. The catch block has been rewritten
-  // to introduce a new scope to bind the catch variable and to remove
-  // that scope again afterwards.
-
-  Label try_handler_setup, catch_entry, done;
-  __ Call(&try_handler_setup);
-  // Try handler code, exception in result register.
-
-  // Store exception in local .catch variable before executing catch block.
-  {
-    // The catch variable is *always* a variable proxy for a local variable.
-    Variable* catch_var = stmt->catch_var()->AsVariableProxy()->AsVariable();
-    ASSERT_NOT_NULL(catch_var);
-    Slot* variable_slot = catch_var->slot();
-    ASSERT_NOT_NULL(variable_slot);
-    ASSERT_EQ(Slot::LOCAL, variable_slot->type());
-    StoreToFrameField(SlotOffset(variable_slot), result_register());
-  }
-
-  Visit(stmt->catch_block());
-  __ jmp(&done);
-
-  // Try block code. Sets up the exception handler chain.
-  __ bind(&try_handler_setup);
-  {
-    TryCatch try_block(this, &catch_entry);
-    __ PushTryHandler(IN_JAVASCRIPT, TRY_CATCH_HANDLER);
-    Visit(stmt->try_block());
-    __ PopTryHandler();
-  }
-  __ bind(&done);
+void FastCodeGenSyntaxChecker::VisitTryCatchStatement(TryCatchStatement* stmt) {
+  BAILOUT("TryCatchStatement");
 }
 
 
-void FastCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
-  Comment cmnt(masm_, "[ TryFinallyStatement");
-  SetStatementPosition(stmt);
-  // Try finally is compiled by setting up a try-handler on the stack while
-  // executing the try body, and removing it again afterwards.
-  //
-  // The try-finally construct can enter the finally block in three ways:
-  // 1. By exiting the try-block normally. This removes the try-handler and
-  //      calls the finally block code before continuing.
-  // 2. By exiting the try-block with a function-local control flow transfer
-  //    (break/continue/return). The site of the, e.g., break removes the
-  //    try handler and calls the finally block code before continuing
-  //    its outward control transfer.
-  // 3. by exiting the try-block with a thrown exception.
-  //    This can happen in nested function calls. It traverses the try-handler
-  //    chain and consumes the try-handler entry before jumping to the
-  //    handler code. The handler code then calls the finally-block before
-  //    rethrowing the exception.
-  //
-  // The finally block must assume a return address on top of the stack
-  // (or in the link register on ARM chips) and a value (return value or
-  // exception) in the result register (rax/eax/r0), both of which must
-  // be preserved. The return address isn't GC-safe, so it should be
-  // cooked before GC.
-  Label finally_entry;
-  Label try_handler_setup;
-
-  // Setup the try-handler chain. Use a call to
-  // Jump to try-handler setup and try-block code. Use call to put try-handler
-  // address on stack.
-  __ Call(&try_handler_setup);
-  // Try handler code. Return address of call is pushed on handler stack.
-  {
-    // This code is only executed during stack-handler traversal when an
-    // exception is thrown. The execption is in the result register, which
-    // is retained by the finally block.
-    // Call the finally block and then rethrow the exception.
-    __ Call(&finally_entry);
-    __ push(result_register());
-    __ CallRuntime(Runtime::kReThrow, 1);
-  }
-
-  __ bind(&finally_entry);
-  {
-    // Finally block implementation.
-    Finally finally_block(this);
-    EnterFinallyBlock();
-    Visit(stmt->finally_block());
-    ExitFinallyBlock();  // Return to the calling code.
-  }
-
-  __ bind(&try_handler_setup);
-  {
-    // Setup try handler (stack pointer registers).
-    TryFinally try_block(this, &finally_entry);
-    __ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER);
-    Visit(stmt->try_block());
-    __ PopTryHandler();
-  }
-  // Execute the finally block on the way out.
-  __ Call(&finally_entry);
+void FastCodeGenSyntaxChecker::VisitTryFinallyStatement(
+    TryFinallyStatement* stmt) {
+  BAILOUT("TryFinallyStatement");
 }
 
 
-void FastCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
-  Comment cmnt(masm_, "[ DebuggerStatement");
-  SetStatementPosition(stmt);
-  __ CallRuntime(Runtime::kDebugBreak, 0);
-  // Ignore the return value.
-#endif
+void FastCodeGenSyntaxChecker::VisitDebuggerStatement(
+    DebuggerStatement* stmt) {
+  BAILOUT("DebuggerStatement");
 }
 
 
-void FastCodeGenerator::VisitFunctionBoilerplateLiteral(
+void FastCodeGenSyntaxChecker::VisitFunctionLiteral(FunctionLiteral* expr) {
+  BAILOUT("FunctionLiteral");
+}
+
+
+void FastCodeGenSyntaxChecker::VisitFunctionBoilerplateLiteral(
     FunctionBoilerplateLiteral* expr) {
+  BAILOUT("FunctionBoilerplateLiteral");
+}
+
+
+void FastCodeGenSyntaxChecker::VisitConditional(Conditional* expr) {
+  BAILOUT("Conditional");
+}
+
+
+void FastCodeGenSyntaxChecker::VisitSlot(Slot* expr) {
   UNREACHABLE();
 }
 
 
-void FastCodeGenerator::VisitConditional(Conditional* expr) {
-  Comment cmnt(masm_, "[ Conditional");
-  Label true_case, false_case, done;
-  VisitForControl(expr->condition(), &true_case, &false_case);
-
-  __ bind(&true_case);
-  Visit(expr->then_expression());
-  // If control flow falls through Visit, jump to done.
-  if (context_ == Expression::kEffect || context_ == Expression::kValue) {
-    __ jmp(&done);
-  }
-
-  __ bind(&false_case);
-  Visit(expr->else_expression());
-  // If control flow falls through Visit, merge it with true case here.
-  if (context_ == Expression::kEffect || context_ == Expression::kValue) {
-    __ bind(&done);
-  }
+void FastCodeGenSyntaxChecker::VisitVariableProxy(VariableProxy* expr) {
+  // Only global variable references are supported.
+  Variable* var = expr->var();
+  if (!var->is_global()) BAILOUT("Non-global variable");
 }
 
 
-void FastCodeGenerator::VisitSlot(Slot* expr) {
-  // Slots do not appear directly in the AST.
-  UNREACHABLE();
+void FastCodeGenSyntaxChecker::VisitLiteral(Literal* expr) {
+  BAILOUT("Literal");
 }
 
 
-void FastCodeGenerator::VisitLiteral(Literal* expr) {
-  Comment cmnt(masm_, "[ Literal");
-  Apply(context_, expr);
+void FastCodeGenSyntaxChecker::VisitRegExpLiteral(RegExpLiteral* expr) {
+  BAILOUT("RegExpLiteral");
 }
 
 
-void FastCodeGenerator::VisitAssignment(Assignment* expr) {
-  Comment cmnt(masm_, "[ Assignment");
-  // Left-hand side can only be a property, a global or a (parameter or local)
-  // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
-  enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
-  LhsKind assign_type = VARIABLE;
+void FastCodeGenSyntaxChecker::VisitObjectLiteral(ObjectLiteral* expr) {
+  BAILOUT("ObjectLiteral");
+}
+
+
+void FastCodeGenSyntaxChecker::VisitArrayLiteral(ArrayLiteral* expr) {
+  BAILOUT("ArrayLiteral");
+}
+
+
+void FastCodeGenSyntaxChecker::VisitCatchExtensionObject(
+    CatchExtensionObject* expr) {
+  BAILOUT("CatchExtensionObject");
+}
+
+
+void FastCodeGenSyntaxChecker::VisitAssignment(Assignment* expr) {
+  // Only simple assignments to named properties of 'this' are supported.
+  if (expr->op() != Token::ASSIGN) BAILOUT("Non-simple assignment");
+
   Property* prop = expr->target()->AsProperty();
-  if (prop != NULL) {
-    assign_type =
-        (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+  if (prop == NULL) BAILOUT("Non-property assignment");
+  VariableProxy* proxy = prop->obj()->AsVariableProxy();
+  if (proxy == NULL || !proxy->var()->is_this()) {
+    BAILOUT("Non-this-property assignment");
+  }
+  if (!prop->key()->IsPropertyName()) {
+    BAILOUT("Non-named-property assignment");
   }
 
-  // Evaluate LHS expression.
-  switch (assign_type) {
-    case VARIABLE:
-      // Nothing to do here.
-      break;
-    case NAMED_PROPERTY:
-      VisitForValue(prop->obj(), kStack);
-      break;
-    case KEYED_PROPERTY:
-      VisitForValue(prop->obj(), kStack);
-      VisitForValue(prop->key(), kStack);
-      break;
-  }
-
-  // If we have a compound assignment: Get value of LHS expression and
-  // store in on top of the stack.
-  if (expr->is_compound()) {
-    Location saved_location = location_;
-    location_ = kStack;
-    switch (assign_type) {
-      case VARIABLE:
-        EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
-                         Expression::kValue);
-        break;
-      case NAMED_PROPERTY:
-        EmitNamedPropertyLoad(prop);
-        __ push(result_register());
-        break;
-      case KEYED_PROPERTY:
-        EmitKeyedPropertyLoad(prop);
-        __ push(result_register());
-        break;
-    }
-    location_ = saved_location;
-  }
-
-  // Evaluate RHS expression.
-  Expression* rhs = expr->value();
-  VisitForValue(rhs, kAccumulator);
-
-  // If we have a compount assignment: Apply operator.
-  if (expr->is_compound()) {
-    Location saved_location = location_;
-    location_ = kAccumulator;
-    EmitBinaryOp(expr->binary_op(), Expression::kValue);
-    location_ = saved_location;
-  }
-
-  // Record source position before possible IC call.
-  SetSourcePosition(expr->position());
-
-  // Store the value.
-  switch (assign_type) {
-    case VARIABLE:
-      EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
-                             context_);
-      break;
-    case NAMED_PROPERTY:
-      EmitNamedPropertyAssignment(expr);
-      break;
-    case KEYED_PROPERTY:
-      EmitKeyedPropertyAssignment(expr);
-      break;
-  }
+  Visit(expr->value());
 }
 
 
-void FastCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
-  // Call runtime routine to allocate the catch extension object and
-  // assign the exception value to the catch variable.
-  Comment cmnt(masm_, "[ CatchExtensionObject");
-  VisitForValue(expr->key(), kStack);
-  VisitForValue(expr->value(), kStack);
-  // Create catch extension object.
-  __ CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
-  Apply(context_, result_register());
+void FastCodeGenSyntaxChecker::VisitThrow(Throw* expr) {
+  BAILOUT("Throw");
 }
 
 
-void FastCodeGenerator::VisitThrow(Throw* expr) {
-  Comment cmnt(masm_, "[ Throw");
-  VisitForValue(expr->exception(), kStack);
-  __ CallRuntime(Runtime::kThrow, 1);
-  // Never returns here.
+void FastCodeGenSyntaxChecker::VisitProperty(Property* expr) {
+  BAILOUT("Property");
 }
 
 
-int FastCodeGenerator::TryFinally::Exit(int stack_depth) {
-  // The macros used here must preserve the result register.
-  __ Drop(stack_depth);
-  __ PopTryHandler();
-  __ Call(finally_entry_);
-  return 0;
+void FastCodeGenSyntaxChecker::VisitCall(Call* expr) {
+  BAILOUT("Call");
 }
 
 
-int FastCodeGenerator::TryCatch::Exit(int stack_depth) {
-  // The macros used here must preserve the result register.
-  __ Drop(stack_depth);
-  __ PopTryHandler();
-  return 0;
+void FastCodeGenSyntaxChecker::VisitCallNew(CallNew* expr) {
+  BAILOUT("CallNew");
 }
 
 
-#undef __
+void FastCodeGenSyntaxChecker::VisitCallRuntime(CallRuntime* expr) {
+  BAILOUT("CallRuntime");
+}
+
+
+void FastCodeGenSyntaxChecker::VisitUnaryOperation(UnaryOperation* expr) {
+  BAILOUT("UnaryOperation");
+}
+
+
+void FastCodeGenSyntaxChecker::VisitCountOperation(CountOperation* expr) {
+  BAILOUT("CountOperation");
+}
+
+
+void FastCodeGenSyntaxChecker::VisitBinaryOperation(BinaryOperation* expr) {
+  BAILOUT("BinaryOperation");
+}
+
+
+void FastCodeGenSyntaxChecker::VisitCompareOperation(CompareOperation* expr) {
+  BAILOUT("CompareOperation");
+}
+
+
+void FastCodeGenSyntaxChecker::VisitThisFunction(ThisFunction* expr) {
+  BAILOUT("ThisFunction");
+}
+
+#undef BAILOUT
+#undef CHECK_BAILOUT
 
 
 } }  // namespace v8::internal
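
The net effect of the fast-codegen.cc changes above is that the file now holds only a pre-pass: each visit method either accepts a node or records a bailout reason through the BAILOUT macro. A minimal sketch of how a caller might gate the speculative backend on this checker follows; the CanUseFastBackend helper is illustrative, and only FastCodeGenSyntaxChecker, Check, has_supported_syntax, and FLAG_fast_compiler come from this patch:

    // Illustrative driver, not part of the patch.
    static bool CanUseFastBackend(FunctionLiteral* fun) {
      if (!FLAG_fast_compiler) return false;
      FastCodeGenSyntaxChecker checker;
      checker.Check(fun);  // Walks declarations and body; stops at the first
                           // unsupported construct and records its reason.
      return checker.has_supported_syntax();
    }
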
diff --git a/src/fast-codegen.h b/src/fast-codegen.h
index c26e0f3..3e0bb41 100644
--- a/src/fast-codegen.h
+++ b/src/fast-codegen.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -35,382 +35,26 @@
 namespace v8 {
 namespace internal {
 
-// -----------------------------------------------------------------------------
-// Fast code generator.
-
-class FastCodeGenerator: public AstVisitor {
+class FastCodeGenSyntaxChecker: public AstVisitor {
  public:
-  FastCodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval)
-      : masm_(masm),
-        function_(NULL),
-        script_(script),
-        is_eval_(is_eval),
-        nesting_stack_(NULL),
-        loop_depth_(0),
-        location_(kStack),
-        true_label_(NULL),
-        false_label_(NULL) {
-  }
+  FastCodeGenSyntaxChecker() : has_supported_syntax_(true) {}
 
-  static Handle<Code> MakeCode(FunctionLiteral* fun,
-                               Handle<Script> script,
-                               bool is_eval);
+  void Check(FunctionLiteral* fun);
 
-  void Generate(FunctionLiteral* fun);
+  bool has_supported_syntax() { return has_supported_syntax_; }
 
  private:
-  class Breakable;
-  class Iteration;
-  class TryCatch;
-  class TryFinally;
-  class Finally;
-  class ForIn;
-
-  class NestedStatement BASE_EMBEDDED {
-   public:
-    explicit NestedStatement(FastCodeGenerator* codegen) : codegen_(codegen) {
-      // Link into codegen's nesting stack.
-      previous_ = codegen->nesting_stack_;
-      codegen->nesting_stack_ = this;
-    }
-    virtual ~NestedStatement() {
-      // Unlink from codegen's nesting stack.
-      ASSERT_EQ(this, codegen_->nesting_stack_);
-      codegen_->nesting_stack_ = previous_;
-    }
-
-    virtual Breakable* AsBreakable() { return NULL; }
-    virtual Iteration* AsIteration() { return NULL; }
-    virtual TryCatch* AsTryCatch() { return NULL; }
-    virtual TryFinally* AsTryFinally() { return NULL; }
-    virtual Finally* AsFinally() { return NULL; }
-    virtual ForIn* AsForIn() { return NULL; }
-
-    virtual bool IsContinueTarget(Statement* target) { return false; }
-    virtual bool IsBreakTarget(Statement* target) { return false; }
-
-    // Generate code to leave the nested statement. This includes
-    // cleaning up any stack elements in use and restoring the
-    // stack to the expectations of the surrounding statements.
-    // Takes a number of stack elements currently on top of the
-    // nested statement's stack, and returns a number of stack
-    // elements left on top of the surrounding statement's stack.
-    // The generated code must preserve the result register (which
-    // contains the value in case of a return).
-    virtual int Exit(int stack_depth) {
-      // Default implementation for the case where there is
-      // nothing to clean up.
-      return stack_depth;
-    }
-    NestedStatement* outer() { return previous_; }
-   protected:
-    MacroAssembler* masm() { return codegen_->masm(); }
-   private:
-    FastCodeGenerator* codegen_;
-    NestedStatement* previous_;
-    DISALLOW_COPY_AND_ASSIGN(NestedStatement);
-  };
-
-  class Breakable : public NestedStatement {
-   public:
-    Breakable(FastCodeGenerator* codegen,
-              BreakableStatement* break_target)
-        : NestedStatement(codegen),
-          target_(break_target) {}
-    virtual ~Breakable() {}
-    virtual Breakable* AsBreakable() { return this; }
-    virtual bool IsBreakTarget(Statement* statement) {
-      return target_ == statement;
-    }
-    BreakableStatement* statement() { return target_; }
-    Label* break_target() { return &break_target_label_; }
-   private:
-    BreakableStatement* target_;
-    Label break_target_label_;
-    DISALLOW_COPY_AND_ASSIGN(Breakable);
-  };
-
-  class Iteration : public Breakable {
-   public:
-    Iteration(FastCodeGenerator* codegen,
-              IterationStatement* iteration_statement)
-        : Breakable(codegen, iteration_statement) {}
-    virtual ~Iteration() {}
-    virtual Iteration* AsIteration() { return this; }
-    virtual bool IsContinueTarget(Statement* statement) {
-      return this->statement() == statement;
-    }
-    Label* continue_target() { return &continue_target_label_; }
-   private:
-    Label continue_target_label_;
-    DISALLOW_COPY_AND_ASSIGN(Iteration);
-  };
-
-  // The environment inside the try block of a try/catch statement.
-  class TryCatch : public NestedStatement {
-   public:
-    explicit TryCatch(FastCodeGenerator* codegen, Label* catch_entry)
-        : NestedStatement(codegen), catch_entry_(catch_entry) { }
-    virtual ~TryCatch() {}
-    virtual TryCatch* AsTryCatch() { return this; }
-    Label* catch_entry() { return catch_entry_; }
-    virtual int Exit(int stack_depth);
-   private:
-    Label* catch_entry_;
-    DISALLOW_COPY_AND_ASSIGN(TryCatch);
-  };
-
-  // The environment inside the try block of a try/finally statement.
-  class TryFinally : public NestedStatement {
-   public:
-    explicit TryFinally(FastCodeGenerator* codegen, Label* finally_entry)
-        : NestedStatement(codegen), finally_entry_(finally_entry) { }
-    virtual ~TryFinally() {}
-    virtual TryFinally* AsTryFinally() { return this; }
-    Label* finally_entry() { return finally_entry_; }
-    virtual int Exit(int stack_depth);
-   private:
-    Label* finally_entry_;
-    DISALLOW_COPY_AND_ASSIGN(TryFinally);
-  };
-
-  // A FinallyEnvironment represents being inside a finally block.
-  // Abnormal termination of the finally block needs to clean up
-  // the block's parameters from the stack.
-  class Finally : public NestedStatement {
-   public:
-    explicit Finally(FastCodeGenerator* codegen) : NestedStatement(codegen) { }
-    virtual ~Finally() {}
-    virtual Finally* AsFinally() { return this; }
-    virtual int Exit(int stack_depth) {
-      return stack_depth + kFinallyStackElementCount;
-    }
-   private:
-    // Number of extra stack slots occupied during a finally block.
-    static const int kFinallyStackElementCount = 2;
-    DISALLOW_COPY_AND_ASSIGN(Finally);
-  };
-
-  // A ForInEnvironment represents being inside a for-in loop.
-  // Abnormal termination of the for-in block needs to clean up
-  // the block's temporary storage from the stack.
-  class ForIn : public Iteration {
-   public:
-    ForIn(FastCodeGenerator* codegen,
-          ForInStatement* statement)
-        : Iteration(codegen, statement) { }
-    virtual ~ForIn() {}
-    virtual ForIn* AsForIn() { return this; }
-    virtual int Exit(int stack_depth) {
-      return stack_depth + kForInStackElementCount;
-    }
-   private:
-    // TODO(lrn): Check that this value is correct when implementing
-    // for-in.
-    static const int kForInStackElementCount = 5;
-    DISALLOW_COPY_AND_ASSIGN(ForIn);
-  };
-
-  enum Location {
-    kAccumulator,
-    kStack
-  };
-
-  int SlotOffset(Slot* slot);
-
-  // Emit code to convert a pure value (in a register, slot, as a literal,
-  // or on top of the stack) into the result expected according to an
-  // expression context.
-  void Apply(Expression::Context context, Register reg);
-  void Apply(Expression::Context context, Slot* slot);
-  void Apply(Expression::Context context, Literal* lit);
-  void ApplyTOS(Expression::Context context);
-
-  // Emit code to discard count elements from the top of stack, then convert
-  // a pure value into the result expected according to an expression
-  // context.
-  void DropAndApply(int count, Expression::Context context, Register reg);
-
-  // Emit code to convert pure control flow to a pair of labels into the
-  // result expected according to an expression context.
-  void Apply(Expression::Context context,
-             Label* materialize_true,
-             Label* materialize_false);
-
-  // Helper function to convert a pure value into a test context.  The value
-  // is expected on the stack or the accumulator, depending on the platform.
-  // See the platform-specific implementation for details.
-  void DoTest(Expression::Context context);
-
-  void Move(Slot* dst, Register source, Register scratch1, Register scratch2);
-  void Move(Register dst, Slot* source);
-
-  // Return an operand used to read/write to a known (ie, non-LOOKUP) slot.
-  // May emit code to traverse the context chain, destroying the scratch
-  // register.
-  MemOperand EmitSlotSearch(Slot* slot, Register scratch);
-
-  void VisitForEffect(Expression* expr) {
-    Expression::Context saved_context = context_;
-    context_ = Expression::kEffect;
-    Visit(expr);
-    context_ = saved_context;
-  }
-
-  void VisitForValue(Expression* expr, Location where) {
-    Expression::Context saved_context = context_;
-    Location saved_location = location_;
-    context_ = Expression::kValue;
-    location_ = where;
-    Visit(expr);
-    context_ = saved_context;
-    location_ = saved_location;
-  }
-
-  void VisitForControl(Expression* expr, Label* if_true, Label* if_false) {
-    Expression::Context saved_context = context_;
-    Label* saved_true = true_label_;
-    Label* saved_false = false_label_;
-    context_ = Expression::kTest;
-    true_label_ = if_true;
-    false_label_ = if_false;
-    Visit(expr);
-    context_ = saved_context;
-    true_label_ = saved_true;
-    false_label_ = saved_false;
-  }
-
-  void VisitForValueControl(Expression* expr,
-                            Location where,
-                            Label* if_true,
-                            Label* if_false) {
-    Expression::Context saved_context = context_;
-    Location saved_location = location_;
-    Label* saved_true = true_label_;
-    Label* saved_false = false_label_;
-    context_ = Expression::kValueTest;
-    location_ = where;
-    true_label_ = if_true;
-    false_label_ = if_false;
-    Visit(expr);
-    context_ = saved_context;
-    location_ = saved_location;
-    true_label_ = saved_true;
-    false_label_ = saved_false;
-  }
-
-  void VisitForControlValue(Expression* expr,
-                            Location where,
-                            Label* if_true,
-                            Label* if_false) {
-    Expression::Context saved_context = context_;
-    Location saved_location = location_;
-    Label* saved_true = true_label_;
-    Label* saved_false = false_label_;
-    context_ = Expression::kTestValue;
-    location_ = where;
-    true_label_ = if_true;
-    false_label_ = if_false;
-    Visit(expr);
-    context_ = saved_context;
-    location_ = saved_location;
-    true_label_ = saved_true;
-    false_label_ = saved_false;
-  }
-
-  void VisitDeclarations(ZoneList<Declaration*>* declarations);
-  void DeclareGlobals(Handle<FixedArray> pairs);
-
-  // Platform-specific return sequence
-  void EmitReturnSequence(int position);
-
-  // Platform-specific code sequences for calls
-  void EmitCallWithStub(Call* expr);
-  void EmitCallWithIC(Call* expr, Handle<Object> name, RelocInfo::Mode mode);
-
-  // Platform-specific code for loading variables.
-  void EmitVariableLoad(Variable* expr, Expression::Context context);
-
-  // Platform-specific support for compiling assignments.
-
-  // Load a value from a named property.
-  // The receiver is left on the stack by the IC.
-  void EmitNamedPropertyLoad(Property* expr);
-
-  // Load a value from a keyed property.
-  // The receiver and the key is left on the stack by the IC.
-  void EmitKeyedPropertyLoad(Property* expr);
-
-  // Apply the compound assignment operator. Expects the left operand on top
-  // of the stack and the right one in the accumulator.
-  void EmitBinaryOp(Token::Value op, Expression::Context context);
-
-  // Complete a variable assignment.  The right-hand-side value is expected
-  // in the accumulator.
-  void EmitVariableAssignment(Variable* var, Expression::Context context);
-
-  // Complete a named property assignment.  The receiver is expected on top
-  // of the stack and the right-hand-side value in the accumulator.
-  void EmitNamedPropertyAssignment(Assignment* expr);
-
-  // Complete a keyed property assignment.  The receiver and key are
-  // expected on top of the stack and the right-hand-side value in the
-  // accumulator.
-  void EmitKeyedPropertyAssignment(Assignment* expr);
-
-  void SetFunctionPosition(FunctionLiteral* fun);
-  void SetReturnPosition(FunctionLiteral* fun);
-  void SetStatementPosition(Statement* stmt);
-  void SetStatementPosition(int pos);
-  void SetSourcePosition(int pos);
-
-  // Non-local control flow support.
-  void EnterFinallyBlock();
-  void ExitFinallyBlock();
-
-  // Loop nesting counter.
-  int loop_depth() { return loop_depth_; }
-  void increment_loop_depth() { loop_depth_++; }
-  void decrement_loop_depth() {
-    ASSERT(loop_depth_ > 0);
-    loop_depth_--;
-  }
-
-  MacroAssembler* masm() { return masm_; }
-  static Register result_register();
-  static Register context_register();
-
-  // Set fields in the stack frame. Offsets are the frame pointer relative
-  // offsets defined in, e.g., StandardFrameConstants.
-  void StoreToFrameField(int frame_offset, Register value);
-
-  // Load a value from the current context. Indices are defined as an enum
-  // in v8::internal::Context.
-  void LoadContextField(Register dst, int context_index);
+  void VisitDeclarations(ZoneList<Declaration*>* decls);
+  void VisitStatements(ZoneList<Statement*>* stmts);
 
   // AST node visit functions.
 #define DECLARE_VISIT(type) virtual void Visit##type(type* node);
   AST_NODE_LIST(DECLARE_VISIT)
 #undef DECLARE_VISIT
-  // Handles the shortcutted logical binary operations in VisitBinaryOperation.
-  void EmitLogicalOperation(BinaryOperation* expr);
 
-  MacroAssembler* masm_;
-  FunctionLiteral* function_;
-  Handle<Script> script_;
-  bool is_eval_;
-  Label return_label_;
-  NestedStatement* nesting_stack_;
-  int loop_depth_;
+  bool has_supported_syntax_;
 
-  Expression::Context context_;
-  Location location_;
-  Label* true_label_;
-  Label* false_label_;
-
-  friend class NestedStatement;
-
-  DISALLOW_COPY_AND_ASSIGN(FastCodeGenerator);
+  DISALLOW_COPY_AND_ASSIGN(FastCodeGenSyntaxChecker);
 };
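
For reference, the DECLARE_VISIT/AST_NODE_LIST pair in the class body expands to one virtual declaration per AST node type listed in ast.h; two representative expansions out of the full list:

    virtual void VisitBlock(Block* node);
    virtual void VisitAssignment(Assignment* node);
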
 
 
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 5c0aa0c..0208813 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -143,12 +143,14 @@
 DEFINE_bool(strict, false, "strict error checking")
 DEFINE_int(min_preparse_length, 1024,
            "minimum length for automatic enable preparsing")
-DEFINE_bool(fast_compiler, true,
-            "use the fast-mode compiler for some top-level code")
-DEFINE_bool(trace_bailout, false,
-            "print reasons for failing to use fast compilation")
+DEFINE_bool(full_compiler, true, "enable dedicated backend for run-once code")
+DEFINE_bool(fast_compiler, false, "enable speculative optimizing backend")
+DEFINE_bool(always_full_compiler, false,
+            "try to use the dedicated run-once backend for all code")
 DEFINE_bool(always_fast_compiler, false,
-            "always try using the fast compiler")
+            "try to use the speculative optimizing backend for all code")
+DEFINE_bool(trace_bailout, false,
+            "print reasons for falling back to using the classic V8 backend")
 
 // compilation-cache.cc
 DEFINE_bool(compilation_cache, true, "enable compilation cache")
@@ -201,6 +203,11 @@
 DEFINE_bool(use_big_map_space, true,
             "Use big map space, but don't compact if it grew too big.")
 
+DEFINE_int(max_map_space_pages, MapSpace::kMaxMapPageIndex - 1,
+           "Maximum number of pages in map space which still allows to encode "
+           "forwarding pointers.  That's actually a constant, but it's useful "
+           "to control it with a flag for better testing.")
+
 // mksnapshot.cc
 DEFINE_bool(h, false, "print this message")
 DEFINE_bool(new_snapshot, true, "use new snapshot implementation")
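
As with the other flags in this header, each DEFINE_* line expands in definition mode to a global with the stated default, roughly as sketched below (simplified; the real macros also register the flag's name and comment string for --help output and command-line parsing):

    bool FLAG_full_compiler = true;
    bool FLAG_fast_compiler = false;
    bool FLAG_always_full_compiler = false;
    bool FLAG_always_fast_compiler = false;
    bool FLAG_trace_bailout = false;
    int FLAG_max_map_space_pages = MapSpace::kMaxMapPageIndex - 1;
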
diff --git a/src/frames.h b/src/frames.h
index 024065a..19860ad 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -607,11 +607,12 @@
   void Advance();
   void Reset();
 
- private:
   static bool IsWithinBounds(
       Address low_bound, Address high_bound, Address addr) {
     return low_bound <= addr && addr <= high_bound;
   }
+
+ private:
   bool IsValidStackAddress(Address addr) const {
     return IsWithinBounds(low_bound_, high_bound_, addr);
   }
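
The frames.h hunk only moves IsWithinBounds above the private: label, making the static helper callable from outside the iterator. A hedged usage sketch; the SafeStackFrameIterator class name is inferred from context rather than shown in the hunk:

    bool AddressOnStack(Address low_bound, Address high_bound, Address addr) {
      // Bounds are inclusive at both ends, matching the helper's definition.
      return SafeStackFrameIterator::IsWithinBounds(low_bound, high_bound, addr);
    }
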
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
new file mode 100644
index 0000000..5bd294f
--- /dev/null
+++ b/src/full-codegen.cc
@@ -0,0 +1,1168 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "compiler.h"
+#include "full-codegen.h"
+#include "stub-cache.h"
+#include "debug.h"
+
+namespace v8 {
+namespace internal {
+
+#define BAILOUT(reason)                         \
+  do {                                          \
+    if (FLAG_trace_bailout) {                   \
+      PrintF("%s\n", reason);                   \
+    }                                           \
+    has_supported_syntax_ = false;              \
+    return;                                     \
+  } while (false)
+
+
+#define CHECK_BAILOUT                           \
+  do {                                          \
+    if (!has_supported_syntax_) return;         \
+  } while (false)
+
+
+void FullCodeGenSyntaxChecker::Check(FunctionLiteral* fun) {
+  Scope* scope = fun->scope();
+
+  if (scope->num_heap_slots() > 0) {
+    // We support functions with a local context if they do not have
+    // parameters that need to be copied into the context.
+    for (int i = 0, len = scope->num_parameters(); i < len; i++) {
+      Slot* slot = scope->parameter(i)->slot();
+      if (slot != NULL && slot->type() == Slot::CONTEXT) {
+        BAILOUT("Function has context-allocated parameters.");
+      }
+    }
+  }
+
+  VisitDeclarations(scope->declarations());
+  CHECK_BAILOUT;
+
+  VisitStatements(fun->body());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitDeclarations(
+    ZoneList<Declaration*>* decls) {
+  for (int i = 0; i < decls->length(); i++) {
+    Visit(decls->at(i));
+    CHECK_BAILOUT;
+  }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitStatements(ZoneList<Statement*>* stmts) {
+  for (int i = 0, len = stmts->length(); i < len; i++) {
+    Visit(stmts->at(i));
+    CHECK_BAILOUT;
+  }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitDeclaration(Declaration* decl) {
+  Property* prop = decl->proxy()->AsProperty();
+  if (prop != NULL) {
+    Visit(prop->obj());
+    Visit(prop->key());
+  }
+
+  if (decl->fun() != NULL) {
+    Visit(decl->fun());
+  }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitBlock(Block* stmt) {
+  VisitStatements(stmt->statements());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitExpressionStatement(
+    ExpressionStatement* stmt) {
+  Visit(stmt->expression());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitEmptyStatement(EmptyStatement* stmt) {
+  // Supported.
+}
+
+
+void FullCodeGenSyntaxChecker::VisitIfStatement(IfStatement* stmt) {
+  Visit(stmt->condition());
+  CHECK_BAILOUT;
+  Visit(stmt->then_statement());
+  CHECK_BAILOUT;
+  Visit(stmt->else_statement());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitContinueStatement(ContinueStatement* stmt) {
+  // Supported.
+}
+
+
+void FullCodeGenSyntaxChecker::VisitBreakStatement(BreakStatement* stmt) {
+  // Supported.
+}
+
+
+void FullCodeGenSyntaxChecker::VisitReturnStatement(ReturnStatement* stmt) {
+  Visit(stmt->expression());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitWithEnterStatement(
+    WithEnterStatement* stmt) {
+  Visit(stmt->expression());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitWithExitStatement(WithExitStatement* stmt) {
+  // Supported.
+}
+
+
+void FullCodeGenSyntaxChecker::VisitSwitchStatement(SwitchStatement* stmt) {
+  BAILOUT("SwitchStatement");
+}
+
+
+void FullCodeGenSyntaxChecker::VisitDoWhileStatement(DoWhileStatement* stmt) {
+  Visit(stmt->cond());
+  CHECK_BAILOUT;
+  Visit(stmt->body());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitWhileStatement(WhileStatement* stmt) {
+  Visit(stmt->cond());
+  CHECK_BAILOUT;
+  Visit(stmt->body());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitForStatement(ForStatement* stmt) {
+  if (!FLAG_always_full_compiler) BAILOUT("ForStatement");
+  if (stmt->init() != NULL) {
+    Visit(stmt->init());
+    CHECK_BAILOUT;
+  }
+  if (stmt->cond() != NULL) {
+    Visit(stmt->cond());
+    CHECK_BAILOUT;
+  }
+  Visit(stmt->body());
+  if (stmt->next() != NULL) {
+    CHECK_BAILOUT;
+    Visit(stmt->next());
+  }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitForInStatement(ForInStatement* stmt) {
+  BAILOUT("ForInStatement");
+}
+
+
+void FullCodeGenSyntaxChecker::VisitTryCatchStatement(TryCatchStatement* stmt) {
+  Visit(stmt->try_block());
+  CHECK_BAILOUT;
+  Visit(stmt->catch_block());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitTryFinallyStatement(
+    TryFinallyStatement* stmt) {
+  Visit(stmt->try_block());
+  CHECK_BAILOUT;
+  Visit(stmt->finally_block());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitDebuggerStatement(
+    DebuggerStatement* stmt) {
+  // Supported.
+}
+
+
+void FullCodeGenSyntaxChecker::VisitFunctionLiteral(FunctionLiteral* expr) {
+  // Supported.
+}
+
+
+void FullCodeGenSyntaxChecker::VisitFunctionBoilerplateLiteral(
+    FunctionBoilerplateLiteral* expr) {
+  BAILOUT("FunctionBoilerplateLiteral");
+}
+
+
+void FullCodeGenSyntaxChecker::VisitConditional(Conditional* expr) {
+  Visit(expr->condition());
+  CHECK_BAILOUT;
+  Visit(expr->then_expression());
+  CHECK_BAILOUT;
+  Visit(expr->else_expression());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitSlot(Slot* expr) {
+  UNREACHABLE();
+}
+
+
+void FullCodeGenSyntaxChecker::VisitVariableProxy(VariableProxy* expr) {
+  // Supported.
+}
+
+
+void FullCodeGenSyntaxChecker::VisitLiteral(Literal* expr) {
+  // Supported.
+}
+
+
+void FullCodeGenSyntaxChecker::VisitRegExpLiteral(RegExpLiteral* expr) {
+  // Supported.
+}
+
+
+void FullCodeGenSyntaxChecker::VisitObjectLiteral(ObjectLiteral* expr) {
+  ZoneList<ObjectLiteral::Property*>* properties = expr->properties();
+
+  for (int i = 0, len = properties->length(); i < len; i++) {
+    ObjectLiteral::Property* property = properties->at(i);
+    if (property->IsCompileTimeValue()) continue;
+    Visit(property->key());
+    CHECK_BAILOUT;
+    Visit(property->value());
+    CHECK_BAILOUT;
+  }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitArrayLiteral(ArrayLiteral* expr) {
+  ZoneList<Expression*>* subexprs = expr->values();
+  for (int i = 0, len = subexprs->length(); i < len; i++) {
+    Expression* subexpr = subexprs->at(i);
+    if (subexpr->AsLiteral() != NULL) continue;
+    if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+    Visit(subexpr);
+    CHECK_BAILOUT;
+  }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitCatchExtensionObject(
+    CatchExtensionObject* expr) {
+  Visit(expr->key());
+  CHECK_BAILOUT;
+  Visit(expr->value());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitAssignment(Assignment* expr) {
+  Token::Value op = expr->op();
+  if (op == Token::INIT_CONST) BAILOUT("initialize constant");
+
+  Variable* var = expr->target()->AsVariableProxy()->AsVariable();
+  Property* prop = expr->target()->AsProperty();
+  ASSERT(var == NULL || prop == NULL);
+  if (var != NULL) {
+    if (var->mode() == Variable::CONST) BAILOUT("Assignment to const");
+    // All other variables are supported.
+  } else if (prop != NULL) {
+    Visit(prop->obj());
+    CHECK_BAILOUT;
+    Visit(prop->key());
+    CHECK_BAILOUT;
+  } else {
+    // This is a throw reference error.
+    BAILOUT("non-variable/non-property assignment");
+  }
+
+  Visit(expr->value());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitThrow(Throw* expr) {
+  Visit(expr->exception());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitProperty(Property* expr) {
+  Visit(expr->obj());
+  CHECK_BAILOUT;
+  Visit(expr->key());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitCall(Call* expr) {
+  Expression* fun = expr->expression();
+  ZoneList<Expression*>* args = expr->arguments();
+  Variable* var = fun->AsVariableProxy()->AsVariable();
+
+  // Check for supported calls.
+  if (var != NULL && var->is_possibly_eval()) {
+    BAILOUT("call to the identifier 'eval'");
+  } else if (var != NULL && !var->is_this() && var->is_global()) {
+    // Calls to global variables are supported.
+  } else if (var != NULL && var->slot() != NULL &&
+             var->slot()->type() == Slot::LOOKUP) {
+    BAILOUT("call to a lookup slot");
+  } else if (fun->AsProperty() != NULL) {
+    Property* prop = fun->AsProperty();
+    Visit(prop->obj());
+    CHECK_BAILOUT;
+    Visit(prop->key());
+    CHECK_BAILOUT;
+  } else {
+    // Otherwise the call is supported if the function expression is.
+    Visit(fun);
+  }
+  // Check all arguments to the call.
+  for (int i = 0; i < args->length(); i++) {
+    Visit(args->at(i));
+    CHECK_BAILOUT;
+  }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitCallNew(CallNew* expr) {
+  Visit(expr->expression());
+  CHECK_BAILOUT;
+  ZoneList<Expression*>* args = expr->arguments();
+  // Check all arguments to the call.
+  for (int i = 0; i < args->length(); i++) {
+    Visit(args->at(i));
+    CHECK_BAILOUT;
+  }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitCallRuntime(CallRuntime* expr) {
+  // Check for inline runtime call.
+  if (expr->name()->Get(0) == '_' &&
+      CodeGenerator::FindInlineRuntimeLUT(expr->name()) != NULL) {
+    BAILOUT("inlined runtime call");
+  }
+  // Check all arguments to the call.  (Relies on TEMP meaning STACK.)
+  for (int i = 0; i < expr->arguments()->length(); i++) {
+    Visit(expr->arguments()->at(i));
+    CHECK_BAILOUT;
+  }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitUnaryOperation(UnaryOperation* expr) {
+  switch (expr->op()) {
+    case Token::ADD:
+    case Token::NOT:
+    case Token::TYPEOF:
+    case Token::VOID:
+      Visit(expr->expression());
+      break;
+    case Token::BIT_NOT:
+      BAILOUT("UnaryOperation: BIT_NOT");
+    case Token::DELETE:
+      BAILOUT("UnaryOperation: DELETE");
+    case Token::SUB:
+      BAILOUT("UnaryOperation: SUB");
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitCountOperation(CountOperation* expr) {
+  Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
+  Property* prop = expr->expression()->AsProperty();
+  ASSERT(var == NULL || prop == NULL);
+  if (var != NULL) {
+    // All global variables are supported.
+    if (!var->is_global()) {
+      ASSERT(var->slot() != NULL);
+      Slot::Type type = var->slot()->type();
+      if (type == Slot::LOOKUP) {
+        BAILOUT("CountOperation with lookup slot");
+      }
+    }
+  } else if (prop != NULL) {
+    Visit(prop->obj());
+    CHECK_BAILOUT;
+    Visit(prop->key());
+    CHECK_BAILOUT;
+  } else {
+    // This is a throw reference error.
+    BAILOUT("CountOperation non-variable/non-property expression");
+  }
+}
+
+
+void FullCodeGenSyntaxChecker::VisitBinaryOperation(BinaryOperation* expr) {
+  Visit(expr->left());
+  CHECK_BAILOUT;
+  Visit(expr->right());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitCompareOperation(CompareOperation* expr) {
+  Visit(expr->left());
+  CHECK_BAILOUT;
+  Visit(expr->right());
+}
+
+
+void FullCodeGenSyntaxChecker::VisitThisFunction(ThisFunction* expr) {
+  // Supported.
+}
+
+#undef BAILOUT
+#undef CHECK_BAILOUT
+
+
+#define __ ACCESS_MASM(masm())
+
+Handle<Code> FullCodeGenerator::MakeCode(FunctionLiteral* fun,
+                                         Handle<Script> script,
+                                         bool is_eval) {
+  if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+    int len = String::cast(script->source())->length();
+    Counters::total_full_codegen_source_size.Increment(len);
+  }
+  CodeGenerator::MakeCodePrologue(fun);
+  const int kInitialBufferSize = 4 * KB;
+  MacroAssembler masm(NULL, kInitialBufferSize);
+  FullCodeGenerator cgen(&masm, script, is_eval);
+  cgen.Generate(fun);
+  if (cgen.HasStackOverflow()) {
+    ASSERT(!Top::has_pending_exception());
+    return Handle<Code>::null();
+  }
+  Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, NOT_IN_LOOP);
+  return CodeGenerator::MakeCodeEpilogue(fun, &masm, flags, script);
+}
+
+
+int FullCodeGenerator::SlotOffset(Slot* slot) {
+  ASSERT(slot != NULL);
+  // Offset is negative because higher indexes are at lower addresses.
+  int offset = -slot->index() * kPointerSize;
+  // Adjust by a (parameter or local) base offset.
+  switch (slot->type()) {
+    case Slot::PARAMETER:
+      offset += (function_->scope()->num_parameters() + 1) * kPointerSize;
+      break;
+    case Slot::LOCAL:
+      offset += JavaScriptFrameConstants::kLocal0Offset;
+      break;
+    case Slot::CONTEXT:
+    case Slot::LOOKUP:
+      UNREACHABLE();
+  }
+  return offset;
+}
+
+
+void FullCodeGenerator::VisitDeclarations(
+    ZoneList<Declaration*>* declarations) {
+  int length = declarations->length();
+  int globals = 0;
+  for (int i = 0; i < length; i++) {
+    Declaration* decl = declarations->at(i);
+    Variable* var = decl->proxy()->var();
+    Slot* slot = var->slot();
+
+    // If it was not possible to allocate the variable at compile
+    // time, we need to "declare" it at runtime to make sure it
+    // actually exists in the local context.
+    if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
+      VisitDeclaration(decl);
+    } else {
+      // Count global variables and functions for later processing.
+      globals++;
+    }
+  }
+
+  // Compute array of global variable and function declarations.
+  // Do nothing in case of no declared global functions or variables.
+  if (globals > 0) {
+    Handle<FixedArray> array = Factory::NewFixedArray(2 * globals, TENURED);
+    for (int j = 0, i = 0; i < length; i++) {
+      Declaration* decl = declarations->at(i);
+      Variable* var = decl->proxy()->var();
+      Slot* slot = var->slot();
+
+      if ((slot == NULL || slot->type() != Slot::LOOKUP) && var->is_global()) {
+        array->set(j++, *(var->name()));
+        if (decl->fun() == NULL) {
+          if (var->mode() == Variable::CONST) {
+            // In case this is a const property, use the hole.
+            array->set_the_hole(j++);
+          } else {
+            array->set_undefined(j++);
+          }
+        } else {
+          Handle<JSFunction> function =
+              Compiler::BuildBoilerplate(decl->fun(), script_, this);
+          // Check for stack-overflow exception.
+          if (HasStackOverflow()) return;
+          array->set(j++, *function);
+        }
+      }
+    }
+    // Invoke the platform-dependent code generator to do the actual
+    // declaration of the global variables and functions.
+    DeclareGlobals(array);
+  }
+}
+
+
+void FullCodeGenerator::SetFunctionPosition(FunctionLiteral* fun) {
+  if (FLAG_debug_info) {
+    CodeGenerator::RecordPositions(masm_, fun->start_position());
+  }
+}
+
+
+void FullCodeGenerator::SetReturnPosition(FunctionLiteral* fun) {
+  if (FLAG_debug_info) {
+    CodeGenerator::RecordPositions(masm_, fun->end_position());
+  }
+}
+
+
+void FullCodeGenerator::SetStatementPosition(Statement* stmt) {
+  if (FLAG_debug_info) {
+    CodeGenerator::RecordPositions(masm_, stmt->statement_pos());
+  }
+}
+
+
+void FullCodeGenerator::SetStatementPosition(int pos) {
+  if (FLAG_debug_info) {
+    CodeGenerator::RecordPositions(masm_, pos);
+  }
+}
+
+
+void FullCodeGenerator::SetSourcePosition(int pos) {
+  if (FLAG_debug_info && pos != RelocInfo::kNoPosition) {
+    masm_->RecordPosition(pos);
+  }
+}
+
+
+void FullCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
+  Label eval_right, done;
+
+  // Set up the appropriate context for the left subexpression based
+  // on the operation and our own context.  Initially assume we can
+  // inherit both true and false labels from our context.
+  if (expr->op() == Token::OR) {
+    switch (context_) {
+      case Expression::kUninitialized:
+        UNREACHABLE();
+      case Expression::kEffect:
+        VisitForControl(expr->left(), &done, &eval_right);
+        break;
+      case Expression::kValue:
+        VisitForValueControl(expr->left(),
+                             location_,
+                             &done,
+                             &eval_right);
+        break;
+      case Expression::kTest:
+        VisitForControl(expr->left(), true_label_, &eval_right);
+        break;
+      case Expression::kValueTest:
+        VisitForValueControl(expr->left(),
+                             location_,
+                             true_label_,
+                             &eval_right);
+        break;
+      case Expression::kTestValue:
+        VisitForControl(expr->left(), true_label_, &eval_right);
+        break;
+    }
+  } else {
+    ASSERT_EQ(Token::AND, expr->op());
+    switch (context_) {
+      case Expression::kUninitialized:
+        UNREACHABLE();
+      case Expression::kEffect:
+        VisitForControl(expr->left(), &eval_right, &done);
+        break;
+      case Expression::kValue:
+        VisitForControlValue(expr->left(),
+                             location_,
+                             &eval_right,
+                             &done);
+        break;
+      case Expression::kTest:
+        VisitForControl(expr->left(), &eval_right, false_label_);
+        break;
+      case Expression::kValueTest:
+        VisitForControl(expr->left(), &eval_right, false_label_);
+        break;
+      case Expression::kTestValue:
+        VisitForControlValue(expr->left(),
+                             location_,
+                             &eval_right,
+                             false_label_);
+        break;
+    }
+  }
+
+  __ bind(&eval_right);
+  Visit(expr->right());
+
+  __ bind(&done);
+}
+
+
+void FullCodeGenerator::VisitBlock(Block* stmt) {
+  Comment cmnt(masm_, "[ Block");
+  Breakable nested_statement(this, stmt);
+  SetStatementPosition(stmt);
+  VisitStatements(stmt->statements());
+  __ bind(nested_statement.break_target());
+}
+
+
+void FullCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
+  Comment cmnt(masm_, "[ ExpressionStatement");
+  SetStatementPosition(stmt);
+  VisitForEffect(stmt->expression());
+}
+
+
+void FullCodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
+  Comment cmnt(masm_, "[ EmptyStatement");
+  SetStatementPosition(stmt);
+}
+
+
+void FullCodeGenerator::VisitIfStatement(IfStatement* stmt) {
+  Comment cmnt(masm_, "[ IfStatement");
+  SetStatementPosition(stmt);
+  Label then_part, else_part, done;
+
+  // Do not worry about optimizing for empty then or else bodies.
+  VisitForControl(stmt->condition(), &then_part, &else_part);
+
+  __ bind(&then_part);
+  Visit(stmt->then_statement());
+  __ jmp(&done);
+
+  __ bind(&else_part);
+  Visit(stmt->else_statement());
+
+  __ bind(&done);
+}
+
+
+void FullCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
+  Comment cmnt(masm_, "[ ContinueStatement");
+  SetStatementPosition(stmt);
+  NestedStatement* current = nesting_stack_;
+  int stack_depth = 0;
+  while (!current->IsContinueTarget(stmt->target())) {
+    stack_depth = current->Exit(stack_depth);
+    current = current->outer();
+  }
+  __ Drop(stack_depth);
+
+  Iteration* loop = current->AsIteration();
+  __ jmp(loop->continue_target());
+}
+
+
+void FullCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
+  Comment cmnt(masm_, "[ BreakStatement");
+  SetStatementPosition(stmt);
+  NestedStatement* current = nesting_stack_;
+  int stack_depth = 0;
+  while (!current->IsBreakTarget(stmt->target())) {
+    stack_depth = current->Exit(stack_depth);
+    current = current->outer();
+  }
+  __ Drop(stack_depth);
+
+  Breakable* target = current->AsBreakable();
+  __ jmp(target->break_target());
+}
+
+
+void FullCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
+  Comment cmnt(masm_, "[ ReturnStatement");
+  SetStatementPosition(stmt);
+  Expression* expr = stmt->expression();
+  VisitForValue(expr, kAccumulator);
+
+  // Exit all nested statements.
+  NestedStatement* current = nesting_stack_;
+  int stack_depth = 0;
+  while (current != NULL) {
+    stack_depth = current->Exit(stack_depth);
+    current = current->outer();
+  }
+  __ Drop(stack_depth);
+
+  EmitReturnSequence(stmt->statement_pos());
+}
+
+
+void FullCodeGenerator::VisitWithEnterStatement(WithEnterStatement* stmt) {
+  Comment cmnt(masm_, "[ WithEnterStatement");
+  SetStatementPosition(stmt);
+
+  VisitForValue(stmt->expression(), kStack);
+  if (stmt->is_catch_block()) {
+    __ CallRuntime(Runtime::kPushCatchContext, 1);
+  } else {
+    __ CallRuntime(Runtime::kPushContext, 1);
+  }
+  // Both runtime calls return the new context in both the context and the
+  // result registers.
+
+  // Update local stack frame context field.
+  StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
+}
+
+
+void FullCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
+  Comment cmnt(masm_, "[ WithExitStatement");
+  SetStatementPosition(stmt);
+
+  // Pop context.
+  LoadContextField(context_register(), Context::PREVIOUS_INDEX);
+  // Update local stack frame context field.
+  StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
+}
+
+
+void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
+  Comment cmnt(masm_, "[ DoWhileStatement");
+  SetStatementPosition(stmt);
+  Label body, stack_limit_hit, stack_check_success;
+
+  Iteration loop_statement(this, stmt);
+  increment_loop_depth();
+
+  __ bind(&body);
+  Visit(stmt->body());
+
+  // Check stack before looping.
+  __ StackLimitCheck(&stack_limit_hit);
+  __ bind(&stack_check_success);
+
+  __ bind(loop_statement.continue_target());
+  SetStatementPosition(stmt->condition_position());
+  VisitForControl(stmt->cond(), &body, loop_statement.break_target());
+
+  __ bind(&stack_limit_hit);
+  StackCheckStub stack_stub;
+  __ CallStub(&stack_stub);
+  __ jmp(&stack_check_success);
+
+  __ bind(loop_statement.break_target());
+
+  decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
+  Comment cmnt(masm_, "[ WhileStatement");
+  SetStatementPosition(stmt);
+  Label body, stack_limit_hit, stack_check_success;
+
+  Iteration loop_statement(this, stmt);
+  increment_loop_depth();
+
+  // Emit the test at the bottom of the loop.
+  __ jmp(loop_statement.continue_target());
+
+  __ bind(&body);
+  Visit(stmt->body());
+
+  __ bind(loop_statement.continue_target());
+  // Check stack before looping.
+  __ StackLimitCheck(&stack_limit_hit);
+  __ bind(&stack_check_success);
+
+  VisitForControl(stmt->cond(), &body, loop_statement.break_target());
+
+  __ bind(&stack_limit_hit);
+  StackCheckStub stack_stub;
+  __ CallStub(&stack_stub);
+  __ jmp(&stack_check_success);
+
+  __ bind(loop_statement.break_target());
+  decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
+  Comment cmnt(masm_, "[ ForStatement");
+  SetStatementPosition(stmt);
+  Label test, body, stack_limit_hit, stack_check_success;
+
+  Iteration loop_statement(this, stmt);
+  if (stmt->init() != NULL) {
+    Visit(stmt->init());
+  }
+
+  increment_loop_depth();
+  // Emit the test at the bottom of the loop (even if empty).
+  __ jmp(&test);
+
+  __ bind(&body);
+  Visit(stmt->body());
+
+  __ bind(loop_statement.continue_target());
+
+  SetStatementPosition(stmt);
+  if (stmt->next() != NULL) {
+    Visit(stmt->next());
+  }
+
+  __ bind(&test);
+
+  // Check stack before looping.
+  __ StackLimitCheck(&stack_limit_hit);
+  __ bind(&stack_check_success);
+
+  if (stmt->cond() != NULL) {
+    VisitForControl(stmt->cond(), &body, loop_statement.break_target());
+  } else {
+    __ jmp(&body);
+  }
+
+  __ bind(&stack_limit_hit);
+  StackCheckStub stack_stub;
+  __ CallStub(&stack_stub);
+  __ jmp(&stack_check_success);
+
+  __ bind(loop_statement.break_target());
+  decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
+  Comment cmnt(masm_, "[ TryCatchStatement");
+  SetStatementPosition(stmt);
+  // The try block adds a handler to the exception handler chain
+  // before entering, and removes it again when exiting normally.
+  // If an exception is thrown during execution of the try block,
+  // control is passed to the handler, which also consumes the handler
+  // entry from the chain.  At this point the exception is in a register,
+  // and it is stored in the temporary local variable (printed as
+  // ".catch-var") before executing the catch block. The catch block has
+  // been rewritten
+  // to introduce a new scope to bind the catch variable and to remove
+  // that scope again afterwards.
+
+  Label try_handler_setup, catch_entry, done;
+  __ Call(&try_handler_setup);
+  // Try handler code, exception in result register.
+
+  // Store exception in local .catch variable before executing catch block.
+  {
+    // The catch variable is *always* a variable proxy for a local variable.
+    Variable* catch_var = stmt->catch_var()->AsVariableProxy()->AsVariable();
+    ASSERT_NOT_NULL(catch_var);
+    Slot* variable_slot = catch_var->slot();
+    ASSERT_NOT_NULL(variable_slot);
+    ASSERT_EQ(Slot::LOCAL, variable_slot->type());
+    StoreToFrameField(SlotOffset(variable_slot), result_register());
+  }
+
+  Visit(stmt->catch_block());
+  __ jmp(&done);
+
+  // Try block code. Sets up the exception handler chain.
+  __ bind(&try_handler_setup);
+  {
+    TryCatch try_block(this, &catch_entry);
+    __ PushTryHandler(IN_JAVASCRIPT, TRY_CATCH_HANDLER);
+    Visit(stmt->try_block());
+    __ PopTryHandler();
+  }
+  __ bind(&done);
+}
+
+
+void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+  Comment cmnt(masm_, "[ TryFinallyStatement");
+  SetStatementPosition(stmt);
+  // Try finally is compiled by setting up a try-handler on the stack while
+  // executing the try body, and removing it again afterwards.
+  //
+  // The try-finally construct can enter the finally block in three ways:
+  // 1. By exiting the try-block normally. This removes the try-handler and
+  //    calls the finally-block code before continuing.
+  // 2. By exiting the try-block with a function-local control flow transfer
+  //    (break/continue/return). The site of the transfer (e.g., the break)
+  //    removes the try-handler and calls the finally-block code before
+  //    continuing its outward control transfer.
+  // 3. By exiting the try-block with a thrown exception. This can happen in
+  //    nested function calls. The throw traverses the try-handler chain and
+  //    consumes the try-handler entry before jumping to the handler code.
+  //    The handler code then calls the finally-block before rethrowing the
+  //    exception.
+  //
+  // The finally block must assume a return address on top of the stack
+  // (or in the link register on ARM chips) and a value (return value or
+  // exception) in the result register (rax/eax/r0), both of which must
+  // be preserved. The return address isn't GC-safe, so it should be
+  // cooked before GC.
+  Label finally_entry;
+  Label try_handler_setup;
+
+  // Jump to the try-handler setup and try-block code. Use a call to put
+  // the try-handler address on the stack.
+  __ Call(&try_handler_setup);
+  // Try handler code. Return address of call is pushed on handler stack.
+  {
+    // This code is only executed during stack-handler traversal when an
+    // exception is thrown. The exception is in the result register, which
+    // is retained by the finally block.
+    // Call the finally block and then rethrow the exception.
+    __ Call(&finally_entry);
+    __ push(result_register());
+    __ CallRuntime(Runtime::kReThrow, 1);
+  }
+
+  __ bind(&finally_entry);
+  {
+    // Finally block implementation.
+    Finally finally_block(this);
+    EnterFinallyBlock();
+    Visit(stmt->finally_block());
+    ExitFinallyBlock();  // Return to the calling code.
+  }
+
+  __ bind(&try_handler_setup);
+  {
+    // Set up the try handler (stack pointer registers).
+    TryFinally try_block(this, &finally_entry);
+    __ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER);
+    Visit(stmt->try_block());
+    __ PopTryHandler();
+  }
+  // Execute the finally block on the way out.
+  __ Call(&finally_entry);
+}
+
+
+void FullCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  Comment cmnt(masm_, "[ DebuggerStatement");
+  SetStatementPosition(stmt);
+  __ CallRuntime(Runtime::kDebugBreak, 0);
+  // Ignore the return value.
+#endif
+}
+
+
+void FullCodeGenerator::VisitFunctionBoilerplateLiteral(
+    FunctionBoilerplateLiteral* expr) {
+  UNREACHABLE();
+}
+
+
+void FullCodeGenerator::VisitConditional(Conditional* expr) {
+  Comment cmnt(masm_, "[ Conditional");
+  Label true_case, false_case, done;
+  VisitForControl(expr->condition(), &true_case, &false_case);
+
+  __ bind(&true_case);
+  Visit(expr->then_expression());
+  // If control flow falls through Visit, jump to done.
+  if (context_ == Expression::kEffect || context_ == Expression::kValue) {
+    __ jmp(&done);
+  }
+
+  __ bind(&false_case);
+  Visit(expr->else_expression());
+  // If control flow falls through Visit, merge it with true case here.
+  if (context_ == Expression::kEffect || context_ == Expression::kValue) {
+    __ bind(&done);
+  }
+}
+
+
+void FullCodeGenerator::VisitSlot(Slot* expr) {
+  // Slots do not appear directly in the AST.
+  UNREACHABLE();
+}
+
+
+void FullCodeGenerator::VisitLiteral(Literal* expr) {
+  Comment cmnt(masm_, "[ Literal");
+  Apply(context_, expr);
+}
+
+
+void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+  Comment cmnt(masm_, "[ Assignment");
+  ASSERT(expr->op() != Token::INIT_CONST);
+  // Left-hand side can only be a property, a global, or a (parameter or
+  // local) slot. Variables with a rewrite to .arguments are treated as
+  // KEYED_PROPERTY.
+  enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+  LhsKind assign_type = VARIABLE;
+  Property* prop = expr->target()->AsProperty();
+  if (prop != NULL) {
+    assign_type =
+        (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+  }
+
+  // Evaluate LHS expression.
+  switch (assign_type) {
+    case VARIABLE:
+      // Nothing to do here.
+      break;
+    case NAMED_PROPERTY:
+      VisitForValue(prop->obj(), kStack);
+      break;
+    case KEYED_PROPERTY:
+      VisitForValue(prop->obj(), kStack);
+      VisitForValue(prop->key(), kStack);
+      break;
+  }
+
+  // If we have a compound assignment: Get value of LHS expression and
+  // store it on top of the stack.
+  if (expr->is_compound()) {
+    Location saved_location = location_;
+    location_ = kStack;
+    switch (assign_type) {
+      case VARIABLE:
+        EmitVariableLoad(expr->target()->AsVariableProxy()->var(),
+                         Expression::kValue);
+        break;
+      case NAMED_PROPERTY:
+        EmitNamedPropertyLoad(prop);
+        __ push(result_register());
+        break;
+      case KEYED_PROPERTY:
+        EmitKeyedPropertyLoad(prop);
+        __ push(result_register());
+        break;
+    }
+    location_ = saved_location;
+  }
+
+  // Evaluate RHS expression.
+  Expression* rhs = expr->value();
+  VisitForValue(rhs, kAccumulator);
+
+  // If we have a compound assignment: Apply operator.
+  if (expr->is_compound()) {
+    Location saved_location = location_;
+    location_ = kAccumulator;
+    EmitBinaryOp(expr->binary_op(), Expression::kValue);
+    location_ = saved_location;
+  }
+
+  // Record source position before possible IC call.
+  SetSourcePosition(expr->position());
+
+  // Store the value.
+  switch (assign_type) {
+    case VARIABLE:
+      EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+                             context_);
+      break;
+    case NAMED_PROPERTY:
+      EmitNamedPropertyAssignment(expr);
+      break;
+    case KEYED_PROPERTY:
+      EmitKeyedPropertyAssignment(expr);
+      break;
+  }
+}
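+
+
+// Sketch of the sequence above for a compound assignment such as
+// "o.x += 1" (roughly):
+//   1. push o                           (the receiver)
+//   2. load o.x and push it             (old value, EmitNamedPropertyLoad)
+//   3. evaluate 1 into the accumulator  (the RHS)
+//   4. EmitBinaryOp(ADD): combine the top of stack with the accumulator
+//   5. EmitNamedPropertyAssignment: store the result back into o.x.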
+
+
+void FullCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
+  // Call runtime routine to allocate the catch extension object and
+  // assign the exception value to the catch variable.
+  Comment cmnt(masm_, "[ CatchExtensionObject");
+  VisitForValue(expr->key(), kStack);
+  VisitForValue(expr->value(), kStack);
+  // Create catch extension object.
+  __ CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
+  Apply(context_, result_register());
+}
+
+
+void FullCodeGenerator::VisitThrow(Throw* expr) {
+  Comment cmnt(masm_, "[ Throw");
+  VisitForValue(expr->exception(), kStack);
+  __ CallRuntime(Runtime::kThrow, 1);
+  // Never returns here.
+}
+
+
+int FullCodeGenerator::TryFinally::Exit(int stack_depth) {
+  // The macros used here must preserve the result register.
+  __ Drop(stack_depth);
+  __ PopTryHandler();
+  __ Call(finally_entry_);
+  return 0;
+}
+
+
+int FullCodeGenerator::TryCatch::Exit(int stack_depth) {
+  // The macros used here must preserve the result register.
+  __ Drop(stack_depth);
+  __ PopTryHandler();
+  return 0;
+}
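+
+
+// Both Exit implementations above return 0: after dropping stack_depth
+// elements and popping the try handler, nothing is left on top of the
+// surrounding statement's stack.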
+
+
+#undef __
+
+
+} }  // namespace v8::internal
diff --git a/src/full-codegen.h b/src/full-codegen.h
new file mode 100644
index 0000000..3c8e198
--- /dev/null
+++ b/src/full-codegen.h
@@ -0,0 +1,445 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_FULL_CODEGEN_H_
+#define V8_FULL_CODEGEN_H_
+
+#include "v8.h"
+
+#include "ast.h"
+
+namespace v8 {
+namespace internal {
+
+class FullCodeGenSyntaxChecker: public AstVisitor {
+ public:
+  FullCodeGenSyntaxChecker() : has_supported_syntax_(true) {}
+
+  void Check(FunctionLiteral* fun);
+
+  bool has_supported_syntax() { return has_supported_syntax_; }
+
+ private:
+  void VisitDeclarations(ZoneList<Declaration*>* decls);
+  void VisitStatements(ZoneList<Statement*>* stmts);
+
+  // AST node visit functions.
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+  bool has_supported_syntax_;
+
+  DISALLOW_COPY_AND_ASSIGN(FullCodeGenSyntaxChecker);
+};
+
+
+// -----------------------------------------------------------------------------
+// Full code generator.
+
+class FullCodeGenerator: public AstVisitor {
+ public:
+  FullCodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval)
+      : masm_(masm),
+        function_(NULL),
+        script_(script),
+        is_eval_(is_eval),
+        nesting_stack_(NULL),
+        loop_depth_(0),
+        location_(kStack),
+        true_label_(NULL),
+        false_label_(NULL) {
+  }
+
+  static Handle<Code> MakeCode(FunctionLiteral* fun,
+                               Handle<Script> script,
+                               bool is_eval);
+
+  void Generate(FunctionLiteral* fun);
+
+ private:
+  class Breakable;
+  class Iteration;
+  class TryCatch;
+  class TryFinally;
+  class Finally;
+  class ForIn;
+
+  class NestedStatement BASE_EMBEDDED {
+   public:
+    explicit NestedStatement(FullCodeGenerator* codegen) : codegen_(codegen) {
+      // Link into codegen's nesting stack.
+      previous_ = codegen->nesting_stack_;
+      codegen->nesting_stack_ = this;
+    }
+    virtual ~NestedStatement() {
+      // Unlink from codegen's nesting stack.
+      ASSERT_EQ(this, codegen_->nesting_stack_);
+      codegen_->nesting_stack_ = previous_;
+    }
+
+    virtual Breakable* AsBreakable() { return NULL; }
+    virtual Iteration* AsIteration() { return NULL; }
+    virtual TryCatch* AsTryCatch() { return NULL; }
+    virtual TryFinally* AsTryFinally() { return NULL; }
+    virtual Finally* AsFinally() { return NULL; }
+    virtual ForIn* AsForIn() { return NULL; }
+
+    virtual bool IsContinueTarget(Statement* target) { return false; }
+    virtual bool IsBreakTarget(Statement* target) { return false; }
+
+    // Generate code to leave the nested statement. This includes
+    // cleaning up any stack elements in use and restoring the
+    // stack to the expectations of the surrounding statements.
+    // Takes the number of stack elements currently on top of the
+    // nested statement's stack, and returns the number of stack
+    // elements left on top of the surrounding statement's stack.
+    // The generated code must preserve the result register (which
+    // contains the value in case of a return).
+    virtual int Exit(int stack_depth) {
+      // Default implementation for the case where there is
+      // nothing to clean up.
+      return stack_depth;
+    }
+    NestedStatement* outer() { return previous_; }
+   protected:
+    MacroAssembler* masm() { return codegen_->masm(); }
+   private:
+    FullCodeGenerator* codegen_;
+    NestedStatement* previous_;
+    DISALLOW_COPY_AND_ASSIGN(NestedStatement);
+  };
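+
+  // A non-local transfer (return, break, continue) unwinds the nesting
+  // stack from the innermost statement outwards, roughly:
+  //
+  //   int depth = 0;
+  //   for (NestedStatement* s = nesting_stack_; s != NULL; s = s->outer()) {
+  //     depth = s->Exit(depth);
+  //   }
+  //
+  // so each level can pop its try handler and drop its temporaries.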
+
+  class Breakable : public NestedStatement {
+   public:
+    Breakable(FullCodeGenerator* codegen,
+              BreakableStatement* break_target)
+        : NestedStatement(codegen),
+          target_(break_target) {}
+    virtual ~Breakable() {}
+    virtual Breakable* AsBreakable() { return this; }
+    virtual bool IsBreakTarget(Statement* statement) {
+      return target_ == statement;
+    }
+    BreakableStatement* statement() { return target_; }
+    Label* break_target() { return &break_target_label_; }
+   private:
+    BreakableStatement* target_;
+    Label break_target_label_;
+    DISALLOW_COPY_AND_ASSIGN(Breakable);
+  };
+
+  class Iteration : public Breakable {
+   public:
+    Iteration(FullCodeGenerator* codegen,
+              IterationStatement* iteration_statement)
+        : Breakable(codegen, iteration_statement) {}
+    virtual ~Iteration() {}
+    virtual Iteration* AsIteration() { return this; }
+    virtual bool IsContinueTarget(Statement* statement) {
+      return this->statement() == statement;
+    }
+    Label* continue_target() { return &continue_target_label_; }
+   private:
+    Label continue_target_label_;
+    DISALLOW_COPY_AND_ASSIGN(Iteration);
+  };
+
+  // The environment inside the try block of a try/catch statement.
+  class TryCatch : public NestedStatement {
+   public:
+    explicit TryCatch(FullCodeGenerator* codegen, Label* catch_entry)
+        : NestedStatement(codegen), catch_entry_(catch_entry) { }
+    virtual ~TryCatch() {}
+    virtual TryCatch* AsTryCatch() { return this; }
+    Label* catch_entry() { return catch_entry_; }
+    virtual int Exit(int stack_depth);
+   private:
+    Label* catch_entry_;
+    DISALLOW_COPY_AND_ASSIGN(TryCatch);
+  };
+
+  // The environment inside the try block of a try/finally statement.
+  class TryFinally : public NestedStatement {
+   public:
+    explicit TryFinally(FullCodeGenerator* codegen, Label* finally_entry)
+        : NestedStatement(codegen), finally_entry_(finally_entry) { }
+    virtual ~TryFinally() {}
+    virtual TryFinally* AsTryFinally() { return this; }
+    Label* finally_entry() { return finally_entry_; }
+    virtual int Exit(int stack_depth);
+   private:
+    Label* finally_entry_;
+    DISALLOW_COPY_AND_ASSIGN(TryFinally);
+  };
+
+  // The environment inside a finally block.  Abnormal termination of
+  // the finally block needs to clean up the block's parameters from
+  // the stack.
+  class Finally : public NestedStatement {
+   public:
+    explicit Finally(FullCodeGenerator* codegen) : NestedStatement(codegen) { }
+    virtual ~Finally() {}
+    virtual Finally* AsFinally() { return this; }
+    virtual int Exit(int stack_depth) {
+      return stack_depth + kFinallyStackElementCount;
+    }
+   private:
+    // Number of extra stack slots occupied during a finally block.
+    static const int kFinallyStackElementCount = 2;
+    DISALLOW_COPY_AND_ASSIGN(Finally);
+  };
+
+  // The environment inside a for-in loop.  Abnormal termination of
+  // the for-in block needs to clean up the block's temporary storage
+  // from the stack.
+  class ForIn : public Iteration {
+   public:
+    ForIn(FullCodeGenerator* codegen,
+          ForInStatement* statement)
+        : Iteration(codegen, statement) { }
+    virtual ~ForIn() {}
+    virtual ForIn* AsForIn() { return this; }
+    virtual int Exit(int stack_depth) {
+      return stack_depth + kForInStackElementCount;
+    }
+   private:
+    // TODO(lrn): Check that this value is correct when implementing
+    // for-in.
+    static const int kForInStackElementCount = 5;
+    DISALLOW_COPY_AND_ASSIGN(ForIn);
+  };
+
+  enum Location {
+    kAccumulator,
+    kStack
+  };
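+
+  // kAccumulator means the value lives in result_register(); kStack means
+  // it sits on top of the expression stack.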
+
+  int SlotOffset(Slot* slot);
+
+  // Emit code to convert a pure value (in a register, in a slot, as a
+  // literal, or on top of the stack) into the result expected according
+  // to an expression context.
+  void Apply(Expression::Context context, Register reg);
+
+  // Slot cannot have type Slot::LOOKUP.
+  void Apply(Expression::Context context, Slot* slot);
+
+  void Apply(Expression::Context context, Literal* lit);
+  void ApplyTOS(Expression::Context context);
+
+  // Emit code to discard count elements from the top of stack, then convert
+  // a pure value into the result expected according to an expression
+  // context.
+  void DropAndApply(int count, Expression::Context context, Register reg);
+
+  // Emit code to convert pure control flow (a branch to a pair of labels)
+  // into the result expected according to an expression context.
+  void Apply(Expression::Context context,
+             Label* materialize_true,
+             Label* materialize_false);
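+
+  // For example (roughly): in an effect context these helpers discard the
+  // value, in a value context they leave it in the accumulator or on the
+  // stack according to location_, and in a test context they branch on it
+  // to true_label_ / false_label_.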
+
+  // Helper function to convert a pure value into a test context.  The value
+  // is expected on the stack or in the accumulator, depending on the
+  // platform.  See the platform-specific implementation for details.
+  void DoTest(Expression::Context context);
+
+  void Move(Slot* dst, Register source, Register scratch1, Register scratch2);
+  void Move(Register dst, Slot* source);
+
+  // Return an operand used to read/write to a known (i.e., non-LOOKUP) slot.
+  // May emit code to traverse the context chain, destroying the scratch
+  // register.
+  MemOperand EmitSlotSearch(Slot* slot, Register scratch);
+
+  void VisitForEffect(Expression* expr) {
+    Expression::Context saved_context = context_;
+    context_ = Expression::kEffect;
+    Visit(expr);
+    context_ = saved_context;
+  }
+
+  void VisitForValue(Expression* expr, Location where) {
+    Expression::Context saved_context = context_;
+    Location saved_location = location_;
+    context_ = Expression::kValue;
+    location_ = where;
+    Visit(expr);
+    context_ = saved_context;
+    location_ = saved_location;
+  }
+
+  void VisitForControl(Expression* expr, Label* if_true, Label* if_false) {
+    Expression::Context saved_context = context_;
+    Label* saved_true = true_label_;
+    Label* saved_false = false_label_;
+    context_ = Expression::kTest;
+    true_label_ = if_true;
+    false_label_ = if_false;
+    Visit(expr);
+    context_ = saved_context;
+    true_label_ = saved_true;
+    false_label_ = saved_false;
+  }
+
+  void VisitForValueControl(Expression* expr,
+                            Location where,
+                            Label* if_true,
+                            Label* if_false) {
+    Expression::Context saved_context = context_;
+    Location saved_location = location_;
+    Label* saved_true = true_label_;
+    Label* saved_false = false_label_;
+    context_ = Expression::kValueTest;
+    location_ = where;
+    true_label_ = if_true;
+    false_label_ = if_false;
+    Visit(expr);
+    context_ = saved_context;
+    location_ = saved_location;
+    true_label_ = saved_true;
+    false_label_ = saved_false;
+  }
+
+  void VisitForControlValue(Expression* expr,
+                            Location where,
+                            Label* if_true,
+                            Label* if_false) {
+    Expression::Context saved_context = context_;
+    Location saved_location = location_;
+    Label* saved_true = true_label_;
+    Label* saved_false = false_label_;
+    context_ = Expression::kTestValue;
+    location_ = where;
+    true_label_ = if_true;
+    false_label_ = if_false;
+    Visit(expr);
+    context_ = saved_context;
+    location_ = saved_location;
+    true_label_ = saved_true;
+    false_label_ = saved_false;
+  }
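+
+  // Usage sketch (roughly): a statement visitor combines these helpers,
+  // e.g. an if statement emits
+  //
+  //   Label then_part, else_part, done;
+  //   VisitForControl(stmt->condition(), &then_part, &else_part);
+  //
+  // and binds the labels around the two arms.  Each helper saves and
+  // restores context_, location_ and the branch labels, so nested visits
+  // cannot clobber the caller's state.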
+
+  void VisitDeclarations(ZoneList<Declaration*>* declarations);
+  void DeclareGlobals(Handle<FixedArray> pairs);
+
+  // Platform-specific return sequence.
+  void EmitReturnSequence(int position);
+
+  // Platform-specific code sequences for calls.
+  void EmitCallWithStub(Call* expr);
+  void EmitCallWithIC(Call* expr, Handle<Object> name, RelocInfo::Mode mode);
+
+  // Platform-specific code for loading variables.
+  void EmitVariableLoad(Variable* expr, Expression::Context context);
+
+  // Platform-specific support for compiling assignments.
+
+  // Load a value from a named property.
+  // The receiver is left on the stack by the IC.
+  void EmitNamedPropertyLoad(Property* expr);
+
+  // Load a value from a keyed property.
+  // The receiver and the key are left on the stack by the IC.
+  void EmitKeyedPropertyLoad(Property* expr);
+
+  // Apply the compound assignment operator. Expects the left operand on top
+  // of the stack and the right one in the accumulator.
+  void EmitBinaryOp(Token::Value op, Expression::Context context);
+
+  // Complete a variable assignment.  The right-hand-side value is expected
+  // in the accumulator.
+  void EmitVariableAssignment(Variable* var, Expression::Context context);
+
+  // Complete a named property assignment.  The receiver is expected on top
+  // of the stack and the right-hand-side value in the accumulator.
+  void EmitNamedPropertyAssignment(Assignment* expr);
+
+  // Complete a keyed property assignment.  The receiver and key are
+  // expected on top of the stack and the right-hand-side value in the
+  // accumulator.
+  void EmitKeyedPropertyAssignment(Assignment* expr);
+
+  void SetFunctionPosition(FunctionLiteral* fun);
+  void SetReturnPosition(FunctionLiteral* fun);
+  void SetStatementPosition(Statement* stmt);
+  void SetStatementPosition(int pos);
+  void SetSourcePosition(int pos);
+
+  // Non-local control flow support.
+  void EnterFinallyBlock();
+  void ExitFinallyBlock();
+
+  // Loop nesting counter.
+  int loop_depth() { return loop_depth_; }
+  void increment_loop_depth() { loop_depth_++; }
+  void decrement_loop_depth() {
+    ASSERT(loop_depth_ > 0);
+    loop_depth_--;
+  }
+
+  MacroAssembler* masm() { return masm_; }
+  static Register result_register();
+  static Register context_register();
+
+  // Set fields in the stack frame.  Offsets are frame-pointer-relative
+  // offsets defined in, e.g., StandardFrameConstants.
+  void StoreToFrameField(int frame_offset, Register value);
+
+  // Load a value from the current context. Indices are defined as an enum
+  // in v8::internal::Context.
+  void LoadContextField(Register dst, int context_index);
+
+  // AST node visit functions.
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+  // Handles the short-circuited logical binary operations in
+  // VisitBinaryOperation.
+  void EmitLogicalOperation(BinaryOperation* expr);
+
+  MacroAssembler* masm_;
+  FunctionLiteral* function_;
+  Handle<Script> script_;
+  bool is_eval_;
+  Label return_label_;
+  NestedStatement* nesting_stack_;
+  int loop_depth_;
+
+  Expression::Context context_;
+  Location location_;
+  Label* true_label_;
+  Label* false_label_;
+
+  friend class NestedStatement;
+
+  DISALLOW_COPY_AND_ASSIGN(FullCodeGenerator);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_FULL_CODEGEN_H_
diff --git a/src/handles.cc b/src/handles.cc
index d551e21..3156670 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -681,14 +681,18 @@
 bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag) {
   // Compile the source information to a code object.
   Handle<SharedFunctionInfo> shared(function->shared());
-  return CompileLazyShared(shared, flag, 0);
+  bool result = CompileLazyShared(shared, flag, 0);
+  LOG(FunctionCreateEvent(*function));
+  return result;
 }
 
 
 bool CompileLazyInLoop(Handle<JSFunction> function, ClearExceptionFlag flag) {
   // Compile the source information to a code object.
   Handle<SharedFunctionInfo> shared(function->shared());
-  return CompileLazyShared(shared, flag, 1);
+  bool result = CompileLazyShared(shared, flag, 1);
+  LOG(FunctionCreateEvent(*function));
+  return result;
 }
 
 OptimizedObjectForAddingMultipleProperties::
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index b615055..3cb65ee 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -625,8 +625,7 @@
   ConstructorHeapProfile js_cons_profile;
   RetainerHeapProfile js_retainer_profile;
   HeapIterator iterator;
-  while (iterator.has_next()) {
-    HeapObject* obj = iterator.next();
+  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     CollectStats(obj, info);
     js_cons_profile.CollectStats(obj);
     js_retainer_profile.CollectStats(obj);
diff --git a/src/heap.cc b/src/heap.cc
index fba2e87..6be1daf 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -76,8 +76,8 @@
 // semispace_size_ should be a power of 2 and old_generation_size_ should be
 // a multiple of Page::kPageSize.
 #if defined(ANDROID)
-int Heap::max_semispace_size_  = 512*KB;
-int Heap::max_old_generation_size_ = 128*MB;
+int Heap::max_semispace_size_  = 2*MB;
+int Heap::max_old_generation_size_ = 192*MB;
 int Heap::initial_semispace_size_ = 128*KB;
 size_t Heap::code_range_size_ = 0;
 #elif defined(V8_TARGET_ARCH_X64)
@@ -327,7 +327,7 @@
 int Heap::SizeOfObjects() {
   int total = 0;
   AllSpaces spaces;
-  while (Space* space = spaces.next()) {
+  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
     total += space->Size();
   }
   return total;
@@ -732,13 +732,14 @@
   // do not expect them.
   VerifyNonPointerSpacePointersVisitor v;
   HeapObjectIterator code_it(Heap::code_space());
-  while (code_it.has_next()) {
-    HeapObject* object = code_it.next();
+  for (HeapObject* object = code_it.next();
+       object != NULL; object = code_it.next())
     object->Iterate(&v);
-  }
 
   HeapObjectIterator data_it(Heap::old_data_space());
-  while (data_it.has_next()) data_it.next()->Iterate(&v);
+  for (HeapObject* object = data_it.next();
+       object != NULL; object = data_it.next())
+    object->Iterate(&v);
 }
 #endif
 
@@ -804,8 +805,8 @@
 
   // Copy objects reachable from cells by scavenging cell values directly.
   HeapObjectIterator cell_iterator(cell_space_);
-  while (cell_iterator.has_next()) {
-    HeapObject* cell = cell_iterator.next();
+  for (HeapObject* cell = cell_iterator.next();
+       cell != NULL; cell = cell_iterator.next()) {
     if (cell->IsJSGlobalPropertyCell()) {
       Address value_address =
           reinterpret_cast<Address>(cell) +
@@ -1013,13 +1014,15 @@
 
 void Heap::RebuildRSets(PagedSpace* space) {
   HeapObjectIterator it(space);
-  while (it.has_next()) Heap::UpdateRSet(it.next());
+  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
+    Heap::UpdateRSet(obj);
 }
 
 
 void Heap::RebuildRSets(LargeObjectSpace* space) {
   LargeObjectIterator it(space);
-  while (it.has_next()) Heap::UpdateRSet(it.next());
+  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
+    Heap::UpdateRSet(obj);
 }
 
 
@@ -1203,7 +1206,7 @@
   map->set_code_cache(empty_fixed_array());
   map->set_unused_property_fields(0);
   map->set_bit_field(0);
-  map->set_bit_field2(0);
+  map->set_bit_field2(1 << Map::kIsExtensible);
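+  // New maps now describe extensible objects by default.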
 
   // If the map object is aligned fill the padding area with Smi 0 objects.
   if (Map::kPadStart < Map::kSize) {
@@ -3106,7 +3109,8 @@
   if (!HasBeenSetup()) return;
   Top::PrintStack();
   AllSpaces spaces;
-  while (Space* space = spaces.next()) space->Print();
+  for (Space* space = spaces.next(); space != NULL; space = spaces.next())
+    space->Print();
 }
 
 
@@ -3340,6 +3344,11 @@
 
 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
   IterateStrongRoots(v, mode);
+  IterateWeakRoots(v, mode);
+}
+
+
+void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
   v->Synchronize("symbol_table");
   if (mode != VISIT_ALL_IN_SCAVENGE) {
@@ -3394,6 +3403,20 @@
   // Iterate over pointers being held by inactive threads.
   ThreadManager::Iterate(v);
   v->Synchronize("threadmanager");
+
+  // Iterate over the pointers the Serialization/Deserialization code is
+  // holding.
+  // During garbage collection this keeps the partial snapshot cache alive.
+  // During deserialization of the startup snapshot this creates the partial
+  // snapshot cache and deserializes the objects it refers to.  During
+  // serialization this does nothing, since the partial snapshot cache is
+  // empty.  However the next thing we do is create the partial snapshot,
+  // filling up the partial snapshot cache with objects it needs as we go.
+  SerializerDeserializer::Iterate(v);
+  // We don't do a v->Synchronize call here, because in debug mode that will
+  // output a flag to the snapshot.  However at this point the serializer and
+  // deserializer are deliberately a little unsynchronized (see above) so the
+  // checking of the sync flag in the snapshot would fail.
 }
 
 
@@ -3544,7 +3567,8 @@
   // Initialize map space.
   map_space_ = new MapSpace(FLAG_use_big_map_space
       ? max_old_generation_size_
-      : (MapSpace::kMaxMapPageIndex + 1) * Page::kPageSize,
+      : MapSpace::kMaxMapPageIndex * Page::kPageSize,
+      FLAG_max_map_space_pages,
       MAP_SPACE);
   if (map_space_ == NULL) return false;
   if (!map_space_->Setup(NULL, 0)) return false;
@@ -3647,7 +3671,8 @@
 void Heap::Shrink() {
   // Try to shrink all paged spaces.
   PagedSpaces spaces;
-  while (PagedSpace* space = spaces.next()) space->Shrink();
+  for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
+    space->Shrink();
 }
 
 
@@ -3656,7 +3681,8 @@
 void Heap::Protect() {
   if (HasBeenSetup()) {
     AllSpaces spaces;
-    while (Space* space = spaces.next()) space->Protect();
+    for (Space* space = spaces.next(); space != NULL; space = spaces.next())
+      space->Protect();
   }
 }
 
@@ -3664,7 +3690,8 @@
 void Heap::Unprotect() {
   if (HasBeenSetup()) {
     AllSpaces spaces;
-    while (Space* space = spaces.next()) space->Unprotect();
+    for (Space* space = spaces.next(); space != NULL; space = spaces.next())
+      space->Unprotect();
   }
 }
 
@@ -3836,34 +3863,25 @@
 }
 
 
-bool HeapIterator::has_next() {
+HeapObject* HeapIterator::next() {
   // No iterator means we are done.
-  if (object_iterator_ == NULL) return false;
+  if (object_iterator_ == NULL) return NULL;
 
-  if (object_iterator_->has_next_object()) {
+  if (HeapObject* obj = object_iterator_->next_object()) {
     // If the current iterator has more objects we are fine.
-    return true;
+    return obj;
   } else {
     // Go through the spaces looking for one that has objects.
     while (space_iterator_->has_next()) {
       object_iterator_ = space_iterator_->next();
-      if (object_iterator_->has_next_object()) {
-        return true;
+      if (HeapObject* obj = object_iterator_->next_object()) {
+        return obj;
       }
     }
   }
   // Done with the last space.
   object_iterator_ = NULL;
-  return false;
-}
-
-
-HeapObject* HeapIterator::next() {
-  if (has_next()) {
-    return object_iterator_->next_object();
-  } else {
-    return NULL;
-  }
+  return NULL;
 }
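+
+// Callers now iterate with the single-call idiom used throughout this
+// change, e.g. (sketch):
+//
+//   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next())
+//     CollectStats(obj, info);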
 
 
diff --git a/src/heap.h b/src/heap.h
index 1f04444..0dd20c0 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -690,6 +690,8 @@
   static void IterateRoots(ObjectVisitor* v, VisitMode mode);
   // Iterates over all strong roots in the heap.
   static void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
+  // Iterates over the weak roots in the heap (e.g. the symbol table).
+  static void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
 
   // Iterates remembered set of an old space.
   static void IterateRSet(PagedSpace* space, ObjectSlotCallback callback);
@@ -1290,7 +1292,6 @@
   explicit HeapIterator();
   virtual ~HeapIterator();
 
-  bool has_next();
   HeapObject* next();
   void reset();
 
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 2cf469a..dc017ae 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -860,6 +860,24 @@
 }
 
 
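+// Byte-sized compares.  Opcode 0x38 encodes CMP r/m8, r8 and opcode 0x3A
+// encodes CMP r8, r/m8; the register operand must have an addressable low
+// byte (eax, ecx, edx or ebx), which is what is_byte_register() asserts.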
+void Assembler::cmpb(const Operand& dst, Register src) {
+  ASSERT(src.is_byte_register());
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x38);
+  emit_operand(src, dst);
+}
+
+
+void Assembler::cmpb(Register dst, const Operand& src) {
+  ASSERT(dst.is_byte_register());
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0x3A);
+  emit_operand(dst, src);
+}
+
+
 void Assembler::cmpw(const Operand& op, Immediate imm16) {
   ASSERT(imm16.is_int16());
   EnsureSpace ensure_space(this);
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index d675ecf..da27fd0 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -559,6 +559,8 @@
   void and_(const Operand& dst, const Immediate& x);
 
   void cmpb(const Operand& op, int8_t imm8);
+  void cmpb(Register src, const Operand& dst);
+  void cmpb(const Operand& dst, Register src);
   void cmpb_al(const Operand& op);
   void cmpw_ax(const Operand& op);
   void cmpw(const Operand& op, Immediate imm16);
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 240f4da..121e155 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -639,15 +639,22 @@
   return frame_->Pop();
 }
 
+//------------------------------------------------------------------------------
+// CodeGenerator implementation of variables, lookups, and stores.
 
-Reference::Reference(CodeGenerator* cgen, Expression* expression)
-    : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
+Reference::Reference(CodeGenerator* cgen,
+                     Expression* expression,
+                     bool persist_after_get)
+    : cgen_(cgen),
+      expression_(expression),
+      type_(ILLEGAL),
+      persist_after_get_(persist_after_get) {
   cgen->LoadReference(this);
 }
 
 
 Reference::~Reference() {
-  cgen_->UnloadReference(this);
+  ASSERT(is_unloaded() || is_illegal());
 }
 
 
@@ -697,6 +704,7 @@
   // Pop a reference from the stack while preserving TOS.
   Comment cmnt(masm_, "[ UnloadReference");
   frame_->Nip(ref->size());
+  ref->set_unloaded();
 }
 
 
@@ -743,6 +751,12 @@
 
 class FloatingPointHelper : public AllStatic {
  public:
+
+  enum ArgLocation {
+    ARGS_ON_STACK,
+    ARGS_IN_REGISTERS
+  };
+
   // Code pattern for loading a floating point value. Input value must
   // be either a smi or a heap number object (fp value). Requirements:
   // operand in register number. Returns operand as floating point number
@@ -750,9 +764,16 @@
   static void LoadFloatOperand(MacroAssembler* masm, Register number);
   // Code pattern for loading floating point values. Input values must
   // be either smi or heap number objects (fp values). Requirements:
-  // operand_1 on TOS+1 , operand_2 on TOS+2; Returns operands as
-  // floating point numbers on FPU stack.
-  static void LoadFloatOperands(MacroAssembler* masm, Register scratch);
+  // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
+  // Returns operands as floating point numbers on FPU stack.
+  static void LoadFloatOperands(MacroAssembler* masm,
+                                Register scratch,
+                                ArgLocation arg_location = ARGS_ON_STACK);
+
+  // Similar to LoadFloatOperands but assumes that both operands are smis.
+  // Expects operands in edx, eax.
+  static void LoadFloatSmis(MacroAssembler* masm, Register scratch);
+
   // Test if operands are smi or number objects (fp). Requirements:
   // operand_1 in eax, operand_2 in edx; falls through on float
   // operands, jumps to the non_float label otherwise.
@@ -768,7 +789,11 @@
   // them into xmm0 and xmm1 if they are.  Jump to label not_numbers if
   // either operand is not a number.  Operands are in edx and eax.
   // Leaves operands unchanged.
-  static void LoadSse2Operands(MacroAssembler* masm, Label* not_numbers);
+  static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);
+
+  // Similar to LoadSSE2Operands but assumes that both operands are smis.
+  // Expects operands in edx, eax.
+  static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
 };
 
 
@@ -913,31 +938,6 @@
     return;
   }
 
-  // Set the flags based on the operation, type and loop nesting level.
-  GenericBinaryFlags flags;
-  switch (op) {
-    case Token::BIT_OR:
-    case Token::BIT_AND:
-    case Token::BIT_XOR:
-    case Token::SHL:
-    case Token::SHR:
-    case Token::SAR:
-      // Bit operations always assume they likely operate on Smis. Still only
-      // generate the inline Smi check code if this operation is part of a loop.
-      flags = (loop_nesting() > 0)
-              ? NO_SMI_CODE_IN_STUB
-              : NO_GENERIC_BINARY_FLAGS;
-      break;
-
-    default:
-      // By default only inline the Smi check code for likely smis if this
-      // operation is part of a loop.
-      flags = ((loop_nesting() > 0) && type->IsLikelySmi())
-              ? NO_SMI_CODE_IN_STUB
-              : NO_GENERIC_BINARY_FLAGS;
-      break;
-  }
-
   Result right = frame_->Pop();
   Result left = frame_->Pop();
 
@@ -971,7 +971,6 @@
   bool left_is_non_smi = left.is_constant() && !left.handle()->IsSmi();
   bool right_is_smi = right.is_constant() && right.handle()->IsSmi();
   bool right_is_non_smi = right.is_constant() && !right.handle()->IsSmi();
-  bool generate_no_smi_code = false;  // No smi code at all, inline or in stub.
 
   if (left_is_smi && right_is_smi) {
     // Compute the constant result at compile time, and leave it on the frame.
@@ -980,33 +979,31 @@
     if (FoldConstantSmis(op, left_int, right_int)) return;
   }
 
+  Result answer;
   if (left_is_non_smi || right_is_non_smi) {
-    // Set flag so that we go straight to the slow case, with no smi code.
-    generate_no_smi_code = true;
+    // Go straight to the slow case, with no smi code.
+    GenericBinaryOpStub stub(op, overwrite_mode, NO_SMI_CODE_IN_STUB);
+    answer = stub.GenerateCall(masm_, frame_, &left, &right);
   } else if (right_is_smi) {
-    ConstantSmiBinaryOperation(op, &left, right.handle(),
-                               type, false, overwrite_mode);
-    return;
+    answer = ConstantSmiBinaryOperation(op, &left, right.handle(),
+                                        type, false, overwrite_mode);
   } else if (left_is_smi) {
-    ConstantSmiBinaryOperation(op, &right, left.handle(),
-                               type, true, overwrite_mode);
-    return;
-  }
-
-  if (((flags & NO_SMI_CODE_IN_STUB) != 0) && !generate_no_smi_code) {
-    LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
+    answer = ConstantSmiBinaryOperation(op, &right, left.handle(),
+                                        type, true, overwrite_mode);
   } else {
-    frame_->Push(&left);
-    frame_->Push(&right);
-    // If we know the arguments aren't smis, use the binary operation stub
-    // that does not check for the fast smi case.
-    if (generate_no_smi_code) {
-      flags = NO_SMI_CODE_IN_STUB;
+    // Set the flags based on the operation, type and loop nesting level.
+    // Bit operations always assume they likely operate on Smis, but even
+    // then the inline Smi check code is only generated if this operation
+    // is part of a loop.  For all other operations the inline Smi check
+    // code is only generated for likely smis if the operation is part of
+    // a loop.
+    if (loop_nesting() > 0 && (Token::IsBitOp(op) || type->IsLikelySmi())) {
+      answer = LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
+    } else {
+      GenericBinaryOpStub stub(op, overwrite_mode, NO_GENERIC_BINARY_FLAGS);
+      answer = stub.GenerateCall(masm_, frame_, &left, &right);
     }
-    GenericBinaryOpStub stub(op, overwrite_mode, flags);
-    Result answer = frame_->CallStub(&stub, 2);
-    frame_->Push(&answer);
   }
+  frame_->Push(&answer);
 }
 
 
@@ -1093,10 +1090,11 @@
 
 // Implements a binary operation using a deferred code object and some
 // inline code to operate on smis quickly.
-void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
-                                             Result* left,
-                                             Result* right,
-                                             OverwriteMode overwrite_mode) {
+Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
+                                               Result* left,
+                                               Result* right,
+                                               OverwriteMode overwrite_mode) {
+  Result answer;
   // Special handling of div and mod because they use fixed registers.
   if (op == Token::DIV || op == Token::MOD) {
     // We need eax as the quotient register, edx as the remainder
@@ -1218,7 +1216,7 @@
       deferred->BindExit();
       left->Unuse();
       right->Unuse();
-      frame_->Push(&quotient);
+      answer = quotient;
     } else {
       ASSERT(op == Token::MOD);
       // Check for a negative zero result.  If the result is zero, and
@@ -1234,9 +1232,10 @@
       deferred->BindExit();
       left->Unuse();
       right->Unuse();
-      frame_->Push(&remainder);
+      answer = remainder;
     }
-    return;
+    ASSERT(answer.is_valid());
+    return answer;
   }
 
   // Special handling of shift operations because they use fixed
@@ -1257,7 +1256,7 @@
     frame_->Spill(ecx);
 
     // Use a fresh answer register to avoid spilling the left operand.
-    Result answer = allocator_->Allocate();
+    answer = allocator_->Allocate();
     ASSERT(answer.is_valid());
     // Check that both operands are smis using the answer register as a
     // temporary.
@@ -1321,8 +1320,8 @@
     deferred->BindExit();
     left->Unuse();
     right->Unuse();
-    frame_->Push(&answer);
-    return;
+    ASSERT(answer.is_valid());
+    return answer;
   }
 
   // Handle the other binary operations.
@@ -1331,7 +1330,7 @@
   // A newly allocated register answer is used to hold the answer.  The
   // registers containing left and right are not modified so they don't
   // need to be spilled in the fast case.
-  Result answer = allocator_->Allocate();
+  answer = allocator_->Allocate();
   ASSERT(answer.is_valid());
 
   // Perform the smi tag check.
@@ -1353,12 +1352,12 @@
   __ mov(answer.reg(), left->reg());
   switch (op) {
     case Token::ADD:
-      __ add(answer.reg(), Operand(right->reg()));  // Add optimistically.
+      __ add(answer.reg(), Operand(right->reg()));
       deferred->Branch(overflow);
       break;
 
     case Token::SUB:
-      __ sub(answer.reg(), Operand(right->reg()));  // Subtract optimistically.
+      __ sub(answer.reg(), Operand(right->reg()));
       deferred->Branch(overflow);
       break;
 
@@ -1406,7 +1405,8 @@
   deferred->BindExit();
   left->Unuse();
   right->Unuse();
-  frame_->Push(&answer);
+  ASSERT(answer.is_valid());
+  return answer;
 }
 
 
@@ -1575,36 +1575,34 @@
 }
 
 
-void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
-                                               Result* operand,
-                                               Handle<Object> value,
-                                               StaticType* type,
-                                               bool reversed,
-                                               OverwriteMode overwrite_mode) {
+Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
+                                                 Result* operand,
+                                                 Handle<Object> value,
+                                                 StaticType* type,
+                                                 bool reversed,
+                                                 OverwriteMode overwrite_mode) {
   // NOTE: This is an attempt to inline (a bit) more of the code for
   // some possible smi operations (like + and -) when (at least) one
   // of the operands is a constant smi.
   // Consumes the argument "operand".
-
   // TODO(199): Optimize some special cases of operations involving a
   // smi literal (multiply by 2, shift by 0, etc.).
   if (IsUnsafeSmi(value)) {
     Result unsafe_operand(value);
     if (reversed) {
-      LikelySmiBinaryOperation(op, &unsafe_operand, operand,
-                               overwrite_mode);
+      return LikelySmiBinaryOperation(op, &unsafe_operand, operand,
+                                      overwrite_mode);
     } else {
-      LikelySmiBinaryOperation(op, operand, &unsafe_operand,
-                               overwrite_mode);
+      return LikelySmiBinaryOperation(op, operand, &unsafe_operand,
+                                      overwrite_mode);
     }
-    ASSERT(!operand->is_valid());
-    return;
   }
 
   // Get the literal value.
   Smi* smi_value = Smi::cast(*value);
   int int_value = smi_value->value();
 
+  Result answer;
   switch (op) {
     case Token::ADD: {
       operand->ToRegister();
@@ -1627,13 +1625,12 @@
       __ test(operand->reg(), Immediate(kSmiTagMask));
       deferred->Branch(not_zero);
       deferred->BindExit();
-      frame_->Push(operand);
+      answer = *operand;
       break;
     }
 
     case Token::SUB: {
       DeferredCode* deferred = NULL;
-      Result answer;  // Only allocate a new register if reversed.
       if (reversed) {
         // The reversed case is only hit when the right operand is not a
         // constant.
@@ -1661,15 +1658,14 @@
       deferred->Branch(not_zero);
       deferred->BindExit();
       operand->Unuse();
-      frame_->Push(&answer);
       break;
     }
 
     case Token::SAR:
       if (reversed) {
         Result constant_operand(value);
-        LikelySmiBinaryOperation(op, &constant_operand, operand,
-                                 overwrite_mode);
+        answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+                                          overwrite_mode);
       } else {
         // Only the least significant 5 bits of the shift value are used.
         // In the slow case, this masking is done inside the runtime call.
@@ -1689,21 +1685,21 @@
           __ and_(operand->reg(), ~kSmiTagMask);
         }
         deferred->BindExit();
-        frame_->Push(operand);
+        answer = *operand;
       }
       break;
 
     case Token::SHR:
       if (reversed) {
         Result constant_operand(value);
-        LikelySmiBinaryOperation(op, &constant_operand, operand,
-                                 overwrite_mode);
+        answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+                                          overwrite_mode);
       } else {
         // Only the least significant 5 bits of the shift value are used.
         // In the slow case, this masking is done inside the runtime call.
         int shift_value = int_value & 0x1f;
         operand->ToRegister();
-        Result answer = allocator()->Allocate();
+        answer = allocator()->Allocate();
         ASSERT(answer.is_valid());
         DeferredInlineSmiOperation* deferred =
             new DeferredInlineSmiOperation(op,
@@ -1724,7 +1720,6 @@
         operand->Unuse();
         __ SmiTag(answer.reg());
         deferred->BindExit();
-        frame_->Push(&answer);
       }
       break;
 
@@ -1749,7 +1744,7 @@
         }
         operand->Unuse();
 
-        Result answer = allocator()->Allocate();
+        answer = allocator()->Allocate();
         DeferredInlineSmiOperationReversed* deferred =
             new DeferredInlineSmiOperationReversed(op,
                                                    answer.reg(),
@@ -1765,7 +1760,6 @@
         __ SmiTag(answer.reg());
 
         deferred->BindExit();
-        frame_->Push(&answer);
       } else {
         // Only the least significant 5 bits of the shift value are used.
         // In the slow case, this masking is done inside the runtime call.
@@ -1783,10 +1777,10 @@
           __ test(operand->reg(), Immediate(kSmiTagMask));
           deferred->Branch(not_zero);
           deferred->BindExit();
-          frame_->Push(operand);
+          answer = *operand;
         } else {
           // Use a fresh temporary for nonzero shift values.
-          Result answer = allocator()->Allocate();
+          answer = allocator()->Allocate();
           ASSERT(answer.is_valid());
           DeferredInlineSmiOperation* deferred =
               new DeferredInlineSmiOperation(op,
@@ -1808,7 +1802,6 @@
           deferred->Branch(overflow);
           deferred->BindExit();
           operand->Unuse();
-          frame_->Push(&answer);
         }
       }
       break;
@@ -1847,7 +1840,7 @@
         }
       }
       deferred->BindExit();
-      frame_->Push(operand);
+      answer = *operand;
       break;
     }
 
@@ -1873,7 +1866,7 @@
           __ and_(operand->reg(), (int_value << kSmiTagSize) - 1);
         }
         deferred->BindExit();
-        frame_->Push(operand);
+        answer = *operand;
         break;
       }
       // Fall through if we did not find a power of 2 on the right hand side!
@@ -1881,16 +1874,17 @@
     default: {
       Result constant_operand(value);
       if (reversed) {
-        LikelySmiBinaryOperation(op, &constant_operand, operand,
-                                 overwrite_mode);
+        answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+                                          overwrite_mode);
       } else {
-        LikelySmiBinaryOperation(op, operand, &constant_operand,
-                                 overwrite_mode);
+        answer = LikelySmiBinaryOperation(op, operand, &constant_operand,
+                                          overwrite_mode);
       }
       break;
     }
   }
-  ASSERT(!operand->is_valid());
+  ASSERT(answer.is_valid());
+  return answer;
 }
 
 
@@ -2311,20 +2305,29 @@
 }
 
 
-void CodeGenerator::CallApplyLazy(Property* apply,
+void CodeGenerator::CallApplyLazy(Expression* applicand,
                                   Expression* receiver,
                                   VariableProxy* arguments,
                                   int position) {
+  // An optimized implementation of expressions of the form
+  // x.apply(y, arguments).
+  // If the arguments object of the scope has not been allocated,
+  // and x.apply is Function.prototype.apply, this optimization
+  // just copies y and the arguments of the current function onto the
+  // stack, as receiver and arguments, and calls x.
+  // In the implementation comments, we call x the applicand
+  // and y the receiver.
   ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
   ASSERT(arguments->IsArguments());
 
-  JumpTarget slow, done;
-
-  // Load the apply function onto the stack. This will usually
+  // Load applicand.apply onto the stack. This will usually
   // give us a megamorphic load site. Not super, but it works.
-  Reference ref(this, apply);
-  ref.GetValue();
-  ASSERT(ref.type() == Reference::NAMED);
+  Load(applicand);
+  Handle<String> name = Factory::LookupAsciiSymbol("apply");
+  frame()->Push(name);
+  Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
+  __ nop();
+  frame()->Push(&answer);
 
   // Load the receiver and the existing arguments object onto the
   // expression stack. Avoid allocating the arguments object here.
@@ -2334,6 +2337,11 @@
   // Emit the source position information after having loaded the
   // receiver and the arguments.
   CodeForSourcePosition(position);
+  // Contents of frame at this point:
+  // Frame[0]: arguments object of the current function or the hole.
+  // Frame[1]: receiver
+  // Frame[2]: applicand.apply
+  // Frame[3]: applicand.
 
   // Check if the arguments object has been lazily allocated
   // already. If so, just use that instead of copying the arguments
@@ -2341,143 +2349,151 @@
   // named 'arguments' has been introduced.
   frame_->Dup();
   Result probe = frame_->Pop();
-  bool try_lazy = true;
-  if (probe.is_constant()) {
-    try_lazy = probe.handle()->IsTheHole();
-  } else {
-    __ cmp(Operand(probe.reg()), Immediate(Factory::the_hole_value()));
-    probe.Unuse();
-    slow.Branch(not_equal);
-  }
+  { VirtualFrame::SpilledScope spilled_scope;
+    Label slow, done;
+    bool try_lazy = true;
+    if (probe.is_constant()) {
+      try_lazy = probe.handle()->IsTheHole();
+    } else {
+      __ cmp(Operand(probe.reg()), Immediate(Factory::the_hole_value()));
+      probe.Unuse();
+      __ j(not_equal, &slow);
+    }
 
-  if (try_lazy) {
-    JumpTarget build_args;
+    if (try_lazy) {
+      Label build_args;
+      // Get rid of the arguments object probe.
+      frame_->Drop();  // Can be called on a spilled frame.
+      // Stack now has 3 elements on it.
+      // Contents of stack at this point:
+      // esp[0]: receiver
+      // esp[1]: applicand.apply
+      // esp[2]: applicand.
 
-    // Get rid of the arguments object probe.
-    frame_->Drop();
-
-    // Before messing with the execution stack, we sync all
-    // elements. This is bound to happen anyway because we're
-    // about to call a function.
-    frame_->SyncRange(0, frame_->element_count() - 1);
-
-    // Check that the receiver really is a JavaScript object.
-    { frame_->PushElementAt(0);
-      Result receiver = frame_->Pop();
-      receiver.ToRegister();
-      __ test(receiver.reg(), Immediate(kSmiTagMask));
-      build_args.Branch(zero);
-      Result tmp = allocator_->Allocate();
+      // Check that the receiver really is a JavaScript object.
+      __ mov(eax, Operand(esp, 0));
+      __ test(eax, Immediate(kSmiTagMask));
+      __ j(zero, &build_args);
       // We allow all JSObjects including JSFunctions.  As long as
       // JS_FUNCTION_TYPE is the last instance type and it is right
       // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
       // bound.
       ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
       ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
-      __ CmpObjectType(receiver.reg(), FIRST_JS_OBJECT_TYPE, tmp.reg());
-      build_args.Branch(less);
-    }
+      __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+      __ j(below, &build_args);
 
-    // Verify that we're invoking Function.prototype.apply.
-    { frame_->PushElementAt(1);
-      Result apply = frame_->Pop();
-      apply.ToRegister();
-      __ test(apply.reg(), Immediate(kSmiTagMask));
-      build_args.Branch(zero);
-      Result tmp = allocator_->Allocate();
-      __ CmpObjectType(apply.reg(), JS_FUNCTION_TYPE, tmp.reg());
-      build_args.Branch(not_equal);
-      __ mov(tmp.reg(),
-             FieldOperand(apply.reg(), JSFunction::kSharedFunctionInfoOffset));
+      // Check that applicand.apply is Function.prototype.apply.
+      __ mov(eax, Operand(esp, kPointerSize));
+      __ test(eax, Immediate(kSmiTagMask));
+      __ j(zero, &build_args);
+      __ CmpObjectType(eax, JS_FUNCTION_TYPE, ecx);
+      __ j(not_equal, &build_args);
+      __ mov(ecx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
       Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
-      __ cmp(FieldOperand(tmp.reg(), SharedFunctionInfo::kCodeOffset),
+      __ cmp(FieldOperand(ecx, SharedFunctionInfo::kCodeOffset),
              Immediate(apply_code));
-      build_args.Branch(not_equal);
+      __ j(not_equal, &build_args);
+
+      // Check that applicand is a function.
+      __ mov(edi, Operand(esp, 2 * kPointerSize));
+      __ test(edi, Immediate(kSmiTagMask));
+      __ j(zero, &build_args);
+      __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+      __ j(not_equal, &build_args);
+
+      // Copy the arguments to this function possibly from the
+      // adaptor frame below it.
+      Label invoke, adapted;
+      __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+      __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+      __ cmp(Operand(ecx),
+             Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+      __ j(equal, &adapted);
+
+      // No arguments adaptor frame. Copy fixed number of arguments.
+      __ mov(eax, Immediate(scope_->num_parameters()));
+      for (int i = 0; i < scope_->num_parameters(); i++) {
+        __ push(frame_->ParameterAt(i));
+      }
+      __ jmp(&invoke);
+
+      // Arguments adaptor frame present. Copy arguments from there, but
+      // avoid copying too many arguments to avoid stack overflows.
+      __ bind(&adapted);
+      static const uint32_t kArgumentsLimit = 1 * KB;
+      __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+      __ SmiUntag(eax);
+      __ mov(ecx, Operand(eax));
+      __ cmp(eax, kArgumentsLimit);
+      __ j(above, &build_args);
+
+      // Loop through the arguments pushing them onto the execution
+      // stack. We don't inform the virtual frame of the push, so we don't
+      // have to worry about getting rid of the elements from the virtual
+      // frame.
+      Label loop;
+      // ecx is a small non-negative integer, due to the test above.
+      __ test(ecx, Operand(ecx));
+      __ j(zero, &invoke);
+      __ bind(&loop);
+      __ push(Operand(edx, ecx, times_pointer_size, 1 * kPointerSize));
+      __ dec(ecx);
+      __ j(not_zero, &loop);
+
+      // Invoke the function.
+      __ bind(&invoke);
+      ParameterCount actual(eax);
+      __ InvokeFunction(edi, actual, CALL_FUNCTION);
+      // Drop applicand.apply and applicand from the stack, and push
+      // the result of the function call, but leave the spilled frame
+      // unchanged, with 3 elements, so it is correct when we compile the
+      // slow-case code.
+      __ add(Operand(esp), Immediate(2 * kPointerSize));
+      __ push(eax);
+      // Stack now has 1 element:
+      //   esp[0]: result
+      __ jmp(&done);
+
+      // Slow-case: Allocate the arguments object since we know it isn't
+      // there, and fall through to the slow case where we call
+      // applicand.apply.
+      __ bind(&build_args);
+      // Stack now has 3 elements, because we have jumped here from points
+      // where:
+      // esp[0]: receiver
+      // esp[1]: applicand.apply
+      // esp[2]: applicand.
+
+      // StoreArgumentsObject requires a correct frame, and may modify it.
+      Result arguments_object = StoreArgumentsObject(false);
+      frame_->SpillAll();
+      arguments_object.ToRegister();
+      frame_->EmitPush(arguments_object.reg());
+      arguments_object.Unuse();
+      // Stack and frame now have 4 elements.
+      __ bind(&slow);
     }
 
-    // Get the function receiver from the stack. Check that it
-    // really is a function.
-    __ mov(edi, Operand(esp, 2 * kPointerSize));
-    __ test(edi, Immediate(kSmiTagMask));
-    build_args.Branch(zero);
-    __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
-    build_args.Branch(not_equal);
+    // Generic computation of x.apply(y, args) with no special optimization.
+    // Flip applicand.apply and applicand on the stack, so
+    // applicand looks like the receiver of the applicand.apply call.
+    // Then process it as a normal function call.
+    __ mov(eax, Operand(esp, 3 * kPointerSize));
+    __ mov(ebx, Operand(esp, 2 * kPointerSize));
+    __ mov(Operand(esp, 2 * kPointerSize), eax);
+    __ mov(Operand(esp, 3 * kPointerSize), ebx);
 
-    // Copy the arguments to this function possibly from the
-    // adaptor frame below it.
-    Label invoke, adapted;
-    __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-    __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
-    __ cmp(Operand(ecx),
-           Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-    __ j(equal, &adapted);
-
-    // No arguments adaptor frame. Copy fixed number of arguments.
-    __ mov(eax, Immediate(scope_->num_parameters()));
-    for (int i = 0; i < scope_->num_parameters(); i++) {
-      __ push(frame_->ParameterAt(i));
-    }
-    __ jmp(&invoke);
-
-    // Arguments adaptor frame present. Copy arguments from there, but
-    // avoid copying too many arguments to avoid stack overflows.
-    __ bind(&adapted);
-    static const uint32_t kArgumentsLimit = 1 * KB;
-    __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-    __ SmiUntag(eax);
-    __ mov(ecx, Operand(eax));
-    __ cmp(eax, kArgumentsLimit);
-    build_args.Branch(above);
-
-    // Loop through the arguments pushing them onto the execution
-    // stack. We don't inform the virtual frame of the push, so we don't
-    // have to worry about getting rid of the elements from the virtual
-    // frame.
-    Label loop;
-    __ bind(&loop);
-    __ test(ecx, Operand(ecx));
-    __ j(zero, &invoke);
-    __ push(Operand(edx, ecx, times_4, 1 * kPointerSize));
-    __ dec(ecx);
-    __ jmp(&loop);
-
-    // Invoke the function. The virtual frame knows about the receiver
-    // so make sure to forget that explicitly.
-    __ bind(&invoke);
-    ParameterCount actual(eax);
-    __ InvokeFunction(edi, actual, CALL_FUNCTION);
-    frame_->Forget(1);
-    Result result = allocator()->Allocate(eax);
-    frame_->SetElementAt(0, &result);
-    done.Jump();
-
-    // Slow-case: Allocate the arguments object since we know it isn't
-    // there, and fall-through to the slow-case where we call
-    // Function.prototype.apply.
-    build_args.Bind();
-    Result arguments_object = StoreArgumentsObject(false);
-    frame_->Push(&arguments_object);
-    slow.Bind();
-  }
-
-  // Flip the apply function and the function to call on the stack, so
-  // the function looks like the receiver of the apply call. This way,
-  // the generic Function.prototype.apply implementation can deal with
-  // the call like it usually does.
-  Result a2 = frame_->Pop();
-  Result a1 = frame_->Pop();
-  Result ap = frame_->Pop();
-  Result fn = frame_->Pop();
-  frame_->Push(&ap);
-  frame_->Push(&fn);
-  frame_->Push(&a1);
-  frame_->Push(&a2);
-  CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
-  Result res = frame_->CallStub(&call_function, 3);
-  frame_->Push(&res);
-
-  // All done. Restore context register after call.
-  if (try_lazy) done.Bind();
+    CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
+    Result res = frame_->CallStub(&call_function, 3);
+    // The function and its two arguments have been dropped.
+    frame_->Drop(1);  // Drop the receiver as well.
+    res.ToRegister();
+    frame_->EmitPush(res.reg());
+    // Stack now has 1 element:
+    //   esp[0]: result
+    if (try_lazy) __ bind(&done);
+  }  // End of spilled scope.
+  // Restore the context register after a call.
   frame_->RestoreContextRegister();
 }
 
@@ -3517,17 +3533,13 @@
     if (!each.is_illegal()) {
       if (each.size() > 0) {
         frame_->EmitPush(frame_->ElementAt(each.size()));
-      }
-      // If the reference was to a slot we rely on the convenient property
-      // that it doesn't matter whether a value (eg, ebx pushed above) is
-      // right on top of or right underneath a zero-sized reference.
-      each.SetValue(NOT_CONST_INIT);
-      if (each.size() > 0) {
-        // It's safe to pop the value lying on top of the reference before
-        // unloading the reference itself (which preserves the top of stack,
-        // ie, now the topmost value of the non-zero sized reference), since
-        // we will discard the top of stack after unloading the reference
-        // anyway.
+        each.SetValue(NOT_CONST_INIT);
+        frame_->Drop(2);
+      } else {
+        // If the reference was to a slot we rely on the convenient property
+        // that it doesn't matter whether a value (eg, ebx pushed above) is
+        // right on top of or right underneath a zero-sized reference.
+        each.SetValue(NOT_CONST_INIT);
         frame_->Drop();
       }
     }
@@ -3535,10 +3547,6 @@
   // Unloading a reference may leave the frame in an unspilled state.
   frame_->SpillAll();
 
-  // Discard the i'th entry pushed above or else the remainder of the
-  // reference, whichever is currently on top of the stack.
-  frame_->Drop();
-
   // Body.
   CheckStack();  // TODO(1222600): ignore if body contains calls.
   VisitAndSpill(node->body());
@@ -4588,9 +4596,12 @@
 
 
 void CodeGenerator::VisitAssignment(Assignment* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
   Comment cmnt(masm_, "[ Assignment");
 
-  { Reference target(this, node->target());
+  { Reference target(this, node->target(), node->is_compound());
     if (target.is_illegal()) {
       // Fool the virtual frame into thinking that we left the assignment's
       // value on the frame.
@@ -4612,12 +4623,27 @@
       frame_->PushElementAt(target.size() - 1);
       Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
     }
+    if (node->ends_initialization_block()) {
+      // Add an extra copy of the receiver to the frame, so that it can be
+      // converted back to fast case after the assignment.
+      ASSERT(target.type() == Reference::NAMED ||
+             target.type() == Reference::KEYED);
+      if (target.type() == Reference::NAMED) {
+        // Dup the target receiver on the stack.
+        frame_->Dup();
+      } else {
+        ASSERT(target.type() == Reference::KEYED);
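+        // Duplicate the receiver below the key:
+        // [receiver, key] becomes [receiver, receiver, key].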
+        Result temp = frame_->Pop();
+        frame_->Dup();
+        frame_->Push(&temp);
+      }
+    }
     if (node->op() == Token::ASSIGN ||
         node->op() == Token::INIT_VAR ||
         node->op() == Token::INIT_CONST) {
       Load(node->value());
 
-    } else {
+    } else {  // Assignment is a compound assignment.
       Literal* literal = node->value()->AsLiteral();
       bool overwrite_value =
           (node->value()->AsBinaryOperation() != NULL &&
@@ -4643,6 +4669,7 @@
         var->mode() == Variable::CONST &&
         node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
       // Assignment ignored - leave the value on the stack.
+      UnloadReference(&target);
     } else {
       CodeForSourcePosition(node->position());
       if (node->op() == Token::INIT_CONST) {
@@ -4654,17 +4681,20 @@
         target.SetValue(NOT_CONST_INIT);
       }
       if (node->ends_initialization_block()) {
-        ASSERT(target.type() == Reference::NAMED ||
-               target.type() == Reference::KEYED);
+        ASSERT(target.type() == Reference::UNLOADED);
         // End of initialization block. Revert to fast case.  The
-        // argument to the runtime call is the receiver, which is the
-        // first value pushed as part of the reference, which is below
-        // the lhs value.
-        frame_->PushElementAt(target.size());
+        // argument to the runtime call is the extra copy of the receiver,
+        // which is below the value of the assignment.
+        // Swap the receiver and the value of the assignment expression.
+        Result lhs = frame_->Pop();
+        Result receiver = frame_->Pop();
+        frame_->Push(&lhs);
+        frame_->Push(&receiver);
         Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
       }
     }
   }
+  ASSERT(frame_->height() == original_height + 1);
 }
 
 
@@ -4827,7 +4857,7 @@
           args->at(1)->AsVariableProxy()->IsArguments()) {
         // Use the optimized Function.prototype.apply that avoids
         // allocating lazily allocated arguments objects.
-        CallApplyLazy(property,
+        CallApplyLazy(property->obj(),
                       args->at(0),
                       args->at(1)->AsVariableProxy(),
                       node->position());
@@ -4860,16 +4890,21 @@
       // -------------------------------------------
 
       // Load the function to call from the property through a reference.
-      Reference ref(this, property);
-      ref.GetValue();
 
       // Pass receiver to called function.
       if (property->is_synthetic()) {
+        Reference ref(this, property);
+        ref.GetValue();
         // Use global object as receiver.
         LoadGlobalReceiver();
       } else {
-        // The reference's size is non-negative.
-        frame_->PushElementAt(ref.size());
+        Load(property->obj());
+        Load(property->key());
+        Result function = EmitKeyedLoad(false);
+        frame_->Drop();  // Key.
+        Result receiver = frame_->Pop();
+        frame_->Push(&function);
+        frame_->Push(&receiver);
       }
 
       // Call the function.
@@ -5183,6 +5218,26 @@
 }
 
 
+void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+  Load(args->at(0));
+  Result obj = frame_->Pop();
+  obj.ToRegister();
+  __ test(obj.reg(), Immediate(kSmiTagMask));
+  destination()->false_target()->Branch(zero);
+  Result temp = allocator()->Allocate();
+  ASSERT(temp.is_valid());
+  __ mov(temp.reg(),
+         FieldOperand(obj.reg(), HeapObject::kMapOffset));
+  __ movzx_b(temp.reg(),
+             FieldOperand(temp.reg(), Map::kBitFieldOffset));
+  __ test(temp.reg(), Immediate(1 << Map::kIsUndetectable));
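+  // Undetectable objects masquerade as undefined; the classic example is
+  // document.all in browser embeddings.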
+  obj.Unuse();
+  temp.Unuse();
+  destination()->Split(not_zero);
+}
+
+
 void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 0);
 
@@ -5760,7 +5815,9 @@
   // value will be in the frame to be spilled.
   if (is_postfix) frame_->Push(Smi::FromInt(0));
 
-  { Reference target(this, node->expression());
+  // A constant reference is never written to, so it is not treated as a
+  // compound assignment reference.
+  { Reference target(this, node->expression(), !is_const);
     if (target.is_illegal()) {
       // Spoof the virtual frame to have the expected height (one higher
       // than on entry).
@@ -6363,6 +6420,114 @@
 }
 
 
+Result CodeGenerator::EmitKeyedLoad(bool is_global) {
+  Comment cmnt(masm_, "[ Load from keyed Property");
+  // Inline array load code if inside of a loop.  We do not know
+  // the receiver map yet, so we initially generate the code with
+  // a check against an invalid map.  In the inline cache code, we
+  // patch the map check if appropriate.
+  if (loop_nesting() > 0) {
+    Comment cmnt(masm_, "[ Inlined load from keyed Property");
+
+    Result key = frame_->Pop();
+    Result receiver = frame_->Pop();
+    key.ToRegister();
+    receiver.ToRegister();
+
+    // Use a fresh temporary to load the elements without destroying
+    // the receiver which is needed for the deferred slow case.
+    Result elements = allocator()->Allocate();
+    ASSERT(elements.is_valid());
+
+    // Use a fresh temporary for the index and later the loaded
+    // value.
+    Result index = allocator()->Allocate();
+    ASSERT(index.is_valid());
+
+    DeferredReferenceGetKeyedValue* deferred =
+        new DeferredReferenceGetKeyedValue(index.reg(),
+                                           receiver.reg(),
+                                           key.reg(),
+                                           is_global);
+
+    // Check that the receiver is not a smi (only needed if this
+    // is not a load from the global context) and that it has the
+    // expected map.
+    if (!is_global) {
+      __ test(receiver.reg(), Immediate(kSmiTagMask));
+      deferred->Branch(zero);
+    }
+
+    // Initially, use an invalid map. The map is patched in the IC
+    // initialization code.
+    __ bind(deferred->patch_site());
+    // Use masm-> here instead of the double underscore macro since extra
+    // coverage code can interfere with the patching.
+    masm_->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+              Immediate(Factory::null_value()));
+    deferred->Branch(not_equal);
+
+    // Check that the key is a smi.
+    __ test(key.reg(), Immediate(kSmiTagMask));
+    deferred->Branch(not_zero);
+
+    // Get the elements array from the receiver and check that it
+    // is not a dictionary.
+    __ mov(elements.reg(),
+           FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+    __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
+           Immediate(Factory::fixed_array_map()));
+    deferred->Branch(not_equal);
+
+    // Shift the key to get the actual index value and check that
+    // it is within bounds.
+    __ mov(index.reg(), key.reg());
+    __ SmiUntag(index.reg());
+    __ cmp(index.reg(),
+           FieldOperand(elements.reg(), FixedArray::kLengthOffset));
+    deferred->Branch(above_equal);
+
+    // Load and check that the result is not the hole.  We could
+    // reuse the index or elements register for the value.
+    //
+    // TODO(206): Consider whether it makes sense to try some
+    // heuristic about which register to reuse.  For example, if
+    // one is eax, then we can reuse that one because the value
+    // coming from the deferred code will be in eax.
+    Result value = index;
+    __ mov(value.reg(), Operand(elements.reg(),
+                                index.reg(),
+                                times_4,
+                                FixedArray::kHeaderSize - kHeapObjectTag));
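+    // The element address is elements + index * 4 + FixedArray::kHeaderSize
+    // - kHeapObjectTag; the -kHeapObjectTag compensates for the tag bit
+    // carried by every heap object pointer.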
+    elements.Unuse();
+    index.Unuse();
+    __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
+    deferred->Branch(equal);
+    __ IncrementCounter(&Counters::keyed_load_inline, 1);
+
+    deferred->BindExit();
+    // Restore the receiver and key to the frame and push the
+    // result on top of it.
+    frame_->Push(&receiver);
+    frame_->Push(&key);
+    return value;
+  } else {
+    Comment cmnt(masm_, "[ Load from keyed Property");
+    RelocInfo::Mode mode = is_global
+        ? RelocInfo::CODE_TARGET_CONTEXT
+        : RelocInfo::CODE_TARGET;
+    Result answer = frame_->CallKeyedLoadIC(mode);
+    // Make sure that we do not have a test instruction after the
+    // call.  A test instruction after the call is used to
+    // indicate that we have generated an inline version of the
+    // keyed load.  The explicit nop instruction is here because
+    // the push that follows might be peep-hole optimized away.
+    __ nop();
+    return answer;
+  }
+}
+
+
 #undef __
 #define __ ACCESS_MASM(masm)
 
@@ -6475,121 +6640,21 @@
     }
 
     case KEYED: {
-      Comment cmnt(masm, "[ Load from keyed Property");
       Variable* var = expression_->AsVariableProxy()->AsVariable();
       bool is_global = var != NULL;
       ASSERT(!is_global || var->is_global());
-
-      // Inline array load code if inside of a loop.  We do not know
-      // the receiver map yet, so we initially generate the code with
-      // a check against an invalid map.  In the inline cache code, we
-      // patch the map check if appropriate.
-      if (cgen_->loop_nesting() > 0) {
-        Comment cmnt(masm, "[ Inlined load from keyed Property");
-
-        Result key = cgen_->frame()->Pop();
-        Result receiver = cgen_->frame()->Pop();
-        key.ToRegister();
-        receiver.ToRegister();
-
-        // Use a fresh temporary to load the elements without destroying
-        // the receiver which is needed for the deferred slow case.
-        Result elements = cgen_->allocator()->Allocate();
-        ASSERT(elements.is_valid());
-
-        // Use a fresh temporary for the index and later the loaded
-        // value.
-        Result index = cgen_->allocator()->Allocate();
-        ASSERT(index.is_valid());
-
-        DeferredReferenceGetKeyedValue* deferred =
-            new DeferredReferenceGetKeyedValue(index.reg(),
-                                               receiver.reg(),
-                                               key.reg(),
-                                               is_global);
-
-        // Check that the receiver is not a smi (only needed if this
-        // is not a load from the global context) and that it has the
-        // expected map.
-        if (!is_global) {
-          __ test(receiver.reg(), Immediate(kSmiTagMask));
-          deferred->Branch(zero);
-        }
-
-        // Initially, use an invalid map. The map is patched in the IC
-        // initialization code.
-        __ bind(deferred->patch_site());
-        // Use masm-> here instead of the double underscore macro since extra
-        // coverage code can interfere with the patching.
-        masm->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
-                  Immediate(Factory::null_value()));
-        deferred->Branch(not_equal);
-
-        // Check that the key is a smi.
-        __ test(key.reg(), Immediate(kSmiTagMask));
-        deferred->Branch(not_zero);
-
-        // Get the elements array from the receiver and check that it
-        // is not a dictionary.
-        __ mov(elements.reg(),
-               FieldOperand(receiver.reg(), JSObject::kElementsOffset));
-        __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
-               Immediate(Factory::fixed_array_map()));
-        deferred->Branch(not_equal);
-
-        // Shift the key to get the actual index value and check that
-        // it is within bounds.
-        __ mov(index.reg(), key.reg());
-        __ SmiUntag(index.reg());
-        __ cmp(index.reg(),
-               FieldOperand(elements.reg(), FixedArray::kLengthOffset));
-        deferred->Branch(above_equal);
-
-        // Load and check that the result is not the hole.  We could
-        // reuse the index or elements register for the value.
-        //
-        // TODO(206): Consider whether it makes sense to try some
-        // heuristic about which register to reuse.  For example, if
-        // one is eax, the we can reuse that one because the value
-        // coming from the deferred code will be in eax.
-        Result value = index;
-        __ mov(value.reg(), Operand(elements.reg(),
-                                    index.reg(),
-                                    times_4,
-                                    FixedArray::kHeaderSize - kHeapObjectTag));
-        elements.Unuse();
-        index.Unuse();
-        __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
-        deferred->Branch(equal);
-        __ IncrementCounter(&Counters::keyed_load_inline, 1);
-
-        deferred->BindExit();
-        // Restore the receiver and key to the frame and push the
-        // result on top of it.
-        cgen_->frame()->Push(&receiver);
-        cgen_->frame()->Push(&key);
-        cgen_->frame()->Push(&value);
-
-      } else {
-        Comment cmnt(masm, "[ Load from keyed Property");
-        RelocInfo::Mode mode = is_global
-                               ? RelocInfo::CODE_TARGET_CONTEXT
-                               : RelocInfo::CODE_TARGET;
-        Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
-        // Make sure that we do not have a test instruction after the
-        // call.  A test instruction after the call is used to
-        // indicate that we have generated an inline version of the
-        // keyed load.  The explicit nop instruction is here because
-        // the push that follows might be peep-hole optimized away.
-        __ nop();
-        cgen_->frame()->Push(&answer);
-      }
+      Result value = cgen_->EmitKeyedLoad(is_global);
+      cgen_->frame()->Push(&value);
       break;
     }
 
     default:
       UNREACHABLE();
   }
+
+  if (!persist_after_get_) {
+    cgen_->UnloadReference(this);
+  }
 }
 
 
@@ -6623,6 +6688,9 @@
     ASSERT(slot->type() == Slot::LOCAL);
     cgen_->frame()->TakeLocalAt(slot->index());
   }
+
+  ASSERT(persist_after_get_);
+  // Do not unload the reference, because it is used in SetValue.
 }
 
 
@@ -6752,6 +6820,7 @@
     default:
       UNREACHABLE();
   }
+  cgen_->UnloadReference(this);
 }
 
 
@@ -7062,143 +7131,335 @@
 }
 
 
+Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm,
+                                         VirtualFrame* frame,
+                                         Result* left,
+                                         Result* right) {
+  if (ArgsInRegistersSupported()) {
+    SetArgsInRegisters();
+    return frame->CallStub(this, left, right);
+  } else {
+    frame->Push(left);
+    frame->Push(right);
+    return frame->CallStub(this, 2);
+  }
+}
+
+
 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
-  // Perform fast-case smi code for the operation (eax <op> ebx) and
-  // leave result in register eax.
+  // 1. Move arguments into edx, eax except for DIV and MOD, which need the
+  // dividend in eax and edx free for the division.  Use eax, ebx for those.
+  Comment load_comment(masm, "-- Load arguments");
+  Register left = edx;
+  Register right = eax;
+  if (op_ == Token::DIV || op_ == Token::MOD) {
+    left = eax;
+    right = ebx;
+    if (HasArgsInRegisters()) {
+      __ mov(ebx, eax);
+      __ mov(eax, edx);
+    }
+  }
+  if (!HasArgsInRegisters()) {
+    __ mov(right, Operand(esp, 1 * kPointerSize));
+    __ mov(left, Operand(esp, 2 * kPointerSize));
+  }
 
-  // Prepare the smi check of both operands by or'ing them together
-  // before checking against the smi mask.
-  __ mov(ecx, Operand(ebx));
-  __ or_(ecx, Operand(eax));
-
+  // 2. Prepare the smi check of both operands by oring them together.
+  Comment smi_check_comment(masm, "-- Smi check arguments");
+  Label not_smis;
+  Register combined = ecx;
+  ASSERT(!left.is(combined) && !right.is(combined));
   switch (op_) {
+    case Token::BIT_OR:
+      // Perform the operation into eax and smi check the result.  Preserve
+      // eax in case the result is not a smi.
+      ASSERT(!left.is(ecx) && !right.is(ecx));
+      __ mov(ecx, right);
+      __ or_(right, Operand(left));  // Bitwise or is commutative.
+      combined = right;
+      break;
+
+    case Token::BIT_XOR:
+    case Token::BIT_AND:
     case Token::ADD:
-      __ add(eax, Operand(ebx));  // add optimistically
-      __ j(overflow, slow, not_taken);
-      break;
-
     case Token::SUB:
-      __ sub(eax, Operand(ebx));  // subtract optimistically
-      __ j(overflow, slow, not_taken);
-      break;
-
+    case Token::MUL:
     case Token::DIV:
     case Token::MOD:
-      // Sign extend eax into edx:eax.
-      __ cdq();
-      // Check for 0 divisor.
-      __ test(ebx, Operand(ebx));
-      __ j(zero, slow, not_taken);
+      __ mov(combined, right);
+      __ or_(combined, Operand(left));
+      break;
+
+    case Token::SHL:
+    case Token::SAR:
+    case Token::SHR:
+      // Move the right operand into ecx for the shift operation; use eax
+      // as the smi check register.
+      ASSERT(!left.is(ecx) && !right.is(ecx));
+      __ mov(ecx, right);
+      __ or_(right, Operand(left));
+      combined = right;
       break;
 
     default:
-      // Fall-through to smi check.
       break;
   }
 
-  // Perform the actual smi check.
-  ASSERT(kSmiTag == 0);  // adjust zero check if not the case
-  __ test(ecx, Immediate(kSmiTagMask));
-  __ j(not_zero, slow, not_taken);
+  // 3. Perform the smi check of the operands.
+  ASSERT(kSmiTag == 0);  // Adjust zero check if not the case.
+  __ test(combined, Immediate(kSmiTagMask));
+  __ j(not_zero, &not_smis, not_taken);
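+  // Example: with kSmiTag == 0 and kSmiTagSize == 1, the smi 3 is encoded
+  // as 6 (0b110) and every heap pointer carries tag bit 1, so the low bit
+  // of the combined value is set iff at least one operand is not a smi.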
 
+  // 4. Operands are both smis, perform the operation leaving the result in
+  // eax and check the result if necessary.
+  Comment perform_smi(masm, "-- Perform smi operation");
+  Label use_fp_on_smis;
   switch (op_) {
+    case Token::BIT_OR:
+      // Nothing to do.
+      break;
+
+    case Token::BIT_XOR:
+      ASSERT(right.is(eax));
+      __ xor_(right, Operand(left));  // Bitwise xor is commutative.
+      break;
+
+    case Token::BIT_AND:
+      ASSERT(right.is(eax));
+      __ and_(right, Operand(left));  // Bitwise and is commutative.
+      break;
+
+    case Token::SHL:
+      // Remove tags from operands (but keep sign).
+      __ SmiUntag(left);
+      __ SmiUntag(ecx);
+      // Perform the operation.
+      __ shl_cl(left);
+      // Check that the *signed* result fits in a smi.
+      __ cmp(left, 0xc0000000);
+      __ j(sign, &use_fp_on_smis, not_taken);
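+      // A 32-bit value fits in a smi iff its two most significant bits
+      // agree, i.e. it lies in [-2^30, 2^30 - 1]. Subtracting 0xc0000000
+      // sets the sign flag exactly when that range is violated.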
+      // Tag the result and store it in register eax.
+      __ SmiTag(left);
+      __ mov(eax, left);
+      break;
+
+    case Token::SAR:
+      // Remove tags from operands (but keep sign).
+      __ SmiUntag(left);
+      __ SmiUntag(ecx);
+      // Perform the operation.
+      __ sar_cl(left);
+      // Tag the result and store it in register eax.
+      __ SmiTag(left);
+      __ mov(eax, left);
+      break;
+
+    case Token::SHR:
+      // Remove tags from operands (but keep sign).
+      __ SmiUntag(left);
+      __ SmiUntag(ecx);
+      // Perform the operation.
+      __ shr_cl(left);
+      // Check that the *unsigned* result fits in a smi.
+      // Neither of the two high-order bits can be set:
+      // - 0x80000000: high bit would be lost when smi tagging.
+      // - 0x40000000: this number would convert to negative when
+      //   smi tagging. These two cases can only happen with shifts
+      //   by 0 or 1 when handed a valid smi.
+      __ test(left, Immediate(0xc0000000));
+      __ j(not_zero, slow, not_taken);
+      // Tag the result and store it in register eax.
+      __ SmiTag(left);
+      __ mov(eax, left);
+      break;
+
     case Token::ADD:
+      ASSERT(right.is(eax));
+      __ add(right, Operand(left));  // Addition is commutative.
+      __ j(overflow, &use_fp_on_smis, not_taken);
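+      // Adding two tagged smis adds the tagged values directly, since
+      // 2a + 2b == 2(a + b); a signed overflow means the untagged sum is
+      // outside the smi range.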
+      break;
+
     case Token::SUB:
-      // Do nothing here.
+      __ sub(left, Operand(right));
+      __ j(overflow, &use_fp_on_smis, not_taken);
+      __ mov(eax, left);
       break;
 
     case Token::MUL:
       // If the smi tag is 0 we can just leave the tag on one operand.
-      ASSERT(kSmiTag == 0);  // adjust code below if not the case
+      ASSERT(kSmiTag == 0);  // Adjust code below if not the case.
+      // We can't revert the multiplication if the result is not a smi,
+      // so save the right operand.
+      __ mov(ebx, right);
       // Remove tag from one of the operands (but keep sign).
-      __ SmiUntag(eax);
+      __ SmiUntag(right);
       // Do multiplication.
-      __ imul(eax, Operand(ebx));  // multiplication of smis; result in eax
-      // Go slow on overflows.
-      __ j(overflow, slow, not_taken);
-      // Check for negative zero result.
-      __ NegativeZeroTest(eax, ecx, slow);  // use ecx = x | y
+      __ imul(right, Operand(left));  // Multiplication is commutative.
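+      // With only one operand untagged, (2a) * b == 2 * (a * b), so the
+      // product is already a correctly tagged smi when it fits.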
+      __ j(overflow, &use_fp_on_smis, not_taken);
+      // Check for negative zero result.  Use combined = left | right.
+      __ NegativeZeroTest(right, combined, &use_fp_on_smis);
       break;
 
     case Token::DIV:
-      // Divide edx:eax by ebx.
-      __ idiv(ebx);
-      // Check for the corner case of dividing the most negative smi
-      // by -1. We cannot use the overflow flag, since it is not set
-      // by idiv instruction.
+      // We can't revert the division if the result is not a smi, so
+      // save the left operand.
+      __ mov(edi, left);
+      // Check for 0 divisor.
+      __ test(right, Operand(right));
+      __ j(zero, &use_fp_on_smis, not_taken);
+      // Sign extend left into edx:eax.
+      ASSERT(left.is(eax));
+      __ cdq();
+      // Divide edx:eax by right.
+      __ idiv(right);
+      // Check for the corner case of dividing the most negative smi by
+      // -1. We cannot use the overflow flag, since it is not set by idiv
+      // instruction.
       ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
       __ cmp(eax, 0x40000000);
-      __ j(equal, slow);
-      // Check for negative zero result.
-      __ NegativeZeroTest(eax, ecx, slow);  // use ecx = x | y
+      __ j(equal, &use_fp_on_smis);
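+      // Example: kSmiMinValue is -2^30, and -2^30 / -1 == 2^30
+      // (0x40000000 untagged), which is one past kSmiMaxValue and thus
+      // not representable as a smi.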
+      // Check for negative zero result.  Use combined = left | right.
+      __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
       // Check that the remainder is zero.
       __ test(edx, Operand(edx));
-      __ j(not_zero, slow);
+      __ j(not_zero, &use_fp_on_smis);
       // Tag the result and store it in register eax.
       __ SmiTag(eax);
       break;
 
     case Token::MOD:
-      // Divide edx:eax by ebx.
-      __ idiv(ebx);
-      // Check for negative zero result.
-      __ NegativeZeroTest(edx, ecx, slow);  // use ecx = x | y
+      // Check for 0 divisor.
+      __ test(right, Operand(right));
+      __ j(zero, &not_smis, not_taken);
+
+      // Sign extend left into edx:eax.
+      ASSERT(left.is(eax));
+      __ cdq();
+      // Divide edx:eax by right.
+      __ idiv(right);
+      // Check for negative zero result.  Use combined = left | right.
+      __ NegativeZeroTest(edx, combined, slow);
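+      // A zero remainder with a negative operand may stand for JavaScript's
+      // -0 (e.g. -6 % 3), which has no smi encoding, so fall back to the
+      // slow case for it.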
       // Move remainder to register eax.
-      __ mov(eax, Operand(edx));
-      break;
-
-    case Token::BIT_OR:
-      __ or_(eax, Operand(ebx));
-      break;
-
-    case Token::BIT_AND:
-      __ and_(eax, Operand(ebx));
-      break;
-
-    case Token::BIT_XOR:
-      __ xor_(eax, Operand(ebx));
-      break;
-
-    case Token::SHL:
-    case Token::SHR:
-    case Token::SAR:
-      // Move the second operand into register ecx.
-      __ mov(ecx, Operand(ebx));
-      // Remove tags from operands (but keep sign).
-      __ SmiUntag(eax);
-      __ SmiUntag(ecx);
-      // Perform the operation.
-      switch (op_) {
-        case Token::SAR:
-          __ sar_cl(eax);
-          // No checks of result necessary
-          break;
-        case Token::SHR:
-          __ shr_cl(eax);
-          // Check that the *unsigned* result fits in a smi.
-          // Neither of the two high-order bits can be set:
-          // - 0x80000000: high bit would be lost when smi tagging.
-          // - 0x40000000: this number would convert to negative when
-          // Smi tagging these two cases can only happen with shifts
-          // by 0 or 1 when handed a valid smi.
-          __ test(eax, Immediate(0xc0000000));
-          __ j(not_zero, slow, not_taken);
-          break;
-        case Token::SHL:
-          __ shl_cl(eax);
-          // Check that the *signed* result fits in a smi.
-          __ cmp(eax, 0xc0000000);
-          __ j(sign, slow, not_taken);
-          break;
-        default:
-          UNREACHABLE();
-      }
-      // Tag the result and store it in register eax.
-      __ SmiTag(eax);
+      __ mov(eax, edx);
       break;
 
     default:
       UNREACHABLE();
+  }
+
+  // 5. Emit return of result in eax.
+  GenerateReturn(masm);
+
+  // 6. For some operations emit inline code to perform floating point
+  // operations on known smis (e.g., if the result of the operation
+  // overflowed the smi range).
+  switch (op_) {
+    case Token::SHL: {
+      Comment perform_float(masm, "-- Perform float operation on smis");
+      __ bind(&use_fp_on_smis);
+      // The result we want is in left == edx, so we can put the allocated
+      // heap number in eax.
+      __ AllocateHeapNumber(eax, ecx, ebx, slow);
+      // Store the result in the HeapNumber and return.
+      if (CpuFeatures::IsSupported(SSE2)) {
+        CpuFeatures::Scope use_sse2(SSE2);
+        __ cvtsi2sd(xmm0, Operand(left));
+        __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+      } else {
+        // It's OK to overwrite the right argument on the stack because we
+        // are about to return.
+        __ mov(Operand(esp, 1 * kPointerSize), left);
+        __ fild_s(Operand(esp, 1 * kPointerSize));
+        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+      }
+      GenerateReturn(masm);
+      break;
+    }
+
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV: {
+      Comment perform_float(masm, "-- Perform float operation on smis");
+      __ bind(&use_fp_on_smis);
+      // Restore arguments to edx, eax.
+      switch (op_) {
+        case Token::ADD:
+          // Revert right = right + left.
+          __ sub(right, Operand(left));
+          break;
+        case Token::SUB:
+          // Revert left = left - right.
+          __ add(left, Operand(right));
+          break;
+        case Token::MUL:
+          // Right was clobbered but a copy is in ebx.
+          __ mov(right, ebx);
+          break;
+        case Token::DIV:
+          // Left was clobbered but a copy is in edi.  Right is in ebx for
+          // division.
+          __ mov(edx, edi);
+          __ mov(eax, right);
+          break;
+        default: UNREACHABLE();
+          break;
+      }
+      __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
+      if (CpuFeatures::IsSupported(SSE2)) {
+        CpuFeatures::Scope use_sse2(SSE2);
+        FloatingPointHelper::LoadSSE2Smis(masm, ebx);
+        switch (op_) {
+          case Token::ADD: __ addsd(xmm0, xmm1); break;
+          case Token::SUB: __ subsd(xmm0, xmm1); break;
+          case Token::MUL: __ mulsd(xmm0, xmm1); break;
+          case Token::DIV: __ divsd(xmm0, xmm1); break;
+          default: UNREACHABLE();
+        }
+        __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
+      } else {  // SSE2 not available, use FPU.
+        FloatingPointHelper::LoadFloatSmis(masm, ebx);
+        switch (op_) {
+          case Token::ADD: __ faddp(1); break;
+          case Token::SUB: __ fsubp(1); break;
+          case Token::MUL: __ fmulp(1); break;
+          case Token::DIV: __ fdivp(1); break;
+          default: UNREACHABLE();
+        }
+        __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
+      }
+      __ mov(eax, ecx);
+      GenerateReturn(masm);
+      break;
+    }
+
+    default:
+      break;
+  }
+
+  // 7. Non-smi operands, fall out to the non-smi code with the operands in
+  // edx and eax.
+  Comment done_comment(masm, "-- Enter non-smi code");
+  __ bind(&not_smis);
+  switch (op_) {
+    case Token::BIT_OR:
+    case Token::SHL:
+    case Token::SAR:
+    case Token::SHR:
+      // Right operand is saved in ecx and eax was destroyed by the smi
+      // check.
+      __ mov(eax, ecx);
+      break;
+
+    case Token::DIV:
+    case Token::MOD:
+      // Operands are in eax, ebx at this point.
+      __ mov(edx, eax);
+      __ mov(eax, ebx);
+      break;
+
+    default:
       break;
   }
 }
@@ -7213,30 +7474,20 @@
   // case smi code is not generated by the caller. Generating it here will speed
   // up common operations.
   if (HasSmiCodeInStub()) {
-    Label slow;
-    __ mov(ebx, Operand(esp, 1 * kPointerSize));
-    __ mov(eax, Operand(esp, 2 * kPointerSize));
-    GenerateSmiCode(masm, &slow);
-    GenerateReturn(masm);
-    // Too bad. The fast case smi code didn't succeed.
-    __ bind(&slow);
+    GenerateSmiCode(masm, &call_runtime);
+  } else if (op_ != Token::MOD) {  // MOD goes straight to runtime.
+    GenerateLoadArguments(masm);
   }
 
-  // Make sure the arguments are in edx and eax.
-  GenerateLoadArguments(masm);
-
   // Floating point case.
   switch (op_) {
     case Token::ADD:
     case Token::SUB:
     case Token::MUL:
     case Token::DIV: {
-      // eax: y
-      // edx: x
-
       if (CpuFeatures::IsSupported(SSE2)) {
         CpuFeatures::Scope use_sse2(SSE2);
-        FloatingPointHelper::LoadSse2Operands(masm, &call_runtime);
+        FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime);
 
         switch (op_) {
           case Token::ADD: __ addsd(xmm0, xmm1); break;
@@ -7245,59 +7496,15 @@
           case Token::DIV: __ divsd(xmm0, xmm1); break;
           default: UNREACHABLE();
         }
-        // Allocate a heap number, if needed.
-        Label skip_allocation;
-        switch (mode_) {
-          case OVERWRITE_LEFT:
-            __ mov(eax, Operand(edx));
-            // Fall through!
-          case OVERWRITE_RIGHT:
-            // If the argument in eax is already an object, we skip the
-            // allocation of a heap number.
-            __ test(eax, Immediate(kSmiTagMask));
-            __ j(not_zero, &skip_allocation, not_taken);
-            // Fall through!
-          case NO_OVERWRITE: {
-            // Allocate a heap number for the result. Keep eax and edx intact
-            // for the possible runtime call.
-            __ AllocateHeapNumber(ebx, ecx, no_reg, &call_runtime);
-            // Now eax can be overwritten losing one of the arguments as we are
-            // now done and will not need it any more.
-            __ mov(eax, ebx);
-            __ bind(&skip_allocation);
-            break;
-          }
-          default: UNREACHABLE();
-        }
+        GenerateHeapResultAllocation(masm, &call_runtime);
         __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
         GenerateReturn(masm);
       } else {  // SSE2 not available, use FPU.
         FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
-        // Allocate a heap number, if needed.
-        Label skip_allocation;
-        switch (mode_) {
-          case OVERWRITE_LEFT:
-            __ mov(eax, Operand(edx));
-            // Fall through!
-          case OVERWRITE_RIGHT:
-            // If the argument in eax is already an object, we skip the
-            // allocation of a heap number.
-            __ test(eax, Immediate(kSmiTagMask));
-            __ j(not_zero, &skip_allocation, not_taken);
-            // Fall through!
-          case NO_OVERWRITE:
-            // Allocate a heap number for the result. Keep eax and edx intact
-            // for the possible runtime call.
-            __ AllocateHeapNumber(ebx, ecx, no_reg, &call_runtime);
-            // Now eax can be overwritten losing one of the arguments as we are
-            // now done and will not need it any more.
-            __ mov(eax, ebx);
-            __ bind(&skip_allocation);
-            break;
-          default: UNREACHABLE();
-        }
-        FloatingPointHelper::LoadFloatOperands(masm, ecx);
-
+        FloatingPointHelper::LoadFloatOperands(
+            masm,
+            ecx,
+            FloatingPointHelper::ARGS_IN_REGISTERS);
         switch (op_) {
           case Token::ADD: __ faddp(1); break;
           case Token::SUB: __ fsubp(1); break;
@@ -7305,8 +7512,13 @@
           case Token::DIV: __ fdivp(1); break;
           default: UNREACHABLE();
         }
+        Label after_alloc_failure;
+        GenerateHeapResultAllocation(masm, &after_alloc_failure);
         __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
         GenerateReturn(masm);
+        __ bind(&after_alloc_failure);
+        __ ffree();
+        __ jmp(&call_runtime);
       }
     }
     case Token::MOD: {
@@ -7319,12 +7531,8 @@
     case Token::SAR:
     case Token::SHL:
     case Token::SHR: {
-      Label non_smi_result, skip_allocation;
-      Label operand_conversion_failure;
-      FloatingPointHelper::LoadAsIntegers(
-        masm,
-        use_sse3_,
-        &operand_conversion_failure);
+      Label non_smi_result;
+      FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime);
       switch (op_) {
         case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
         case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
@@ -7337,7 +7545,7 @@
       if (op_ == Token::SHR) {
         // Check if result is non-negative and fits in a smi.
         __ test(eax, Immediate(0xc0000000));
-        __ j(not_zero, &non_smi_result);
+        __ j(not_zero, &call_runtime);
       } else {
         // Check if result fits in a smi.
         __ cmp(eax, 0xc0000000);
@@ -7352,6 +7560,7 @@
         __ bind(&non_smi_result);
         // Allocate a heap number if needed.
         __ mov(ebx, Operand(eax));  // ebx: result
+        Label skip_allocation;
         switch (mode_) {
           case OVERWRITE_LEFT:
           case OVERWRITE_RIGHT:
@@ -7380,15 +7589,6 @@
         }
         GenerateReturn(masm);
       }
-
-      // Go to runtime for non-number inputs.
-      __ bind(&operand_conversion_failure);
-      // SHR should return uint32 - go to runtime for non-smi/negative result.
-      if (op_ == Token::SHR) {
-        __ bind(&non_smi_result);
-      }
-      __ mov(eax, Operand(esp, 1 * kPointerSize));
-      __ mov(edx, Operand(esp, 2 * kPointerSize));
       break;
     }
     default: UNREACHABLE(); break;
@@ -7398,9 +7598,9 @@
   // result. If arguments was passed in registers now place them on the
   // stack in the correct order below the return address.
   __ bind(&call_runtime);
-  if (HasArgumentsInRegisters()) {
+  if (HasArgsInRegisters()) {
     __ pop(ecx);
-    if (HasArgumentsReversed()) {
+    if (HasArgsReversed()) {
       __ push(eax);
       __ push(edx);
     } else {
@@ -7414,17 +7614,15 @@
       // Test for string arguments before calling runtime.
       Label not_strings, not_string1, string1;
       Result answer;
-      __ mov(eax, Operand(esp, 2 * kPointerSize));  // First argument.
-      __ mov(edx, Operand(esp, 1 * kPointerSize));  // Second argument.
-      __ test(eax, Immediate(kSmiTagMask));
+      __ test(edx, Immediate(kSmiTagMask));
       __ j(zero, &not_string1);
-      __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, eax);
+      __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ecx);
       __ j(above_equal, &not_string1);
 
-      // First argument is a a string, test second.
-      __ test(edx, Immediate(kSmiTagMask));
+      // First argument is a string, test second.
+      __ test(eax, Immediate(kSmiTagMask));
       __ j(zero, &string1);
-      __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, edx);
+      __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
       __ j(above_equal, &string1);
 
       // First and second argument are strings. Jump to the string add stub.
@@ -7433,17 +7631,25 @@
 
       // Only first argument is a string.
       __ bind(&string1);
-      __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
+      __ InvokeBuiltin(
+          HasArgsReversed() ?
+              Builtins::STRING_ADD_RIGHT :
+              Builtins::STRING_ADD_LEFT,
+          JUMP_FUNCTION);
 
       // First argument was not a string, test second.
       __ bind(&not_string1);
-      __ test(edx, Immediate(kSmiTagMask));
+      __ test(eax, Immediate(kSmiTagMask));
       __ j(zero, &not_strings);
-      __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, edx);
+      __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
       __ j(above_equal, &not_strings);
 
       // Only second argument is a string.
-      __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
+      __ InvokeBuiltin(
+          HasArgsReversed() ?
+              Builtins::STRING_ADD_LEFT :
+              Builtins::STRING_ADD_RIGHT,
+          JUMP_FUNCTION);
 
       __ bind(&not_strings);
       // Neither argument is a string.
@@ -7455,7 +7661,7 @@
       break;
     case Token::MUL:
       __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
-        break;
+      break;
     case Token::DIV:
       __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
       break;
@@ -7486,9 +7692,57 @@
 }
 
 
+void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
+                                                       Label* alloc_failure) {
+  Label skip_allocation;
+  OverwriteMode mode = mode_;
+  if (HasArgsReversed()) {
+    if (mode == OVERWRITE_RIGHT) {
+      mode = OVERWRITE_LEFT;
+    } else if (mode == OVERWRITE_LEFT) {
+      mode = OVERWRITE_RIGHT;
+    }
+  }
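+  // (With reversed arguments the expression's left operand lives in eax
+  //  and its right operand in edx, so the overwrite target flips.)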
+  switch (mode) {
+    case OVERWRITE_LEFT: {
+      // If the argument in edx is already an object, we skip the
+      // allocation of a heap number.
+      __ test(edx, Immediate(kSmiTagMask));
+      __ j(not_zero, &skip_allocation, not_taken);
+      // Allocate a heap number for the result. Keep eax and edx intact
+      // for the possible runtime call.
+      __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
+      // Now edx can be overwritten losing one of the arguments as we are
+      // now done and will not need it any more.
+      __ mov(edx, Operand(ebx));
+      __ bind(&skip_allocation);
+      // Use the object in edx as the result holder.
+      __ mov(eax, Operand(edx));
+      break;
+    }
+    case OVERWRITE_RIGHT:
+      // If the argument in eax is already an object, we skip the
+      // allocation of a heap number.
+      __ test(eax, Immediate(kSmiTagMask));
+      __ j(not_zero, &skip_allocation, not_taken);
+      // Fall through!
+    case NO_OVERWRITE:
+      // Allocate a heap number for the result. Keep eax and edx intact
+      // for the possible runtime call.
+      __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
+      // Now eax can be overwritten losing one of the arguments as we are
+      // now done and will not need it any more.
+      __ mov(eax, ebx);
+      __ bind(&skip_allocation);
+      break;
+    default: UNREACHABLE();
+  }
+}
+
+
 void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
   // If arguments are not passed in registers read them from the stack.
-  if (!HasArgumentsInRegisters()) {
+  if (!HasArgsInRegisters()) {
     __ mov(eax, Operand(esp, 1 * kPointerSize));
     __ mov(edx, Operand(esp, 2 * kPointerSize));
   }
@@ -7498,7 +7752,7 @@
 void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
   // If arguments are not passed in registers remove them from the stack before
   // returning.
-  if (!HasArgumentsInRegisters()) {
+  if (!HasArgsInRegisters()) {
     __ ret(2 * kPointerSize);  // Remove both operands
   } else {
     __ ret(0);
@@ -7514,6 +7768,7 @@
                     Register source,
                     bool use_sse3,
                     Label* conversion_failure) {
+  ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
   Label done, right_exponent, normal_exponent;
   Register scratch = ebx;
   Register scratch2 = edi;
@@ -7716,7 +7971,7 @@
 }
 
 
-void FloatingPointHelper::LoadSse2Operands(MacroAssembler* masm,
+void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
                                            Label* not_numbers) {
   Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
   // Load operand in edx into xmm0, or branch to not_numbers.
@@ -7748,16 +8003,40 @@
 }
 
 
+void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
+                                       Register scratch) {
+  const Register left = edx;
+  const Register right = eax;
+  __ mov(scratch, left);
+  ASSERT(!scratch.is(right));  // We're about to clobber scratch.
+  __ SmiUntag(scratch);
+  __ cvtsi2sd(xmm0, Operand(scratch));
+
+  __ mov(scratch, right);
+  __ SmiUntag(scratch);
+  __ cvtsi2sd(xmm1, Operand(scratch));
+}
+
+
 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
-                                            Register scratch) {
+                                            Register scratch,
+                                            ArgLocation arg_location) {
   Label load_smi_1, load_smi_2, done_load_1, done;
-  __ mov(scratch, Operand(esp, 2 * kPointerSize));
+  if (arg_location == ARGS_IN_REGISTERS) {
+    __ mov(scratch, edx);
+  } else {
+    __ mov(scratch, Operand(esp, 2 * kPointerSize));
+  }
   __ test(scratch, Immediate(kSmiTagMask));
   __ j(zero, &load_smi_1, not_taken);
   __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
   __ bind(&done_load_1);
 
-  __ mov(scratch, Operand(esp, 1 * kPointerSize));
+  if (arg_location == ARGS_IN_REGISTERS) {
+    __ mov(scratch, eax);
+  } else {
+    __ mov(scratch, Operand(esp, 1 * kPointerSize));
+  }
   __ test(scratch, Immediate(kSmiTagMask));
   __ j(zero, &load_smi_2, not_taken);
   __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
@@ -7780,6 +8059,24 @@
 }
 
 
+void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
+                                        Register scratch) {
+  const Register left = edx;
+  const Register right = eax;
+  __ mov(scratch, left);
+  ASSERT(!scratch.is(right));  // We're about to clobber scratch.
+  __ SmiUntag(scratch);
+  __ push(scratch);
+  __ fild_s(Operand(esp, 0));
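+  // fild_s pushes a signed 32-bit integer from memory onto the x87 stack;
+  // the same stack slot is reused for the second operand below.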
+
+  __ mov(scratch, right);
+  __ SmiUntag(scratch);
+  __ mov(Operand(esp, 0), scratch);
+  __ fild_s(Operand(esp, 0));
+  __ pop(scratch);
+}
+
+
 void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
                                              Label* non_float,
                                              Register scratch) {
@@ -8114,10 +8411,24 @@
   //  esp[12]: subject string
   //  esp[16]: JSRegExp object
 
-  Label runtime;
+  static const int kLastMatchInfoOffset = 1 * kPointerSize;
+  static const int kPreviousIndexOffset = 2 * kPointerSize;
+  static const int kSubjectOffset = 3 * kPointerSize;
+  static const int kJSRegExpOffset = 4 * kPointerSize;
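+  // Offsets of the arguments listed above relative to esp; the return
+  // address occupies esp[0], so the first argument starts at
+  // 1 * kPointerSize.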
+
+  Label runtime, invoke_regexp;
+
+  // Ensure that a RegExp stack is allocated.
+  ExternalReference address_of_regexp_stack_memory_address =
+      ExternalReference::address_of_regexp_stack_memory_address();
+  ExternalReference address_of_regexp_stack_memory_size =
+      ExternalReference::address_of_regexp_stack_memory_size();
+  __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
+  __ test(ebx, Operand(ebx));
+  __ j(zero, &runtime, not_taken);
 
   // Check that the first argument is a JSRegExp object.
-  __ mov(eax, Operand(esp, 4 * kPointerSize));
+  __ mov(eax, Operand(esp, kJSRegExpOffset));
   ASSERT_EQ(0, kSmiTag);
   __ test(eax, Immediate(kSmiTagMask));
   __ j(zero, &runtime);
@@ -8153,7 +8464,7 @@
   // ecx: RegExp data (FixedArray)
   // edx: Number of capture registers
   // Check that the second argument is a string.
-  __ mov(eax, Operand(esp, 3 * kPointerSize));
+  __ mov(eax, Operand(esp, kSubjectOffset));
   __ test(eax, Immediate(kSmiTagMask));
   __ j(zero, &runtime);
   Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
@@ -8165,7 +8476,7 @@
   // ecx: RegExp data (FixedArray)
   // edx: Number of capture registers
   // Check that the third argument is a positive smi.
-  __ mov(eax, Operand(esp, 2 * kPointerSize));
+  __ mov(eax, Operand(esp, kPreviousIndexOffset));
   __ test(eax, Immediate(kSmiTagMask | 0x80000000));
   __ j(not_zero, &runtime);
   // Check that it is not greater than the subject string length.
@@ -8176,7 +8487,7 @@
   // ecx: RegExp data (FixedArray)
   // edx: Number of capture registers
   // Check that the fourth object is a JSArray object.
-  __ mov(eax, Operand(esp, 1 * kPointerSize));
+  __ mov(eax, Operand(esp, kLastMatchInfoOffset));
   __ test(eax, Immediate(kSmiTagMask));
   __ j(zero, &runtime);
   __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
@@ -8194,38 +8505,74 @@
   __ j(greater, &runtime);
 
   // ecx: RegExp data (FixedArray)
-  // Check the representation and encoding of the subject string (only support
-  // flat ascii strings).
-  __ mov(eax, Operand(esp, 3 * kPointerSize));
+  // Check the representation and encoding of the subject string.
+  Label seq_string, seq_two_byte_string, check_code;
+  const int kStringRepresentationEncodingMask =
+      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+  __ mov(eax, Operand(esp, kSubjectOffset));
   __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
   __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-  __ and_(ebx, kStringRepresentationMask | kStringEncodingMask);
-  __ cmp(ebx, kSeqStringTag | kAsciiStringTag);
+  __ and_(ebx, kStringRepresentationEncodingMask);
+  // First check for sequential string.
+  ASSERT_EQ(0, kStringTag);
+  ASSERT_EQ(0, kSeqStringTag);
+  __ test(Operand(ebx),
+          Immediate(kIsNotStringMask | kStringRepresentationMask));
+  __ j(zero, &seq_string);
+
+  // Check for flat cons string.
+  // A flat cons string is a cons string where the second part is the empty
+  // string. In that case the subject string is just the first part of the cons
+  // string. Also in this case the first part of the cons string is known to be
+  // a sequential string.
+  __ mov(edx, ebx);
+  __ and_(edx, kStringRepresentationMask);
+  __ cmp(edx, kConsStringTag);
   __ j(not_equal, &runtime);
+  __ mov(edx, FieldOperand(eax, ConsString::kSecondOffset));
+  __ cmp(Operand(edx), Immediate(Handle<String>(Heap::empty_string())));
+  __ j(not_equal, &runtime);
+  __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
+  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+  __ and_(ebx, kStringRepresentationEncodingMask);
 
-  // ecx: RegExp data (FixedArray)
-  // Ensure that a RegExp stack is allocated.
-  ExternalReference address_of_regexp_stack_memory_address =
-      ExternalReference::address_of_regexp_stack_memory_address();
-  ExternalReference address_of_regexp_stack_memory_size =
-      ExternalReference::address_of_regexp_stack_memory_size();
-  __ mov(eax, Operand::StaticVariable(address_of_regexp_stack_memory_size));
-  __ test(eax, Operand(eax));
-  __ j(zero, &runtime, not_taken);
-
+  __ bind(&seq_string);
+  // eax: subject string (sequential, either ascii or two byte)
+  // ebx: subject string type & kStringRepresentationEncodingMask
   // ecx: RegExp data (FixedArray)
   // Check that the irregexp code has been generated for an ascii string. If
-  // it has the field contains a code object otherwise it contains the hole.
+  // it has, the field contains a code object otherwise it contains the hole.
+  __ cmp(ebx, kStringTag | kSeqStringTag | kTwoByteStringTag);
+  __ j(equal, &seq_two_byte_string);
+#ifdef DEBUG
+  __ cmp(ebx, kStringTag | kSeqStringTag | kAsciiStringTag);
+  __ Check(equal, "Expected sequential ascii string");
+#endif
   __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
+  __ Set(edi, Immediate(1));  // Type is ascii.
+  __ jmp(&check_code);
+
+  __ bind(&seq_two_byte_string);
+  // eax: subject string
+  // ecx: RegExp data (FixedArray)
+  __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
+  __ Set(edi, Immediate(0));  // Type is two byte.
+
+  __ bind(&check_code);
+  // Check that the irregexp code has been generated. If it has, the field
+  // contains a code object; otherwise it contains the hole.
   __ CmpObjectType(edx, CODE_TYPE, ebx);
   __ j(not_equal, &runtime);
 
+  // eax: subject string
+  // edx: code
+  // edi: encoding of subject string (1 if ascii, 0 if two byte)
   // Load used arguments before starting to push arguments for call to native
   // RegExp code to avoid handling changing stack height.
-  __ mov(eax, Operand(esp, 3 * kPointerSize));  // Subject string.
-  __ mov(ebx, Operand(esp, 2 * kPointerSize));  // Previous index.
-  __ mov(ecx, Operand(esp, 4 * kPointerSize));  // JSRegExp object.
-  __ SmiUntag(ebx);  // Previous index from sim.
+  __ mov(ebx, Operand(esp, kPreviousIndexOffset));
+  __ mov(ecx, Operand(esp, kJSRegExpOffset));
+  __ SmiUntag(ebx);  // Previous index from smi.
 
   // eax: subject string
   // ebx: previous index
@@ -8233,37 +8580,40 @@
   // All checks done. Now push arguments for native regexp code.
   __ IncrementCounter(&Counters::regexp_entry_native, 1);
 
-  // Argument 8: Indicate that this is a direct call from JavaScript.
+  // Argument 7: Indicate that this is a direct call from JavaScript.
   __ push(Immediate(1));
 
-  // Argument 7: Start (high end) of backtracking stack memory area.
+  // Argument 6: Start (high end) of backtracking stack memory area.
   __ mov(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_address));
   __ add(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
   __ push(ecx);
 
-  // Argument 6: At start of string?
-  __ xor_(Operand(ecx), ecx);  // setcc only operated on cl (lower byte of ecx).
-  __ test(ebx, Operand(ebx));
-  __ setcc(zero, ecx);  // 1 if 0 (start of string), 0 if positive.
-  __ push(ecx);
-
   // Argument 5: static offsets vector buffer.
   __ push(Immediate(ExternalReference::address_of_static_offsets_vector()));
 
-  // Argument 4: End of string data.
-  __ mov(ecx, FieldOperand(eax, String::kLengthOffset));
-  __ add(ecx, Operand(eax));
-  __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  __ push(ecx);
+  // Argument 4: End of string data
+  // Argument 3: Start of string data
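+  // (An ascii character occupies one byte and a two-byte character two,
+  //  hence the times_1 vs. times_2 index scaling below.)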
+  Label push_two_byte, push_rest;
+  __ test(edi, Operand(edi));
+  __ mov(edi, FieldOperand(eax, String::kLengthOffset));
+  __ j(zero, &push_two_byte);
+  __ lea(ecx, FieldOperand(eax, edi, times_1, SeqAsciiString::kHeaderSize));
+  __ push(ecx);  // Argument 4.
+  __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
+  __ push(ecx);  // Argument 3.
+  __ jmp(&push_rest);
 
-  // Argument 3: Start of string data.
-  __ mov(ecx, ebx);
-  __ add(ebx, Operand(eax));  // String is ASCII.
-  __ add(Operand(ebx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  __ push(ebx);
+  __ bind(&push_two_byte);
+  ASSERT(kShortSize == 2);
+  __ lea(ecx, FieldOperand(eax, edi, times_2, SeqTwoByteString::kHeaderSize));
+  __ push(ecx);  // Argument 4.
+  __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
+  __ push(ecx);  // Argument 3.
+
+  __ bind(&push_rest);
 
   // Argument 2: Previous index.
-  __ push(ecx);
+  __ push(ebx);
 
   // Argument 1: Subject string.
   __ push(eax);
@@ -8272,7 +8622,7 @@
   __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
   __ call(Operand(edx));
   // Remove arguments.
-  __ add(Operand(esp), Immediate(8 * kPointerSize));
+  __ add(Operand(esp), Immediate(7 * kPointerSize));
 
   // Check the result.
   Label success;
@@ -8299,7 +8649,7 @@
 
   // Load RegExp data.
   __ bind(&success);
-  __ mov(eax, Operand(esp, 4 * kPointerSize));
+  __ mov(eax, Operand(esp, kJSRegExpOffset));
   __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
   __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
   // Calculate number of capture registers (number_of_captures + 1) * 2.
@@ -8307,7 +8657,7 @@
 
   // edx: Number of capture registers
   // Load last_match_info which is still known to be a fast case JSArray.
-  __ mov(eax, Operand(esp, 1 * kPointerSize));
+  __ mov(eax, Operand(esp, kLastMatchInfoOffset));
   __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
 
   // ebx: last_match_info backing store (FixedArray)
@@ -8317,11 +8667,11 @@
   __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
   __ SmiUntag(edx);  // Number of capture registers back from smi.
   // Store last subject and last input.
-  __ mov(eax, Operand(esp, 3 * kPointerSize));
+  __ mov(eax, Operand(esp, kSubjectOffset));
   __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
   __ mov(ecx, ebx);
   __ RecordWrite(ecx, RegExpImpl::kLastSubjectOffset, eax, edi);
-  __ mov(eax, Operand(esp, 3 * kPointerSize));
+  __ mov(eax, Operand(esp, kSubjectOffset));
   __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
   __ mov(ecx, ebx);
   __ RecordWrite(ecx, RegExpImpl::kLastInputOffset, eax, edi);
@@ -8335,7 +8685,7 @@
   // ecx: offsets vector
   // edx: number of capture registers
   Label next_capture, done;
-  __ mov(eax, Operand(esp, 2 * kPointerSize));  // Read previous index.
+  __ mov(eax, Operand(esp, kPreviousIndexOffset));
   // Capture register counter starts from number of capture registers and
   // counts down until wrapping after zero.
   __ bind(&next_capture);
@@ -8362,7 +8712,7 @@
   __ bind(&done);
 
   // Return last match info.
-  __ mov(eax, Operand(esp, 1 * kPointerSize));
+  __ mov(eax, Operand(esp, kLastMatchInfoOffset));
   __ ret(4 * kPointerSize);
 
   // Do the runtime call to execute the regexp.
@@ -8520,7 +8870,7 @@
     CpuFeatures::Scope use_sse2(SSE2);
     CpuFeatures::Scope use_cmov(CMOV);
 
-    FloatingPointHelper::LoadSse2Operands(masm, &check_for_symbols);
+    FloatingPointHelper::LoadSSE2Operands(masm, &check_for_symbols);
     __ comisd(xmm0, xmm1);
 
     // Jump to builtin for NaN.
@@ -8582,30 +8932,7 @@
 
   __ bind(&check_for_strings);
 
-  // Check that both objects are not smis.
-  ASSERT_EQ(0, kSmiTag);
-  __ mov(ebx, Operand(edx));
-  __ and_(ebx, Operand(eax));
-  __ test(ebx, Immediate(kSmiTagMask));
-  __ j(zero, &call_builtin);
-
-  // Load instance type for both objects.
-  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
-  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
-  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-
-  // Check that both are flat ascii strings.
-  Label non_ascii_flat;
-  ASSERT(kNotStringTag != 0);
-  const int kFlatAsciiString =
-      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
-  __ and_(ecx, kFlatAsciiString);
-  __ cmp(ecx, kStringTag | kSeqStringTag | kAsciiStringTag);
-  __ j(not_equal, &call_builtin);
-  __ and_(ebx, kFlatAsciiString);
-  __ cmp(ebx, kStringTag | kSeqStringTag | kAsciiStringTag);
-  __ j(not_equal, &call_builtin);
+  __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &call_builtin);
 
   // Inline comparison of ascii strings.
   StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
@@ -9659,79 +9986,76 @@
                                                         Register scratch1,
                                                         Register scratch2,
                                                         Register scratch3) {
-  Label compare_lengths, compare_lengths_1;
-
-  // Find minimum length. If either length is zero just compare lengths.
+  Label result_not_equal;
+  Label result_greater;
+  Label compare_lengths;
+  // Find minimum length.
+  Label left_shorter;
   __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
-  __ test(scratch1, Operand(scratch1));
-  __ j(zero, &compare_lengths_1);
-  __ mov(scratch2, FieldOperand(right, String::kLengthOffset));
-  __ test(scratch2, Operand(scratch2));
-  __ j(zero, &compare_lengths_1);
-  __ cmp(scratch1, Operand(scratch2));
-  if (CpuFeatures::IsSupported(CMOV)) {
-    CpuFeatures::Scope use_cmov(CMOV);
-    __ cmov(greater, scratch1, Operand(scratch2));
-  } else {
-    Label l;
-    __ j(less, &l);
-    __ mov(scratch1, scratch2);
-    __ bind(&l);
+  __ mov(scratch3, scratch1);
+  __ sub(scratch3, FieldOperand(right, String::kLengthOffset));
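+  // scratch3 = left.length - right.length; its sign tells which is shorter.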
+
+  Register length_delta = scratch3;
+
+  __ j(less_equal, &left_shorter);
+  // Right string is shorter. Change scratch1 to be length of right string.
+  __ sub(scratch1, Operand(length_delta));
+  __ bind(&left_shorter);
+
+  Register min_length = scratch1;
+
+  // If either length is zero, just compare lengths.
+  __ test(min_length, Operand(min_length));
+  __ j(zero, &compare_lengths);
+
+  // Change index to run from -min_length to -1 by adding min_length
+  // to string start. This means that the loop ends when the index
+  // reaches zero, which doesn't need an additional compare.
+  __ lea(left,
+         FieldOperand(left,
+                      min_length, times_1,
+                      SeqAsciiString::kHeaderSize));
+  __ lea(right,
+         FieldOperand(right,
+                      min_length, times_1,
+                      SeqAsciiString::kHeaderSize));
+  __ neg(min_length);
+
+  Register index = min_length;  // index = -min_length;
+
+  {
+    // Compare loop.
+    Label loop;
+    __ bind(&loop);
+    // Compare characters.
+    __ mov_b(scratch2, Operand(left, index, times_1, 0));
+    __ cmpb(scratch2, Operand(right, index, times_1, 0));
+    __ j(not_equal, &result_not_equal);
+    __ add(Operand(index), Immediate(1));
+    __ j(not_zero, &loop);
   }
 
-  Label result_greater, result_less;
-  Label loop;
-  // Compare next character.
-  __ mov(scratch3, Immediate(-1));  // Index into strings.
-  __ bind(&loop);
-  // Compare characters.
-  Label character_compare_done;
-  __ add(Operand(scratch3), Immediate(1));
-  __ mov_b(scratch2, Operand(left,
-                             scratch3,
-                             times_1,
-                             SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  __ subb(scratch2, Operand(right,
-                            scratch3,
-                            times_1,
-                            SeqAsciiString::kHeaderSize - kHeapObjectTag));
-  __ j(not_equal, &character_compare_done);
-  __ sub(Operand(scratch1), Immediate(1));
-  __ j(not_zero, &loop);
-  // If min length characters match compare lengths otherwise last character
-  // compare is the result.
-  __ bind(&character_compare_done);
-  __ j(equal, &compare_lengths);
-  __ j(less, &result_less);
-  __ jmp(&result_greater);
-
-  // Compare lengths.
-  Label result_not_equal;
+  // Compare lengths - strings up to min_length are equal.
   __ bind(&compare_lengths);
-  __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
-  __ bind(&compare_lengths_1);
-  __ sub(scratch1, FieldOperand(right, String::kLengthOffset));
+  __ test(length_delta, Operand(length_delta));
   __ j(not_zero, &result_not_equal);
 
   // Result is EQUAL.
   ASSERT_EQ(0, EQUAL);
   ASSERT_EQ(0, kSmiTag);
-  __ xor_(eax, Operand(eax));
-  __ IncrementCounter(&Counters::string_compare_native, 1);
+  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
   __ ret(2 * kPointerSize);
+
   __ bind(&result_not_equal);
   __ j(greater, &result_greater);
 
   // Result is LESS.
-  __ bind(&result_less);
-  __ mov(eax, Immediate(Smi::FromInt(LESS)->value()));
-  __ IncrementCounter(&Counters::string_compare_native, 1);
+  __ Set(eax, Immediate(Smi::FromInt(LESS)));
   __ ret(2 * kPointerSize);
 
   // Result is GREATER.
   __ bind(&result_greater);
-  __ mov(eax, Immediate(Smi::FromInt(GREATER)->value()));
-  __ IncrementCounter(&Counters::string_compare_native, 1);
+  __ Set(eax, Immediate(Smi::FromInt(GREATER)));
   __ ret(2 * kPointerSize);
 }
 
@@ -9752,41 +10076,19 @@
   __ j(not_equal, &not_same);
   ASSERT_EQ(0, EQUAL);
   ASSERT_EQ(0, kSmiTag);
-  __ xor_(eax, Operand(eax));
+  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
   __ IncrementCounter(&Counters::string_compare_native, 1);
   __ ret(2 * kPointerSize);
 
   __ bind(&not_same);
 
-  // Check that both objects are not smis.
-  ASSERT_EQ(0, kSmiTag);
-  __ mov(ebx, Operand(edx));
-  __ and_(ebx, Operand(eax));
-  __ test(ebx, Immediate(kSmiTagMask));
-  __ j(zero, &runtime);
-
-  // Load instance type for both strings.
-  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
-  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
-  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
-  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-
-  // Check that both are flat ascii strings.
-  Label non_ascii_flat;
-  __ and_(ecx, kStringRepresentationMask | kStringEncodingMask);
-  __ cmp(ecx, kSeqStringTag | kAsciiStringTag);
-  __ j(not_equal, &non_ascii_flat);
-  const int kFlatAsciiString =
-      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
-  __ and_(ebx, kFlatAsciiString);
-  __ cmp(ebx, kStringTag | kSeqStringTag | kAsciiStringTag);
-  __ j(not_equal, &non_ascii_flat);
+  // Check that both objects are sequential ascii strings.
+  __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
 
   // Compare flat ascii strings.
+  __ IncrementCounter(&Counters::string_compare_native, 1);
   GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
 
-  __ bind(&non_ascii_flat);
-
   // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
   // tagged as a small integer.
   __ bind(&runtime);
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index 56cf978..a81a7d1 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -43,57 +43,70 @@
 // -------------------------------------------------------------------------
 // Reference support
 
-// A reference is a C++ stack-allocated object that keeps an ECMA
-// reference on the execution stack while in scope. For variables
-// the reference is empty, indicating that it isn't necessary to
-// store state on the stack for keeping track of references to those.
-// For properties, we keep either one (named) or two (indexed) values
-// on the execution stack to represent the reference.
-
+// A reference is a C++ stack-allocated object that puts a
+// reference on the virtual frame.  The reference may be consumed
+// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
+// When the lifetime (scope) of a valid reference ends, it must have
+// been consumed, and be in state UNLOADED.
 class Reference BASE_EMBEDDED {
  public:
  // The values of the types are important, see size().
-  enum Type { ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
-  Reference(CodeGenerator* cgen, Expression* expression);
+  enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
+  Reference(CodeGenerator* cgen,
+            Expression* expression,
+            bool persist_after_get = false);
   ~Reference();
 
   Expression* expression() const { return expression_; }
   Type type() const { return type_; }
   void set_type(Type value) {
-    ASSERT(type_ == ILLEGAL);
+    ASSERT_EQ(ILLEGAL, type_);
     type_ = value;
   }
 
+  void set_unloaded() {
+    ASSERT_NE(ILLEGAL, type_);
+    ASSERT_NE(UNLOADED, type_);
+    type_ = UNLOADED;
+  }
   // The size the reference takes up on the stack.
-  int size() const { return (type_ == ILLEGAL) ? 0 : type_; }
+  int size() const {
+    return (type_ < SLOT) ? 0 : type_;
+  }
 
   bool is_illegal() const { return type_ == ILLEGAL; }
   bool is_slot() const { return type_ == SLOT; }
   bool is_property() const { return type_ == NAMED || type_ == KEYED; }
+  bool is_unloaded() const { return type_ == UNLOADED; }
 
   // Return the name.  Only valid for named property references.
   Handle<String> GetName();
 
   // Generate code to push the value of the reference on top of the
   // expression stack.  The reference is expected to be already on top of
-  // the expression stack, and it is left in place with its value above it.
+  // the expression stack, and it is consumed by the call unless the
+  // reference is for a compound assignment.
+  // If the reference is not consumed, it is left in place under its value.
   void GetValue();
 
   // Like GetValue except that the slot is expected to be written to before
-  // being read from again.  Thae value of the reference may be invalidated,
+  // being read from again.  The value of the reference may be invalidated,
   // causing subsequent attempts to read it to fail.
   void TakeValue();
 
   // Generate code to store the value on top of the expression stack in the
   // reference.  The reference is expected to be immediately below the value
-  // on the expression stack.  The stored value is left in place (with the
-  // reference intact below it) to support chained assignments.
+  // on the expression stack.  The value is stored in the location specified
+  // by the reference, and is left on top of the stack, after the reference
+  // is popped from beneath it (unloaded).
   void SetValue(InitState init_state);
 
  private:
   CodeGenerator* cgen_;
   Expression* expression_;
   Type type_;
+  // Keep the reference on the stack after get, so it can be used by set later.
+  bool persist_after_get_;
 };
 
 
@@ -420,6 +433,11 @@
   // value in place.
   void StoreToSlot(Slot* slot, InitState init_state);
 
+  // Load a property of an object, returning it in a Result.
+  // The object and the property name are passed on the stack, and are
+  // left unchanged.
+  Result EmitKeyedLoad(bool is_global);
+
   // Special code for typeof expressions: Unfortunately, we must
   // be careful when loading the expression in 'typeof'
   // expressions. We are not allowed to throw reference errors for
@@ -444,20 +462,20 @@
 
   // Emit code to perform a binary operation on a constant
   // smi and a likely smi.  Consumes the Result *operand.
-  void ConstantSmiBinaryOperation(Token::Value op,
-                                  Result* operand,
-                                  Handle<Object> constant_operand,
-                                  StaticType* type,
-                                  bool reversed,
-                                  OverwriteMode overwrite_mode);
+  Result ConstantSmiBinaryOperation(Token::Value op,
+                                    Result* operand,
+                                    Handle<Object> constant_operand,
+                                    StaticType* type,
+                                    bool reversed,
+                                    OverwriteMode overwrite_mode);
 
   // Emit code to perform a binary operation on two likely smis.
   // The code to handle smi arguments is produced inline.
   // Consumes the Results *left and *right.
-  void LikelySmiBinaryOperation(Token::Value op,
-                                Result* left,
-                                Result* right,
-                                OverwriteMode overwrite_mode);
+  Result LikelySmiBinaryOperation(Token::Value op,
+                                  Result* left,
+                                  Result* right,
+                                  OverwriteMode overwrite_mode);
 
   void Comparison(AstNode* node,
                   Condition cc,
@@ -479,10 +497,10 @@
                          CallFunctionFlags flags,
                          int position);
 
-  // Use an optimized version of Function.prototype.apply that avoid
-  // allocating the arguments object and just copies the arguments
-  // from the stack.
-  void CallApplyLazy(Property* apply,
+  // An optimized implementation of expressions of the form
+  // x.apply(y, arguments).  We call x the applicand and y the receiver.
+  // The optimization avoids allocating an arguments object if possible.
+  void CallApplyLazy(Expression* applicand,
                      Expression* receiver,
                      VariableProxy* arguments,
                      int position);
@@ -517,6 +535,7 @@
   void GenerateIsArray(ZoneList<Expression*>* args);
   void GenerateIsObject(ZoneList<Expression*>* args);
   void GenerateIsFunction(ZoneList<Expression*>* args);
+  void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
 
   // Support for construct call checks.
   void GenerateIsConstructCall(ZoneList<Expression*>* args);
@@ -613,8 +632,8 @@
   friend class JumpTarget;
   friend class Reference;
   friend class Result;
-  friend class FastCodeGenerator;
-  friend class CodeGenSelector;
+  friend class FullCodeGenerator;
+  friend class FullCodeGenSyntaxChecker;
 
   friend class CodeGeneratorPatcher;  // Used in test-log-stack-tracer.cc
 
@@ -651,6 +670,11 @@
   void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
   void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
 
+  Result GenerateCall(MacroAssembler* masm,
+                      VirtualFrame* frame,
+                      Result* left,
+                      Result* right);
+
  private:
   Token::Value op_;
   OverwriteMode mode_;
@@ -697,11 +721,11 @@
   void GenerateSmiCode(MacroAssembler* masm, Label* slow);
   void GenerateLoadArguments(MacroAssembler* masm);
   void GenerateReturn(MacroAssembler* masm);
+  void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
 
   bool ArgsInRegistersSupported() {
-    return ((op_ == Token::ADD) || (op_ == Token::SUB)
-            || (op_ == Token::MUL) || (op_ == Token::DIV))
-        && flags_ != NO_SMI_CODE_IN_STUB;
+    return op_ == Token::ADD || op_ == Token::SUB
+        || op_ == Token::MUL || op_ == Token::DIV;
   }
   bool IsOperationCommutative() {
     return (op_ == Token::ADD) || (op_ == Token::MUL);
@@ -710,8 +734,8 @@
   void SetArgsInRegisters() { args_in_registers_ = true; }
   void SetArgsReversed() { args_reversed_ = true; }
   bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
-  bool HasArgumentsInRegisters() { return args_in_registers_; }
-  bool HasArgumentsReversed() { return args_reversed_; }
+  bool HasArgsInRegisters() { return args_in_registers_; }
+  bool HasArgsReversed() { return args_reversed_; }
 };
 
 
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index 1fbaa3c..cb500d5 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -53,23 +53,25 @@
 
 static ByteMnemonic two_operands_instr[] = {
   {0x03, "add", REG_OPER_OP_ORDER},
-  {0x21, "and", OPER_REG_OP_ORDER},
-  {0x23, "and", REG_OPER_OP_ORDER},
-  {0x3B, "cmp", REG_OPER_OP_ORDER},
-  {0x8D, "lea", REG_OPER_OP_ORDER},
   {0x09, "or", OPER_REG_OP_ORDER},
   {0x0B, "or", REG_OPER_OP_ORDER},
   {0x1B, "sbb", REG_OPER_OP_ORDER},
+  {0x21, "and", OPER_REG_OP_ORDER},
+  {0x23, "and", REG_OPER_OP_ORDER},
   {0x29, "sub", OPER_REG_OP_ORDER},
   {0x2A, "subb", REG_OPER_OP_ORDER},
   {0x2B, "sub", REG_OPER_OP_ORDER},
-  {0x84, "test_b", REG_OPER_OP_ORDER},
-  {0x85, "test", REG_OPER_OP_ORDER},
   {0x31, "xor", OPER_REG_OP_ORDER},
   {0x33, "xor", REG_OPER_OP_ORDER},
+  {0x38, "cmpb", OPER_REG_OP_ORDER},
+  {0x3A, "cmpb", REG_OPER_OP_ORDER},
+  {0x3B, "cmp", REG_OPER_OP_ORDER},
+  {0x84, "test_b", REG_OPER_OP_ORDER},
+  {0x85, "test", REG_OPER_OP_ORDER},
   {0x87, "xchg", REG_OPER_OP_ORDER},
   {0x8A, "mov_b", REG_OPER_OP_ORDER},
   {0x8B, "mov", REG_OPER_OP_ORDER},
+  {0x8D, "lea", REG_OPER_OP_ORDER},
   {-1, "", UNSET_OP_ORDER}
 };
 
diff --git a/src/ia32/fast-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
similarity index 90%
rename from src/ia32/fast-codegen-ia32.cc
rename to src/ia32/full-codegen-ia32.cc
index f485d9e..03fe54d 100644
--- a/src/ia32/fast-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -30,7 +30,7 @@
 #include "codegen-inl.h"
 #include "compiler.h"
 #include "debug.h"
-#include "fast-codegen.h"
+#include "full-codegen.h"
 #include "parser.h"
 
 namespace v8 {
@@ -51,7 +51,7 @@
 //
 // The function builds a JS frame.  Please see JavaScriptFrameConstants in
 // frames-ia32.h for its layout.
-void FastCodeGenerator::Generate(FunctionLiteral* fun) {
+void FullCodeGenerator::Generate(FunctionLiteral* fun) {
   function_ = fun;
   SetFunctionPosition(fun);
 
@@ -160,7 +160,7 @@
 }
 
 
-void FastCodeGenerator::EmitReturnSequence(int position) {
+void FullCodeGenerator::EmitReturnSequence(int position) {
   Comment cmnt(masm_, "[ Return sequence");
   if (return_label_.is_bound()) {
     __ jmp(&return_label_);
@@ -193,7 +193,7 @@
 }
 
 
-void FastCodeGenerator::Apply(Expression::Context context, Register reg) {
+void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
   switch (context) {
     case Expression::kUninitialized:
       UNREACHABLE();
@@ -236,7 +236,7 @@
 }
 
 
-void FastCodeGenerator::Apply(Expression::Context context, Slot* slot) {
+void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
   switch (context) {
     case Expression::kUninitialized:
       UNREACHABLE();
@@ -279,7 +279,7 @@
 }
 
 
-void FastCodeGenerator::Apply(Expression::Context context, Literal* lit) {
+void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
   switch (context) {
     case Expression::kUninitialized:
       UNREACHABLE();
@@ -320,7 +320,7 @@
 }
 
 
-void FastCodeGenerator::ApplyTOS(Expression::Context context) {
+void FullCodeGenerator::ApplyTOS(Expression::Context context) {
   switch (context) {
     case Expression::kUninitialized:
       UNREACHABLE();
@@ -361,7 +361,7 @@
 }
 
 
-void FastCodeGenerator::DropAndApply(int count,
+void FullCodeGenerator::DropAndApply(int count,
                                      Expression::Context context,
                                      Register reg) {
   ASSERT(count > 0);
@@ -413,7 +413,7 @@
 }
 
 
-void FastCodeGenerator::Apply(Expression::Context context,
+void FullCodeGenerator::Apply(Expression::Context context,
                               Label* materialize_true,
                               Label* materialize_false) {
   switch (context) {
@@ -478,7 +478,7 @@
 }
 
 
-void FastCodeGenerator::DoTest(Expression::Context context) {
+void FullCodeGenerator::DoTest(Expression::Context context) {
   // The value to test is in the accumulator.  If the value might be needed
   // on the stack (value/test and test/value contexts with a stack location
   // desired), then the value is already duplicated on the stack.
@@ -612,7 +612,7 @@
 }
 
 
-MemOperand FastCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
+MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
   switch (slot->type()) {
     case Slot::PARAMETER:
     case Slot::LOCAL:
@@ -631,13 +631,13 @@
 }
 
 
-void FastCodeGenerator::Move(Register destination, Slot* source) {
+void FullCodeGenerator::Move(Register destination, Slot* source) {
   MemOperand location = EmitSlotSearch(source, destination);
   __ mov(destination, location);
 }
 
 
-void FastCodeGenerator::Move(Slot* dst,
+void FullCodeGenerator::Move(Slot* dst,
                              Register src,
                              Register scratch1,
                              Register scratch2) {
@@ -653,7 +653,7 @@
 }
 
 
-void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
+void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
   Comment cmnt(masm_, "[ Declaration");
   Variable* var = decl->proxy()->var();
   ASSERT(var != NULL);  // Must have been resolved.
@@ -751,7 +751,7 @@
 }
 
 
-void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.
   __ push(esi);  // The context is the first argument.
   __ push(Immediate(pairs));
@@ -761,7 +761,7 @@
 }
 
 
-void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
   Comment cmnt(masm_, "[ FunctionLiteral");
 
   // Build the function boilerplate and instantiate it.
@@ -779,17 +779,21 @@
 }
 
 
-void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
   Comment cmnt(masm_, "[ VariableProxy");
   EmitVariableLoad(expr->var(), context_);
 }
 
 
-void FastCodeGenerator::EmitVariableLoad(Variable* var,
+void FullCodeGenerator::EmitVariableLoad(Variable* var,
                                          Expression::Context context) {
-  Expression* rewrite = var->rewrite();
-  if (rewrite == NULL) {
-    ASSERT(var->is_global());
+  // Four cases: non-this global variables, lookup slots, all other
+  // types of slots, and parameters that rewrite to explicit property
+  // accesses on the arguments object.
+  Slot* slot = var->slot();
+  Property* property = var->AsProperty();
+
+  if (var->is_global() && !var->is_this()) {
     Comment cmnt(masm_, "Global variable");
     // Use inline caching. Variable name is passed in ecx and the global
     // object on the stack.
@@ -803,34 +807,24 @@
     // (eg, push/pop elimination).
     __ nop();
     DropAndApply(1, context, eax);
-  } else if (rewrite->AsSlot() != NULL) {
-    Slot* slot = rewrite->AsSlot();
-    if (FLAG_debug_code) {
-      switch (slot->type()) {
-        case Slot::PARAMETER:
-        case Slot::LOCAL: {
-          Comment cmnt(masm_, "Stack slot");
-          break;
-        }
-        case Slot::CONTEXT: {
-          Comment cmnt(masm_, "Context slot");
-          break;
-        }
-        case Slot::LOOKUP:
-          UNIMPLEMENTED();
-          break;
-      }
-    }
-    Apply(context, slot);
-  } else {
-    Comment cmnt(masm_, "Variable rewritten to property");
-    // A variable has been rewritten into an explicit access to an object
-    // property.
-    Property* property = rewrite->AsProperty();
-    ASSERT_NOT_NULL(property);
 
-    // The only property expressions that can occur are of the form
-    // "slot[literal]".
+  } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+    Comment cmnt(masm_, "Lookup slot");
+    __ push(esi);  // Context.
+    __ push(Immediate(var->name()));
+    __ CallRuntime(Runtime::kLoadContextSlot, 2);
+    Apply(context, eax);
+
+  } else if (slot != NULL) {
+    Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
+                            ? "Context slot"
+                            : "Stack slot");
+    Apply(context, slot);
+
+  } else {
+    Comment cmnt(masm_, "Rewritten parameter");
+    ASSERT_NOT_NULL(property);
+    // Rewritten parameter accesses are of the form "slot[literal]".
 
     // Assert that the object is in a slot.
     Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
@@ -862,7 +856,7 @@
 }
 
 
-void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
   Comment cmnt(masm_, "[ RegExpLiteral");
   Label done;
   // Registers will be used as follows:
@@ -889,7 +883,7 @@
 }
 
 
-void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   Comment cmnt(masm_, "[ ObjectLiteral");
   __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
   __ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
@@ -958,7 +952,7 @@
 }
 
 
-void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   Comment cmnt(masm_, "[ ArrayLiteral");
   __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
   __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
@@ -1008,7 +1002,7 @@
 }
 
 
-void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Literal* key = prop->key()->AsLiteral();
   __ mov(ecx, Immediate(key->handle()));
@@ -1018,7 +1012,7 @@
 }
 
 
-void FastCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
   __ call(ic, RelocInfo::CODE_TARGET);
@@ -1026,7 +1020,7 @@
 }
 
 
-void FastCodeGenerator::EmitBinaryOp(Token::Value op,
+void FullCodeGenerator::EmitBinaryOp(Token::Value op,
                                      Expression::Context context) {
   __ push(result_register());
   GenericBinaryOpStub stub(op,
@@ -1037,11 +1031,17 @@
 }
 
 
-void FastCodeGenerator::EmitVariableAssignment(Variable* var,
+void FullCodeGenerator::EmitVariableAssignment(Variable* var,
                                                Expression::Context context) {
+  // Three main cases: global variables, lookup slots, and all other
+  // types of slots.  Left-hand-side parameters that rewrite to
+  // explicit property accesses do not reach here.
   ASSERT(var != NULL);
   ASSERT(var->is_global() || var->slot() != NULL);
+
+  Slot* slot = var->slot();
   if (var->is_global()) {
+    ASSERT(!var->is_this());
     // Assignment to a global variable.  Use inline caching for the
     // assignment.  Right-hand-side value is passed in eax, variable name in
     // ecx, and the global object on the stack.
@@ -1053,8 +1053,14 @@
     // Overwrite the receiver on the stack with the result if needed.
     DropAndApply(1, context, eax);
 
-  } else if (var->slot() != NULL) {
-    Slot* slot = var->slot();
+  } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+    __ push(result_register());  // Value.
+    __ push(esi);  // Context.
+    __ push(Immediate(var->name()));
+    __ CallRuntime(Runtime::kStoreContextSlot, 3);
+    Apply(context, eax);
+
+  } else if (slot != NULL) {
     switch (slot->type()) {
       case Slot::LOCAL:
       case Slot::PARAMETER:
@@ -1086,7 +1092,7 @@
 }
 
 
-void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
   // Assignment to a property, using a named store IC.
   Property* prop = expr->target()->AsProperty();
   ASSERT(prop != NULL);
@@ -1121,7 +1127,7 @@
 }
 
 
-void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
   // Assignment to a property, using a keyed store IC.
 
   // If the assignment starts a block of assignments to the same object,
@@ -1157,7 +1163,7 @@
 }
 
 
-void FastCodeGenerator::VisitProperty(Property* expr) {
+void FullCodeGenerator::VisitProperty(Property* expr) {
   Comment cmnt(masm_, "[ Property");
   Expression* key = expr->key();
 
@@ -1177,7 +1183,7 @@
 }
 
 
-void FastCodeGenerator::EmitCallWithIC(Call* expr,
+void FullCodeGenerator::EmitCallWithIC(Call* expr,
                                        Handle<Object> name,
                                        RelocInfo::Mode mode) {
   // Code common for calls using the IC.
@@ -1198,7 +1204,7 @@
 }
 
 
-void FastCodeGenerator::EmitCallWithStub(Call* expr) {
+void FullCodeGenerator::EmitCallWithStub(Call* expr) {
   // Code common for calls using the call stub.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
@@ -1215,7 +1221,7 @@
 }
 
 
-void FastCodeGenerator::VisitCall(Call* expr) {
+void FullCodeGenerator::VisitCall(Call* expr) {
   Comment cmnt(masm_, "[ Call");
   Expression* fun = expr->expression();
   Variable* var = fun->AsVariableProxy()->AsVariable();
@@ -1269,12 +1275,12 @@
   } else {
     // Call to some other expression.  If the expression is an anonymous
     // function literal not called in a loop, mark it as one that should
-    // also use the fast code generator.
+    // also use the full code generator.
     FunctionLiteral* lit = fun->AsFunctionLiteral();
     if (lit != NULL &&
         lit->name()->Equals(Heap::empty_string()) &&
         loop_depth() == 0) {
-      lit->set_try_fast_codegen(true);
+      lit->set_try_full_codegen(true);
     }
     VisitForValue(fun, kStack);
     // Load global receiver object.
@@ -1286,7 +1292,7 @@
 }
 
 
-void FastCodeGenerator::VisitCallNew(CallNew* expr) {
+void FullCodeGenerator::VisitCallNew(CallNew* expr) {
   Comment cmnt(masm_, "[ CallNew");
   // According to ECMA-262, section 11.2.2, page 44, the function
   // expression in new calls must be evaluated before the
@@ -1321,7 +1327,7 @@
 }
 
 
-void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
   Comment cmnt(masm_, "[ CallRuntime");
   ZoneList<Expression*>* args = expr->arguments();
 
@@ -1353,7 +1359,7 @@
 }
 
 
-void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
   switch (expr->op()) {
     case Token::VOID: {
       Comment cmnt(masm_, "[ UnaryOperation (VOID)");
@@ -1457,13 +1463,26 @@
       break;
     }
 
+    case Token::ADD: {
+      Comment cmnt(masm_, "[ UnaryOperation (ADD)");
+      VisitForValue(expr->expression(), kAccumulator);
+      Label no_conversion;
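+      // Smis are already numbers, so ToNumber is only needed for non-smis.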
+      __ test(result_register(), Immediate(kSmiTagMask));
+      __ j(zero, &no_conversion);
+      __ push(result_register());
+      __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+      __ bind(&no_conversion);
+      Apply(context_, result_register());
+      break;
+    }
+
     default:
       UNREACHABLE();
   }
 }
 
 
-void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
+void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
   Comment cmnt(masm_, "[ CountOperation");
 
   // Expression can only be a property, a global or a (parameter or local)
@@ -1482,7 +1501,7 @@
   if (assign_type == VARIABLE) {
     ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
     Location saved_location = location_;
-    location_ = kStack;
+    location_ = kAccumulator;
     EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
                      Expression::kValue);
     location_ = saved_location;
@@ -1498,11 +1517,15 @@
       VisitForValue(prop->key(), kStack);
       EmitKeyedPropertyLoad(prop);
     }
-    __ push(eax);
   }
 
-  // Convert to number.
+  // Call ToNumber only if operand is not a smi.
+  Label no_conversion;
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(zero, &no_conversion);
+  __ push(eax);
   __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+  __ bind(&no_conversion);
 
   // Save result for postfix expressions.
   if (expr->is_postfix()) {
@@ -1534,13 +1557,33 @@
     }
   }
 
+  // Inline smi case if we are in a loop.
+  Label stub_call, done;
+  if (loop_depth() > 0) {
+    if (expr->op() == Token::INC) {
+      __ add(Operand(eax), Immediate(Smi::FromInt(1)));
+    } else {
+      __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+    }
+    __ j(overflow, &stub_call);
+    // We could eliminate this smi check if we split the code at
+    // the first smi check before calling ToNumber.
+    __ test(eax, Immediate(kSmiTagMask));
+    __ j(zero, &done);
+    __ bind(&stub_call);
+    // Call stub. Undo operation first.
+    if (expr->op() == Token::INC) {
+      __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+    } else {
+      __ add(Operand(eax), Immediate(Smi::FromInt(1)));
+    }
+  }
   // Call stub for +1/-1.
-  __ push(eax);
-  __ push(Immediate(Smi::FromInt(1)));
   GenericBinaryOpStub stub(expr->binary_op(),
                            NO_OVERWRITE,
                            NO_GENERIC_BINARY_FLAGS);
-  __ CallStub(&stub);
+  stub.GenerateCall(masm(), eax, Smi::FromInt(1));
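+  // GenerateCall passes the operands in registers rather than on the stack.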
+  __ bind(&done);
 
   // Store the value returned in eax.
   switch (assign_type) {
@@ -1595,7 +1638,7 @@
 }
 
 
-void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
   Comment cmnt(masm_, "[ BinaryOperation");
   switch (expr->op()) {
     case Token::COMMA:
@@ -1630,7 +1673,7 @@
 }
 
 
-void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
   Comment cmnt(masm_, "[ CompareOperation");
 
   // Always perform the comparison for its control flow.  Pack the result
@@ -1745,25 +1788,25 @@
 }
 
 
-void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
   __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
   Apply(context_, eax);
 }
 
 
-Register FastCodeGenerator::result_register() { return eax; }
+Register FullCodeGenerator::result_register() { return eax; }
 
 
-Register FastCodeGenerator::context_register() { return esi; }
+Register FullCodeGenerator::context_register() { return esi; }
 
 
-void FastCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
   ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
   __ mov(Operand(ebp, frame_offset), value);
 }
 
 
-void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
+void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
   __ mov(dst, CodeGenerator::ContextOperand(esi, context_index));
 }
 
@@ -1771,7 +1814,7 @@
 // ----------------------------------------------------------------------------
 // Non-local control flow support.
 
-void FastCodeGenerator::EnterFinallyBlock() {
+void FullCodeGenerator::EnterFinallyBlock() {
   // Cook return address on top of stack (smi encoded Code* delta)
   ASSERT(!result_register().is(edx));
   __ mov(edx, Operand(esp, 0));
@@ -1785,7 +1828,7 @@
 }
 
 
-void FastCodeGenerator::ExitFinallyBlock() {
+void FullCodeGenerator::ExitFinallyBlock() {
   ASSERT(!result_register().is(edx));
   // Restore result register from stack.
   __ pop(result_register());
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 5658605..ebc2cfa9 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -244,11 +244,10 @@
 
   // Get the map of the receiver.
   __ mov(edx, FieldOperand(ecx, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks.  We need
-  // to check this explicitly since this generic stub does not perform
-  // map checks.
+
+  // Check bit field.
   __ movzx_b(ebx, FieldOperand(edx, Map::kBitFieldOffset));
-  __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
+  __ test(ebx, Immediate(kSlowCaseBitFieldMask));
   __ j(not_zero, &slow, not_taken);
   // Check that the object is some kind of JS object EXCEPT JS Value type.
   // In the case that the object is a value-wrapper object,
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index d7c7d3a..a16c103 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -1454,6 +1454,36 @@
 }
 
 
+void MacroAssembler::IncrementCounter(Condition cc,
+                                      StatsCounter* counter,
+                                      int value) {
+  ASSERT(value > 0);
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    Label skip;
+    j(NegateCondition(cc), &skip);
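+    // The counter update clobbers the flags, so preserve them with pushfd.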
+    pushfd();
+    IncrementCounter(counter, value);
+    popfd();
+    bind(&skip);
+  }
+}
+
+
+void MacroAssembler::DecrementCounter(Condition cc,
+                                      StatsCounter* counter,
+                                      int value) {
+  ASSERT(value > 0);
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    Label skip;
+    j(NegateCondition(cc), &skip);
+    pushfd();
+    DecrementCounter(counter, value);
+    popfd();
+    bind(&skip);
+  }
+}
+
+
 void MacroAssembler::Assert(Condition cc, const char* msg) {
   if (FLAG_debug_code) Check(cc, msg);
 }
@@ -1495,6 +1525,38 @@
 }
 
 
+void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
+                                                         Register object2,
+                                                         Register scratch1,
+                                                         Register scratch2,
+                                                         Label* failure) {
+  // Check that both objects are not smis.
+  ASSERT_EQ(0, kSmiTag);
+  mov(scratch1, Operand(object1));
+  and_(scratch1, Operand(object2));
+  test(scratch1, Immediate(kSmiTagMask));
+  j(zero, failure);
+
+  // Load instance type for both strings.
+  mov(scratch1, FieldOperand(object1, HeapObject::kMapOffset));
+  mov(scratch2, FieldOperand(object2, HeapObject::kMapOffset));
+  movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
+  movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
+
+  // Check that both are flat ascii strings.
+  const int kFlatAsciiStringMask =
+      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+  // Interleave bits from both instance types and compare them in one check.
+  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+  and_(scratch1, kFlatAsciiStringMask);
+  and_(scratch2, kFlatAsciiStringMask);
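+  // lea computes scratch1 + scratch2 * 8, i.e. scratch1 | (scratch2 << 3).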
+  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+  cmp(scratch1, kFlatAsciiStringTag | (kFlatAsciiStringTag << 3));
+  j(not_equal, failure);
+}
+
+
 CodePatcher::CodePatcher(byte* address, int size)
     : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index ceecebf..3f000ee 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -392,6 +392,8 @@
   void SetCounter(StatsCounter* counter, int value);
   void IncrementCounter(StatsCounter* counter, int value);
   void DecrementCounter(StatsCounter* counter, int value);
+  void IncrementCounter(Condition cc, StatsCounter* counter, int value);
+  void DecrementCounter(Condition cc, StatsCounter* counter, int value);
 
 
   // ---------------------------------------------------------------------------
@@ -413,6 +415,17 @@
   void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
   bool allow_stub_calls() { return allow_stub_calls_; }
 
+  // ---------------------------------------------------------------------------
+  // String utilities.
+
+  // Checks if both objects are sequential ASCII strings, and jumps to label
+  // if either is not.
+  void JumpIfNotBothSequentialAsciiStrings(Register object1,
+                                           Register object2,
+                                           Register scratch1,
+                                           Register scratch2,
+                                           Label* on_not_flat_ascii_strings);
+
  private:
   List<Unresolved> unresolved_;
   bool generating_stub_;
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index 4af59dd..f6da693 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -59,8 +59,6 @@
  *                               call through the runtime system)
  *       - stack_area_base      (High end of the memory area to use as
  *                               backtracking stack)
- *       - at_start             (if 1, we are starting at the start of the
- *                               string, otherwise 0)
  *       - int* capture_array   (int[num_saved_registers_], for output).
  *       - end of input         (Address of end of string)
  *       - start of input       (Address of first character in string)
@@ -74,6 +72,8 @@
  *       - backup of caller ebx
  *       - Offset of location before start of input (effectively character
  *         position -1). Used to initialize capture registers to a non-position.
+ *       - Boolean at start (if 1, we are starting at the start of the string,
+ *         otherwise 0)
  *       - register 0  ebp[-4]  (Only positions must be stored in the first
  *       - register 1  ebp[-8]   num_saved_registers_ registers)
  *       - ...
@@ -625,6 +625,7 @@
   __ push(edi);
   __ push(ebx);  // Callee-save on MacOS.
   __ push(Immediate(0));  // Make room for "input start - 1" constant.
+  __ push(Immediate(0));  // Make room for "at start" constant.
 
   // Check if we have space on the stack for registers.
   Label stack_limit_hit;
@@ -667,6 +668,15 @@
   // Store this value in a local variable, for use when clearing
   // position registers.
   __ mov(Operand(ebp, kInputStartMinusOne), eax);
+
+  // Determine whether the start index is zero, that is at the start of the
+  // string, and store that value in a local variable.
+  __ mov(ebx, Operand(ebp, kStartIndex));
+  __ xor_(Operand(ecx), ecx);  // setcc only operates on cl (lower byte of ecx).
+  __ test(ebx, Operand(ebx));
+  __ setcc(zero, ecx);  // 1 if 0 (start of string), 0 if positive.
+  __ mov(Operand(ebp, kAtStart), ecx);
+
   if (num_saved_registers_ > 0) {  // Always is, if generated from a regexp.
     // Fill saved registers with initial value = start offset - 1
     // Fill in stack push order, to avoid accessing across an unwritten
diff --git a/src/ia32/regexp-macro-assembler-ia32.h b/src/ia32/regexp-macro-assembler-ia32.h
index 8e7a6a5..d9866b7 100644
--- a/src/ia32/regexp-macro-assembler-ia32.h
+++ b/src/ia32/regexp-macro-assembler-ia32.h
@@ -123,8 +123,7 @@
   static const int kInputStart = kStartIndex + kPointerSize;
   static const int kInputEnd = kInputStart + kPointerSize;
   static const int kRegisterOutput = kInputEnd + kPointerSize;
-  static const int kAtStart = kRegisterOutput + kPointerSize;
-  static const int kStackHighEnd = kAtStart + kPointerSize;
+  static const int kStackHighEnd = kRegisterOutput + kPointerSize;
   static const int kDirectCall = kStackHighEnd + kPointerSize;
   // Below the frame pointer - local stack variables.
   // When adding local variables remember to push space for them in
@@ -133,8 +132,9 @@
   static const int kBackup_edi = kBackup_esi - kPointerSize;
   static const int kBackup_ebx = kBackup_edi - kPointerSize;
   static const int kInputStartMinusOne = kBackup_ebx - kPointerSize;
+  static const int kAtStart = kInputStartMinusOne - kPointerSize;
   // First register address. Following registers are below it on the stack.
-  static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+  static const int kRegisterZero = kAtStart - kPointerSize;
 
   // Initial size of code buffer.
   static const size_t kRegExpCodeSize = 1024;
diff --git a/src/ia32/simulator-ia32.h b/src/ia32/simulator-ia32.h
index 3ebd2e6..94ef7bf 100644
--- a/src/ia32/simulator-ia32.h
+++ b/src/ia32/simulator-ia32.h
@@ -53,8 +53,8 @@
 
 // Call the generated regexp code directly. The entry function pointer should
-// expect eight int/pointer sized arguments and return an int.
+// expect seven int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
-  entry(p0, p1, p2, p3, p4, p5, p6, p7)
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+  entry(p0, p1, p2, p3, p4, p5, p6)
 
 #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
   reinterpret_cast<TryCatch*>(try_catch_address)
diff --git a/src/ic.h b/src/ic.h
index be7f956..8f0eb37 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -295,6 +295,13 @@
   static void ClearInlinedVersion(Address address);
 
  private:
+  // Bit mask to be tested against the bit field for the cases when the
+  // generic stub should go into the slow case.
+  // The access check must be performed explicitly, since the generic stub
+  // does not perform map checks.
+  static const int kSlowCaseBitFieldMask =
+      (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
+
   static void Generate(MacroAssembler* masm, const ExternalReference& f);
 
   // Update the inline cache.
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index 8af472d..505cf03 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -4462,10 +4462,13 @@
   while (i1 < n1 || i2 < n2) {
     CharacterRange next_range;
     int range_source;
-    if (i2 == n2 || first_set->at(i1).from() < second_set->at(i2).from()) {
+    if (i2 == n2 ||
+        (i1 < n1 && first_set->at(i1).from() < second_set->at(i2).from())) {
+      // Next smallest element is in first set.
       next_range = first_set->at(i1++);
       range_source = kInsideFirst;
     } else {
+      // Next smallest element is in second set.
       next_range = second_set->at(i2++);
       range_source = kInsideSecond;
     }
diff --git a/src/list.h b/src/list.h
index aff63c3..d3c2767 100644
--- a/src/list.h
+++ b/src/list.h
@@ -68,7 +68,8 @@
   // not safe to use after operations that can change the list's
   // backing store (eg, Add).
   inline T& operator[](int i) const  {
-    ASSERT(0 <= i && i < length_);
+    ASSERT(0 <= i);
+    ASSERT(i < length_);
     return data_[i];
   }
   inline T& at(int i) const  { return operator[](i); }
diff --git a/src/log.cc b/src/log.cc
index 98dd562..5de7429 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -155,6 +155,13 @@
     return;
   }
 
+  const Address functionAddr =
+      sample->fp + JavaScriptFrameConstants::kFunctionOffset;
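+  // Record the function from the topmost JS frame if the fp is in bounds;
+  // subtracting kHeapObjectTag yields the untagged JSFunction address.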
+  if (SafeStackFrameIterator::IsWithinBounds(sample->sp, js_entry_sp,
+                                             functionAddr)) {
+    sample->function = Memory::Address_at(functionAddr) - kHeapObjectTag;
+  }
+
   int i = 0;
   const Address callback = Logger::current_state_ != NULL ?
       Logger::current_state_->external_callback() : NULL;
@@ -162,11 +169,8 @@
     sample->stack[i++] = callback;
   }
 
-  SafeStackTraceFrameIterator it(
-      reinterpret_cast<Address>(sample->fp),
-      reinterpret_cast<Address>(sample->sp),
-      reinterpret_cast<Address>(sample->sp),
-      js_entry_sp);
+  SafeStackTraceFrameIterator it(sample->fp, sample->sp,
+                                 sample->sp, js_entry_sp);
   while (!it.done() && i < TickSample::kMaxFramesCount) {
     sample->stack[i++] = it.frame()->pc();
     it.Advance();
@@ -837,36 +841,14 @@
 
 void Logger::CodeMoveEvent(Address from, Address to) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  static Address prev_to_ = NULL;
-  if (!Log::IsEnabled() || !FLAG_log_code) return;
-  LogMessageBuilder msg;
-  msg.Append("%s,", log_events_[CODE_MOVE_EVENT]);
-  msg.AppendAddress(from);
-  msg.Append(',');
-  msg.AppendAddress(to, prev_to_);
-  prev_to_ = to;
-  if (FLAG_compress_log) {
-    ASSERT(compression_helper_ != NULL);
-    if (!compression_helper_->HandleMessage(&msg)) return;
-  }
-  msg.Append('\n');
-  msg.WriteToLogFile();
+  MoveEventInternal(CODE_MOVE_EVENT, from, to);
 #endif
 }
 
 
 void Logger::CodeDeleteEvent(Address from) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (!Log::IsEnabled() || !FLAG_log_code) return;
-  LogMessageBuilder msg;
-  msg.Append("%s,", log_events_[CODE_DELETE_EVENT]);
-  msg.AppendAddress(from);
-  if (FLAG_compress_log) {
-    ASSERT(compression_helper_ != NULL);
-    if (!compression_helper_->HandleMessage(&msg)) return;
-  }
-  msg.Append('\n');
-  msg.WriteToLogFile();
+  DeleteEventInternal(CODE_DELETE_EVENT, from);
 #endif
 }
 
@@ -888,6 +870,78 @@
 }
 
 
+void Logger::FunctionCreateEvent(JSFunction* function) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  static Address prev_code = NULL;
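+  // prev_code is the bias for emitting a compressed (relative) code address.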
+  if (!Log::IsEnabled() || !FLAG_log_code) return;
+  LogMessageBuilder msg;
+  msg.Append("%s,", log_events_[FUNCTION_CREATION_EVENT]);
+  msg.AppendAddress(function->address());
+  msg.Append(',');
+  msg.AppendAddress(function->code()->address(), prev_code);
+  prev_code = function->code()->address();
+  if (FLAG_compress_log) {
+    ASSERT(compression_helper_ != NULL);
+    if (!compression_helper_->HandleMessage(&msg)) return;
+  }
+  msg.Append('\n');
+  msg.WriteToLogFile();
+#endif
+}
+
+
+void Logger::FunctionMoveEvent(Address from, Address to) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  MoveEventInternal(FUNCTION_MOVE_EVENT, from, to);
+#endif
+}
+
+
+void Logger::FunctionDeleteEvent(Address from) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  DeleteEventInternal(FUNCTION_DELETE_EVENT, from);
+#endif
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+void Logger::MoveEventInternal(LogEventsAndTags event,
+                               Address from,
+                               Address to) {
+  static Address prev_to_ = NULL;
+  if (!Log::IsEnabled() || !FLAG_log_code) return;
+  LogMessageBuilder msg;
+  msg.Append("%s,", log_events_[event]);
+  msg.AppendAddress(from);
+  msg.Append(',');
+  msg.AppendAddress(to, prev_to_);
+  prev_to_ = to;
+  if (FLAG_compress_log) {
+    ASSERT(compression_helper_ != NULL);
+    if (!compression_helper_->HandleMessage(&msg)) return;
+  }
+  msg.Append('\n');
+  msg.WriteToLogFile();
+}
+#endif
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+void Logger::DeleteEventInternal(LogEventsAndTags event, Address from) {
+  if (!Log::IsEnabled() || !FLAG_log_code) return;
+  LogMessageBuilder msg;
+  msg.Append("%s,", log_events_[event]);
+  msg.AppendAddress(from);
+  if (FLAG_compress_log) {
+    ASSERT(compression_helper_ != NULL);
+    if (!compression_helper_->HandleMessage(&msg)) return;
+  }
+  msg.Append('\n');
+  msg.WriteToLogFile();
+}
+#endif
+
+
 void Logger::ResourceEvent(const char* name, const char* tag) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (!Log::IsEnabled() || !FLAG_log) return;
@@ -1069,13 +1123,17 @@
 void Logger::TickEvent(TickSample* sample, bool overflow) {
   if (!Log::IsEnabled() || !FLAG_prof) return;
   static Address prev_sp = NULL;
+  static Address prev_function = NULL;
   LogMessageBuilder msg;
   msg.Append("%s,", log_events_[TICK_EVENT]);
-  Address prev_addr = reinterpret_cast<Address>(sample->pc);
+  Address prev_addr = sample->pc;
   msg.AppendAddress(prev_addr);
   msg.Append(',');
-  msg.AppendAddress(reinterpret_cast<Address>(sample->sp), prev_sp);
-  prev_sp = reinterpret_cast<Address>(sample->sp);
+  msg.AppendAddress(sample->sp, prev_sp);
+  prev_sp = sample->sp;
+  msg.Append(',');
+  msg.AppendAddress(sample->function, prev_function);
+  prev_function = sample->function;
   msg.Append(",%d", static_cast<int>(sample->state));
   if (overflow) {
     msg.Append(",overflow");
@@ -1144,6 +1202,7 @@
       LOG(UncheckedStringEvent("profiler", "resume"));
       FLAG_log_code = true;
       LogCompiledFunctions();
+      LogFunctionObjects();
       LogAccessorCallbacks();
       if (!FLAG_sliding_state_window) ticker_->Start();
     }
@@ -1178,9 +1237,7 @@
   AssertNoAllocation no_alloc;
   int compiled_funcs_count = 0;
   HeapIterator iterator;
-  while (iterator.has_next()) {
-    HeapObject* obj = iterator.next();
-    ASSERT(obj != NULL);
+  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     if (!obj->IsSharedFunctionInfo()) continue;
     SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
     if (sfi->is_compiled()
@@ -1290,12 +1347,22 @@
 }
 
 
+void Logger::LogFunctionObjects() {
+  AssertNoAllocation no_alloc;
+  HeapIterator iterator;
+  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+    if (!obj->IsJSFunction()) continue;
+    JSFunction* jsf = JSFunction::cast(obj);
+    if (!jsf->is_compiled()) continue;
+    LOG(FunctionCreateEvent(jsf));
+  }
+}
+
+
 void Logger::LogAccessorCallbacks() {
   AssertNoAllocation no_alloc;
   HeapIterator iterator;
-  while (iterator.has_next()) {
-    HeapObject* obj = iterator.next();
-    ASSERT(obj != NULL);
+  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
     if (!obj->IsAccessorInfo()) continue;
     AccessorInfo* ai = AccessorInfo::cast(obj);
     if (!ai->name()->IsString()) continue;
diff --git a/src/log.h b/src/log.h
index e21df03..1f6e60e 100644
--- a/src/log.h
+++ b/src/log.h
@@ -116,6 +116,9 @@
   V(CODE_CREATION_EVENT,            "code-creation",          "cc")       \
   V(CODE_MOVE_EVENT,                "code-move",              "cm")       \
   V(CODE_DELETE_EVENT,              "code-delete",            "cd")       \
+  V(FUNCTION_CREATION_EVENT,        "function-creation",      "fc")       \
+  V(FUNCTION_MOVE_EVENT,            "function-move",          "fm")       \
+  V(FUNCTION_DELETE_EVENT,          "function-delete",        "fd")       \
   V(SNAPSHOT_POSITION_EVENT,        "snapshot-pos",           "sp")       \
   V(TICK_EVENT,                     "tick",                   "t")        \
   V(REPEAT_META_EVENT,              "repeat",                 "r")        \
@@ -224,6 +227,12 @@
   static void CodeMoveEvent(Address from, Address to);
   // Emits a code delete event.
   static void CodeDeleteEvent(Address from);
+  // Emits a function object create event.
+  static void FunctionCreateEvent(JSFunction* function);
+  // Emits a function move event.
+  static void FunctionMoveEvent(Address from, Address to);
+  // Emits a function delete event.
+  static void FunctionDeleteEvent(Address from);
 
   static void SnapshotPositionEvent(Address addr, int pos);
 
@@ -278,6 +287,8 @@
 
   // Logs all compiled functions found in the heap.
   static void LogCompiledFunctions();
+  // Logs all compiled JSFunction objects found in the heap.
+  static void LogFunctionObjects();
   // Logs all accessor callbacks found in the heap.
   static void LogAccessorCallbacks();
   // Used for logging stubs found in the snapshot.
@@ -299,6 +310,15 @@
                                     const char* name,
                                     Address entry_point);
 
+  // Internal configurable move event.
+  static void MoveEventInternal(LogEventsAndTags event,
+                                Address from,
+                                Address to);
+
+  // Internal configurable delete event.
+  static void DeleteEventInternal(LogEventsAndTags event,
+                                  Address from);
+
   // Emits aliases for compressed messages.
   static void LogAliases();
 
diff --git a/src/macros.py b/src/macros.py
index 1e436a0..c160b49 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -92,6 +92,7 @@
 macro IS_SCRIPT(arg)            = (%_ClassOf(arg) === 'Script');
 macro IS_ARGUMENTS(arg)         = (%_ClassOf(arg) === 'Arguments');
 macro IS_GLOBAL(arg)            = (%_ClassOf(arg) === 'global');
+macro IS_UNDETECTABLE(arg)      = (%_IsUndetectableObject(arg));
 macro FLOOR(arg)                = $floor(arg);
 
 # Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index e284b42..1f2c37d 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -129,7 +129,8 @@
 #endif
 
   PagedSpaces spaces;
-  while (PagedSpace* space = spaces.next()) {
+  for (PagedSpace* space = spaces.next();
+       space != NULL; space = spaces.next()) {
     space->PrepareForMarkCompact(compacting_collection_);
   }
 
@@ -172,7 +173,7 @@
   int old_gen_used = 0;
 
   OldSpaces spaces;
-  while (OldSpace* space = spaces.next()) {
+  for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
     old_gen_recoverable += space->Waste() + space->AvailableFree();
     old_gen_used += space->Size();
   }
@@ -475,8 +476,8 @@
 
 void MarkCompactCollector::CreateBackPointers() {
   HeapObjectIterator iterator(Heap::map_space());
-  while (iterator.has_next()) {
-    Object* next_object = iterator.next();
+  for (HeapObject* next_object = iterator.next();
+       next_object != NULL; next_object = iterator.next()) {
     if (next_object->IsMap()) {  // Could also be ByteArray on free list.
       Map* map = Map::cast(next_object);
       if (map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
@@ -509,8 +510,7 @@
   // so that we don't waste effort pointlessly scanning for objects.
   ASSERT(!marking_stack.is_full());
 
-  while (it->has_next()) {
-    HeapObject* object = it->next();
+  for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
     if (object->IsOverflowed()) {
       object->ClearOverflow();
       ASSERT(object->IsMarked());
@@ -793,8 +793,9 @@
   // scan the descriptor arrays of those maps, not all maps.
   // All of these actions are carried out only on maps of JSObjects
   // and related subtypes.
-  while (map_iterator.has_next()) {
-    Map* map = reinterpret_cast<Map*>(map_iterator.next());
+  for (HeapObject* obj = map_iterator.next();
+       obj != NULL; obj = map_iterator.next()) {
+    Map* map = reinterpret_cast<Map*>(obj);
     if (!map->IsMarked() && map->IsByteArray()) continue;
 
     ASSERT(SafeIsMap(map));
@@ -969,12 +970,6 @@
 inline void IgnoreNonLiveObject(HeapObject* object) {}
 
 
-// A code deletion event is logged for non-live code objects.
-inline void LogNonLiveCodeObject(HeapObject* object) {
-  if (object->IsCode()) LOG(CodeDeleteEvent(object->address()));
-}
-
-
 // Function template that, given a range of addresses (eg, a semispace or a
 // paged space page), iterates through the objects in the range to clear
 // mark bits and compute and encode forwarding addresses.  As a side effect,
@@ -1122,10 +1117,7 @@
           is_previous_alive = true;
         }
       } else {
-        if (object->IsCode()) {
-          // Notify the logger that compiled code has been collected.
-          LOG(CodeDeleteEvent(Code::cast(object)->address()));
-        }
+        MarkCompactCollector::ReportDeleteIfNeeded(object);
         if (is_previous_alive) {  // Transition from live to free.
           free_start = current;
           is_previous_alive = false;
@@ -1204,7 +1196,7 @@
 
   // Compute the forwarding pointers in each space.
   EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
-                                        IgnoreNonLiveObject>(
+                                        ReportDeleteIfNeeded>(
       Heap::old_pointer_space());
 
   EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
@@ -1212,7 +1204,7 @@
       Heap::old_data_space());
 
   EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
-                                        LogNonLiveCodeObject>(
+                                        ReportDeleteIfNeeded>(
       Heap::code_space());
 
   EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
@@ -1291,6 +1283,7 @@
     MapIterator it;
     HeapObject* o = it.next();
     for (; o != first_map_to_evacuate_; o = it.next()) {
+      ASSERT(o != NULL);
       Map* map = reinterpret_cast<Map*>(o);
       ASSERT(!map->IsMarked());
       ASSERT(!map->IsOverflowed());
@@ -1316,10 +1309,8 @@
 
   void UpdateMapPointersInLargeObjectSpace() {
     LargeObjectIterator it(Heap::lo_space());
-    while (true) {
-      if (!it.has_next()) break;
-      UpdateMapPointersInObject(it.next());
-    }
+    for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
+      UpdateMapPointersInObject(obj);
   }
 
   void Finish() {
@@ -1362,8 +1353,8 @@
 
   static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
     while (true) {
-      ASSERT(it->has_next());
       HeapObject* next = it->next();
+      ASSERT(next != NULL);
       if (next == last)
         return NULL;
       ASSERT(!next->IsOverflowed());
@@ -1452,8 +1443,9 @@
     if (!FLAG_enable_slow_asserts)
       return;
 
-    while (map_to_evacuate_it_.has_next())
-      ASSERT(FreeListNode::IsFreeListNode(map_to_evacuate_it_.next()));
+    for (HeapObject* obj = map_to_evacuate_it_.next();
+         obj != NULL; obj = map_to_evacuate_it_.next())
+      ASSERT(FreeListNode::IsFreeListNode(obj));
   }
 #endif
 };
@@ -1486,7 +1478,8 @@
 
     map_compact.FinishMapSpace();
     PagedSpaces spaces;
-    while (PagedSpace* space = spaces.next()) {
+    for (PagedSpace* space = spaces.next();
+         space != NULL; space = spaces.next()) {
       if (space == Heap::map_space()) continue;
       map_compact.UpdateMapPointersInPagedSpace(space);
     }
@@ -1661,7 +1654,8 @@
 
   // Large objects do not move, the map word can be updated directly.
   LargeObjectIterator it(Heap::lo_space());
-  while (it.has_next()) UpdatePointersInNewObject(it.next());
+  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
+    UpdatePointersInNewObject(obj);
 
   USE(live_maps);
   USE(live_pointer_olds);
@@ -1825,7 +1819,8 @@
   Page::set_rset_state(Page::IN_USE);
 #endif
   PagedSpaces spaces;
-  while (PagedSpace* space = spaces.next()) space->MCCommitRelocationInfo();
+  for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
+    space->MCCommitRelocationInfo();
 }
 
 
@@ -1906,6 +1901,11 @@
 
   ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
 
+  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
+  if (copied_to->IsJSFunction()) {
+    LOG(FunctionMoveEvent(old_addr, new_addr));
+  }
+
   return obj_size;
 }
 
@@ -1986,6 +1986,11 @@
   }
 #endif
 
+  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
+  if (copied_to->IsJSFunction()) {
+    LOG(FunctionMoveEvent(old_addr, new_addr));
+  }
+
   return obj_size;
 }
 
@@ -2001,4 +2006,15 @@
   Heap::RebuildRSets();
 }
 
+
+void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (obj->IsCode()) {
+    LOG(CodeDeleteEvent(obj->address()));
+  } else if (obj->IsJSFunction()) {
+    LOG(FunctionDeleteEvent(obj->address()));
+  }
+#endif
+}
+
 } }  // namespace v8::internal
diff --git a/src/mark-compact.h b/src/mark-compact.h
index 02aedb3..ab572f6 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -115,6 +115,9 @@
   static bool in_use() { return state_ > PREPARE_GC; }
 #endif
 
+  // Determine type of object and emit deletion log event.
+  static void ReportDeleteIfNeeded(HeapObject* obj);
+
  private:
 #ifdef DEBUG
   enum CollectorState {
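
With LogNonLiveCodeObject gone, both the sweeping pass and the
forwarding-address encoder report dead objects through the new
MarkCompactCollector::ReportDeleteIfNeeded, which dispatches on the object's
type. The template parameter it replaces is simply a per-object callback; a
reduced sketch of that shape (signatures simplified, not the real encoder):

    struct HeapObject;  // Stand-in for the real V8 type.

    // ProcessNonLive is a compile-time callback invoked once per dead
    // object. Passing ReportDeleteIfNeeded instead of LogNonLiveCodeObject
    // lets one function emit both code-delete and function-delete events.
    template <void (*ProcessNonLive)(HeapObject* object)>
    void EncodeForwardingAddresses(HeapObject** objects, bool* live,
                                   int count) {
      for (int i = 0; i < count; i++) {
        if (live[i]) {
          // ... assign a forwarding address to the live object ...
        } else {
          ProcessNonLive(objects[i]);
        }
      }
    }
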
diff --git a/src/mksnapshot.cc b/src/mksnapshot.cc
index 10138d9..6457ae7 100644
--- a/src/mksnapshot.cc
+++ b/src/mksnapshot.cc
@@ -164,10 +164,10 @@
   }
   context.Dispose();
   CppByteSink sink(argv[1]);
-  i::Serializer ser(&sink);
   // This results in a somewhat smaller snapshot, probably because it gets rid
   // of some things that are cached between garbage collections.
   i::Heap::CollectAllGarbage(true);
+  i::StartupSerializer ser(&sink);
   ser.Serialize();
   return 0;
 }
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 3003342..6d48b5b 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -2372,8 +2372,8 @@
             kHasOnlySimpleThisPropertyAssignments)
 BOOL_ACCESSORS(SharedFunctionInfo,
                compiler_hints,
-               try_fast_codegen,
-               kTryFastCodegen)
+               try_full_codegen,
+               kTryFullCodegen)
 
 INT_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
 INT_ACCESSORS(SharedFunctionInfo, formal_parameter_count,
diff --git a/src/objects.cc b/src/objects.cc
index 118c489..c76fc83 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -2839,7 +2839,11 @@
       if (result.IsReadOnly()) return Heap::undefined_value();
       if (result.type() == CALLBACKS) {
         Object* obj = result.GetCallbackObject();
-        if (obj->IsFixedArray()) return obj;
+        if (obj->IsFixedArray()) {
+          PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
+          SetNormalizedProperty(name, obj, details);
+          return obj;
+        }
       }
     }
   }
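
The objects.cc change above is a cache-on-first-use: when a callbacks-style
property lookup succeeds, the FixedArray holding it is written back as a
normalized property of the object so subsequent lookups hit directly. The
general shape of the pattern, with stand-in types rather than the V8 API
(SlowLookup is assumed to be provided elsewhere):

    #include <cstddef>
    #include <map>
    #include <string>

    struct Value;                                // Incomplete stand-in.
    Value* SlowLookup(const std::string& name);  // E.g. a chain walk.

    static std::map<std::string, Value*> local_cache;

    Value* CachedLookup(const std::string& name) {
      std::map<std::string, Value*>::iterator it = local_cache.find(name);
      if (it != local_cache.end()) return it->second;
      Value* v = SlowLookup(name);
      if (v != NULL) local_cache[name] = v;  // Mirrors SetNormalizedProperty.
      return v;
    }
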
diff --git a/src/objects.h b/src/objects.h
index 40be0df..0b22b0e 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -3222,8 +3222,8 @@
   // this.x = y; where y is either a constant or refers to an argument.
   inline bool has_only_simple_this_property_assignments();
 
-  inline bool try_fast_codegen();
-  inline void set_try_fast_codegen(bool flag);
+  inline bool try_full_codegen();
+  inline void set_try_full_codegen(bool flag);
 
   // For functions which only contains this property assignments this provides
   // access to the names for the properties assigned.
@@ -3304,7 +3304,7 @@
 
   // Bit positions in compiler_hints.
   static const int kHasOnlySimpleThisPropertyAssignments = 0;
-  static const int kTryFastCodegen = 1;
+  static const int kTryFullCodegen = 1;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
 };
@@ -3649,6 +3649,8 @@
       FixedArray::kHeaderSize + kTagIndex * kPointerSize;
   static const int kDataAsciiCodeOffset =
       FixedArray::kHeaderSize + kIrregexpASCIICodeIndex * kPointerSize;
+  static const int kDataUC16CodeOffset =
+      FixedArray::kHeaderSize + kIrregexpUC16CodeIndex * kPointerSize;
   static const int kIrregexpCaptureCountOffset =
       FixedArray::kHeaderSize + kIrregexpCaptureCountIndex * kPointerSize;
 };
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index 353d165..ff75776 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -95,6 +95,24 @@
 }
 
 
+const char* OS::LocalTimezone(double time) {
+  if (isnan(time)) return "";
+  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+  struct tm* t = localtime(&tv);
+  if (NULL == t) return "";
+  return t->tm_zone;
+}
+
+
+double OS::LocalTimeOffset() {
+  time_t tv = time(NULL);
+  struct tm* t = localtime(&tv);
+  // tm_gmtoff includes any daylight savings offset, so subtract it.
+  return static_cast<double>(t->tm_gmtoff * msPerSecond -
+                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+}
+
+
 // We keep the lowest and highest addresses mapped as a quick way of
 // determining that pointers are outside the heap (used mostly in assertions
 // and verification).  The estimate is conservative, ie, not all addresses in
@@ -555,17 +573,17 @@
     ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
     mcontext_t& mcontext = ucontext->uc_mcontext;
 #if V8_HOST_ARCH_IA32
-    sample.pc = mcontext.mc_eip;
-    sample.sp = mcontext.mc_esp;
-    sample.fp = mcontext.mc_ebp;
+    sample.pc = reinterpret_cast<Address>(mcontext.mc_eip);
+    sample.sp = reinterpret_cast<Address>(mcontext.mc_esp);
+    sample.fp = reinterpret_cast<Address>(mcontext.mc_ebp);
 #elif V8_HOST_ARCH_X64
-    sample.pc = mcontext.mc_rip;
-    sample.sp = mcontext.mc_rsp;
-    sample.fp = mcontext.mc_rbp;
+    sample.pc = reinterpret_cast<Address>(mcontext.mc_rip);
+    sample.sp = reinterpret_cast<Address>(mcontext.mc_rsp);
+    sample.fp = reinterpret_cast<Address>(mcontext.mc_rbp);
 #elif V8_HOST_ARCH_ARM
-    sample.pc = mcontext.mc_r15;
-    sample.sp = mcontext.mc_r13;
-    sample.fp = mcontext.mc_r11;
+    sample.pc = reinterpret_cast<Address>(mcontext.mc_r15);
+    sample.sp = reinterpret_cast<Address>(mcontext.mc_r13);
+    sample.fp = reinterpret_cast<Address>(mcontext.mc_r11);
 #endif
     active_sampler_->SampleStack(&sample);
   }
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index bfcd8fb..005b1de 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -159,6 +159,24 @@
 }
 
 
+const char* OS::LocalTimezone(double time) {
+  if (isnan(time)) return "";
+  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+  struct tm* t = localtime(&tv);
+  if (NULL == t) return "";
+  return t->tm_zone;
+}
+
+
+double OS::LocalTimeOffset() {
+  time_t tv = time(NULL);
+  struct tm* t = localtime(&tv);
+  // tm_gmtoff includes any daylight savings offset, so subtract it.
+  return static_cast<double>(t->tm_gmtoff * msPerSecond -
+                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+}
+
+
 // We keep the lowest and highest addresses mapped as a quick way of
 // determining that pointers are outside the heap (used mostly in assertions
 // and verification).  The estimate is conservative, ie, not all addresses in
@@ -707,23 +725,23 @@
     ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
     mcontext_t& mcontext = ucontext->uc_mcontext;
 #if V8_HOST_ARCH_IA32
-    sample.pc = mcontext.gregs[REG_EIP];
-    sample.sp = mcontext.gregs[REG_ESP];
-    sample.fp = mcontext.gregs[REG_EBP];
+    sample.pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
+    sample.sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
+    sample.fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
 #elif V8_HOST_ARCH_X64
-    sample.pc = mcontext.gregs[REG_RIP];
-    sample.sp = mcontext.gregs[REG_RSP];
-    sample.fp = mcontext.gregs[REG_RBP];
+    sample.pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
+    sample.sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
+    sample.fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
 #elif V8_HOST_ARCH_ARM
 // An undefined macro evaluates to 0, so this applies to Android's Bionic also.
 #if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
-    sample.pc = mcontext.gregs[R15];
-    sample.sp = mcontext.gregs[R13];
-    sample.fp = mcontext.gregs[R11];
+    sample.pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
+    sample.sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
+    sample.fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
 #else
-    sample.pc = mcontext.arm_pc;
-    sample.sp = mcontext.arm_sp;
-    sample.fp = mcontext.arm_fp;
+    sample.pc = reinterpret_cast<Address>(mcontext.arm_pc);
+    sample.sp = reinterpret_cast<Address>(mcontext.arm_sp);
+    sample.fp = reinterpret_cast<Address>(mcontext.arm_fp);
 #endif
 #endif
     if (IsVmThread())
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index 0d5be45..e379ae2 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -259,6 +259,24 @@
 }
 
 
+const char* OS::LocalTimezone(double time) {
+  if (isnan(time)) return "";
+  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+  struct tm* t = localtime(&tv);
+  if (NULL == t) return "";
+  return t->tm_zone;
+}
+
+
+double OS::LocalTimeOffset() {
+  time_t tv = time(NULL);
+  struct tm* t = localtime(&tv);
+  // tm_gmtoff includes any daylight savings offset, so subtract it.
+  return static_cast<double>(t->tm_gmtoff * msPerSecond -
+                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+}
+
+
 int OS::StackWalk(Vector<StackFrame> frames) {
   // If weak link to execinfo lib has failed, ie because we are on 10.4, abort.
   if (backtrace == NULL)
@@ -559,9 +577,9 @@
                              flavor,
                              reinterpret_cast<natural_t*>(&state),
                              &count) == KERN_SUCCESS) {
-          sample.pc = state.REGISTER_FIELD(ip);
-          sample.sp = state.REGISTER_FIELD(sp);
-          sample.fp = state.REGISTER_FIELD(bp);
+          sample.pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
+          sample.sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
+          sample.fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
           sampler_->SampleStack(&sample);
         }
         thread_resume(profiled_thread_);
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index 6d27304..62e6004 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -94,6 +94,24 @@
 }
 
 
+const char* OS::LocalTimezone(double time) {
+  if (isnan(time)) return "";
+  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+  struct tm* t = localtime(&tv);
+  if (NULL == t) return "";
+  return t->tm_zone;
+}
+
+
+double OS::LocalTimeOffset() {
+  time_t tv = time(NULL);
+  struct tm* t = localtime(&tv);
+  // tm_gmtoff includes any daylight savings offset, so subtract it.
+  return static_cast<double>(t->tm_gmtoff * msPerSecond -
+                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+}
+
+
 // We keep the lowest and highest addresses mapped as a quick way of
 // determining that pointers are outside the heap (used mostly in assertions
 // and verification).  The estimate is conservative, ie, not all addresses in
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index 41e0e64..89f4d98 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -99,15 +99,6 @@
 }
 
 
-const char* OS::LocalTimezone(double time) {
-  if (isnan(time)) return "";
-  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
-  struct tm* t = localtime(&tv);
-  if (NULL == t) return "";
-  return t->tm_zone;
-}
-
-
 double OS::DaylightSavingsOffset(double time) {
   if (isnan(time)) return nan_value();
   time_t tv = static_cast<time_t>(floor(time/msPerSecond));
@@ -117,15 +108,6 @@
 }
 
 
-double OS::LocalTimeOffset() {
-  time_t tv = time(NULL);
-  struct tm* t = localtime(&tv);
-  // tm_gmtoff includes any daylight savings offset, so subtract it.
-  return static_cast<double>(t->tm_gmtoff * msPerSecond -
-                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-}
-
-
 // ----------------------------------------------------------------------------
 // POSIX stdio support.
 //
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
new file mode 100644
index 0000000..85c2c54
--- /dev/null
+++ b/src/platform-solaris.cc
@@ -0,0 +1,607 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Platform specific code for Solaris 10 goes here. For the POSIX-compatible
+// parts, the implementation is in platform-posix.cc.
+
+#ifdef __sparc
+# error "V8 does not support the SPARC CPU architecture."
+#endif
+
+#include <sys/stack.h>  // for stack alignment
+#include <unistd.h>  // getpagesize(), usleep()
+#include <sys/mman.h>  // mmap()
+#include <execinfo.h>  // backtrace(), backtrace_symbols()
+#include <pthread.h>
+#include <sched.h>  // for sched_yield
+#include <semaphore.h>
+#include <time.h>
+#include <sys/time.h>  // gettimeofday(), timeradd()
+#include <errno.h>
+#include <ieeefp.h>  // finite()
+#include <signal.h>  // sigemptyset(), etc
+
+
+#undef MAP_TYPE
+
+#include "v8.h"
+
+#include "platform.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+// 0 is never a valid thread id on Solaris since the main thread is 1 and
+// subsequent threads have their ids incremented from there.
+static const pthread_t kNoThread = (pthread_t) 0;
+
+
+double ceiling(double x) {
+  return ceil(x);
+}
+
+
+void OS::Setup() {
+  // Seed the random number generator.
+  // Convert the current time to a 64-bit integer first, before converting it
+  // to an unsigned. Going directly will cause an overflow and the seed to be
+  // set to all ones. The seed will be identical for different instances that
+  // call this setup code within the same millisecond.
+  uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
+  srandom(static_cast<unsigned int>(seed));
+}
+
+
+uint64_t OS::CpuFeaturesImpliedByPlatform() {
+  return 0;  // Solaris runs on a lot of things.
+}
+
+
+int OS::ActivationFrameAlignment() {
+  return STACK_ALIGN;
+}
+
+
+const char* OS::LocalTimezone(double time) {
+  if (isnan(time)) return "";
+  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
+  struct tm* t = localtime(&tv);
+  if (NULL == t) return "";
+  return tzname[0];  // The location of the timezone string on Solaris.
+}
+
+
+double OS::LocalTimeOffset() {
+  // On Solaris, struct tm does not contain a tm_gmtoff field.
+  time_t utc = time(NULL);
+  ASSERT(utc != -1);
+  struct tm* loc = localtime(&utc);
+  ASSERT(loc != NULL);
+  return static_cast<double>((mktime(loc) - utc) * msPerSecond);
+}
+
+
+// We keep the lowest and highest addresses mapped as a quick way of
+// determining that pointers are outside the heap (used mostly in assertions
+// and verification).  The estimate is conservative, ie, not all addresses in
+// 'allocated' space are actually allocated to our heap.  The range is
+// [lowest, highest), inclusive on the low end and exclusive on the high end.
+static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
+static void* highest_ever_allocated = reinterpret_cast<void*>(0);
+
+
+static void UpdateAllocatedSpaceLimits(void* address, int size) {
+  lowest_ever_allocated = Min(lowest_ever_allocated, address);
+  highest_ever_allocated =
+      Max(highest_ever_allocated,
+          reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size));
+}
+
+
+bool OS::IsOutsideAllocatedSpace(void* address) {
+  return address < lowest_ever_allocated || address >= highest_ever_allocated;
+}
+
+
+size_t OS::AllocateAlignment() {
+  return static_cast<size_t>(getpagesize());
+}
+
+
+void* OS::Allocate(const size_t requested,
+                   size_t* allocated,
+                   bool is_executable) {
+  const size_t msize = RoundUp(requested, getpagesize());
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+
+  if (mbase == MAP_FAILED) {
+    LOG(StringEvent("OS::Allocate", "mmap failed"));
+    return NULL;
+  }
+  *allocated = msize;
+  UpdateAllocatedSpaceLimits(mbase, msize);
+  return mbase;
+}
+
+
+void OS::Free(void* address, const size_t size) {
+  // TODO(1240712): munmap has a return value which is ignored here.
+  int result = munmap(address, size);
+  USE(result);
+  ASSERT(result == 0);
+}
+
+
+#ifdef ENABLE_HEAP_PROTECTION
+
+void OS::Protect(void* address, size_t size) {
+  // TODO(1240712): mprotect has a return value which is ignored here.
+  mprotect(address, size, PROT_READ);
+}
+
+
+void OS::Unprotect(void* address, size_t size, bool is_executable) {
+  // TODO(1240712): mprotect has a return value which is ignored here.
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  mprotect(address, size, prot);
+}
+
+#endif
+
+
+void OS::Sleep(int milliseconds) {
+  useconds_t ms = static_cast<useconds_t>(milliseconds);
+  usleep(1000 * ms);
+}
+
+
+void OS::Abort() {
+  // Redirect to std abort to signal abnormal program termination.
+  abort();
+}
+
+
+void OS::DebugBreak() {
+  asm("int $3");
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+  PosixMemoryMappedFile(FILE* file, void* memory, int size)
+    : file_(file), memory_(memory), size_(size) { }
+  virtual ~PosixMemoryMappedFile();
+  virtual void* memory() { return memory_; }
+ private:
+  FILE* file_;
+  void* memory_;
+  int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+    void* initial) {
+  FILE* file = fopen(name, "w+");
+  if (file == NULL) return NULL;
+  int result = fwrite(initial, size, 1, file);
+  if (result < 1) {
+    fclose(file);
+    return NULL;
+  }
+  void* memory =
+      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+  if (memory_) munmap(memory_, size_);
+  fclose(file_);
+}
+
+
+void OS::LogSharedLibraryAddresses() {
+}
+
+
+int OS::StackWalk(Vector<OS::StackFrame> frames) {
+  int frames_size = frames.length();
+  void** addresses = NewArray<void*>(frames_size);
+
+  int frames_count = backtrace(addresses, frames_size);
+
+  char** symbols;
+  symbols = backtrace_symbols(addresses, frames_count);
+  if (symbols == NULL) {
+    DeleteArray(addresses);
+    return kStackWalkError;
+  }
+
+  for (int i = 0; i < frames_count; i++) {
+    frames[i].address = addresses[i];
+    // Format a text representation of the frame based on the information
+    // available.
+    SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen),
+             "%s",
+             symbols[i]);
+    // Make sure line termination is in place.
+    frames[i].text[kStackWalkMaxTextLen - 1] = '\0';
+  }
+
+  DeleteArray(addresses);
+  free(symbols);
+
+  return frames_count;
+}
+
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+
+VirtualMemory::VirtualMemory(size_t size) {
+  address_ = mmap(NULL, size, PROT_NONE,
+                  MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                  kMmapFd, kMmapFdOffset);
+  size_ = size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+  if (IsReserved()) {
+    if (0 == munmap(address(), size())) address_ = MAP_FAILED;
+  }
+}
+
+
+bool VirtualMemory::IsReserved() {
+  return address_ != MAP_FAILED;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
+  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(address, size, prot,
+                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+                         kMmapFd, kMmapFdOffset)) {
+    return false;
+  }
+
+  UpdateAllocatedSpaceLimits(address, size);
+  return true;
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  return mmap(address, size, PROT_NONE,
+              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
+              kMmapFd, kMmapFdOffset) != MAP_FAILED;
+}
+
+
+class ThreadHandle::PlatformData : public Malloced {
+ public:
+  explicit PlatformData(ThreadHandle::Kind kind) {
+    Initialize(kind);
+  }
+
+  void Initialize(ThreadHandle::Kind kind) {
+    switch (kind) {
+      case ThreadHandle::SELF: thread_ = pthread_self(); break;
+      case ThreadHandle::INVALID: thread_ = kNoThread; break;
+    }
+  }
+
+  pthread_t thread_;  // Thread handle for pthread.
+};
+
+
+ThreadHandle::ThreadHandle(Kind kind) {
+  data_ = new PlatformData(kind);
+}
+
+
+void ThreadHandle::Initialize(ThreadHandle::Kind kind) {
+  data_->Initialize(kind);
+}
+
+
+ThreadHandle::~ThreadHandle() {
+  delete data_;
+}
+
+
+bool ThreadHandle::IsSelf() const {
+  return pthread_equal(data_->thread_, pthread_self());
+}
+
+
+bool ThreadHandle::IsValid() const {
+  return data_->thread_ != kNoThread;
+}
+
+
+Thread::Thread() : ThreadHandle(ThreadHandle::INVALID) {
+}
+
+
+Thread::~Thread() {
+}
+
+
+static void* ThreadEntry(void* arg) {
+  Thread* thread = reinterpret_cast<Thread*>(arg);
+  // This is also initialized by the first argument to pthread_create() but we
+  // don't know which thread will run first (the original thread or the new
+  // one) so we initialize it here too.
+  thread->thread_handle_data()->thread_ = pthread_self();
+  ASSERT(thread->IsValid());
+  thread->Run();
+  return NULL;
+}
+
+
+void Thread::Start() {
+  pthread_create(&thread_handle_data()->thread_, NULL, ThreadEntry, this);
+  ASSERT(IsValid());
+}
+
+
+void Thread::Join() {
+  pthread_join(thread_handle_data()->thread_, NULL);
+}
+
+
+Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
+  pthread_key_t key;
+  int result = pthread_key_create(&key, NULL);
+  USE(result);
+  ASSERT(result == 0);
+  return static_cast<LocalStorageKey>(key);
+}
+
+
+void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
+  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+  int result = pthread_key_delete(pthread_key);
+  USE(result);
+  ASSERT(result == 0);
+}
+
+
+void* Thread::GetThreadLocal(LocalStorageKey key) {
+  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+  return pthread_getspecific(pthread_key);
+}
+
+
+void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
+  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
+  pthread_setspecific(pthread_key, value);
+}
+
+
+void Thread::YieldCPU() {
+  sched_yield();
+}
+
+
+class SolarisMutex : public Mutex {
+ public:
+
+  SolarisMutex() {
+    pthread_mutexattr_t attr;
+    pthread_mutexattr_init(&attr);
+    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+    pthread_mutex_init(&mutex_, &attr);
+  }
+
+  ~SolarisMutex() { pthread_mutex_destroy(&mutex_); }
+
+  int Lock() { return pthread_mutex_lock(&mutex_); }
+
+  int Unlock() { return pthread_mutex_unlock(&mutex_); }
+
+ private:
+  pthread_mutex_t mutex_;
+};
+
+
+Mutex* OS::CreateMutex() {
+  return new SolarisMutex();
+}
+
+
+class SolarisSemaphore : public Semaphore {
+ public:
+  explicit SolarisSemaphore(int count) { sem_init(&sem_, 0, count); }
+  virtual ~SolarisSemaphore() { sem_destroy(&sem_); }
+
+  virtual void Wait();
+  virtual bool Wait(int timeout);
+  virtual void Signal() { sem_post(&sem_); }
+ private:
+  sem_t sem_;
+};
+
+
+void SolarisSemaphore::Wait() {
+  while (true) {
+    int result = sem_wait(&sem_);
+    if (result == 0) return;  // Successfully got semaphore.
+    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
+  }
+}
+
+
+#ifndef TIMEVAL_TO_TIMESPEC
+#define TIMEVAL_TO_TIMESPEC(tv, ts) do {                            \
+    (ts)->tv_sec = (tv)->tv_sec;                                    \
+    (ts)->tv_nsec = (tv)->tv_usec * 1000;                           \
+} while (false)
+#endif
+
+
+#ifndef timeradd
+#define timeradd(a, b, result) \
+  do { \
+    (result)->tv_sec = (a)->tv_sec + (b)->tv_sec; \
+    (result)->tv_usec = (a)->tv_usec + (b)->tv_usec; \
+    if ((result)->tv_usec >= 1000000) { \
+      ++(result)->tv_sec; \
+      (result)->tv_usec -= 1000000; \
+    } \
+  } while (0)
+#endif
+
+
+bool SolarisSemaphore::Wait(int timeout) {
+  const long kOneSecondMicros = 1000000;  // NOLINT
+
+  // Split timeout into second and nanosecond parts.
+  struct timeval delta;
+  delta.tv_usec = timeout % kOneSecondMicros;
+  delta.tv_sec = timeout / kOneSecondMicros;
+
+  struct timeval current_time;
+  // Get the current time.
+  if (gettimeofday(&current_time, NULL) == -1) {
+    return false;
+  }
+
+  // Calculate time for end of timeout.
+  struct timeval end_time;
+  timeradd(&current_time, &delta, &end_time);
+
+  struct timespec ts;
+  TIMEVAL_TO_TIMESPEC(&end_time, &ts);
+  // Wait for semaphore signalled or timeout.
+  while (true) {
+    int result = sem_timedwait(&sem_, &ts);
+    if (result == 0) return true;  // Successfully got semaphore.
+    if (result == -1 && errno == ETIMEDOUT) return false;  // Timeout.
+    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
+  }
+}
+
+
+Semaphore* OS::CreateSemaphore(int count) {
+  return new SolarisSemaphore(count);
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+static Sampler* active_sampler_ = NULL;
+
+static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
+  USE(info);
+  if (signal != SIGPROF) return;
+  if (active_sampler_ == NULL) return;
+
+  TickSample sample;
+  sample.pc = 0;
+  sample.sp = 0;
+  sample.fp = 0;
+
+  // We always sample the VM state.
+  sample.state = Logger::state();
+
+  active_sampler_->Tick(&sample);
+}
+
+
+class Sampler::PlatformData : public Malloced {
+ public:
+  PlatformData() {
+    signal_handler_installed_ = false;
+  }
+
+  bool signal_handler_installed_;
+  struct sigaction old_signal_handler_;
+  struct itimerval old_timer_value_;
+};
+
+
+Sampler::Sampler(int interval, bool profiling)
+    : interval_(interval), profiling_(profiling), active_(false) {
+  data_ = new PlatformData();
+}
+
+
+Sampler::~Sampler() {
+  delete data_;
+}
+
+
+void Sampler::Start() {
+  // There can only be one active sampler at a time on POSIX
+  // platforms.
+  if (active_sampler_ != NULL) return;
+
+  // Request profiling signals.
+  struct sigaction sa;
+  sa.sa_sigaction = ProfilerSignalHandler;
+  sigemptyset(&sa.sa_mask);
+  sa.sa_flags = SA_SIGINFO;
+  if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
+  data_->signal_handler_installed_ = true;
+
+  // Set the itimer to generate a tick for each interval.
+  itimerval itimer;
+  itimer.it_interval.tv_sec = interval_ / 1000;
+  itimer.it_interval.tv_usec = (interval_ % 1000) * 1000;
+  itimer.it_value.tv_sec = itimer.it_interval.tv_sec;
+  itimer.it_value.tv_usec = itimer.it_interval.tv_usec;
+  setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_);
+
+  // Set this sampler as the active sampler.
+  active_sampler_ = this;
+  active_ = true;
+}
+
+
+void Sampler::Stop() {
+  // Restore old signal handler
+  if (data_->signal_handler_installed_) {
+    setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL);
+    sigaction(SIGPROF, &data_->old_signal_handler_, 0);
+    data_->signal_handler_installed_ = false;
+  }
+
+  // This sampler is no longer the active sampler.
+  active_sampler_ = NULL;
+  active_ = false;
+}
+
+#endif  // ENABLE_LOGGING_AND_PROFILING
+
+} }  // namespace v8::internal
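
The reason platform-solaris.cc cannot reuse the shared POSIX versions (and
the reason LocalTimezone/LocalTimeOffset move out of platform-posix.cc below)
is that Solaris's struct tm carries no tm_gmtoff or tm_zone fields. On such
systems the UTC offset is commonly recovered by round-tripping through the
broken-down time representations; a sketch of that general gmtime/mktime
technique (an illustration of the approach, not the code above):

    #include <time.h>

    // mktime() interprets a struct tm as local time, so feeding it the
    // UTC fields from gmtime() yields utc - offset; subtracting the two
    // recovers the offset. Result is in milliseconds.
    double LocalTimeOffsetMs() {
      time_t utc = time(NULL);
      struct tm gm = *gmtime(&utc);
      gm.tm_isdst = 0;  // Treat the UTC fields as standard (non-DST) time.
      time_t as_local = mktime(&gm);
      return static_cast<double>(utc - as_local) * 1000.0;
    }
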
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 1be4b77..81b0d4c 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -1813,13 +1813,13 @@
         context.ContextFlags = CONTEXT_FULL;
         if (GetThreadContext(profiled_thread_, &context) != 0) {
 #if V8_HOST_ARCH_X64
-          sample.pc = context.Rip;
-          sample.sp = context.Rsp;
-          sample.fp = context.Rbp;
+          sample.pc = reinterpret_cast<Address>(context.Rip);
+          sample.sp = reinterpret_cast<Address>(context.Rsp);
+          sample.fp = reinterpret_cast<Address>(context.Rbp);
 #else
-          sample.pc = context.Eip;
-          sample.sp = context.Esp;
-          sample.fp = context.Ebp;
+          sample.pc = reinterpret_cast<Address>(context.Eip);
+          sample.sp = reinterpret_cast<Address>(context.Esp);
+          sample.fp = reinterpret_cast<Address>(context.Ebp);
 #endif
           sampler_->SampleStack(&sample);
         }
diff --git a/src/platform.h b/src/platform.h
index 75e557c..bc2e9d6 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -44,6 +44,12 @@
 #ifndef V8_PLATFORM_H_
 #define V8_PLATFORM_H_
 
+#ifdef __sun
+// On Solaris, to get isinf, INFINITY, fpclassify and other macros one needs
+// to define this symbol.
+#define __C99FEATURES__ 1
+#endif
+
 #define V8_INFINITY INFINITY
 
 // Windows specific stuff.
@@ -506,11 +512,18 @@
 // TickSample captures the information collected for each sample.
 class TickSample {
  public:
-  TickSample() : pc(0), sp(0), fp(0), state(OTHER), frames_count(0) {}
-  uintptr_t pc;  // Instruction pointer.
-  uintptr_t sp;  // Stack pointer.
-  uintptr_t fp;  // Frame pointer.
-  StateTag state;   // The state of the VM.
+  TickSample()
+      : pc(NULL),
+        sp(NULL),
+        fp(NULL),
+        function(NULL),
+        state(OTHER),
+        frames_count(0) {}
+  Address pc;  // Instruction pointer.
+  Address sp;  // Stack pointer.
+  Address fp;  // Frame pointer.
+  Address function;  // The last called JS function.
+  StateTag state;  // The state of the VM.
   static const int kMaxFramesCount = 100;
   EmbeddedVector<Address, kMaxFramesCount> stack;  // Call stack.
   int frames_count;  // Number of captured frames.
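
Retyping pc/sp/fp from uintptr_t to Address (a byte pointer in V8) is what
drives the reinterpret_cast<Address>(...) changes in every platform sampler
above: register values arrive from the various mcontext/CONTEXT structures as
integers and are now converted once, at the boundary. The new function field
records the last called JS function so a tick can be attributed without a
stack walk. A self-contained sketch of that boundary conversion (types are
stand-ins for the V8 ones):

    #include <stddef.h>
    #include <stdint.h>

    typedef unsigned char* Address;  // Shape of V8's Address typedef.

    struct TickSample {
      Address pc, sp, fp;
      Address function;  // Last called JS function, when known.
    };

    // Convert raw register words to typed addresses in one place so the
    // rest of the profiler works in terms of Address.
    void FillSample(TickSample* s, uintptr_t ip, uintptr_t sp, uintptr_t bp) {
      s->pc = reinterpret_cast<Address>(ip);
      s->sp = reinterpret_cast<Address>(sp);
      s->fp = reinterpret_cast<Address>(bp);
      s->function = NULL;  // Filled in later by the VM, if available.
    }
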
diff --git a/src/regexp-macro-assembler.cc b/src/regexp-macro-assembler.cc
index 3685fcd..0fcfc33 100644
--- a/src/regexp-macro-assembler.cc
+++ b/src/regexp-macro-assembler.cc
@@ -122,7 +122,10 @@
 
   bool is_ascii = subject->IsAsciiRepresentation();
 
+  // The string has been flattened, so if it is a cons string it contains the
+  // full string in the first part.
   if (StringShape(subject_ptr).IsCons()) {
+    ASSERT_EQ(0, ConsString::cast(subject_ptr)->second()->length());
     subject_ptr = ConsString::cast(subject_ptr)->first();
   }
   // Ensure that an underlying string has the same ascii-ness.
@@ -141,8 +144,7 @@
                        start_offset,
                        input_start,
                        input_end,
-                       offsets_vector,
-                       previous_index == 0);
+                       offsets_vector);
   return res;
 }
 
@@ -153,14 +155,11 @@
     int start_offset,
     const byte* input_start,
     const byte* input_end,
-    int* output,
-    bool at_start) {
+    int* output) {
   typedef int (*matcher)(String*, int, const byte*,
-                         const byte*, int*, int, Address, int);
+                         const byte*, int*, Address, int);
   matcher matcher_func = FUNCTION_CAST<matcher>(code->entry());
 
-  int at_start_val = at_start ? 1 : 0;
-
   // Ensure that the minimum stack has been allocated.
   RegExpStack stack;
   Address stack_base = RegExpStack::stack_base();
@@ -172,7 +171,6 @@
                                           input_start,
                                           input_end,
                                           output,
-                                          at_start_val,
                                           stack_base,
                                           direct_call);
   ASSERT(result <= SUCCESS);
diff --git a/src/regexp-macro-assembler.h b/src/regexp-macro-assembler.h
index 2e619bd..105d8cc 100644
--- a/src/regexp-macro-assembler.h
+++ b/src/regexp-macro-assembler.h
@@ -218,8 +218,7 @@
                         int start_offset,
                         const byte* input_start,
                         const byte* input_end,
-                        int* output,
-                        bool at_start);
+                        int* output);
 };
 
 #endif  // V8_NATIVE_REGEXP
diff --git a/src/runtime.cc b/src/runtime.cc
index b6da528..51c1ba2 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -4782,7 +4782,7 @@
     return Code::cast(code);
   }
 
-  return Builtins::builtin(Builtins::JSConstructStubGeneric);
+  return shared->construct_stub();
 }
 
 
@@ -4830,6 +4830,7 @@
     CompileLazyShared(Handle<SharedFunctionInfo>(function->shared()),
                                                  CLEAR_EXCEPTION,
                                                  0);
+    LOG(FunctionCreateEvent(*function));
   }
 
   bool first_allocation = !function->has_initial_map();
@@ -7211,9 +7212,8 @@
   Handle<SharedFunctionInfo> last;
   while (!done) {
     HeapIterator iterator;
-    while (iterator.has_next()) {
-      HeapObject* obj = iterator.next();
-      ASSERT(obj != NULL);
+    for (HeapObject* obj = iterator.next();
+         obj != NULL; obj = iterator.next()) {
       if (obj->IsSharedFunctionInfo()) {
         Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(obj));
         if (shared->script() == *script) {
@@ -7669,10 +7669,10 @@
   int count = 0;
   JSObject* last = NULL;
   HeapIterator iterator;
-  while (iterator.has_next() &&
+  HeapObject* heap_obj = NULL;
+  while (((heap_obj = iterator.next()) != NULL) &&
          (max_references == 0 || count < max_references)) {
     // Only look at all JSObjects.
-    HeapObject* heap_obj = iterator.next();
     if (heap_obj->IsJSObject()) {
       // Skip context extension objects and argument arrays as these are
       // checked in the context of functions using them.
@@ -7782,10 +7782,10 @@
   // Iterate the heap.
   int count = 0;
   HeapIterator iterator;
-  while (iterator.has_next() &&
+  HeapObject* heap_obj = NULL;
+  while (((heap_obj = iterator.next()) != NULL) &&
          (max_references == 0 || count < max_references)) {
     // Only look at all JSObjects.
-    HeapObject* heap_obj = iterator.next();
     if (heap_obj->IsJSObject()) {
       JSObject* obj = JSObject::cast(heap_obj);
       if (obj->map()->constructor() == constructor) {
@@ -7933,8 +7933,8 @@
   // script data.
   Handle<Script> script;
   HeapIterator iterator;
-  while (script.is_null() && iterator.has_next()) {
-    HeapObject* obj = iterator.next();
+  HeapObject* obj = NULL;
+  while (script.is_null() && ((obj = iterator.next()) != NULL)) {
     // If a script is found check if it has the script data requested.
     if (obj->IsScript()) {
       if (Script::cast(obj)->name()->IsString()) {
diff --git a/src/runtime.js b/src/runtime.js
index ce2f197..c4c855e 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -541,7 +541,9 @@
   if (IS_STRING(x)) return new $String(x);
   if (IS_NUMBER(x)) return new $Number(x);
   if (IS_BOOLEAN(x)) return new $Boolean(x);
-  if (x == null) throw %MakeTypeError('null_to_object', []);
+  if (IS_NULL_OR_UNDEFINED(x) && !IS_UNDETECTABLE(x)) {
+    throw %MakeTypeError('null_to_object', []);
+  }
   return x;
 }
 
diff --git a/src/serialize.cc b/src/serialize.cc
index ec3a967..6b85893 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -44,67 +44,6 @@
 namespace v8 {
 namespace internal {
 
-// Mapping objects to their location after deserialization.
-// This is used during building, but not at runtime by V8.
-class SerializationAddressMapper {
- public:
-  static bool IsMapped(HeapObject* obj) {
-    EnsureMapExists();
-    return serialization_map_->Lookup(Key(obj), Hash(obj), false) != NULL;
-  }
-
-  static int MappedTo(HeapObject* obj) {
-    ASSERT(IsMapped(obj));
-    return static_cast<int>(reinterpret_cast<intptr_t>(
-        serialization_map_->Lookup(Key(obj), Hash(obj), false)->value));
-  }
-
-  static void Map(HeapObject* obj, int to) {
-    EnsureMapExists();
-    ASSERT(!IsMapped(obj));
-    HashMap::Entry* entry =
-        serialization_map_->Lookup(Key(obj), Hash(obj), true);
-    entry->value = Value(to);
-  }
-
-  static void Zap() {
-    if (serialization_map_ != NULL) {
-      delete serialization_map_;
-    }
-    serialization_map_ = NULL;
-  }
-
- private:
-  static bool SerializationMatchFun(void* key1, void* key2) {
-    return key1 == key2;
-  }
-
-  static uint32_t Hash(HeapObject* obj) {
-    return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
-  }
-
-  static void* Key(HeapObject* obj) {
-    return reinterpret_cast<void*>(obj->address());
-  }
-
-  static void* Value(int v) {
-    return reinterpret_cast<void*>(v);
-  }
-
-  static void EnsureMapExists() {
-    if (serialization_map_ == NULL) {
-      serialization_map_ = new HashMap(&SerializationMatchFun);
-    }
-  }
-
-  static HashMap* serialization_map_;
-};
-
-
-HashMap* SerializationAddressMapper::serialization_map_ = NULL;
-
-
-
 
 // -----------------------------------------------------------------------------
 // Coding of external references.
@@ -647,10 +586,13 @@
   ASSERT_EQ(NULL, ThreadState::FirstInUse());
   // No active handles.
   ASSERT(HandleScopeImplementer::instance()->blocks()->is_empty());
+  // Make sure the entire partial snapshot cache is traversed, filling it with
+  // valid object pointers.
+  partial_snapshot_cache_length_ = kPartialSnapshotCacheCapacity;
   ASSERT_EQ(NULL, external_reference_decoder_);
   external_reference_decoder_ = new ExternalReferenceDecoder();
-  Heap::IterateRoots(this, VISIT_ONLY_STRONG);
-  ASSERT(source_->AtEOF());
+  Heap::IterateStrongRoots(this, VISIT_ONLY_STRONG);
+  Heap::IterateWeakRoots(this, VISIT_ALL);
 }
 
 
@@ -666,7 +608,8 @@
 }
 
 
-void Deserializer::TearDown() {
+Deserializer::~Deserializer() {
+  ASSERT(source_->AtEOF());
   if (external_reference_decoder_ != NULL) {
     delete external_reference_decoder_;
     external_reference_decoder_ = NULL;
@@ -891,6 +834,16 @@
         *current++ = Heap::roots_address()[root_id];
         break;
       }
+      case PARTIAL_SNAPSHOT_CACHE_ENTRY: {
+        int cache_index = source_->GetInt();
+        *current++ = partial_snapshot_cache_[cache_index];
+        break;
+      }
+      case SYNCHRONIZE: {
+        // Getting here indicates a mismatch between the number of GC roots
+        // when serializing and when deserializing.
+        UNREACHABLE();
+      }
       default:
         UNREACHABLE();
     }
@@ -944,7 +897,6 @@
     : sink_(sink),
       current_root_index_(0),
       external_reference_encoder_(NULL),
-      partial_(false),
       large_object_total_(0) {
   for (int i = 0; i <= LAST_SPACE; i++) {
     fullness_[i] = 0;
@@ -952,7 +904,7 @@
 }
 
 
-void Serializer::Serialize() {
+void StartupSerializer::SerializeStrongReferences() {
   // No active threads.
   CHECK_EQ(NULL, ThreadState::FirstInUse());
   // No active or weak handles.
@@ -966,20 +918,30 @@
     CHECK_NE(v8::INSTALLED, ext->state());
   }
   external_reference_encoder_ = new ExternalReferenceEncoder();
-  Heap::IterateRoots(this, VISIT_ONLY_STRONG);
+  Heap::IterateStrongRoots(this, VISIT_ONLY_STRONG);
   delete external_reference_encoder_;
   external_reference_encoder_ = NULL;
-  SerializationAddressMapper::Zap();
 }
 
 
-void Serializer::SerializePartial(Object** object) {
-  partial_ = true;
+void PartialSerializer::Serialize(Object** object) {
   external_reference_encoder_ = new ExternalReferenceEncoder();
   this->VisitPointer(object);
+
+  // After we have done the partial serialization the partial snapshot cache
+  // will contain some references needed to decode the partial snapshot.  We
+  // fill it up with undefineds so that it has a predictable length and the
+  // deserialization code doesn't need to know it in advance.
+  for (int index = partial_snapshot_cache_length_;
+       index < kPartialSnapshotCacheCapacity;
+       index++) {
+    partial_snapshot_cache_[index] = Heap::undefined_value();
+    startup_serializer_->VisitPointer(&partial_snapshot_cache_[index]);
+  }
+  partial_snapshot_cache_length_ = kPartialSnapshotCacheCapacity;
+
   delete external_reference_encoder_;
   external_reference_encoder_ = NULL;
-  SerializationAddressMapper::Zap();
 }
 
 
@@ -998,7 +960,54 @@
 }
 
 
-int Serializer::RootIndex(HeapObject* heap_object) {
+Object* SerializerDeserializer::partial_snapshot_cache_[
+    kPartialSnapshotCacheCapacity];
+int SerializerDeserializer::partial_snapshot_cache_length_ = 0;
+
+
+// This ensures that the partial snapshot cache keeps things alive during GC and
+// tracks their movement.  When it is called during serialization of the startup
+// snapshot the partial snapshot cache is empty, so nothing happens.  When
+// the partial (context) snapshot is created, this array is populated with
+// the pointers that the partial snapshot will need.  As that happens we emit
+// serialized objects to the startup snapshot that correspond to the elements
+// of this cache array.  On deserialization we therefore need to visit the
+// cache array, which fills it up with pointers to deserialized objects.
+void SerializerDeserializer::Iterate(ObjectVisitor* visitor) {
+  visitor->VisitPointers(
+      &partial_snapshot_cache_[0],
+      &partial_snapshot_cache_[partial_snapshot_cache_length_]);
+}
+
+
+// When deserializing we need to set the size of the snapshot cache.  This means
+// the root iteration code (above) will iterate over array elements, writing the
+// references to deserialized objects in them.
+void SerializerDeserializer::SetSnapshotCacheSize(int size) {
+  partial_snapshot_cache_length_ = size;
+}
+
+
+int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
+  for (int i = 0; i < partial_snapshot_cache_length_; i++) {
+    Object* entry = partial_snapshot_cache_[i];
+    if (entry == heap_object) return i;
+  }
+  // We didn't find the object in the cache.  So we add it to the cache and
+  // then visit the pointer so that it becomes part of the startup snapshot
+  // and we can refer to it from the partial snapshot.
+  int length = partial_snapshot_cache_length_;
+  CHECK(length < kPartialSnapshotCacheCapacity);
+  partial_snapshot_cache_[length] = heap_object;
+  startup_serializer_->VisitPointer(&partial_snapshot_cache_[length]);
+  // We don't recurse from the startup snapshot generator into the partial
+  // snapshot generator.
+  ASSERT(length == partial_snapshot_cache_length_);
+  return partial_snapshot_cache_length_++;
+}
+
+
+int PartialSerializer::RootIndex(HeapObject* heap_object) {
   for (int i = 0; i < Heap::kRootListLength; i++) {
     Object* root = Heap::roots_address()[i];
     if (root == heap_object) return i;
@@ -1007,67 +1016,136 @@
 }
 
 
-void Serializer::SerializeObject(
+// Encode the location of an already deserialized object in order to write its
+// location into a later object.  We can encode the location as an offset from
+// the start of the deserialized objects or as an offset backwards from the
+// current allocation pointer.
+void Serializer::SerializeReferenceToPreviousObject(
+    int space,
+    int address,
+    ReferenceRepresentation reference_representation) {
+  int offset = CurrentAllocationAddress(space) - address;
+  bool from_start = true;
+  if (SpaceIsPaged(space)) {
+    // For paged space it is simple to encode back from current allocation if
+    // the object is on the same page as the current allocation pointer.
+    if ((CurrentAllocationAddress(space) >> kPageSizeBits) ==
+        (address >> kPageSizeBits)) {
+      from_start = false;
+      address = offset;
+    }
+  } else if (space == NEW_SPACE) {
+    // For new space it is always simple to encode back from current allocation.
+    if (offset < address) {
+      from_start = false;
+      address = offset;
+    }
+  }
+  // If we are actually dealing with real offsets (and not a numbering of
+  // all objects) then we should shift out the bits that are always 0.
+  if (!SpaceIsLarge(space)) address >>= kObjectAlignmentBits;
+  // On some architectures references between code objects are encoded
+  // specially (as relative offsets).  Such references have their own
+  // special tags to simplify the deserializer.
+  if (reference_representation == CODE_TARGET_REPRESENTATION) {
+    if (from_start) {
+      sink_->Put(CODE_REFERENCE_SERIALIZATION + space, "RefCodeSer");
+      sink_->PutInt(address, "address");
+    } else {
+      sink_->Put(CODE_BACKREF_SERIALIZATION + space, "BackRefCodeSer");
+      sink_->PutInt(address, "address");
+    }
+  } else {
+    // Regular absolute references.
+    CHECK_EQ(TAGGED_REPRESENTATION, reference_representation);
+    if (from_start) {
+      // There are some common offsets that have their own specialized encoding.
+#define COMMON_REFS_CASE(tag, common_space, common_offset)               \
+      if (space == common_space && address == common_offset) {           \
+        sink_->PutSection(tag + REFERENCE_SERIALIZATION, "RefSer");      \
+      } else  /* NOLINT */
+      COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE)
+#undef COMMON_REFS_CASE
+      {  /* NOLINT */
+        sink_->Put(REFERENCE_SERIALIZATION + space, "RefSer");
+        sink_->PutInt(address, "address");
+      }
+    } else {
+      sink_->Put(BACKREF_SERIALIZATION + space, "BackRefSer");
+      sink_->PutInt(address, "address");
+    }
+  }
+}
+
+
+void StartupSerializer::SerializeObject(
     Object* o,
     ReferenceRepresentation reference_representation) {
   CHECK(o->IsHeapObject());
   HeapObject* heap_object = HeapObject::cast(o);
-  if (partial_) {
-    int root_index = RootIndex(heap_object);
-    if (root_index != kInvalidRootIndex) {
-      sink_->Put(ROOT_SERIALIZATION, "RootSerialization");
-      sink_->PutInt(root_index, "root_index");
-      return;
-    }
-    // All the symbols that the snapshot needs should be in the root table.
-    ASSERT(!heap_object->IsSymbol());
-  }
-  if (SerializationAddressMapper::IsMapped(heap_object)) {
+
+  if (address_mapper_.IsMapped(heap_object)) {
     int space = SpaceOfAlreadySerializedObject(heap_object);
-    int address = SerializationAddressMapper::MappedTo(heap_object);
-    int offset = CurrentAllocationAddress(space) - address;
-    bool from_start = true;
-    if (SpaceIsPaged(space)) {
-      if ((CurrentAllocationAddress(space) >> kPageSizeBits) ==
-          (address >> kPageSizeBits)) {
-        from_start = false;
-        address = offset;
-      }
-    } else if (space == NEW_SPACE) {
-      if (offset < address) {
-        from_start = false;
-        address = offset;
-      }
-    }
-    // If we are actually dealing with real offsets (and not a numbering of
-    // all objects) then we should shift out the bits that are always 0.
-    if (!SpaceIsLarge(space)) address >>= kObjectAlignmentBits;
-    if (reference_representation == CODE_TARGET_REPRESENTATION) {
-      if (from_start) {
-        sink_->Put(CODE_REFERENCE_SERIALIZATION + space, "RefCodeSer");
-        sink_->PutInt(address, "address");
-      } else {
-        sink_->Put(CODE_BACKREF_SERIALIZATION + space, "BackRefCodeSer");
-        sink_->PutInt(address, "address");
-      }
-    } else {
-      CHECK_EQ(TAGGED_REPRESENTATION, reference_representation);
-      if (from_start) {
-#define COMMON_REFS_CASE(tag, common_space, common_offset)                 \
-        if (space == common_space && address == common_offset) {           \
-          sink_->PutSection(tag + REFERENCE_SERIALIZATION, "RefSer");      \
-        } else  /* NOLINT */
-        COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE)
-#undef COMMON_REFS_CASE
-        {  /* NOLINT */
-          sink_->Put(REFERENCE_SERIALIZATION + space, "RefSer");
-          sink_->PutInt(address, "address");
-        }
-      } else {
-        sink_->Put(BACKREF_SERIALIZATION + space, "BackRefSer");
-        sink_->PutInt(address, "address");
-      }
-    }
+    int address = address_mapper_.MappedTo(heap_object);
+    SerializeReferenceToPreviousObject(space,
+                                       address,
+                                       reference_representation);
+  } else {
+    // Object has not yet been serialized.  Serialize it here.
+    ObjectSerializer object_serializer(this,
+                                       heap_object,
+                                       sink_,
+                                       reference_representation);
+    object_serializer.Serialize();
+  }
+}
+
+
+void StartupSerializer::SerializeWeakReferences() {
+  for (int i = partial_snapshot_cache_length_;
+       i < kPartialSnapshotCacheCapacity;
+       i++) {
+    sink_->Put(ROOT_SERIALIZATION, "RootSerialization");
+    sink_->PutInt(Heap::kUndefinedValueRootIndex, "root_index");
+  }
+  Heap::IterateWeakRoots(this, VISIT_ALL);
+}
+
+
+void PartialSerializer::SerializeObject(
+    Object* o,
+    ReferenceRepresentation reference_representation) {
+  CHECK(o->IsHeapObject());
+  HeapObject* heap_object = HeapObject::cast(o);
+
+  int root_index;
+  if ((root_index = RootIndex(heap_object)) != kInvalidRootIndex) {
+    sink_->Put(ROOT_SERIALIZATION, "RootSerialization");
+    sink_->PutInt(root_index, "root_index");
+    return;
+  }
+
+  if (ShouldBeInThePartialSnapshotCache(heap_object)) {
+    int cache_index = PartialSnapshotCacheIndex(heap_object);
+    sink_->Put(PARTIAL_SNAPSHOT_CACHE_ENTRY, "PartialSnapshotCache");
+    sink_->PutInt(cache_index, "partial_snapshot_cache_index");
+    return;
+  }
+
+  // Pointers from the partial snapshot to the objects in the startup snapshot
+  // should go through the root array or through the partial snapshot cache.
+  // If this is not the case you may have to add something to the root array.
+  ASSERT(!startup_serializer_->address_mapper()->IsMapped(heap_object));
+  // All the symbols that the partial snapshot needs should be either in the
+  // root table or in the partial snapshot cache.
+  ASSERT(!heap_object->IsSymbol());
+
+  if (address_mapper_.IsMapped(heap_object)) {
+    int space = SpaceOfAlreadySerializedObject(heap_object);
+    int address = address_mapper_.MappedTo(heap_object);
+    SerializeReferenceToPreviousObject(space,
+                                       address,
+                                       reference_representation);
   } else {
     // Object has not yet been serialized.  Serialize it here.
     ObjectSerializer serializer(this,
@@ -1079,7 +1157,6 @@
 }
 
 
-
 void Serializer::ObjectSerializer::Serialize() {
   int space = Serializer::SpaceOfObject(object_);
   int size = object_->Size();
@@ -1096,9 +1173,8 @@
 
   // Mark this object as already serialized.
   bool start_new_page;
-  SerializationAddressMapper::Map(
-    object_,
-    serializer_->Allocate(space, size, &start_new_page));
+  int offset = serializer_->Allocate(space, size, &start_new_page);
+  serializer_->address_mapper()->AddMapping(object_, offset);
   if (start_new_page) {
     sink_->Put(START_NEW_PAGE_SERIALIZATION, "NewPage");
     sink_->PutSection(space, "NewPageSpace");
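
The block deleted above is not lost: it becomes the body of the new SerializeReferenceToPreviousObject helper, shared by both serializers. Its decision is worth spelling out: an already-serialized object is referenced by its offset from the start of its space, unless it is close to the current allocation point, in which case a shorter relative back-reference is used; object alignment lets the low bits be shifted out. A self-contained toy of that decision (not V8 code; the constants are illustrative, and the one-byte COMMON_REFERENCE_PATTERNS shortcut is omitted):

    #include <cstdio>

    // Toy model of the back-reference choice factored out above (not V8
    // code; constants are illustrative, not V8's real values).
    const int kPageSizeBits = 13;        // assume 8K pages
    const int kObjectAlignmentBits = 2;  // assume 4-byte object alignment

    struct Ref { bool from_start; int encoded; };

    Ref EncodeReference(bool space_is_paged, int allocation_top, int address) {
      int offset = allocation_top - address;
      bool from_start = true;
      if (space_is_paged) {
        // Same page as the current allocation address: use a back-reference.
        if ((allocation_top >> kPageSizeBits) == (address >> kPageSizeBits)) {
          from_start = false;
          address = offset;
        }
      } else {
        // New space: use whichever distance is shorter.
        if (offset < address) {
          from_start = false;
          address = offset;
        }
      }
      // Real offsets are always object-aligned, so the low bits carry no
      // information and are shifted out before encoding.
      Ref result = { from_start, address >> kObjectAlignmentBits };
      return result;
    }

    int main() {
      Ref r = EncodeReference(true, 0x2040, 0x2010);  // same 8K page
      std::printf("from_start=%d encoded=%d\n", r.from_start, r.encoded);
      return 0;
    }
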
diff --git a/src/serialize.h b/src/serialize.h
index 8dd193f..ce3b006 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -147,7 +147,7 @@
     return position_ == length_;
   }
 
-  const int position() { return position_; }
+  int position() { return position_; }
 
  private:
   const byte* data_;
@@ -185,9 +185,14 @@
   f(14, 32) \
   f(15, 36)
 
-// The SerDes class is a common superclass for Serializer and Deserializer
-// which is used to store common constants and methods used by both.
-class SerDes: public ObjectVisitor {
+// The SerializerDeserializer class is a common superclass for Serializer
+// and Deserializer; it is used to hold the constants and methods that both
+// classes share.
+class SerializerDeserializer: public ObjectVisitor {
+ public:
+  static void Iterate(ObjectVisitor* visitor);
+  static void SetSnapshotCacheSize(int size);
+
  protected:
   enum DataType {
     RAW_DATA_SERIALIZATION = 0,
@@ -202,7 +207,8 @@
     START_NEW_PAGE_SERIALIZATION = 37,
     NATIVES_STRING_RESOURCE = 38,
     ROOT_SERIALIZATION = 39,
-    // Free: 40-47.
+    PARTIAL_SNAPSHOT_CACHE_ENTRY = 40,
+    // Free: 41-47.
     BACKREF_SERIALIZATION = 48,
     // One per space, must be kSpaceMask aligned.
     // Free: 57-63.
@@ -227,17 +233,21 @@
   static inline bool SpaceIsPaged(int space) {
     return space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE;
   }
+
+  static int partial_snapshot_cache_length_;
+  static const int kPartialSnapshotCacheCapacity = 1024;
+  static Object* partial_snapshot_cache_[];
 };
 
 
 
 // A Deserializer reads a snapshot and reconstructs the Object graph it defines.
-class Deserializer: public SerDes {
+class Deserializer: public SerializerDeserializer {
  public:
   // Create a deserializer from a snapshot byte source.
   explicit Deserializer(SnapshotByteSource* source);
 
-  virtual ~Deserializer() { }
+  virtual ~Deserializer();
 
   // Deserialize the snapshot into an empty heap.
   void Deserialize();
@@ -249,8 +259,6 @@
   virtual void Synchronize(const char* tag);
 #endif
 
-  static void TearDown();
-
  private:
   virtual void VisitPointers(Object** start, Object** end);
 
@@ -272,7 +280,7 @@
   // (In large object space we are keeping track of individual objects
   // rather than pages.)  In new space we just need the address of the
   // first object and the others will flow from that.
-  List<Address> pages_[SerDes::kNumberOfSpaces];
+  List<Address> pages_[SerializerDeserializer::kNumberOfSpaces];
 
   SnapshotByteSource* source_;
   static ExternalReferenceDecoder* external_reference_decoder_;
@@ -300,13 +308,62 @@
 };
 
 
-class Serializer : public SerDes {
+// Mapping objects to their location after deserialization.
+// This is used during building, but not at runtime by V8.
+class SerializationAddressMapper {
+ public:
+  SerializationAddressMapper()
+      : serialization_map_(new HashMap(&SerializationMatchFun)),
+        no_allocation_(new AssertNoAllocation()) { }
+
+  ~SerializationAddressMapper() {
+    delete serialization_map_;
+    delete no_allocation_;
+  }
+
+  bool IsMapped(HeapObject* obj) {
+    return serialization_map_->Lookup(Key(obj), Hash(obj), false) != NULL;
+  }
+
+  int MappedTo(HeapObject* obj) {
+    ASSERT(IsMapped(obj));
+    return static_cast<int>(reinterpret_cast<intptr_t>(
+        serialization_map_->Lookup(Key(obj), Hash(obj), false)->value));
+  }
+
+  void AddMapping(HeapObject* obj, int to) {
+    ASSERT(!IsMapped(obj));
+    HashMap::Entry* entry =
+        serialization_map_->Lookup(Key(obj), Hash(obj), true);
+    entry->value = Value(to);
+  }
+
+ private:
+  static bool SerializationMatchFun(void* key1, void* key2) {
+    return key1 == key2;
+  }
+
+  static uint32_t Hash(HeapObject* obj) {
+    return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
+  }
+
+  static void* Key(HeapObject* obj) {
+    return reinterpret_cast<void*>(obj->address());
+  }
+
+  static void* Value(int v) {
+    return reinterpret_cast<void*>(v);
+  }
+
+  HashMap* serialization_map_;
+  AssertNoAllocation* no_allocation_;
+  DISALLOW_COPY_AND_ASSIGN(SerializationAddressMapper);
+};
+
+
+class Serializer : public SerializerDeserializer {
  public:
   explicit Serializer(SnapshotByteSink* sink);
-  // Serialize the current state of the heap.
-  void Serialize();
-  // Serialize a single object and the objects reachable from it.
-  void SerializePartial(Object** obj);
   void VisitPointers(Object** start, Object** end);
   // You can call this after serialization to find out how much space was used
   // in each space.
@@ -327,15 +384,20 @@
   // going on.
   static void TooLateToEnableNow() { too_late_to_enable_now_ = true; }
   static bool enabled() { return serialization_enabled_; }
+  SerializationAddressMapper* address_mapper() { return &address_mapper_; }
 #ifdef DEBUG
   virtual void Synchronize(const char* tag);
 #endif
 
- private:
+ protected:
   enum ReferenceRepresentation {
     TAGGED_REPRESENTATION,      // A tagged object reference.
     CODE_TARGET_REPRESENTATION  // A reference to first instruction in target.
   };
+  static const int kInvalidRootIndex = -1;
+  virtual int RootIndex(HeapObject* heap_object) = 0;
+  virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) = 0;
+
   class ObjectSerializer : public ObjectVisitor {
    public:
     ObjectSerializer(Serializer* serializer,
@@ -371,7 +433,12 @@
     int bytes_processed_so_far_;
   };
 
-  void SerializeObject(Object* o, ReferenceRepresentation representation);
+  virtual void SerializeObject(Object* o,
+                               ReferenceRepresentation representation) = 0;
+  void SerializeReferenceToPreviousObject(
+      int space,
+      int address,
+      ReferenceRepresentation reference_representation);
   void InitializeAllocators();
   // This will return the space for an object.  If the object is in large
   // object space it may return kLargeCode or kLargeFixedArray in order
@@ -386,8 +453,6 @@
   int EncodeExternalReference(Address addr) {
     return external_reference_encoder_->Encode(addr);
   }
-  int RootIndex(HeapObject* heap_object);
-  static const int kInvalidRootIndex = -1;
 
   // Keep track of the fullness of each space in order to generate
   // relative addresses for back references.  Large objects are
@@ -397,11 +462,11 @@
   SnapshotByteSink* sink_;
   int current_root_index_;
   ExternalReferenceEncoder* external_reference_encoder_;
-  bool partial_;
   static bool serialization_enabled_;
   // Did we already make use of the fact that serialization was not enabled?
   static bool too_late_to_enable_now_;
   int large_object_total_;
+  SerializationAddressMapper address_mapper_;
 
   friend class ObjectSerializer;
   friend class Deserializer;
@@ -409,6 +474,62 @@
   DISALLOW_COPY_AND_ASSIGN(Serializer);
 };
 
+
+class PartialSerializer : public Serializer {
+ public:
+  PartialSerializer(Serializer* startup_snapshot_serializer,
+                    SnapshotByteSink* sink)
+    : Serializer(sink),
+      startup_serializer_(startup_snapshot_serializer) {
+  }
+
+  // Serialize the objects reachable from a single object pointer.
+  virtual void Serialize(Object** o);
+  virtual void SerializeObject(Object* o,
+                               ReferenceRepresentation representation);
+
+ protected:
+  virtual int RootIndex(HeapObject* o);
+  virtual int PartialSnapshotCacheIndex(HeapObject* o);
+  virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
+    return o->IsString() || o->IsSharedFunctionInfo();
+  }
+
+ private:
+  Serializer* startup_serializer_;
+  DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
+};
+
+
+class StartupSerializer : public Serializer {
+ public:
+  explicit StartupSerializer(SnapshotByteSink* sink) : Serializer(sink) {
+    // Clear the cache of objects used by the partial snapshot.  After the
+    // strong roots have been serialized we can create a partial snapshot
+    // which will repopulate the cache with objects needed by that partial
+    // snapshot.
+    partial_snapshot_cache_length_ = 0;
+  }
+  // Serialize the current state of the heap.  The order is:
+  // 1) Strong references.
+  // 2) Partial snapshot cache.
+  // 3) Weak references (eg the symbol table).
+  virtual void SerializeStrongReferences();
+  virtual void SerializeObject(Object* o,
+                               ReferenceRepresentation representation);
+  void SerializeWeakReferences();
+  void Serialize() {
+    SerializeStrongReferences();
+    SerializeWeakReferences();
+  }
+
+ private:
+  virtual int RootIndex(HeapObject* o) { return kInvalidRootIndex; }
+  virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
+    return false;
+  }
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_SERIALIZE_H_
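
To summarize the new serialize.h shape: Serializer keeps the shared machinery (the sink, the allocation bookkeeping, the address mapper) and defers two policy questions to virtual hooks, RootIndex() and ShouldBeInThePartialSnapshotCache(), which StartupSerializer and PartialSerializer answer differently. A toy mirror of that dispatch (not V8 code), to make the division of labor concrete:

    #include <cstdio>
    #include <string>

    // Toy mirror of the new hierarchy: the base class owns the common
    // SerializeObject flow; subclasses answer the two policy questions.
    struct Obj { std::string name; bool is_root; bool is_shared; };

    class SerializerBase {
     public:
      virtual ~SerializerBase() {}
      void Serialize(const Obj& o) {
        if (RootIndex(o) != kInvalidRootIndex) {
          std::printf("%s: emitted as a root reference\n", o.name.c_str());
        } else if (ShouldBeInThePartialSnapshotCache(o)) {
          std::printf("%s: emitted as a partial snapshot cache entry\n",
                      o.name.c_str());
        } else {
          std::printf("%s: serialized in full\n", o.name.c_str());
        }
      }
     protected:
      static const int kInvalidRootIndex = -1;
      virtual int RootIndex(const Obj& o) = 0;
      virtual bool ShouldBeInThePartialSnapshotCache(const Obj& o) = 0;
    };

    // Startup snapshot: everything is serialized in full.
    class StartupLike : public SerializerBase {
     protected:
      virtual int RootIndex(const Obj&) { return kInvalidRootIndex; }
      virtual bool ShouldBeInThePartialSnapshotCache(const Obj&) {
        return false;
      }
    };

    // Partial snapshot: roots and shared objects become references.
    class PartialLike : public SerializerBase {
     protected:
      virtual int RootIndex(const Obj& o) {
        return o.is_root ? 0 : kInvalidRootIndex;
      }
      virtual bool ShouldBeInThePartialSnapshotCache(const Obj& o) {
        return o.is_shared;  // strings/SharedFunctionInfo in the real class
      }
    };

    int main() {
      PartialLike partial;
      Obj a = { "undefined_value", true, false };
      Obj b = { "some_string", false, true };
      Obj c = { "user_object", false, false };
      partial.Serialize(a);
      partial.Serialize(b);
      partial.Serialize(c);
      return 0;
    }
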
diff --git a/src/snapshot-common.cc b/src/snapshot-common.cc
index 448c3fd..1e81b8e 100644
--- a/src/snapshot-common.cc
+++ b/src/snapshot-common.cc
@@ -59,42 +59,4 @@
   return false;
 }
 
-
-class FileByteSink : public SnapshotByteSink {
- public:
-  explicit FileByteSink(const char* snapshot_file) {
-    fp_ = OS::FOpen(snapshot_file, "wb");
-    if (fp_ == NULL) {
-      PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
-      exit(1);
-    }
-  }
-  virtual ~FileByteSink() {
-    if (fp_ != NULL) {
-      fclose(fp_);
-    }
-  }
-  virtual void Put(int byte, const char* description) {
-    if (fp_ != NULL) {
-      fputc(byte, fp_);
-    }
-  }
-  virtual int Position() {
-    return ftell(fp_);
-  }
-
- private:
-  FILE* fp_;
-};
-
-
-bool Snapshot::WriteToFile(const char* snapshot_file) {
-  FileByteSink file(snapshot_file);
-  Serializer ser(&file);
-  ser.Serialize();
-  return true;
-}
-
-
-
 } }  // namespace v8::internal
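
FileByteSink and Snapshot::WriteToFile are removed outright; presumably the snapshot-writing sink now lives with the mksnapshot tool rather than the library proper. The deleted class also shows how small the sink contract is: only Put() and Position() needed overriding. A minimal in-memory stand-in under that same assumed interface (toy code, not part of the patch):

    #include <cstdio>
    #include <vector>

    // The sink interface, reduced to the two methods the deleted
    // FileByteSink implemented (a sketch of the assumed contract).
    class SnapshotByteSink {
     public:
      virtual ~SnapshotByteSink() {}
      virtual void Put(int byte, const char* description) = 0;
      virtual int Position() = 0;
    };

    // An in-memory sink: the same shape as FileByteSink minus the FILE*.
    class MemoryByteSink : public SnapshotByteSink {
     public:
      virtual void Put(int byte, const char* /* description */) {
        data_.push_back(static_cast<unsigned char>(byte));
      }
      virtual int Position() { return static_cast<int>(data_.size()); }
     private:
      std::vector<unsigned char> data_;
    };

    int main() {
      MemoryByteSink sink;
      sink.Put(39, "RootSerialization");  // ROOT_SERIALIZATION tag value
      std::printf("position after one byte: %d\n", sink.Position());
      return 0;
    }
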
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index 847bb9a..4fd8a6c 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -36,32 +36,6 @@
 
 
 // -----------------------------------------------------------------------------
-// HeapObjectIterator
-
-bool HeapObjectIterator::has_next() {
-  if (cur_addr_ < cur_limit_) {
-    return true;  // common case
-  }
-  ASSERT(cur_addr_ == cur_limit_);
-  return HasNextInNextPage();  // slow path
-}
-
-
-HeapObject* HeapObjectIterator::next() {
-  ASSERT(has_next());
-
-  HeapObject* obj = HeapObject::FromAddress(cur_addr_);
-  int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
-  ASSERT_OBJECT_SIZE(obj_size);
-
-  cur_addr_ += obj_size;
-  ASSERT(cur_addr_ <= cur_limit_);
-
-  return obj;
-}
-
-
-// -----------------------------------------------------------------------------
 // PageIterator
 
 bool PageIterator::has_next() {
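
This removal is one half of the iterator protocol change: the inline has_next()/next() pair is gone, and (in spaces.h further down) next() alone signals exhaustion by returning NULL. The new calling convention in miniature (toy code, not V8's):

    #include <cstdio>

    // The new iteration protocol in one toy class: next() returns NULL
    // when the sequence is exhausted, so callers no longer pair
    // has_next() with next().
    class ToyIterator {
     public:
      ToyIterator() : index_(0) {}
      const char* next() {
        static const char* items[] = { "code", "string", "fixed_array" };
        return index_ < 3 ? items[index_++] : NULL;
      }
     private:
      int index_;
    };

    int main() {
      ToyIterator it;
      // The loop shape used throughout spaces.cc after this patch:
      for (const char* obj = it.next(); obj != NULL; obj = it.next()) {
        std::printf("visiting %s\n", obj);
      }
      return 0;
    }
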
diff --git a/src/spaces.cc b/src/spaces.cc
index cd09398..2850900 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -82,8 +82,8 @@
 }
 
 
-bool HeapObjectIterator::HasNextInNextPage() {
-  if (cur_addr_ == end_addr_) return false;
+HeapObject* HeapObjectIterator::FromNextPage() {
+  if (cur_addr_ == end_addr_) return NULL;
 
   Page* cur_page = Page::FromAllocationTop(cur_addr_);
   cur_page = cur_page->next_page();
@@ -92,12 +92,12 @@
   cur_addr_ = cur_page->ObjectAreaStart();
   cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();
 
-  if (cur_addr_ == end_addr_) return false;
+  if (cur_addr_ == end_addr_) return NULL;
   ASSERT(cur_addr_ < cur_limit_);
 #ifdef DEBUG
   Verify();
 #endif
-  return true;
+  return FromCurrentPage();
 }
 
 
@@ -1437,7 +1437,8 @@
 void NewSpace::CollectStatistics() {
   ClearHistograms();
   SemiSpaceIterator it(this);
-  while (it.has_next()) RecordAllocation(it.next());
+  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
+    RecordAllocation(obj);
 }
 
 
@@ -2054,8 +2055,7 @@
 // - by code comment
 void PagedSpace::CollectCodeStatistics() {
   HeapObjectIterator obj_it(this);
-  while (obj_it.has_next()) {
-    HeapObject* obj = obj_it.next();
+  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
     if (obj->IsCode()) {
       Code* code = Code::cast(obj);
       code_kind_statistics[code->kind()] += code->Size();
@@ -2157,7 +2157,8 @@
 
   ClearHistograms();
   HeapObjectIterator obj_it(this);
-  while (obj_it.has_next()) { CollectHistogramInfo(obj_it.next()); }
+  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
+    CollectHistogramInfo(obj);
   ReportHistogram(true);
 }
 
@@ -2393,7 +2394,8 @@
 
   ClearHistograms();
   HeapObjectIterator obj_it(this);
-  while (obj_it.has_next()) { CollectHistogramInfo(obj_it.next()); }
+  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
+    CollectHistogramInfo(obj);
   ReportHistogram(false);
 }
 
@@ -2462,7 +2464,8 @@
 
 
 HeapObject* LargeObjectIterator::next() {
-  ASSERT(has_next());
+  if (current_ == NULL) return NULL;
+
   HeapObject* object = current_->GetObject();
   current_ = current_->next();
   return object;
@@ -2639,8 +2642,7 @@
   ASSERT(Page::is_rset_in_use());
 
   LargeObjectIterator it(this);
-  while (it.has_next()) {
-    HeapObject* object = it.next();
+  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
     // We only have code, sequential strings, or fixed arrays in large
     // object space, and only fixed arrays need remembered set support.
     if (object->IsFixedArray()) {
@@ -2668,11 +2670,10 @@
       30);
 
   LargeObjectIterator it(this);
-  while (it.has_next()) {
+  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
     // We only have code, sequential strings, or fixed arrays in large
     // object space, and only fixed arrays can possibly contain pointers to
     // the young generation.
-    HeapObject* object = it.next();
     if (object->IsFixedArray()) {
       // Iterate the normal page remembered set range.
       Page* page = Page::FromAddress(object->address());
@@ -2718,9 +2719,7 @@
       }
 
       // Free the chunk.
-      if (object->IsCode()) {
-        LOG(CodeDeleteEvent(object->address()));
-      }
+      MarkCompactCollector::ReportDeleteIfNeeded(object);
       size_ -= static_cast<int>(chunk_size);
       page_count_--;
       MemoryAllocator::FreeRawMemory(chunk_address, chunk_size);
@@ -2800,8 +2799,8 @@
 
 void LargeObjectSpace::Print() {
   LargeObjectIterator it(this);
-  while (it.has_next()) {
-    it.next()->Print();
+  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
+    obj->Print();
   }
 }
 
@@ -2811,9 +2810,9 @@
   int num_objects = 0;
   ClearHistograms();
   LargeObjectIterator it(this);
-  while (it.has_next()) {
+  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
     num_objects++;
-    CollectHistogramInfo(it.next());
+    CollectHistogramInfo(obj);
   }
 
   PrintF("  number of objects %d\n", num_objects);
@@ -2823,8 +2822,7 @@
 
 void LargeObjectSpace::CollectCodeStatistics() {
   LargeObjectIterator obj_it(this);
-  while (obj_it.has_next()) {
-    HeapObject* obj = obj_it.next();
+  for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
     if (obj->IsCode()) {
       Code* code = Code::cast(obj);
       code_kind_statistics[code->kind()] += code->Size();
@@ -2835,8 +2833,7 @@
 
 void LargeObjectSpace::PrintRSet() {
   LargeObjectIterator it(this);
-  while (it.has_next()) {
-    HeapObject* object = it.next();
+  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
     if (object->IsFixedArray()) {
       Page* page = Page::FromAddress(object->address());
 
diff --git a/src/spaces.h b/src/spaces.h
index 4786fb4..37117f9 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -597,15 +597,14 @@
 // Interface for heap object iterator to be implemented by all object space
 // object iterators.
 //
-// NOTE: The space specific object iterators also implements the own has_next()
-//       and next() methods which are used to avoid using virtual functions
+// NOTE: The space-specific object iterators also implement their own next()
+//       method, which is used to avoid virtual function calls when
 //       iterating a specific space.
 
 class ObjectIterator : public Malloced {
  public:
   virtual ~ObjectIterator() { }
 
-  virtual bool has_next_object() = 0;
   virtual HeapObject* next_object() = 0;
 };
 
@@ -645,11 +644,11 @@
                      Address start,
                      HeapObjectCallback size_func);
 
-  inline bool has_next();
-  inline HeapObject* next();
+  inline HeapObject* next() {
+    return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
+  }
 
   // implementation of ObjectIterator.
-  virtual bool has_next_object() { return has_next(); }
   virtual HeapObject* next_object() { return next(); }
 
  private:
@@ -659,9 +658,21 @@
   HeapObjectCallback size_func_;  // size function
   Page* end_page_;  // caches the page of the end address
 
-  // Slow path of has_next, checks whether there are more objects in
-  // the next page.
-  bool HasNextInNextPage();
+  HeapObject* FromCurrentPage() {
+    ASSERT(cur_addr_ < cur_limit_);
+
+    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
+    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
+    ASSERT_OBJECT_SIZE(obj_size);
+
+    cur_addr_ += obj_size;
+    ASSERT(cur_addr_ <= cur_limit_);
+
+    return obj;
+  }
+
+  // Slow path of next, goes into the next page.
+  HeapObject* FromNextPage();
 
   // Initializes fields.
   void Initialize(Address start, Address end, HeapObjectCallback size_func);
@@ -982,6 +993,18 @@
     return Page::FromAllocationTop(alloc_info.limit);
   }
 
+  int CountPagesToTop() {
+    Page* p = Page::FromAllocationTop(allocation_info_.top);
+    PageIterator it(this, PageIterator::ALL_PAGES);
+    int counter = 1;
+    while (it.has_next()) {
+      if (it.next() == p) return counter;
+      counter++;
+    }
+    UNREACHABLE();
+    return -1;
+  }
+
   // Expands the space by allocating a fixed number of pages. Returns false if
   // it cannot allocate requested number of pages from OS. Newly allocated
   // pages are append to the last_page;
@@ -1194,10 +1217,8 @@
   SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
   SemiSpaceIterator(NewSpace* space, Address start);
 
-  bool has_next() {return current_ < limit_; }
-
   HeapObject* next() {
-    ASSERT(has_next());
+    if (current_ == limit_) return NULL;
 
     HeapObject* object = HeapObject::FromAddress(current_);
     int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
@@ -1207,7 +1228,6 @@
   }
 
   // Implementation of the ObjectIterator functions.
-  virtual bool has_next_object() { return has_next(); }
   virtual HeapObject* next_object() { return next(); }
 
  private:
@@ -1753,8 +1773,11 @@
 class MapSpace : public FixedSpace {
  public:
   // Creates a map space object with a maximum capacity.
-  MapSpace(int max_capacity, AllocationSpace id)
-      : FixedSpace(max_capacity, id, Map::kSize, "map") {}
+  MapSpace(int max_capacity, int max_map_space_pages, AllocationSpace id)
+      : FixedSpace(max_capacity, id, Map::kSize, "map"),
+        max_map_space_pages_(max_map_space_pages) {
+    ASSERT(max_map_space_pages < kMaxMapPageIndex);
+  }
 
   // Prepares for a mark-compact GC.
   virtual void PrepareForMarkCompact(bool will_compact);
@@ -1762,24 +1785,21 @@
   // Given an index, returns the page address.
   Address PageAddress(int page_index) { return page_addresses_[page_index]; }
 
-  // Constants.
-  static const int kMaxMapPageIndex = (1 << MapWord::kMapPageIndexBits) - 1;
+  static const int kMaxMapPageIndex = 1 << MapWord::kMapPageIndexBits;
 
   // Are map pointers encodable into map word?
   bool MapPointersEncodable() {
     if (!FLAG_use_big_map_space) {
-      ASSERT(CountTotalPages() <= kMaxMapPageIndex);
+      ASSERT(CountPagesToTop() <= kMaxMapPageIndex);
       return true;
     }
-    int n_of_pages = Capacity() / Page::kObjectAreaSize;
-    ASSERT(n_of_pages == CountTotalPages());
-    return n_of_pages <= kMaxMapPageIndex;
+    return CountPagesToTop() <= max_map_space_pages_;
   }
 
   // Should be called after forced sweep to find out if map space needs
   // compaction.
   bool NeedsCompaction(int live_maps) {
-    return !MapPointersEncodable() && live_maps <= kCompactionThreshold;
+    return !MapPointersEncodable() && live_maps <= CompactionThreshold();
   }
 
   Address TopAfterCompaction(int live_maps) {
@@ -1817,7 +1837,7 @@
 
 #ifdef DEBUG
     if (FLAG_enable_slow_asserts) {
-      int actual_size = 0;
+      intptr_t actual_size = 0;
       for (Page* p = first_page_; p != top_page; p = p->next_page())
         actual_size += kMapsPerPage * Map::kSize;
       actual_size += (new_top - top_page->ObjectAreaStart());
@@ -1838,10 +1858,14 @@
   static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;
 
   // Do map space compaction if there is a page gap.
-  static const int kCompactionThreshold = kMapsPerPage * (kMaxMapPageIndex - 1);
+  int CompactionThreshold() {
+    return kMapsPerPage * (max_map_space_pages_ - 1);
+  }
+
+  const int max_map_space_pages_;
 
   // An array of page start address in a map space.
-  Address page_addresses_[kMaxMapPageIndex + 1];
+  Address page_addresses_[kMaxMapPageIndex];
 
  public:
   TRACK_MEMORY("MapSpace")
@@ -2036,11 +2060,9 @@
   explicit LargeObjectIterator(LargeObjectSpace* space);
   LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
 
-  bool has_next() { return current_ != NULL; }
   HeapObject* next();
 
   // implementation of ObjectIterator.
-  virtual bool has_next_object() { return has_next(); }
   virtual HeapObject* next_object() { return next(); }
 
  private:
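
For a sense of the new MapSpace numbers: CompactionThreshold() is now derived from the max_map_space_pages_ constructor argument rather than from the compile-time kMaxMapPageIndex. A back-of-the-envelope version with illustrative constants (V8's real page and Map sizes may differ):

    #include <cstdio>

    // Toy version of the new CompactionThreshold() arithmetic: compaction
    // pays off only if the surviving maps fit with a whole page to spare.
    int main() {
      const int kObjectAreaSize = 8 * 1024;  // assumed usable bytes per page
      const int kMapSize = 32;               // assumed bytes per Map
      const int kMapsPerPage = kObjectAreaSize / kMapSize;  // 256
      const int max_map_space_pages = 16;    // the new ctor parameter
      const int threshold = kMapsPerPage * (max_map_space_pages - 1);
      std::printf("maps/page = %d, compaction threshold = %d maps\n",
                  kMapsPerPage, threshold);  // 256, 3840
      return 0;
    }
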
diff --git a/src/v8-counters.h b/src/v8-counters.h
index fb1e926..7397c30 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -60,40 +60,44 @@
 // lines) rather than one macro (of length about 80 lines) to work around
 // this problem.  Please avoid using recursive macros of this length when
 // possible.
-#define STATS_COUNTER_LIST_1(SC)                                 \
-  /* Global Handle Count*/                                       \
-  SC(global_handles, V8.GlobalHandles)                           \
-  /* Mallocs from PCRE */                                        \
-  SC(pcre_mallocs, V8.PcreMallocCount)                           \
-  /* OS Memory allocated */                                      \
-  SC(memory_allocated, V8.OsMemoryAllocated)                     \
-  SC(props_to_dictionary, V8.ObjectPropertiesToDictionary)       \
-  SC(elements_to_dictionary, V8.ObjectElementsToDictionary)      \
-  SC(alive_after_last_gc, V8.AliveAfterLastGC)                   \
-  SC(objs_since_last_young, V8.ObjsSinceLastYoung)               \
-  SC(objs_since_last_full, V8.ObjsSinceLastFull)                 \
-  SC(symbol_table_capacity, V8.SymbolTableCapacity)              \
-  SC(number_of_symbols, V8.NumberOfSymbols)                      \
-  SC(script_wrappers, V8.ScriptWrappers)                         \
-  SC(call_initialize_stubs, V8.CallInitializeStubs)              \
-  SC(call_premonomorphic_stubs, V8.CallPreMonomorphicStubs)      \
-  SC(call_normal_stubs, V8.CallNormalStubs)                      \
-  SC(call_megamorphic_stubs, V8.CallMegamorphicStubs)            \
-  SC(arguments_adaptors, V8.ArgumentsAdaptors)                   \
-  SC(compilation_cache_hits, V8.CompilationCacheHits)            \
-  SC(compilation_cache_misses, V8.CompilationCacheMisses)        \
-  SC(regexp_cache_hits, V8.RegExpCacheHits)                      \
-  SC(regexp_cache_misses, V8.RegExpCacheMisses)                  \
-  /* Amount of evaled source code. */                            \
-  SC(total_eval_size, V8.TotalEvalSize)                          \
-  /* Amount of loaded source code. */                            \
-  SC(total_load_size, V8.TotalLoadSize)                          \
-  /* Amount of parsed source code. */                            \
-  SC(total_parse_size, V8.TotalParseSize)                        \
-  /* Amount of source code skipped over using preparsing. */     \
-  SC(total_preparse_skipped, V8.TotalPreparseSkipped)            \
-  /* Amount of compiled source code. */                          \
-  SC(total_compile_size, V8.TotalCompileSize)
+#define STATS_COUNTER_LIST_1(SC)                                      \
+  /* Global Handle Count*/                                            \
+  SC(global_handles, V8.GlobalHandles)                                \
+  /* Mallocs from PCRE */                                             \
+  SC(pcre_mallocs, V8.PcreMallocCount)                                \
+  /* OS Memory allocated */                                           \
+  SC(memory_allocated, V8.OsMemoryAllocated)                          \
+  SC(props_to_dictionary, V8.ObjectPropertiesToDictionary)            \
+  SC(elements_to_dictionary, V8.ObjectElementsToDictionary)           \
+  SC(alive_after_last_gc, V8.AliveAfterLastGC)                        \
+  SC(objs_since_last_young, V8.ObjsSinceLastYoung)                    \
+  SC(objs_since_last_full, V8.ObjsSinceLastFull)                      \
+  SC(symbol_table_capacity, V8.SymbolTableCapacity)                   \
+  SC(number_of_symbols, V8.NumberOfSymbols)                           \
+  SC(script_wrappers, V8.ScriptWrappers)                              \
+  SC(call_initialize_stubs, V8.CallInitializeStubs)                   \
+  SC(call_premonomorphic_stubs, V8.CallPreMonomorphicStubs)           \
+  SC(call_normal_stubs, V8.CallNormalStubs)                           \
+  SC(call_megamorphic_stubs, V8.CallMegamorphicStubs)                 \
+  SC(arguments_adaptors, V8.ArgumentsAdaptors)                        \
+  SC(compilation_cache_hits, V8.CompilationCacheHits)                 \
+  SC(compilation_cache_misses, V8.CompilationCacheMisses)             \
+  SC(regexp_cache_hits, V8.RegExpCacheHits)                           \
+  SC(regexp_cache_misses, V8.RegExpCacheMisses)                       \
+  /* Amount of evaled source code. */                                 \
+  SC(total_eval_size, V8.TotalEvalSize)                               \
+  /* Amount of loaded source code. */                                 \
+  SC(total_load_size, V8.TotalLoadSize)                               \
+  /* Amount of parsed source code. */                                 \
+  SC(total_parse_size, V8.TotalParseSize)                             \
+  /* Amount of source code skipped over using preparsing. */          \
+  SC(total_preparse_skipped, V8.TotalPreparseSkipped)                 \
+  /* Amount of compiled source code. */                               \
+  SC(total_compile_size, V8.TotalCompileSize)                         \
+  /* Amount of source code compiled with the old codegen. */          \
+  SC(total_old_codegen_source_size, V8.TotalOldCodegenSourceSize)     \
+  /* Amount of source code compiled with the full codegen. */         \
+  SC(total_full_codegen_source_size, V8.TotalFullCodegenSourceSize)
 
 
 #define STATS_COUNTER_LIST_2(SC)                                    \
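
The counter lists touched here are X-macros: each list applies a caller-supplied SC macro to every (field, name) pair, so a single list can generate declarations, definitions and dump code alike. The mechanism in miniature (toy code; V8's real SC expansions live elsewhere in v8-counters.h/.cc):

    #include <cstdio>

    // X-macro demonstration: define the list once, then expand it with
    // different SC definitions to generate both the struct and the dump.
    #define TOY_COUNTER_LIST(SC)              \
      SC(global_handles, "V8.GlobalHandles")  \
      SC(pcre_mallocs, "V8.PcreMallocCount")

    #define DECLARE_FIELD(name, caption) int name;
    struct ToyCounters {
      TOY_COUNTER_LIST(DECLARE_FIELD)
    };
    #undef DECLARE_FIELD

    int main() {
      ToyCounters c = { 0, 0 };
      c.global_handles = 7;
    #define DUMP_FIELD(name, caption) std::printf("%s = %d\n", caption, c.name);
      TOY_COUNTER_LIST(DUMP_FIELD)
    #undef DUMP_FIELD
      return 0;
    }
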
diff --git a/src/v8.cc b/src/v8.cc
index db570a4..3bec827 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -146,7 +146,6 @@
 
   Heap::TearDown();
   Logger::TearDown();
-  Deserializer::TearDown();
 
   is_running_ = false;
   has_been_disposed_ = true;
diff --git a/src/v8natives.js b/src/v8natives.js
index 3dcf430..7475065 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -197,7 +197,7 @@
 
 // ECMA-262 - 15.2.4.2
 function ObjectToString() {
-  return "[object " + %_ClassOf(this) + "]";
+  return "[object " + %_ClassOf(ToObject(this)) + "]";
 }
 
 
@@ -209,7 +209,7 @@
 
 // ECMA-262 - 15.2.4.4
 function ObjectValueOf() {
-  return this;
+  return ToObject(this);
 }
 
 
diff --git a/src/version.cc b/src/version.cc
index 2724f6e..ab2eab3 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -33,8 +33,8 @@
 // NOTE these macros are used by the SCons build script so their names
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     2
-#define MINOR_VERSION     0
-#define BUILD_NUMBER      7
+#define MINOR_VERSION     1
+#define BUILD_NUMBER      0
 #define PATCH_LEVEL       0
 #define CANDIDATE_VERSION true
 
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 4ac3933..9cfe98a 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -1537,6 +1537,40 @@
 }
 
 
+void Assembler::repmovsb() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xF3);
+  emit(0xA4);
+}
+
+
+void Assembler::repmovsw() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x66);  // Operand size override.
+  emit(0xF3);
+  emit(0xA5);
+}
+
+
+void Assembler::repmovsl() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xF3);
+  emit(0xA5);
+}
+
+
+void Assembler::repmovsq() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xF3);
+  emit_rex_64();
+  emit(0xA5);
+}
+
+
 void Assembler::mul(Register src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -2079,6 +2113,16 @@
 }
 
 
+void Assembler::fisttp_d(const Operand& adr) {
+  ASSERT(CpuFeatures::IsEnabled(SSE3));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(adr);
+  emit(0xDD);
+  emit_operand(1, adr);
+}
+
+
 void Assembler::fist_s(const Operand& adr) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
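
The four repmovs emitters differ only in prefixes: MOVSB has a dedicated opcode (0xA4), while the word, doubleword and quadword forms share 0xA5 and are selected by the 0x66 operand-size prefix and the REX.W prefix respectively (hence the repmovsw correction above to emit 0xA5; per the Intel prefix rules, REX must immediately precede the opcode). The resulting byte sequences, printed side by side:

    #include <cstdio>

    // Byte sequences produced by the new repmovs* emitters.  MOVSB has its
    // own opcode; the larger element sizes reuse 0xA5 with prefixes.
    int main() {
      const unsigned char b[] = { 0xF3, 0xA4 };        // repmovsb
      const unsigned char w[] = { 0x66, 0xF3, 0xA5 };  // repmovsw (66 = opsize)
      const unsigned char l[] = { 0xF3, 0xA5 };        // repmovsl (rep movsd)
      const unsigned char q[] = { 0xF3, 0x48, 0xA5 };  // repmovsq (48 = REX.W)
      const unsigned char* seqs[] = { b, w, l, q };
      const int lens[] = { 2, 3, 2, 3 };
      const char* names[] = { "repmovsb", "repmovsw", "repmovsl", "repmovsq" };
      for (int i = 0; i < 4; i++) {
        std::printf("%s:", names[i]);
        for (int j = 0; j < lens[i]; j++) std::printf(" %02X", seqs[i][j]);
        std::printf("\n");
      }
      return 0;
    }
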
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 1bddb2f..5d17edf 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -574,6 +574,13 @@
   void movzxwq(Register dst, const Operand& src);
   void movzxwl(Register dst, const Operand& src);
 
+  // Repeated moves.
+
+  void repmovsb();
+  void repmovsw();
+  void repmovsl();
+  void repmovsq();
+
   // New x64 instruction to load from an immediate 64-bit pointer into RAX.
   void load_rax(void* ptr, RelocInfo::Mode rmode);
   void load_rax(ExternalReference ext);
@@ -1052,6 +1059,7 @@
   void fistp_d(const Operand& adr);
 
   void fisttp_s(const Operand& adr);
+  void fisttp_d(const Operand& adr);
 
   void fabs();
   void fchs();
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 0cf68eb..1a0138f 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -224,20 +224,17 @@
                                 Register lhs,
                                 Register rhs);
 
-  // Code pattern for loading a floating point value and converting it
-  // to a 32 bit integer. Input value must be either a smi or a heap number
-  // object.
-  // Returns operands as 32-bit sign extended integers in a general purpose
-  // registers.
-  static void LoadInt32Operand(MacroAssembler* masm,
-                               const Operand& src,
-                               Register dst);
-
   // Test if operands are smi or number objects (fp). Requirements:
   // operand_1 in rax, operand_2 in rdx; falls through on float or smi
   // operands, jumps to the non_float label otherwise.
   static void CheckNumberOperands(MacroAssembler* masm,
                                   Label* non_float);
+
+  // Takes the operands in rdx and rax and loads them as integers in rax
+  // and rcx.
+  static void LoadAsIntegers(MacroAssembler* masm,
+                             bool use_sse3,
+                             Label* operand_conversion_failure);
 };
 
 
@@ -654,20 +651,29 @@
 }
 
 
-void CodeGenerator::CallApplyLazy(Property* apply,
+void CodeGenerator::CallApplyLazy(Expression* applicand,
                                   Expression* receiver,
                                   VariableProxy* arguments,
                                   int position) {
+  // An optimized implementation of expressions of the form
+  // x.apply(y, arguments).
+  // If the arguments object of the scope has not been allocated,
+  // and x.apply is Function.prototype.apply, this optimization
+  // just copies y and the arguments of the current function on the
+  // stack, as receiver and arguments, and calls x.
+  // In the implementation comments, we call x the applicand
+  // and y the receiver.
   ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
   ASSERT(arguments->IsArguments());
 
-  JumpTarget slow, done;
-
-  // Load the apply function onto the stack. This will usually
+  // Load applicand.apply onto the stack. This will usually
   // give us a megamorphic load site. Not super, but it works.
-  Reference ref(this, apply);
-  ref.GetValue();
-  ASSERT(ref.type() == Reference::NAMED);
+  Load(applicand);
+  Handle<String> name = Factory::LookupAsciiSymbol("apply");
+  frame()->Push(name);
+  Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
+  __ nop();
+  frame()->Push(&answer);
 
   // Load the receiver and the existing arguments object onto the
   // expression stack. Avoid allocating the arguments object here.
@@ -677,6 +683,11 @@
   // Emit the source position information after having loaded the
   // receiver and the arguments.
   CodeForSourcePosition(position);
+  // Contents of frame at this point:
+  // Frame[0]: arguments object of the current function or the hole.
+  // Frame[1]: receiver
+  // Frame[2]: applicand.apply
+  // Frame[3]: applicand.
 
   // Check if the arguments object has been lazily allocated
   // already. If so, just use that instead of copying the arguments
@@ -684,143 +695,149 @@
   // named 'arguments' has been introduced.
   frame_->Dup();
   Result probe = frame_->Pop();
-  bool try_lazy = true;
-  if (probe.is_constant()) {
-    try_lazy = probe.handle()->IsTheHole();
-  } else {
-    __ Cmp(probe.reg(), Factory::the_hole_value());
-    probe.Unuse();
-    slow.Branch(not_equal);
-  }
+  { VirtualFrame::SpilledScope spilled_scope;
+    Label slow, done;
+    bool try_lazy = true;
+    if (probe.is_constant()) {
+      try_lazy = probe.handle()->IsTheHole();
+    } else {
+      __ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex);
+      probe.Unuse();
+      __ j(not_equal, &slow);
+    }
 
-  if (try_lazy) {
-    JumpTarget build_args;
+    if (try_lazy) {
+      Label build_args;
+      // Get rid of the arguments object probe.
+      frame_->Drop();  // Can be called on a spilled frame.
+      // Stack now has 3 elements on it.
+      // Contents of stack at this point:
+      // rsp[0]: receiver
+      // rsp[1]: applicand.apply
+      // rsp[2]: applicand.
 
-    // Get rid of the arguments object probe.
-    frame_->Drop();
-
-    // Before messing with the execution stack, we sync all
-    // elements. This is bound to happen anyway because we're
-    // about to call a function.
-    frame_->SyncRange(0, frame_->element_count() - 1);
-
-    // Check that the receiver really is a JavaScript object.
-    {
-      frame_->PushElementAt(0);
-      Result receiver = frame_->Pop();
-      receiver.ToRegister();
-      Condition is_smi = masm_->CheckSmi(receiver.reg());
-      build_args.Branch(is_smi);
+      // Check that the receiver really is a JavaScript object.
+      __ movq(rax, Operand(rsp, 0));
+      Condition is_smi = masm_->CheckSmi(rax);
+      __ j(is_smi, &build_args);
       // We allow all JSObjects including JSFunctions.  As long as
       // JS_FUNCTION_TYPE is the last instance type and it is right
       // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
       // bound.
       ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
       ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
-      __ CmpObjectType(receiver.reg(), FIRST_JS_OBJECT_TYPE, kScratchRegister);
-      build_args.Branch(below);
-    }
+      __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
+      __ j(below, &build_args);
 
-    // Verify that we're invoking Function.prototype.apply.
-    {
-      frame_->PushElementAt(1);
-      Result apply = frame_->Pop();
-      apply.ToRegister();
-      Condition is_smi = masm_->CheckSmi(apply.reg());
-      build_args.Branch(is_smi);
-      Result tmp = allocator_->Allocate();
-      __ CmpObjectType(apply.reg(), JS_FUNCTION_TYPE, tmp.reg());
-      build_args.Branch(not_equal);
-      __ movq(tmp.reg(),
-              FieldOperand(apply.reg(), JSFunction::kSharedFunctionInfoOffset));
+      // Check that applicand.apply is Function.prototype.apply.
+      __ movq(rax, Operand(rsp, kPointerSize));
+      is_smi = masm_->CheckSmi(rax);
+      __ j(is_smi, &build_args);
+      __ CmpObjectType(rax, JS_FUNCTION_TYPE, rcx);
+      __ j(not_equal, &build_args);
+      __ movq(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
       Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
-      __ Cmp(FieldOperand(tmp.reg(), SharedFunctionInfo::kCodeOffset),
-             apply_code);
-      build_args.Branch(not_equal);
+      __ Cmp(FieldOperand(rax, SharedFunctionInfo::kCodeOffset), apply_code);
+      __ j(not_equal, &build_args);
+
+      // Check that applicand is a function.
+      __ movq(rdi, Operand(rsp, 2 * kPointerSize));
+      is_smi = masm_->CheckSmi(rdi);
+      __ j(is_smi, &build_args);
+      __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+      __ j(not_equal, &build_args);
+
+      // Copy the arguments to this function possibly from the
+      // adaptor frame below it.
+      Label invoke, adapted;
+      __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+      __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
+                    Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+      __ j(equal, &adapted);
+
+      // No arguments adaptor frame. Copy fixed number of arguments.
+      __ movq(rax, Immediate(scope_->num_parameters()));
+      for (int i = 0; i < scope_->num_parameters(); i++) {
+        __ push(frame_->ParameterAt(i));
+      }
+      __ jmp(&invoke);
+
+      // Arguments adaptor frame present. Copy arguments from there, but
+      // avoid copying too many arguments to avoid stack overflows.
+      __ bind(&adapted);
+      static const uint32_t kArgumentsLimit = 1 * KB;
+      __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+      __ SmiToInteger32(rax, rax);
+      __ movq(rcx, rax);
+      __ cmpq(rax, Immediate(kArgumentsLimit));
+      __ j(above, &build_args);
+
+      // Loop through the arguments pushing them onto the execution
+      // stack. We don't inform the virtual frame of the push, so we don't
+      // have to worry about getting rid of the elements from the virtual
+      // frame.
+      Label loop;
+      // rcx is a small non-negative integer, due to the test above.
+      __ testl(rcx, rcx);
+      __ j(zero, &invoke);
+      __ bind(&loop);
+      __ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
+      __ decl(rcx);
+      __ j(not_zero, &loop);
+
+      // Invoke the function.
+      __ bind(&invoke);
+      ParameterCount actual(rax);
+      __ InvokeFunction(rdi, actual, CALL_FUNCTION);
+      // Drop applicand.apply and applicand from the stack, and push
+      // the result of the function call, but leave the spilled frame
+      // unchanged, with 3 elements, so it is correct when we compile the
+      // slow-case code.
+      __ addq(rsp, Immediate(2 * kPointerSize));
+      __ push(rax);
+      // Stack now has 1 element:
+      //   rsp[0]: result
+      __ jmp(&done);
+
+      // Slow-case: Allocate the arguments object since we know it isn't
+      // there, and fall-through to the slow-case where we call
+      // applicand.apply.
+      __ bind(&build_args);
+      // Stack now has 3 elements, because we jumped here from code where:
+      // rsp[0]: receiver
+      // rsp[1]: applicand.apply
+      // rsp[2]: applicand.
+
+      // StoreArgumentsObject requires a correct frame, and may modify it.
+      Result arguments_object = StoreArgumentsObject(false);
+      frame_->SpillAll();
+      arguments_object.ToRegister();
+      frame_->EmitPush(arguments_object.reg());
+      arguments_object.Unuse();
+      // Stack and frame now have 4 elements.
+      __ bind(&slow);
     }
 
-    // Get the function receiver from the stack. Check that it
-    // really is a function.
-    __ movq(rdi, Operand(rsp, 2 * kPointerSize));
-    Condition is_smi = masm_->CheckSmi(rdi);
-    build_args.Branch(is_smi);
-    __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
-    build_args.Branch(not_equal);
+    // Generic computation of x.apply(y, args) with no special optimization.
+    // Flip applicand.apply and applicand on the stack, so
+    // applicand looks like the receiver of the applicand.apply call.
+    // Then process it as a normal function call.
+    __ movq(rax, Operand(rsp, 3 * kPointerSize));
+    __ movq(rbx, Operand(rsp, 2 * kPointerSize));
+    __ movq(Operand(rsp, 2 * kPointerSize), rax);
+    __ movq(Operand(rsp, 3 * kPointerSize), rbx);
 
-    // Copy the arguments to this function possibly from the
-    // adaptor frame below it.
-    Label invoke, adapted;
-    __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-    __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
-                  Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-    __ j(equal, &adapted);
-
-    // No arguments adaptor frame. Copy fixed number of arguments.
-    __ movq(rax, Immediate(scope_->num_parameters()));
-    for (int i = 0; i < scope_->num_parameters(); i++) {
-      __ push(frame_->ParameterAt(i));
-    }
-    __ jmp(&invoke);
-
-    // Arguments adaptor frame present. Copy arguments from there, but
-    // avoid copying too many arguments to avoid stack overflows.
-    __ bind(&adapted);
-    static const uint32_t kArgumentsLimit = 1 * KB;
-    __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-    __ SmiToInteger32(rax, rax);
-    __ movq(rcx, rax);
-    __ cmpq(rax, Immediate(kArgumentsLimit));
-    build_args.Branch(above);
-
-    // Loop through the arguments pushing them onto the execution
-    // stack. We don't inform the virtual frame of the push, so we don't
-    // have to worry about getting rid of the elements from the virtual
-    // frame.
-    Label loop;
-    __ testl(rcx, rcx);
-    __ j(zero, &invoke);
-    __ bind(&loop);
-    __ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
-    __ decl(rcx);
-    __ j(not_zero, &loop);
-
-    // Invoke the function. The virtual frame knows about the receiver
-    // so make sure to forget that explicitly.
-    __ bind(&invoke);
-    ParameterCount actual(rax);
-    __ InvokeFunction(rdi, actual, CALL_FUNCTION);
-    frame_->Forget(1);
-    Result result = allocator()->Allocate(rax);
-    frame_->SetElementAt(0, &result);
-    done.Jump();
-
-    // Slow-case: Allocate the arguments object since we know it isn't
-    // there, and fall-through to the slow-case where we call
-    // Function.prototype.apply.
-    build_args.Bind();
-    Result arguments_object = StoreArgumentsObject(false);
-    frame_->Push(&arguments_object);
-    slow.Bind();
-  }
-
-  // Flip the apply function and the function to call on the stack, so
-  // the function looks like the receiver of the apply call. This way,
-  // the generic Function.prototype.apply implementation can deal with
-  // the call like it usually does.
-  Result a2 = frame_->Pop();
-  Result a1 = frame_->Pop();
-  Result ap = frame_->Pop();
-  Result fn = frame_->Pop();
-  frame_->Push(&ap);
-  frame_->Push(&fn);
-  frame_->Push(&a1);
-  frame_->Push(&a2);
-  CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
-  Result res = frame_->CallStub(&call_function, 3);
-  frame_->Push(&res);
-
-  // All done. Restore context register after call.
-  if (try_lazy) done.Bind();
+    CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
+    Result res = frame_->CallStub(&call_function, 3);
+    // The function and its two arguments have been dropped.
+    frame_->Drop(1);  // Drop the receiver as well.
+    res.ToRegister();
+    frame_->EmitPush(res.reg());
+    // Stack now has 1 element:
+    //   rsp[0]: result
+    if (try_lazy) __ bind(&done);
+  }  // End of spilled scope.
+  // Restore the context register after a call.
   frame_->RestoreContextRegister();
 }
 
@@ -1817,28 +1834,20 @@
     if (!each.is_illegal()) {
       if (each.size() > 0) {
         frame_->EmitPush(frame_->ElementAt(each.size()));
-      }
-      // If the reference was to a slot we rely on the convenient property
-      // that it doesn't matter whether a value (eg, ebx pushed above) is
-      // right on top of or right underneath a zero-sized reference.
-      each.SetValue(NOT_CONST_INIT);
-      if (each.size() > 0) {
-        // It's safe to pop the value lying on top of the reference before
-        // unloading the reference itself (which preserves the top of stack,
-        // ie, now the topmost value of the non-zero sized reference), since
-        // we will discard the top of stack after unloading the reference
-        // anyway.
-        frame_->Drop();
+        each.SetValue(NOT_CONST_INIT);
+        frame_->Drop(2);  // Drop the original and the copy of the element.
+      } else {
+        // If the reference has size zero then we can use the value below
+        // the reference as if it were above the reference, instead of pushing
+        // a new copy of it above the reference.
+        each.SetValue(NOT_CONST_INIT);
+        frame_->Drop();  // Drop the original of the element.
       }
     }
   }
   // Unloading a reference may leave the frame in an unspilled state.
   frame_->SpillAll();
 
-  // Discard the i'th entry pushed above or else the remainder of the
-  // reference, whichever is currently on top of the stack.
-  frame_->Drop();
-
   // Body.
   CheckStack();  // TODO(1222600): ignore if body contains calls.
   VisitAndSpill(node->body());
@@ -2549,7 +2558,7 @@
 void CodeGenerator::VisitAssignment(Assignment* node) {
   Comment cmnt(masm_, "[ Assignment");
 
-  { Reference target(this, node->target());
+  { Reference target(this, node->target(), node->is_compound());
     if (target.is_illegal()) {
       // Fool the virtual frame into thinking that we left the assignment's
       // value on the frame.
@@ -2571,12 +2580,27 @@
       frame_->PushElementAt(target.size() - 1);
       Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
     }
+    if (node->ends_initialization_block()) {
+      // Add an extra copy of the receiver to the frame, so that it can be
+      // converted back to fast case after the assignment.
+      ASSERT(target.type() == Reference::NAMED ||
+             target.type() == Reference::KEYED);
+      if (target.type() == Reference::NAMED) {
+        frame_->Dup();
+        // Dup target receiver on stack.
+      } else {
+        ASSERT(target.type() == Reference::KEYED);
+        Result temp = frame_->Pop();
+        frame_->Dup();
+        frame_->Push(&temp);
+      }
+    }
     if (node->op() == Token::ASSIGN ||
         node->op() == Token::INIT_VAR ||
         node->op() == Token::INIT_CONST) {
       Load(node->value());
 
-    } else {
+    } else {  // Assignment is a compound assignment.
       Literal* literal = node->value()->AsLiteral();
       bool overwrite_value =
           (node->value()->AsBinaryOperation() != NULL &&
@@ -2602,6 +2626,7 @@
         var->mode() == Variable::CONST &&
         node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
       // Assignment ignored - leave the value on the stack.
+      UnloadReference(&target);
     } else {
       CodeForSourcePosition(node->position());
       if (node->op() == Token::INIT_CONST) {
@@ -2613,13 +2638,15 @@
         target.SetValue(NOT_CONST_INIT);
       }
       if (node->ends_initialization_block()) {
-        ASSERT(target.type() == Reference::NAMED ||
-               target.type() == Reference::KEYED);
+        ASSERT(target.type() == Reference::UNLOADED);
         // End of initialization block. Revert to fast case.  The
-        // argument to the runtime call is the receiver, which is the
-        // first value pushed as part of the reference, which is below
-        // the lhs value.
-        frame_->PushElementAt(target.size());
+        // argument to the runtime call is the extra copy of the receiver,
+        // which is below the value of the assignment.
+        // Swap the receiver and the value of the assignment expression.
+        Result lhs = frame_->Pop();
+        Result receiver = frame_->Pop();
+        frame_->Push(&lhs);
+        frame_->Push(&receiver);
         Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
       }
     }
@@ -2787,7 +2814,7 @@
           args->at(1)->AsVariableProxy()->IsArguments()) {
         // Use the optimized Function.prototype.apply that avoids
         // allocating lazily allocated arguments objects.
-        CallApplyLazy(property,
+        CallApplyLazy(property->obj(),
                       args->at(0),
                       args->at(1)->AsVariableProxy(),
                       node->position());
@@ -2819,16 +2846,24 @@
       // -------------------------------------------
 
       // Load the function to call from the property through a reference.
-      Reference ref(this, property);
-      ref.GetValue();
-
-      // Pass receiver to called function.
       if (property->is_synthetic()) {
+        Reference ref(this, property, false);
+        ref.GetValue();
         // Use global object as receiver.
         LoadGlobalReceiver();
       } else {
-        // The reference's size is non-negative.
-        frame_->PushElementAt(ref.size());
+        Reference ref(this, property, false);
+        ASSERT(ref.size() == 2);
+        Result key = frame_->Pop();
+        frame_->Dup();  // Duplicate the receiver.
+        frame_->Push(&key);
+        ref.GetValue();
+        // Top of frame contains function to call, with duplicate copy of
+        // receiver below it.  Swap them.
+        Result function = frame_->Pop();
+        Result receiver = frame_->Pop();
+        frame_->Push(&function);
+        frame_->Push(&receiver);
       }
 
       // Call the function.
@@ -3012,6 +3047,9 @@
     }
 
   } else {
+    bool overwrite =
+      (node->expression()->AsBinaryOperation() != NULL &&
+       node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
     Load(node->expression());
     switch (op) {
       case Token::NOT:
@@ -3021,9 +3059,6 @@
         break;
 
       case Token::SUB: {
-        bool overwrite =
-          (node->expression()->AsBinaryOperation() != NULL &&
-           node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
         GenericUnaryOpStub stub(Token::SUB, overwrite);
         // TODO(1222589): remove dependency of TOS being cached inside stub
         Result operand = frame_->Pop();
@@ -3042,10 +3077,10 @@
         Condition is_smi = masm_->CheckSmi(operand.reg());
         smi_label.Branch(is_smi, &operand);
 
-        frame_->Push(&operand);  // undo popping of TOS
-        Result answer = frame_->InvokeBuiltin(Builtins::BIT_NOT,
-                                              CALL_FUNCTION, 1);
+        GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
+        Result answer = frame_->CallStub(&stub, &operand);
         continue_label.Jump(&answer);
+
         smi_label.Bind(&answer);
         answer.ToRegister();
         frame_->Spill(answer.reg());
@@ -3167,7 +3202,9 @@
   // value will be in the frame to be spilled.
   if (is_postfix) frame_->Push(Smi::FromInt(0));
 
-  { Reference target(this, node->expression());
+  // A constant reference is never written to, so it does not need to be
+  // treated as a compound assignment reference that persists after the get.
+  { Reference target(this, node->expression(), !is_const);
     if (target.is_illegal()) {
       // Spoof the virtual frame to have the expected height (one higher
       // than on entry).
@@ -3622,6 +3659,22 @@
 }
 
 
+void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+  Load(args->at(0));
+  Result obj = frame_->Pop();
+  obj.ToRegister();
+  Condition is_smi = masm_->CheckSmi(obj.reg());
+  destination()->false_target()->Branch(is_smi);
+  __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
+  __ movzxbl(kScratchRegister,
+             FieldOperand(kScratchRegister, Map::kBitFieldOffset));
+  __ testl(kScratchRegister, Immediate(1 << Map::kIsUndetectable));
+  obj.Unuse();
+  destination()->Split(not_zero);
+}
+
+
 void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 0);
 
@@ -3926,7 +3979,8 @@
   Load(args->at(1));
   Load(args->at(2));
 
-  Result answer = frame_->CallRuntime(Runtime::kSubString, 3);
+  SubStringStub stub;
+  Result answer = frame_->CallStub(&stub, 3);
   frame_->Push(&answer);
 }
 
@@ -4239,14 +4293,19 @@
 //------------------------------------------------------------------------------
 // CodeGenerator implementation of variables, lookups, and stores.
 
-Reference::Reference(CodeGenerator* cgen, Expression* expression)
-    : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
+Reference::Reference(CodeGenerator* cgen,
+                     Expression* expression,
+                     bool persist_after_get)
+    : cgen_(cgen),
+      expression_(expression),
+      type_(ILLEGAL),
+      persist_after_get_(persist_after_get) {
   cgen->LoadReference(this);
 }
 
 
 Reference::~Reference() {
-  cgen_->UnloadReference(this);
+  ASSERT(is_unloaded() || is_illegal());
 }
 
 
@@ -4296,6 +4355,7 @@
   // Pop a reference from the stack while preserving TOS.
   Comment cmnt(masm_, "[ UnloadReference");
   frame_->Nip(ref->size());
+  ref->set_unloaded();
 }
 
 
@@ -5014,31 +5074,6 @@
     return;
   }
 
-  // Set the flags based on the operation, type and loop nesting level.
-  GenericBinaryFlags flags;
-  switch (op) {
-    case Token::BIT_OR:
-    case Token::BIT_AND:
-    case Token::BIT_XOR:
-    case Token::SHL:
-    case Token::SHR:
-    case Token::SAR:
-      // Bit operations always assume they likely operate on Smis. Still only
-      // generate the inline Smi check code if this operation is part of a loop.
-      flags = (loop_nesting() > 0)
-              ? NO_SMI_CODE_IN_STUB
-              : NO_GENERIC_BINARY_FLAGS;
-      break;
-
-    default:
-      // By default only inline the Smi check code for likely smis if this
-      // operation is part of a loop.
-      flags = ((loop_nesting() > 0) && type->IsLikelySmi())
-              ? NO_SMI_CODE_IN_STUB
-              : NO_GENERIC_BINARY_FLAGS;
-      break;
-  }
-
   Result right = frame_->Pop();
   Result left = frame_->Pop();
 
@@ -5072,7 +5107,6 @@
   bool left_is_non_smi = left.is_constant() && !left.handle()->IsSmi();
   bool right_is_smi = right.is_constant() && right.handle()->IsSmi();
   bool right_is_non_smi = right.is_constant() && !right.handle()->IsSmi();
-  bool generate_no_smi_code = false;  // No smi code at all, inline or in stub.
 
   if (left_is_smi && right_is_smi) {
     // Compute the constant result at compile time, and leave it on the frame.
@@ -5081,34 +5115,35 @@
     if (FoldConstantSmis(op, left_int, right_int)) return;
   }
 
+  Result answer;
   if (left_is_non_smi || right_is_non_smi) {
-    // Set flag so that we go straight to the slow case, with no smi code.
-    generate_no_smi_code = true;
-  } else if (right_is_smi) {
-    ConstantSmiBinaryOperation(op, &left, right.handle(),
-                               type, false, overwrite_mode);
-    return;
-  } else if (left_is_smi) {
-    ConstantSmiBinaryOperation(op, &right, left.handle(),
-                               type, true, overwrite_mode);
-    return;
-  }
-
-  if ((flags & NO_SMI_CODE_IN_STUB) != 0 && !generate_no_smi_code) {
-    LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
-  } else {
+    // Go straight to the generic stub; no inline smi code is needed.
     frame_->Push(&left);
     frame_->Push(&right);
-    // If we know the arguments aren't smis, use the binary operation stub
-    // that does not check for the fast smi case.
-    // The same stub is used for NO_SMI_CODE and SMI_CODE_INLINED.
-    if (generate_no_smi_code) {
-      flags = NO_SMI_CODE_IN_STUB;
+    GenericBinaryOpStub stub(op, overwrite_mode, NO_SMI_CODE_IN_STUB);
+    answer = frame_->CallStub(&stub, 2);
+  } else if (right_is_smi) {
+    answer = ConstantSmiBinaryOperation(op, &left, right.handle(),
+                                        type, false, overwrite_mode);
+  } else if (left_is_smi) {
+    answer = ConstantSmiBinaryOperation(op, &right, left.handle(),
+                                        type, true, overwrite_mode);
+  } else {
+    // Choose between the inline smi code and the generic stub based on
+    // the operation, the type hint, and the loop nesting level.  Bit
+    // operations are assumed to operate on smis, so they get inline smi
+    // checks whenever they are part of a loop.  All other operations get
+    // inline smi checks only when part of a loop and likely to operate
+    // on smis.
+    if (loop_nesting() > 0 && (Token::IsBitOp(op) || type->IsLikelySmi())) {
+      answer = LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
+    } else {
+      frame_->Push(&left);
+      frame_->Push(&right);
+      GenericBinaryOpStub stub(op, overwrite_mode, NO_GENERIC_BINARY_FLAGS);
+      answer = frame_->CallStub(&stub, 2);
     }
-    GenericBinaryOpStub stub(op, overwrite_mode, flags);
-    Result answer = frame_->CallStub(&stub, 2);
-    frame_->Push(&answer);
   }
+  frame_->Push(&answer);
 }
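Annotation: the restructured dispatch above can be collapsed into one predicate. A sketch of the rule with the operand and stub plumbing stripped away (the function name is hypothetical):

    // Returns true when the inline smi fast path (LikelySmiBinaryOperation)
    // is emitted; otherwise the GenericBinaryOpStub is called.
    // Token::IsBitOp covers BIT_OR, BIT_AND, BIT_XOR, SHL, SHR, and SAR.
    bool EmitInlineSmiCode(int loop_nesting, bool is_bit_op, bool likely_smi) {
      return loop_nesting > 0 && (is_bit_op || likely_smi);
    }
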
 
 
@@ -5189,12 +5224,12 @@
 }
 
 
-void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
-                                               Result* operand,
-                                               Handle<Object> value,
-                                               StaticType* type,
-                                               bool reversed,
-                                               OverwriteMode overwrite_mode) {
+Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
+                                                 Result* operand,
+                                                 Handle<Object> value,
+                                                 StaticType* type,
+                                                 bool reversed,
+                                                 OverwriteMode overwrite_mode) {
   // NOTE: This is an attempt to inline (a bit) more of the code for
   // some possible smi operations (like + and -) when (at least) one
   // of the operands is a constant smi.
@@ -5205,20 +5240,19 @@
   if (IsUnsafeSmi(value)) {
     Result unsafe_operand(value);
     if (reversed) {
-      LikelySmiBinaryOperation(op, &unsafe_operand, operand,
+      return LikelySmiBinaryOperation(op, &unsafe_operand, operand,
                                overwrite_mode);
     } else {
-      LikelySmiBinaryOperation(op, operand, &unsafe_operand,
+      return LikelySmiBinaryOperation(op, operand, &unsafe_operand,
                                overwrite_mode);
     }
-    ASSERT(!operand->is_valid());
-    return;
   }
 
   // Get the literal value.
   Smi* smi_value = Smi::cast(*value);
   int int_value = smi_value->value();
 
+  Result answer;
   switch (op) {
     case Token::ADD: {
       operand->ToRegister();
@@ -5239,15 +5273,15 @@
                         smi_value,
                         deferred->entry_label());
       deferred->BindExit();
-      frame_->Push(operand);
+      answer = *operand;
       break;
     }
 
     case Token::SUB: {
       if (reversed) {
         Result constant_operand(value);
-        LikelySmiBinaryOperation(op, &constant_operand, operand,
-                                 overwrite_mode);
+        answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+                                          overwrite_mode);
       } else {
         operand->ToRegister();
         frame_->Spill(operand->reg());
@@ -5261,7 +5295,7 @@
                           smi_value,
                           deferred->entry_label());
         deferred->BindExit();
-        frame_->Push(operand);
+        answer = *operand;
       }
       break;
     }
@@ -5269,8 +5303,8 @@
     case Token::SAR:
       if (reversed) {
         Result constant_operand(value);
-        LikelySmiBinaryOperation(op, &constant_operand, operand,
-                                 overwrite_mode);
+        answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+                                          overwrite_mode);
       } else {
         // Only the least significant 5 bits of the shift value are used.
         // In the slow case, this masking is done inside the runtime call.
@@ -5288,21 +5322,21 @@
                                            operand->reg(),
                                            shift_value);
         deferred->BindExit();
-        frame_->Push(operand);
+        answer = *operand;
       }
       break;
 
     case Token::SHR:
       if (reversed) {
         Result constant_operand(value);
-        LikelySmiBinaryOperation(op, &constant_operand, operand,
-                                 overwrite_mode);
+        answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+                                          overwrite_mode);
       } else {
         // Only the least significant 5 bits of the shift value are used.
         // In the slow case, this masking is done inside the runtime call.
         int shift_value = int_value & 0x1f;
         operand->ToRegister();
-        Result answer = allocator()->Allocate();
+        answer = allocator()->Allocate();
         ASSERT(answer.is_valid());
         DeferredInlineSmiOperation* deferred =
             new DeferredInlineSmiOperation(op,
@@ -5317,15 +5351,14 @@
                                         deferred->entry_label());
         deferred->BindExit();
         operand->Unuse();
-        frame_->Push(&answer);
       }
       break;
 
     case Token::SHL:
       if (reversed) {
         Result constant_operand(value);
-        LikelySmiBinaryOperation(op, &constant_operand, operand,
-                                 overwrite_mode);
+        answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+                                          overwrite_mode);
       } else {
         // Only the least significant 5 bits of the shift value are used.
         // In the slow case, this masking is done inside the runtime call.
@@ -5342,10 +5375,10 @@
                                              overwrite_mode);
           __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
           deferred->BindExit();
-          frame_->Push(operand);
+          answer = *operand;
         } else {
           // Use a fresh temporary for nonzero shift values.
-          Result answer = allocator()->Allocate();
+          answer = allocator()->Allocate();
           ASSERT(answer.is_valid());
           DeferredInlineSmiOperation* deferred =
               new DeferredInlineSmiOperation(op,
@@ -5360,7 +5393,6 @@
                                   deferred->entry_label());
           deferred->BindExit();
           operand->Unuse();
-          frame_->Push(&answer);
         }
       }
       break;
@@ -5395,7 +5427,7 @@
         }
       }
       deferred->BindExit();
-      frame_->Push(operand);
+      answer = *operand;
       break;
     }
 
@@ -5423,7 +5455,7 @@
                             Smi::FromInt(int_value - 1));
         }
         deferred->BindExit();
-        frame_->Push(operand);
+        answer = *operand;
         break;  // This break only applies if we generated code for MOD.
       }
       // Fall through if we did not find a power of 2 on the right hand side!
@@ -5432,22 +5464,24 @@
     default: {
       Result constant_operand(value);
       if (reversed) {
-        LikelySmiBinaryOperation(op, &constant_operand, operand,
-                                 overwrite_mode);
+        answer = LikelySmiBinaryOperation(op, &constant_operand, operand,
+                                          overwrite_mode);
       } else {
-        LikelySmiBinaryOperation(op, operand, &constant_operand,
-                                 overwrite_mode);
+        answer = LikelySmiBinaryOperation(op, operand, &constant_operand,
+                                          overwrite_mode);
       }
       break;
     }
   }
-  ASSERT(!operand->is_valid());
+  ASSERT(answer.is_valid());
+  return answer;
 }
 
-void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
-                                             Result* left,
-                                             Result* right,
-                                             OverwriteMode overwrite_mode) {
+Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
+                                               Result* left,
+                                               Result* right,
+                                               OverwriteMode overwrite_mode) {
+  Result answer;
   // Special handling of div and mod because they use fixed registers.
   if (op == Token::DIV || op == Token::MOD) {
     // We need rax as the quotient register, rdx as the remainder
@@ -5529,16 +5563,17 @@
       deferred->BindExit();
       left->Unuse();
       right->Unuse();
-      frame_->Push(&quotient);
+      answer = quotient;
     } else {
       ASSERT(op == Token::MOD);
       __ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label());
       deferred->BindExit();
       left->Unuse();
       right->Unuse();
-      frame_->Push(&remainder);
+      answer = remainder;
     }
-    return;
+    ASSERT(answer.is_valid());
+    return answer;
   }
 
   // Special handling of shift operations because they use fixed
@@ -5559,7 +5594,7 @@
     frame_->Spill(rcx);
 
     // Use a fresh answer register to avoid spilling the left operand.
-    Result answer = allocator_->Allocate();
+    answer = allocator_->Allocate();
     ASSERT(answer.is_valid());
     // Check that both operands are smis using the answer register as a
     // temporary.
@@ -5598,8 +5633,8 @@
     deferred->BindExit();
     left->Unuse();
     right->Unuse();
-    frame_->Push(&answer);
-    return;
+    ASSERT(answer.is_valid());
+    return answer;
   }
 
   // Handle the other binary operations.
@@ -5608,7 +5643,7 @@
   // A newly allocated register answer is used to hold the answer.  The
   // registers containing left and right are not modified so they don't
   // need to be spilled in the fast case.
-  Result answer = allocator_->Allocate();
+  answer = allocator_->Allocate();
   ASSERT(answer.is_valid());
 
   // Perform the smi tag check.
@@ -5662,7 +5697,122 @@
   deferred->BindExit();
   left->Unuse();
   right->Unuse();
-  frame_->Push(&answer);
+  ASSERT(answer.is_valid());
+  return answer;
+}
+
+
+Result CodeGenerator::EmitKeyedLoad(bool is_global) {
+  Comment cmnt(masm_, "[ Load from keyed Property");
+  // Inline array load code if inside of a loop.  We do not know
+  // the receiver map yet, so we initially generate the code with
+  // a check against an invalid map.  In the inline cache code, we
+  // patch the map check if appropriate.
+  if (loop_nesting() > 0) {
+    Comment cmnt(masm_, "[ Inlined load from keyed Property");
+
+    Result key = frame_->Pop();
+    Result receiver = frame_->Pop();
+    key.ToRegister();
+    receiver.ToRegister();
+
+    // Use a fresh temporary to load the elements without destroying
+    // the receiver which is needed for the deferred slow case.
+    Result elements = allocator()->Allocate();
+    ASSERT(elements.is_valid());
+
+    // Use a fresh temporary for the index and later the loaded
+    // value.
+    Result index = allocator()->Allocate();
+    ASSERT(index.is_valid());
+
+    DeferredReferenceGetKeyedValue* deferred =
+        new DeferredReferenceGetKeyedValue(index.reg(),
+                                           receiver.reg(),
+                                           key.reg(),
+                                           is_global);
+
+    // Check that the receiver is not a smi (only needed if this
+    // is not a load from the global context) and that it has the
+    // expected map.
+    if (!is_global) {
+      __ JumpIfSmi(receiver.reg(), deferred->entry_label());
+    }
+
+    // Initially, use an invalid map. The map is patched in the IC
+    // initialization code.
+    __ bind(deferred->patch_site());
+    // Use masm-> here instead of the double underscore macro since extra
+    // coverage code can interfere with the patching.  Do not use
+    // root array to load null_value, since it must be patched with
+    // the expected receiver map.
+    masm_->movq(kScratchRegister, Factory::null_value(),
+                RelocInfo::EMBEDDED_OBJECT);
+    masm_->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+                kScratchRegister);
+    deferred->Branch(not_equal);
+
+    // Check that the key is a non-negative smi.
+    __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
+
+    // Get the elements array from the receiver and check that it
+    // is not a dictionary.
+    __ movq(elements.reg(),
+            FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+    __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
+           Factory::fixed_array_map());
+    deferred->Branch(not_equal);
+
+    // Shift the key to get the actual index value and check that
+    // it is within bounds.
+    __ SmiToInteger32(index.reg(), key.reg());
+    __ cmpl(index.reg(),
+            FieldOperand(elements.reg(), FixedArray::kLengthOffset));
+    deferred->Branch(above_equal);
+
+    // The index register holds the un-smi-tagged key. It has been
+    // zero-extended to 64-bits, so it can be used directly as index in the
+    // operand below.
+    // Load and check that the result is not the hole.  We could
+    // reuse the index or elements register for the value.
+    //
+    // TODO(206): Consider whether it makes sense to try some
+    // heuristic about which register to reuse.  For example, if
+    // one is rax, then we can reuse that one because the value
+    // coming from the deferred code will be in rax.
+    Result value = index;
+    __ movq(value.reg(),
+            Operand(elements.reg(),
+                    index.reg(),
+                    times_pointer_size,
+                    FixedArray::kHeaderSize - kHeapObjectTag));
+    elements.Unuse();
+    index.Unuse();
+    __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
+    deferred->Branch(equal);
+    __ IncrementCounter(&Counters::keyed_load_inline, 1);
+
+    deferred->BindExit();
+    // Restore the receiver and key to the frame and push the
+    // result on top of it.
+    frame_->Push(&receiver);
+    frame_->Push(&key);
+    return value;
+
+  } else {
+    Comment cmnt(masm_, "[ Load from keyed Property");
+    RelocInfo::Mode mode = is_global
+        ? RelocInfo::CODE_TARGET_CONTEXT
+        : RelocInfo::CODE_TARGET;
+    Result answer = frame_->CallKeyedLoadIC(mode);
+    // Make sure that we do not have a test instruction after the
+    // call.  A test instruction after the call is used to
+    // indicate that we have generated an inline version of the
+    // keyed load.  The explicit nop instruction is here because
+    // the push that follows might be peep-hole optimized away.
+    __ nop();
+    return answer;
+  }
 }
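Annotation: the inlined fast path above performs the same checks as this self-contained sketch; the stand-in types are hypothetical, and each early return corresponds to a branch into the deferred IC code:

    #include <cstddef>
    #include <optional>
    #include <vector>

    const int kTheHole = -1;  // stand-in for the hole value

    struct Elements { std::vector<int> slots; };  // stand-in for FixedArray

    std::optional<int> InlineKeyedLoad(const Elements& elements, size_t index) {
      // The smi and map checks on receiver/key are elided here; they also
      // branch to the deferred code when they fail.
      if (index >= elements.slots.size()) return std::nullopt;  // bounds check
      int value = elements.slots[index];
      if (value == kTheHole) return std::nullopt;               // hole check
      return value;
    }
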
 
 
@@ -5795,119 +5945,18 @@
       bool is_global = var != NULL;
       ASSERT(!is_global || var->is_global());
 
-      // Inline array load code if inside of a loop.  We do not know
-      // the receiver map yet, so we initially generate the code with
-      // a check against an invalid map.  In the inline cache code, we
-      // patch the map check if appropriate.
-      if (cgen_->loop_nesting() > 0) {
-        Comment cmnt(masm, "[ Inlined load from keyed Property");
-
-        Result key = cgen_->frame()->Pop();
-        Result receiver = cgen_->frame()->Pop();
-        key.ToRegister();
-        receiver.ToRegister();
-
-        // Use a fresh temporary to load the elements without destroying
-        // the receiver which is needed for the deferred slow case.
-        Result elements = cgen_->allocator()->Allocate();
-        ASSERT(elements.is_valid());
-
-        // Use a fresh temporary for the index and later the loaded
-        // value.
-        Result index = cgen_->allocator()->Allocate();
-        ASSERT(index.is_valid());
-
-        DeferredReferenceGetKeyedValue* deferred =
-            new DeferredReferenceGetKeyedValue(index.reg(),
-                                               receiver.reg(),
-                                               key.reg(),
-                                               is_global);
-
-        // Check that the receiver is not a smi (only needed if this
-        // is not a load from the global context) and that it has the
-        // expected map.
-        if (!is_global) {
-          __ JumpIfSmi(receiver.reg(), deferred->entry_label());
-        }
-
-        // Initially, use an invalid map. The map is patched in the IC
-        // initialization code.
-        __ bind(deferred->patch_site());
-        // Use masm-> here instead of the double underscore macro since extra
-        // coverage code can interfere with the patching.
-        masm->movq(kScratchRegister, Factory::null_value(),
-                   RelocInfo::EMBEDDED_OBJECT);
-        masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
-                   kScratchRegister);
-        deferred->Branch(not_equal);
-
-        // Check that the key is a non-negative smi.
-        __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
-
-        // Get the elements array from the receiver and check that it
-        // is not a dictionary.
-        __ movq(elements.reg(),
-                FieldOperand(receiver.reg(), JSObject::kElementsOffset));
-        __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
-               Factory::fixed_array_map());
-        deferred->Branch(not_equal);
-
-        // Shift the key to get the actual index value and check that
-        // it is within bounds.
-        __ SmiToInteger32(index.reg(), key.reg());
-        __ cmpl(index.reg(),
-                FieldOperand(elements.reg(), FixedArray::kLengthOffset));
-        deferred->Branch(above_equal);
-
-        // The index register holds the un-smi-tagged key. It has been
-        // zero-extended to 64-bits, so it can be used directly as index in the
-        // operand below.
-        // Load and check that the result is not the hole.  We could
-        // reuse the index or elements register for the value.
-        //
-        // TODO(206): Consider whether it makes sense to try some
-        // heuristic about which register to reuse.  For example, if
-        // one is rax, the we can reuse that one because the value
-        // coming from the deferred code will be in rax.
-        Result value = index;
-        __ movq(value.reg(),
-                Operand(elements.reg(),
-                        index.reg(),
-                        times_pointer_size,
-                        FixedArray::kHeaderSize - kHeapObjectTag));
-        elements.Unuse();
-        index.Unuse();
-        __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
-        deferred->Branch(equal);
-        __ IncrementCounter(&Counters::keyed_load_inline, 1);
-
-        deferred->BindExit();
-        // Restore the receiver and key to the frame and push the
-        // result on top of it.
-        cgen_->frame()->Push(&receiver);
-        cgen_->frame()->Push(&key);
-        cgen_->frame()->Push(&value);
-
-      } else {
-        Comment cmnt(masm, "[ Load from keyed Property");
-        RelocInfo::Mode mode = is_global
-                               ? RelocInfo::CODE_TARGET_CONTEXT
-                               : RelocInfo::CODE_TARGET;
-        Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
-        // Make sure that we do not have a test instruction after the
-        // call.  A test instruction after the call is used to
-        // indicate that we have generated an inline version of the
-        // keyed load.  The explicit nop instruction is here because
-        // the push that follows might be peep-hole optimized away.
-        __ nop();
-        cgen_->frame()->Push(&answer);
-      }
+      Result value = cgen_->EmitKeyedLoad(is_global);
+      cgen_->frame()->Push(&value);
       break;
     }
 
     default:
       UNREACHABLE();
   }
+
+  if (!persist_after_get_) {
+    cgen_->UnloadReference(this);
+  }
 }
 
 
@@ -5944,6 +5993,9 @@
     ASSERT(slot->type() == Slot::LOCAL);
     cgen_->frame()->TakeLocalAt(slot->index());
   }
+
+  ASSERT(persist_after_get_);
+  // Do not unload the reference, because it is used in SetValue.
 }
 
 
@@ -6072,6 +6124,7 @@
     default:
       UNREACHABLE();
   }
+  cgen_->UnloadReference(this);
 }
 
 
@@ -6213,19 +6266,17 @@
 
 
 bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
-  // TODO(X64): This method is identical to the ia32 version.
-  // Either find a reason to change it, or move it somewhere where it can be
-  // shared. (Notice: It assumes that a Smi can fit in an int).
-
   Object* answer_object = Heap::undefined_value();
   switch (op) {
     case Token::ADD:
-      if (Smi::IsValid(left + right)) {
+      // Use intptr_t to detect overflow of 32-bit int.
+      if (Smi::IsValid(static_cast<intptr_t>(left) + right)) {
         answer_object = Smi::FromInt(left + right);
       }
       break;
     case Token::SUB:
-      if (Smi::IsValid(left - right)) {
+      // Use intptr_t to detect overflow of 32-bit int.
+      if (Smi::IsValid(static_cast<intptr_t>(left) - right)) {
         answer_object = Smi::FromInt(left - right);
       }
       break;
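Annotation: the widening cast above is what makes the check sound. Evaluated as 32-bit ints, left + right could wrap before Smi::IsValid ever sees the sum; widening one operand forces 64-bit arithmetic. A sketch of the ADD case, assuming a 31-bit smi payload (the actual range comes from Smi::IsValid):

    #include <cstdint>

    const intptr_t kSmiMaxValue =
        (static_cast<intptr_t>(1) << 30) - 1;  // assumed payload width

    bool FoldableSmiAdd(int32_t left, int32_t right) {
      intptr_t sum = static_cast<intptr_t>(left) + right;  // no 32-bit wrap
      return sum >= -kSmiMaxValue - 1 && sum <= kSmiMaxValue;
    }
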
@@ -6299,56 +6350,216 @@
 
 // End of CodeGenerator implementation.
 
+// Get the integer part of a heap number.  Surprisingly, all this bit twiddling
+// is faster than using the built-in instructions on floating point registers.
+// Trashes rdi and rbx.  Dest is rcx.  Source cannot be rcx or one of the
+// trashed registers.
+void IntegerConvert(MacroAssembler* masm,
+                    Register source,
+                    bool use_sse3,
+                    Label* conversion_failure) {
+  ASSERT(!source.is(rcx) && !source.is(rdi) && !source.is(rbx));
+  Label done, right_exponent, normal_exponent;
+  Register scratch = rbx;
+  Register scratch2 = rdi;
+  // Get exponent word.
+  __ movl(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
+  // Get exponent alone in scratch2.
+  __ movl(scratch2, scratch);
+  __ and_(scratch2, Immediate(HeapNumber::kExponentMask));
+  if (use_sse3) {
+    CpuFeatures::Scope scope(SSE3);
+    // Check whether the exponent is too big for a 64 bit signed integer.
+    static const uint32_t kTooBigExponent =
+        (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
+    __ cmpl(scratch2, Immediate(kTooBigExponent));
+    __ j(greater_equal, conversion_failure);
+    // Load x87 register with heap number.
+    __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
+    // Reserve space for 64 bit answer.
+    __ subq(rsp, Immediate(sizeof(uint64_t)));  // NOLINT
+    // Do conversion, which cannot fail because we checked the exponent.
+    __ fisttp_d(Operand(rsp, 0));
+    __ movl(rcx, Operand(rsp, 0));  // Load low word of answer into rcx.
+    __ addq(rsp, Immediate(sizeof(uint64_t)));  // NOLINT
+  } else {
+    // Load rcx with zero.  We use this either for the final shift or
+    // for the answer.
+    __ xor_(rcx, rcx);
+    // Check whether the exponent matches a 32 bit signed int that cannot be
+    // represented by a Smi.  A non-smi 32 bit integer is 1.xxx * 2^30 so the
+    // exponent is 30 (biased).  This is the exponent that we are fastest at and
+    // also the highest exponent we can handle here.
+    const uint32_t non_smi_exponent =
+        (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+    __ cmpl(scratch2, Immediate(non_smi_exponent));
+    // If we have a match of the int32-but-not-Smi exponent then skip some
+    // logic.
+    __ j(equal, &right_exponent);
+    // If the exponent is higher than that then go to slow case.  This catches
+    // numbers that don't fit in a signed int32, infinities and NaNs.
+    __ j(less, &normal_exponent);
+
+    {
+      // Handle a big exponent.  The only reason we have this code is that the
+      // >>> operator has a tendency to generate numbers with an exponent of 31.
+      const uint32_t big_non_smi_exponent =
+          (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
+      __ cmpl(scratch2, Immediate(big_non_smi_exponent));
+      __ j(not_equal, conversion_failure);
+      // We have the big exponent, typically from >>>.  This means the number is
+      // in the range 2^31 to 2^32 - 1.  Get the top bits of the mantissa.
+      __ movl(scratch2, scratch);
+      __ and_(scratch2, Immediate(HeapNumber::kMantissaMask));
+      // Put back the implicit 1.
+      __ or_(scratch2, Immediate(1 << HeapNumber::kExponentShift));
+      // Shift up the mantissa bits to take up the space the exponent used to
+      // take. We just orred in the implicit bit so that took care of one and
+      // we want to use the full unsigned range so we subtract 1 bit from the
+      // shift distance.
+      const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
+      __ shl(scratch2, Immediate(big_shift_distance));
+      // Get the second half of the double.
+      __ movl(rcx, FieldOperand(source, HeapNumber::kMantissaOffset));
+      // Shift down 21 bits to get the most significant 11 bits of the low
+      // mantissa word.
+      __ shr(rcx, Immediate(32 - big_shift_distance));
+      __ or_(rcx, scratch2);
+      // We have the answer in rcx, but we may need to negate it.
+      __ testl(scratch, scratch);
+      __ j(positive, &done);
+      __ neg(rcx);
+      __ jmp(&done);
+    }
+
+    __ bind(&normal_exponent);
+    // Exponent word in scratch, exponent part of exponent word in scratch2.
+    // Zero in rcx.
+    // We know the exponent is smaller than 30 (biased).  If it is less than
+    // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0,
+    // i.e. it rounds to zero.
+    const uint32_t zero_exponent =
+        (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
+    __ subl(scratch2, Immediate(zero_exponent));
+    // rcx already has a Smi zero.
+    __ j(less, &done);
+
+    // We have a shifted exponent between 0 and 30 in scratch2.
+    __ shr(scratch2, Immediate(HeapNumber::kExponentShift));
+    __ movl(rcx, Immediate(30));
+    __ subl(rcx, scratch2);
+
+    __ bind(&right_exponent);
+    // Here rcx is the shift, scratch is the exponent word.
+    // Get the top bits of the mantissa.
+    __ and_(scratch, Immediate(HeapNumber::kMantissaMask));
+    // Put back the implicit 1.
+    __ or_(scratch, Immediate(1 << HeapNumber::kExponentShift));
+    // Shift up the mantissa bits to take up the space the exponent used to
+    // take. We have kExponentShift + 1 significant bits in the low end of the
+    // word.  Shift them to the top bits.
+    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+    __ shl(scratch, Immediate(shift_distance));
+    // Get the second half of the double. For some exponents we don't
+    // actually need this because the bits get shifted out again, but
+    // it's probably slower to test than just to do it.
+    __ movl(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
+    // Shift down 22 bits to get the most significant 10 bits of the low
+    // mantissa word.
+    __ shr(scratch2, Immediate(32 - shift_distance));
+    __ or_(scratch2, scratch);
+    // Move down according to the exponent.
+    __ shr_cl(scratch2);
+    // Now the unsigned answer is in scratch2.  We need to move it to rcx and
+    // we may need to fix the sign.
+    Label negative;
+    __ xor_(rcx, rcx);
+    __ cmpl(rcx, FieldOperand(source, HeapNumber::kExponentOffset));
+    __ j(greater, &negative);
+    __ movl(rcx, scratch2);
+    __ jmp(&done);
+    __ bind(&negative);
+    __ subl(rcx, scratch2);
+    __ bind(&done);
+  }
+}
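Annotation: the same exponent/mantissa manipulation can be written portably. A sketch covering only the simple 0..30 exponent range (the stub's big-exponent and SSE3 paths are omitted):

    #include <cstdint>
    #include <cstring>

    int32_t IntegerConvertSketch(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
      if (exponent < 0) return 0;   // magnitude below 1.0 rounds to zero
      if (exponent > 30) return 0;  // outside this sketch's range
      // Put back the implicit 1 above the 52 mantissa bits, then shift the
      // integer part down into place.
      uint64_t mantissa = (bits & ((1ULL << 52) - 1)) | (1ULL << 52);
      int32_t result = static_cast<int32_t>(mantissa >> (52 - exponent));
      return (bits >> 63) != 0 ? -result : result;
    }
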
+
+
 void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
-  ASSERT(op_ == Token::SUB);
+  Label slow, done;
 
-  Label slow;
-  Label done;
-  Label try_float;
-  // Check whether the value is a smi.
-  __ JumpIfNotSmi(rax, &try_float);
+  if (op_ == Token::SUB) {
+    // Check whether the value is a smi.
+    Label try_float;
+    __ JumpIfNotSmi(rax, &try_float);
 
-  // Enter runtime system if the value of the smi is zero
-  // to make sure that we switch between 0 and -0.
-  // Also enter it if the value of the smi is Smi::kMinValue.
-  __ SmiNeg(rax, rax, &done);
+    // Enter runtime system if the value of the smi is zero
+    // to make sure that we switch between 0 and -0.
+    // Also enter it if the value of the smi is Smi::kMinValue.
+    __ SmiNeg(rax, rax, &done);
 
-  // Either zero or Smi::kMinValue, neither of which become a smi when negated.
-  __ SmiCompare(rax, Smi::FromInt(0));
-  __ j(not_equal, &slow);
-  __ Move(rax, Factory::minus_zero_value());
-  __ jmp(&done);
+    // Either zero or Smi::kMinValue, neither of which become a smi when
+    // negated.
+    __ SmiCompare(rax, Smi::FromInt(0));
+    __ j(not_equal, &slow);
+    __ Move(rax, Factory::minus_zero_value());
+    __ jmp(&done);
 
-  // Enter runtime system.
+    // Try floating point case.
+    __ bind(&try_float);
+    __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+    __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
+    __ j(not_equal, &slow);
+    // Operand is a float, negate its value by flipping sign bit.
+    __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
+    __ movq(kScratchRegister, Immediate(0x01));
+    __ shl(kScratchRegister, Immediate(63));
+    __ xor_(rdx, kScratchRegister);  // Flip sign.
+    // rdx is value to store.
+    if (overwrite_) {
+      __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
+    } else {
+      __ AllocateHeapNumber(rcx, rbx, &slow);
+      // rcx: allocated 'empty' number
+      __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
+      __ movq(rax, rcx);
+    }
+  } else if (op_ == Token::BIT_NOT) {
+    // Check if the operand is a heap number.
+    __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+    __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
+    __ j(not_equal, &slow);
+
+    // Convert the heap number in rax to an untagged integer in rcx.
+    IntegerConvert(masm, rax, CpuFeatures::IsSupported(SSE3), &slow);
+
+    // Do the bitwise operation.  On x64 any int32 result fits in a smi,
+    // so no overflow check is needed before tagging.
+    __ not_(rcx);
+    // Tag the result as a smi and we're done.
+    ASSERT(kSmiTagSize == 1);
+    __ Integer32ToSmi(rax, rcx);
+  }
+
+  // Return from the stub.
+  __ bind(&done);
+  __ StubReturn(1);
+
+  // Handle the slow case by jumping to the JavaScript builtin.
   __ bind(&slow);
   __ pop(rcx);  // pop return address
   __ push(rax);
   __ push(rcx);  // push return address
-  __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
-  __ jmp(&done);
-
-  // Try floating point case.
-  __ bind(&try_float);
-  __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
-  __ Cmp(rdx, Factory::heap_number_map());
-  __ j(not_equal, &slow);
-  // Operand is a float, negate its value by flipping sign bit.
-  __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
-  __ movq(kScratchRegister, Immediate(0x01));
-  __ shl(kScratchRegister, Immediate(63));
-  __ xor_(rdx, kScratchRegister);  // Flip sign.
-  // rdx is value to store.
-  if (overwrite_) {
-    __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
-  } else {
-    __ AllocateHeapNumber(rcx, rbx, &slow);
-    // rcx: allocated 'empty' number
-    __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
-    __ movq(rax, rcx);
+  switch (op_) {
+    case Token::SUB:
+      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+      break;
+    case Token::BIT_NOT:
+      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
+      break;
+    default:
+      UNREACHABLE();
   }
-
-  __ bind(&done);
-  __ StubReturn(1);
 }
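Annotation: the SUB fast path above negates a heap number without touching the FPU, by XOR-ing the IEEE-754 sign bit. The same trick in portable C++:

    #include <cstdint>
    #include <cstring>

    double NegateByBitFlip(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      bits ^= 1ULL << 63;  // mirrors: shl(kScratchRegister, 63); xor_(rdx, ...)
      std::memcpy(&value, &bits, sizeof(bits));
      return value;  // 0.0 becomes -0.0; NaNs keep their payload
    }
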
 
 
@@ -7297,15 +7508,6 @@
 }
 
 
-void FloatingPointHelper::LoadInt32Operand(MacroAssembler* masm,
-                                           const Operand& src,
-                                           Register dst) {
-  // TODO(X64): Convert number operands to int32 values.
-  // Don't convert a Smi to a double first.
-  UNIMPLEMENTED();
-}
-
-
 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm) {
   Label load_smi_1, load_smi_2, done_load_1, done;
   __ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize));
@@ -7335,6 +7537,61 @@
 }
 
 
+// Input: rdx, rax are the left and right objects of a bit op.
+// Output: rax, rcx are left and right integers for a bit op.
+void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
+                                         bool use_sse3,
+                                         Label* conversion_failure) {
+  // Check float operands.
+  Label arg1_is_object, check_undefined_arg1;
+  Label arg2_is_object, check_undefined_arg2;
+  Label load_arg2, done;
+
+  __ JumpIfNotSmi(rdx, &arg1_is_object);
+  __ SmiToInteger32(rdx, rdx);
+  __ jmp(&load_arg2);
+
+  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
+  __ bind(&check_undefined_arg1);
+  __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
+  __ j(not_equal, conversion_failure);
+  __ movl(rdx, Immediate(0));
+  __ jmp(&load_arg2);
+
+  __ bind(&arg1_is_object);
+  __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+  __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
+  __ j(not_equal, &check_undefined_arg1);
+  // Get the untagged integer version of the rdx heap number in rcx.
+  IntegerConvert(masm, rdx, use_sse3, conversion_failure);
+  __ movl(rdx, rcx);
+
+  // Here rdx has the untagged integer, rax has a Smi or a heap number.
+  __ bind(&load_arg2);
+  // Test if arg2 is a Smi.
+  __ JumpIfNotSmi(rax, &arg2_is_object);
+  __ SmiToInteger32(rax, rax);
+  __ movl(rcx, rax);
+  __ jmp(&done);
+
+  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
+  __ bind(&check_undefined_arg2);
+  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+  __ j(not_equal, conversion_failure);
+  __ movl(rcx, Immediate(0));
+  __ jmp(&done);
+
+  __ bind(&arg2_is_object);
+  __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+  __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
+  __ j(not_equal, &check_undefined_arg2);
+  // Get the untagged integer version of the rax heap number in rcx.
+  IntegerConvert(masm, rax, use_sse3, conversion_failure);
+  __ bind(&done);
+  __ movl(rax, rdx);
+}
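Annotation: the conversion contract the helper implements (a smi converts directly, undefined converts to zero per the cited ECMA-262 section 9.5, heap numbers go through IntegerConvert, anything else fails) can be sketched with a hypothetical tagged-value type:

    #include <cstdint>
    #include <optional>

    enum class Tag { kSmi, kUndefined, kHeapNumber, kOther };
    struct Value { Tag tag; int32_t smi; double number; };

    std::optional<int32_t> LoadAsInteger(const Value& v) {
      switch (v.tag) {
        case Tag::kSmi:        return v.smi;
        case Tag::kUndefined:  return 0;  // ECMA-262, section 9.5
        case Tag::kHeapNumber:
          // In-range values only; IntegerConvert handles the general case.
          return static_cast<int32_t>(v.number);
        default:               return std::nullopt;  // conversion_failure
      }
    }
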
+
+
 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
                                             Register lhs,
                                             Register rhs) {
@@ -7575,7 +7832,7 @@
     case Token::SHL:
     case Token::SHR:
     case Token::SAR:
-      // Move the second operand into register ecx.
+      // Move the second operand into register rcx.
       __ movq(rcx, rbx);
       // Perform the operation.
       switch (op_) {
@@ -7671,44 +7928,8 @@
     case Token::SAR:
     case Token::SHL:
     case Token::SHR: {
-      FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
-      // TODO(X64): Don't convert a Smi to float and then back to int32
-      // afterwards.
-      FloatingPointHelper::LoadFloatOperands(masm);
-
-      Label skip_allocation, non_smi_result, operand_conversion_failure;
-
-      // Reserve space for converted numbers.
-      __ subq(rsp, Immediate(2 * kPointerSize));
-
-      if (use_sse3_) {
-        // Truncate the operands to 32-bit integers and check for
-        // exceptions in doing so.
-        CpuFeatures::Scope scope(SSE3);
-        __ fisttp_s(Operand(rsp, 0 * kPointerSize));
-        __ fisttp_s(Operand(rsp, 1 * kPointerSize));
-        __ fnstsw_ax();
-        __ testl(rax, Immediate(1));
-        __ j(not_zero, &operand_conversion_failure);
-      } else {
-        // Check if right operand is int32.
-        __ fist_s(Operand(rsp, 0 * kPointerSize));
-        __ fild_s(Operand(rsp, 0 * kPointerSize));
-        __ FCmp();
-        __ j(not_zero, &operand_conversion_failure);
-        __ j(parity_even, &operand_conversion_failure);
-
-        // Check if left operand is int32.
-        __ fist_s(Operand(rsp, 1 * kPointerSize));
-        __ fild_s(Operand(rsp, 1 * kPointerSize));
-        __ FCmp();
-        __ j(not_zero, &operand_conversion_failure);
-        __ j(parity_even, &operand_conversion_failure);
-      }
-
-      // Get int32 operands and perform bitop.
-      __ pop(rcx);
-      __ pop(rax);
+      Label skip_allocation, non_smi_result;
+      FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime);
       switch (op_) {
         case Token::BIT_OR:  __ orl(rax, rcx); break;
         case Token::BIT_AND: __ andl(rax, rcx); break;
@@ -7756,22 +7977,6 @@
         GenerateReturn(masm);
       }
 
-      // Clear the FPU exception flag and reset the stack before calling
-      // the runtime system.
-      __ bind(&operand_conversion_failure);
-      __ addq(rsp, Immediate(2 * kPointerSize));
-      if (use_sse3_) {
-        // If we've used the SSE3 instructions for truncating the
-        // floating point values to integers and it failed, we have a
-        // pending #IA exception. Clear it.
-        __ fnclex();
-      } else {
-        // The non-SSE3 variant does early bailout if the right
-        // operand isn't a 32-bit integer, so we may have a single
-        // value on the FPU stack we need to get rid of.
-        __ ffree(0);
-      }
-
       // SHR should return uint32 - go to runtime for non-smi/negative result.
       if (op_ == Token::SHR) {
         __ bind(&non_smi_result);
@@ -7991,8 +8196,8 @@
   // Both strings are non-empty.
   // rax: first string
   // rbx: length of first string
-  // ecx: length of second string
-  // edx: second string
+  // rcx: length of second string
+  // rdx: second string
   // r8: instance type of first string if string check was performed above
   // r9: instance type of first string if string check was performed above
   Label string_add_flat_result;
@@ -8148,11 +8353,11 @@
 }
 
 
-void StringAddStub::GenerateCopyCharacters(MacroAssembler* masm,
-                                           Register dest,
-                                           Register src,
-                                           Register count,
-                                           bool ascii) {
+void StringStubBase::GenerateCopyCharacters(MacroAssembler* masm,
+                                            Register dest,
+                                            Register src,
+                                            Register count,
+                                            bool ascii) {
   Label loop;
   __ bind(&loop);
   // This loop just copies one character at a time, as it is only used for very
@@ -8173,6 +8378,174 @@
 }
 
 
+void StringStubBase::GenerateCopyCharactersREP(MacroAssembler* masm,
+                                               Register dest,
+                                               Register src,
+                                               Register count,
+                                               bool ascii) {
+  // Copy characters using rep movs of quadwords.  Copy the remaining
+  // (count & 7) bytes with a byte loop after running rep movs.
+  ASSERT(dest.is(rdi));  // rep movs destination
+  ASSERT(src.is(rsi));  // rep movs source
+  ASSERT(count.is(rcx));  // rep movs count
+
+  // Nothing to do for zero characters.
+  Label done;
+  __ testq(count, count);
+  __ j(zero, &done);
+
+  // Make count the number of bytes to copy.
+  if (!ascii) {
+    ASSERT_EQ(2, sizeof(uc16));  // NOLINT
+    __ addq(count, count);
+  }
+
+  // Don't enter the rep movs if there are fewer than 8 bytes to copy.
+  Label last_bytes;
+  __ testq(count, Immediate(~7));
+  __ j(zero, &last_bytes);
+
+  // Copy from rsi to rdi using the rep movs instruction.
+  __ movq(kScratchRegister, count);
+  __ sar(count, Immediate(3));  // Number of quadwords to copy.
+  __ repmovsq();
+
+  // Find number of bytes left.
+  __ movq(count, kScratchRegister);
+  __ and_(count, Immediate(7));
+
+  // Check if there are more bytes to copy.
+  __ bind(&last_bytes);
+  __ testq(count, count);
+  __ j(zero, &done);
+
+  // Copy remaining characters.
+  Label loop;
+  __ bind(&loop);
+  __ movb(kScratchRegister, Operand(src, 0));
+  __ movb(Operand(dest, 0), kScratchRegister);
+  __ addq(src, Immediate(1));
+  __ addq(dest, Immediate(1));
+  __ subq(count, Immediate(1));
+  __ j(not_zero, &loop);
+
+  __ bind(&done);
+}
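Annotation: what the emitted sequence computes, restated in portable C++. rep movsq moves 8-byte units, so the tail is count & 7 bytes; overlapping regions are unsupported, as in the stub:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    void CopyCharactersREPSketch(uint8_t* dest, const uint8_t* src,
                                 size_t count) {
      size_t quadwords = count >> 3;           // sar(count, Immediate(3))
      std::memcpy(dest, src, quadwords * 8);   // repmovsq()
      size_t tail = count & 7;                 // and_(count, Immediate(7))
      std::memcpy(dest + quadwords * 8, src + quadwords * 8, tail);  // byte loop
    }
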
+
+
+void SubStringStub::Generate(MacroAssembler* masm) {
+  Label runtime;
+
+  // Stack frame on entry.
+  //  rsp[0]: return address
+  //  rsp[8]: to
+  //  rsp[16]: from
+  //  rsp[24]: string
+
+  const int kToOffset = 1 * kPointerSize;
+  const int kFromOffset = kToOffset + kPointerSize;
+  const int kStringOffset = kFromOffset + kPointerSize;
+  const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset;
+
+  // Make sure first argument is a string.
+  __ movq(rax, Operand(rsp, kStringOffset));
+  ASSERT_EQ(0, kSmiTag);
+  __ testl(rax, Immediate(kSmiTagMask));
+  __ j(zero, &runtime);
+  Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
+  __ j(NegateCondition(is_string), &runtime);
+
+  // rax: string
+  // rbx: instance type
+  // Calculate length of sub string using the smi values.
+  __ movq(rcx, Operand(rsp, kToOffset));
+  __ movq(rdx, Operand(rsp, kFromOffset));
+  __ JumpIfNotBothPositiveSmi(rcx, rdx, &runtime);
+
+  __ SmiSub(rcx, rcx, rdx, NULL);  // Overflow doesn't happen.
+  __ j(negative, &runtime);
+  // Handle sub-strings of length 2 and less in the runtime system.
+  __ SmiToInteger32(rcx, rcx);
+  __ cmpl(rcx, Immediate(2));
+  __ j(below_equal, &runtime);
+
+  // rax: string
+  // rbx: instance type
+  // rcx: result string length
+  // Check for flat ascii string
+  Label non_ascii_flat;
+  __ and_(rbx, Immediate(kStringRepresentationMask | kStringEncodingMask));
+  __ cmpb(rbx, Immediate(kSeqStringTag | kAsciiStringTag));
+  __ j(not_equal, &non_ascii_flat);
+
+  // Allocate the result.
+  __ AllocateAsciiString(rax, rcx, rbx, rdx, rdi, &runtime);
+
+  // rax: result string
+  // rcx: result string length
+  __ movq(rdx, rsi);  // rsi used by following code.
+  // Locate first character of result.
+  __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize));
+  // Load string argument and locate character of sub string start.
+  __ movq(rsi, Operand(rsp, kStringOffset));
+  __ movq(rbx, Operand(rsp, kFromOffset));
+  {
+    SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_1);
+    __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
+                        SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  }
+
+  // rax: result string
+  // rcx: result length
+  // rdx: original value of rsi
+  // rdi: first character of result
+  // rsi: character of sub string start
+  GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
+  __ movq(rsi, rdx);  // Restore rsi.
+  __ IncrementCounter(&Counters::sub_string_native, 1);
+  __ ret(kArgumentsSize);
+
+  __ bind(&non_ascii_flat);
+  // rax: string
+  // rbx: instance type & (kStringRepresentationMask | kStringEncodingMask)
+  // rcx: result string length
+  // Check for sequential two byte string
+  __ cmpb(rbx, Immediate(kSeqStringTag | kTwoByteStringTag));
+  __ j(not_equal, &runtime);
+
+  // Allocate the result.
+  __ AllocateTwoByteString(rax, rcx, rbx, rdx, rdi, &runtime);
+
+  // rax: result string
+  // rcx: result string length
+  __ movq(rdx, rsi);  // rsi used by following code.
+  // Locate first character of result.
+  __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
+  // Load string argument and locate character of sub string start.
+  __ movq(rsi, Operand(rsp, kStringOffset));
+  __ movq(rbx, Operand(rsp, kFromOffset));
+  {
+    SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_2);
+    __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
+                        SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  }
+
+  // rax: result string
+  // rcx: result length
+  // rdx: original value of rsi
+  // rdi: first character of result
+  // rsi: character of sub string start
+  GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
+  __ movq(rsi, rdx);  // Restore rsi.
+  __ IncrementCounter(&Counters::sub_string_native, 1);
+  __ ret(kArgumentsSize);
+
+  // Just jump to runtime to create the sub string.
+  __ bind(&runtime);
+  __ TailCallRuntime(ExternalReference(Runtime::kSubString), 3, 1);
+}
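Annotation: the guards the stub applies before taking a fast path, collected into one predicate (names hypothetical; from/to are the untagged smi arguments, and everything else goes to Runtime::kSubString):

    bool SubStringFastPathApplies(int from, int to, bool is_flat_sequential) {
      if (from < 0 || to < 0) return false;  // JumpIfNotBothPositiveSmi
      int length = to - from;
      if (length < 0) return false;          // j(negative, &runtime)
      if (length <= 2) return false;         // short results use the runtime
      return is_flat_sequential;             // seq ascii or seq two-byte only
    }
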
+
 
 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                                         Register left,
@@ -8241,7 +8614,6 @@
 
   // Result is EQUAL.
   __ Move(rax, Smi::FromInt(EQUAL));
-  __ IncrementCounter(&Counters::string_compare_native, 1);
   __ ret(2 * kPointerSize);
 
   Label result_greater;
@@ -8251,13 +8623,11 @@
 
   // Result is LESS.
   __ Move(rax, Smi::FromInt(LESS));
-  __ IncrementCounter(&Counters::string_compare_native, 1);
   __ ret(2 * kPointerSize);
 
   // Result is GREATER.
   __ bind(&result_greater);
   __ Move(rax, Smi::FromInt(GREATER));
-  __ IncrementCounter(&Counters::string_compare_native, 1);
   __ ret(2 * kPointerSize);
 }
 
@@ -8287,6 +8657,7 @@
   __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
 
   // Inline comparison of ascii strings.
+  __ IncrementCounter(&Counters::string_compare_native, 1);
   GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
 
   // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 50bb023..72c8416 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -43,57 +43,70 @@
 // -------------------------------------------------------------------------
 // Reference support
 
-// A reference is a C++ stack-allocated object that keeps an ECMA
-// reference on the execution stack while in scope. For variables
-// the reference is empty, indicating that it isn't necessary to
-// store state on the stack for keeping track of references to those.
-// For properties, we keep either one (named) or two (indexed) values
-// on the execution stack to represent the reference.
-
+// A reference is a C++ stack-allocated object that puts a
+// reference on the virtual frame.  The reference may be consumed
+// by GetValue, TakeValue, SetValue, and CodeGenerator::UnloadReference.
+// When the lifetime (scope) of a valid reference ends, it must have
+// been consumed, and be in state UNLOADED.
 class Reference BASE_EMBEDDED {
  public:
   // The values of the types are important, see size().
-  enum Type { ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
-  Reference(CodeGenerator* cgen, Expression* expression);
+  enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
+
+  Reference(CodeGenerator* cgen,
+            Expression* expression,
+            bool persist_after_get = false);
   ~Reference();
 
   Expression* expression() const { return expression_; }
   Type type() const { return type_; }
   void set_type(Type value) {
-    ASSERT(type_ == ILLEGAL);
+    ASSERT_EQ(ILLEGAL, type_);
     type_ = value;
   }
 
+  void set_unloaded() {
+    ASSERT_NE(ILLEGAL, type_);
+    ASSERT_NE(UNLOADED, type_);
+    type_ = UNLOADED;
+  }
   // The size the reference takes up on the stack.
-  int size() const { return (type_ == ILLEGAL) ? 0 : type_; }
+  int size() const {
+    return (type_ < SLOT) ? 0 : type_;
+  }
 
   bool is_illegal() const { return type_ == ILLEGAL; }
   bool is_slot() const { return type_ == SLOT; }
   bool is_property() const { return type_ == NAMED || type_ == KEYED; }
+  bool is_unloaded() const { return type_ == UNLOADED; }
 
   // Return the name.  Only valid for named property references.
   Handle<String> GetName();
 
   // Generate code to push the value of the reference on top of the
   // expression stack.  The reference is expected to be already on top of
-  // the expression stack, and it is left in place with its value above it.
+  // the expression stack, and it is consumed by the call unless the
+  // reference is for a compound assignment.
+  // If the reference is not consumed, it is left in place under its value.
   void GetValue();
 
   // Like GetValue except that the slot is expected to be written to before
-  // being read from again.  Thae value of the reference may be invalidated,
+  // being read from again.  The value of the reference may be invalidated,
   // causing subsequent attempts to read it to fail.
   void TakeValue();
 
   // Generate code to store the value on top of the expression stack in the
   // reference.  The reference is expected to be immediately below the value
-  // on the expression stack.  The stored value is left in place (with the
-  // reference intact below it) to support chained assignments.
+  // on the expression stack.  The value is stored in the location specified
+  // by the reference, and is left on top of the stack, after the reference
+  // is popped from beneath it (unloaded).
   void SetValue(InitState init_state);
 
  private:
   CodeGenerator* cgen_;
   Expression* expression_;
   Type type_;
+  bool persist_after_get_;
 };
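Annotation: a self-contained model of the lifecycle this contract enforces, simplified, with the codegen plumbing replaced by an assert in the destructor:

    #include <cassert>

    enum RefType { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };

    class ReferenceModel {
     public:
      explicit ReferenceModel(RefType type) : type_(type) {}
      ~ReferenceModel() { assert(type_ == UNLOADED || type_ == ILLEGAL); }
      void GetValue(bool persist_after_get) {
        if (!persist_after_get) type_ = UNLOADED;  // an ordinary get consumes
      }
      void SetValue() { type_ = UNLOADED; }        // a store always consumes
     private:
      RefType type_;
    };
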
 
 
@@ -422,6 +435,11 @@
   // value in place.
   void StoreToSlot(Slot* slot, InitState init_state);
 
+  // Load a keyed property of an object, returning it in a Result.
+  // The receiver and key are passed on the stack, and are left in
+  // place on return.
+  Result EmitKeyedLoad(bool is_global);
+
   // Special code for typeof expressions: Unfortunately, we must
   // be careful when loading the expression in 'typeof'
   // expressions. We are not allowed to throw reference errors for
@@ -446,20 +464,20 @@
 
   // Emit code to perform a binary operation on a constant
   // smi and a likely smi.  Consumes the Result *operand.
-  void ConstantSmiBinaryOperation(Token::Value op,
-                                  Result* operand,
-                                  Handle<Object> constant_operand,
-                                  StaticType* type,
-                                  bool reversed,
-                                  OverwriteMode overwrite_mode);
+  Result ConstantSmiBinaryOperation(Token::Value op,
+                                    Result* operand,
+                                    Handle<Object> constant_operand,
+                                    StaticType* type,
+                                    bool reversed,
+                                    OverwriteMode overwrite_mode);
 
   // Emit code to perform a binary operation on two likely smis.
   // The code to handle smi arguments is produced inline.
   // Consumes the Results *left and *right.
-  void LikelySmiBinaryOperation(Token::Value op,
-                                Result* left,
-                                Result* right,
-                                OverwriteMode overwrite_mode);
+  Result LikelySmiBinaryOperation(Token::Value op,
+                                  Result* left,
+                                  Result* right,
+                                  OverwriteMode overwrite_mode);
 
   void Comparison(Condition cc,
                   bool strict,
@@ -478,10 +496,10 @@
                          CallFunctionFlags flags,
                          int position);
 
-  // Use an optimized version of Function.prototype.apply that avoid
-  // allocating the arguments object and just copies the arguments
-  // from the stack.
-  void CallApplyLazy(Property* apply,
+  // An optimized implementation of expressions of the form
+  // x.apply(y, arguments).  We call x the applicand and y the receiver.
+  // The optimization avoids allocating an arguments object if possible.
+  void CallApplyLazy(Expression* applicand,
                      Expression* receiver,
                      VariableProxy* arguments,
                      int position);
@@ -514,6 +532,7 @@
   void GenerateIsArray(ZoneList<Expression*>* args);
   void GenerateIsObject(ZoneList<Expression*>* args);
   void GenerateIsFunction(ZoneList<Expression*>* args);
+  void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
 
   // Support for construct call checks.
   void GenerateIsConstructCall(ZoneList<Expression*>* args);
@@ -610,8 +629,8 @@
   friend class JumpTarget;
   friend class Reference;
   friend class Result;
-  friend class FastCodeGenerator;
-  friend class CodeGenSelector;
+  friend class FullCodeGenerator;
+  friend class FullCodeGenSyntaxChecker;
 
   friend class CodeGeneratorPatcher;  // Used in test-log-stack-tracer.cc
 
@@ -712,6 +731,29 @@
 };
 
 
+class StringStubBase: public CodeStub {
+ public:
+  // Generate code for copying characters using a simple loop. This should only
+  // be used in places where the number of characters is small and the
+  // additional setup and checking in GenerateCopyCharactersREP adds too much
+  // overhead. Copying of overlapping regions is not supported.
+  void GenerateCopyCharacters(MacroAssembler* masm,
+                              Register dest,
+                              Register src,
+                              Register count,
+                              bool ascii);
+
+  // Generate code for copying characters using the rep movs instruction.
+  // Copies rcx characters from rsi to rdi. Copying of overlapping regions is
+  // not supported.
+  void GenerateCopyCharactersREP(MacroAssembler* masm,
+                                 Register dest,     // Must be rdi.
+                                 Register src,      // Must be rsi.
+                                 Register count,    // Must be rcx.
+                                 bool ascii);
+};
+
+
 // Flag that indicates how to generate code for the stub StringAddStub.
 enum StringAddFlags {
   NO_STRING_ADD_FLAGS = 0,
@@ -719,7 +761,7 @@
 };
 
 
-class StringAddStub: public CodeStub {
+class StringAddStub: public StringStubBase {
  public:
   explicit StringAddStub(StringAddFlags flags) {
     string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
@@ -731,17 +773,23 @@
 
   void Generate(MacroAssembler* masm);
 
-  void GenerateCopyCharacters(MacroAssembler* masm,
-                              Register desc,
-                              Register src,
-                              Register count,
-                              bool ascii);
-
   // Should the stub check whether arguments are strings?
   bool string_check_;
 };
 
 
+class SubStringStub: public StringStubBase {
+ public:
+  SubStringStub() {}
+
+ private:
+  Major MajorKey() { return SubString; }
+  int MinorKey() { return 0; }
+
+  void Generate(MacroAssembler* masm);
+};
+
+
 class StringCompareStub: public CodeStub {
  public:
   explicit StringCompareStub() {}
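
A note on the two copy helpers that StringStubBase introduces above: the loop variant is meant for short, known-small counts, while the rep-movs variant amortizes its setup over long runs and pins the rdi/rsi/rcx registers. A standalone C++ sketch of the semantics (illustrative only, not V8 code; the inline asm is GCC/Clang x86-64 specific):

    #include <cstddef>
    #include <cstdint>

    // Sketch of what the two stubs' generated code does for one-byte strings.
    void CopyCharsLoop(uint8_t* dest, const uint8_t* src, size_t count) {
      // Cheap for small counts: no setup, one move per character.
      // Overlapping regions are not supported, matching the stub's contract.
      while (count-- > 0) *dest++ = *src++;
    }

    void CopyCharsRep(uint8_t* dest, const uint8_t* src, size_t count) {
    #if defined(__GNUC__) && defined(__x86_64__)
      // "rep movsb" copies rcx bytes from [rsi] to [rdi], advancing all three
      // registers -- hence the fixed dest/src/count register requirements.
      asm volatile("rep movsb"
                   : "+D"(dest), "+S"(src), "+c"(count)
                   :
                   : "memory");
    #else
      while (count-- > 0) *dest++ = *src++;  // Portable fallback.
    #endif
    }
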
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 0b43e76..ce3aae8 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -114,6 +114,10 @@
   { 0x9E, UNSET_OP_ORDER, "sahf" },
   { 0x99, UNSET_OP_ORDER, "cdq" },
   { 0x9B, UNSET_OP_ORDER, "fwait" },
+  { 0xA4, UNSET_OP_ORDER, "movs" },
+  { 0xA5, UNSET_OP_ORDER, "movs" },
+  { 0xA6, UNSET_OP_ORDER, "cmps" },
+  { 0xA7, UNSET_OP_ORDER, "cmps" },
   { -1, UNSET_OP_ORDER, "" }
 };
 
@@ -157,6 +161,16 @@
 };
 
 
+enum Prefixes {
+  ESCAPE_PREFIX = 0x0F,
+  OPERAND_SIZE_OVERRIDE_PREFIX = 0x66,
+  ADDRESS_SIZE_OVERRIDE_PREFIX = 0x67,
+  REPNE_PREFIX = 0xF2,
+  REP_PREFIX = 0xF3,
+  REPEQ_PREFIX = REP_PREFIX
+};
+
+
 struct InstructionDesc {
   const char* mnem;
   InstructionType type;
@@ -1128,12 +1142,12 @@
   // Scan for prefixes.
   while (true) {
     current = *data;
-    if (current == 0x66) {  // Group 3 prefix.
+    if (current == OPERAND_SIZE_OVERRIDE_PREFIX) {  // Group 3 prefix.
       operand_size_ = current;
     } else if ((current & 0xF0) == 0x40) {  // REX prefix.
       setRex(current);
       if (rex_w()) AppendToBuffer("REX.W ");
-    } else if ((current & 0xFE) == 0xF2) {  // Group 1 prefix.
+    } else if ((current & 0xFE) == 0xF2) {  // Group 1 prefix (0xF2 or 0xF3).
       group_1_prefix_ = current;
     } else {  // Not a prefix - an opcode.
       break;
@@ -1145,7 +1159,17 @@
   byte_size_operand_ = idesc.byte_size_operation;
   switch (idesc.type) {
     case ZERO_OPERANDS_INSTR:
-      AppendToBuffer(idesc.mnem);
+      if (current >= 0xA4 && current <= 0xA7) {
+        // String move or compare operations.
+        if (group_1_prefix_ == REP_PREFIX) {
+          // REP.
+          AppendToBuffer("rep ");
+        }
+        if (rex_w()) AppendToBuffer("REX.W ");
+        AppendToBuffer("%s%c", idesc.mnem, operand_size_code());
+      } else {
+        AppendToBuffer("%s", idesc.mnem);
+      }
       data++;
       break;
 
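
The string-op handling above combines three pieces of decoder state: the group-1 prefix (rep/repne), the REX.W bit, and the operand-size suffix. A minimal standalone sketch of that scan (hypothetical helper, not the real disassembler, which also tracks the 0x66 operand-size prefix):

    #include <cstdint>
    #include <cstdio>

    const char* DecodeStringMove(const uint8_t* data) {
      static char buffer[32];
      bool rep = false, rex_w = false;
      uint8_t current;
      while (true) {
        current = *data;
        if ((current & 0xFE) == 0xF2) {         // Group 1 prefix (0xF2/0xF3).
          rep = (current == 0xF3);
        } else if ((current & 0xF0) == 0x40) {  // REX prefix.
          rex_w = (current & 0x08) != 0;        // The W bit selects 64-bit size.
        } else {
          break;                                // Not a prefix: the opcode.
        }
        data++;
      }
      if (current == 0xA5) {  // movs, word-or-wider form.
        std::snprintf(buffer, sizeof(buffer), "%s%s",
                      rep ? "rep " : "", rex_w ? "movsq" : "movsl");
        return buffer;
      }
      return "<unhandled>";
    }

    // Example: the byte sequence {0xF3, 0x48, 0xA5} decodes as "rep movsq".
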
diff --git a/src/x64/fast-codegen-x64.cc b/src/x64/full-codegen-x64.cc
similarity index 90%
rename from src/x64/fast-codegen-x64.cc
rename to src/x64/full-codegen-x64.cc
index 0f28433..3755109 100644
--- a/src/x64/fast-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -30,7 +30,7 @@
 #include "codegen-inl.h"
 #include "compiler.h"
 #include "debug.h"
-#include "fast-codegen.h"
+#include "full-codegen.h"
 #include "parser.h"
 
 namespace v8 {
@@ -51,7 +51,7 @@
 //
 // The function builds a JS frame.  Please see JavaScriptFrameConstants in
 // frames-x64.h for its layout.
-void FastCodeGenerator::Generate(FunctionLiteral* fun) {
+void FullCodeGenerator::Generate(FunctionLiteral* fun) {
   function_ = fun;
   SetFunctionPosition(fun);
 
@@ -161,7 +161,7 @@
 }
 
 
-void FastCodeGenerator::EmitReturnSequence(int position) {
+void FullCodeGenerator::EmitReturnSequence(int position) {
   Comment cmnt(masm_, "[ Return sequence");
   if (return_label_.is_bound()) {
     __ jmp(&return_label_);
@@ -200,7 +200,7 @@
 }
 
 
-void FastCodeGenerator::Apply(Expression::Context context, Register reg) {
+void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
   switch (context) {
     case Expression::kUninitialized:
       UNREACHABLE();
@@ -243,7 +243,7 @@
 }
 
 
-void FastCodeGenerator::Apply(Expression::Context context, Slot* slot) {
+void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
   switch (context) {
     case Expression::kUninitialized:
       UNREACHABLE();
@@ -285,7 +285,7 @@
 }
 
 
-void FastCodeGenerator::Apply(Expression::Context context, Literal* lit) {
+void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
   switch (context) {
     case Expression::kUninitialized:
       UNREACHABLE();
@@ -324,7 +324,7 @@
 }
 
 
-void FastCodeGenerator::ApplyTOS(Expression::Context context) {
+void FullCodeGenerator::ApplyTOS(Expression::Context context) {
   switch (context) {
     case Expression::kUninitialized:
       UNREACHABLE();
@@ -364,7 +364,7 @@
 }
 
 
-void FastCodeGenerator::DropAndApply(int count,
+void FullCodeGenerator::DropAndApply(int count,
                                      Expression::Context context,
                                      Register reg) {
   ASSERT(count > 0);
@@ -415,7 +415,7 @@
 }
 
 
-void FastCodeGenerator::Apply(Expression::Context context,
+void FullCodeGenerator::Apply(Expression::Context context,
                               Label* materialize_true,
                               Label* materialize_false) {
   switch (context) {
@@ -480,7 +480,7 @@
 }
 
 
-void FastCodeGenerator::DoTest(Expression::Context context) {
+void FullCodeGenerator::DoTest(Expression::Context context) {
   // The value to test is in the accumulator.  If the value might be needed
   // on the stack (value/test and test/value contexts with a stack location
   // desired), then the value is already duplicated on the stack.
@@ -614,7 +614,7 @@
 }
 
 
-MemOperand FastCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
+MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
   switch (slot->type()) {
     case Slot::PARAMETER:
     case Slot::LOCAL:
@@ -633,13 +633,13 @@
 }
 
 
-void FastCodeGenerator::Move(Register destination, Slot* source) {
+void FullCodeGenerator::Move(Register destination, Slot* source) {
   MemOperand location = EmitSlotSearch(source, destination);
   __ movq(destination, location);
 }
 
 
-void FastCodeGenerator::Move(Slot* dst,
+void FullCodeGenerator::Move(Slot* dst,
                              Register src,
                              Register scratch1,
                              Register scratch2) {
@@ -655,7 +655,7 @@
 }
 
 
-void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
+void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
   Comment cmnt(masm_, "[ Declaration");
   Variable* var = decl->proxy()->var();
   ASSERT(var != NULL);  // Must have been resolved.
@@ -754,7 +754,7 @@
 }
 
 
-void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.
   __ push(rsi);  // The context is the first argument.
   __ Push(pairs);
@@ -764,7 +764,7 @@
 }
 
 
-void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
   Comment cmnt(masm_, "[ FunctionLiteral");
 
   // Build the function boilerplate and instantiate it.
@@ -782,17 +782,21 @@
 }
 
 
-void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
   Comment cmnt(masm_, "[ VariableProxy");
   EmitVariableLoad(expr->var(), context_);
 }
 
 
-void FastCodeGenerator::EmitVariableLoad(Variable* var,
+void FullCodeGenerator::EmitVariableLoad(Variable* var,
                                          Expression::Context context) {
-  Expression* rewrite = var->rewrite();
-  if (rewrite == NULL) {
-    ASSERT(var->is_global());
+  // Four cases: non-this global variables, lookup slots, all other
+  // types of slots, and parameters that rewrite to explicit property
+  // accesses on the arguments object.
+  Slot* slot = var->slot();
+  Property* property = var->AsProperty();
+
+  if (var->is_global() && !var->is_this()) {
     Comment cmnt(masm_, "Global variable");
     // Use inline caching. Variable name is passed in rcx and the global
     // object on the stack.
@@ -805,34 +809,24 @@
     // is no test rax instruction here.
     __ nop();
     DropAndApply(1, context, rax);
-  } else if (rewrite->AsSlot() != NULL) {
-    Slot* slot = rewrite->AsSlot();
-    if (FLAG_debug_code) {
-      switch (slot->type()) {
-        case Slot::PARAMETER:
-        case Slot::LOCAL: {
-          Comment cmnt(masm_, "Stack slot");
-          break;
-        }
-        case Slot::CONTEXT: {
-          Comment cmnt(masm_, "Context slot");
-          break;
-        }
-        case Slot::LOOKUP:
-          UNIMPLEMENTED();
-          break;
-      }
-    }
-    Apply(context, slot);
-  } else {
-    Comment cmnt(masm_, "Variable rewritten to property");
-    // A variable has been rewritten into an explicit access to an object
-    // property.
-    Property* property = rewrite->AsProperty();
-    ASSERT_NOT_NULL(property);
 
-    // The only property expressions that can occur are of the form
-    // "slot[literal]".
+  } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+    Comment cmnt(masm_, "Lookup slot");
+    __ push(rsi);  // Context.
+    __ Push(var->name());
+    __ CallRuntime(Runtime::kLoadContextSlot, 2);
+    Apply(context, rax);
+
+  } else if (slot != NULL) {
+    Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
+                            ? "Context slot"
+                            : "Stack slot");
+    Apply(context, slot);
+
+  } else {
+    Comment cmnt(masm_, "Rewritten parameter");
+    ASSERT_NOT_NULL(property);
+    // Rewritten parameter accesses are of the form "slot[literal]".
 
     // Assert that the object is in a slot.
     Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
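
The control flow of the rewritten EmitVariableLoad reduces to a four-way dispatch. A distilled C++ sketch of that shape (the stand-in enum and strings are illustrative, not V8 types):

    enum class VarKind {
      kNonThisGlobal, kLookupSlot, kStackOrContextSlot, kRewrittenParameter
    };

    const char* LoadStrategy(VarKind kind) {
      switch (kind) {
        case VarKind::kNonThisGlobal:       // Inline-cached load, name in rcx.
          return "global IC load";
        case VarKind::kLookupSlot:          // Dynamic scope: ask the runtime.
          return "Runtime::kLoadContextSlot";
        case VarKind::kStackOrContextSlot:  // Direct memory operand.
          return "EmitSlotSearch + Apply";
        case VarKind::kRewrittenParameter:  // "slot[literal]" on arguments.
          return "keyed load from arguments object";
      }
      return "";
    }
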
@@ -864,7 +858,7 @@
 }
 
 
-void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
   Comment cmnt(masm_, "[ RegExpLiteral");
   Label done;
   // Registers will be used as follows:
@@ -890,7 +884,7 @@
 }
 
 
-void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   Comment cmnt(masm_, "[ ObjectLiteral");
   __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
   __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
@@ -960,7 +954,7 @@
 }
 
 
-void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   Comment cmnt(masm_, "[ ArrayLiteral");
   __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
   __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
@@ -1010,7 +1004,7 @@
 }
 
 
-void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Literal* key = prop->key()->AsLiteral();
   __ Move(rcx, key->handle());
@@ -1020,7 +1014,7 @@
 }
 
 
-void FastCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
   __ Call(ic, RelocInfo::CODE_TARGET);
@@ -1028,7 +1022,7 @@
 }
 
 
-void FastCodeGenerator::EmitBinaryOp(Token::Value op,
+void FullCodeGenerator::EmitBinaryOp(Token::Value op,
                                      Expression::Context context) {
   __ push(result_register());
   GenericBinaryOpStub stub(op,
@@ -1039,11 +1033,16 @@
 }
 
 
-void FastCodeGenerator::EmitVariableAssignment(Variable* var,
+void FullCodeGenerator::EmitVariableAssignment(Variable* var,
                                                Expression::Context context) {
+  // Three main cases: non-this global variables, lookup slots, and
+  // all other types of slots.  Left-hand-side parameters that rewrite
+  // to explicit property accesses do not reach here.
   ASSERT(var != NULL);
   ASSERT(var->is_global() || var->slot() != NULL);
+  Slot* slot = var->slot();
   if (var->is_global()) {
+    ASSERT(!var->is_this());
     // Assignment to a global variable.  Use inline caching for the
     // assignment.  Right-hand-side value is passed in rax, variable name in
     // rcx, and the global object on the stack.
@@ -1054,8 +1053,14 @@
     // Overwrite the global object on the stack with the result if needed.
     DropAndApply(1, context, rax);
 
+  } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+    __ push(result_register());  // Value.
+    __ push(rsi);  // Context.
+    __ Push(var->name());
+    __ CallRuntime(Runtime::kStoreContextSlot, 3);
+    Apply(context, rax);
+
   } else if (var->slot() != NULL) {
-    Slot* slot = var->slot();
     switch (slot->type()) {
       case Slot::LOCAL:
       case Slot::PARAMETER:
@@ -1078,6 +1083,7 @@
         break;
     }
     Apply(context, result_register());
+
   } else {
     // Variables rewritten as properties are not treated as variables in
     // assignments.
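
Both lookup-slot paths (the load shown earlier and the store here) follow the same runtime-call convention: arguments pushed in order, consumed by the callee, result returned in rax. A toy model of that stack discipline (plain C++, not V8 machinery):

    #include <cstdio>
    #include <string>
    #include <vector>

    struct ToyFrame {
      std::vector<std::string> stack;
      std::string rax;  // Models the result register.

      void Push(const std::string& v) { stack.push_back(v); }

      void CallRuntime(const std::string& name, int argc) {
        std::string args;
        for (int i = 0; i < argc; i++) {  // The callee pops its own arguments.
          args = stack.back() + (args.empty() ? "" : ", ") + args;
          stack.pop_back();
        }
        rax = name + "(" + args + ")";
      }
    };

    int main() {
      ToyFrame f;
      f.Push("value"); f.Push("context"); f.Push("name");  // Store path order.
      f.CallRuntime("kStoreContextSlot", 3);
      std::printf("%s\n", f.rax.c_str());  // kStoreContextSlot(value, context, name)
    }
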
@@ -1086,7 +1092,7 @@
 }
 
 
-void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
   // Assignment to a property, using a named store IC.
   Property* prop = expr->target()->AsProperty();
   ASSERT(prop != NULL);
@@ -1121,7 +1127,7 @@
 }
 
 
-void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
   // Assignment to a property, using a keyed store IC.
 
   // If the assignment starts a block of assignments to the same object,
@@ -1157,7 +1163,7 @@
 }
 
 
-void FastCodeGenerator::VisitProperty(Property* expr) {
+void FullCodeGenerator::VisitProperty(Property* expr) {
   Comment cmnt(masm_, "[ Property");
   Expression* key = expr->key();
 
@@ -1177,7 +1183,7 @@
 }
 
 
-void FastCodeGenerator::EmitCallWithIC(Call* expr,
+void FullCodeGenerator::EmitCallWithIC(Call* expr,
                                        Handle<Object> ignored,
                                        RelocInfo::Mode mode) {
   // Code common for calls using the IC.
@@ -1200,7 +1206,7 @@
 }
 
 
-void FastCodeGenerator::EmitCallWithStub(Call* expr) {
+void FullCodeGenerator::EmitCallWithStub(Call* expr) {
   // Code common for calls using the call stub.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
@@ -1218,7 +1224,7 @@
 }
 
 
-void FastCodeGenerator::VisitCall(Call* expr) {
+void FullCodeGenerator::VisitCall(Call* expr) {
   Comment cmnt(masm_, "[ Call");
   Expression* fun = expr->expression();
   Variable* var = fun->AsVariableProxy()->AsVariable();
@@ -1280,7 +1286,7 @@
     if (lit != NULL &&
         lit->name()->Equals(Heap::empty_string()) &&
         loop_depth() == 0) {
-      lit->set_try_fast_codegen(true);
+      lit->set_try_full_codegen(true);
     }
     VisitForValue(fun, kStack);
     // Load global receiver object.
@@ -1292,7 +1298,7 @@
 }
 
 
-void FastCodeGenerator::VisitCallNew(CallNew* expr) {
+void FullCodeGenerator::VisitCallNew(CallNew* expr) {
   Comment cmnt(masm_, "[ CallNew");
   // According to ECMA-262, section 11.2.2, page 44, the function
   // expression in new calls must be evaluated before the
@@ -1327,7 +1333,7 @@
 }
 
 
-void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
   Comment cmnt(masm_, "[ CallRuntime");
   ZoneList<Expression*>* args = expr->arguments();
 
@@ -1360,7 +1366,7 @@
 }
 
 
-void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
   switch (expr->op()) {
     case Token::VOID: {
       Comment cmnt(masm_, "[ UnaryOperation (VOID)");
@@ -1464,13 +1470,27 @@
       break;
     }
 
+    case Token::ADD: {
+      Comment cmnt(masm_, "[ UnaryOperation (ADD)");
+      VisitForValue(expr->expression(), kAccumulator);
+      Label no_conversion;
+      Condition is_smi = masm_->CheckSmi(result_register());
+      __ j(is_smi, &no_conversion);
+      __ push(result_register());
+      __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+      __ bind(&no_conversion);
+      Apply(context_, result_register());
+      break;
+    }
+
     default:
       UNREACHABLE();
   }
 }
 
 
-void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
+void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
   Comment cmnt(masm_, "[ CountOperation");
 
   // Expression can only be a property, a global or a (parameter or local)
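
The new Token::ADD case is a pure ToNumber with a smi fast path: smis are already numbers, and their zero tag bit makes the check a single test. A hedged standalone sketch (the tagging scheme here is simplified to a one-bit tag):

    #include <cstdint>

    static inline bool IsSmi(intptr_t tagged) { return (tagged & 1) == 0; }

    // Models the generated code: skip the expensive conversion when the
    // operand is already a smi; otherwise call out (Builtins::TO_NUMBER).
    intptr_t UnaryPlus(intptr_t tagged, intptr_t (*to_number)(intptr_t)) {
      if (IsSmi(tagged)) return tagged;  // j(is_smi, &no_conversion)
      return to_number(tagged);          // InvokeBuiltin(TO_NUMBER, ...)
    }
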
@@ -1489,7 +1509,7 @@
   if (assign_type == VARIABLE) {
     ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
     Location saved_location = location_;
-    location_ = kStack;
+    location_ = kAccumulator;
     EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
                      Expression::kValue);
     location_ = saved_location;
@@ -1505,11 +1525,16 @@
       VisitForValue(prop->key(), kStack);
       EmitKeyedPropertyLoad(prop);
     }
-    __ push(rax);
   }
 
-  // Convert to number.
+  // Call ToNumber only if operand is not a smi.
+  Label no_conversion;
+  Condition is_smi = masm_->CheckSmi(rax);
+  __ j(is_smi, &no_conversion);
+  __ push(rax);
   __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+  __ bind(&no_conversion);
 
   // Save result for postfix expressions.
   if (expr->is_postfix()) {
@@ -1541,6 +1566,27 @@
     }
   }
 
+  // Inline smi case if we are in a loop.
+  Label stub_call, done;
+  if (loop_depth() > 0) {
+    if (expr->op() == Token::INC) {
+      __ SmiAddConstant(rax, rax, Smi::FromInt(1));
+    } else {
+      __ SmiSubConstant(rax, rax, Smi::FromInt(1));
+    }
+    __ j(overflow, &stub_call);
+    // We could eliminate this smi check if we split the code at
+    // the first smi check before calling ToNumber.
+    is_smi = masm_->CheckSmi(rax);
+    __ j(is_smi, &done);
+    __ bind(&stub_call);
+    // Call stub. Undo operation first.
+    if (expr->op() == Token::INC) {
+      __ SmiSubConstant(rax, rax, Smi::FromInt(1));
+    } else {
+      __ SmiAddConstant(rax, rax, Smi::FromInt(1));
+    }
+  }
   // Call stub for +1/-1.
   __ push(rax);
   __ Push(Smi::FromInt(1));
@@ -1548,6 +1594,7 @@
                            NO_OVERWRITE,
                            NO_GENERIC_BINARY_FLAGS);
   __ CallStub(&stub);
+  __ bind(&done);
 
   // Store the value returned in rax.
   switch (assign_type) {
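
The inlined count operation is the classic speculate-and-undo pattern: add or subtract one, and on overflow restore the original value before handing it to the generic stub. A standalone sketch using GCC/Clang overflow builtins (illustrative; the generated code mutates rax in place, which is why it must explicitly undo):

    #include <cstdint>

    int32_t IncDecFast(int32_t value, bool increment,
                       int32_t (*generic_stub)(int32_t, bool)) {
      int32_t result;
      bool overflow = increment
          ? __builtin_add_overflow(value, 1, &result)   // SmiAddConstant
          : __builtin_sub_overflow(value, 1, &result);  // SmiSubConstant
      if (!overflow) return result;  // Fast path: overflow branch not taken.
      // The undo step is implicit here because 'value' was never clobbered;
      // the emitted code adds/subtracts the 1 back before calling the stub.
      return generic_stub(value, increment);
    }
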
@@ -1601,7 +1648,7 @@
   }
 }
 
-void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
   Comment cmnt(masm_, "[ BinaryOperation");
   switch (expr->op()) {
     case Token::COMMA:
@@ -1636,7 +1683,7 @@
 }
 
 
-void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
   Comment cmnt(masm_, "[ CompareOperation");
 
   // Always perform the comparison for its control flow.  Pack the result
@@ -1748,25 +1795,25 @@
 }
 
 
-void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
   __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
   Apply(context_, rax);
 }
 
 
-Register FastCodeGenerator::result_register() { return rax; }
+Register FullCodeGenerator::result_register() { return rax; }
 
 
-Register FastCodeGenerator::context_register() { return rsi; }
+Register FullCodeGenerator::context_register() { return rsi; }
 
 
-void FastCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
   ASSERT(IsAligned(frame_offset, kPointerSize));
   __ movq(Operand(rbp, frame_offset), value);
 }
 
 
-void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
+void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
   __ movq(dst, CodeGenerator::ContextOperand(rsi, context_index));
 }
 
@@ -1775,7 +1822,7 @@
 // Non-local control flow support.
 
 
-void FastCodeGenerator::EnterFinallyBlock() {
+void FullCodeGenerator::EnterFinallyBlock() {
   ASSERT(!result_register().is(rdx));
   ASSERT(!result_register().is(rcx));
   // Cook return address on top of stack (smi encoded Code* delta)
@@ -1789,7 +1836,7 @@
 }
 
 
-void FastCodeGenerator::ExitFinallyBlock() {
+void FullCodeGenerator::ExitFinallyBlock() {
   ASSERT(!result_register().is(rdx));
   ASSERT(!result_register().is(rcx));
   // Restore result register from stack.
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 457ece5..e293247 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -271,11 +271,10 @@
   ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
   __ CmpObjectType(rcx, JS_OBJECT_TYPE, rdx);
   __ j(below, &slow);
-  // Check that the receiver does not require access checks.  We need
-  // to check this explicitly since this generic stub does not perform
-  // map checks.  The map is already in rdx.
+
+  // Check bit field.
   __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
-           Immediate(1 << Map::kIsAccessCheckNeeded));
+           Immediate(kSlowCaseBitFieldMask));
   __ j(not_zero, &slow);
 
   // Check that the key is a smi.
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 65a408b..b06b8c8 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -581,6 +581,20 @@
 }
 
 
+Condition MacroAssembler::CheckBothPositiveSmi(Register first,
+                                               Register second) {
+  if (first.is(second)) {
+    return CheckPositiveSmi(first);
+  }
+  // A positive smi has both the sign bit (63) and the smi tag bit (0)
+  // clear.  Or the two values together (64-bit, since the smi payload and
+  // sign live in the upper half of the register), rotate the sign bit into
+  // bit 0 and the tag bit into bit 1, then test both with a single mask.
+  movq(kScratchRegister, first);
+  or_(kScratchRegister, second);
+  rol(kScratchRegister, Immediate(1));
+  testl(kScratchRegister, Immediate(0x03));
+  return zero;
+}
+
+
 Condition MacroAssembler::CheckEitherSmi(Register first, Register second) {
   if (first.is(second)) {
     return CheckSmi(first);
@@ -660,7 +674,17 @@
                             Register src2,
                             Label* on_not_smi_result) {
   ASSERT(!dst.is(src2));
-  if (dst.is(src1)) {
+  if (on_not_smi_result == NULL) {
+    // No overflow checking. Use only when it's known that
+    // overflowing is impossible (e.g., subtracting two positive smis).
+    if (dst.is(src1)) {
+      subq(dst, src2);
+    } else {
+      movq(dst, src1);
+      subq(dst, src2);
+    }
+    Assert(no_overflow, "Smi subtraction overflow");
+  } else if (dst.is(src1)) {
     subq(dst, src2);
     Label smi_result;
     j(no_overflow, &smi_result);
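
SmiSub now has two modes, selected by passing NULL for the bailout label when the caller can prove overflow is impossible (e.g. both operands are positive smis). A standalone sketch of the contract (GCC/Clang overflow builtin; not the emitted code):

    #include <cassert>
    #include <cstdint>

    // bailout == NULL means "caller guarantees no overflow"; debug builds
    // still check, mirroring the Assert(no_overflow, ...) above.
    bool SmiSubSketch(int64_t src1, int64_t src2, int64_t* dst, bool* bailout) {
      int64_t result;
      bool overflow = __builtin_sub_overflow(src1, src2, &result);
      if (bailout == NULL) {
        assert(!overflow && "Smi subtraction overflow");
        *dst = result;
        return true;
      }
      *bailout = overflow;          // Models j(overflow, on_not_smi_result).
      if (!overflow) *dst = result;
      return !overflow;
    }
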
@@ -1292,6 +1316,14 @@
 }
 
 
+void MacroAssembler::JumpIfNotBothPositiveSmi(Register src1, Register src2,
+                                              Label* on_not_both_smi) {
+  Condition both_smi = CheckBothPositiveSmi(src1, src2);
+  j(NegateCondition(both_smi), on_not_both_smi);
+}
+
+
 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
                                                          Register second_object,
                                                          Register scratch1,
@@ -1311,8 +1343,7 @@
   ASSERT(kNotStringTag != 0);
   const int kFlatAsciiStringMask =
       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
-  const int kFlatAsciiStringBits =
-      kNotStringTag | kSeqStringTag | kAsciiStringTag;
+  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
 
   andl(scratch1, Immediate(kFlatAsciiStringMask));
   andl(scratch2, Immediate(kFlatAsciiStringMask));
@@ -1320,7 +1351,7 @@
   ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
   lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
   cmpl(scratch1,
-       Immediate(kFlatAsciiStringBits + (kFlatAsciiStringBits << 3)));
+       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
   j(not_equal, on_fail);
 }
 
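
The lea in this check packs both masked instance types into one register (scratch1 + scratch2 * 8) so that a single compare validates both strings; the ASSERT_EQ guarantees the shifted field cannot carry into the unshifted one. A worked C++ model (the concrete mask and tag values are assumptions based on the era's headers, not authoritative):

    #include <cassert>
    #include <cstdint>

    const uint32_t kMask = 0x87;  // not-string | representation | encoding (assumed)
    const uint32_t kTag  = 0x04;  // ASCII_STRING_TYPE & kMask (assumed)

    bool BothFlatAscii(uint32_t type1, uint32_t type2) {
      // No overlap between the mask and its shifted copy, so the addition
      // below cannot carry between the two packed fields.
      assert((kMask & (kMask << 3)) == 0);
      uint32_t combined = (type1 & kMask) + ((type2 & kMask) << 3);
      return combined == kTag + (kTag << 3);  // One compare checks both.
    }
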
@@ -1518,6 +1549,17 @@
 }
 
 
+Condition MacroAssembler::IsObjectStringType(Register heap_object,
+                                             Register map,
+                                             Register instance_type) {
+  movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+  movzxbq(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
+  ASSERT(kNotStringTag != 0);
+  testb(instance_type, Immediate(kIsNotStringMask));
+  return zero;
+}
+
+
 void MacroAssembler::TryGetFunctionPrototype(Register function,
                                              Register result,
                                              Label* miss) {
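
A plausible call site for the new helper, written in the surrounding macro-assembler idiom (hypothetical label and register choices, not code from this patch; the helper returns the zero condition, set when the object is a string):

    Label slow;
    // map and instance_type may alias; here rbx receives the instance type.
    Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
    masm->j(NegateCondition(is_string), &slow);  // Bail out for non-strings.
    // rbx now holds the instance type for further dispatch.
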
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index ce2848c..8d4a8f2 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -207,6 +207,9 @@
   // Are both values tagged smis.
   Condition CheckBothSmi(Register first, Register second);
 
+  // Are both values tagged positive smis.
+  Condition CheckBothPositiveSmi(Register first, Register second);
+
   // Are either value a tagged smi.
   Condition CheckEitherSmi(Register first, Register second);
 
@@ -248,6 +251,10 @@
   // Jump if either or both register are not smi values.
   void JumpIfNotBothSmi(Register src1, Register src2, Label* on_not_both_smi);
 
+  // Jump if either or both registers are not positive smi values.
+  void JumpIfNotBothPositiveSmi(Register src1, Register src2,
+                                Label* on_not_both_smi);
+
   // Operations on tagged smi values.
 
   // Smis represent a subset of integers. The subset is always equivalent to
@@ -452,6 +459,15 @@
   // Always use unsigned comparisons: above and below, not less and greater.
   void CmpInstanceType(Register map, InstanceType type);
 
+  // Check if the object in register heap_object is a string. Afterwards the
+  // register map contains the object map and the register instance_type
+  // contains the instance type. The registers map and instance_type can be
+  // the same, in which case the register contains the instance type
+  // afterwards. Either of them can also be the same as heap_object.
+  Condition IsObjectStringType(Register heap_object,
+                               Register map,
+                               Register instance_type);
+
   // FCmp is similar to integer cmp, but requires unsigned
   // jcc instructions (je, ja, jae, jb, jbe, je, and jz).
   void FCmp();
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index 75bbf3e..6142ce3 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -71,8 +71,6 @@
  *                            through the runtime system)
  *    - stack_area_base      (High end of the memory area to use as
  *                            backtracking stack)
- *    - at_start             (if 1, we are starting at the start of the
- *                            string, otherwise 0)
  *    - int* capture_array   (int[num_saved_registers_], for output).
  *    - end of input         (Address of end of string)
  *    - start of input       (Address of first character in string)
@@ -82,6 +80,8 @@
  *    - backup of callee save registers (rbx, possibly rsi and rdi).
  *    - Offset of location before start of input (effectively character
  *      position -1). Used to initialize capture registers to a non-position.
+ *    - At start of string (if 1, we are starting at the start of the
+ *      string, otherwise 0)
  *    - register 0  rbp[-n]   (Only positions must be stored in the first
  *    - register 1  rbp[-n-8]  num_saved_registers_ registers)
  *    - ...
@@ -661,7 +661,7 @@
   ASSERT_EQ(kInputStart, -3 * kPointerSize);
   ASSERT_EQ(kInputEnd, -4 * kPointerSize);
   ASSERT_EQ(kRegisterOutput, -5 * kPointerSize);
-  ASSERT_EQ(kAtStart, -6 * kPointerSize);
+  ASSERT_EQ(kStackHighEnd, -6 * kPointerSize);
   __ push(rdi);
   __ push(rsi);
   __ push(rdx);
@@ -672,6 +672,7 @@
   __ push(rbx);  // Callee-save
 #endif
   __ push(Immediate(0));  // Make room for "input start - 1" constant.
+  __ push(Immediate(0));  // Make room for "at start" constant.
 
   // Check if we have space on the stack for registers.
   Label stack_limit_hit;
@@ -716,6 +717,15 @@
   // Store this value in a local variable, for use when clearing
   // position registers.
   __ movq(Operand(rbp, kInputStartMinusOne), rax);
+
+  // Determine whether the start index is zero, that is at the start of the
+  // string, and store that value in a local variable.
+  __ movq(rbx, Operand(rbp, kStartIndex));
+  __ xor_(rcx, rcx);  // setcc only operates on cl (lower byte of rcx).
+  __ testq(rbx, rbx);
+  __ setcc(zero, rcx);  // 1 if 0 (start of string), 0 if positive.
+  __ movq(Operand(rbp, kAtStart), rcx);
+
   if (num_saved_registers_ > 0) {
     // Fill saved registers with initial value = start offset - 1
     // Fill in stack push order, to avoid accessing across an unwritten
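
The new at-start computation is a branchless flag: the xor clears the full register because setcc writes only the low byte. In C++ terms:

    #include <cstdint>

    uint64_t AtStartFlag(int64_t start_index) {
      // xor_(rcx, rcx): zero all 64 bits, since setcc only writes cl.
      // testq(rbx, rbx); setcc(zero, rcx): cl = (start_index == 0) ? 1 : 0.
      return start_index == 0 ? 1 : 0;  // Then stored to the kAtStart slot.
    }
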
diff --git a/src/x64/regexp-macro-assembler-x64.h b/src/x64/regexp-macro-assembler-x64.h
index 694cba0..c17f2b8 100644
--- a/src/x64/regexp-macro-assembler-x64.h
+++ b/src/x64/regexp-macro-assembler-x64.h
@@ -138,9 +138,7 @@
   static const int kInputStart = kStartIndex + kPointerSize;
   static const int kInputEnd = kInputStart + kPointerSize;
   static const int kRegisterOutput = kInputEnd + kPointerSize;
-  // AtStart is passed as 32 bit int (values 0 or 1).
-  static const int kAtStart = kRegisterOutput + kPointerSize;
-  static const int kStackHighEnd = kAtStart + kPointerSize;
+  static const int kStackHighEnd = kRegisterOutput + kPointerSize;
   // DirectCall is passed as 32 bit int (values 0 or 1).
   static const int kDirectCall = kStackHighEnd + kPointerSize;
 #else
@@ -152,9 +150,8 @@
   static const int kInputStart = kStartIndex - kPointerSize;
   static const int kInputEnd = kInputStart - kPointerSize;
   static const int kRegisterOutput = kInputEnd - kPointerSize;
-  static const int kAtStart = kRegisterOutput - kPointerSize;
-  static const int kStackHighEnd = kFrameAlign;
-  static const int kDirectCall = kStackHighEnd + kPointerSize;
+  static const int kStackHighEnd = kRegisterOutput - kPointerSize;
+  static const int kDirectCall = kFrameAlign;
 #endif
 
 #ifdef _WIN64
@@ -168,7 +165,7 @@
   // AMD64 Calling Convention has only one callee-save register that
   // we use. We push this after the frame pointer (and after the
   // parameters).
-  static const int kBackup_rbx = kAtStart - kPointerSize;
+  static const int kBackup_rbx = kStackHighEnd - kPointerSize;
   static const int kLastCalleeSaveRegister = kBackup_rbx;
 #endif
 
@@ -176,9 +173,10 @@
   // the frame in GetCode.
   static const int kInputStartMinusOne =
       kLastCalleeSaveRegister - kPointerSize;
+  static const int kAtStart = kInputStartMinusOne - kPointerSize;
 
   // First register address. Following registers are below it on the stack.
-  static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+  static const int kRegisterZero = kAtStart - kPointerSize;
 
   // Initial size of code buffer.
   static const size_t kRegExpCodeSize = 1024;
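
On the AMD64 (non-_WIN64) layout the net effect is that kStackHighEnd takes over the old kAtStart argument slot, and kAtStart reappears as a local below kInputStartMinusOne. A hedged model with concrete offsets (assuming kPointerSize == 8, consistent with the ASSERT_EQs in GetCode above):

    const int kPointerSize = 8;
    const int kRegisterOutput     = -5 * kPointerSize;               // Per ASSERT_EQ above.
    const int kStackHighEnd       = kRegisterOutput - kPointerSize;  // Old kAtStart slot.
    const int kBackup_rbx         = kStackHighEnd - kPointerSize;
    const int kInputStartMinusOne = kBackup_rbx - kPointerSize;
    const int kAtStart            = kInputStartMinusOne - kPointerSize;  // New local.
    const int kRegisterZero       = kAtStart - kPointerSize;

    static_assert(kStackHighEnd == -6 * kPointerSize,
                  "matches ASSERT_EQ(kStackHighEnd, -6 * kPointerSize) in GetCode");
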
diff --git a/src/x64/simulator-x64.h b/src/x64/simulator-x64.h
index 015ba13..a0fc3cb 100644
--- a/src/x64/simulator-x64.h
+++ b/src/x64/simulator-x64.h
@@ -54,8 +54,8 @@
 
 // Call the generated regexp code directly. The entry function pointer should
 // expect eight int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
-  entry(p0, p1, p2, p3, p4, p5, p6, p7)
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+  entry(p0, p1, p2, p3, p4, p5, p6)
 
 #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
   reinterpret_cast<TryCatch*>(try_catch_address)