Version 2.2.10

Performance improvements in the x64 and ARM backends.


git-svn-id: http://v8.googlecode.com/svn/trunk@4660 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 5509830..68ae026 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -33,6 +33,7 @@
 #include "debug.h"
 #include "ic-inl.h"
 #include "jsregexp.h"
+#include "jump-target-light-inl.h"
 #include "parser.h"
 #include "regexp-macro-assembler.h"
 #include "regexp-stack.h"
@@ -40,10 +41,12 @@
 #include "runtime.h"
 #include "scopes.h"
 #include "virtual-frame-inl.h"
+#include "virtual-frame-arm-inl.h"
 
 namespace v8 {
 namespace internal {
 
+
 #define __ ACCESS_MASM(masm_)
 
 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
@@ -274,7 +277,7 @@
 
     // Initialize the function return target after the locals are set
     // up, because it needs the expected frame height from the frame.
-    function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
+    function_return_.SetExpectedHeight();
     function_return_is_shadowed_ = false;
 
     // Generate code to 'execute' declarations and initialize functions
@@ -1143,44 +1146,66 @@
       int shift_value = int_value & 0x1f;  // least significant 5 bits
       DeferredCode* deferred =
         new DeferredInlineSmiOperation(op, shift_value, false, mode, tos);
-      __ tst(tos, Operand(kSmiTagMask));
-      deferred->Branch(ne);
-      __ mov(scratch, Operand(tos, ASR, kSmiTagSize));  // remove tags
+      uint32_t problematic_mask = kSmiTagMask;
+      // For an unsigned shift by zero, all negative smis are problematic.
+      if (shift_value == 0 && op == Token::SHR) problematic_mask |= 0x80000000;
+      __ tst(tos, Operand(problematic_mask));
+      deferred->Branch(ne);  // Go slow for problematic input.
       switch (op) {
         case Token::SHL: {
           if (shift_value != 0) {
-            __ mov(scratch, Operand(scratch, LSL, shift_value));
+            int adjusted_shift = shift_value - kSmiTagSize;
+            ASSERT(adjusted_shift >= 0);
+            if (adjusted_shift != 0) {
+              __ mov(scratch, Operand(tos, LSL, adjusted_shift));
+              // Check that the *signed* result fits in a smi.
+              __ add(scratch2, scratch, Operand(0x40000000), SetCC);
+              deferred->Branch(mi);
+              __ mov(tos, Operand(scratch, LSL, kSmiTagSize));
+            } else {
+              // Check that the *signed* result fits in a smi.
+              __ add(scratch2, tos, Operand(0x40000000), SetCC);
+              deferred->Branch(mi);
+              __ mov(tos, Operand(tos, LSL, kSmiTagSize));
+            }
           }
-          // check that the *signed* result fits in a smi
-          __ add(scratch2, scratch, Operand(0x40000000), SetCC);
-          deferred->Branch(mi);
           break;
         }
         case Token::SHR: {
-          // LSR by immediate 0 means shifting 32 bits.
           if (shift_value != 0) {
+            __ mov(scratch, Operand(tos, ASR, kSmiTagSize));  // Remove tag.
+            // LSR by immediate 0 means shifting 32 bits.
             __ mov(scratch, Operand(scratch, LSR, shift_value));
+            if (shift_value == 1) {
+              // Check that the *unsigned* result fits in a smi: neither of
+              // the two high-order bits can be set.
+              // - 0x80000000: the high bit would be lost when smi tagging.
+              // - 0x40000000: this number would convert to negative when
+              //   smi tagging.
+              // These two cases can only happen with shifts by 0 or 1 when
+              // handed a valid smi.
+              __ tst(scratch, Operand(0xc0000000));
+              deferred->Branch(ne);
+            }
+            __ mov(tos, Operand(scratch, LSL, kSmiTagSize));
           }
-          // check that the *unsigned* result fits in a smi
-          // neither of the two high-order bits can be set:
-          // - 0x80000000: high bit would be lost when smi tagging
-          // - 0x40000000: this number would convert to negative when
-          // smi tagging these two cases can only happen with shifts
-          // by 0 or 1 when handed a valid smi
-          __ tst(scratch, Operand(0xc0000000));
-          deferred->Branch(ne);
           break;
         }
         case Token::SAR: {
+          // In the ARM instruction set, ASR by immediate 0 means shifting 32
+          // bits.
           if (shift_value != 0) {
-            // ASR by immediate 0 means shifting 32 bits.
-            __ mov(scratch, Operand(scratch, ASR, shift_value));
+            // Do the shift and the tag removal in one operation.  If the shift
+            // is 31 bits (the highest possible value), then we emit the
+            // instruction as a shift by 0, which means shift arithmetically
+            // by 32.
+            __ mov(tos, Operand(tos, ASR, (kSmiTagSize + shift_value) & 0x1f));
+            // Put tag back.
+            __ mov(tos, Operand(tos, LSL, kSmiTagSize));
           }
           break;
         }
         default: UNREACHABLE();
       }
-      __ mov(tos, Operand(scratch, LSL, kSmiTagSize));
       deferred->BindExit();
       frame_->EmitPush(tos);
       break;
@@ -1549,7 +1574,7 @@
   VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ Block");
   CodeForStatementPosition(node);
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  node->break_target()->SetExpectedHeight();
   VisitStatementsAndSpill(node->statements());
   if (node->break_target()->is_linked()) {
     node->break_target()->Bind();
@@ -1836,7 +1861,7 @@
   VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ SwitchStatement");
   CodeForStatementPosition(node);
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  node->break_target()->SetExpectedHeight();
 
   LoadAndSpill(node->tag());
 
@@ -1925,7 +1950,7 @@
   VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ DoWhileStatement");
   CodeForStatementPosition(node);
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  node->break_target()->SetExpectedHeight();
   JumpTarget body(JumpTarget::BIDIRECTIONAL);
   IncrementLoopNesting();
 
@@ -1935,14 +1960,14 @@
   ConditionAnalysis info = AnalyzeCondition(node->cond());
   switch (info) {
     case ALWAYS_TRUE:
-      node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+      node->continue_target()->SetExpectedHeight();
       node->continue_target()->Bind();
       break;
     case ALWAYS_FALSE:
-      node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+      node->continue_target()->SetExpectedHeight();
       break;
     case DONT_KNOW:
-      node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+      node->continue_target()->SetExpectedHeight();
       body.Bind();
       break;
   }
@@ -2006,12 +2031,12 @@
   ConditionAnalysis info = AnalyzeCondition(node->cond());
   if (info == ALWAYS_FALSE) return;
 
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  node->break_target()->SetExpectedHeight();
   IncrementLoopNesting();
 
   // Label the top of the loop with the continue target for the backward
   // CFG edge.
-  node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+  node->continue_target()->SetExpectedHeight();
   node->continue_target()->Bind();
 
   if (info == DONT_KNOW) {
@@ -2060,17 +2085,17 @@
   ConditionAnalysis info = AnalyzeCondition(node->cond());
   if (info == ALWAYS_FALSE) return;
 
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  node->break_target()->SetExpectedHeight();
   IncrementLoopNesting();
 
   // If there is no update statement, label the top of the loop with the
   // continue target, otherwise with the loop target.
   JumpTarget loop(JumpTarget::BIDIRECTIONAL);
   if (node->next() == NULL) {
-    node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+    node->continue_target()->SetExpectedHeight();
     node->continue_target()->Bind();
   } else {
-    node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+    node->continue_target()->SetExpectedHeight();
     loop.Bind();
   }
 
@@ -2275,8 +2300,8 @@
   // sp[4] : enumerable
   // Grab the current frame's height for the break and continue
   // targets only after all the state is pushed on the frame.
-  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
-  node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  node->break_target()->SetExpectedHeight();
+  node->continue_target()->SetExpectedHeight();
 
   // Load the current count to r0, load the length to r1.
   __ ldrd(r0, frame_->ElementAt(0));
@@ -2766,45 +2791,13 @@
     JumpTarget slow;
     JumpTarget done;
 
-    // Generate fast-case code for variables that might be shadowed by
-    // eval-introduced variables.  Eval is used a lot without
-    // introducing variables.  In those cases, we do not want to
-    // perform a runtime call for all variables in the scope
-    // containing the eval.
-    if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
-      LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
-      // If there was no control flow to slow, we can exit early.
-      if (!slow.is_linked()) {
-        frame_->EmitPush(r0);
-        return;
-      }
-      frame_->SpillAll();
-
-      done.Jump();
-
-    } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
-      frame_->SpillAll();
-      Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
-      // Only generate the fast case for locals that rewrite to slots.
-      // This rules out argument loads because eval forces arguments
-      // access to be through the arguments object.
-      if (potential_slot != NULL) {
-        __ ldr(r0,
-               ContextSlotOperandCheckExtensions(potential_slot,
-                                                 r1,
-                                                 r2,
-                                                 &slow));
-        if (potential_slot->var()->mode() == Variable::CONST) {
-          __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-          __ cmp(r0, ip);
-          __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
-        }
-        // There is always control flow to slow from
-        // ContextSlotOperandCheckExtensions so we have to jump around
-        // it.
-        done.Jump();
-      }
-    }
+    // Generate fast case for loading from slots that correspond to
+    // local/global variables or arguments unless they are shadowed by
+    // eval-introduced bindings.
+    EmitDynamicLoadFromSlotFastCase(slot,
+                                    typeof_state,
+                                    &slow,
+                                    &done);
 
     slow.Bind();
     VirtualFrame::SpilledScope spilled_scope(frame_);
@@ -3019,6 +3012,67 @@
 }
 
 
+void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
+                                                    TypeofState typeof_state,
+                                                    JumpTarget* slow,
+                                                    JumpTarget* done) {
+  // Generate fast-case code for variables that might be shadowed by
+  // eval-introduced variables.  Eval is used a lot without
+  // introducing variables.  In those cases, we do not want to
+  // perform a runtime call for all variables in the scope
+  // containing the eval.
+  if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
+    LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
+    frame_->SpillAll();
+    done->Jump();
+
+  } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
+    frame_->SpillAll();
+    Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
+    Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
+    if (potential_slot != NULL) {
+      // Generate fast case for locals that rewrite to slots.
+      __ ldr(r0,
+             ContextSlotOperandCheckExtensions(potential_slot,
+                                               r1,
+                                               r2,
+                                               slow));
+      if (potential_slot->var()->mode() == Variable::CONST) {
+        __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+        __ cmp(r0, ip);
+        __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+      }
+      done->Jump();
+    } else if (rewrite != NULL) {
+      // Generate fast case for argument loads.
+      Property* property = rewrite->AsProperty();
+      if (property != NULL) {
+        VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
+        Literal* key_literal = property->key()->AsLiteral();
+        if (obj_proxy != NULL &&
+            key_literal != NULL &&
+            obj_proxy->IsArguments() &&
+            key_literal->handle()->IsSmi()) {
+          // Load arguments object if there are no eval-introduced
+          // variables. Then load the argument from the arguments
+          // object using keyed load.
+          __ ldr(r0,
+                 ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
+                                                   r1,
+                                                   r2,
+                                                   slow));
+          frame_->EmitPush(r0);
+          __ mov(r1, Operand(key_literal->handle()));
+          frame_->EmitPush(r1);
+          EmitKeyedLoad();
+          done->Jump();
+        }
+      }
+    }
+  }
+}
+
+
 void CodeGenerator::VisitSlot(Slot* node) {
 #ifdef DEBUG
   int original_height = frame_->height();
@@ -3705,52 +3759,26 @@
     // ----------------------------------
     // JavaScript examples:
     //
-    //  with (obj) foo(1, 2, 3)  // foo is in obj
+    //  with (obj) foo(1, 2, 3)  // foo may be in obj.
     //
     //  function f() {};
     //  function g() {
     //    eval(...);
-    //    f();  // f could be in extension object
+    //    f();  // f could be in extension object.
     //  }
     // ----------------------------------
 
     // JumpTargets do not yet support merging frames so the frame must be
     // spilled when jumping to these targets.
-    JumpTarget slow;
-    JumpTarget done;
+    JumpTarget slow, done;
 
-    // Generate fast-case code for variables that might be shadowed by
-    // eval-introduced variables.  Eval is used a lot without
-    // introducing variables.  In those cases, we do not want to
-    // perform a runtime call for all variables in the scope
-    // containing the eval.
-    if (var->mode() == Variable::DYNAMIC_GLOBAL) {
-      LoadFromGlobalSlotCheckExtensions(var->slot(), NOT_INSIDE_TYPEOF, &slow);
-      frame_->EmitPush(r0);
-      LoadGlobalReceiver(r1);
-      done.Jump();
-
-    } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
-      Slot* potential_slot = var->local_if_not_shadowed()->slot();
-      // Only generate the fast case for locals that rewrite to slots.
-      // This rules out argument loads because eval forces arguments
-      // access to be through the arguments object.
-      if (potential_slot != NULL) {
-        __ ldr(r0,
-               ContextSlotOperandCheckExtensions(potential_slot,
-                                                 r1,
-                                                 r2,
-                                                 &slow));
-        if (potential_slot->var()->mode() == Variable::CONST) {
-          __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-          __ cmp(r0, ip);
-          __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
-        }
-        frame_->EmitPush(r0);
-        LoadGlobalReceiver(r1);
-        done.Jump();
-      }
-    }
+    // Generate fast case for loading functions from slots that
+    // correspond to local/global variables or arguments unless they
+    // are shadowed by eval-introduced bindings.
+    EmitDynamicLoadFromSlotFastCase(var->slot(),
+                                    NOT_INSIDE_TYPEOF,
+                                    &slow,
+                                    &done);
 
     slow.Bind();
     // Load the function
@@ -3764,7 +3792,18 @@
     frame_->EmitPush(r0);  // function
     frame_->EmitPush(r1);  // receiver
 
-    done.Bind();
+    // If fast case code has been generated, emit code to push the
+    // function and receiver and have the slow path jump around this
+    // code.
+    if (done.is_linked()) {
+      JumpTarget call;
+      call.Jump();
+      done.Bind();
+      frame_->EmitPush(r0);  // function
+      LoadGlobalReceiver(r1);  // receiver
+      call.Bind();
+    }
+
     // Call the function. At this point, everything is spilled but the
     // function and receiver are in r0 and r1.
     CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
@@ -4892,7 +4931,6 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ CountOperation");
 
   bool is_postfix = node->is_postfix();
@@ -4901,10 +4939,8 @@
   Variable* var = node->expression()->AsVariableProxy()->AsVariable();
   bool is_const = (var != NULL && var->mode() == Variable::CONST);
 
-  // Postfix: Make room for the result.
   if (is_postfix) {
-     __ mov(r0, Operand(0));
-     frame_->EmitPush(r0);
+    frame_->EmitPush(Operand(Smi::FromInt(0)));
   }
 
   // A constant reference is not saved to, so a constant reference is not a
@@ -4914,35 +4950,33 @@
       // Spoof the virtual frame to have the expected height (one higher
       // than on entry).
       if (!is_postfix) {
-        __ mov(r0, Operand(Smi::FromInt(0)));
-        frame_->EmitPush(r0);
+        frame_->EmitPush(Operand(Smi::FromInt(0)));
       }
       ASSERT_EQ(original_height + 1, frame_->height());
       return;
     }
+    // This pushes 0, 1 or 2 words on the stack, to be used later when
+    // updating the target.  It also pushes the current value of the target.
     target.GetValue();
-    frame_->EmitPop(r0);
 
     JumpTarget slow;
     JumpTarget exit;
 
-    // Load the value (1) into register r1.
-    __ mov(r1, Operand(Smi::FromInt(1)));
-
     // Check for smi operand.
-    __ tst(r0, Operand(kSmiTagMask));
+    Register value = frame_->PopToRegister();
+    __ tst(value, Operand(kSmiTagMask));
     slow.Branch(ne);
 
     // Postfix: Store the old value as the result.
     if (is_postfix) {
-      __ str(r0, frame_->ElementAt(target.size()));
+      frame_->SetElementAt(value, target.size());
     }
 
     // Perform optimistic increment/decrement.
     if (is_increment) {
-      __ add(r0, r0, Operand(r1), SetCC);
+      __ add(value, value, Operand(Smi::FromInt(1)), SetCC);
     } else {
-      __ sub(r0, r0, Operand(r1), SetCC);
+      __ sub(value, value, Operand(Smi::FromInt(1)), SetCC);
     }
 
     // If the increment/decrement didn't overflow, we're done.
@@ -4950,41 +4984,50 @@
 
     // Revert optimistic increment/decrement.
     if (is_increment) {
-      __ sub(r0, r0, Operand(r1));
+      __ sub(value, value, Operand(Smi::FromInt(1)));
     } else {
-      __ add(r0, r0, Operand(r1));
+      __ add(value, value, Operand(Smi::FromInt(1)));
     }
 
-    // Slow case: Convert to number.
+    // Slow case: Convert to number.  At this point the
+    // value to be incremented is in the value register.
     slow.Bind();
+
+    // Convert the operand to a number.
+    frame_->EmitPush(value);
+
     {
-      // Convert the operand to a number.
-      frame_->EmitPush(r0);
+      VirtualFrame::SpilledScope spilled(frame_);
       frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
-    }
-    if (is_postfix) {
-      // Postfix: store to result (on the stack).
-      __ str(r0, frame_->ElementAt(target.size()));
+
+      if (is_postfix) {
+        // Postfix: store to result (on the stack).
+        __ str(r0, frame_->ElementAt(target.size()));
+      }
+
+      // Compute the new value.
+      frame_->EmitPush(r0);
+      frame_->EmitPush(Operand(Smi::FromInt(1)));
+      if (is_increment) {
+        frame_->CallRuntime(Runtime::kNumberAdd, 2);
+      } else {
+        frame_->CallRuntime(Runtime::kNumberSub, 2);
+      }
     }
 
-    // Compute the new value.
-    __ mov(r1, Operand(Smi::FromInt(1)));
-    frame_->EmitPush(r0);
-    frame_->EmitPush(r1);
-    if (is_increment) {
-      frame_->CallRuntime(Runtime::kNumberAdd, 2);
-    } else {
-      frame_->CallRuntime(Runtime::kNumberSub, 2);
-    }
-
+    __ Move(value, r0);
     // Store the new value in the target if not const.
+    // At this point the answer is in the value register.
     exit.Bind();
-    frame_->EmitPush(r0);
+    frame_->EmitPush(value);
+    // Set the target with the result, leaving the result on
+    // top of the stack.  This removes the target from the stack if
+    // it has a non-zero size.
     if (!is_const) target.SetValue(NOT_CONST_INIT);
   }
 
   // Postfix: Discard the new value and use the old.
-  if (is_postfix) frame_->EmitPop(r0);
+  if (is_postfix) frame_->Pop();
   ASSERT_EQ(original_height + 1, frame_->height());
 }
 
@@ -5427,20 +5470,34 @@
 
 class DeferredReferenceGetKeyedValue: public DeferredCode {
  public:
-  DeferredReferenceGetKeyedValue() {
+  DeferredReferenceGetKeyedValue(Register key, Register receiver)
+      : key_(key), receiver_(receiver) {
     set_comment("[ DeferredReferenceGetKeyedValue");
   }
 
   virtual void Generate();
+
+ private:
+  Register key_;
+  Register receiver_;
 };
 
 
 void DeferredReferenceGetKeyedValue::Generate() {
+  ASSERT((key_.is(r0) && receiver_.is(r1)) ||
+         (key_.is(r1) && receiver_.is(r0)));
+
   Register scratch1 = VirtualFrame::scratch0();
   Register scratch2 = VirtualFrame::scratch1();
   __ DecrementCounter(&Counters::keyed_load_inline, 1, scratch1, scratch2);
   __ IncrementCounter(&Counters::keyed_load_inline_miss, 1, scratch1, scratch2);
 
+  // Ensure the key is in r0 and the receiver in r1 to match the keyed load
+  // IC calling convention.
+  if (key_.is(r1)) {
+    __ Swap(r0, r1, ip);
+  }
+
   // The rest of the instructions in the deferred code must be together.
   { Assembler::BlockConstPoolScope block_const_pool(masm_);
     // Call keyed load IC. It has the arguments key and receiver in r0 and r1.
@@ -5576,15 +5633,14 @@
     __ IncrementCounter(&Counters::keyed_load_inline, 1,
                         frame_->scratch0(), frame_->scratch1());
 
-    // Load the key and receiver from the stack to r0 and r1.
-    frame_->PopToR1R0();
-    Register receiver = r0;
-    Register key = r1;
+    // Load the key and receiver from the stack.
+    Register key = frame_->PopToRegister();
+    Register receiver = frame_->PopToRegister(key);
     VirtualFrame::SpilledScope spilled(frame_);
 
-    // The deferred code expects key and receiver in r0 and r1.
+    // The deferred code expects key and receiver in registers.
     DeferredReferenceGetKeyedValue* deferred =
-        new DeferredReferenceGetKeyedValue();
+        new DeferredReferenceGetKeyedValue(key, receiver);
 
     // Check that the receiver is a heap object.
     __ tst(receiver, Operand(kSmiTagMask));
@@ -5594,17 +5650,16 @@
     // property code which can be patched. Therefore the exact number of
     // instructions generated need to be fixed, so the constant pool is blocked
     // while generating this code.
-#ifdef DEBUG
-    int kInlinedKeyedLoadInstructions = 19;
-    Label check_inlined_codesize;
-    masm_->bind(&check_inlined_codesize);
-#endif
     { Assembler::BlockConstPoolScope block_const_pool(masm_);
       Register scratch1 = VirtualFrame::scratch0();
       Register scratch2 = VirtualFrame::scratch1();
       // Check the map. The null map used below is patched by the inline cache
       // code.
       __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+#ifdef DEBUG
+      Label check_inlined_codesize;
+      masm_->bind(&check_inlined_codesize);
+#endif
       __ mov(scratch2, Operand(Factory::null_value()));
       __ cmp(scratch1, scratch2);
       deferred->Branch(ne);
@@ -5632,17 +5687,15 @@
       __ add(scratch1,
              scratch1,
              Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-      __ ldr(r0,
+      __ ldr(scratch1,
              MemOperand(scratch1, key, LSL,
                         kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
-      __ cmp(r0, scratch2);
-      // This is the only branch to deferred where r0 and r1 do not contain the
-      // receiver and key.  We can't just load undefined here because we have to
-      // check the prototype.
+      __ cmp(scratch1, scratch2);
       deferred->Branch(eq);
 
+      __ mov(r0, scratch1);
       // Make sure that the expected number of instructions are generated.
-      ASSERT_EQ(kInlinedKeyedLoadInstructions,
+      ASSERT_EQ(kInlinedKeyedLoadInstructionsAfterPatchSize,
                 masm_->InstructionsGeneratedSince(&check_inlined_codesize));
     }
 
@@ -8486,9 +8539,9 @@
   // Just jump directly to runtime if native RegExp is not selected at compile
   // time or if regexp entry in generated code is turned off runtime switch or
   // at compilation.
-#ifndef V8_NATIVE_REGEXP
+#ifdef V8_INTERPRETED_REGEXP
   __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#else  // V8_NATIVE_REGEXP
+#else  // V8_INTERPRETED_REGEXP
   if (!FLAG_regexp_entry_native) {
     __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
     return;
@@ -8598,7 +8651,7 @@
   __ ldr(last_match_info_elements,
          FieldMemOperand(r0, JSArray::kElementsOffset));
   __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
-  __ LoadRoot(ip, kFixedArrayMapRootIndex);
+  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
   __ cmp(r0, ip);
   __ b(ne, &runtime);
   // Check that the last match info has space for the capture registers and the
@@ -8821,7 +8874,7 @@
   // Do the runtime call to execute the regexp.
   __ bind(&runtime);
   __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
-#endif  // V8_NATIVE_REGEXP
+#endif  // V8_INTERPRETED_REGEXP
 }
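
The inlined smi shift code earlier in this diff folds tag handling into the shifts themselves: SAR shifts by (kSmiTagSize + shift_value) & 0x1f so untagging and shifting happen in one instruction, and the 0x40000000 addition flags results outside the smi range. A minimal C++ sketch of that tag arithmetic, assuming the classic 32-bit smi encoding (value << 1, low tag bit 0); the helper names are illustrative, not V8's:

    #include <cassert>
    #include <cstdint>

    static const int kSmiTagSize = 1;

    int32_t SmiTag(int32_t value) { return value << kSmiTagSize; }

    // SAR: shifting right by (kSmiTagSize + shift) & 0x1f untags and shifts
    // at once.  For shift == 31 the combined amount wraps to 0, which ARM's
    // ASR #0 treats as an arithmetic shift by 32 (modelled here as >> 31).
    int32_t SarSmi(int32_t smi, int shift) {
      int combined = (kSmiTagSize + shift) & 0x1f;
      int32_t untagged = (combined == 0) ? (smi >> 31) : (smi >> combined);
      return SmiTag(untagged);
    }

    // SHL overflow check: a value fits in a smi iff it lies in
    // [-2^30, 2^30).  Adding 0x40000000 makes exactly those values
    // non-negative, so a negative sum (the 'mi' condition) means overflow.
    bool FitsInSmi(int32_t untagged) {
      uint32_t biased = static_cast<uint32_t>(untagged) + 0x40000000u;
      return static_cast<int32_t>(biased) >= 0;
    }

    int main() {
      assert(SarSmi(SmiTag(-8), 1) == SmiTag(-4));   // ordinary case
      assert(SarSmi(SmiTag(-1), 31) == SmiTag(-1));  // wrap-to-zero case
      assert(FitsInSmi(0x3fffffff) && !FitsInSmi(0x40000000));
      return 0;
    }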
 
 
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index bb76b63..33a85c4 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -29,6 +29,7 @@
 #define V8_ARM_CODEGEN_ARM_H_
 
 #include "ic-inl.h"
+#include "ast.h"
 
 namespace v8 {
 namespace internal {
@@ -36,6 +37,7 @@
 // Forward declarations
 class CompilationInfo;
 class DeferredCode;
+class JumpTarget;
 class RegisterAllocator;
 class RegisterFile;
 
@@ -217,6 +219,9 @@
   // expected arguments. Otherwise return -1.
   static int InlineRuntimeCallArgumentsCount(Handle<String> name);
 
+  // Constants related to patching of inlined load/store.
+  static const int kInlinedKeyedLoadInstructionsAfterPatchSize = 19;
+
  private:
   // Construction/Destruction
   explicit CodeGenerator(MacroAssembler* masm);
@@ -309,6 +314,7 @@
   // Read a value from a slot and leave it on top of the expression stack.
   void LoadFromSlot(Slot* slot, TypeofState typeof_state);
   void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
+
   // Store the value on top of the stack to a slot.
   void StoreToSlot(Slot* slot, InitState init_state);
 
@@ -338,6 +344,15 @@
                                          TypeofState typeof_state,
                                          JumpTarget* slow);
 
+  // Support for loading from local/global variables and arguments
+  // whose location is known unless they are shadowed by
+  // eval-introduced bindings. Generates no code for unsupported slot
+  // types and therefore expects to fall through to the slow jump target.
+  void EmitDynamicLoadFromSlotFastCase(Slot* slot,
+                                       TypeofState typeof_state,
+                                       JumpTarget* slow,
+                                       JumpTarget* done);
+
   // Special code for typeof expressions: Unfortunately, we must
   // be careful when loading the expression in 'typeof'
   // expressions. We are not allowed to throw reference errors for
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 34ba5e5..c308d69 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -28,6 +28,7 @@
 #include "v8.h"
 
 #include "assembler-arm.h"
+#include "codegen.h"
 #include "codegen-inl.h"
 #include "disasm.h"
 #include "ic-inl.h"
@@ -639,7 +640,9 @@
 
   // Patch the map check.
   Address ldr_map_instr_address =
-      inline_end_address - 18 * Assembler::kInstrSize;
+      inline_end_address -
+      CodeGenerator::kInlinedKeyedLoadInstructionsAfterPatchSize *
+      Assembler::kInstrSize;
   Assembler::set_target_address_at(ldr_map_instr_address,
                                    reinterpret_cast<Address>(map));
   return true;
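
The change above replaces a hard-coded instruction count (18) with the shared CodeGenerator constant, so the code generator and the IC patcher cannot drift apart. A small sketch of the back-offset computation, assuming fixed-width 4-byte ARM instructions (names mirror the patch; treat the snippet as illustrative):

    #include <cstdint>

    typedef uint8_t* Address;
    const int kInstrSize = 4;  // every ARM instruction is 32 bits wide
    const int kInlinedKeyedLoadInstructionsAfterPatchSize = 19;

    // The inlined keyed-load sequence has a known, fixed length, so the
    // 'ldr map' instruction to patch sits a fixed number of instructions
    // before the end address of the inlined code.
    Address MapLoadInstructionAddress(Address inline_end_address) {
      return inline_end_address -
             kInlinedKeyedLoadInstructionsAfterPatchSize * kInstrSize;
    }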
diff --git a/src/arm/jump-target-arm.cc b/src/arm/jump-target-arm.cc
index a13de0e..8d182be 100644
--- a/src/arm/jump-target-arm.cc
+++ b/src/arm/jump-target-arm.cc
@@ -47,28 +47,15 @@
   // which are still live in the C++ code.
   ASSERT(cgen()->HasValidEntryRegisters());
 
-  if (is_bound()) {
-    // Backward jump.  There already a frame expectation at the target.
-    ASSERT(direction_ == BIDIRECTIONAL);
-    cgen()->frame()->MergeTo(entry_frame_);
+  if (entry_frame_set_) {
+    // There is already a frame expectation at the target.
+    cgen()->frame()->MergeTo(&entry_frame_);
     cgen()->DeleteFrame();
   } else {
-    // Use the current frame as the expected one at the target if necessary.
-    if (entry_frame_ == NULL) {
-      entry_frame_ = cgen()->frame();
-      RegisterFile empty;
-      cgen()->SetFrame(NULL, &empty);
-    } else {
-      cgen()->frame()->MergeTo(entry_frame_);
-      cgen()->DeleteFrame();
-    }
-
-    // The predicate is_linked() should be made true.  Its implementation
-    // detects the presence of a frame pointer in the reaching_frames_ list.
-    if (!is_linked()) {
-      reaching_frames_.Add(NULL);
-      ASSERT(is_linked());
-    }
+    // Clone the current frame to use as the expected one at the target.
+    set_entry_frame(cgen()->frame());
+    RegisterFile empty;
+    cgen()->SetFrame(NULL, &empty);
   }
   __ jmp(&entry_label_);
 }
@@ -77,23 +64,19 @@
 void JumpTarget::DoBranch(Condition cc, Hint ignored) {
   ASSERT(cgen()->has_valid_frame());
 
-  if (is_bound()) {
-    ASSERT(direction_ == BIDIRECTIONAL);
+  if (entry_frame_set_) {
     // Backward branch.  We have an expected frame to merge to on the
     // backward edge.
-    cgen()->frame()->MergeTo(entry_frame_);
+    if (cc == al) {
+      cgen()->frame()->MergeTo(&entry_frame_);
+    } else {
+      // We can't do conditional merges yet, so you have to ensure that all
+      // conditional branches to the JumpTarget have the same virtual frame.
+      ASSERT(cgen()->frame()->Equals(&entry_frame_));
+    }
   } else {
-    // Clone the current frame to use as the expected one at the target if
-    // necessary.
-    if (entry_frame_ == NULL) {
-      entry_frame_ = new VirtualFrame(cgen()->frame());
-    }
-    // The predicate is_linked() should be made true.  Its implementation
-    // detects the presence of a frame pointer in the reaching_frames_ list.
-    if (!is_linked()) {
-      reaching_frames_.Add(NULL);
-      ASSERT(is_linked());
-    }
+    // Clone the current frame to use as the expected one at the target.
+    set_entry_frame(cgen()->frame());
   }
   __ b(cc, &entry_label_);
 }
@@ -113,15 +96,10 @@
 
   // Calls are always 'forward' so we use a copy of the current frame (plus
   // one for a return address) as the expected frame.
-  ASSERT(entry_frame_ == NULL);
-  VirtualFrame* target_frame = new VirtualFrame(cgen()->frame());
-  target_frame->Adjust(1);
-  entry_frame_ = target_frame;
-
-  // The predicate is_linked() should now be made true.  Its implementation
-  // detects the presence of a frame pointer in the reaching_frames_ list.
-  reaching_frames_.Add(NULL);
-  ASSERT(is_linked());
+  ASSERT(!entry_frame_set_);
+  VirtualFrame target_frame = *cgen()->frame();
+  target_frame.Adjust(1);
+  set_entry_frame(&target_frame);
 
   __ bl(&entry_label_);
 }
@@ -136,76 +114,24 @@
 
   if (cgen()->has_valid_frame()) {
     // If there is a current frame we can use it on the fall through.
-    if (entry_frame_ == NULL) {
-      entry_frame_ = new VirtualFrame(cgen()->frame());
+    if (!entry_frame_set_) {
+      entry_frame_ = *cgen()->frame();
+      entry_frame_set_ = true;
     } else {
-      ASSERT(cgen()->frame()->Equals(entry_frame_));
+      cgen()->frame()->MergeTo(&entry_frame_);
     }
   } else {
     // If there is no current frame we must have an entry frame which we can
     // copy.
-    ASSERT(entry_frame_ != NULL);
+    ASSERT(entry_frame_set_);
     RegisterFile empty;
-    cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
-  }
-
-  // The predicate is_linked() should be made false.  Its implementation
-  // detects the presence (or absence) of frame pointers in the
-  // reaching_frames_ list.  If we inserted a bogus frame to make
-  // is_linked() true, remove it now.
-  if (is_linked()) {
-    reaching_frames_.Clear();
+    cgen()->SetFrame(new VirtualFrame(&entry_frame_), &empty);
   }
 
   __ bind(&entry_label_);
 }
 
 
-void BreakTarget::Jump() {
-  // On ARM we do not currently emit merge code for jumps, so we need to do
-  // it explicitly here.  The only merging necessary is to drop extra
-  // statement state from the stack.
-  ASSERT(cgen()->has_valid_frame());
-  int count = cgen()->frame()->height() - expected_height_;
-  cgen()->frame()->Drop(count);
-  DoJump();
-}
-
-
-void BreakTarget::Jump(Result* arg) {
-  UNIMPLEMENTED();
-}
-
-
-void BreakTarget::Bind() {
-#ifdef DEBUG
-  // All the forward-reaching frames should have been adjusted at the
-  // jumps to this target.
-  for (int i = 0; i < reaching_frames_.length(); i++) {
-    ASSERT(reaching_frames_[i] == NULL ||
-           reaching_frames_[i]->height() == expected_height_);
-  }
-#endif
-  // Drop leftover statement state from the frame before merging, even
-  // on the fall through.  This is so we can bind the return target
-  // with state on the frame.
-  if (cgen()->has_valid_frame()) {
-    int count = cgen()->frame()->height() - expected_height_;
-    // On ARM we do not currently emit merge code at binding sites, so we need
-    // to do it explicitly here.  The only merging necessary is to drop extra
-    // statement state from the stack.
-    cgen()->frame()->Drop(count);
-  }
-
-  DoBind();
-}
-
-
-void BreakTarget::Bind(Result* arg) {
-  UNIMPLEMENTED();
-}
-
-
 #undef __
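
The jump-target rewrite above drops the reaching_frames_ list and the heap-allocated entry frame in favour of a single by-value frame guarded by entry_frame_set_. A compact sketch of that clone-or-merge pattern, with Frame standing in for V8's VirtualFrame (hypothetical names; the merge action is left as a comment):

    // Frame is a stand-in for VirtualFrame; a real MergeTo would emit the
    // code that reconciles two frame layouts.
    struct Frame { /* virtual frame state */ };

    class Target {
     public:
      Target() : entry_frame_set_(false) {}

      // The first jump records the current frame as the expected layout at
      // the target; later jumps merge to (or must already match) it.
      void RecordJump(Frame* current) {
        if (entry_frame_set_) {
          // current->MergeTo(&entry_frame_);
        } else {
          entry_frame_ = *current;  // clone by value: no allocation, no list
          entry_frame_set_ = true;
        }
      }

     private:
      Frame entry_frame_;
      bool entry_frame_set_;
    };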
 
 
diff --git a/src/arm/virtual-frame-arm-inl.h b/src/arm/virtual-frame-arm-inl.h
new file mode 100644
index 0000000..a97cde4
--- /dev/null
+++ b/src/arm/virtual-frame-arm-inl.h
@@ -0,0 +1,53 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_VIRTUAL_FRAME_ARM_INL_H_
+#define V8_VIRTUAL_FRAME_ARM_INL_H_
+
+#include "assembler-arm.h"
+#include "virtual-frame-arm.h"
+
+namespace v8 {
+namespace internal {
+
+MemOperand VirtualFrame::ParameterAt(int index) {
+  // Index -1 corresponds to the receiver.
+  ASSERT(-1 <= index);
+  ASSERT(index <= parameter_count());
+  return MemOperand(fp, (1 + parameter_count() - index) * kPointerSize);
+}
+
+// The receiver frame slot.
+MemOperand VirtualFrame::Receiver() {
+  return ParameterAt(-1);
+}
+
+} }  // namespace v8::internal
+
+#endif  // V8_VIRTUAL_FRAME_ARM_INL_H_
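
The new inline header above computes parameter slots relative to fp. A tiny sketch of the offset arithmetic ParameterAt() encodes, assuming ARM's 4-byte pointers (the free function is illustrative only):

    #include <cassert>

    const int kPointerSize = 4;

    // Parameter 'index' lives (1 + parameter_count - index) words above fp;
    // index -1 is the receiver, which sits below all parameters and so has
    // the largest offset.
    int ParameterOffsetFromFp(int parameter_count, int index) {
      assert(-1 <= index && index <= parameter_count);
      return (1 + parameter_count - index) * kPointerSize;
    }

    int main() {
      // With two parameters: last parameter at fp+8, first at fp+12,
      // receiver at fp+16.
      assert(ParameterOffsetFromFp(2, 1) == 8);
      assert(ParameterOffsetFromFp(2, 0) == 12);
      assert(ParameterOffsetFromFp(2, -1) == 16);
      return 0;
    }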
diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc
index 0ec6e20..f7b337d 100644
--- a/src/arm/virtual-frame-arm.cc
+++ b/src/arm/virtual-frame-arm.cc
@@ -72,8 +72,15 @@
 
 void VirtualFrame::MergeTo(VirtualFrame* expected) {
   if (Equals(expected)) return;
+  MergeTOSTo(expected->top_of_stack_state_);
+  ASSERT(register_allocation_map_ == expected->register_allocation_map_);
+}
+
+
+void VirtualFrame::MergeTOSTo(
+    VirtualFrame::TopOfStack expected_top_of_stack_state) {
 #define CASE_NUMBER(a, b) ((a) * TOS_STATES + (b))
-  switch (CASE_NUMBER(top_of_stack_state_, expected->top_of_stack_state_)) {
+  switch (CASE_NUMBER(top_of_stack_state_, expected_top_of_stack_state)) {
     case CASE_NUMBER(NO_TOS_REGISTERS, NO_TOS_REGISTERS):
       break;
     case CASE_NUMBER(NO_TOS_REGISTERS, R0_TOS):
@@ -154,7 +161,7 @@
       UNREACHABLE();
 #undef CASE_NUMBER
   }
-  ASSERT(register_allocation_map_ == expected->register_allocation_map_);
+  top_of_stack_state_ = expected_top_of_stack_state;
 }
 
 
@@ -418,7 +425,7 @@
 
 
 void VirtualFrame::EmitPop(Register reg) {
-  ASSERT(!is_used(reg));
+  ASSERT(!is_used(RegisterAllocator::ToNumber(reg)));
   if (top_of_stack_state_ == NO_TOS_REGISTERS) {
     __ pop(reg);
   } else {
@@ -576,7 +583,6 @@
   ASSERT(but_not_to_this_one.is(r0) ||
          but_not_to_this_one.is(r1) ||
          but_not_to_this_one.is(no_reg));
-  AssertIsNotSpilled();
   element_count_--;
   if (top_of_stack_state_ == NO_TOS_REGISTERS) {
     if (but_not_to_this_one.is(r0)) {
@@ -628,6 +634,39 @@
 }
 
 
+void VirtualFrame::SetElementAt(Register reg, int this_far_down) {
+  if (this_far_down == 0) {
+    Pop();
+    Register dest = GetTOSRegister();
+    if (dest.is(reg)) {
+      // We already popped one item off the top of the stack.  If the only
+      // free register is the one we were asked to push then we have been
+      // asked to push a register that was already in use, which cannot
+      // happen.  It therefore follows that there are two free TOS registers:
+      ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
+      dest = dest.is(r0) ? r1 : r0;
+    }
+    __ mov(dest, reg);
+    EmitPush(dest);
+  } else if (this_far_down == 1) {
+    int virtual_elements = kVirtualElements[top_of_stack_state_];
+    if (virtual_elements < 2) {
+      __ str(reg, ElementAt(this_far_down));
+    } else {
+      ASSERT(virtual_elements == 2);
+      ASSERT(!reg.is(r0));
+      ASSERT(!reg.is(r1));
+      Register dest = kBottomRegister[top_of_stack_state_];
+      __ mov(dest, reg);
+    }
+  } else {
+    ASSERT(this_far_down >= 2);
+    ASSERT(kVirtualElements[top_of_stack_state_] <= 2);
+    __ str(reg, ElementAt(this_far_down));
+  }
+}
+
+
 Register VirtualFrame::GetTOSRegister() {
   if (SpilledScope::is_spilled()) return r0;
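
The MergeTOSTo refactoring above dispatches on a pair of top-of-stack states folded into a single switch key. A sketch of that pair-encoding trick; the enum layout is assumed from the ARM virtual frame, and the merge actions are comments only:

    enum TopOfStack { NO_TOS_REGISTERS, R0_TOS, R1_TOS, R0_R1_TOS, R1_R0_TOS,
                      TOS_STATES };  // TOS_STATES doubles as the state count

    void MergeTOSTo(TopOfStack current, TopOfStack expected) {
    #define CASE_NUMBER(a, b) ((a) * TOS_STATES + (b))
      // Folding (current, expected) into a * TOS_STATES + b gives every
      // combination of states its own case label in one switch.
      switch (CASE_NUMBER(current, expected)) {
        case CASE_NUMBER(NO_TOS_REGISTERS, NO_TOS_REGISTERS):
          break;  // nothing cached in registers, nothing to do
        case CASE_NUMBER(NO_TOS_REGISTERS, R0_TOS):
          break;  // would pop the memory top of stack into r0
        case CASE_NUMBER(R0_TOS, NO_TOS_REGISTERS):
          break;  // would push r0 onto the memory stack
        // ... one case per remaining (current, expected) pair ...
        default:
          break;  // UNREACHABLE() in the real code
      }
    #undef CASE_NUMBER
    }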
 
diff --git a/src/arm/virtual-frame-arm.h b/src/arm/virtual-frame-arm.h
index b255929..655194d 100644
--- a/src/arm/virtual-frame-arm.h
+++ b/src/arm/virtual-frame-arm.h
@@ -29,11 +29,14 @@
 #define V8_ARM_VIRTUAL_FRAME_ARM_H_
 
 #include "register-allocator.h"
-#include "scopes.h"
 
 namespace v8 {
 namespace internal {
 
+// This dummy class is only used to create invalid virtual frames.
+extern class InvalidVirtualFrameInitializer {}* kInvalidVirtualFrameInitializer;
+
+
 // -------------------------------------------------------------------------
 // Virtual frames
 //
@@ -82,26 +85,8 @@
     // is not spilled, ie. where register allocation occurs.  Eventually
     // when RegisterAllocationScope is ubiquitous it can be removed
     // along with the (by then unused) SpilledScope class.
-    explicit RegisterAllocationScope(CodeGenerator* cgen)
-      : cgen_(cgen),
-        old_is_spilled_(SpilledScope::is_spilled_) {
-      SpilledScope::is_spilled_ = false;
-      if (old_is_spilled_) {
-        VirtualFrame* frame = cgen->frame();
-        if (frame != NULL) {
-          frame->AssertIsSpilled();
-        }
-      }
-    }
-    ~RegisterAllocationScope() {
-      SpilledScope::is_spilled_ = old_is_spilled_;
-      if (old_is_spilled_) {
-        VirtualFrame* frame = cgen_->frame();
-        if (frame != NULL) {
-          frame->SpillAll();
-        }
-      }
-    }
+    inline explicit RegisterAllocationScope(CodeGenerator* cgen);
+    inline ~RegisterAllocationScope();
 
    private:
     CodeGenerator* cgen_;
@@ -116,19 +101,20 @@
   // Construct an initial virtual frame on entry to a JS function.
   inline VirtualFrame();
 
+  // Construct an invalid virtual frame, used by JumpTargets.
+  inline VirtualFrame(InvalidVirtualFrameInitializer* dummy);
+
   // Construct a virtual frame as a clone of an existing one.
   explicit inline VirtualFrame(VirtualFrame* original);
 
-  CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
-  MacroAssembler* masm() { return cgen()->masm(); }
+  inline CodeGenerator* cgen();
+  inline MacroAssembler* masm();
 
   // The number of elements on the virtual frame.
   int element_count() { return element_count_; }
 
   // The height of the virtual expression stack.
-  int height() {
-    return element_count() - expression_base_index();
-  }
+  inline int height();
 
   bool is_used(int num) {
     switch (num) {
@@ -160,10 +146,6 @@
     }
   }
 
-  bool is_used(Register reg) {
-    return is_used(RegisterAllocator::ToNumber(reg));
-  }
-
   // Add extra in-memory elements to the top of the frame to match an actual
   // frame (eg, the frame after an exception handler is pushed).  No code is
   // emitted.
@@ -247,16 +229,13 @@
 
   // An element of the expression stack as an assembly operand.
   MemOperand ElementAt(int index) {
-    AssertIsSpilled();
-    return MemOperand(sp, index * kPointerSize);
+    int adjusted_index = index - kVirtualElements[top_of_stack_state_];
+    ASSERT(adjusted_index >= 0);
+    return MemOperand(sp, adjusted_index * kPointerSize);
   }
 
   // A frame-allocated local as an assembly operand.
-  MemOperand LocalAt(int index) {
-    ASSERT(0 <= index);
-    ASSERT(index < local_count());
-    return MemOperand(fp, kLocal0Offset - index * kPointerSize);
-  }
+  inline MemOperand LocalAt(int index);
 
   // Push the address of the receiver slot on the frame.
   void PushReceiverSlotAddress();
@@ -268,26 +247,17 @@
   MemOperand Context() { return MemOperand(fp, kContextOffset); }
 
   // A parameter as an assembly operand.
-  MemOperand ParameterAt(int index) {
-    // Index -1 corresponds to the receiver.
-    ASSERT(-1 <= index);  // -1 is the receiver.
-    ASSERT(index <= parameter_count());
-    return MemOperand(fp, (1 + parameter_count() - index) * kPointerSize);
-  }
+  inline MemOperand ParameterAt(int index);
 
   // The receiver frame slot.
-  MemOperand Receiver() { return ParameterAt(-1); }
+  inline MemOperand Receiver();
 
   // Push a try-catch or try-finally handler on top of the virtual frame.
   void PushTryHandler(HandlerType type);
 
   // Call stub given the number of arguments it expects on (and
   // removes from) the stack.
-  void CallStub(CodeStub* stub, int arg_count) {
-    if (arg_count != 0) Forget(arg_count);
-    ASSERT(cgen()->HasValidEntryRegisters());
-    masm()->CallStub(stub);
-  }
+  inline void CallStub(CodeStub* stub, int arg_count);
 
   // Call JS function from top of the stack with arguments
   // taken from the stack.
@@ -386,6 +356,12 @@
   void EmitPush(MemOperand operand);
   void EmitPushRoot(Heap::RootListIndex index);
 
+  // Overwrite the nth element from the top of the stack.  If that position
+  // is cached in a register this turns into a mov, otherwise an str.
+  // Afterwards the register passed in can still be used, even if it is one
+  // of the registers that can cache TOS values (r0 or r1).
+  void SetElementAt(Register reg, int this_far_down);
+
   // Get a register which is free and which must be immediately used to
   // push on the top of the stack.
   Register GetTOSRegister();
@@ -449,13 +425,13 @@
   int stack_pointer() { return element_count_ - 1; }
 
   // The number of frame-allocated locals and parameters respectively.
-  int parameter_count() { return cgen()->scope()->num_parameters(); }
-  int local_count() { return cgen()->scope()->num_stack_slots(); }
+  inline int parameter_count();
+  inline int local_count();
 
   // The index of the element that is at the processor's frame pointer
   // (the fp register).  The parameters, receiver, function, and context
   // are below the frame pointer.
-  int frame_pointer() { return parameter_count() + 3; }
+  inline int frame_pointer();
 
   // The index of the first parameter.  The receiver lies below the first
   // parameter.
@@ -463,26 +439,22 @@
 
   // The index of the context slot in the frame.  It is immediately
   // below the frame pointer.
-  int context_index() { return frame_pointer() - 1; }
+  inline int context_index();
 
   // The index of the function slot in the frame.  It is below the frame
   // pointer and context slot.
-  int function_index() { return frame_pointer() - 2; }
+  inline int function_index();
 
   // The index of the first local.  Between the frame pointer and the
   // locals lies the return address.
-  int local0_index() { return frame_pointer() + 2; }
+  inline int local0_index();
 
   // The index of the base of the expression stack.
-  int expression_base_index() { return local0_index() + local_count(); }
+  inline int expression_base_index();
 
   // Convert a frame index into a frame pointer relative offset into the
   // actual stack.
-  int fp_relative(int index) {
-    ASSERT(index < element_count());
-    ASSERT(frame_pointer() < element_count());  // FP is on the frame.
-    return (frame_pointer() - index) * kPointerSize;
-  }
+  inline int fp_relative(int index);
 
   // Spill all elements in registers. Spill the top spilled_args elements
   // on the frame.  Sync all other frame elements.
@@ -494,10 +466,13 @@
   // onto the physical stack and made free.
   void EnsureOneFreeTOSRegister();
 
+  // Emit instructions to get the top-of-stack state from where we are to
+  // where we want to be.
+  void MergeTOSTo(TopOfStack expected_state);
+
   inline bool Equals(VirtualFrame* other);
 
   friend class JumpTarget;
-  friend class DeferredCode;
 };
 
 
diff --git a/src/ast-inl.h b/src/ast-inl.h
new file mode 100644
index 0000000..2b5d7c4
--- /dev/null
+++ b/src/ast-inl.h
@@ -0,0 +1,79 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "ast.h"
+
+namespace v8 {
+namespace internal {
+
+BreakableStatement::BreakableStatement(ZoneStringList* labels, Type type)
+    : labels_(labels), type_(type) {
+  ASSERT(labels == NULL || labels->length() > 0);
+}
+
+
+SwitchStatement::SwitchStatement(ZoneStringList* labels)
+    : BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
+      tag_(NULL), cases_(NULL) {
+}
+
+
+IterationStatement::IterationStatement(ZoneStringList* labels)
+    : BreakableStatement(labels, TARGET_FOR_ANONYMOUS), body_(NULL) {
+}
+
+
+Block::Block(ZoneStringList* labels, int capacity, bool is_initializer_block)
+    : BreakableStatement(labels, TARGET_FOR_NAMED_ONLY),
+      statements_(capacity),
+      is_initializer_block_(is_initializer_block) {
+}
+
+
+ForStatement::ForStatement(ZoneStringList* labels)
+    : IterationStatement(labels),
+      init_(NULL),
+      cond_(NULL),
+      next_(NULL),
+      may_have_function_literal_(true),
+      loop_variable_(NULL),
+      peel_this_loop_(false) {
+}
+
+
+ForInStatement::ForInStatement(ZoneStringList* labels)
+    : IterationStatement(labels), each_(NULL), enumerable_(NULL) {
+}
+
+
+DoWhileStatement::DoWhileStatement(ZoneStringList* labels)
+    : IterationStatement(labels), cond_(NULL), condition_position_(-1) {
+}
+
+} }  // namespace v8::internal
diff --git a/src/ast.cc b/src/ast.cc
index 75b2945..92df990 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -32,6 +32,8 @@
 #include "parser.h"
 #include "scopes.h"
 #include "string-stream.h"
+#include "ast-inl.h"
+#include "jump-target-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -786,6 +788,13 @@
 }
 
 
+WhileStatement::WhileStatement(ZoneStringList* labels)
+    : IterationStatement(labels),
+      cond_(NULL),
+      may_have_function_literal_(true) {
+}
+
+
 ExpressionStatement::ExpressionStatement(ExpressionStatement* other,
                                          Expression* expression)
     : Statement(other), expression_(expression) {}
@@ -809,6 +818,11 @@
     : BreakableStatement(other), body_(body) {}
 
 
+CaseClause::CaseClause(Expression* label, ZoneList<Statement*>* statements)
+    : label_(label), statements_(statements) {
+}
+
+
 ForStatement::ForStatement(ForStatement* other,
                            Statement* init,
                            Expression* cond,
diff --git a/src/ast.h b/src/ast.h
index dfc08ee..a3a9734 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -351,10 +351,7 @@
   bool is_target_for_anonymous() const { return type_ == TARGET_FOR_ANONYMOUS; }
 
  protected:
-  BreakableStatement(ZoneStringList* labels, Type type)
-      : labels_(labels), type_(type) {
-    ASSERT(labels == NULL || labels->length() > 0);
-  }
+  inline BreakableStatement(ZoneStringList* labels, Type type);
 
   explicit BreakableStatement(BreakableStatement* other);
 
@@ -367,10 +364,7 @@
 
 class Block: public BreakableStatement {
  public:
-  Block(ZoneStringList* labels, int capacity, bool is_initializer_block)
-      : BreakableStatement(labels, TARGET_FOR_NAMED_ONLY),
-        statements_(capacity),
-        is_initializer_block_(is_initializer_block) { }
+  inline Block(ZoneStringList* labels, int capacity, bool is_initializer_block);
 
   // Construct a clone initialized from the original block and
   // a deep copy of all statements of the original block.
@@ -437,8 +431,7 @@
   BreakTarget* continue_target()  { return &continue_target_; }
 
  protected:
-  explicit IterationStatement(ZoneStringList* labels)
-      : BreakableStatement(labels, TARGET_FOR_ANONYMOUS), body_(NULL) { }
+  explicit inline IterationStatement(ZoneStringList* labels);
 
   // Construct a clone initialized from  original and
   // a deep copy of the original body.
@@ -456,9 +449,7 @@
 
 class DoWhileStatement: public IterationStatement {
  public:
-  explicit DoWhileStatement(ZoneStringList* labels)
-      : IterationStatement(labels), cond_(NULL), condition_position_(-1) {
-  }
+  explicit inline DoWhileStatement(ZoneStringList* labels);
 
   void Initialize(Expression* cond, Statement* body) {
     IterationStatement::Initialize(body);
@@ -482,11 +473,7 @@
 
 class WhileStatement: public IterationStatement {
  public:
-  explicit WhileStatement(ZoneStringList* labels)
-      : IterationStatement(labels),
-        cond_(NULL),
-        may_have_function_literal_(true) {
-  }
+  explicit WhileStatement(ZoneStringList* labels);
 
   void Initialize(Expression* cond, Statement* body) {
     IterationStatement::Initialize(body);
@@ -511,14 +498,7 @@
 
 class ForStatement: public IterationStatement {
  public:
-  explicit ForStatement(ZoneStringList* labels)
-      : IterationStatement(labels),
-        init_(NULL),
-        cond_(NULL),
-        next_(NULL),
-        may_have_function_literal_(true),
-        loop_variable_(NULL),
-        peel_this_loop_(false) {}
+  explicit inline ForStatement(ZoneStringList* labels);
 
   // Construct a for-statement initialized from another for-statement
   // and deep copies of all parts of the original statement.
@@ -574,8 +554,7 @@
 
 class ForInStatement: public IterationStatement {
  public:
-  explicit ForInStatement(ZoneStringList* labels)
-      : IterationStatement(labels), each_(NULL), enumerable_(NULL) { }
+  explicit inline ForInStatement(ZoneStringList* labels);
 
   void Initialize(Expression* each, Expression* enumerable, Statement* body) {
     IterationStatement::Initialize(body);
@@ -691,8 +670,7 @@
 
 class CaseClause: public ZoneObject {
  public:
-  CaseClause(Expression* label, ZoneList<Statement*>* statements)
-      : label_(label), statements_(statements) { }
+  CaseClause(Expression* label, ZoneList<Statement*>* statements);
 
   bool is_default() const  { return label_ == NULL; }
   Expression* label() const  {
@@ -711,9 +689,7 @@
 
 class SwitchStatement: public BreakableStatement {
  public:
-  explicit SwitchStatement(ZoneStringList* labels)
-      : BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
-        tag_(NULL), cases_(NULL) { }
+  explicit inline SwitchStatement(ZoneStringList* labels);
 
   void Initialize(Expression* tag, ZoneList<CaseClause*>* cases) {
     tag_ = tag;
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index df1e98a..0874131 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -1753,8 +1753,8 @@
         CreateNewGlobals(global_template, global_object, &inner_global);
     HookUpGlobalProxy(inner_global, global_proxy);
     InitializeGlobal(inner_global, empty_function);
-    if (!InstallNatives()) return;
     InstallJSFunctionResultCaches();
+    if (!InstallNatives()) return;
 
     MakeFunctionInstancePrototypeWritable();
 
diff --git a/src/builtins.cc b/src/builtins.cc
index 4971275..9a0fbd2 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -330,22 +330,19 @@
 }
 
 
-static bool ArrayPrototypeHasNoElements() {
+static bool ArrayPrototypeHasNoElements(Context* global_context,
+                                        JSObject* array_proto) {
   // This method depends on non writability of Object and Array prototype
   // fields.
-  Context* global_context = Top::context()->global_context();
-  // Array.prototype
-  JSObject* proto =
-      JSObject::cast(global_context->array_function()->prototype());
-  if (proto->elements() != Heap::empty_fixed_array()) return false;
+  if (array_proto->elements() != Heap::empty_fixed_array()) return false;
   // Hidden prototype
-  proto = JSObject::cast(proto->GetPrototype());
-  ASSERT(proto->elements() == Heap::empty_fixed_array());
+  array_proto = JSObject::cast(array_proto->GetPrototype());
+  ASSERT(array_proto->elements() == Heap::empty_fixed_array());
   // Object.prototype
-  proto = JSObject::cast(proto->GetPrototype());
-  if (proto != global_context->initial_object_prototype()) return false;
-  if (proto->elements() != Heap::empty_fixed_array()) return false;
-  ASSERT(proto->GetPrototype()->IsNull());
+  array_proto = JSObject::cast(array_proto->GetPrototype());
+  if (array_proto != global_context->initial_object_prototype()) return false;
+  if (array_proto->elements() != Heap::empty_fixed_array()) return false;
+  ASSERT(array_proto->GetPrototype()->IsNull());
   return true;
 }
 
@@ -368,6 +365,18 @@
 }
 
 
+static bool IsFastElementMovingAllowed(Object* receiver,
+                                       FixedArray** elements) {
+  if (!IsJSArrayWithFastElements(receiver, elements)) return false;
+
+  Context* global_context = Top::context()->global_context();
+  JSObject* array_proto =
+      JSObject::cast(global_context->array_function()->prototype());
+  if (JSArray::cast(receiver)->GetPrototype() != array_proto) return false;
+  return ArrayPrototypeHasNoElements(global_context, array_proto);
+}
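
Why the guard matters, in a minimal JavaScript sketch (not part of the patch): a hole in the receiver reads through to the prototype chain, so the element-moving builtins may only take the fast path when the receiver's prototype is the untouched Array.prototype and the whole chain is element-free.

// Runs in d8 (print is d8's output function); mutating Array.prototype
// is for illustration only.
var a = [, 'b'];               // a[0] is a hole
Array.prototype[0] = 'proto';  // the prototype chain now has an element
print(a.shift());              // 'proto' -- the hole reads through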
+
+
 static Object* CallJsBuiltin(const char* name,
                              BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
   HandleScope handleScope;
@@ -465,11 +474,7 @@
     return top;
   }
 
-  // Remember to check the prototype chain.
-  JSFunction* array_function =
-      Top::context()->global_context()->array_function();
-  JSObject* prototype = JSObject::cast(array_function->prototype());
-  top = prototype->GetElement(len - 1);
+  top = array->GetPrototype()->GetElement(len - 1);
 
   return top;
 }
@@ -478,8 +483,7 @@
 BUILTIN(ArrayShift) {
   Object* receiver = *args.receiver();
   FixedArray* elms = NULL;
-  if (!IsJSArrayWithFastElements(receiver, &elms)
-      || !ArrayPrototypeHasNoElements()) {
+  if (!IsFastElementMovingAllowed(receiver, &elms)) {
     return CallJsBuiltin("ArrayShift", args);
   }
   JSArray* array = JSArray::cast(receiver);
@@ -515,8 +519,7 @@
 BUILTIN(ArrayUnshift) {
   Object* receiver = *args.receiver();
   FixedArray* elms = NULL;
-  if (!IsJSArrayWithFastElements(receiver, &elms)
-      || !ArrayPrototypeHasNoElements()) {
+  if (!IsFastElementMovingAllowed(receiver, &elms)) {
     return CallJsBuiltin("ArrayUnshift", args);
   }
   JSArray* array = JSArray::cast(receiver);
@@ -565,8 +568,7 @@
 BUILTIN(ArraySlice) {
   Object* receiver = *args.receiver();
   FixedArray* elms = NULL;
-  if (!IsJSArrayWithFastElements(receiver, &elms)
-      || !ArrayPrototypeHasNoElements()) {
+  if (!IsFastElementMovingAllowed(receiver, &elms)) {
     return CallJsBuiltin("ArraySlice", args);
   }
   JSArray* array = JSArray::cast(receiver);
@@ -635,8 +637,7 @@
 BUILTIN(ArraySplice) {
   Object* receiver = *args.receiver();
   FixedArray* elms = NULL;
-  if (!IsJSArrayWithFastElements(receiver, &elms)
-      || !ArrayPrototypeHasNoElements()) {
+  if (!IsFastElementMovingAllowed(receiver, &elms)) {
     return CallJsBuiltin("ArraySplice", args);
   }
   JSArray* array = JSArray::cast(receiver);
@@ -788,7 +789,10 @@
 
 
 BUILTIN(ArrayConcat) {
-  if (!ArrayPrototypeHasNoElements()) {
+  Context* global_context = Top::context()->global_context();
+  JSObject* array_proto =
+      JSObject::cast(global_context->array_function()->prototype());
+  if (!ArrayPrototypeHasNoElements(global_context, array_proto)) {
     return CallJsBuiltin("ArrayConcat", args);
   }
 
@@ -798,7 +802,8 @@
   int result_len = 0;
   for (int i = 0; i < n_arguments; i++) {
     Object* arg = args[i];
-    if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastElements()) {
+    if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastElements()
+        || JSArray::cast(arg)->GetPrototype() != array_proto) {
       return CallJsBuiltin("ArrayConcat", args);
     }
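
ArrayConcat needs the same check per argument: a hole in any source array is looked up on that array's own prototype chain when concat copies it. A hedged sketch of the observable behavior:

// Assumes a fresh context; the prototype mutation is for illustration.
Array.prototype[0] = 'x';
var a = [, 1];            // a[0] is a hole
print([0].concat(a));     // 0,x,1 -- the hole is filled from the prototype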
 
diff --git a/src/codegen.h b/src/codegen.h
index a5bb31f..667d100 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -28,7 +28,6 @@
 #ifndef V8_CODEGEN_H_
 #define V8_CODEGEN_H_
 
-#include "ast.h"
 #include "code-stubs.h"
 #include "runtime.h"
 #include "type-info.h"
diff --git a/src/d8.js b/src/d8.js
index b9ff09c..5c3da13 100644
--- a/src/d8.js
+++ b/src/d8.js
@@ -341,6 +341,11 @@
       this.request_ = this.breakCommandToJSONRequest_(args);
       break;
 
+    case 'breakpoints':
+    case 'bb':
+      this.request_ = this.breakpointsCommandToJSONRequest_(args);
+      break;
+
     case 'clear':
       this.request_ = this.clearCommandToJSONRequest_(args);
       break;
@@ -770,6 +775,15 @@
 };
 
 
+DebugRequest.prototype.breakpointsCommandToJSONRequest_ = function(args) {
+  if (args && args.length > 0) {
+    throw new Error('Unexpected arguments.');
+  }
+  var request = this.createRequest('listbreakpoints');
+  return request.toJSONProtocol();
+};
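
For reference, the command takes no arguments and maps to a bare listbreakpoints request; an illustrative exchange at the d8 debugger prompt (seq number made up):

// d8> breakpoints
// sends: {"seq":26,"type":"request","command":"listbreakpoints"}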
+
+
 // Create a JSON request for the clear command.
 DebugRequest.prototype.clearCommandToJSONRequest_ = function(args) {
   // Build a clear request from the text command.
@@ -947,6 +961,39 @@
         result += body.breakpoint;
         details.text = result;
         break;
+
+      case 'listbreakpoints':
+        result = 'breakpoints: (' + body.breakpoints.length + ')';
+        for (var i = 0; i < body.breakpoints.length; i++) {
+          var breakpoint = body.breakpoints[i];
+          result += '\n id=' + breakpoint.number;
+          result += ' type=' + breakpoint.type;
+          if (breakpoint.script_id) {
+            result += ' script_id=' + breakpoint.script_id;
+          }
+          if (breakpoint.script_name) {
+            result += ' script_name=' + breakpoint.script_name;
+          }
+          result += ' line=' + breakpoint.line;
+          if (breakpoint.column != null) {
+            result += ' column=' + breakpoint.column;
+          }
+          if (breakpoint.groupId) {
+            result += ' groupId=' + breakpoint.groupId;
+          }
+          if (breakpoint.ignoreCount) {
+            result += ' ignoreCount=' + breakpoint.ignoreCount;
+          }
+          if (breakpoint.active === false) {
+            result += ' inactive';
+          }
+          if (breakpoint.condition) {
+            result += ' condition=' + breakpoint.condition;
+          }
+          result += ' hit_count=' + breakpoint.hit_count;
+        }
+        details.text = result;
+        break;
 
       case 'backtrace':
         if (body.totalFrames == 0) {
@@ -1136,8 +1183,8 @@
 
       default:
         details.text =
-            'Response for unknown command \'' + response.command + '\'' +
-            ' (' + json_response + ')';
+            'Response for unknown command \'' + response.command() + '\'' +
+            ' (' + response.raw_json() + ')';
     }
   } catch (e) {
     details.text = 'Error: "' + e + '" formatting response';
@@ -1153,6 +1200,7 @@
  * @constructor
  */
 function ProtocolPackage(json) {
+  this.raw_json_ = json;
   this.packet_ = JSON.parse(json);
   this.refs_ = [];
   if (this.packet_.refs) {
@@ -1243,6 +1291,11 @@
 }
 
 
+ProtocolPackage.prototype.raw_json = function() {
+  return this.raw_json_;
+}
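
Retaining the unparsed text is what lets the formatter above echo exactly what the debugger sent when it meets an unknown command. A small usage sketch (response text abbreviated, not from a real session):

var packet = new ProtocolPackage('{"type":"response","command":"bogus"}');
print(packet.raw_json());  // the original wire text, byte for byte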
+
+
 function ProtocolValue(value, packet) {
   this.value_ = value;
   this.packet_ = packet;
diff --git a/src/date.js b/src/date.js
index b9e19d6..e780cb8 100644
--- a/src/date.js
+++ b/src/date.js
@@ -238,7 +238,15 @@
   return time + DaylightSavingsOffset(time) + local_time_offset;
 }
 
+
+var ltcache = {
+  key: null,
+  val: null
+};
+
 function LocalTimeNoCheck(time) {
+  var ltc = ltcache;
+  if (%_ObjectEquals(time, ltc.key)) return ltc.val;
   if (time < -MAX_TIME_MS || time > MAX_TIME_MS) {
     return $NaN;
   }
@@ -252,7 +260,8 @@
   } else {
     var dst_offset = DaylightSavingsOffset(time);
   }
-  return time + local_time_offset + dst_offset;
+  ltc.key = time;
+  return (ltc.val = time + local_time_offset + dst_offset);
 }
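
The cache is a single key/value pair, so repeated conversions of the same time value (common when one Date is read through several accessors) skip the DST lookup. A minimal model, where computeLocalTime stands in for the real conversion (the real code compares with the %_ObjectEquals intrinsic rather than ===):

var cache = { key: null, val: null };
function cachedLocalTime(t) {
  if (t === cache.key) return cache.val;     // hit: reuse the last conversion
  cache.key = t;
  return (cache.val = computeLocalTime(t));  // computeLocalTime: stand-in
}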
 
 
diff --git a/src/debug-debugger.js b/src/debug-debugger.js
index e94cee4..77fa1dd 100644
--- a/src/debug-debugger.js
+++ b/src/debug-debugger.js
@@ -1266,6 +1266,8 @@
         this.clearBreakPointRequest_(request, response);
       } else if (request.command == 'clearbreakpointgroup') {
         this.clearBreakPointGroupRequest_(request, response);
+      } else if (request.command == 'listbreakpoints') {
+        this.listBreakpointsRequest_(request, response);
       } else if (request.command == 'backtrace') {
         this.backtraceRequest_(request, response);
       } else if (request.command == 'frame') {
@@ -1581,6 +1583,35 @@
   response.body = { breakpoint: break_point }
 }
 
+DebugCommandProcessor.prototype.listBreakpointsRequest_ = function(request, response) {
+  var array = [];
+  for (var i = 0; i < script_break_points.length; i++) {
+    var break_point = script_break_points[i];
+
+    var description = {
+      number: break_point.number(),
+      line: break_point.line(),
+      column: break_point.column(),
+      groupId: break_point.groupId(),
+      hit_count: break_point.hit_count(),
+      active: break_point.active(),
+      condition: break_point.condition(),
+      ignoreCount: break_point.ignoreCount()
+    }
+
+    if (break_point.type() == Debug.ScriptBreakPointType.ScriptId) {
+      description.type = 'scriptId';
+      description.script_id = break_point.script_id();
+    } else {
+      description.type = 'scriptName';
+      description.script_name = break_point.script_name();
+    }
+    array.push(description);
+  }
+
+  response.body = { breakpoints: array }
+}
+
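
An illustrative response body assembled by this handler, for one script-name breakpoint (all values made up):

// { "breakpoints": [ { "number": 1, "line": 42, "column": null,
//                      "groupId": null, "hit_count": 0, "active": true,
//                      "condition": null, "ignoreCount": 0,
//                      "type": "scriptName", "script_name": "test.js" } ] }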
 
 DebugCommandProcessor.prototype.backtraceRequest_ = function(request, response) {
   // Get the number of frames.
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index 80e421b..2db21d5 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -1067,7 +1067,7 @@
   //  -- esp[0] : return address
   //  -- esp[4] : last argument
   // -----------------------------------
-  Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+  Label generic_array_code;
 
   // Get the Array function.
   GenerateLoadArrayFunction(masm, edi);
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 63286a7..4393e44 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -4227,8 +4227,7 @@
 
   // Get the i'th entry of the array.
   __ mov(edx, frame_->ElementAt(2));
-  __ mov(ebx, Operand(edx, eax, times_2,
-                      FixedArray::kHeaderSize - kHeapObjectTag));
+  __ mov(ebx, FixedArrayElementOperand(edx, eax));
 
   // Get the expected map from the stack or a zero map in the
   // permanent slow case.  eax: current iteration count, ebx: i'th entry.
@@ -4724,43 +4723,14 @@
     JumpTarget slow;
     JumpTarget done;
 
-    // Generate fast-case code for variables that might be shadowed by
-    // eval-introduced variables.  Eval is used a lot without
-    // introducing variables.  In those cases, we do not want to
-    // perform a runtime call for all variables in the scope
-    // containing the eval.
-    if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
-      result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
-      // If there was no control flow to slow, we can exit early.
-      if (!slow.is_linked()) return result;
-      done.Jump(&result);
-
-    } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
-      Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
-      // Only generate the fast case for locals that rewrite to slots.
-      // This rules out argument loads because eval forces arguments
-      // access to be through the arguments object.
-      if (potential_slot != NULL) {
-        // Allocate a fresh register to use as a temp in
-        // ContextSlotOperandCheckExtensions and to hold the result
-        // value.
-        result = allocator()->Allocate();
-        ASSERT(result.is_valid());
-        __ mov(result.reg(),
-               ContextSlotOperandCheckExtensions(potential_slot,
-                                                 result,
-                                                 &slow));
-        if (potential_slot->var()->mode() == Variable::CONST) {
-          __ cmp(result.reg(), Factory::the_hole_value());
-          done.Branch(not_equal, &result);
-          __ mov(result.reg(), Factory::undefined_value());
-        }
-        // There is always control flow to slow from
-        // ContextSlotOperandCheckExtensions so we have to jump around
-        // it.
-        done.Jump(&result);
-      }
-    }
+    // Generate fast case for loading from slots that correspond to
+    // local/global variables or arguments unless they are shadowed by
+    // eval-introduced bindings.
+    EmitDynamicLoadFromSlotFastCase(slot,
+                                    typeof_state,
+                                    &result,
+                                    &slow,
+                                    &done);
 
     slow.Bind();
     // A runtime call is inevitable.  We eagerly sync frame elements
@@ -4929,6 +4899,68 @@
 }
 
 
+void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
+                                                    TypeofState typeof_state,
+                                                    Result* result,
+                                                    JumpTarget* slow,
+                                                    JumpTarget* done) {
+  // Generate fast-case code for variables that might be shadowed by
+  // eval-introduced variables.  Eval is used a lot without
+  // introducing variables.  In those cases, we do not want to
+  // perform a runtime call for all variables in the scope
+  // containing the eval.
+  if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
+    *result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
+    done->Jump(result);
+
+  } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
+    Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
+    Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
+    if (potential_slot != NULL) {
+      // Generate fast case for locals that rewrite to slots.
+      // Allocate a fresh register to use as a temp in
+      // ContextSlotOperandCheckExtensions and to hold the result
+      // value.
+      *result = allocator()->Allocate();
+      ASSERT(result->is_valid());
+      __ mov(result->reg(),
+             ContextSlotOperandCheckExtensions(potential_slot, *result, slow));
+      if (potential_slot->var()->mode() == Variable::CONST) {
+        __ cmp(result->reg(), Factory::the_hole_value());
+        done->Branch(not_equal, result);
+        __ mov(result->reg(), Factory::undefined_value());
+      }
+      done->Jump(result);
+    } else if (rewrite != NULL) {
+      // Generate fast case for loads from the arguments object.
+      Property* property = rewrite->AsProperty();
+      if (property != NULL) {
+        VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
+        Literal* key_literal = property->key()->AsLiteral();
+        if (obj_proxy != NULL &&
+            key_literal != NULL &&
+            obj_proxy->IsArguments() &&
+            key_literal->handle()->IsSmi()) {
+          // Load arguments object if there are no eval-introduced
+          // variables. Then load the argument from the arguments
+          // object using keyed load.
+          Result arguments = allocator()->Allocate();
+          ASSERT(arguments.is_valid());
+          __ mov(arguments.reg(),
+                 ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
+                                                   arguments,
+                                                   slow));
+          frame_->Push(&arguments);
+          frame_->Push(key_literal->handle());
+          *result = EmitKeyedLoad();
+          done->Jump(result);
+        }
+      }
+    }
+  }
+}
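
The semantics that force the slow path: a binding introduced by eval at runtime must shadow the statically known slot, so the fast case may only load directly after verifying that no context extensions exist. A sloppy-mode sketch:

function f(x) {
  eval("var x = 'shadow'");  // introduces a binding at runtime
  return x;                  // must observe 'shadow', not the parameter
}
print(f('arg'));             // 'shadow'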
+
+
 void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
   if (slot->type() == Slot::LOOKUP) {
     ASSERT(slot->var()->is_dynamic());
@@ -5765,59 +5797,26 @@
     // ----------------------------------
     // JavaScript examples:
     //
-    //  with (obj) foo(1, 2, 3)  // foo is in obj
+    //  with (obj) foo(1, 2, 3)  // foo may be in obj.
     //
     //  function f() {};
     //  function g() {
     //    eval(...);
-    //    f();  // f could be in extension object
+    //    f();  // f could be in extension object.
     //  }
     // ----------------------------------
 
-    JumpTarget slow;
-    JumpTarget done;
-
-    // Generate fast-case code for variables that might be shadowed by
-    // eval-introduced variables.  Eval is used a lot without
-    // introducing variables.  In those cases, we do not want to
-    // perform a runtime call for all variables in the scope
-    // containing the eval.
+    JumpTarget slow, done;
     Result function;
-    if (var->mode() == Variable::DYNAMIC_GLOBAL) {
-      function = LoadFromGlobalSlotCheckExtensions(var->slot(),
-                                                   NOT_INSIDE_TYPEOF,
-                                                   &slow);
-      frame_->Push(&function);
-      LoadGlobalReceiver();
-      done.Jump();
 
-    } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
-      Slot* potential_slot = var->local_if_not_shadowed()->slot();
-      // Only generate the fast case for locals that rewrite to slots.
-      // This rules out argument loads because eval forces arguments
-      // access to be through the arguments object.
-      if (potential_slot != NULL) {
-        // Allocate a fresh register to use as a temp in
-        // ContextSlotOperandCheckExtensions and to hold the result
-        // value.
-        function = allocator()->Allocate();
-        ASSERT(function.is_valid());
-        __ mov(function.reg(),
-               ContextSlotOperandCheckExtensions(potential_slot,
-                                                 function,
-                                                 &slow));
-        JumpTarget push_function_and_receiver;
-        if (potential_slot->var()->mode() == Variable::CONST) {
-          __ cmp(function.reg(), Factory::the_hole_value());
-          push_function_and_receiver.Branch(not_equal, &function);
-          __ mov(function.reg(), Factory::undefined_value());
-        }
-        push_function_and_receiver.Bind(&function);
-        frame_->Push(&function);
-        LoadGlobalReceiver();
-        done.Jump();
-      }
-    }
+    // Generate fast case for loading functions from slots that
+    // correspond to local/global variables or arguments unless they
+    // are shadowed by eval-introduced bindings.
+    EmitDynamicLoadFromSlotFastCase(var->slot(),
+                                    NOT_INSIDE_TYPEOF,
+                                    &function,
+                                    &slow,
+                                    &done);
 
     slow.Bind();
     // Enter the runtime system to load the function from the context.
@@ -5839,7 +5838,18 @@
     ASSERT(!allocator()->is_used(edx));
     frame_->EmitPush(edx);
 
-    done.Bind();
+    // If fast case code has been generated, emit code to push the
+    // function and receiver and have the slow path jump around this
+    // code.
+    if (done.is_linked()) {
+      JumpTarget call;
+      call.Jump();
+      done.Bind(&function);
+      frame_->Push(&function);
+      LoadGlobalReceiver();
+      call.Bind();
+    }
+
     // Call the function.
     CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
 
@@ -6634,16 +6644,6 @@
 };
 
 
-// Return a position of the element at |index_as_smi| + |additional_offset|
-// in FixedArray pointer to which is held in |array|.  |index_as_smi| is Smi.
-static Operand ArrayElement(Register array,
-                            Register index_as_smi,
-                            int additional_offset = 0) {
-  int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
-  return FieldOperand(array, index_as_smi, times_half_pointer_size, offset);
-}
-
-
 void DeferredSearchCache::Generate() {
   Label first_loop, search_further, second_loop, cache_miss;
 
@@ -6660,11 +6660,11 @@
   __ cmp(Operand(dst_), Immediate(kEntriesIndexSmi));
   __ j(less, &search_further);
 
-  __ cmp(key_, ArrayElement(cache_, dst_));
+  __ cmp(key_, CodeGenerator::FixedArrayElementOperand(cache_, dst_));
   __ j(not_equal, &first_loop);
 
   __ mov(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
-  __ mov(dst_, ArrayElement(cache_, dst_, 1));
+  __ mov(dst_, CodeGenerator::FixedArrayElementOperand(cache_, dst_, 1));
   __ jmp(exit_label());
 
   __ bind(&search_further);
@@ -6678,11 +6678,11 @@
   __ cmp(dst_, FieldOperand(cache_, JSFunctionResultCache::kFingerOffset));
   __ j(less_equal, &cache_miss);
 
-  __ cmp(key_, ArrayElement(cache_, dst_));
+  __ cmp(key_, CodeGenerator::FixedArrayElementOperand(cache_, dst_));
   __ j(not_equal, &second_loop);
 
   __ mov(FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_);
-  __ mov(dst_, ArrayElement(cache_, dst_, 1));
+  __ mov(dst_, CodeGenerator::FixedArrayElementOperand(cache_, dst_, 1));
   __ jmp(exit_label());
 
   __ bind(&cache_miss);
@@ -6730,7 +6730,7 @@
   __ pop(ebx);  // restore the key
   __ mov(FieldOperand(ecx, JSFunctionResultCache::kFingerOffset), edx);
   // Store key.
-  __ mov(ArrayElement(ecx, edx), ebx);
+  __ mov(CodeGenerator::FixedArrayElementOperand(ecx, edx), ebx);
   __ RecordWrite(ecx, 0, ebx, edx);
 
   // Store value.
@@ -6738,7 +6738,7 @@
   __ mov(edx, FieldOperand(ecx, JSFunctionResultCache::kFingerOffset));
   __ add(Operand(edx), Immediate(Smi::FromInt(1)));
   __ mov(ebx, eax);
-  __ mov(ArrayElement(ecx, edx), ebx);
+  __ mov(CodeGenerator::FixedArrayElementOperand(ecx, edx), ebx);
   __ RecordWrite(ecx, 0, ebx, edx);
 
   if (!dst_.is(eax)) {
@@ -6785,11 +6785,11 @@
   // tmp.reg() now holds finger offset as a smi.
   ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
   __ mov(tmp.reg(), FieldOperand(cache.reg(),
-                    JSFunctionResultCache::kFingerOffset));
-  __ cmp(key.reg(), ArrayElement(cache.reg(), tmp.reg()));
+                                 JSFunctionResultCache::kFingerOffset));
+  __ cmp(key.reg(), FixedArrayElementOperand(cache.reg(), tmp.reg()));
   deferred->Branch(not_equal);
 
-  __ mov(tmp.reg(), ArrayElement(cache.reg(), tmp.reg(), 1));
+  __ mov(tmp.reg(), FixedArrayElementOperand(cache.reg(), tmp.reg(), 1));
 
   deferred->BindExit();
   frame_->Push(&tmp);
@@ -6888,14 +6888,8 @@
   deferred->Branch(not_zero);
 
   // Bring addresses into index1 and index2.
-  __ lea(index1.reg(), FieldOperand(tmp1.reg(),
-                                    index1.reg(),
-                                    times_half_pointer_size,  // index1 is Smi
-                                    FixedArray::kHeaderSize));
-  __ lea(index2.reg(), FieldOperand(tmp1.reg(),
-                                    index2.reg(),
-                                    times_half_pointer_size,  // index2 is Smi
-                                    FixedArray::kHeaderSize));
+  __ lea(index1.reg(), FixedArrayElementOperand(tmp1.reg(), index1.reg()));
+  __ lea(index2.reg(), FixedArrayElementOperand(tmp1.reg(), index2.reg()));
 
   // Swap elements.
   __ mov(object.reg(), Operand(index1.reg(), 0));
@@ -8768,11 +8762,7 @@
     deferred->Branch(not_equal);
 
     // Store the value.
-    __ mov(Operand(tmp.reg(),
-                   key.reg(),
-                   times_2,
-                   FixedArray::kHeaderSize - kHeapObjectTag),
-           result.reg());
+    __ mov(FixedArrayElementOperand(tmp.reg(), key.reg()), result.reg());
     __ IncrementCounter(&Counters::keyed_store_inline, 1);
 
     deferred->BindExit();
@@ -9074,7 +9064,7 @@
   __ mov(ecx, Operand(esp, 3 * kPointerSize));
   __ mov(eax, Operand(esp, 2 * kPointerSize));
   ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
-  __ mov(ecx, FieldOperand(ecx, eax, times_2, FixedArray::kHeaderSize));
+  __ mov(ecx, CodeGenerator::FixedArrayElementOperand(ecx, eax));
   __ cmp(ecx, Factory::undefined_value());
   __ j(equal, &slow_case);
 
@@ -10296,6 +10286,11 @@
   Label done, right_exponent, normal_exponent;
   Register scratch = ebx;
   Register scratch2 = edi;
+  if (type_info.IsInteger32() && CpuFeatures::IsEnabled(SSE2)) {
+    CpuFeatures::Scope scope(SSE2);
+    __ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset));
+    return;
+  }
   if (!type_info.IsInteger32() || !use_sse3) {
     // Get exponent word.
     __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
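
When type feedback proves the heap number already holds an int32 value, a single cvttsd2si (truncate toward zero) replaces the exponent manipulation below. At the JS level this is ToInt32 applied to an already-integral value:

print(42.0 | 0);  // 42 -- truncation is exact for integral doubles
print(-7.0 | 0);  // -7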
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index 5967338..e00bec7 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -28,7 +28,9 @@
 #ifndef V8_IA32_CODEGEN_IA32_H_
 #define V8_IA32_CODEGEN_IA32_H_
 
+#include "ast.h"
 #include "ic-inl.h"
+#include "jump-target-heavy.h"
 
 namespace v8 {
 namespace internal {
@@ -343,6 +345,15 @@
   // expected arguments. Otherwise return -1.
   static int InlineRuntimeCallArgumentsCount(Handle<String> name);
 
+  // Return the operand for the element at |index_as_smi| + |additional_offset|
+  // in the FixedArray whose pointer is held in |array|.  |index_as_smi| is a Smi.
+  static Operand FixedArrayElementOperand(Register array,
+                                          Register index_as_smi,
+                                          int additional_offset = 0) {
+    int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
+    return FieldOperand(array, index_as_smi, times_half_pointer_size, offset);
+  }
+
  private:
   // Construction/Destruction
   explicit CodeGenerator(MacroAssembler* masm);
@@ -454,6 +465,16 @@
                                            TypeofState typeof_state,
                                            JumpTarget* slow);
 
+  // Support for loading from local/global variables and arguments
+  // whose location is known unless they are shadowed by
+  // eval-introduced bindings. Generates no code for unsupported slot
+  // types and therefore expects to fall through to the slow jump target.
+  void EmitDynamicLoadFromSlotFastCase(Slot* slot,
+                                       TypeofState typeof_state,
+                                       Result* result,
+                                       JumpTarget* slow,
+                                       JumpTarget* done);
+
   // Store the value on top of the expression stack into a slot, leaving the
   // value in place.
   void StoreToSlot(Slot* slot, InitState init_state);
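
The arithmetic behind FixedArrayElementOperand above, modeled in JavaScript: a smi index is the value shifted left by one, so scaling it by times_half_pointer_size (2) yields value * kPointerSize. Constants below assume ia32 (4-byte pointers, 8-byte FixedArray header, heap-object tag 1):

var kPointerSize = 4, kFixedArrayHeaderSize = 8, kHeapObjectTag = 1;
function fixedArrayElementOffset(index, additionalOffset) {
  var indexAsSmi = index << 1;  // smi encoding: value << kSmiTagSize
  return indexAsSmi * 2         // times_half_pointer_size
      + kFixedArrayHeaderSize
      + (additionalOffset || 0) * kPointerSize
      - kHeapObjectTag;         // FieldOperand untags the object pointer
}
print(fixedArrayElementOffset(0, 0));  // 7 -- element 0, tagged-pointer relative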
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index bc7a33c..4929c8a 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -868,7 +868,7 @@
   // ecx: key (a smi)
   // edx: receiver
   // edi: FixedArray receiver->elements
-  __ mov(FieldOperand(edi, ecx, times_2, FixedArray::kHeaderSize), eax);
+  __ mov(CodeGenerator::FixedArrayElementOperand(edi, ecx), eax);
   // Update write barrier for the elements array address.
   __ mov(edx, Operand(eax));
   __ RecordWrite(edi, 0, edx, ecx);
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index fdf3b9f..d9dddd6 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -51,7 +51,7 @@
  * - esp : points to tip of C stack.
  * - ecx : points to tip of backtrack stack
  *
- * The registers eax, ebx and ecx are free to use for computations.
+ * The registers eax and ebx are free to use for computations.
  *
  * Each call to a public method should retain this convention.
  * The stack will have the following structure:
@@ -72,8 +72,6 @@
  *       - backup of caller ebx
  *       - Offset of location before start of input (effectively character
  *         position -1). Used to initialize capture registers to a non-position.
- *       - Boolean at start (if 1, we are starting at the start of the string,
- *         otherwise 0)
  *       - register 0  ebp[-4]  (Only positions must be stored in the first
  *       - register 1  ebp[-8]   num_saved_registers_ registers)
  *       - ...
@@ -178,8 +176,8 @@
 void RegExpMacroAssemblerIA32::CheckAtStart(Label* on_at_start) {
   Label not_at_start;
   // Did we start the match at the start of the string at all?
-  __ cmp(Operand(ebp, kAtStart), Immediate(0));
-  BranchOrBacktrack(equal, &not_at_start);
+  __ cmp(Operand(ebp, kStartIndex), Immediate(0));
+  BranchOrBacktrack(not_equal, &not_at_start);
   // If we did, are we still at the start of the input?
   __ lea(eax, Operand(esi, edi, times_1, 0));
   __ cmp(eax, Operand(ebp, kInputStart));
@@ -190,8 +188,8 @@
 
 void RegExpMacroAssemblerIA32::CheckNotAtStart(Label* on_not_at_start) {
   // Did we start the match at the start of the string at all?
-  __ cmp(Operand(ebp, kAtStart), Immediate(0));
-  BranchOrBacktrack(equal, on_not_at_start);
+  __ cmp(Operand(ebp, kStartIndex), Immediate(0));
+  BranchOrBacktrack(not_equal, on_not_at_start);
   // If we did, are we still at the start of the input?
   __ lea(eax, Operand(esi, edi, times_1, 0));
   __ cmp(eax, Operand(ebp, kInputStart));
@@ -209,6 +207,15 @@
                                                int cp_offset,
                                                Label* on_failure,
                                                bool check_end_of_string) {
+#ifdef DEBUG
+  // If input is ASCII, callers should not even bother calling here when the
+  // string to match contains a non-ASCII character.
+  if (mode_ == ASCII) {
+    for (int i = 0; i < str.length(); i++) {
+      ASSERT(str[i] <= String::kMaxAsciiCharCodeU);
+    }
+  }
+#endif
   int byte_length = str.length() * char_size();
   int byte_offset = cp_offset * char_size();
   if (check_end_of_string) {
@@ -222,14 +229,56 @@
     on_failure = &backtrack_label_;
   }
 
-  for (int i = 0; i < str.length(); i++) {
+  // Do one character test first to minimize loading for the case that
+  // we don't match at all (loading more than one character introduces the
+  // chance of reading unaligned and reading across cache boundaries).
+  // If the first character matches, expect a larger chance of matching the
+  // string, and start loading more characters at a time.
+  if (mode_ == ASCII) {
+    __ cmpb(Operand(esi, edi, times_1, byte_offset),
+            static_cast<int8_t>(str[0]));
+  } else {
+    // Don't use a 16-bit immediate. The size-changing prefix throws off
+    // pre-decoding.
+    __ movzx_w(eax,
+               Operand(esi, edi, times_1, byte_offset));
+    __ cmp(eax, static_cast<int32_t>(str[0]));
+  }
+  BranchOrBacktrack(not_equal, on_failure);
+
+  __ lea(ebx, Operand(esi, edi, times_1, 0));
+  for (int i = 1, n = str.length(); i < n;) {
     if (mode_ == ASCII) {
-      __ cmpb(Operand(esi, edi, times_1, byte_offset + i),
-              static_cast<int8_t>(str[i]));
+      if (i <= n - 4) {
+        int combined_chars =
+            (static_cast<uint32_t>(str[i + 0]) << 0) |
+            (static_cast<uint32_t>(str[i + 1]) << 8) |
+            (static_cast<uint32_t>(str[i + 2]) << 16) |
+            (static_cast<uint32_t>(str[i + 3]) << 24);
+        __ cmp(Operand(ebx, byte_offset + i), Immediate(combined_chars));
+        i += 4;
+      } else {
+        __ cmpb(Operand(ebx, byte_offset + i),
+                static_cast<int8_t>(str[i]));
+        i += 1;
+      }
     } else {
       ASSERT(mode_ == UC16);
-      __ cmpw(Operand(esi, edi, times_1, byte_offset + i * sizeof(uc16)),
-              Immediate(str[i]));
+      if (i <= n - 2) {
+        __ cmp(Operand(ebx, byte_offset + i * sizeof(uc16)),
+               Immediate(*reinterpret_cast<const int*>(&str[i])));
+        i += 2;
+      } else {
+        // Avoid a 16-bit immediate operation.  It uses the length-changing
+        // 0x66 prefix, which causes pre-decoder misprediction and pipeline
+        // stalls.  See
+        // "Intel(R) 64 and IA-32 Architectures Optimization Reference Manual"
+        // (248966.pdf), section 3.4.2.3, "Length-Changing Prefixes (LCP)".
+        __ movzx_w(eax,
+                   Operand(ebx, byte_offset + i * sizeof(uc16)));
+        __ cmp(eax, static_cast<int32_t>(str[i]));
+        i += 1;
+      }
     }
     BranchOrBacktrack(not_equal, on_failure);
   }
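
The four-character compare packs its 32-bit immediate little-endian, matching x86 memory order, so one cmp replaces four byte compares. The packing, modeled in JavaScript:

function combine4(str, i) {
  return (str.charCodeAt(i)     << 0)  |
         (str.charCodeAt(i + 1) << 8)  |
         (str.charCodeAt(i + 2) << 16) |
         (str.charCodeAt(i + 3) << 24);
}
print(combine4('bcde', 0).toString(16));  // 65646362 -- 'b' in the low byte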
@@ -625,7 +674,6 @@
   __ push(edi);
   __ push(ebx);  // Callee-save on MacOS.
   __ push(Immediate(0));  // Make room for "input start - 1" constant.
-  __ push(Immediate(0));  // Make room for "at start" constant.
 
   // Check if we have space on the stack for registers.
   Label stack_limit_hit;
@@ -677,14 +725,6 @@
   // position registers.
   __ mov(Operand(ebp, kInputStartMinusOne), eax);
 
-  // Determine whether the start index is zero, that is at the start of the
-  // string, and store that value in a local variable.
-  __ xor_(Operand(ecx), ecx);  // setcc only operates on cl (lower byte of ecx).
-  // Register ebx still holds -stringIndex.
-  __ test(ebx, Operand(ebx));
-  __ setcc(zero, ecx);  // 1 if 0 (start of string), 0 if positive.
-  __ mov(Operand(ebp, kAtStart), ecx);
-
   if (num_saved_registers_ > 0) {  // Always is, if generated from a regexp.
     // Fill saved registers with initial value = start offset - 1
     // Fill in stack push order, to avoid accessing across an unwritten
@@ -712,8 +752,8 @@
   __ mov(backtrack_stackpointer(), Operand(ebp, kStackHighEnd));
   // Load previous char as initial value of current-character.
   Label at_start;
-  __ cmp(Operand(ebp, kAtStart), Immediate(0));
-  __ j(not_equal, &at_start);
+  __ cmp(Operand(ebp, kStartIndex), Immediate(0));
+  __ j(equal, &at_start);
   LoadCurrentCharacterUnchecked(-1, 1);  // Load previous char.
   __ jmp(&start_label_);
   __ bind(&at_start);
diff --git a/src/ia32/regexp-macro-assembler-ia32.h b/src/ia32/regexp-macro-assembler-ia32.h
index 823bc03..8b8eeed 100644
--- a/src/ia32/regexp-macro-assembler-ia32.h
+++ b/src/ia32/regexp-macro-assembler-ia32.h
@@ -132,9 +132,8 @@
   static const int kBackup_edi = kBackup_esi - kPointerSize;
   static const int kBackup_ebx = kBackup_edi - kPointerSize;
   static const int kInputStartMinusOne = kBackup_ebx - kPointerSize;
-  static const int kAtStart = kInputStartMinusOne - kPointerSize;
   // First register address. Following registers are below it on the stack.
-  static const int kRegisterZero = kAtStart - kPointerSize;
+  static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
 
   // Initial size of code buffer.
   static const size_t kRegExpCodeSize = 1024;
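
With the kAtStart slot gone, "did the match start at the beginning of the string?" is derived from kStartIndex directly: a start index of zero means at-start. The observable behavior this supports, sketched:

var re = /^a/g;        // ^ only matches at the true start (non-multiline)
re.lastIndex = 1;
print(re.exec('aa'));  // null -- an attempt from index 1 is never at-start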
diff --git a/src/ia32/virtual-frame-ia32.h b/src/ia32/virtual-frame-ia32.h
index 14fe466..51a802c 100644
--- a/src/ia32/virtual-frame-ia32.h
+++ b/src/ia32/virtual-frame-ia32.h
@@ -28,9 +28,10 @@
 #ifndef V8_IA32_VIRTUAL_FRAME_IA32_H_
 #define V8_IA32_VIRTUAL_FRAME_IA32_H_
 
-#include "type-info.h"
+#include "codegen.h"
 #include "register-allocator.h"
 #include "scopes.h"
+#include "type-info.h"
 
 namespace v8 {
 namespace internal {
@@ -97,23 +98,16 @@
     return register_locations_[num];
   }
 
-  int register_location(Register reg) {
-    return register_locations_[RegisterAllocator::ToNumber(reg)];
-  }
+  inline int register_location(Register reg);
 
-  void set_register_location(Register reg, int index) {
-    register_locations_[RegisterAllocator::ToNumber(reg)] = index;
-  }
+  inline void set_register_location(Register reg, int index);
 
   bool is_used(int num) {
     ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
     return register_locations_[num] != kIllegalIndex;
   }
 
-  bool is_used(Register reg) {
-    return register_locations_[RegisterAllocator::ToNumber(reg)]
-        != kIllegalIndex;
-  }
+  inline bool is_used(Register reg);
 
   // Add extra in-memory elements to the top of the frame to match an actual
   // frame (eg, the frame after an exception handler is pushed).  No code is
@@ -217,10 +211,7 @@
   void SetElementAt(int index, Result* value);
 
   // Set a frame element to a constant.  The index is frame-top relative.
-  void SetElementAt(int index, Handle<Object> value) {
-    Result temp(value);
-    SetElementAt(index, &temp);
-  }
+  inline void SetElementAt(int index, Handle<Object> value);
 
   void PushElementAt(int index) {
     PushFrameSlotAt(element_count() - index - 1);
@@ -315,10 +306,7 @@
 
   // Call stub given the number of arguments it expects on (and
   // removes from) the stack.
-  Result CallStub(CodeStub* stub, int arg_count) {
-    PrepareForCall(arg_count, arg_count);
-    return RawCallStub(stub);
-  }
+  inline Result CallStub(CodeStub* stub, int arg_count);
 
   // Call stub that takes a single argument passed in eax.  The
   // argument is given as a result which does not have to be eax or
@@ -473,12 +461,9 @@
   int register_locations_[RegisterAllocator::kNumRegisters];
 
   // The number of frame-allocated locals and parameters respectively.
-  int parameter_count() {
-    return cgen()->scope()->num_parameters();
-  }
-  int local_count() {
-    return cgen()->scope()->num_stack_slots();
-  }
+  inline int parameter_count();
+
+  inline int local_count();
 
   // The index of the element that is at the processor's frame pointer
   // (the ebp register).  The parameters, receiver, and return address
diff --git a/src/jump-target-heavy.cc b/src/jump-target-heavy.cc
index 85620a2..468cf4a 100644
--- a/src/jump-target-heavy.cc
+++ b/src/jump-target-heavy.cc
@@ -35,6 +35,9 @@
 namespace internal {
 
 
+bool JumpTarget::compiling_deferred_code_ = false;
+
+
 void JumpTarget::Jump(Result* arg) {
   ASSERT(cgen()->has_valid_frame());
 
@@ -360,4 +363,64 @@
   }
 }
 
+
+void JumpTarget::Unuse() {
+  reaching_frames_.Clear();
+  merge_labels_.Clear();
+  entry_frame_ = NULL;
+  entry_label_.Unuse();
+}
+
+
+void JumpTarget::AddReachingFrame(VirtualFrame* frame) {
+  ASSERT(reaching_frames_.length() == merge_labels_.length());
+  ASSERT(entry_frame_ == NULL);
+  Label fresh;
+  merge_labels_.Add(fresh);
+  reaching_frames_.Add(frame);
+}
+
+
+// -------------------------------------------------------------------------
+// BreakTarget implementation.
+
+void BreakTarget::set_direction(Directionality direction) {
+  JumpTarget::set_direction(direction);
+  ASSERT(cgen()->has_valid_frame());
+  expected_height_ = cgen()->frame()->height();
+}
+
+
+void BreakTarget::CopyTo(BreakTarget* destination) {
+  ASSERT(destination != NULL);
+  destination->direction_ = direction_;
+  destination->reaching_frames_.Rewind(0);
+  destination->reaching_frames_.AddAll(reaching_frames_);
+  destination->merge_labels_.Rewind(0);
+  destination->merge_labels_.AddAll(merge_labels_);
+  destination->entry_frame_ = entry_frame_;
+  destination->entry_label_ = entry_label_;
+  destination->expected_height_ = expected_height_;
+}
+
+
+void BreakTarget::Branch(Condition cc, Hint hint) {
+  ASSERT(cgen()->has_valid_frame());
+
+  int count = cgen()->frame()->height() - expected_height_;
+  if (count > 0) {
+    // We negate and branch here rather than using DoBranch's negate
+    // and branch.  This gives us a hook to remove statement state
+    // from the frame.
+    JumpTarget fall_through;
+    // Branch to fall through will not negate, because it is a
+    // forward-only target.
+    fall_through.Branch(NegateCondition(cc), NegateHint(hint));
+    Jump();  // May emit merge code here.
+    fall_through.Bind();
+  } else {
+    DoBranch(cc, hint);
+  }
+}
+
 } }  // namespace v8::internal
diff --git a/src/jump-target-heavy.h b/src/jump-target-heavy.h
new file mode 100644
index 0000000..b923fe5
--- /dev/null
+++ b/src/jump-target-heavy.h
@@ -0,0 +1,242 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_JUMP_TARGET_HEAVY_H_
+#define V8_JUMP_TARGET_HEAVY_H_
+
+#include "macro-assembler.h"
+#include "zone-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class FrameElement;
+class Result;
+class VirtualFrame;
+
+// -------------------------------------------------------------------------
+// Jump targets
+//
+// A jump target is an abstraction of a basic-block entry in generated
+// code.  It collects all the virtual frames reaching the block by
+// forward jumps and pairs them with labels for the merge code along
+// all forward-reaching paths.  When bound, an expected frame for the
+// block is determined and code is generated to merge to the expected
+// frame.  For backward jumps, the merge code is generated at the edge
+// leaving the predecessor block.
+//
+// A jump target must have been reached via control flow (either by
+// jumping, branching, or falling through) at the time it is bound.
+// In particular, this means that at least one of the control-flow
+// graph edges reaching the target must be a forward edge.
+
+class JumpTarget : public ZoneObject {  // Shadows are dynamically allocated.
+ public:
+  // Forward-only jump targets can only be reached by forward CFG edges.
+  enum Directionality { FORWARD_ONLY, BIDIRECTIONAL };
+
+  // Construct a jump target used to generate code and to provide
+  // access to a current frame.
+  explicit JumpTarget(Directionality direction)
+      : direction_(direction),
+        reaching_frames_(0),
+        merge_labels_(0),
+        entry_frame_(NULL) {
+  }
+
+  // Construct a jump target.
+  JumpTarget()
+      : direction_(FORWARD_ONLY),
+        reaching_frames_(0),
+        merge_labels_(0),
+        entry_frame_(NULL) {
+  }
+
+  virtual ~JumpTarget() {}
+
+  // Set the direction of the jump target.
+  virtual void set_direction(Directionality direction) {
+    direction_ = direction;
+  }
+
+  // Treat the jump target as a fresh one.  The state is reset.
+  void Unuse();
+
+  inline CodeGenerator* cgen();
+
+  Label* entry_label() { return &entry_label_; }
+
+  VirtualFrame* entry_frame() const { return entry_frame_; }
+  void set_entry_frame(VirtualFrame* frame) {
+    entry_frame_ = frame;
+  }
+
+  // Predicates testing the state of the encapsulated label.
+  bool is_bound() const { return entry_label_.is_bound(); }
+  bool is_linked() const {
+    return !is_bound() && !reaching_frames_.is_empty();
+  }
+  bool is_unused() const {
+    // This is !is_bound() && !is_linked().
+    return !is_bound() && reaching_frames_.is_empty();
+  }
+
+  // Emit a jump to the target.  There must be a current frame at the
+  // jump and there will be no current frame after the jump.
+  virtual void Jump();
+  virtual void Jump(Result* arg);
+
+  // Emit a conditional branch to the target.  There must be a current
+  // frame at the branch.  The current frame will fall through to the
+  // code after the branch.  The arg is a result that is live both at
+  // the target and the fall-through.
+  virtual void Branch(Condition cc, Hint hint = no_hint);
+  virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
+  virtual void Branch(Condition cc,
+                      Result* arg0,
+                      Result* arg1,
+                      Hint hint = no_hint);
+
+  // Bind a jump target.  If there is no current frame at the binding
+  // site, there must be at least one frame reaching via a forward
+  // jump.
+  virtual void Bind();
+  virtual void Bind(Result* arg);
+  virtual void Bind(Result* arg0, Result* arg1);
+
+  // Emit a call to a jump target.  There must be a current frame at
+  // the call.  The frame at the target is the same as the current
+  // frame except for an extra return address on top of it.  The frame
+  // after the call is the same as the frame before the call.
+  void Call();
+
+  static void set_compiling_deferred_code(bool flag) {
+    compiling_deferred_code_ = flag;
+  }
+
+ protected:
+  // Directionality flag set at initialization time.
+  Directionality direction_;
+
+  // A list of frames reaching this block via forward jumps.
+  ZoneList<VirtualFrame*> reaching_frames_;
+
+  // A parallel list of labels for merge code.
+  ZoneList<Label> merge_labels_;
+
+  // The frame used on entry to the block and expected at backward
+  // jumps to the block.  Set when the jump target is bound, but may
+  // or may not be set for forward-only blocks.
+  VirtualFrame* entry_frame_;
+
+  // The actual entry label of the block.
+  Label entry_label_;
+
+  // Implementations of Jump, Branch, and Bind with all arguments and
+  // return values using the virtual frame.
+  void DoJump();
+  void DoBranch(Condition cc, Hint hint);
+  void DoBind();
+
+ private:
+  static bool compiling_deferred_code_;
+
+  // Add a virtual frame reaching this labeled block via a forward jump,
+  // and a corresponding merge code label.
+  void AddReachingFrame(VirtualFrame* frame);
+
+  // Perform initialization required during entry frame computation
+  // after setting the virtual frame element at index in frame to be
+  // target.
+  inline void InitializeEntryElement(int index, FrameElement* target);
+
+  // Compute a frame to use for entry to this block.
+  void ComputeEntryFrame();
+
+  DISALLOW_COPY_AND_ASSIGN(JumpTarget);
+};
+
+
+// -------------------------------------------------------------------------
+// Break targets
+//
+// A break target is a jump target that can be used to break out of a
+// statement that keeps extra state on the stack (eg, for/in or
+// try/finally).  They know the expected stack height at the target
+// and will drop state from nested statements as part of merging.
+//
+// Break targets are used for return, break, and continue targets.
+
+class BreakTarget : public JumpTarget {
+ public:
+  // Construct a break target.
+  BreakTarget() {}
+
+  virtual ~BreakTarget() {}
+
+  // Set the direction of the break target.
+  virtual void set_direction(Directionality direction);
+
+  // Copy the state of this break target to the destination.  The
+  // lists of forward-reaching frames and merge-point labels are
+  // copied.  All virtual frame pointers are copied, not the
+  // pointed-to frames.  The previous state of the destination is
+  // overwritten, without deallocating pointed-to virtual frames.
+  void CopyTo(BreakTarget* destination);
+
+  // Emit a jump to the target.  There must be a current frame at the
+  // jump and there will be no current frame after the jump.
+  virtual void Jump();
+  virtual void Jump(Result* arg);
+
+  // Emit a conditional branch to the target.  There must be a current
+  // frame at the branch.  The current frame will fall through to the
+  // code after the branch.
+  virtual void Branch(Condition cc, Hint hint = no_hint);
+  virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
+
+  // Bind a break target.  If there is no current frame at the binding
+  // site, there must be at least one frame reaching via a forward
+  // jump.
+  virtual void Bind();
+  virtual void Bind(Result* arg);
+
+  // Setter for expected height.
+  void set_expected_height(int expected) { expected_height_ = expected; }
+
+ private:
+  // The expected height of the expression stack where the target will
+  // be bound, statically known at initialization time.
+  int expected_height_;
+
+  DISALLOW_COPY_AND_ASSIGN(BreakTarget);
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_JUMP_TARGET_HEAVY_H_
diff --git a/src/jump-target-light-inl.h b/src/jump-target-light-inl.h
index 8d6c3ac..0b4eee4 100644
--- a/src/jump-target-light-inl.h
+++ b/src/jump-target-light-inl.h
@@ -33,10 +33,20 @@
 namespace v8 {
 namespace internal {
 
-void JumpTarget::InitializeEntryElement(int index, FrameElement* target) {
-  UNIMPLEMENTED();
+// Construct a jump target.
+JumpTarget::JumpTarget(Directionality direction)
+    : entry_frame_set_(false),
+      entry_frame_(kInvalidVirtualFrameInitializer) {
 }
 
+JumpTarget::JumpTarget()
+    : entry_frame_set_(false),
+      entry_frame_(kInvalidVirtualFrameInitializer) {
+}
+
+
+BreakTarget::BreakTarget() { }
+
 } }  // namespace v8::internal
 
 #endif  // V8_JUMP_TARGET_LIGHT_INL_H_
diff --git a/src/jump-target-light.cc b/src/jump-target-light.cc
index befb430..76c3cb7 100644
--- a/src/jump-target-light.cc
+++ b/src/jump-target-light.cc
@@ -34,41 +34,6 @@
 namespace internal {
 
 
-void JumpTarget::Jump(Result* arg) {
-  UNIMPLEMENTED();
-}
-
-
-void JumpTarget::Branch(Condition cc, Result* arg, Hint hint) {
-  UNIMPLEMENTED();
-}
-
-
-void JumpTarget::Branch(Condition cc, Result* arg0, Result* arg1, Hint hint) {
-  UNIMPLEMENTED();
-}
-
-
-void BreakTarget::Branch(Condition cc, Result* arg, Hint hint) {
-  UNIMPLEMENTED();
-}
-
-
-void JumpTarget::Bind(Result* arg) {
-  UNIMPLEMENTED();
-}
-
-
-void JumpTarget::Bind(Result* arg0, Result* arg1) {
-  UNIMPLEMENTED();
-}
-
-
-void JumpTarget::ComputeEntryFrame() {
-  UNIMPLEMENTED();
-}
-
-
 DeferredCode::DeferredCode()
     : masm_(CodeGeneratorScope::Current()->masm()),
       statement_position_(masm_->current_statement_position()),
@@ -83,4 +48,62 @@
 #endif
 }
 
+
+// -------------------------------------------------------------------------
+// BreakTarget implementation.
+
+
+void BreakTarget::SetExpectedHeight() {
+  expected_height_ = cgen()->frame()->height();
+}
+
+
+void BreakTarget::Jump() {
+  ASSERT(cgen()->has_valid_frame());
+
+  int count = cgen()->frame()->height() - expected_height_;
+  if (count > 0) {
+    cgen()->frame()->Drop(count);
+  }
+  DoJump();
+}
+
+
+void BreakTarget::Branch(Condition cc, Hint hint) {
+  if (cc == al) {
+    Jump();
+    return;
+  }
+
+  ASSERT(cgen()->has_valid_frame());
+
+  int count = cgen()->frame()->height() - expected_height_;
+  if (count > 0) {
+    // We negate and branch here rather than using DoBranch's negate
+    // and branch.  This gives us a hook to remove statement state
+    // from the frame.
+    JumpTarget fall_through;
+    // Branch to fall through will not negate, because it is a
+    // forward-only target.
+    fall_through.Branch(NegateCondition(cc), NegateHint(hint));
+    // Emit merge code.
+    cgen()->frame()->Drop(count);
+    DoJump();
+    fall_through.Bind();
+  } else {
+    DoBranch(cc, hint);
+  }
+}
+
+
+void BreakTarget::Bind() {
+  if (cgen()->has_valid_frame()) {
+    int count = cgen()->frame()->height() - expected_height_;
+    if (count > 0) {
+      cgen()->frame()->Drop(count);
+    }
+  }
+  DoBind();
+}
+
 } }  // namespace v8::internal
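
What the Drop(count) merge code above corresponds to at the JS level: breaking out of a statement that keeps extra state on the expression stack (for-in enumeration state, try/finally handlers) must discard that state before jumping to the target. For example:

function firstKey(obj) {
  for (var k in obj) break;  // the break's merge code drops the for-in state
  return k;
}
print(firstKey({a: 1, b: 2}));  // 'a'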
diff --git a/src/jump-target-light.h b/src/jump-target-light.h
new file mode 100644
index 0000000..656ec75
--- /dev/null
+++ b/src/jump-target-light.h
@@ -0,0 +1,187 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_JUMP_TARGET_LIGHT_H_
+#define V8_JUMP_TARGET_LIGHT_H_
+
+#include "macro-assembler.h"
+#include "zone-inl.h"
+#include "virtual-frame.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class FrameElement;
+class Result;
+
+// -------------------------------------------------------------------------
+// Jump targets
+//
+// A jump target is an abstraction of a basic-block entry in generated
+// code.  It collects all the virtual frames reaching the block by
+// forward jumps and pairs them with labels for the merge code along
+// all forward-reaching paths.  When bound, an expected frame for the
+// block is determined and code is generated to merge to the expected
+// frame.  For backward jumps, the merge code is generated at the edge
+// leaving the predecessor block.
+//
+// A jump target must have been reached via control flow (either by
+// jumping, branching, or falling through) at the time it is bound.
+// In particular, this means that at least one of the control-flow
+// graph edges reaching the target must be a forward edge.
+
+class JumpTarget : public ZoneObject {  // Shadows are dynamically allocated.
+ public:
+  // Forward-only jump targets can only be reached by forward CFG edges.
+  enum Directionality { FORWARD_ONLY, BIDIRECTIONAL };
+
+  // Construct a jump target.
+  explicit inline JumpTarget(Directionality direction);
+
+  inline JumpTarget();
+
+  virtual ~JumpTarget() {}
+
+  void Unuse() {
+    entry_frame_set_ = false;
+    entry_label_.Unuse();
+  }
+
+  inline CodeGenerator* cgen();
+
+  const VirtualFrame* entry_frame() const {
+    return entry_frame_set_ ? &entry_frame_ : NULL;
+  }
+
+  void set_entry_frame(VirtualFrame* frame) {
+    entry_frame_ = *frame;
+    entry_frame_set_ = true;
+  }
+
+  // Predicates testing the state of the encapsulated label.
+  bool is_bound() const { return entry_label_.is_bound(); }
+  bool is_linked() const { return entry_label_.is_linked(); }
+  bool is_unused() const { return entry_label_.is_unused(); }
+
+  // Copy the state of this jump target to the destination.
+  inline void CopyTo(JumpTarget* destination) {
+    *destination = *this;
+  }
+
+  // Emit a jump to the target.  There must be a current frame at the
+  // jump and there will be no current frame after the jump.
+  virtual void Jump();
+
+  // Emit a conditional branch to the target.  There must be a current
+  // frame at the branch.  The current frame will fall through to the
+  // code after the branch.  The arg is a result that is live both at
+  // the target and the fall-through.
+  virtual void Branch(Condition cc, Hint hint = no_hint);
+
+  // Bind a jump target.  If there is no current frame at the binding
+  // site, there must be at least one frame reaching via a forward
+  // jump.
+  virtual void Bind();
+
+  // Emit a call to a jump target.  There must be a current frame at
+  // the call.  The frame at the target is the same as the current
+  // frame except for an extra return address on top of it.  The frame
+  // after the call is the same as the frame before the call.
+  void Call();
+
+ protected:
+  // Has an entry frame been found?
+  bool entry_frame_set_;
+
+  // The frame used on entry to the block and expected at backward
+  // jumps to the block.  Set the first time something branches to this
+  // jump target.
+  VirtualFrame entry_frame_;
+
+  // The actual entry label of the block.
+  Label entry_label_;
+
+  // Implementations of Jump, Branch, and Bind with all arguments and
+  // return values using the virtual frame.
+  void DoJump();
+  void DoBranch(Condition cc, Hint hint);
+  void DoBind();
+};
+
+
+// -------------------------------------------------------------------------
+// Break targets
+//
+// A break target is a jump target that can be used to break out of a
+// statement that keeps extra state on the stack (e.g., for/in or
+// try/finally).  It knows the expected stack height at the target
+// and will drop state from nested statements as part of merging.
+//
+// Break targets are used for return, break, and continue targets.
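+//
+// An informal sketch (identifiers are illustrative):
+//
+//   BreakTarget break_target;
+//   break_target.SetExpectedHeight();  // Record the current frame height.
+//   ...                                // Body may push extra state.
+//   break_target.Jump();               // Merge code drops the extra state.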
+
+class BreakTarget : public JumpTarget {
+ public:
+  // Construct a break target.
+  inline BreakTarget();
+
+  virtual ~BreakTarget() {}
+
+  // Copy the state of this jump target to the destination.
+  inline void CopyTo(BreakTarget* destination) {
+    *destination = *this;
+  }
+
+  // Emit a jump to the target.  There must be a current frame at the
+  // jump and there will be no current frame after the jump.
+  virtual void Jump();
+
+  // Emit a conditional branch to the target.  There must be a current
+  // frame at the branch.  The current frame will fall through to the
+  // code after the branch.
+  virtual void Branch(Condition cc, Hint hint = no_hint);
+
+  // Bind a break target.  If there is no current frame at the binding
+  // site, there must be at least one frame reaching via a forward
+  // jump.
+  virtual void Bind();
+
+  // Setter for expected height.
+  void set_expected_height(int expected) { expected_height_ = expected; }
+
+  // Uses the current frame to set the expected height.
+  void SetExpectedHeight();
+
+ private:
+  // The expected height of the expression stack where the target will
+  // be bound, statically known at initialization time.
+  int expected_height_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_JUMP_TARGET_LIGHT_H_
diff --git a/src/jump-target.cc b/src/jump-target.cc
index 8b29995..72aada8 100644
--- a/src/jump-target.cc
+++ b/src/jump-target.cc
@@ -37,17 +37,6 @@
 // -------------------------------------------------------------------------
 // JumpTarget implementation.
 
-bool JumpTarget::compiling_deferred_code_ = false;
-
-
-void JumpTarget::Unuse() {
-  reaching_frames_.Clear();
-  merge_labels_.Clear();
-  entry_frame_ = NULL;
-  entry_label_.Unuse();
-}
-
-
 void JumpTarget::Jump() {
   DoJump();
 }
@@ -63,58 +52,6 @@
 }
 
 
-void JumpTarget::AddReachingFrame(VirtualFrame* frame) {
-  ASSERT(reaching_frames_.length() == merge_labels_.length());
-  ASSERT(entry_frame_ == NULL);
-  Label fresh;
-  merge_labels_.Add(fresh);
-  reaching_frames_.Add(frame);
-}
-
-
-// -------------------------------------------------------------------------
-// BreakTarget implementation.
-
-void BreakTarget::set_direction(Directionality direction) {
-  JumpTarget::set_direction(direction);
-  ASSERT(cgen()->has_valid_frame());
-  expected_height_ = cgen()->frame()->height();
-}
-
-
-void BreakTarget::CopyTo(BreakTarget* destination) {
-  ASSERT(destination != NULL);
-  destination->direction_ = direction_;
-  destination->reaching_frames_.Rewind(0);
-  destination->reaching_frames_.AddAll(reaching_frames_);
-  destination->merge_labels_.Rewind(0);
-  destination->merge_labels_.AddAll(merge_labels_);
-  destination->entry_frame_ = entry_frame_;
-  destination->entry_label_ = entry_label_;
-  destination->expected_height_ = expected_height_;
-}
-
-
-void BreakTarget::Branch(Condition cc, Hint hint) {
-  ASSERT(cgen()->has_valid_frame());
-
-  int count = cgen()->frame()->height() - expected_height_;
-  if (count > 0) {
-    // We negate and branch here rather than using DoBranch's negate
-    // and branch.  This gives us a hook to remove statement state
-    // from the frame.
-    JumpTarget fall_through;
-    // Branch to fall through will not negate, because it is a
-    // forward-only target.
-    fall_through.Branch(NegateCondition(cc), NegateHint(hint));
-    Jump();  // May emit merge code here.
-    fall_through.Bind();
-  } else {
-    DoBranch(cc, hint);
-  }
-}
-
-
 // -------------------------------------------------------------------------
 // ShadowTarget implementation.
 
@@ -151,5 +88,4 @@
 #endif
 }
 
-
 } }  // namespace v8::internal
diff --git a/src/jump-target.h b/src/jump-target.h
index db523b5..a0d2686 100644
--- a/src/jump-target.h
+++ b/src/jump-target.h
@@ -28,216 +28,21 @@
 #ifndef V8_JUMP_TARGET_H_
 #define V8_JUMP_TARGET_H_
 
-#include "macro-assembler.h"
-#include "zone-inl.h"
+#if V8_TARGET_ARCH_IA32
+#include "jump-target-heavy.h"
+#elif V8_TARGET_ARCH_X64
+#include "jump-target-heavy.h"
+#elif V8_TARGET_ARCH_ARM
+#include "jump-target-light.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "jump-target-light.h"
+#else
+#error Unsupported target architecture.
+#endif
 
 namespace v8 {
 namespace internal {
 
-// Forward declarations.
-class FrameElement;
-class Result;
-class VirtualFrame;
-
-// -------------------------------------------------------------------------
-// Jump targets
-//
-// A jump target is an abstraction of a basic-block entry in generated
-// code.  It collects all the virtual frames reaching the block by
-// forward jumps and pairs them with labels for the merge code along
-// all forward-reaching paths.  When bound, an expected frame for the
-// block is determined and code is generated to merge to the expected
-// frame.  For backward jumps, the merge code is generated at the edge
-// leaving the predecessor block.
-//
-// A jump target must have been reached via control flow (either by
-// jumping, branching, or falling through) at the time it is bound.
-// In particular, this means that at least one of the control-flow
-// graph edges reaching the target must be a forward edge.
-
-class JumpTarget : public ZoneObject {  // Shadows are dynamically allocated.
- public:
-  // Forward-only jump targets can only be reached by forward CFG edges.
-  enum Directionality { FORWARD_ONLY, BIDIRECTIONAL };
-
-  // Construct a jump target used to generate code and to provide
-  // access to a current frame.
-  explicit JumpTarget(Directionality direction)
-      : direction_(direction),
-        reaching_frames_(0),
-        merge_labels_(0),
-        entry_frame_(NULL) {
-  }
-
-  // Construct a jump target.
-  JumpTarget()
-      : direction_(FORWARD_ONLY),
-        reaching_frames_(0),
-        merge_labels_(0),
-        entry_frame_(NULL) {
-  }
-
-  virtual ~JumpTarget() {}
-
-  // Set the direction of the jump target.
-  virtual void set_direction(Directionality direction) {
-    direction_ = direction;
-  }
-
-  // Treat the jump target as a fresh one.  The state is reset.
-  void Unuse();
-
-  inline CodeGenerator* cgen();
-
-  Label* entry_label() { return &entry_label_; }
-
-  VirtualFrame* entry_frame() const { return entry_frame_; }
-  void set_entry_frame(VirtualFrame* frame) {
-    entry_frame_ = frame;
-  }
-
-  // Predicates testing the state of the encapsulated label.
-  bool is_bound() const { return entry_label_.is_bound(); }
-  bool is_linked() const {
-    return !is_bound() && !reaching_frames_.is_empty();
-  }
-  bool is_unused() const {
-    // This is !is_bound() && !is_linked().
-    return !is_bound() && reaching_frames_.is_empty();
-  }
-
-  // Emit a jump to the target.  There must be a current frame at the
-  // jump and there will be no current frame after the jump.
-  virtual void Jump();
-  virtual void Jump(Result* arg);
-
-  // Emit a conditional branch to the target.  There must be a current
-  // frame at the branch.  The current frame will fall through to the
-  // code after the branch.  The arg is a result that is live both at
-  // the target and the fall-through.
-  virtual void Branch(Condition cc, Hint hint = no_hint);
-  virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
-  virtual void Branch(Condition cc,
-                      Result* arg0,
-                      Result* arg1,
-                      Hint hint = no_hint);
-
-  // Bind a jump target.  If there is no current frame at the binding
-  // site, there must be at least one frame reaching via a forward
-  // jump.
-  virtual void Bind();
-  virtual void Bind(Result* arg);
-  virtual void Bind(Result* arg0, Result* arg1);
-
-  // Emit a call to a jump target.  There must be a current frame at
-  // the call.  The frame at the target is the same as the current
-  // frame except for an extra return address on top of it.  The frame
-  // after the call is the same as the frame before the call.
-  void Call();
-
-  static void set_compiling_deferred_code(bool flag) {
-    compiling_deferred_code_ = flag;
-  }
-
- protected:
-  // Directionality flag set at initialization time.
-  Directionality direction_;
-
-  // A list of frames reaching this block via forward jumps.
-  ZoneList<VirtualFrame*> reaching_frames_;
-
-  // A parallel list of labels for merge code.
-  ZoneList<Label> merge_labels_;
-
-  // The frame used on entry to the block and expected at backward
-  // jumps to the block.  Set when the jump target is bound, but may
-  // or may not be set for forward-only blocks.
-  VirtualFrame* entry_frame_;
-
-  // The actual entry label of the block.
-  Label entry_label_;
-
-  // Implementations of Jump, Branch, and Bind with all arguments and
-  // return values using the virtual frame.
-  void DoJump();
-  void DoBranch(Condition cc, Hint hint);
-  void DoBind();
-
- private:
-  static bool compiling_deferred_code_;
-
-  // Add a virtual frame reaching this labeled block via a forward jump,
-  // and a corresponding merge code label.
-  void AddReachingFrame(VirtualFrame* frame);
-
-  // Perform initialization required during entry frame computation
-  // after setting the virtual frame element at index in frame to be
-  // target.
-  inline void InitializeEntryElement(int index, FrameElement* target);
-
-  // Compute a frame to use for entry to this block.
-  void ComputeEntryFrame();
-
-  DISALLOW_COPY_AND_ASSIGN(JumpTarget);
-};
-
-
-// -------------------------------------------------------------------------
-// Break targets
-//
-// A break target is a jump target that can be used to break out of a
-// statement that keeps extra state on the stack (eg, for/in or
-// try/finally).  They know the expected stack height at the target
-// and will drop state from nested statements as part of merging.
-//
-// Break targets are used for return, break, and continue targets.
-
-class BreakTarget : public JumpTarget {
- public:
-  // Construct a break target.
-  BreakTarget() {}
-
-  virtual ~BreakTarget() {}
-
-  // Set the direction of the break target.
-  virtual void set_direction(Directionality direction);
-
-  // Copy the state of this break target to the destination.  The
-  // lists of forward-reaching frames and merge-point labels are
-  // copied.  All virtual frame pointers are copied, not the
-  // pointed-to frames.  The previous state of the destination is
-  // overwritten, without deallocating pointed-to virtual frames.
-  void CopyTo(BreakTarget* destination);
-
-  // Emit a jump to the target.  There must be a current frame at the
-  // jump and there will be no current frame after the jump.
-  virtual void Jump();
-  virtual void Jump(Result* arg);
-
-  // Emit a conditional branch to the target.  There must be a current
-  // frame at the branch.  The current frame will fall through to the
-  // code after the branch.
-  virtual void Branch(Condition cc, Hint hint = no_hint);
-  virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
-
-  // Bind a break target.  If there is no current frame at the binding
-  // site, there must be at least one frame reaching via a forward
-  // jump.
-  virtual void Bind();
-  virtual void Bind(Result* arg);
-
-  // Setter for expected height.
-  void set_expected_height(int expected) { expected_height_ = expected; }
-
- private:
-  // The expected height of the expression stack where the target will
-  // be bound, statically known at initialization time.
-  int expected_height_;
-
-  DISALLOW_COPY_AND_ASSIGN(BreakTarget);
-};
-
-
 // -------------------------------------------------------------------------
 // Shadow break targets
 //
@@ -280,7 +85,6 @@
   DISALLOW_COPY_AND_ASSIGN(ShadowTarget);
 };
 
-
 } }  // namespace v8::internal
 
 #endif  // V8_JUMP_TARGET_H_
diff --git a/src/macros.py b/src/macros.py
index d6ba2ca..1533741 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -112,6 +112,11 @@
 macro IS_UNDETECTABLE(arg)      = (%_IsUndetectableObject(arg));
 macro FLOOR(arg)                = $floor(arg);
 
+# Macro for ECMAScript 5 queries of the type:
+# "Type(O) is object."
+# This is the same as being either a function or an object in V8
+# terminology (note that %_IsObject also holds for null, hence the name).
+macro IS_SPEC_OBJECT_OR_NULL(arg) = (%_IsObject(arg) || %_IsFunction(arg));
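+# For example, IS_SPEC_OBJECT_OR_NULL(null), IS_SPEC_OBJECT_OR_NULL({}) and
+# IS_SPEC_OBJECT_OR_NULL(function(){}) hold, while strings, numbers and
+# undefined do not.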
+
 # Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
 macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
 macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : ToInteger(arg));
diff --git a/src/parser.cc b/src/parser.cc
index 089eeee..c482fdf 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -33,12 +33,15 @@
 #include "codegen.h"
 #include "compiler.h"
 #include "messages.h"
+#include "parser.h"
 #include "platform.h"
 #include "runtime.h"
-#include "parser.h"
 #include "scopes.h"
 #include "string-stream.h"
 
+#include "ast-inl.h"
+#include "jump-target-inl.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/platform.h b/src/platform.h
index 82e2e3c..7156441 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -44,12 +44,6 @@
 #ifndef V8_PLATFORM_H_
 #define V8_PLATFORM_H_
 
-#ifdef __sun
-// On Solaris, to get isinf, INFINITY, fpclassify and other macros one needs
-// to define this symbol
-#define __C99FEATURES__ 1
-#endif
-
 #define V8_INFINITY INFINITY
 
 // Windows specific stuff.
diff --git a/src/register-allocator.cc b/src/register-allocator.cc
index b9989a5..31d0a49 100644
--- a/src/register-allocator.cc
+++ b/src/register-allocator.cc
@@ -84,15 +84,16 @@
 
 Result RegisterAllocator::Allocate(Register target) {
   // If the target is not referenced, it can simply be allocated.
-  if (!is_used(target)) {
+  if (!is_used(RegisterAllocator::ToNumber(target))) {
     return Result(target);
   }
   // If the target is only referenced in the frame, it can be spilled and
   // then allocated.
   ASSERT(cgen_->has_valid_frame());
-  if (cgen_->frame()->is_used(target) && count(target) == 1)  {
+  if (cgen_->frame()->is_used(RegisterAllocator::ToNumber(target)) &&
+      count(target) == 1)  {
     cgen_->frame()->Spill(target);
-    ASSERT(!is_used(target));
+    ASSERT(!is_used(RegisterAllocator::ToNumber(target)));
     return Result(target);
   }
   // Otherwise (if it's referenced outside the frame) we cannot allocate it.
diff --git a/src/runtime.js b/src/runtime.js
index be93c4f..8e3883f 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -80,10 +80,7 @@
     } else {
       // x is not a number, boolean, null or undefined.
       if (y == null) return 1;  // not equal
-      if (IS_OBJECT(y)) {
-        return %_ObjectEquals(x, y) ? 0 : 1;
-      }
-      if (IS_FUNCTION(y)) {
+      if (IS_SPEC_OBJECT_OR_NULL(y)) {
         return %_ObjectEquals(x, y) ? 0 : 1;
       }
 
@@ -344,7 +341,7 @@
 
 // ECMA-262, section 11.8.7, page 54.
 function IN(x) {
-  if (x == null || (!IS_OBJECT(x) && !IS_FUNCTION(x))) {
+  if (x == null || !IS_SPEC_OBJECT_OR_NULL(x)) {
     throw %MakeTypeError('invalid_in_operator_use', [this, x]);
   }
   return %_IsNonNegativeSmi(this) ? %HasElement(x, this) : %HasProperty(x, %ToString(this));
@@ -362,13 +359,13 @@
   }
 
   // If V is not an object, return false.
-  if (IS_NULL(V) || (!IS_OBJECT(V) && !IS_FUNCTION(V))) {
+  if (IS_NULL(V) || !IS_SPEC_OBJECT_OR_NULL(V)) {
     return 1;
   }
 
   // Get the prototype of F; if it is not an object, throw an error.
   var O = F.prototype;
-  if (IS_NULL(O) || (!IS_OBJECT(O) && !IS_FUNCTION(O))) {
+  if (IS_NULL(O) || !IS_SPEC_OBJECT_OR_NULL(O)) {
     throw %MakeTypeError('instanceof_nonobject_proto', [O]);
   }
 
@@ -482,7 +479,7 @@
   // Fast case check.
   if (IS_STRING(x)) return x;
   // Normal behavior.
-  if (!IS_OBJECT(x) && !IS_FUNCTION(x)) return x;
+  if (!IS_SPEC_OBJECT_OR_NULL(x)) return x;
   if (x == null) return x;  // check for null, undefined
   if (hint == NO_HINT) hint = (IS_DATE(x)) ? STRING_HINT : NUMBER_HINT;
   return (hint == NUMBER_HINT) ? %DefaultNumber(x) : %DefaultString(x);
@@ -587,7 +584,7 @@
 // Returns if the given x is a primitive value - not an object or a
 // function.
 function IsPrimitive(x) {
-  if (!IS_OBJECT(x) && !IS_FUNCTION(x)) {
+  if (!IS_SPEC_OBJECT_OR_NULL(x)) {
     return true;
   } else {
     // Even though the type of null is "object", null is still
diff --git a/src/string.js b/src/string.js
index 9433249..59a501f 100644
--- a/src/string.js
+++ b/src/string.js
@@ -241,7 +241,13 @@
     %_Log('regexp', 'regexp-replace,%0r,%1S', [search, subject]);
     if (IS_FUNCTION(replace)) {
       regExpCache.type = 'none';
-      return StringReplaceRegExpWithFunction(subject, search, replace);
+      if (search.global) {
+        return StringReplaceGlobalRegExpWithFunction(subject, search, replace);
+      } else {
+        return StringReplaceNonGlobalRegExpWithFunction(subject,
+                                                        search,
+                                                        replace);
+      }
     } else {
       return StringReplaceRegExp(subject, search, replace);
     }
@@ -396,9 +402,9 @@
   var scaled = index << 1;
   // Compute start and end.
   var start = lastCaptureInfo[CAPTURE(scaled)];
+  // If start isn't valid, return undefined.
+  if (start < 0) return;
   var end = lastCaptureInfo[CAPTURE(scaled + 1)];
-  // If either start or end is missing return undefined.
-  if (start < 0 || end < 0) return;
   return SubString(string, start, end);
 };
 
@@ -410,9 +416,8 @@
   var scaled = index << 1;
   // Compute start and end.
   var start = matchInfo[CAPTURE(scaled)];
+  if (start < 0) return;
   var end = matchInfo[CAPTURE(scaled + 1)];
-  // If either start or end is missing return.
-  if (start < 0 || end <= start) return;
   builder.addSpecialSlice(start, end);
 };
 
@@ -423,112 +428,116 @@
 
 // Helper function for replacing regular expressions with the result of a
 // function application in String.prototype.replace.
-function StringReplaceRegExpWithFunction(subject, regexp, replace) {
-  if (regexp.global) {
-    var resultArray = reusableReplaceArray;
-    if (resultArray) {
-      reusableReplaceArray = null;
-    } else {
-      // Inside a nested replace (replace called from the replacement function
-      // of another replace) or we have failed to set the reusable array
-      // back due to an exception in a replacement function. Create a new
-      // array to use in the future, or until the original is written back.
-      resultArray = $Array(16);
-    }
-
-    var res = %RegExpExecMultiple(regexp,
-                                  subject,
-                                  lastMatchInfo,
-                                  resultArray);
-    regexp.lastIndex = 0;
-    if (IS_NULL(res)) {
-      // No matches at all.
-      return subject;
-    }
-    var len = res.length;
-    var i = 0;
-    if (NUMBER_OF_CAPTURES(lastMatchInfo) == 2) {
-      var match_start = 0;
-      var override = [null, 0, subject];
-      while (i < len) {
-        var elem = res[i];
-        if (%_IsSmi(elem)) {
-          if (elem > 0) {
-            match_start = (elem >> 11) + (elem & 0x7ff);
-          } else {
-            match_start = res[++i] - elem;
-          }
-        } else {
-          override[0] = elem;
-          override[1] = match_start;
-          lastMatchInfoOverride = override;
-          var func_result = replace.call(null, elem, match_start, subject);
-          if (!IS_STRING(func_result)) {
-            func_result = NonStringToString(func_result);
-          }
-          res[i] = func_result;
-          match_start += elem.length;
-        }
-        i++;
-      }
-    } else {
-      while (i < len) {
-        var elem = res[i];
-        if (!%_IsSmi(elem)) {
-          // elem must be an Array.
-          // Use the apply argument as backing for global RegExp properties.
-          lastMatchInfoOverride = elem;
-          var func_result = replace.apply(null, elem);
-          if (!IS_STRING(func_result)) {
-            func_result = NonStringToString(func_result);
-          }
-          res[i] = func_result;
-        }
-        i++;
-      }
-    }
-    var resultBuilder = new ReplaceResultBuilder(subject, res);
-    var result = resultBuilder.generate();
-    resultArray.length = 0;
-    reusableReplaceArray = resultArray;
-    return result;
-  } else { // Not a global regexp, no need to loop.
-    var matchInfo = DoRegExpExec(regexp, subject, 0);
-    if (IS_NULL(matchInfo)) return subject;
-
-    var result = new ReplaceResultBuilder(subject);
-    result.addSpecialSlice(0, matchInfo[CAPTURE0]);
-    var endOfMatch = matchInfo[CAPTURE1];
-    result.add(ApplyReplacementFunction(replace, matchInfo, subject));
-    // Can't use matchInfo any more from here, since the function could
-    // overwrite it.
-    result.addSpecialSlice(endOfMatch, subject.length);
-    return result.generate();
+function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
+  var resultArray = reusableReplaceArray;
+  if (resultArray) {
+    reusableReplaceArray = null;
+  } else {
+    // Inside a nested replace (replace called from the replacement function
+    // of another replace) or we have failed to set the reusable array
+    // back due to an exception in a replacement function. Create a new
+    // array to use in the future, or until the original is written back.
+    resultArray = $Array(16);
   }
+  var res = %RegExpExecMultiple(regexp,
+                                subject,
+                                lastMatchInfo,
+                                resultArray);
+  regexp.lastIndex = 0;
+  if (IS_NULL(res)) {
+    // No matches at all.
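+    // Hand the reusable array back before the early return so later
+    // replace calls can pick it up again.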
+    reusableReplaceArray = resultArray;
+    return subject;
+  }
+  var len = res.length;
+  var i = 0;
+  if (NUMBER_OF_CAPTURES(lastMatchInfo) == 2) {
+    var match_start = 0;
+    var override = [null, 0, subject];
+    var receiver = %GetGlobalReceiver();
+    while (i < len) {
+      var elem = res[i];
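+      // A smi element encodes the subject slice before the next match: a
+      // positive smi packs (slice start << 11) | slice length, while a
+      // negative smi is the negated length, with the start stored in the
+      // next element.  Either way the match starts where the slice ends.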
+      if (%_IsSmi(elem)) {
+        if (elem > 0) {
+          match_start = (elem >> 11) + (elem & 0x7ff);
+        } else {
+          match_start = res[++i] - elem;
+        }
+      } else {
+        override[0] = elem;
+        override[1] = match_start;
+        lastMatchInfoOverride = override;
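+        // %_CallFunction takes the receiver first, then the arguments,
+        // and the function to call as the last parameter.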
+        var func_result =
+            %_CallFunction(receiver, elem, match_start, subject, replace);
+        if (!IS_STRING(func_result)) {
+          func_result = NonStringToString(func_result);
+        }
+        res[i] = func_result;
+        match_start += elem.length;
+      }
+      i++;
+    }
+  } else {
+    while (i < len) {
+      var elem = res[i];
+      if (!%_IsSmi(elem)) {
+        // elem must be an Array.
+        // Use the apply argument as backing for global RegExp properties.
+        lastMatchInfoOverride = elem;
+        var func_result = replace.apply(null, elem);
+        if (!IS_STRING(func_result)) {
+          func_result = NonStringToString(func_result);
+        }
+        res[i] = func_result;
+      }
+      i++;
+    }
+  }
+  var resultBuilder = new ReplaceResultBuilder(subject, res);
+  var result = resultBuilder.generate();
+  resultArray.length = 0;
+  reusableReplaceArray = resultArray;
+  return result;
 }
 
 
-// Helper function to apply a string replacement function once.
-function ApplyReplacementFunction(replace, matchInfo, subject) {
+function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
+  var matchInfo = DoRegExpExec(regexp, subject, 0);
+  if (IS_NULL(matchInfo)) return subject;
+  var result = new ReplaceResultBuilder(subject);
+  var index = matchInfo[CAPTURE0];
+  result.addSpecialSlice(0, index);
+  var endOfMatch = matchInfo[CAPTURE1];
   // Compute the parameter list consisting of the match, captures, index,
   // and subject for the replace function invocation.
-  var index = matchInfo[CAPTURE0];
   // The number of captures plus one for the match.
   var m = NUMBER_OF_CAPTURES(matchInfo) >> 1;
+  var replacement;
   if (m == 1) {
-    var s = CaptureString(subject, matchInfo, 0);
+    // No captures, only the match, which is always valid.
+    var s = SubString(subject, index, endOfMatch);
     // Don't call directly to avoid exposing the built-in global object.
-    return replace.call(null, s, index, subject);
+    replacement =
+        %_CallFunction(%GetGlobalReceiver(), s, index, subject, replace);
+  } else {
+    var parameters = $Array(m + 2);
+    for (var j = 0; j < m; j++) {
+      parameters[j] = CaptureString(subject, matchInfo, j);
+    }
+    parameters[j] = index;
+    parameters[j + 1] = subject;
+
+    replacement = replace.apply(null, parameters);
   }
-  var parameters = $Array(m + 2);
-  for (var j = 0; j < m; j++) {
-    parameters[j] = CaptureString(subject, matchInfo, j);
-  }
-  parameters[j] = index;
-  parameters[j + 1] = subject;
-  return replace.apply(null, parameters);
+
+  result.add(replacement);  // The add method converts to string if necessary.
+  // Can't use matchInfo any more from here, since the function could
+  // overwrite it.
+  result.addSpecialSlice(endOfMatch, subject.length);
+  return result.generate();
 }
 
+
 // ECMA-262 section 15.5.4.12
 function StringSearch(re) {
   var regexp;
diff --git a/src/v8natives.js b/src/v8natives.js
index 66a20ee..531bd0e 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -225,7 +225,7 @@
 
 // ECMA-262 - 15.2.4.6
 function ObjectIsPrototypeOf(V) {
-  if (!IS_OBJECT(V) && !IS_FUNCTION(V) && !IS_UNDETECTABLE(V)) return false;
+  if (!IS_SPEC_OBJECT_OR_NULL(V) && !IS_UNDETECTABLE(V)) return false;
   return %IsInPrototypeChain(this, V);
 }
 
@@ -233,7 +233,7 @@
 // ECMA-262 - 15.2.4.6
 function ObjectPropertyIsEnumerable(V) {
   if (this == null) return false;
-  if (!IS_OBJECT(this) && !IS_FUNCTION(this)) return false;
+  if (!IS_SPEC_OBJECT_OR_NULL(this)) return false;
   return %IsPropertyEnumerable(this, ToString(V));
 }
 
@@ -279,7 +279,7 @@
 
 
 function ObjectKeys(obj) {
-  if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj) &&
+  if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
       !IS_UNDETECTABLE(obj))
     throw MakeTypeError("obj_ctor_property_non_object", ["keys"]);
   return %LocalKeys(obj);
@@ -329,7 +329,7 @@
 
 // ES5 8.10.5.
 function ToPropertyDescriptor(obj) {
-  if (!IS_OBJECT(obj)) {
+  if (!IS_SPEC_OBJECT_OR_NULL(obj)) {
     throw MakeTypeError("property_desc_object", [obj]);
   }
   var desc = new PropertyDescriptor();
@@ -599,7 +599,7 @@
 
 // ES5 section 15.2.3.2.
 function ObjectGetPrototypeOf(obj) {
-  if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj) &&
+  if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
       !IS_UNDETECTABLE(obj))
     throw MakeTypeError("obj_ctor_property_non_object", ["getPrototypeOf"]);
   return obj.__proto__;
@@ -608,7 +608,7 @@
 
 // ES5 section 15.2.3.3
 function ObjectGetOwnPropertyDescriptor(obj, p) {
-  if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj) &&
+  if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
       !IS_UNDETECTABLE(obj))
     throw MakeTypeError("obj_ctor_property_non_object", ["getOwnPropertyDescriptor"]);
   var desc = GetOwnProperty(obj, p);
@@ -618,7 +618,7 @@
 
 // ES5 section 15.2.3.4.
 function ObjectGetOwnPropertyNames(obj) {
-  if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj) &&
+  if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
       !IS_UNDETECTABLE(obj))
     throw MakeTypeError("obj_ctor_property_non_object", ["getOwnPropertyNames"]);
 
@@ -660,8 +660,7 @@
 
 // ES5 section 15.2.3.5.
 function ObjectCreate(proto, properties) {
-  // IS_OBJECT will return true on null covering that case.
-  if (!IS_OBJECT(proto) && !IS_FUNCTION(proto)) {
+  if (!IS_SPEC_OBJECT_OR_NULL(proto)) {
     throw MakeTypeError("proto_object_or_null", [proto]);
   }
   var obj = new $Object();
@@ -673,7 +672,7 @@
 
 // ES5 section 15.2.3.6.
 function ObjectDefineProperty(obj, p, attributes) {
-  if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj) &&
+  if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
       !IS_UNDETECTABLE(obj))
     throw MakeTypeError("obj_ctor_property_non_object", ["defineProperty"]);
   var name = ToString(p);
@@ -685,7 +684,7 @@
 
 // ES5 section 15.2.3.7.
 function ObjectDefineProperties(obj, properties) {
- if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj) &&
+ if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
      !IS_UNDETECTABLE(obj))
     throw MakeTypeError("obj_ctor_property_non_object", ["defineProperties"]);
   var props = ToObject(properties);
diff --git a/src/version.cc b/src/version.cc
index 562f3ac..1bf543d 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     2
 #define MINOR_VERSION     2
-#define BUILD_NUMBER      9 
+#define BUILD_NUMBER      10
 #define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false
 
diff --git a/src/virtual-frame-heavy-inl.h b/src/virtual-frame-heavy-inl.h
index 6381d01..2755eee 100644
--- a/src/virtual-frame-heavy-inl.h
+++ b/src/virtual-frame-heavy-inl.h
@@ -31,6 +31,8 @@
 #include "type-info.h"
 #include "register-allocator.h"
 #include "scopes.h"
+#include "register-allocator-inl.h"
+#include "codegen-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -147,6 +149,44 @@
   Push(Handle<Object> (value));
 }
 
+
+int VirtualFrame::register_location(Register reg) {
+  return register_locations_[RegisterAllocator::ToNumber(reg)];
+}
+
+
+void VirtualFrame::set_register_location(Register reg, int index) {
+  register_locations_[RegisterAllocator::ToNumber(reg)] = index;
+}
+
+
+bool VirtualFrame::is_used(Register reg) {
+  return register_locations_[RegisterAllocator::ToNumber(reg)]
+      != kIllegalIndex;
+}
+
+
+void VirtualFrame::SetElementAt(int index, Handle<Object> value) {
+  Result temp(value);
+  SetElementAt(index, &temp);
+}
+
+
+Result VirtualFrame::CallStub(CodeStub* stub, int arg_count) {
+  PrepareForCall(arg_count, arg_count);
+  return RawCallStub(stub);
+}
+
+
+int VirtualFrame::parameter_count() {
+  return cgen()->scope()->num_parameters();
+}
+
+
+int VirtualFrame::local_count() {
+  return cgen()->scope()->num_stack_slots();
+}
+
 } }  // namespace v8::internal
 
 #endif  // V8_VIRTUAL_FRAME_HEAVY_INL_H_
diff --git a/src/virtual-frame-light-inl.h b/src/virtual-frame-light-inl.h
index c50e6c8..17b1c50 100644
--- a/src/virtual-frame-light-inl.h
+++ b/src/virtual-frame-light-inl.h
@@ -28,13 +28,23 @@
 #ifndef V8_VIRTUAL_FRAME_LIGHT_INL_H_
 #define V8_VIRTUAL_FRAME_LIGHT_INL_H_
 
-#include "type-info.h"
+#include "codegen.h"
 #include "register-allocator.h"
 #include "scopes.h"
+#include "type-info.h"
+
+#include "codegen-inl.h"
+#include "jump-target-light-inl.h"
 
 namespace v8 {
 namespace internal {
 
+VirtualFrame::VirtualFrame(InvalidVirtualFrameInitializer* dummy)
+    : element_count_(0),
+      top_of_stack_state_(NO_TOS_REGISTERS),
+      register_allocation_map_(0) { }
+
+
 // On entry to a function, the virtual frame already contains the receiver,
 // the parameters, and a return address.  All frame elements are in memory.
 VirtualFrame::VirtualFrame()
@@ -64,6 +74,87 @@
 }
 
 
+VirtualFrame::RegisterAllocationScope::RegisterAllocationScope(
+    CodeGenerator* cgen)
+  : cgen_(cgen),
+    old_is_spilled_(SpilledScope::is_spilled_) {
+  SpilledScope::is_spilled_ = false;
+  if (old_is_spilled_) {
+    VirtualFrame* frame = cgen->frame();
+    if (frame != NULL) {
+      frame->AssertIsSpilled();
+    }
+  }
+}
+
+
+VirtualFrame::RegisterAllocationScope::~RegisterAllocationScope() {
+  SpilledScope::is_spilled_ = old_is_spilled_;
+  if (old_is_spilled_) {
+    VirtualFrame* frame = cgen_->frame();
+    if (frame != NULL) {
+      frame->SpillAll();
+    }
+  }
+}
+
+
+CodeGenerator* VirtualFrame::cgen() { return CodeGeneratorScope::Current(); }
+
+
+MacroAssembler* VirtualFrame::masm() { return cgen()->masm(); }
+
+
+void VirtualFrame::CallStub(CodeStub* stub, int arg_count) {
+  if (arg_count != 0) Forget(arg_count);
+  ASSERT(cgen()->HasValidEntryRegisters());
+  masm()->CallStub(stub);
+}
+
+
+int VirtualFrame::parameter_count() {
+  return cgen()->scope()->num_parameters();
+}
+
+
+int VirtualFrame::local_count() { return cgen()->scope()->num_stack_slots(); }
+
+
+int VirtualFrame::frame_pointer() { return parameter_count() + 3; }
+
+
+int VirtualFrame::context_index() { return frame_pointer() - 1; }
+
+
+int VirtualFrame::function_index() { return frame_pointer() - 2; }
+
+
+int VirtualFrame::local0_index() { return frame_pointer() + 2; }
+
+
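+// Convert a frame index into an offset from the fp register: the element
+// at index frame_pointer() maps to offset 0, and each higher index maps
+// one kPointerSize lower (e.g. frame_pointer() + 1 maps to -kPointerSize).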
+int VirtualFrame::fp_relative(int index) {
+  ASSERT(index < element_count());
+  ASSERT(frame_pointer() < element_count());  // FP is on the frame.
+  return (frame_pointer() - index) * kPointerSize;
+}
+
+
+int VirtualFrame::expression_base_index() {
+  return local0_index() + local_count();
+}
+
+
+int VirtualFrame::height() {
+  return element_count() - expression_base_index();
+}
+
+
+MemOperand VirtualFrame::LocalAt(int index) {
+  ASSERT(0 <= index);
+  ASSERT(index < local_count());
+  return MemOperand(fp, kLocal0Offset - index * kPointerSize);
+}
+
 } }  // namespace v8::internal
 
 #endif  // V8_VIRTUAL_FRAME_LIGHT_INL_H_
diff --git a/src/virtual-frame-light.cc b/src/virtual-frame-light.cc
index 27c48a5..9c019cf 100644
--- a/src/virtual-frame-light.cc
+++ b/src/virtual-frame-light.cc
@@ -46,4 +46,7 @@
   return no_reg;
 }
 
+
+InvalidVirtualFrameInitializer* kInvalidVirtualFrameInitializer = NULL;
+
 } }  // namespace v8::internal
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 1c00ebc..fcfa8d0 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -2510,6 +2510,17 @@
 }
 
 
+void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
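+  // CVTTSD2SI r64, xmm (F2 REX.W 0F 2C /r): truncating conversion of a
+  // scalar double to a signed 64-bit integer.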
+  emit(0xF2);
+  emit_rex_64(dst, src);
+  emit(0x0F);
+  emit(0x2C);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::cvtlsi2sd(XMMRegister dst, const Operand& src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index d077865..0f06c3c 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -606,6 +606,14 @@
     immediate_arithmetic_op(0x0, dst, src);
   }
 
+  void sbbl(Register dst, Register src) {
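+    // Subtract with borrow, 32-bit operands.  Note that sbbl(reg, reg)
+    // materializes the carry flag as 0 or -1 in reg; the updated
+    // IntegerConvert in codegen-x64.cc relies on this.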
+    if (dst.low_bits() == 4) {  // Forces SIB byte if dst is base register.
+      arithmetic_op_32(0x19, src, dst);
+    } else {
+      arithmetic_op_32(0x1b, dst, src);
+    }
+  }
+
   void cmpb(Register dst, Immediate src) {
     immediate_arithmetic_op_8(0x7, dst, src);
   }
@@ -1092,6 +1100,7 @@
 
   void cvttss2si(Register dst, const Operand& src);
   void cvttsd2si(Register dst, const Operand& src);
+  void cvttsd2siq(Register dst, XMMRegister src);
 
   void cvtlsi2sd(XMMRegister dst, const Operand& src);
   void cvtlsi2sd(XMMRegister dst, Register src);
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 740be83..d9586cc 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -277,7 +277,6 @@
   // Takes the operands in rdx and rax and loads them as integers in rax
   // and rcx.
   static void LoadAsIntegers(MacroAssembler* masm,
-                             bool use_sse3,
                              Label* operand_conversion_failure);
 };
 
@@ -2868,59 +2867,26 @@
     // ----------------------------------
     // JavaScript examples:
     //
-    //  with (obj) foo(1, 2, 3)  // foo is in obj
+    //  with (obj) foo(1, 2, 3)  // foo may be in obj.
     //
     //  function f() {};
     //  function g() {
     //    eval(...);
-    //    f();  // f could be in extension object
+    //    f();  // f could be in extension object.
     //  }
     // ----------------------------------
 
-    JumpTarget slow;
-    JumpTarget done;
-
-    // Generate fast-case code for variables that might be shadowed by
-    // eval-introduced variables.  Eval is used a lot without
-    // introducing variables.  In those cases, we do not want to
-    // perform a runtime call for all variables in the scope
-    // containing the eval.
+    JumpTarget slow, done;
     Result function;
-    if (var->mode() == Variable::DYNAMIC_GLOBAL) {
-      function = LoadFromGlobalSlotCheckExtensions(var->slot(),
-                                                   NOT_INSIDE_TYPEOF,
-                                                   &slow);
-      frame_->Push(&function);
-      LoadGlobalReceiver();
-      done.Jump();
 
-    } else if (var->mode() == Variable::DYNAMIC_LOCAL) {
-      Slot* potential_slot = var->local_if_not_shadowed()->slot();
-      // Only generate the fast case for locals that rewrite to slots.
-      // This rules out argument loads because eval forces arguments
-      // access to be through the arguments object.
-      if (potential_slot != NULL) {
-        // Allocate a fresh register to use as a temp in
-        // ContextSlotOperandCheckExtensions and to hold the result
-        // value.
-        function = allocator()->Allocate();
-        ASSERT(function.is_valid());
-        __ movq(function.reg(),
-                ContextSlotOperandCheckExtensions(potential_slot,
-                                                  function,
-                                                  &slow));
-        JumpTarget push_function_and_receiver;
-        if (potential_slot->var()->mode() == Variable::CONST) {
-          __ CompareRoot(function.reg(), Heap::kTheHoleValueRootIndex);
-          push_function_and_receiver.Branch(not_equal, &function);
-          __ LoadRoot(function.reg(), Heap::kUndefinedValueRootIndex);
-        }
-        push_function_and_receiver.Bind(&function);
-        frame_->Push(&function);
-        LoadGlobalReceiver();
-        done.Jump();
-      }
-    }
+    // Generate fast case for loading functions from slots that
+    // correspond to local/global variables or arguments unless they
+    // are shadowed by eval-introduced bindings.
+    EmitDynamicLoadFromSlotFastCase(var->slot(),
+                                    NOT_INSIDE_TYPEOF,
+                                    &function,
+                                    &slow,
+                                    &done);
 
     slow.Bind();
     // Load the function from the context.  Sync the frame so we can
@@ -2941,7 +2907,18 @@
     ASSERT(!allocator()->is_used(rdx));
     frame_->EmitPush(rdx);
 
-    done.Bind();
+    // If fast case code has been generated, emit code to push the
+    // function and receiver and have the slow path jump around this
+    // code.
+    if (done.is_linked()) {
+      JumpTarget call;
+      call.Jump();
+      done.Bind(&function);
+      frame_->Push(&function);
+      LoadGlobalReceiver();
+      call.Bind();
+    }
+
     // Call the function.
     CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position());
 
@@ -5225,6 +5202,11 @@
     // The expression is a variable proxy that does not rewrite to a
     // property.  Global variables are treated as named property references.
     if (var->is_global()) {
+      // If rax is free, the register allocator prefers it.  Thus the code
+      // generator will load the global object into rax, which is where
+      // LoadIC wants it.  Most uses of Reference call LoadIC directly
+      // after the reference is created.
+      frame_->Spill(rax);
       LoadGlobal();
       ref->set_type(Reference::NAMED);
     } else {
@@ -5336,47 +5318,14 @@
     JumpTarget done;
     Result value;
 
-    // Generate fast-case code for variables that might be shadowed by
-    // eval-introduced variables.  Eval is used a lot without
-    // introducing variables.  In those cases, we do not want to
-    // perform a runtime call for all variables in the scope
-    // containing the eval.
-    if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
-      value = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
-      // If there was no control flow to slow, we can exit early.
-      if (!slow.is_linked()) {
-        frame_->Push(&value);
-        return;
-      }
-
-      done.Jump(&value);
-
-    } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
-      Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
-      // Only generate the fast case for locals that rewrite to slots.
-      // This rules out argument loads because eval forces arguments
-      // access to be through the arguments object.
-      if (potential_slot != NULL) {
-        // Allocate a fresh register to use as a temp in
-        // ContextSlotOperandCheckExtensions and to hold the result
-        // value.
-        value = allocator_->Allocate();
-        ASSERT(value.is_valid());
-        __ movq(value.reg(),
-               ContextSlotOperandCheckExtensions(potential_slot,
-                                                 value,
-                                                 &slow));
-        if (potential_slot->var()->mode() == Variable::CONST) {
-          __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
-          done.Branch(not_equal, &value);
-          __ LoadRoot(value.reg(), Heap::kUndefinedValueRootIndex);
-        }
-        // There is always control flow to slow from
-        // ContextSlotOperandCheckExtensions so we have to jump around
-        // it.
-        done.Jump(&value);
-      }
-    }
+    // Generate fast case for loading from slots that correspond to
+    // local/global variables or arguments unless they are shadowed by
+    // eval-introduced bindings.
+    EmitDynamicLoadFromSlotFastCase(slot,
+                                    typeof_state,
+                                    &value,
+                                    &slow,
+                                    &done);
 
     slow.Bind();
     // A runtime call is inevitable.  We eagerly sync frame elements
@@ -5642,6 +5591,71 @@
 }
 
 
+void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
+                                                    TypeofState typeof_state,
+                                                    Result* result,
+                                                    JumpTarget* slow,
+                                                    JumpTarget* done) {
+  // Generate fast-case code for variables that might be shadowed by
+  // eval-introduced variables.  Eval is used a lot without
+  // introducing variables.  In those cases, we do not want to
+  // perform a runtime call for all variables in the scope
+  // containing the eval.
+  if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
+    *result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
+    done->Jump(result);
+
+  } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
+    Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
+    Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
+    if (potential_slot != NULL) {
+      // Generate fast case for locals that rewrite to slots.
+      // Allocate a fresh register to use as a temp in
+      // ContextSlotOperandCheckExtensions and to hold the result
+      // value.
+      *result = allocator_->Allocate();
+      ASSERT(result->is_valid());
+      __ movq(result->reg(),
+              ContextSlotOperandCheckExtensions(potential_slot,
+                                                *result,
+                                                slow));
+      if (potential_slot->var()->mode() == Variable::CONST) {
+        __ CompareRoot(result->reg(), Heap::kTheHoleValueRootIndex);
+        done->Branch(not_equal, result);
+        __ LoadRoot(result->reg(), Heap::kUndefinedValueRootIndex);
+      }
+      done->Jump(result);
+    } else if (rewrite != NULL) {
+      // Generate fast case for argument loads.
+      Property* property = rewrite->AsProperty();
+      if (property != NULL) {
+        VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
+        Literal* key_literal = property->key()->AsLiteral();
+        if (obj_proxy != NULL &&
+            key_literal != NULL &&
+            obj_proxy->IsArguments() &&
+            key_literal->handle()->IsSmi()) {
+          // Load arguments object if there are no eval-introduced
+          // variables. Then load the argument from the arguments
+          // object using keyed load.
+          Result arguments = allocator()->Allocate();
+          ASSERT(arguments.is_valid());
+          __ movq(arguments.reg(),
+                  ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
+                                                    arguments,
+                                                    slow));
+          frame_->Push(&arguments);
+          frame_->Push(key_literal->handle());
+          *result = EmitKeyedLoad(false);
+          frame_->Drop(2);  // Drop key and receiver.
+          done->Jump(result);
+        }
+      }
+    }
+  }
+}
+
+
 void CodeGenerator::LoadGlobal() {
   if (in_spilled_code()) {
     frame_->EmitPush(GlobalObject());
@@ -8015,138 +8029,71 @@
 }
 
 
-// Get the integer part of a heap number.  Surprisingly, all this bit twiddling
-// is faster than using the built-in instructions on floating point registers.
-// Trashes rdi and rbx.  Dest is rcx.  Source cannot be rcx or one of the
-// trashed registers.
+// Get the integer part of a heap number.
+// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
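+// Strategy: for unbiased exponents below 63 the value fits in a signed
+// 64-bit integer, so a single cvttsd2siq truncation suffices; for
+// exponents in 63..83 the low 32 bits are reconstructed from the mantissa
+// by shifting; for negative exponents or exponents above 83 the 32-bit
+// result is zero.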
 void IntegerConvert(MacroAssembler* masm,
-                    Register source,
-                    bool use_sse3,
-                    Label* conversion_failure) {
-  ASSERT(!source.is(rcx) && !source.is(rdi) && !source.is(rbx));
-  Label done, right_exponent, normal_exponent;
-  Register scratch = rbx;
-  Register scratch2 = rdi;
-  // Get exponent word.
-  __ movl(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
-  // Get exponent alone in scratch2.
-  __ movl(scratch2, scratch);
-  __ and_(scratch2, Immediate(HeapNumber::kExponentMask));
-  if (use_sse3) {
-    CpuFeatures::Scope scope(SSE3);
-    // Check whether the exponent is too big for a 64 bit signed integer.
-    static const uint32_t kTooBigExponent =
-        (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
-    __ cmpl(scratch2, Immediate(kTooBigExponent));
-    __ j(greater_equal, conversion_failure);
-    // Load x87 register with heap number.
-    __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
-    // Reserve space for 64 bit answer.
-    __ subq(rsp, Immediate(sizeof(uint64_t)));  // Nolint.
-    // Do conversion, which cannot fail because we checked the exponent.
-    __ fisttp_d(Operand(rsp, 0));
-    __ movl(rcx, Operand(rsp, 0));  // Load low word of answer into rcx.
-    __ addq(rsp, Immediate(sizeof(uint64_t)));  // Nolint.
+                    Register result,
+                    Register source) {
+  // Result may be rcx. If result and source are the same register, source will
+  // be overwritten.
+  ASSERT(!result.is(rdi) && !result.is(rbx));
+  // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
+  // cvttsd2si (32-bit version) directly.
+  Register double_exponent = rbx;
+  Register double_value = rdi;
+  Label done, exponent_63_plus;
+  // Get double and extract exponent.
+  __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
+  // Clear result preemptively, in case we need to return zero.
+  __ xorl(result, result);
+  __ movq(xmm0, double_value);  // Save copy in xmm0 in case we need it there.
+  // Double the value to remove the sign bit, shift the exponent down to the
+  // least significant bits, and subtract the bias to get the unshifted,
+  // unbiased exponent.
+  __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
+  __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
+  __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
+  // Check whether the exponent is too big for a 63 bit unsigned integer.
+  __ cmpl(double_exponent, Immediate(63));
+  __ j(above_equal, &exponent_63_plus);
+  // Handle exponent range 0..62.
+  __ cvttsd2siq(result, xmm0);
+  __ jmp(&done);
+
+  __ bind(&exponent_63_plus);
+  // Exponent negative or 63+.
+  __ cmpl(double_exponent, Immediate(83));
+  // If the exponent is negative or above 83, the number contains no
+  // significant bits in the range 0..2^31, so the result is zero and the
+  // result register already holds zero.
+  __ j(above, &done);
+
+  // Exponent in range 63..83.
+  // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
+  // the least significant exponent-52 bits.
+
+  // Negate low bits of mantissa if value is negative.
+  __ addq(double_value, double_value);  // Move sign bit to carry.
+  __ sbbl(result, result);  // And convert carry to -1 in result register.
+  // If the value is negative, compute (double_value - 1) ^ -1; otherwise
+  // compute (double_value - 0) ^ 0 (result holds -1 or 0, respectively).
+  __ addl(double_value, result);
+  // Do xor in opposite directions depending on where we want the result
+  // (depending on whether result is rcx or not).
+
+  if (result.is(rcx)) {
+    __ xorl(double_value, result);
+    // Left shift the mantissa by (exponent - kMantissaBits - 1) to keep the
+    // bits that have positional values below 2^32 (the extra -1 comes from the
+    // doubling done above to move the sign bit into the carry flag).
+    __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
+    __ shll_cl(double_value);
+    __ movl(result, double_value);
   } else {
-    // Load rcx with zero.  We use this either for the final shift or
-    // for the answer.
-    __ xor_(rcx, rcx);
-    // Check whether the exponent matches a 32 bit signed int that cannot be
-    // represented by a Smi.  A non-smi 32 bit integer is 1.xxx * 2^30 so the
-    // exponent is 30 (biased).  This is the exponent that we are fastest at and
-    // also the highest exponent we can handle here.
-    const uint32_t non_smi_exponent =
-        (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
-    __ cmpl(scratch2, Immediate(non_smi_exponent));
-    // If we have a match of the int32-but-not-Smi exponent then skip some
-    // logic.
-    __ j(equal, &right_exponent);
-    // If the exponent is higher than that then go to slow case.  This catches
-    // numbers that don't fit in a signed int32, infinities and NaNs.
-    __ j(less, &normal_exponent);
-
-    {
-      // Handle a big exponent.  The only reason we have this code is that the
-      // >>> operator has a tendency to generate numbers with an exponent of 31.
-      const uint32_t big_non_smi_exponent =
-          (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
-      __ cmpl(scratch2, Immediate(big_non_smi_exponent));
-      __ j(not_equal, conversion_failure);
-      // We have the big exponent, typically from >>>.  This means the number is
-      // in the range 2^31 to 2^32 - 1.  Get the top bits of the mantissa.
-      __ movl(scratch2, scratch);
-      __ and_(scratch2, Immediate(HeapNumber::kMantissaMask));
-      // Put back the implicit 1.
-      __ or_(scratch2, Immediate(1 << HeapNumber::kExponentShift));
-      // Shift up the mantissa bits to take up the space the exponent used to
-      // take. We just orred in the implicit bit so that took care of one and
-      // we want to use the full unsigned range so we subtract 1 bit from the
-      // shift distance.
-      const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
-      __ shl(scratch2, Immediate(big_shift_distance));
-      // Get the second half of the double.
-      __ movl(rcx, FieldOperand(source, HeapNumber::kMantissaOffset));
-      // Shift down 21 bits to get the most significant 11 bits or the low
-      // mantissa word.
-      __ shr(rcx, Immediate(32 - big_shift_distance));
-      __ or_(rcx, scratch2);
-      // We have the answer in rcx, but we may need to negate it.
-      __ testl(scratch, scratch);
-      __ j(positive, &done);
-      __ neg(rcx);
-      __ jmp(&done);
-    }
-
-    __ bind(&normal_exponent);
-    // Exponent word in scratch, exponent part of exponent word in scratch2.
-    // Zero in rcx.
-    // We know the exponent is smaller than 30 (biased).  If it is less than
-    // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
-    // it rounds to zero.
-    const uint32_t zero_exponent =
-        (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
-    __ subl(scratch2, Immediate(zero_exponent));
-    // rcx already has a Smi zero.
-    __ j(less, &done);
-
-    // We have a shifted exponent between 0 and 30 in scratch2.
-    __ shr(scratch2, Immediate(HeapNumber::kExponentShift));
-    __ movl(rcx, Immediate(30));
-    __ subl(rcx, scratch2);
-
-    __ bind(&right_exponent);
-    // Here rcx is the shift, scratch is the exponent word.
-    // Get the top bits of the mantissa.
-    __ and_(scratch, Immediate(HeapNumber::kMantissaMask));
-    // Put back the implicit 1.
-    __ or_(scratch, Immediate(1 << HeapNumber::kExponentShift));
-    // Shift up the mantissa bits to take up the space the exponent used to
-    // take. We have kExponentShift + 1 significant bits int he low end of the
-    // word.  Shift them to the top bits.
-    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
-    __ shl(scratch, Immediate(shift_distance));
-    // Get the second half of the double. For some exponents we don't
-    // actually need this because the bits get shifted out again, but
-    // it's probably slower to test than just to do it.
-    __ movl(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
-    // Shift down 22 bits to get the most significant 10 bits or the low
-    // mantissa word.
-    __ shr(scratch2, Immediate(32 - shift_distance));
-    __ or_(scratch2, scratch);
-    // Move down according to the exponent.
-    __ shr_cl(scratch2);
-    // Now the unsigned answer is in scratch2.  We need to move it to rcx and
-    // we may need to fix the sign.
-    Label negative;
-    __ xor_(rcx, rcx);
-    __ cmpl(rcx, FieldOperand(source, HeapNumber::kExponentOffset));
-    __ j(greater, &negative);
-    __ movl(rcx, scratch2);
-    __ jmp(&done);
-    __ bind(&negative);
-    __ subl(rcx, scratch2);
-    __ bind(&done);
+    // As the rcx case above, but the xor puts the value into result
+    // directly, so no final move is needed after shifting.
+    __ xorl(result, double_value);
+    __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
+    __ shll_cl(result);
   }
+
+  __ bind(&done);
 }
 
 
@@ -8196,14 +8143,11 @@
     __ j(not_equal, &slow);
 
-    // Convert the heap number in rax to an untagged integer in rcx.
-    IntegerConvert(masm, rax, CpuFeatures::IsSupported(SSE3), &slow);
+    // Convert the heap number in rax to an untagged integer in rax.
+    IntegerConvert(masm, rax, rax);
 
-    // Do the bitwise operation and check if the result fits in a smi.
-    Label try_float;
-    __ not_(rcx);
-    // Tag the result as a smi and we're done.
-    ASSERT(kSmiTagSize == 1);
-    __ Integer32ToSmi(rax, rcx);
+    // Do the bitwise operation and smi tag the result.
+    __ notl(rax);
+    __ Integer32ToSmi(rax, rax);
   }
 
   // Return from the stub.
@@ -9777,7 +9721,6 @@
 // Input: rdx, rax are the left and right objects of a bit op.
 // Output: rax, rcx are left and right integers for a bit op.
 void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
-                                         bool use_sse3,
                                          Label* conversion_failure) {
   // Check float operands.
   Label arg1_is_object, check_undefined_arg1;
@@ -9800,10 +9743,9 @@
   __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
   __ j(not_equal, &check_undefined_arg1);
-  // Get the untagged integer version of the edx heap number in rcx.
-  IntegerConvert(masm, rdx, use_sse3, conversion_failure);
-  __ movl(rdx, rcx);
+  // Get the untagged integer version of the rdx heap number in rdx.
+  IntegerConvert(masm, rdx, rdx);
 
-  // Here edx has the untagged integer, eax has a Smi or a heap number.
+  // Here rdx has the untagged integer, rax has a Smi or a heap number.
   __ bind(&load_arg2);
   // Test if arg2 is a Smi.
   __ JumpIfNotSmi(rax, &arg2_is_object);
@@ -9823,7 +9765,7 @@
   __ CompareRoot(rbx, Heap::kHeapNumberMapRootIndex);
   __ j(not_equal, &check_undefined_arg2);
-  // Get the untagged integer version of the eax heap number in ecx.
+  // Get the untagged integer version of the rax heap number in rcx.
-  IntegerConvert(masm, rax, use_sse3, conversion_failure);
+  IntegerConvert(masm, rcx, rax);
   __ bind(&done);
   __ movl(rax, rdx);
 }
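
In outline, the helper's contract is: a Smi is untagged directly, a heap number goes through IntegerConvert, anything else jumps to conversion_failure. A hedged sketch with illustrative stand-in types (simplified; the real code also special-cases undefined, and none of these are V8 declarations):

#include <stdint.h>
#include <math.h>

// Illustrative stand-in for a tagged value.
struct TaggedValue {
  bool is_smi;
  bool is_heap_number;
  int32_t smi_value;
  double number_value;
};

// ToInt32-style truncation; NaN and Infinity become zero.
static int32_t TruncateToInt32(double d) {
  double m = fmod(d, 4294967296.0);  // reduce modulo 2^32
  if (m != m) return 0;              // d was NaN or Infinity
  return static_cast<int32_t>(static_cast<int64_t>(m));
}

// Returns false where the stub would jump to conversion_failure.
static bool LoadAsInteger(const TaggedValue& v, int32_t* out) {
  if (v.is_smi) {            // Smi: just untag.
    *out = v.smi_value;
    return true;
  }
  if (v.is_heap_number) {    // Heap number: convert as IntegerConvert does.
    *out = TruncateToInt32(v.number_value);
    return true;
  }
  return false;              // Neither: conversion failure.
}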
@@ -9892,13 +9834,12 @@
   }
 
   OS::SNPrintF(Vector<char>(name_, len),
-               "GenericBinaryOpStub_%s_%s%s_%s%s_%s%s_%s",
+               "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
                op_name,
                overwrite_name,
                (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
                args_in_registers_ ? "RegArgs" : "StackArgs",
                args_reversed_ ? "_R" : "",
-               use_sse3_ ? "SSE3" : "SSE2",
                static_operands_type_.ToString(),
                BinaryOpIC::GetName(runtime_operands_type_));
   return name_;
@@ -10331,7 +10272,7 @@
       case Token::SHL:
       case Token::SHR: {
         Label skip_allocation, non_smi_result;
-        FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime);
+        FloatingPointHelper::LoadAsIntegers(masm, &call_runtime);
         switch (op_) {
           case Token::BIT_OR:  __ orl(rax, rcx); break;
           case Token::BIT_AND: __ andl(rax, rcx); break;
@@ -10342,7 +10283,7 @@
           default: UNREACHABLE();
         }
         if (op_ == Token::SHR) {
-          // Check if result is non-negative. This can only happen for a shift
+          // Check if result is negative. This can only happen for a shift
           // by zero, which also doesn't update the sign flag.
           __ testl(rax, rax);
           __ j(negative, &non_smi_result);
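
The comment holds because a logical right shift by any nonzero amount clears bit 31, so only a zero shift can leave an untagged result that reads as negative. A small self-contained sketch of that invariant (the helper name is made up for illustration):

#include <stdint.h>
#include <assert.h>

// True when the untagged SHR result can take the smi fast path,
// i.e. its sign bit is clear.
static bool SignBitClear(uint32_t shr_result) {
  return static_cast<int32_t>(shr_result) >= 0;
}

int main() {
  uint32_t x = 0x80000000u;        // e.g. ToUint32 of a negative input
  assert(!SignBitClear(x >> 0));   // shift by zero keeps bit 31
  for (int s = 1; s < 32; s++) {
    assert(SignBitClear(x >> s));  // any nonzero shift clears it
  }
  return 0;
}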
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 5d9861b..01bbd20 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -28,7 +28,9 @@
 #ifndef V8_X64_CODEGEN_X64_H_
 #define V8_X64_CODEGEN_X64_H_
 
+#include "ast.h"
 #include "ic-inl.h"
+#include "jump-target-heavy.h"
 
 namespace v8 {
 namespace internal {
@@ -433,6 +435,16 @@
                                            TypeofState typeof_state,
                                            JumpTarget* slow);
 
+  // Support for loading from local/global variables and arguments
+  // whose location is known unless they are shadowed by
+  // eval-introduced bindings. Generates no code for unsupported slot
+  // types and therefore expects to fall through to the slow jump target.
+  void EmitDynamicLoadFromSlotFastCase(Slot* slot,
+                                       TypeofState typeof_state,
+                                       Result* result,
+                                       JumpTarget* slow,
+                                       JumpTarget* done);
+
   // Store the value on top of the expression stack into a slot, leaving the
   // value in place.
   void StoreToSlot(Slot* slot, InitState init_state);
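
A sketch of the calling pattern the comment above describes, based on the analogous code path on the other platforms; treat it as an outline of the convention, not exact V8 code:

  // Outline of a caller (simplified, control flow only):
  //
  //   JumpTarget slow;
  //   JumpTarget done;
  //   Result value;
  //   EmitDynamicLoadFromSlotFastCase(slot, typeof_state, &value,
  //                                   &slow, &done);
  //   slow.Bind();
  //   // ... emit the full runtime lookup, leaving the result in value ...
  //   done.Bind(&value);
  //
  // For unsupported slot types the helper emits nothing, so control simply
  // falls through into the slow path bound right after the call.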
@@ -711,7 +723,6 @@
         static_operands_type_(operands_type),
         runtime_operands_type_(BinaryOpIC::DEFAULT),
         name_(NULL) {
-    use_sse3_ = CpuFeatures::IsSupported(SSE3);
     ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
   }
 
@@ -721,7 +732,6 @@
         flags_(FlagBits::decode(key)),
         args_in_registers_(ArgsInRegistersBits::decode(key)),
         args_reversed_(ArgsReversedBits::decode(key)),
-        use_sse3_(SSE3Bits::decode(key)),
         static_operands_type_(TypeInfo::ExpandedRepresentation(
             StaticTypeInfoBits::decode(key))),
         runtime_operands_type_(type_info),
@@ -746,7 +756,6 @@
   GenericBinaryFlags flags_;
   bool args_in_registers_;  // Arguments passed in registers not on the stack.
   bool args_reversed_;  // Left and right argument are swapped.
-  bool use_sse3_;
 
   // Number type information of operands, determined by code generator.
   TypeInfo static_operands_type_;
@@ -772,15 +781,14 @@
   }
 #endif
 
-  // Minor key encoding in 18 bits TTNNNFRASOOOOOOOMM.
+  // Minor key encoding in 17 bits TTNNNFRAOOOOOOOMM.
   class ModeBits: public BitField<OverwriteMode, 0, 2> {};
   class OpBits: public BitField<Token::Value, 2, 7> {};
-  class SSE3Bits: public BitField<bool, 9, 1> {};
-  class ArgsInRegistersBits: public BitField<bool, 10, 1> {};
-  class ArgsReversedBits: public BitField<bool, 11, 1> {};
-  class FlagBits: public BitField<GenericBinaryFlags, 12, 1> {};
-  class StaticTypeInfoBits: public BitField<int, 13, 3> {};
-  class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 16, 2> {};
+  class ArgsInRegistersBits: public BitField<bool, 9, 1> {};
+  class ArgsReversedBits: public BitField<bool, 10, 1> {};
+  class FlagBits: public BitField<GenericBinaryFlags, 11, 1> {};
+  class StaticTypeInfoBits: public BitField<int, 12, 3> {};
+  class RuntimeTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 15, 2> {};
 
   Major MajorKey() { return GenericBinaryOp; }
   int MinorKey() {
@@ -788,7 +796,6 @@
     return OpBits::encode(op_)
            | ModeBits::encode(mode_)
            | FlagBits::encode(flags_)
-           | SSE3Bits::encode(use_sse3_)
            | ArgsInRegistersBits::encode(args_in_registers_)
            | ArgsReversedBits::encode(args_reversed_)
            | StaticTypeInfoBits::encode(
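
With SSE3Bits gone, every later field shifts down one bit, which is what the new shift constants encode. A self-contained sketch of the BitField encode/decode pattern behind the minor key (a generic stand-in, not the actual V8 template):

#include <stdint.h>
#include <assert.h>

template <class T, int shift, int size>
struct BitFieldSketch {
  static uint32_t encode(T value) {
    assert(static_cast<uint32_t>(value) < (1u << size));
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t key) {
    return static_cast<T>((key >> shift) & ((1u << size) - 1));
  }
};

// Low to high, matching TTNNNFRAOOOOOOOMM: mode (2 bits), op token (7),
// args-in-registers (1), args-reversed (1), flags (1), static type (3),
// runtime type (2), 17 bits in total.
typedef BitFieldSketch<int, 0, 2> ModeBitsSketch;
typedef BitFieldSketch<int, 2, 7> OpBitsSketch;
typedef BitFieldSketch<bool, 9, 1> ArgsInRegistersBitsSketch;

int main() {
  uint32_t key = ModeBitsSketch::encode(1)
               | OpBitsSketch::encode(42)
               | ArgsInRegistersBitsSketch::encode(true);
  assert(ModeBitsSketch::decode(key) == 1);
  assert(OpBitsSketch::decode(key) == 42);
  assert(ArgsInRegistersBitsSketch::decode(key));
  return 0;
}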
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index 50b4120..d9b75b1 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -188,8 +188,8 @@
 void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) {
   Label not_at_start;
   // Did we start the match at the start of the string at all?
-  __ cmpb(Operand(rbp, kAtStart), Immediate(0));
-  BranchOrBacktrack(equal, &not_at_start);
+  __ cmpb(Operand(rbp, kStartIndex), Immediate(0));
+  BranchOrBacktrack(not_equal, &not_at_start);
   // If we did, are we still at the start of the input?
   __ lea(rax, Operand(rsi, rdi, times_1, 0));
   __ cmpq(rax, Operand(rbp, kInputStart));
@@ -200,8 +200,8 @@
 
 void RegExpMacroAssemblerX64::CheckNotAtStart(Label* on_not_at_start) {
   // Did we start the match at the start of the string at all?
-  __ cmpb(Operand(rbp, kAtStart), Immediate(0));
-  BranchOrBacktrack(equal, on_not_at_start);
+  __ cmpb(Operand(rbp, kStartIndex), Immediate(0));
+  BranchOrBacktrack(not_equal, on_not_at_start);
   // If we did, are we still at the start of the input?
   __ lea(rax, Operand(rsi, rdi, times_1, 0));
   __ cmpq(rax, Operand(rbp, kInputStart));
@@ -219,6 +219,15 @@
                                               int cp_offset,
                                               Label* on_failure,
                                               bool check_end_of_string) {
+#ifdef DEBUG
+  // If input is ASCII, don't even bother calling here if the string to
+  // match contains a non-ASCII character.
+  if (mode_ == ASCII) {
+    for (int i = 0; i < str.length(); i++) {
+      ASSERT(str[i] <= String::kMaxAsciiCharCodeU);
+    }
+  }
+#endif
   int byte_length = str.length() * char_size();
   int byte_offset = cp_offset * char_size();
   if (check_end_of_string) {
@@ -232,16 +241,71 @@
     on_failure = &backtrack_label_;
   }
 
-  // TODO(lrn): Test multiple characters at a time by loading 4 or 8 bytes
-  // at a time.
-  for (int i = 0; i < str.length(); i++) {
+  // Do one character test first to minimize loading for the case that
+  // we don't match at all (loading more than one character introduces the
+  // chance of reading unaligned and reading across cache-line boundaries).
+  // If the first character matches, we expect a larger chance of matching
+  // the string, and start loading more characters at a time.
+  if (mode_ == ASCII) {
+    __ cmpb(Operand(rsi, rdi, times_1, byte_offset),
+            Immediate(static_cast<int8_t>(str[0])));
+  } else {
+    // Don't use a 16-bit immediate; the size-changing prefix throws off
+    // pre-decoding.
+    __ movzxwl(rax,
+               Operand(rsi, rdi, times_1, byte_offset));
+    __ cmpl(rax, Immediate(static_cast<int32_t>(str[0])));
+  }
+  BranchOrBacktrack(not_equal, on_failure);
+
+  __ lea(rbx, Operand(rsi, rdi, times_1, 0));
+  for (int i = 1, n = str.length(); i < n; ) {
     if (mode_ == ASCII) {
-      __ cmpb(Operand(rsi, rdi, times_1, byte_offset + i),
-              Immediate(static_cast<int8_t>(str[i])));
+      if (i + 8 <= n) {
+        uint64_t combined_chars =
+            (static_cast<uint64_t>(str[i + 0]) << 0) |
+            (static_cast<uint64_t>(str[i + 1]) << 8) |
+            (static_cast<uint64_t>(str[i + 2]) << 16) |
+            (static_cast<uint64_t>(str[i + 3]) << 24) |
+            (static_cast<uint64_t>(str[i + 4]) << 32) |
+            (static_cast<uint64_t>(str[i + 5]) << 40) |
+            (static_cast<uint64_t>(str[i + 6]) << 48) |
+            (static_cast<uint64_t>(str[i + 7]) << 56);
+        __ movq(rax, combined_chars, RelocInfo::NONE);
+        __ cmpq(rax, Operand(rbx, byte_offset + i));
+        i += 8;
+      } else if (i + 4 <= n) {
+        uint32_t combined_chars =
+            (static_cast<uint32_t>(str[i + 0]) << 0) |
+            (static_cast<uint32_t>(str[i + 1]) << 8) |
+            (static_cast<uint32_t>(str[i + 2]) << 16) |
+            (static_cast<uint32_t>(str[i + 3]) << 24);
+        __ cmpl(Operand(rbx, byte_offset + i), Immediate(combined_chars));
+        i += 4;
+      } else {
+        __ cmpb(Operand(rbx, byte_offset + i),
+                Immediate(static_cast<int8_t>(str[i])));
+        i++;
+      }
     } else {
       ASSERT(mode_ == UC16);
-      __ cmpw(Operand(rsi, rdi, times_1, byte_offset + i * sizeof(uc16)),
-              Immediate(str[i]));
+      if (i + 4 <= n) {
+        uint64_t combined_chars = *reinterpret_cast<const uint64_t*>(&str[i]);
+        __ movq(rax, combined_chars, RelocInfo::NONE);
+        __ cmpq(rax,
+                Operand(rsi, rdi, times_1, byte_offset + i * sizeof(uc16)));
+        i += 4;
+      } else if (i + 2 <= n) {
+        uint32_t combined_chars = *reinterpret_cast<const uint32_t*>(&str[i]);
+        __ cmpl(Operand(rsi, rdi, times_1, byte_offset + i * sizeof(uc16)),
+                Immediate(combined_chars));
+        i += 2;
+      } else {
+        __ movzxwl(rax,
+                   Operand(rsi, rdi, times_1, byte_offset + i * sizeof(uc16)));
+        __ cmpl(rax, Immediate(str[i]));
+        i++;
+      }
     }
     BranchOrBacktrack(not_equal, on_failure);
   }
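
With the bitwise-| fix above, the generator packs pattern characters into an immediate at code-generation time, and a single cmpq then checks eight ASCII characters at once. A host-side sketch of the equivalence, assuming a little-endian host as on the x64 target (PackAscii8 and the strings are illustrative):

#include <stdint.h>
#include <string.h>
#include <assert.h>

// Combine eight ASCII characters into one little-endian 64-bit value,
// the same way the code generator builds combined_chars.
static uint64_t PackAscii8(const char* s) {
  uint64_t combined = 0;
  for (int i = 0; i < 8; i++) {
    combined |= static_cast<uint64_t>(static_cast<uint8_t>(s[i])) << (8 * i);
  }
  return combined;
}

int main() {
  const char subject[] = "oobarbazquux";  // subject text after the first
                                          // character has already matched
  uint64_t expected = PackAscii8("oobarbaz");
  uint64_t actual;
  memcpy(&actual, subject, 8);            // the cmpq's memory operand
  assert(actual == expected);             // one compare, eight characters
  return 0;
}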
@@ -671,7 +735,6 @@
   __ push(rbx);  // Callee-save
 #endif
 
-  __ push(Immediate(0));  // Make room for "input start - 1" constant.
-  __ push(Immediate(0));  // Make room for "at start" constant.
+  __ push(Immediate(0));  // Make room for "input start - 1" constant.
 
   // Check if we have space on the stack for registers.
@@ -724,14 +787,6 @@
   // position registers.
   __ movq(Operand(rbp, kInputStartMinusOne), rax);
 
-  // Determine whether the start index is zero, that is at the start of the
-  // string, and store that value in a local variable.
-  __ movq(rbx, Operand(rbp, kStartIndex));
-  __ xor_(rcx, rcx);  // setcc only operates on cl (lower byte of rcx).
-  __ testq(rbx, rbx);
-  __ setcc(zero, rcx);  // 1 if 0 (start of string), 0 if positive.
-  __ movq(Operand(rbp, kAtStart), rcx);
-
   if (num_saved_registers_ > 0) {
     // Fill saved registers with initial value = start offset - 1
     // Fill in stack push order, to avoid accessing across an unwritten
@@ -761,8 +816,8 @@
   __ Move(code_object_pointer(), masm_->CodeObject());
   // Load previous char as initial value of current-character.
   Label at_start;
-  __ cmpb(Operand(rbp, kAtStart), Immediate(0));
-  __ j(not_equal, &at_start);
+  __ cmpb(Operand(rbp, kStartIndex), Immediate(0));
+  __ j(equal, &at_start);
   LoadCurrentCharacterUnchecked(-1, 1);  // Load previous char.
   __ jmp(&start_label_);
   __ bind(&at_start);
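
The dropped kAtStart slot cached exactly (start_index == 0), so each rewritten test compares kStartIndex against zero with the branch condition inverted. A tiny sanity check of that equivalence:

#include <assert.h>

int main() {
  for (int start_index = 0; start_index < 4; start_index++) {
    int at_start = (start_index == 0) ? 1 : 0;  // the old cached slot
    // Old: branch when at_start != 0.  New: branch when start_index == 0.
    assert((at_start != 0) == (start_index == 0));
  }
  return 0;
}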
diff --git a/src/x64/regexp-macro-assembler-x64.h b/src/x64/regexp-macro-assembler-x64.h
index 4903269..3bcc3ac 100644
--- a/src/x64/regexp-macro-assembler-x64.h
+++ b/src/x64/regexp-macro-assembler-x64.h
@@ -173,10 +173,9 @@
   // the frame in GetCode.
   static const int kInputStartMinusOne =
       kLastCalleeSaveRegister - kPointerSize;
-  static const int kAtStart = kInputStartMinusOne - kPointerSize;
 
   // First register address. Following registers are below it on the stack.
-  static const int kRegisterZero = kAtStart - kPointerSize;
+  static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
 
   // Initial size of code buffer.
   static const size_t kRegExpCodeSize = 1024;
diff --git a/src/x64/virtual-frame-x64.h b/src/x64/virtual-frame-x64.h
index 7cda181..529f47a 100644
--- a/src/x64/virtual-frame-x64.h
+++ b/src/x64/virtual-frame-x64.h
@@ -31,6 +31,7 @@
 #include "type-info.h"
 #include "register-allocator.h"
 #include "scopes.h"
+#include "codegen.h"
 
 namespace v8 {
 namespace internal {
@@ -98,23 +99,16 @@
     return register_locations_[num];
   }
 
-  int register_location(Register reg) {
-    return register_locations_[RegisterAllocator::ToNumber(reg)];
-  }
+  inline int register_location(Register reg);
 
-  void set_register_location(Register reg, int index) {
-    register_locations_[RegisterAllocator::ToNumber(reg)] = index;
-  }
+  inline void set_register_location(Register reg, int index);
 
   bool is_used(int num) {
     ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
     return register_locations_[num] != kIllegalIndex;
   }
 
-  bool is_used(Register reg) {
-    return register_locations_[RegisterAllocator::ToNumber(reg)]
-        != kIllegalIndex;
-  }
+  inline bool is_used(Register reg);
 
   // Add extra in-memory elements to the top of the frame to match an actual
   // frame (eg, the frame after an exception handler is pushed).  No code is
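
The bodies converted to declarations here follow the usual inline split: the declaration stays in the header, the definition moves to a matching -inl header, presumably so this header no longer depends on the definitions those bodies call. Schematically, with illustrative file and class names:

// In the header: declaration only.
class FrameSketch {
 public:
  inline int register_location(Register reg);
 private:
  int register_locations_[RegisterAllocator::kNumRegisters];
};

// In the matching -inl.h: the out-of-line inline definition.
inline int FrameSketch::register_location(Register reg) {
  return register_locations_[RegisterAllocator::ToNumber(reg)];
}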
@@ -218,10 +212,7 @@
   void SetElementAt(int index, Result* value);
 
   // Set a frame element to a constant.  The index is frame-top relative.
-  void SetElementAt(int index, Handle<Object> value) {
-    Result temp(value);
-    SetElementAt(index, &temp);
-  }
+  inline void SetElementAt(int index, Handle<Object> value);
 
   void PushElementAt(int index) {
     PushFrameSlotAt(element_count() - index - 1);
@@ -302,10 +293,7 @@
 
   // Call stub given the number of arguments it expects on (and
   // removes from) the stack.
-  Result CallStub(CodeStub* stub, int arg_count) {
-    PrepareForCall(arg_count, arg_count);
-    return RawCallStub(stub);
-  }
+  inline Result CallStub(CodeStub* stub, int arg_count);
 
   // Call stub that takes a single argument passed in eax.  The
   // argument is given as a result which does not have to be eax or
@@ -446,8 +434,8 @@
   int register_locations_[RegisterAllocator::kNumRegisters];
 
   // The number of frame-allocated locals and parameters respectively.
-  int parameter_count() { return cgen()->scope()->num_parameters(); }
-  int local_count() { return cgen()->scope()->num_stack_slots(); }
+  inline int parameter_count();
+  inline int local_count();
 
   // The index of the element that is at the processor's frame pointer
   // (the ebp register).  The parameters, receiver, and return address