Merge from v8 at revision 3723
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 07da800..74547be 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -30,9 +30,9 @@
 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 // OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// The original source code covered by the above license above has been modified
-// significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2010 the V8 project authors. All rights reserved.
 
 #include "v8.h"
 
@@ -1371,6 +1371,36 @@
 
 
 // Support for VFP.
+void Assembler::vldr(const DwVfpRegister dst,
+                     const Register base,
+                     int offset,
+                     const Condition cond) {
+  // Ddst = MEM(Rbase + offset).
+  // Instruction details available in ARM DDI 0406A, A8-628.
+  // cond(31-28) | 1101(27-24) | 1001(23-20) | Rbase(19-16) |
+  // Vdst(15-12) | 1011(11-8) | (offset/4)
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  ASSERT(offset % 4 == 0);
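+  // The byte offset is encoded as an 8-bit word count, so only offsets
+  // in [0, 1020] that are multiples of 4 are representable (the mask
+  // below truncates anything larger).  The 0xD9 pattern has the U (add
+  // offset) bit set.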
+  emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
+       0xB*B8 | ((offset / 4) & 255));
+}
+
+
+void Assembler::vstr(const DwVfpRegister src,
+                     const Register base,
+                     int offset,
+                     const Condition cond) {
+  // MEM(Rbase + offset) = Dsrc.
+  // Instruction details available in ARM DDI 0406A, A8-786.
+  // cond(31-28) | 1101(27-24) | 1000(23-20) | Rbase(19-16) |
+  // Vsrc(15-12) | 1011(11-8) | (offset/4)
+  ASSERT(CpuFeatures::IsEnabled(VFP3));
+  ASSERT(offset % 4 == 0);
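+  // As in vldr above, the byte offset is encoded as imm8 = offset / 4.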
+  emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 |
+       0xB*B8 | ((offset / 4) & 255));
+}
+
+
 void Assembler::vmov(const DwVfpRegister dst,
                      const Register src1,
                      const Register src2,
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index cd53dd6..8b65b7c 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -30,9 +30,9 @@
 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 // OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// The original source code covered by the above license above has been modified
-// significantly by Google Inc.
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2010 the V8 project authors. All rights reserved.
 
 // A light-weight ARM Assembler
 // Generates user mode instructions for the ARM architecture up to version 5
@@ -796,6 +796,14 @@
   // However, some simple modifications can allow
   // these APIs to support D16 to D31.
 
+  void vldr(const DwVfpRegister dst,
+            const Register base,
+            int offset,  // Offset must be a multiple of 4.
+            const Condition cond = al);
+  void vstr(const DwVfpRegister src,
+            const Register base,
+            int offset,  // Offset must be a multiple of 4.
+            const Condition cond = al);
   void vmov(const DwVfpRegister dst,
             const Register src1,
             const Register src2,
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 0c1dbcc..38f08d1 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -605,14 +605,19 @@
 }
 
 
-Reference::Reference(CodeGenerator* cgen, Expression* expression)
-    : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
+Reference::Reference(CodeGenerator* cgen,
+                     Expression* expression,
+                     bool persist_after_get)
+    : cgen_(cgen),
+      expression_(expression),
+      type_(ILLEGAL),
+      persist_after_get_(persist_after_get) {
   cgen->LoadReference(this);
 }
 
 
 Reference::~Reference() {
-  cgen_->UnloadReference(this);
+  ASSERT(is_unloaded() || is_illegal());
 }
 
 
@@ -661,6 +666,7 @@
     frame_->Drop(size);
     frame_->EmitPush(r0);
   }
+  ref->set_unloaded();
 }
 
 
@@ -1244,8 +1250,6 @@
       Reference target(this, node->proxy());
       LoadAndSpill(val);
       target.SetValue(NOT_CONST_INIT);
-      // The reference is removed from the stack (preserving TOS) when
-      // it goes out of scope.
     }
     // Get rid of the assigned value (declarations are statements).
     frame_->Drop();
@@ -1932,25 +1936,17 @@
       if (each.size() > 0) {
         __ ldr(r0, frame_->ElementAt(each.size()));
         frame_->EmitPush(r0);
-      }
-      // If the reference was to a slot we rely on the convenient property
-      // that it doesn't matter whether a value (eg, r3 pushed above) is
-      // right on top of or right underneath a zero-sized reference.
-      each.SetValue(NOT_CONST_INIT);
-      if (each.size() > 0) {
-        // It's safe to pop the value lying on top of the reference before
-        // unloading the reference itself (which preserves the top of stack,
-        // ie, now the topmost value of the non-zero sized reference), since
-        // we will discard the top of stack after unloading the reference
-        // anyway.
-        frame_->EmitPop(r0);
+        each.SetValue(NOT_CONST_INIT);
+        frame_->Drop(2);
+      } else {
+        // If the reference was to a slot we rely on the convenient property
+        // that it doesn't matter whether a value (eg, r3 pushed above) is
+        // right on top of or right underneath a zero-sized reference.
+        each.SetValue(NOT_CONST_INIT);
+        frame_->Drop();
       }
     }
   }
-  // Discard the i'th entry pushed above or else the remainder of the
-  // reference, whichever is currently on top of the stack.
-  frame_->Drop();
-
   // Body.
   CheckStack();  // TODO(1222600): ignore if body contains calls.
   VisitAndSpill(node->body());
@@ -2844,7 +2840,7 @@
   VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ Assignment");
 
-  { Reference target(this, node->target());
+  { Reference target(this, node->target(), node->is_compound());
     if (target.is_illegal()) {
       // Fool the virtual frame into thinking that we left the assignment's
       // value on the frame.
@@ -2859,8 +2855,7 @@
         node->op() == Token::INIT_CONST) {
       LoadAndSpill(node->value());
 
-    } else {
-      // +=, *= and similar binary assignments.
+    } else {  // Assignment is a compound assignment.
       // Get the old value of the lhs.
       target.GetValueAndSpill();
       Literal* literal = node->value()->AsLiteral();
@@ -2881,13 +2876,12 @@
         frame_->EmitPush(r0);
       }
     }
-
     Variable* var = node->target()->AsVariableProxy()->AsVariable();
     if (var != NULL &&
         (var->mode() == Variable::CONST) &&
         node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
       // Assignment ignored - leave the value on the stack.
-
+      UnloadReference(&target);
     } else {
       CodeForSourcePosition(node->position());
       if (node->op() == Token::INIT_CONST) {
@@ -3097,16 +3091,20 @@
       // JavaScript example: 'array[index](1, 2, 3)'
       // -------------------------------------------
 
-      // Load the function to call from the property through a reference.
-      Reference ref(this, property);
-      ref.GetValueAndSpill();  // receiver
-
-      // Pass receiver to called function.
+      LoadAndSpill(property->obj());
+      LoadAndSpill(property->key());
+      EmitKeyedLoad(false);
+      frame_->Drop();  // key
+      // Put the function below the receiver.
       if (property->is_synthetic()) {
+        // Use the global receiver.
+        frame_->Drop();
+        frame_->EmitPush(r0);
         LoadGlobalReceiver(r0);
       } else {
-        __ ldr(r0, frame_->ElementAt(ref.size()));
-        frame_->EmitPush(r0);
+        frame_->EmitPop(r1);  // receiver
+        frame_->EmitPush(r0);  // function
+        frame_->EmitPush(r1);  // receiver
       }
 
       // Call the function.
@@ -3470,6 +3468,20 @@
 }
 
 
+void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
+  VirtualFrame::SpilledScope spilled_scope;
+  ASSERT(args->length() == 1);
+  LoadAndSpill(args->at(0));
+  frame_->EmitPop(r0);
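+  // Smis are never undetectable; for heap objects, test the
+  // undetectable bit in the map's bit field.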
+  __ tst(r0, Operand(kSmiTagMask));
+  false_target()->Branch(eq);
+  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+  __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
+  __ tst(r1, Operand(1 << Map::kIsUndetectable));
+  cc_reg_ = ne;
+}
+
+
 void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
   VirtualFrame::SpilledScope spilled_scope;
   ASSERT(args->length() == 0);
@@ -3562,7 +3574,8 @@
   Load(args->at(0));
   Load(args->at(1));
 
-  frame_->CallRuntime(Runtime::kStringCompare, 2);
+  StringCompareStub stub;
+  frame_->CallStub(&stub, 2);
   frame_->EmitPush(r0);
 }
 
@@ -3792,7 +3805,9 @@
      frame_->EmitPush(r0);
   }
 
-  { Reference target(this, node->expression());
+  // A constant reference is never written to, so it does not need to
+  // persist after the get; it is not treated as a compound assignment
+  // reference.
+  { Reference target(this, node->expression(), !is_const);
     if (target.is_illegal()) {
       // Spoof the virtual frame to have the expected height (one higher
       // than on entry).
@@ -4253,6 +4268,16 @@
 }
 
 
+void CodeGenerator::EmitKeyedLoad(bool is_global) {
+  Comment cmnt(masm_, "[ Load from keyed Property");
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+  RelocInfo::Mode rmode = is_global
+                          ? RelocInfo::CODE_TARGET_CONTEXT
+                          : RelocInfo::CODE_TARGET;
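+  // CODE_TARGET_CONTEXT marks the site as a contextual load of a global
+  // variable, which the IC machinery treats specially.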
+  frame_->CallCodeObject(ic, rmode, 0);
+}
+
+
 #ifdef DEBUG
 bool CodeGenerator::HasValidEntryRegisters() { return true; }
 #endif
@@ -4319,23 +4344,21 @@
     case KEYED: {
       // TODO(181): Implement inlined version of array indexing once
       // loop nesting is properly tracked on ARM.
-      VirtualFrame* frame = cgen_->frame();
-      Comment cmnt(masm, "[ Load from keyed Property");
       ASSERT(property != NULL);
-      Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
       Variable* var = expression_->AsVariableProxy()->AsVariable();
       ASSERT(var == NULL || var->is_global());
-      RelocInfo::Mode rmode = (var == NULL)
-                            ? RelocInfo::CODE_TARGET
-                            : RelocInfo::CODE_TARGET_CONTEXT;
-      frame->CallCodeObject(ic, rmode, 0);
-      frame->EmitPush(r0);
+      cgen_->EmitKeyedLoad(var != NULL);
+      cgen_->frame()->EmitPush(r0);
       break;
     }
 
     default:
       UNREACHABLE();
   }
+
+  if (!persist_after_get_) {
+    cgen_->UnloadReference(this);
+  }
 }
 
 
@@ -4397,6 +4420,7 @@
     default:
       UNREACHABLE();
   }
+  cgen_->UnloadReference(this);
 }
 
 
@@ -4832,14 +4856,14 @@
                                     Label* lhs_not_nan,
                                     Label* slow,
                                     bool strict) {
-  Label lhs_is_smi;
+  Label rhs_is_smi;
   __ tst(r0, Operand(kSmiTagMask));
-  __ b(eq, &lhs_is_smi);
+  __ b(eq, &rhs_is_smi);
 
-  // Rhs is a Smi.  Check whether the non-smi is a heap number.
+  // Lhs is a Smi.  Check whether the rhs is a heap number.
   __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
   if (strict) {
-    // If lhs was not a number and rhs was a Smi then strict equality cannot
+    // If rhs is not a number and lhs is a Smi then strict equality cannot
     // succeed.  Return non-equal (r0 is already not zero)
     __ mov(pc, Operand(lr), LeaveCC, ne);  // Return.
   } else {
@@ -4848,57 +4872,67 @@
     __ b(ne, slow);
   }
 
-  // Rhs is a smi, lhs is a number.
-  __ push(lr);
-
+  // Lhs (r1) is a smi, rhs (r0) is a number.
   if (CpuFeatures::IsSupported(VFP3)) {
+    // Convert lhs to a double in d7.
     CpuFeatures::Scope scope(VFP3);
-    __ IntegerToDoubleConversionWithVFP3(r1, r3, r2);
+    __ mov(r7, Operand(r1, ASR, kSmiTagSize));
+    __ vmov(s15, r7);
+    __ vcvt(d7, s15);
+    // Load the double from rhs, tagged HeapNumber r0, to d6.
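+    // (vldr only accepts word-aligned immediate offsets, so the heap
+    // object tag is stripped first to form a word-aligned base.)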
+    __ sub(r7, r0, Operand(kHeapObjectTag));
+    __ vldr(d6, r7, HeapNumber::kValueOffset);
   } else {
+    __ push(lr);
+    // Convert lhs to a double in r2, r3.
     __ mov(r7, Operand(r1));
     ConvertToDoubleStub stub1(r3, r2, r7, r6);
     __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+    // Load rhs to a double in r0, r1.
+    __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
+    __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+    __ pop(lr);
   }
 
-
-  // r3 and r2 are rhs as double.
-  __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
-  __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
   // We now have both loaded as doubles but we can skip the lhs nan check
-  // since it's a Smi.
-  __ pop(lr);
+  // since it's a smi.
   __ jmp(lhs_not_nan);
 
-  __ bind(&lhs_is_smi);
-  // Lhs is a Smi.  Check whether the non-smi is a heap number.
+  __ bind(&rhs_is_smi);
+  // Rhs is a smi.  Check whether the non-smi lhs is a heap number.
   __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
   if (strict) {
-    // If lhs was not a number and rhs was a Smi then strict equality cannot
+    // If lhs is not a number and rhs is a smi then strict equality cannot
     // succeed.  Return non-equal.
     __ mov(r0, Operand(1), LeaveCC, ne);  // Non-zero indicates not equal.
     __ mov(pc, Operand(lr), LeaveCC, ne);  // Return.
   } else {
-    // Smi compared non-strictly with a non-Smi non-heap-number.  Call
+    // Smi compared non-strictly with a non-smi non-heap-number.  Call
     // the runtime.
     __ b(ne, slow);
   }
 
-  // Lhs is a smi, rhs is a number.
-  // r0 is Smi and r1 is heap number.
-  __ push(lr);
-  __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
-  __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
-
+  // Rhs (r0) is a smi, lhs (r1) is a heap number.
   if (CpuFeatures::IsSupported(VFP3)) {
+    // Convert rhs to a double in d6.
     CpuFeatures::Scope scope(VFP3);
-    __ IntegerToDoubleConversionWithVFP3(r0, r1, r0);
+    // Load the double from lhs, tagged HeapNumber r1, to d7.
+    __ sub(r7, r1, Operand(kHeapObjectTag));
+    __ vldr(d7, r7, HeapNumber::kValueOffset);
+    __ mov(r7, Operand(r0, ASR, kSmiTagSize));
+    __ vmov(s13, r7);
+    __ vcvt(d6, s13);
   } else {
+    __ push(lr);
+    // Load lhs to a double in r2, r3.
+    __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
+    __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
+    // Convert rhs to a double in r0, r1.
     __ mov(r7, Operand(r0));
     ConvertToDoubleStub stub2(r1, r0, r7, r6);
     __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+    __ pop(lr);
   }
-
-  __ pop(lr);
   // Fall through to both_loaded_as_doubles.
 }
 
@@ -5047,10 +5081,18 @@
 
   // Both are heap numbers.  Load them up then jump to the code we have
   // for that.
-  __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
-  __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
-  __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
-  __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
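+    // Load both heap number values directly into VFP registers, using
+    // untagged base addresses since vldr needs word-aligned offsets.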
+    __ sub(r7, r0, Operand(kHeapObjectTag));
+    __ vldr(d6, r7, HeapNumber::kValueOffset);
+    __ sub(r7, r1, Operand(kHeapObjectTag));
+    __ vldr(d7, r7, HeapNumber::kValueOffset);
+  } else {
+    __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
+    __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
+    __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
+    __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+  }
   __ jmp(both_loaded_as_doubles);
 }
 
@@ -5075,8 +5117,9 @@
 }
 
 
-// On entry r0 and r1 are the things to be compared.  On exit r0 is 0,
-// positive or negative to indicate the result of the comparison.
+// On entry r0 (rhs) and r1 (lhs) are the values to be compared.
+// On exit r0 is 0, positive or negative to indicate the result of
+// the comparison.
 void CompareStub::Generate(MacroAssembler* masm) {
   Label slow;  // Call builtin.
   Label not_smis, both_loaded_as_doubles, lhs_not_nan;
@@ -5101,21 +5144,19 @@
   // 3) Fall through to both_loaded_as_doubles.
   // 4) Jump to lhs_not_nan.
   // In cases 3 and 4 we have found out we were dealing with a number-number
-  // comparison and the numbers have been loaded into r0, r1, r2, r3 as doubles.
+  // comparison.  If VFP3 is supported the double values of the numbers have
+  // been loaded into d7 and d6.  Otherwise, the double values have been loaded
+  // into r0, r1, r2, and r3.
   EmitSmiNonsmiComparison(masm, &lhs_not_nan, &slow, strict_);
 
   __ bind(&both_loaded_as_doubles);
-  // r0, r1, r2, r3 are the double representations of the right hand side
-  // and the left hand side.
-
+  // The arguments have been converted to doubles and stored in d6 and d7, if
+  // VFP3 is supported, or in r0, r1, r2, and r3.
   if (CpuFeatures::IsSupported(VFP3)) {
     __ bind(&lhs_not_nan);
     CpuFeatures::Scope scope(VFP3);
     Label no_nan;
     // ARMv7 VFP3 instructions to implement double precision comparison.
-    __ vmov(d6, r0, r1);
-    __ vmov(d7, r2, r3);
-
     __ vcmp(d7, d6);
     __ vmrs(pc);  // Move vector status bits to normal status bits.
     Label nan;
@@ -5154,6 +5195,7 @@
   }
 
   Label check_for_symbols;
+  Label flat_string_check;
   // Check for heap-number-heap-number comparison.  Can jump to slow case,
   // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
   // that case.  If the inputs are not doubles then jumps to check_for_symbols.
@@ -5161,7 +5203,7 @@
   EmitCheckForTwoHeapNumbers(masm,
                              &both_loaded_as_doubles,
                              &check_for_symbols,
-                             &slow);
+                             &flat_string_check);
 
   __ bind(&check_for_symbols);
   // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
@@ -5169,10 +5211,27 @@
   if (cc_ == eq && !strict_) {
     // Either jumps to slow or returns the answer.  Assumes that r2 is the type
     // of r0 on entry.
-    EmitCheckForSymbols(masm, &slow);
+    EmitCheckForSymbols(masm, &flat_string_check);
   }
 
+  // Check for both being sequential ASCII strings, and inline if that is the
+  // case.
+  __ bind(&flat_string_check);
+
+  __ JumpIfNonSmisNotBothSequentialAsciiStrings(r0, r1, r2, r3, &slow);
+
+  __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
+  StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+                                                     r1,
+                                                     r0,
+                                                     r2,
+                                                     r3,
+                                                     r4,
+                                                     r5);
+  // Never falls through to here.
+
   __ bind(&slow);
   __ push(r1);
   __ push(r0);
   // Figure out which native to call and setup the arguments.
@@ -5239,10 +5298,18 @@
   // The new heap number is in r5.  r6 and r7 are scratch.
   AllocateHeapNumber(masm, &slow, r5, r6, r7);
 
-  if (CpuFeatures::IsSupported(VFP3)) {
+  // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
+  // using registers d7 and d6 for the double values.
+  bool use_fp_registers = CpuFeatures::IsSupported(VFP3) &&
+      Token::MOD != operation;
+  if (use_fp_registers) {
     CpuFeatures::Scope scope(VFP3);
-    __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
-    __ IntegerToDoubleConversionWithVFP3(r1, r1, r0);
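+    // Untag each smi into r7, move it into a single-precision VFP
+    // register, and convert it to a double: r0 becomes d7, r1 becomes d6.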
+    __ mov(r7, Operand(r0, ASR, kSmiTagSize));
+    __ vmov(s15, r7);
+    __ vcvt(d7, s15);
+    __ mov(r7, Operand(r1, ASR, kSmiTagSize));
+    __ vmov(s13, r7);
+    __ vcvt(d6, s13);
   } else {
     // Write Smi from r0 to r3 and r2 in double format.  r6 is scratch.
     __ mov(r7, Operand(r0));
@@ -5324,9 +5391,16 @@
   if (mode == OVERWRITE_RIGHT) {
     __ mov(r5, Operand(r0));  // Overwrite this heap number.
   }
-  // Calling convention says that second double is in r2 and r3.
-  __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
-  __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4));
+  if (use_fp_registers) {
+    CpuFeatures::Scope scope(VFP3);
+    // Load the double from tagged HeapNumber r0 to d7.
+    __ sub(r7, r0, Operand(kHeapObjectTag));
+    __ vldr(d7, r7, HeapNumber::kValueOffset);
+  } else {
+    // Calling convention says that second double is in r2 and r3.
+    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
+    __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + 4));
+  }
   __ jmp(&finished_loading_r0);
   __ bind(&r0_is_smi);
   if (mode == OVERWRITE_RIGHT) {
@@ -5334,10 +5408,12 @@
     AllocateHeapNumber(masm, &slow, r5, r6, r7);
   }
 
-
-  if (CpuFeatures::IsSupported(VFP3)) {
+  if (use_fp_registers) {
     CpuFeatures::Scope scope(VFP3);
-    __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
+    // Convert smi in r0 to double in d7.
+    __ mov(r7, Operand(r0, ASR, kSmiTagSize));
+    __ vmov(s15, r7);
+    __ vcvt(d7, s15);
   } else {
     // Write Smi from r0 to r3 and r2 in double format.
     __ mov(r7, Operand(r0));
@@ -5357,9 +5433,16 @@
   if (mode == OVERWRITE_LEFT) {
     __ mov(r5, Operand(r1));  // Overwrite this heap number.
   }
-  // Calling convention says that first double is in r0 and r1.
-  __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
-  __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4));
+  if (use_fp_registers) {
+    CpuFeatures::Scope scope(VFP3);
+    // Load the double from tagged HeapNumber r1 to d6.
+    __ sub(r7, r1, Operand(kHeapObjectTag));
+    __ vldr(d6, r7, HeapNumber::kValueOffset);
+  } else {
+    // Calling convention says that first double is in r0 and r1.
+    __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
+    __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + 4));
+  }
   __ jmp(&finished_loading_r1);
   __ bind(&r1_is_smi);
   if (mode == OVERWRITE_LEFT) {
@@ -5367,9 +5450,12 @@
     AllocateHeapNumber(masm, &slow, r5, r6, r7);
   }
 
-  if (CpuFeatures::IsSupported(VFP3)) {
+  if (use_fp_registers) {
     CpuFeatures::Scope scope(VFP3);
-    __ IntegerToDoubleConversionWithVFP3(r1, r1, r0);
+    // Convert smi in r1 to double in d6.
+    __ mov(r7, Operand(r1, ASR, kSmiTagSize));
+    __ vmov(s13, r7);
+    __ vcvt(d6, s13);
   } else {
     // Write Smi from r1 to r1 and r0 in double format.
     __ mov(r7, Operand(r1));
@@ -5382,22 +5468,12 @@
   __ bind(&finished_loading_r1);
 
   __ bind(&do_the_call);
-  // r0: Left value (least significant part of mantissa).
-  // r1: Left value (sign, exponent, top of mantissa).
-  // r2: Right value (least significant part of mantissa).
-  // r3: Right value (sign, exponent, top of mantissa).
-  // r5: Address of heap number for result.
-
-  if (CpuFeatures::IsSupported(VFP3) &&
-      ((Token::MUL == operation) ||
-       (Token::DIV == operation) ||
-       (Token::ADD == operation) ||
-       (Token::SUB == operation))) {
+  // If we are inlining the operation using VFP3 instructions for
+  // add, subtract, multiply, or divide, the arguments are in d6 and d7.
+  if (use_fp_registers) {
     CpuFeatures::Scope scope(VFP3);
     // ARMv7 VFP3 instructions to implement
     // double precision, add, subtract, multiply, divide.
-    __ vmov(d6, r0, r1);
-    __ vmov(d7, r2, r3);
 
     if (Token::MUL == operation) {
       __ vmul(d5, d6, d7);
@@ -5410,15 +5486,20 @@
     } else {
       UNREACHABLE();
     }
-
-    __ vmov(r0, r1, d5);
-
-    __ str(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
-    __ str(r1, FieldMemOperand(r5, HeapNumber::kValueOffset + 4));
-    __ mov(r0, Operand(r5));
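+    // vstr needs an untagged, word-aligned base address, so strip the
+    // heap object tag before storing and re-add it afterwards to return
+    // the tagged result in r0.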
+    __ sub(r0, r5, Operand(kHeapObjectTag));
+    __ vstr(d5, r0, HeapNumber::kValueOffset);
+    __ add(r0, r0, Operand(kHeapObjectTag));
     __ mov(pc, lr);
     return;
   }
+
+  // If we did not inline the operation, then the arguments are in:
+  // r0: Left value (least significant part of mantissa).
+  // r1: Left value (sign, exponent, top of mantissa).
+  // r2: Right value (least significant part of mantissa).
+  // r3: Right value (sign, exponent, top of mantissa).
+  // r5: Address of heap number for result.
+
   __ push(lr);   // For later.
   __ push(r5);   // Address of heap number that is answer.
   __ AlignStack(0);
@@ -6723,6 +6804,101 @@
 }
 
 
+void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+                                                        Register left,
+                                                        Register right,
+                                                        Register scratch1,
+                                                        Register scratch2,
+                                                        Register scratch3,
+                                                        Register scratch4) {
+  Label compare_lengths;
+  // Find minimum length and length difference.
+  __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
+  __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
+  __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
+  Register length_delta = scratch3;
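+  // If the left string is the longer one (gt from the subtraction
+  // above), take the right string's length as the minimum.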
+  __ mov(scratch1, scratch2, LeaveCC, gt);
+  Register min_length = scratch1;
+  __ tst(min_length, Operand(min_length));
+  __ b(eq, &compare_lengths);
+
+  // Setup registers so that we only need to increment one register
+  // in the loop.
+  __ add(scratch2, min_length,
+         Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+  __ add(left, left, Operand(scratch2));
+  __ add(right, right, Operand(scratch2));
+  // Registers left and right now point to the character one past the
+  // last character to compare (index min_length) in each string.
+  __ rsb(min_length, min_length, Operand(-1));
+  Register index = min_length;
+  // Index starts at -min_length.
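+  // (The add at the top of the loop pre-increments index with SetCC;
+  // when it reaches zero the conditional loads are skipped and control
+  // falls out to the length comparison.)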
+
+  {
+    // Compare loop.
+    Label loop;
+    __ bind(&loop);
+    // Compare characters.
+    __ add(index, index, Operand(1), SetCC);
+    __ ldrb(scratch2, MemOperand(left, index), ne);
+    __ ldrb(scratch4, MemOperand(right, index), ne);
+    // Skip to compare lengths with eq condition true.
+    __ b(eq, &compare_lengths);
+    __ cmp(scratch2, scratch4);
+    __ b(eq, &loop);
+    // Fallthrough with eq condition false.
+  }
+  // Compare lengths - strings up to min_length are equal.
+  __ bind(&compare_lengths);
+  ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+  // Use zero length_delta as result.
+  __ mov(r0, Operand(length_delta), SetCC, eq);
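+  // (The eq-conditional mov above uses SetCC, so when all compared
+  // characters were equal the flags are reset from length_delta for the
+  // gt/lt movs below.)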
+  // Fall through to here if characters compare not-equal.
+  __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
+  __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
+  __ Ret();
+}
+
+
+void StringCompareStub::Generate(MacroAssembler* masm) {
+  Label runtime;
+
+  // Stack frame on entry.
+  //  sp[0]: return address
+  //  sp[4]: right string
+  //  sp[8]: left string
+
+  __ ldr(r0, MemOperand(sp, 2 * kPointerSize));  // left
+  __ ldr(r1, MemOperand(sp, 1 * kPointerSize));  // right
+
+  Label not_same;
+  __ cmp(r0, r1);
+  __ b(ne, &not_same);
+  ASSERT_EQ(0, EQUAL);
+  ASSERT_EQ(0, kSmiTag);
+  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
+  __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2);
+  __ add(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  __ bind(&not_same);
+
+  // Check that both objects are sequential ASCII strings.
+  __ JumpIfNotBothSequentialAsciiStrings(r0, r1, r2, r3, &runtime);
+
+  // Compare flat ASCII strings natively.  Remove arguments from stack first.
+  __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
+  __ add(sp, sp, Operand(2 * kPointerSize));
+  GenerateCompareFlatAsciiStrings(masm, r0, r1, r2, r3, r4, r5);
+
+  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
+  // tagged as a small integer.
+  __ bind(&runtime);
+  __ TailCallRuntime(ExternalReference(Runtime::kStringCompare), 2, 1);
+}
+
+
 #undef __
 
 } }  // namespace v8::internal
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index f5de0eb..ccca2e9 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -43,57 +43,69 @@
 // -------------------------------------------------------------------------
 // Reference support
 
-// A reference is a C++ stack-allocated object that keeps an ECMA
-// reference on the execution stack while in scope. For variables
-// the reference is empty, indicating that it isn't necessary to
-// store state on the stack for keeping track of references to those.
-// For properties, we keep either one (named) or two (indexed) values
-// on the execution stack to represent the reference.
-
+// A reference is a C++ stack-allocated object that puts a
+// reference on the virtual frame.  The reference may be consumed
+// by GetValue, TakeValue, SetValue, and CodeGenerator::UnloadReference.
+// When the lifetime (scope) of a valid reference ends, it must have
+// been consumed, and be in state UNLOADED.
 class Reference BASE_EMBEDDED {
  public:
   // The values of the types is important, see size().
-  enum Type { ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
-  Reference(CodeGenerator* cgen, Expression* expression);
+  enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
+  Reference(CodeGenerator* cgen,
+            Expression* expression,
+            bool persist_after_get = false);
   ~Reference();
 
   Expression* expression() const { return expression_; }
   Type type() const { return type_; }
   void set_type(Type value) {
-    ASSERT(type_ == ILLEGAL);
+    ASSERT_EQ(ILLEGAL, type_);
     type_ = value;
   }
 
+  void set_unloaded() {
+    ASSERT_NE(ILLEGAL, type_);
+    ASSERT_NE(UNLOADED, type_);
+    type_ = UNLOADED;
+  }
   // The size the reference takes up on the stack.
-  int size() const { return (type_ == ILLEGAL) ? 0 : type_; }
+  int size() const {
+    return (type_ < SLOT) ? 0 : type_;
+  }
 
   bool is_illegal() const { return type_ == ILLEGAL; }
   bool is_slot() const { return type_ == SLOT; }
   bool is_property() const { return type_ == NAMED || type_ == KEYED; }
+  bool is_unloaded() const { return type_ == UNLOADED; }
 
   // Return the name.  Only valid for named property references.
   Handle<String> GetName();
 
   // Generate code to push the value of the reference on top of the
   // expression stack.  The reference is expected to be already on top of
-  // the expression stack, and it is left in place with its value above it.
+  // the expression stack, and it is consumed by the call unless the
+  // reference is for a compound assignment.
+  // If the reference is not consumed, it is left in place under its value.
   void GetValue();
 
-  // Generate code to push the value of a reference on top of the expression
-  // stack and then spill the stack frame.  This function is used temporarily
-  // while the code generator is being transformed.
+  // Generate code to pop a reference, push the value of the reference,
+  // and then spill the stack frame.
   inline void GetValueAndSpill();
 
   // Generate code to store the value on top of the expression stack in the
   // reference.  The reference is expected to be immediately below the value
-  // on the expression stack.  The stored value is left in place (with the
-  // reference intact below it) to support chained assignments.
+  // on the expression stack.  The value is stored in the location
+  // specified by the reference, and is left on top of the stack after
+  // the reference is popped from beneath it (unloaded).
   void SetValue(InitState init_state);
 
  private:
   CodeGenerator* cgen_;
   Expression* expression_;
   Type type_;
+  // Keep the reference on the stack after get, so it can be used by set later.
+  bool persist_after_get_;
 };
 
 
@@ -274,6 +286,9 @@
   void LoadFromSlot(Slot* slot, TypeofState typeof_state);
   // Store the value on top of the stack to a slot.
   void StoreToSlot(Slot* slot, InitState init_state);
+  // Load a keyed property, leaving it in r0.  The receiver and key are
+  // passed on the stack, and remain there.
+  void EmitKeyedLoad(bool is_global);
 
   void LoadFromGlobalSlotCheckExtensions(Slot* slot,
                                          TypeofState typeof_state,
@@ -341,6 +356,7 @@
   void GenerateIsArray(ZoneList<Expression*>* args);
   void GenerateIsObject(ZoneList<Expression*>* args);
   void GenerateIsFunction(ZoneList<Expression*>* args);
+  void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
 
   // Support for construct call checks.
   void GenerateIsConstructCall(ZoneList<Expression*>* args);
@@ -427,8 +443,8 @@
   friend class VirtualFrame;
   friend class JumpTarget;
   friend class Reference;
-  friend class FastCodeGenerator;
-  friend class CodeGenSelector;
+  friend class FullCodeGenerator;
+  friend class FullCodeGenSyntaxChecker;
 
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
@@ -511,6 +527,28 @@
 };
 
 
+class StringCompareStub: public CodeStub {
+ public:
+  StringCompareStub() { }
+
+  // Compares two flat ASCII strings and returns the result in r0.
+  // Does not use the stack.
+  static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+                                              Register left,
+                                              Register right,
+                                              Register scratch1,
+                                              Register scratch2,
+                                              Register scratch3,
+                                              Register scratch4);
+
+ private:
+  Major MajorKey() { return StringCompare; }
+  int MinorKey() { return 0; }
+
+  void Generate(MacroAssembler* masm);
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_ARM_CODEGEN_ARM_H_
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index 9432207..8a32c95 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -237,6 +237,7 @@
   inline int RnField() const { return Bits(19, 16); }
   inline int RdField() const { return Bits(15, 12); }
 
+  inline int CoprocessorField() const { return Bits(11, 8); }
   // Support for VFP.
   // Vn(19-16) | Vd(15-12) |  Vm(3-0)
   inline int VnField() const { return Bits(19, 16); }
@@ -246,6 +247,8 @@
   inline int MField() const { return Bit(5); }
   inline int DField() const { return Bit(22); }
   inline int RtField() const { return Bits(15, 12); }
+  inline int PField() const { return Bit(24); }
+  inline int UField() const { return Bit(23); }
 
   // Fields used in Data processing instructions
   inline Opcode OpcodeField() const {
@@ -296,6 +299,7 @@
   inline bool HasB()    const { return BField() == 1; }
   inline bool HasW()    const { return WField() == 1; }
   inline bool HasL()    const { return LField() == 1; }
+  inline bool HasU()    const { return UField() == 1; }
   inline bool HasSign() const { return SignField() == 1; }
   inline bool HasH()    const { return HField() == 1; }
   inline bool HasLink() const { return LinkField() == 1; }
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index afed0fa..5b31455 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2007-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -998,29 +998,43 @@
 // Decode Type 6 coprocessor instructions.
 // Dm = vmov(Rt, Rt2)
 // <Rt, Rt2> = vmov(Dm)
+// Ddst = MEM(Rbase + 4*offset).
+// MEM(Rbase + 4*offset) = Dsrc.
 void Decoder::DecodeType6CoprocessorIns(Instr* instr) {
   ASSERT((instr->TypeField() == 6));
 
-  if (instr->Bit(23) == 1) {
-     Unknown(instr);  // Not used by V8.
-  } else if (instr->Bit(22) == 1) {
-    if ((instr->Bits(27, 24) == 0xC) &&
-        (instr->Bit(22) == 1) &&
-        (instr->Bits(11, 8) == 0xB) &&
-        (instr->Bits(7, 6) == 0x0) &&
-        (instr->Bit(4) == 1)) {
-      if (instr->Bit(20) == 0) {
-        Format(instr, "vmov'cond 'Dm, 'rt, 'rn");
-      } else if (instr->Bit(20) == 1) {
-        Format(instr, "vmov'cond 'rt, 'rn, 'Dm");
-      }
-    } else {
-      Unknown(instr);  // Not used by V8.
-    }
-  } else if (instr->Bit(21) == 1) {
+  if (instr->CoprocessorField() != 0xB) {
     Unknown(instr);  // Not used by V8.
   } else {
-    Unknown(instr);  // Not used by V8.
+    switch (instr->OpcodeField()) {
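+      // For type 6 instructions OpcodeField() returns bits 24-21, i.e.
+      // the P, U, D and W bits.  0x2 is the two-register transfer, while
+      // 0x8 and 0xC are vldr/vstr with the U (add offset) bit clear or
+      // set.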
+      case 0x2:
+        // Load and store double to two GP registers
+        if (instr->Bits(7, 4) != 0x1) {
+          Unknown(instr);  // Not used by V8.
+        } else if (instr->HasL()) {
+          Format(instr, "vmov'cond 'rt, 'rn, 'Dm");
+        } else {
+          Format(instr, "vmov'cond 'Dm, 'rt, 'rn");
+        }
+        break;
+      case 0x8:
+        if (instr->HasL()) {
+          Format(instr, "vldr'cond 'Dd, ['rn - 4*'off8]");
+        } else {
+          Format(instr, "vstr'cond 'Dd, ['rn - 4*'off8]");
+        }
+        break;
+      case 0xC:
+        if (instr->HasL()) {
+          Format(instr, "vldr'cond 'Dd, ['rn + 4*'off8]");
+        } else {
+          Format(instr, "vstr'cond 'Dd, ['rn + 4*'off8]");
+        }
+        break;
+      default:
+        Unknown(instr);  // Not used by V8.
+        break;
+    }
   }
 }
 
diff --git a/src/arm/fast-codegen-arm.cc b/src/arm/full-codegen-arm.cc
similarity index 90%
rename from src/arm/fast-codegen-arm.cc
rename to src/arm/full-codegen-arm.cc
index 4256e47..8d1cfeb 100644
--- a/src/arm/fast-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -30,7 +30,7 @@
 #include "codegen-inl.h"
 #include "compiler.h"
 #include "debug.h"
-#include "fast-codegen.h"
+#include "full-codegen.h"
 #include "parser.h"
 
 namespace v8 {
@@ -52,7 +52,7 @@
 //
 // The function builds a JS frame.  Please see JavaScriptFrameConstants in
 // frames-arm.h for its layout.
-void FastCodeGenerator::Generate(FunctionLiteral* fun) {
+void FullCodeGenerator::Generate(FunctionLiteral* fun) {
   function_ = fun;
   SetFunctionPosition(fun);
   int locals_count = fun->scope()->num_stack_slots();
@@ -167,7 +167,7 @@
 }
 
 
-void FastCodeGenerator::EmitReturnSequence(int position) {
+void FullCodeGenerator::EmitReturnSequence(int position) {
   Comment cmnt(masm_, "[ Return sequence");
   if (return_label_.is_bound()) {
     __ b(&return_label_);
@@ -214,7 +214,7 @@
 }
 
 
-void FastCodeGenerator::Apply(Expression::Context context, Register reg) {
+void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
   switch (context) {
     case Expression::kUninitialized:
       UNREACHABLE();
@@ -250,7 +250,7 @@
 }
 
 
-void FastCodeGenerator::Apply(Expression::Context context, Slot* slot) {
+void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
   switch (context) {
     case Expression::kUninitialized:
       UNREACHABLE();
@@ -270,7 +270,7 @@
 }
 
 
-void FastCodeGenerator::Apply(Expression::Context context, Literal* lit) {
+void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
   switch (context) {
     case Expression::kUninitialized:
       UNREACHABLE();
@@ -290,7 +290,7 @@
 }
 
 
-void FastCodeGenerator::ApplyTOS(Expression::Context context) {
+void FullCodeGenerator::ApplyTOS(Expression::Context context) {
   switch (context) {
     case Expression::kUninitialized:
       UNREACHABLE();
@@ -323,7 +323,7 @@
 }
 
 
-void FastCodeGenerator::DropAndApply(int count,
+void FullCodeGenerator::DropAndApply(int count,
                                      Expression::Context context,
                                      Register reg) {
   ASSERT(count > 0);
@@ -371,7 +371,7 @@
 }
 
 
-void FastCodeGenerator::Apply(Expression::Context context,
+void FullCodeGenerator::Apply(Expression::Context context,
                               Label* materialize_true,
                               Label* materialize_false) {
   switch (context) {
@@ -432,7 +432,7 @@
 }
 
 
-void FastCodeGenerator::DoTest(Expression::Context context) {
+void FullCodeGenerator::DoTest(Expression::Context context) {
   // The value to test is pushed on the stack, and duplicated on the stack
   // if necessary (for value/test and test/value contexts).
   ASSERT_NE(NULL, true_label_);
@@ -495,7 +495,7 @@
 }
 
 
-MemOperand FastCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
+MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
   switch (slot->type()) {
     case Slot::PARAMETER:
     case Slot::LOCAL:
@@ -514,14 +514,14 @@
 }
 
 
-void FastCodeGenerator::Move(Register destination, Slot* source) {
+void FullCodeGenerator::Move(Register destination, Slot* source) {
   // Use destination as scratch.
   MemOperand slot_operand = EmitSlotSearch(source, destination);
   __ ldr(destination, slot_operand);
 }
 
 
-void FastCodeGenerator::Move(Slot* dst,
+void FullCodeGenerator::Move(Slot* dst,
                              Register src,
                              Register scratch1,
                              Register scratch2) {
@@ -537,7 +537,7 @@
 }
 
 
-void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
+void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
   Comment cmnt(masm_, "[ Declaration");
   Variable* var = decl->proxy()->var();
   ASSERT(var != NULL);  // Must have been resolved.
@@ -637,7 +637,7 @@
 }
 
 
-void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.
   // The context is the first argument.
   __ mov(r1, Operand(pairs));
@@ -648,7 +648,7 @@
 }
 
 
-void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
   Comment cmnt(masm_, "[ FunctionLiteral");
 
   // Build the function boilerplate and instantiate it.
@@ -666,17 +666,21 @@
 }
 
 
-void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
   Comment cmnt(masm_, "[ VariableProxy");
   EmitVariableLoad(expr->var(), context_);
 }
 
 
-void FastCodeGenerator::EmitVariableLoad(Variable* var,
+void FullCodeGenerator::EmitVariableLoad(Variable* var,
                                          Expression::Context context) {
-  Expression* rewrite = var->rewrite();
-  if (rewrite == NULL) {
-    ASSERT(var->is_global());
+  // Four cases: non-this global variables, lookup slots, all other
+  // types of slots, and parameters that rewrite to explicit property
+  // accesses on the arguments object.
+  Slot* slot = var->slot();
+  Property* property = var->AsProperty();
+
+  if (var->is_global() && !var->is_this()) {
     Comment cmnt(masm_, "Global variable");
     // Use inline caching. Variable name is passed in r2 and the global
     // object on the stack.
@@ -686,34 +690,24 @@
     Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
     __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
     DropAndApply(1, context, r0);
-  } else if (rewrite->AsSlot() != NULL) {
-    Slot* slot = rewrite->AsSlot();
-    if (FLAG_debug_code) {
-      switch (slot->type()) {
-        case Slot::PARAMETER:
-        case Slot::LOCAL: {
-          Comment cmnt(masm_, "Stack slot");
-          break;
-        }
-        case Slot::CONTEXT: {
-          Comment cmnt(masm_, "Context slot");
-          break;
-        }
-        case Slot::LOOKUP:
-          UNIMPLEMENTED();
-          break;
-      }
-    }
-    Apply(context, slot);
-  } else {
-    Comment cmnt(masm_, "Variable rewritten to property");
-    // A variable has been rewritten into an explicit access to an object
-    // property.
-    Property* property = rewrite->AsProperty();
-    ASSERT_NOT_NULL(property);
 
-    // The only property expressions that can occur are of the form
-    // "slot[literal]".
+  } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+    Comment cmnt(masm_, "Lookup slot");
+    __ mov(r1, Operand(var->name()));
+    __ stm(db_w, sp, cp.bit() | r1.bit());  // Context and name.
+    __ CallRuntime(Runtime::kLoadContextSlot, 2);
+    Apply(context, r0);
+
+  } else if (slot != NULL) {
+    Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
+                            ? "Context slot"
+                            : "Stack slot");
+    Apply(context, slot);
+
+  } else {
+    Comment cmnt(masm_, "Rewritten parameter");
+    ASSERT_NOT_NULL(property);
+    // Rewritten parameter accesses are of the form "slot[literal]".
 
     // Assert that the object is in a slot.
     Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
@@ -745,7 +739,7 @@
 }
 
 
-void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
   Comment cmnt(masm_, "[ RegExpLiteral");
   Label done;
   // Registers will be used as follows:
@@ -772,7 +766,7 @@
 }
 
 
-void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   Comment cmnt(masm_, "[ ObjectLiteral");
   __ ldr(r2, MemOperand(fp,  JavaScriptFrameConstants::kFunctionOffset));
   __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
@@ -847,7 +841,7 @@
 }
 
 
-void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   Comment cmnt(masm_, "[ ArrayLiteral");
   __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
@@ -900,7 +894,7 @@
 }
 
 
-void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Literal* key = prop->key()->AsLiteral();
   __ mov(r2, Operand(key->handle()));
@@ -909,14 +903,14 @@
 }
 
 
-void FastCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
   __ Call(ic, RelocInfo::CODE_TARGET);
 }
 
 
-void FastCodeGenerator::EmitBinaryOp(Token::Value op,
+void FullCodeGenerator::EmitBinaryOp(Token::Value op,
                                      Expression::Context context) {
   __ pop(r1);
   GenericBinaryOpStub stub(op, NO_OVERWRITE);
@@ -925,11 +919,17 @@
 }
 
 
-void FastCodeGenerator::EmitVariableAssignment(Variable* var,
+void FullCodeGenerator::EmitVariableAssignment(Variable* var,
                                                Expression::Context context) {
+  // Three main cases: global variables, lookup slots, and all other
+  // types of slots.  Left-hand-side parameters that rewrite to
+  // explicit property accesses do not reach here.
   ASSERT(var != NULL);
   ASSERT(var->is_global() || var->slot() != NULL);
+
+  Slot* slot = var->slot();
   if (var->is_global()) {
+    ASSERT(!var->is_this());
     // Assignment to a global variable.  Use inline caching for the
     // assignment.  Right-hand-side value is passed in r0, variable name in
     // r2, and the global object on the stack.
@@ -941,6 +941,13 @@
     // Overwrite the global object on the stack with the result if needed.
     DropAndApply(1, context, r0);
 
+  } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+    __ push(result_register());  // Value.
+    __ mov(r1, Operand(var->name()));
+    __ stm(db_w, sp, cp.bit() | r1.bit());  // Context and name.
+    __ CallRuntime(Runtime::kStoreContextSlot, 3);
+    Apply(context, r0);
+
   } else if (var->slot() != NULL) {
     Slot* slot = var->slot();
     switch (slot->type()) {
@@ -967,6 +974,7 @@
         break;
     }
     Apply(context, result_register());
+
   } else {
     // Variables rewritten as properties are not treated as variables in
     // assignments.
@@ -975,7 +983,7 @@
 }
 
 
-void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
   // Assignment to a property, using a named store IC.
   Property* prop = expr->target()->AsProperty();
   ASSERT(prop != NULL);
@@ -1011,7 +1019,7 @@
 }
 
 
-void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
   // Assignment to a property, using a keyed store IC.
 
   // If the assignment starts a block of assignments to the same object,
@@ -1046,7 +1054,7 @@
 }
 
 
-void FastCodeGenerator::VisitProperty(Property* expr) {
+void FullCodeGenerator::VisitProperty(Property* expr) {
   Comment cmnt(masm_, "[ Property");
   Expression* key = expr->key();
 
@@ -1065,7 +1073,7 @@
   }
 }
 
-void FastCodeGenerator::EmitCallWithIC(Call* expr,
+void FullCodeGenerator::EmitCallWithIC(Call* expr,
                                        Handle<Object> ignored,
                                        RelocInfo::Mode mode) {
   // Code common for calls using the IC.
@@ -1087,7 +1095,7 @@
 }
 
 
-void FastCodeGenerator::EmitCallWithStub(Call* expr) {
+void FullCodeGenerator::EmitCallWithStub(Call* expr) {
   // Code common for calls using the call stub.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
@@ -1105,7 +1113,7 @@
 }
 
 
-void FastCodeGenerator::VisitCall(Call* expr) {
+void FullCodeGenerator::VisitCall(Call* expr) {
   Comment cmnt(masm_, "[ Call");
   Expression* fun = expr->expression();
   Variable* var = fun->AsVariableProxy()->AsVariable();
@@ -1163,7 +1171,7 @@
     if (lit != NULL &&
         lit->name()->Equals(Heap::empty_string()) &&
         loop_depth() == 0) {
-      lit->set_try_fast_codegen(true);
+      lit->set_try_full_codegen(true);
     }
     VisitForValue(fun, kStack);
     // Load global receiver object.
@@ -1176,7 +1184,7 @@
 }
 
 
-void FastCodeGenerator::VisitCallNew(CallNew* expr) {
+void FullCodeGenerator::VisitCallNew(CallNew* expr) {
   Comment cmnt(masm_, "[ CallNew");
   // According to ECMA-262, section 11.2.2, page 44, the function
   // expression in new calls must be evaluated before the
@@ -1211,7 +1219,7 @@
 }
 
 
-void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
   Comment cmnt(masm_, "[ CallRuntime");
   ZoneList<Expression*>* args = expr->arguments();
 
@@ -1246,7 +1254,7 @@
 }
 
 
-void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
   switch (expr->op()) {
     case Token::VOID: {
       Comment cmnt(masm_, "[ UnaryOperation (VOID)");
@@ -1351,13 +1359,26 @@
       break;
     }
 
+    case Token::ADD: {
+      Comment cmnt(masm_, "[ UnaryOperation (ADD)");
+      VisitForValue(expr->expression(), kAccumulator);
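+      // Unary '+' is just ToNumber; smis are already numbers, so the
+      // builtin call is skipped for them.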
+      Label no_conversion;
+      __ tst(result_register(), Operand(kSmiTagMask));
+      __ b(eq, &no_conversion);
+      __ push(r0);
+      __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS);
+      __ bind(&no_conversion);
+      Apply(context_, result_register());
+      break;
+    }
+
     default:
       UNREACHABLE();
   }
 }
 
 
-void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
+void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
   Comment cmnt(masm_, "[ CountOperation");
 
   // Expression can only be a property, a global or a (parameter or local)
@@ -1376,7 +1397,7 @@
   if (assign_type == VARIABLE) {
     ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
     Location saved_location = location_;
-    location_ = kStack;
+    location_ = kAccumulator;
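+    // Load the operand into the accumulator (r0) so the inlined smi
+    // code below can operate on it directly.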
     EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
                      Expression::kValue);
     location_ = saved_location;
@@ -1393,11 +1414,15 @@
       VisitForValue(prop->key(), kStack);
       EmitKeyedPropertyLoad(prop);
     }
-    __ push(r0);
   }
 
-  // Convert to number.
+  // Call ToNumber only if operand is not a smi.
+  Label no_conversion;
+  __ tst(r0, Operand(kSmiTagMask));
+  __ b(eq, &no_conversion);
+  __ push(r0);
   __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS);
+  __ bind(&no_conversion);
 
   // Save result for postfix expressions.
   if (expr->is_postfix()) {
@@ -1429,12 +1454,28 @@
     }
   }
 
-  // Call stub for +1/-1.
+  // Inline smi case if we are in a loop.
+  Label stub_call, done;
+  if (loop_depth() > 0) {
+    __ add(r0, r0, Operand(expr->op() == Token::INC
+                           ? Smi::FromInt(1)
+                           : Smi::FromInt(-1)),
+           SetCC);
+    __ b(vs, &stub_call);
+    // We could eliminate this smi check if we split the code at
+    // the first smi check before calling ToNumber.
+    __ tst(r0, Operand(kSmiTagMask));
+    __ b(eq, &done);
+    __ bind(&stub_call);
+    // Call stub.  Undo the addition first (r1 does not yet hold the
+    // smi constant, so the constant operand is repeated here).
+    __ sub(r0, r0, Operand(expr->op() == Token::INC
+                           ? Smi::FromInt(1)
+                           : Smi::FromInt(-1)));
+  }
   __ mov(r1, Operand(expr->op() == Token::INC
                      ? Smi::FromInt(1)
                      : Smi::FromInt(-1)));
   GenericBinaryOpStub stub(Token::ADD, NO_OVERWRITE);
   __ CallStub(&stub);
+  __ bind(&done);
 
   // Store the value returned in r0.
   switch (assign_type) {
@@ -1483,7 +1524,7 @@
 }
 
 
-void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
   Comment cmnt(masm_, "[ BinaryOperation");
   switch (expr->op()) {
     case Token::COMMA:
@@ -1518,7 +1559,7 @@
 }
 
 
-void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
   Comment cmnt(masm_, "[ CompareOperation");
 
   // Always perform the comparison for its control flow.  Pack the result
@@ -1633,25 +1674,25 @@
 }
 
 
-void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
   __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   Apply(context_, r0);
 }
 
 
-Register FastCodeGenerator::result_register() { return r0; }
+Register FullCodeGenerator::result_register() { return r0; }
 
 
-Register FastCodeGenerator::context_register() { return cp; }
+Register FullCodeGenerator::context_register() { return cp; }
 
 
-void FastCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
   ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
   __ str(value, MemOperand(fp, frame_offset));
 }
 
 
-void FastCodeGenerator::LoadContextField(Register dst, int context_index) {
+void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
   __ ldr(dst, CodeGenerator::ContextOperand(cp, context_index));
 }
 
@@ -1659,7 +1700,7 @@
 // ----------------------------------------------------------------------------
 // Non-local control flow support.
 
-void FastCodeGenerator::EnterFinallyBlock() {
+void FullCodeGenerator::EnterFinallyBlock() {
   ASSERT(!result_register().is(r1));
   // Store result register while executing finally block.
   __ push(result_register());
@@ -1672,7 +1713,7 @@
 }
 
 
-void FastCodeGenerator::ExitFinallyBlock() {
+void FullCodeGenerator::ExitFinallyBlock() {
   ASSERT(!result_register().is(r1));
   // Restore result register from stack.
   __ pop(r1);
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index a1f2613..b59c3f0 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -569,11 +569,10 @@
 
   // Get the map of the receiver.
   __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks.  We need
-  // to check this explicitly since this generic stub does not perform
-  // map checks.
+
+  // Check the bit field: receivers that need access checks or have
+  // interceptors must take the slow case, since this generic stub
+  // performs no map checks.
   __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
-  __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
+  __ tst(r3, Operand(kSlowCaseBitFieldMask));
   __ b(ne, &slow);
   // Check that the object is some kind of JS object EXCEPT JS Value type.
   // In the case that the object is a value-wrapper object,
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 18cadac..6c3bbbb 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1221,6 +1221,46 @@
 }
 
 
+void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
+    Register first,
+    Register second,
+    Register scratch1,
+    Register scratch2,
+    Label* failure) {
+  // Test that both first and second are sequential ASCII strings.
+  // Assume that they are non-smis.
+  ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
+  ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
+  ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+  ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
+  int kFlatAsciiStringMask =
+      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
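+  // For a sequential ASCII string the string-type, encoding and
+  // representation bits all match ASCII_STRING_TYPE, so one masked
+  // compare per operand suffices.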
+  and_(scratch1, scratch1, Operand(kFlatAsciiStringMask));
+  and_(scratch2, scratch2, Operand(kFlatAsciiStringMask));
+  cmp(scratch1, Operand(kFlatAsciiStringTag));
+  // The second cmp is predicated on eq, so it only executes if the first
+  // comparison succeeded; the ne branch below then fires if either failed.
+  cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
+  b(ne, failure);
+}
+
+void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
+                                                         Register second,
+                                                         Register scratch1,
+                                                         Register scratch2,
+                                                         Label* failure) {
+  // Check that neither is a smi.
+  ASSERT_EQ(0, kSmiTag);
+  and_(scratch1, first, Operand(second));
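+  // If either operand is a smi its low (tag) bit is zero, so the bitwise
+  // AND of the two operands also has a zero tag bit.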
+  tst(scratch1, Operand(kSmiTagMask));
+  b(eq, failure);
+  JumpIfNonSmisNotBothSequentialAsciiStrings(first,
+                                             second,
+                                             scratch1,
+                                             scratch2,
+                                             failure);
+}
+
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
 CodePatcher::CodePatcher(byte* address, int instructions)
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 8f2064a..efc5bfa 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -337,6 +337,25 @@
   void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
   bool allow_stub_calls() { return allow_stub_calls_; }
 
+  // ---------------------------------------------------------------------------
+  // String utilities
+
+  // Checks if both objects are sequential ASCII strings and jumps to label
+  // if either is not. Assumes that neither object is a smi.
+  void JumpIfNonSmisNotBothSequentialAsciiStrings(Register object1,
+                                                  Register object2,
+                                                  Register scratch1,
+                                                  Register scratch2,
+                                                  Label* failure);
+
+  // Checks if both objects are sequential ASCII strings and jumps to label
+  // if either is not.
+  void JumpIfNotBothSequentialAsciiStrings(Register first,
+                                           Register second,
+                                           Register scratch1,
+                                           Register scratch2,
+                                           Label* not_flat_ascii_strings);
+
  private:
   List<Unresolved> unresolved_;
   bool generating_stub_;
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index ed06eb2..9dd3b93 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -63,8 +63,6 @@
  *                             through the runtime system)
  *       - stack_area_base    (High end of the memory area to use as
  *                             backtracking stack)
- *       - at_start           (if 1, we are starting at the start of the
- *                             string, otherwise 0)
  *       - int* capture_array (int[num_saved_registers_], for output).
  *       --- sp when called ---
  *       - link address
@@ -76,6 +74,8 @@
  *       - void* input_string (location of a handle containing the string)
  *       - Offset of location before start of input (effectively character
  *         position -1). Used to initialize capture registers to a non-position.
+ *       - At start (if 1, we are starting at the start of the
+ *         string, otherwise 0)
  *       - register 0         (Only positions must be stored in the first
  *       - register 1          num_saved_registers_ registers)
  *       - ...
@@ -610,6 +610,7 @@
   // Set frame pointer just above the arguments.
   __ add(frame_pointer(), sp, Operand(4 * kPointerSize));
   __ push(r0);  // Make room for "position - 1" constant (value is irrelevant).
+  __ push(r0);  // Make room for "at start" constant (value is irrelevant).
 
   // Check if we have space on the stack for registers.
   Label stack_limit_hit;
@@ -653,6 +654,15 @@
   // Store this value in a local variable, for use when clearing
   // position registers.
   __ str(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
+
+  // Determine whether the start index is zero (i.e. whether matching starts
+  // at the beginning of the string) and store the result in a local variable.
+  __ ldr(r1, MemOperand(frame_pointer(), kStartIndex));
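+  // tst r1, r1 sets the Z flag iff the start index is zero; the two
+  // predicated moves then materialize a 0/1 boolean.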
+  __ tst(r1, Operand(r1));
+  __ mov(r1, Operand(1), LeaveCC, eq);
+  __ mov(r1, Operand(0), LeaveCC, ne);
+  __ str(r1, MemOperand(frame_pointer(), kAtStart));
+
   if (num_saved_registers_ > 0) {  // Always is, if generated from a regexp.
     // Fill saved registers with initial value = start offset - 1
 
diff --git a/src/arm/regexp-macro-assembler-arm.h b/src/arm/regexp-macro-assembler-arm.h
index 4459859..7de5f93 100644
--- a/src/arm/regexp-macro-assembler-arm.h
+++ b/src/arm/regexp-macro-assembler-arm.h
@@ -123,8 +123,7 @@
   static const int kReturnAddress = kStoredRegisters + 8 * kPointerSize;
   // Stack parameters placed by caller.
   static const int kRegisterOutput = kReturnAddress + kPointerSize;
-  static const int kAtStart = kRegisterOutput + kPointerSize;
-  static const int kStackHighEnd = kAtStart + kPointerSize;
+  static const int kStackHighEnd = kRegisterOutput + kPointerSize;
   static const int kDirectCall = kStackHighEnd + kPointerSize;
 
   // Below the frame pointer.
@@ -136,8 +135,9 @@
   // When adding local variables remember to push space for them in
   // the frame in GetCode.
   static const int kInputStartMinusOne = kInputString - kPointerSize;
+  static const int kAtStart = kInputStartMinusOne - kPointerSize;
   // First register address. Following registers are below it on the stack.
-  static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+  static const int kRegisterZero = kAtStart - kPointerSize;
 
   // Initial size of code buffer.
   static const size_t kRegExpCodeSize = 1024;
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index c4b1e00..f543151 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -47,9 +47,9 @@
 using ::v8::internal::DeleteArray;
 
 // This macro provides a platform independent use of sscanf. The reason for
-// SScanF not being implemented in a platform independent was through
-// ::v8::internal::OS in the same way as SNPrintF is that the Windows C Run-Time
-// Library does not provide vsscanf.
+// SScanF not being implemented in a platform independent way through
+// ::v8::internal::OS in the same way as SNPrintF is that the
+// Windows C Run-Time Library does not provide vsscanf.
 #define SScanF sscanf  // NOLINT
 
 // The Debugger class is used by the simulator while debugging simulated ARM
@@ -2033,42 +2033,62 @@
 // Decode Type 6 coprocessor instructions.
 // Dm = vmov(Rt, Rt2)
 // <Rt, Rt2> = vmov(Dm)
+// Ddst = MEM(Rbase + 4*offset).
+// MEM(Rbase + 4*offset) = Dsrc.
 void Simulator::DecodeType6CoprocessorIns(Instr* instr) {
   ASSERT((instr->TypeField() == 6));
 
-  int rt = instr->RtField();
-  int rn = instr->RnField();
-  int vm = instr->VmField();
-
-  if (instr->Bit(23) == 1) {
-    UNIMPLEMENTED();
-  } else if (instr->Bit(22) == 1) {
-    if ((instr->Bits(27, 24) == 0xC) &&
-        (instr->Bit(22) == 1) &&
-        (instr->Bits(11, 8) == 0xB) &&
-        (instr->Bits(7, 6) == 0x0) &&
-        (instr->Bit(4) == 1)) {
-      if (instr->Bit(20) == 0) {
-        int32_t rs_val = get_register(rt);
-        int32_t rn_val = get_register(rn);
-
-        set_s_register_from_sinteger(2*vm, rs_val);
-        set_s_register_from_sinteger((2*vm+1), rn_val);
-
-      } else if (instr->Bit(20) == 1) {
-        int32_t rt_int_value = get_sinteger_from_s_register(2*vm);
-        int32_t rn_int_value = get_sinteger_from_s_register(2*vm+1);
-
-        set_register(rt, rt_int_value);
-        set_register(rn, rn_int_value);
-      }
-    } else {
-      UNIMPLEMENTED();
-    }
-  } else if (instr->Bit(21) == 1) {
-    UNIMPLEMENTED();
+  if (instr->CoprocessorField() != 0xB) {
+    UNIMPLEMENTED();  // Not used by V8.
   } else {
-    UNIMPLEMENTED();
+    switch (instr->OpcodeField()) {
+      case 0x2:
+        // Transfer a double to or from a pair of GP registers: vmov.
+        if (instr->Bits(7, 4) != 0x1) {
+          UNIMPLEMENTED();  // Not used by V8.
+        } else {
+          int rt = instr->RtField();
+          int rn = instr->RnField();
+          int vm = instr->VmField();
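+          // A double register Dm aliases the pair of single-precision
+          // registers S(2m) and S(2m+1), hence the 2*vm indexing below.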
+          if (instr->HasL()) {
+            int32_t rt_int_value = get_sinteger_from_s_register(2*vm);
+            int32_t rn_int_value = get_sinteger_from_s_register(2*vm+1);
+
+            set_register(rt, rt_int_value);
+            set_register(rn, rn_int_value);
+          } else {
+            int32_t rt_val = get_register(rt);
+            int32_t rn_val = get_register(rn);
+
+            set_s_register_from_sinteger(2*vm, rt_val);
+            set_s_register_from_sinteger((2*vm+1), rn_val);
+          }
+        }
+        break;
+      case 0x8:
+      case 0xC: {  // Load and store double to memory.
+        int rn = instr->RnField();
+        int vd = instr->VdField();
+        int offset = instr->Immed8Field();
+        if (!instr->HasU()) {
+          offset = -offset;
+        }
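+        // The 8-bit immediate encodes a word offset, hence the scaling by 4.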
+        int32_t address = get_register(rn) + 4 * offset;
+        if (instr->HasL()) {
+          // Load double from memory: vldr.
+          set_s_register_from_sinteger(2*vd, ReadW(address, instr));
+          set_s_register_from_sinteger(2*vd + 1, ReadW(address + 4, instr));
+        } else {
+          // Store double to memory: vstr.
+          WriteW(address, get_sinteger_from_s_register(2*vd), instr);
+          WriteW(address + 4, get_sinteger_from_s_register(2*vd + 1), instr);
+        }
+        break;
+      }
+      default:
+        UNIMPLEMENTED();  // Not used by V8.
+        break;
+    }
   }
 }
 
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index 3ce5b7a..1973730 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -63,8 +63,8 @@
 
 // Call the generated regexp code directly. The entry function pointer should
-// expect eight int/pointer sized arguments and return an int.
+// expect seven int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
-  entry(p0, p1, p2, p3, p4, p5, p6, p7)
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+  entry(p0, p1, p2, p3, p4, p5, p6)
 
 #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
   reinterpret_cast<TryCatch*>(try_catch_address)
@@ -79,9 +79,9 @@
       assembler::arm::Simulator::current()->Call(FUNCTION_ADDR(entry), 5, \
                                                  p0, p1, p2, p3, p4))
 
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
   assembler::arm::Simulator::current()->Call( \
-    FUNCTION_ADDR(entry), 8, p0, p1, p2, p3, p4, p5, p6, p7)
+    FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
 
 #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
   try_catch_address == NULL ? \