Version 2.2.12

Allowed accessors to be defined on objects rather than just object templates.

Changed the ScriptData API.

git-svn-id: http://v8.googlecode.com/svn/trunk@4730 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 33b6d14..3c7003a 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,11 @@
+2010-05-26: Version 2.2.12
+
+        Allowed accessors to be defined on objects rather than just object
+        templates.
+
+        Changed the ScriptData API.
+
+
 2010-05-21: Version 2.2.11
 
         Fix crash bug in liveedit on 64 bit.
diff --git a/include/v8.h b/include/v8.h
index eb12de8..5b5dabe 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -126,6 +126,7 @@
 class FunctionTemplate;
 class ObjectTemplate;
 class Data;
+class AccessorInfo;
 class StackTrace;
 class StackFrame;
 
@@ -512,11 +513,37 @@
 class V8EXPORT ScriptData {  // NOLINT
  public:
   virtual ~ScriptData() { }
+  /**
+   * Pre-compiles the specified script (context-independent).
+   *
+   * \param input Pointer to UTF-8 script source code.
+   * \param length Length of UTF-8 script source code.
+   */
   static ScriptData* PreCompile(const char* input, int length);
-  static ScriptData* New(unsigned* data, int length);
 
+  /**
+   * Load previously generated pre-compilation data.
+   *
+   * \param data Pointer to data returned by a call to Data() of a previous
+   *   ScriptData. Ownership is not transferred.
+   * \param length Length of data.
+   */
+  static ScriptData* New(const char* data, int length);
+
+  /**
+   * Returns the length of Data().
+   */
   virtual int Length() = 0;
-  virtual unsigned* Data() = 0;
+
+  /**
+   * Returns a serialized representation of this ScriptData that can later be
+   * passed to New(). NOTE: Serialized data is platform-dependent.
+   */
+  virtual const char* Data() = 0;
+
+  /**
+   * Returns true if the source code could not be parsed.
+   */
   virtual bool HasError() = 0;
 };
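For reference, a minimal sketch of the revised round-trip from an embedder's point of view, assuming the Script::Compile overload that takes a ScriptData* and an active HandleScope/Context (the helper name is illustrative):

```cpp
#include <string.h>
#include <v8.h>

// Minimal sketch, not the library's documented example.  Assumes an
// entered v8::Context and an active v8::HandleScope in the caller.
v8::Local<v8::Script> CompileWithCache(const char* source) {
  v8::ScriptData* pre =
      v8::ScriptData::PreCompile(source, static_cast<int>(strlen(source)));

  // Data()/Length() expose a platform-dependent byte buffer that the
  // embedder can persist; the buffer is owned by `pre`, so copy it out.
  int length = pre->Length();
  char* cache = new char[length];
  memcpy(cache, pre->Data(), length);
  delete pre;

  // New() copies the bytes (see ScriptData::New in api.cc below), so
  // the cached buffer can be freed right away.
  v8::ScriptData* restored = v8::ScriptData::New(cache, length);
  delete[] cache;

  v8::Local<v8::Script> script =
      v8::Script::Compile(v8::String::New(source), NULL, restored);
  delete restored;
  return script;
}
```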
 
@@ -1306,6 +1333,41 @@
 };
 
 /**
+ * Accessor[Getter|Setter] are used as callback functions when
+ * setting|getting a particular property. See the SetAccessor methods
+ * on Object and ObjectTemplate.
+ */
+typedef Handle<Value> (*AccessorGetter)(Local<String> property,
+                                        const AccessorInfo& info);
+
+
+typedef void (*AccessorSetter)(Local<String> property,
+                               Local<Value> value,
+                               const AccessorInfo& info);
+
+
+/**
+ * Access control specifications.
+ *
+ * Some accessors should be accessible across contexts.  These
+ * accessors have an explicit access control parameter which specifies
+ * the kind of cross-context access that should be allowed.
+ *
+ * Additionally, for security, accessors can prohibit overwriting by
+ * accessors defined in JavaScript.  For objects that have such
+ * accessors either locally or in their prototype chain it is not
+ * possible to overwrite the accessor by using __defineGetter__ or
+ * __defineSetter__ from JavaScript code.
+ */
+enum AccessControl {
+  DEFAULT               = 0,
+  ALL_CAN_READ          = 1,
+  ALL_CAN_WRITE         = 1 << 1,
+  PROHIBITS_OVERWRITING = 1 << 2
+};
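The values are bit flags, so a combined setting needs an explicit cast back to AccessControl; a small sketch:

```cpp
// Readable from any context and protected against __defineGetter__ /
// __defineSetter__ overrides; the cast is required because operator|
// on the enum values yields an int.
v8::AccessControl settings = static_cast<v8::AccessControl>(
    v8::ALL_CAN_READ | v8::PROHIBITS_OVERWRITING);
```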
+
+
+/**
  * A JavaScript object (ECMA-262, 4.3.3)
  */
 class V8EXPORT Object : public Value {
@@ -1347,6 +1409,13 @@
 
   bool Delete(uint32_t index);
 
+  bool SetAccessor(Handle<String> name,
+                   AccessorGetter getter,
+                   AccessorSetter setter = 0,
+                   Handle<Value> data = Handle<Value>(),
+                   AccessControl settings = DEFAULT,
+                   PropertyAttribute attribute = None);
+
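A hedged sketch of what this new declaration enables, installing a native accessor directly on an existing object rather than on its template (the getter/setter names are illustrative):

```cpp
// Hypothetical getter/setter pair; AccessorGetter and AccessorSetter
// are the typedefs declared above.
static v8::Handle<v8::Value> XGetter(v8::Local<v8::String> name,
                                     const v8::AccessorInfo& info) {
  return v8::Integer::New(42);
}

static void XSetter(v8::Local<v8::String> name,
                    v8::Local<v8::Value> value,
                    const v8::AccessorInfo& info) {
  // Observe writes to obj.x; info.Data() carries the `data` handle
  // passed to SetAccessor, if any.
}

void AddXAccessor(v8::Handle<v8::Object> obj) {
  // Returns false if installing the accessor failed.
  obj->SetAccessor(v8::String::New("x"), XGetter, XSetter);
}
```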
   /**
    * Returns an array containing the names of the enumerable properties
    * of this object, including properties from prototype objects.  The
@@ -1642,19 +1711,6 @@
 typedef int (*LookupCallback)(Local<Object> self, Local<String> name);
 
 /**
- * Accessor[Getter|Setter] are used as callback functions when
- * setting|getting a particular property. See objectTemplate::SetAccessor.
- */
-typedef Handle<Value> (*AccessorGetter)(Local<String> property,
-                                        const AccessorInfo& info);
-
-
-typedef void (*AccessorSetter)(Local<String> property,
-                               Local<Value> value,
-                               const AccessorInfo& info);
-
-
-/**
  * NamedProperty[Getter|Setter] are used as interceptors on object.
  * See ObjectTemplate::SetNamedPropertyHandler.
  */
@@ -1734,27 +1790,6 @@
 
 
 /**
- * Access control specifications.
- *
- * Some accessors should be accessible across contexts.  These
- * accessors have an explicit access control parameter which specifies
- * the kind of cross-context access that should be allowed.
- *
- * Additionally, for security, accessors can prohibit overwriting by
- * accessors defined in JavaScript.  For objects that have such
- * accessors either locally or in their prototype chain it is not
- * possible to overwrite the accessor by using __defineGetter__ or
- * __defineSetter__ from JavaScript code.
- */
-enum AccessControl {
-  DEFAULT               = 0,
-  ALL_CAN_READ          = 1,
-  ALL_CAN_WRITE         = 1 << 1,
-  PROHIBITS_OVERWRITING = 1 << 2
-};
-
-
-/**
  * Access type specification.
  */
 enum AccessType {
@@ -2866,7 +2901,12 @@
    */
   void ReattachGlobal(Handle<Object> global_object);
 
-  /** Creates a new context. */
+  /** Creates a new context.
+   *
+   * Returns a persistent handle to the newly allocated context. This
+   * persistent handle must be disposed of when the context is no
+   * longer needed so that the context can be garbage collected.
+   */
   static Persistent<Context> New(
       ExtensionConfiguration* extensions = NULL,
       Handle<ObjectTemplate> global_template = Handle<ObjectTemplate>(),
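A minimal sketch of that disposal contract:

```cpp
// Create, use, and release a context; Dispose() lets the context be
// garbage collected once nothing else keeps it alive.
v8::Persistent<v8::Context> context = v8::Context::New();
{
  v8::Context::Scope scope(context);
  // ... compile and run scripts in the context ...
}
context.Dispose();
```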
diff --git a/src/api.cc b/src/api.cc
index cf940c6..a7948ae 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -58,11 +58,10 @@
 
 namespace v8 {
 
-
-#define ON_BAILOUT(location, code)              \
-  if (IsDeadCheck(location)) {                  \
-    code;                                       \
-    UNREACHABLE();                              \
+#define ON_BAILOUT(location, code)                                 \
+  if (IsDeadCheck(location) || v8::V8::IsExecutionTerminating()) { \
+    code;                                                          \
+    UNREACHABLE();                                                 \
   }
 
 
@@ -776,6 +775,28 @@
 }
 
 
+static i::Handle<i::AccessorInfo> MakeAccessorInfo(
+      v8::Handle<String> name,
+      AccessorGetter getter,
+      AccessorSetter setter,
+      v8::Handle<Value> data,
+      v8::AccessControl settings,
+      v8::PropertyAttribute attributes) {
+  i::Handle<i::AccessorInfo> obj = i::Factory::NewAccessorInfo();
+  ASSERT(getter != NULL);
+  obj->set_getter(*FromCData(getter));
+  obj->set_setter(*FromCData(setter));
+  if (data.IsEmpty()) data = v8::Undefined();
+  obj->set_data(*Utils::OpenHandle(*data));
+  obj->set_name(*Utils::OpenHandle(*name));
+  if (settings & ALL_CAN_READ) obj->set_all_can_read(true);
+  if (settings & ALL_CAN_WRITE) obj->set_all_can_write(true);
+  if (settings & PROHIBITS_OVERWRITING) obj->set_prohibits_overwriting(true);
+  obj->set_property_attributes(static_cast<PropertyAttributes>(attributes));
+  return obj;
+}
+
+
 void FunctionTemplate::AddInstancePropertyAccessor(
       v8::Handle<String> name,
       AccessorGetter getter,
@@ -788,18 +809,10 @@
   }
   ENTER_V8;
   HandleScope scope;
-  i::Handle<i::AccessorInfo> obj = i::Factory::NewAccessorInfo();
-  ASSERT(getter != NULL);
-  obj->set_getter(*FromCData(getter));
-  obj->set_setter(*FromCData(setter));
-  if (data.IsEmpty()) data = v8::Undefined();
-  obj->set_data(*Utils::OpenHandle(*data));
-  obj->set_name(*Utils::OpenHandle(*name));
-  if (settings & ALL_CAN_READ) obj->set_all_can_read(true);
-  if (settings & ALL_CAN_WRITE) obj->set_all_can_write(true);
-  if (settings & PROHIBITS_OVERWRITING) obj->set_prohibits_overwriting(true);
-  obj->set_property_attributes(static_cast<PropertyAttributes>(attributes));
 
+  i::Handle<i::AccessorInfo> obj = MakeAccessorInfo(name,
+                                                    getter, setter, data,
+                                                    settings, attributes);
   i::Handle<i::Object> list(Utils::OpenHandle(this)->property_accessors());
   if (list->IsUndefined()) {
     list = NeanderArray().value();
@@ -1106,8 +1119,19 @@
 }
 
 
-ScriptData* ScriptData::New(unsigned* data, int length) {
-  return new i::ScriptDataImpl(i::Vector<unsigned>(data, length));
+ScriptData* ScriptData::New(const char* data, int length) {
+  // Return an empty ScriptData if the length is obviously invalid.
+  if (length % sizeof(unsigned) != 0) {
+    return new i::ScriptDataImpl(i::Vector<unsigned>());
+  }
+
+  // Copy the data to ensure it is properly aligned.
+  int deserialized_data_length = length / sizeof(unsigned);
+  unsigned* deserialized_data = i::NewArray<unsigned>(deserialized_data_length);
+  memcpy(deserialized_data, data, length);
+
+  return new i::ScriptDataImpl(
+      i::Vector<unsigned>(deserialized_data, deserialized_data_length));
 }
 
 
@@ -2354,6 +2378,23 @@
 }
 
 
+bool Object::SetAccessor(Handle<String> name,
+                         AccessorGetter getter,
+                         AccessorSetter setter,
+                         v8::Handle<Value> data,
+                         AccessControl settings,
+                         PropertyAttribute attributes) {
+  ON_BAILOUT("v8::Object::SetAccessor()", return false);
+  ENTER_V8;
+  HandleScope scope;
+  i::Handle<i::AccessorInfo> info = MakeAccessorInfo(name,
+                                                     getter, setter, data,
+                                                     settings, attributes);
+  i::Handle<i::Object> result = i::SetAccessor(Utils::OpenHandle(this), info);
+  return !result.is_null() && !result->IsUndefined();
+}
+
+
 bool v8::Object::HasRealNamedProperty(Handle<String> key) {
   ON_BAILOUT("v8::Object::HasRealNamedProperty()", return false);
   return Utils::OpenHandle(this)->HasRealNamedProperty(
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index a5c5bd1..e292cef 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -39,6 +39,7 @@
 
 #include "arm/assembler-arm.h"
 #include "cpu.h"
+#include "debug.h"
 
 
 namespace v8 {
@@ -73,6 +74,11 @@
 }
 
 
+int RelocInfo::target_address_size() {
+  return Assembler::kExternalTargetSize;
+}
+
+
 void RelocInfo::set_target_address(Address target) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
   Assembler::set_target_address_at(pc_, target);
@@ -162,6 +168,26 @@
 }
 
 
+void RelocInfo::Visit(ObjectVisitor* visitor) {
+  RelocInfo::Mode mode = rmode();
+  if (mode == RelocInfo::EMBEDDED_OBJECT) {
+    visitor->VisitPointer(target_object_address());
+  } else if (RelocInfo::IsCodeTarget(mode)) {
+    visitor->VisitCodeTarget(this);
+  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+    visitor->VisitExternalReference(target_reference_address());
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  } else if (Debug::has_break_points() &&
+             RelocInfo::IsJSReturn(mode) &&
+             IsPatchedReturnSequence()) {
+    visitor->VisitDebugTarget(this);
+#endif
+  } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+    visitor->VisitRuntimeEntry(this);
+  }
+}
+
+
 Operand::Operand(int32_t immediate, RelocInfo::Mode rmode)  {
   rm_ = no_reg;
   imm32_ = immediate;
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index dba62e6..050e15b 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -1363,40 +1363,25 @@
 }
 
 
-void Assembler::ldrd(Register dst, const MemOperand& src, Condition cond) {
+void Assembler::ldrd(Register dst1, Register dst2,
+                     const MemOperand& src, Condition cond) {
+  ASSERT(CpuFeatures::IsEnabled(ARMv7));
   ASSERT(src.rm().is(no_reg));
-#ifdef CAN_USE_ARMV7_INSTRUCTIONS
-  addrmod3(cond | B7 | B6 | B4, dst, src);
-#else
-  // Generate two ldr instructions if ldrd is not available.
-  MemOperand src1(src);
-  src1.set_offset(src1.offset() + 4);
-  Register dst1(dst);
-  dst1.set_code(dst1.code() + 1);
-  if (dst.is(src.rn())) {
-    ldr(dst1, src1, cond);
-    ldr(dst, src, cond);
-  } else {
-    ldr(dst, src, cond);
-    ldr(dst1, src1, cond);
-  }
-#endif
+  ASSERT(!dst1.is(lr));  // r14.
+  ASSERT_EQ(0, dst1.code() % 2);
+  ASSERT_EQ(dst1.code() + 1, dst2.code());
+  addrmod3(cond | B7 | B6 | B4, dst1, src);
 }
 
 
-void Assembler::strd(Register src, const MemOperand& dst, Condition cond) {
+void Assembler::strd(Register src1, Register src2,
+                     const MemOperand& dst, Condition cond) {
   ASSERT(dst.rm().is(no_reg));
-#ifdef CAN_USE_ARMV7_INSTRUCTIONS
-  addrmod3(cond | B7 | B6 | B5 | B4, src, dst);
-#else
-  // Generate two str instructions if strd is not available.
-  MemOperand dst1(dst);
-  dst1.set_offset(dst1.offset() + 4);
-  Register src1(src);
-  src1.set_code(src1.code() + 1);
-  str(src, dst, cond);
-  str(src1, dst1, cond);
-#endif
+  ASSERT(!src1.is(lr));  // r14.
+  ASSERT_EQ(0, src1.code() % 2);
+  ASSERT_EQ(src1.code() + 1, src2.code());
+  ASSERT(CpuFeatures::IsEnabled(ARMv7));
+  addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
 }
 
 // Load/Store multiple instructions.
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 5a5d64b..a1b98f6 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -773,8 +773,12 @@
   void strh(Register src, const MemOperand& dst, Condition cond = al);
   void ldrsb(Register dst, const MemOperand& src, Condition cond = al);
   void ldrsh(Register dst, const MemOperand& src, Condition cond = al);
-  void ldrd(Register dst, const MemOperand& src, Condition cond = al);
-  void strd(Register src, const MemOperand& dst, Condition cond = al);
+  void ldrd(Register dst1,
+            Register dst2,
+            const MemOperand& src, Condition cond = al);
+  void strd(Register src1,
+            Register src2,
+            const MemOperand& dst, Condition cond = al);
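With the second register of the pair now explicit, call sites take the following shape (a sketch; `__` is the usual macro-assembler shorthand, and the ASSERTs in assembler-arm.cc require a consecutive even/odd pair not including lr):

```cpp
// Load/store a 64-bit value as a register pair; the first register
// must be even-numbered and the second must be the next register.
__ ldrd(r2, r3, FieldMemOperand(r1, HeapNumber::kValueOffset));
__ strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset));
```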
 
   // Load/Store multiple instructions
   void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 7b62da9..64ed425 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -1514,7 +1514,7 @@
   // Then process it as a normal function call.
   __ ldr(r0, MemOperand(sp, 3 * kPointerSize));
   __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
-  __ strd(r0, MemOperand(sp, 2 * kPointerSize));
+  __ Strd(r0, r1, MemOperand(sp, 2 * kPointerSize));
 
   CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
   frame_->CallStub(&call_function, 3);
@@ -2307,12 +2307,10 @@
   node->continue_target()->SetExpectedHeight();
 
   // Load the current count to r0, load the length to r1.
-  __ ldrd(r0, frame_->ElementAt(0));
+  __ Ldrd(r0, r1, frame_->ElementAt(0));
   __ cmp(r0, r1);  // compare to the array length
   node->break_target()->Branch(hs);
 
-  __ ldr(r0, frame_->ElementAt(0));
-
   // Get the i'th entry of the array.
   __ ldr(r2, frame_->ElementAt(2));
   __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -2730,7 +2728,6 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ FunctionLiteral");
 
   // Build the function info and instantiate it.
@@ -2751,7 +2748,6 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ SharedFunctionInfoLiteral");
   InstantiateFunction(node->shared_function_info());
   ASSERT_EQ(original_height + 1, frame_->height());
@@ -4045,37 +4041,35 @@
 
 
 void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 1);
-  LoadAndSpill(args->at(0));
-  frame_->EmitPop(r0);
-  __ tst(r0, Operand(kSmiTagMask));
+  Load(args->at(0));
+  Register reg = frame_->PopToRegister();
+  __ tst(reg, Operand(kSmiTagMask));
   cc_reg_ = eq;
 }
 
 
 void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   // See comment in CodeGenerator::GenerateLog in codegen-ia32.cc.
   ASSERT_EQ(args->length(), 3);
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (ShouldGenerateLog(args->at(0))) {
-    LoadAndSpill(args->at(1));
-    LoadAndSpill(args->at(2));
+    Load(args->at(1));
+    Load(args->at(2));
+    frame_->SpillAll();
+    VirtualFrame::SpilledScope spilled_scope(frame_);
     __ CallRuntime(Runtime::kLog, 2);
   }
 #endif
-  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
-  frame_->EmitPush(r0);
+  frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
 }
 
 
 void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 1);
-  LoadAndSpill(args->at(0));
-  frame_->EmitPop(r0);
-  __ tst(r0, Operand(kSmiTagMask | 0x80000000u));
+  Load(args->at(0));
+  Register reg = frame_->PopToRegister();
+  __ tst(reg, Operand(kSmiTagMask | 0x80000000u));
   cc_reg_ = eq;
 }
 
@@ -4106,22 +4100,23 @@
 // flatten the string, which will ensure that the answer is in the left hand
 // side the next time around.
 void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 2);
   Comment(masm_, "[ GenerateFastCharCodeAt");
 
-  LoadAndSpill(args->at(0));
-  LoadAndSpill(args->at(1));
-  frame_->EmitPop(r1);  // Index.
-  frame_->EmitPop(r2);  // String.
+  Load(args->at(0));
+  Load(args->at(1));
+  Register index = frame_->PopToRegister();         // Index.
+  Register string = frame_->PopToRegister(index);   // String.
+  Register result = VirtualFrame::scratch0();
+  Register scratch = VirtualFrame::scratch1();
 
   Label slow_case;
   Label exit;
   StringHelper::GenerateFastCharCodeAt(masm_,
-                                       r2,
-                                       r1,
-                                       r3,
-                                       r0,
+                                       string,
+                                       index,
+                                       scratch,
+                                       result,
                                        &slow_case,
                                        &slow_case,
                                        &slow_case,
@@ -4131,10 +4126,10 @@
   __ bind(&slow_case);
   // Move the undefined value into the result register, which will
   // trigger the slow case.
-  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
 
   __ bind(&exit);
-  frame_->EmitPush(r0);
+  frame_->EmitPush(result);
 }
 
 
@@ -4214,9 +4209,8 @@
   __ ldr(map_reg, FieldMemOperand(r1, HeapObject::kMapOffset));
   // Undetectable objects behave like undefined when tested with typeof.
   __ ldrb(r1, FieldMemOperand(map_reg, Map::kBitFieldOffset));
-  __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
-  __ cmp(r1, Operand(1 << Map::kIsUndetectable));
-  false_target()->Branch(eq);
+  __ tst(r1, Operand(1 << Map::kIsUndetectable));
+  false_target()->Branch(ne);
 
   __ ldrb(r1, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
   __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
@@ -4256,48 +4250,52 @@
 
 
 void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 0);
 
+  Register scratch0 = VirtualFrame::scratch0();
+  Register scratch1 = VirtualFrame::scratch1();
   // Get the frame pointer for the calling frame.
-  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ldr(scratch0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
 
   // Skip the arguments adaptor frame if it exists.
-  Label check_frame_marker;
-  __ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
-  __ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ b(ne, &check_frame_marker);
-  __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
+  __ ldr(scratch1,
+         MemOperand(scratch0, StandardFrameConstants::kContextOffset));
+  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ ldr(scratch0,
+         MemOperand(scratch0, StandardFrameConstants::kCallerFPOffset), eq);
 
   // Check the marker in the calling frame.
-  __ bind(&check_frame_marker);
-  __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
-  __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+  __ ldr(scratch1,
+         MemOperand(scratch0, StandardFrameConstants::kMarkerOffset));
+  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
   cc_reg_ = eq;
 }
 
 
 void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 0);
 
-  Label exit;
-
-  // Get the number of formal parameters.
-  __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
+  Register tos = frame_->GetTOSRegister();
+  Register scratch0 = VirtualFrame::scratch0();
+  Register scratch1 = VirtualFrame::scratch1();
 
   // Check if the calling frame is an arguments adaptor frame.
-  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
-  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ b(ne, &exit);
+  __ ldr(scratch0,
+         MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ldr(scratch1,
+         MemOperand(scratch0, StandardFrameConstants::kContextOffset));
+  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Get the number of formal parameters.
+  __ mov(tos, Operand(Smi::FromInt(scope()->num_parameters())), LeaveCC, ne);
 
   // Arguments adaptor case: Read the arguments length from the
   // adaptor frame.
-  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ ldr(tos,
+         MemOperand(scratch0, ArgumentsAdaptorFrameConstants::kLengthOffset),
+         eq);
 
-  __ bind(&exit);
-  frame_->EmitPush(r0);
+  frame_->EmitPush(tos);
 }
 
 
@@ -4735,15 +4733,14 @@
 
 
 void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 2);
 
   // Load the two objects into registers and perform the comparison.
-  LoadAndSpill(args->at(0));
-  LoadAndSpill(args->at(1));
-  frame_->EmitPop(r0);
-  frame_->EmitPop(r1);
-  __ cmp(r0, r1);
+  Load(args->at(0));
+  Load(args->at(1));
+  Register lhs = frame_->PopToRegister();
+  Register rhs = frame_->PopToRegister(lhs);
+  __ cmp(lhs, rhs);
   cc_reg_ = eq;
 }
 
@@ -5042,6 +5039,7 @@
   // after evaluating the left hand side (due to the shortcut
   // semantics), but the compiler must (statically) know if the result
   // of compiling the binary operation is materialized or not.
+  VirtualFrame::SpilledScope spilled_scope(frame_);
   if (node->op() == Token::AND) {
     JumpTarget is_true;
     LoadConditionAndSpill(node->left(),
@@ -5053,8 +5051,7 @@
       JumpTarget pop_and_continue;
       JumpTarget exit;
 
-      __ ldr(r0, frame_->Top());  // Duplicate the stack top.
-      frame_->EmitPush(r0);
+      frame_->Dup();
       // Avoid popping the result if it converts to 'false' using the
       // standard ToBoolean() conversion as described in ECMA-262,
       // section 9.2, page 30.
@@ -5063,7 +5060,7 @@
 
       // Pop the result of evaluating the first part.
       pop_and_continue.Bind();
-      frame_->EmitPop(r0);
+      frame_->Pop();
 
       // Evaluate right side expression.
       is_true.Bind();
@@ -5100,8 +5097,7 @@
       JumpTarget pop_and_continue;
       JumpTarget exit;
 
-      __ ldr(r0, frame_->Top());
-      frame_->EmitPush(r0);
+      frame_->Dup();
       // Avoid popping the result if it converts to 'true' using the
       // standard ToBoolean() conversion as described in ECMA-262,
       // section 9.2, page 30.
@@ -5110,7 +5106,7 @@
 
       // Pop the result of evaluating the first part.
       pop_and_continue.Bind();
-      frame_->EmitPop(r0);
+      frame_->Pop();
 
       // Evaluate right side expression.
       is_false.Bind();
@@ -5145,7 +5141,6 @@
   Comment cmnt(masm_, "[ BinaryOperation");
 
   if (node->op() == Token::AND || node->op() == Token::OR) {
-    VirtualFrame::SpilledScope spilled_scope(frame_);
     GenerateLogicalBooleanOperation(node);
   } else {
     // Optimize for the case where (at least) one of the expressions
@@ -5198,9 +5193,7 @@
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(frame_);
-  __ ldr(r0, frame_->Function());
-  frame_->EmitPush(r0);
+  frame_->EmitPush(MemOperand(frame_->Function()));
   ASSERT_EQ(original_height + 1, frame_->height());
 }
 
@@ -6386,7 +6379,7 @@
     ConvertToDoubleStub stub1(r3, r2, r7, r6);
     __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
     // Load rhs to a double in r0, r1.
-    __ ldrd(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+    __ Ldrd(r0, r1, FieldMemOperand(r0, HeapNumber::kValueOffset));
     __ pop(lr);
   }
 
@@ -6421,7 +6414,7 @@
   } else {
     __ push(lr);
     // Load lhs to a double in r2, r3.
-    __ ldrd(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
+    __ Ldrd(r2, r3, FieldMemOperand(r1, HeapNumber::kValueOffset));
     // Convert rhs to a double in r0, r1.
     __ mov(r7, Operand(r0));
     ConvertToDoubleStub stub2(r1, r0, r7, r6);
@@ -6585,8 +6578,8 @@
     __ sub(r7, r1, Operand(kHeapObjectTag));
     __ vldr(d7, r7, HeapNumber::kValueOffset);
   } else {
-    __ ldrd(r2, FieldMemOperand(r1, HeapNumber::kValueOffset));
-    __ ldrd(r0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+    __ Ldrd(r2, r3, FieldMemOperand(r1, HeapNumber::kValueOffset));
+    __ Ldrd(r0, r1, FieldMemOperand(r0, HeapNumber::kValueOffset));
   }
   __ jmp(both_loaded_as_doubles);
 }
@@ -6963,7 +6956,7 @@
       __ vldr(d7, r7, HeapNumber::kValueOffset);
     } else {
       // Calling convention says that second double is in r2 and r3.
-      __ ldrd(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
+      __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
     }
     __ jmp(&finished_loading_r0);
     __ bind(&r0_is_smi);
@@ -7015,7 +7008,7 @@
       __ vldr(d6, r7, HeapNumber::kValueOffset);
     } else {
       // Calling convention says that first double is in r0 and r1.
-      __ ldrd(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
+      __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset));
     }
     __ jmp(&finished_loading_r1);
     __ bind(&r1_is_smi);
@@ -7086,7 +7079,7 @@
       __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset));
   #else
       // Double returned in registers 0 and 1.
-      __ strd(r0, FieldMemOperand(r5, HeapNumber::kValueOffset));
+      __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset));
   #endif
       __ mov(r0, Operand(r5));
       // And we are done.
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index c2f6ea9..fecc213 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -64,7 +64,7 @@
   if (mode == PRIMARY) {
     int locals_count = scope()->num_stack_slots();
 
-    __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+    __ Push(lr, fp, cp, r1);
     if (locals_count > 0) {
       // Load undefined value here, so the value is ready for the loop
       // below.
@@ -82,11 +82,17 @@
     bool function_in_register = true;
 
     // Possibly allocate a local context.
-    if (scope()->num_heap_slots() > 0) {
+    int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+    if (heap_slots > 0) {
       Comment cmnt(masm_, "[ Allocate local context");
       // Argument to NewContext is the function, which is in r1.
       __ push(r1);
-      __ CallRuntime(Runtime::kNewContext, 1);
+      if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+        FastNewContextStub stub(heap_slots);
+        __ CallStub(&stub);
+      } else {
+        __ CallRuntime(Runtime::kNewContext, 1);
+      }
       function_in_register = false;
       // Context is returned in both r0 and cp.  It replaces the context
       // passed to us.  It's saved in the stack and kept live in cp.
@@ -144,6 +150,21 @@
     }
   }
 
+  { Comment cmnt(masm_, "[ Declarations");
+    // For named function expressions, declare the function name as a
+    // constant.
+    if (scope()->is_function_scope() && scope()->function() != NULL) {
+      EmitDeclaration(scope()->function(), Variable::CONST, NULL);
+    }
+    // Visit all the explicit declarations unless there is an illegal
+    // redeclaration.
+    if (scope()->HasIllegalRedeclaration()) {
+      scope()->VisitIllegalRedeclaration(this);
+    } else {
+      VisitDeclarations(scope()->declarations());
+    }
+  }
+
   // Check the stack for overflow or break request.
   // Put the lr setup instruction in the delay slot.  The kInstrSize is
   // added to the implicit 8 byte offset that always applies to operations
@@ -160,10 +181,6 @@
            lo);
   }
 
-  { Comment cmnt(masm_, "[ Declarations");
-    VisitDeclarations(scope()->declarations());
-  }
-
   if (FLAG_trace) {
     __ CallRuntime(Runtime::kTraceEnter, 0);
   }
@@ -384,6 +401,38 @@
   }
 }
 
+void FullCodeGenerator::PrepareTest(Label* materialize_true,
+                                    Label* materialize_false,
+                                    Label** if_true,
+                                    Label** if_false) {
+  switch (context_) {
+    case Expression::kUninitialized:
+      UNREACHABLE();
+      break;
+    case Expression::kEffect:
+      // In an effect context, the true and the false case branch to the
+      // same label.
+      *if_true = *if_false = materialize_true;
+      break;
+    case Expression::kValue:
+      *if_true = materialize_true;
+      *if_false = materialize_false;
+      break;
+    case Expression::kTest:
+      *if_true = true_label_;
+      *if_false = false_label_;
+      break;
+    case Expression::kValueTest:
+      *if_true = materialize_true;
+      *if_false = false_label_;
+      break;
+    case Expression::kTestValue:
+      *if_true = true_label_;
+      *if_false = materialize_false;
+      break;
+  }
+}
+
 
 void FullCodeGenerator::Apply(Expression::Context context,
                               Label* materialize_true,
@@ -398,19 +447,25 @@
 
     case Expression::kValue: {
       Label done;
-      __ bind(materialize_true);
-      __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
-      __ jmp(&done);
-      __ bind(materialize_false);
-      __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
-      __ bind(&done);
       switch (location_) {
         case kAccumulator:
+          __ bind(materialize_true);
+          __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
+          __ jmp(&done);
+          __ bind(materialize_false);
+          __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
           break;
         case kStack:
-          __ push(result_register());
+          __ bind(materialize_true);
+          __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+          __ push(ip);
+          __ jmp(&done);
+          __ bind(materialize_false);
+          __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+          __ push(ip);
           break;
       }
+      __ bind(&done);
       break;
     }
 
@@ -419,12 +474,13 @@
 
     case Expression::kValueTest:
       __ bind(materialize_true);
-      __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
       switch (location_) {
         case kAccumulator:
+          __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
           break;
         case kStack:
-          __ push(result_register());
+          __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+          __ push(ip);
           break;
       }
       __ jmp(true_label_);
@@ -432,12 +488,13 @@
 
     case Expression::kTestValue:
       __ bind(materialize_false);
-      __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
       switch (location_) {
         case kAccumulator:
+          __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
           break;
         case kStack:
-          __ push(result_register());
+          __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+          __ push(ip);
           break;
       }
       __ jmp(false_label_);
@@ -446,6 +503,68 @@
 }
 
 
+// Convert constant control flow (true or false) to the result expected for
+// a given expression context.
+void FullCodeGenerator::Apply(Expression::Context context, bool flag) {
+  switch (context) {
+    case Expression::kUninitialized:
+      UNREACHABLE();
+      break;
+    case Expression::kEffect:
+      break;
+    case Expression::kValue: {
+      Heap::RootListIndex value_root_index =
+          flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+      switch (location_) {
+        case kAccumulator:
+          __ LoadRoot(result_register(), value_root_index);
+          break;
+        case kStack:
+          __ LoadRoot(ip, value_root_index);
+          __ push(ip);
+          break;
+      }
+      break;
+    }
+    case Expression::kTest:
+      __ b(flag ? true_label_ : false_label_);
+      break;
+    case Expression::kTestValue:
+      switch (location_) {
+        case kAccumulator:
+          // If value is false it's needed.
+          if (!flag) __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
+          break;
+        case kStack:
+          // If value is false it's needed.
+          if (!flag) {
+            __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+            __ push(ip);
+          }
+          break;
+      }
+      __ b(flag ? true_label_ : false_label_);
+      break;
+    case Expression::kValueTest:
+      switch (location_) {
+        case kAccumulator:
+          // If value is true it's needed.
+          if (flag) __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
+          break;
+        case kStack:
+          // If value is true it's needed.
+          if (flag) {
+            __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+            __ push(ip);
+          }
+          break;
+      }
+      __ b(flag ? true_label_ : false_label_);
+      break;
+  }
+}
+
+
 void FullCodeGenerator::DoTest(Expression::Context context) {
   // The value to test is pushed on the stack, and duplicated on the stack
   // if necessary (for value/test and test/value contexts).
@@ -551,22 +670,23 @@
 }
 
 
-void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
+void FullCodeGenerator::EmitDeclaration(Variable* variable,
+                                        Variable::Mode mode,
+                                        FunctionLiteral* function) {
   Comment cmnt(masm_, "[ Declaration");
-  Variable* var = decl->proxy()->var();
-  ASSERT(var != NULL);  // Must have been resolved.
-  Slot* slot = var->slot();
-  Property* prop = var->AsProperty();
+  ASSERT(variable != NULL);  // Must have been resolved.
+  Slot* slot = variable->slot();
+  Property* prop = variable->AsProperty();
 
   if (slot != NULL) {
     switch (slot->type()) {
       case Slot::PARAMETER:
       case Slot::LOCAL:
-        if (decl->mode() == Variable::CONST) {
+        if (mode == Variable::CONST) {
           __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
           __ str(ip, MemOperand(fp, SlotOffset(slot)));
-        } else if (decl->fun() != NULL) {
-          VisitForValue(decl->fun(), kAccumulator);
+        } else if (function != NULL) {
+          VisitForValue(function, kAccumulator);
           __ str(result_register(), MemOperand(fp, SlotOffset(slot)));
         }
         break;
@@ -576,7 +696,7 @@
         // this specific context.
 
         // The variable in the decl always resides in the current context.
-        ASSERT_EQ(0, scope()->ContextChainLength(var->scope()));
+        ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
         if (FLAG_debug_code) {
           // Check if we have the correct context pointer.
           __ ldr(r1,
@@ -584,12 +704,12 @@
           __ cmp(r1, cp);
           __ Check(eq, "Unexpected declaration in current context.");
         }
-        if (decl->mode() == Variable::CONST) {
+        if (mode == Variable::CONST) {
           __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
           __ str(ip, CodeGenerator::ContextOperand(cp, slot->index()));
           // No write barrier since the_hole_value is in old space.
-        } else if (decl->fun() != NULL) {
-          VisitForValue(decl->fun(), kAccumulator);
+        } else if (function != NULL) {
+          VisitForValue(function, kAccumulator);
           __ str(result_register(),
                  CodeGenerator::ContextOperand(cp, slot->index()));
           int offset = Context::SlotOffset(slot->index());
@@ -601,27 +721,27 @@
         break;
 
       case Slot::LOOKUP: {
-        __ mov(r2, Operand(var->name()));
+        __ mov(r2, Operand(variable->name()));
         // Declaration nodes are always introduced in one of two modes.
-        ASSERT(decl->mode() == Variable::VAR ||
-               decl->mode() == Variable::CONST);
+        ASSERT(mode == Variable::VAR ||
+               mode == Variable::CONST);
         PropertyAttributes attr =
-            (decl->mode() == Variable::VAR) ? NONE : READ_ONLY;
+            (mode == Variable::VAR) ? NONE : READ_ONLY;
         __ mov(r1, Operand(Smi::FromInt(attr)));
         // Push initial value, if any.
         // Note: For variables we must not push an initial value (such as
         // 'undefined') because we may have a (legal) redeclaration and we
         // must not destroy the current value.
-        if (decl->mode() == Variable::CONST) {
+        if (mode == Variable::CONST) {
           __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
-          __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
-        } else if (decl->fun() != NULL) {
-          __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit());
+          __ Push(cp, r2, r1, r0);
+        } else if (function != NULL) {
+          __ Push(cp, r2, r1);
           // Push initial value for function declaration.
-          VisitForValue(decl->fun(), kStack);
+          VisitForValue(function, kStack);
         } else {
           __ mov(r0, Operand(Smi::FromInt(0)));  // No initial value!
-          __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit());
+          __ Push(cp, r2, r1, r0);
         }
         __ CallRuntime(Runtime::kDeclareContextSlot, 4);
         break;
@@ -629,47 +749,259 @@
     }
 
   } else if (prop != NULL) {
-    if (decl->fun() != NULL || decl->mode() == Variable::CONST) {
+    if (function != NULL || mode == Variable::CONST) {
       // We are declaring a function or constant that rewrites to a
       // property.  Use (keyed) IC to set the initial value.
       VisitForValue(prop->obj(), kStack);
-      VisitForValue(prop->key(), kStack);
-
-      if (decl->fun() != NULL) {
-        VisitForValue(decl->fun(), kAccumulator);
+      if (function != NULL) {
+        VisitForValue(prop->key(), kStack);
+        VisitForValue(function, kAccumulator);
+        __ pop(r1);  // Key.
       } else {
+        VisitForValue(prop->key(), kAccumulator);
+        __ mov(r1, result_register());  // Key.
         __ LoadRoot(result_register(), Heap::kTheHoleValueRootIndex);
       }
+      __ pop(r2);  // Receiver.
 
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-      __ pop(r1);  // Key.
-      __ pop(r2);  // Receiver.
       __ Call(ic, RelocInfo::CODE_TARGET);
-
       // Value in r0 is ignored (declarations are statements).
     }
   }
 }
 
 
+void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
+  EmitDeclaration(decl->proxy()->var(), decl->mode(), decl->fun());
+}
+
+
 void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   // Call the runtime to declare the globals.
   // The context is the first argument.
   __ mov(r1, Operand(pairs));
   __ mov(r0, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
-  __ stm(db_w, sp, cp.bit() | r1.bit() | r0.bit());
+  __ Push(cp, r1, r0);
   __ CallRuntime(Runtime::kDeclareGlobals, 3);
   // Return value is ignored.
 }
 
 
 void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
-  UNREACHABLE();
+  Comment cmnt(masm_, "[ SwitchStatement");
+  Breakable nested_statement(this, stmt);
+  SetStatementPosition(stmt);
+  // Keep the switch value on the stack until a case matches.
+  VisitForValue(stmt->tag(), kStack);
+
+  ZoneList<CaseClause*>* clauses = stmt->cases();
+  CaseClause* default_clause = NULL;  // Can occur anywhere in the list.
+
+  Label next_test;  // Recycled for each test.
+  // Compile all the tests with branches to their bodies.
+  for (int i = 0; i < clauses->length(); i++) {
+    CaseClause* clause = clauses->at(i);
+    // The default is not a test, but remember it as final fall through.
+    if (clause->is_default()) {
+      default_clause = clause;
+      continue;
+    }
+
+    Comment cmnt(masm_, "[ Case comparison");
+    __ bind(&next_test);
+    next_test.Unuse();
+
+    // Compile the label expression.
+    VisitForValue(clause->label(), kAccumulator);
+
+    // Perform the comparison as if via '==='.  The comparison stub expects
+    // the smi vs. smi case to be handled before it is called.
+    Label slow_case;
+    __ ldr(r1, MemOperand(sp, 0));  // Switch value.
+    __ mov(r2, r1);
+    __ orr(r2, r2, r0);
+    __ tst(r2, Operand(kSmiTagMask));
+    __ b(ne, &slow_case);
+    __ cmp(r1, r0);
+    __ b(ne, &next_test);
+    __ Drop(1);  // Switch value is no longer needed.
+    __ b(clause->body_target()->entry_label());
+
+    __ bind(&slow_case);
+    CompareStub stub(eq, true);
+    __ CallStub(&stub);
+    __ tst(r0, r0);
+    __ b(ne, &next_test);
+    __ Drop(1);  // Switch value is no longer needed.
+    __ b(clause->body_target()->entry_label());
+  }
+
+  // Discard the test value and jump to the default if present, otherwise to
+  // the end of the statement.
+  __ bind(&next_test);
+  __ Drop(1);  // Switch value is no longer needed.
+  if (default_clause == NULL) {
+    __ b(nested_statement.break_target());
+  } else {
+    __ b(default_clause->body_target()->entry_label());
+  }
+
+  // Compile all the case bodies.
+  for (int i = 0; i < clauses->length(); i++) {
+    Comment cmnt(masm_, "[ Case body");
+    CaseClause* clause = clauses->at(i);
+    __ bind(clause->body_target()->entry_label());
+    VisitStatements(clause->statements());
+  }
+
+  __ bind(nested_statement.break_target());
 }
 
 
 void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
-  UNREACHABLE();
+  Comment cmnt(masm_, "[ ForInStatement");
+  SetStatementPosition(stmt);
+
+  Label loop, exit;
+  ForIn loop_statement(this, stmt);
+  increment_loop_depth();
+
+  // Get the object to enumerate over. Both SpiderMonkey and JSC
+  // ignore null and undefined in contrast to the specification; see
+  // ECMA-262 section 12.6.4.
+  VisitForValue(stmt->enumerable(), kAccumulator);
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(r0, ip);
+  __ b(eq, &exit);
+  __ LoadRoot(ip, Heap::kNullValueRootIndex);
+  __ cmp(r0, ip);
+  __ b(eq, &exit);
+
+  // Convert the object to a JS object.
+  Label convert, done_convert;
+  __ BranchOnSmi(r0, &convert);
+  __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
+  __ b(hs, &done_convert);
+  __ bind(&convert);
+  __ push(r0);
+  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
+  __ bind(&done_convert);
+  __ push(r0);
+
+  // TODO(kasperl): Check cache validity in generated code. This is a
+  // fast case for the JSObject::IsSimpleEnum cache validity
+  // checks. If we cannot guarantee cache validity, call the runtime
+  // system to check cache validity or get the property names in a
+  // fixed array.
+
+  // Get the set of properties to enumerate.
+  __ push(r0);  // Duplicate the enumerable object on the stack.
+  __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+
+  // If we got a map from the runtime call, we can do a fast
+  // modification check. Otherwise, we got a fixed array, and we have
+  // to do a slow check.
+  Label fixed_array;
+  __ mov(r2, r0);
+  __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
+  __ cmp(r1, ip);
+  __ b(ne, &fixed_array);
+
+  // We got a map in register r0. Get the enumeration cache from it.
+  __ ldr(r1, FieldMemOperand(r0, Map::kInstanceDescriptorsOffset));
+  __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
+  __ ldr(r2, FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+  // Setup the four remaining stack slots.
+  __ push(r0);  // Map.
+  __ ldr(r1, FieldMemOperand(r2, FixedArray::kLengthOffset));
+  __ mov(r1, Operand(r1, LSL, kSmiTagSize));
+  __ mov(r0, Operand(Smi::FromInt(0)));
+  // Push enumeration cache, enumeration cache length (as smi) and zero.
+  __ Push(r2, r1, r0);
+  __ jmp(&loop);
+
+  // We got a fixed array in register r0. Iterate through that.
+  __ bind(&fixed_array);
+  __ mov(r1, Operand(Smi::FromInt(0)));  // Map (0) - force slow check.
+  __ Push(r1, r0);
+  __ ldr(r1, FieldMemOperand(r0, FixedArray::kLengthOffset));
+  __ mov(r1, Operand(r1, LSL, kSmiTagSize));
+  __ mov(r0, Operand(Smi::FromInt(0)));
+  __ Push(r1, r0);  // Fixed array length (as smi) and initial index.
+
+  // Generate code for doing the condition check.
+  __ bind(&loop);
+  // Load the current count to r0, load the length to r1.
+  __ Ldrd(r0, r1, MemOperand(sp, 0 * kPointerSize));
+  __ cmp(r0, r1);  // Compare to the array length.
+  __ b(hs, loop_statement.break_target());
+
+  // Get the current entry of the array into register r3.
+  __ ldr(r2, MemOperand(sp, 2 * kPointerSize));
+  __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+
+  // Get the expected map from the stack or a zero map in the
+  // permanent slow case into register r2.
+  __ ldr(r2, MemOperand(sp, 3 * kPointerSize));
+
+  // Check if the expected map still matches that of the enumerable.
+  // If not, we have to filter the key.
+  Label update_each;
+  __ ldr(r1, MemOperand(sp, 4 * kPointerSize));
+  __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
+  __ cmp(r4, Operand(r2));
+  __ b(eq, &update_each);
+
+  // Convert the entry to a string or null if it isn't a property
+  // anymore. If the property has been removed while iterating, we
+  // just skip it.
+  __ push(r1);  // Enumerable.
+  __ push(r3);  // Current entry.
+  __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS);
+  __ mov(r3, Operand(r0));
+  __ LoadRoot(ip, Heap::kNullValueRootIndex);
+  __ cmp(r3, ip);
+  __ b(eq, loop_statement.continue_target());
+
+  // Update the 'each' property or variable from the possibly filtered
+  // entry in register r3.
+  __ bind(&update_each);
+  __ mov(result_register(), r3);
+  // Perform the assignment as if via '='.
+  EmitAssignment(stmt->each());
+
+  // Generate code for the body of the loop.
+  Label stack_limit_hit, stack_check_done;
+  Visit(stmt->body());
+
+  __ StackLimitCheck(&stack_limit_hit);
+  __ bind(&stack_check_done);
+
+  // Generate code for going to the next element by incrementing
+  // the index (smi) stored on top of the stack.
+  __ bind(loop_statement.continue_target());
+  __ pop(r0);
+  __ add(r0, r0, Operand(Smi::FromInt(1)));
+  __ push(r0);
+  __ b(&loop);
+
+  // Slow case for the stack limit check.
+  StackCheckStub stack_check_stub;
+  __ bind(&stack_limit_hit);
+  __ CallStub(&stack_check_stub);
+  __ b(&stack_check_done);
+
+  // Remove the pointers stored on the stack.
+  __ bind(loop_statement.break_target());
+  __ Drop(5);
+
+  // Exit and decrement the loop depth.
+  __ bind(&exit);
+  decrement_loop_depth();
 }
 
 
@@ -683,7 +1015,7 @@
     __ CallStub(&stub);
   } else {
     __ mov(r0, Operand(info));
-    __ stm(db_w, sp, cp.bit() | r0.bit());
+    __ Push(cp, r0);
     __ CallRuntime(Runtime::kNewClosure, 2);
   }
   Apply(context_, r0);
@@ -717,7 +1049,7 @@
   } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
     Comment cmnt(masm_, "Lookup slot");
     __ mov(r1, Operand(var->name()));
-    __ stm(db_w, sp, cp.bit() | r1.bit());  // Context and name.
+    __ Push(cp, r1);  // Context and name.
     __ CallRuntime(Runtime::kLoadContextSlot, 2);
     Apply(context, r0);
 
@@ -725,8 +1057,21 @@
     Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
                             ? "Context slot"
                             : "Stack slot");
-    Apply(context, slot);
-
+    if (var->mode() == Variable::CONST) {
+      // Constants may be the hole value if they have not been initialized.
+      // Unhole them.
+      Label done;
+      MemOperand slot_operand = EmitSlotSearch(slot, r0);
+      __ ldr(r0, slot_operand);
+      __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+      __ cmp(r0, ip);
+      __ b(ne, &done);
+      __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+      __ bind(&done);
+      Apply(context, r0);
+    } else {
+      Apply(context, slot);
+    }
   } else {
     Comment cmnt(masm_, "Rewritten parameter");
     ASSERT_NOT_NULL(property);
@@ -862,6 +1207,10 @@
 
 void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   Comment cmnt(masm_, "[ ArrayLiteral");
+
+  ZoneList<Expression*>* subexprs = expr->values();
+  int length = subexprs->length();
+
   __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
   __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
@@ -869,16 +1218,18 @@
   __ Push(r3, r2, r1);
   if (expr->depth() > 1) {
     __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
-  } else {
+  } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
     __ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
+  } else {
+    FastCloneShallowArrayStub stub(length);
+    __ CallStub(&stub);
   }
 
   bool result_saved = false;  // Is the result saved to the stack?
 
   // Emit code to evaluate all the non-constant subexpressions and to store
   // them into the newly cloned array.
-  ZoneList<Expression*>* subexprs = expr->values();
-  for (int i = 0, len = subexprs->length(); i < len; i++) {
+  for (int i = 0; i < length; i++) {
     Expression* subexpr = subexprs->at(i);
     // If the subexpression is a literal or a simple materialized literal it
     // is already set in the cloned array.
@@ -1041,6 +1392,56 @@
 }
 
 
+void FullCodeGenerator::EmitAssignment(Expression* expr) {
+  // Invalid left-hand sides are rewritten to have a 'throw
+  // ReferenceError' on the left-hand side.
+  if (!expr->IsValidLeftHandSide()) {
+    VisitForEffect(expr);
+    return;
+  }
+
+  // Left-hand side can only be a property, a global or a (parameter or local)
+  // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+  enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+  LhsKind assign_type = VARIABLE;
+  Property* prop = expr->AsProperty();
+  if (prop != NULL) {
+    assign_type = (prop->key()->IsPropertyName())
+        ? NAMED_PROPERTY
+        : KEYED_PROPERTY;
+  }
+
+  switch (assign_type) {
+    case VARIABLE: {
+      Variable* var = expr->AsVariableProxy()->var();
+      EmitVariableAssignment(var, Token::ASSIGN, Expression::kEffect);
+      break;
+    }
+    case NAMED_PROPERTY: {
+      __ push(r0);  // Preserve value.
+      VisitForValue(prop->obj(), kAccumulator);
+      __ mov(r1, r0);
+      __ pop(r0);  // Restore value.
+      __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
+      Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+      __ Call(ic, RelocInfo::CODE_TARGET);
+      break;
+    }
+    case KEYED_PROPERTY: {
+      __ push(r0);  // Preserve value.
+      VisitForValue(prop->obj(), kStack);
+      VisitForValue(prop->key(), kAccumulator);
+      __ mov(r1, r0);
+      __ pop(r2);
+      __ pop(r0);  // Restore value.
+      Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+      __ Call(ic, RelocInfo::CODE_TARGET);
+      break;
+    }
+  }
+}
+
+
 void FullCodeGenerator::EmitVariableAssignment(Variable* var,
                                                Token::Value op,
                                                Expression::Context context) {
@@ -1082,9 +1483,9 @@
         MemOperand target = EmitSlotSearch(slot, r1);
         if (op == Token::INIT_CONST) {
           // Detect const reinitialization by checking for the hole value.
-          __ ldr(r1, target);
+          __ ldr(r2, target);
           __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-          __ cmp(r1, ip);
+          __ cmp(r2, ip);
           __ b(ne, &done);
         }
         // Perform the assignment and issue the write barrier.
@@ -1256,7 +1657,8 @@
   }
   // Record source position for debugger.
   SetSourcePosition(expr->position());
-  CallFunctionStub stub(arg_count, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
+  InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+  CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
   __ CallStub(&stub);
   // Restore context register.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -1270,8 +1672,51 @@
   Variable* var = fun->AsVariableProxy()->AsVariable();
 
   if (var != NULL && var->is_possibly_eval()) {
-    // Call to the identifier 'eval'.
-    UNREACHABLE();
+    // In a call to eval, we first call %ResolvePossiblyDirectEval to
+    // resolve the function we need to call and the receiver of the
+    // call.  Then we call the resolved function using the given
+    // arguments.
+    VisitForValue(fun, kStack);
+    __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+    __ push(r2);  // Reserved receiver slot.
+
+    // Push the arguments.
+    ZoneList<Expression*>* args = expr->arguments();
+    int arg_count = args->length();
+    for (int i = 0; i < arg_count; i++) {
+      VisitForValue(args->at(i), kStack);
+    }
+
+    // Push copy of the function - found below the arguments.
+    __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+    __ push(r1);
+
+    // Push copy of the first argument or undefined if it doesn't exist.
+    if (arg_count > 0) {
+      __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
+      __ push(r1);
+    } else {
+      __ push(r2);
+    }
+
+    // Push the receiver of the enclosing function and do runtime call.
+    __ ldr(r1, MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize));
+    __ push(r1);
+    __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+
+    // The runtime call returns a pair of values in r0 (function) and
+    // r1 (receiver). Touch up the stack with the right values.
+    __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
+    __ str(r1, MemOperand(sp, arg_count * kPointerSize));
+
+    // Record source position for debugger.
+    SetSourcePosition(expr->position());
+    InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
+    CallFunctionStub stub(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE);
+    __ CallStub(&stub);
+    // Restore context register.
+    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    DropAndApply(1, context_, r0);
   } else if (var != NULL && !var->is_this() && var->is_global()) {
     // Push global object as receiver for the call IC.
     __ ldr(r0, CodeGenerator::GlobalObject());
@@ -1279,8 +1724,16 @@
     EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
   } else if (var != NULL && var->slot() != NULL &&
              var->slot()->type() == Slot::LOOKUP) {
-    // Call to a lookup slot.
-    UNREACHABLE();
+    // Call to a lookup slot (dynamically introduced variable).  Call the
+    // runtime to find the function to call (returned in r0) and the object
+    // holding it (returned in r1).
+    __ push(context_register());
+    __ mov(r2, Operand(var->name()));
+    __ push(r2);
+    __ CallRuntime(Runtime::kLoadContextSlot, 2);
+    __ push(r0);  // Function.
+    __ push(r1);  // Receiver.
+    EmitCallWithStub(expr);
   } else if (fun->AsProperty() != NULL) {
     // Call to an object property.
     Property* prop = fun->AsProperty();
@@ -1376,7 +1829,720 @@
 }
 
 
+void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
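+  // Calls written with a leading '_' in the JS builtins (e.g. %_IsSmi(x))
+  // are routed here by VisitCallRuntime instead of calling into the
+  // runtime; each name dispatches to a hand-written code sequence below.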
+  Handle<String> name = expr->name();
+  if (strcmp("_IsSmi", *name->ToCString()) == 0) {
+    EmitIsSmi(expr->arguments());
+  } else if (strcmp("_IsNonNegativeSmi", *name->ToCString()) == 0) {
+    EmitIsNonNegativeSmi(expr->arguments());
+  } else if (strcmp("_IsObject", *name->ToCString()) == 0) {
+    EmitIsObject(expr->arguments());
+  } else if (strcmp("_IsUndetectableObject", *name->ToCString()) == 0) {
+    EmitIsUndetectableObject(expr->arguments());
+  } else if (strcmp("_IsFunction", *name->ToCString()) == 0) {
+    EmitIsFunction(expr->arguments());
+  } else if (strcmp("_IsArray", *name->ToCString()) == 0) {
+    EmitIsArray(expr->arguments());
+  } else if (strcmp("_IsRegExp", *name->ToCString()) == 0) {
+    EmitIsRegExp(expr->arguments());
+  } else if (strcmp("_IsConstructCall", *name->ToCString()) == 0) {
+    EmitIsConstructCall(expr->arguments());
+  } else if (strcmp("_ObjectEquals", *name->ToCString()) == 0) {
+    EmitObjectEquals(expr->arguments());
+  } else if (strcmp("_Arguments", *name->ToCString()) == 0) {
+    EmitArguments(expr->arguments());
+  } else if (strcmp("_ArgumentsLength", *name->ToCString()) == 0) {
+    EmitArgumentsLength(expr->arguments());
+  } else if (strcmp("_ClassOf", *name->ToCString()) == 0) {
+    EmitClassOf(expr->arguments());
+  } else if (strcmp("_Log", *name->ToCString()) == 0) {
+    EmitLog(expr->arguments());
+  } else if (strcmp("_RandomHeapNumber", *name->ToCString()) == 0) {
+    EmitRandomHeapNumber(expr->arguments());
+  } else if (strcmp("_SubString", *name->ToCString()) == 0) {
+    EmitSubString(expr->arguments());
+  } else if (strcmp("_RegExpExec", *name->ToCString()) == 0) {
+    EmitRegExpExec(expr->arguments());
+  } else if (strcmp("_ValueOf", *name->ToCString()) == 0) {
+    EmitValueOf(expr->arguments());
+  } else if (strcmp("_SetValueOf", *name->ToCString()) == 0) {
+    EmitSetValueOf(expr->arguments());
+  } else if (strcmp("_NumberToString", *name->ToCString()) == 0) {
+    EmitNumberToString(expr->arguments());
+  } else if (strcmp("_CharFromCode", *name->ToCString()) == 0) {
+    EmitCharFromCode(expr->arguments());
+  } else if (strcmp("_FastCharCodeAt", *name->ToCString()) == 0) {
+    EmitFastCharCodeAt(expr->arguments());
+  } else if (strcmp("_StringAdd", *name->ToCString()) == 0) {
+    EmitStringAdd(expr->arguments());
+  } else if (strcmp("_StringCompare", *name->ToCString()) == 0) {
+    EmitStringCompare(expr->arguments());
+  } else if (strcmp("_MathPow", *name->ToCString()) == 0) {
+    EmitMathPow(expr->arguments());
+  } else if (strcmp("_MathSin", *name->ToCString()) == 0) {
+    EmitMathSin(expr->arguments());
+  } else if (strcmp("_MathCos", *name->ToCString()) == 0) {
+    EmitMathCos(expr->arguments());
+  } else if (strcmp("_MathSqrt", *name->ToCString()) == 0) {
+    EmitMathSqrt(expr->arguments());
+  } else if (strcmp("_CallFunction", *name->ToCString()) == 0) {
+    EmitCallFunction(expr->arguments());
+  } else if (strcmp("_RegExpConstructResult", *name->ToCString()) == 0) {
+    EmitRegExpConstructResult(expr->arguments());
+  } else if (strcmp("_SwapElements", *name->ToCString()) == 0) {
+    EmitSwapElements(expr->arguments());
+  } else if (strcmp("_GetFromCache", *name->ToCString()) == 0) {
+    EmitGetFromCache(expr->arguments());
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ BranchOnSmi(r0, if_true);
+  __ b(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
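+  // A non-negative smi has both the smi tag (bit 0) and the sign bit
+  // (bit 31) clear, so one mask tests both at once.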
+  __ tst(r0, Operand(kSmiTagMask | 0x80000000));
+  __ b(eq, if_true);
+  __ b(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+  __ BranchOnSmi(r0, if_false);
+  __ LoadRoot(ip, Heap::kNullValueRootIndex);
+  __ cmp(r0, ip);
+  __ b(eq, if_true);
+  __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+  // Undetectable objects behave like undefined when tested with typeof.
+  __ ldrb(r1, FieldMemOperand(r2, Map::kBitFieldOffset));
+  __ tst(r1, Operand(1 << Map::kIsUndetectable));
+  __ b(ne, if_false);
+  __ ldrb(r1, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+  __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
+  __ b(lt, if_false);
+  __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
+  __ b(le, if_true);
+  __ b(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ BranchOnSmi(r0, if_false);
+  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+  __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
+  __ tst(r1, Operand(1 << Map::kIsUndetectable));
+  __ b(ne, if_true);
+  __ b(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ BranchOnSmi(r0, if_false);
+  __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
+  __ b(eq, if_true);
+  __ b(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ BranchOnSmi(r0, if_false);
+  __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
+  __ b(eq, if_true);
+  __ b(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ BranchOnSmi(r0, if_false);
+  __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
+  __ b(eq, if_true);
+  __ b(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  // Get the frame pointer for the calling frame.
+  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+  // Skip the arguments adaptor frame if it exists.
+  Label check_frame_marker;
+  __ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
+  __ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ b(ne, &check_frame_marker);
+  __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
+
+  // Check the marker in the calling frame.
+  __ bind(&check_frame_marker);
+  __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
+  __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+  __ b(eq, if_true);
+  __ b(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 2);
+
+  // Load the two objects into registers and perform the comparison.
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kAccumulator);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
+
+  __ pop(r1);
+  __ cmp(r0, r1);
+  __ b(eq, if_true);
+  __ b(if_false);
+
+  Apply(context_, if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  // ArgumentsAccessStub expects the key in r1 and the formal
+  // parameter count in r0.
+  VisitForValue(args->at(0), kAccumulator);
+  __ mov(r1, r0);
+  __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
+  ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+  __ CallStub(&stub);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+
+  Label exit;
+  // Get the number of formal parameters.
+  __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
+
+  // Check if the calling frame is an arguments adaptor frame.
+  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
+  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ b(ne, &exit);
+
+  // Arguments adaptor case: Read the arguments length from the
+  // adaptor frame.
+  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+  __ bind(&exit);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+  Label done, null, function, non_function_constructor;
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  // If the object is a smi, we return null.
+  __ BranchOnSmi(r0, &null);
+
+  // Check that the object is a JS object but take special care of JS
+  // functions to make sure they have 'Function' as their class.
+  __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE);  // Map is now in r0.
+  __ b(lt, &null);
+
+  // As long as JS_FUNCTION_TYPE is the last instance type and it is
+  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+  // LAST_JS_OBJECT_TYPE.
+  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+  __ cmp(r1, Operand(JS_FUNCTION_TYPE));
+  __ b(eq, &function);
+
+  // Check if the constructor in the map is a function.
+  __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
+  __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
+  __ b(ne, &non_function_constructor);
+
+  // r0 now contains the constructor function. Grab the
+  // instance class name from there.
+  __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kInstanceClassNameOffset));
+  __ b(&done);
+
+  // Functions have class 'Function'.
+  __ bind(&function);
+  __ LoadRoot(r0, Heap::kfunction_class_symbolRootIndex);
+  __ jmp(&done);
+
+  // Objects with a non-function constructor have class 'Object'.
+  __ bind(&non_function_constructor);
+  __ LoadRoot(r0, Heap::kObject_symbolRootIndex);
+  __ jmp(&done);
+
+  // Non-JS objects have class null.
+  __ bind(&null);
+  __ LoadRoot(r0, Heap::kNullValueRootIndex);
+
+  // All done.
+  __ bind(&done);
+
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+  // Conditionally generate a log call.
+  // Args:
+  //   0 (literal string): The type of logging (corresponds to the flags).
+  //     This is used to determine whether or not to generate the log call.
+  //   1 (string): Format string.  Access the string at argument index 2
+  //     with '%2s' (see Logger::LogRuntime for all the formats).
+  //   2 (array): Arguments to the format string.
+  ASSERT_EQ(args->length(), 3);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
+    VisitForValue(args->at(1), kStack);
+    VisitForValue(args->at(2), kStack);
+    __ CallRuntime(Runtime::kLog, 2);
+  }
+#endif
+  // Finally, we're expected to leave a value on the top of the stack.
+  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+
+  Label slow_allocate_heapnumber;
+  Label heapnumber_allocated;
+
+  __ AllocateHeapNumber(r4, r1, r2, &slow_allocate_heapnumber);
+  __ jmp(&heapnumber_allocated);
+
+  __ bind(&slow_allocate_heapnumber);
+  // To allocate a heap number and ensure that it is not a smi, we call
+  // the runtime function NumberUnaryMinus on 0, returning the double
+  // -0.0. A new, distinct heap number is returned each time.
+  __ mov(r0, Operand(Smi::FromInt(0)));
+  __ push(r0);
+  __ CallRuntime(Runtime::kNumberUnaryMinus, 1);
+  __ mov(r4, Operand(r0));
+
+  __ bind(&heapnumber_allocated);
+
+  // Convert 32 random bits in r0 to 0.(32 random bits) in a double
+  // by computing:
+  // (1.(20 0s)(32 random bits) x 2^20) - (1.0 x 2^20).
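+  // (With the exponent fixed at 2^20, the low 32 mantissa bits contribute
+  // random * 2^-32, so the subtraction leaves exactly that fraction.)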
+  if (CpuFeatures::IsSupported(VFP3)) {
+    __ PrepareCallCFunction(0, r1);
+    __ CallCFunction(ExternalReference::random_uint32_function(), 0);
+
+    CpuFeatures::Scope scope(VFP3);
+    // 0x41300000 is the top half of 1.0 x 2^20 as a double.
+    // Create this constant using mov/orr to avoid PC relative load.
+    __ mov(r1, Operand(0x41000000));
+    __ orr(r1, r1, Operand(0x300000));
+    // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
+    __ vmov(d7, r0, r1);
+    // Move 0x4130000000000000 to VFP.
+    __ mov(r0, Operand(0));
+    __ vmov(d8, r0, r1);
+    // Subtract and store the result in the heap number.
+    __ vsub(d7, d7, d8);
+    __ sub(r0, r4, Operand(kHeapObjectTag));
+    __ vstr(d7, r0, HeapNumber::kValueOffset);
+    __ mov(r0, r4);
+  } else {
+    __ mov(r0, Operand(r4));
+    __ PrepareCallCFunction(1, r1);
+    __ CallCFunction(
+        ExternalReference::fill_heap_number_with_random_function(), 1);
+  }
+
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+  // Load the arguments on the stack and call the stub.
+  SubStringStub stub;
+  ASSERT(args->length() == 3);
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+  VisitForValue(args->at(2), kStack);
+  __ CallStub(&stub);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+  // Load the arguments on the stack and call the stub.
+  RegExpExecStub stub;
+  ASSERT(args->length() == 4);
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+  VisitForValue(args->at(2), kStack);
+  VisitForValue(args->at(3), kStack);
+  __ CallStub(&stub);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);  // Load the object.
+
+  Label done;
+  // If the object is a smi, return the object.
+  __ BranchOnSmi(r0, &done);
+  // If the object is not a value type, return the object.
+  __ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
+  __ b(ne, &done);
+  __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
+
+  __ bind(&done);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
+  // Load the arguments on the stack and call the runtime function.
+  ASSERT(args->length() == 2);
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+  __ CallRuntime(Runtime::kMath_pow, 2);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 2);
+
+  VisitForValue(args->at(0), kStack);  // Load the object.
+  VisitForValue(args->at(1), kAccumulator);  // Load the value.
+  __ pop(r1);  // r0 = value. r1 = object.
+
+  Label done;
+  // If the object is a smi, return the value.
+  __ BranchOnSmi(r1, &done);
+
+  // If the object is not a value type, return the value.
+  __ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
+  __ b(ne, &done);
+
+  // Store the value.
+  __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
+  // Update the write barrier.  Save the value as it will be
+  // overwritten by the write barrier code and is needed afterward.
+  __ mov(r2, Operand(JSValue::kValueOffset - kHeapObjectTag));
+  __ RecordWrite(r1, r2, r3);
+
+  __ bind(&done);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+  ASSERT_EQ(args->length(), 1);
+
+  // Load the argument on the stack and call the stub.
+  VisitForValue(args->at(0), kStack);
+
+  NumberToStringStub stub;
+  __ CallStub(&stub);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitCharFromCode(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 1);
+
+  VisitForValue(args->at(0), kAccumulator);
+
+  Label slow_case, done;
+  // Fast case of Heap::LookupSingleCharacterStringFromCode.
+  ASSERT(kSmiTag == 0);
+  ASSERT(kSmiShiftSize == 0);
+  ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
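+  // One mask rejects non-smis and any char code above kMaxAsciiCharCode.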
+  __ tst(r0, Operand(kSmiTagMask |
+                       ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
+  __ b(nz, &slow_case);
+  __ mov(r1, Operand(Factory::single_character_string_cache()));
+  ASSERT(kSmiTag == 0);
+  ASSERT(kSmiTagSize == 1);
+  ASSERT(kSmiShiftSize == 0);
+  // At this point the code register contains a smi-tagged ASCII char code.
+  __ add(r1, r1, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+  __ ldr(r1, MemOperand(r1, FixedArray::kHeaderSize - kHeapObjectTag));
+  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+  __ cmp(r1, r2);
+  __ b(eq, &slow_case);
+  __ mov(r0, r1);
+  __ b(&done);
+
+  __ bind(&slow_case);
+  __ push(r0);
+  __ CallRuntime(Runtime::kCharFromCode, 1);
+
+  __ bind(&done);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitFastCharCodeAt(ZoneList<Expression*>* args) {
+  // TODO(fsc): Port the complete implementation from the classic back-end.
+  // Move the undefined value into the result register, which will
+  // trigger the slow case.
+  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+  ASSERT_EQ(2, args->length());
+
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+
+  StringAddStub stub(NO_STRING_ADD_FLAGS);
+  __ CallStub(&stub);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+  ASSERT_EQ(2, args->length());
+
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+
+  StringCompareStub stub;
+  __ CallStub(&stub);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+  // Load the argument on the stack and call the runtime.
+  ASSERT(args->length() == 1);
+  VisitForValue(args->at(0), kStack);
+  __ CallRuntime(Runtime::kMath_sin, 1);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+  // Load the argument on the stack and call the runtime.
+  ASSERT(args->length() == 1);
+  VisitForValue(args->at(0), kStack);
+  __ CallRuntime(Runtime::kMath_cos, 1);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+  // Load the argument on the stack and call the runtime function.
+  ASSERT(args->length() == 1);
+  VisitForValue(args->at(0), kStack);
+  __ CallRuntime(Runtime::kMath_sqrt, 1);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+  ASSERT(args->length() >= 2);
+
+  int arg_count = args->length() - 2;  // For receiver and function.
+  VisitForValue(args->at(0), kStack);  // Receiver.
+  for (int i = 0; i < arg_count; i++) {
+    VisitForValue(args->at(i + 1), kStack);
+  }
+  VisitForValue(args->at(arg_count + 1), kAccumulator);  // Function.
+
+  // InvokeFunction requires the function in r1; move it there.
+  if (!result_register().is(r1)) __ mov(r1, result_register());
+  ParameterCount count(arg_count);
+  __ InvokeFunction(r1, count, CALL_FUNCTION);
+  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 3);
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+  VisitForValue(args->at(2), kStack);
+  __ CallRuntime(Runtime::kRegExpConstructResult, 3);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 3);
+  VisitForValue(args->at(0), kStack);
+  VisitForValue(args->at(1), kStack);
+  VisitForValue(args->at(2), kStack);
+  __ CallRuntime(Runtime::kSwapElements, 3);
+  Apply(context_, r0);
+}
+
+
+void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
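+  // A JSFunctionResultCache is a fixed array of (key, value) pairs with a
+  // 'finger' marking the most recently used pair; the fast path below only
+  // probes the pair under the finger and falls back to the runtime on a
+  // miss.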
+  ASSERT_EQ(2, args->length());
+
+  ASSERT_NE(NULL, args->at(0)->AsLiteral());
+  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value();
+
+  Handle<FixedArray> jsfunction_result_caches(
+      Top::global_context()->jsfunction_result_caches());
+  if (jsfunction_result_caches->length() <= cache_id) {
+    __ Abort("Attempt to use undefined cache.");
+    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+    Apply(context_, r0);
+    return;
+  }
+
+  VisitForValue(args->at(1), kAccumulator);
+
+  Register key = r0;
+  Register cache = r1;
+  __ ldr(cache, CodeGenerator::ContextOperand(cp, Context::GLOBAL_INDEX));
+  __ ldr(cache, FieldMemOperand(cache, GlobalObject::kGlobalContextOffset));
+  __ ldr(cache,
+         CodeGenerator::ContextOperand(
+             cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
+  __ ldr(cache,
+         FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
+
+  Label done, not_found;
+  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  __ ldr(r2, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset));
+  // r2 now holds the finger offset as a smi.
+  __ add(r3, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  // r3 now points to the start of fixed array elements.
+  __ ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
+  // Note side effect of PreIndex: r3 now points to the key of the pair.
+  __ cmp(key, r2);
+  __ b(ne, &not_found);
+
+  __ ldr(r0, MemOperand(r3, kPointerSize));
+  __ b(&done);
+
+  __ bind(&not_found);
+  // Call runtime to perform the lookup.
+  __ Push(cache, key);
+  __ CallRuntime(Runtime::kGetFromCache, 2);
+
+  __ bind(&done);
+  Apply(context_, r0);
+}
+
+
 void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+  Handle<String> name = expr->name();
+  if (name->length() > 0 && name->Get(0) == '_') {
+    Comment cmnt(masm_, "[ InlineRuntimeCall");
+    EmitInlineRuntimeCall(expr);
+    return;
+  }
+
   Comment cmnt(masm_, "[ CallRuntime");
   ZoneList<Expression*>* args = expr->arguments();
 
@@ -1411,6 +2577,49 @@
 
 void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
   switch (expr->op()) {
+    case Token::DELETE: {
+      Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
+      Property* prop = expr->expression()->AsProperty();
+      Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
+      if (prop == NULL && var == NULL) {
+        // Result of deleting non-property, non-variable reference is true.
+        // The subexpression may have side effects.
+        VisitForEffect(expr->expression());
+        Apply(context_, true);
+      } else if (var != NULL &&
+                 !var->is_global() &&
+                 var->slot() != NULL &&
+                 var->slot()->type() != Slot::LOOKUP) {
+        // Result of deleting non-global, non-dynamic variables is false.
+        // The subexpression does not have side effects.
+        Apply(context_, false);
+      } else {
+        // Property or variable reference.  Call the delete builtin with
+        // object and property name as arguments.
+        if (prop != NULL) {
+          VisitForValue(prop->obj(), kStack);
+          VisitForValue(prop->key(), kStack);
+        } else if (var->is_global()) {
+          __ ldr(r1, CodeGenerator::GlobalObject());
+          __ mov(r0, Operand(var->name()));
+          __ Push(r1, r0);
+        } else {
+          // Non-global variable.  Call the runtime to look up the context
+          // where the variable was introduced.
+          __ push(context_register());
+          __ mov(r2, Operand(var->name()));
+          __ push(r2);
+          __ CallRuntime(Runtime::kLookupContext, 2);
+          __ push(r0);
+          __ mov(r2, Operand(var->name()));
+          __ push(r2);
+        }
+        __ InvokeBuiltin(Builtins::DELETE, CALL_JS);
+        Apply(context_, r0);
+      }
+      break;
+    }
+
     case Token::VOID: {
       Comment cmnt(masm_, "[ UnaryOperation (VOID)");
       VisitForEffect(expr->expression());
@@ -1451,33 +2660,15 @@
 
     case Token::NOT: {
       Comment cmnt(masm_, "[ UnaryOperation (NOT)");
-      Label materialize_true, materialize_false, done;
-      // Initially assume a pure test context.  Notice that the labels are
-      // swapped.
-      Label* if_true = false_label_;
-      Label* if_false = true_label_;
-      switch (context_) {
-        case Expression::kUninitialized:
-          UNREACHABLE();
-          break;
-        case Expression::kEffect:
-          if_true = &done;
-          if_false = &done;
-          break;
-        case Expression::kValue:
-          if_true = &materialize_false;
-          if_false = &materialize_true;
-          break;
-        case Expression::kTest:
-          break;
-        case Expression::kValueTest:
-          if_false = &materialize_true;
-          break;
-        case Expression::kTestValue:
-          if_true = &materialize_false;
-          break;
-      }
+      Label materialize_true, materialize_false;
+      Label* if_true = NULL;
+      Label* if_false = NULL;
+
+      // Notice that the labels are swapped.
+      PrepareTest(&materialize_true, &materialize_false, &if_false, &if_true);
+
       VisitForControl(expr->expression(), if_true, if_false);
+
       Apply(context_, if_false, if_true);  // Labels swapped.
       break;
     }
@@ -1500,7 +2691,7 @@
                  proxy->var()->slot() != NULL &&
                  proxy->var()->slot()->type() == Slot::LOOKUP) {
         __ mov(r0, Operand(proxy->name()));
-        __ stm(db_w, sp, cp.bit() | r0.bit());
+        __ Push(cp, r0);
         __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
         __ push(r0);
       } else {
@@ -1551,8 +2742,7 @@
       VisitForValue(expr->expression(), kAccumulator);
       // Avoid calling the stub for Smis.
       Label smi, done;
-      __ tst(result_register(), Operand(kSmiTagMask));
-      __ b(eq, &smi);
+      __ BranchOnSmi(result_register(), &smi);
       // Non-smi: call stub leaving result in accumulator register.
       __ CallStub(&stub);
       __ b(&done);
@@ -1574,6 +2764,12 @@
 
 void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
   Comment cmnt(masm_, "[ CountOperation");
+  // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
+  // as the left-hand side.
+  if (!expr->expression()->IsValidLeftHandSide()) {
+    VisitForEffect(expr->expression());
+    return;
+  }
 
   // Expression can only be a property, a global or a (parameter or local)
   // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
@@ -1617,8 +2813,7 @@
 
   // Call ToNumber only if operand is not a smi.
   Label no_conversion;
-  __ tst(r0, Operand(kSmiTagMask));
-  __ b(eq, &no_conversion);
+  __ BranchOnSmi(r0, &no_conversion);
   __ push(r0);
   __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS);
   __ bind(&no_conversion);
@@ -1662,8 +2857,7 @@
     __ b(vs, &stub_call);
     // We could eliminate this smi check if we split the code at
     // the first smi check before calling ToNumber.
-    __ tst(r0, Operand(kSmiTagMask));
-    __ b(eq, &done);
+    __ BranchOnSmi(r0, &done);
     __ bind(&stub_call);
     // Call stub. Undo operation first.
     __ sub(r0, r0, Operand(Smi::FromInt(count_value)));
@@ -1758,36 +2952,41 @@
 }
 
 
+void FullCodeGenerator::EmitNullCompare(bool strict,
+                                        Register obj,
+                                        Register null_const,
+                                        Label* if_true,
+                                        Label* if_false,
+                                        Register scratch) {
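+  // In JavaScript, 'x == null' holds for null, undefined and undetectable
+  // objects, while 'x === null' holds for null only; 'strict' selects
+  // between the two behaviors.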
+  __ cmp(obj, null_const);
+  if (strict) {
+    __ b(eq, if_true);
+  } else {
+    __ b(eq, if_true);
+    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+    __ cmp(obj, ip);
+    __ b(eq, if_true);
+    __ BranchOnSmi(obj, if_false);
+    // It can be an undetectable object.
+    __ ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+    __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
+    __ b(ne, if_true);
+  }
+  __ jmp(if_false);
+}
+
+
 void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
   Comment cmnt(masm_, "[ CompareOperation");
 
   // Always perform the comparison for its control flow.  Pack the result
   // into the expression's context after the comparison is performed.
-  Label materialize_true, materialize_false, done;
-  // Initially assume we are in a test context.
-  Label* if_true = true_label_;
-  Label* if_false = false_label_;
-  switch (context_) {
-    case Expression::kUninitialized:
-      UNREACHABLE();
-      break;
-    case Expression::kEffect:
-      if_true = &done;
-      if_false = &done;
-      break;
-    case Expression::kValue:
-      if_true = &materialize_true;
-      if_false = &materialize_false;
-      break;
-    case Expression::kTest:
-      break;
-    case Expression::kValueTest:
-      if_true = &materialize_true;
-      break;
-    case Expression::kTestValue:
-      if_false = &materialize_false;
-      break;
-  }
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  PrepareTest(&materialize_true, &materialize_false, &if_true, &if_false);
 
   VisitForValue(expr->left(), kStack);
   switch (expr->op()) {
@@ -1818,10 +3017,24 @@
         case Token::EQ_STRICT:
           strict = true;
           // Fall through
-        case Token::EQ:
+        case Token::EQ: {
           cc = eq;
           __ pop(r1);
+          // If either operand is constant null we do a fast compare
+          // against null.
+          Literal* right_literal = expr->right()->AsLiteral();
+          Literal* left_literal = expr->left()->AsLiteral();
+          if (right_literal != NULL && right_literal->handle()->IsNull()) {
+            EmitNullCompare(strict, r1, r0, if_true, if_false, r2);
+            Apply(context_, if_true, if_false);
+            return;
+          } else if (left_literal != NULL && left_literal->handle()->IsNull()) {
+            EmitNullCompare(strict, r0, r1, if_true, if_false, r2);
+            Apply(context_, if_true, if_false);
+            return;
+          }
           break;
+        }
         case Token::LT:
           cc = lt;
           __ pop(r1);
@@ -1852,8 +3065,7 @@
       // before it is called.
       Label slow_case;
       __ orr(r2, r0, Operand(r1));
-      __ tst(r2, Operand(kSmiTagMask));
-      __ b(ne, &slow_case);
+      __ BranchOnNotSmi(r2, &slow_case);
       __ cmp(r1, r0);
       __ b(cc, if_true);
       __ jmp(if_false);
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index e356d55..29c48a4 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -354,6 +354,51 @@
 }
 
 
+void MacroAssembler::Ldrd(Register dst1, Register dst2,
+                          const MemOperand& src, Condition cond) {
+  ASSERT(src.rm().is(no_reg));
+  ASSERT(!dst1.is(lr));  // r14.
+  ASSERT_EQ(0, dst1.code() % 2);
+  ASSERT_EQ(dst1.code() + 1, dst2.code());
+
+  // Generate two ldr instructions if ldrd is not available.
+  if (CpuFeatures::IsSupported(ARMv7)) {
+    CpuFeatures::Scope scope(ARMv7);
+    ldrd(dst1, dst2, src, cond);
+  } else {
+    MemOperand src2(src);
+    src2.set_offset(src2.offset() + 4);
+    if (dst1.is(src.rn())) {
+      ldr(dst2, src2, cond);
+      ldr(dst1, src, cond);
+    } else {
+      ldr(dst1, src, cond);
+      ldr(dst2, src2, cond);
+    }
+  }
+}
+
+
+void MacroAssembler::Strd(Register src1, Register src2,
+                          const MemOperand& dst, Condition cond) {
+  ASSERT(dst.rm().is(no_reg));
+  ASSERT(!src1.is(lr));  // r14.
+  ASSERT_EQ(0, src1.code() % 2);
+  ASSERT_EQ(src1.code() + 1, src2.code());
+
+  // Generate two str instructions if strd is not available.
+  if (CpuFeatures::IsSupported(ARMv7)) {
+    CpuFeatures::Scope scope(ARMv7);
+    strd(src1, src2, dst, cond);
+  } else {
+    MemOperand dst2(dst);
+    dst2.set_offset(dst2.offset() + 4);
+    str(src1, dst, cond);
+    str(src2, dst2, cond);
+  }
+}
+
+
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   // r0-r3: preserved
   stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 9cf93da..494f2b6 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -185,6 +185,18 @@
     }
   }
 
+  // Load two consecutive registers with two consecutive memory locations.
+  void Ldrd(Register dst1,
+            Register dst2,
+            const MemOperand& src,
+            Condition cond = al);
+
+  // Store two consecutive registers to two consecutive memory locations.
+  void Strd(Register src1,
+            Register src2,
+            const MemOperand& dst,
+            Condition cond = al);
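+
+  // For example (a sketch; 'obj' stands for any register holding a tagged
+  // heap number):
+  //   __ Ldrd(r0, r1, FieldMemOperand(obj, HeapNumber::kValueOffset));
+  // emits a single ldrd on ARMv7 and two ldr instructions otherwise.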
+
   // ---------------------------------------------------------------------------
   // Stack limit support
 
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 8001cd8..d82ef21 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -436,7 +436,7 @@
                         Register holder,
                         Register scratch1,
                         Register scratch2,
-                        JSObject* holder_obj,
+                        JSObject* interceptor_holder,
                         LookupResult* lookup,
                         String* name,
                         Label* miss_label) {
@@ -456,7 +456,8 @@
     }
 
     if (!optimize) {
-      CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
+      CompileRegular(masm, receiver, holder, scratch2, interceptor_holder,
+                     miss_label);
       return;
     }
 
@@ -466,14 +467,18 @@
     __ push(receiver);
     __ Push(holder, name_);
 
+    // Invoke the interceptor.  Note: the map checks from the receiver to
+    // the interceptor's holder have been compiled before (see the caller
+    // of this method).
     CompileCallLoadPropertyWithInterceptor(masm,
                                            receiver,
                                            holder,
                                            name_,
-                                           holder_obj);
+                                           interceptor_holder);
 
+    // Check if the interceptor provided a value for the property.  If so,
+    // return immediately.
     Label interceptor_failed;
-    // Compare with no_interceptor_result_sentinel.
     __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
     __ cmp(r0, scratch1);
     __ b(eq, &interceptor_failed);
@@ -488,13 +493,17 @@
     __ LeaveInternalFrame();
 
     if (lookup->type() == FIELD) {
-      holder = stub_compiler->CheckPrototypes(holder_obj,
+      // We found a FIELD property in the prototype chain of the
+      // interceptor's holder.  Check that the maps from the interceptor's
+      // holder to the field's holder haven't changed...
+      holder = stub_compiler->CheckPrototypes(interceptor_holder,
                                               holder,
                                               lookup->holder(),
                                               scratch1,
                                               scratch2,
                                               name,
                                               miss_label);
+      // ... and retrieve the field from the field's holder.
       stub_compiler->GenerateFastPropertyLoad(masm,
                                               r0,
                                               holder,
@@ -502,30 +511,38 @@
                                               lookup->GetFieldIndex());
       __ Ret();
     } else {
+      // We found a CALLBACKS property in the prototype chain of the
+      // interceptor's holder.
       ASSERT(lookup->type() == CALLBACKS);
       ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
       ASSERT(callback != NULL);
       ASSERT(callback->getter() != NULL);
 
+      // Prepare for the tail call: push the receiver onto the stack.
       Label cleanup;
       __ push(receiver);
 
-      holder = stub_compiler->CheckPrototypes(holder_obj, holder,
+      // Check that the maps from the interceptor's holder to the callback's
+      // holder haven't changed.
+      holder = stub_compiler->CheckPrototypes(interceptor_holder, holder,
                                               lookup->holder(), scratch1,
                                               scratch2,
                                               name,
                                               &cleanup);
 
+      // Continue tail call preparation: push remaining parameters.
       __ push(holder);
       __ Move(holder, Handle<AccessorInfo>(callback));
       __ push(holder);
       __ ldr(scratch1, FieldMemOperand(holder, AccessorInfo::kDataOffset));
       __ Push(scratch1, name_);
 
+      // Tail call to runtime.
       ExternalReference ref =
           ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
       __ TailCallExternalReference(ref, 5, 1);
 
+      // Cleanup code: we pushed the receiver and need to remove it.
       __ bind(&cleanup);
       __ pop(scratch2);
     }
@@ -536,9 +553,9 @@
                       Register receiver,
                       Register holder,
                       Register scratch,
-                      JSObject* holder_obj,
+                      JSObject* interceptor_holder,
                       Label* miss_label) {
-    PushInterceptorArguments(masm, receiver, holder, name_, holder_obj);
+    PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
 
     ExternalReference ref = ExternalReference(
         IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
@@ -714,7 +731,7 @@
                        Register receiver,
                        Register scratch1,
                        Register scratch2,
-                       JSObject* holder_obj,
+                       JSObject* interceptor_holder,
                        LookupResult* lookup,
                        String* name,
                        const CallOptimization& optimization,
@@ -727,10 +744,13 @@
     bool can_do_fast_api_call = false;
     if (optimization.is_simple_api_call() &&
        !lookup->holder()->IsGlobalObject()) {
-     depth1 = optimization.GetPrototypeDepthOfExpectedType(object, holder_obj);
+     depth1 =
+         optimization.GetPrototypeDepthOfExpectedType(object,
+                                                      interceptor_holder);
      if (depth1 == kInvalidProtoDepth) {
-       depth2 = optimization.GetPrototypeDepthOfExpectedType(holder_obj,
-                                                             lookup->holder());
+       depth2 =
+           optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
+                                                        lookup->holder());
      }
      can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
                             (depth2 != kInvalidProtoDepth);
@@ -745,23 +765,31 @@
       ReserveSpaceForFastApiCall(masm, scratch1);
     }
 
+    // Check that the maps from the receiver to the interceptor's holder
+    // haven't changed, so that we can invoke the interceptor.
     Label miss_cleanup;
     Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
     Register holder =
-        stub_compiler_->CheckPrototypes(object, receiver, holder_obj, scratch1,
-                                        scratch2, name, depth1, miss);
+        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
+                                        scratch1, scratch2, name,
+                                        depth1, miss);
 
+    // Invoke the interceptor; if it provides a value,
+    // branch to |regular_invoke|.
     Label regular_invoke;
-    LoadWithInterceptor(masm, receiver, holder, holder_obj, scratch2,
+    LoadWithInterceptor(masm, receiver, holder, interceptor_holder, scratch2,
                         &regular_invoke);
 
-    // Generate code for the failed interceptor case.
+    // The interceptor returned nothing for this property.  Try to use the
+    // cached constant function.
 
-    // Check the lookup is still valid.
-    stub_compiler_->CheckPrototypes(holder_obj, receiver,
+    // Check that the maps from the interceptor's holder to the constant
+    // function's holder haven't changed, so that we can use the cached
+    // constant function.
+    stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
                                     lookup->holder(), scratch1,
                                     scratch2, name, depth2, miss);
 
+    // Invoke function.
     if (can_do_fast_api_call) {
       GenerateFastApiCall(masm, optimization, arguments_.immediate());
     } else {
@@ -769,12 +797,14 @@
                         JUMP_FUNCTION);
     }
 
+    // Deferred code for the fast API call case: clean up preallocated space.
     if (can_do_fast_api_call) {
       __ bind(&miss_cleanup);
       FreeSpaceForFastApiCall(masm);
       __ b(miss_label);
     }
 
+    // Invoke a regular function.
     __ bind(&regular_invoke);
     if (can_do_fast_api_call) {
       FreeSpaceForFastApiCall(masm);
@@ -787,10 +817,10 @@
                       Register scratch1,
                       Register scratch2,
                       String* name,
-                      JSObject* holder_obj,
+                      JSObject* interceptor_holder,
                       Label* miss_label) {
     Register holder =
-        stub_compiler_->CheckPrototypes(object, receiver, holder_obj,
+        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
                                         scratch1, scratch2, name,
                                         miss_label);
 
@@ -803,7 +833,7 @@
                              receiver,
                              holder,
                              name_,
-                             holder_obj);
+                             interceptor_holder);
 
     __ CallExternalReference(
           ExternalReference(
diff --git a/src/assembler.h b/src/assembler.h
index f2a6c8b..74613b3 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -38,6 +38,7 @@
 #include "runtime.h"
 #include "top.h"
 #include "token.h"
+#include "objects.h"
 
 namespace v8 {
 namespace internal {
@@ -199,9 +200,23 @@
   INLINE(Object** target_object_address());
   INLINE(void set_target_object(Object* target));
 
-  // Read the address of the word containing the target_address. Can only
-  // be called if IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY.
+  // Read the address of the word containing the target_address in an
+  // instruction stream.  What this means exactly is architecture-dependent.
+  // The only architecture-independent user of this function is the serializer.
+  // The serializer uses it to find out how many raw bytes of instruction to
+  // output before the next target.  Architecture-independent code shouldn't
+  // dereference the pointer it gets back from this.
   INLINE(Address target_address_address());
+  // This indicates how much space a target takes up when deserializing a code
+  // stream.  For most architectures this is just the size of a pointer.  For
+  // an instruction like movw/movt where the target bits are mixed into the
+  // instruction bits the size of the target will be zero, indicating that the
+  // serializer should not step forwards in memory after a target is resolved
+  // and written.  In this case the target_address_address function above
+  // should return the end of the instructions to be patched, allowing the
+  // deserializer to deserialize the instructions as raw bytes and put them in
+  // place, ready to be patched with the target.
+  INLINE(int target_address_size());
 
   // Read/modify the reference in the instruction this relocation
   // applies to; can only be called if rmode_ is external_reference
@@ -216,6 +231,8 @@
   INLINE(Object** call_object_address());
   INLINE(void set_call_object(Object* target));
 
+  inline void Visit(ObjectVisitor* v);
+
   // Patch the code with some other code.
   void PatchCode(byte* instructions, int instruction_count);
 
diff --git a/src/circular-queue-inl.h b/src/circular-queue-inl.h
index 90ab0f5..349f222 100644
--- a/src/circular-queue-inl.h
+++ b/src/circular-queue-inl.h
@@ -34,54 +34,6 @@
 namespace internal {
 
 
-template<typename Record>
-CircularQueue<Record>::CircularQueue(int desired_buffer_size_in_bytes)
-    : buffer_(NewArray<Record>(desired_buffer_size_in_bytes / sizeof(Record))),
-      buffer_end_(buffer_ + desired_buffer_size_in_bytes / sizeof(Record)),
-      enqueue_semaphore_(
-          OS::CreateSemaphore(static_cast<int>(buffer_end_ - buffer_) - 1)),
-      enqueue_pos_(buffer_),
-      dequeue_pos_(buffer_) {
-  // To be able to distinguish between a full and an empty queue
-  // state, the queue must be capable of containing at least 2
-  // records.
-  ASSERT((buffer_end_ - buffer_) >= 2);
-}
-
-
-template<typename Record>
-CircularQueue<Record>::~CircularQueue() {
-  DeleteArray(buffer_);
-  delete enqueue_semaphore_;
-}
-
-
-template<typename Record>
-void CircularQueue<Record>::Dequeue(Record* rec) {
-  ASSERT(!IsEmpty());
-  *rec = *dequeue_pos_;
-  dequeue_pos_ = Next(dequeue_pos_);
-  // Tell we have a spare record.
-  enqueue_semaphore_->Signal();
-}
-
-
-template<typename Record>
-void CircularQueue<Record>::Enqueue(const Record& rec) {
-  // Wait until we have at least one spare record.
-  enqueue_semaphore_->Wait();
-  ASSERT(Next(enqueue_pos_) != dequeue_pos_);
-  *enqueue_pos_ = rec;
-  enqueue_pos_ = Next(enqueue_pos_);
-}
-
-
-template<typename Record>
-Record* CircularQueue<Record>::Next(Record* curr) {
-  return ++curr != buffer_end_ ? curr : buffer_;
-}
-
-
 void* SamplingCircularQueue::Enqueue() {
   WrapPositionIfNeeded(&producer_pos_->enqueue_pos);
   void* result = producer_pos_->enqueue_pos;
diff --git a/src/circular-queue.h b/src/circular-queue.h
index 486f107..73afc68 100644
--- a/src/circular-queue.h
+++ b/src/circular-queue.h
@@ -32,32 +32,6 @@
 namespace internal {
 
 
-// Lock-based blocking circular queue for small records.  Intended for
-// transfer of small records between a single producer and a single
-// consumer. Blocks on enqueue operation if the queue is full.
-template<typename Record>
-class CircularQueue {
- public:
-  inline explicit CircularQueue(int desired_buffer_size_in_bytes);
-  inline ~CircularQueue();
-
-  INLINE(void Dequeue(Record* rec));
-  INLINE(void Enqueue(const Record& rec));
-  INLINE(bool IsEmpty()) { return enqueue_pos_ == dequeue_pos_; }
-
- private:
-  INLINE(Record* Next(Record* curr));
-
-  Record* buffer_;
-  Record* const buffer_end_;
-  Semaphore* enqueue_semaphore_;
-  Record* enqueue_pos_;
-  Record* dequeue_pos_;
-
-  DISALLOW_COPY_AND_ASSIGN(CircularQueue);
-};
-
-
 // Lock-free cache-friendly sampling circular queue for large
 // records. Intended for fast transfer of large records between a
 // single producer and a single consumer. If the queue is full,
diff --git a/src/compiler.cc b/src/compiler.cc
index 27d4835..ca92ed9 100755
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -44,6 +44,18 @@
 namespace v8 {
 namespace internal {
 
+// For normal operation the syntax checker is used to determine whether to
+// use the full compiler for top-level code or not.  However, if the flag
+// --always-full-compiler is specified or debugging is active, the full
+// compiler will be used for all code.
+static bool AlwaysFullCompiler() {
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  return FLAG_always_full_compiler || Debugger::IsDebuggerActive();
+#else
+  return FLAG_always_full_compiler;
+#endif
+}
+
 
 static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
   FunctionLiteral* function = info->function();
@@ -120,21 +132,9 @@
       ? info->scope()->is_global_scope()
       : (shared->is_toplevel() || shared->try_full_codegen());
 
-  bool force_full_compiler = false;
-#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64)
-  // On ia32 the full compiler can compile all code whereas the other platforms
-  // the constructs supported is checked by the associated syntax checker. When
-  // --always-full-compiler is used on ia32 the syntax checker is still in
-  // effect, but there is a special flag --force-full-compiler to ignore the
-  // syntax checker completely and use the full compiler for all code. Also
-  // when debugging on ia32 the full compiler will be used for all code.
-  force_full_compiler =
-      Debugger::IsDebuggerActive() || FLAG_force_full_compiler;
-#endif
-
-  if (force_full_compiler) {
+  if (AlwaysFullCompiler()) {
     return FullCodeGenerator::MakeCode(info);
-  } else if (FLAG_always_full_compiler || (FLAG_full_compiler && is_run_once)) {
+  } else if (FLAG_full_compiler && is_run_once) {
     FullCodeGenSyntaxChecker checker;
     checker.Check(function);
     if (checker.has_supported_syntax()) {
@@ -521,7 +521,11 @@
     CHECK(!FLAG_always_full_compiler || !FLAG_always_fast_compiler);
     bool is_run_once = literal->try_full_codegen();
     bool is_compiled = false;
-    if (FLAG_always_full_compiler || (FLAG_full_compiler && is_run_once)) {
+
+    if (AlwaysFullCompiler()) {
+      code = FullCodeGenerator::MakeCode(&info);
+      is_compiled = true;
+    } else if (FLAG_full_compiler && is_run_once) {
       FullCodeGenSyntaxChecker checker;
       checker.Check(literal);
       if (checker.has_supported_syntax()) {
diff --git a/src/cpu-profiler-inl.h b/src/cpu-profiler-inl.h
index 9ef6841..cb7fdd8 100644
--- a/src/cpu-profiler-inl.h
+++ b/src/cpu-profiler-inl.h
@@ -34,6 +34,7 @@
 
 #include "circular-queue-inl.h"
 #include "profile-generator-inl.h"
+#include "unbound-queue-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index 52a891f..31c4658 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -46,7 +46,6 @@
 ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
     : generator_(generator),
       running_(false),
-      events_buffer_(kEventsBufferSize),
       ticks_buffer_(sizeof(TickSampleEventRecord),
                     kTickSamplesBufferChunkSize,
                     kTickSamplesBufferChunksCount),
diff --git a/src/cpu-profiler.h b/src/cpu-profiler.h
index a51133d..81f9ae3 100644
--- a/src/cpu-profiler.h
+++ b/src/cpu-profiler.h
@@ -31,6 +31,7 @@
 #ifdef ENABLE_LOGGING_AND_PROFILING
 
 #include "circular-queue.h"
+#include "unbound-queue.h"
 
 namespace v8 {
 namespace internal {
@@ -181,7 +182,7 @@
 
   ProfileGenerator* generator_;
   bool running_;
-  CircularQueue<CodeEventsContainer> events_buffer_;
+  UnboundQueue<CodeEventsContainer> events_buffer_;
   SamplingCircularQueue ticks_buffer_;
   unsigned enqueue_order_;
 };
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index c360508..c086df4 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -149,10 +149,6 @@
 DEFINE_bool(fast_compiler, false, "enable speculative optimizing backend")
 DEFINE_bool(always_full_compiler, false,
             "try to use the dedicated run-once backend for all code")
-#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64)
-DEFINE_bool(force_full_compiler, false,
-            "force use of the dedicated run-once backend for all code")
-#endif
 DEFINE_bool(always_fast_compiler, false,
             "try to use the speculative optimizing backend for all code")
 DEFINE_bool(trace_bailout, false,
diff --git a/src/handles.cc b/src/handles.cc
index 1d4465f..c90365c 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -399,6 +399,11 @@
 }
 
 
+Handle<Object> SetAccessor(Handle<JSObject> obj, Handle<AccessorInfo> info) {
+  CALL_HEAP_FUNCTION(obj->DefineAccessor(*info), Object);
+}
+
+
 // Wrappers for scripts are kept alive and cached in weak global
 // handles referred from proxy objects held by the scripts as long as
 // they are used. When they are not used anymore, the garbage
diff --git a/src/handles.h b/src/handles.h
index ea13def..96b17a6 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -262,6 +262,8 @@
 
 Handle<JSObject> Copy(Handle<JSObject> obj);
 
+Handle<Object> SetAccessor(Handle<JSObject> obj, Handle<AccessorInfo> info);
+
 Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray>,
                                       Handle<JSArray> array);
 
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index 1d88220..9c96e19 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -38,6 +38,7 @@
 #define V8_IA32_ASSEMBLER_IA32_INL_H_
 
 #include "cpu.h"
+#include "debug.h"
 
 namespace v8 {
 namespace internal {
@@ -77,6 +78,11 @@
 }
 
 
+int RelocInfo::target_address_size() {
+  return Assembler::kExternalTargetSize;
+}
+
+
 void RelocInfo::set_target_address(Address target) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
   Assembler::set_target_address_at(pc_, target);
@@ -148,6 +154,26 @@
 }
 
 
+void RelocInfo::Visit(ObjectVisitor* visitor) {
+  RelocInfo::Mode mode = rmode();
+  if (mode == RelocInfo::EMBEDDED_OBJECT) {
+    visitor->VisitPointer(target_object_address());
+  } else if (RelocInfo::IsCodeTarget(mode)) {
+    visitor->VisitCodeTarget(this);
+  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+    visitor->VisitExternalReference(target_reference_address());
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  } else if (Debug::has_break_points() &&
+             RelocInfo::IsJSReturn(mode) &&
+             IsPatchedReturnSequence()) {
+    visitor->VisitDebugTarget(this);
+#endif
+  } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+    visitor->VisitRuntimeEntry(this);
+  }
+}
+
+
 Immediate::Immediate(int x)  {
   x_ = x;
   rmode_ = RelocInfo::NONE;
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 226a374..c55ec7b 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -695,9 +695,7 @@
   } else if (variable != NULL && variable->slot() != NULL) {
     // For a variable that rewrites to a slot, we signal it is the immediate
     // subexpression of a typeof.
-    Result result =
-        LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
-    frame()->Push(&result);
+    LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
   } else {
     // Anything else can be handled normally.
     Load(expr);
@@ -746,7 +744,8 @@
     // We have to skip storing into the arguments slot if it has already
     // been written to. This can happen if a function has a local
     // variable named 'arguments'.
-    Result probe = LoadFromSlot(arguments->slot(), NOT_INSIDE_TYPEOF);
+    LoadFromSlot(arguments->slot(), NOT_INSIDE_TYPEOF);
+    Result probe = frame_->Pop();
     if (probe.is_constant()) {
       // We have to skip updating the arguments object if it has
       // been assigned a proper value.
@@ -3026,9 +3025,7 @@
   // Load the receiver and the existing arguments object onto the
   // expression stack. Avoid allocating the arguments object here.
   Load(receiver);
-  Result existing_args =
-      LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
-  frame()->Push(&existing_args);
+  LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
 
   // Emit the source position information after having loaded the
   // receiver and the arguments.
@@ -4719,19 +4716,19 @@
 }
 
 
-Result CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
-  Result result;
+void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
   if (slot->type() == Slot::LOOKUP) {
     ASSERT(slot->var()->is_dynamic());
     JumpTarget slow;
     JumpTarget done;
+    Result value;
 
     // Generate fast case for loading from slots that correspond to
     // local/global variables or arguments unless they are shadowed by
     // eval-introduced bindings.
     EmitDynamicLoadFromSlotFastCase(slot,
                                     typeof_state,
-                                    &result,
+                                    &value,
                                     &slow,
                                     &done);
 
@@ -4743,14 +4740,14 @@
     frame()->EmitPush(esi);
     frame()->EmitPush(Immediate(slot->var()->name()));
     if (typeof_state == INSIDE_TYPEOF) {
-      result =
+      value =
           frame()->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
     } else {
-      result = frame()->CallRuntime(Runtime::kLoadContextSlot, 2);
+      value = frame()->CallRuntime(Runtime::kLoadContextSlot, 2);
     }
 
-    done.Bind(&result);
-    return result;
+    done.Bind(&value);
+    frame_->Push(&value);
 
   } else if (slot->var()->mode() == Variable::CONST) {
     // Const slots may contain 'the hole' value (the constant hasn't been
@@ -4767,15 +4764,13 @@
     __ j(not_equal, &exit);
     __ mov(ecx, Factory::undefined_value());
     __ bind(&exit);
-    return Result(ecx);
+    frame()->EmitPush(ecx);
 
   } else if (slot->type() == Slot::PARAMETER) {
     frame()->PushParameterAt(slot->index());
-    return frame()->Pop();
 
   } else if (slot->type() == Slot::LOCAL) {
     frame()->PushLocalAt(slot->index());
-    return frame()->Pop();
 
   } else {
     // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
@@ -4784,46 +4779,48 @@
     // The use of SlotOperand below is safe for an unspilled frame
     // because it will always be a context slot.
     ASSERT(slot->type() == Slot::CONTEXT);
-    result = allocator()->Allocate();
-    ASSERT(result.is_valid());
-    __ mov(result.reg(), SlotOperand(slot, result.reg()));
-    return result;
+    Result temp = allocator()->Allocate();
+    ASSERT(temp.is_valid());
+    __ mov(temp.reg(), SlotOperand(slot, temp.reg()));
+    frame()->Push(&temp);
   }
 }
 
 
-Result CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
+void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
                                                     TypeofState state) {
-  Result result = LoadFromSlot(slot, state);
+  LoadFromSlot(slot, state);
 
   // Bail out quickly if we're not using lazy arguments allocation.
-  if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return result;
+  if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
 
   // ... or if the slot isn't a non-parameter arguments slot.
-  if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return result;
+  if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
 
   // If the loaded value is a constant, we know if the arguments
   // object has been lazily loaded yet.
+  Result result = frame()->Pop();
   if (result.is_constant()) {
     if (result.handle()->IsTheHole()) {
-      result.Unuse();
-      return StoreArgumentsObject(false);
-    } else {
-      return result;
+      result = StoreArgumentsObject(false);
     }
+    frame()->Push(&result);
+    return;
   }
-
+  ASSERT(result.is_register());
   // The loaded value is in a register. If it is the sentinel that
   // indicates that we haven't loaded the arguments object yet, we
   // need to do it now.
   JumpTarget exit;
   __ cmp(Operand(result.reg()), Immediate(Factory::the_hole_value()));
-  exit.Branch(not_equal, &result);
+  frame()->Push(&result);
+  exit.Branch(not_equal);
 
-  result.Unuse();
   result = StoreArgumentsObject(false);
-  exit.Bind(&result);
-  return result;
+  frame()->SetElementAt(0, &result);
+  result.Unuse();
+  exit.Bind();
+  return;
 }
 
 
@@ -5073,8 +5070,7 @@
       UNREACHABLE();
     }
   } else {
-    Result result = LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
-    frame()->Push(&result);
+    LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
   }
 }
 
@@ -5395,8 +5391,7 @@
   if (node->is_compound()) {
     // For a compound assignment the right-hand side is a binary operation
     // between the current property value and the actual right-hand side.
-    Result result = LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
-    frame()->Push(&result);
+    LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
     Load(node->value());
 
     // Perform the binary operation.
@@ -8603,16 +8598,16 @@
   if (loop_nesting() > 0) {
     Comment cmnt(masm_, "[ Inlined load from keyed Property");
 
-    Result key = frame_->Pop();
-    Result receiver = frame_->Pop();
-    key.ToRegister();
-    receiver.ToRegister();
-
     // Use a fresh temporary to load the elements without destroying
     // the receiver which is needed for the deferred slow case.
     Result elements = allocator()->Allocate();
     ASSERT(elements.is_valid());
 
+    Result key = frame_->Pop();
+    Result receiver = frame_->Pop();
+    key.ToRegister();
+    receiver.ToRegister();
+
     // Use a fresh temporary for the index and later the loaded
     // value.
     result = allocator()->Allocate();
@@ -8626,6 +8621,7 @@
     __ test(receiver.reg(), Immediate(kSmiTagMask));
     deferred->Branch(zero);
 
+    // Check that the receiver has the expected map.
     // Initially, use an invalid map. The map is patched in the IC
     // initialization code.
     __ bind(deferred->patch_site());
@@ -8659,7 +8655,6 @@
            FieldOperand(elements.reg(), FixedArray::kLengthOffset));
     deferred->Branch(above_equal);
 
-    // Load and check that the result is not the hole.
     __ mov(result.reg(), Operand(elements.reg(),
                                  result.reg(),
                                  times_4,
@@ -8857,10 +8852,8 @@
       Comment cmnt(masm, "[ Load from Slot");
       Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
       ASSERT(slot != NULL);
-      Result result =
-          cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
+      cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
       if (!persist_after_get_) set_unloaded();
-      cgen_->frame()->Push(&result);
       break;
     }
 
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index e00bec7..a098dc3 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -459,8 +459,8 @@
   void LoadWithSafeInt32ModeDisabled(Expression* expr);
 
   // Read a value from a slot and leave it on top of the expression stack.
-  Result LoadFromSlot(Slot* slot, TypeofState typeof_state);
-  Result LoadFromSlotCheckForArguments(Slot* slot, TypeofState typeof_state);
+  void LoadFromSlot(Slot* slot, TypeofState typeof_state);
+  void LoadFromSlotCheckForArguments(Slot* slot, TypeofState typeof_state);
   Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
                                            TypeofState typeof_state,
                                            JumpTarget* slow);
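
With these signature changes a slot load always leaves its value on the virtual frame instead of handing back a Result. A sketch of the new calling convention, using names from this patch: callers that only need the value on the expression stack do nothing further, and callers that need it in a register pop it explicitly.

    LoadFromSlot(slot, NOT_INSIDE_TYPEOF);  // value now on top of the frame

    LoadFromSlot(slot, NOT_INSIDE_TYPEOF);
    Result value = frame_->Pop();           // only when a register is needed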
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 368a8ee..1b78772 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -2222,9 +2222,7 @@
 
   // Check that the object is a JS object but take special care of JS
   // functions to make sure they have 'Function' as their class.
-  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
-  __ movzx_b(ebx, FieldOperand(eax, Map::kInstanceTypeOffset));
-  __ cmp(ebx, FIRST_JS_OBJECT_TYPE);
+  __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, eax);  // Map is now in eax.
   __ j(below, &null);
 
   // As long as JS_FUNCTION_TYPE is the last instance type and it is
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index d7b05cf..eb555d7 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -356,7 +356,7 @@
                         Register holder,
                         Register scratch1,
                         Register scratch2,
-                        JSObject* holder_obj,
+                        JSObject* interceptor_holder,
                         LookupResult* lookup,
                         String* name,
                         Label* miss_label) {
@@ -376,7 +376,8 @@
     }
 
     if (!optimize) {
-      CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
+      CompileRegular(masm, receiver, holder, scratch2, interceptor_holder,
+                     miss_label);
       return;
     }
 
@@ -389,12 +390,17 @@
     __ push(holder);
     __ push(name_);
 
+    // Invoke the interceptor.  Note: the map checks from the receiver
+    // to the interceptor's holder have already been compiled (see the
+    // callers of this method).
     CompileCallLoadPropertyWithInterceptor(masm,
                                            receiver,
                                            holder,
                                            name_,
-                                           holder_obj);
+                                           interceptor_holder);
 
+    // Check whether the interceptor provided a value for the property.
+    // If so, return immediately.
     Label interceptor_failed;
     __ cmp(eax, Factory::no_interceptor_result_sentinel());
     __ j(equal, &interceptor_failed);
@@ -411,47 +417,61 @@
     __ LeaveInternalFrame();
 
     if (lookup->type() == FIELD) {
-      holder = stub_compiler->CheckPrototypes(holder_obj, holder,
+      // We found a FIELD property in the prototype chain of the
+      // interceptor's holder.  Check that the maps from the interceptor's
+      // holder to the field's holder haven't changed...
+      holder = stub_compiler->CheckPrototypes(interceptor_holder, holder,
                                               lookup->holder(), scratch1,
                                               scratch2,
                                               name,
                                               miss_label);
+      // ... and retrieve the field from the field's holder.
       stub_compiler->GenerateFastPropertyLoad(masm, eax,
                                               holder, lookup->holder(),
                                               lookup->GetFieldIndex());
       __ ret(0);
     } else {
+      // We found a CALLBACKS property in the prototype chain of the
+      // interceptor's holder.
       ASSERT(lookup->type() == CALLBACKS);
       ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
       ASSERT(callback != NULL);
       ASSERT(callback->getter() != NULL);
 
+      // Prepare for the tail call: push the receiver after the return
+      // address.
       Label cleanup;
-      __ pop(scratch2);
+      __ pop(scratch2);  // return address
       __ push(receiver);
       __ push(scratch2);
 
-      holder = stub_compiler->CheckPrototypes(holder_obj, holder,
+      // Check that the maps from the interceptor's holder to the
+      // callback's holder haven't changed.
+      holder = stub_compiler->CheckPrototypes(interceptor_holder, holder,
                                               lookup->holder(), scratch1,
                                               scratch2,
                                               name,
                                               &cleanup);
 
-      __ pop(scratch2);  // save old return address
+      // Continue the tail call preparation: push the remaining parameters
+      // after the return address.
+      __ pop(scratch2);  // return address
       __ push(holder);
       __ mov(holder, Immediate(Handle<AccessorInfo>(callback)));
       __ push(holder);
       __ push(FieldOperand(holder, AccessorInfo::kDataOffset));
       __ push(name_);
-      __ push(scratch2);  // restore old return address
+      __ push(scratch2);  // restore return address
 
+      // Tail call to runtime.
       ExternalReference ref =
           ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
       __ TailCallExternalReference(ref, 5, 1);
 
+      // Cleanup code: we pushed the receiver after the return address and
+      // need to remove it from there.
       __ bind(&cleanup);
-      __ pop(scratch1);
-      __ pop(scratch2);
+      __ pop(scratch1);  // return address.
+      __ pop(scratch2);  // receiver.
       __ push(scratch1);
     }
   }
@@ -461,10 +481,10 @@
                       Register receiver,
                       Register holder,
                       Register scratch,
-                      JSObject* holder_obj,
+                      JSObject* interceptor_holder,
                       Label* miss_label) {
     __ pop(scratch);  // save old return address
-    PushInterceptorArguments(masm, receiver, holder, name_, holder_obj);
+    PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
     __ push(scratch);  // restore old return address
 
     ExternalReference ref = ExternalReference(
@@ -626,7 +646,7 @@
                         Register receiver,
                         Register scratch1,
                         Register scratch2,
-                        JSObject* holder_obj,
+                        JSObject* interceptor_holder,
                         LookupResult* lookup,
                         String* name,
                         const CallOptimization& optimization,
@@ -639,10 +659,13 @@
     bool can_do_fast_api_call = false;
     if (optimization.is_simple_api_call() &&
         !lookup->holder()->IsGlobalObject()) {
-      depth1 = optimization.GetPrototypeDepthOfExpectedType(object, holder_obj);
+      depth1 =
+          optimization.GetPrototypeDepthOfExpectedType(object,
+                                                       interceptor_holder);
       if (depth1 == kInvalidProtoDepth) {
-        depth2 = optimization.GetPrototypeDepthOfExpectedType(holder_obj,
-                                                              lookup->holder());
+        depth2 =
+            optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
+                                                         lookup->holder());
       }
       can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
                              (depth2 != kInvalidProtoDepth);
@@ -655,24 +678,32 @@
       ReserveSpaceForFastApiCall(masm, scratch1);
     }
 
+    // Check that the maps from the receiver to the interceptor's holder
+    // haven't changed, so that the interceptor can be invoked safely.
     Label miss_cleanup;
     Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
     Register holder =
-        stub_compiler_->CheckPrototypes(object, receiver, holder_obj,
+        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
                                         scratch1, scratch2, name,
                                         depth1, miss);
 
+    // Invoke the interceptor; if it provides a value, branch to
+    // |regular_invoke|.
     Label regular_invoke;
-    LoadWithInterceptor(masm, receiver, holder, holder_obj, &regular_invoke);
+    LoadWithInterceptor(masm, receiver, holder, interceptor_holder,
+                        &regular_invoke);
 
-    // Generate code for the failed interceptor case.
+    // The interceptor returned nothing for this property.  Try to use the
+    // cached constant function.
 
-    // Check the lookup is still valid.
-    stub_compiler_->CheckPrototypes(holder_obj, receiver,
+    // Check that the maps from the interceptor's holder to the constant
+    // function's holder haven't changed, so the cached constant function
+    // can be used.
+    stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
                                     lookup->holder(),
                                     scratch1, scratch2, name,
                                     depth2, miss);
 
+    // Invoke the function.
     if (can_do_fast_api_call) {
       GenerateFastApiCall(masm, optimization, arguments_.immediate());
     } else {
@@ -680,12 +711,14 @@
                         JUMP_FUNCTION);
     }
 
+    // Deferred code for the fast API call case: free the preallocated
+    // space.
     if (can_do_fast_api_call) {
       __ bind(&miss_cleanup);
       FreeSpaceForFastApiCall(masm, scratch1);
       __ jmp(miss_label);
     }
 
+    // Invoke a regular function.
     __ bind(&regular_invoke);
     if (can_do_fast_api_call) {
       FreeSpaceForFastApiCall(masm, scratch1);
@@ -698,10 +731,10 @@
                       Register scratch1,
                       Register scratch2,
                       String* name,
-                      JSObject* holder_obj,
+                      JSObject* interceptor_holder,
                       Label* miss_label) {
     Register holder =
-        stub_compiler_->CheckPrototypes(object, receiver, holder_obj,
+        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
                                         scratch1, scratch2, name,
                                         miss_label);
 
@@ -713,7 +746,7 @@
                              receiver,
                              holder,
                              name_,
-                             holder_obj);
+                             interceptor_holder);
 
     __ CallExternalReference(
           ExternalReference(
diff --git a/src/jump-target-light.h b/src/jump-target-light.h
index 656ec75..084bd58 100644
--- a/src/jump-target-light.h
+++ b/src/jump-target-light.h
@@ -74,6 +74,8 @@
 
   inline CodeGenerator* cgen();
 
+  Label* entry_label() { return &entry_label_; }
+
   const VirtualFrame* entry_frame() const {
     return entry_frame_set_ ? &entry_frame_ : NULL;
   }
diff --git a/src/objects.cc b/src/objects.cc
index 360eb28..ab678cb 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -189,7 +189,7 @@
   }
 
   UNREACHABLE();
-  return 0;
+  return NULL;
 }
 
 
@@ -1613,7 +1613,7 @@
   }
 
   UNREACHABLE();
-  return 0;
+  return NULL;
 }
 
 
@@ -1657,7 +1657,8 @@
 }
 
 
-Object* JSObject::LookupCallbackSetterInPrototypes(uint32_t index) {
+bool JSObject::SetElementWithCallbackSetterInPrototypes(uint32_t index,
+                                                        Object* value) {
   for (Object* pt = GetPrototype();
        pt != Heap::null_value();
        pt = pt->GetPrototype()) {
@@ -1670,12 +1671,12 @@
       Object* element = dictionary->ValueAt(entry);
       PropertyDetails details = dictionary->DetailsAt(entry);
       if (details.type() == CALLBACKS) {
-        // Only accessors allowed as elements.
-        return FixedArray::cast(element)->get(kSetterIndex);
+        SetElementWithCallback(element, index, value, JSObject::cast(pt));
+        return true;
       }
     }
   }
-  return Heap::undefined_value();
+  return false;
 }
 
 
@@ -2692,30 +2693,11 @@
   // interceptor calls.
   AssertNoContextChange ncc;
 
-  // Check access rights if needed.
-  if (IsAccessCheckNeeded() &&
-      !Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
-    Top::ReportFailedAccessCheck(this, v8::ACCESS_SET);
-    return Heap::undefined_value();
-  }
-
   // Try to flatten before operating on the string.
   name->TryFlatten();
 
-  // Check if there is an API defined callback object which prohibits
-  // callback overwriting in this object or it's prototype chain.
-  // This mechanism is needed for instance in a browser setting, where
-  // certain accessors such as window.location should not be allowed
-  // to be overwritten because allowing overwriting could potentially
-  // cause security problems.
-  LookupResult callback_result;
-  LookupCallback(name, &callback_result);
-  if (callback_result.IsFound()) {
-    Object* obj = callback_result.GetCallbackObject();
-    if (obj->IsAccessorInfo() &&
-        AccessorInfo::cast(obj)->prohibits_overwriting()) {
-      return Heap::undefined_value();
-    }
+  if (!CanSetCallback(name)) {
+    return Heap::undefined_value();
   }
 
   uint32_t index;
@@ -2746,9 +2728,10 @@
           PropertyDetails details = dictionary->DetailsAt(entry);
           if (details.IsReadOnly()) return Heap::undefined_value();
           if (details.type() == CALLBACKS) {
-            // Only accessors allowed as elements.
-            ASSERT(result->IsFixedArray());
-            return result;
+            if (result->IsFixedArray()) {
+              return result;
+            }
+            // Otherwise allow overwriting it.
           }
         }
         break;
@@ -2765,15 +2748,10 @@
       if (result.IsReadOnly()) return Heap::undefined_value();
       if (result.type() == CALLBACKS) {
         Object* obj = result.GetCallbackObject();
+        // Need to preserve old getters/setters.
         if (obj->IsFixedArray()) {
-          // The object might be in fast mode even though it has
-          // a getter/setter.
-          Object* ok = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
-          if (ok->IsFailure()) return ok;
-
-          PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
-          SetNormalizedProperty(name, obj, details);
-          return obj;
+          // Use SetPropertyCallback to update the attributes.
+          return SetPropertyCallback(name, obj, attributes);
         }
       }
     }
@@ -2782,50 +2760,100 @@
   // Allocate the fixed array to hold getter and setter.
   Object* structure = Heap::AllocateFixedArray(2, TENURED);
   if (structure->IsFailure()) return structure;
-  PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
 
   if (is_element) {
-    // Normalize object to make this operation simple.
-    Object* ok = NormalizeElements();
-    if (ok->IsFailure()) return ok;
-
-    // Update the dictionary with the new CALLBACKS property.
-    Object* dict =
-        element_dictionary()->Set(index, structure, details);
-    if (dict->IsFailure()) return dict;
-
-    // If name is an index we need to stay in slow case.
-    NumberDictionary* elements = NumberDictionary::cast(dict);
-    elements->set_requires_slow_elements();
-    // Set the potential new dictionary on the object.
-    set_elements(NumberDictionary::cast(dict));
+    return SetElementCallback(index, structure, attributes);
   } else {
-    // Normalize object to make this operation simple.
-    Object* ok = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
-    if (ok->IsFailure()) return ok;
-
-    // For the global object allocate a new map to invalidate the global inline
-    // caches which have a global property cell reference directly in the code.
-    if (IsGlobalObject()) {
-      Object* new_map = map()->CopyDropDescriptors();
-      if (new_map->IsFailure()) return new_map;
-      set_map(Map::cast(new_map));
-    }
-
-    // Update the dictionary with the new CALLBACKS property.
-    return SetNormalizedProperty(name, structure, details);
+    return SetPropertyCallback(name, structure, attributes);
   }
+}
+
+
+bool JSObject::CanSetCallback(String* name) {
+  ASSERT(!IsAccessCheckNeeded()
+         || Top::MayNamedAccess(this, name, v8::ACCESS_SET));
+
+  // Check if there is an API defined callback object which prohibits
+  // callback overwriting in this object or its prototype chain.
+  // This mechanism is needed for instance in a browser setting, where
+  // certain accessors such as window.location should not be allowed
+  // to be overwritten because allowing overwriting could potentially
+  // cause security problems.
+  LookupResult callback_result;
+  LookupCallback(name, &callback_result);
+  if (callback_result.IsProperty()) {
+    Object* obj = callback_result.GetCallbackObject();
+    if (obj->IsAccessorInfo() &&
+        AccessorInfo::cast(obj)->prohibits_overwriting()) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+
+Object* JSObject::SetElementCallback(uint32_t index,
+                                     Object* structure,
+                                     PropertyAttributes attributes) {
+  PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
+
+  // Normalize elements to make this operation simple.
+  Object* ok = NormalizeElements();
+  if (ok->IsFailure()) return ok;
+
+  // Update the dictionary with the new CALLBACKS property.
+  Object* dict =
+      element_dictionary()->Set(index, structure, details);
+  if (dict->IsFailure()) return dict;
+
+  NumberDictionary* elements = NumberDictionary::cast(dict);
+  elements->set_requires_slow_elements();
+  // Set the potential new dictionary on the object.
+  set_elements(elements);
 
   return structure;
 }
 
 
+Object* JSObject::SetPropertyCallback(String* name,
+                                      Object* structure,
+                                      PropertyAttributes attributes) {
+  PropertyDetails details = PropertyDetails(attributes, CALLBACKS);
+
+  bool convert_back_to_fast = HasFastProperties() &&
+      (map()->instance_descriptors()->number_of_descriptors()
+          < DescriptorArray::kMaxNumberOfDescriptors);
+
+  // Normalize object to make this operation simple.
+  Object* ok = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
+  if (ok->IsFailure()) return ok;
+
+  // For the global object allocate a new map to invalidate the global inline
+  // caches which have a global property cell reference directly in the code.
+  if (IsGlobalObject()) {
+    Object* new_map = map()->CopyDropDescriptors();
+    if (new_map->IsFailure()) return new_map;
+    set_map(Map::cast(new_map));
+  }
+
+  // Update the dictionary with the new CALLBACKS property.
+  Object* result = SetNormalizedProperty(name, structure, details);
+  if (result->IsFailure()) return result;
+
+  if (convert_back_to_fast) {
+    ok = TransformToFastProperties(0);
+    if (ok->IsFailure()) return ok;
+  }
+  return result;
+}
+
 Object* JSObject::DefineAccessor(String* name, bool is_getter, JSFunction* fun,
                                  PropertyAttributes attributes) {
   // Check access rights if needed.
   if (IsAccessCheckNeeded() &&
-      !Top::MayNamedAccess(this, name, v8::ACCESS_HAS)) {
-    Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+      !Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
+    Top::ReportFailedAccessCheck(this, v8::ACCESS_SET);
     return Heap::undefined_value();
   }
 
@@ -2844,6 +2872,78 @@
 }
 
 
+Object* JSObject::DefineAccessor(AccessorInfo* info) {
+  String* name = String::cast(info->name());
+  // Check access rights if needed.
+  if (IsAccessCheckNeeded() &&
+      !Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
+    Top::ReportFailedAccessCheck(this, v8::ACCESS_SET);
+    return Heap::undefined_value();
+  }
+
+  if (IsJSGlobalProxy()) {
+    Object* proto = GetPrototype();
+    if (proto->IsNull()) return this;
+    ASSERT(proto->IsJSGlobalObject());
+    return JSObject::cast(proto)->DefineAccessor(info);
+  }
+
+  // Make sure that the top context does not change when doing callbacks or
+  // interceptor calls.
+  AssertNoContextChange ncc;
+
+  // Try to flatten before operating on the string.
+  name->TryFlatten();
+
+  if (!CanSetCallback(name)) {
+    return Heap::undefined_value();
+  }
+
+  uint32_t index = 0;
+  bool is_element = name->AsArrayIndex(&index);
+
+  if (is_element) {
+    if (IsJSArray()) return Heap::undefined_value();
+
+    // Accessors overwrite previous callbacks (cf. getters/setters).
+    switch (GetElementsKind()) {
+      case FAST_ELEMENTS:
+        break;
+      case PIXEL_ELEMENTS:
+      case EXTERNAL_BYTE_ELEMENTS:
+      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+      case EXTERNAL_SHORT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+      case EXTERNAL_INT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+      case EXTERNAL_FLOAT_ELEMENTS:
+        // Ignore getters and setters on pixel and external array
+        // elements.
+        return Heap::undefined_value();
+      case DICTIONARY_ELEMENTS:
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+
+    SetElementCallback(index, info, info->property_attributes());
+  } else {
+    // Lookup the name.
+    LookupResult result;
+    LocalLookup(name, &result);
+    // ES5 forbids turning a property into an accessor if it's not
+    // configurable (that is, IsDontDelete in ES3 and v8); see 8.6.1
+    // (Table 5).
+    if (result.IsProperty() && (result.IsReadOnly() || result.IsDontDelete())) {
+      return Heap::undefined_value();
+    }
+    SetPropertyCallback(name, info, info->property_attributes());
+  }
+
+  return this;
+}
+
+
 Object* JSObject::LookupAccessor(String* name, bool is_getter) {
   // Make sure that the top context does not change when doing callbacks or
   // interceptor calls.
@@ -2871,8 +2971,9 @@
           Object* element = dictionary->ValueAt(entry);
           PropertyDetails details = dictionary->DetailsAt(entry);
           if (details.type() == CALLBACKS) {
-            // Only accessors allowed as elements.
-            return FixedArray::cast(element)->get(accessor_index);
+            if (element->IsFixedArray()) {
+              return FixedArray::cast(element)->get(accessor_index);
+            }
           }
         }
       }
@@ -5171,22 +5272,7 @@
                   RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
 
   for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
-    RelocInfo::Mode rmode = it.rinfo()->rmode();
-    if (rmode == RelocInfo::EMBEDDED_OBJECT) {
-      v->VisitPointer(it.rinfo()->target_object_address());
-    } else if (RelocInfo::IsCodeTarget(rmode)) {
-      v->VisitCodeTarget(it.rinfo());
-    } else if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
-      v->VisitExternalReference(it.rinfo()->target_reference_address());
-#ifdef ENABLE_DEBUGGER_SUPPORT
-    } else if (Debug::has_break_points() &&
-               RelocInfo::IsJSReturn(rmode) &&
-               it.rinfo()->IsPatchedReturnSequence()) {
-      v->VisitDebugTarget(it.rinfo());
-#endif
-    } else if (rmode == RelocInfo::RUNTIME_ENTRY) {
-      v->VisitRuntimeEntry(it.rinfo());
-    }
+    it.rinfo()->Visit(v);
   }
 
   ScopeInfo<>::IterateScopeInfo(this, v);
@@ -5854,6 +5940,108 @@
 }
 
 
+Object* JSObject::GetElementWithCallback(Object* receiver,
+                                         Object* structure,
+                                         uint32_t index,
+                                         Object* holder) {
+  ASSERT(!structure->IsProxy());
+
+  // API-style callbacks.
+  if (structure->IsAccessorInfo()) {
+    AccessorInfo* data = AccessorInfo::cast(structure);
+    Object* fun_obj = data->getter();
+    v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
+    HandleScope scope;
+    Handle<JSObject> self(JSObject::cast(receiver));
+    Handle<JSObject> holder_handle(JSObject::cast(holder));
+    Handle<Object> number = Factory::NewNumberFromUint(index);
+    Handle<String> key(Factory::NumberToString(number));
+    LOG(ApiNamedPropertyAccess("load", *self, *key));
+    CustomArguments args(data->data(), *self, *holder_handle);
+    v8::AccessorInfo info(args.end());
+    v8::Handle<v8::Value> result;
+    {
+      // Leaving JavaScript.
+      VMState state(EXTERNAL);
+      result = call_fun(v8::Utils::ToLocal(key), info);
+    }
+    RETURN_IF_SCHEDULED_EXCEPTION();
+    if (result.IsEmpty()) return Heap::undefined_value();
+    return *v8::Utils::OpenHandle(*result);
+  }
+
+  // __defineGetter__ callback
+  if (structure->IsFixedArray()) {
+    Object* getter = FixedArray::cast(structure)->get(kGetterIndex);
+    if (getter->IsJSFunction()) {
+      return Object::GetPropertyWithDefinedGetter(receiver,
+                                                  JSFunction::cast(getter));
+    }
+    // Getter is not a function.
+    return Heap::undefined_value();
+  }
+
+  UNREACHABLE();
+  return NULL;
+}
+
+
+Object* JSObject::SetElementWithCallback(Object* structure,
+                                         uint32_t index,
+                                         Object* value,
+                                         JSObject* holder) {
+  HandleScope scope;
+
+  // We should never get here to initialize a const with the hole
+  // value since a const declaration would conflict with the setter.
+  ASSERT(!value->IsTheHole());
+  Handle<Object> value_handle(value);
+
+  // To accommodate both the old and the new api we switch on the
+  // data structure used to store the callbacks.  Eventually proxy
+  // callbacks should be phased out.
+  ASSERT(!structure->IsProxy());
+
+  if (structure->IsAccessorInfo()) {
+    // API-style callbacks.
+    AccessorInfo* data = AccessorInfo::cast(structure);
+    Object* call_obj = data->setter();
+    v8::AccessorSetter call_fun = v8::ToCData<v8::AccessorSetter>(call_obj);
+    if (call_fun == NULL) return value;
+    Handle<Object> number = Factory::NewNumberFromUint(index);
+    Handle<String> key(Factory::NumberToString(number));
+    LOG(ApiNamedPropertyAccess("store", this, *key));
+    CustomArguments args(data->data(), this, JSObject::cast(holder));
+    v8::AccessorInfo info(args.end());
+    {
+      // Leaving JavaScript.
+      VMState state(EXTERNAL);
+      call_fun(v8::Utils::ToLocal(key),
+               v8::Utils::ToLocal(value_handle),
+               info);
+    }
+    RETURN_IF_SCHEDULED_EXCEPTION();
+    return *value_handle;
+  }
+
+  if (structure->IsFixedArray()) {
+    Object* setter = FixedArray::cast(structure)->get(kSetterIndex);
+    if (setter->IsJSFunction()) {
+      return SetPropertyWithDefinedSetter(JSFunction::cast(setter), value);
+    } else {
+      Handle<Object> holder_handle(holder);
+      Handle<Object> key(Factory::NewNumberFromUint(index));
+      Handle<Object> args[2] = { key, holder_handle };
+      return Top::Throw(*Factory::NewTypeError("no_setter_in_callback",
+                                               HandleVector(args, 2)));
+    }
+  }
+
+  UNREACHABLE();
+  return NULL;
+}
+
+
 // Adding n elements in fast case is O(n*n).
 // Note: revisit design to have dual undefined values to capture absent
 // elements.
@@ -5864,9 +6052,8 @@
   uint32_t elms_length = static_cast<uint32_t>(elms->length());
 
   if (!IsJSArray() && (index >= elms_length || elms->get(index)->IsTheHole())) {
-    Object* setter = LookupCallbackSetterInPrototypes(index);
-    if (setter->IsJSFunction()) {
-      return SetPropertyWithDefinedSetter(JSFunction::cast(setter), value);
+    if (SetElementWithCallbackSetterInPrototypes(index, value)) {
+      return value;
     }
   }
 
@@ -5984,18 +6171,7 @@
         Object* element = dictionary->ValueAt(entry);
         PropertyDetails details = dictionary->DetailsAt(entry);
         if (details.type() == CALLBACKS) {
-          // Only accessors allowed as elements.
-          FixedArray* structure = FixedArray::cast(element);
-          if (structure->get(kSetterIndex)->IsJSFunction()) {
-            JSFunction* setter = JSFunction::cast(structure->get(kSetterIndex));
-            return SetPropertyWithDefinedSetter(setter, value);
-          } else {
-            Handle<Object> self(this);
-            Handle<Object> key(Factory::NewNumberFromUint(index));
-            Handle<Object> args[2] = { key, self };
-            return Top::Throw(*Factory::NewTypeError("no_setter_in_callback",
-                                                     HandleVector(args, 2)));
-          }
+          return SetElementWithCallback(element, index, value, this);
         } else {
           dictionary->UpdateMaxNumberKey(index);
           dictionary->ValueAtPut(entry, value);
@@ -6003,10 +6179,8 @@
       } else {
         // Index not already used. Look for an accessor in the prototype chain.
         if (!IsJSArray()) {
-          Object* setter = LookupCallbackSetterInPrototypes(index);
-          if (setter->IsJSFunction()) {
-            return SetPropertyWithDefinedSetter(JSFunction::cast(setter),
-                                                value);
+          if (SetElementWithCallbackSetterInPrototypes(index, value)) {
+            return value;
           }
         }
         Object* result = dictionary->AtNumberPut(index, value);
@@ -6109,16 +6283,10 @@
         Object* element = dictionary->ValueAt(entry);
         PropertyDetails details = dictionary->DetailsAt(entry);
         if (details.type() == CALLBACKS) {
-          // Only accessors allowed as elements.
-          FixedArray* structure = FixedArray::cast(element);
-          Object* getter = structure->get(kGetterIndex);
-          if (getter->IsJSFunction()) {
-            return GetPropertyWithDefinedGetter(receiver,
-                                                JSFunction::cast(getter));
-          } else {
-            // Getter is not a function.
-            return Heap::undefined_value();
-          }
+          return GetElementWithCallback(receiver,
+                                        element,
+                                        index,
+                                        this);
         }
         return element;
       }
@@ -6266,16 +6434,10 @@
         Object* element = dictionary->ValueAt(entry);
         PropertyDetails details = dictionary->DetailsAt(entry);
         if (details.type() == CALLBACKS) {
-          // Only accessors allowed as elements.
-          FixedArray* structure = FixedArray::cast(element);
-          Object* getter = structure->get(kGetterIndex);
-          if (getter->IsJSFunction()) {
-            return GetPropertyWithDefinedGetter(receiver,
-                                                JSFunction::cast(getter));
-          } else {
-            // Getter is not a function.
-            return Heap::undefined_value();
-          }
+          return GetElementWithCallback(receiver,
+                                        element,
+                                        index,
+                                        this);
         }
         return element;
       }
diff --git a/src/objects.h b/src/objects.h
index 7f9c2a0..8e89e8f 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1248,6 +1248,8 @@
                          PropertyAttributes attributes);
   Object* LookupAccessor(String* name, bool is_getter);
 
+  Object* DefineAccessor(AccessorInfo* info);
+
   // Used from Object::GetProperty().
   Object* GetPropertyWithFailedAccessCheck(Object* receiver,
                                            LookupResult* result,
@@ -1370,7 +1372,7 @@
   void LookupRealNamedProperty(String* name, LookupResult* result);
   void LookupRealNamedPropertyInPrototypes(String* name, LookupResult* result);
   void LookupCallbackSetterInPrototypes(String* name, LookupResult* result);
-  Object* LookupCallbackSetterInPrototypes(uint32_t index);
+  bool SetElementWithCallbackSetterInPrototypes(uint32_t index, Object* value);
   void LookupCallback(String* name, LookupResult* result);
 
   // Returns the number of properties on this object filtering out properties
@@ -1539,6 +1541,14 @@
   Object* GetElementWithInterceptor(JSObject* receiver, uint32_t index);
 
  private:
+  Object* GetElementWithCallback(Object* receiver,
+                                 Object* structure,
+                                 uint32_t index,
+                                 Object* holder);
+  Object* SetElementWithCallback(Object* structure,
+                                 uint32_t index,
+                                 Object* value,
+                                 JSObject* holder);
   Object* SetElementWithInterceptor(uint32_t index, Object* value);
   Object* SetElementWithoutInterceptor(uint32_t index, Object* value);
 
@@ -1569,6 +1579,13 @@
   // Returns true if most of the elements backing storage is used.
   bool HasDenseElements();
 
+  bool CanSetCallback(String* name);
+  Object* SetElementCallback(uint32_t index,
+                             Object* structure,
+                             PropertyAttributes attributes);
+  Object* SetPropertyCallback(String* name,
+                              Object* structure,
+                              PropertyAttributes attributes);
   Object* DefineGetterSetter(String* name, PropertyAttributes attributes);
 
   void LookupInDescriptor(String* name, LookupResult* result);
diff --git a/src/parser.cc b/src/parser.cc
index c482fdf..bbf71bc 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -5073,12 +5073,12 @@
 
 
 int ScriptDataImpl::Length() {
-  return store_.length();
+  return store_.length() * sizeof(unsigned);
 }
 
 
-unsigned* ScriptDataImpl::Data() {
-  return store_.start();
+const char* ScriptDataImpl::Data() {
+  return reinterpret_cast<const char*>(store_.start());
 }
 
 
diff --git a/src/parser.h b/src/parser.h
index 2e5daf9..89966a6 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -90,7 +90,7 @@
         last_entry_(0) { }
   virtual ~ScriptDataImpl();
   virtual int Length();
-  virtual unsigned* Data();
+  virtual const char* Data();
   virtual bool HasError();
   FunctionEntry GetFunctionEnd(int start);
   bool SanityCheck();
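
Length() is now measured in chars and Data() exposes the store as raw bytes, matching the revised public ScriptData interface. A hedged round-trip sketch (the buffer handling is illustrative only; error checks omitted):

    #include <v8.h>
    #include <string.h>

    void RoundTrip(const char* source) {
      // Pre-compile and copy out the serialized, platform-dependent bytes.
      v8::ScriptData* pre =
          v8::ScriptData::PreCompile(source, static_cast<int>(strlen(source)));
      int length = pre->Length();
      char* stored = new char[length];
      memcpy(stored, pre->Data(), length);
      delete pre;
      // Later: rebuild ScriptData from the bytes.  New() does not take
      // ownership, so 'stored' must outlive 'restored'.
      v8::ScriptData* restored = v8::ScriptData::New(stored, length);
      delete restored;
      delete[] stored;
    }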
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index b1075cf..72fe088 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -290,7 +290,7 @@
 
   int frames_count = backtrace(addresses.start(), frames_size);
 
-  char** symbols = backtrace_symbols(addresses, frames_count);
+  char** symbols = backtrace_symbols(addresses.start(), frames_count);
   if (symbols == NULL) {
     return kStackWalkError;
   }
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index fca218f..ff1ecb1 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -165,6 +165,28 @@
 }
 
 
+#ifdef V8_TARGET_ARCH_ARM
+// 0xffff0fa0 is the hard coded address of a function provided by
+// the kernel which implements a memory barrier. On older
+// ARM architecture revisions (pre-v6) this may be implemented using
+// a syscall. This address is stable, and in active use (hard coded)
+// by at least glibc-2.7 and the Android C library.
+typedef void (*LinuxKernelMemoryBarrierFunc)(void);
+LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
+    (LinuxKernelMemoryBarrierFunc) 0xffff0fa0;
+#endif
+
+void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
+#if defined(V8_TARGET_ARCH_ARM) && defined(__arm__)  // don't use on a simulator
+  pLinuxKernelMemoryBarrier();
+#else
+  // An x86 store already acts as a release barrier, so a compiler
+  // barrier is all that is needed.
+  __asm__ __volatile__("" : : : "memory");
+#endif
+  *ptr = value;
+}
+
+
 const char* OS::LocalTimezone(double time) {
   if (isnan(time)) return "";
   time_t tv = static_cast<time_t>(floor(time/msPerSecond));
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index 23747c3..47193de 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -39,6 +39,7 @@
 #include <pthread.h>
 #include <semaphore.h>
 #include <signal.h>
+#include <libkern/OSAtomic.h>
 #include <mach/mach.h>
 #include <mach/semaphore.h>
 #include <mach/task.h>
@@ -259,6 +260,12 @@
 }
 
 
+void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
+  OSMemoryBarrier();
+  *ptr = value;
+}
+
+
 const char* OS::LocalTimezone(double time) {
   if (isnan(time)) return "";
   time_t tv = static_cast<time_t>(floor(time/msPerSecond));
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index bee5173..e2d123c 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -1340,6 +1340,12 @@
 }
 
 
+void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+
 bool VirtualMemory::IsReserved() {
   return address_ != NULL;
 }
diff --git a/src/platform.h b/src/platform.h
index 606e5b4..d63ca5e 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -277,6 +277,8 @@
   // the platform doesn't care. Guaranteed to be a power of two.
   static int ActivationFrameAlignment();
 
+  static void ReleaseStore(volatile AtomicWord* ptr, AtomicWord value);
+
  private:
   static const int msPerSecond = 1000;
 
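
ReleaseStore gives every platform a store with release semantics; its first consumer is the lock-free queue introduced below. A minimal publish sketch, assuming a single writer and that the code lives inside v8::internal with platform.h included (Node and 'published' are hypothetical):

    struct Node { int payload; };
    static AtomicWord published = 0;  // really a Node*, 0 while empty

    static void Publish(int value) {
      Node* n = new Node;
      n->payload = value;
      // The release barrier orders the initialization above before the
      // publishing store, so a reader never sees a half-built Node.
      OS::ReleaseStore(&published, reinterpret_cast<AtomicWord>(n));
    }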
diff --git a/src/runtime.js b/src/runtime.js
index 8e3883f..3e4d473 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -559,20 +559,15 @@
 // ES5, section 9.12
 function SameValue(x, y) {
   if (typeof x != typeof y) return false;
-  if (IS_NULL_OR_UNDEFINED(x)) return true;
   if (IS_NUMBER(x)) {
     if (NUMBER_IS_NAN(x) && NUMBER_IS_NAN(y)) return true;
-    // x is +0 and y is -0 or vice versa
-    if (x === 0 && y === 0 && !%_IsSmi(x) && !%_IsSmi(y) &&
-        ((1 / x < 0 && 1 / y > 0) || (1 / x > 0 && 1 / y < 0))) {
+    // x is +0 and y is -0 or vice versa.
+    if (x === 0 && y === 0 && (1 / x) != (1 / y)) {
       return false;
     }
-    return x == y;
+    return x === y;
   }
-  if (IS_STRING(x)) return %StringEquals(x, y);
-  if (IS_BOOLEAN(x))return %NumberEquals(%ToNumber(x),%ToNumber(y));
-
-  return %_ObjectEquals(x, y);
+  return x === y;
 }
 
 
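
The simplified SameValue leans on strict equality for everything except two numeric edge cases: NaN must equal NaN, and +0 must differ from -0. A C++ analogue of the numeric branch (a sketch, not V8 code):

    // x != x holds only for NaN; dividing 1 by a signed zero yields
    // +Infinity or -Infinity, which distinguishes +0 from -0.
    static bool SameValueNumber(double x, double y) {
      if (x != x && y != y) return true;                // both NaN
      if (x == 0 && y == 0) return (1 / x) == (1 / y);  // +0 vs. -0
      return x == y;
    }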
diff --git a/src/serialize.cc b/src/serialize.cc
index a3a2ba9..06c6df7 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -1299,7 +1299,7 @@
   }
   sink_->Put(kExternalReference + representation, "ExternalReference");
   sink_->PutInt(encoding, "reference id");
-  bytes_processed_so_far_ += Assembler::kExternalTargetSize;
+  bytes_processed_so_far_ += rinfo->target_address_size();
 }
 
 
@@ -1309,7 +1309,7 @@
   OutputRawData(target_start);
   Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
   serializer_->SerializeObject(target, kFromCode, kFirstInstruction);
-  bytes_processed_so_far_ += Assembler::kCallTargetSize;
+  bytes_processed_so_far_ += rinfo->target_address_size();
 }
 
 
diff --git a/src/top.cc b/src/top.cc
index 87dc1f6..516ec67 100644
--- a/src/top.cc
+++ b/src/top.cc
@@ -370,8 +370,7 @@
   v8::HandleScope scope;
   // Ensure no negative values.
   int limit = Max(frame_limit, 0);
-  Handle<JSArray> stackTrace = Factory::NewJSArray(frame_limit);
-  FixedArray* frames = FixedArray::cast(stackTrace->elements());
+  Handle<JSArray> stack_trace = Factory::NewJSArray(limit);
 
   Handle<String> column_key =  Factory::LookupAsciiSymbol("column");
   Handle<String> line_key =  Factory::LookupAsciiSymbol("lineNumber");
@@ -438,13 +437,13 @@
       SetProperty(stackFrame, constructor_key, is_constructor, NONE);
     }
 
-    frames->set(frames_seen, *stackFrame);
+    FixedArray::cast(stack_trace->elements())->set(frames_seen, *stackFrame);
     frames_seen++;
     it.Advance();
   }
 
-  stackTrace->set_length(Smi::FromInt(frames_seen));
-  return scope.Close(Utils::StackTraceToLocal(stackTrace));
+  stack_trace->set_length(Smi::FromInt(frames_seen));
+  return scope.Close(Utils::StackTraceToLocal(stack_trace));
 }
 
 
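
The hunk above also fixes a GC hazard: the removed code cached a raw FixedArray* across loop iterations that allocate (SetProperty and factory calls), and a collection during those allocations may move the array's backing store. A sketch of the safe pattern, with names from the hunk:

    // Handles are updated by the GC; raw interior pointers are not, so
    // re-derive the elements pointer from the handle at every use.
    static void StoreFrame(Handle<JSArray> stack_trace,
                           Handle<JSObject> frame,
                           int index) {
      FixedArray::cast(stack_trace->elements())->set(index, *frame);
    }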
diff --git a/src/unbound-queue-inl.h b/src/unbound-queue-inl.h
new file mode 100644
index 0000000..ff5d833
--- /dev/null
+++ b/src/unbound-queue-inl.h
@@ -0,0 +1,87 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_UNBOUND_QUEUE_INL_H_
+#define V8_UNBOUND_QUEUE_INL_H_
+
+#include "unbound-queue.h"
+
+namespace v8 {
+namespace internal {
+
+template<typename Record>
+struct UnboundQueue<Record>::Node: public Malloced {
+  explicit Node(const Record& value)
+      : value(value), next(NULL) {
+  }
+
+  Record value;
+  Node* next;
+};
+
+
+template<typename Record>
+UnboundQueue<Record>::UnboundQueue() {
+  first_ = new Node(Record());
+  divider_ = last_ = reinterpret_cast<AtomicWord>(first_);
+}
+
+
+template<typename Record>
+UnboundQueue<Record>::~UnboundQueue() {
+  while (first_ != NULL) DeleteFirst();
+}
+
+
+template<typename Record>
+void UnboundQueue<Record>::DeleteFirst() {
+  Node* tmp = first_;
+  first_ = tmp->next;
+  delete tmp;
+}
+
+
+template<typename Record>
+void UnboundQueue<Record>::Dequeue(Record* rec) {
+  ASSERT(divider_ != last_);
+  Node* next = reinterpret_cast<Node*>(divider_)->next;
+  *rec = next->value;
+  OS::ReleaseStore(&divider_, reinterpret_cast<AtomicWord>(next));
+}
+
+
+template<typename Record>
+void UnboundQueue<Record>::Enqueue(const Record& rec) {
+  Node*& next = reinterpret_cast<Node*>(last_)->next;
+  next = new Node(rec);
+  OS::ReleaseStore(&last_, reinterpret_cast<AtomicWord>(next));
+  while (first_ != reinterpret_cast<Node*>(divider_)) DeleteFirst();
+}
+
+} }  // namespace v8::internal
+
+#endif  // V8_UNBOUND_QUEUE_INL_H_
diff --git a/src/unbound-queue.h b/src/unbound-queue.h
new file mode 100644
index 0000000..7bc314b
--- /dev/null
+++ b/src/unbound-queue.h
@@ -0,0 +1,66 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_UNBOUND_QUEUE_
+#define V8_UNBOUND_QUEUE_
+
+namespace v8 {
+namespace internal {
+
+
+// Lock-free unbounded queue for small records.  Intended for
+// transferring small records between a single producer and a single
+// consumer.  Does not restrict the number of queued elements, so the
+// producer never blocks.  Implemented after Herb Sutter's article:
+// http://www.ddj.com/high-performance-computing/210604448
+template<typename Record>
+class UnboundQueue BASE_EMBEDDED {
+ public:
+  inline UnboundQueue();
+  inline ~UnboundQueue();
+
+  INLINE(void Dequeue(Record* rec));
+  INLINE(void Enqueue(const Record& rec));
+  INLINE(bool IsEmpty()) { return divider_ == last_; }
+
+ private:
+  INLINE(void DeleteFirst());
+
+  struct Node;
+
+  Node* first_;
+  AtomicWord divider_;  // Node*
+  AtomicWord last_;     // Node*
+
+  DISALLOW_COPY_AND_ASSIGN(UnboundQueue);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_UNBOUND_QUEUE_
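
A usage sketch for the new queue (the Sample record is hypothetical). The queue is safe for exactly one producer thread and one consumer thread, and Dequeue may only be called once IsEmpty() has returned false on the consumer side:

    #include "unbound-queue-inl.h"

    struct Sample { int id; };

    static v8::internal::UnboundQueue<Sample> queue;

    // Producer thread: never blocks; each Enqueue allocates one node.
    void Produce(int id) {
      Sample s = { id };
      queue.Enqueue(s);
    }

    // Consumer thread: drains whatever has been published so far.
    void Consume() {
      while (!queue.IsEmpty()) {
        Sample s;
        queue.Dequeue(&s);
      }
    }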
diff --git a/src/v8natives.js b/src/v8natives.js
index 531bd0e..ed392e2 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -434,6 +434,11 @@
 }
 
 
+PropertyDescriptor.prototype.hasWritable = function() {
+  return this.hasWritable_;
+}
+
+
 PropertyDescriptor.prototype.setConfigurable = function(configurable) {
   this.configurable_ = configurable;
   this.hasConfigurable_ = true;
@@ -537,6 +542,22 @@
     throw MakeTypeError("define_disallowed", ["defineProperty"]);
 
   if (!IS_UNDEFINED(current) && !current.isConfigurable()) {
+    // Steps 5 and 6: the redefinition is a no-op when every attribute
+    // the descriptor specifies matches the current property.
+    if ((!desc.hasEnumerable() ||
+         SameValue(desc.isEnumerable(), current.isEnumerable())) &&
+        (!desc.hasConfigurable() ||
+         SameValue(desc.isConfigurable(), current.isConfigurable())) &&
+        (!desc.hasWritable() ||
+         SameValue(desc.isWritable(), current.isWritable())) &&
+        (!desc.hasValue() ||
+         SameValue(desc.getValue(), current.getValue())) &&
+        (!desc.hasGetter() ||
+         SameValue(desc.getGet(), current.getGet())) &&
+        (!desc.hasSetter() ||
+         SameValue(desc.getSet(), current.getSet()))) {
+      return true;
+    }
+
     // Step 7
     if (desc.isConfigurable() ||  desc.isEnumerable() != current.isEnumerable())
       throw MakeTypeError("redefine_disallowed", ["defineProperty"]);
@@ -583,7 +604,13 @@
     flag |= DONT_DELETE;
 
   if (IsDataDescriptor(desc) || IsGenericDescriptor(desc)) {
-    flag |= desc.isWritable() ? 0 : READ_ONLY;
+    if (desc.hasWritable()) {
+      flag |= desc.isWritable() ? 0 : READ_ONLY;
+    } else if (!IS_UNDEFINED(current)) {
+      flag |= current.isWritable() ? 0 : READ_ONLY;
+    } else {
+      flag |= READ_ONLY;
+    }
     %DefineOrRedefineDataProperty(obj, p, desc.getValue(), flag);
   } else {
     if (desc.hasGetter() && IS_FUNCTION(desc.getGet())) {
@@ -673,8 +700,9 @@
 // ES5 section 15.2.3.6.
 function ObjectDefineProperty(obj, p, attributes) {
   if ((!IS_SPEC_OBJECT_OR_NULL(obj) || IS_NULL_OR_UNDEFINED(obj)) &&
-      !IS_UNDETECTABLE(obj))
+      !IS_UNDETECTABLE(obj)) {
     throw MakeTypeError("obj_ctor_property_non_object", ["defineProperty"]);
+  }
   var name = ToString(p);
   var desc = ToPropertyDescriptor(attributes);
   DefineOwnProperty(obj, name, desc, true);
diff --git a/src/version.cc b/src/version.cc
index b05251f..adeee59 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     2
 #define MINOR_VERSION     2
-#define BUILD_NUMBER      11
+#define BUILD_NUMBER      12
 #define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false
 
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index be7cfe0..4c69510 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -29,6 +29,7 @@
 #define V8_X64_ASSEMBLER_X64_INL_H_
 
 #include "cpu.h"
+#include "debug.h"
 #include "memory.h"
 
 namespace v8 {
@@ -229,6 +230,15 @@
 }
 
 
+int RelocInfo::target_address_size() {
+  if (IsCodedSpecially()) {
+    return Assembler::kCallTargetSize;
+  } else {
+    return Assembler::kExternalTargetSize;
+  }
+}
+
+
 void RelocInfo::set_target_address(Address target) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
   if (IsCodeTarget(rmode_)) {
@@ -320,6 +330,27 @@
       pc_ + Assembler::kPatchReturnSequenceAddressOffset);
 }
 
+
+void RelocInfo::Visit(ObjectVisitor* visitor) {
+  RelocInfo::Mode mode = rmode();
+  if (mode == RelocInfo::EMBEDDED_OBJECT) {
+    visitor->VisitPointer(target_object_address());
+  } else if (RelocInfo::IsCodeTarget(mode)) {
+    visitor->VisitCodeTarget(this);
+  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+    visitor->VisitExternalReference(target_reference_address());
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  } else if (Debug::has_break_points() &&
+             RelocInfo::IsJSReturn(mode) &&
+             IsPatchedReturnSequence()) {
+    visitor->VisitDebugTarget(this);
+#endif
+  } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+    visitor->VisitRuntimeEntry(this);
+  }
+}
+
+
 // -----------------------------------------------------------------------------
 // Implementation of Operand
 
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 3d03949..9f26496 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -239,6 +239,52 @@
 }
 
 
+Operand::Operand(const Operand& operand, int32_t offset) {
+  ASSERT(operand.len_ >= 1);
+  // Operand encodes REX ModR/M [SIB] [Disp].
+  byte modrm = operand.buf_[0];
+  ASSERT(modrm < 0xC0);  // Disallow mode 3 (register target).
+  bool has_sib = ((modrm & 0x07) == 0x04);
+  byte mode = modrm & 0xC0;
+  int disp_offset = has_sib ? 2 : 1;
+  int base_reg = (has_sib ? operand.buf_[1] : modrm) & 0x07;
+  // Mode 0 with rbp/r13 as ModR/M or SIB base register always has a 32-bit
+  // displacement.
+  bool is_baseless = (mode == 0) && (base_reg == 0x05);  // No base or RIP base.
+  int32_t disp_value = 0;
+  if (mode == 0x80 || is_baseless) {
+    // Mode 2 or mode 0 with rbp/r13 as base: Word displacement.
+    disp_value = *reinterpret_cast<const int32_t*>(&operand.buf_[disp_offset]);
+  } else if (mode == 0x40) {
+    // Mode 1: Byte displacement.
+    disp_value = static_cast<signed char>(operand.buf_[disp_offset]);
+  }
+
+  // Write new operand with same registers, but with modified displacement.
+  ASSERT(offset >= 0 ? disp_value + offset >= disp_value
+                     : disp_value + offset < disp_value);  // No overflow.
+  disp_value += offset;
+  rex_ = operand.rex_;
+  if (!is_int8(disp_value) || is_baseless) {
+    // Need 32 bits of displacement: mode 2, or mode 0 with rbp/r13 as base.
+    buf_[0] = (modrm & 0x3f) | (is_baseless ? 0x00 : 0x80);
+    len_ = disp_offset + 4;
+    Memory::int32_at(&buf_[disp_offset]) = disp_value;
+  } else if (disp_value != 0 || (base_reg == 0x05)) {
+    // Need 8 bits of displacement.
+    buf_[0] = (modrm & 0x3f) | 0x40;  // Mode 1.
+    len_ = disp_offset + 1;
+    buf_[disp_offset] = static_cast<byte>(disp_value);
+  } else {
+    // Need no displacement.
+    buf_[0] = (modrm & 0x3f);  // Mode 0.
+    len_ = disp_offset;
+  }
+  if (has_sib) {
+    buf_[1] = operand.buf_[1];
+  }
+}
+
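The new Operand(const Operand&, int32_t) constructor re-encodes the copied operand with the narrowest displacement x64 allows: mode 0 (no displacement), mode 1 (8-bit), or mode 2 (32-bit), with the rbp/r13 base register handled specially because its mode-0 encoding means RIP-relative or baseless addressing. Below is a standalone sketch of just that width decision; the names are illustrative and nothing is taken from V8 itself.

// Standalone sketch (not V8 code) of the displacement-width decision.
#include <cstdint>
#include <cstdio>

enum class DispWidth { kNone, kByte, kWord };

DispWidth PickDispWidth(int32_t disp, bool base_is_rbp_or_r13,
                        bool is_baseless) {
  if (disp >= -128 && disp <= 127 && !is_baseless) {
    if (disp == 0 && !base_is_rbp_or_r13) return DispWidth::kNone;  // mode 0
    return DispWidth::kByte;                                        // mode 1
  }
  return DispWidth::kWord;  // mode 2, or mode 0 for RIP/baseless operands
}

int main() {
  printf("%d\n", static_cast<int>(PickDispWidth(0, false, false)));    // kNone
  printf("%d\n", static_cast<int>(PickDispWidth(8, false, false)));    // kByte
  printf("%d\n", static_cast<int>(PickDispWidth(0, true, false)));     // kByte
  printf("%d\n", static_cast<int>(PickDispWidth(512, false, false)));  // kWord
  return 0;
}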
 // -----------------------------------------------------------------------------
 // Implementation of Assembler.
 
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index b55a7b7..3db4d08 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -300,12 +300,16 @@
           ScaleFactor scale,
           int32_t disp);
 
+  // Offset from an existing memory operand.
+  // The offset is added to the existing displacement as 32-bit signed
+  // values; the addition must not overflow.
+  Operand(const Operand& base, int32_t offset);
+
  private:
   byte rex_;
   byte buf_[10];
   // The number of bytes in buf_.
   unsigned int len_;
-  RelocInfo::Mode rmode_;
 
   // Set the ModR/M byte without an encoded 'reg' register. The
   // register is encoded later as part of the emit_operand operation.
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 6f6670a..767c33f 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -603,9 +603,8 @@
  public:
   explicit DeferredReferenceGetKeyedValue(Register dst,
                                           Register receiver,
-                                          Register key,
-                                          bool is_global)
-      : dst_(dst), receiver_(receiver), key_(key), is_global_(is_global) {
+                                          Register key)
+      : dst_(dst), receiver_(receiver), key_(key) {
     set_comment("[ DeferredReferenceGetKeyedValue");
   }
 
@@ -618,7 +617,6 @@
   Register dst_;
   Register receiver_;
   Register key_;
-  bool is_global_;
 };
 
 
@@ -633,10 +631,7 @@
   // This means that we cannot allow test instructions after calls to
   // KeyedLoadIC stubs in other places.
   Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-  RelocInfo::Mode mode = is_global_
-                         ? RelocInfo::CODE_TARGET_CONTEXT
-                         : RelocInfo::CODE_TARGET;
-  __ Call(ic, mode);
+  __ Call(ic, RelocInfo::CODE_TARGET);
   // The delta from the start of the map-compare instruction to the
   // test instruction.  We use masm_-> directly here instead of the __
   // macro because the macro sometimes uses macro expansion to turn
@@ -5693,7 +5688,7 @@
                                                     slow));
           frame_->Push(&arguments);
           frame_->Push(key_literal->handle());
-          *result = EmitKeyedLoad(false);
+          *result = EmitKeyedLoad();
           frame_->Drop(2);  // Drop key and receiver.
           done->Jump(result);
         }
@@ -7188,8 +7183,89 @@
 }
 
 
-Result CodeGenerator::EmitKeyedLoad(bool is_global) {
-  Comment cmnt(masm_, "[ Load from keyed Property");
+Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
+#ifdef DEBUG
+  int original_height = frame()->height();
+#endif
+  Result result;
+  // Do not inline the inobject property case for loads from the global
+  // object.  Also do not inline for unoptimized code.  This saves time
+  // in the code generator.  Unoptimized code is toplevel code or code
+  // that is not in a loop.
+  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
+    Comment cmnt(masm(), "[ Load from named Property");
+    frame()->Push(name);
+
+    RelocInfo::Mode mode = is_contextual
+        ? RelocInfo::CODE_TARGET_CONTEXT
+        : RelocInfo::CODE_TARGET;
+    result = frame()->CallLoadIC(mode);
+    // A test rax instruction following the call signals that the
+    // inobject property case was inlined.  Ensure that there is not
+    // a test rax instruction here.
+    __ nop();
+  } else {
+    // Inline the inobject property case.
+    Comment cmnt(masm(), "[ Inlined named property load");
+    Result receiver = frame()->Pop();
+    receiver.ToRegister();
+    result = allocator()->Allocate();
+    ASSERT(result.is_valid());
+
+    // Cannot use r12 for receiver, because that changes
+    // the distance between a call and a fixup location,
+    // due to a special encoding of r12 as r/m in a ModR/M byte.
+    if (receiver.reg().is(r12)) {
+      frame()->Spill(receiver.reg());  // It will be overwritten with result.
+      // Swap receiver and value.
+      __ movq(result.reg(), receiver.reg());
+      Result temp = receiver;
+      receiver = result;
+      result = temp;
+    }
+
+    DeferredReferenceGetNamedValue* deferred =
+        new DeferredReferenceGetNamedValue(result.reg(), receiver.reg(), name);
+
+    // Check that the receiver is a heap object.
+    __ JumpIfSmi(receiver.reg(), deferred->entry_label());
+
+    __ bind(deferred->patch_site());
+    // This is the map check instruction that will be patched (so we can't
+    // use the double underscore macro that may insert instructions).
+    // Initially use an invalid map to force a failure.
+    masm()->Move(kScratchRegister, Factory::null_value());
+    masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+                 kScratchRegister);
+    // This branch is always a forwards branch so it's always a fixed
+    // size which allows the assert below to succeed and patching to work.
+    // Don't use deferred->Branch(...), since that might add coverage code.
+    masm()->j(not_equal, deferred->entry_label());
+
+    // The delta from the patch label to the load offset must be
+    // statically known.
+    ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
+           LoadIC::kOffsetToLoadInstruction);
+    // The initial (invalid) offset has to be large enough to force
+    // a 32-bit instruction encoding to allow patching with an
+    // arbitrary offset.  Use kMaxInt (minus kHeapObjectTag).
+    int offset = kMaxInt;
+    masm()->movq(result.reg(), FieldOperand(receiver.reg(), offset));
+
+    __ IncrementCounter(&Counters::named_load_inline, 1);
+    deferred->BindExit();
+    frame()->Push(&receiver);
+  }
+  ASSERT(frame()->height() == original_height);
+  return result;
+}
+
+
+Result CodeGenerator::EmitKeyedLoad() {
+#ifdef DEBUG
+  int original_height = frame()->height();
+#endif
+  Result result;
   // Inline array load code if inside of a loop.  We do not know
   // the receiver map yet, so we initially generate the code with
   // a check against an invalid map.  In the inline cache code, we
@@ -7197,34 +7273,30 @@
   if (loop_nesting() > 0) {
     Comment cmnt(masm_, "[ Inlined load from keyed Property");
 
+    // Use a fresh temporary to load the elements without destroying the
+    // receiver, which is needed for the deferred slow case.  Allocate the
+    // temporary early so that we use rax if it is free.
+    Result elements = allocator()->Allocate();
+    ASSERT(elements.is_valid());
+
     Result key = frame_->Pop();
     Result receiver = frame_->Pop();
     key.ToRegister();
     receiver.ToRegister();
 
-    // Use a fresh temporary to load the elements without destroying
-    // the receiver which is needed for the deferred slow case.
-    Result elements = allocator()->Allocate();
-    ASSERT(elements.is_valid());
-
-    // Use a fresh temporary for the index and later the loaded
-    // value.
+    // Use a fresh temporary for the index.
     Result index = allocator()->Allocate();
     ASSERT(index.is_valid());
 
     DeferredReferenceGetKeyedValue* deferred =
-        new DeferredReferenceGetKeyedValue(index.reg(),
+        new DeferredReferenceGetKeyedValue(elements.reg(),
                                            receiver.reg(),
-                                           key.reg(),
-                                           is_global);
+                                           key.reg());
 
-    // Check that the receiver is not a smi (only needed if this
-    // is not a load from the global context) and that it has the
-    // expected map.
-    if (!is_global) {
-      __ JumpIfSmi(receiver.reg(), deferred->entry_label());
-    }
+    __ JumpIfSmi(receiver.reg(), deferred->entry_label());
 
+    // Check that the receiver has the expected map.
     // Initially, use an invalid map. The map is patched in the IC
     // initialization code.
     __ bind(deferred->patch_site());
@@ -7255,7 +7327,6 @@
     __ cmpl(index.reg(),
             FieldOperand(elements.reg(), FixedArray::kLengthOffset));
     deferred->Branch(above_equal);
-
     // The index register holds the un-smi-tagged key. It has been
     // zero-extended to 64-bits, so it can be used directly as index in the
     // operand below.
@@ -7266,39 +7337,33 @@
     // heuristic about which register to reuse.  For example, if
     // one is rax, then we can reuse that one because the value
     // coming from the deferred code will be in rax.
-    Result value = index;
-    __ movq(value.reg(),
+    __ movq(elements.reg(),
             Operand(elements.reg(),
                     index.reg(),
                     times_pointer_size,
                     FixedArray::kHeaderSize - kHeapObjectTag));
+    result = elements;
     elements.Unuse();
     index.Unuse();
-    __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
+    __ CompareRoot(result.reg(), Heap::kTheHoleValueRootIndex);
     deferred->Branch(equal);
     __ IncrementCounter(&Counters::keyed_load_inline, 1);
 
     deferred->BindExit();
-    // Restore the receiver and key to the frame and push the
-    // result on top of it.
     frame_->Push(&receiver);
     frame_->Push(&key);
-    return value;
-
   } else {
     Comment cmnt(masm_, "[ Load from keyed Property");
-    RelocInfo::Mode mode = is_global
-        ? RelocInfo::CODE_TARGET_CONTEXT
-        : RelocInfo::CODE_TARGET;
-    Result answer = frame_->CallKeyedLoadIC(mode);
+    result = frame_->CallKeyedLoadIC(RelocInfo::CODE_TARGET);
     // Make sure that we do not have a test instruction after the
     // call.  A test instruction after the call is used to
     // indicate that we have generated an inline version of the
     // keyed load.  The explicit nop instruction is here because
     // the push that follows might be peep-hole optimized away.
     __ nop();
-    return answer;
   }
+  ASSERT(frame()->height() == original_height);
+  return result;
 }
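For reference, the inlined keyed-load fast path emitted above performs four guards before trusting the inline result, bailing out to DeferredReferenceGetKeyedValue on any failure. A rough sketch of the same control flow in ordinary C++ follows; the names are illustrative and this is not the generated code.

// Sketch of the fast-path guards; Slow stands in for deferred->entry_label().
#include <vector>

struct Slow {};

int LoadElementFastPath(const std::vector<int>& elements,
                        bool receiver_is_smi, bool map_matches,
                        size_t index, int the_hole) {
  if (receiver_is_smi) throw Slow();           // JumpIfSmi(receiver, ...)
  if (!map_matches) throw Slow();              // patched map check
  if (index >= elements.size()) throw Slow();  // bounds check vs. length
  int value = elements[index];                 // load from FixedArray backing
  if (value == the_hole) throw Slow();         // the-hole check -> slow case
  return value;
}

int main() {
  std::vector<int> backing = {1, 2, 3};
  return LoadElementFastPath(backing, false, true, 1, -1) == 2 ? 0 : 1;
}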
 
 
@@ -7341,6 +7406,7 @@
       Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
       ASSERT(slot != NULL);
       cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
+      if (!persist_after_get_) set_unloaded();
       break;
     }
 
@@ -7348,101 +7414,29 @@
       Variable* var = expression_->AsVariableProxy()->AsVariable();
       bool is_global = var != NULL;
       ASSERT(!is_global || var->is_global());
-
-      // Do not inline the inobject property case for loads from the global
-      // object.  Also do not inline for unoptimized code.  This saves time
-      // in the code generator.  Unoptimized code is toplevel code or code
-      // that is not in a loop.
-      if (is_global ||
-          cgen_->scope()->is_global_scope() ||
-          cgen_->loop_nesting() == 0) {
-        Comment cmnt(masm, "[ Load from named Property");
-        cgen_->frame()->Push(GetName());
-
-        RelocInfo::Mode mode = is_global
-                               ? RelocInfo::CODE_TARGET_CONTEXT
-                               : RelocInfo::CODE_TARGET;
-        Result answer = cgen_->frame()->CallLoadIC(mode);
-        // A test rax instruction following the call signals that the
-        // inobject property case was inlined.  Ensure that there is not
-        // a test rax instruction here.
-        __ nop();
-        cgen_->frame()->Push(&answer);
-      } else {
-        // Inline the inobject property case.
-        Comment cmnt(masm, "[ Inlined named property load");
-        Result receiver = cgen_->frame()->Pop();
-        receiver.ToRegister();
-        Result value = cgen_->allocator()->Allocate();
-        ASSERT(value.is_valid());
-        // Cannot use r12 for receiver, because that changes
-        // the distance between a call and a fixup location,
-        // due to a special encoding of r12 as r/m in a ModR/M byte.
-        if (receiver.reg().is(r12)) {
-          // Swap receiver and value.
-          __ movq(value.reg(), receiver.reg());
-          Result temp = receiver;
-          receiver = value;
-          value = temp;
-          cgen_->frame()->Spill(value.reg());  // r12 may have been shared.
-        }
-
-        DeferredReferenceGetNamedValue* deferred =
-            new DeferredReferenceGetNamedValue(value.reg(),
-                                               receiver.reg(),
-                                               GetName());
-
-        // Check that the receiver is a heap object.
-        __ JumpIfSmi(receiver.reg(), deferred->entry_label());
-
-        __ bind(deferred->patch_site());
-        // This is the map check instruction that will be patched (so we can't
-        // use the double underscore macro that may insert instructions).
-        // Initially use an invalid map to force a failure.
-        masm->Move(kScratchRegister, Factory::null_value());
-        masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
-                   kScratchRegister);
-        // This branch is always a forwards branch so it's always a fixed
-        // size which allows the assert below to succeed and patching to work.
-        // Don't use deferred->Branch(...), since that might add coverage code.
-        masm->j(not_equal, deferred->entry_label());
-
-        // The delta from the patch label to the load offset must be
-        // statically known.
-        ASSERT(masm->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
-               LoadIC::kOffsetToLoadInstruction);
-        // The initial (invalid) offset has to be large enough to force
-        // a 32-bit instruction encoding to allow patching with an
-        // arbitrary offset.  Use kMaxInt (minus kHeapObjectTag).
-        int offset = kMaxInt;
-        masm->movq(value.reg(), FieldOperand(receiver.reg(), offset));
-
-        __ IncrementCounter(&Counters::named_load_inline, 1);
-        deferred->BindExit();
-        cgen_->frame()->Push(&receiver);
-        cgen_->frame()->Push(&value);
+      Result result = cgen_->EmitNamedLoad(GetName(), is_global);
+      cgen_->frame()->Push(&result);
+      if (!persist_after_get_) {
+        cgen_->UnloadReference(this);
       }
       break;
     }
 
     case KEYED: {
-      Comment cmnt(masm, "[ Load from keyed Property");
-      Variable* var = expression_->AsVariableProxy()->AsVariable();
-      bool is_global = var != NULL;
-      ASSERT(!is_global || var->is_global());
+      // A load of a bare identifier (load from global) cannot be keyed.
+      ASSERT(expression_->AsVariableProxy()->AsVariable() == NULL);
 
-      Result value = cgen_->EmitKeyedLoad(is_global);
+      Result value = cgen_->EmitKeyedLoad();
       cgen_->frame()->Push(&value);
+      if (!persist_after_get_) {
+        cgen_->UnloadReference(this);
+      }
       break;
     }
 
     default:
       UNREACHABLE();
   }
-
-  if (!persist_after_get_) {
-    cgen_->UnloadReference(this);
-  }
 }
 
 
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 01bbd20..9d46583 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -449,10 +449,13 @@
   // value in place.
   void StoreToSlot(Slot* slot, InitState init_state);
 
+  // Receiver is passed on the frame and not consumed.
+  Result EmitNamedLoad(Handle<String> name, bool is_contextual);
+
   // Load a property of an object, returning it in a Result.
   // The object and the property name are passed on the stack, and
   // not changed.
-  Result EmitKeyedLoad(bool is_global);
+  Result EmitKeyedLoad();
 
   // Special code for typeof expressions: Unfortunately, we must
   // be careful when loading the expression in 'typeof'
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 81424f6..5bd09c2 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -2217,7 +2217,7 @@
 
   // Check that the object is a JS object but take special care of JS
   // functions to make sure they have 'Function' as their class.
-  __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
+  __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);  // Map is now in rax.
   __ j(below, &null);
 
   // As long as JS_FUNCTION_TYPE is the last instance type and it is
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 065b616..b7a6aaf 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -603,7 +603,7 @@
 }
 
 
-void MacroAssembler::SmiCompare(Register  dst, const Operand& src) {
+void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
   cmpq(dst, src);
 }
 
@@ -614,13 +614,7 @@
 
 
 void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
-  if (src->value() == 0) {
-    // Only tagged long smi to have 32-bit representation.
-    cmpq(dst, Immediate(0));
-  } else {
-    Move(kScratchRegister, src);
-    cmpq(dst, kScratchRegister);
-  }
+  cmpl(Operand(dst, kIntSize), Immediate(src->value()));
 }
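The shorter SmiCompare (and the SmiAddConstant and Test changes below) rely on the x64 smi representation: the 32-bit value lives in the upper half of the 64-bit word and the lower half is zero, so a single 32-bit operation at byte offset kIntSize sees exactly the untagged value. The new Operand(base, offset) constructor is what makes addressing that upper half convenient. A minimal sketch of the layout argument, with illustrative helper names:

// Sketch (not V8 code): why a 32-bit compare at offset kIntSize works.
#include <cassert>
#include <cstdint>

// V8 tags an x64 smi by shifting the value into the upper word
// (kSmiShift == 32), so on a little-endian machine the untagged value
// sits at byte offset kIntSize (4).
int64_t TagSmi(int32_t value) {
  return static_cast<int64_t>(value) << 32;
}

int32_t UpperHalf(int64_t word) {
  return static_cast<int32_t>(word >> 32);  // what the new cmpl reads
}

int main() {
  assert(UpperHalf(TagSmi(-7)) == -7);  // one 32-bit compare suffices
  assert(UpperHalf(TagSmi(42)) == 42);
  return 0;
}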
 
 
@@ -922,8 +916,7 @@
 
 void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
   if (constant->value() != 0) {
-    Move(kScratchRegister, constant);
-    addq(dst, kScratchRegister);
+    addl(Operand(dst, kIntSize), Immediate(constant->value()));
   }
 }
 
@@ -1607,13 +1600,7 @@
 
 
 void MacroAssembler::Test(const Operand& src, Smi* source) {
-  intptr_t smi = reinterpret_cast<intptr_t>(source);
-  if (is_int32(smi)) {
-    testl(src, Immediate(static_cast<int32_t>(smi)));
-  } else {
-    Move(kScratchRegister, source);
-    testq(src, kScratchRegister);
-  }
+  testl(Operand(src, kIntSize), Immediate(source->value()));
 }
 
 
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 25361b3..8b095cb 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -430,7 +430,7 @@
                         Register holder,
                         Register scratch1,
                         Register scratch2,
-                        JSObject* holder_obj,
+                        JSObject* interceptor_holder,
                         LookupResult* lookup,
                         String* name,
                         Label* miss_label) {
@@ -450,7 +450,8 @@
     }
 
     if (!optimize) {
-      CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
+      CompileRegular(masm, receiver, holder, scratch2, interceptor_holder,
+                     miss_label);
       return;
     }
 
@@ -463,12 +464,17 @@
     __ push(holder);
     __ push(name_);
 
+    // Invoke the interceptor.  Note: the map checks from the receiver to
+    // the interceptor's holder have already been compiled (see the callers
+    // of this method).
     CompileCallLoadPropertyWithInterceptor(masm,
                                            receiver,
                                            holder,
                                            name_,
-                                           holder_obj);
+                                           interceptor_holder);
 
+    // Check if the interceptor provided a value for the property.  If so,
+    // return immediately.
     Label interceptor_failed;
     __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
     __ j(equal, &interceptor_failed);
@@ -485,13 +491,17 @@
     __ LeaveInternalFrame();
 
     if (lookup->type() == FIELD) {
-      holder = stub_compiler->CheckPrototypes(holder_obj,
+      // We found a FIELD property in the prototype chain of the
+      // interceptor's holder.  Check that the maps from the interceptor's
+      // holder to the field's holder haven't changed...
+      holder = stub_compiler->CheckPrototypes(interceptor_holder,
                                               holder,
                                               lookup->holder(),
                                               scratch1,
                                               scratch2,
                                               name,
                                               miss_label);
+      // ... and retrieve the field from the field's holder.
       stub_compiler->GenerateFastPropertyLoad(masm,
                                               rax,
                                               holder,
@@ -499,37 +509,47 @@
                                               lookup->GetFieldIndex());
       __ ret(0);
     } else {
+      // We found a CALLBACKS property in the prototype chain of the
+      // interceptor's holder.
       ASSERT(lookup->type() == CALLBACKS);
       ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
       ASSERT(callback != NULL);
       ASSERT(callback->getter() != NULL);
 
+      // Prepare for tail call.  Push receiver to stack after return address.
       Label cleanup;
-      __ pop(scratch2);
+      __ pop(scratch2);  // return address
       __ push(receiver);
       __ push(scratch2);
 
-      holder = stub_compiler->CheckPrototypes(holder_obj, holder,
+      // Check that the maps from the interceptor's holder to the callback's
+      // holder haven't changed.
+      holder = stub_compiler->CheckPrototypes(interceptor_holder, holder,
                                               lookup->holder(), scratch1,
                                               scratch2,
                                               name,
                                               &cleanup);
 
-      __ pop(scratch2);  // save old return address
+      // Continue tail call preparation: push remaining parameters after
+      // return address.
+      __ pop(scratch2);  // return address
       __ push(holder);
       __ Move(holder, Handle<AccessorInfo>(callback));
       __ push(holder);
       __ push(FieldOperand(holder, AccessorInfo::kDataOffset));
       __ push(name_);
-      __ push(scratch2);  // restore old return address
+      __ push(scratch2);  // restore return address
 
+      // Tail call to runtime.
       ExternalReference ref =
           ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
       __ TailCallExternalReference(ref, 5, 1);
 
+      // Cleanup code: we pushed the receiver after the return address and
+      // need to remove it from there.
       __ bind(&cleanup);
-      __ pop(scratch1);
-      __ pop(scratch2);
+      __ pop(scratch1);  // return address
+      __ pop(scratch2);  // receiver
       __ push(scratch1);
     }
   }
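Summarizing the commented flow in CompileCacheable: call the interceptor first, and only if it signals "no result" fall back to the FIELD or CALLBACKS property found behind it, re-validating the prototype maps before using it. A plain C++ sketch of that decision (illustrative names, not stub code):

// Sketch of the interceptor-then-fallback decision the stub implements.
#include <cassert>
#include <optional>

using Value = int;

Value LoadWithInterceptorFallback(std::optional<Value> interceptor_result,
                                  bool lookup_is_field,
                                  Value field_value,
                                  Value callback_value) {
  // Interceptor provided a value: return immediately.
  if (interceptor_result) return *interceptor_result;
  // No-result sentinel: use the property found behind the interceptor
  // (the real stub re-checks the maps from the interceptor's holder
  // onward before reaching this point).
  return lookup_is_field ? field_value : callback_value;
}

int main() {
  assert(LoadWithInterceptorFallback(std::optional<Value>(42), true, 0, 0) == 42);
  assert(LoadWithInterceptorFallback(std::nullopt, true, 7, 0) == 7);
  assert(LoadWithInterceptorFallback(std::nullopt, false, 0, 9) == 9);
  return 0;
}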
@@ -539,10 +559,10 @@
                       Register receiver,
                       Register holder,
                       Register scratch,
-                      JSObject* holder_obj,
+                      JSObject* interceptor_holder,
                       Label* miss_label) {
     __ pop(scratch);  // save old return address
-    PushInterceptorArguments(masm, receiver, holder, name_, holder_obj);
+    PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
     __ push(scratch);  // restore old return address
 
     ExternalReference ref = ExternalReference(
@@ -704,7 +724,7 @@
                         Register receiver,
                         Register scratch1,
                         Register scratch2,
-                        JSObject* holder_obj,
+                        JSObject* interceptor_holder,
                         LookupResult* lookup,
                         String* name,
                         const CallOptimization& optimization,
@@ -717,10 +737,13 @@
     bool can_do_fast_api_call = false;
     if (optimization.is_simple_api_call() &&
         !lookup->holder()->IsGlobalObject()) {
-      depth1 = optimization.GetPrototypeDepthOfExpectedType(object, holder_obj);
+      depth1 =
+          optimization.GetPrototypeDepthOfExpectedType(object,
+                                                       interceptor_holder);
       if (depth1 == kInvalidProtoDepth) {
-        depth2 = optimization.GetPrototypeDepthOfExpectedType(holder_obj,
-                                                              lookup->holder());
+        depth2 =
+            optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
+                                                         lookup->holder());
       }
       can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
                              (depth2 != kInvalidProtoDepth);
@@ -733,24 +756,32 @@
       ReserveSpaceForFastApiCall(masm, scratch1);
     }
 
+    // Check that the maps from the receiver to the interceptor's holder
+    // haven't changed, and thus that we can invoke the interceptor.
     Label miss_cleanup;
     Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
     Register holder =
-        stub_compiler_->CheckPrototypes(object, receiver, holder_obj,
+        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
                                         scratch1, scratch2, name,
                                         depth1, miss);
 
+    // Invoke the interceptor; if it provides a value,
+    // branch to |regular_invoke|.
     Label regular_invoke;
-    LoadWithInterceptor(masm, receiver, holder, holder_obj, &regular_invoke);
+    LoadWithInterceptor(masm, receiver, holder, interceptor_holder,
+                        &regular_invoke);
 
-    // Generate code for the failed interceptor case.
+    // The interceptor returned nothing for this property.  Try to use the
+    // cached constant function.
 
-    // Check the lookup is still valid.
-    stub_compiler_->CheckPrototypes(holder_obj, receiver,
+    // Check that the maps from the interceptor's holder to the constant
+    // function's holder haven't changed, so the cached function can be used.
+    stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
                                     lookup->holder(),
                                     scratch1, scratch2, name,
                                     depth2, miss);
 
+    // Invoke function.
     if (can_do_fast_api_call) {
       GenerateFastApiCall(masm, optimization, arguments_.immediate());
     } else {
@@ -758,12 +789,14 @@
                         JUMP_FUNCTION);
     }
 
+    // Deferred code for the fast API call case: free the preallocated space.
     if (can_do_fast_api_call) {
       __ bind(&miss_cleanup);
       FreeSpaceForFastApiCall(masm, scratch1);
       __ jmp(miss_label);
     }
 
+    // Invoke a regular function.
     __ bind(&regular_invoke);
     if (can_do_fast_api_call) {
       FreeSpaceForFastApiCall(masm, scratch1);
@@ -776,10 +809,10 @@
                       Register scratch1,
                       Register scratch2,
                       String* name,
-                      JSObject* holder_obj,
+                      JSObject* interceptor_holder,
                       Label* miss_label) {
     Register holder =
-        stub_compiler_->CheckPrototypes(object, receiver, holder_obj,
+        stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
                                         scratch1, scratch2, name,
                                         miss_label);
 
@@ -791,7 +824,7 @@
                              receiver,
                              holder,
                              name_,
-                             holder_obj);
+                             interceptor_holder);
 
     __ CallExternalReference(
         ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall)),
diff --git a/test/cctest/SConscript b/test/cctest/SConscript
index 2cf0b12..876c104 100644
--- a/test/cctest/SConscript
+++ b/test/cctest/SConscript
@@ -71,6 +71,7 @@
     'test-strings.cc',
     'test-threads.cc',
     'test-thread-termination.cc',
+    'test-unbound-queue.cc',
     'test-utils.cc',
     'test-version.cc'
   ],
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index ea9e6e1..46eaccd 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -76,6 +76,11 @@
 }
 
 
+static void ExpectTrue(const char* code) {
+  ExpectBoolean(code, true);
+}
+
+
 static void ExpectObject(const char* code, Local<Value> expected) {
   Local<Value> result = CompileRun(code);
   CHECK(result->Equals(expected));
@@ -2506,7 +2511,7 @@
 
   // Uses getOwnPropertyDescriptor to check the configurable status
   Local<Script> script_desc
-    = Script::Compile(v8_str("var prop =Object.getOwnPropertyDescriptor( "
+    = Script::Compile(v8_str("var prop = Object.getOwnPropertyDescriptor( "
                              "obj, 'x');"
                              "prop.configurable;"));
   Local<Value> result = script_desc->Run();
@@ -2592,8 +2597,167 @@
 }
 
 
+static v8::Handle<v8::Object> GetGlobalProperty(LocalContext* context,
+                                                char const* name) {
+  return v8::Handle<v8::Object>::Cast((*context)->Global()->Get(v8_str(name)));
+}
 
 
+THREADED_TEST(DefineAPIAccessorOnObject) {
+  v8::HandleScope scope;
+  Local<ObjectTemplate> templ = ObjectTemplate::New();
+  LocalContext context;
+
+  context->Global()->Set(v8_str("obj1"), templ->NewInstance());
+  CompileRun("var obj2 = {};");
+
+  CHECK(CompileRun("obj1.x")->IsUndefined());
+  CHECK(CompileRun("obj2.x")->IsUndefined());
+
+  CHECK(GetGlobalProperty(&context, "obj1")->
+      SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
+
+  ExpectString("obj1.x", "x");
+  CHECK(CompileRun("obj2.x")->IsUndefined());
+
+  CHECK(GetGlobalProperty(&context, "obj2")->
+      SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
+
+  ExpectString("obj1.x", "x");
+  ExpectString("obj2.x", "x");
+
+  ExpectTrue("Object.getOwnPropertyDescriptor(obj1, 'x').configurable");
+  ExpectTrue("Object.getOwnPropertyDescriptor(obj2, 'x').configurable");
+
+  CompileRun("Object.defineProperty(obj1, 'x',"
+             "{ get: function() { return 'y'; }, configurable: true })");
+
+  ExpectString("obj1.x", "y");
+  ExpectString("obj2.x", "x");
+
+  CompileRun("Object.defineProperty(obj2, 'x',"
+             "{ get: function() { return 'y'; }, configurable: true })");
+
+  ExpectString("obj1.x", "y");
+  ExpectString("obj2.x", "y");
+
+  ExpectTrue("Object.getOwnPropertyDescriptor(obj1, 'x').configurable");
+  ExpectTrue("Object.getOwnPropertyDescriptor(obj2, 'x').configurable");
+
+  CHECK(GetGlobalProperty(&context, "obj1")->
+      SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
+  CHECK(GetGlobalProperty(&context, "obj2")->
+      SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
+
+  ExpectString("obj1.x", "x");
+  ExpectString("obj2.x", "x");
+
+  ExpectTrue("Object.getOwnPropertyDescriptor(obj1, 'x').configurable");
+  ExpectTrue("Object.getOwnPropertyDescriptor(obj2, 'x').configurable");
+
+  // Define getters/setters, but now make them not configurable.
+  CompileRun("Object.defineProperty(obj1, 'x',"
+             "{ get: function() { return 'z'; }, configurable: false })");
+  CompileRun("Object.defineProperty(obj2, 'x',"
+             "{ get: function() { return 'z'; }, configurable: false })");
+
+  ExpectTrue("!Object.getOwnPropertyDescriptor(obj1, 'x').configurable");
+  ExpectTrue("!Object.getOwnPropertyDescriptor(obj2, 'x').configurable");
+
+  ExpectString("obj1.x", "z");
+  ExpectString("obj2.x", "z");
+
+  CHECK(!GetGlobalProperty(&context, "obj1")->
+      SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
+  CHECK(!GetGlobalProperty(&context, "obj2")->
+      SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
+
+  ExpectString("obj1.x", "z");
+  ExpectString("obj2.x", "z");
+}
+
+
+THREADED_TEST(DontDeleteAPIAccessorsCannotBeOverriden) {
+  v8::HandleScope scope;
+  Local<ObjectTemplate> templ = ObjectTemplate::New();
+  LocalContext context;
+
+  context->Global()->Set(v8_str("obj1"), templ->NewInstance());
+  CompileRun("var obj2 = {};");
+
+  CHECK(GetGlobalProperty(&context, "obj1")->SetAccessor(
+        v8_str("x"),
+        GetXValue, NULL,
+        v8_str("donut"), v8::DEFAULT, v8::DontDelete));
+  CHECK(GetGlobalProperty(&context, "obj2")->SetAccessor(
+        v8_str("x"),
+        GetXValue, NULL,
+        v8_str("donut"), v8::DEFAULT, v8::DontDelete));
+
+  ExpectString("obj1.x", "x");
+  ExpectString("obj2.x", "x");
+
+  ExpectTrue("!Object.getOwnPropertyDescriptor(obj1, 'x').configurable");
+  ExpectTrue("!Object.getOwnPropertyDescriptor(obj2, 'x').configurable");
+
+  CHECK(!GetGlobalProperty(&context, "obj1")->
+      SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
+  CHECK(!GetGlobalProperty(&context, "obj2")->
+      SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut")));
+
+  {
+    v8::TryCatch try_catch;
+    CompileRun("Object.defineProperty(obj1, 'x',"
+        "{get: function() { return 'func'; }})");
+    CHECK(try_catch.HasCaught());
+    String::AsciiValue exception_value(try_catch.Exception());
+    CHECK_EQ(*exception_value,
+            "TypeError: Cannot redefine property: defineProperty");
+  }
+  {
+    v8::TryCatch try_catch;
+    CompileRun("Object.defineProperty(obj2, 'x',"
+        "{get: function() { return 'func'; }})");
+    CHECK(try_catch.HasCaught());
+    String::AsciiValue exception_value(try_catch.Exception());
+    CHECK_EQ(*exception_value,
+            "TypeError: Cannot redefine property: defineProperty");
+  }
+}
+
+
+static v8::Handle<Value> Get239Value(Local<String> name,
+                                     const AccessorInfo& info) {
+  ApiTestFuzzer::Fuzz();
+  CHECK_EQ(info.Data(), v8_str("donut"));
+  CHECK_EQ(name, v8_str("239"));
+  return name;
+}
+
+
+THREADED_TEST(ElementAPIAccessor) {
+  v8::HandleScope scope;
+  Local<ObjectTemplate> templ = ObjectTemplate::New();
+  LocalContext context;
+
+  context->Global()->Set(v8_str("obj1"), templ->NewInstance());
+  CompileRun("var obj2 = {};");
+
+  CHECK(GetGlobalProperty(&context, "obj1")->SetAccessor(
+        v8_str("239"),
+        Get239Value, NULL,
+        v8_str("donut")));
+  CHECK(GetGlobalProperty(&context, "obj2")->SetAccessor(
+        v8_str("239"),
+        Get239Value, NULL,
+        v8_str("donut")));
+
+  ExpectString("obj1[239]", "239");
+  ExpectString("obj2[239]", "239");
+  ExpectString("obj1['239']", "239");
+  ExpectString("obj2['239']", "239");
+}
+
 
 v8::Persistent<Value> xValue;
 
@@ -8003,8 +8167,8 @@
   // TODO(155): This test would break without the initialization of V8. This is
   // a workaround for now to make this test not fail.
   v8::V8::Initialize();
-  const char *script = "function foo(a) { return a+1; }";
-  v8::ScriptData *sd =
+  const char* script = "function foo(a) { return a+1; }";
+  v8::ScriptData* sd =
       v8::ScriptData::PreCompile(script, i::StrLength(script));
   CHECK_NE(sd->Length(), 0);
   CHECK_NE(sd->Data(), NULL);
@@ -8015,8 +8179,8 @@
 
 TEST(PreCompileWithError) {
   v8::V8::Initialize();
-  const char *script = "function foo(a) { return 1 * * 2; }";
-  v8::ScriptData *sd =
+  const char* script = "function foo(a) { return 1 * * 2; }";
+  v8::ScriptData* sd =
       v8::ScriptData::PreCompile(script, i::StrLength(script));
   CHECK(sd->HasError());
   delete sd;
@@ -8025,14 +8189,53 @@
 
 TEST(Regress31661) {
   v8::V8::Initialize();
-  const char *script = " The Definintive Guide";
-  v8::ScriptData *sd =
+  const char* script = " The Definintive Guide";
+  v8::ScriptData* sd =
       v8::ScriptData::PreCompile(script, i::StrLength(script));
   CHECK(sd->HasError());
   delete sd;
 }
 
 
+// Tests that ScriptData can be serialized and deserialized.
+TEST(PreCompileSerialization) {
+  v8::V8::Initialize();
+  const char* script = "function foo(a) { return a+1; }";
+  v8::ScriptData* sd =
+      v8::ScriptData::PreCompile(script, i::StrLength(script));
+
+  // Serialize.
+  int serialized_data_length = sd->Length();
+  char* serialized_data = i::NewArray<char>(serialized_data_length);
+  memcpy(serialized_data, sd->Data(), serialized_data_length);
+
+  // Deserialize.
+  v8::ScriptData* deserialized_sd =
+      v8::ScriptData::New(serialized_data, serialized_data_length);
+
+  // Verify that the original is the same as the deserialized.
+  CHECK_EQ(sd->Length(), deserialized_sd->Length());
+  CHECK_EQ(0, memcmp(sd->Data(), deserialized_sd->Data(), sd->Length()));
+  CHECK_EQ(sd->HasError(), deserialized_sd->HasError());
+
+  delete sd;
+  delete deserialized_sd;
+}
+
+
+// Attempts to deserialize bad data.
+TEST(PreCompileDeserializationError) {
+  v8::V8::Initialize();
+  const char* data = "DONT CARE";
+  int invalid_size = 3;
+  v8::ScriptData* sd = v8::ScriptData::New(data, invalid_size);
+
+  CHECK_EQ(0, sd->Length());
+
+  delete sd;
+}
+
+
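The two tests above exercise the revised ScriptData API end to end. For an embedder, the intended round trip looks roughly like the sketch below; it assumes an active HandleScope and an entered Context, and uses the Script::Compile overload declared in include/v8.h that accepts a ScriptData*.

// Illustrative embedder sketch, not part of the patch.
#include <v8.h>

void CompileWithCachedData(const char* source, int length) {
  // First run: pre-parse and keep the (platform-dependent) bytes around.
  v8::ScriptData* sd = v8::ScriptData::PreCompile(source, length);
  // ... persist sd->Data() / sd->Length() to disk or a cache ...

  // Later run: wrap the saved bytes; New() does not take ownership.
  v8::ScriptData* cached = v8::ScriptData::New(sd->Data(), sd->Length());
  v8::Local<v8::Script> script =
      v8::Script::Compile(v8::String::New(source, length), NULL, cached);
  script->Run();
  delete cached;  // Each ScriptData must be deleted by the embedder.
  delete sd;
}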
 // This tests that we do not allow dictionary load/call inline caches
 // to use functions that have not yet been compiled.  The potential
 // problem of loading a function that has not yet been compiled can
diff --git a/test/cctest/test-circular-queue.cc b/test/cctest/test-circular-queue.cc
index 3fa49bf..ce9a42e 100644
--- a/test/cctest/test-circular-queue.cc
+++ b/test/cctest/test-circular-queue.cc
@@ -1,6 +1,6 @@
 // Copyright 2010 the V8 project authors. All rights reserved.
 //
-// Tests of circular queues.
+// Tests of the circular queue.
 
 #include "v8.h"
 #include "circular-queue-inl.h"
@@ -8,53 +8,9 @@
 
 namespace i = v8::internal;
 
-using i::CircularQueue;
 using i::SamplingCircularQueue;
 
 
-TEST(SingleRecordCircularQueue) {
-  typedef int Record;
-  CircularQueue<Record> cq(sizeof(Record) * 2);
-  CHECK(cq.IsEmpty());
-  cq.Enqueue(1);
-  CHECK(!cq.IsEmpty());
-  Record rec = 0;
-  cq.Dequeue(&rec);
-  CHECK_EQ(1, rec);
-  CHECK(cq.IsEmpty());
-}
-
-
-TEST(MultipleRecordsCircularQueue) {
-  typedef int Record;
-  const int kQueueSize = 10;
-  CircularQueue<Record> cq(sizeof(Record) * (kQueueSize + 1));
-  CHECK(cq.IsEmpty());
-  cq.Enqueue(1);
-  CHECK(!cq.IsEmpty());
-  for (int i = 2; i <= 5; ++i) {
-    cq.Enqueue(i);
-    CHECK(!cq.IsEmpty());
-  }
-  Record rec = 0;
-  for (int i = 1; i <= 4; ++i) {
-    CHECK(!cq.IsEmpty());
-    cq.Dequeue(&rec);
-    CHECK_EQ(i, rec);
-  }
-  for (int i = 6; i <= 12; ++i) {
-    cq.Enqueue(i);
-    CHECK(!cq.IsEmpty());
-  }
-  for (int i = 5; i <= 12; ++i) {
-    CHECK(!cq.IsEmpty());
-    cq.Dequeue(&rec);
-    CHECK_EQ(i, rec);
-  }
-  CHECK(cq.IsEmpty());
-}
-
-
 TEST(SamplingCircularQueue) {
   typedef SamplingCircularQueue::Cell Record;
   const int kRecordsPerChunk = 4;
diff --git a/test/cctest/test-log-stack-tracer.cc b/test/cctest/test-log-stack-tracer.cc
index 3fd5c69..6da1a75 100644
--- a/test/cctest/test-log-stack-tracer.cc
+++ b/test/cctest/test-log-stack-tracer.cc
@@ -273,12 +273,10 @@
 // StackTracer uses Top::c_entry_fp as a starting point for stack
 // walking.
 TEST(CFromJSStackTrace) {
-#if defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64)
   // TODO(711) The hack of replacing the inline runtime function
   // RandomHeapNumber with GetFrameNumber does not work with the way the full
   // compiler generates inline runtime calls.
-  i::FLAG_force_full_compiler = false;
-#endif
+  i::FLAG_always_full_compiler = false;
 
   TickSample sample;
   InitTraceEnv(&sample);
@@ -315,12 +313,10 @@
 // Top::c_entry_fp value. In this case, StackTracer uses passed frame
 // pointer value as a starting point for stack walking.
 TEST(PureJSStackTrace) {
-#if defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64)
   // TODO(711) The hack of replacing the inline runtime function
   // RandomHeapNumber with GetFrameNumber does not work with the way the full
   // compiler generates inline runtime calls.
-  i::FLAG_force_full_compiler = false;
-#endif
+  i::FLAG_always_full_compiler = false;
 
   TickSample sample;
   InitTraceEnv(&sample);
diff --git a/test/cctest/test-macro-assembler-x64.cc b/test/cctest/test-macro-assembler-x64.cc
index 8924ba7..dd97498 100755
--- a/test/cctest/test-macro-assembler-x64.cc
+++ b/test/cctest/test-macro-assembler-x64.cc
@@ -61,6 +61,7 @@
 using v8::internal::r13;
 using v8::internal::r14;
 using v8::internal::r15;
+using v8::internal::times_pointer_size;
 using v8::internal::FUNCTION_CAST;
 using v8::internal::CodeDesc;
 using v8::internal::less_equal;
@@ -75,6 +76,8 @@
 using v8::internal::Smi;
 using v8::internal::kSmiTagMask;
 using v8::internal::kSmiValueSize;
+using v8::internal::kPointerSize;
+using v8::internal::kIntSize;
 
 // Test the x64 assembler by compiling some simple functions into
 // a buffer and executing them.  These tests do not initialize the
@@ -2053,4 +2056,358 @@
 }
 
 
+TEST(OperandOffset) {
+  int data[256];
+  for (int i = 0; i < 256; i++) { data[i] = i * 0x01010101; }
+
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+                                      &actual_size,
+                                      true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, static_cast<int>(actual_size));
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  __ push(r12);
+  __ push(r13);
+  __ push(rbx);
+  __ push(rbp);
+  __ push(Immediate(0x100));  // <-- rbp
+  __ movq(rbp, rsp);
+  __ push(Immediate(0x101));
+  __ push(Immediate(0x102));
+  __ push(Immediate(0x103));
+  __ push(Immediate(0x104));
+  __ push(Immediate(0x105));  // <-- rbx
+  __ push(Immediate(0x106));
+  __ push(Immediate(0x107));
+  __ push(Immediate(0x108));
+  __ push(Immediate(0x109));  // <-- rsp
+  // rbp = rsp[9]
+  // r12 = rsp[3]
+  // r13 = rsp[6]
+  // rbx = rsp[4]
+  __ lea(r12, Operand(rsp, 3 * kPointerSize));
+  __ lea(r13, Operand(rbp, -3 * kPointerSize));
+  __ lea(rbx, Operand(rbp, -5 * kPointerSize));
+  __ movl(rcx, Immediate(2));
+  __ movq(r8, reinterpret_cast<uintptr_t>(&data[128]), RelocInfo::NONE);
+  __ movl(rax, Immediate(1));
+
+  Operand sp0 = Operand(rsp, 0);
+
+  // Test 1.
+  __ movl(rdx, sp0);  // Sanity check.
+  __ cmpl(rdx, Immediate(0x109));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  // Test 2.
+  // Zero to non-zero displacement.
+  __ movl(rdx, Operand(sp0, 2 * kPointerSize));
+  __ cmpl(rdx, Immediate(0x107));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  Operand sp2 = Operand(rsp, 2 * kPointerSize);
+
+  // Test 3.
+  __ movl(rdx, sp2);  // Sanity check.
+  __ cmpl(rdx, Immediate(0x107));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(sp2, 2 * kPointerSize));
+  __ cmpl(rdx, Immediate(0x105));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  // Non-zero to zero displacement.
+  __ movl(rdx, Operand(sp2, -2 * kPointerSize));
+  __ cmpl(rdx, Immediate(0x109));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  Operand sp2c2 = Operand(rsp, rcx, times_pointer_size, 2 * kPointerSize);
+
+  // Test 6.
+  __ movl(rdx, sp2c2);  // Sanity check.
+  __ cmpl(rdx, Immediate(0x105));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(sp2c2, 2 * kPointerSize));
+  __ cmpl(rdx, Immediate(0x103));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  // Non-zero to zero displacement.
+  __ movl(rdx, Operand(sp2c2, -2 * kPointerSize));
+  __ cmpl(rdx, Immediate(0x107));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  Operand bp0 = Operand(rbp, 0);
+
+  // Test 9.
+  __ movl(rdx, bp0);  // Sanity check.
+  __ cmpl(rdx, Immediate(0x100));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  // Zero to non-zero displacement.
+  __ movl(rdx, Operand(bp0, -2 * kPointerSize));
+  __ cmpl(rdx, Immediate(0x102));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  Operand bp2 = Operand(rbp, -2 * kPointerSize);
+
+  // Test 11.
+  __ movl(rdx, bp2);  // Sanity check.
+  __ cmpl(rdx, Immediate(0x102));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  // Non-zero to zero displacement.
+  __ movl(rdx, Operand(bp2, 2 * kPointerSize));
+  __ cmpl(rdx, Immediate(0x100));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(bp2, -2 * kPointerSize));
+  __ cmpl(rdx, Immediate(0x104));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  Operand bp2c4 = Operand(rbp, rcx, times_pointer_size, -4 * kPointerSize);
+
+  // Test 14:
+  __ movl(rdx, bp2c4);  // Sanity check.
+  __ cmpl(rdx, Immediate(0x102));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(bp2c4, 2 * kPointerSize));
+  __ cmpl(rdx, Immediate(0x100));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(bp2c4, -2 * kPointerSize));
+  __ cmpl(rdx, Immediate(0x104));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  Operand bx0 = Operand(rbx, 0);
+
+  // Test 17.
+  __ movl(rdx, bx0);  // Sanity check.
+  __ cmpl(rdx, Immediate(0x105));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(bx0, 5 * kPointerSize));
+  __ cmpl(rdx, Immediate(0x100));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(bx0, -4 * kPointerSize));
+  __ cmpl(rdx, Immediate(0x109));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  Operand bx2 = Operand(rbx, 2 * kPointerSize);
+
+  // Test 20.
+  __ movl(rdx, bx2);  // Sanity check.
+  __ cmpl(rdx, Immediate(0x103));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(bx2, 2 * kPointerSize));
+  __ cmpl(rdx, Immediate(0x101));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  // Non-zero to zero displacement.
+  __ movl(rdx, Operand(bx2, -2 * kPointerSize));
+  __ cmpl(rdx, Immediate(0x105));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  Operand bx2c2 = Operand(rbx, rcx, times_pointer_size, -2 * kPointerSize);
+
+  // Test 23.
+  __ movl(rdx, bx2c2);  // Sanity check.
+  __ cmpl(rdx, Immediate(0x105));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(bx2c2, 2 * kPointerSize));
+  __ cmpl(rdx, Immediate(0x103));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(bx2c2, -2 * kPointerSize));
+  __ cmpl(rdx, Immediate(0x107));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  Operand r80 = Operand(r8, 0);
+
+  // Test 26.
+  __ movl(rdx, r80);  // Sanity check.
+  __ cmpl(rdx, Immediate(0x80808080));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r80, -8 * kIntSize));
+  __ cmpl(rdx, Immediate(0x78787878));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r80, 8 * kIntSize));
+  __ cmpl(rdx, Immediate(0x88888888));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r80, -64 * kIntSize));
+  __ cmpl(rdx, Immediate(0x40404040));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r80, 64 * kIntSize));
+  __ cmpl(rdx, Immediate(0xC0C0C0C0));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  Operand r88 = Operand(r8, 8 * kIntSize);
+
+  // Test 31.
+  __ movl(rdx, r88);  // Sanity check.
+  __ cmpl(rdx, Immediate(0x88888888));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r88, -8 * kIntSize));
+  __ cmpl(rdx, Immediate(0x80808080));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r88, 8 * kIntSize));
+  __ cmpl(rdx, Immediate(0x90909090));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r88, -64 * kIntSize));
+  __ cmpl(rdx, Immediate(0x48484848));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r88, 64 * kIntSize));
+  __ cmpl(rdx, Immediate(0xC8C8C8C8));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  Operand r864 = Operand(r8, 64 * kIntSize);
+
+  // Test 36.
+  __ movl(rdx, r864);  // Sanity check.
+  __ cmpl(rdx, Immediate(0xC0C0C0C0));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r864, -8 * kIntSize));
+  __ cmpl(rdx, Immediate(0xB8B8B8B8));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r864, 8 * kIntSize));
+  __ cmpl(rdx, Immediate(0xC8C8C8C8));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r864, -64 * kIntSize));
+  __ cmpl(rdx, Immediate(0x80808080));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r864, 32 * kIntSize));
+  __ cmpl(rdx, Immediate(0xE0E0E0E0));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  // 32-bit offset to 8-bit offset.
+  __ movl(rdx, Operand(r864, -60 * kIntSize));
+  __ cmpl(rdx, Immediate(0x84848484));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r864, 60 * kIntSize));
+  __ cmpl(rdx, Immediate(0xFCFCFCFC));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  // Test unaligned offsets.
+
+  // Test 43.
+  __ movl(rdx, Operand(r80, 2));
+  __ cmpl(rdx, Immediate(0x81818080));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r80, -2));
+  __ cmpl(rdx, Immediate(0x80807F7F));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r80, 126));
+  __ cmpl(rdx, Immediate(0xA0A09F9F));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r80, -126));
+  __ cmpl(rdx, Immediate(0x61616060));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r80, 254));
+  __ cmpl(rdx, Immediate(0xC0C0BFBF));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  __ movl(rdx, Operand(r80, -254));
+  __ cmpl(rdx, Immediate(0x41414040));
+  __ j(not_equal, &exit);
+  __ incq(rax);
+
+  // Success.
+
+  __ movl(rax, Immediate(0));
+  __ bind(&exit);
+  __ lea(rsp, Operand(rbp, kPointerSize));
+  __ pop(rbp);
+  __ pop(rbx);
+  __ pop(r13);
+  __ pop(r12);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
 #undef __
diff --git a/test/cctest/test-thread-termination.cc b/test/cctest/test-thread-termination.cc
index 83a1e19..aed7466 100644
--- a/test/cctest/test-thread-termination.cc
+++ b/test/cctest/test-thread-termination.cc
@@ -308,3 +308,48 @@
   v8::Script::Compile(source)->Run();
   context.Dispose();
 }
+
+v8::Handle<v8::Value> ReenterAfterTermination(const v8::Arguments& args) {
+  v8::TryCatch try_catch;
+  CHECK(!v8::V8::IsExecutionTerminating());
+  v8::Script::Compile(v8::String::New("function f() {"
+                                      "  var term = true;"
+                                      "  try {"
+                                      "    while(true) {"
+                                      "      if (term) terminate();"
+                                      "      term = false;"
+                                      "    }"
+                                      "    fail();"
+                                      "  } catch(e) {"
+                                      "    fail();"
+                                      "  }"
+                                      "}"
+                                      "f()"))->Run();
+  CHECK(try_catch.HasCaught());
+  CHECK(try_catch.Exception()->IsNull());
+  CHECK(try_catch.Message().IsEmpty());
+  CHECK(!try_catch.CanContinue());
+  CHECK(v8::V8::IsExecutionTerminating());
+  v8::Script::Compile(v8::String::New("function f() { fail(); } f()"))->Run();
+  return v8::Undefined();
+}
+
+// Test that reentry into V8 while the termination exception is still pending
+// (has not yet unwound the 0-level JS frame) does not crash.
+TEST(TerminateAndReenterFromThreadItself) {
+  v8::HandleScope scope;
+  v8::Handle<v8::ObjectTemplate> global =
+      CreateGlobalTemplate(TerminateCurrentThread, ReenterAfterTermination);
+  v8::Persistent<v8::Context> context = v8::Context::New(NULL, global);
+  v8::Context::Scope context_scope(context);
+  CHECK(!v8::V8::IsExecutionTerminating());
+  v8::Handle<v8::String> source =
+      v8::String::New("try { loop(); fail(); } catch(e) { fail(); }");
+  v8::Script::Compile(source)->Run();
+  CHECK(!v8::V8::IsExecutionTerminating());
+  // Check we can run JS again after termination.
+  CHECK(v8::Script::Compile(v8::String::New("function f() { return true; }"
+                                            "f()"))->Run()->IsTrue());
+  context.Dispose();
+}
+
diff --git a/test/cctest/test-unbound-queue.cc b/test/cctest/test-unbound-queue.cc
new file mode 100644
index 0000000..df5509e
--- /dev/null
+++ b/test/cctest/test-unbound-queue.cc
@@ -0,0 +1,54 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+//
+// Tests of the unbound queue.
+
+#include "v8.h"
+#include "unbound-queue-inl.h"
+#include "cctest.h"
+
+namespace i = v8::internal;
+
+using i::UnboundQueue;
+
+
+TEST(SingleRecord) {
+  typedef int Record;
+  UnboundQueue<Record> cq;
+  CHECK(cq.IsEmpty());
+  cq.Enqueue(1);
+  CHECK(!cq.IsEmpty());
+  Record rec = 0;
+  cq.Dequeue(&rec);
+  CHECK_EQ(1, rec);
+  CHECK(cq.IsEmpty());
+}
+
+
+TEST(MultipleRecords) {
+  typedef int Record;
+  UnboundQueue<Record> cq;
+  CHECK(cq.IsEmpty());
+  cq.Enqueue(1);
+  CHECK(!cq.IsEmpty());
+  for (int i = 2; i <= 5; ++i) {
+    cq.Enqueue(i);
+    CHECK(!cq.IsEmpty());
+  }
+  Record rec = 0;
+  for (int i = 1; i <= 4; ++i) {
+    CHECK(!cq.IsEmpty());
+    cq.Dequeue(&rec);
+    CHECK_EQ(i, rec);
+  }
+  for (int i = 6; i <= 12; ++i) {
+    cq.Enqueue(i);
+    CHECK(!cq.IsEmpty());
+  }
+  for (int i = 5; i <= 12; ++i) {
+    CHECK(!cq.IsEmpty());
+    cq.Dequeue(&rec);
+    CHECK_EQ(i, rec);
+  }
+  CHECK(cq.IsEmpty());
+}
+
diff --git a/test/mjsunit/object-define-property.js b/test/mjsunit/object-define-property.js
index 43b1c7f..46bfb34 100644
--- a/test/mjsunit/object-define-property.js
+++ b/test/mjsunit/object-define-property.js
@@ -53,36 +53,46 @@
   assertTrue(/called on non-object/.test(e));
 }
 
-// Object
+// Object.
 var obj1 = {};
 
-// Values
+// Values.
 var val1 = 0;
 var val2 = 0;
 var val3 = 0;
 
-// Descriptors
+function setter1() { val1++; }
+function getter1() { return val1; }
+
+function setter2() { val2++; }
+function getter2() { return val2; }
+
+function setter3() { val3++; }
+function getter3() { return val3; }
+
+
+// Descriptors.
 var emptyDesc = {};
 
 var accessorConfigurable = { 
-    set: function() { val1++; },
-    get: function() { return val1; },
+    set: setter1,
+    get: getter1,
     configurable: true
 };
 
 var accessorNoConfigurable = {
-    set: function() { val2++; },
-    get: function() { return val2; },
+    set: setter2,
+    get: getter2,
     configurable: false 
 };
 
 var accessorOnlySet = {
-  set: function() { val3++; },
+  set: setter3,
   configurable: true
 };
 
 var accessorOnlyGet = {
-  get: function() { return val3; },
+  get: getter3,
   configurable: true
 };
 
@@ -200,7 +210,7 @@
 assertEquals(4, val2);
 assertEquals(4, obj1.bar);
 
-// Define an accessor that has only a setter
+// Define an accessor that has only a setter.
 Object.defineProperty(obj1, "setOnly", accessorOnlySet);
 desc = Object.getOwnPropertyDescriptor(obj1, "setOnly");
 assertTrue(desc.configurable);
@@ -212,7 +222,7 @@
 assertEquals(1, obj1.setOnly = 1);
 assertEquals(1, val3);
 
-// Add a getter - should not touch the setter
+// Add a getter - should not touch the setter.
 Object.defineProperty(obj1, "setOnly", accessorOnlyGet);
 desc = Object.getOwnPropertyDescriptor(obj1, "setOnly");
 assertTrue(desc.configurable);
@@ -256,7 +266,7 @@
 assertEquals(obj1.foobar, 1000);
 
 
-// Redefine to writable descriptor - now writing to foobar should be allowed
+// Redefine to writable descriptor - now writing to foobar should be allowed.
 Object.defineProperty(obj1, "foobar", dataWritable);
 desc = Object.getOwnPropertyDescriptor(obj1, "foobar");
 assertEquals(obj1.foobar, 3000);
@@ -279,7 +289,7 @@
 assertEquals(obj1.foobar, 2000);
 assertEquals(desc.value, 2000);
 assertFalse(desc.configurable);
-assertFalse(desc.writable);
+assertTrue(desc.writable);
 assertFalse(desc.enumerable);
 assertEquals(desc.get, undefined);
 assertEquals(desc.set, undefined);
@@ -307,7 +317,7 @@
 assertEquals(obj1.foobar, 2000);
 assertEquals(desc.value, 2000);
 assertFalse(desc.configurable);
-assertFalse(desc.writable);
+assertTrue(desc.writable);
 assertFalse(desc.enumerable);
 assertEquals(desc.get, undefined);
 assertEquals(desc.set, undefined);
@@ -375,7 +385,7 @@
 
 
 // Redefinition of an accessor defined using __defineGetter__ and 
-// __defineSetter__
+// __defineSetter__.
 function get(){return this.x}
 function set(x){this.x=x};
 
@@ -442,7 +452,7 @@
 assertEquals(5, val1);
 assertEquals(5, obj4.bar);
 
-// Make sure an error is thrown when trying to access to redefined function
+// Make sure an error is thrown when trying to access the redefined function.
 try {
   obj4.bar();
   assertTrue(false);
@@ -453,7 +463,7 @@
 
 // Test runtime calls to DefineOrRedefineDataProperty and
 // DefineOrRedefineAccessorProperty - make sure we don't 
-// crash
+// crash.
 try {
   %DefineOrRedefineAccessorProperty(0, 0, 0, 0, 0);
 } catch (e) {
@@ -497,3 +507,210 @@
 } catch (e) {
   assertTrue(/illegal access/.test(e));
 }
+
+// Test that all possible differences in step 6 in DefineOwnProperty are
+// exercised, i.e., any difference between the given property descriptor and
+// the existing property must not return true, but must throw an error when
+// the existing property is non-configurable.
+
+var obj5 = {};
+// Enumerable will default to false.
+Object.defineProperty(obj5, 'foo', accessorNoConfigurable);
+desc = Object.getOwnPropertyDescriptor(obj5, 'foo');
+// First, test that we are actually allowed to set the accessor if all
+// values of the descriptor are the same as the existing ones.
+Object.defineProperty(obj5, 'foo', accessorNoConfigurable);
+
+// Different setter.
+var descDifferent = {
+  configurable:false,
+  enumerable:false,
+  set: setter1,
+  get: getter2
+};
+
+try {
+  Object.defineProperty(obj5, 'foo', descDifferent);
+  assertTrue(false);
+} catch (e) {
+  assertTrue(/Cannot redefine property/.test(e));
+}
+
+// Different getter.
+descDifferent = {
+  configurable:false,
+  enumerable:false,
+  set: setter2,
+  get: getter1
+};
+
+try {
+  Object.defineProperty(obj5, 'foo', descDifferent);
+  assertTrue(false);
+} catch (e) {
+  assertTrue(/Cannot redefine property/.test(e));
+}
+
+// Different enumerable.
+descDifferent = {
+  configurable:false,
+  enumerable:true,
+  set: setter2,
+  get: getter2
+};
+
+try {
+  Object.defineProperty(obj5, 'foo', descDifferent);
+  assertTrue(false);
+} catch (e) {
+  assertTrue(/Cannot redefine property/.test(e));
+}
+
+// Different configurable.
+descDifferent = {
+  configurable:true,
+  enumerable:false,
+  set: setter2,
+  get: getter2
+};
+
+try {
+  Object.defineProperty(obj5, 'foo', descDifferent);
+  assertTrue(false);
+} catch (e) {
+  assertTrue(/Cannot redefine property/.test(e));
+}
+
+// No difference.
+descDifferent = {
+  configurable:false,
+  enumerable:false,
+  set: setter2,
+  get: getter2
+};
+// Make sure we can still redefine if all properties are the same.
+Object.defineProperty(obj5, 'foo', descDifferent);
+
+// Make sure that obj5 still holds the original values.
+desc = Object.getOwnPropertyDescriptor(obj5, 'foo');
+assertEquals(desc.get, getter2);
+assertEquals(desc.set, setter2);
+assertFalse(desc.enumerable);
+assertFalse(desc.configurable);
+
+
+// Also exercise step 6 on a data property; writable and enumerable
+// default to false.
+Object.defineProperty(obj5, 'bar', dataNoConfigurable);
+
+// Test that redefinition with the same property descriptor is possible.
+Object.defineProperty(obj5, 'bar', dataNoConfigurable);
+
+// Different value.
+descDifferent = {
+  configurable:false,
+  enumerable:false,
+  writable: false,
+  value: 1999
+};
+
+try {
+  Object.defineProperty(obj5, 'bar', descDifferent);
+  assertTrue(false);
+} catch (e) {
+  assertTrue(/Cannot redefine property/.test(e));
+}
+
+// Different writable.
+descDifferent = {
+  configurable:false,
+  enumerable:false,
+  writable: true,
+  value: 2000
+};
+
+try {
+  Object.defineProperty(obj5, 'bar', descDifferent);
+  assertTrue(false);
+} catch (e) {
+  assertTrue(/Cannot redefine property/.test(e));
+}
+
+
+// Different enumerable.
+descDifferent = {
+  configurable:false,
+  enumerable:true,
+  writable:false,
+  value: 2000
+};
+
+try {
+  Object.defineProperty(obj5, 'bar', descDifferent);
+  assertTrue(false);
+} catch (e) {
+  assertTrue(/Cannot redefine property/.test(e));
+}
+
+
+// Different configurable.
+descDifferent = {
+  configurable:true,
+  enumerable:false,
+  writable:false,
+  value: 2000
+};
+
+try {
+  Object.defineProperty(obj5, 'bar', descDifferent);
+  assertTrue(false);
+} catch (e) {
+  assertTrue(/Cannot redefine property/.test(e));
+}
+
+// No difference.
+descDifferent = {
+  configurable:false,
+  enumerable:false,
+  writable:false,
+  value:2000
+};
+// Make sure we can still redefine if all properties are the same.
+Object.defineProperty(obj5, 'bar', descDifferent);
+
+// Make sure that obj5 still holds the original values.
+desc = Object.getOwnPropertyDescriptor(obj5, 'bar');
+assertEquals(desc.value, 2000);
+assertFalse(desc.writable);
+assertFalse(desc.enumerable);
+assertFalse(desc.configurable);
+
+
+// Make sure that we can't overwrite +0 with -0 and vice versa.
+var descMinusZero = {value: -0, configurable: false};
+var descPlusZero = {value: +0, configurable: false};
+
+Object.defineProperty(obj5, 'minuszero', descMinusZero);
+
+// Make sure we can redefine with -0.
+Object.defineProperty(obj5, 'minuszero', descMinusZero);
+
+try {
+  Object.defineProperty(obj5, 'minuszero', descPlusZero);
+  assertUnreachable();
+} catch (e) {
+  assertTrue(/Cannot redefine property/.test(e));
+}
+
+
+Object.defineProperty(obj5, 'pluszero', descPlusZero);
+
+// Make sure we can redefine with +0.
+Object.defineProperty(obj5, 'pluszero', descPlusZero);
+
+try {
+  Object.defineProperty(obj5, 'pluszero', descMinusZero);
+  assertUnreachable();
+} catch (e) {
+  assertTrue(/Cannot redefine property/.test(e));
+}
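As a companion to the checks above, a minimal sketch of the rule being exercised (the object name sketchObj is hypothetical; getter1 and getter2 are the helpers defined in this test file): once configurable is false, a redefinition must either request no changes or match the existing descriptor exactly, and any difference throws.

  var sketchObj = {};
  Object.defineProperty(sketchObj, 'p', { get: getter1, configurable: false });
  // An identical descriptor is permitted.
  Object.defineProperty(sketchObj, 'p', { get: getter1, configurable: false });
  // Any difference (here, another getter) is rejected.
  try {
    Object.defineProperty(sketchObj, 'p', { get: getter2 });
    assertUnreachable();
  } catch (e) {
    assertTrue(/Cannot redefine property/.test(e));
  }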
diff --git a/test/mjsunit/regress/regress-712.js b/test/mjsunit/regress/regress-712.js
new file mode 100644
index 0000000..b26b94a
--- /dev/null
+++ b/test/mjsunit/regress/regress-712.js
@@ -0,0 +1,38 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This regression test ensures that Object.defineProperty cannot be called
+// with an empty property descriptor on an existing non-configurable property
+// in order to override that property.
+// See: http://code.google.com/p/v8/issues/detail?id=712
+
+var obj = {};
+Object.defineProperty(obj, "x", { get: function() { return "42"; },
+                                  configurable: false });
+assertEquals(obj.x, "42");
+Object.defineProperty(obj, 'x', {});
+assertEquals(obj.x, "42");
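The same point extends to data properties; a hedged sketch (assuming the mjsunit harness): per ES5 8.12.9, a descriptor with no fields requests no changes, so it succeeds without altering even a non-configurable property.

  var o2 = {};
  Object.defineProperty(o2, 'y', { value: 1, configurable: false });
  Object.defineProperty(o2, 'y', {});  // empty descriptor: a no-op, must not throw
  assertEquals(o2.y, 1);               // the value is unchanged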
diff --git a/test/mjsunit/regress/regress-720.js b/test/mjsunit/regress/regress-720.js
new file mode 100644
index 0000000..97e1284
--- /dev/null
+++ b/test/mjsunit/regress/regress-720.js
@@ -0,0 +1,36 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This regression test ensures that Object.defineProperty keeps the
+// existing value of the writable flag if none is given in the
+// provided descriptor.
+// See: http://code.google.com/p/v8/issues/detail?id=720
+
+var o = {x: 10};
+Object.defineProperty(o, "x", {value: 5});
+var desc = Object.getOwnPropertyDescriptor(o, "x");
+assertTrue(desc["writable"]);
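For contrast, a sketch (assuming the mjsunit harness) of where the false default does apply: attribute defaults are used only when Object.defineProperty creates a property, not when it redefines an existing one.

  var fresh = {};
  Object.defineProperty(fresh, 'x', { value: 5 });     // creates the property
  assertFalse(Object.getOwnPropertyDescriptor(fresh, 'x').writable);

  var existing = { x: 10 };                            // assignment: writable
  Object.defineProperty(existing, 'x', { value: 5 });  // redefines it
  assertTrue(Object.getOwnPropertyDescriptor(existing, 'x').writable);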
diff --git a/test/mjsunit/samevalue.js b/test/mjsunit/samevalue.js
new file mode 100644
index 0000000..2de677e
--- /dev/null
+++ b/test/mjsunit/samevalue.js
@@ -0,0 +1,102 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// Flags: --expose-natives-as natives
+// Test the SameValue internal method.
+
+var obj1 = {x: 10, y: 11, z: "test"};
+var obj2 = {x: 10, y: 11, z: "test"};
+
+assertTrue(natives.SameValue(0, 0));
+assertTrue(natives.SameValue(+0, +0));
+assertTrue(natives.SameValue(-0, -0));
+assertTrue(natives.SameValue(1, 1));
+assertTrue(natives.SameValue(2, 2));
+assertTrue(natives.SameValue(-1, -1));
+assertTrue(natives.SameValue(0.5, 0.5));
+assertTrue(natives.SameValue(true, true));
+assertTrue(natives.SameValue(false, false));
+assertTrue(natives.SameValue(NaN, NaN));
+assertTrue(natives.SameValue(null, null));
+assertTrue(natives.SameValue("foo", "foo"));
+assertTrue(natives.SameValue(obj1, obj1));
+// Undefined values.
+assertTrue(natives.SameValue());
+assertTrue(natives.SameValue(undefined, undefined));
+
+assertFalse(natives.SameValue(0, 1));
+assertFalse(natives.SameValue("foo", "bar"));
+assertFalse(natives.SameValue(obj1, obj2));
+assertFalse(natives.SameValue(true, false));
+
+assertFalse(natives.SameValue(obj1, true));
+assertFalse(natives.SameValue(obj1, "foo"));
+assertFalse(natives.SameValue(obj1, 1));
+assertFalse(natives.SameValue(obj1, undefined));
+assertFalse(natives.SameValue(obj1, NaN));
+
+assertFalse(natives.SameValue(undefined, true));
+assertFalse(natives.SameValue(undefined, "foo"));
+assertFalse(natives.SameValue(undefined, 1));
+assertFalse(natives.SameValue(undefined, obj1));
+assertFalse(natives.SameValue(undefined, NaN));
+
+assertFalse(natives.SameValue(NaN, true));
+assertFalse(natives.SameValue(NaN, "foo"));
+assertFalse(natives.SameValue(NaN, 1));
+assertFalse(natives.SameValue(NaN, obj1));
+assertFalse(natives.SameValue(NaN, undefined));
+
+assertFalse(natives.SameValue("foo", true));
+assertFalse(natives.SameValue("foo", 1));
+assertFalse(natives.SameValue("foo", obj1));
+assertFalse(natives.SameValue("foo", undefined));
+assertFalse(natives.SameValue("foo", NaN));
+
+assertFalse(natives.SameValue(true, 1));
+assertFalse(natives.SameValue(true, obj1));
+assertFalse(natives.SameValue(true, undefined));
+assertFalse(natives.SameValue(true, NaN));
+assertFalse(natives.SameValue(true, "foo"));
+
+assertFalse(natives.SameValue(1, true));
+assertFalse(natives.SameValue(1, obj1));
+assertFalse(natives.SameValue(1, undefined));
+assertFalse(natives.SameValue(1, NaN));
+assertFalse(natives.SameValue(1, "foo"));
+
+// Special string cases.
+assertFalse(natives.SameValue("1", 1));
+assertFalse(natives.SameValue("true", true));
+assertFalse(natives.SameValue("false", false));
+assertFalse(natives.SameValue("undefined", undefined));
+assertFalse(natives.SameValue("NaN", NaN));
+
+// -0 and +0 should be different.
+assertFalse(natives.SameValue(+0, -0));
+assertFalse(natives.SameValue(-0, +0));
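For reference, a JavaScript rendering of the ES5 SameValue algorithm (section 9.12) that the assertions above pin down; this is an illustrative sketch, not V8's internal implementation.

  function SameValueSketch(x, y) {
    if (typeof x != typeof y) return false;
    if (typeof x == 'number') {
      if (isNaN(x) && isNaN(y)) return true;           // NaN is the same value as NaN
      if (x === 0 && y === 0) return 1 / x === 1 / y;  // separates +0 from -0
      return x === y;
    }
    return x === y;  // booleans, strings, undefined, null, object identity
  }

  assertTrue(SameValueSketch(NaN, NaN));
  assertFalse(SameValueSketch(+0, -0));
  assertFalse(SameValueSketch("1", 1));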

diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index 5985b9f..a92576e 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -412,6 +412,8 @@
         '../../src/top.h',
         '../../src/type-info.cc',
         '../../src/type-info.h',
+        '../../src/unbound-queue-inl.h',
+        '../../src/unbound-queue.h',
         '../../src/unicode-inl.h',
         '../../src/unicode.cc',
         '../../src/unicode.h',
diff --git a/tools/v8.xcodeproj/project.pbxproj b/tools/v8.xcodeproj/project.pbxproj
index 46aba8d..48d63b7 100644
--- a/tools/v8.xcodeproj/project.pbxproj
+++ b/tools/v8.xcodeproj/project.pbxproj
@@ -627,6 +627,8 @@
 		9FBE03E410BD412600F8BFBA /* fast-codegen-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "fast-codegen-arm.cc"; path = "arm/fast-codegen-arm.cc"; sourceTree = "<group>"; };
 		9FC86ABB0F5FEDAC00F22668 /* oprofile-agent.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "oprofile-agent.cc"; sourceTree = "<group>"; };
 		9FC86ABC0F5FEDAC00F22668 /* oprofile-agent.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "oprofile-agent.h"; sourceTree = "<group>"; };
+		9FF7A28211A642EA0051B8F2 /* unbound-queue-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "unbound-queue-inl.h"; sourceTree = "<group>"; };
+		9FF7A28311A642EA0051B8F2 /* unbound-queue.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "unbound-queue.h"; sourceTree = "<group>"; };
 /* End PBXFileReference section */
 
 /* Begin PBXFrameworksBuildPhase section */
@@ -970,6 +972,8 @@
 				897FF1910E719B8F00D62E90 /* top.h */,
 				9FA38BAE1175B2D200C4CD55 /* type-info.cc */,
 				9FA38BAF1175B2D200C4CD55 /* type-info.h */,
+				9FF7A28211A642EA0051B8F2 /* unbound-queue-inl.h */,
+				9FF7A28311A642EA0051B8F2 /* unbound-queue.h */,
 				897FF1920E719B8F00D62E90 /* unicode-inl.h */,
 				897FF1930E719B8F00D62E90 /* unicode.cc */,
 				897FF1940E719B8F00D62E90 /* unicode.h */,
diff --git a/tools/visual_studio/v8_base.vcproj b/tools/visual_studio/v8_base.vcproj
index 004e16e..2571b65 100644
--- a/tools/visual_studio/v8_base.vcproj
+++ b/tools/visual_studio/v8_base.vcproj
@@ -144,6 +144,30 @@
 					/>
 				</FileConfiguration>
 			</File>
+			<File
+				RelativePath="..\..\src\dtoa.cc"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\dtoa.h"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\fast-dtoa.cc"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\fast-dtoa.h"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\fixed-dtoa.cc"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\fixed-dtoa.h"
+				>
+			</File>
 		</Filter>
 		<Filter
 			Name="src"
@@ -961,6 +985,14 @@
 				>
 			</File>
 			<File
+				RelativePath="..\..\src\unbound-queue-inl.h"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\unbound-queue.h"
+				>
+			</File>
+			<File
 				RelativePath="..\..\src\unicode-inl.h"
 				>
 			</File>
diff --git a/tools/visual_studio/v8_base_arm.vcproj b/tools/visual_studio/v8_base_arm.vcproj
index 39cd42a..a3c5970 100644
--- a/tools/visual_studio/v8_base_arm.vcproj
+++ b/tools/visual_studio/v8_base_arm.vcproj
@@ -953,6 +953,14 @@
 				>
 			</File>
 			<File
+				RelativePath="..\..\src\unbound-queue-inl.h"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\unbound-queue.h"
+				>
+			</File>
+			<File
 				RelativePath="..\..\src\unicode-inl.h"
 				>
 			</File>
diff --git a/tools/visual_studio/v8_base_x64.vcproj b/tools/visual_studio/v8_base_x64.vcproj
index 4607817..708b380 100644
--- a/tools/visual_studio/v8_base_x64.vcproj
+++ b/tools/visual_studio/v8_base_x64.vcproj
@@ -938,6 +938,14 @@
 				>
 			</File>
 			<File
+				RelativePath="..\..\src\unbound-queue-inl.h"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\unbound-queue.h"
+				>
+			</File>
+			<File
 				RelativePath="..\..\src\unicode-inl.h"
 				>
 			</File>
diff --git a/tools/visual_studio/v8_cctest.vcproj b/tools/visual_studio/v8_cctest.vcproj
index 424d226..cca6eba 100644
--- a/tools/visual_studio/v8_cctest.vcproj
+++ b/tools/visual_studio/v8_cctest.vcproj
@@ -248,6 +248,10 @@
 			>
 		</File>
 		<File
+			RelativePath="..\..\test\cctest\test-unbound-queue.cc"
+			>
+		</File>
+		<File
 			RelativePath="..\..\test\cctest\test-utils.cc"
 			>
 		</File>