Push version 1.3.17 to trunk.

Added API method to get simple heap statistics.
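A minimal usage sketch, based on the v8::HeapStatistics class and the
v8::V8::GetHeapStatistics declaration added to include/v8.h below (assumes
V8 has already been initialized, e.g. by creating a context):

    #include <stdio.h>
    #include <v8.h>

    int main() {
      v8::HandleScope scope;
      v8::Persistent<v8::Context> context = v8::Context::New();  // initializes V8
      v8::HeapStatistics stats;           // zero-initialized by its constructor
      v8::V8::GetHeapStatistics(&stats);  // fills committed and used heap sizes
      printf("V8 heap: %lu used of %lu committed bytes\n",
             static_cast<unsigned long>(stats.used_heap_size()),
             static_cast<unsigned long>(stats.total_heap_size()));
      context.Dispose();
      return 0;
    }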

Improved heap profiler support.

Fixed the implementation of the resource constraint API so it works when using snapshots.
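The src/api.cc hunk below now halves max_young_space_size before handing it
to i::Heap::ConfigureHeap, which expects a semispace size. On the embedder
side, a hedged sketch (accessor names mirror the ones used in that hunk):

    v8::ResourceConstraints constraints;
    constraints.set_max_young_space_size(2 * 1024 * 1024);  // whole young space, in bytes
    constraints.set_max_old_space_size(64 * 1024 * 1024);
    v8::SetResourceConstraints(&constraints);  // call before V8 is initialized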

Fixed a number of issues in the Windows 64-bit version.

Optimized calls to API getters.

Added valgrind notification on code modification to the 64-bit version.

Fixed issue where we logged shared library addresses on Windows at startup and never used them.
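This change also adds v8::Object::SetIndexedPropertiesToExternalArrayData
(see the include/v8.h and src/api.cc hunks below). A hedged embedder-side
sketch, assuming a live HandleScope and an entered context:

    static float buffer[256];  // embedder-owned; must outlive the object
    v8::Local<v8::Object> holder = v8::Object::New();
    holder->SetIndexedPropertiesToExternalArrayData(
        buffer, v8::kExternalFloatArray, 256);
    // Indexed access on holder now reads and writes buffer, following the
    // WebGL CanvasArray access rules noted in the v8.h comment.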



git-svn-id: http://v8.googlecode.com/svn/trunk@3167 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index d13d74f..e816f58 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,22 @@
+2009-10-28: Version 1.3.17
+
+        Added API method to get simple heap statistics.
+
+        Improved heap profiler support.
+
+        Fixed the implementation of the resource constraint API so it
+        works when using snapshots.
+
+        Fixed a number of issues in the Windows 64-bit version.
+
+        Optimized calls to API getters.
+
+        Added valgrind notification on code modification to the 64-bit version.
+
+        Fixed issue where we logged shared library addresses on Windows at
+        startup and never used them.
+
+
 2009-10-16: Version 1.3.16
         
         X64: Convert smis to holding 32 bits of payload.
@@ -41,7 +60,7 @@
         Ensure V8 is initialized before locking and unlocking threads.
 
         Implemented a new JavaScript minifier for compressing the source of
-        the built-in JavaScript. This Remove non-Open Source code from Douglas
+        the built-in JavaScript. This removes non-Open Source code from Douglas
         Crockford from the project.
 
         Added a missing optimization in StringCharAt.
diff --git a/include/v8.h b/include/v8.h
index d923f97..5f3b68b 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -452,8 +452,8 @@
   void* operator new(size_t size);
   void operator delete(void*, size_t);
 
-  // This Data class is accessible internally through a typedef in the
-  // ImplementationUtilities class.
+  // This Data class is accessible internally as HandleScopeData through a
+  // typedef in the ImplementationUtilities class.
   class V8EXPORT Data {
    public:
     int extensions;
@@ -1069,7 +1069,7 @@
 class V8EXPORT Integer : public Number {
  public:
   static Local<Integer> New(int32_t value);
-  static inline Local<Integer> NewFromUnsigned(uint32_t value);
+  static Local<Integer> NewFromUnsigned(uint32_t value);
   int64_t Value() const;
   static inline Integer* Cast(v8::Value* obj);
  private:
@@ -1126,6 +1126,16 @@
   DontDelete = 1 << 2
 };
 
+enum ExternalArrayType {
+  kExternalByteArray = 1,
+  kExternalUnsignedByteArray,
+  kExternalShortArray,
+  kExternalUnsignedShortArray,
+  kExternalIntArray,
+  kExternalUnsignedIntArray,
+  kExternalFloatArray
+};
+
 /**
  * A JavaScript object (ECMA-262, 4.3.3)
  */
@@ -1278,6 +1288,17 @@
    */
   void SetIndexedPropertiesToPixelData(uint8_t* data, int length);
 
+  /**
+   * Set the backing store of the indexed properties to be managed by the
+   * embedding layer. Access to the indexed properties will follow the rules
+   * spelled out for the CanvasArray subtypes in the WebGL specification.
+   * Note: The embedding program still owns the data and needs to ensure that
+   *       the backing store is preserved while V8 has a reference.
+   */
+  void SetIndexedPropertiesToExternalArrayData(void* data,
+                                               ExternalArrayType array_type,
+                                               int number_of_elements);
+
   static Local<Object> New();
   static inline Object* Cast(Value* obj);
  private:
@@ -2103,6 +2124,29 @@
 
 
 /**
+ * Collection of V8 heap information.
+ *
+ * Instances of this class can be passed to v8::V8::HeapStatistics to
+ * get heap statistics from V8.
+ */
+class V8EXPORT HeapStatistics {
+ public:
+  HeapStatistics();
+  size_t total_heap_size() { return total_heap_size_; }
+  size_t used_heap_size() { return used_heap_size_; }
+
+ private:
+  void set_total_heap_size(size_t size) { total_heap_size_ = size; }
+  void set_used_heap_size(size_t size) { used_heap_size_ = size; }
+
+  size_t total_heap_size_;
+  size_t used_heap_size_;
+
+  friend class V8;
+};
+
+
+/**
  * Container class for static utility functions.
  */
 class V8EXPORT V8 {
@@ -2352,6 +2396,10 @@
    */
   static bool Dispose();
 
+  /**
+   * Get statistics about the heap memory usage.
+   */
+  static void GetHeapStatistics(HeapStatistics* heap_statistics);
 
   /**
    * Optional notification that the embedder is idle.
@@ -3069,15 +3117,6 @@
 }
 
 
-Local<Integer> Integer::NewFromUnsigned(uint32_t value) {
-  bool fits_into_int32_t = (value & (1 << 31)) == 0;
-  if (fits_into_int32_t) {
-    return Integer::New(static_cast<int32_t>(value));
-  }
-  return Local<Integer>::Cast(Number::New(value));
-}
-
-
 Integer* Integer::Cast(v8::Value* value) {
 #ifdef V8_ENABLE_CHECKS
   CheckCast(value);
diff --git a/src/api.cc b/src/api.cc
index 630fa8f..4cb655d 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -342,10 +342,10 @@
 
 
 bool SetResourceConstraints(ResourceConstraints* constraints) {
-  int semispace_size = constraints->max_young_space_size();
+  int young_space_size = constraints->max_young_space_size();
   int old_gen_size = constraints->max_old_space_size();
-  if (semispace_size != 0 || old_gen_size != 0) {
-    bool result = i::Heap::ConfigureHeap(semispace_size, old_gen_size);
+  if (young_space_size != 0 || old_gen_size != 0) {
+    bool result = i::Heap::ConfigureHeap(young_space_size / 2, old_gen_size);
     if (!result) return false;
   }
   if (constraints->stack_limit() != NULL) {
@@ -2306,6 +2306,30 @@
 }
 
 
+void v8::Object::SetIndexedPropertiesToExternalArrayData(
+    void* data,
+    ExternalArrayType array_type,
+    int length) {
+  ON_BAILOUT("v8::SetIndexedPropertiesToExternalArrayData()", return);
+  ENTER_V8;
+  HandleScope scope;
+  if (!ApiCheck(length <= i::ExternalArray::kMaxLength,
+                "v8::Object::SetIndexedPropertiesToExternalArrayData()",
+                "length exceeds max acceptable value")) {
+    return;
+  }
+  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+  if (!ApiCheck(!self->IsJSArray(),
+                "v8::Object::SetIndexedPropertiesToExternalArrayData()",
+                "JSArray is not supported")) {
+    return;
+  }
+  i::Handle<i::ExternalArray> array =
+      i::Factory::NewExternalArray(length, array_type, data);
+  self->set_elements(*array);
+}
+
+
 Local<v8::Object> Function::NewInstance() const {
   return NewInstance(0, NULL);
 }
@@ -2611,6 +2635,15 @@
 }
 
 
+HeapStatistics::HeapStatistics(): total_heap_size_(0), used_heap_size_(0) { }
+
+
+void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
+  heap_statistics->set_total_heap_size(i::Heap::CommittedMemory());
+  heap_statistics->set_used_heap_size(i::Heap::SizeOfObjects());
+}
+
+
 bool v8::V8::IdleNotification() {
   // Returning true tells the caller that it need not
   // continue to call IdleNotification.
@@ -2620,10 +2653,8 @@
 
 
 void v8::V8::LowMemoryNotification() {
-#if defined(ANDROID)
   if (!i::V8::IsRunning()) return;
   i::Heap::CollectAllGarbage(true);
-#endif
 }
 
 
@@ -3152,6 +3183,10 @@
 Local<v8::Value> v8::Date::New(double time) {
   EnsureInitialized("v8::Date::New()");
   LOG_API("Date::New");
+  if (isnan(time)) {
+    // Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
+    time = i::OS::nan_value();
+  }
   ENTER_V8;
   EXCEPTION_PREAMBLE();
   i::Handle<i::Object> obj =
@@ -3224,6 +3259,10 @@
 
 Local<Number> v8::Number::New(double value) {
   EnsureInitialized("v8::Number::New()");
+  if (isnan(value)) {
+    // Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
+    value = i::OS::nan_value();
+  }
   ENTER_V8;
   i::Handle<i::Object> result = i::Factory::NewNumber(value);
   return Utils::NumberToLocal(result);
@@ -3241,6 +3280,17 @@
 }
 
 
+Local<Integer> Integer::NewFromUnsigned(uint32_t value) {
+  bool fits_into_int32_t = (value & (1 << 31)) == 0;
+  if (fits_into_int32_t) {
+    return Integer::New(static_cast<int32_t>(value));
+  }
+  ENTER_V8;
+  i::Handle<i::Object> result = i::Factory::NewNumber(value);
+  return Utils::IntegerToLocal(result);
+}
+
+
 void V8::IgnoreOutOfMemoryException() {
   thread_local.set_ignore_out_of_memory(true);
 }
diff --git a/src/api.h b/src/api.h
index 1221f35..a28e1f0 100644
--- a/src/api.h
+++ b/src/api.h
@@ -125,6 +125,15 @@
 }
 
 
+class ApiFunction {
+ public:
+  explicit ApiFunction(v8::internal::Address addr) : addr_(addr) { }
+  v8::internal::Address address() { return addr_; }
+ private:
+  v8::internal::Address addr_;
+};
+
+
 v8::Arguments::Arguments(v8::Local<v8::Value> data,
                          v8::Local<v8::Object> holder,
                          v8::Local<v8::Function> callee,
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index 48cc090..d6046ec 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -245,6 +245,12 @@
 }
 
 
+void Assembler::set_target_at(Address constant_pool_entry,
+                              Address target) {
+  Memory::Address_at(constant_pool_entry) = target;
+}
+
+
 void Assembler::set_target_address_at(Address pc, Address target) {
   Memory::Address_at(target_address_address_at(pc)) = target;
   // Intuitively, we would think it is necessary to flush the instruction cache
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index d1df08c..d617c7e 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -437,6 +437,14 @@
   INLINE(static Address target_address_at(Address pc));
   INLINE(static void set_target_address_at(Address pc, Address target));
 
+  // Modify the code target address in a constant pool entry.
+  inline static void set_target_at(Address constant_pool_entry, Address target);
+
+  // Here we are patching the address in the constant pool, not the actual call
+  // instruction.  The address in the constant pool is the same size as a
+  // pointer.
+  static const int kCallTargetSize = kPointerSize;
+
   // Size of an instruction.
   static const int kInstrSize = sizeof(Instr);
 
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index d7afb37..6db554a 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -949,6 +949,8 @@
     const int kGlobalIndex =
         Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
     __ ldr(r2, FieldMemOperand(cp, kGlobalIndex));
+    __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
+    __ ldr(r2, FieldMemOperand(r2, kGlobalIndex));
     __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
 
     __ bind(&patch_receiver);
@@ -1107,6 +1109,8 @@
   const int kGlobalOffset =
       Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
   __ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
+  __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
+  __ ldr(r0, FieldMemOperand(r0, kGlobalOffset));
   __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
 
   // Push the receiver.
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 147c5e3..3292bdc 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -1122,22 +1122,20 @@
 
 void CodeGenerator::CheckStack() {
   VirtualFrame::SpilledScope spilled_scope;
-  if (FLAG_check_stack) {
-    Comment cmnt(masm_, "[ check stack");
-    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
-    // Put the lr setup instruction in the delay slot.  kInstrSize is added to
-    // the implicit 8 byte offset that always applies to operations with pc and
-    // gives a return address 12 bytes down.
-    masm_->add(lr, pc, Operand(Assembler::kInstrSize));
-    masm_->cmp(sp, Operand(ip));
-    StackCheckStub stub;
-    // Call the stub if lower.
-    masm_->mov(pc,
-               Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
-                       RelocInfo::CODE_TARGET),
-               LeaveCC,
-               lo);
-  }
+  Comment cmnt(masm_, "[ check stack");
+  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+  // Put the lr setup instruction in the delay slot.  kInstrSize is added to
+  // the implicit 8 byte offset that always applies to operations with pc and
+  // gives a return address 12 bytes down.
+  masm_->add(lr, pc, Operand(Assembler::kInstrSize));
+  masm_->cmp(sp, Operand(ip));
+  StackCheckStub stub;
+  // Call the stub if lower.
+  masm_->mov(pc,
+             Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
+                     RelocInfo::CODE_TARGET),
+             LeaveCC,
+             lo);
 }
 
 
@@ -1172,9 +1170,9 @@
 
 void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   VirtualFrame::SpilledScope spilled_scope;
+  frame_->EmitPush(cp);
   __ mov(r0, Operand(pairs));
   frame_->EmitPush(r0);
-  frame_->EmitPush(cp);
   __ mov(r0, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
   frame_->EmitPush(r0);
   frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
@@ -2255,12 +2253,10 @@
   VirtualFrame::SpilledScope spilled_scope;
   ASSERT(boilerplate->IsBoilerplate());
 
-  // Push the boilerplate on the stack.
-  __ mov(r0, Operand(boilerplate));
-  frame_->EmitPush(r0);
-
   // Create a new closure.
   frame_->EmitPush(cp);
+  __ mov(r0, Operand(boilerplate));
+  frame_->EmitPush(r0);
   frame_->CallRuntime(Runtime::kNewClosure, 2);
   frame_->EmitPush(r0);
 }
@@ -5799,7 +5795,7 @@
                               Label* throw_normal_exception,
                               Label* throw_termination_exception,
                               Label* throw_out_of_memory_exception,
-                              StackFrame::Type frame_type,
+                              ExitFrame::Mode mode,
                               bool do_gc,
                               bool always_allocate) {
   // r0: result parameter for PerformGC, if any
@@ -5859,7 +5855,7 @@
   // r0:r1: result
   // sp: stack pointer
   // fp: frame pointer
-  __ LeaveExitFrame(frame_type);
+  __ LeaveExitFrame(mode);
 
   // check if we should retry or throw exception
   Label retry;
@@ -5905,12 +5901,12 @@
   // this by performing a garbage collection and retrying the
   // builtin once.
 
-  StackFrame::Type frame_type = is_debug_break
-      ? StackFrame::EXIT_DEBUG
-      : StackFrame::EXIT;
+  ExitFrame::Mode mode = is_debug_break
+      ? ExitFrame::MODE_DEBUG
+      : ExitFrame::MODE_NORMAL;
 
   // Enter the exit frame that transitions from JavaScript to C++.
-  __ EnterExitFrame(frame_type);
+  __ EnterExitFrame(mode);
 
   // r4: number of arguments (C callee-saved)
   // r5: pointer to builtin function (C callee-saved)
@@ -5925,7 +5921,7 @@
                &throw_normal_exception,
                &throw_termination_exception,
                &throw_out_of_memory_exception,
-               frame_type,
+               mode,
                false,
                false);
 
@@ -5934,7 +5930,7 @@
                &throw_normal_exception,
                &throw_termination_exception,
                &throw_out_of_memory_exception,
-               frame_type,
+               mode,
                true,
                false);
 
@@ -5945,7 +5941,7 @@
                &throw_normal_exception,
                &throw_termination_exception,
                &throw_out_of_memory_exception,
-               frame_type,
+               mode,
                true,
                true);
 
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 7b50b01..e079950 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -242,7 +242,7 @@
   void LoadReference(Reference* ref);
   void UnloadReference(Reference* ref);
 
-  MemOperand ContextOperand(Register context, int index) const {
+  static MemOperand ContextOperand(Register context, int index) {
     return MemOperand(context, Context::SlotOffset(index));
   }
 
@@ -254,7 +254,7 @@
                                                JumpTarget* slow);
 
   // Expressions
-  MemOperand GlobalObject() const  {
+  static MemOperand GlobalObject()  {
     return ContextOperand(cp, Context::GLOBAL_INDEX);
   }
 
@@ -330,10 +330,11 @@
                                       const InlineRuntimeLUT& new_entry,
                                       InlineRuntimeLUT* old_entry);
 
+  static Handle<Code> ComputeLazyCompile(int argc);
   Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
   void ProcessDeclarations(ZoneList<Declaration*>* declarations);
 
-  Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
+  static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
 
   // Declare global variables and functions in the given array of
   // name/value pairs.
@@ -425,6 +426,8 @@
   friend class VirtualFrame;
   friend class JumpTarget;
   friend class Reference;
+  friend class FastCodeGenerator;
+  friend class CodeGenSelector;
 
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
diff --git a/src/arm/fast-codegen-arm.cc b/src/arm/fast-codegen-arm.cc
index d2e620c..21ee6d7 100644
--- a/src/arm/fast-codegen-arm.cc
+++ b/src/arm/fast-codegen-arm.cc
@@ -29,6 +29,7 @@
 
 #include "codegen-inl.h"
 #include "fast-codegen.h"
+#include "parser.h"
 
 namespace v8 {
 namespace internal {
@@ -62,27 +63,32 @@
     if (locals_count > 0) {
       __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
     }
-    if (FLAG_check_stack) {
-      __ LoadRoot(r2, Heap::kStackLimitRootIndex);
-    }
+    __ LoadRoot(r2, Heap::kStackLimitRootIndex);
     for (int i = 0; i < locals_count; i++) {
       __ push(ip);
     }
   }
 
-  if (FLAG_check_stack) {
-    // Put the lr setup instruction in the delay slot.  The kInstrSize is
-    // added to the implicit 8 byte offset that always applies to operations
-    // with pc and gives a return address 12 bytes down.
-    Comment cmnt(masm_, "[ Stack check");
-    __ add(lr, pc, Operand(Assembler::kInstrSize));
-    __ cmp(sp, Operand(r2));
-    StackCheckStub stub;
-    __ mov(pc,
-           Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
-                   RelocInfo::CODE_TARGET),
-           LeaveCC,
-           lo);
+  // Check the stack for overflow or a break request.
+  // Put the lr setup instruction in the delay slot.  The kInstrSize is
+  // added to the implicit 8 byte offset that always applies to operations
+  // with pc and gives a return address 12 bytes down.
+  Comment cmnt(masm_, "[ Stack check");
+  __ add(lr, pc, Operand(Assembler::kInstrSize));
+  __ cmp(sp, Operand(r2));
+  StackCheckStub stub;
+  __ mov(pc,
+         Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
+                 RelocInfo::CODE_TARGET),
+         LeaveCC,
+         lo);
+
+  { Comment cmnt(masm_, "[ Declarations");
+    VisitDeclarations(fun->scope()->declarations());
+  }
+
+  if (FLAG_trace) {
+    __ CallRuntime(Runtime::kTraceEnter, 0);
   }
 
   { Comment cmnt(masm_, "[ Body");
@@ -94,6 +100,13 @@
     // body.
     __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
     SetReturnPosition(fun);
+    if (FLAG_trace) {
+      // Push the return value on the stack as the parameter.
+      // Runtime::TraceExit returns its parameter in r0.
+      __ push(r0);
+      __ CallRuntime(Runtime::kTraceExit, 1);
+    }
+
     __ RecordJSReturn();
     __ mov(sp, fp);
     __ ldm(ia_w, sp, fp.bit() | lr.bit());
@@ -104,52 +117,311 @@
 }
 
 
-void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
-  Comment cmnt(masm_, "[ ExpressionStatement");
-  SetStatementPosition(stmt);
-  Visit(stmt->expression());
+void FastCodeGenerator::Move(Location destination, Slot* source) {
+  switch (destination.type()) {
+    case Location::NOWHERE:
+      break;
+    case Location::TEMP:
+      __ ldr(ip, MemOperand(fp, SlotOffset(source)));
+      __ push(ip);
+      break;
+  }
+}
+
+
+void FastCodeGenerator::Move(Location destination, Literal* expr) {
+  switch (destination.type()) {
+    case Location::NOWHERE:
+      break;
+    case Location::TEMP:
+      __ mov(ip, Operand(expr->handle()));
+      __ push(ip);
+      break;
+  }
+}
+
+
+void FastCodeGenerator::Move(Slot* destination, Location source) {
+  switch (source.type()) {
+    case Location::NOWHERE:
+      UNREACHABLE();
+    case Location::TEMP:
+      __ pop(ip);
+      __ str(ip, MemOperand(fp, SlotOffset(destination)));
+      break;
+  }
+}
+
+
+void FastCodeGenerator::DropAndMove(Location destination, Register source) {
+  switch (destination.type()) {
+    case Location::NOWHERE:
+      __ pop();
+      break;
+    case Location::TEMP:
+      __ str(source, MemOperand(sp));
+      break;
+  }
+}
+
+
+void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+  // Call the runtime to declare the globals.
+  // The context is the first argument.
+  __ mov(r1, Operand(pairs));
+  __ mov(r0, Operand(Smi::FromInt(is_eval_ ? 1 : 0)));
+  __ stm(db_w, sp, cp.bit() | r1.bit() | r0.bit());
+  __ CallRuntime(Runtime::kDeclareGlobals, 3);
+  // Return value is ignored.
 }
 
 
 void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
   Comment cmnt(masm_, "[ ReturnStatement");
   SetStatementPosition(stmt);
-  Visit(stmt->expression());
-  __ pop(r0);
+  Expression* expr = stmt->expression();
+  // Complete the statement based on the type of the subexpression.
+  if (expr->AsLiteral() != NULL) {
+    __ mov(r0, Operand(expr->AsLiteral()->handle()));
+  } else {
+    Visit(expr);
+    Move(r0, expr->location());
+  }
+
+  if (FLAG_trace) {
+    __ push(r0);
+    __ CallRuntime(Runtime::kTraceExit, 1);
+  }
+
   __ RecordJSReturn();
   __ mov(sp, fp);
   __ ldm(ia_w, sp, fp.bit() | lr.bit());
-    int num_parameters = function_->scope()->num_parameters();
+  int num_parameters = function_->scope()->num_parameters();
   __ add(sp, sp, Operand((num_parameters + 1) * kPointerSize));
   __ Jump(lr);
 }
 
 
+void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+  Comment cmnt(masm_, "[ FunctionLiteral");
+
+  // Build the function boilerplate and instantiate it.
+  Handle<JSFunction> boilerplate = BuildBoilerplate(expr);
+  if (HasStackOverflow()) return;
+
+  ASSERT(boilerplate->IsBoilerplate());
+
+  // Create a new closure.
+  __ mov(r0, Operand(boilerplate));
+  __ stm(db_w, sp, cp.bit() | r0.bit());
+  __ CallRuntime(Runtime::kNewClosure, 2);
+  Move(expr->location(), r0);
+}
+
+
 void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
   Comment cmnt(masm_, "[ VariableProxy");
   Expression* rewrite = expr->var()->rewrite();
-  ASSERT(rewrite != NULL);
-
-  Slot* slot = rewrite->AsSlot();
-  ASSERT(slot != NULL);
-  { Comment cmnt(masm_, "[ Slot");
-    if (expr->location().is_temporary()) {
-      __ ldr(ip, MemOperand(fp, SlotOffset(slot)));
-      __ push(ip);
-    } else {
-      ASSERT(expr->location().is_nowhere());
-    }
+  if (rewrite == NULL) {
+    Comment cmnt(masm_, "Global variable");
+    // Use inline caching. Variable name is passed in r2 and the global
+    // object on the stack.
+    __ ldr(ip, CodeGenerator::GlobalObject());
+    __ push(ip);
+    __ mov(r2, Operand(expr->name()));
+    Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+    __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+    DropAndMove(expr->location(), r0);
+  } else {
+    Comment cmnt(masm_, "Stack slot");
+    Move(expr->location(), rewrite->AsSlot());
   }
 }
 
 
-void FastCodeGenerator::VisitLiteral(Literal* expr) {
-  Comment cmnt(masm_, "[ Literal");
-  if (expr->location().is_temporary()) {
-    __ mov(ip, Operand(expr->handle()));
-    __ push(ip);
+void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+  Comment cmnt(masm_, "[ ObjectLiteral");
+  Label boilerplate_exists;
+  __ ldr(r2, MemOperand(fp,  JavaScriptFrameConstants::kFunctionOffset));
+  // r2 = literal array (0).
+  __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
+  int literal_offset =
+      FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+  __ ldr(r0, FieldMemOperand(r2, literal_offset));
+  // Check whether we need to materialize the object literal boilerplate.
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(r0, Operand(ip));
+  __ b(ne, &boilerplate_exists);
+  // Create boilerplate if it does not exist.
+  // r1 = literal index (1).
+  __ mov(r1, Operand(Smi::FromInt(expr->literal_index())));
+  // r0 = constant properties (2).
+  __ mov(r0, Operand(expr->constant_properties()));
+  __ stm(db_w, sp, r2.bit() | r1.bit() | r0.bit());
+  __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+  __ bind(&boilerplate_exists);
+  // r0 contains boilerplate.
+  // Clone boilerplate.
+  __ push(r0);
+  if (expr->depth() > 1) {
+    __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
   } else {
-    ASSERT(expr->location().is_nowhere());
+    __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+  }
+
+  // If result_saved == true: the result is saved on top of the stack.
+  // If result_saved == false: the result is in r0.
+  bool result_saved = false;
+
+  for (int i = 0; i < expr->properties()->length(); i++) {
+    ObjectLiteral::Property* property = expr->properties()->at(i);
+    if (property->IsCompileTimeValue()) continue;
+
+    Literal* key = property->key();
+    Expression* value = property->value();
+    if (!result_saved) {
+      __ push(r0);  // Save result on stack
+      result_saved = true;
+    }
+    switch (property->kind()) {
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:   // fall through
+        ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
+      case ObjectLiteral::Property::COMPUTED:  // fall through
+      case ObjectLiteral::Property::PROTOTYPE:
+        __ push(r0);
+        Visit(key);
+        ASSERT(key->location().is_temporary());
+        Visit(value);
+        ASSERT(value->location().is_temporary());
+        __ CallRuntime(Runtime::kSetProperty, 3);
+        __ ldr(r0, MemOperand(sp));  // Restore result into r0
+        break;
+      case ObjectLiteral::Property::SETTER:  // fall through
+      case ObjectLiteral::Property::GETTER:
+        __ push(r0);
+        Visit(key);
+        ASSERT(key->location().is_temporary());
+        __ mov(r1, Operand(property->kind() == ObjectLiteral::Property::SETTER ?
+                           Smi::FromInt(1) :
+                           Smi::FromInt(0)));
+        __ push(r1);
+        Visit(value);
+        ASSERT(value->location().is_temporary());
+        __ CallRuntime(Runtime::kDefineAccessor, 4);
+        __ ldr(r0, MemOperand(sp));  // Restore result into r0
+        break;
+      default: UNREACHABLE();
+    }
+  }
+  switch (expr->location().type()) {
+    case Location::NOWHERE:
+      if (result_saved) __ pop();
+      break;
+    case Location::TEMP:
+      if (!result_saved) __ push(r0);
+      break;
+  }
+}
+
+
+void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+  Comment cmnt(masm_, "[ RegExp Literal");
+  Label done;
+  // Registers will be used as follows:
+  // r4 = JS function, literals array
+  // r3 = literal index
+  // r2 = RegExp pattern
+  // r1 = RegExp flags
+  // r0 = temp + return value (RegExp literal)
+  __ ldr(r0, MemOperand(fp,  JavaScriptFrameConstants::kFunctionOffset));
+  __ ldr(r4,  FieldMemOperand(r0, JSFunction::kLiteralsOffset));
+  int literal_offset =
+    FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+  __ ldr(r0, FieldMemOperand(r4, literal_offset));
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(r0, ip);
+  __ b(ne, &done);
+  __ mov(r3, Operand(Smi::FromInt(expr->literal_index())));
+  __ mov(r2, Operand(expr->pattern()));
+  __ mov(r1, Operand(expr->flags()));
+  __ stm(db_w, sp, r4.bit() | r3.bit() | r2.bit() | r1.bit());
+  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+  __ bind(&done);
+  Move(expr->location(), r0);
+}
+
+
+void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+  Comment cmnt(masm_, "[ ArrayLiteral");
+  Label make_clone;
+
+  // Fetch the function's literals array.
+  __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
+  // Check if the literal's boilerplate has been instantiated.
+  int offset =
+      FixedArray::kHeaderSize + (expr->literal_index() * kPointerSize);
+  __ ldr(r0, FieldMemOperand(r3, offset));
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(r0, ip);
+  __ b(&make_clone, ne);
+
+  // Instantiate the boilerplate.
+  __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
+  __ mov(r1, Operand(expr->literals()));
+  __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
+  __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
+
+  __ bind(&make_clone);
+  // Clone the boilerplate.
+  __ push(r0);
+  if (expr->depth() > 1) {
+    __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+  } else {
+    __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+  }
+
+  bool result_saved = false;  // Is the result saved to the stack?
+
+  // Emit code to evaluate all the non-constant subexpressions and to store
+  // them into the newly cloned array.
+  ZoneList<Expression*>* subexprs = expr->values();
+  for (int i = 0, len = subexprs->length(); i < len; i++) {
+    Expression* subexpr = subexprs->at(i);
+    // If the subexpression is a literal or a simple materialized literal it
+    // is already set in the cloned array.
+    if (subexpr->AsLiteral() != NULL ||
+        CompileTimeValue::IsCompileTimeValue(subexpr)) {
+      continue;
+    }
+
+    if (!result_saved) {
+      __ push(r0);
+      result_saved = true;
+    }
+    Visit(subexpr);
+    ASSERT(subexpr->location().is_temporary());
+
+    // Store the subexpression value in the array's elements.
+    __ pop(r0);  // Subexpression value.
+    __ ldr(r1, MemOperand(sp));  // Copy of array literal.
+    __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
+    int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+    __ str(r0, FieldMemOperand(r1, offset));
+
+    // Update the write barrier for the array store with r0 as the scratch
+    // register.
+    __ mov(r2, Operand(offset));
+    __ RecordWrite(r1, r2, r0);
+  }
+
+  switch (expr->location().type()) {
+    case Location::NOWHERE:
+      if (result_saved) __ pop();
+      break;
+    case Location::TEMP:
+      if (!result_saved) __ push(r0);
+      break;
   }
 }
 
@@ -158,19 +430,239 @@
   Comment cmnt(masm_, "[ Assignment");
   ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR);
 
-  Visit(expr->value());
-
+  // Left-hand side can only be a global or a (parameter or local) slot.
   Variable* var = expr->target()->AsVariableProxy()->AsVariable();
-  ASSERT(var != NULL && var->slot() != NULL);
+  ASSERT(var != NULL);
+  ASSERT(var->is_global() || var->slot() != NULL);
 
-  if (expr->location().is_temporary()) {
-    __ ldr(ip, MemOperand(sp));
+  Expression* rhs = expr->value();
+  Location destination = expr->location();
+  if (var->is_global()) {
+    // Assignment to a global variable, use inline caching.  Right-hand-side
+    // value is passed in r0, variable name in r2, and the global object on
+    // the stack.
+
+    // Code for the right-hand-side expression depends on its type.
+    if (rhs->AsLiteral() != NULL) {
+      __ mov(r0, Operand(rhs->AsLiteral()->handle()));
+    } else {
+      ASSERT(rhs->location().is_temporary());
+      Visit(rhs);
+      __ pop(r0);
+    }
+    __ mov(r2, Operand(var->name()));
+    __ ldr(ip, CodeGenerator::GlobalObject());
+    __ push(ip);
+    Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+    __ Call(ic, RelocInfo::CODE_TARGET);
+    // Overwrite the global object on the stack with the result if needed.
+    DropAndMove(expr->location(), r0);
   } else {
-    ASSERT(expr->location().is_nowhere());
-    __ pop(ip);
+    // Local or parameter assignment.
+
+    // Code for the right-hand side expression depends on its type.
+    if (rhs->AsLiteral() != NULL) {
+      // Two cases: 'temp <- (var = constant)', or 'var = constant' with a
+      // discarded result.  Always perform the assignment.
+      __ mov(ip, Operand(rhs->AsLiteral()->handle()));
+      __ str(ip, MemOperand(fp, SlotOffset(var->slot())));
+      Move(expr->location(), ip);
+    } else {
+      ASSERT(rhs->location().is_temporary());
+      Visit(rhs);
+      // Load right-hand side into ip.
+      switch (expr->location().type()) {
+        case Location::NOWHERE:
+          // Case 'var = temp'.  Discard right-hand-side temporary.
+          __ pop(ip);
+          break;
+        case Location::TEMP:
+          // Case 'temp1 <- (var = temp0)'.  Preserve right-hand-side
+          // temporary on the stack.
+          __ ldr(ip, MemOperand(sp));
+          break;
+      }
+      // Do the slot assignment.
+      __ str(ip, MemOperand(fp, SlotOffset(var->slot())));
+    }
   }
-  __ str(ip, MemOperand(fp, SlotOffset(var->slot())));
 }
 
 
+void FastCodeGenerator::VisitProperty(Property* expr) {
+  Comment cmnt(masm_, "[ Property");
+  Expression* key = expr->key();
+  uint32_t dummy;
+
+  // Record the source position for the property load.
+  SetSourcePosition(expr->position());
+
+  // Evaluate receiver.
+  Visit(expr->obj());
+
+  if (key->AsLiteral() != NULL && key->AsLiteral()->handle()->IsSymbol() &&
+      !String::cast(*(key->AsLiteral()->handle()))->AsArrayIndex(&dummy)) {
+    // Do a NAMED property load.
+    // The IC expects the property name in r2 and the receiver on the stack.
+    __ mov(r2, Operand(key->AsLiteral()->handle()));
+    Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+    __ Call(ic, RelocInfo::CODE_TARGET);
+    // By emitting a nop we make sure that there is no "test eax,..."
+    // instruction after the call, which the LoadIC code treats specially.
+    __ nop();
+  } else {
+    // Do a KEYED property load.
+    Visit(expr->key());
+    Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+    __ Call(ic, RelocInfo::CODE_TARGET);
+    // By emitting a nop we make sure that there is no "test eax,..."
+    // instruction after the call, which the LoadIC code treats specially.
+    __ nop();
+    // Drop key and receiver left on the stack by IC.
+    __ pop();
+  }
+  switch (expr->location().type()) {
+    case Location::TEMP:
+      __ str(r0, MemOperand(sp));
+      break;
+    case Location::NOWHERE:
+      __ pop();
+  }
+}
+
+
+void FastCodeGenerator::VisitCall(Call* expr) {
+  Comment cmnt(masm_, "[ Call");
+  Expression* fun = expr->expression();
+  ZoneList<Expression*>* args = expr->arguments();
+  Variable* var = fun->AsVariableProxy()->AsVariable();
+  ASSERT(var != NULL && !var->is_this() && var->is_global());
+  ASSERT(!var->is_possibly_eval());
+
+  __ mov(r1, Operand(var->name()));
+  // Push global object as receiver.
+  __ ldr(r0, CodeGenerator::GlobalObject());
+  __ stm(db_w, sp, r1.bit() | r0.bit());
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Visit(args->at(i));
+    ASSERT(args->at(i)->location().is_temporary());
+  }
+  // Record source position for debugger
+  SetSourcePosition(expr->position());
+  // Call the IC initialization code.
+  Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
+                                                         NOT_IN_LOOP);
+  __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+  // Restore context register.
+  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  DropAndMove(expr->location(), r0);
+}
+
+
+void FastCodeGenerator::VisitCallNew(CallNew* node) {
+  Comment cmnt(masm_, "[ CallNew");
+  // According to ECMA-262, section 11.2.2, page 44, the function
+  // expression in new calls must be evaluated before the
+  // arguments.
+  // Push function on the stack.
+  Visit(node->expression());
+  ASSERT(node->expression()->location().is_temporary());
+
+  // Push global object (receiver).
+  __ ldr(r0, CodeGenerator::GlobalObject());
+  __ push(r0);
+  // Push the arguments ("left-to-right") on the stack.
+  ZoneList<Expression*>* args = node->arguments();
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Visit(args->at(i));
+    ASSERT(args->at(i)->location().is_temporary());
+    // If location is temporary, it is already on the stack,
+    // so nothing to do here.
+  }
+
+  // Call the construct call builtin that handles allocation and
+  // constructor invocation.
+  SetSourcePosition(node->position());
+
+  // Load function, arg_count into r1 and r0.
+  __ mov(r0, Operand(arg_count));
+  // Function is in sp[arg_count + 1].
+  __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+
+  Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
+  __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+
+  // Replace function on TOS with result in r0, or pop it.
+  DropAndMove(node->location(), r0);
+}
+
+
+void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+  Comment cmnt(masm_, "[ CallRuntime");
+  ZoneList<Expression*>* args = expr->arguments();
+  Runtime::Function* function = expr->function();
+
+  ASSERT(function != NULL);
+
+  // Push the arguments ("left-to-right").
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Visit(args->at(i));
+    ASSERT(args->at(i)->location().is_temporary());
+  }
+
+  __ CallRuntime(function, arg_count);
+  Move(expr->location(), r0);
+}
+
+
+void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+  // Compile a short-circuited boolean or operation in a non-test
+  // context.
+  ASSERT(expr->op() == Token::OR);
+  // Compile (e0 || e1) as if it were
+  // (let (temp = e0) temp ? temp : e1).
+
+  Label done;
+  Location destination = expr->location();
+  Expression* left = expr->left();
+  Expression* right = expr->right();
+
+  // Call the runtime to find the boolean value of the left-hand
+  // subexpression.  Duplicate the value if it may be needed as the final
+  // result.
+  if (left->AsLiteral() != NULL) {
+    __ mov(r0, Operand(left->AsLiteral()->handle()));
+    __ push(r0);
+    if (destination.is_temporary()) __ push(r0);
+  } else {
+    Visit(left);
+    ASSERT(left->location().is_temporary());
+    if (destination.is_temporary()) {
+      __ ldr(r0, MemOperand(sp));
+      __ push(r0);
+    }
+  }
+  // The left-hand value is on top of the stack.  It is duplicated on the
+  // stack iff the destination location is temporary.
+  __ CallRuntime(Runtime::kToBool, 1);
+  __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+  __ cmp(r0, ip);
+  __ b(eq, &done);
+
+  // Discard the left-hand value if present on the stack.
+  if (destination.is_temporary()) __ pop();
+  // Save or discard the right-hand value as needed.
+  if (right->AsLiteral() != NULL) {
+    Move(destination, right->AsLiteral());
+  } else {
+    Visit(right);
+    Move(destination, right->location());
+  }
+
+  __ bind(&done);
+}
+
 } }  // namespace v8::internal
diff --git a/src/arm/frames-arm.cc b/src/arm/frames-arm.cc
index 6fde4b7..b0fa13a 100644
--- a/src/arm/frames-arm.cc
+++ b/src/arm/frames-arm.cc
@@ -54,23 +54,24 @@
   if (fp == 0) return NONE;
   // Compute frame type and stack pointer.
   Address sp = fp + ExitFrameConstants::kSPDisplacement;
-  Type type;
-  if (Memory::Address_at(fp + ExitFrameConstants::kDebugMarkOffset) != 0) {
-    type = EXIT_DEBUG;
+  const int offset = ExitFrameConstants::kCodeOffset;
+  Object* code = Memory::Object_at(fp + offset);
+  bool is_debug_exit = code->IsSmi();
+  if (is_debug_exit) {
     sp -= kNumJSCallerSaved * kPointerSize;
-  } else {
-    type = EXIT;
   }
   // Fill in the state.
   state->sp = sp;
   state->fp = fp;
   state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
-  return type;
+  return EXIT;
 }
 
 
 void ExitFrame::Iterate(ObjectVisitor* v) const {
-  // Do nothing
+  v->VisitPointer(&code_slot());
+  // The arguments are traversed as part of the expression stack of
+  // the calling frame.
 }
 
 
diff --git a/src/arm/frames-arm.h b/src/arm/frames-arm.h
index 0874c09..4924c1a 100644
--- a/src/arm/frames-arm.h
+++ b/src/arm/frames-arm.h
@@ -100,7 +100,7 @@
   static const int kSPDisplacement = -1 * kPointerSize;
 
   // The debug marker is just above the frame pointer.
-  static const int kDebugMarkOffset = -1 * kPointerSize;
+  static const int kCodeOffset = -1 * kPointerSize;
 
   static const int kSavedRegistersOffset = 0 * kPointerSize;
 
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index d230b45..ba83645 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -615,6 +615,13 @@
 }
 
 
+void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
+                                        ExternalArrayType array_type) {
+  // TODO(476): port specialized code.
+  GenerateGeneric(masm);
+}
+
+
 void KeyedStoreIC::Generate(MacroAssembler* masm,
                             const ExternalReference& f) {
   // ---------- S t a t e --------------
@@ -748,6 +755,13 @@
 }
 
 
+void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
+                                         ExternalArrayType array_type) {
+  // TODO(476): port specialized code.
+  GenerateGeneric(masm);
+}
+
+
 void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
   // ---------- S t a t e --------------
   //  -- r0     : value
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 45c6540..dc73bad 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -274,9 +274,7 @@
 }
 
 
-void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
-  ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
-
+void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) {
   // Compute the argv pointer and keep it in a callee-saved register.
   // r0 is argc.
   add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
@@ -298,8 +296,11 @@
   stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
   mov(fp, Operand(sp));  // setup new frame pointer
 
-  // Push debug marker.
-  mov(ip, Operand(type == StackFrame::EXIT_DEBUG ? 1 : 0));
+  if (mode == ExitFrame::MODE_DEBUG) {
+    mov(ip, Operand(Smi::FromInt(0)));
+  } else {
+    mov(ip, Operand(CodeObject()));
+  }
   push(ip);
 
   // Save the frame pointer and the context in top.
@@ -316,7 +317,7 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Save the state of all registers to the stack from the memory
   // location. This is needed to allow nested break points.
-  if (type == StackFrame::EXIT_DEBUG) {
+  if (mode == ExitFrame::MODE_DEBUG) {
     // Use sp as base to push.
     CopyRegistersFromMemoryToStack(sp, kJSCallerSaved);
   }
@@ -348,14 +349,14 @@
 }
 
 
-void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
+void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Restore the memory copy of the registers by digging them out from
   // the stack. This is needed to allow nested break points.
-  if (type == StackFrame::EXIT_DEBUG) {
+  if (mode == ExitFrame::MODE_DEBUG) {
     // This code intentionally clobbers r2 and r3.
     const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
-    const int kOffset = ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize;
+    const int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize;
     add(r3, fp, Operand(kOffset));
     CopyRegistersFromStackToMemory(r3, r2, kJSCallerSaved);
   }
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index e37bb5e..6dc2b7a 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -87,14 +87,14 @@
   void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
   void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
 
-  // Enter specific kind of exit frame; either EXIT or
-  // EXIT_DEBUG. Expects the number of arguments in register r0 and
+  // Enter specific kind of exit frame; either normal or debug mode.
+  // Expects the number of arguments in register r0 and
   // the builtin function to call in register r1. Exits with argc in
   // r4, argv in r6, and and the builtin function to call in r5.
-  void EnterExitFrame(StackFrame::Type type);
+  void EnterExitFrame(ExitFrame::Mode mode);
 
   // Leave the current exit frame. Expects the return value in r0.
-  void LeaveExitFrame(StackFrame::Type type);
+  void LeaveExitFrame(ExitFrame::Mode mode);
 
   // Align the stack by optionally pushing a Smi zero.
   void AlignStack(int offset);
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index 2e75a61..bd50428 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -29,6 +29,7 @@
 #include "unicode.h"
 #include "log.h"
 #include "ast.h"
+#include "code-stubs.h"
 #include "regexp-stack.h"
 #include "macro-assembler.h"
 #include "regexp-macro-assembler.h"
@@ -1099,14 +1100,12 @@
 
 
 void RegExpMacroAssemblerARM::CheckStackLimit() {
-  if (FLAG_check_stack) {
-    ExternalReference stack_limit =
-        ExternalReference::address_of_regexp_stack_limit();
-    __ mov(r0, Operand(stack_limit));
-    __ ldr(r0, MemOperand(r0));
-    __ cmp(backtrack_stackpointer(), Operand(r0));
-    SafeCall(&stack_overflow_label_, ls);
-  }
+  ExternalReference stack_limit =
+      ExternalReference::address_of_regexp_stack_limit();
+  __ mov(r0, Operand(stack_limit));
+  __ ldr(r0, MemOperand(r0));
+  __ cmp(backtrack_stackpointer(), Operand(r0));
+  SafeCall(&stack_overflow_label_, ls);
 }
 
 
diff --git a/src/arm/regexp-macro-assembler-arm.h b/src/arm/regexp-macro-assembler-arm.h
index 0711ac1..f70bc05 100644
--- a/src/arm/regexp-macro-assembler-arm.h
+++ b/src/arm/regexp-macro-assembler-arm.h
@@ -260,6 +260,21 @@
 };
 
 
+// Enter C code from generated RegExp code in a way that allows
+// the C code to fix the return address in case of a GC.
+// Currently only needed on ARM.
+class RegExpCEntryStub: public CodeStub {
+ public:
+  RegExpCEntryStub() {}
+  virtual ~RegExpCEntryStub() {}
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Major MajorKey() { return RegExpCEntry; }
+  int MinorKey() { return 0; }
+  const char* GetName() { return "RegExpCEntryStub"; }
+};
+
 #endif  // V8_NATIVE_REGEXP
 
 
diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc
index 2d5b140..47ecb96 100644
--- a/src/arm/virtual-frame-arm.cc
+++ b/src/arm/virtual-frame-arm.cc
@@ -146,29 +146,27 @@
       // Initialize stack slots with 'undefined' value.
     __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
   }
-  if (FLAG_check_stack) {
-    __ LoadRoot(r2, Heap::kStackLimitRootIndex);
-  }
+  __ LoadRoot(r2, Heap::kStackLimitRootIndex);
   for (int i = 0; i < count; i++) {
     __ push(ip);
   }
-  if (FLAG_check_stack) {
-    // Put the lr setup instruction in the delay slot.  The kInstrSize is added
-    // to the implicit 8 byte offset that always applies to operations with pc
-    // and gives a return address 12 bytes down.
-    masm()->add(lr, pc, Operand(Assembler::kInstrSize));
-    masm()->cmp(sp, Operand(r2));
-    StackCheckStub stub;
-    // Call the stub if lower.
-    masm()->mov(pc,
-                Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
-                        RelocInfo::CODE_TARGET),
-                LeaveCC,
-                lo);
-  }
+  // Check the stack for overflow or a break request.
+  // Put the lr setup instruction in the delay slot.  The kInstrSize is added
+  // to the implicit 8 byte offset that always applies to operations with pc
+  // and gives a return address 12 bytes down.
+  masm()->add(lr, pc, Operand(Assembler::kInstrSize));
+  masm()->cmp(sp, Operand(r2));
+  StackCheckStub stub;
+  // Call the stub if lower.
+  masm()->mov(pc,
+              Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
+                      RelocInfo::CODE_TARGET),
+              LeaveCC,
+              lo);
 }
 
 
+
 void VirtualFrame::SaveContextRegister() {
   UNIMPLEMENTED();
 }
@@ -255,7 +253,7 @@
 
 
 void VirtualFrame::RawCallCodeObject(Handle<Code> code,
-                                       RelocInfo::Mode rmode) {
+                                     RelocInfo::Mode rmode) {
   ASSERT(cgen()->HasValidEntryRegisters());
   __ Call(code, rmode);
 }
diff --git a/src/assembler.cc b/src/assembler.cc
index 34595f8..34346a9 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -522,6 +522,10 @@
   : address_(Redirect(Builtins::c_function_address(id))) {}
 
 
+ExternalReference::ExternalReference(ApiFunction* fun)
+  : address_(Redirect(fun->address())) {}
+
+
 ExternalReference::ExternalReference(Builtins::Name name)
   : address_(Builtins::builtin_address(name)) {}
 
@@ -608,6 +612,27 @@
   return ExternalReference(Heap::NewSpaceAllocationLimitAddress());
 }
 
+
+ExternalReference ExternalReference::handle_scope_extensions_address() {
+  return ExternalReference(HandleScope::current_extensions_address());
+}
+
+
+ExternalReference ExternalReference::handle_scope_next_address() {
+  return ExternalReference(HandleScope::current_next_address());
+}
+
+
+ExternalReference ExternalReference::handle_scope_limit_address() {
+  return ExternalReference(HandleScope::current_limit_address());
+}
+
+
+ExternalReference ExternalReference::scheduled_exception_address() {
+  return ExternalReference(Top::scheduled_exception_address());
+}
+
+
 #ifdef V8_NATIVE_REGEXP
 
 ExternalReference ExternalReference::re_check_stack_guard_state() {
diff --git a/src/assembler.h b/src/assembler.h
index 21a66dd..311dadd 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -373,6 +373,8 @@
  public:
   explicit ExternalReference(Builtins::CFunctionId id);
 
+  explicit ExternalReference(ApiFunction* ptr);
+
   explicit ExternalReference(Builtins::Name name);
 
   explicit ExternalReference(Runtime::FunctionId id);
@@ -422,6 +424,12 @@
   static ExternalReference double_fp_operation(Token::Value operation);
   static ExternalReference compare_doubles();
 
+  static ExternalReference handle_scope_extensions_address();
+  static ExternalReference handle_scope_next_address();
+  static ExternalReference handle_scope_limit_address();
+
+  static ExternalReference scheduled_exception_address();
+
   Address address() const {return reinterpret_cast<Address>(address_);}
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
diff --git a/src/ast.cc b/src/ast.cc
index f6864b8..90b5ed6 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -28,6 +28,7 @@
 #include "v8.h"
 
 #include "ast.h"
+#include "parser.h"
 #include "scopes.h"
 #include "string-stream.h"
 
@@ -138,6 +139,13 @@
 }
 
 
+bool ObjectLiteral::Property::IsCompileTimeValue() {
+  return kind_ == CONSTANT ||
+      (kind_ == MATERIALIZED_LITERAL &&
+       CompileTimeValue::IsCompileTimeValue(value_));
+}
+
+
 bool ObjectLiteral::IsValidJSON() {
   int length = properties()->length();
   for (int i = 0; i < length; i++) {
diff --git a/src/ast.h b/src/ast.h
index 42154f6..9b7d9dd 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -747,6 +747,8 @@
     Expression* value() { return value_; }
     Kind kind() { return kind_; }
 
+    bool IsCompileTimeValue();
+
    private:
     Literal* key_;
     Expression* value_;
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 43aa1a3..3436b50 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -316,8 +316,11 @@
 
 void Bootstrapper::Iterate(ObjectVisitor* v) {
   natives_cache.Iterate(v);
+  v->Synchronize("NativesCache");
   extensions_cache.Iterate(v);
+  v->Synchronize("Extensions");
   PendingFixups::Iterate(v);
+  v->Synchronize("PendingFixups");
 }
 
 
diff --git a/src/builtins.cc b/src/builtins.cc
index afb5427..fa1b34e 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -538,6 +538,44 @@
 }
 
 
+static void Generate_KeyedLoadIC_ExternalByteArray(MacroAssembler* masm) {
+  KeyedLoadIC::GenerateExternalArray(masm, kExternalByteArray);
+}
+
+
+static void Generate_KeyedLoadIC_ExternalUnsignedByteArray(
+    MacroAssembler* masm) {
+  KeyedLoadIC::GenerateExternalArray(masm, kExternalUnsignedByteArray);
+}
+
+
+static void Generate_KeyedLoadIC_ExternalShortArray(MacroAssembler* masm) {
+  KeyedLoadIC::GenerateExternalArray(masm, kExternalShortArray);
+}
+
+
+static void Generate_KeyedLoadIC_ExternalUnsignedShortArray(
+    MacroAssembler* masm) {
+  KeyedLoadIC::GenerateExternalArray(masm, kExternalUnsignedShortArray);
+}
+
+
+static void Generate_KeyedLoadIC_ExternalIntArray(MacroAssembler* masm) {
+  KeyedLoadIC::GenerateExternalArray(masm, kExternalIntArray);
+}
+
+
+static void Generate_KeyedLoadIC_ExternalUnsignedIntArray(
+    MacroAssembler* masm) {
+  KeyedLoadIC::GenerateExternalArray(masm, kExternalUnsignedIntArray);
+}
+
+
+static void Generate_KeyedLoadIC_ExternalFloatArray(MacroAssembler* masm) {
+  KeyedLoadIC::GenerateExternalArray(masm, kExternalFloatArray);
+}
+
+
 static void Generate_KeyedLoadIC_PreMonomorphic(MacroAssembler* masm) {
   KeyedLoadIC::GeneratePreMonomorphic(masm);
 }
@@ -567,6 +605,44 @@
 }
 
 
+static void Generate_KeyedStoreIC_ExternalByteArray(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateExternalArray(masm, kExternalByteArray);
+}
+
+
+static void Generate_KeyedStoreIC_ExternalUnsignedByteArray(
+    MacroAssembler* masm) {
+  KeyedStoreIC::GenerateExternalArray(masm, kExternalUnsignedByteArray);
+}
+
+
+static void Generate_KeyedStoreIC_ExternalShortArray(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateExternalArray(masm, kExternalShortArray);
+}
+
+
+static void Generate_KeyedStoreIC_ExternalUnsignedShortArray(
+    MacroAssembler* masm) {
+  KeyedStoreIC::GenerateExternalArray(masm, kExternalUnsignedShortArray);
+}
+
+
+static void Generate_KeyedStoreIC_ExternalIntArray(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateExternalArray(masm, kExternalIntArray);
+}
+
+
+static void Generate_KeyedStoreIC_ExternalUnsignedIntArray(
+    MacroAssembler* masm) {
+  KeyedStoreIC::GenerateExternalArray(masm, kExternalUnsignedIntArray);
+}
+
+
+static void Generate_KeyedStoreIC_ExternalFloatArray(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateExternalArray(masm, kExternalFloatArray);
+}
+
+
 static void Generate_KeyedStoreIC_ExtendStorage(MacroAssembler* masm) {
   KeyedStoreIC::GenerateExtendStorage(masm);
 }
diff --git a/src/builtins.h b/src/builtins.h
index 141d5b7..bc32c49 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -48,44 +48,58 @@
 
 
 // Define list of builtins implemented in assembly.
-#define BUILTIN_LIST_A(V)                                      \
-  V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED)        \
-  V(JSConstructCall,            BUILTIN, UNINITIALIZED)        \
-  V(JSConstructStubGeneric,     BUILTIN, UNINITIALIZED)        \
-  V(JSEntryTrampoline,          BUILTIN, UNINITIALIZED)        \
-  V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED)        \
-                                                               \
-  V(LoadIC_Miss,                BUILTIN, UNINITIALIZED)        \
-  V(KeyedLoadIC_Miss,           BUILTIN, UNINITIALIZED)        \
-  V(StoreIC_Miss,               BUILTIN, UNINITIALIZED)        \
-  V(KeyedStoreIC_Miss,          BUILTIN, UNINITIALIZED)        \
-                                                               \
-  V(StoreIC_ExtendStorage,      BUILTIN, UNINITIALIZED)        \
-  V(KeyedStoreIC_ExtendStorage, BUILTIN, UNINITIALIZED)        \
-                                                               \
-  V(LoadIC_Initialize,          LOAD_IC, UNINITIALIZED)        \
-  V(LoadIC_PreMonomorphic,      LOAD_IC, PREMONOMORPHIC)       \
-  V(LoadIC_Normal,              LOAD_IC, MONOMORPHIC)          \
-  V(LoadIC_ArrayLength,         LOAD_IC, MONOMORPHIC)          \
-  V(LoadIC_StringLength,        LOAD_IC, MONOMORPHIC)          \
-  V(LoadIC_FunctionPrototype,   LOAD_IC, MONOMORPHIC)          \
-  V(LoadIC_Megamorphic,         LOAD_IC, MEGAMORPHIC)          \
-                                                               \
-  V(KeyedLoadIC_Initialize,     KEYED_LOAD_IC, UNINITIALIZED)  \
-  V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC) \
-  V(KeyedLoadIC_Generic,        KEYED_LOAD_IC, MEGAMORPHIC)    \
-                                                               \
-  V(StoreIC_Initialize,         STORE_IC, UNINITIALIZED)       \
-  V(StoreIC_Megamorphic,        STORE_IC, MEGAMORPHIC)         \
-                                                               \
-  V(KeyedStoreIC_Initialize,    KEYED_STORE_IC, UNINITIALIZED) \
-  V(KeyedStoreIC_Generic,       KEYED_STORE_IC, MEGAMORPHIC)   \
-                                                               \
-  /* Uses KeyedLoadIC_Initialize; must be after in list. */    \
-  V(FunctionCall,               BUILTIN, UNINITIALIZED)        \
-  V(FunctionApply,              BUILTIN, UNINITIALIZED)        \
-                                                               \
-  V(ArrayCode,                  BUILTIN, UNINITIALIZED)        \
+#define BUILTIN_LIST_A(V)                                                 \
+  V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED)                   \
+  V(JSConstructCall,            BUILTIN, UNINITIALIZED)                   \
+  V(JSConstructStubGeneric,     BUILTIN, UNINITIALIZED)                   \
+  V(JSEntryTrampoline,          BUILTIN, UNINITIALIZED)                   \
+  V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED)                   \
+                                                                          \
+  V(LoadIC_Miss,                BUILTIN, UNINITIALIZED)                   \
+  V(KeyedLoadIC_Miss,           BUILTIN, UNINITIALIZED)                   \
+  V(StoreIC_Miss,               BUILTIN, UNINITIALIZED)                   \
+  V(KeyedStoreIC_Miss,          BUILTIN, UNINITIALIZED)                   \
+                                                                          \
+  V(StoreIC_ExtendStorage,      BUILTIN, UNINITIALIZED)                   \
+  V(KeyedStoreIC_ExtendStorage, BUILTIN, UNINITIALIZED)                   \
+                                                                          \
+  V(LoadIC_Initialize,          LOAD_IC, UNINITIALIZED)                   \
+  V(LoadIC_PreMonomorphic,      LOAD_IC, PREMONOMORPHIC)                  \
+  V(LoadIC_Normal,              LOAD_IC, MONOMORPHIC)                     \
+  V(LoadIC_ArrayLength,         LOAD_IC, MONOMORPHIC)                     \
+  V(LoadIC_StringLength,        LOAD_IC, MONOMORPHIC)                     \
+  V(LoadIC_FunctionPrototype,   LOAD_IC, MONOMORPHIC)                     \
+  V(LoadIC_Megamorphic,         LOAD_IC, MEGAMORPHIC)                     \
+                                                                          \
+  V(KeyedLoadIC_Initialize,     KEYED_LOAD_IC, UNINITIALIZED)             \
+  V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC)            \
+  V(KeyedLoadIC_Generic,        KEYED_LOAD_IC, MEGAMORPHIC)               \
+  V(KeyedLoadIC_ExternalByteArray,          KEYED_LOAD_IC, MEGAMORPHIC)   \
+  V(KeyedLoadIC_ExternalUnsignedByteArray,  KEYED_LOAD_IC, MEGAMORPHIC)   \
+  V(KeyedLoadIC_ExternalShortArray,         KEYED_LOAD_IC, MEGAMORPHIC)   \
+  V(KeyedLoadIC_ExternalUnsignedShortArray, KEYED_LOAD_IC, MEGAMORPHIC)   \
+  V(KeyedLoadIC_ExternalIntArray,           KEYED_LOAD_IC, MEGAMORPHIC)   \
+  V(KeyedLoadIC_ExternalUnsignedIntArray,   KEYED_LOAD_IC, MEGAMORPHIC)   \
+  V(KeyedLoadIC_ExternalFloatArray,         KEYED_LOAD_IC, MEGAMORPHIC)   \
+                                                                          \
+  V(StoreIC_Initialize,         STORE_IC, UNINITIALIZED)                  \
+  V(StoreIC_Megamorphic,        STORE_IC, MEGAMORPHIC)                    \
+                                                                          \
+  V(KeyedStoreIC_Initialize,    KEYED_STORE_IC, UNINITIALIZED)            \
+  V(KeyedStoreIC_Generic,       KEYED_STORE_IC, MEGAMORPHIC)              \
+  V(KeyedStoreIC_ExternalByteArray,          KEYED_STORE_IC, MEGAMORPHIC) \
+  V(KeyedStoreIC_ExternalUnsignedByteArray,  KEYED_STORE_IC, MEGAMORPHIC) \
+  V(KeyedStoreIC_ExternalShortArray,         KEYED_STORE_IC, MEGAMORPHIC) \
+  V(KeyedStoreIC_ExternalUnsignedShortArray, KEYED_STORE_IC, MEGAMORPHIC) \
+  V(KeyedStoreIC_ExternalIntArray,           KEYED_STORE_IC, MEGAMORPHIC) \
+  V(KeyedStoreIC_ExternalUnsignedIntArray,   KEYED_STORE_IC, MEGAMORPHIC) \
+  V(KeyedStoreIC_ExternalFloatArray,         KEYED_STORE_IC, MEGAMORPHIC) \
+                                                                          \
+  /* Uses KeyedLoadIC_Initialize; must be after in list. */               \
+  V(FunctionCall,               BUILTIN, UNINITIALIZED)                   \
+  V(FunctionApply,              BUILTIN, UNINITIALIZED)                   \
+                                                                          \
+  V(ArrayCode,                  BUILTIN, UNINITIALIZED)                   \
   V(ArrayConstructCode,         BUILTIN, UNINITIALIZED)
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
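
BUILTIN_LIST_A is an X-macro table: every client redefines V to pull out the columns it needs, so adding the fourteen external-array IC entries in one place declares, generates, and registers them everywhere the list is expanded. A hypothetical consumer, just to show the pattern:

    // Hypothetical consumer: count the assembler builtins in the table.
    #define COUNT_BUILTIN(name, kind, state) + 1
    static const int kAssemblerBuiltinCount = 0 BUILTIN_LIST_A(COUNT_BUILTIN);
    #undef COUNT_BUILTIN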
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 586c948..7a2f859 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -36,10 +36,27 @@
 namespace internal {
 
 Handle<Code> CodeStub::GetCode() {
-  uint32_t key = GetKey();
-  int index = Heap::code_stubs()->FindEntry(key);
-  if (index == NumberDictionary::kNotFound) {
-    HandleScope scope;
+  bool custom_cache = has_custom_cache();
+
+  int index = 0;
+  uint32_t key = 0;
+  if (custom_cache) {
+    Code* cached;
+    if (GetCustomCache(&cached)) {
+      return Handle<Code>(cached);
+    } else {
+      index = NumberDictionary::kNotFound;
+    }
+  } else {
+    key = GetKey();
+    index = Heap::code_stubs()->FindEntry(key);
+    if (index != NumberDictionary::kNotFound)
+      return Handle<Code>(Code::cast(Heap::code_stubs()->ValueAt(index)));
+  }
+
+  Code* result;
+  {
+    v8::HandleScope scope;
 
     // Update the static counter each time a new code stub is generated.
     Counters::code_stubs.Increment();
@@ -79,63 +96,29 @@
     }
 #endif
 
-    // Update the dictionary and the root in Heap.
-    Handle<NumberDictionary> dict =
-        Factory::DictionaryAtNumberPut(
-            Handle<NumberDictionary>(Heap::code_stubs()),
-            key,
-            code);
-    Heap::public_set_code_stubs(*dict);
-    index = Heap::code_stubs()->FindEntry(key);
+    if (custom_cache) {
+      SetCustomCache(*code);
+    } else {
+      // Update the dictionary and the root in Heap.
+      Handle<NumberDictionary> dict =
+          Factory::DictionaryAtNumberPut(
+              Handle<NumberDictionary>(Heap::code_stubs()),
+              key,
+              code);
+      Heap::public_set_code_stubs(*dict);
+    }
+    result = *code;
   }
-  ASSERT(index != NumberDictionary::kNotFound);
 
-  return Handle<Code>(Code::cast(Heap::code_stubs()->ValueAt(index)));
+  return Handle<Code>(result);
 }
 
 
 const char* CodeStub::MajorName(CodeStub::Major major_key) {
   switch (major_key) {
-    case CallFunction:
-      return "CallFunction";
-    case GenericBinaryOp:
-      return "GenericBinaryOp";
-    case SmiOp:
-      return "SmiOp";
-    case Compare:
-      return "Compare";
-    case RecordWrite:
-      return "RecordWrite";
-    case StackCheck:
-      return "StackCheck";
-    case UnarySub:
-      return "UnarySub";
-    case RevertToNumber:
-      return "RevertToNumber";
-    case ToBoolean:
-      return "ToBoolean";
-    case Instanceof:
-      return "Instanceof";
-    case CounterOp:
-      return "CounterOp";
-    case ArgumentsAccess:
-      return "ArgumentsAccess";
-    case Runtime:
-      return "Runtime";
-    case CEntry:
-      return "CEntry";
-    case JSEntry:
-      return "JSEntry";
-    case GetProperty:
-      return "GetProperty";
-    case SetProperty:
-      return "SetProperty";
-    case InvokeBuiltin:
-      return "InvokeBuiltin";
-    case ConvertToDouble:
-      return "ConvertToDouble";
-    case WriteInt32ToHeapNumber:
-      return "WriteInt32ToHeapNumber";
+#define DEF_CASE(name) case name: return #name;
+    CODE_STUB_LIST_ALL(DEF_CASE)
+#undef DEF_CASE
     default:
       UNREACHABLE();
       return NULL;
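
DEF_CASE uses the preprocessor's stringizing operator, so each list entry expands to a case returning its own spelling; the CallFunction entry, for example, becomes:

    case CallFunction: return "CallFunction";

This is what makes the switch robust against the list refactoring below: a stub added to CODE_STUB_LIST automatically gains both an enum value and a printable name.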
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 91d951f..63461bc 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -31,32 +31,51 @@
 namespace v8 {
 namespace internal {
 
+// List of code stubs used on all platforms. The order in this list is
+// important, as only the stubs up to and including RecordWrite allow
+// nested stub calls.
+#define CODE_STUB_LIST_ALL(V)  \
+  V(CallFunction)              \
+  V(GenericBinaryOp)           \
+  V(SmiOp)                     \
+  V(Compare)                   \
+  V(RecordWrite)               \
+  V(ConvertToDouble)           \
+  V(WriteInt32ToHeapNumber)    \
+  V(StackCheck)                \
+  V(UnarySub)                  \
+  V(RevertToNumber)            \
+  V(ToBoolean)                 \
+  V(Instanceof)                \
+  V(CounterOp)                 \
+  V(ArgumentsAccess)           \
+  V(Runtime)                   \
+  V(CEntry)                    \
+  V(JSEntry)
+
+// List of code stubs only used on ARM platforms.
+#ifdef V8_TARGET_ARCH_ARM
+#define CODE_STUB_LIST_ARM(V)  \
+  V(GetProperty)               \
+  V(SetProperty)               \
+  V(InvokeBuiltin)             \
+  V(RegExpCEntry)
+#else
+#define CODE_STUB_LIST_ARM(V)
+#endif
+
+// Combined list of code stubs.
+#define CODE_STUB_LIST(V)  \
+  CODE_STUB_LIST_ALL(V)    \
+  CODE_STUB_LIST_ARM(V)
 
 // CodeStub is the base class of all stubs.
 class CodeStub BASE_EMBEDDED {
  public:
   enum Major {
-    CallFunction,
-    GenericBinaryOp,
-    SmiOp,
-    Compare,
-    RecordWrite,  // Last stub that allows stub calls inside.
-    ConvertToDouble,
-    WriteInt32ToHeapNumber,
-    StackCheck,
-    UnarySub,
-    RevertToNumber,
-    ToBoolean,
-    Instanceof,
-    CounterOp,
-    ArgumentsAccess,
-    Runtime,
-    CEntry,
-    JSEntry,
-    GetProperty,   // ARM only
-    SetProperty,   // ARM only
-    InvokeBuiltin,  // ARM only
-    RegExpCEntry,  // ARM only
+#define DEF_ENUM(name) name,
+    CODE_STUB_LIST(DEF_ENUM)
+#undef DEF_ENUM
+    NoCache,  // marker for stubs that do custom caching
     NUMBER_OF_IDS
   };
 
@@ -73,6 +92,12 @@
 
   virtual ~CodeStub() {}
 
+  // Override these methods to provide a custom caching mechanism for
+  // an individual type of code stub.
+  virtual bool GetCustomCache(Code** code_out) { return false; }
+  virtual void SetCustomCache(Code* value) { }
+  virtual bool has_custom_cache() { return false; }
+
  protected:
   static const int kMajorBits = 5;
   static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;
diff --git a/src/codegen.cc b/src/codegen.cc
index 096a1a1..f2788a8 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -274,7 +274,7 @@
 }
 
 
-static Handle<Code> ComputeLazyCompile(int argc) {
+Handle<Code> CodeGenerator::ComputeLazyCompile(int argc) {
   CALL_HEAP_FUNCTION(StubCache::ComputeLazyCompile(argc), Code);
 }
 
@@ -551,4 +551,20 @@
 }
 
 
+bool ApiGetterEntryStub::GetCustomCache(Code** code_out) {
+  Object* cache = info()->load_stub_cache();
+  if (cache->IsUndefined()) {
+    return false;
+  } else {
+    *code_out = Code::cast(cache);
+    return true;
+  }
+}
+
+
+void ApiGetterEntryStub::SetCustomCache(Code* value) {
+  info()->set_load_stub_cache(value);
+}
+
+
 } }  // namespace v8::internal
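
These definitions complete the custom-cache contract declared in code-stubs.h: instead of keying into Heap::code_stubs(), the generated code is stashed on the AccessorInfo itself. The effective flow through CodeStub::GetCode(), simplified:

    // Simplified sequence (illustrative; names are from this patch).
    ApiGetterEntryStub stub(info, &fun);
    Handle<Code> code = stub.GetCode();  // Miss: generates, then
                                         // SetCustomCache(*code) stores it in
                                         // info->load_stub_cache().
    Handle<Code> hit = stub.GetCode();   // Hit: GetCustomCache() returns it.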
diff --git a/src/codegen.h b/src/codegen.h
index 1209f36..fc4a53b 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -56,6 +56,7 @@
 //   ~CodeGenerator
 //   ProcessDeferred
 //   GenCode
+//   ComputeLazyCompile
 //   BuildBoilerplate
 //   ComputeCallInitialize
 //   ComputeCallInitializeInLoop
@@ -300,7 +301,7 @@
                     Label* throw_normal_exception,
                     Label* throw_termination_exception,
                     Label* throw_out_of_memory_exception,
-                    StackFrame::Type frame_type,
+                    ExitFrame::Mode mode,
                     bool do_gc,
                     bool always_allocate_scope);
   void GenerateThrowTOS(MacroAssembler* masm);
@@ -319,6 +320,32 @@
 };
 
 
+class ApiGetterEntryStub : public CodeStub {
+ public:
+  ApiGetterEntryStub(Handle<AccessorInfo> info,
+                     ApiFunction* fun)
+      : info_(info),
+        fun_(fun) { }
+  void Generate(MacroAssembler* masm);
+  virtual bool has_custom_cache() { return true; }
+  virtual bool GetCustomCache(Code** code_out);
+  virtual void SetCustomCache(Code* value);
+
+  static const int kStackSpace = 6;
+  static const int kArgc = 4;
+ private:
+  Handle<AccessorInfo> info() { return info_; }
+  ApiFunction* fun() { return fun_; }
+  Major MajorKey() { return NoCache; }
+  int MinorKey() { return 0; }
+  const char* GetName() { return "ApiGetterEntryStub"; }
+  // The accessor info associated with the function.
+  Handle<AccessorInfo> info_;
+  // The function to be called.
+  ApiFunction* fun_;
+};
+
+
 class CEntryDebugBreakStub : public CEntryStub {
  public:
   CEntryDebugBreakStub() : CEntryStub(1) { }
diff --git a/src/compiler.cc b/src/compiler.cc
index 2e55683..bad209e 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -46,13 +46,25 @@
  public:
   enum CodeGenTag { NORMAL, FAST };
 
-  CodeGenSelector() : has_supported_syntax_(true) {}
+  CodeGenSelector()
+      : has_supported_syntax_(true),
+        location_(Location::Nowhere()) {
+  }
 
   CodeGenTag Select(FunctionLiteral* fun);
 
  private:
+  void VisitDeclarations(ZoneList<Declaration*>* decls);
   void VisitStatements(ZoneList<Statement*>* stmts);
 
+  // Visit an expression in effect context with a desired location of
+  // nowhere.
+  void VisitAsEffect(Expression* expr);
+
+  // Visit an expression in value context with a desired location of
+  // temporary.
+  void VisitAsValue(Expression* expr);
+
   // AST node visit functions.
 #define DECLARE_VISIT(type) virtual void Visit##type(type* node);
   AST_NODE_LIST(DECLARE_VISIT)
@@ -60,6 +72,9 @@
 
   bool has_supported_syntax_;
 
+  // The desired location of the currently visited expression.
+  Location location_;
+
   DISALLOW_COPY_AND_ASSIGN(CodeGenSelector);
 };
 
@@ -107,7 +122,7 @@
     CodeGenSelector selector;
     CodeGenSelector::CodeGenTag code_gen = selector.Select(literal);
     if (code_gen == CodeGenSelector::FAST) {
-      return FastCodeGenerator::MakeCode(literal, script);
+      return FastCodeGenerator::MakeCode(literal, script, is_eval);
     }
     ASSERT(code_gen == CodeGenSelector::NORMAL);
   }
@@ -450,15 +465,17 @@
 CodeGenSelector::CodeGenTag CodeGenSelector::Select(FunctionLiteral* fun) {
   Scope* scope = fun->scope();
 
-  if (!scope->is_global_scope()) return NORMAL;
+  if (!scope->is_global_scope()) {
+    if (FLAG_trace_bailout) PrintF("Non-global scope\n");
+    return NORMAL;
+  }
   ASSERT(scope->num_heap_slots() == 0);
   ASSERT(scope->arguments() == NULL);
 
-  if (!scope->declarations()->is_empty()) return NORMAL;
-  if (fun->materialized_literal_count() > 0) return NORMAL;
-  if (fun->body()->is_empty()) return NORMAL;
-
   has_supported_syntax_ = true;
+  VisitDeclarations(fun->scope()->declarations());
+  if (!has_supported_syntax_) return NORMAL;
+
   VisitStatements(fun->body());
   return has_supported_syntax_ ? FAST : NORMAL;
 }
@@ -480,34 +497,66 @@
   } while (false)
 
 
+void CodeGenSelector::VisitDeclarations(ZoneList<Declaration*>* decls) {
+  for (int i = 0; i < decls->length(); i++) {
+    Visit(decls->at(i));
+    CHECK_BAILOUT;
+  }
+}
+
+
 void CodeGenSelector::VisitStatements(ZoneList<Statement*>* stmts) {
   for (int i = 0, len = stmts->length(); i < len; i++) {
-    CHECK_BAILOUT;
     Visit(stmts->at(i));
+    CHECK_BAILOUT;
+  }
+}
+
+
+void CodeGenSelector::VisitAsEffect(Expression* expr) {
+  if (location_.is_nowhere()) {
+    Visit(expr);
+  } else {
+    Location saved = location_;
+    location_ = Location::Nowhere();
+    Visit(expr);
+    location_ = saved;
+  }
+}
+
+
+void CodeGenSelector::VisitAsValue(Expression* expr) {
+  if (location_.is_temporary()) {
+    Visit(expr);
+  } else {
+    Location saved = location_;
+    location_ = Location::Temporary();
+    Visit(expr);
+    location_ = saved;
   }
 }
 
 
 void CodeGenSelector::VisitDeclaration(Declaration* decl) {
-  BAILOUT("Declaration");
+  Variable* var = decl->proxy()->var();
+  if (!var->is_global() || var->mode() == Variable::CONST) {
+    BAILOUT("Non-global declaration");
+  }
 }
 
 
 void CodeGenSelector::VisitBlock(Block* stmt) {
-  BAILOUT("Block");
+  VisitStatements(stmt->statements());
 }
 
 
 void CodeGenSelector::VisitExpressionStatement(ExpressionStatement* stmt) {
-  Expression* expr = stmt->expression();
-  Visit(expr);
-  CHECK_BAILOUT;
-  expr->set_location(Location::Nowhere());
+  VisitAsEffect(stmt->expression());
 }
 
 
 void CodeGenSelector::VisitEmptyStatement(EmptyStatement* stmt) {
-  BAILOUT("EmptyStatement");
+  // EmptyStatement is supported.
 }
 
 
@@ -527,7 +576,7 @@
 
 
 void CodeGenSelector::VisitReturnStatement(ReturnStatement* stmt) {
-  Visit(stmt->expression());
+  VisitAsValue(stmt->expression());
 }
 
 
@@ -582,7 +631,10 @@
 
 
 void CodeGenSelector::VisitFunctionLiteral(FunctionLiteral* expr) {
-  BAILOUT("FunctionLiteral");
+  if (!expr->AllowsLazyCompilation()) {
+    BAILOUT("FunctionLiteral does not allow lazy compilation");
+  }
+  expr->set_location(location_);
 }
 
 
@@ -598,37 +650,88 @@
 
 
 void CodeGenSelector::VisitSlot(Slot* expr) {
-  Slot::Type type = expr->type();
-  if (type != Slot::PARAMETER && type != Slot::LOCAL) {
-    BAILOUT("non-parameter/non-local slot reference");
-  }
+  UNREACHABLE();
 }
 
 
 void CodeGenSelector::VisitVariableProxy(VariableProxy* expr) {
   Expression* rewrite = expr->var()->rewrite();
-  if (rewrite == NULL) BAILOUT("global variable reference");
-  Visit(rewrite);
+  // A rewrite of NULL indicates a global variable.
+  if (rewrite != NULL) {
+    // Non-global.
+    Slot* slot = rewrite->AsSlot();
+    if (slot == NULL) {
+      // This is a variable rewritten to an explicit property access
+      // on the arguments object.
+      BAILOUT("non-global/non-slot variable reference");
+    }
+
+    Slot::Type type = slot->type();
+    if (type != Slot::PARAMETER && type != Slot::LOCAL) {
+      BAILOUT("non-parameter/non-local slot reference");
+    }
+  }
+  expr->set_location(location_);
 }
 
 
 void CodeGenSelector::VisitLiteral(Literal* expr) {
-  // All literals are supported.
+  expr->set_location(location_);
 }
 
 
 void CodeGenSelector::VisitRegExpLiteral(RegExpLiteral* expr) {
-  BAILOUT("RegExpLiteral");
+  expr->set_location(location_);
 }
 
 
 void CodeGenSelector::VisitObjectLiteral(ObjectLiteral* expr) {
-  BAILOUT("ObjectLiteral");
+  ZoneList<ObjectLiteral::Property*>* properties = expr->properties();
+
+  for (int i = 0, len = properties->length(); i < len; i++) {
+    ObjectLiteral::Property* property = properties->at(i);
+    if (property->IsCompileTimeValue()) continue;
+
+    switch (property->kind()) {
+      case ObjectLiteral::Property::CONSTANT:
+        UNREACHABLE();
+
+      // For (non-compile-time) materialized literals and computed
+      // properties with symbolic keys we will use an IC and therefore not
+      // generate code for the key.
+      case ObjectLiteral::Property::COMPUTED:  // Fall through.
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+        if (property->key()->handle()->IsSymbol()) {
+          break;
+        }
+        // Fall through.
+
+      // In all other cases we need the key's value on the stack
+      // for a runtime call.  (Relies on TEMP meaning STACK.)
+      case ObjectLiteral::Property::GETTER:  // Fall through.
+      case ObjectLiteral::Property::SETTER:  // Fall through.
+      case ObjectLiteral::Property::PROTOTYPE:
+        VisitAsValue(property->key());
+        CHECK_BAILOUT;
+        break;
+    }
+    VisitAsValue(property->value());
+    CHECK_BAILOUT;
+  }
+  expr->set_location(location_);
 }
 
 
 void CodeGenSelector::VisitArrayLiteral(ArrayLiteral* expr) {
-  BAILOUT("ArrayLiteral");
+  ZoneList<Expression*>* subexprs = expr->values();
+  for (int i = 0, len = subexprs->length(); i < len; i++) {
+    Expression* subexpr = subexprs->at(i);
+    if (subexpr->AsLiteral() != NULL) continue;
+    if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+    VisitAsValue(subexpr);
+    CHECK_BAILOUT;
+  }
+  expr->set_location(location_);
 }
 
 
@@ -640,7 +743,10 @@
 void CodeGenSelector::VisitAssignment(Assignment* expr) {
   // We support plain non-compound assignments to parameters and
   // non-context (stack-allocated) locals.
-  if (expr->starts_initialization_block()) BAILOUT("initialization block");
+  if (expr->starts_initialization_block() ||
+      expr->ends_initialization_block()) {
+    BAILOUT("initialization block");
+  }
 
   Token::Value op = expr->op();
   if (op == Token::INIT_CONST) BAILOUT("initialize constant");
@@ -649,15 +755,18 @@
   }
 
   Variable* var = expr->target()->AsVariableProxy()->AsVariable();
-  if (var == NULL || var->is_global()) BAILOUT("non-variable assignment");
+  if (var == NULL) BAILOUT("non-variable assignment");
 
-  ASSERT(var->slot() != NULL);
-  Slot::Type type = var->slot()->type();
-  if (type != Slot::PARAMETER && type != Slot::LOCAL) {
-    BAILOUT("non-parameter/non-local slot assignment");
+  if (!var->is_global()) {
+    ASSERT(var->slot() != NULL);
+    Slot::Type type = var->slot()->type();
+    if (type != Slot::PARAMETER && type != Slot::LOCAL) {
+      BAILOUT("non-parameter/non-local slot assignment");
+    }
   }
 
-  Visit(expr->value());
+  VisitAsValue(expr->value());
+  expr->set_location(location_);
 }
 
 
@@ -667,22 +776,64 @@
 
 
 void CodeGenSelector::VisitProperty(Property* expr) {
-  BAILOUT("Property");
+  VisitAsValue(expr->obj());
+  CHECK_BAILOUT;
+  VisitAsValue(expr->key());
+  expr->set_location(location_);
 }
 
 
 void CodeGenSelector::VisitCall(Call* expr) {
-  BAILOUT("Call");
+  Expression* fun = expr->expression();
+  ZoneList<Expression*>* args = expr->arguments();
+  Variable* var = fun->AsVariableProxy()->AsVariable();
+
+  // Check for supported calls.
+  if (var != NULL && var->is_possibly_eval()) {
+    BAILOUT("Call to a function named 'eval'");
+  } else if (var != NULL && !var->is_this() && var->is_global()) {
+    // ----------------------------------
+    // JavaScript example: 'foo(1, 2, 3)'  // foo is global
+    // ----------------------------------
+  } else {
+    BAILOUT("Call to a non-global function");
+  }
+  // Check all arguments to the call.  (Relies on TEMP meaning STACK.)
+  for (int i = 0; i < args->length(); i++) {
+    VisitAsValue(args->at(i));
+    CHECK_BAILOUT;
+  }
+  expr->set_location(location_);
 }
 
 
 void CodeGenSelector::VisitCallNew(CallNew* expr) {
-  BAILOUT("CallNew");
+  VisitAsValue(expr->expression());
+  CHECK_BAILOUT;
+  ZoneList<Expression*>* args = expr->arguments();
+  // Check all arguments to the call.
+  for (int i = 0; i < args->length(); i++) {
+    VisitAsValue(args->at(i));
+    CHECK_BAILOUT;
+  }
+  expr->set_location(location_);
 }
 
 
 void CodeGenSelector::VisitCallRuntime(CallRuntime* expr) {
-  BAILOUT("CallRuntime");
+  // Bail out if this is a call to a JS runtime function.
+  if (expr->function() == NULL) BAILOUT("call JS runtime function");
+  // Check for an inline runtime call.
+  if (expr->name()->Get(0) == '_' &&
+      CodeGenerator::FindInlineRuntimeLUT(expr->name()) != NULL) {
+    BAILOUT("inlined runtime call");
+  }
+  // Check all arguments to the call.  (Relies on TEMP meaning STACK.)
+  for (int i = 0; i < expr->arguments()->length(); i++) {
+    VisitAsValue(expr->arguments()->at(i));
+    CHECK_BAILOUT;
+  }
+  expr->set_location(location_);
 }
 
 
@@ -697,7 +848,19 @@
 
 
 void CodeGenSelector::VisitBinaryOperation(BinaryOperation* expr) {
-  BAILOUT("BinaryOperation");
+  switch (expr->op()) {
+    case Token::OR:
+      VisitAsValue(expr->left());
+      CHECK_BAILOUT;
+      // The location for the right subexpression is the same as for the
+      // whole expression so we call Visit directly.
+      Visit(expr->right());
+      break;
+
+    default:
+      BAILOUT("Unsupported binary operation");
+  }
+  expr->set_location(location_);
 }
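
For readers of this excerpt, the BAILOUT and CHECK_BAILOUT macros are defined just above this hunk (the trailing '} while (false)' is visible at its top); their assumed shape:

    // Assumed shape of the selector's bailout macros.
    #define BAILOUT(reason)                           \
      do {                                            \
        if (FLAG_trace_bailout) PrintF(reason "\n");  \
        has_supported_syntax_ = false;                \
        return;                                       \
      } while (false)

    #define CHECK_BAILOUT                             \
      do {                                            \
        if (!has_supported_syntax_) return;           \
      } while (false)

Note the loop reordering in VisitStatements: visiting first and checking afterwards drops the redundant check before the first statement; behavior is otherwise unchanged.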
 
 
diff --git a/src/conversions-inl.h b/src/conversions-inl.h
index 8c875d7..ba7220a 100644
--- a/src/conversions-inl.h
+++ b/src/conversions-inl.h
@@ -84,7 +84,7 @@
   static const double two32 = 4294967296.0;
   static const double two31 = 2147483648.0;
   if (!isfinite(x) || x == 0) return 0;
-  if (x < 0 || x >= two32) x = fmod(x, two32);
+  if (x < 0 || x >= two32) x = modulo(x, two32);
   x = (x >= 0) ? floor(x) : ceil(x) + two32;
   return (int32_t) ((x >= two31) ? x - two32 : x);
 }
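
modulo() is a portable wrapper replacing direct fmod calls, presumably part of this release's Win64 fixes; the conversion logic itself is unchanged. A standalone mirror of the routine with worked values (std::fmod stands in for the wrapper):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // Mirror of the ToInt32 logic above, for experimentation.
    static int32_t ToInt32(double x) {
      static const double two32 = 4294967296.0;
      static const double two31 = 2147483648.0;
      if (!std::isfinite(x) || x == 0) return 0;
      if (x < 0 || x >= two32) x = std::fmod(x, two32);
      x = (x >= 0) ? std::floor(x) : std::ceil(x) + two32;
      return static_cast<int32_t>((x >= two31) ? x - two32 : x);
    }

    int main() {
      std::printf("%d\n", ToInt32(4294967300.0));  // 4 (wraps modulo 2^32)
      std::printf("%d\n", ToInt32(-1.0));          // -1
      std::printf("%d\n", ToInt32(2147483648.0));  // -2147483648 (2^31 wraps)
    }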
diff --git a/src/conversions.cc b/src/conversions.cc
index 2a3db7b..3e66d28 100644
--- a/src/conversions.cc
+++ b/src/conversions.cc
@@ -664,7 +664,7 @@
   int integer_pos = kBufferSize - 2;
   do {
     integer_buffer[integer_pos--] =
-        chars[static_cast<int>(fmod(integer_part, radix))];
+        chars[static_cast<int>(modulo(integer_part, radix))];
     integer_part /= radix;
   } while (integer_part >= 1.0);
   // Sanity check.
diff --git a/src/conversions.h b/src/conversions.h
index b6589cb..67f7d53 100644
--- a/src/conversions.h
+++ b/src/conversions.h
@@ -31,6 +31,7 @@
 namespace v8 {
 namespace internal {
 
+
 // The fast double-to-int conversion routine does not guarantee
 // rounding towards zero.
 // The result is unspecified if x is infinite or NaN, or if the rounded
diff --git a/src/debug-delay.js b/src/debug-delay.js
index d9447bd..35f7fcd 100644
--- a/src/debug-delay.js
+++ b/src/debug-delay.js
@@ -1243,6 +1243,8 @@
         this.threadsRequest_(request, response);
       } else if (request.command == 'suspend') {
         this.suspendRequest_(request, response);
+      } else if (request.command == 'version') {
+        this.versionRequest_(request, response);
       } else {
         throw new Error('Unknown command "' + request.command + '" in request');
       }
@@ -1911,11 +1913,17 @@
 
 
 DebugCommandProcessor.prototype.suspendRequest_ = function(request, response) {
-  // TODO(peter.rybin): probably we need some body field here.
   response.running = false;
 };
 
 
+DebugCommandProcessor.prototype.versionRequest_ = function(request, response) {
+  response.body = {
+    V8Version: %GetV8Version()
+  };
+};
+
+
 // Check whether the previously processed command caused the VM to become
 // running.
 DebugCommandProcessor.prototype.isRunning = function() {
diff --git a/src/factory.cc b/src/factory.cc
index 5251e34..32b69db 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -222,6 +222,18 @@
 }
 
 
+Handle<ExternalArray> Factory::NewExternalArray(int length,
+                                                ExternalArrayType array_type,
+                                                void* external_pointer,
+                                                PretenureFlag pretenure) {
+  ASSERT(0 <= length);
+  CALL_HEAP_FUNCTION(Heap::AllocateExternalArray(length,
+                                                 array_type,
+                                                 external_pointer,
+                                                 pretenure), ExternalArray);
+}
+
+
 Handle<Map> Factory::NewMap(InstanceType type, int instance_size) {
   CALL_HEAP_FUNCTION(Heap::AllocateMap(type, instance_size), Map);
 }
diff --git a/src/factory.h b/src/factory.h
index 7223f08..cb438e9 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -155,10 +155,17 @@
   static Handle<ByteArray> NewByteArray(int length,
                                         PretenureFlag pretenure = NOT_TENURED);
 
-  static Handle<PixelArray> NewPixelArray(int length,
+  static Handle<PixelArray> NewPixelArray(
+      int length,
       uint8_t* external_pointer,
       PretenureFlag pretenure = NOT_TENURED);
 
+  static Handle<ExternalArray> NewExternalArray(
+      int length,
+      ExternalArrayType array_type,
+      void* external_pointer,
+      PretenureFlag pretenure = NOT_TENURED);
+
   static Handle<Map> NewMap(InstanceType type, int instance_size);
 
   static Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
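
Factory::NewExternalArray is the internal allocation path behind the new external-array support in the public API. A hedged embedder-side sketch of wiring a C buffer into a JS object (assumes an entered Context and an open HandleScope):

    // Embedder sketch: expose a C int32 buffer as indexed properties.
    static int32_t buffer[256];
    v8::Local<v8::Object> holder = v8::Object::New();
    holder->SetIndexedPropertiesToExternalArrayData(
        buffer, v8::kExternalIntArray, 256);
    // JS reads and writes of holder[0..255] now hit 'buffer' directly;
    // the embedder retains ownership of the backing store.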
diff --git a/src/fast-codegen.cc b/src/fast-codegen.cc
index 4ec6a52..8655e97 100644
--- a/src/fast-codegen.cc
+++ b/src/fast-codegen.cc
@@ -29,16 +29,19 @@
 
 #include "codegen-inl.h"
 #include "fast-codegen.h"
+#include "stub-cache.h"
+#include "debug.h"
 
 namespace v8 {
 namespace internal {
 
 Handle<Code> FastCodeGenerator::MakeCode(FunctionLiteral* fun,
-                                         Handle<Script> script) {
+                                         Handle<Script> script,
+                                         bool is_eval) {
   CodeGenerator::MakeCodePrologue(fun);
   const int kInitialBufferSize = 4 * KB;
   MacroAssembler masm(NULL, kInitialBufferSize);
-  FastCodeGenerator cgen(&masm);
+  FastCodeGenerator cgen(&masm, script, is_eval);
   cgen.Generate(fun);
   if (cgen.HasStackOverflow()) {
     ASSERT(!Top::has_pending_exception());
@@ -50,6 +53,7 @@
 
 
 int FastCodeGenerator::SlotOffset(Slot* slot) {
+  ASSERT(slot != NULL);
   // Offset is negative because higher indexes are at lower addresses.
   int offset = -slot->index() * kPointerSize;
   // Adjust by a (parameter or local) base offset.
@@ -66,6 +70,137 @@
   return offset;
 }
 
+
+void FastCodeGenerator::Move(Location destination, Location source) {
+  switch (destination.type()) {
+    case Location::NOWHERE:
+      break;
+
+    case Location::TEMP:
+      switch (source.type()) {
+        case Location::NOWHERE:
+          UNREACHABLE();
+        case Location::TEMP:
+          break;
+      }
+      break;
+  }
+}
+
+
+// All platform macro assemblers in {ia32,x64,arm} have a push(Register)
+// function.
+void FastCodeGenerator::Move(Location destination, Register source) {
+  switch (destination.type()) {
+    case Location::NOWHERE:
+      break;
+    case Location::TEMP:
+      masm_->push(source);
+      break;
+  }
+}
+
+
+// All platform macro assemblers in {ia32,x64,arm} have a pop(Register)
+// function.
+void FastCodeGenerator::Move(Register destination, Location source) {
+  switch (source.type()) {
+    case Location::NOWHERE:
+      UNREACHABLE();
+    case Location::TEMP:
+      masm_->pop(destination);
+  }
+}
+
+
+void FastCodeGenerator::VisitDeclarations(
+    ZoneList<Declaration*>* declarations) {
+  int length = declarations->length();
+  int globals = 0;
+  for (int i = 0; i < length; i++) {
+    Declaration* node = declarations->at(i);
+    Variable* var = node->proxy()->var();
+    Slot* slot = var->slot();
+
+    // If it was not possible to allocate the variable at compile
+    // time, we need to "declare" it at runtime to make sure it
+    // actually exists in the local context.
+    if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
+      UNREACHABLE();
+    } else {
+      // Count global variables and functions for later processing
+      globals++;
+    }
+  }
+
+  // Return early if no global functions or variables were declared.
+  if (globals == 0) return;
+
+  // Compute array of global variable and function declarations.
+  Handle<FixedArray> array = Factory::NewFixedArray(2 * globals, TENURED);
+  for (int j = 0, i = 0; i < length; i++) {
+    Declaration* node = declarations->at(i);
+    Variable* var = node->proxy()->var();
+    Slot* slot = var->slot();
+
+    if ((slot == NULL || slot->type() != Slot::LOOKUP) && var->is_global()) {
+      array->set(j++, *(var->name()));
+      if (node->fun() == NULL) {
+        if (var->mode() == Variable::CONST) {
+          // If this is a const property, use the hole.
+          array->set_the_hole(j++);
+        } else {
+          array->set_undefined(j++);
+        }
+      } else {
+        Handle<JSFunction> function = BuildBoilerplate(node->fun());
+        // Check for stack-overflow exception.
+        if (HasStackOverflow()) return;
+        array->set(j++, *function);
+      }
+    }
+  }
+
+  // Invoke the platform-dependent code generator to do the actual
+  // declaration of the global variables and functions.
+  DeclareGlobals(array);
+}
+
+
+Handle<JSFunction> FastCodeGenerator::BuildBoilerplate(FunctionLiteral* fun) {
+#ifdef DEBUG
+  // We should not try to compile the same function literal more than
+  // once.
+  fun->mark_as_compiled();
+#endif
+
+  // Generate code.
+  Handle<Code> code = CodeGenerator::ComputeLazyCompile(fun->num_parameters());
+  // Check for stack-overflow exception.
+  if (code.is_null()) {
+    SetStackOverflow();
+    return Handle<JSFunction>::null();
+  }
+
+  // Create a boilerplate function.
+  Handle<JSFunction> function =
+      Factory::NewFunctionBoilerplate(fun->name(),
+                                      fun->materialized_literal_count(),
+                                      code);
+  CodeGenerator::SetFunctionInfo(function, fun, false, script_);
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Notify debugger that a new function has been added.
+  Debugger::OnNewFunction(function);
+#endif
+
+  // Set the expected number of properties for instances and return
+  // the resulting function.
+  SetExpectedNofPropertiesFromEstimate(function,
+                                       fun->expected_property_count());
+  return function;
+}
+
+
 void FastCodeGenerator::SetFunctionPosition(FunctionLiteral* fun) {
   if (FLAG_debug_info) {
     CodeGenerator::RecordPositions(masm_, fun->start_position());
@@ -100,12 +235,22 @@
 
 
 void FastCodeGenerator::VisitBlock(Block* stmt) {
-  UNREACHABLE();
+  Comment cmnt(masm_, "[ Block");
+  SetStatementPosition(stmt);
+  VisitStatements(stmt->statements());
+}
+
+
+void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
+  Comment cmnt(masm_, "[ ExpressionStatement");
+  SetStatementPosition(stmt);
+  Visit(stmt->expression());
 }
 
 
 void FastCodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
-  UNREACHABLE();
+  Comment cmnt(masm_, "[ EmptyStatement");
+  SetStatementPosition(stmt);
 }
 
 
@@ -174,11 +319,6 @@
 }
 
 
-void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
-  UNREACHABLE();
-}
-
-
 void FastCodeGenerator::VisitFunctionBoilerplateLiteral(
     FunctionBoilerplateLiteral* expr) {
   UNREACHABLE();
@@ -196,18 +336,8 @@
 }
 
 
-void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
-  UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
-  UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
-  UNREACHABLE();
+void FastCodeGenerator::VisitLiteral(Literal* expr) {
+  Move(expr->location(), expr);
 }
 
 
@@ -221,26 +351,6 @@
 }
 
 
-void FastCodeGenerator::VisitProperty(Property* expr) {
-  UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitCall(Call* expr) {
-  UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitCallNew(CallNew* expr) {
-  UNREACHABLE();
-}
-
-
-void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
-  UNREACHABLE();
-}
-
-
 void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
   UNREACHABLE();
 }
@@ -251,11 +361,6 @@
 }
 
 
-void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
-  UNREACHABLE();
-}
-
-
 void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
   UNREACHABLE();
 }
diff --git a/src/fast-codegen.h b/src/fast-codegen.h
index e6bb643..a718157 100644
--- a/src/fast-codegen.h
+++ b/src/fast-codegen.h
@@ -38,17 +38,36 @@
 
 class FastCodeGenerator: public AstVisitor {
  public:
-  explicit FastCodeGenerator(MacroAssembler* masm)
-      : masm_(masm), function_(NULL) {
+  FastCodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval)
+    : masm_(masm), function_(NULL), script_(script), is_eval_(is_eval) {
   }
 
-  static Handle<Code> MakeCode(FunctionLiteral* fun, Handle<Script> script);
+  static Handle<Code> MakeCode(FunctionLiteral* fun,
+                               Handle<Script> script,
+                               bool is_eval);
 
   void Generate(FunctionLiteral* fun);
 
  private:
   int SlotOffset(Slot* slot);
 
+  void Move(Location destination, Location source);
+
+  void Move(Location destination, Register source);
+  void Move(Location destination, Slot* source);
+  void Move(Location destination, Literal* source);
+
+  void Move(Register destination, Location source);
+  void Move(Slot* destination, Location source);
+
+  // Drop the TOS, and store source to destination.
+  // If destination is TOS, just overwrite TOS with source.
+  void DropAndMove(Location destination, Register source);
+
+  void VisitDeclarations(ZoneList<Declaration*>* declarations);
+  Handle<JSFunction> BuildBoilerplate(FunctionLiteral* fun);
+  void DeclareGlobals(Handle<FixedArray> pairs);
+
   void SetFunctionPosition(FunctionLiteral* fun);
   void SetReturnPosition(FunctionLiteral* fun);
   void SetStatementPosition(Statement* stmt);
@@ -61,6 +80,8 @@
 
   MacroAssembler* masm_;
   FunctionLiteral* function_;
+  Handle<Script> script_;
+  bool is_eval_;
 
   DISALLOW_COPY_AND_ASSIGN(FastCodeGenerator);
 };
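
DropAndMove's declared contract is precise: drop the top of stack and store source, unless the destination is the TOS, in which case overwrite it in place to save a pop/push pair. An ia32-flavored sketch under that contract (the instruction choices are assumptions, not this patch's code):

    void FastCodeGenerator::DropAndMove(Location destination, Register source) {
      switch (destination.type()) {
        case Location::NOWHERE:
          masm_->add(Operand(esp), Immediate(kPointerSize));  // Just drop TOS.
          break;
        case Location::TEMP:
          masm_->mov(Operand(esp, 0), source);  // Overwrite TOS in place.
          break;
      }
    }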
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 2a964ab..1ceb672 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -132,8 +132,6 @@
 // codegen-ia32.cc / codegen-arm.cc
 DEFINE_bool(trace, false, "trace function calls")
 DEFINE_bool(defer_negation, true, "defer negation operation")
-DEFINE_bool(check_stack, true,
-            "check stack for overflow, interrupt, breakpoint")
 
 // codegen.cc
 DEFINE_bool(lazy, true, "use lazy compilation")
@@ -163,8 +161,8 @@
            "maximum length of function source code printed in a stack trace.")
 
 // heap.cc
-DEFINE_int(new_space_size, 0, "size of (each semispace in) the new generation")
-DEFINE_int(old_space_size, 0, "size of the old generation")
+DEFINE_int(max_new_space_size, 0, "max size of the new generation")
+DEFINE_int(max_old_space_size, 0, "max size of the old generation")
 DEFINE_bool(gc_global, false, "always perform global GCs")
 DEFINE_int(gc_interval, -1, "garbage collect after <n> allocations")
 DEFINE_bool(trace_gc, false,
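
The heap-size flags are renamed (and reinterpreted as maxima) to match the new ConfigureHeap semantics. Embedders can still set them programmatically before initialization; a hedged example (the unit each flag expects is an assumption here — consult ConfigureHeap):

    // Set the renamed heap-limit flags before V8 starts up.
    static const char kFlags[] =
        "--max_new_space_size=2048 --max_old_space_size=64";
    v8::V8::SetFlagsFromString(kFlags, sizeof(kFlags) - 1);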
diff --git a/src/frames.cc b/src/frames.cc
index 5cd8332..d7302de 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -393,8 +393,19 @@
 }
 
 
+Object*& ExitFrame::code_slot() const {
+  const int offset = ExitFrameConstants::kCodeOffset;
+  return Memory::Object_at(fp() + offset);
+}
+
+
 Code* ExitFrame::code() const {
-  return Heap::c_entry_code();
+  Object* code = code_slot();
+  if (code->IsSmi()) {
+    return Heap::c_entry_debug_break_code();
+  } else {
+    return Code::cast(code);
+  }
 }
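
With the EXIT_DEBUG frame type gone, the debug distinction is encoded in the frame itself: a smi in the code slot marks a debug-break exit frame, anything else is the frame's actual Code object. Keeping the code in a real frame slot also gives Iterate() a pointer the GC can update. The invariant, restated (illustrative):

    Object* marker = frame->code_slot();
    Code* code = marker->IsSmi() ? Heap::c_entry_debug_break_code()
                                 : Code::cast(marker);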
 
 
@@ -412,11 +423,6 @@
 }
 
 
-Code* ExitDebugFrame::code() const {
-  return Heap::c_entry_debug_break_code();
-}
-
-
 Address StandardFrame::GetExpressionAddress(int n) const {
   const int offset = StandardFrameConstants::kExpressionsOffset;
   return fp() + offset - n * kPointerSize;
diff --git a/src/frames.h b/src/frames.h
index 768196d..024065a 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -93,7 +93,6 @@
   V(ENTRY,             EntryFrame)            \
   V(ENTRY_CONSTRUCT,   EntryConstructFrame)   \
   V(EXIT,              ExitFrame)             \
-  V(EXIT_DEBUG,        ExitDebugFrame)        \
   V(JAVA_SCRIPT,       JavaScriptFrame)       \
   V(INTERNAL,          InternalFrame)         \
   V(CONSTRUCT,         ConstructFrame)        \
@@ -119,7 +118,6 @@
   bool is_entry() const { return type() == ENTRY; }
   bool is_entry_construct() const { return type() == ENTRY_CONSTRUCT; }
   bool is_exit() const { return type() == EXIT; }
-  bool is_exit_debug() const { return type() == EXIT_DEBUG; }
   bool is_java_script() const { return type() == JAVA_SCRIPT; }
   bool is_arguments_adaptor() const { return type() == ARGUMENTS_ADAPTOR; }
   bool is_internal() const { return type() == INTERNAL; }
@@ -260,10 +258,13 @@
 // Exit frames are used to exit JavaScript execution and go to C.
 class ExitFrame: public StackFrame {
  public:
+  enum Mode { MODE_NORMAL, MODE_DEBUG };
   virtual Type type() const { return EXIT; }
 
   virtual Code* code() const;
 
+  Object*& code_slot() const;
+
   // Garbage collection support.
   virtual void Iterate(ObjectVisitor* v) const;
 
@@ -289,26 +290,6 @@
 };
 
 
-class ExitDebugFrame: public ExitFrame {
- public:
-  virtual Type type() const { return EXIT_DEBUG; }
-
-  virtual Code* code() const;
-
-  static ExitDebugFrame* cast(StackFrame* frame) {
-    ASSERT(frame->is_exit_debug());
-    return static_cast<ExitDebugFrame*>(frame);
-  }
-
- protected:
-  explicit ExitDebugFrame(StackFrameIterator* iterator)
-      : ExitFrame(iterator) { }
-
- private:
-  friend class StackFrameIterator;
-};
-
-
 class StandardFrame: public StackFrame {
  public:
   // Testers.
diff --git a/src/global-handles.cc b/src/global-handles.cc
index f4b69fc..c6cc288 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -44,6 +44,10 @@
     callback_ = NULL;
   }
 
+  Node() {
+    state_ = DESTROYED;
+  }
+
   explicit Node(Object* object) {
     Initialize(object);
     // Initialize link structure.
@@ -200,20 +204,80 @@
 };
 
 
+class GlobalHandles::Pool BASE_EMBEDDED {
+ public:
+  Pool() {
+    current_ = new Chunk();
+    current_->previous = NULL;
+    next_ = current_->nodes;
+    limit_ = current_->nodes + kNodesPerChunk;
+  }
+
+  Node* Allocate() {
+    if (next_ < limit_) {
+      return next_++;
+    }
+    return SlowAllocate();
+  }
+
+  void Release() {
+    Chunk* current = current_;
+    ASSERT(current != NULL);  // At least a single chunk must be allocated.
+    do {
+      Chunk* previous = current->previous;
+      delete current;
+      current = previous;
+    } while (current != NULL);
+    current_ = NULL;
+    next_ = limit_ = NULL;
+  }
+
+ private:
+  static const int kNodesPerChunk = (1 << 12) - 1;
+  struct Chunk : public Malloced {
+    Chunk* previous;
+    Node nodes[kNodesPerChunk];
+  };
+
+  Node* SlowAllocate() {
+    Chunk* chunk = new Chunk();
+    chunk->previous = current_;
+    current_ = chunk;
+
+    Node* new_nodes = current_->nodes;
+    next_ = new_nodes + 1;
+    limit_ = new_nodes + kNodesPerChunk;
+    return new_nodes;
+  }
+
+  Chunk* current_;
+  Node* next_;
+  Node* limit_;
+};
+
+
+static GlobalHandles::Pool pool_;
+
+
 Handle<Object> GlobalHandles::Create(Object* value) {
   Counters::global_handles.Increment();
   Node* result;
-  if (first_free() == NULL) {
-    // Allocate a new node.
-    result = new Node(value);
-    result->set_next(head());
-    set_head(result);
-  } else {
+  if (first_free()) {
     // Take the first node in the free list.
     result = first_free();
     set_first_free(result->next_free());
-    result->Initialize(value);
+  } else if (first_deallocated()) {
+    // Next, try the deallocated list.
+    result = first_deallocated();
+    set_first_deallocated(result->next_free());
+    set_head(result);
+  } else {
+    // Allocate a new node.
+    result = pool_.Allocate();
+    result->set_next(head());
+    set_head(result);
   }
+  result->Initialize(value);
   return result->handle();
 }
 
@@ -292,7 +356,7 @@
   // Process weak global handle callbacks. This must be done after the
   // GC is completely done, because the callbacks may invoke arbitrary
   // API functions.
-  // At the same time deallocate all DESTROYED nodes
+  // At the same time deallocate all DESTROYED nodes.
   ASSERT(Heap::gc_state() == Heap::NOT_IN_GC);
   const int initial_post_gc_processing_count = ++post_gc_processing_count;
   Node** p = &head_;
@@ -310,12 +374,19 @@
       // Delete the link.
       Node* node = *p;
       *p = node->next();  // Update the link.
-      delete node;
+      if (first_deallocated()) {
+        first_deallocated()->set_next(node);
+      }
+      node->set_next_free(first_deallocated());
+      set_first_deallocated(node);
     } else {
       p = (*p)->next_addr();
     }
   }
   set_first_free(NULL);
+  if (first_deallocated()) {
+    first_deallocated()->set_next(head());
+  }
 }
 
 
@@ -329,16 +400,11 @@
 }
 
 void GlobalHandles::TearDown() {
-  // Delete all the nodes in the linked list.
-  Node* current = head_;
-  while (current != NULL) {
-    Node* n = current;
-    current = current->next();
-    delete n;
-  }
-  // Reset the head and free_list.
+  // Reset all the lists.
   set_head(NULL);
   set_first_free(NULL);
+  set_first_deallocated(NULL);
+  pool_.Release();
 }
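
The new Pool replaces per-handle new/delete with chunked allocation: nodes are carved out of 4095-node chunks and never individually freed, which is why TearDown() can release everything at once and why dead nodes go onto the deallocated list instead of being deleted. The pattern in a standalone, generic form (illustrative, not this file's code):

    // Standalone sketch of the chunked-pool pattern used by
    // GlobalHandles::Pool: bump allocation within a chunk, chunks
    // chained for bulk release, no per-item deallocation.
    template <typename T, int kPerChunk>
    class BumpPool {
     public:
      BumpPool() : current_(new Chunk(NULL)),
                   next_(current_->items),
                   limit_(current_->items + kPerChunk) {}
      T* Allocate() {
        if (next_ < limit_) return next_++;
        current_ = new Chunk(current_);
        next_ = current_->items + 1;
        limit_ = current_->items + kPerChunk;
        return current_->items;
      }
      void Release() {
        while (current_ != NULL) {
          Chunk* previous = current_->previous;
          delete current_;
          current_ = previous;
        }
        next_ = limit_ = NULL;
      }
     private:
      struct Chunk {
        explicit Chunk(Chunk* prev) : previous(prev) {}
        Chunk* previous;
        T items[kPerChunk];
      };
      Chunk* current_;
      T* next_;
      T* limit_;
    };

This is also why Node gained a default constructor that marks the node DESTROYED: each new chunk default-constructs its whole Node array up front.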
 
 
@@ -347,6 +413,7 @@
 
 GlobalHandles::Node* GlobalHandles::head_ = NULL;
 GlobalHandles::Node* GlobalHandles::first_free_ = NULL;
+GlobalHandles::Node* GlobalHandles::first_deallocated_ = NULL;
 
 #ifdef DEBUG
 
diff --git a/src/global-handles.h b/src/global-handles.h
index feb95bf..87eb9b8 100644
--- a/src/global-handles.h
+++ b/src/global-handles.h
@@ -127,6 +127,7 @@
   static void PrintStats();
   static void Print();
 #endif
+  class Pool;
  private:
   // Internal node structure, one for each global handle.
   class Node;
@@ -148,6 +149,23 @@
   static Node* first_free_;
   static Node* first_free() { return first_free_; }
   static void set_first_free(Node* value) { first_free_ = value; }
+
+  // List of deallocated nodes.
+  // Deallocated nodes form a prefix of all the nodes and
+  // |first_deallocated| points to last deallocated node before
+  // |head|.  Those deallocated nodes are additionally linked
+  // by |next_free|:
+  //                                    1st deallocated  head
+  //                                           |          |
+  //                                           V          V
+  //    node          node        ...         node       node
+  //      .next      -> .next ->                .next ->
+  //   <- .next_free <- .next_free           <- .next_free
+  static Node* first_deallocated_;
+  static Node* first_deallocated() { return first_deallocated_; }
+  static void set_first_deallocated(Node* value) {
+    first_deallocated_ = value;
+  }
 };
 
 
diff --git a/src/globals.h b/src/globals.h
index efe0127..ae10b72 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -103,6 +103,10 @@
 #define V8PRIxPTR "lx"
 #endif
 
+#if defined(__APPLE__) && defined(__MACH__)
+#define USING_MAC_ABI
+#endif
+
 // Code-point values in Unicode 4.0 are 21 bits wide.
 typedef uint16_t uc16;
 typedef int32_t uc32;
@@ -170,6 +174,15 @@
 #endif
 
 
+// Constants relevant to double precision floating point numbers.
+
+// Quiet NaNs have bits 51 to 62 set, possibly the sign bit, and no
+// other bits set.
+const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
+// If looking only at the top 32 bits, the QNaN mask is bits 19 to 30.
+const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
+
+
 // -----------------------------------------------------------------------------
 // Forward declarations for frequently used classes
 // (sorted alphabetically)
@@ -239,6 +252,7 @@
 class VariableProxy;
 class RelocInfo;
 class Deserializer;
+class GenericDeserializer;  // TODO(erikcorry): Get rid of this.
 class MessageLocation;
 class ObjectGroup;
 class TickSample;
@@ -263,7 +277,9 @@
   LO_SPACE,             // Promoted large objects.
 
   FIRST_SPACE = NEW_SPACE,
-  LAST_SPACE = LO_SPACE
+  LAST_SPACE = LO_SPACE,
+  FIRST_PAGED_SPACE = OLD_POINTER_SPACE,
+  LAST_PAGED_SPACE = CELL_SPACE
 };
 const int kSpaceTagSize = 3;
 const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
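
The two NaN masks are consistent with each other: 0xfff shifted left by 51 covers bits 51–62 of the 64-bit pattern, and the same twelve bits land at positions 19–30 of the high word. A standalone sanity check (assumes IEEE-754 doubles):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <limits>

    int main() {
      const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
      const uint32_t kQuietNaNHighBitsMask = 0xfff << (51 - 32);
      double qnan = std::numeric_limits<double>::quiet_NaN();
      uint64_t bits;
      std::memcpy(&bits, &qnan, sizeof(bits));
      uint32_t high = static_cast<uint32_t>(bits >> 32);
      std::printf("%d\n", (bits & kQuietNaNMask) == kQuietNaNMask);  // 1
      std::printf("%d\n", (high & kQuietNaNHighBitsMask) ==
                          kQuietNaNHighBitsMask);                    // 1
    }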
diff --git a/src/handles.cc b/src/handles.cc
index b43ec53..275fe6a 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -105,6 +105,21 @@
 }
 
 
+Address HandleScope::current_extensions_address() {
+  return reinterpret_cast<Address>(&current_.extensions);
+}
+
+
+Address HandleScope::current_next_address() {
+  return reinterpret_cast<Address>(&current_.next);
+}
+
+
+Address HandleScope::current_limit_address() {
+  return reinterpret_cast<Address>(&current_.limit);
+}
+
+
 Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray> content,
                                       Handle<JSArray> array) {
   CALL_HEAP_FUNCTION(content->AddKeysFromJSArray(*array), FixedArray);
@@ -345,7 +360,7 @@
 Handle<Object> SetElement(Handle<JSObject> object,
                           uint32_t index,
                           Handle<Object> value) {
-  if (object->HasPixelElements()) {
+  if (object->HasPixelElements() || object->HasExternalArrayElements()) {
     if (!value->IsSmi() && !value->IsHeapNumber() && !value->IsUndefined()) {
       bool has_exception;
       Handle<Object> number = Execution::ToNumber(value, &has_exception);
diff --git a/src/handles.h b/src/handles.h
index 5d57465..d3e9b78 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -133,6 +133,13 @@
     return result;
   }
 
+  // Deallocates any extensions used by the current scope.
+  static void DeleteExtensions();
+
+  static Address current_extensions_address();
+  static Address current_next_address();
+  static Address current_limit_address();
+
  private:
   // Prevent heap allocation or illegal handle scopes.
   HandleScope(const HandleScope&);
@@ -166,9 +173,6 @@
   // Extend the handle scope making room for more handles.
   static internal::Object** Extend();
 
-  // Deallocates any extensions used by the current scope.
-  static void DeleteExtensions();
-
   // Zaps the handles in the half-open interval [start, end).
   static void ZapRange(internal::Object** start, internal::Object** end);
 
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index 8f55ce1..7f7cd7f 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -78,6 +78,10 @@
     }
   } else if (obj->IsString()) {
     return JSObjectsCluster(Heap::String_symbol());
+  } else if (obj->IsJSGlobalPropertyCell()) {
+    return JSObjectsCluster(JSObjectsCluster::GLOBAL_PROPERTY);
+  } else if (obj->IsCode() || obj->IsSharedFunctionInfo() || obj->IsScript()) {
+    return JSObjectsCluster(JSObjectsCluster::CODE);
   }
   return JSObjectsCluster();
 }
@@ -112,6 +116,16 @@
   if (FixedArray::cast(obj->elements())->length() != 0) {
     size += obj->elements()->Size();
   }
+  // For functions, also account non-empty context and literals sizes.
+  if (obj->IsJSFunction()) {
+    JSFunction* f = JSFunction::cast(obj);
+    if (f->unchecked_context()->IsContext()) {
+      size += f->context()->Size();
+    }
+    if (f->literals()->length() != 0) {
+      size += f->literals()->Size();
+    }
+  }
   return size;
 }
 
@@ -127,15 +141,15 @@
   }
 
   void VisitPointer(Object** o) {
-    if ((*o)->IsJSObject() || (*o)->IsString()) {
-      profile_->StoreReference(cluster_, HeapObject::cast(*o));
-    } else if ((*o)->IsFixedArray() && !inside_array_) {
+    if ((*o)->IsFixedArray() && !inside_array_) {
       // Traverse one level deep for data members that are fixed arrays.
       // This covers the case of 'elements' and 'properties' of JSObject,
       // and function contexts.
       inside_array_ = true;
       FixedArray::cast(*o)->Iterate(this);
       inside_array_ = false;
+    } else if ((*o)->IsHeapObject()) {
+      profile_->StoreReference(cluster_, HeapObject::cast(*o));
     }
   }
 
@@ -340,6 +354,8 @@
     accumulator->Add("(roots)");
   } else if (constructor_ == FromSpecialCase(GLOBAL_PROPERTY)) {
     accumulator->Add("(global property)");
+  } else if (constructor_ == FromSpecialCase(CODE)) {
+    accumulator->Add("(code)");
   } else if (constructor_ == FromSpecialCase(SELF)) {
     accumulator->Add("(self)");
   } else {
@@ -527,6 +543,7 @@
 void RetainerHeapProfile::StoreReference(const JSObjectsCluster& cluster,
                                          HeapObject* ref) {
   JSObjectsCluster ref_cluster = Clusterizer::Clusterize(ref);
+  if (ref_cluster.is_null()) return;
   JSObjectsRetainerTree::Locator ref_loc;
   if (retainers_tree_.Insert(ref_cluster, &ref_loc)) {
     ref_loc.set_value(new JSObjectsClusterTree());
@@ -537,15 +554,10 @@
 
 
 void RetainerHeapProfile::CollectStats(HeapObject* obj) {
-  if (obj->IsJSObject()) {
-    const JSObjectsCluster cluster = Clusterizer::Clusterize(obj);
-    ReferencesExtractor extractor(cluster, this);
-    obj->Iterate(&extractor);
-  } else if (obj->IsJSGlobalPropertyCell()) {
-    JSObjectsCluster global_prop(JSObjectsCluster::GLOBAL_PROPERTY);
-    ReferencesExtractor extractor(global_prop, this);
-    obj->Iterate(&extractor);
-  }
+  const JSObjectsCluster cluster = Clusterizer::Clusterize(obj);
+  if (cluster.is_null()) return;
+  ReferencesExtractor extractor(cluster, this);
+  obj->Iterate(&extractor);
 }
 
 
@@ -576,8 +588,10 @@
 void HeapProfiler::CollectStats(HeapObject* obj, HistogramInfo* info) {
   InstanceType type = obj->map()->instance_type();
   ASSERT(0 <= type && type <= LAST_TYPE);
-  info[type].increment_number(1);
-  info[type].increment_bytes(obj->Size());
+  if (!FreeListNode::IsFreeListNode(obj)) {
+    info[type].increment_number(1);
+    info[type].increment_bytes(obj->Size());
+  }
 }
 
 
@@ -601,7 +615,7 @@
 void HeapProfiler::WriteSample() {
   LOG(HeapSampleBeginEvent("Heap", "allocated"));
   LOG(HeapSampleStats(
-      "Heap", "allocated", Heap::Capacity(), Heap::SizeOfObjects()));
+      "Heap", "allocated", Heap::CommittedMemory(), Heap::SizeOfObjects()));
 
   HistogramInfo info[LAST_TYPE+1];
 #define DEF_TYPE_NAME(name) info[name].set_name(#name);
diff --git a/src/heap-profiler.h b/src/heap-profiler.h
index bd875df..f8cb04d 100644
--- a/src/heap-profiler.h
+++ b/src/heap-profiler.h
@@ -54,7 +54,8 @@
   enum SpecialCase {
     ROOTS = 1,
     GLOBAL_PROPERTY = 2,
-    SELF = 3  // This case is used in ClustersCoarser only.
+    CODE = 3,
+    SELF = 100  // This case is used in ClustersCoarser only.
   };
 
   JSObjectsCluster() : constructor_(NULL), instance_(NULL) {}
@@ -97,6 +98,7 @@
     switch (special) {
       case ROOTS: return Heap::result_symbol();
       case GLOBAL_PROPERTY: return Heap::code_symbol();
+      case CODE: return Heap::arguments_shadow_symbol();
       case SELF: return Heap::catch_var_symbol();
       default:
         UNREACHABLE();
diff --git a/src/heap.cc b/src/heap.cc
index 2082e97..ae18fbe 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -39,9 +39,11 @@
 #include "natives.h"
 #include "scanner.h"
 #include "scopeinfo.h"
+#include "snapshot.h"
 #include "v8threads.h"
 #if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
 #include "regexp-macro-assembler.h"
+#include "arm/regexp-macro-assembler-arm.h"
 #endif
 
 namespace v8 {
@@ -74,28 +76,35 @@
 // semispace_size_ should be a power of 2 and old_generation_size_ should be
 // a multiple of Page::kPageSize.
 #if defined(ANDROID)
-int Heap::semispace_size_  = 512*KB;
-int Heap::old_generation_size_ = 128*MB;
+int Heap::max_semispace_size_  = 512*KB;
+int Heap::max_old_generation_size_ = 128*MB;
 int Heap::initial_semispace_size_ = 128*KB;
 size_t Heap::code_range_size_ = 0;
 #elif defined(V8_TARGET_ARCH_X64)
-int Heap::semispace_size_  = 16*MB;
-int Heap::old_generation_size_ = 1*GB;
+int Heap::max_semispace_size_  = 16*MB;
+int Heap::max_old_generation_size_ = 1*GB;
 int Heap::initial_semispace_size_ = 1*MB;
 size_t Heap::code_range_size_ = 512*MB;
 #else
-int Heap::semispace_size_  = 8*MB;
-int Heap::old_generation_size_ = 512*MB;
+int Heap::max_semispace_size_  = 8*MB;
+int Heap::max_old_generation_size_ = 512*MB;
 int Heap::initial_semispace_size_ = 512*KB;
 size_t Heap::code_range_size_ = 0;
 #endif
 
+// The snapshot semispace size will be the default semispace size if
+// snapshotting is used and will be the requested semispace size as
+// set up by ConfigureHeap otherwise.
+int Heap::reserved_semispace_size_ = Heap::max_semispace_size_;
+
 GCCallback Heap::global_gc_prologue_callback_ = NULL;
 GCCallback Heap::global_gc_epilogue_callback_ = NULL;
 
-// Variables set based on semispace_size_ and old_generation_size_ in
-// ConfigureHeap.
+// Variables set based on max_semispace_size_ and max_old_generation_size_
+// in ConfigureHeap.
-int Heap::young_generation_size_ = 0;  // Will be 2 * semispace_size_.
+
+// Will be 4 * reserved_semispace_size_ to ensure that the young
+// generation can be aligned to its size.
 int Heap::survived_since_last_expansion_ = 0;
 int Heap::external_allocation_limit_ = 0;
 
@@ -105,6 +114,7 @@
 int Heap::gc_count_ = 0;
 
 int Heap::always_allocate_scope_depth_ = 0;
+int Heap::linear_allocation_scope_depth_ = 0;
 bool Heap::context_disposed_pending_ = false;
 
 #ifdef DEBUG
@@ -127,6 +137,19 @@
 }
 
 
+int Heap::CommittedMemory() {
+  if (!HasBeenSetup()) return 0;
+
+  return new_space_.CommittedMemory() +
+      old_pointer_space_->CommittedMemory() +
+      old_data_space_->CommittedMemory() +
+      code_space_->CommittedMemory() +
+      map_space_->CommittedMemory() +
+      cell_space_->CommittedMemory() +
+      lo_space_->Size();
+}
+
+
 int Heap::Available() {
   if (!HasBeenSetup()) return 0;
 
@@ -222,19 +245,34 @@
 void Heap::PrintShortHeapStatistics() {
   if (!FLAG_trace_gc_verbose) return;
   PrintF("Memory allocator,   used: %8d, available: %8d\n",
-         MemoryAllocator::Size(), MemoryAllocator::Available());
+         MemoryAllocator::Size(),
+         MemoryAllocator::Available());
   PrintF("New space,          used: %8d, available: %8d\n",
-         Heap::new_space_.Size(), new_space_.Available());
-  PrintF("Old pointers,       used: %8d, available: %8d\n",
-         old_pointer_space_->Size(), old_pointer_space_->Available());
-  PrintF("Old data space,     used: %8d, available: %8d\n",
-         old_data_space_->Size(), old_data_space_->Available());
-  PrintF("Code space,         used: %8d, available: %8d\n",
-         code_space_->Size(), code_space_->Available());
-  PrintF("Map space,          used: %8d, available: %8d\n",
-         map_space_->Size(), map_space_->Available());
+         Heap::new_space_.Size(),
+         new_space_.Available());
+  PrintF("Old pointers,       used: %8d, available: %8d, waste: %8d\n",
+         old_pointer_space_->Size(),
+         old_pointer_space_->Available(),
+         old_pointer_space_->Waste());
+  PrintF("Old data space,     used: %8d, available: %8d, waste: %8d\n",
+         old_data_space_->Size(),
+         old_data_space_->Available(),
+         old_data_space_->Waste());
+  PrintF("Code space,         used: %8d, available: %8d, waste: %8d\n",
+         code_space_->Size(),
+         code_space_->Available(),
+         code_space_->Waste());
+  PrintF("Map space,          used: %8d, available: %8d, waste: %8d\n",
+         map_space_->Size(),
+         map_space_->Available(),
+         map_space_->Waste());
+  PrintF("Cell space,         used: %8d, available: %8d, waste: %8d\n",
+         cell_space_->Size(),
+         cell_space_->Available(),
+         cell_space_->Waste());
   PrintF("Large object space, used: %8d, avaialble: %8d\n",
-         lo_space_->Size(), lo_space_->Available());
+         lo_space_->Size(),
+         lo_space_->Available());
 }
 #endif
 
@@ -478,7 +516,13 @@
 
   Counters::objs_since_last_young.Set(0);
 
-  PostGarbageCollectionProcessing();
+  if (collector == MARK_COMPACTOR) {
+    DisableAssertNoAllocation allow_allocation;
+    GlobalHandles::PostGarbageCollectionProcessing();
+  }
+
+  // Update relocatables.
+  Relocatable::PostGarbageCollectionProcessing();
 
   if (collector == MARK_COMPACTOR) {
     // Register the amount of external allocated memory.
@@ -494,17 +538,6 @@
 }
 
 
-void Heap::PostGarbageCollectionProcessing() {
-  // Process weak handles post gc.
-  {
-    DisableAssertNoAllocation allow_allocation;
-    GlobalHandles::PostGarbageCollectionProcessing();
-  }
-  // Update relocatables.
-  Relocatable::PostGarbageCollectionProcessing();
-}
-
-
 void Heap::MarkCompact(GCTracer* tracer) {
   gc_state_ = MARK_COMPACT;
   mc_count_++;
@@ -1195,6 +1228,41 @@
   if (obj->IsFailure()) return false;
   set_pixel_array_map(Map::cast(obj));
 
+  obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
+                    ExternalArray::kAlignedSize);
+  if (obj->IsFailure()) return false;
+  set_external_byte_array_map(Map::cast(obj));
+
+  obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
+                    ExternalArray::kAlignedSize);
+  if (obj->IsFailure()) return false;
+  set_external_unsigned_byte_array_map(Map::cast(obj));
+
+  obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
+                    ExternalArray::kAlignedSize);
+  if (obj->IsFailure()) return false;
+  set_external_short_array_map(Map::cast(obj));
+
+  obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
+                    ExternalArray::kAlignedSize);
+  if (obj->IsFailure()) return false;
+  set_external_unsigned_short_array_map(Map::cast(obj));
+
+  obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
+                    ExternalArray::kAlignedSize);
+  if (obj->IsFailure()) return false;
+  set_external_int_array_map(Map::cast(obj));
+
+  obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
+                    ExternalArray::kAlignedSize);
+  if (obj->IsFailure()) return false;
+  set_external_unsigned_int_array_map(Map::cast(obj));
+
+  obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
+                    ExternalArray::kAlignedSize);
+  if (obj->IsFailure()) return false;
+  set_external_float_array_map(Map::cast(obj));
+
   obj = AllocateMap(CODE_TYPE, Code::kHeaderSize);
   if (obj->IsFailure()) return false;
   set_code_map(Map::cast(obj));
@@ -1615,6 +1683,35 @@
 }
 
 
+Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
+  return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
+}
+
+
+Heap::RootListIndex Heap::RootIndexForExternalArrayType(
+    ExternalArrayType array_type) {
+  switch (array_type) {
+    case kExternalByteArray:
+      return kExternalByteArrayMapRootIndex;
+    case kExternalUnsignedByteArray:
+      return kExternalUnsignedByteArrayMapRootIndex;
+    case kExternalShortArray:
+      return kExternalShortArrayMapRootIndex;
+    case kExternalUnsignedShortArray:
+      return kExternalUnsignedShortArrayMapRootIndex;
+    case kExternalIntArray:
+      return kExternalIntArrayMapRootIndex;
+    case kExternalUnsignedIntArray:
+      return kExternalUnsignedIntArrayMapRootIndex;
+    case kExternalFloatArray:
+      return kExternalFloatArrayMapRootIndex;
+    default:
+      UNREACHABLE();
+      return kUndefinedValueRootIndex;
+  }
+}
+
+
 Object* Heap::NewNumberFromDouble(double value, PretenureFlag pretenure) {
   return SmiOrNumberFromDouble(value,
                                true /* number object must be new */,
@@ -1713,10 +1810,10 @@
   }
 
   Map* map;
-  if (length <= String::kMaxShortStringSize) {
+  if (length <= String::kMaxShortSize) {
     map = is_ascii ? short_cons_ascii_string_map()
       : short_cons_string_map();
-  } else if (length <= String::kMaxMediumStringSize) {
+  } else if (length <= String::kMaxMediumSize) {
     map = is_ascii ? medium_cons_ascii_string_map()
       : medium_cons_string_map();
   } else {
@@ -1746,11 +1843,11 @@
   }
 
   Map* map;
-  if (length <= String::kMaxShortStringSize) {
+  if (length <= String::kMaxShortSize) {
     map = buffer->IsAsciiRepresentation() ?
       short_sliced_ascii_string_map() :
       short_sliced_string_map();
-  } else if (length <= String::kMaxMediumStringSize) {
+  } else if (length <= String::kMaxMediumSize) {
     map = buffer->IsAsciiRepresentation() ?
       medium_sliced_ascii_string_map() :
       medium_sliced_string_map();
@@ -1815,9 +1912,9 @@
     ExternalAsciiString::Resource* resource) {
   Map* map;
   int length = resource->length();
-  if (length <= String::kMaxShortStringSize) {
+  if (length <= String::kMaxShortSize) {
     map = short_external_ascii_string_map();
-  } else if (length <= String::kMaxMediumStringSize) {
+  } else if (length <= String::kMaxMediumSize) {
     map = medium_external_ascii_string_map();
   } else {
     map = long_external_ascii_string_map();
@@ -1940,6 +2037,31 @@
 }
 
 
+Object* Heap::AllocateExternalArray(int length,
+                                    ExternalArrayType array_type,
+                                    void* external_pointer,
+                                    PretenureFlag pretenure) {
+  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+
+  // New space can't cope with forced allocation.
+  if (always_allocate()) space = OLD_DATA_SPACE;
+
+  Object* result = AllocateRaw(ExternalArray::kAlignedSize,
+                               space,
+                               OLD_DATA_SPACE);
+
+  if (result->IsFailure()) return result;
+
+  reinterpret_cast<ExternalArray*>(result)->set_map(
+      MapForExternalArrayType(array_type));
+  reinterpret_cast<ExternalArray*>(result)->set_length(length);
+  reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
+      external_pointer);
+
+  return result;
+}
+
+
 Object* Heap::CreateCode(const CodeDesc& desc,
                          ZoneScopeInfo* sinfo,
                          Code::Flags flags,
@@ -2021,7 +2143,9 @@
                                TargetSpaceId(map->instance_type()));
   if (result->IsFailure()) return result;
   HeapObject::cast(result)->set_map(map);
+#ifdef ENABLE_LOGGING_AND_PROFILING
   ProducerHeapProfile::RecordJSObjectAllocation(result);
+#endif
   return result;
 }
 
@@ -2134,9 +2258,8 @@
   // descriptors for these to the initial map as the object cannot be
   // constructed without having these properties.
   ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
-  if (fun->shared()->has_only_this_property_assignments() &&
-      fun->shared()->this_property_assignments_count() > 0 &&
-      fun->shared()->has_only_simple_this_property_assignments()) {
+  if (fun->shared()->has_only_simple_this_property_assignments() &&
+      fun->shared()->this_property_assignments_count() > 0) {
     int count = fun->shared()->this_property_assignments_count();
     if (count > in_object_properties) {
       count = in_object_properties;
@@ -2344,7 +2467,9 @@
     JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
   }
   // Return the new clone.
+#ifdef ENABLE_LOGGING_AND_PROFILING
   ProducerHeapProfile::RecordJSObjectAllocation(clone);
+#endif
   return clone;
 }
 
@@ -2534,18 +2659,18 @@
   Map* map;
 
   if (is_ascii) {
-    if (chars <= String::kMaxShortStringSize) {
+    if (chars <= String::kMaxShortSize) {
       map = short_ascii_symbol_map();
-    } else if (chars <= String::kMaxMediumStringSize) {
+    } else if (chars <= String::kMaxMediumSize) {
       map = medium_ascii_symbol_map();
     } else {
       map = long_ascii_symbol_map();
     }
     size = SeqAsciiString::SizeFor(chars);
   } else {
-    if (chars <= String::kMaxShortStringSize) {
+    if (chars <= String::kMaxShortSize) {
       map = short_symbol_map();
-    } else if (chars <= String::kMaxMediumStringSize) {
+    } else if (chars <= String::kMaxMediumSize) {
       map = medium_symbol_map();
     } else {
       map = long_symbol_map();
@@ -2595,9 +2720,9 @@
 
   // Determine the map based on the string's length.
   Map* map;
-  if (length <= String::kMaxShortStringSize) {
+  if (length <= String::kMaxShortSize) {
     map = short_ascii_string_map();
-  } else if (length <= String::kMaxMediumStringSize) {
+  } else if (length <= String::kMaxMediumSize) {
     map = medium_ascii_string_map();
   } else {
     map = long_ascii_string_map();
@@ -2632,9 +2757,9 @@
 
   // Determine the map based on the string's length.
   Map* map;
-  if (length <= String::kMaxShortStringSize) {
+  if (length <= String::kMaxShortSize) {
     map = short_string_map();
-  } else if (length <= String::kMaxMediumStringSize) {
+  } else if (length <= String::kMaxMediumSize) {
     map = medium_string_map();
   } else {
     map = long_string_map();
@@ -3119,60 +3244,53 @@
 }
 
 
-#ifdef DEBUG
-#define SYNCHRONIZE_TAG(tag) v->Synchronize(tag)
-#else
-#define SYNCHRONIZE_TAG(tag)
-#endif
-
 void Heap::IterateRoots(ObjectVisitor* v) {
   IterateStrongRoots(v);
   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
-  SYNCHRONIZE_TAG("symbol_table");
+  v->Synchronize("symbol_table");
 }
 
 
 void Heap::IterateStrongRoots(ObjectVisitor* v) {
   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
-  SYNCHRONIZE_TAG("strong_root_list");
+  v->Synchronize("strong_root_list");
 
   v->VisitPointer(bit_cast<Object**, String**>(&hidden_symbol_));
-  SYNCHRONIZE_TAG("symbol");
+  v->Synchronize("symbol");
 
   Bootstrapper::Iterate(v);
-  SYNCHRONIZE_TAG("bootstrapper");
+  v->Synchronize("bootstrapper");
   Top::Iterate(v);
-  SYNCHRONIZE_TAG("top");
+  v->Synchronize("top");
   Relocatable::Iterate(v);
-  SYNCHRONIZE_TAG("relocatable");
+  v->Synchronize("relocatable");
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
   Debug::Iterate(v);
 #endif
-  SYNCHRONIZE_TAG("debug");
+  v->Synchronize("debug");
   CompilationCache::Iterate(v);
-  SYNCHRONIZE_TAG("compilationcache");
+  v->Synchronize("compilationcache");
 
   // Iterate over local handles in handle scopes.
   HandleScopeImplementer::Iterate(v);
-  SYNCHRONIZE_TAG("handlescope");
+  v->Synchronize("handlescope");
 
   // Iterate over the builtin code objects and code stubs in the heap. Note
   // that it is not strictly necessary to iterate over code objects on
   // scavenge collections.  We still do it here because this same function
   // is used by the mark-sweep collector and the deserializer.
   Builtins::IterateBuiltins(v);
-  SYNCHRONIZE_TAG("builtins");
+  v->Synchronize("builtins");
 
   // Iterate over global handles.
   GlobalHandles::IterateRoots(v);
-  SYNCHRONIZE_TAG("globalhandles");
+  v->Synchronize("globalhandles");
 
   // Iterate over pointers being held by inactive threads.
   ThreadManager::Iterate(v);
-  SYNCHRONIZE_TAG("threadmanager");
+  v->Synchronize("threadmanager");
 }
-#undef SYNCHRONIZE_TAG
 
 
 // Flag is set when the heap has been configured.  The heap can be repeatedly
@@ -3182,21 +3300,37 @@
 // TODO(1236194): Since the heap size is configurable on the command line
 // and through the API, we should gracefully handle the case that the heap
 // size is not big enough to fit all the initial objects.
-bool Heap::ConfigureHeap(int semispace_size, int old_gen_size) {
+bool Heap::ConfigureHeap(int max_semispace_size, int max_old_gen_size) {
   if (HasBeenSetup()) return false;
 
-  if (semispace_size > 0) semispace_size_ = semispace_size;
-  if (old_gen_size > 0) old_generation_size_ = old_gen_size;
+  if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;
+
+  if (Snapshot::IsEnabled()) {
+    // If we are using a snapshot we always reserve the default amount
+    // of memory for each semispace because code in the snapshot has
+    // write-barrier code that relies on the size and alignment of new
+    // space.  We therefore cannot use a larger max semispace size
+    // than the default reserved semispace size.
+    if (max_semispace_size_ > reserved_semispace_size_) {
+      max_semispace_size_ = reserved_semispace_size_;
+    }
+  } else {
+    // If we are not using snapshots we reserve space for the actual
+    // max semispace size.
+    reserved_semispace_size_ = max_semispace_size_;
+  }
+
+  if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
 
   // The new space size must be a power of two to support single-bit testing
   // for containment.
-  semispace_size_ = RoundUpToPowerOf2(semispace_size_);
-  initial_semispace_size_ = Min(initial_semispace_size_, semispace_size_);
-  young_generation_size_ = 2 * semispace_size_;
-  external_allocation_limit_ = 10 * semispace_size_;
+  max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
+  reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
+  initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
+  external_allocation_limit_ = 10 * max_semispace_size_;
 
   // The old generation is paged.
-  old_generation_size_ = RoundUp(old_generation_size_, Page::kPageSize);
+  max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);
 
   heap_configured = true;
   return true;
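In outline, the snapshot-aware sizing above reduces to the following
(a standalone C++ sketch under the patch's naming, not the actual
implementation; note that ConfigureHeapDefault just below passes
FLAG_max_new_space_size / 2 because new space is a pair of semispaces):

    // Sketch: pick max and reserved semispace sizes per the rules in
    // ConfigureHeap.  With a snapshot, new-space write-barrier code is
    // baked in, so the max is capped at the reserved (default) size;
    // without one, we reserve exactly what was requested.
    void ChooseSemispaceSizes(bool snapshot_enabled, int requested_max,
                              int* max_size, int* reserved_size) {
      if (requested_max > 0) *max_size = requested_max;
      if (snapshot_enabled) {
        if (*max_size > *reserved_size) *max_size = *reserved_size;
      } else {
        *reserved_size = *max_size;
      }
    }
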
@@ -3204,7 +3338,7 @@
 
 
 bool Heap::ConfigureHeapDefault() {
-  return ConfigureHeap(FLAG_new_space_size, FLAG_old_space_size);
+  return ConfigureHeap(FLAG_max_new_space_size / 2, FLAG_max_old_space_size);
 }
 
 
@@ -3240,30 +3374,31 @@
   }
 
   // Setup memory allocator and reserve a chunk of memory for new
-  // space.  The chunk is double the size of the new space to ensure
-  // that we can find a pair of semispaces that are contiguous and
-  // aligned to their size.
-  if (!MemoryAllocator::Setup(MaxCapacity())) return false;
+  // space.  The chunk is double the size of the reserved new space to
+  // ensure that we can find a pair of semispaces that are contiguous
+  // and aligned to their size.
+  if (!MemoryAllocator::Setup(MaxReserved())) return false;
   void* chunk =
-      MemoryAllocator::ReserveInitialChunk(2 * young_generation_size_);
+      MemoryAllocator::ReserveInitialChunk(4 * reserved_semispace_size_);
   if (chunk == NULL) return false;
 
   // Align the pair of semispaces to their size, which must be a power
   // of 2.
-  ASSERT(IsPowerOf2(young_generation_size_));
   Address new_space_start =
-      RoundUp(reinterpret_cast<byte*>(chunk), young_generation_size_);
-  if (!new_space_.Setup(new_space_start, young_generation_size_)) return false;
+      RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
+  if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
+    return false;
+  }
 
   // Initialize old pointer space.
   old_pointer_space_ =
-      new OldSpace(old_generation_size_, OLD_POINTER_SPACE, NOT_EXECUTABLE);
+      new OldSpace(max_old_generation_size_, OLD_POINTER_SPACE, NOT_EXECUTABLE);
   if (old_pointer_space_ == NULL) return false;
   if (!old_pointer_space_->Setup(NULL, 0)) return false;
 
   // Initialize old data space.
   old_data_space_ =
-      new OldSpace(old_generation_size_, OLD_DATA_SPACE, NOT_EXECUTABLE);
+      new OldSpace(max_old_generation_size_, OLD_DATA_SPACE, NOT_EXECUTABLE);
   if (old_data_space_ == NULL) return false;
   if (!old_data_space_->Setup(NULL, 0)) return false;
 
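The oversized reservation is what makes the alignment step safe: inside
any chunk of 4 * S bytes there is always a region of 2 * S bytes aligned
to 2 * S.  A small illustrative sketch of the arithmetic (standalone,
not patch code; S must be a power of 2, as the patch requires):

    #include <cstdint>

    // Round value up to a power-of-2 alignment, as RoundUp does.
    inline uintptr_t RoundUpTo(uintptr_t value, uintptr_t alignment) {
      return (value + alignment - 1) & ~(alignment - 1);
    }

    // Rounding the chunk base up to 2 * S skips fewer than 2 * S
    // bytes, so the aligned 2 * S new space still fits in 4 * S.
    inline char* AlignNewSpace(char* chunk, uintptr_t s) {
      uintptr_t base = reinterpret_cast<uintptr_t>(chunk);
      return reinterpret_cast<char*>(RoundUpTo(base, 2 * s));
    }
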
@@ -3278,7 +3413,7 @@
   }
 
   code_space_ =
-      new OldSpace(old_generation_size_, CODE_SPACE, EXECUTABLE);
+      new OldSpace(max_old_generation_size_, CODE_SPACE, EXECUTABLE);
   if (code_space_ == NULL) return false;
   if (!code_space_->Setup(NULL, 0)) return false;
 
@@ -3288,7 +3423,7 @@
   if (!map_space_->Setup(NULL, 0)) return false;
 
   // Initialize global property cell space.
-  cell_space_ = new CellSpace(old_generation_size_, CELL_SPACE);
+  cell_space_ = new CellSpace(max_old_generation_size_, CELL_SPACE);
   if (cell_space_ == NULL) return false;
   if (!cell_space_->Setup(NULL, 0)) return false;
 
@@ -3311,8 +3446,10 @@
   LOG(IntEvent("heap-capacity", Capacity()));
   LOG(IntEvent("heap-available", Available()));
 
+#ifdef ENABLE_LOGGING_AND_PROFILING
   // This should be called only after initial objects have been created.
   ProducerHeapProfile::Setup();
+#endif
 
   return true;
 }
diff --git a/src/heap.h b/src/heap.h
index e878efc..2852605 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -38,7 +38,13 @@
 
 // Defines all the roots in Heap.
 #define UNCONDITIONAL_STRONG_ROOT_LIST(V)                                      \
-  /* Cluster the most popular ones in a few cache lines here at the top. */    \
+  /* Put the byte array map early.  We need it to be in place by the time   */ \
+  /* the deserializer hits the next page, since it wants to put a byte      */ \
+  /* array in the unused space at the end of the page.                      */ \
+  V(Map, byte_array_map, ByteArrayMap)                                         \
+  V(Map, one_pointer_filler_map, OnePointerFillerMap)                          \
+  V(Map, two_pointer_filler_map, TwoPointerFillerMap)                          \
+  /* Cluster the most popular ones in a few cache lines here at the top.    */ \
   V(Smi, stack_limit, StackLimit)                                              \
   V(Object, undefined_value, UndefinedValue)                                   \
   V(Object, the_hole_value, TheHoleValue)                                      \
@@ -109,8 +115,14 @@
     undetectable_medium_ascii_string_map,                                      \
     UndetectableMediumAsciiStringMap)                                          \
   V(Map, undetectable_long_ascii_string_map, UndetectableLongAsciiStringMap)   \
-  V(Map, byte_array_map, ByteArrayMap)                                         \
   V(Map, pixel_array_map, PixelArrayMap)                                       \
+  V(Map, external_byte_array_map, ExternalByteArrayMap)                        \
+  V(Map, external_unsigned_byte_array_map, ExternalUnsignedByteArrayMap)       \
+  V(Map, external_short_array_map, ExternalShortArrayMap)                      \
+  V(Map, external_unsigned_short_array_map, ExternalUnsignedShortArrayMap)     \
+  V(Map, external_int_array_map, ExternalIntArrayMap)                          \
+  V(Map, external_unsigned_int_array_map, ExternalUnsignedIntArrayMap)         \
+  V(Map, external_float_array_map, ExternalFloatArrayMap)                      \
   V(Map, context_map, ContextMap)                                              \
   V(Map, catch_context_map, CatchContextMap)                                   \
   V(Map, code_map, CodeMap)                                                    \
@@ -119,8 +131,6 @@
   V(Map, boilerplate_function_map, BoilerplateFunctionMap)                     \
   V(Map, shared_function_info_map, SharedFunctionInfoMap)                      \
   V(Map, proxy_map, ProxyMap)                                                  \
-  V(Map, one_pointer_filler_map, OnePointerFillerMap)                          \
-  V(Map, two_pointer_filler_map, TwoPointerFillerMap)                          \
   V(Object, nan_value, NanValue)                                               \
   V(Object, minus_zero_value, MinusZeroValue)                                  \
   V(String, empty_string, EmptyString)                                         \
@@ -214,7 +224,8 @@
   V(exec_symbol, "exec")                                                 \
   V(zero_symbol, "0")                                                    \
   V(global_eval_symbol, "GlobalEval")                                    \
-  V(identity_hash_symbol, "v8::IdentityHash")
+  V(identity_hash_symbol, "v8::IdentityHash")                            \
+  V(closure_symbol, "(closure)")
 
 
 // Forward declaration of the GCTracer class.
@@ -228,7 +239,7 @@
  public:
   // Configure heap size before setup. Return false if the heap has been
   // setup already.
-  static bool ConfigureHeap(int semispace_size, int old_gen_size);
+  static bool ConfigureHeap(int max_semispace_size, int max_old_gen_size);
   static bool ConfigureHeapDefault();
 
   // Initializes the global object heap. If create_heap_objects is true,
@@ -247,19 +258,26 @@
   // Returns whether Setup has been called.
   static bool HasBeenSetup();
 
-  // Returns the maximum heap capacity.
-  static int MaxCapacity() {
-    return young_generation_size_ + old_generation_size_;
+  // Returns the maximum amount of memory reserved for the heap.  For
+  // the young generation we reserve 4 times the amount needed for a
+  // semispace: the young generation consists of two semispaces, and
+  // we reserve twice the amount those need in order to ensure that
+  // new space can be aligned to its size.
+  static int MaxReserved() {
+    return 4 * reserved_semispace_size_ + max_old_generation_size_;
   }
-  static int SemiSpaceSize() { return semispace_size_; }
+  static int MaxSemiSpaceSize() { return max_semispace_size_; }
+  static int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
   static int InitialSemiSpaceSize() { return initial_semispace_size_; }
-  static int YoungGenerationSize() { return young_generation_size_; }
-  static int OldGenerationSize() { return old_generation_size_; }
+  static int MaxOldGenerationSize() { return max_old_generation_size_; }
 
   // Returns the capacity of the heap in bytes w/o growing. Heap grows when
   // more spaces are needed until it reaches the limit.
   static int Capacity();
 
+  // Returns the amount of memory currently committed for the heap.
+  static int CommittedMemory();
+
   // Returns the available bytes in space w/o growing.
   // Heap doesn't guarantee that it can allocate an object that requires
   // all available bytes. Check MaxHeapObjectSize() instead.
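For concreteness, with the ia32 defaults set up in heap.cc (8 MB max
semispace, 512 MB max old generation, no snapshot, so the reserved
semispace size equals the max), MaxReserved() comes to
4 * 8 MB + 512 MB = 544 MB of reserved address space, even though new
space itself never occupies more than 2 * 8 MB = 16 MB at a time.
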
@@ -290,6 +308,9 @@
   static Address always_allocate_scope_depth_address() {
     return reinterpret_cast<Address>(&always_allocate_scope_depth_);
   }
+  static bool linear_allocation() {
+    return linear_allocation_scope_depth_ != 0;
+  }
 
   static Address* NewSpaceAllocationTopAddress() {
     return new_space_.allocation_top_address();
@@ -449,6 +470,15 @@
                                     uint8_t* external_pointer,
                                     PretenureFlag pretenure);
 
+  // Allocates an external array of the specified length and type.
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this does not perform a garbage collection.
+  static Object* AllocateExternalArray(int length,
+                                       ExternalArrayType array_type,
+                                       void* external_pointer,
+                                       PretenureFlag pretenure);
+
   // Allocate a tenured JS global property cell.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed.
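A hypothetical call site for the new allocator (illustrative only; the
embedder-owned backing buffer is an assumption, not patch code):

    // Sketch: allocate an external float array over embedder memory.
    // The heap object stores only the pointer and length; the embedder
    // keeps ownership of the backing store.
    static float backing[256];
    Object* result = Heap::AllocateExternalArray(256,
                                                 kExternalFloatArray,
                                                 backing,
                                                 NOT_TENURED);
    if (result->IsFailure()) {
      // Failure::RetryAfterGC -- collect garbage, then retry.
    }
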
@@ -637,9 +667,6 @@
   static void GarbageCollectionPrologue();
   static void GarbageCollectionEpilogue();
 
-  // Code that should be executed after the garbage collection proper.
-  static void PostGarbageCollectionProcessing();
-
   // Performs garbage collection operation.
   // Returns whether required_space bytes are available after the collection.
   static bool CollectGarbage(int required_space, AllocationSpace space);
@@ -729,7 +756,7 @@
   static bool Contains(HeapObject* value);
 
   // Checks whether an address/object is in a space.
-  // Currently used by tests and heap verification only.
+  // Currently used by tests, serialization and heap verification only.
   static bool InSpace(Address addr, AllocationSpace space);
   static bool InSpace(HeapObject* value, AllocationSpace space);
 
@@ -884,11 +911,15 @@
 
   static Object* NumberToString(Object* number);
 
+  static Map* MapForExternalArrayType(ExternalArrayType array_type);
+  static RootListIndex RootIndexForExternalArrayType(
+      ExternalArrayType array_type);
+
  private:
-  static int semispace_size_;
+  static int reserved_semispace_size_;
+  static int max_semispace_size_;
   static int initial_semispace_size_;
-  static int young_generation_size_;
-  static int old_generation_size_;
+  static int max_old_generation_size_;
   static size_t code_range_size_;
 
   // For keeping track of how much data has survived
@@ -896,6 +927,7 @@
   static int survived_since_last_expansion_;
 
   static int always_allocate_scope_depth_;
+  static int linear_allocation_scope_depth_;
   static bool context_disposed_pending_;
 
   static const int kMaxMapSpaceSize = 8*MB;
@@ -1111,6 +1143,7 @@
   friend class Factory;
   friend class DisallowAllocationFailure;
   friend class AlwaysAllocateScope;
+  friend class LinearAllocationScope;
 };
 
 
@@ -1132,6 +1165,19 @@
 };
 
 
+class LinearAllocationScope {
+ public:
+  LinearAllocationScope() {
+    Heap::linear_allocation_scope_depth_++;
+  }
+
+  ~LinearAllocationScope() {
+    Heap::linear_allocation_scope_depth_--;
+    ASSERT(Heap::linear_allocation_scope_depth_ >= 0);
+  }
+};
+
+
 #ifdef DEBUG
 // Visitor class to verify interior pointers that do not have remembered set
 // bits.  All heap object pointers have to point into the heap to a location
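Like AlwaysAllocateScope above, the new scope is a simple RAII counter.
A hypothetical use site (illustrative, not from the patch):

    {
      // Sketch: within this block Heap::linear_allocation() returns
      // true, e.g. while the serializer needs predictable layout.
      LinearAllocationScope scope;
      // ... allocate ...
    }  // Destructor decrements the depth and asserts it stays >= 0.
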
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index bc28710..698377a 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -1850,6 +1850,22 @@
 }
 
 
+void Assembler::fucomi(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDB);
+  EMIT(0xE8 + i);
+}
+
+
+void Assembler::fucomip() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDF);
+  EMIT(0xE9);
+}
+
+
 void Assembler::fcompp() {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -2109,7 +2125,7 @@
   // Some internal data structures overflow for very large buffers,
   // they must ensure that kMaximalBufferSize is not too large.
   if ((desc.buffer_size > kMaximalBufferSize) ||
-      (desc.buffer_size > Heap::OldGenerationSize())) {
+      (desc.buffer_size > Heap::MaxOldGenerationSize())) {
     V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
   }
 
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 4719f2d..a431d04 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -439,6 +439,14 @@
   inline static Address target_address_at(Address pc);
   inline static void set_target_address_at(Address pc, Address target);
 
+  // This sets the branch destination (which is in the instruction on x86).
+  inline static void set_target_at(Address instruction_payload,
+                                   Address target) {
+    set_target_address_at(instruction_payload, target);
+  }
+
+  static const int kCallTargetSize = kPointerSize;
+
   // Distance between the address of the code target in the call instruction
   // and the return address
   static const int kCallTargetAddressOffset = kPointerSize;
@@ -702,6 +710,8 @@
   void ftst();
   void fucomp(int i);
   void fucompp();
+  void fucomi(int i);
+  void fucomip();
   void fcompp();
   void fnstsw_ax();
   void fwait();
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index ad44026..963b0e3 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -462,6 +462,8 @@
     const int kGlobalIndex =
         Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
     __ mov(ebx, FieldOperand(esi, kGlobalIndex));
+    __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalContextOffset));
+    __ mov(ebx, FieldOperand(ebx, kGlobalIndex));
     __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
 
     __ bind(&patch_receiver);
@@ -520,48 +522,48 @@
   __ push(Operand(ebp, 2 * kPointerSize));  // push arguments
   __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
 
-  if (FLAG_check_stack) {
-    // We need to catch preemptions right here, otherwise an unlucky preemption
-    // could show up as a failed apply.
-    ExternalReference stack_guard_limit =
-        ExternalReference::address_of_stack_guard_limit();
-    Label retry_preemption;
-    Label no_preemption;
-    __ bind(&retry_preemption);
-    __ mov(edi, Operand::StaticVariable(stack_guard_limit));
-    __ cmp(esp, Operand(edi));
-    __ j(above, &no_preemption, taken);
+  // Check the stack for overflow or a break request.
+  // We need to catch preemptions right here, otherwise an unlucky preemption
+  // could show up as a failed apply.
+  ExternalReference stack_guard_limit =
+      ExternalReference::address_of_stack_guard_limit();
+  Label retry_preemption;
+  Label no_preemption;
+  __ bind(&retry_preemption);
+  __ mov(edi, Operand::StaticVariable(stack_guard_limit));
+  __ cmp(esp, Operand(edi));
+  __ j(above, &no_preemption, taken);
 
-    // Preemption!
-    // Because builtins always remove the receiver from the stack, we
-    // have to fake one to avoid underflowing the stack.
-    __ push(eax);
-    __ push(Immediate(Smi::FromInt(0)));
+  // Preemption!
+  // Because builtins always remove the receiver from the stack, we
+  // have to fake one to avoid underflowing the stack.
+  __ push(eax);
+  __ push(Immediate(Smi::FromInt(0)));
 
-    // Do call to runtime routine.
-    __ CallRuntime(Runtime::kStackGuard, 1);
-    __ pop(eax);
-    __ jmp(&retry_preemption);
+  // Do call to runtime routine.
+  __ CallRuntime(Runtime::kStackGuard, 1);
+  __ pop(eax);
+  __ jmp(&retry_preemption);
 
-    __ bind(&no_preemption);
+  __ bind(&no_preemption);
 
-    Label okay;
-    // Make ecx the space we have left.
-    __ mov(ecx, Operand(esp));
-    __ sub(ecx, Operand(edi));
-    // Make edx the space we need for the array when it is unrolled onto the
-    // stack.
-    __ mov(edx, Operand(eax));
-    __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
-    __ cmp(ecx, Operand(edx));
-    __ j(greater, &okay, taken);
+  Label okay;
+  // Make ecx the space we have left.
+  __ mov(ecx, Operand(esp));
+  __ sub(ecx, Operand(edi));
+  // Make edx the space we need for the array when it is unrolled onto the
+  // stack.
+  __ mov(edx, Operand(eax));
+  __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
+  __ cmp(ecx, Operand(edx));
+  __ j(greater, &okay, taken);
 
-    // Too bad: Out of stack space.
-    __ push(Operand(ebp, 4 * kPointerSize));  // push this
-    __ push(eax);
-    __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
-    __ bind(&okay);
-  }
+  // Too bad: Out of stack space.
+  __ push(Operand(ebp, 4 * kPointerSize));  // push this
+  __ push(eax);
+  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+  __ bind(&okay);
+  // End of stack check.
 
   // Push current index and limit.
   const int kLimitOffset =
@@ -606,6 +608,8 @@
   const int kGlobalOffset =
       Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
   __ mov(ebx, FieldOperand(esi, kGlobalOffset));
+  __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalContextOffset));
+  __ mov(ebx, FieldOperand(ebx, kGlobalOffset));
   __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
 
   // Push the receiver.
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index ce48e9d..8e8ff2e 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -697,18 +697,6 @@
 }
 
 
-class ToBooleanStub: public CodeStub {
- public:
-  ToBooleanStub() { }
-
-  void Generate(MacroAssembler* masm);
-
- private:
-  Major MajorKey() { return ToBoolean; }
-  int MinorKey() { return 0; }
-};
-
-
 // ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
 // convert it to a boolean in the condition code register or jump to
 // 'false_target'/'true_target' as appropriate.
@@ -773,13 +761,6 @@
   // either operand is not a number.  Operands are in edx and eax.
   // Leaves operands unchanged.
   static void LoadSse2Operands(MacroAssembler* masm, Label* not_numbers);
-  // Allocate a heap number in new space with undefined value.
-  // Returns tagged pointer in eax, or jumps to need_gc if new space is full.
-  static void AllocateHeapNumber(MacroAssembler* masm,
-                                 Label* need_gc,
-                                 Register scratch1,
-                                 Register scratch2,
-                                 Register result);
 };
 
 
@@ -2222,14 +2203,12 @@
 
 
 void CodeGenerator::CheckStack() {
-  if (FLAG_check_stack) {
-    DeferredStackCheck* deferred = new DeferredStackCheck;
-    ExternalReference stack_guard_limit =
-        ExternalReference::address_of_stack_guard_limit();
-    __ cmp(esp, Operand::StaticVariable(stack_guard_limit));
-    deferred->Branch(below);
-    deferred->BindExit();
-  }
+  DeferredStackCheck* deferred = new DeferredStackCheck;
+  ExternalReference stack_guard_limit =
+      ExternalReference::address_of_stack_guard_limit();
+  __ cmp(esp, Operand::StaticVariable(stack_guard_limit));
+  deferred->Branch(below);
+  deferred->BindExit();
 }
 
 
@@ -2282,8 +2261,8 @@
   // allow us to push the arguments directly into place.
   frame_->SyncRange(0, frame_->element_count() - 1);
 
+  frame_->EmitPush(esi);  // The context is the first argument.
   frame_->EmitPush(Immediate(pairs));
-  frame_->EmitPush(esi);  // The context is the second argument.
   frame_->EmitPush(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
   Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
   // Return value is ignored.
@@ -3583,11 +3562,9 @@
   ASSERT(boilerplate->IsBoilerplate());
   frame_->SyncRange(0, frame_->element_count() - 1);
 
-  // Push the boilerplate on the stack.
-  frame_->EmitPush(Immediate(boilerplate));
-
   // Create a new closure.
   frame_->EmitPush(esi);
+  frame_->EmitPush(Immediate(boilerplate));
   Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
   frame_->Push(&result);
 }
@@ -5175,11 +5152,10 @@
   Result scratch1 = allocator()->Allocate();
   Result scratch2 = allocator()->Allocate();
   Result heap_number = allocator()->Allocate();
-  FloatingPointHelper::AllocateHeapNumber(masm_,
-                                          call_runtime.entry_label(),
-                                          scratch1.reg(),
-                                          scratch2.reg(),
-                                          heap_number.reg());
+  __ AllocateHeapNumber(heap_number.reg(),
+                        scratch1.reg(),
+                        scratch2.reg(),
+                        call_runtime.entry_label());
   scratch1.Unuse();
   scratch2.Unuse();
 
@@ -6508,11 +6484,7 @@
   __ j(not_equal, &true_result);
   __ fldz();
   __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
-  __ fucompp();
-  __ push(eax);
-  __ fnstsw_ax();
-  __ sahf();
-  __ pop(eax);
+  __ FCmp();
   __ j(zero, &false_result);
   // Fall through to |true_result|.
 
@@ -6536,42 +6508,47 @@
     __ push(right);
   } else {
     // The calling convention with registers is left in edx and right in eax.
-    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
-    if (!(left.is(edx) && right.is(eax))) {
-      if (left.is(eax) && right.is(edx)) {
+    Register left_arg = edx;
+    Register right_arg = eax;
+    if (!(left.is(left_arg) && right.is(right_arg))) {
+      if (left.is(right_arg) && right.is(left_arg)) {
         if (IsOperationCommutative()) {
           SetArgsReversed();
         } else {
           __ xchg(left, right);
         }
-      } else if (left.is(edx)) {
-        __ mov(eax, right);
-      } else if (left.is(eax)) {
+      } else if (left.is(left_arg)) {
+        __ mov(right_arg, right);
+      } else if (left.is(right_arg)) {
         if (IsOperationCommutative()) {
-          __ mov(edx, right);
+          __ mov(left_arg, right);
           SetArgsReversed();
         } else {
-          __ mov(edx, left);
-          __ mov(eax, right);
+          // Order of moves important to avoid destroying left argument.
+          __ mov(left_arg, left);
+          __ mov(right_arg, right);
         }
-      } else if (right.is(edx)) {
+      } else if (right.is(left_arg)) {
         if (IsOperationCommutative()) {
-          __ mov(eax, left);
+          __ mov(right_arg, left);
           SetArgsReversed();
         } else {
-          __ mov(eax, right);
-          __ mov(edx, left);
+          // Order of moves important to avoid destroying right argument.
+          __ mov(right_arg, right);
+          __ mov(left_arg, left);
         }
-      } else if (right.is(eax)) {
-        __ mov(edx, left);
+      } else if (right.is(right_arg)) {
+        __ mov(left_arg, left);
       } else {
-        __ mov(edx, left);
-        __ mov(eax, right);
+        // Order of moves is not important.
+        __ mov(left_arg, left);
+        __ mov(right_arg, right);
       }
     }
 
     // Update flags to indicate that arguments are in registers.
     SetArgsInRegisters();
+    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
   }
 
   // Call the stub.
@@ -6588,19 +6565,22 @@
     __ push(left);
     __ push(Immediate(right));
   } else {
-    // Adapt arguments to the calling convention left in edx and right in eax.
-    if (left.is(edx)) {
-      __ mov(eax, Immediate(right));
-    } else if (left.is(eax) && IsOperationCommutative()) {
-      __ mov(edx, Immediate(right));
+    // The calling convention with registers is left in edx and right in eax.
+    Register left_arg = edx;
+    Register right_arg = eax;
+    if (left.is(left_arg)) {
+      __ mov(right_arg, Immediate(right));
+    } else if (left.is(right_arg) && IsOperationCommutative()) {
+      __ mov(left_arg, Immediate(right));
       SetArgsReversed();
     } else {
-      __ mov(edx, left);
-      __ mov(eax, Immediate(right));
+      __ mov(left_arg, left);
+      __ mov(right_arg, Immediate(right));
     }
 
     // Update flags to indicate that arguments are in registers.
     SetArgsInRegisters();
+    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
   }
 
   // Call the stub.
@@ -6617,18 +6597,21 @@
     __ push(Immediate(left));
     __ push(right);
   } else {
-    // Adapt arguments to the calling convention left in edx and right in eax.
-    bool is_commutative = (op_ == (Token::ADD) || (op_ == Token::MUL));
-    if (right.is(eax)) {
-      __ mov(edx, Immediate(left));
-    } else if (right.is(edx) && is_commutative) {
-        __ mov(eax, Immediate(left));
+    // The calling convention with registers is left in edx and right in eax.
+    Register left_arg = edx;
+    Register right_arg = eax;
+    if (right.is(right_arg)) {
+      __ mov(left_arg, Immediate(left));
+    } else if (right.is(left_arg) && IsOperationCommutative()) {
+      __ mov(right_arg, Immediate(left));
+      SetArgsReversed();
     } else {
-      __ mov(edx, Immediate(left));
-      __ mov(eax, right);
+      __ mov(left_arg, Immediate(left));
+      __ mov(right_arg, right);
     }
     // Update flags to indicate that arguments are in registers.
     SetArgsInRegisters();
+    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
   }
 
   // Call the stub.
@@ -6836,11 +6819,7 @@
           case NO_OVERWRITE: {
             // Allocate a heap number for the result. Keep eax and edx intact
             // for the possible runtime call.
-            FloatingPointHelper::AllocateHeapNumber(masm,
-                                                    &call_runtime,
-                                                    ecx,
-                                                    no_reg,
-                                                    ebx);
+            __ AllocateHeapNumber(ebx, ecx, no_reg, &call_runtime);
             // Now eax can be overwritten losing one of the arguments as we are
             // now done and will not need it any more.
             __ mov(eax, ebx);
@@ -6868,11 +6847,7 @@
           case NO_OVERWRITE:
             // Allocate a heap number for the result. Keep eax and edx intact
             // for the possible runtime call.
-            FloatingPointHelper::AllocateHeapNumber(masm,
-                                                    &call_runtime,
-                                                    ecx,
-                                                    no_reg,
-                                                    ebx);
+            __ AllocateHeapNumber(ebx, ecx, no_reg, &call_runtime);
             // Now eax can be overwritten losing one of the arguments as we are
             // now done and will not need it any more.
             __ mov(eax, ebx);
@@ -6924,18 +6899,14 @@
         // Check if right operand is int32.
         __ fist_s(Operand(esp, 0 * kPointerSize));
         __ fild_s(Operand(esp, 0 * kPointerSize));
-        __ fucompp();
-        __ fnstsw_ax();
-        __ sahf();
+        __ FCmp();
         __ j(not_zero, &operand_conversion_failure);
         __ j(parity_even, &operand_conversion_failure);
 
         // Check if left operand is int32.
         __ fist_s(Operand(esp, 1 * kPointerSize));
         __ fild_s(Operand(esp, 1 * kPointerSize));
-        __ fucompp();
-        __ fnstsw_ax();
-        __ sahf();
+        __ FCmp();
         __ j(not_zero, &operand_conversion_failure);
         __ j(parity_even, &operand_conversion_failure);
       }
@@ -6964,7 +6935,7 @@
       // Tag smi result and return.
       ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
       __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
-      __ ret(2 * kPointerSize);
+      GenerateReturn(masm);
 
       // All ops except SHR return a signed int32 that we load in a HeapNumber.
       if (op_ != Token::SHR) {
@@ -6982,8 +6953,7 @@
             __ j(not_zero, &skip_allocation, not_taken);
             // Fall through!
           case NO_OVERWRITE:
-            FloatingPointHelper::AllocateHeapNumber(masm, &call_runtime,
-                                                    ecx, edx, eax);
+            __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
             __ bind(&skip_allocation);
             break;
           default: UNREACHABLE();
@@ -6992,7 +6962,7 @@
         __ mov(Operand(esp, 1 * kPointerSize), ebx);
         __ fild_s(Operand(esp, 1 * kPointerSize));
         __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
-        __ ret(2 * kPointerSize);
+        GenerateReturn(masm);
       }
 
       // Clear the FPU exception flag and reset the stack before calling
@@ -7024,7 +6994,7 @@
 
   // If all else fails, use the runtime system to get the correct
   // result. If arguments was passed in registers now place them on the
-  // stack in the correct order.
+  // stack in the correct order below the return address.
   __ bind(&call_runtime);
   if (HasArgumentsInRegisters()) {
     __ pop(ecx);
@@ -7133,25 +7103,6 @@
 }
 
 
-void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
-                                             Label* need_gc,
-                                             Register scratch1,
-                                             Register scratch2,
-                                             Register result) {
-  // Allocate heap number in new space.
-  __ AllocateInNewSpace(HeapNumber::kSize,
-                        result,
-                        scratch1,
-                        scratch2,
-                        need_gc,
-                        TAG_OBJECT);
-
-  // Set the map.
-  __ mov(FieldOperand(result, HeapObject::kMapOffset),
-         Immediate(Factory::heap_number_map()));
-}
-
-
 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
                                            Register number) {
   Label load_smi, done;
@@ -7308,7 +7259,7 @@
   } else {
     __ mov(edx, Operand(eax));
     // edx: operand
-    FloatingPointHelper::AllocateHeapNumber(masm, &undo, ebx, ecx, eax);
+    __ AllocateHeapNumber(eax, ebx, ecx, &undo);
     // eax: allocated 'empty' number
     __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
     __ xor_(ecx, HeapNumber::kSignMask);  // Flip sign.
@@ -7458,20 +7409,19 @@
       // not NaN.
       // The representation of NaN values has all exponent bits (52..62) set,
       // and not all mantissa bits (0..51) clear.
+      // We only accept QNaNs, which have bit 51 set.
       // Read top bits of double representation (second word of value).
-      __ mov(eax, FieldOperand(edx, HeapNumber::kExponentOffset));
-      // Test that exponent bits are all set.
-      __ not_(eax);
-      __ test(eax, Immediate(0x7ff00000));
-      __ j(not_zero, &return_equal);
-      __ not_(eax);
 
-      // Shift out flag and all exponent bits, retaining only mantissa.
-      __ shl(eax, 12);
-      // Or with all low-bits of mantissa.
-      __ or_(eax, FieldOperand(edx, HeapNumber::kMantissaOffset));
-      // Return zero equal if all bits in mantissa is zero (it's an Infinity)
-      // and non-zero if not (it's a NaN).
+      // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
+      // all bits in the mask are set. We only need to check the word
+      // that contains the exponent and high bit of the mantissa.
+      ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
+      __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
+      __ xor_(eax, Operand(eax));
+      // Shift value and mask so kQuietNaNHighBitsMask applies to topmost bits.
+      __ add(edx, Operand(edx));
+      __ cmp(edx, kQuietNaNHighBitsMask << 1);
+      __ setcc(above_equal, eax);
       __ ret(0);
 
       __ bind(&not_identical);
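The doubled unsigned compare above is just a mask test on the high word
of the double.  An equivalent standalone sketch (illustrative C++; the
0xfff << (51 - 32) value of kQuietNaNHighBitsMask is an assumption to
be checked against the V8 globals):

    #include <cstring>

    static const unsigned kQuietNaNHighBitsMask = 0xfff << (51 - 32);

    // A double is a quiet NaN iff the exponent bits are all set and
    // the top mantissa bit (bit 51) is set; the sign bit is ignored.
    bool IsQuietNaN(double value) {
      unsigned long long bits;
      std::memcpy(&bits, &value, sizeof(bits));
      unsigned hi = static_cast<unsigned>(bits >> 32);
      // Shift out the sign bit, like `add edx, edx`; the unsigned >=
      // then matches the setcc(above_equal) in the stub.
      return (hi << 1) >= (kQuietNaNHighBitsMask << 1);
    }
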
@@ -7757,11 +7707,84 @@
 }
 
 
+// If true, a Handle<T> passed by value is passed and returned by
+// using the location_ field directly.  If false, it is passed and
+// returned as a pointer to a handle.
+#ifdef USING_MAC_ABI
+static const bool kPassHandlesDirectly = true;
+#else
+static const bool kPassHandlesDirectly = false;
+#endif
+
+
+void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
+  Label get_result;
+  Label prologue;
+  Label promote_scheduled_exception;
+  __ EnterApiExitFrame(ExitFrame::MODE_NORMAL, kStackSpace, kArgc);
+  ASSERT_EQ(kArgc, 4);
+  if (kPassHandlesDirectly) {
+    // When handles as passed directly we don't have to allocate extra
+    // space for and pass an out parameter.
+    __ mov(Operand(esp, 0 * kPointerSize), ebx);  // name.
+    __ mov(Operand(esp, 1 * kPointerSize), eax);  // arguments pointer.
+  } else {
+    // The function expects three arguments to be passed but we allocate
+    // four to get space for the output cell.  The argument slots are filled
+    // as follows:
+    //
+    //   3: output cell
+    //   2: arguments pointer
+    //   1: name
+    //   0: pointer to the output cell
+    //
+    // Note that this is one more "argument" than the function expects
+    // so the out cell will have to be popped explicitly after returning
+    // from the function.
+    __ mov(Operand(esp, 1 * kPointerSize), ebx);  // name.
+    __ mov(Operand(esp, 2 * kPointerSize), eax);  // arguments pointer.
+    __ mov(ebx, esp);
+    __ add(Operand(ebx), Immediate(3 * kPointerSize));
+    __ mov(Operand(esp, 0 * kPointerSize), ebx);  // output
+    __ mov(Operand(esp, 3 * kPointerSize), Immediate(0));  // out cell.
+  }
+  // Call the api function!
+  __ call(fun()->address(), RelocInfo::RUNTIME_ENTRY);
+  // Check if the function scheduled an exception.
+  ExternalReference scheduled_exception_address =
+      ExternalReference::scheduled_exception_address();
+  __ cmp(Operand::StaticVariable(scheduled_exception_address),
+         Immediate(Factory::the_hole_value()));
+  __ j(not_equal, &promote_scheduled_exception, not_taken);
+  if (!kPassHandlesDirectly) {
+    // The returned value is a pointer to the handle holding the result.
+    // Dereference this to get to the location.
+    __ mov(eax, Operand(eax, 0));
+  }
+  // Check if the result handle holds 0.
+  __ test(eax, Operand(eax));
+  __ j(not_zero, &get_result, taken);
+  // It was zero; the result is undefined.
+  __ mov(eax, Factory::undefined_value());
+  __ jmp(&prologue);
+  // It was non-zero.  Dereference to get the result value.
+  __ bind(&get_result);
+  __ mov(eax, Operand(eax, 0));
+  __ bind(&prologue);
+  __ LeaveExitFrame(ExitFrame::MODE_NORMAL);
+  __ ret(0);
+  __ bind(&promote_scheduled_exception);
+  __ TailCallRuntime(ExternalReference(Runtime::kPromoteScheduledException),
+                     0,
+                     1);
+}
+
+
 void CEntryStub::GenerateCore(MacroAssembler* masm,
                               Label* throw_normal_exception,
                               Label* throw_termination_exception,
                               Label* throw_out_of_memory_exception,
-                              StackFrame::Type frame_type,
+                              ExitFrame::Mode mode,
                               bool do_gc,
                               bool always_allocate_scope) {
   // eax: result parameter for PerformGC, if any
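The two handle-passing conventions the getter stub supports can be
pictured with a one-word stand-in type (a sketch only; TinyHandle and
both functions are hypothetical, not V8 declarations):

    // Sketch: a one-word handle like Handle<T> with its location_.
    struct TinyHandle { void* location_; };

    // kPassHandlesDirectly == true: the handle's single word travels
    // by value in registers or on the stack.
    TinyHandle GetDirect(TinyHandle name) { return name; }

    // kPassHandlesDirectly == false: by-value handles are lowered to
    // pointers, so the caller supplies an out cell and the callee
    // returns a pointer to it -- hence the extra argument slot and
    // the double dereference in the stub above.
    TinyHandle* GetIndirect(TinyHandle* out_cell, TinyHandle name) {
      *out_cell = name;
      return out_cell;
    }
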
@@ -7811,7 +7834,7 @@
   __ j(zero, &failure_returned, not_taken);
 
   // Exit the JavaScript to C++ exit frame.
-  __ LeaveExitFrame(frame_type);
+  __ LeaveExitFrame(mode);
   __ ret(0);
 
   // Handling of failure.
@@ -7910,12 +7933,12 @@
   // of a proper result. The builtin entry handles this by performing
   // a garbage collection and retrying the builtin (twice).
 
-  StackFrame::Type frame_type = is_debug_break ?
-      StackFrame::EXIT_DEBUG :
-      StackFrame::EXIT;
+  ExitFrame::Mode mode = is_debug_break
+      ? ExitFrame::MODE_DEBUG
+      : ExitFrame::MODE_NORMAL;
 
   // Enter the exit frame that transitions from JavaScript to C++.
-  __ EnterExitFrame(frame_type);
+  __ EnterExitFrame(mode);
 
   // eax: result parameter for PerformGC, if any (setup below)
   // ebx: pointer to builtin function  (C callee-saved)
@@ -7933,7 +7956,7 @@
                &throw_normal_exception,
                &throw_termination_exception,
                &throw_out_of_memory_exception,
-               frame_type,
+               mode,
                false,
                false);
 
@@ -7942,7 +7965,7 @@
                &throw_normal_exception,
                &throw_termination_exception,
                &throw_out_of_memory_exception,
-               frame_type,
+               mode,
                true,
                false);
 
@@ -7953,7 +7976,7 @@
                &throw_normal_exception,
                &throw_termination_exception,
                &throw_out_of_memory_exception,
-               frame_type,
+               mode,
                true,
                true);
 
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index ec4a8be..3669e9d 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -396,7 +396,7 @@
   void LoadReference(Reference* ref);
   void UnloadReference(Reference* ref);
 
-  Operand ContextOperand(Register context, int index) const {
+  static Operand ContextOperand(Register context, int index) {
     return Operand(context, Context::SlotOffset(index));
   }
 
@@ -407,7 +407,7 @@
                                             JumpTarget* slow);
 
   // Expressions
-  Operand GlobalObject() const {
+  static Operand GlobalObject() {
     return ContextOperand(esi, Context::GLOBAL_INDEX);
   }
 
@@ -511,10 +511,11 @@
                                       const InlineRuntimeLUT& new_entry,
                                       InlineRuntimeLUT* old_entry);
 
+  static Handle<Code> ComputeLazyCompile(int argc);
   Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
   void ProcessDeclarations(ZoneList<Declaration*>* declarations);
 
-  Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
+  static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
 
   // Declare global variables and functions in the given array of
   // name/value pairs.
@@ -616,6 +617,8 @@
   friend class JumpTarget;
   friend class Reference;
   friend class Result;
+  friend class FastCodeGenerator;
+  friend class CodeGenSelector;
 
   friend class CodeGeneratorPatcher;  // Used in test-log-stack-tracer.cc
 
@@ -623,7 +626,19 @@
 };
 
 
-// Flag that indicates whether how to generate code for the stub.
+class ToBooleanStub: public CodeStub {
+ public:
+  ToBooleanStub() { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Major MajorKey() { return ToBoolean; }
+  int MinorKey() { return 0; }
+};
+
+
+// Flag that indicates how the GenericBinaryOpStub should generate code.
 enum GenericBinaryFlags {
   NO_GENERIC_BINARY_FLAGS = 0,
   NO_SMI_CODE_IN_STUB = 1 << 0  // Omit smi code in stub.
@@ -632,10 +647,10 @@
 
 class GenericBinaryOpStub: public CodeStub {
  public:
-  GenericBinaryOpStub(Token::Value operation,
+  GenericBinaryOpStub(Token::Value op,
                       OverwriteMode mode,
                       GenericBinaryFlags flags)
-      : op_(operation),
+      : op_(op),
         mode_(mode),
         flags_(flags),
         args_in_registers_(false),
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index adedf34..3e3ca73 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -204,7 +204,7 @@
     InstructionDesc* id = &instructions_[bm[i].b];
     id->mnem = bm[i].mnem;
     id->op_order_ = bm[i].op_order_;
-    assert(id->type == NO_INSTR);  // Information already entered
+    ASSERT_EQ(NO_INSTR, id->type);  // Information not already entered.
     id->type = type;
   }
 }
@@ -216,7 +216,7 @@
                                      const char* mnem) {
   for (byte b = start; b <= end; b++) {
     InstructionDesc* id = &instructions_[b];
-    assert(id->type == NO_INSTR);  // Information already entered
+    ASSERT_EQ(NO_INSTR, id->type);  // Information not already entered.
     id->mnem = mnem;
     id->type = type;
   }
@@ -226,7 +226,7 @@
 void InstructionTable::AddJumpConditionalShort() {
   for (byte b = 0x70; b <= 0x7F; b++) {
     InstructionDesc* id = &instructions_[b];
-    assert(id->type == NO_INSTR);  // Information already entered
+    ASSERT_EQ(NO_INSTR, id->type);  // Information not already entered.
     id->mnem = jump_conditional_mnem[b & 0x0F];
     id->type = JUMP_CONDITIONAL_SHORT_INSTR;
   }
@@ -321,6 +321,8 @@
   int SetCC(byte* data);
   int CMov(byte* data);
   int FPUInstruction(byte* data);
+  int MemoryFPUInstruction(int escape_opcode, int regop, byte* modrm_start);
+  int RegisterFPUInstruction(int escape_opcode, byte modrm_byte);
   void AppendToBuffer(const char* format, ...);
 
 
@@ -493,7 +495,7 @@
 
 // Returns number of bytes used, including *data.
 int DisassemblerIA32::F7Instruction(byte* data) {
-  assert(*data == 0xF7);
+  ASSERT_EQ(0xF7, *data);
   byte modrm = *(data+1);
   int mod, regop, rm;
   get_modrm(modrm, &mod, &regop, &rm);
@@ -526,7 +528,7 @@
 
 int DisassemblerIA32::D1D3C1Instruction(byte* data) {
   byte op = *data;
-  assert(op == 0xD1 || op == 0xD3 || op == 0xC1);
+  ASSERT(op == 0xD1 || op == 0xD3 || op == 0xC1);
   byte modrm = *(data+1);
   int mod, regop, rm;
   get_modrm(modrm, &mod, &regop, &rm);
@@ -560,7 +562,7 @@
         default: UnimplementedInstruction();
       }
     }
-    assert(mnem != NULL);
+    ASSERT_NE(NULL, mnem);
     AppendToBuffer("%s %s,", mnem, NameOfCPURegister(rm));
     if (imm8 > 0) {
       AppendToBuffer("%d", imm8);
@@ -576,7 +578,7 @@
 
 // Returns number of bytes used, including *data.
 int DisassemblerIA32::JumpShort(byte* data) {
-  assert(*data == 0xEB);
+  ASSERT_EQ(0xEB, *data);
   byte b = *(data+1);
   byte* dest = data + static_cast<int8_t>(b) + 2;
   AppendToBuffer("jmp %s", NameOfAddress(dest));
@@ -586,7 +588,7 @@
 
 // Returns number of bytes used, including *data.
 int DisassemblerIA32::JumpConditional(byte* data, const char* comment) {
-  assert(*data == 0x0F);
+  ASSERT_EQ(0x0F, *data);
   byte cond = *(data+1) & 0x0F;
   byte* dest = data + *reinterpret_cast<int32_t*>(data+2) + 6;
   const char* mnem = jump_conditional_mnem[cond];
@@ -614,18 +616,18 @@
 
 // Returns number of bytes used, including *data.
 int DisassemblerIA32::SetCC(byte* data) {
-  assert(*data == 0x0F);
+  ASSERT_EQ(0x0F, *data);
   byte cond = *(data+1) & 0x0F;
   const char* mnem = set_conditional_mnem[cond];
   AppendToBuffer("%s ", mnem);
   PrintRightByteOperand(data+2);
-  return 3;  // includes 0x0F
+  return 3;  // Includes 0x0F.
 }
 
 
 // Returns number of bytes used, including *data.
 int DisassemblerIA32::CMov(byte* data) {
-  assert(*data == 0x0F);
+  ASSERT_EQ(0x0F, *data);
   byte cond = *(data + 1) & 0x0F;
   const char* mnem = conditional_move_mnem[cond];
   int op_size = PrintOperands(mnem, REG_OPER_OP_ORDER, data + 2);
@@ -635,107 +637,165 @@
 
 // Returns number of bytes used, including *data.
 int DisassemblerIA32::FPUInstruction(byte* data) {
-  byte b1 = *data;
-  byte b2 = *(data + 1);
-  if (b1 == 0xD9) {
-    const char* mnem = NULL;
-    switch (b2) {
-      case 0xE8: mnem = "fld1"; break;
-      case 0xEE: mnem = "fldz"; break;
-      case 0xE1: mnem = "fabs"; break;
-      case 0xE0: mnem = "fchs"; break;
-      case 0xF8: mnem = "fprem"; break;
-      case 0xF5: mnem = "fprem1"; break;
-      case 0xF7: mnem = "fincstp"; break;
-      case 0xE4: mnem = "ftst"; break;
-    }
-    if (mnem != NULL) {
-      AppendToBuffer("%s", mnem);
-      return 2;
-    } else if ((b2 & 0xF8) == 0xC8) {
-      AppendToBuffer("fxch st%d", b2 & 0x7);
-      return 2;
-    } else {
-      int mod, regop, rm;
-      get_modrm(*(data+1), &mod, &regop, &rm);
-      const char* mnem = "?";
-      switch (regop) {
-        case eax: mnem = "fld_s"; break;
-        case ebx: mnem = "fstp_s"; break;
-        default: UnimplementedInstruction();
-      }
-      AppendToBuffer("%s ", mnem);
-      int count = PrintRightOperand(data + 1);
-      return count + 1;
-    }
-  } else if (b1 == 0xDD) {
-    if ((b2 & 0xF8) == 0xC0) {
-      AppendToBuffer("ffree st%d", b2 & 0x7);
-      return 2;
-    } else {
-      int mod, regop, rm;
-      get_modrm(*(data+1), &mod, &regop, &rm);
-      const char* mnem = "?";
-      switch (regop) {
-        case eax: mnem = "fld_d"; break;
-        case ebx: mnem = "fstp_d"; break;
-        default: UnimplementedInstruction();
-      }
-      AppendToBuffer("%s ", mnem);
-      int count = PrintRightOperand(data + 1);
-      return count + 1;
-    }
-  } else if (b1 == 0xDB) {
-    int mod, regop, rm;
-    get_modrm(*(data+1), &mod, &regop, &rm);
-    const char* mnem = "?";
-    switch (regop) {
-      case eax: mnem = "fild_s"; break;
-      case edx: mnem = "fist_s"; break;
-      case ebx: mnem = "fistp_s"; break;
-      default: UnimplementedInstruction();
-    }
-    AppendToBuffer("%s ", mnem);
-    int count = PrintRightOperand(data + 1);
-    return count + 1;
-  } else if (b1 == 0xDF) {
-    if (b2 == 0xE0) {
-      AppendToBuffer("fnstsw_ax");
-      return 2;
-    }
-    int mod, regop, rm;
-    get_modrm(*(data+1), &mod, &regop, &rm);
-    const char* mnem = "?";
-    switch (regop) {
-      case ebp: mnem = "fild_d"; break;
-      case edi: mnem = "fistp_d"; break;
-      default: UnimplementedInstruction();
-    }
-    AppendToBuffer("%s ", mnem);
-    int count = PrintRightOperand(data + 1);
-    return count + 1;
-  } else if (b1 == 0xDC || b1 == 0xDE) {
-    bool is_pop = (b1 == 0xDE);
-    if (is_pop && b2 == 0xD9) {
-      AppendToBuffer("fcompp");
-      return 2;
-    }
-    const char* mnem = "FP0xDC";
-    switch (b2 & 0xF8) {
-      case 0xC0: mnem = "fadd"; break;
-      case 0xE8: mnem = "fsub"; break;
-      case 0xC8: mnem = "fmul"; break;
-      case 0xF8: mnem = "fdiv"; break;
-      default: UnimplementedInstruction();
-    }
-    AppendToBuffer("%s%s st%d", mnem, is_pop ? "p" : "", b2 & 0x7);
-    return 2;
-  } else if (b1 == 0xDA && b2 == 0xE9) {
-    const char* mnem = "fucompp";
-    AppendToBuffer("%s", mnem);
-    return 2;
+  byte escape_opcode = *data;
+  ASSERT_EQ(0xD8, escape_opcode & 0xF8);
+  byte modrm_byte = *(data+1);
+
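+  // A ModR/M byte of 0xC0 or above (mod == 11) names an FPU register
+  // operand; smaller values encode a memory operand.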
+  if (modrm_byte >= 0xC0) {
+    return RegisterFPUInstruction(escape_opcode, modrm_byte);
+  } else {
+    return MemoryFPUInstruction(escape_opcode, modrm_byte, data+1);
   }
-  AppendToBuffer("Unknown FP instruction");
+}
+
+int DisassemblerIA32::MemoryFPUInstruction(int escape_opcode,
+                                           int modrm_byte,
+                                           byte* modrm_start) {
+  const char* mnem = "?";
+  int regop = (modrm_byte >> 3) & 0x7;  // reg/op field of modrm byte.
+  switch (escape_opcode) {
+    case 0xD9: switch (regop) {
+        case 0: mnem = "fld_s"; break;
+        case 3: mnem = "fstp_s"; break;
+        case 7: mnem = "fstcw"; break;
+        default: UnimplementedInstruction();
+      }
+      break;
+
+    case 0xDB: switch (regop) {
+        case 0: mnem = "fild_s"; break;
+        case 1: mnem = "fisttp_s"; break;
+        case 2: mnem = "fist_s"; break;
+        case 3: mnem = "fistp_s"; break;
+        default: UnimplementedInstruction();
+      }
+      break;
+
+    case 0xDD: switch (regop) {
+        case 0: mnem = "fld_d"; break;
+        case 3: mnem = "fstp_d"; break;
+        default: UnimplementedInstruction();
+      }
+      break;
+
+    case 0xDF: switch (regop) {
+        case 5: mnem = "fild_d"; break;
+        case 7: mnem = "fistp_d"; break;
+        default: UnimplementedInstruction();
+      }
+      break;
+
+    default: UnimplementedInstruction();
+  }
+  AppendToBuffer("%s ", mnem);
+  int count = PrintRightOperand(modrm_start);
+  return count + 1;
+}
+
+int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
+                                             byte modrm_byte) {
+  bool has_register = false;  // Is the FPU register encoded in modrm_byte?
+  const char* mnem = "?";
+
+  switch (escape_opcode) {
+    case 0xD8:
+      UnimplementedInstruction();
+      break;
+
+    case 0xD9:
+      switch (modrm_byte & 0xF8) {
+        case 0xC8:
+          mnem = "fxch";
+          has_register = true;
+          break;
+        default:
+          switch (modrm_byte) {
+            case 0xE0: mnem = "fchs"; break;
+            case 0xE1: mnem = "fabs"; break;
+            case 0xE4: mnem = "ftst"; break;
+            case 0xE8: mnem = "fld1"; break;
+            case 0xEE: mnem = "fldz"; break;
+            case 0xF5: mnem = "fprem1"; break;
+            case 0xF7: mnem = "fincstp"; break;
+            case 0xF8: mnem = "fprem"; break;
+            case 0xFE: mnem = "fsin"; break;
+            case 0xFF: mnem = "fcos"; break;
+            default: UnimplementedInstruction();
+          }
+      }
+      break;
+
+    case 0xDA:
+      if (modrm_byte == 0xE9) {
+        mnem = "fucompp";
+      } else {
+        UnimplementedInstruction();
+      }
+      break;
+
+    case 0xDB:
+      if ((modrm_byte & 0xF8) == 0xE8) {
+        mnem = "fucomi";
+        has_register = true;
+      } else if (modrm_byte == 0xE2) {
+        mnem = "fclex";
+      } else {
+        UnimplementedInstruction();
+      }
+      break;
+
+    case 0xDC:
+      has_register = true;
+      switch (modrm_byte & 0xF8) {
+        case 0xC0: mnem = "fadd"; break;
+        case 0xE8: mnem = "fsub"; break;
+        case 0xC8: mnem = "fmul"; break;
+        case 0xF8: mnem = "fdiv"; break;
+        default: UnimplementedInstruction();
+      }
+      break;
+
+    case 0xDD:
+      has_register = true;
+      switch (modrm_byte & 0xF8) {
+        case 0xC0: mnem = "ffree"; break;
+        case 0xD8: mnem = "fstp"; break;
+        default: UnimplementedInstruction();
+      }
+      break;
+
+    case 0xDE:
+      if (modrm_byte == 0xD9) {
+        mnem = "fcompp";
+      } else {
+        has_register = true;
+        switch (modrm_byte & 0xF8) {
+          case 0xC0: mnem = "faddp"; break;
+          case 0xE8: mnem = "fsubp"; break;
+          case 0xC8: mnem = "fmulp"; break;
+          case 0xF8: mnem = "fdivp"; break;
+          default: UnimplementedInstruction();
+        }
+      }
+      break;
+
+    case 0xDF:
+      if (modrm_byte == 0xE0) {
+        mnem = "fnstsw_ax";
+      } else if ((modrm_byte & 0xF8) == 0xE8) {
+        mnem = "fucomip";
+        has_register = true;
+      }
+      break;
+
+    default: UnimplementedInstruction();
+  }
+
+  if (has_register) {
+    AppendToBuffer("%s st%d", mnem, modrm_byte & 0x7);
+  } else {
+    AppendToBuffer("%s", mnem);
+  }
   return 2;
 }
 
diff --git a/src/ia32/fast-codegen-ia32.cc b/src/ia32/fast-codegen-ia32.cc
index ee1b92d..0d661c3 100644
--- a/src/ia32/fast-codegen-ia32.cc
+++ b/src/ia32/fast-codegen-ia32.cc
@@ -29,6 +29,7 @@
 
 #include "codegen-inl.h"
 #include "fast-codegen.h"
+#include "parser.h"
 
 namespace v8 {
 namespace internal {
@@ -75,6 +76,14 @@
     __ bind(&ok);
   }
 
+  { Comment cmnt(masm_, "[ Declarations");
+    VisitDeclarations(fun->scope()->declarations());
+  }
+
+  if (FLAG_trace) {
+    __ CallRuntime(Runtime::kTraceEnter, 0);
+  }
+
   { Comment cmnt(masm_, "[ Body");
     VisitStatements(fun->body());
   }
@@ -84,6 +93,11 @@
     // body.
     __ mov(eax, Factory::undefined_value());
     SetReturnPosition(fun);
+
+    if (FLAG_trace) {
+      __ push(eax);
+      __ CallRuntime(Runtime::kTraceExit, 1);
+    }
     __ RecordJSReturn();
     // Do not use the leave instruction here because it is too short to
     // patch with the code required by the debugger.
@@ -94,19 +108,79 @@
 }
 
 
-void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
-  Comment cmnt(masm_, "[ ExpressionStatement");
-  SetStatementPosition(stmt);
-  Visit(stmt->expression());
+void FastCodeGenerator::Move(Location destination, Slot* source) {
+  switch (destination.type()) {
+    case Location::NOWHERE:
+      break;
+    case Location::TEMP:
+      __ push(Operand(ebp, SlotOffset(source)));
+      break;
+  }
+}
+
+
+void FastCodeGenerator::Move(Location destination, Literal* expr) {
+  switch (destination.type()) {
+    case Location::NOWHERE:
+      break;
+    case Location::TEMP:
+      __ push(Immediate(expr->handle()));
+      break;
+  }
+}
+
+
+void FastCodeGenerator::Move(Slot* destination, Location source) {
+  switch (source.type()) {
+    case Location::NOWHERE:
+      UNREACHABLE();
+    case Location::TEMP:
+      __ pop(Operand(ebp, SlotOffset(destination)));
+      break;
+  }
+}
+
+
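+// Drop the value on top of the stack if the destination is nowhere;
+// otherwise overwrite the top of the stack with |source|.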
+void FastCodeGenerator::DropAndMove(Location destination, Register source) {
+  switch (destination.type()) {
+    case Location::NOWHERE:
+      __ add(Operand(esp), Immediate(kPointerSize));
+      break;
+    case Location::TEMP:
+      __ mov(Operand(esp, 0), source);
+      break;
+  }
+}
+
+
+void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+  // Call the runtime to declare the globals.
+  __ push(esi);  // The context is the first argument.
+  __ push(Immediate(pairs));
+  __ push(Immediate(Smi::FromInt(is_eval_ ? 1 : 0)));
+  __ CallRuntime(Runtime::kDeclareGlobals, 3);
+  // Return value is ignored.
 }
 
 
 void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
   Comment cmnt(masm_, "[ ReturnStatement");
   SetStatementPosition(stmt);
-  Visit(stmt->expression());
-  __ pop(eax);
+  Expression* expr = stmt->expression();
+  // Complete the statement based on the type of the subexpression.
+  if (expr->AsLiteral() != NULL) {
+    __ mov(eax, expr->AsLiteral()->handle());
+  } else {
+    Visit(expr);
+    Move(eax, expr->location());
+  }
+
+  if (FLAG_trace) {
+    __ push(eax);
+    __ CallRuntime(Runtime::kTraceExit, 1);
+  }
   __ RecordJSReturn();
+
   // Do not use the leave instruction here because it is too short to
   // patch with the code required by the debugger.
   __ mov(esp, ebp);
@@ -115,29 +189,240 @@
 }
 
 
+void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+  Comment cmnt(masm_, "[ FunctionLiteral");
+
+  // Build the function boilerplate and instantiate it.
+  Handle<JSFunction> boilerplate = BuildBoilerplate(expr);
+  if (HasStackOverflow()) return;
+
+  ASSERT(boilerplate->IsBoilerplate());
+
+  // Create a new closure.
+  __ push(esi);
+  __ push(Immediate(boilerplate));
+  __ CallRuntime(Runtime::kNewClosure, 2);
+  Move(expr->location(), eax);
+}
+
+
 void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
   Comment cmnt(masm_, "[ VariableProxy");
   Expression* rewrite = expr->var()->rewrite();
-  ASSERT(rewrite != NULL);
+  if (rewrite == NULL) {
+    Comment cmnt(masm_, "Global variable");
+    // Use inline caching. Variable name is passed in ecx and the global
+    // object on the stack.
+    __ push(CodeGenerator::GlobalObject());
+    __ mov(ecx, expr->name());
+    Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+    __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+    // By emitting a nop we make sure that we do not have a "test eax,..."
+    // instruction after the call, since it is treated specially by the
+    // LoadIC code. Remember that the assembler may choose to do peephole
+    // optimization (e.g., push/pop elimination).
+    __ nop();
 
-  Slot* slot = rewrite->AsSlot();
-  ASSERT(slot != NULL);
-  { Comment cmnt(masm_, "[ Slot");
-    if (expr->location().is_temporary()) {
-      __ push(Operand(ebp, SlotOffset(slot)));
-    } else {
-      ASSERT(expr->location().is_nowhere());
-    }
+    DropAndMove(expr->location(), eax);
+  } else {
+    Comment cmnt(masm_, "Stack slot");
+    Move(expr->location(), rewrite->AsSlot());
   }
 }
 
 
-void FastCodeGenerator::VisitLiteral(Literal* expr) {
-  Comment cmnt(masm_, "[ Literal");
-  if (expr->location().is_temporary()) {
-    __ push(Immediate(expr->handle()));
+void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+  Comment cmnt(masm_, "[ ObjectLiteral");
+  Label exists;
+  // Registers will be used as follows:
+  // edi = JS function.
+  // ebx = literals array.
+  // eax = boilerplate.
+
+  __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ mov(ebx, FieldOperand(edi, JSFunction::kLiteralsOffset));
+  int literal_offset =
+      FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+  __ mov(eax, FieldOperand(ebx, literal_offset));
+  __ cmp(eax, Factory::undefined_value());
+  __ j(not_equal, &exists);
+  // Create boilerplate if it does not exist.
+  // Literal array (0).
+  __ push(ebx);
+  // Literal index (1).
+  __ push(Immediate(Smi::FromInt(expr->literal_index())));
+  // Constant properties (2).
+  __ push(Immediate(expr->constant_properties()));
+  __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+  __ bind(&exists);
+  // eax contains boilerplate.
+  // Clone boilerplate.
+  __ push(eax);
+  if (expr->depth() == 1) {
+    __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
   } else {
-    ASSERT(expr->location().is_nowhere());
+    __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+  }
+
+  // If result_saved is true, the result is on top of the stack.
+  // If result_saved is false, the result is only in eax.
+  bool result_saved = false;
+
+  for (int i = 0; i < expr->properties()->length(); i++) {
+    ObjectLiteral::Property* property = expr->properties()->at(i);
+    if (property->IsCompileTimeValue()) continue;
+
+    Literal* key = property->key();
+    Expression* value = property->value();
+    if (!result_saved) {
+      __ push(eax);  // Save the result on the stack.
+      result_saved = true;
+    }
+    switch (property->kind()) {
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:  // fall through
+        ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
+      case ObjectLiteral::Property::COMPUTED:
+        if (key->handle()->IsSymbol()) {
+          Visit(value);
+          Move(eax, value->location());
+          __ mov(ecx, Immediate(key->handle()));
+          Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+          __ call(ic, RelocInfo::CODE_TARGET);
+          // StoreIC leaves the receiver on the stack.
+          break;
+        }
+        // fall through
+      case ObjectLiteral::Property::PROTOTYPE:
+        __ push(eax);
+        Visit(key);
+        ASSERT(key->location().is_temporary());
+        Visit(value);
+        ASSERT(value->location().is_temporary());
+        __ CallRuntime(Runtime::kSetProperty, 3);
+        __ mov(eax, Operand(esp, 0));  // Restore result into eax.
+        break;
+      case ObjectLiteral::Property::SETTER:  // fall through
+      case ObjectLiteral::Property::GETTER:
+        __ push(eax);
+        Visit(key);
+        ASSERT(key->location().is_temporary());
+        __ push(Immediate(property->kind() == ObjectLiteral::Property::SETTER ?
+                          Smi::FromInt(1) :
+                          Smi::FromInt(0)));
+        Visit(value);
+        ASSERT(value->location().is_temporary());
+        __ CallRuntime(Runtime::kDefineAccessor, 4);
+        __ mov(eax, Operand(esp, 0));  // Restore result into eax.
+        break;
+      default: UNREACHABLE();
+    }
+  }
+  switch (expr->location().type()) {
+    case Location::NOWHERE:
+      if (result_saved) __ add(Operand(esp), Immediate(kPointerSize));
+      break;
+    case Location::TEMP:
+      if (!result_saved) __ push(eax);
+      break;
+  }
+}
+
+
+void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+  Comment cmnt(masm_, "[ RegExp Literal");
+  Label done;
+  // Registers will be used as follows:
+  // edi = JS function.
+  // ebx = literals array.
+  // eax = regexp literal.
+  __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ mov(ebx, FieldOperand(edi, JSFunction::kLiteralsOffset));
+  int literal_offset =
+    FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+  __ mov(eax, FieldOperand(ebx, literal_offset));
+  __ cmp(eax, Factory::undefined_value());
+  __ j(not_equal, &done);
+  // Create the regexp literal using the runtime function.
+  // The result will be in eax.
+  __ push(ebx);
+  __ push(Immediate(Smi::FromInt(expr->literal_index())));
+  __ push(Immediate(expr->pattern()));
+  __ push(Immediate(expr->flags()));
+  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+  __ bind(&done);
+  Move(expr->location(), eax);
+}
+
+
+void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+  Comment cmnt(masm_, "[ ArrayLiteral");
+  Label make_clone;
+
+  // Fetch the function's literals array.
+  __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ mov(ebx, FieldOperand(ebx, JSFunction::kLiteralsOffset));
+  // Check if the literal's boilerplate has been instantiated.
+  int offset =
+      FixedArray::kHeaderSize + (expr->literal_index() * kPointerSize);
+  __ mov(eax, FieldOperand(ebx, offset));
+  __ cmp(eax, Factory::undefined_value());
+  __ j(not_equal, &make_clone);
+
+  // Instantiate the boilerplate.
+  __ push(ebx);
+  __ push(Immediate(Smi::FromInt(expr->literal_index())));
+  __ push(Immediate(expr->literals()));
+  __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
+
+  __ bind(&make_clone);
+  // Clone the boilerplate.
+  __ push(eax);
+  if (expr->depth() > 1) {
+    __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+  } else {
+    __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+  }
+
+  bool result_saved = false;  // Is the result saved to the stack?
+
+  // Emit code to evaluate all the non-constant subexpressions and to store
+  // them into the newly cloned array.
+  ZoneList<Expression*>* subexprs = expr->values();
+  for (int i = 0, len = subexprs->length(); i < len; i++) {
+    Expression* subexpr = subexprs->at(i);
+    // If the subexpression is a literal or a simple materialized literal it
+    // is already set in the cloned array.
+    if (subexpr->AsLiteral() != NULL ||
+        CompileTimeValue::IsCompileTimeValue(subexpr)) {
+      continue;
+    }
+
+    if (!result_saved) {
+      __ push(eax);
+      result_saved = true;
+    }
+    Visit(subexpr);
+    ASSERT(subexpr->location().is_temporary());
+
+    // Store the subexpression value in the array's elements.
+    __ pop(eax);  // Subexpression value.
+    __ mov(ebx, Operand(esp, 0));  // Copy of array literal.
+    __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
+    int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+    __ mov(FieldOperand(ebx, offset), eax);
+
+    // Update the write barrier for the array store.
+    __ RecordWrite(ebx, offset, eax, ecx);
+  }
+
+  switch (expr->location().type()) {
+    case Location::NOWHERE:
+      if (result_saved) __ add(Operand(esp), Immediate(kPointerSize));
+      break;
+    case Location::TEMP:
+      if (!result_saved) __ push(eax);
+      break;
   }
 }
 
@@ -145,19 +430,266 @@
 void FastCodeGenerator::VisitAssignment(Assignment* expr) {
   Comment cmnt(masm_, "[ Assignment");
   ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR);
-  Visit(expr->value());
 
+  // Left-hand side can only be a global or a (parameter or local) slot.
   Variable* var = expr->target()->AsVariableProxy()->AsVariable();
-  ASSERT(var != NULL && var->slot() != NULL);
+  ASSERT(var != NULL);
+  ASSERT(var->is_global() || var->slot() != NULL);
 
-  if (expr->location().is_temporary()) {
-    __ mov(eax, Operand(esp, 0));
-    __ mov(Operand(ebp, SlotOffset(var->slot())), eax);
+  Expression* rhs = expr->value();
+  if (var->is_global()) {
+    // Assignment to a global variable, use inline caching.  Right-hand-side
+    // value is passed in eax, variable name in ecx, and the global object
+    // on the stack.
+
+    // Code for the right-hand-side expression depends on its type.
+    if (rhs->AsLiteral() != NULL) {
+      __ mov(eax, rhs->AsLiteral()->handle());
+    } else {
+      ASSERT(rhs->location().is_temporary());
+      Visit(rhs);
+      __ pop(eax);
+    }
+    __ mov(ecx, var->name());
+    __ push(CodeGenerator::GlobalObject());
+    Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+    __ call(ic, RelocInfo::CODE_TARGET);
+    // Overwrite the global object on the stack with the result if needed.
+    DropAndMove(expr->location(), eax);
   } else {
-    ASSERT(expr->location().is_nowhere());
-    __ pop(Operand(ebp, SlotOffset(var->slot())));
+    // Local or parameter assignment.
+
+    // Code for the right-hand side expression depends on its type.
+    if (rhs->AsLiteral() != NULL) {
+      // Two cases: 'temp <- (var = constant)', or 'var = constant' with a
+      // discarded result.  Always perform the assignment.
+      __ mov(eax, rhs->AsLiteral()->handle());
+      __ mov(Operand(ebp, SlotOffset(var->slot())), eax);
+      Move(expr->location(), eax);
+    } else {
+      ASSERT(rhs->location().is_temporary());
+      Visit(rhs);
+      switch (expr->location().type()) {
+        case Location::NOWHERE:
+          // Case 'var = temp'.  Discard right-hand-side temporary.
+          Move(var->slot(), rhs->location());
+          break;
+        case Location::TEMP:
+          // Case 'temp1 <- (var = temp0)'.  Preserve right-hand-side
+          // temporary on the stack.
+          __ mov(eax, Operand(esp, 0));
+          __ mov(Operand(ebp, SlotOffset(var->slot())), eax);
+          break;
+      }
+    }
   }
 }
 
 
+void FastCodeGenerator::VisitProperty(Property* expr) {
+  Comment cmnt(masm_, "[ Property");
+  Expression* key = expr->key();
+  uint32_t dummy;
+
+  // Record the source position for the property load.
+  SetSourcePosition(expr->position());
+
+  // Evaluate receiver.
+  Visit(expr->obj());
+
+  if (key->AsLiteral() != NULL && key->AsLiteral()->handle()->IsSymbol() &&
+      !String::cast(*(key->AsLiteral()->handle()))->AsArrayIndex(&dummy)) {
+    // Do a NAMED property load.
+    // The IC expects the property name in ecx and the receiver on the stack.
+    __ mov(ecx, Immediate(key->AsLiteral()->handle()));
+    Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+    __ call(ic, RelocInfo::CODE_TARGET);
+    // By emitting a nop we make sure that we do not have a "test eax,..."
+    // instruction after the call, since it is treated specially by the
+    // LoadIC code.
+    __ nop();
+  } else {
+    // Do a KEYED property load.
+    Visit(expr->key());
+    Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+    __ call(ic, RelocInfo::CODE_TARGET);
+    // By emitting a nop we make sure that we do not have a "test eax,..."
+    // instruction after the call, since it is treated specially by the
+    // LoadIC code.
+    __ nop();
+    // Drop key left on the stack by IC.
+    __ add(Operand(esp), Immediate(kPointerSize));
+  }
+  switch (expr->location().type()) {
+    case Location::TEMP:
+      __ mov(Operand(esp, 0), eax);
+      break;
+    case Location::NOWHERE:
+      __ add(Operand(esp), Immediate(kPointerSize));
+      break;
+  }
+}
+
+
+void FastCodeGenerator::VisitCall(Call* expr) {
+  Expression* fun = expr->expression();
+  ZoneList<Expression*>* args = expr->arguments();
+  Variable* var = fun->AsVariableProxy()->AsVariable();
+  ASSERT(var != NULL && !var->is_this() && var->is_global());
+  ASSERT(!var->is_possibly_eval());
+
+  __ push(Immediate(var->name()));
+  // Push global object (receiver).
+  __ push(CodeGenerator::GlobalObject());
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Visit(args->at(i));
+    ASSERT(args->at(i)->location().is_temporary());
+  }
+  // Record the source position for the debugger.
+  SetSourcePosition(expr->position());
+  // Call the IC initialization code.
+  Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
+                                                         NOT_IN_LOOP);
+  __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+  // Restore context register.
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  // Discard the function left on TOS.
+  DropAndMove(expr->location(), eax);
+}
+
+
+void FastCodeGenerator::VisitCallNew(CallNew* node) {
+  Comment cmnt(masm_, "[ CallNew");
+  // According to ECMA-262, section 11.2.2, page 44, the function
+  // expression in new calls must be evaluated before the
+  // arguments.
+  // Push function on the stack.
+  Visit(node->expression());
+  ASSERT(node->expression()->location().is_temporary());
+
+  // Push global object (receiver).
+  __ push(CodeGenerator::GlobalObject());
+
+  // Push the arguments ("left-to-right") on the stack.
+  ZoneList<Expression*>* args = node->arguments();
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Visit(args->at(i));
+    ASSERT(args->at(i)->location().is_temporary());
+    // If location is temporary, it is already on the stack,
+    // so nothing to do here.
+  }
+
+  // Call the construct call builtin that handles allocation and
+  // constructor invocation.
+  SetSourcePosition(node->position());
+
+  // Load function, arg_count into edi and eax.
+  __ Set(eax, Immediate(arg_count));
+  // Function is in esp[arg_count + 1].
+  __ mov(edi, Operand(esp, eax, times_pointer_size, kPointerSize));
+
+  Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
+  __ call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+
+  // Replace function on TOS with result in eax, or pop it.
+  DropAndMove(node->location(), eax);
+}
+
+
+void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+  Comment cmnt(masm_, "[ CallRuntime");
+  ZoneList<Expression*>* args = expr->arguments();
+  Runtime::Function* function = expr->function();
+
+  ASSERT(function != NULL);
+
+  // Push the arguments ("left-to-right").
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Visit(args->at(i));
+    ASSERT(args->at(i)->location().is_temporary());
+  }
+
+  __ CallRuntime(function, arg_count);
+  Move(expr->location(), eax);
+}
+
+
+void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+  // Compile a short-circuited boolean OR operation in a non-test
+  // context.
+  ASSERT(expr->op() == Token::OR);
+  // Compile (e0 || e1) as if it were
+  // (let (temp = e0) temp ? temp : e1).
+
+  Label eval_right, done;
+  Location destination = expr->location();
+  Expression* left = expr->left();
+  Expression* right = expr->right();
+
+  // Use the shared ToBoolean stub to find the boolean value of the
+  // left-hand subexpression.  Load the value into eax to perform some
+  // inlined checks assumed by the stub.
+
+  // Compile the left-hand value into eax.  Put it on the stack if we may
+  // need it as the value of the whole expression.
+  if (left->AsLiteral() != NULL) {
+    __ mov(eax, left->AsLiteral()->handle());
+    if (destination.is_temporary()) __ push(eax);
+  } else {
+    Visit(left);
+    ASSERT(left->location().is_temporary());
+    switch (destination.type()) {
+      case Location::NOWHERE:
+        // Pop the left-hand value into eax because we will not need it as the
+        // final result.
+        __ pop(eax);
+        break;
+      case Location::TEMP:
+        // Copy the left-hand value into eax because we may need it as the
+        // final result.
+        __ mov(eax, Operand(esp, 0));
+        break;
+    }
+  }
+  // The left-hand value is in eax.  It is also on the stack iff the
+  // destination location is temporary.
+
+  // Perform fast checks assumed by the stub.
+  __ cmp(eax, Factory::undefined_value());  // The undefined value is false.
+  __ j(equal, &eval_right);
+  __ cmp(eax, Factory::true_value());  // True is true.
+  __ j(equal, &done);
+  __ cmp(eax, Factory::false_value());  // False is false.
+  __ j(equal, &eval_right);
+  ASSERT(kSmiTag == 0);
+  __ test(eax, Operand(eax));  // The smi zero is false.
+  __ j(zero, &eval_right);
+  __ test(eax, Immediate(kSmiTagMask));  // All other smis are true.
+  __ j(zero, &done);
+
+  // Call the stub for all other cases.
+  __ push(eax);
+  ToBooleanStub stub;
+  __ CallStub(&stub);
+  __ test(eax, Operand(eax));  // The stub returns nonzero for true.
+  __ j(not_zero, &done);
+
+  __ bind(&eval_right);
+  // Discard the left-hand value if present on the stack.
+  if (destination.is_temporary()) {
+    __ add(Operand(esp), Immediate(kPointerSize));
+  }
+  // Save or discard the right-hand value as needed.
+  if (right->AsLiteral() != NULL) {
+    Move(destination, right->AsLiteral());
+  } else {
+    Visit(right);
+    Move(destination, right->location());
+  }
+
+  __ bind(&done);
+}
+
+
 } }  // namespace v8::internal
diff --git a/src/ia32/frames-ia32.cc b/src/ia32/frames-ia32.cc
index dea439f..5c900be 100644
--- a/src/ia32/frames-ia32.cc
+++ b/src/ia32/frames-ia32.cc
@@ -56,19 +56,14 @@
   state->fp = fp;
   state->sp = sp;
   state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
-  // Determine frame type.
-  if (Memory::Address_at(fp + ExitFrameConstants::kDebugMarkOffset) != 0) {
-    return EXIT_DEBUG;
-  } else {
-    return EXIT;
-  }
+  return EXIT;
 }
 
 
 void ExitFrame::Iterate(ObjectVisitor* v) const {
-  // Exit frames on IA-32 do not contain any pointers. The arguments
-  // are traversed as part of the expression stack of the calling
-  // frame.
+  v->VisitPointer(&code_slot());
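+  // The code slot holds the code object pushed in
+  // MacroAssembler::EnterExitFramePrologue, so the GC can trace and
+  // update the pointer.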
+  // The arguments are traversed as part of the expression stack of
+  // the calling frame.
 }
 
 
diff --git a/src/ia32/frames-ia32.h b/src/ia32/frames-ia32.h
index 3a7c86b..c3fe6c7 100644
--- a/src/ia32/frames-ia32.h
+++ b/src/ia32/frames-ia32.h
@@ -76,7 +76,7 @@
 
 class ExitFrameConstants : public AllStatic {
  public:
-  static const int kDebugMarkOffset = -2 * kPointerSize;
+  static const int kCodeOffset      = -2 * kPointerSize;
   static const int kSPOffset        = -1 * kPointerSize;
 
   static const int kCallerFPOffset =  0 * kPointerSize;
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index af05680..3aa3c34 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -301,7 +301,7 @@
   // Slow case: Load name and receiver from stack and jump to runtime.
   __ bind(&slow);
   __ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
-  KeyedLoadIC::Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+  Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
 
   __ bind(&check_string);
   // The key is not a smi.
@@ -342,6 +342,166 @@
 }
 
 
+void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
+                                        ExternalArrayType array_type) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : key
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  Label slow, failed_allocation;
+
+  // Load the key and the receiver.
+  __ mov(eax, Operand(esp, kPointerSize));
+  __ mov(ecx, Operand(esp, 2 * kPointerSize));
+
+  // Check that the object isn't a smi.
+  __ test(ecx, Immediate(kSmiTagMask));
+  __ j(zero, &slow, not_taken);
+
+  // Check that the key is a smi.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(not_zero, &slow, not_taken);
+
+  // Get the map of the receiver.
+  __ mov(edx, FieldOperand(ecx, HeapObject::kMapOffset));
+  // Check that the receiver does not require access checks.  We need
+  // to check this explicitly since this generic stub does not perform
+  // map checks.
+  __ movzx_b(ebx, FieldOperand(edx, Map::kBitFieldOffset));
+  __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
+  __ j(not_zero, &slow, not_taken);
+
+  // Get the instance type from the map of the receiver.
+  __ movzx_b(edx, FieldOperand(edx, Map::kInstanceTypeOffset));
+  // Check that the object is a JS object.
+  __ cmp(edx, JS_OBJECT_TYPE);
+  __ j(not_equal, &slow, not_taken);
+
+  // Check that the elements array is the appropriate type of
+  // ExternalArray.
+  // eax: index (as a smi)
+  // ecx: JSObject
+  __ mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
+  Handle<Map> map(Heap::MapForExternalArrayType(array_type));
+  __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+         Immediate(map));
+  __ j(not_equal, &slow, not_taken);
+
+  // Check that the index is in range.
+  __ sar(eax, kSmiTagSize);  // Untag the index.
+  __ cmp(eax, FieldOperand(ecx, ExternalArray::kLengthOffset));
+  // Unsigned comparison catches both negative and too-large values.
+  __ j(above_equal, &slow);
+
+  // eax: untagged index
+  // ecx: elements array
+  __ mov(ecx, FieldOperand(ecx, ExternalArray::kExternalPointerOffset));
+  // ecx: base pointer of external storage
+  switch (array_type) {
+    case kExternalByteArray:
+      __ movsx_b(eax, Operand(ecx, eax, times_1, 0));
+      break;
+    case kExternalUnsignedByteArray:
+      __ mov_b(eax, Operand(ecx, eax, times_1, 0));
+      break;
+    case kExternalShortArray:
+      __ movsx_w(eax, Operand(ecx, eax, times_2, 0));
+      break;
+    case kExternalUnsignedShortArray:
+      __ mov_w(eax, Operand(ecx, eax, times_2, 0));
+      break;
+    case kExternalIntArray:
+    case kExternalUnsignedIntArray:
+      __ mov(eax, Operand(ecx, eax, times_4, 0));
+      break;
+    case kExternalFloatArray:
+      __ fld_s(Operand(ecx, eax, times_4, 0));
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+
+  // For integer array types:
+  // eax: value
+  // For floating-point array type:
+  // FP(0): value
+
+  if (array_type == kExternalIntArray ||
+      array_type == kExternalUnsignedIntArray) {
+    // For the Int and UnsignedInt array types, we need to see whether
+    // the value can be represented in a Smi. If not, we need to convert
+    // it to a HeapNumber.
+    Label box_int;
+    if (array_type == kExternalIntArray) {
+      // See Smi::IsValid for why this works.
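+      // Adding 0x40000000 yields an unsigned result below 0x80000000
+      // exactly when the value is within the 31-bit smi range
+      // [-2^30, 2^30 - 1].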
+      __ mov(ebx, eax);
+      __ add(Operand(ebx), Immediate(0x40000000));
+      __ cmp(ebx, 0x80000000);
+      __ j(above_equal, &box_int);
+    } else {
+      ASSERT_EQ(array_type, kExternalUnsignedIntArray);
+      // The test is different for unsigned int values. Since we need
+      // the Smi-encoded result to be treated as unsigned, we can't
+      // handle either of the top two bits being set in the value.
+      __ test(eax, Immediate(0xC0000000));
+      __ j(not_zero, &box_int);
+    }
+
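+    // The value fits in a smi: tag it and return.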
+    __ shl(eax, kSmiTagSize);
+    __ ret(0);
+
+    __ bind(&box_int);
+
+    // Allocate a HeapNumber for the int and perform int-to-double
+    // conversion.
+    if (array_type == kExternalIntArray) {
+      __ push(eax);
+      __ fild_s(Operand(esp, 0));
+      __ pop(eax);
+    } else {
+      ASSERT(array_type == kExternalUnsignedIntArray);
+      // Need to zero-extend the value.
+      // There's no fild variant for unsigned values, so zero-extend
+      // to a 64-bit int manually.
+      __ push(Immediate(0));
+      __ push(eax);
+      __ fild_d(Operand(esp, 0));
+      __ pop(eax);
+      __ pop(eax);
+    }
+    // FP(0): value
+    __ AllocateHeapNumber(eax, ebx, ecx, &failed_allocation);
+    // Set the value.
+    __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+    __ ret(0);
+  } else if (array_type == kExternalFloatArray) {
+    // For the floating-point array type, we need to always allocate a
+    // HeapNumber.
+    __ AllocateHeapNumber(eax, ebx, ecx, &failed_allocation);
+    // Set the value.
+    __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+    __ ret(0);
+  } else {
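+    // The remaining element types are at most 16 bits wide, so the
+    // loaded value always fits in a smi.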
+    __ shl(eax, kSmiTagSize);
+    __ ret(0);
+  }
+
+  // If we fail allocation of the HeapNumber, we still have a value on
+  // top of the FPU stack. Remove it.
+  __ bind(&failed_allocation);
+  __ ffree();
+  __ fincstp();
+  // Fall through to slow case.
+
+  // Slow case: Load name and receiver from stack and jump to runtime.
+  __ bind(&slow);
+  __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
+  Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+}
+
+
 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax    : value
@@ -395,15 +555,9 @@
   // ebx: index (as a smi)
   __ j(below, &fast, taken);
 
-  // Slow case: Push extra copies of the arguments (3).
+  // Slow case: call runtime.
   __ bind(&slow);
-  __ pop(ecx);
-  __ push(Operand(esp, 1 * kPointerSize));
-  __ push(Operand(esp, 1 * kPointerSize));
-  __ push(eax);
-  __ push(ecx);
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
+  Generate(masm, ExternalReference(Runtime::kSetProperty));
 
   // Check whether the elements is a pixel array.
   // eax: value
@@ -485,6 +639,201 @@
 }
 
 
+void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
+                                         ExternalArrayType array_type) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- esp[0] : return address
+  //  -- esp[4] : key
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  Label slow, check_heap_number;
+
+  // Get the receiver from the stack.
+  __ mov(edx, Operand(esp, 2 * kPointerSize));
+  // Check that the object isn't a smi.
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, &slow);
+  // Get the map from the receiver.
+  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+  // Check that the receiver does not require access checks.  We need
+  // to do this because this generic stub does not perform map checks.
+  __ movzx_b(ebx, FieldOperand(ecx, Map::kBitFieldOffset));
+  __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
+  __ j(not_zero, &slow);
+  // Get the key from the stack.
+  __ mov(ebx, Operand(esp, 1 * kPointerSize));  // 1 ~ return address
+  // Check that the key is a smi.
+  __ test(ebx, Immediate(kSmiTagMask));
+  __ j(not_zero, &slow);
+  // Get the instance type from the map of the receiver.
+  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+  // Check that the object is a JS object.
+  __ cmp(ecx, JS_OBJECT_TYPE);
+  __ j(not_equal, &slow);
+
+  // Check that the elements array is the appropriate type of
+  // ExternalArray.
+  // eax: value
+  // edx: JSObject
+  // ebx: index (as a smi)
+  __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
+  Handle<Map> map(Heap::MapForExternalArrayType(array_type));
+  __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+         Immediate(map));
+  __ j(not_equal, &slow);
+
+  // Check that the index is in range.
+  __ sar(ebx, kSmiTagSize);  // Untag the index.
+  __ cmp(ebx, FieldOperand(ecx, ExternalArray::kLengthOffset));
+  // Unsigned comparison catches both negative and too-large values.
+  __ j(above_equal, &slow);
+
+  // Handle both smis and HeapNumbers in the fast path. Go to the
+  // runtime for all other kinds of values.
+  // eax: value
+  // ecx: elements array
+  // ebx: untagged index
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(not_equal, &check_heap_number);
+  // smi case
+  __ mov(edx, eax);  // Save the value.
+  __ sar(eax, kSmiTagSize);  // Untag the value.
+  __ mov(ecx, FieldOperand(ecx, ExternalArray::kExternalPointerOffset));
+  // ecx: base pointer of external storage
+  switch (array_type) {
+    case kExternalByteArray:
+    case kExternalUnsignedByteArray:
+      __ mov_b(Operand(ecx, ebx, times_1, 0), eax);
+      break;
+    case kExternalShortArray:
+    case kExternalUnsignedShortArray:
+      __ mov_w(Operand(ecx, ebx, times_2, 0), eax);
+      break;
+    case kExternalIntArray:
+    case kExternalUnsignedIntArray:
+      __ mov(Operand(ecx, ebx, times_4, 0), eax);
+      break;
+    case kExternalFloatArray:
+      // Need to perform int-to-float conversion.
+      __ push(eax);
+      __ fild_s(Operand(esp, 0));
+      __ pop(eax);
+      __ fstp_s(Operand(ecx, ebx, times_4, 0));
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  __ mov(eax, edx);  // Return the original value.
+  __ ret(0);
+
+  __ bind(&check_heap_number);
+  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+         Immediate(Factory::heap_number_map()));
+  __ j(not_equal, &slow);
+
+  // The WebGL specification leaves the behavior of storing NaN and
+  // +/-Infinity into integer arrays basically undefined. For more
+  // reproducible behavior, convert these to zero.
+  __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+  __ mov(edx, eax);  // Save the value.
+  __ mov(ecx, FieldOperand(ecx, ExternalArray::kExternalPointerOffset));
+  // ebx: untagged index
+  // ecx: base pointer of external storage
+  // top of FPU stack: value
+  if (array_type == kExternalFloatArray) {
+    __ fstp_s(Operand(ecx, ebx, times_4, 0));
+    __ mov(eax, edx);  // Return the original value.
+    __ ret(0);
+  } else {
+    // Need to perform float-to-int conversion.
+    // Test the top of the FP stack for NaN.
+    Label is_nan;
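+    // fucomi(0) compares st(0) with itself; the parity flag is set
+    // only if the operands are unordered, i.e. the value is NaN.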
+    __ fucomi(0);
+    __ j(parity_even, &is_nan);
+
+    if (array_type != kExternalUnsignedIntArray) {
+      __ push(eax);  // Make room on stack
+      __ fistp_s(Operand(esp, 0));
+      __ pop(eax);
+    } else {
+      // fistp stores values as signed integers.
+      // To represent the entire range, we need to store as a 64-bit
+      // int and discard the high 32 bits.
+      __ push(eax);  // Make room on stack
+      __ push(eax);  // Make room on stack
+      __ fistp_d(Operand(esp, 0));
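+      // Pop the low 32 bits into eax and discard the high 32 bits.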
+      __ pop(eax);
+      __ mov(Operand(esp, 0), eax);
+      __ pop(eax);
+    }
+    // eax: untagged integer value
+    switch (array_type) {
+      case kExternalByteArray:
+      case kExternalUnsignedByteArray:
+        __ mov_b(Operand(ecx, ebx, times_1, 0), eax);
+        break;
+      case kExternalShortArray:
+      case kExternalUnsignedShortArray:
+        __ mov_w(Operand(ecx, ebx, times_2, 0), eax);
+        break;
+      case kExternalIntArray:
+      case kExternalUnsignedIntArray: {
+        // We also need to check explicitly for +/-Infinity. These are
+        // converted to MIN_INT as well, and we must be careful not to
+        // confuse them with legal uses of MIN_INT.
+        Label not_infinity;
+        // This test detects both NaN and Infinity, but NaN has already
+        // been handled by the FPU check above.
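+        // The upper 16 bits of the double hold the sign bit and the
+        // 11-bit exponent; an all-ones exponent (0x7FF) means Infinity
+        // here, since NaN was excluded above.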
+        __ mov_w(edi, FieldOperand(edx, HeapNumber::kValueOffset + 6));
+        __ and_(edi, 0x7FF0);
+        __ cmp(edi, 0x7FF0);
+        __ j(not_equal, &not_infinity);
+        __ mov(eax, 0);
+        __ bind(&not_infinity);
+        __ mov(Operand(ecx, ebx, times_4, 0), eax);
+        break;
+      }
+      default:
+        UNREACHABLE();
+        break;
+    }
+    __ mov(eax, edx);  // Return the original value.
+    __ ret(0);
+
+    __ bind(&is_nan);
+    __ ffree();
+    __ fincstp();
+    switch (array_type) {
+      case kExternalByteArray:
+      case kExternalUnsignedByteArray:
+        __ mov_b(Operand(ecx, ebx, times_1, 0), 0);
+        break;
+      case kExternalShortArray:
+      case kExternalUnsignedShortArray:
+        __ mov(eax, 0);
+        __ mov_w(Operand(ecx, ebx, times_2, 0), eax);
+        break;
+      case kExternalIntArray:
+      case kExternalUnsignedIntArray:
+        __ mov(Operand(ecx, ebx, times_4, 0), Immediate(0));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+    __ mov(eax, edx);  // Return the original value.
+    __ ret(0);
+  }
+
+  // Slow case: call runtime.
+  __ bind(&slow);
+  Generate(masm, ExternalReference(Runtime::kSetProperty));
+}
+
+
 // Defined in ic.cc.
 Object* CallIC_Miss(Arguments args);
 
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index a3b2149..34d4fd5 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -319,11 +319,17 @@
 
 
 void MacroAssembler::FCmp() {
-  fucompp();
-  push(eax);
-  fnstsw_ax();
-  sahf();
-  pop(eax);
+  if (CpuFeatures::IsSupported(CpuFeatures::CMOV)) {
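+    // fucomip compares st(0) with st(1), sets EFLAGS directly, and
+    // pops once; the ffree/fincstp pair discards the other operand.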
+    fucomip();
+    ffree(0);
+    fincstp();
+  } else {
+    fucompp();
+    push(eax);
+    fnstsw_ax();
+    sahf();
+    pop(eax);
+  }
 }
 
 
@@ -349,10 +355,7 @@
   leave();
 }
 
-
-void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
-  ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
-
+void MacroAssembler::EnterExitFramePrologue(ExitFrame::Mode mode) {
   // Setup the frame structure on the stack.
   ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
   ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
@@ -363,23 +366,24 @@
-  // Reserve room for entry stack pointer and push the debug marker.
+  // Reserve room for the saved entry stack pointer and push the frame
+  // marker: zero in debug mode, the code object otherwise.
   ASSERT(ExitFrameConstants::kSPOffset  == -1 * kPointerSize);
   push(Immediate(0));  // saved entry sp, patched before call
-  push(Immediate(type == StackFrame::EXIT_DEBUG ? 1 : 0));
+  if (mode == ExitFrame::MODE_DEBUG) {
+    push(Immediate(0));
+  } else {
+    push(Immediate(CodeObject()));
+  }
 
   // Save the frame pointer and the context in top.
   ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
   ExternalReference context_address(Top::k_context_address);
   mov(Operand::StaticVariable(c_entry_fp_address), ebp);
   mov(Operand::StaticVariable(context_address), esi);
+}
 
-  // Setup argc and argv in callee-saved registers.
-  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
-  mov(edi, Operand(eax));
-  lea(esi, Operand(ebp, eax, times_4, offset));
-
+void MacroAssembler::EnterExitFrameEpilogue(ExitFrame::Mode mode, int argc) {
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Save the state of all registers to the stack from the memory
   // location. This is needed to allow nested break points.
-  if (type == StackFrame::EXIT_DEBUG) {
+  if (mode == ExitFrame::MODE_DEBUG) {
     // TODO(1243899): This should be symmetric to
     // CopyRegistersFromStackToMemory() but it isn't! esp is assumed
     // correct here, but computed for the other call. Very error
@@ -390,8 +394,8 @@
   }
 #endif
 
-  // Reserve space for two arguments: argc and argv.
-  sub(Operand(esp), Immediate(2 * kPointerSize));
+  // Reserve space for arguments.
+  sub(Operand(esp), Immediate(argc * kPointerSize));
 
   // Get the required frame alignment for the OS.
   static const int kFrameAlignment = OS::ActivationFrameAlignment();
@@ -405,15 +409,39 @@
 }
 
 
-void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
+void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) {
+  EnterExitFramePrologue(mode);
+
+  // Setup argc and argv in callee-saved registers.
+  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
+  mov(edi, Operand(eax));
+  lea(esi, Operand(ebp, eax, times_4, offset));
+
+  EnterExitFrameEpilogue(mode, 2);
+}
+
+
+void MacroAssembler::EnterApiExitFrame(ExitFrame::Mode mode,
+                                       int stack_space,
+                                       int argc) {
+  EnterExitFramePrologue(mode);
+
+  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
+  lea(esi, Operand(ebp, (stack_space * kPointerSize) + offset));
+
+  EnterExitFrameEpilogue(mode, argc);
+}
+
+
+void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Restore the memory copy of the registers by digging them out from
   // the stack. This is needed to allow nested break points.
-  if (type == StackFrame::EXIT_DEBUG) {
+  if (mode == ExitFrame::MODE_DEBUG) {
     // It's okay to clobber register ebx below because we don't need
     // the function pointer after this.
     const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
-    int kOffset = ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize;
+    int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize;
     lea(ebx, Operand(ebp, kOffset));
     CopyRegistersFromStackToMemory(ebx, ecx, kJSCallerSaved);
   }
@@ -767,6 +795,24 @@
 }
 
 
+void MacroAssembler::AllocateHeapNumber(Register result,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Label* gc_required) {
+  // Allocate heap number in new space.
+  AllocateInNewSpace(HeapNumber::kSize,
+                     result,
+                     scratch1,
+                     scratch2,
+                     gc_required,
+                     TAG_OBJECT);
+
+  // Set the map.
+  mov(FieldOperand(result, HeapObject::kMapOffset),
+      Immediate(Factory::heap_number_map()));
+}
+
+
 void MacroAssembler::NegativeZeroTest(CodeGenerator* cgen,
                                       Register result,
                                       Register op,
@@ -907,6 +953,48 @@
 }
 
 
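+// Save the current handle scope on the stack (extensions count, next
+// pointer, and limit pointer) and reset the extensions count to zero.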
+void MacroAssembler::PushHandleScope(Register scratch) {
+  // Push the number of extensions, smi-tagged so the gc will ignore it.
+  ExternalReference extensions_address =
+      ExternalReference::handle_scope_extensions_address();
+  mov(scratch, Operand::StaticVariable(extensions_address));
+  ASSERT_EQ(0, kSmiTag);
+  shl(scratch, kSmiTagSize);
+  push(scratch);
+  mov(Operand::StaticVariable(extensions_address), Immediate(0));
+  // Push next and limit pointers which will be wordsize aligned and
+  // hence automatically smi tagged.
+  ExternalReference next_address =
+      ExternalReference::handle_scope_next_address();
+  push(Operand::StaticVariable(next_address));
+  ExternalReference limit_address =
+      ExternalReference::handle_scope_limit_address();
+  push(Operand::StaticVariable(limit_address));
+}
+
+
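+// Restore the handle scope saved by PushHandleScope, deleting any
+// handle scope extensions allocated in between.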
+void MacroAssembler::PopHandleScope(Register scratch) {
+  ExternalReference extensions_address =
+        ExternalReference::handle_scope_extensions_address();
+  Label write_back;
+  mov(scratch, Operand::StaticVariable(extensions_address));
+  cmp(Operand(scratch), Immediate(0));
+  j(equal, &write_back);
+  CallRuntime(Runtime::kDeleteHandleScopeExtensions, 0);
+
+  bind(&write_back);
+  ExternalReference limit_address =
+        ExternalReference::handle_scope_limit_address();
+  pop(Operand::StaticVariable(limit_address));
+  ExternalReference next_address =
+        ExternalReference::handle_scope_next_address();
+  pop(Operand::StaticVariable(next_address));
+  pop(scratch);
+  shr(scratch, kSmiTagSize);
+  mov(Operand::StaticVariable(extensions_address), scratch);
+}
+
+
 void MacroAssembler::JumpToRuntime(const ExternalReference& ext) {
   // Set the entry point and jump to the C entry runtime stub.
   mov(ebx, Immediate(ext));
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index ed72c96..18d221c 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -77,16 +77,18 @@
   void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
   void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
 
-  // Enter specific kind of exit frame; either EXIT or
-  // EXIT_DEBUG. Expects the number of arguments in register eax and
+  // Enter specific kind of exit frame; either in normal or debug mode.
+  // Expects the number of arguments in register eax and
   // sets up the number of arguments in register edi and the pointer
   // to the first argument in register esi.
-  void EnterExitFrame(StackFrame::Type type);
+  void EnterExitFrame(ExitFrame::Mode mode);
+
+  void EnterApiExitFrame(ExitFrame::Mode mode, int stack_space, int argc);
 
   // Leave the current exit frame. Expects the return value in
   // register eax:edx (untouched) and the pointer to the first
   // argument in register esi.
-  void LeaveExitFrame(StackFrame::Type type);
+  void LeaveExitFrame(ExitFrame::Mode mode);
 
 
   // ---------------------------------------------------------------------------
@@ -206,6 +208,15 @@
   // un-done.
   void UndoAllocationInNewSpace(Register object);
 
+  // Allocate a heap number in new space with undefined value. The
+  // register scratch2 can be passed as no_reg; the others must be
+  // valid registers. Returns tagged pointer in result register, or
+  // jumps to gc_required if new space is full.
+  void AllocateHeapNumber(Register result,
+                          Register scratch1,
+                          Register scratch2,
+                          Label* gc_required);
+
   // ---------------------------------------------------------------------------
   // Support functions.
 
@@ -260,6 +271,9 @@
                        int num_arguments,
                        int result_size);
 
+  void PushHandleScope(Register scratch);
+  void PopHandleScope(Register scratch);
+
   // Jump to a runtime routine.
   void JumpToRuntime(const ExternalReference& ext);
 
@@ -337,6 +351,9 @@
   void EnterFrame(StackFrame::Type type);
   void LeaveFrame(StackFrame::Type type);
 
+  void EnterExitFramePrologue(ExitFrame::Mode mode);
+  void EnterExitFrameEpilogue(ExitFrame::Mode mode, int argc);
+
   // Allocation support helpers.
   void LoadAllocationTopHelper(Register result,
                                Register result_end,
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index 7af4e89..76d36a9 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -1093,17 +1093,15 @@
 
 
 void RegExpMacroAssemblerIA32::CheckStackLimit() {
-  if (FLAG_check_stack) {
-    Label no_stack_overflow;
-    ExternalReference stack_limit =
-        ExternalReference::address_of_regexp_stack_limit();
-    __ cmp(backtrack_stackpointer(), Operand::StaticVariable(stack_limit));
-    __ j(above, &no_stack_overflow);
+  Label no_stack_overflow;
+  ExternalReference stack_limit =
+      ExternalReference::address_of_regexp_stack_limit();
+  __ cmp(backtrack_stackpointer(), Operand::StaticVariable(stack_limit));
+  __ j(above, &no_stack_overflow);
 
-    SafeCall(&stack_overflow_label_);
+  SafeCall(&stack_overflow_label_);
 
-    __ bind(&no_stack_overflow);
-  }
+  __ bind(&no_stack_overflow);
 }
 
 
@@ -1163,10 +1161,6 @@
 }
 
 
-void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
-  __ int3();  // Unused on ia32.
-}
-
 #undef __
 
 #endif  // V8_NATIVE_REGEXP
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index ca4e142..3e5fc04 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -776,20 +776,39 @@
       CheckPrototypes(object, receiver, holder,
                       scratch1, scratch2, name, miss);
 
-  // Push the arguments on the JS stack of the caller.
-  __ pop(scratch2);  // remove return address
+  Handle<AccessorInfo> callback_handle(callback);
+
+  Register other = reg.is(scratch1) ? scratch2 : scratch1;
+  __ EnterInternalFrame();
+  __ PushHandleScope(other);
+  // Push the stack address where the list of arguments ends.
+  __ mov(other, esp);
+  __ sub(Operand(other), Immediate(2 * kPointerSize));
+  __ push(other);
   __ push(receiver);  // receiver
   __ push(reg);  // holder
-  __ mov(reg, Immediate(Handle<AccessorInfo>(callback)));  // callback data
-  __ push(reg);
-  __ push(FieldOperand(reg, AccessorInfo::kDataOffset));
+  __ mov(other, Immediate(callback_handle));
+  __ push(other);
+  __ push(FieldOperand(other, AccessorInfo::kDataOffset));  // data
   __ push(name_reg);  // name
-  __ push(scratch2);  // restore return address
+  // Save a pointer to where we pushed the arguments pointer.
+  // This will be passed as the const Arguments& to the C++ callback.
+  __ mov(eax, esp);
+  __ add(Operand(eax), Immediate(5 * kPointerSize));
+  __ mov(ebx, esp);
 
-  // Do tail-call to the runtime system.
-  ExternalReference load_callback_property =
-      ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
-  __ TailCallRuntime(load_callback_property, 5, 1);
+  // Do call through the api.
+  ASSERT_EQ(6, ApiGetterEntryStub::kStackSpace);
+  Address getter_address = v8::ToCData<Address>(callback->getter());
+  ApiFunction fun(getter_address);
+  ApiGetterEntryStub stub(callback_handle, &fun);
+  __ CallStub(&stub);
+
+  Register tmp = other.is(eax) ? reg : other;
+  __ PopHandleScope(tmp);
+  __ LeaveInternalFrame();
+
+  __ ret(0);
 }
 
 
diff --git a/src/ic.cc b/src/ic.cc
index 264b99c..c12dba7 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -265,6 +265,55 @@
 }
 
 
+Code* KeyedLoadIC::external_array_stub(JSObject::ElementsKind elements_kind) {
+  switch (elements_kind) {
+    case JSObject::EXTERNAL_BYTE_ELEMENTS:
+      return Builtins::builtin(Builtins::KeyedLoadIC_ExternalByteArray);
+    case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+      return Builtins::builtin(Builtins::KeyedLoadIC_ExternalUnsignedByteArray);
+    case JSObject::EXTERNAL_SHORT_ELEMENTS:
+      return Builtins::builtin(Builtins::KeyedLoadIC_ExternalShortArray);
+    case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+      return Builtins::builtin(
+          Builtins::KeyedLoadIC_ExternalUnsignedShortArray);
+    case JSObject::EXTERNAL_INT_ELEMENTS:
+      return Builtins::builtin(Builtins::KeyedLoadIC_ExternalIntArray);
+    case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+      return Builtins::builtin(Builtins::KeyedLoadIC_ExternalUnsignedIntArray);
+    case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+      return Builtins::builtin(Builtins::KeyedLoadIC_ExternalFloatArray);
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+Code* KeyedStoreIC::external_array_stub(JSObject::ElementsKind elements_kind) {
+  switch (elements_kind) {
+    case JSObject::EXTERNAL_BYTE_ELEMENTS:
+      return Builtins::builtin(Builtins::KeyedStoreIC_ExternalByteArray);
+    case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+      return Builtins::builtin(
+          Builtins::KeyedStoreIC_ExternalUnsignedByteArray);
+    case JSObject::EXTERNAL_SHORT_ELEMENTS:
+      return Builtins::builtin(Builtins::KeyedStoreIC_ExternalShortArray);
+    case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+      return Builtins::builtin(
+          Builtins::KeyedStoreIC_ExternalUnsignedShortArray);
+    case JSObject::EXTERNAL_INT_ELEMENTS:
+      return Builtins::builtin(Builtins::KeyedStoreIC_ExternalIntArray);
+    case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+      return Builtins::builtin(Builtins::KeyedStoreIC_ExternalUnsignedIntArray);
+    case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+      return Builtins::builtin(Builtins::KeyedStoreIC_ExternalFloatArray);
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
 static bool HasInterceptorGetter(JSObject* object) {
   return !object->GetNamedInterceptor()->getter()->IsUndefined();
 }
@@ -823,7 +872,14 @@
   bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
 
   if (use_ic) {
-    set_target(generic_stub());
+    Code* stub = generic_stub();
+    if (object->IsJSObject()) {
+      Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+      if (receiver->HasExternalArrayElements()) {
+        stub = external_array_stub(receiver->GetElementsKind());
+      }
+    }
+    set_target(stub);
     // For JSObjects that are not value wrappers and that do not have
     // indexed interceptors, we initialize the inlined fast case (if
     // present) by patching the inlined map check.
@@ -1110,7 +1166,16 @@
   bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
   ASSERT(!(use_ic && object->IsJSGlobalProxy()));
 
-  if (use_ic) set_target(generic_stub());
+  if (use_ic) {
+    Code* stub = generic_stub();
+    if (object->IsJSObject()) {
+      Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+      if (receiver->HasExternalArrayElements()) {
+        stub = external_array_stub(receiver->GetElementsKind());
+      }
+    }
+    set_target(stub);
+  }
 
   // Set the property.
   return Runtime::SetObjectProperty(object, key, value, NONE);
diff --git a/src/ic.h b/src/ic.h
index fcf1ec0..8709088 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -269,6 +269,13 @@
   static void GeneratePreMonomorphic(MacroAssembler* masm);
   static void GenerateGeneric(MacroAssembler* masm);
 
+  // Generators for external array types. See objects.h.
+  // These are similar to the generic IC; they optimize the case of
+  // operating upon external array types but fall back to the runtime
+  // for all other types.
+  static void GenerateExternalArray(MacroAssembler* masm,
+                                    ExternalArrayType array_type);
+
   // Clear the use of the inlined version.
   static void ClearInlinedVersion(Address address);
 
@@ -294,6 +301,7 @@
   static Code* pre_monomorphic_stub() {
     return Builtins::builtin(Builtins::KeyedLoadIC_PreMonomorphic);
   }
+  static Code* external_array_stub(JSObject::ElementsKind elements_kind);
 
   static void Clear(Address address, Code* target);
 
@@ -358,6 +366,13 @@
   static void GenerateGeneric(MacroAssembler* masm);
   static void GenerateExtendStorage(MacroAssembler* masm);
 
+  // Generators for external array types. See objects.h.
+  // These are similar to the generic IC; they optimize the case of
+  // operating upon external array types but fall back to the runtime
+  // for all other types.
+  static void GenerateExternalArray(MacroAssembler* masm,
+                                    ExternalArrayType array_type);
+
   // Clear the inlined version so the IC is always hit.
   static void ClearInlinedVersion(Address address);
 
@@ -384,6 +399,7 @@
   static Code* generic_stub() {
     return Builtins::builtin(Builtins::KeyedStoreIC_Generic);
   }
+  static Code* external_array_stub(JSObject::ElementsKind elements_kind);
 
   static void Clear(Address address, Code* target);
 
diff --git a/src/list.h b/src/list.h
index 25211d9..19dc733 100644
--- a/src/list.h
+++ b/src/list.h
@@ -48,6 +48,7 @@
 class List {
  public:
 
+  List() { Initialize(0); }
   INLINE(explicit List(int capacity)) { Initialize(capacity); }
   INLINE(~List()) { DeleteData(data_); }
 
diff --git a/src/location.h b/src/location.h
index 59cd88a..c4a77cb 100644
--- a/src/location.h
+++ b/src/location.h
@@ -35,16 +35,17 @@
 
 class Location BASE_EMBEDDED {
  public:
+  enum Type { NOWHERE, TEMP };
+
   static Location Temporary() { return Location(TEMP); }
   static Location Nowhere() { return Location(NOWHERE); }
-  static Location Constant() { return Location(CONSTANT); }
 
   bool is_temporary() { return type_ == TEMP; }
   bool is_nowhere() { return type_ == NOWHERE; }
 
- private:
-  enum Type { TEMP, NOWHERE, CONSTANT };
+  Type type() { return type_; }
 
+ private:
   explicit Location(Type type) : type_(type) {}
 
   Type type_;
diff --git a/src/log.cc b/src/log.cc
index d1d9a31..2e7796a 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -125,6 +125,9 @@
   bool overflow_;  // Tells whether a buffer overflow has occurred.
   Semaphore* buffer_semaphore_;  // Semaphore used for buffer synchronization.
 
+  // Tells whether the profiler is engaged, that is, the processing thread
+  // is started.
+  bool engaged_;
+
   // Tells whether worker thread should continue running.
   bool running_;
 
@@ -243,17 +246,25 @@
 //
 // Profiler implementation.
 //
-Profiler::Profiler() {
-  buffer_semaphore_ = OS::CreateSemaphore(0);
-  head_ = 0;
-  tail_ = 0;
-  overflow_ = false;
-  running_ = false;
+Profiler::Profiler()
+    : head_(0),
+      tail_(0),
+      overflow_(false),
+      buffer_semaphore_(OS::CreateSemaphore(0)),
+      engaged_(false),
+      running_(false) {
 }
 
 
 void Profiler::Engage() {
-  OS::LogSharedLibraryAddresses();
+  if (engaged_) return;
+  engaged_ = true;
+
+  // TODO(mnaganov): This is actually "Chromium" mode. Flags need to be revised.
+  // http://code.google.com/p/v8/issues/detail?id=487
+  if (!FLAG_prof_lazy) {
+    OS::LogSharedLibraryAddresses();
+  }
 
   // Start thread processing the profiler buffer.
   running_ = true;
@@ -268,6 +279,8 @@
 
 
 void Profiler::Disengage() {
+  if (!engaged_) return;
+
   // Stop receiving ticks.
   Logger::ticker_->ClearProfiler();
 
@@ -1053,6 +1066,7 @@
   }
   if (modules_to_enable & PROFILER_MODULE_CPU) {
     if (FLAG_prof_lazy) {
+      profiler_->Engage();
       LOG(UncheckedStringEvent("profiler", "resume"));
       FLAG_log_code = true;
       LogCompiledFunctions();
@@ -1245,7 +1259,9 @@
     } else {
       is_logging_ = true;
     }
-    profiler_->Engage();
+    if (!FLAG_prof_lazy) {
+      profiler_->Engage();
+    }
   }
 
   LogMessageBuilder::set_write_failure_handler(StopLoggingAndProfiling);
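
The engaged_ flag makes Engage() and Disengage() safe to call from several paths (logger setup and the lazy resume above). A stripped-down sketch of the pattern; Worker and its members are illustrative names, not the actual V8 types:

    class Worker {
     public:
      Worker() : engaged_(false), running_(false) {}
      void Engage() {
        if (engaged_) return;   // later callers become no-ops
        engaged_ = true;
        running_ = true;
        // ... start the processing thread ...
      }
      void Disengage() {
        if (!engaged_) return;  // nothing to stop if Engage() never ran
        running_ = false;
        // ... stop and join the processing thread ...
      }
     private:
      bool engaged_;
      bool running_;
    };
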
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index afa51f6..10ad294 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -117,6 +117,27 @@
     case PIXEL_ARRAY_TYPE:
       PixelArray::cast(this)->PixelArrayPrint();
       break;
+    case EXTERNAL_BYTE_ARRAY_TYPE:
+      ExternalByteArray::cast(this)->ExternalByteArrayPrint();
+      break;
+    case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
+      ExternalUnsignedByteArray::cast(this)->ExternalUnsignedByteArrayPrint();
+      break;
+    case EXTERNAL_SHORT_ARRAY_TYPE:
+      ExternalShortArray::cast(this)->ExternalShortArrayPrint();
+      break;
+    case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
+      ExternalUnsignedShortArray::cast(this)->ExternalUnsignedShortArrayPrint();
+      break;
+    case EXTERNAL_INT_ARRAY_TYPE:
+      ExternalIntArray::cast(this)->ExternalIntArrayPrint();
+      break;
+    case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
+      ExternalUnsignedIntArray::cast(this)->ExternalUnsignedIntArrayPrint();
+      break;
+    case EXTERNAL_FLOAT_ARRAY_TYPE:
+      ExternalFloatArray::cast(this)->ExternalFloatArrayPrint();
+      break;
     case FILLER_TYPE:
       PrintF("filler");
       break;
@@ -196,6 +217,28 @@
     case PIXEL_ARRAY_TYPE:
       PixelArray::cast(this)->PixelArrayVerify();
       break;
+    case EXTERNAL_BYTE_ARRAY_TYPE:
+      ExternalByteArray::cast(this)->ExternalByteArrayVerify();
+      break;
+    case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
+      ExternalUnsignedByteArray::cast(this)->ExternalUnsignedByteArrayVerify();
+      break;
+    case EXTERNAL_SHORT_ARRAY_TYPE:
+      ExternalShortArray::cast(this)->ExternalShortArrayVerify();
+      break;
+    case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
+      ExternalUnsignedShortArray::cast(this)->
+          ExternalUnsignedShortArrayVerify();
+      break;
+    case EXTERNAL_INT_ARRAY_TYPE:
+      ExternalIntArray::cast(this)->ExternalIntArrayVerify();
+      break;
+    case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
+      ExternalUnsignedIntArray::cast(this)->ExternalUnsignedIntArrayVerify();
+      break;
+    case EXTERNAL_FLOAT_ARRAY_TYPE:
+      ExternalFloatArray::cast(this)->ExternalFloatArrayVerify();
+      break;
     case CODE_TYPE:
       Code::cast(this)->CodeVerify();
       break;
@@ -274,6 +317,41 @@
 }
 
 
+void ExternalByteArray::ExternalByteArrayPrint() {
+  PrintF("external byte array");
+}
+
+
+void ExternalUnsignedByteArray::ExternalUnsignedByteArrayPrint() {
+  PrintF("external unsigned byte array");
+}
+
+
+void ExternalShortArray::ExternalShortArrayPrint() {
+  PrintF("external short array");
+}
+
+
+void ExternalUnsignedShortArray::ExternalUnsignedShortArrayPrint() {
+  PrintF("external unsigned short array");
+}
+
+
+void ExternalIntArray::ExternalIntArrayPrint() {
+  PrintF("external int array");
+}
+
+
+void ExternalUnsignedIntArray::ExternalUnsignedIntArrayPrint() {
+  PrintF("external unsigned int array");
+}
+
+
+void ExternalFloatArray::ExternalFloatArrayPrint() {
+  PrintF("external float array");
+}
+
+
 void ByteArray::ByteArrayVerify() {
   ASSERT(IsByteArray());
 }
@@ -284,6 +362,41 @@
 }
 
 
+void ExternalByteArray::ExternalByteArrayVerify() {
+  ASSERT(IsExternalByteArray());
+}
+
+
+void ExternalUnsignedByteArray::ExternalUnsignedByteArrayVerify() {
+  ASSERT(IsExternalUnsignedByteArray());
+}
+
+
+void ExternalShortArray::ExternalShortArrayVerify() {
+  ASSERT(IsExternalShortArray());
+}
+
+
+void ExternalUnsignedShortArray::ExternalUnsignedShortArrayVerify() {
+  ASSERT(IsExternalUnsignedShortArray());
+}
+
+
+void ExternalIntArray::ExternalIntArrayVerify() {
+  ASSERT(IsExternalIntArray());
+}
+
+
+void ExternalUnsignedIntArray::ExternalUnsignedIntArrayVerify() {
+  ASSERT(IsExternalUnsignedIntArray());
+}
+
+
+void ExternalFloatArray::ExternalFloatArrayVerify() {
+  ASSERT(IsExternalFloatArray());
+}
+
+
 void JSObject::PrintProperties() {
   if (HasFastProperties()) {
     DescriptorArray* descs = map()->instance_descriptors();
@@ -345,6 +458,58 @@
       }
       break;
     }
+    case EXTERNAL_BYTE_ELEMENTS: {
+      ExternalByteArray* p = ExternalByteArray::cast(elements());
+      for (int i = 0; i < p->length(); i++) {
+        PrintF("   %d: %d\n", i, static_cast<int>(p->get(i)));
+      }
+      break;
+    }
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
+      ExternalUnsignedByteArray* p =
+          ExternalUnsignedByteArray::cast(elements());
+      for (int i = 0; i < p->length(); i++) {
+        PrintF("   %d: %d\n", i, static_cast<int>(p->get(i)));
+      }
+      break;
+    }
+    case EXTERNAL_SHORT_ELEMENTS: {
+      ExternalShortArray* p = ExternalShortArray::cast(elements());
+      for (int i = 0; i < p->length(); i++) {
+        PrintF("   %d: %d\n", i, static_cast<int>(p->get(i)));
+      }
+      break;
+    }
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
+      ExternalUnsignedShortArray* p =
+          ExternalUnsignedShortArray::cast(elements());
+      for (int i = 0; i < p->length(); i++) {
+        PrintF("   %d: %d\n", i, static_cast<int>(p->get(i)));
+      }
+      break;
+    }
+    case EXTERNAL_INT_ELEMENTS: {
+      ExternalIntArray* p = ExternalIntArray::cast(elements());
+      for (int i = 0; i < p->length(); i++) {
+        PrintF("   %d: %d\n", i, static_cast<int>(p->get(i)));
+      }
+      break;
+    }
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
+      ExternalUnsignedIntArray* p =
+          ExternalUnsignedIntArray::cast(elements());
+      for (int i = 0; i < p->length(); i++) {
+        PrintF("   %d: %d\n", i, static_cast<int>(p->get(i)));
+      }
+      break;
+    }
+    case EXTERNAL_FLOAT_ELEMENTS: {
+      ExternalFloatArray* p = ExternalFloatArray::cast(elements());
+      for (int i = 0; i < p->length(); i++) {
+        PrintF("   %d: %f\n", i, p->get(i));
+      }
+      break;
+    }
     case DICTIONARY_ELEMENTS:
       elements()->Print();
       break;
@@ -433,6 +598,16 @@
     case FIXED_ARRAY_TYPE: return "FIXED_ARRAY";
     case BYTE_ARRAY_TYPE: return "BYTE_ARRAY";
     case PIXEL_ARRAY_TYPE: return "PIXEL_ARRAY";
+    case EXTERNAL_BYTE_ARRAY_TYPE: return "EXTERNAL_BYTE_ARRAY";
+    case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
+      return "EXTERNAL_UNSIGNED_BYTE_ARRAY";
+    case EXTERNAL_SHORT_ARRAY_TYPE: return "EXTERNAL_SHORT_ARRAY";
+    case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
+      return "EXTERNAL_UNSIGNED_SHORT_ARRAY";
+    case EXTERNAL_INT_ARRAY_TYPE: return "EXTERNAL_INT_ARRAY";
+    case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
+      return "EXTERNAL_UNSIGNED_INT_ARRAY";
+    case EXTERNAL_FLOAT_ARRAY_TYPE: return "EXTERNAL_FLOAT_ARRAY";
     case FILLER_TYPE: return "FILLER";
     case JS_OBJECT_TYPE: return "JS_OBJECT";
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE: return "JS_CONTEXT_EXTENSION_OBJECT";
@@ -804,6 +979,7 @@
   VerifyPointer(name());
   VerifyPointer(data());
   VerifyPointer(flag());
+  VerifyPointer(load_stub_cache());
 }
 
 void AccessorInfo::AccessorInfoPrint() {
diff --git a/src/objects-inl.h b/src/objects-inl.h
index cb7b7c8..5907a86 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -360,6 +360,65 @@
 }
 
 
+bool Object::IsExternalArray() {
+  if (!Object::IsHeapObject())
+    return false;
+  InstanceType instance_type =
+      HeapObject::cast(this)->map()->instance_type();
+  return (instance_type >= EXTERNAL_BYTE_ARRAY_TYPE &&
+          instance_type <= EXTERNAL_FLOAT_ARRAY_TYPE);
+}
+
+
+bool Object::IsExternalByteArray() {
+  return Object::IsHeapObject() &&
+      HeapObject::cast(this)->map()->instance_type() ==
+      EXTERNAL_BYTE_ARRAY_TYPE;
+}
+
+
+bool Object::IsExternalUnsignedByteArray() {
+  return Object::IsHeapObject() &&
+      HeapObject::cast(this)->map()->instance_type() ==
+      EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE;
+}
+
+
+bool Object::IsExternalShortArray() {
+  return Object::IsHeapObject() &&
+      HeapObject::cast(this)->map()->instance_type() ==
+      EXTERNAL_SHORT_ARRAY_TYPE;
+}
+
+
+bool Object::IsExternalUnsignedShortArray() {
+  return Object::IsHeapObject() &&
+      HeapObject::cast(this)->map()->instance_type() ==
+      EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE;
+}
+
+
+bool Object::IsExternalIntArray() {
+  return Object::IsHeapObject() &&
+      HeapObject::cast(this)->map()->instance_type() ==
+      EXTERNAL_INT_ARRAY_TYPE;
+}
+
+
+bool Object::IsExternalUnsignedIntArray() {
+  return Object::IsHeapObject() &&
+      HeapObject::cast(this)->map()->instance_type() ==
+      EXTERNAL_UNSIGNED_INT_ARRAY_TYPE;
+}
+
+
+bool Object::IsExternalFloatArray() {
+  return Object::IsHeapObject() &&
+      HeapObject::cast(this)->map()->instance_type() ==
+      EXTERNAL_FLOAT_ARRAY_TYPE;
+}
+
+
 bool Object::IsFailure() {
   return HAS_FAILURE_TAG(this);
 }
@@ -886,6 +945,25 @@
 }
 
 
+bool MapWord::IsSerializationAddress() {
+  return HAS_SMI_TAG(reinterpret_cast<Object*>(value_));
+}
+
+
+MapWord MapWord::FromSerializationAddress(int raw) {
+  // When the map word is being used as a serialization address we Smi-encode
+  // the serialization address (which is always a smallish positive integer).
+  return MapWord(reinterpret_cast<uintptr_t>(Smi::FromInt(raw)));
+}
+
+
+int MapWord::ToSerializationAddress() {
+  // When the map word is being used as a serialization address we treat the
+  // map word as a Smi and get the small integer that it encodes.
+  return reinterpret_cast<Smi*>(value_)->value();
+}
+
+
 bool MapWord::IsMarked() {
   return (value_ & kMarkingMask) == 0;
 }
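
A round-trip sketch for the serialization-address encoding above (raw must be a smallish positive integer, per the comment):

    int raw = 1234;
    MapWord word = MapWord::FromSerializationAddress(raw);
    ASSERT(word.IsSerializationAddress());       // the Smi tag marks it
    ASSERT(word.ToSerializationAddress() == raw);
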
@@ -1084,14 +1162,16 @@
 Array* JSObject::elements() {
   Object* array = READ_FIELD(this, kElementsOffset);
   // In the assert below Dictionary is covered under FixedArray.
-  ASSERT(array->IsFixedArray() || array->IsPixelArray());
+  ASSERT(array->IsFixedArray() || array->IsPixelArray() ||
+         array->IsExternalArray());
   return reinterpret_cast<Array*>(array);
 }
 
 
 void JSObject::set_elements(Array* value, WriteBarrierMode mode) {
   // In the assert below Dictionary is covered under FixedArray.
-  ASSERT(value->IsFixedArray() || value->IsPixelArray());
+  ASSERT(value->IsFixedArray() || value->IsPixelArray() ||
+         value->IsExternalArray());
   WRITE_FIELD(this, kElementsOffset, value);
   CONDITIONAL_WRITE_BARRIER(this, kElementsOffset, mode);
 }
@@ -1554,6 +1634,14 @@
 CAST_ACCESSOR(Proxy)
 CAST_ACCESSOR(ByteArray)
 CAST_ACCESSOR(PixelArray)
+CAST_ACCESSOR(ExternalArray)
+CAST_ACCESSOR(ExternalByteArray)
+CAST_ACCESSOR(ExternalUnsignedByteArray)
+CAST_ACCESSOR(ExternalShortArray)
+CAST_ACCESSOR(ExternalUnsignedShortArray)
+CAST_ACCESSOR(ExternalIntArray)
+CAST_ACCESSOR(ExternalUnsignedIntArray)
+CAST_ACCESSOR(ExternalFloatArray)
 CAST_ACCESSOR(Struct)
 
 
@@ -1819,9 +1907,9 @@
 Map* ExternalAsciiString::StringMap(int length) {
   Map* map;
   // Number of characters: determines the map.
-  if (length <= String::kMaxShortStringSize) {
+  if (length <= String::kMaxShortSize) {
     map = Heap::short_external_ascii_string_map();
-  } else if (length <= String::kMaxMediumStringSize) {
+  } else if (length <= String::kMaxMediumSize) {
     map = Heap::medium_external_ascii_string_map();
   } else {
     map = Heap::long_external_ascii_string_map();
@@ -1833,9 +1921,9 @@
 Map* ExternalAsciiString::SymbolMap(int length) {
   Map* map;
   // Number of characters: determines the map.
-  if (length <= String::kMaxShortStringSize) {
+  if (length <= String::kMaxShortSize) {
     map = Heap::short_external_ascii_symbol_map();
-  } else if (length <= String::kMaxMediumStringSize) {
+  } else if (length <= String::kMaxMediumSize) {
     map = Heap::medium_external_ascii_symbol_map();
   } else {
     map = Heap::long_external_ascii_symbol_map();
@@ -1858,9 +1946,9 @@
 Map* ExternalTwoByteString::StringMap(int length) {
   Map* map;
   // Number of characters: determines the map.
-  if (length <= String::kMaxShortStringSize) {
+  if (length <= String::kMaxShortSize) {
     map = Heap::short_external_string_map();
-  } else if (length <= String::kMaxMediumStringSize) {
+  } else if (length <= String::kMaxMediumSize) {
     map = Heap::medium_external_string_map();
   } else {
     map = Heap::long_external_string_map();
@@ -1872,9 +1960,9 @@
 Map* ExternalTwoByteString::SymbolMap(int length) {
   Map* map;
   // Number of characters: determines the map.
-  if (length <= String::kMaxShortStringSize) {
+  if (length <= String::kMaxShortSize) {
     map = Heap::short_external_symbol_map();
-  } else if (length <= String::kMaxMediumStringSize) {
+  } else if (length <= String::kMaxMediumSize) {
     map = Heap::medium_external_symbol_map();
   } else {
     map = Heap::long_external_symbol_map();
@@ -1938,6 +2026,116 @@
 }
 
 
+void* ExternalArray::external_pointer() {
+  intptr_t ptr = READ_INTPTR_FIELD(this, kExternalPointerOffset);
+  return reinterpret_cast<void*>(ptr);
+}
+
+
+void ExternalArray::set_external_pointer(void* value, WriteBarrierMode mode) {
+  intptr_t ptr = reinterpret_cast<intptr_t>(value);
+  WRITE_INTPTR_FIELD(this, kExternalPointerOffset, ptr);
+}
+
+
+int8_t ExternalByteArray::get(int index) {
+  ASSERT((index >= 0) && (index < this->length()));
+  int8_t* ptr = static_cast<int8_t*>(external_pointer());
+  return ptr[index];
+}
+
+
+void ExternalByteArray::set(int index, int8_t value) {
+  ASSERT((index >= 0) && (index < this->length()));
+  int8_t* ptr = static_cast<int8_t*>(external_pointer());
+  ptr[index] = value;
+}
+
+
+uint8_t ExternalUnsignedByteArray::get(int index) {
+  ASSERT((index >= 0) && (index < this->length()));
+  uint8_t* ptr = static_cast<uint8_t*>(external_pointer());
+  return ptr[index];
+}
+
+
+void ExternalUnsignedByteArray::set(int index, uint8_t value) {
+  ASSERT((index >= 0) && (index < this->length()));
+  uint8_t* ptr = static_cast<uint8_t*>(external_pointer());
+  ptr[index] = value;
+}
+
+
+int16_t ExternalShortArray::get(int index) {
+  ASSERT((index >= 0) && (index < this->length()));
+  int16_t* ptr = static_cast<int16_t*>(external_pointer());
+  return ptr[index];
+}
+
+
+void ExternalShortArray::set(int index, int16_t value) {
+  ASSERT((index >= 0) && (index < this->length()));
+  int16_t* ptr = static_cast<int16_t*>(external_pointer());
+  ptr[index] = value;
+}
+
+
+uint16_t ExternalUnsignedShortArray::get(int index) {
+  ASSERT((index >= 0) && (index < this->length()));
+  uint16_t* ptr = static_cast<uint16_t*>(external_pointer());
+  return ptr[index];
+}
+
+
+void ExternalUnsignedShortArray::set(int index, uint16_t value) {
+  ASSERT((index >= 0) && (index < this->length()));
+  uint16_t* ptr = static_cast<uint16_t*>(external_pointer());
+  ptr[index] = value;
+}
+
+
+int32_t ExternalIntArray::get(int index) {
+  ASSERT((index >= 0) && (index < this->length()));
+  int32_t* ptr = static_cast<int32_t*>(external_pointer());
+  return ptr[index];
+}
+
+
+void ExternalIntArray::set(int index, int32_t value) {
+  ASSERT((index >= 0) && (index < this->length()));
+  int32_t* ptr = static_cast<int32_t*>(external_pointer());
+  ptr[index] = value;
+}
+
+
+uint32_t ExternalUnsignedIntArray::get(int index) {
+  ASSERT((index >= 0) && (index < this->length()));
+  uint32_t* ptr = static_cast<uint32_t*>(external_pointer());
+  return ptr[index];
+}
+
+
+void ExternalUnsignedIntArray::set(int index, uint32_t value) {
+  ASSERT((index >= 0) && (index < this->length()));
+  uint32_t* ptr = static_cast<uint32_t*>(external_pointer());
+  ptr[index] = value;
+}
+
+
+float ExternalFloatArray::get(int index) {
+  ASSERT((index >= 0) && (index < this->length()));
+  float* ptr = static_cast<float*>(external_pointer());
+  return ptr[index];
+}
+
+
+void ExternalFloatArray::set(int index, float value) {
+  ASSERT((index >= 0) && (index < this->length()));
+  float* ptr = static_cast<float*>(external_pointer());
+  ptr[index] = value;
+}
+
+
 int Map::instance_size() {
   return READ_BYTE_FIELD(this, kInstanceSizeOffset) << kPointerSizeLog2;
 }
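
The typed accessors above reduce to raw indexing of the embedder's buffer; a plain-C++ equivalent with no V8 objects involved (buf stands in for what external_pointer() returns):

    #include <stdint.h>

    int main() {
      int8_t buf[4] = {10, 20, 30, 40};  // embedder-owned backing store
      int8_t* ptr = buf;                 // stands in for external_pointer()
      int8_t v = ptr[2];                 // ExternalByteArray::get(2) -> 30
      ptr[2] = -5;                       // ExternalByteArray::set(2, -5)
      return v == 30 ? 0 : 1;
    }
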
@@ -2238,6 +2436,7 @@
 ACCESSORS(AccessorInfo, data, Object, kDataOffset)
 ACCESSORS(AccessorInfo, name, Object, kNameOffset)
 ACCESSORS(AccessorInfo, flag, Smi, kFlagOffset)
+ACCESSORS(AccessorInfo, load_stub_cache, Object, kLoadStubCacheOffset)
 
 ACCESSORS(AccessCheckInfo, named_callback, Object, kNamedCallbackOffset)
 ACCESSORS(AccessCheckInfo, indexed_callback, Object, kIndexedCallbackOffset)
@@ -2646,6 +2845,25 @@
     ASSERT(array->IsDictionary());
     return DICTIONARY_ELEMENTS;
   }
+  if (array->IsExternalArray()) {
+    switch (array->map()->instance_type()) {
+      case EXTERNAL_BYTE_ARRAY_TYPE:
+        return EXTERNAL_BYTE_ELEMENTS;
+      case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
+        return EXTERNAL_UNSIGNED_BYTE_ELEMENTS;
+      case EXTERNAL_SHORT_ARRAY_TYPE:
+        return EXTERNAL_SHORT_ELEMENTS;
+      case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
+        return EXTERNAL_UNSIGNED_SHORT_ELEMENTS;
+      case EXTERNAL_INT_ARRAY_TYPE:
+        return EXTERNAL_INT_ELEMENTS;
+      case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
+        return EXTERNAL_UNSIGNED_INT_ELEMENTS;
+      default:
+        ASSERT(array->map()->instance_type() == EXTERNAL_FLOAT_ARRAY_TYPE);
+        return EXTERNAL_FLOAT_ELEMENTS;
+    }
+  }
   ASSERT(array->IsPixelArray());
   return PIXEL_ELEMENTS;
 }
@@ -2666,6 +2884,52 @@
 }
 
 
+bool JSObject::HasExternalArrayElements() {
+  return (HasExternalByteElements() ||
+          HasExternalUnsignedByteElements() ||
+          HasExternalShortElements() ||
+          HasExternalUnsignedShortElements() ||
+          HasExternalIntElements() ||
+          HasExternalUnsignedIntElements() ||
+          HasExternalFloatElements());
+}
+
+
+bool JSObject::HasExternalByteElements() {
+  return GetElementsKind() == EXTERNAL_BYTE_ELEMENTS;
+}
+
+
+bool JSObject::HasExternalUnsignedByteElements() {
+  return GetElementsKind() == EXTERNAL_UNSIGNED_BYTE_ELEMENTS;
+}
+
+
+bool JSObject::HasExternalShortElements() {
+  return GetElementsKind() == EXTERNAL_SHORT_ELEMENTS;
+}
+
+
+bool JSObject::HasExternalUnsignedShortElements() {
+  return GetElementsKind() == EXTERNAL_UNSIGNED_SHORT_ELEMENTS;
+}
+
+
+bool JSObject::HasExternalIntElements() {
+  return GetElementsKind() == EXTERNAL_INT_ELEMENTS;
+}
+
+
+bool JSObject::HasExternalUnsignedIntElements() {
+  return GetElementsKind() == EXTERNAL_UNSIGNED_INT_ELEMENTS;
+}
+
+
+bool JSObject::HasExternalFloatElements() {
+  return GetElementsKind() == EXTERNAL_FLOAT_ELEMENTS;
+}
+
+
 bool JSObject::HasNamedInterceptor() {
   return map()->has_named_interceptor();
 }
@@ -2712,7 +2976,7 @@
 
 
 bool StringHasher::has_trivial_hash() {
-  return length_ > String::kMaxMediumStringSize;
+  return length_ > String::kMaxMediumSize;
 }
 
 
diff --git a/src/objects.cc b/src/objects.cc
index ab1d35f..b14ec5c 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -751,10 +751,11 @@
 
 bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
 #ifdef DEBUG
-  {  // NOLINT (presubmit.py gets confused about if and braces)
+  if (FLAG_enable_slow_asserts) {
     // Assert that the resource and the string are equivalent.
     ASSERT(static_cast<size_t>(this->length()) == resource->length());
-    SmartPointer<uc16> smart_chars = this->ToWideCString();
+    SmartPointer<uc16> smart_chars(NewArray<uc16>(this->length()));
+    String::WriteToFlat(this, *smart_chars, 0, this->length());
     ASSERT(memcmp(*smart_chars,
                   resource->data(),
                   resource->length() * sizeof(**smart_chars)) == 0);
@@ -794,10 +795,11 @@
 
 bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
 #ifdef DEBUG
-  {  // NOLINT (presubmit.py gets confused about if and braces)
+  if (FLAG_enable_slow_asserts) {
     // Assert that the resource and the string are equivalent.
     ASSERT(static_cast<size_t>(this->length()) == resource->length());
-    SmartPointer<char> smart_chars = this->ToCString();
+    SmartPointer<char> smart_chars(NewArray<char>(this->length()));
+    String::WriteToFlat(this, *smart_chars, 0, this->length());
     ASSERT(memcmp(*smart_chars,
                   resource->data(),
                   resource->length()*sizeof(**smart_chars)) == 0);
@@ -837,7 +839,7 @@
 
 void String::StringShortPrint(StringStream* accumulator) {
   int len = length();
-  if (len > kMaxMediumStringSize) {
+  if (len > kMaxMediumSize) {
     accumulator->Add("<Very long string[%u]>", len);
     return;
   }
@@ -1005,6 +1007,34 @@
     case PIXEL_ARRAY_TYPE:
       accumulator->Add("<PixelArray[%u]>", PixelArray::cast(this)->length());
       break;
+    case EXTERNAL_BYTE_ARRAY_TYPE:
+      accumulator->Add("<ExternalByteArray[%u]>",
+                       ExternalByteArray::cast(this)->length());
+      break;
+    case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
+      accumulator->Add("<ExternalUnsignedByteArray[%u]>",
+                       ExternalUnsignedByteArray::cast(this)->length());
+      break;
+    case EXTERNAL_SHORT_ARRAY_TYPE:
+      accumulator->Add("<ExternalShortArray[%u]>",
+                       ExternalShortArray::cast(this)->length());
+      break;
+    case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
+      accumulator->Add("<ExternalUnsignedShortArray[%u]>",
+                       ExternalUnsignedShortArray::cast(this)->length());
+      break;
+    case EXTERNAL_INT_ARRAY_TYPE:
+      accumulator->Add("<ExternalIntArray[%u]>",
+                       ExternalIntArray::cast(this)->length());
+      break;
+    case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
+      accumulator->Add("<ExternalUnsignedIntArray[%u]>",
+                       ExternalUnsignedIntArray::cast(this)->length());
+      break;
+    case EXTERNAL_FLOAT_ARRAY_TYPE:
+      accumulator->Add("<ExternalFloatArray[%u]>",
+                       ExternalFloatArray::cast(this)->length());
+      break;
     case SHARED_FUNCTION_INFO_TYPE:
       accumulator->Add("<SharedFunctionInfo>");
       break;
@@ -1147,6 +1177,13 @@
     case FILLER_TYPE:
     case BYTE_ARRAY_TYPE:
     case PIXEL_ARRAY_TYPE:
+    case EXTERNAL_BYTE_ARRAY_TYPE:
+    case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
+    case EXTERNAL_SHORT_ARRAY_TYPE:
+    case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
+    case EXTERNAL_INT_ARRAY_TYPE:
+    case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
+    case EXTERNAL_FLOAT_ARRAY_TYPE:
       break;
     case SHARED_FUNCTION_INFO_TYPE: {
       SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(this);
@@ -1214,7 +1251,8 @@
 
 String* JSObject::constructor_name() {
   if (IsJSFunction()) {
-    return Heap::function_class_symbol();
+    return JSFunction::cast(this)->IsBoilerplate() ?
+      Heap::function_class_symbol() : Heap::closure_symbol();
   }
   if (map()->constructor()->IsJSFunction()) {
     JSFunction* constructor = JSFunction::cast(map()->constructor());
@@ -2237,7 +2275,7 @@
 
 
 Object* JSObject::NormalizeElements() {
-  ASSERT(!HasPixelElements());
+  ASSERT(!HasPixelElements() && !HasExternalArrayElements());
   if (HasDictionaryElements()) return this;
 
   // Get number of entries.
@@ -2322,7 +2360,7 @@
 
 Object* JSObject::DeleteElementPostInterceptor(uint32_t index,
                                                DeleteMode mode) {
-  ASSERT(!HasPixelElements());
+  ASSERT(!HasPixelElements() && !HasExternalArrayElements());
   switch (GetElementsKind()) {
     case FAST_ELEMENTS: {
       uint32_t length = IsJSArray() ?
@@ -2413,10 +2451,17 @@
       }
       break;
     }
-    case PIXEL_ELEMENTS: {
-      // Pixel elements cannot be deleted. Just silently ignore here.
+    case PIXEL_ELEMENTS:
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS:
+      // Pixel and external array elements cannot be deleted; such
+      // requests are silently ignored here.
       break;
-    }
     case DICTIONARY_ELEMENTS: {
       NumberDictionary* dictionary = element_dictionary();
       int entry = dictionary->FindEntry(index);
@@ -2507,7 +2552,15 @@
   // Check if the object is among the indexed properties.
   switch (GetElementsKind()) {
     case PIXEL_ELEMENTS:
-      // Raw pixels do not reference other objects.
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS:
+      // Raw pixels and external arrays do not reference other
+      // objects.
       break;
     case FAST_ELEMENTS: {
       int length = IsJSArray() ?
@@ -2752,7 +2805,15 @@
       case FAST_ELEMENTS:
         break;
       case PIXEL_ELEMENTS:
-        // Ignore getters and setters on pixel elements.
+      case EXTERNAL_BYTE_ELEMENTS:
+      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+      case EXTERNAL_SHORT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+      case EXTERNAL_INT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+      case EXTERNAL_FLOAT_ELEMENTS:
+        // Ignore getters and setters on pixel and external array
+        // elements.
         return Heap::undefined_value();
       case DICTIONARY_ELEMENTS: {
         // Lookup the index.
@@ -3087,7 +3148,7 @@
 
 
 Object* FixedArray::AddKeysFromJSArray(JSArray* array) {
-  ASSERT(!array->HasPixelElements());
+  ASSERT(!array->HasPixelElements() && !array->HasExternalArrayElements());
   switch (array->GetElementsKind()) {
     case JSObject::FAST_ELEMENTS:
       return UnionOfKeys(FixedArray::cast(array->elements()));
@@ -4599,7 +4660,7 @@
 
 uint32_t StringHasher::GetHashField() {
   ASSERT(is_valid());
-  if (length_ <= String::kMaxShortStringSize) {
+  if (length_ <= String::kMaxShortSize) {
     uint32_t payload;
     if (is_array_index()) {
       payload = v8::internal::HashField(array_index(), true);
@@ -4608,7 +4669,7 @@
     }
     return (payload & ((1 << String::kShortLengthShift) - 1)) |
            (length_ << String::kShortLengthShift);
-  } else if (length_ <= String::kMaxMediumStringSize) {
+  } else if (length_ <= String::kMaxMediumSize) {
     uint32_t payload = v8::internal::HashField(GetHash(), false);
     return (payload & ((1 << String::kMediumLengthShift) - 1)) |
            (length_ << String::kMediumLengthShift);
@@ -5201,8 +5262,8 @@
 
 
 void JSObject::SetFastElements(FixedArray* elems) {
-  // We should never end in here with a pixel array.
-  ASSERT(!HasPixelElements());
+  // We should never end in here with a pixel or external array.
+  ASSERT(!HasPixelElements() && !HasExternalArrayElements());
 #ifdef DEBUG
   // Check the provided array is filled with the_hole.
   uint32_t len = static_cast<uint32_t>(elems->length());
@@ -5239,8 +5300,8 @@
 
 
 Object* JSObject::SetSlowElements(Object* len) {
-  // We should never end in here with a pixel array.
-  ASSERT(!HasPixelElements());
+  // We should never end in here with a pixel or external array.
+  ASSERT(!HasPixelElements() && !HasExternalArrayElements());
 
   uint32_t new_length = static_cast<uint32_t>(len->Number());
 
@@ -5318,8 +5379,8 @@
 
 
 Object* JSObject::SetElementsLength(Object* len) {
-  // We should never end in here with a pixel array.
-  ASSERT(!HasPixelElements());
+  // We should never end in here with a pixel or external array.
+  ASSERT(!HasPixelElements() && !HasExternalArrayElements());
 
   Object* smi_length = len->ToSmi();
   if (smi_length->IsSmi()) {
@@ -5420,6 +5481,20 @@
       }
       break;
     }
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS: {
+      // TODO(kbr): Add testcase.
+      ExternalArray* array = ExternalArray::cast(elements());
+      if (index < static_cast<uint32_t>(array->length())) {
+        return true;
+      }
+      break;
+    }
     case DICTIONARY_ELEMENTS: {
       if (element_dictionary()->FindEntry(index)
           != NumberDictionary::kNotFound) {
@@ -5507,6 +5582,16 @@
       PixelArray* pixels = PixelArray::cast(elements());
       return (index < static_cast<uint32_t>(pixels->length()));
     }
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS: {
+      ExternalArray* array = ExternalArray::cast(elements());
+      return (index < static_cast<uint32_t>(array->length()));
+    }
     case DICTIONARY_ELEMENTS: {
       return element_dictionary()->FindEntry(index)
           != NumberDictionary::kNotFound;
@@ -5550,6 +5635,19 @@
       }
       break;
     }
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS: {
+      ExternalArray* array = ExternalArray::cast(elements());
+      if (index < static_cast<uint32_t>(array->length())) {
+        return true;
+      }
+      break;
+    }
     case DICTIONARY_ELEMENTS: {
       if (element_dictionary()->FindEntry(index)
           != NumberDictionary::kNotFound) {
@@ -5690,6 +5788,37 @@
       PixelArray* pixels = PixelArray::cast(elements());
       return pixels->SetValue(index, value);
     }
+    case EXTERNAL_BYTE_ELEMENTS: {
+      ExternalByteArray* array = ExternalByteArray::cast(elements());
+      return array->SetValue(index, value);
+    }
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
+      ExternalUnsignedByteArray* array =
+          ExternalUnsignedByteArray::cast(elements());
+      return array->SetValue(index, value);
+    }
+    case EXTERNAL_SHORT_ELEMENTS: {
+      ExternalShortArray* array = ExternalShortArray::cast(elements());
+      return array->SetValue(index, value);
+    }
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
+      ExternalUnsignedShortArray* array =
+          ExternalUnsignedShortArray::cast(elements());
+      return array->SetValue(index, value);
+    }
+    case EXTERNAL_INT_ELEMENTS: {
+      ExternalIntArray* array = ExternalIntArray::cast(elements());
+      return array->SetValue(index, value);
+    }
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
+      ExternalUnsignedIntArray* array =
+          ExternalUnsignedIntArray::cast(elements());
+      return array->SetValue(index, value);
+    }
+    case EXTERNAL_FLOAT_ELEMENTS: {
+      ExternalFloatArray* array = ExternalFloatArray::cast(elements());
+      return array->SetValue(index, value);
+    }
     case DICTIONARY_ELEMENTS: {
       // Insert element in the dictionary.
       FixedArray* elms = FixedArray::cast(elements());
@@ -5807,6 +5936,17 @@
       UNIMPLEMENTED();
       break;
     }
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS: {
+      // TODO(kbr): Add testcase and implement.
+      UNIMPLEMENTED();
+      break;
+    }
     case DICTIONARY_ELEMENTS: {
       NumberDictionary* dictionary = element_dictionary();
       int entry = dictionary->FindEntry(index);
@@ -5905,6 +6045,65 @@
       }
       break;
     }
+    case EXTERNAL_BYTE_ELEMENTS: {
+      ExternalByteArray* array = ExternalByteArray::cast(elements());
+      if (index < static_cast<uint32_t>(array->length())) {
+        int8_t value = array->get(index);
+        return Smi::FromInt(value);
+      }
+      break;
+    }
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
+      ExternalUnsignedByteArray* array =
+          ExternalUnsignedByteArray::cast(elements());
+      if (index < static_cast<uint32_t>(array->length())) {
+        uint8_t value = array->get(index);
+        return Smi::FromInt(value);
+      }
+      break;
+    }
+    case EXTERNAL_SHORT_ELEMENTS: {
+      ExternalShortArray* array = ExternalShortArray::cast(elements());
+      if (index < static_cast<uint32_t>(array->length())) {
+        int16_t value = array->get(index);
+        return Smi::FromInt(value);
+      }
+      break;
+    }
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
+      ExternalUnsignedShortArray* array =
+          ExternalUnsignedShortArray::cast(elements());
+      if (index < static_cast<uint32_t>(array->length())) {
+        uint16_t value = array->get(index);
+        return Smi::FromInt(value);
+      }
+      break;
+    }
+    case EXTERNAL_INT_ELEMENTS: {
+      ExternalIntArray* array = ExternalIntArray::cast(elements());
+      if (index < static_cast<uint32_t>(array->length())) {
+        int32_t value = array->get(index);
+        return Heap::NumberFromInt32(value);
+      }
+      break;
+    }
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
+      ExternalUnsignedIntArray* array =
+          ExternalUnsignedIntArray::cast(elements());
+      if (index < static_cast<uint32_t>(array->length())) {
+        uint32_t value = array->get(index);
+        return Heap::NumberFromUint32(value);
+      }
+      break;
+    }
+    case EXTERNAL_FLOAT_ELEMENTS: {
+      ExternalFloatArray* array = ExternalFloatArray::cast(elements());
+      if (index < static_cast<uint32_t>(array->length())) {
+        float value = array->get(index);
+        return Heap::AllocateHeapNumber(value);
+      }
+      break;
+    }
     case DICTIONARY_ELEMENTS: {
       NumberDictionary* dictionary = element_dictionary();
       int entry = dictionary->FindEntry(index);
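
The result-boxing rules implied by the external-array cases above, summarized (a reading of the code, not new behavior):

    // byte, unsigned byte, short, unsigned short -> Smi::FromInt
    //   (these element types always fit in a Smi)
    // int32  -> Heap::NumberFromInt32  (Smi when small enough, else HeapNumber)
    // uint32 -> Heap::NumberFromUint32 (Smi when small enough, else HeapNumber)
    // float  -> Heap::AllocateHeapNumber (always a fresh HeapNumber)
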
@@ -5948,7 +6147,14 @@
       }
       break;
     }
-    case PIXEL_ELEMENTS: {
+    case PIXEL_ELEMENTS:
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS: {
       return true;
     }
     case DICTIONARY_ELEMENTS: {
@@ -6172,6 +6378,16 @@
       PixelArray* pixels = PixelArray::cast(elements());
       return index < static_cast<uint32_t>(pixels->length());
     }
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS: {
+      ExternalArray* array = ExternalArray::cast(elements());
+      return index < static_cast<uint32_t>(array->length());
+    }
     case DICTIONARY_ELEMENTS: {
       return element_dictionary()->FindEntry(index)
           != NumberDictionary::kNotFound;
@@ -6392,6 +6608,23 @@
       ASSERT(!storage || storage->length() >= counter);
       break;
     }
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS: {
+      int length = ExternalArray::cast(elements())->length();
+      while (counter < length) {
+        if (storage != NULL) {
+          storage->set(counter, Smi::FromInt(counter), SKIP_WRITE_BARRIER);
+        }
+        counter++;
+      }
+      ASSERT(!storage || storage->length() >= counter);
+      break;
+    }
     case DICTIONARY_ELEMENTS: {
       if (storage != NULL) {
         element_dictionary()->CopyKeysTo(storage, filter);
@@ -6938,7 +7171,7 @@
 // If the object is in dictionary mode, it is converted to fast elements
 // mode.
 Object* JSObject::PrepareElementsForSort(uint32_t limit) {
-  ASSERT(!HasPixelElements());
+  ASSERT(!HasPixelElements() && !HasExternalArrayElements());
 
   if (HasDictionaryElements()) {
     // Convert to fast elements containing only the existing properties.
@@ -7070,6 +7303,99 @@
 }
 
 
+template<typename ExternalArrayClass, typename ValueType>
+static Object* ExternalArrayIntSetter(ExternalArrayClass* receiver,
+                                      uint32_t index,
+                                      Object* value) {
+  ValueType cast_value = 0;
+  if (index < static_cast<uint32_t>(receiver->length())) {
+    if (value->IsSmi()) {
+      int int_value = Smi::cast(value)->value();
+      cast_value = static_cast<ValueType>(int_value);
+    } else if (value->IsHeapNumber()) {
+      double double_value = HeapNumber::cast(value)->value();
+      cast_value = static_cast<ValueType>(DoubleToInt32(double_value));
+    } else {
+      // Clamp undefined to zero (default). All other types have been
+      // converted to a number type further up in the call chain.
+      ASSERT(value->IsUndefined());
+    }
+    receiver->set(index, cast_value);
+  }
+  return Heap::NumberFromInt32(cast_value);
+}
+
+
+Object* ExternalByteArray::SetValue(uint32_t index, Object* value) {
+  return ExternalArrayIntSetter<ExternalByteArray, int8_t>
+      (this, index, value);
+}
+
+
+Object* ExternalUnsignedByteArray::SetValue(uint32_t index, Object* value) {
+  return ExternalArrayIntSetter<ExternalUnsignedByteArray, uint8_t>
+      (this, index, value);
+}
+
+
+Object* ExternalShortArray::SetValue(uint32_t index, Object* value) {
+  return ExternalArrayIntSetter<ExternalShortArray, int16_t>
+      (this, index, value);
+}
+
+
+Object* ExternalUnsignedShortArray::SetValue(uint32_t index, Object* value) {
+  return ExternalArrayIntSetter<ExternalUnsignedShortArray, uint16_t>
+      (this, index, value);
+}
+
+
+Object* ExternalIntArray::SetValue(uint32_t index, Object* value) {
+  return ExternalArrayIntSetter<ExternalIntArray, int32_t>
+      (this, index, value);
+}
+
+
+Object* ExternalUnsignedIntArray::SetValue(uint32_t index, Object* value) {
+  uint32_t cast_value = 0;
+  if (index < static_cast<uint32_t>(length())) {
+    if (value->IsSmi()) {
+      int int_value = Smi::cast(value)->value();
+      cast_value = static_cast<uint32_t>(int_value);
+    } else if (value->IsHeapNumber()) {
+      double double_value = HeapNumber::cast(value)->value();
+      cast_value = static_cast<uint32_t>(DoubleToUint32(double_value));
+    } else {
+      // Clamp undefined to zero (default). All other types have been
+      // converted to a number type further up in the call chain.
+      ASSERT(value->IsUndefined());
+    }
+    set(index, cast_value);
+  }
+  return Heap::NumberFromUint32(cast_value);
+}
+
+
+Object* ExternalFloatArray::SetValue(uint32_t index, Object* value) {
+  float cast_value = 0;
+  if (index < static_cast<uint32_t>(length())) {
+    if (value->IsSmi()) {
+      int int_value = Smi::cast(value)->value();
+      cast_value = static_cast<float>(int_value);
+    } else if (value->IsHeapNumber()) {
+      double double_value = HeapNumber::cast(value)->value();
+      cast_value = static_cast<float>(double_value);
+    } else {
+      // Clamp undefined to zero (default). All other types have been
+      // converted to a number type further up in the call chain.
+      ASSERT(value->IsUndefined());
+    }
+    set(index, cast_value);
+  }
+  return Heap::AllocateHeapNumber(cast_value);
+}
+
+
 Object* GlobalObject::GetPropertyCell(LookupResult* result) {
   ASSERT(!HasFastProperties());
   Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
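
Worked examples for the setter conversions defined above (illustrative values; the undefined case is the documented default):

    // ExternalIntArray::SetValue,   Smi 42         -> stores 42
    // ExternalIntArray::SetValue,   HeapNumber 3.7 -> stores DoubleToInt32(3.7) == 3
    // ExternalFloatArray::SetValue, HeapNumber 3.7 -> stores 3.7f
    // any of the above,             undefined      -> stores 0
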
diff --git a/src/objects.h b/src/objects.h
index deb0971..61bdf44 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -56,6 +56,14 @@
 //       - Array
 //         - ByteArray
 //         - PixelArray
+//         - ExternalArray
+//           - ExternalByteArray
+//           - ExternalUnsignedByteArray
+//           - ExternalShortArray
+//           - ExternalUnsignedShortArray
+//           - ExternalIntArray
+//           - ExternalUnsignedIntArray
+//           - ExternalFloatArray
 //         - FixedArray
 //           - DescriptorArray
 //           - HashTable
@@ -274,6 +282,16 @@
   V(PROXY_TYPE)                                 \
   V(BYTE_ARRAY_TYPE)                            \
   V(PIXEL_ARRAY_TYPE)                           \
+  /* Note: the order of these external array */ \
+  /* types is relied upon in */                 \
+  /* Object::IsExternalArray(). */              \
+  V(EXTERNAL_BYTE_ARRAY_TYPE)                   \
+  V(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE)          \
+  V(EXTERNAL_SHORT_ARRAY_TYPE)                  \
+  V(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE)         \
+  V(EXTERNAL_INT_ARRAY_TYPE)                    \
+  V(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE)           \
+  V(EXTERNAL_FLOAT_ARRAY_TYPE)                  \
   V(FILLER_TYPE)                                \
                                                 \
   V(ACCESSOR_INFO_TYPE)                         \
@@ -673,6 +691,13 @@
   PROXY_TYPE,
   BYTE_ARRAY_TYPE,
   PIXEL_ARRAY_TYPE,
+  EXTERNAL_BYTE_ARRAY_TYPE,
+  EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
+  EXTERNAL_SHORT_ARRAY_TYPE,
+  EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
+  EXTERNAL_INT_ARRAY_TYPE,
+  EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
+  EXTERNAL_FLOAT_ARRAY_TYPE,
   FILLER_TYPE,
   SMI_TYPE,
 
@@ -780,6 +805,14 @@
   inline bool IsNumber();
   inline bool IsByteArray();
   inline bool IsPixelArray();
+  inline bool IsExternalArray();
+  inline bool IsExternalByteArray();
+  inline bool IsExternalUnsignedByteArray();
+  inline bool IsExternalShortArray();
+  inline bool IsExternalUnsignedShortArray();
+  inline bool IsExternalIntArray();
+  inline bool IsExternalUnsignedIntArray();
+  inline bool IsExternalFloatArray();
   inline bool IsFailure();
   inline bool IsRetryAfterGC();
   inline bool IsOutOfMemoryFailure();
@@ -1049,6 +1082,15 @@
   // View this map word as a forwarding address.
   inline HeapObject* ToForwardingAddress();
 
+  // True if this map word is a serialization address.  This will only be the
+  // case during a destructive serialization of the heap.
+  inline bool IsSerializationAddress();
+
+  // Create a map word from a serialization address.
+  static inline MapWord FromSerializationAddress(int raw);
+
+  // View this map word as a serialization address.
+  inline int ToSerializationAddress();
 
   // Marking phase of full collection: the map word of live objects is
   // marked, and may be marked as overflowed (eg, the object is live, its
@@ -1323,7 +1365,14 @@
   enum ElementsKind {
     FAST_ELEMENTS,
     DICTIONARY_ELEMENTS,
-    PIXEL_ELEMENTS
+    PIXEL_ELEMENTS,
+    EXTERNAL_BYTE_ELEMENTS,
+    EXTERNAL_UNSIGNED_BYTE_ELEMENTS,
+    EXTERNAL_SHORT_ELEMENTS,
+    EXTERNAL_UNSIGNED_SHORT_ELEMENTS,
+    EXTERNAL_INT_ELEMENTS,
+    EXTERNAL_UNSIGNED_INT_ELEMENTS,
+    EXTERNAL_FLOAT_ELEMENTS
   };
 
   // [properties]: Backing storage for properties.
@@ -1343,6 +1392,14 @@
   inline bool HasFastElements();
   inline bool HasDictionaryElements();
   inline bool HasPixelElements();
+  inline bool HasExternalArrayElements();
+  inline bool HasExternalByteElements();
+  inline bool HasExternalUnsignedByteElements();
+  inline bool HasExternalShortElements();
+  inline bool HasExternalUnsignedShortElements();
+  inline bool HasExternalIntElements();
+  inline bool HasExternalUnsignedIntElements();
+  inline bool HasExternalFloatElements();
   inline NumberDictionary* element_dictionary();  // Gets slow elements.
 
   // Collects elements starting at index 0.
@@ -2507,6 +2564,200 @@
 };
 
 
+// An ExternalArray represents a fixed-size array of primitive values
+// which live outside the JavaScript heap. Its subclasses are used to
+// implement the CanvasArray types being defined in the WebGL
+// specification. As of this writing the first public draft is not yet
+// available, but Khronos members can access the draft at:
+//   https://cvs.khronos.org/svn/repos/3dweb/trunk/doc/spec/WebGL-spec.html
+//
+// The semantics of these arrays differ from CanvasPixelArray.
+// Out-of-range values passed to the setter are converted via a C
+// cast, not clamping. Out-of-range indices cause exceptions to be
+// raised rather than being silently ignored.
+class ExternalArray: public Array {
+ public:
+  // [external_pointer]: The pointer to the external memory area backing this
+  // external array.
+  DECL_ACCESSORS(external_pointer, void)  // Pointer to the data store.
+
+  // Casting.
+  static inline ExternalArray* cast(Object* obj);
+
+  // Maximal acceptable length for an external array.
+  static const int kMaxLength = 0x3fffffff;
+
+  // ExternalArray headers are not quadword aligned.
+  static const int kExternalPointerOffset = Array::kAlignedSize;
+  static const int kHeaderSize = kExternalPointerOffset + kPointerSize;
+  static const int kAlignedSize = OBJECT_SIZE_ALIGN(kHeaderSize);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalArray);
+};
+
+
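
The "C cast, not clamping" rule from the comment above, demonstrated with plain C++ (a sketch; the signed result assumes the usual two's-complement wrap):

    #include <stdint.h>

    int main() {
      int8_t as_byte = static_cast<int8_t>(300);    // 44, not clamped to 127
      uint8_t as_ubyte = static_cast<uint8_t>(300); // 44: 300 mod 256
      // CanvasPixelArray semantics would instead clamp 300 to 255.
      return (as_byte == 44 && as_ubyte == 44) ? 0 : 1;
    }
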
+class ExternalByteArray: public ExternalArray {
+ public:
+  // Setter and getter.
+  inline int8_t get(int index);
+  inline void set(int index, int8_t value);
+
+  // This accessor applies the correct conversion from Smi, HeapNumber
+  // and undefined.
+  Object* SetValue(uint32_t index, Object* value);
+
+  // Casting.
+  static inline ExternalByteArray* cast(Object* obj);
+
+#ifdef DEBUG
+  void ExternalByteArrayPrint();
+  void ExternalByteArrayVerify();
+#endif  // DEBUG
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalByteArray);
+};
+
+
+class ExternalUnsignedByteArray: public ExternalArray {
+ public:
+  // Setter and getter.
+  inline uint8_t get(int index);
+  inline void set(int index, uint8_t value);
+
+  // This accessor applies the correct conversion from Smi, HeapNumber
+  // and undefined.
+  Object* SetValue(uint32_t index, Object* value);
+
+  // Casting.
+  static inline ExternalUnsignedByteArray* cast(Object* obj);
+
+#ifdef DEBUG
+  void ExternalUnsignedByteArrayPrint();
+  void ExternalUnsignedByteArrayVerify();
+#endif  // DEBUG
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUnsignedByteArray);
+};
+
+
+class ExternalShortArray: public ExternalArray {
+ public:
+  // Setter and getter.
+  inline int16_t get(int index);
+  inline void set(int index, int16_t value);
+
+  // This accessor applies the correct conversion from Smi, HeapNumber
+  // and undefined.
+  Object* SetValue(uint32_t index, Object* value);
+
+  // Casting.
+  static inline ExternalShortArray* cast(Object* obj);
+
+#ifdef DEBUG
+  void ExternalShortArrayPrint();
+  void ExternalShortArrayVerify();
+#endif  // DEBUG
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalShortArray);
+};
+
+
+class ExternalUnsignedShortArray: public ExternalArray {
+ public:
+  // Setter and getter.
+  inline uint16_t get(int index);
+  inline void set(int index, uint16_t value);
+
+  // This accessor applies the correct conversion from Smi, HeapNumber
+  // and undefined.
+  Object* SetValue(uint32_t index, Object* value);
+
+  // Casting.
+  static inline ExternalUnsignedShortArray* cast(Object* obj);
+
+#ifdef DEBUG
+  void ExternalUnsignedShortArrayPrint();
+  void ExternalUnsignedShortArrayVerify();
+#endif  // DEBUG
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUnsignedShortArray);
+};
+
+
+class ExternalIntArray: public ExternalArray {
+ public:
+  // Setter and getter.
+  inline int32_t get(int index);
+  inline void set(int index, int32_t value);
+
+  // This accessor applies the correct conversion from Smi, HeapNumber
+  // and undefined.
+  Object* SetValue(uint32_t index, Object* value);
+
+  // Casting.
+  static inline ExternalIntArray* cast(Object* obj);
+
+#ifdef DEBUG
+  void ExternalIntArrayPrint();
+  void ExternalIntArrayVerify();
+#endif  // DEBUG
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalIntArray);
+};
+
+
+class ExternalUnsignedIntArray: public ExternalArray {
+ public:
+  // Setter and getter.
+  inline uint32_t get(int index);
+  inline void set(int index, uint32_t value);
+
+  // This accessor applies the correct conversion from Smi, HeapNumber
+  // and undefined.
+  Object* SetValue(uint32_t index, Object* value);
+
+  // Casting.
+  static inline ExternalUnsignedIntArray* cast(Object* obj);
+
+#ifdef DEBUG
+  void ExternalUnsignedIntArrayPrint();
+  void ExternalUnsignedIntArrayVerify();
+#endif  // DEBUG
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUnsignedIntArray);
+};
+
+
+class ExternalFloatArray: public ExternalArray {
+ public:
+  // Setter and getter.
+  inline float get(int index);
+  inline void set(int index, float value);
+
+  // This accessor applies the correct conversion from Smi, HeapNumber
+  // and undefined.
+  Object* SetValue(uint32_t index, Object* value);
+
+  // Casting.
+  static inline ExternalFloatArray* cast(Object* obj);
+
+#ifdef DEBUG
+  void ExternalFloatArrayPrint();
+  void ExternalFloatArrayVerify();
+#endif  // DEBUG
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalFloatArray);
+};
+
+
 // Code describes objects with on-the-fly generated machine code.
 class Code: public HeapObject {
  public:
@@ -3819,10 +4070,8 @@
   static const int kSize = kLengthOffset + kIntSize;
   // Notice: kSize is not pointer-size aligned if pointers are 64-bit.
 
-  // Limits on sizes of different types of strings.
-  static const int kMaxShortStringSize = 63;
-  static const int kMaxMediumStringSize = 16383;
-
+  // Maximum number of characters to consider when trying to convert a string
+  // value into an array index.
   static const int kMaxArrayIndexSize = 10;
 
   // Max ascii char code.
@@ -3846,13 +4095,17 @@
   // field.
   static const int kMaxCachedArrayIndexLength = 7;
 
-  // Shift constants for retriving length and hash code from
+  // Shift constants for retrieving length and hash code from
   // length/hash field.
   static const int kHashShift = kNofLengthBitFields;
   static const int kShortLengthShift = kHashShift + kShortStringTag;
   static const int kMediumLengthShift = kHashShift + kMediumStringTag;
   static const int kLongLengthShift = kHashShift + kLongStringTag;
-  // Maximal string length that can be stored in the hash/length field.
+
+  // Maximal string length that can be stored in the hash/length field for
+  // different types of strings.
+  static const int kMaxShortSize = (1 << (32 - kShortLengthShift)) - 1;
+  static const int kMaxMediumSize = (1 << (32 - kMediumLengthShift)) - 1;
   static const int kMaxLength = (1 << (32 - kLongLengthShift)) - 1;
 
   // Limit for truncation in short printing.
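
The new maxima follow directly from how many of the 32 bits in the
length/hash field remain for the length after the shift. A worked example
under an assumed shift value (the real kLongLengthShift depends on
kNofLengthBitFields and the string tag constants, so the constant here is
an assumption for illustration only):

    #include <assert.h>

    int main() {
      const int kAssumedLongLengthShift = 6;  // assumption, not the V8 value
      const int kMaxLength = (1 << (32 - kAssumedLongLengthShift)) - 1;
      assert(kMaxLength == 67108863);  // 2^26 - 1 characters
      return 0;
    }
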
@@ -4466,6 +4719,7 @@
   DECL_ACCESSORS(data, Object)
   DECL_ACCESSORS(name, Object)
   DECL_ACCESSORS(flag, Smi)
+  DECL_ACCESSORS(load_stub_cache, Object)
 
   inline bool all_can_read();
   inline void set_all_can_read(bool value);
@@ -4491,7 +4745,8 @@
   static const int kDataOffset = kSetterOffset + kPointerSize;
   static const int kNameOffset = kDataOffset + kPointerSize;
   static const int kFlagOffset = kNameOffset + kPointerSize;
-  static const int kSize = kFlagOffset + kPointerSize;
+  static const int kLoadStubCacheOffset = kFlagOffset + kPointerSize;
+  static const int kSize = kLoadStubCacheOffset + kPointerSize;
 
  private:
   // Bit positions in flag.
@@ -4863,6 +5118,8 @@
   // Intended for serialization/deserialization checking: insert, or
   // check for the presence of, a tag at this position in the stream.
   virtual void Synchronize(const char* tag) {}
+#else
+  inline void Synchronize(const char* tag) {}
 #endif
 };
 
diff --git a/src/platform-nullos.cc b/src/platform-nullos.cc
index c0cf7f4..084880e 100644
--- a/src/platform-nullos.cc
+++ b/src/platform-nullos.cc
@@ -47,6 +47,13 @@
 }
 
 
+// Give V8 the opportunity to override the default fmod behavior.
+double modulo(double x, double y) {
+  UNIMPLEMENTED();
+  return 0;
+}
+
+
 // Initialize OS class early in the V8 startup.
 void OS::Setup() {
   // Seed the random number generator.
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index b8fe967..1e1245c 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -54,6 +54,12 @@
 namespace v8 {
 namespace internal {
 
+// ----------------------------------------------------------------------------
+// Math functions
+
+double modulo(double x, double y) {
+  return fmod(x, y);
+}
 
 // ----------------------------------------------------------------------------
 // POSIX date/time support.
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 26e5ce5..d1f5319 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -48,10 +48,10 @@
 #ifndef NOMCX
 #define NOMCX
 #endif
-// Require Windows 2000 or higher (this is required for the IsDebuggerPresent
+// Require Windows XP or higher (this is required for the RtlCaptureContext
 // function to be present).
 #ifndef _WIN32_WINNT
-#define _WIN32_WINNT 0x500
+#define _WIN32_WINNT 0x501
 #endif
 
 #include <windows.h>
@@ -223,6 +223,31 @@
   return ceil(x);
 }
 
+#ifdef _WIN64
+typedef double (*ModuloFunction)(double, double);
+
+// Defined in codegen-x64.cc.
+ModuloFunction CreateModuloFunction();
+
+double modulo(double x, double y) {
+  static ModuloFunction function = CreateModuloFunction();
+  return function(x, y);
+}
+#else  // Win32
+
+double modulo(double x, double y) {
+  // Workaround MS fmod bugs. ECMA-262 says:
+  // dividend is finite and divisor is an infinity => result equals dividend
+  // dividend is a zero and divisor is nonzero finite => result equals dividend
+  if (!(isfinite(x) && (!isfinite(y) && !isnan(y))) &&
+      !(x == 0 && (y != 0 && isfinite(y)))) {
+    x = fmod(x, y);
+  }
+  return x;
+}
+
+#endif  // _WIN64
+
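The two guarded cases come straight from ECMA-262's definition of the
remainder operation. A small sketch of the behavior the workaround
preserves, assuming the modulo declaration from platform.h links in:

    #include <assert.h>
    #include <limits>

    namespace v8 { namespace internal {
    double modulo(double x, double y);  // declared in platform.h
    } }

    int main() {
      using v8::internal::modulo;
      double inf = std::numeric_limits<double>::infinity();
      assert(modulo(5.0, inf) == 5.0);  // finite % infinity == dividend
      assert(modulo(0.0, 3.0) == 0.0);  // zero % nonzero finite == dividend
      double r = modulo(5.0, 0.0);      // x % 0 is NaN, straight from fmod
      assert(r != r);                   // NaN compares unequal to itself
      return 0;
    }
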
 // ----------------------------------------------------------------------------
 // The Time class represents time on win32. A timestamp is represented as
 // a 64-bit integer in 100 nano-seconds since January 1, 1601 (UTC). JavaScript
@@ -1183,22 +1208,7 @@
 
   // Capture current context.
   CONTEXT context;
-  memset(&context, 0, sizeof(context));
-  context.ContextFlags = CONTEXT_CONTROL;
-  context.ContextFlags = CONTEXT_CONTROL;
-#ifdef  _WIN64
-  // TODO(X64): Implement context capture.
-#else
-  __asm    call x
-  __asm x: pop eax
-  __asm    mov context.Eip, eax
-  __asm    mov context.Ebp, ebp
-  __asm    mov context.Esp, esp
-  // NOTE: At some point, we could use RtlCaptureContext(&context) to
-  // capture the context instead of inline assembler. However it is
-  // only available on XP, Vista, Server 2003 and Server 2008 which
-  // might not be sufficient.
-#endif
+  RtlCaptureContext(&context);
 
   // Initialize the stack walking
   STACKFRAME64 stack_frame;
@@ -1308,7 +1318,9 @@
 
 double OS::nan_value() {
 #ifdef _MSC_VER
-  static const __int64 nanval = 0xfff8000000000000;
+  // A quiet NaN with no payload (aka Indeterminate) has all bits in the
+  // mask set, so the value equals the mask.
+  static const __int64 nanval = kQuietNaNMask;
   return *reinterpret_cast<const double*>(&nanval);
 #else  // _MSC_VER
   return NAN;
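
For reference, a sketch of why that bit pattern is a NaN (exponent all
ones, top mantissa bit set marks it quiet):

    #include <stdint.h>
    #include <string.h>
    #include <assert.h>

    int main() {
      const uint64_t bits = 0xfff8000000000000ULL;  // sign | 0x7ff exponent | quiet bit
      double value;
      memcpy(&value, &bits, sizeof(value));
      assert(value != value);  // NaN is the only value unequal to itself
      return 0;
    }
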
diff --git a/src/platform.h b/src/platform.h
index 76bf891..fefe4b8 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -111,6 +111,7 @@
 class Semaphore;
 
 double ceiling(double x);
+double modulo(double x, double y);
 
 // Forward declarations.
 class Socket;
diff --git a/src/regexp-macro-assembler.h b/src/regexp-macro-assembler.h
index 26aab2c..aa01096 100644
--- a/src/regexp-macro-assembler.h
+++ b/src/regexp-macro-assembler.h
@@ -215,22 +215,6 @@
                         bool at_start);
 };
 
-
-// Enter C code from generated RegExp code in a way that allows
-// the C code to fix the return address in case of a GC.
-// Currently only needed on ARM.
-class RegExpCEntryStub: public CodeStub {
- public:
-  RegExpCEntryStub() {}
-  virtual ~RegExpCEntryStub() {}
-  void Generate(MacroAssembler* masm);
-
- private:
-  Major MajorKey() { return RegExpCEntry; }
-  int MinorKey() { return 0; }
-  const char* GetName() { return "RegExpCEntryStub"; }
-};
-
 #endif  // V8_NATIVE_REGEXP
 
 } }  // namespace v8::internal
diff --git a/src/runtime.cc b/src/runtime.cc
index 9eeffd1..76520e3 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -156,7 +156,7 @@
 
   // Deep copy local elements.
   // Pixel elements cannot be created using an object literal.
-  ASSERT(!copy->HasPixelElements());
+  ASSERT(!copy->HasPixelElements() && !copy->HasExternalArrayElements());
   switch (copy->GetElementsKind()) {
     case JSObject::FAST_ELEMENTS: {
       FixedArray* elements = FixedArray::cast(copy->elements());
@@ -577,8 +577,8 @@
   HandleScope scope;
   Handle<GlobalObject> global = Handle<GlobalObject>(Top::context()->global());
 
-  CONVERT_ARG_CHECKED(FixedArray, pairs, 0);
-  Handle<Context> context = args.at<Context>(1);
+  Handle<Context> context = args.at<Context>(0);
+  CONVERT_ARG_CHECKED(FixedArray, pairs, 1);
   bool is_eval = Smi::cast(args[2])->value() == 1;
 
   // Compute the property attributes. According to ECMA-262, section
@@ -1357,8 +1357,9 @@
           StringBuilderSubstringPosition::encode(from);
       AddElement(Smi::FromInt(encoded_slice));
     } else {
-      Handle<String> slice = Factory::NewStringSlice(subject_, from, to);
-      AddElement(*slice);
+      // Too long for the one-smi encoding: use two smis, the negated
+      // length followed by the position.
+      AddElement(Smi::FromInt(-length));
+      AddElement(Smi::FromInt(from));
     }
     IncrementCharacterCount(length);
   }
@@ -3742,14 +3743,7 @@
   CONVERT_DOUBLE_CHECKED(x, args[0]);
   CONVERT_DOUBLE_CHECKED(y, args[1]);
 
-#if defined WIN32 || defined _WIN64
-  // Workaround MS fmod bugs. ECMA-262 says:
-  // dividend is finite and divisor is an infinity => result equals dividend
-  // dividend is a zero and divisor is nonzero finite => result equals dividend
-  if (!(isfinite(x) && (!isfinite(y) && !isnan(y))) &&
-      !(x == 0 && (y != 0 && isfinite(y))))
-#endif
-  x = fmod(x, y);
+  x = modulo(x, y);
   // NewNumberFromDouble may return a Smi instead of a Number object
   return Heap::NewNumberFromDouble(x);
 }
@@ -3773,9 +3767,21 @@
   for (int i = 0; i < array_length; i++) {
     Object* element = fixed_array->get(i);
     if (element->IsSmi()) {
+      // Smi encoding of position and length.
       int encoded_slice = Smi::cast(element)->value();
-      int pos = StringBuilderSubstringPosition::decode(encoded_slice);
-      int len = StringBuilderSubstringLength::decode(encoded_slice);
+      int pos;
+      int len;
+      if (encoded_slice > 0) {
+        // Position and length encoded in one smi.
+        pos = StringBuilderSubstringPosition::decode(encoded_slice);
+        len = StringBuilderSubstringLength::decode(encoded_slice);
+      } else {
+        // Position and length encoded in two smis.
+        Object* obj = fixed_array->get(++i);
+        ASSERT(obj->IsSmi());
+        pos = Smi::cast(obj)->value();
+        len = -encoded_slice;
+      }
       String::WriteToFlat(special,
                           sink + position,
                           pos,
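
The encoder (in the AddElement hunk above and in string.js below) and this
decoder agree on one scheme: short slices pack position and length into a
single positive smi (position shifted left by 11 bits, length in the low
11 bits), while longer slices spend two smis, the negated length followed
by the position. A self-contained sketch of that scheme:

    #include <assert.h>
    #include <vector>

    // Sketch of the slice encoding; limits match the 11-bit length field.
    static void EncodeSlice(std::vector<int>* out, int pos, int len) {
      if (pos < 0x80000 && len < 0x800) {
        out->push_back((pos << 11) | len);  // one smi: position and length
      } else {
        out->push_back(-len);               // two smis: negated length first...
        out->push_back(pos);                // ...then the position
      }
    }

    int main() {
      std::vector<int> elements;
      EncodeSlice(&elements, 100, 7);     // fits in one element
      EncodeSlice(&elements, 100, 5000);  // too long: takes two elements
      assert(elements.size() == 3);
      assert(elements[0] == (100 << 11) + 7);
      assert(elements[1] == -5000 && elements[2] == 100);
      return 0;
    }
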
@@ -3796,6 +3802,10 @@
   ASSERT(args.length() == 2);
   CONVERT_CHECKED(JSArray, array, args[0]);
   CONVERT_CHECKED(String, special, args[1]);
+
+  // The slice encoding (in one or two smis) relies on this assumption.
+  ASSERT(Smi::kMaxValue >= String::kMaxLength);
+
   int special_length = special->length();
   Object* smi_array_length = array->length();
   if (!smi_array_length->IsSmi()) {
@@ -3823,13 +3833,29 @@
   for (int i = 0; i < array_length; i++) {
     Object* elt = fixed_array->get(i);
     if (elt->IsSmi()) {
+      // Smi encoding of position and length.
       int len = Smi::cast(elt)->value();
-      int pos = len >> 11;
-      len &= 0x7ff;
-      if (pos + len > special_length) {
-        return Top::Throw(Heap::illegal_argument_symbol());
+      if (len > 0) {
+        // Position and length encoded in one smi.
+        int pos = len >> 11;
+        len &= 0x7ff;
+        if (pos + len > special_length) {
+          return Top::Throw(Heap::illegal_argument_symbol());
+        }
+        position += len;
+      } else {
+        // Position and length encoded in two smis.
+        position += (-len);
+        // Get the position and check that it is also a smi.
+        i++;
+        if (i >= array_length) {
+          return Top::Throw(Heap::illegal_argument_symbol());
+        }
+        Object* pos = fixed_array->get(i);
+        if (!pos->IsSmi()) {
+          return Top::Throw(Heap::illegal_argument_symbol());
+        }
       }
-      position += len;
     } else if (elt->IsString()) {
       String* element = String::cast(elt);
       int element_length = element->length();
@@ -4367,8 +4393,8 @@
 static Object* Runtime_NewClosure(Arguments args) {
   HandleScope scope;
   ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(JSFunction, boilerplate, 0);
-  CONVERT_ARG_CHECKED(Context, context, 1);
+  CONVERT_ARG_CHECKED(Context, context, 0);
+  CONVERT_ARG_CHECKED(JSFunction, boilerplate, 1);
 
   Handle<JSFunction> result =
       Factory::NewFunctionFromBoilerplate(boilerplate, context);
@@ -4804,6 +4830,12 @@
 }
 
 
+static Object* Runtime_PromoteScheduledException(Arguments args) {
+  ASSERT_EQ(0, args.length());
+  return Top::PromoteScheduledException();
+}
+
+
 static Object* Runtime_ThrowReferenceError(Arguments args) {
   HandleScope scope;
   ASSERT(args.length() == 1);
@@ -5273,6 +5305,47 @@
 };
 
 
+template<class ExternalArrayClass, class ElementType>
+static uint32_t IterateExternalArrayElements(Handle<JSObject> receiver,
+                                             bool elements_are_ints,
+                                             bool elements_are_guaranteed_smis,
+                                             uint32_t range,
+                                             ArrayConcatVisitor* visitor) {
+  Handle<ExternalArrayClass> array(
+      ExternalArrayClass::cast(receiver->elements()));
+  uint32_t len = Min(static_cast<uint32_t>(array->length()), range);
+
+  if (visitor != NULL) {
+    if (elements_are_ints) {
+      if (elements_are_guaranteed_smis) {
+        for (uint32_t j = 0; j < len; j++) {
+          Handle<Smi> e(Smi::FromInt(static_cast<int>(array->get(j))));
+          visitor->visit(j, e);
+        }
+      } else {
+        for (uint32_t j = 0; j < len; j++) {
+          int64_t val = static_cast<int64_t>(array->get(j));
+          if (Smi::IsValid(static_cast<intptr_t>(val))) {
+            Handle<Smi> e(Smi::FromInt(static_cast<int>(val)));
+            visitor->visit(j, e);
+          } else {
+            Handle<Object> e(
+                Heap::AllocateHeapNumber(static_cast<ElementType>(val)));
+            visitor->visit(j, e);
+          }
+        }
+      }
+    } else {
+      for (uint32_t j = 0; j < len; j++) {
+        Handle<Object> e(Heap::AllocateHeapNumber(array->get(j)));
+        visitor->visit(j, e);
+      }
+    }
+  }
+
+  return len;
+}
+
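The elements_are_guaranteed_smis flag matters because byte and short
elements always fit in a smi, while a full 32-bit element may not: a smi
carries 31 bits of payload on 32-bit targets. A quick illustration of the
boundary, assuming that 31-bit layout:

    #include <assert.h>
    #include <stdint.h>

    int main() {
      const int32_t kAssumedSmiMax = (1 << 30) - 1;  // assumption: 31-bit smis
      int32_t small = 1000;     // representable as a smi directly
      int32_t large = 1 << 30;  // a valid int32 element, but not a smi
      assert(small <= kAssumedSmiMax);
      assert(large > kAssumedSmiMax);  // must be boxed as a HeapNumber
      return 0;
    }
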
 /**
  * A helper function that visits elements of a JSObject. Only elements
  * whose index between 0 and range (exclusive) are visited.
@@ -5322,6 +5395,48 @@
       }
       break;
     }
+    case JSObject::EXTERNAL_BYTE_ELEMENTS: {
+      num_of_elements =
+          IterateExternalArrayElements<ExternalByteArray, int8_t>(
+              receiver, true, true, range, visitor);
+      break;
+    }
+    case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
+      num_of_elements =
+          IterateExternalArrayElements<ExternalUnsignedByteArray, uint8_t>(
+              receiver, true, true, range, visitor);
+      break;
+    }
+    case JSObject::EXTERNAL_SHORT_ELEMENTS: {
+      num_of_elements =
+          IterateExternalArrayElements<ExternalShortArray, int16_t>(
+              receiver, true, true, range, visitor);
+      break;
+    }
+    case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
+      num_of_elements =
+          IterateExternalArrayElements<ExternalUnsignedShortArray, uint16_t>(
+              receiver, true, true, range, visitor);
+      break;
+    }
+    case JSObject::EXTERNAL_INT_ELEMENTS: {
+      num_of_elements =
+          IterateExternalArrayElements<ExternalIntArray, int32_t>(
+              receiver, true, false, range, visitor);
+      break;
+    }
+    case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS: {
+      num_of_elements =
+          IterateExternalArrayElements<ExternalUnsignedIntArray, uint32_t>(
+              receiver, true, false, range, visitor);
+      break;
+    }
+    case JSObject::EXTERNAL_FLOAT_ELEMENTS: {
+      num_of_elements =
+          IterateExternalArrayElements<ExternalFloatArray, float>(
+              receiver, false, false, range, visitor);
+      break;
+    }
     case JSObject::DICTIONARY_ELEMENTS: {
       Handle<NumberDictionary> dict(receiver->element_dictionary());
       uint32_t capacity = dict->Capacity();
@@ -7659,6 +7774,18 @@
 }
 
 
+// Returns V8 version as a string.
+static Object* Runtime_GetV8Version(Arguments args) {
+  ASSERT_EQ(args.length(), 0);
+
+  NoHandleAllocation ha;
+
+  const char* version_string = v8::V8::GetVersion();
+
+  return Heap::AllocateStringFromAscii(CStrVector(version_string), NOT_TENURED);
+}
+
+
 static Object* Runtime_Abort(Arguments args) {
   ASSERT(args.length() == 2);
   OS::PrintError("abort: %s\n", reinterpret_cast<char*>(args[0]) +
@@ -7670,6 +7797,13 @@
 }
 
 
+static Object* Runtime_DeleteHandleScopeExtensions(Arguments args) {
+  ASSERT(args.length() == 0);
+  HandleScope::DeleteExtensions();
+  return Heap::undefined_value();
+}
+
+
 #ifdef DEBUG
 // ListNatives is ONLY used by the fuzz-natives.js in debug mode
 // Exclude the code in release mode.
diff --git a/src/runtime.h b/src/runtime.h
index 279181d..a55ef25 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -175,6 +175,7 @@
   F(FunctionIsBuiltin, 1, 1) \
   F(GetScript, 1, 1) \
   F(CollectStackTrace, 2, 1) \
+  F(GetV8Version, 0, 1) \
   \
   F(ClassOf, 1, 1) \
   F(SetCode, 2, 1) \
@@ -233,6 +234,7 @@
   F(ReThrow, 1, 1) \
   F(ThrowReferenceError, 1, 1) \
   F(StackGuard, 1, 1) \
+  F(PromoteScheduledException, 0, 1) \
   \
   /* Contexts */ \
   F(NewContext, 1, 1) \
@@ -262,6 +264,8 @@
   F(Log, 2, 1) \
   /* ES5 */ \
   F(LocalKeys, 1, 1) \
+  /* Handle scopes */ \
+  F(DeleteHandleScopeExtensions, 0, 1) \
   \
   /* Pseudo functions - handled as macros by parser */ \
   F(IS_VAR, 1, 1)
diff --git a/src/runtime.js b/src/runtime.js
index 789bfdb..ba19871 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -128,7 +128,10 @@
   if (IS_STRING(a) && IS_STRING(b)) {
     return %StringCompare(a, b);
   } else {
-    return %NumberCompare(%ToNumber(a), %ToNumber(b), ncr);
+    var a_number = %ToNumber(a);
+    var b_number = %ToNumber(b);
+    if (NUMBER_IS_NAN(a_number) || NUMBER_IS_NAN(b_number)) return ncr;
+    return %NumberCompare(a_number, b_number, ncr);
   }
 }
 
diff --git a/src/serialize.cc b/src/serialize.cc
index 6ff1d7f..6eedeef 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -1417,7 +1417,27 @@
 #endif
 
 
+class NoGlobalHandlesChecker : public ObjectVisitor {
+ public:
+  virtual void VisitPointers(Object** start, Object** end) {
+    ASSERT(false);
+  }
+};
+
+
+class GlobalHandleDestroyer : public ObjectVisitor {
+  void VisitPointers(Object** start, Object** end) {
+    while (start < end) {
+      GlobalHandles::Destroy(start++);
+    }
+  }
+};
+
+
 void Deserializer::Deserialize() {
+  // No global handles.
+  NoGlobalHandlesChecker checker;
+  GlobalHandles::IterateRoots(&checker);
   // No active threads.
   ASSERT_EQ(NULL, ThreadState::FirstInUse());
   // No active handles.
@@ -1428,6 +1448,10 @@
   GetHeader();
   Heap::IterateRoots(this);
   GetContextStack();
+  // Any global handles that have been set up by deserialization are leaked
+  // since no one is keeping track of them, so we discard them now.
+  GlobalHandleDestroyer destroyer;
+  GlobalHandles::IterateRoots(&destroyer);
 }
 
 
@@ -1740,4 +1764,488 @@
 }
 
 
+Deserializer2::Deserializer2(SnapshotByteSource* source)
+    : source_(source),
+      external_reference_decoder_(NULL) {
+  for (int i = 0; i <= LAST_SPACE; i++) {
+    fullness_[i] = 0;
+  }
+}
+
+
+// This routine allocates a new object and keeps track of where objects
+// have been allocated so that back references can be fixed up when
+// deserializing.
+Address Deserializer2::Allocate(int space_index, int size) {
+  HeapObject* new_object;
+  int old_fullness = CurrentAllocationAddress(space_index);
+  // When we start a new page we need to record its location.
+  bool record_page = (old_fullness == 0);
+  if (SpaceIsPaged(space_index)) {
+    PagedSpace* space;
+    switch (space_index) {
+      case OLD_DATA_SPACE: space = Heap::old_data_space(); break;
+      case OLD_POINTER_SPACE: space = Heap::old_pointer_space(); break;
+      case MAP_SPACE: space = Heap::map_space(); break;
+      case CODE_SPACE: space = Heap::code_space(); break;
+      case CELL_SPACE: space = Heap::cell_space(); break;
+      default: UNREACHABLE(); space = NULL; break;
+    }
+    ASSERT(size <= Page::kPageSize - Page::kObjectStartOffset);
+    int current_page = old_fullness >> Page::kPageSizeBits;
+    int new_fullness = old_fullness + size;
+    int new_page = new_fullness >> Page::kPageSizeBits;
+    // What is our new position within the current page.
+    int intra_page_offset = new_fullness - current_page * Page::kPageSize;
+    if (intra_page_offset > Page::kPageSize - Page::kObjectStartOffset) {
+      // This object will not fit in a page and we have to move to the next.
+      new_page = current_page + 1;
+      old_fullness = new_page << Page::kPageSizeBits;
+      new_fullness = old_fullness + size;
+      record_page = true;
+    }
+    fullness_[space_index] = new_fullness;
+    Object* new_allocation = space->AllocateRaw(size);
+    new_object = HeapObject::cast(new_allocation);
+    ASSERT(!new_object->IsFailure());
+    ASSERT((reinterpret_cast<intptr_t>(new_object->address()) &
+            Page::kPageAlignmentMask) ==
+           (old_fullness & Page::kPageAlignmentMask) +
+            Page::kObjectStartOffset);
+  } else if (SpaceIsLarge(space_index)) {
+    ASSERT(size > Page::kPageSize - Page::kObjectStartOffset);
+    fullness_[LO_SPACE]++;
+    LargeObjectSpace* lo_space = Heap::lo_space();
+    Object* new_allocation;
+    if (space_index == kLargeData) {
+      new_allocation = lo_space->AllocateRaw(size);
+    } else if (space_index == kLargeFixedArray) {
+      new_allocation = lo_space->AllocateRawFixedArray(size);
+    } else {
+      ASSERT(space_index == kLargeCode);
+      new_allocation = lo_space->AllocateRawCode(size);
+    }
+    ASSERT(!new_allocation->IsFailure());
+    new_object = HeapObject::cast(new_allocation);
+    record_page = true;
+    // The page recording below records all large objects in the same space.
+    space_index = LO_SPACE;
+  } else {
+    ASSERT(space_index == NEW_SPACE);
+    Object* new_allocation = Heap::new_space()->AllocateRaw(size);
+    fullness_[space_index] += size;
+    ASSERT(!new_allocation->IsFailure());
+    new_object = HeapObject::cast(new_allocation);
+  }
+  Address address = new_object->address();
+  if (record_page) {
+    pages_[space_index].Add(address);
+  }
+  return address;
+}
+
+
+// This returns the address of an object that has been described in the
+// snapshot as being offset bytes back in a particular space.
+HeapObject* Deserializer2::GetAddress(int space) {
+  int offset = source_->GetInt();
+  if (SpaceIsLarge(space)) {
+    // Large spaces have one object per 'page'.
+    return HeapObject::FromAddress(
+        pages_[LO_SPACE][fullness_[LO_SPACE] - offset]);
+  }
+  offset <<= kObjectAlignmentBits;
+  if (space == NEW_SPACE) {
+    // New space records only one page - numbered 0.
+    return HeapObject::FromAddress(
+        pages_[space][0] + fullness_[space] - offset);
+  }
+  ASSERT(SpaceIsPaged(space));
+  int virtual_address = fullness_[space] - offset;
+  int page_of_pointee = (virtual_address) >> Page::kPageSizeBits;
+  Address object_address = pages_[space][page_of_pointee] +
+                           (virtual_address & Page::kPageAlignmentMask);
+  return HeapObject::FromAddress(object_address);
+}
+
+
+void Deserializer2::Deserialize() {
+  // Don't GC while deserializing - just expand the heap.
+  AlwaysAllocateScope always_allocate;
+  // Don't use the free lists while deserializing.
+  LinearAllocationScope allocate_linearly;
+  // No active threads.
+  ASSERT_EQ(NULL, ThreadState::FirstInUse());
+  // No active handles.
+  ASSERT(HandleScopeImplementer::instance()->blocks()->is_empty());
+  ASSERT(external_reference_decoder_ == NULL);
+  external_reference_decoder_ = new ExternalReferenceDecoder();
+  Heap::IterateRoots(this);
+  ASSERT(source_->AtEOF());
+  delete external_reference_decoder_;
+  external_reference_decoder_ = NULL;
+}
+
+
+// This is called on the roots.  It is the driver of the deserialization
+// process.
+void Deserializer2::VisitPointers(Object** start, Object** end) {
+  for (Object** current = start; current < end; current++) {
+    DataType data = static_cast<DataType>(source_->Get());
+    if (data == SMI_SERIALIZATION) {
+      *current = Smi::FromInt(source_->GetInt() - kSmiBias);
+    } else if (data == BACKREF_SERIALIZATION) {
+      int space = source_->Get();
+      *current = GetAddress(space);
+    } else {
+      ASSERT(data == OBJECT_SERIALIZATION);
+      ReadObject(current);
+    }
+  }
+}
+
+
+// This routine writes the new object into the pointer provided and then
+// returns true if the new object was in young space and false otherwise.
+// The reason for this strange interface is that otherwise the object is
+// written very late, which means the ByteArray map is not set up by the
+// time we need it to mark the free space at the end of a page (by
+// turning that space into a byte array).
+bool Deserializer2::ReadObject(Object** write_back) {
+  int space = source_->Get();
+  int size = source_->GetInt() << kObjectAlignmentBits;
+  Address address = Allocate(space, size);
+  *write_back = HeapObject::FromAddress(address);
+  Object** current = reinterpret_cast<Object**>(address);
+  Object** limit = current + (size >> kPointerSizeLog2);
+  while (current < limit) {
+    DataType data = static_cast<DataType>(source_->Get());
+    switch (data) {
+      case SMI_SERIALIZATION:
+        *current++ = Smi::FromInt(source_->GetInt() - kSmiBias);
+        break;
+      case RAW_DATA_SERIALIZATION: {
+        int size = source_->GetInt();
+        byte* raw_data_out = reinterpret_cast<byte*>(current);
+        for (int j = 0; j < size; j++) {
+          *raw_data_out++ = source_->Get();
+        }
+        current = reinterpret_cast<Object**>(raw_data_out);
+        break;
+      }
+      case OBJECT_SERIALIZATION: {
+        // Recurse to unpack an object that is forward-referenced from here.
+        bool in_new_space = ReadObject(current);
+        if (in_new_space && space != NEW_SPACE) {
+          Heap::RecordWrite(address,
+                            reinterpret_cast<Address>(current) - address);
+        }
+        current++;
+        break;
+      }
+      case CODE_OBJECT_SERIALIZATION: {
+        Object* new_code_object = NULL;
+        ReadObject(&new_code_object);
+        Code* code_object = reinterpret_cast<Code*>(new_code_object);
+        // Setting a branch/call to another code object from code.
+        Address location_of_branch_data = reinterpret_cast<Address>(current);
+        Assembler::set_target_at(location_of_branch_data,
+                                 code_object->instruction_start());
+        location_of_branch_data += Assembler::kCallTargetSize;
+        current = reinterpret_cast<Object**>(location_of_branch_data);
+        break;
+      }
+      case BACKREF_SERIALIZATION: {
+        // Write a backreference to an object we unpacked earlier.
+        int backref_space = source_->Get();
+        if (backref_space == NEW_SPACE && space != NEW_SPACE) {
+          Heap::RecordWrite(address,
+                            reinterpret_cast<Address>(current) - address);
+        }
+        *current++ = GetAddress(backref_space);
+        break;
+      }
+      case CODE_BACKREF_SERIALIZATION: {
+        int backref_space = source_->Get();
+        // Can't use Code::cast because heap is not set up yet and assertions
+        // will fail.
+        Code* code_object = reinterpret_cast<Code*>(GetAddress(backref_space));
+        // Setting a branch/call to previously decoded code object from code.
+        Address location_of_branch_data = reinterpret_cast<Address>(current);
+        Assembler::set_target_at(location_of_branch_data,
+                                 code_object->instruction_start());
+        location_of_branch_data += Assembler::kCallTargetSize;
+        current = reinterpret_cast<Object**>(location_of_branch_data);
+        break;
+      }
+      case EXTERNAL_REFERENCE_SERIALIZATION: {
+        int reference_id = source_->GetInt();
+        Address address = external_reference_decoder_->Decode(reference_id);
+        *current++ = reinterpret_cast<Object*>(address);
+        break;
+      }
+      default:
+        UNREACHABLE();
+    }
+  }
+  ASSERT(current == limit);
+  return space == NEW_SPACE;
+}
+
+
+void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
+  const int max_shift = ((kPointerSize * kBitsPerByte) / 7) * 7;
+  for (int shift = max_shift; shift > 0; shift -= 7) {
+    // Shift a pointer-width 1 so the result is defined for shifts >= 32.
+    if (integer >= static_cast<uintptr_t>(1) << shift) {
+      Put(((integer >> shift) & 0x7f) | 0x80, "intpart");
+    }
+  }
+  Put(integer & 0x7f, "intlastpart");
+}
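
The sink writes an integer as big-endian groups of seven bits, setting the
high bit on every group except the last; SnapshotByteSource::GetInt in
serialize.h reverses this. A worked round trip for the value 300, using a
re-implementation of the wire format for illustration only:

    #include <assert.h>
    #include <stdint.h>
    #include <vector>

    static std::vector<uint8_t> PutIntSketch(uintptr_t integer) {
      std::vector<uint8_t> out;
      const int max_shift = ((sizeof(uintptr_t) * 8) / 7) * 7;
      for (int shift = max_shift; shift > 0; shift -= 7) {
        if (integer >= static_cast<uintptr_t>(1) << shift) {
          out.push_back(static_cast<uint8_t>(((integer >> shift) & 0x7f) | 0x80));
        }
      }
      out.push_back(static_cast<uint8_t>(integer & 0x7f));
      return out;
    }

    int main() {
      // 300 = 0b10'0101100: high group 0b10 -> 0x82, low group 0b0101100 -> 0x2c.
      std::vector<uint8_t> bytes = PutIntSketch(300);
      assert(bytes.size() == 2 && bytes[0] == 0x82 && bytes[1] == 0x2c);
      // Decoding: ((0x82 & 0x7f) << 7) | 0x2c == 256 + 44 == 300.
      return 0;
    }
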
+
+#ifdef DEBUG
+
+void Deserializer2::Synchronize(const char* tag) {
+  int data = source_->Get();
+  // If this assert fails, the number of GC roots differs between
+  // serialization and deserialization.
+  ASSERT(data == SYNCHRONIZE);
+  do {
+    int character = source_->Get();
+    if (character == 0) break;
+    if (FLAG_debug_serialization) {
+      PrintF("%c", character);
+    }
+  } while (true);
+  if (FLAG_debug_serialization) {
+    PrintF("\n");
+  }
+}
+
+
+void Serializer2::Synchronize(const char* tag) {
+  sink_->Put(SYNCHRONIZE, tag);
+  int character;
+  do {
+    character = *tag++;
+    sink_->Put(character, "tagcharacter");
+  } while (character != 0);
+}
+
+#endif
+
+Serializer2::Serializer2(SnapshotByteSink* sink)
+    : sink_(sink),
+      current_root_index_(0),
+      external_reference_encoder_(NULL) {
+  for (int i = 0; i <= LAST_SPACE; i++) {
+    fullness_[i] = 0;
+  }
+}
+
+
+void Serializer2::Serialize() {
+  // No active threads.
+  CHECK_EQ(NULL, ThreadState::FirstInUse());
+  // No active or weak handles.
+  CHECK(HandleScopeImplementer::instance()->blocks()->is_empty());
+  CHECK_EQ(0, GlobalHandles::NumberOfWeakHandles());
+  ASSERT(external_reference_encoder_ == NULL);
+  external_reference_encoder_ = new ExternalReferenceEncoder();
+  Heap::IterateRoots(this);
+  delete external_reference_encoder_;
+  external_reference_encoder_ = NULL;
+}
+
+
+void Serializer2::VisitPointers(Object** start, Object** end) {
+  for (Object** current = start; current < end; current++) {
+    SerializeObject(*current, TAGGED_REPRESENTATION);
+  }
+}
+
+
+void Serializer2::SerializeObject(
+    Object* o,
+    ReferenceRepresentation reference_representation) {
+  if (o->IsHeapObject()) {
+    HeapObject* heap_object = HeapObject::cast(o);
+    MapWord map_word = heap_object->map_word();
+    if (map_word.IsSerializationAddress()) {
+      int space = SpaceOfAlreadySerializedObject(heap_object);
+      int offset =
+          CurrentAllocationAddress(space) - map_word.ToSerializationAddress();
+      // If we are actually dealing with real offsets (and not a numbering of
+      // all objects) then we should shift out the bits that are always 0.
+      if (!SpaceIsLarge(space)) offset >>= kObjectAlignmentBits;
+      if (reference_representation == CODE_TARGET_REPRESENTATION) {
+        sink_->Put(CODE_BACKREF_SERIALIZATION, "BackRefCodeSerialization");
+      } else {
+        ASSERT(reference_representation == TAGGED_REPRESENTATION);
+        sink_->Put(BACKREF_SERIALIZATION, "BackRefSerialization");
+      }
+      sink_->Put(space, "space");
+      sink_->PutInt(offset, "offset");
+    } else {
+      // Object has not yet been serialized.  Serialize it here.
+      ObjectSerializer serializer(this,
+                                  heap_object,
+                                  sink_,
+                                  reference_representation);
+      serializer.Serialize();
+    }
+  } else {
+    // Serialize a Smi.
+    unsigned int value = Smi::cast(o)->value() + kSmiBias;
+    sink_->Put(SMI_SERIALIZATION, "SmiSerialization");
+    sink_->PutInt(value, "smi");
+  }
+}
+
+
+void Serializer2::ObjectSerializer::Serialize() {
+  int space = Serializer2::SpaceOfObject(object_);
+  int size = object_->Size();
+
+  if (reference_representation_ == TAGGED_REPRESENTATION) {
+    sink_->Put(OBJECT_SERIALIZATION, "ObjectSerialization");
+  } else {
+    ASSERT(reference_representation_ == CODE_TARGET_REPRESENTATION);
+    sink_->Put(CODE_OBJECT_SERIALIZATION, "ObjectSerialization");
+  }
+  sink_->Put(space, "space");
+  sink_->PutInt(size >> kObjectAlignmentBits, "Size in words");
+
+  // Get the map before overwriting it.
+  Map* map = object_->map();
+  // Mark this object as already serialized.
+  object_->set_map_word(
+      MapWord::FromSerializationAddress(serializer_->Allocate(space, size)));
+
+  // Serialize the map (first word of the object).
+  serializer_->SerializeObject(map, TAGGED_REPRESENTATION);
+
+  // Serialize the rest of the object.
+  ASSERT(bytes_processed_so_far_ == 0);
+  bytes_processed_so_far_ = kPointerSize;
+  object_->IterateBody(map->instance_type(), size, this);
+  OutputRawData(object_->address() + size);
+}
+
+
+void Serializer2::ObjectSerializer::VisitPointers(Object** start,
+                                                  Object** end) {
+  Address pointers_start = reinterpret_cast<Address>(start);
+  OutputRawData(pointers_start);
+
+  for (Object** current = start; current < end; current++) {
+    serializer_->SerializeObject(*current, TAGGED_REPRESENTATION);
+  }
+  bytes_processed_so_far_ += (end - start) * kPointerSize;
+}
+
+
+void Serializer2::ObjectSerializer::VisitExternalReferences(Address* start,
+                                                            Address* end) {
+  Address references_start = reinterpret_cast<Address>(start);
+  OutputRawData(references_start);
+
+  for (Address* current = start; current < end; current++) {
+    sink_->Put(EXTERNAL_REFERENCE_SERIALIZATION, "External reference");
+    int reference_id = serializer_->EncodeExternalReference(*current);
+    sink_->PutInt(reference_id, "reference id");
+  }
+  bytes_processed_so_far_ += (end - start) * kPointerSize;
+}
+
+
+void Serializer2::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
+  ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
+  Address target_start = rinfo->target_address_address();
+  OutputRawData(target_start);
+  Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+  serializer_->SerializeObject(target, CODE_TARGET_REPRESENTATION);
+  bytes_processed_so_far_ += Assembler::kCallTargetSize;
+}
+
+
+void Serializer2::ObjectSerializer::OutputRawData(Address up_to) {
+  Address object_start = object_->address();
+  int up_to_offset = up_to - object_start;
+  int skipped = up_to_offset - bytes_processed_so_far_;
+  // This assert will fail if the reloc info gives us the target_address_address
+  // locations in a non-ascending order.  Luckily that doesn't happen.
+  ASSERT(skipped >= 0);
+  if (skipped != 0) {
+    sink_->Put(RAW_DATA_SERIALIZATION, "raw data");
+    sink_->PutInt(skipped, "length");
+    for (int i = 0; i < skipped; i++) {
+      unsigned int data = object_start[bytes_processed_so_far_ + i];
+      sink_->Put(data, "byte");
+    }
+  }
+  bytes_processed_so_far_ += skipped;
+}
+
+
+int Serializer2::SpaceOfObject(HeapObject* object) {
+  for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
+    AllocationSpace s = static_cast<AllocationSpace>(i);
+    if (Heap::InSpace(object, s)) {
+      if (i == LO_SPACE) {
+        if (object->IsCode()) {
+          return kLargeCode;
+        } else if (object->IsFixedArray()) {
+          return kLargeFixedArray;
+        } else {
+          return kLargeData;
+        }
+      }
+      return i;
+    }
+  }
+  UNREACHABLE();
+  return 0;
+}
+
+
+int Serializer2::SpaceOfAlreadySerializedObject(HeapObject* object) {
+  for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
+    AllocationSpace s = static_cast<AllocationSpace>(i);
+    if (Heap::InSpace(object, s)) {
+      return i;
+    }
+  }
+  UNREACHABLE();
+  return 0;
+}
+
+
+int Serializer2::Allocate(int space, int size) {
+  ASSERT(space >= 0 && space < kNumberOfSpaces);
+  if (SpaceIsLarge(space)) {
+    // In large object space we merely number the objects instead of trying to
+    // determine some sort of address.
+    return fullness_[LO_SPACE]++;
+  }
+  if (SpaceIsPaged(space)) {
+    // Paged spaces are a little special.  We encode their addresses as if the
+    // pages were all contiguous and each page were filled up in the range
+    // 0 - Page::kObjectAreaSize.  In practice the pages may not be contiguous
+    // and allocation does not start at offset 0 in the page, but this scheme
+    // means the deserializer can get the page number quickly by shifting the
+    // serialized address.
+    ASSERT(IsPowerOf2(Page::kPageSize));
+    int used_in_this_page = (fullness_[space] & (Page::kPageSize - 1));
+    ASSERT(size <= Page::kObjectAreaSize);
+    if (used_in_this_page + size > Page::kObjectAreaSize) {
+      fullness_[space] = RoundUp(fullness_[space], Page::kPageSize);
+    }
+  }
+  int allocation_address = fullness_[space];
+  fullness_[space] = allocation_address + size;
+  return allocation_address;
+}
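
In this scheme the deserializer recovers the page index with a shift and
the intra-page offset with a mask, as Deserializer2::GetAddress does. A
worked example, assuming 8 KB pages (kPageSizeBits == 13) purely for
illustration:

    #include <assert.h>

    int main() {
      const int kAssumedPageSizeBits = 13;  // assumption: 8 KB pages
      const int kAssumedPageSize = 1 << kAssumedPageSizeBits;
      int virtual_address = 3 * kAssumedPageSize + 400;  // 400 bytes into page 3
      assert((virtual_address >> kAssumedPageSizeBits) == 3);
      assert((virtual_address & (kAssumedPageSize - 1)) == 400);
      return 0;
    }
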
+
+
 } }  // namespace v8::internal
diff --git a/src/serialize.h b/src/serialize.h
index c901480..cefff78 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -262,7 +262,18 @@
 
 // A Deserializer reads a snapshot and reconstructs the Object graph it defines.
 
-class Deserializer: public ObjectVisitor {
+
+// TODO(erikcorry): Get rid of this superclass when we are using the new
+// snapshot code exclusively.
+class GenericDeserializer: public ObjectVisitor {
+ public:
+  virtual void GetLog() = 0;
+  virtual void Deserialize() = 0;
+};
+
+
+// TODO(erikcorry): Get rid of this class.
+class Deserializer: public GenericDeserializer {
  public:
   // Create a deserializer. The snapshot is held in str and has size len.
   Deserializer(const byte* str, int len);
@@ -339,6 +350,223 @@
   DISALLOW_COPY_AND_ASSIGN(Deserializer);
 };
 
+
+class SnapshotByteSource {
+ public:
+  SnapshotByteSource(const byte* array, int length)
+    : data_(array), length_(length), position_(0) { }
+
+  bool HasMore() { return position_ < length_; }
+
+  int Get() {
+    ASSERT(position_ < length_);
+    return data_[position_++];
+  }
+
+  int GetInt() {
+    // A little unrolling to catch the really small ints.
+    int snapshot_byte = Get();
+    if ((snapshot_byte & 0x80) == 0) {
+      return snapshot_byte;
+    }
+    uintptr_t accumulator = (snapshot_byte & 0x7f) << 7;
+    while (true) {
+      snapshot_byte = Get();
+      if ((snapshot_byte & 0x80) == 0) {
+        return accumulator | snapshot_byte;
+      }
+      accumulator = (accumulator | (snapshot_byte & 0x7f)) << 7;
+    }
+    UNREACHABLE();
+    return accumulator;
+  }
+
+  bool AtEOF() {
+    return position_ == length_;
+  }
+
+ private:
+  const byte* data_;
+  int length_;
+  int position_;
+};
+
+
+// The SerDes class is a common superclass for Serializer2 and Deserializer2
+// which is used to store common constants and methods used by both.
+// TODO(erikcorry): This should inherit from ObjectVisitor.
+class SerDes: public GenericDeserializer {
+ protected:
+  enum DataType {
+    SMI_SERIALIZATION,
+    RAW_DATA_SERIALIZATION,
+    OBJECT_SERIALIZATION,
+    CODE_OBJECT_SERIALIZATION,
+    BACKREF_SERIALIZATION,
+    CODE_BACKREF_SERIALIZATION,
+    EXTERNAL_REFERENCE_SERIALIZATION,
+    SYNCHRONIZE
+  };
+  // Our Smi encoding is much more efficient for small positive integers
+  // than for negative numbers, so we add a bias before encoding and
+  // subtract it again when decoding so that popular small negative Smis
+  // are encoded efficiently.
+  static const int kSmiBias = 16;
+  static const int kLargeData = LAST_SPACE;
+  static const int kLargeCode = kLargeData + 1;
+  static const int kLargeFixedArray = kLargeCode + 1;
+  static const int kNumberOfSpaces = kLargeFixedArray + 1;
+
+  static inline bool SpaceIsLarge(int space) { return space >= kLargeData; }
+  static inline bool SpaceIsPaged(int space) {
+    return space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE;
+  }
+};
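
Since the variable-length encoding used by PutInt only favors small
non-negative values, the bias shifts the most common small negative smis
into that cheap one-byte range. A sketch of the round trip:

    #include <assert.h>

    static const int kSmiBiasSketch = 16;  // mirrors kSmiBias above

    static unsigned EncodeSmiValue(int value) {
      return static_cast<unsigned>(value + kSmiBiasSketch);
    }

    static int DecodeSmiValue(unsigned wire) {
      return static_cast<int>(wire) - kSmiBiasSketch;
    }

    int main() {
      assert(EncodeSmiValue(-3) == 13u);  // one wire byte, since 13 < 128
      assert(DecodeSmiValue(EncodeSmiValue(-3)) == -3);
      assert(DecodeSmiValue(EncodeSmiValue(1000)) == 1000);
      return 0;
    }
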
+
+
+
+// A Deserializer reads a snapshot and reconstructs the Object graph it defines.
+class Deserializer2: public SerDes {
+ public:
+  // Create a deserializer from a snapshot byte source.
+  explicit Deserializer2(SnapshotByteSource* source);
+
+  virtual ~Deserializer2() { }
+
+  // Deserialize the snapshot into an empty heap.
+  void Deserialize();
+  void GetLog() { }   // TODO(erikcorry): Get rid of this.
+#ifdef DEBUG
+  virtual void Synchronize(const char* tag);
+#endif
+
+ private:
+  virtual void VisitPointers(Object** start, Object** end);
+
+  virtual void VisitExternalReferences(Address* start, Address* end) {
+    UNREACHABLE();
+  }
+
+  virtual void VisitRuntimeEntry(RelocInfo* rinfo) {
+    UNREACHABLE();
+  }
+
+  int CurrentAllocationAddress(int space) {
+    // The three different kinds of large objects have different tags in the
+    // snapshot so the deserializer knows which kind of object to allocate,
+    // but they share a fullness_ entry.
+    if (SpaceIsLarge(space)) space = LO_SPACE;
+    return fullness_[space];
+  }
+
+  HeapObject* GetAddress(int space);
+  Address Allocate(int space, int size);
+  bool ReadObject(Object** write_back);
+
+  // Keep track of the pages in the paged spaces.
+  // (In large object space we are keeping track of individual objects
+  // rather than pages.)  In new space we just need the address of the
+  // first object and the others will flow from that.
+  List<Address> pages_[SerDes::kNumberOfSpaces];
+
+  SnapshotByteSource* source_;
+  ExternalReferenceDecoder* external_reference_decoder_;
+  // Keep track of the fullness of each space in order to generate
+  // relative addresses for back references.  Large objects are
+  // just numbered sequentially since relative addresses make no
+  // sense in large object space.
+  int fullness_[LAST_SPACE + 1];
+
+  DISALLOW_COPY_AND_ASSIGN(Deserializer2);
+};
+
+
+class SnapshotByteSink {
+ public:
+  virtual ~SnapshotByteSink() { }
+  virtual void Put(int byte, const char* description) = 0;
+  void PutInt(uintptr_t integer, const char* description);
+};
+
+
+class Serializer2 : public SerDes {
+ public:
+  explicit Serializer2(SnapshotByteSink* sink);
+  // Serialize the current state of the heap. This operation destroys the
+  // heap contents.
+  void Serialize();
+  void VisitPointers(Object** start, Object** end);
+  void GetLog() { }       // TODO(erikcorry): Get rid of this.
+  void Deserialize() { }  // TODO(erikcorry): Get rid of this.
+#ifdef DEBUG
+  virtual void Synchronize(const char* tag);
+#endif
+
+ private:
+  enum ReferenceRepresentation {
+    TAGGED_REPRESENTATION,      // A tagged object reference.
+    CODE_TARGET_REPRESENTATION  // A reference to the target's first instruction.
+  };
+  class ObjectSerializer : public ObjectVisitor {
+   public:
+    ObjectSerializer(Serializer2* serializer,
+                     Object* o,
+                     SnapshotByteSink* sink,
+                     ReferenceRepresentation representation)
+      : serializer_(serializer),
+        object_(HeapObject::cast(o)),
+        sink_(sink),
+        reference_representation_(representation),
+        bytes_processed_so_far_(0) { }
+    void Serialize();
+    void VisitPointers(Object** start, Object** end);
+    void VisitExternalReferences(Address* start, Address* end);
+    void VisitCodeTarget(RelocInfo* target);
+
+   private:
+    void OutputRawData(Address up_to);
+
+    Serializer2* serializer_;
+    HeapObject* object_;
+    SnapshotByteSink* sink_;
+    ReferenceRepresentation reference_representation_;
+    int bytes_processed_so_far_;
+  };
+
+  void SerializeObject(Object* o, ReferenceRepresentation representation);
+  void InitializeAllocators();
+  // This will return the space for an object.  If the object is in large
+  // object space it may return kLargeCode or kLargeFixedArray in order
+  // to indicate to the deserializer what kind of large object allocation
+  // to make.
+  static int SpaceOfObject(HeapObject* object);
+  // This just returns the space of the object.  It will return LO_SPACE
+  // for all large objects since you can't check the type of the object
+  // once the map has been used for the serialization address.
+  static int SpaceOfAlreadySerializedObject(HeapObject* object);
+  int Allocate(int space, int size);
+  int CurrentAllocationAddress(int space) {
+    if (SpaceIsLarge(space)) space = LO_SPACE;
+    return fullness_[space];
+  }
+  int EncodeExternalReference(Address addr) {
+    return external_reference_encoder_->Encode(addr);
+  }
+
+  // Keep track of the fullness of each space in order to generate
+  // relative addresses for back references.  Large objects are
+  // just numbered sequentially since relative addresses make no
+  // sense in large object space.
+  int fullness_[LAST_SPACE + 1];
+  SnapshotByteSink* sink_;
+  int current_root_index_;
+  ExternalReferenceEncoder* external_reference_encoder_;
+
+  friend class ObjectSerializer;
+  friend class Deserializer2;
+
+  DISALLOW_COPY_AND_ASSIGN(Serializer2);
+};
+
 } }  // namespace v8::internal
 
 #endif  // V8_SERIALIZE_H_
diff --git a/src/snapshot-common.cc b/src/snapshot-common.cc
index 9c66a50..b258a15 100644
--- a/src/snapshot-common.cc
+++ b/src/snapshot-common.cc
@@ -32,6 +32,7 @@
 #include "api.h"
 #include "serialize.h"
 #include "snapshot.h"
+#include "platform.h"
 
 namespace v8 {
 namespace internal {
@@ -43,6 +44,13 @@
 }
 
 
+bool Snapshot::Deserialize2(const byte* content, int len) {
+  SnapshotByteSource source(content, len);
+  Deserializer2 deserializer(&source);
+  return V8::Initialize(&deserializer);
+}
+
+
 bool Snapshot::Initialize(const char* snapshot_file) {
   if (snapshot_file) {
     int len;
@@ -58,6 +66,20 @@
 }
 
 
+bool Snapshot::Initialize2(const char* snapshot_file) {
+  if (snapshot_file) {
+    int len;
+    byte* str = ReadBytes(snapshot_file, &len);
+    if (!str) return false;
+    Deserialize2(str, len);
+    DeleteArray(str);
+  } else if (size_ > 0) {
+    Deserialize2(data_, size_);
+  }
+  return true;
+}
+
+
 bool Snapshot::WriteToFile(const char* snapshot_file) {
   Serializer ser;
   ser.Serialize();
@@ -72,4 +94,38 @@
 }
 
 
+class FileByteSink : public SnapshotByteSink {
+ public:
+  explicit FileByteSink(const char* snapshot_file) {
+    fp_ = OS::FOpen(snapshot_file, "wb");
+    if (fp_ == NULL) {
+      PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
+      exit(1);
+    }
+  }
+  virtual ~FileByteSink() {
+    if (fp_ != NULL) {
+      fclose(fp_);
+    }
+  }
+  virtual void Put(int byte, const char* description) {
+    if (fp_ != NULL) {
+      fputc(byte, fp_);
+    }
+  }
+
+ private:
+  FILE* fp_;
+};
+
+
+bool Snapshot::WriteToFile2(const char* snapshot_file) {
+  FileByteSink file(snapshot_file);
+  Serializer2 ser(&file);
+  ser.Serialize();
+  return true;
+}
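
A hypothetical round trip with the new entry points (error handling
elided; as Deserializer2::Deserialize notes, Initialize2 expects an empty
heap with no active threads or handles):

    #include "snapshot.h"

    // Sketch only: write a snapshot in one process, load it in another.
    void WriteSnapshotSketch() {
      v8::internal::Snapshot::WriteToFile2("snapshot.bin");
    }

    bool LoadSnapshotSketch() {
      return v8::internal::Snapshot::Initialize2("snapshot.bin");
    }
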
+
+
 } }  // namespace v8::internal
diff --git a/src/snapshot.h b/src/snapshot.h
index 88ba8db..a3a3867 100644
--- a/src/snapshot.h
+++ b/src/snapshot.h
@@ -37,6 +37,7 @@
   // NULL, use the internal snapshot instead. Returns false if no snapshot
   // could be found.
   static bool Initialize(const char* snapshot_file = NULL);
+  static bool Initialize2(const char* snapshot_file = NULL);
 
   // Returns whether or not the snapshot is enabled.
   static bool IsEnabled() { return size_ != 0; }
@@ -44,12 +45,14 @@
   // Write snapshot to the given file. Returns true if snapshot was written
   // successfully.
   static bool WriteToFile(const char* snapshot_file);
+  static bool WriteToFile2(const char* snapshot_file);
 
  private:
   static const byte data_[];
   static int size_;
 
   static bool Deserialize(const byte* content, int len);
+  static bool Deserialize2(const byte* content, int len);
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(Snapshot);
 };
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index da72497..847bb9a 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -360,6 +360,13 @@
   return obj;
 }
 
+
+bool FreeListNode::IsFreeListNode(HeapObject* object) {
+  return object->map() == Heap::raw_unchecked_byte_array_map()
+      || object->map() == Heap::raw_unchecked_one_pointer_filler_map()
+      || object->map() == Heap::raw_unchecked_two_pointer_filler_map();
+}
+
 } }  // namespace v8::internal
 
 #endif  // V8_SPACES_INL_H_
diff --git a/src/spaces.cc b/src/spaces.cc
index 43abaa4..c69579a 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -982,7 +982,7 @@
   // To support fast containment testing in the new space, the size of
   // this chunk must be a power of two and it must be aligned to its size.
   int initial_semispace_capacity = Heap::InitialSemiSpaceSize();
-  int maximum_semispace_capacity = Heap::SemiSpaceSize();
+  int maximum_semispace_capacity = Heap::MaxSemiSpaceSize();
 
   ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
   ASSERT(IsPowerOf2(maximum_semispace_capacity));
@@ -998,7 +998,7 @@
 #undef SET_NAME
 #endif
 
-  ASSERT(size == 2 * maximum_semispace_capacity);
+  ASSERT(size == 2 * Heap::ReservedSemiSpaceSize());
   ASSERT(IsAddressAligned(start, size, 0));
 
   if (!to_space_.Setup(start,
@@ -1527,7 +1527,9 @@
   // correct size.
   if (size_in_bytes > ByteArray::kAlignedSize) {
     set_map(Heap::raw_unchecked_byte_array_map());
-    ByteArray::cast(this)->set_length(ByteArray::LengthFor(size_in_bytes));
+    // Can't use ByteArray::cast because it fails during deserialization.
+    ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
+    this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
   } else if (size_in_bytes == kPointerSize) {
     set_map(Heap::raw_unchecked_one_pointer_filler_map());
   } else if (size_in_bytes == 2 * kPointerSize) {
@@ -1535,13 +1537,13 @@
   } else {
     UNREACHABLE();
   }
-  ASSERT(Size() == size_in_bytes);
+  // We would like to ASSERT(Size() == size_in_bytes) but this would fail
+  // during deserialization because the byte array map is not set up yet.
 }
 
 
 Address FreeListNode::next() {
-  ASSERT(map() == Heap::raw_unchecked_byte_array_map() ||
-         map() == Heap::raw_unchecked_two_pointer_filler_map());
+  ASSERT(IsFreeListNode(this));
   if (map() == Heap::raw_unchecked_byte_array_map()) {
     ASSERT(Size() >= kNextOffset + kPointerSize);
     return Memory::Address_at(address() + kNextOffset);
@@ -1552,8 +1554,7 @@
 
 
 void FreeListNode::set_next(Address next) {
-  ASSERT(map() == Heap::raw_unchecked_byte_array_map() ||
-         map() == Heap::raw_unchecked_two_pointer_filler_map());
+  ASSERT(IsFreeListNode(this));
   if (map() == Heap::raw_unchecked_byte_array_map()) {
     ASSERT(Size() >= kNextOffset + kPointerSize);
     Memory::Address_at(address() + kNextOffset) = next;
@@ -1830,13 +1831,16 @@
     return AllocateInNextPage(current_page, size_in_bytes);
   }
 
-  // There is no next page in this space.  Try free list allocation.
-  int wasted_bytes;
-  Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
-  accounting_stats_.WasteBytes(wasted_bytes);
-  if (!result->IsFailure()) {
-    accounting_stats_.AllocateBytes(size_in_bytes);
-    return HeapObject::cast(result);
+  // There is no next page in this space.  Try free list allocation unless that
+  // is currently forbidden.
+  if (!Heap::linear_allocation()) {
+    int wasted_bytes;
+    Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
+    accounting_stats_.WasteBytes(wasted_bytes);
+    if (!result->IsFailure()) {
+      accounting_stats_.AllocateBytes(size_in_bytes);
+      return HeapObject::cast(result);
+    }
   }
 
   // Free list allocation failed and there is no next page.  Fail if we have
@@ -2232,10 +2236,10 @@
     return AllocateInNextPage(current_page, size_in_bytes);
   }
 
-  // There is no next page in this space.  Try free list allocation.
-  // The fixed space free list implicitly assumes that all free blocks
-  // are of the fixed size.
-  if (size_in_bytes == object_size_in_bytes_) {
+  // There is no next page in this space.  Try free list allocation unless
+  // that is currently forbidden.  The fixed space free list implicitly assumes
+  // that all free blocks are of the fixed size.
+  if (!Heap::linear_allocation()) {
     Object* result = free_list_.Allocate();
     if (!result->IsFailure()) {
       accounting_stats_.AllocateBytes(size_in_bytes);
diff --git a/src/spaces.h b/src/spaces.h
index 76b88ef..9e1d873 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -862,6 +862,10 @@
   // Current capacity without growing (Size() + Available() + Waste()).
   int Capacity() { return accounting_stats_.Capacity(); }
 
+  // Total amount of memory committed for this space.  For paged
+  // spaces this equals the capacity.
+  int CommittedMemory() { return Capacity(); }
+
   // Available bytes without growing.
   int Available() { return accounting_stats_.Available(); }
 
@@ -1252,11 +1256,19 @@
 
   // Return the allocated bytes in the active semispace.
   virtual int Size() { return top() - bottom(); }
+
   // Return the current capacity of a semispace.
   int Capacity() {
     ASSERT(to_space_.Capacity() == from_space_.Capacity());
     return to_space_.Capacity();
   }
+
+  // Return the total amount of memory committed for new space.
+  int CommittedMemory() {
+    if (from_space_.is_committed()) return 2 * Capacity();
+    return Capacity();
+  }
+
   // Return the available bytes without growing in the active semispace.
   int Available() { return Capacity() - Size(); }
 
@@ -1423,6 +1435,8 @@
     return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
   }
 
+  static inline bool IsFreeListNode(HeapObject* object);
+
   // Set the size in bytes, which can be read with HeapObject::Size().  This
   // function also writes a map to the first word of the block so that it
   // looks like a heap object to the garbage collector and heap iteration
diff --git a/src/string-stream.cc b/src/string-stream.cc
index 8c62a45..eb5d1e3 100644
--- a/src/string-stream.cc
+++ b/src/string-stream.cc
@@ -188,7 +188,7 @@
 void StringStream::PrintObject(Object* o) {
   o->ShortPrint(this);
   if (o->IsString()) {
-    if (String::cast(o)->length() <= String::kMaxMediumStringSize) {
+    if (String::cast(o)->length() <= String::kMaxMediumSize) {
       return;
     }
   } else if (o->IsNumber() || o->IsOddball()) {
diff --git a/src/string.js b/src/string.js
index d2d6e96..bb2ad4f 100644
--- a/src/string.js
+++ b/src/string.js
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -810,10 +810,13 @@
   var len = end - start;
   if (len == 0) return;
   var elements = this.elements;
-  if (start >= 0 && len >= 0 && start < 0x80000 && len < 0x800) {
+  if (start < 0x80000 && len < 0x800) {
     elements[elements.length] = (start << 11) + len;
   } else {
-    elements[elements.length] = SubString(this.special_string, start, end);
+    // 0 < len <= String::kMaxLength and Smi::kMaxValue >= String::kMaxLength,
+    // so -len is a smi.
+    elements[elements.length] = -len;
+    elements[elements.length] = start;
   }
 }
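
Note: the new else-branch changes the fallback encoding. Ranges that fit use a single packed element, (start << 11) + len; anything larger now takes two elements, -len followed by start, instead of eagerly materializing a SubString. Because the packed form is always non-negative, a negative first slot unambiguously tags the two-slot form, as the smi-range comment above notes. A C++ model of the same arithmetic:

    #include <vector>

    // Packed one-slot form for small ranges; a negative first slot tags the
    // wide two-slot form. Bounds mirror the JavaScript above.
    void AddRange(std::vector<long>& elements, long start, long end) {
      long len = end - start;
      if (len == 0) return;
      if (start < 0x80000 && len < 0x800) {
        elements.push_back((start << 11) + len);  // start in high bits, len low
      } else {
        elements.push_back(-len);                 // negative => wide form
        elements.push_back(start);
      }
    }

    int main() {
      std::vector<long> e;
      AddRange(e, 10, 20);   // packed: (10 << 11) + 10
      AddRange(e, 5, 5000);  // wide: -4995 then 5
      return (e.size() == 3 && e[0] == (10 << 11) + 10 && e[1] == -4995) ? 0 : 1;
    }
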
 
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index e10dc61..a399e45 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -735,11 +735,16 @@
 
 
 Object* LoadCallbackProperty(Arguments args) {
+  ASSERT(args[0]->IsJSObject());
+  ASSERT(args[1]->IsJSObject());
   AccessorInfo* callback = AccessorInfo::cast(args[2]);
   Address getter_address = v8::ToCData<Address>(callback->getter());
   v8::AccessorGetter fun = FUNCTION_CAST<v8::AccessorGetter>(getter_address);
   ASSERT(fun != NULL);
-  v8::AccessorInfo info(args.arguments());
+  CustomArguments custom_args(callback->data(),
+                              JSObject::cast(args[0]),
+                              JSObject::cast(args[1]));
+  v8::AccessorInfo info(custom_args.end());
   HandleScope scope;
   v8::Handle<v8::Value> result;
   {
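
Note: LoadCallbackProperty now builds the v8::AccessorInfo from a small CustomArguments block (callback data, receiver and holder packed together) and hands AccessorInfo a pointer just past that block, so the public API object can read its fields at fixed negative offsets without a full Arguments adapter. A toy of the end-pointer layout — the offsets and field order here are illustrative, not V8's actual ABI:

    // Consumer indexes backwards from a pointer one past the block.
    struct ToyAccessorInfo {
      void** args_end;
      void* data()   const { return args_end[-1]; }
      void* self()   const { return args_end[-2]; }
      void* holder() const { return args_end[-3]; }
    };

    int main() {
      int data = 1, self = 2, holder = 3;
      void* slots[3] = {&holder, &self, &data};
      ToyAccessorInfo info{slots + 3};
      return (info.data() == &data && info.self() == &self &&
              info.holder() == &holder) ? 0 : 1;
    }
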
diff --git a/src/third_party/valgrind/valgrind.h b/src/third_party/valgrind/valgrind.h
index 47f369b..a94dc58 100644
--- a/src/third_party/valgrind/valgrind.h
+++ b/src/third_party/valgrind/valgrind.h
@@ -74,6 +74,7 @@
 #define __VALGRIND_H
 
 #include <stdarg.h>
+#include <stdint.h>
 
 /* Nb: this file might be included in a file compiled with -ansi.  So
    we can't use C++ style "//" comments nor the "asm" keyword (instead
@@ -232,7 +233,7 @@
 
 typedef
    struct { 
-      unsigned long long int nraddr; /* where's the code? */
+      uint64_t nraddr; /* where's the code? */
    }
    OrigFn;
 
@@ -243,14 +244,14 @@
 #define VALGRIND_DO_CLIENT_REQUEST(                               \
         _zzq_rlval, _zzq_default, _zzq_request,                   \
         _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
-  { volatile unsigned long long int _zzq_args[6];                 \
-    volatile unsigned long long int _zzq_result;                  \
-    _zzq_args[0] = (unsigned long long int)(_zzq_request);        \
-    _zzq_args[1] = (unsigned long long int)(_zzq_arg1);           \
-    _zzq_args[2] = (unsigned long long int)(_zzq_arg2);           \
-    _zzq_args[3] = (unsigned long long int)(_zzq_arg3);           \
-    _zzq_args[4] = (unsigned long long int)(_zzq_arg4);           \
-    _zzq_args[5] = (unsigned long long int)(_zzq_arg5);           \
+  { volatile uint64_t _zzq_args[6];                 \
+    volatile uint64_t _zzq_result;                  \
+    _zzq_args[0] = (uint64_t)(_zzq_request);        \
+    _zzq_args[1] = (uint64_t)(_zzq_arg1);           \
+    _zzq_args[2] = (uint64_t)(_zzq_arg2);           \
+    _zzq_args[3] = (uint64_t)(_zzq_arg3);           \
+    _zzq_args[4] = (uint64_t)(_zzq_arg4);           \
+    _zzq_args[5] = (uint64_t)(_zzq_arg5);           \
     __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
                      /* %RDX = client_request ( %RAX ) */         \
                      "xchgq %%rbx,%%rbx"                          \
@@ -263,7 +264,7 @@
 
 #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
   { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
-    volatile unsigned long long int __addr;                       \
+    volatile uint64_t __addr;                       \
     __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
                      /* %RAX = guest_NRADDR */                    \
                      "xchgq %%rcx,%%rcx"                          \
@@ -346,8 +347,8 @@
 
 typedef
    struct { 
-      unsigned long long int nraddr; /* where's the code? */
-      unsigned long long int r2;  /* what tocptr do we need? */
+      uint64_t nraddr; /* where's the code? */
+      uint64_t r2;  /* what tocptr do we need? */
    }
    OrigFn;
 
@@ -359,15 +360,15 @@
         _zzq_rlval, _zzq_default, _zzq_request,                   \
         _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
                                                                   \
-  {          unsigned long long int  _zzq_args[6];                \
-    register unsigned long long int  _zzq_result __asm__("r3");   \
-    register unsigned long long int* _zzq_ptr __asm__("r4");      \
-    _zzq_args[0] = (unsigned long long int)(_zzq_request);        \
-    _zzq_args[1] = (unsigned long long int)(_zzq_arg1);           \
-    _zzq_args[2] = (unsigned long long int)(_zzq_arg2);           \
-    _zzq_args[3] = (unsigned long long int)(_zzq_arg3);           \
-    _zzq_args[4] = (unsigned long long int)(_zzq_arg4);           \
-    _zzq_args[5] = (unsigned long long int)(_zzq_arg5);           \
+  {          uint64_t  _zzq_args[6];                \
+    register uint64_t  _zzq_result __asm__("r3");   \
+    register uint64_t* _zzq_ptr __asm__("r4");      \
+    _zzq_args[0] = (uint64_t)(_zzq_request);        \
+    _zzq_args[1] = (uint64_t)(_zzq_arg1);           \
+    _zzq_args[2] = (uint64_t)(_zzq_arg2);           \
+    _zzq_args[3] = (uint64_t)(_zzq_arg3);           \
+    _zzq_args[4] = (uint64_t)(_zzq_arg4);           \
+    _zzq_args[5] = (uint64_t)(_zzq_arg5);           \
     _zzq_ptr = _zzq_args;                                         \
     __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
                      /* %R3 = client_request ( %R4 ) */           \
@@ -380,7 +381,7 @@
 
 #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
   { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
-    register unsigned long long int __addr __asm__("r3");         \
+    register uint64_t __addr __asm__("r3");         \
     __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
                      /* %R3 = guest_NRADDR */                     \
                      "or 2,2,2"                                   \
@@ -484,8 +485,8 @@
 
 typedef
    struct { 
-      unsigned long long int nraddr; /* where's the code? */
-      unsigned long long int r2;  /* what tocptr do we need? */
+      uint64_t nraddr; /* where's the code? */
+      uint64_t r2;  /* what tocptr do we need? */
    }
    OrigFn;
 
@@ -497,9 +498,9 @@
         _zzq_rlval, _zzq_default, _zzq_request,                   \
         _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5)    \
                                                                   \
-  {          unsigned long long int  _zzq_args[7];                \
-    register unsigned long long int  _zzq_result;                 \
-    register unsigned long long int* _zzq_ptr;                    \
+  {          uint64_t  _zzq_args[7];                \
+    register uint64_t  _zzq_result;                 \
+    register uint64_t* _zzq_ptr;                    \
     _zzq_args[0] = (unsigned int long long)(_zzq_request);        \
     _zzq_args[1] = (unsigned int long long)(_zzq_arg1);           \
     _zzq_args[2] = (unsigned int long long)(_zzq_arg2);           \
@@ -522,7 +523,7 @@
 
 #define VALGRIND_GET_NR_CONTEXT(_zzq_rlval)                       \
   { volatile OrigFn* _zzq_orig = &(_zzq_rlval);                   \
-    register unsigned long long int __addr;                       \
+    register uint64_t __addr;                       \
     __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE               \
                      /* %R3 = guest_NRADDR */                     \
                      "or 2,2,2\n\t"                               \
diff --git a/src/top.h b/src/top.h
index ae94f08..0f5aa27 100644
--- a/src/top.h
+++ b/src/top.h
@@ -170,6 +170,10 @@
     return &thread_local_.external_caught_exception_;
   }
 
+  static Object** scheduled_exception_address() {
+    return &thread_local_.scheduled_exception_;
+  }
+
   static Object* scheduled_exception() {
     ASSERT(has_scheduled_exception());
     return thread_local_.scheduled_exception_;
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 84f9ee4..b3f29f5 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -118,6 +118,7 @@
   SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi)                \
   SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol)          \
   SC(keyed_load_generic_slow, V8.KeyedLoadGenericSlow)              \
+  SC(keyed_load_external_array_slow, V8.KeyedLoadExternalArraySlow) \
   /* Count how much the monomorphic keyed-load stubs are hit. */    \
   SC(keyed_load_function_prototype, V8.KeyedLoadFunctionPrototype)  \
   SC(keyed_load_string_length, V8.KeyedLoadStringLength)            \
diff --git a/src/v8.cc b/src/v8.cc
index 3c70ee9..fe21b3b 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -45,7 +45,7 @@
 bool V8::has_been_disposed_ = false;
 bool V8::has_fatal_error_ = false;
 
-bool V8::Initialize(Deserializer *des) {
+bool V8::Initialize(GenericDeserializer *des) {
   bool create_heap_objects = des == NULL;
   if (has_been_disposed_ || has_fatal_error_) return false;
   if (IsRunning()) return true;
diff --git a/src/v8.h b/src/v8.h
index 106ae61..6c5546c 100644
--- a/src/v8.h
+++ b/src/v8.h
@@ -80,7 +80,7 @@
   // created from scratch. If a non-null Deserializer is given, the
   // initial state is created by reading the deserialized data into an
   // empty heap.
-  static bool Initialize(Deserializer* des);
+  static bool Initialize(GenericDeserializer* des);
   static void TearDown();
   static bool IsRunning() { return is_running_; }
   // To be dead you have to have lived
diff --git a/src/version.cc b/src/version.cc
index 4a63127..7b8986c 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     1
 #define MINOR_VERSION     3
-#define BUILD_NUMBER      16
-#define PATCH_LEVEL       2
+#define BUILD_NUMBER      17
+#define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false
 
 // Define SONAME to have the SCons build the put a specific SONAME into the
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 3f3d34e..61e8753 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -393,7 +393,7 @@
   // Some internal data structures overflow for very large buffers,
   // they must ensure that kMaximalBufferSize is not too large.
   if ((desc.buffer_size > kMaximalBufferSize) ||
-      (desc.buffer_size > Heap::OldGenerationSize())) {
+      (desc.buffer_size > Heap::MaxOldGenerationSize())) {
     V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
   }
 
@@ -574,11 +574,11 @@
     emit(src.value_);
   } else if (dst.is(rax)) {
     emit(0x05 | (subcode << 3));
-    emitl(src.value_);
+    emitw(src.value_);
   } else {
     emit(0x81);
     emit_modrm(subcode, dst);
-    emitl(src.value_);
+    emitw(src.value_);
   }
 }
 
@@ -597,7 +597,7 @@
   } else {
     emit(0x81);
     emit_operand(subcode, dst);
-    emitl(src.value_);
+    emitw(src.value_);
   }
 }
 
@@ -1255,6 +1255,15 @@
   emit_operand(src, dst);
 }
 
+void Assembler::movw(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x66);
+  emit_optional_rex_32(src, dst);
+  emit(0x89);
+  emit_operand(src, dst);
+}
+
 void Assembler::movl(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1439,6 +1448,26 @@
 }
 
 
+void Assembler::movsxbq(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_32(dst, src);
+  emit(0x0F);
+  emit(0xBE);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::movsxwq(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst, src);
+  emit(0x0F);
+  emit(0xBF);
+  emit_operand(dst, src);
+}
+
+
 void Assembler::movsxlq(Register dst, Register src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1477,6 +1506,16 @@
 }
 
 
+void Assembler::movzxwq(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst, src);
+  emit(0x0F);
+  emit(0xB7);
+  emit_operand(dst, src);
+}
+
+
 void Assembler::movzxwl(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1970,6 +2009,14 @@
 }
 
 
+void Assembler::fstp(int index) {
+  ASSERT(is_uint3(index));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_farith(0xDD, 0xD8, index);
+}
+
+
 void Assembler::fild_s(const Operand& adr) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -2021,7 +2068,7 @@
   last_pc_ = pc_;
   emit_optional_rex_32(adr);
   emit(0xDF);
-  emit_operand(8, adr);
+  emit_operand(7, adr);
 }
 
 
@@ -2190,6 +2237,22 @@
 }
 
 
+void Assembler::fucomi(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xDB);
+  emit(0xE8 + i);
+}
+
+
+void Assembler::fucomip() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xDF);
+  emit(0xE9);
+}
+
+
 void Assembler::fcompp() {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -2258,18 +2321,7 @@
 }
 
 
-void Assembler::movsd(Register dst, XMMRegister src) {
-  EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
-  emit(0xF2);  // double
-  emit_optional_rex_32(src, dst);
-  emit(0x0F);
-  emit(0x11);  // store
-  emit_sse_operand(src, dst);
-}
-
-
-void Assembler::movsd(XMMRegister dst, Register src) {
+void Assembler::movsd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   emit(0xF2);  // double
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 7e09a41..617f092 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -458,7 +458,14 @@
   // the relative displacements stored in the code.
   static inline Address target_address_at(Address pc);
   static inline void set_target_address_at(Address pc, Address target);
+  // This sets the branch destination (which is in the instruction on x64).
+  inline static void set_target_at(Address instruction_payload,
+                                   Address target) {
+    set_target_address_at(instruction_payload, target);
+  }
   inline Handle<Object> code_target_object_handle_at(Address pc);
+  // Number of bytes taken up by the branch target in the code.
+  static const int kCallTargetSize = 4;  // Use 32-bit displacement.
   // Distance between the address of the code target in the call instruction
   // and the return address pushed on the stack.
   static const int kCallTargetAddressOffset = 4;  // Use 32-bit displacement.
@@ -513,6 +520,10 @@
   void movb(Register dst, Immediate imm);
   void movb(const Operand& dst, Register src);
 
+  // Move the low 16 bits of a 64-bit register value to a 16-bit
+  // memory location.
+  void movw(const Operand& dst, Register src);
+
   void movl(Register dst, Register src);
   void movl(Register dst, const Operand& src);
   void movl(const Operand& dst, Register src);
@@ -542,10 +553,13 @@
   void movq(Register dst, ExternalReference ext);
   void movq(Register dst, Handle<Object> handle, RelocInfo::Mode rmode);
 
+  void movsxbq(Register dst, const Operand& src);
+  void movsxwq(Register dst, const Operand& src);
   void movsxlq(Register dst, Register src);
   void movsxlq(Register dst, const Operand& src);
   void movzxbq(Register dst, const Operand& src);
   void movzxbl(Register dst, const Operand& src);
+  void movzxwq(Register dst, const Operand& src);
   void movzxwl(Register dst, const Operand& src);
 
   // New x64 instruction to load from an immediate 64-bit pointer into RAX.
@@ -913,7 +927,11 @@
   void testq(Register dst, Immediate mask);
 
   void xor_(Register dst, Register src) {
-    arithmetic_op(0x33, dst, src);
+    if (dst.code() == src.code()) {
+      arithmetic_op_32(0x33, dst, src);
+    } else {
+      arithmetic_op(0x33, dst, src);
+    }
   }
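
Note: the xor_ special case above exploits two x86-64 facts: writing a 32-bit register implicitly zero-extends into the full 64-bit register, and the 32-bit form needs no REX.W prefix. So when a register is xored with itself — the standard zeroing idiom — the shorter encoding produces an identical result. Illustrative byte encodings:

    // Zeroing rax two ways; both clear all 64 bits.
    const unsigned char xor_eax_eax[] = {0x33, 0xC0};        // 32-bit form, 2 bytes
    const unsigned char xor_rax_rax[] = {0x48, 0x33, 0xC0};  // with REX.W, 3 bytes

    int main() {
      return sizeof(xor_eax_eax) < sizeof(xor_rax_rax) ? 0 : 1;
    }
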
 
   void xorl(Register dst, Register src) {
@@ -1006,6 +1024,7 @@
 
   void fstp_s(const Operand& adr);
   void fstp_d(const Operand& adr);
+  void fstp(int index);
 
   void fild_s(const Operand& adr);
   void fild_d(const Operand& adr);
@@ -1042,6 +1061,9 @@
   void ftst();
   void fucomp(int i);
   void fucompp();
+  void fucomi(int i);
+  void fucomip();
+
   void fcompp();
   void fnstsw_ax();
   void fwait();
@@ -1056,8 +1078,7 @@
 
   // SSE2 instructions
   void movsd(const Operand& dst, XMMRegister src);
-  void movsd(Register src, XMMRegister dst);
-  void movsd(XMMRegister dst, Register src);
+  void movsd(XMMRegister src, XMMRegister dst);
   void movsd(XMMRegister src, const Operand& dst);
 
   void cvttss2si(Register dst, const Operand& src);
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 01992ce..8590365 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -246,6 +246,8 @@
     const int kGlobalIndex =
         Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
     __ movq(rbx, FieldOperand(rsi, kGlobalIndex));
+    __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
+    __ movq(rbx, FieldOperand(rbx, kGlobalIndex));
     __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
 
     __ bind(&patch_receiver);
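
Note: both receiver-patching sites in this file (here and in the hunk further down) gain the same two loads. Instead of taking the global object of the function's own context at face value, the code dereferences through that object's global context to reach the canonical global object before fetching the global receiver. A toy of the pointer chase — the struct shapes are illustrative, with field names following the offsets used above:

    // context -> global -> global context -> global -> global receiver,
    // mirroring the sequence of movq's above.
    struct GlobalObject;
    struct Context { GlobalObject* global; };
    struct GlobalObject { Context* global_context; void* global_receiver; };

    void* GlobalReceiver(Context* rsi) {
      GlobalObject* rbx = rsi->global;    // original load
      rbx = rbx->global_context->global;  // the two added loads
      return rbx->global_receiver;        // final load
    }

    int main() {
      GlobalObject g;
      Context c{&g};
      g.global_context = &c;
      g.global_receiver = &g;
      return GlobalReceiver(&c) == &g ? 0 : 1;
    }
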
@@ -318,47 +320,47 @@
   __ push(Operand(rbp, kArgumentsOffset));
   __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
 
-  if (FLAG_check_stack) {
-    // We need to catch preemptions right here, otherwise an unlucky preemption
-    // could show up as a failed apply.
-    Label retry_preemption;
-    Label no_preemption;
-    __ bind(&retry_preemption);
-    ExternalReference stack_guard_limit =
-        ExternalReference::address_of_stack_guard_limit();
-    __ movq(kScratchRegister, stack_guard_limit);
-    __ movq(rcx, rsp);
-    __ subq(rcx, Operand(kScratchRegister, 0));
-    // rcx contains the difference between the stack limit and the stack top.
-    // We use it below to check that there is enough room for the arguments.
-    __ j(above, &no_preemption);
+  // Check the stack for overflow or a break request.
+  // We need to catch preemptions right here; otherwise an unlucky preemption
+  // could show up as a failed apply.
+  Label retry_preemption;
+  Label no_preemption;
+  __ bind(&retry_preemption);
+  ExternalReference stack_guard_limit =
+      ExternalReference::address_of_stack_guard_limit();
+  __ movq(kScratchRegister, stack_guard_limit);
+  __ movq(rcx, rsp);
+  __ subq(rcx, Operand(kScratchRegister, 0));
+  // rcx contains the difference between the stack limit and the stack top.
+  // We use it below to check that there is enough room for the arguments.
+  __ j(above, &no_preemption);
 
-    // Preemption!
-    // Because runtime functions always remove the receiver from the stack, we
-    // have to fake one to avoid underflowing the stack.
-    __ push(rax);
-    __ Push(Smi::FromInt(0));
+  // Preemption!
+  // Because runtime functions always remove the receiver from the stack, we
+  // have to fake one to avoid underflowing the stack.
+  __ push(rax);
+  __ Push(Smi::FromInt(0));
 
-    // Do call to runtime routine.
-    __ CallRuntime(Runtime::kStackGuard, 1);
-    __ pop(rax);
-    __ jmp(&retry_preemption);
+  // Do call to runtime routine.
+  __ CallRuntime(Runtime::kStackGuard, 1);
+  __ pop(rax);
+  __ jmp(&retry_preemption);
 
-    __ bind(&no_preemption);
+  __ bind(&no_preemption);
 
-    Label okay;
-    // Make rdx the space we need for the array when it is unrolled onto the
-    // stack.
-    __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
-    __ cmpq(rcx, rdx);
-    __ j(greater, &okay);
+  Label okay;
+  // Make rdx the space we need for the array when it is unrolled onto the
+  // stack.
+  __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
+  __ cmpq(rcx, rdx);
+  __ j(greater, &okay);
 
-    // Too bad: Out of stack space.
-    __ push(Operand(rbp, kFunctionOffset));
-    __ push(rax);
-    __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
-    __ bind(&okay);
-  }
+  // Too bad: Out of stack space.
+  __ push(Operand(rbp, kFunctionOffset));
+  __ push(rax);
+  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+  __ bind(&okay);
+  // End of stack check.
 
   // Push current index and limit.
   const int kLimitOffset =
@@ -400,6 +402,8 @@
   const int kGlobalOffset =
       Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
   __ movq(rbx, FieldOperand(rsi, kGlobalOffset));
+  __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalContextOffset));
+  __ movq(rbx, FieldOperand(rbx, kGlobalOffset));
   __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
 
   // Push the receiver.
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index d72257e..877cfdf 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -240,13 +240,6 @@
   // operands, jumps to the non_float label otherwise.
   static void CheckNumberOperands(MacroAssembler* masm,
                                   Label* non_float);
-
-  // Allocate a heap number in new space with undefined value.
-  // Returns tagged pointer in result, or jumps to need_gc if new space is full.
-  static void AllocateHeapNumber(MacroAssembler* masm,
-                                 Label* need_gc,
-                                 Register scratch,
-                                 Register result);
 };
 
 
@@ -277,8 +270,8 @@
   frame_->SyncRange(0, frame_->element_count() - 1);
 
   __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT);
+  frame_->EmitPush(rsi);  // The context is the first argument.
   frame_->EmitPush(kScratchRegister);
-  frame_->EmitPush(rsi);  // The context is the second argument.
   frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0));
   Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
   // Return value is ignored.
@@ -859,12 +852,10 @@
 
 
 void CodeGenerator::CheckStack() {
-  if (FLAG_check_stack) {
-    DeferredStackCheck* deferred = new DeferredStackCheck;
-    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
-    deferred->Branch(below);
-    deferred->BindExit();
-  }
+  DeferredStackCheck* deferred = new DeferredStackCheck;
+  __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+  deferred->Branch(below);
+  deferred->BindExit();
 }
 
 
@@ -2184,12 +2175,10 @@
   ASSERT(boilerplate->IsBoilerplate());
   frame_->SyncRange(0, frame_->element_count() - 1);
 
-  // Push the boilerplate on the stack.
-  __ movq(kScratchRegister, boilerplate, RelocInfo::EMBEDDED_OBJECT);
-  frame_->EmitPush(kScratchRegister);
-
   // Create a new closure.
   frame_->EmitPush(rsi);
+  __ movq(kScratchRegister, boilerplate, RelocInfo::EMBEDDED_OBJECT);
+  frame_->EmitPush(kScratchRegister);
   Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
   frame_->Push(&result);
 }
@@ -3975,10 +3964,9 @@
   // Allocate heap number for result if possible.
   Result scratch = allocator()->Allocate();
   Result heap_number = allocator()->Allocate();
-  FloatingPointHelper::AllocateHeapNumber(masm_,
-                                          call_runtime.entry_label(),
-                                          scratch.reg(),
-                                          heap_number.reg());
+  __ AllocateHeapNumber(heap_number.reg(),
+                        scratch.reg(),
+                        call_runtime.entry_label());
   scratch.Unuse();
 
   // Store the result in the allocated heap number.
@@ -4249,18 +4237,6 @@
 }
 
 
-class ToBooleanStub: public CodeStub {
- public:
-  ToBooleanStub() { }
-
-  void Generate(MacroAssembler* masm);
-
- private:
-  Major MajorKey() { return ToBoolean; }
-  int MinorKey() { return 0; }
-};
-
-
 // ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
 // convert it to a boolean in the condition code register or jump to
 // 'false_target'/'true_target' as appropriate.
@@ -5079,10 +5055,8 @@
 
 
 void DeferredInlineBinaryOperation::Generate() {
-  __ push(left_);
-  __ push(right_);
-  GenericBinaryOpStub stub(op_, mode_, SMI_CODE_INLINED);
-  __ CallStub(&stub);
+  GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
+  stub.GenerateCall(masm_, left_, right_);
   if (!dst_.is(rax)) __ movq(dst_, rax);
 }
 
@@ -5111,16 +5085,16 @@
       // Bit operations always assume they likely operate on Smis. Still only
       // generate the inline Smi check code if this operation is part of a loop.
       flags = (loop_nesting() > 0)
-              ? SMI_CODE_INLINED
-              : SMI_CODE_IN_STUB;
+              ? NO_SMI_CODE_IN_STUB
+              : NO_GENERIC_BINARY_FLAGS;
       break;
 
     default:
       // By default only inline the Smi check code for likely smis if this
       // operation is part of a loop.
       flags = ((loop_nesting() > 0) && type->IsLikelySmi())
-              ? SMI_CODE_INLINED
-              : SMI_CODE_IN_STUB;
+              ? NO_SMI_CODE_IN_STUB
+              : NO_GENERIC_BINARY_FLAGS;
       break;
   }
 
@@ -5179,7 +5153,7 @@
     return;
   }
 
-  if (flags == SMI_CODE_INLINED && !generate_no_smi_code) {
+  if ((flags & NO_SMI_CODE_IN_STUB) != 0 && !generate_no_smi_code) {
     LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
   } else {
     frame_->Push(&left);
@@ -5188,7 +5162,7 @@
     // that does not check for the fast smi case.
     // The same stub is used for NO_SMI_CODE and SMI_CODE_INLINED.
     if (generate_no_smi_code) {
-      flags = SMI_CODE_INLINED;
+      flags = NO_SMI_CODE_IN_STUB;
     }
     GenericBinaryOpStub stub(op, overwrite_mode, flags);
     Result answer = frame_->CallStub(&stub, 2);
@@ -5243,41 +5217,33 @@
 
 
 void DeferredInlineSmiAdd::Generate() {
-  __ push(dst_);
-  __ Push(value_);
-  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
-  __ CallStub(&igostub);
+  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+  igostub.GenerateCall(masm_, dst_, value_);
   if (!dst_.is(rax)) __ movq(dst_, rax);
 }
 
 
 void DeferredInlineSmiAddReversed::Generate() {
-  __ Push(value_);
-  __ push(dst_);
-  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
-  __ CallStub(&igostub);
+  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+  igostub.GenerateCall(masm_, value_, dst_);
   if (!dst_.is(rax)) __ movq(dst_, rax);
 }
 
 
 void DeferredInlineSmiSub::Generate() {
-  __ push(dst_);
-  __ Push(value_);
-  GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
-  __ CallStub(&igostub);
+  GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+  igostub.GenerateCall(masm_, dst_, value_);
   if (!dst_.is(rax)) __ movq(dst_, rax);
 }
 
 
 void DeferredInlineSmiOperation::Generate() {
-  __ push(src_);
-  __ Push(value_);
   // For mod we don't generate all the Smi code inline.
   GenericBinaryOpStub stub(
       op_,
       overwrite_mode_,
-      (op_ == Token::MOD) ? SMI_CODE_IN_STUB : SMI_CODE_INLINED);
-  __ CallStub(&stub);
+      (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
+  stub.GenerateCall(masm_, src_, value_);
   if (!dst_.is(rax)) __ movq(dst_, rax);
 }
 
@@ -6214,16 +6180,11 @@
   // These three cases set C3 when compared to zero in the FPU.
   __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
   __ j(not_equal, &true_result);
-  // TODO(x64): Don't use fp stack, use MMX registers?
   __ fldz();  // Load zero onto fp stack
   // Load heap-number double value onto fp stack
   __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
-  __ fucompp();  // Compare and pop both values.
-  __ movq(kScratchRegister, rax);
-  __ fnstsw_ax();  // Store fp status word in ax, no checking for exceptions.
-  __ testl(rax, Immediate(0x4000));  // Test FP condition flag C3, bit 16.
-  __ movq(rax, kScratchRegister);
-  __ j(not_zero, &false_result);
+  __ FCmp();
+  __ j(zero, &false_result);
   // Fall through to |true_result|.
 
   // Return 1/0 for true/false in rax.
@@ -6363,7 +6324,7 @@
   if (overwrite_) {
     __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
   } else {
-    FloatingPointHelper::AllocateHeapNumber(masm, &slow, rbx, rcx);
+    __ AllocateHeapNumber(rcx, rbx, &slow);
     // rcx: allocated 'empty' number
     __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
     __ movq(rax, rcx);
@@ -6406,19 +6367,18 @@
       // not NaN.
       // The representation of NaN values has all exponent bits (52..62) set,
       // and not all mantissa bits (0..51) clear.
-      // Read double representation into rax.
-      __ movq(rbx, V8_UINT64_C(0x7ff0000000000000), RelocInfo::NONE);
-      __ movq(rax, FieldOperand(rdx, HeapNumber::kValueOffset));
-      // Test that exponent bits are all set.
-      __ or_(rbx, rax);
-      __ cmpq(rbx, rax);
-      __ j(not_equal, &return_equal);
-      // Shift out flag and all exponent bits, retaining only mantissa.
-      __ shl(rax, Immediate(12));
-      // If all bits in the mantissa are zero the number is Infinity, and
-      // we return zero.  Otherwise it is a NaN, and we return non-zero.
-      // We cannot just return rax because only eax is tested on return.
-      __ setcc(not_zero, rax);
+      // We only allow QNaNs, which have bit 51 set (this also rules out
+      // the value being Infinity).
+
+      // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
+      // all bits in the mask are set. We only need to check the word
+      // that contains the exponent and high bit of the mantissa.
+      ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
+      __ movl(rdx, FieldOperand(rdx, HeapNumber::kExponentOffset));
+      __ xorl(rax, rax);
+      __ addl(rdx, rdx);  // Shift value and mask so mask applies to top bits.
+      __ cmpl(rdx, Immediate(kQuietNaNHighBitsMask << 1));
+      __ setcc(above_equal, rax);
       __ ret(0);
 
       __ bind(&not_identical);
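
Note: the rewritten NaN check avoids the FPU entirely. A quiet NaN has all eleven exponent bits and mantissa bit 51 set, all of which live in the high 32-bit word of the double. Shifting that word left by one (the addl) discards the sign bit, after which an unsigned comparison against the shifted mask is true exactly when every masked bit is set — and Infinity, with bit 51 clear, falls below the threshold. A C model of the test, with an assumed value for V8's kQuietNaNHighBitsMask:

    #include <cstdint>
    #include <cstring>
    #include <limits>

    const uint32_t kQuietNaNHighBitsMask = 0xfffu << (51 - 32);  // assumed value

    bool IsQuietNaN(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      uint32_t high = static_cast<uint32_t>(bits >> 32);
      // Shift out the sign bit, then require all masked bits set.
      return (high << 1) >= (kQuietNaNHighBitsMask << 1);
    }

    int main() {
      double qnan = std::numeric_limits<double>::quiet_NaN();
      double inf = std::numeric_limits<double>::infinity();
      return (IsQuietNaN(qnan) && !IsQuietNaN(inf)) ? 0 : 1;
    }
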
@@ -6811,7 +6771,7 @@
                               Label* throw_normal_exception,
                               Label* throw_termination_exception,
                               Label* throw_out_of_memory_exception,
-                              StackFrame::Type frame_type,
+                              ExitFrame::Mode mode,
                               bool do_gc,
                               bool always_allocate_scope) {
   // rax: result parameter for PerformGC, if any.
@@ -6877,13 +6837,24 @@
   // Check for failure result.
   Label failure_returned;
   ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
+#ifdef _WIN64
+  // If return value is on the stack, pop it to registers.
+  if (result_size_ > 1) {
+    ASSERT_EQ(2, result_size_);
+    // Read the result values stored on the stack. The result is stored
+    // above the four argument mirror slots and the two
+    // Arguments object slots.
+    __ movq(rax, Operand(rsp, 6 * kPointerSize));
+    __ movq(rdx, Operand(rsp, 7 * kPointerSize));
+  }
+#endif
   __ lea(rcx, Operand(rax, 1));
   // Lower 2 bits of rcx are 0 iff rax has failure tag.
   __ testl(rcx, Immediate(kFailureTagMask));
   __ j(zero, &failure_returned);
 
   // Exit the JavaScript to C++ exit frame.
-  __ LeaveExitFrame(frame_type, result_size_);
+  __ LeaveExitFrame(mode, result_size_);
   __ ret(0);
 
   // Handling of failure.
@@ -7013,12 +6984,12 @@
   // this by performing a garbage collection and retrying the
   // builtin once.
 
-  StackFrame::Type frame_type = is_debug_break ?
-      StackFrame::EXIT_DEBUG :
-      StackFrame::EXIT;
+  ExitFrame::Mode mode = is_debug_break ?
+      ExitFrame::MODE_DEBUG :
+      ExitFrame::MODE_NORMAL;
 
   // Enter the exit frame that transitions from JavaScript to C++.
-  __ EnterExitFrame(frame_type, result_size_);
+  __ EnterExitFrame(mode, result_size_);
 
   // rax: Holds the context at this point, but should not be used.
   //      On entry to code generated by GenerateCore, it must hold
@@ -7041,7 +7012,7 @@
                &throw_normal_exception,
                &throw_termination_exception,
                &throw_out_of_memory_exception,
-               frame_type,
+               mode,
                false,
                false);
 
@@ -7050,7 +7021,7 @@
                &throw_normal_exception,
                &throw_termination_exception,
                &throw_out_of_memory_exception,
-               frame_type,
+               mode,
                true,
                false);
 
@@ -7061,7 +7032,7 @@
                &throw_normal_exception,
                &throw_termination_exception,
                &throw_out_of_memory_exception,
-               frame_type,
+               mode,
                true,
                true);
 
@@ -7076,6 +7047,11 @@
 }
 
 
+void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
+  UNREACHABLE();
+}
+
+
 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   Label invoke, exit;
 #ifdef ENABLE_LOGGING_AND_PROFILING
@@ -7210,24 +7186,6 @@
 }
 
 
-void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
-                                             Label* need_gc,
-                                             Register scratch,
-                                             Register result) {
-  // Allocate heap number in new space.
-  __ AllocateInNewSpace(HeapNumber::kSize,
-                        result,
-                        scratch,
-                        no_reg,
-                        need_gc,
-                        TAG_OBJECT);
-
-  // Set the map and tag the result.
-  __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
-  __ movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
-}
-
-
 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
                                            Register number) {
   Label load_smi, done;
@@ -7376,6 +7334,127 @@
 }
 
 
+void GenericBinaryOpStub::GenerateCall(
+    MacroAssembler* masm,
+    Register left,
+    Register right) {
+  if (!ArgsInRegistersSupported()) {
+    // Pass arguments on the stack.
+    __ push(left);
+    __ push(right);
+  } else {
+    // The calling convention with registers is left in rdx and right in rax.
+    Register left_arg = rdx;
+    Register right_arg = rax;
+    if (!(left.is(left_arg) && right.is(right_arg))) {
+      if (left.is(right_arg) && right.is(left_arg)) {
+        if (IsOperationCommutative()) {
+          SetArgsReversed();
+        } else {
+          __ xchg(left, right);
+        }
+      } else if (left.is(left_arg)) {
+        __ movq(right_arg, right);
+      } else if (left.is(right_arg)) {
+        if (IsOperationCommutative()) {
+          __ movq(left_arg, right);
+          SetArgsReversed();
+        } else {
+          // Order of moves is important to avoid destroying the left argument.
+          __ movq(left_arg, left);
+          __ movq(right_arg, right);
+        }
+      } else if (right.is(left_arg)) {
+        if (IsOperationCommutative()) {
+          __ movq(right_arg, left);
+          SetArgsReversed();
+        } else {
+          // Order of moves is important to avoid destroying the right argument.
+          __ movq(right_arg, right);
+          __ movq(left_arg, left);
+        }
+      } else if (right.is(right_arg)) {
+        __ movq(left_arg, left);
+      } else {
+        // Order of moves is not important.
+        __ movq(left_arg, left);
+        __ movq(right_arg, right);
+      }
+    }
+
+    // Update flags to indicate that arguments are in registers.
+    SetArgsInRegisters();
+    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+  }
+
+  // Call the stub.
+  __ CallStub(this);
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+    MacroAssembler* masm,
+    Register left,
+    Smi* right) {
+  if (!ArgsInRegistersSupported()) {
+    // Pass arguments on the stack.
+    __ push(left);
+    __ Push(right);
+  } else {
+    // The calling convention with registers is left in rdx and right in rax.
+    Register left_arg = rdx;
+    Register right_arg = rax;
+    if (left.is(left_arg)) {
+      __ Move(right_arg, right);
+    } else if (left.is(right_arg) && IsOperationCommutative()) {
+      __ Move(left_arg, right);
+      SetArgsReversed();
+    } else {
+      __ movq(left_arg, left);
+      __ Move(right_arg, right);
+    }
+
+    // Update flags to indicate that arguments are in registers.
+    SetArgsInRegisters();
+    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+  }
+
+  // Call the stub.
+  __ CallStub(this);
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+    MacroAssembler* masm,
+    Smi* left,
+    Register right) {
+  if (!ArgsInRegistersSupported()) {
+    // Pass arguments on the stack.
+    __ Push(left);
+    __ push(right);
+  } else {
+    // The calling convention with registers is left in rdx and right in rax.
+    Register left_arg = rdx;
+    Register right_arg = rax;
+    if (right.is(right_arg)) {
+      __ Move(left_arg, left);
+    } else if (right.is(left_arg) && IsOperationCommutative()) {
+      __ Move(right_arg, left);
+      SetArgsReversed();
+    } else {
+      __ Move(left_arg, left);
+      __ movq(right_arg, right);
+    }
+    // Update flags to indicate that arguments are in registers.
+    SetArgsInRegisters();
+    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+  }
+
+  // Call the stub.
+  __ CallStub(this);
+}
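
Note: the three GenerateCall overloads implement the stub's register calling convention (left in rdx, right in rax), and the case analysis above is all about move ordering. When the operands already sit crosswise in the target registers, loading one argument first would clobber the other, so either the moves are sequenced carefully, xchg is used, or — for commutative operations — the stub is simply told the arguments are reversed. A toy model of the hazard, with ints standing in for registers:

    #include <cassert>

    int main() {
      int rax = 1;    // currently holds the LEFT operand
      int other = 2;  // holds the right operand
      // Loading the right argument into rax first would destroy the left
      // operand; moving left out to rdx first is the safe order.
      int rdx = rax;
      rax = other;
      assert(rdx == 1 && rax == 2);
      return 0;
    }
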
+
+
 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
   // Perform fast-case smi code for the operation (rax <op> rbx) and
   // leave result in register rax.
@@ -7448,22 +7527,21 @@
 
 void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
   Label call_runtime;
-  if (flags_ == SMI_CODE_IN_STUB) {
+  if (HasSmiCodeInStub()) {
     // The fast case smi code wasn't inlined in the stub caller
     // code. Generate it here to speed up common operations.
     Label slow;
     __ movq(rbx, Operand(rsp, 1 * kPointerSize));  // get y
     __ movq(rax, Operand(rsp, 2 * kPointerSize));  // get x
     GenerateSmiCode(masm, &slow);
-    __ ret(2 * kPointerSize);  // remove both operands
+    GenerateReturn(masm);
 
     // Too bad. The fast case smi code didn't succeed.
     __ bind(&slow);
   }
 
-  // Setup registers.
-  __ movq(rax, Operand(rsp, 1 * kPointerSize));  // get y
-  __ movq(rdx, Operand(rsp, 2 * kPointerSize));  // get x
+  // Make sure the arguments are in rdx and rax.
+  GenerateLoadArguments(masm);
 
   // Floating point case.
   switch (op_) {
@@ -7487,10 +7565,10 @@
           __ JumpIfNotSmi(rax, &skip_allocation);
           // Fall through!
         case NO_OVERWRITE:
-          FloatingPointHelper::AllocateHeapNumber(masm,
-                                                  &call_runtime,
-                                                  rcx,
-                                                  rax);
+          // Allocate a heap number for the result. Keep rax and rdx intact
+          // for the possible runtime call.
+          __ AllocateHeapNumber(rbx, rcx, &call_runtime);
+          __ movq(rax, rbx);
           __ bind(&skip_allocation);
           break;
         default: UNREACHABLE();
@@ -7506,7 +7584,7 @@
         default: UNREACHABLE();
       }
       __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm4);
-      __ ret(2 * kPointerSize);
+      GenerateReturn(masm);
     }
     case Token::MOD: {
       // For MOD we go directly to runtime in the non-smi case.
@@ -7541,31 +7619,16 @@
         // Check if right operand is int32.
         __ fist_s(Operand(rsp, 0 * kPointerSize));
         __ fild_s(Operand(rsp, 0 * kPointerSize));
-        __ fucompp();
-        __ fnstsw_ax();
-        if (CpuFeatures::IsSupported(CpuFeatures::SAHF)) {
-          __ sahf();
-          __ j(not_zero, &operand_conversion_failure);
-          __ j(parity_even, &operand_conversion_failure);
-        } else {
-          __ and_(rax, Immediate(0x4400));
-          __ cmpl(rax, Immediate(0x4000));
-          __ j(not_zero, &operand_conversion_failure);
-        }
+        __ FCmp();
+        __ j(not_zero, &operand_conversion_failure);
+        __ j(parity_even, &operand_conversion_failure);
+
         // Check if left operand is int32.
         __ fist_s(Operand(rsp, 1 * kPointerSize));
         __ fild_s(Operand(rsp, 1 * kPointerSize));
-        __ fucompp();
-        __ fnstsw_ax();
-        if (CpuFeatures::IsSupported(CpuFeatures::SAHF)) {
-          __ sahf();
-          __ j(not_zero, &operand_conversion_failure);
-          __ j(parity_even, &operand_conversion_failure);
-        } else {
-          __ and_(rax, Immediate(0x4400));
-          __ cmpl(rax, Immediate(0x4000));
-          __ j(not_zero, &operand_conversion_failure);
-        }
+        __ FCmp();
+        __ j(not_zero, &operand_conversion_failure);
+        __ j(parity_even, &operand_conversion_failure);
       }
 
       // Get int32 operands and perform bitop.
@@ -7589,7 +7652,7 @@
       __ JumpIfNotValidSmiValue(rax, &non_smi_result);
       // Tag smi result, if possible, and return.
       __ Integer32ToSmi(rax, rax);
-      __ ret(2 * kPointerSize);
+      GenerateReturn(masm);
 
       // All ops except SHR return a signed int32 that we load in a HeapNumber.
       if (op_ != Token::SHR && non_smi_result.is_linked()) {
@@ -7606,8 +7669,7 @@
             __ JumpIfNotSmi(rax, &skip_allocation);
             // Fall through!
           case NO_OVERWRITE:
-            FloatingPointHelper::AllocateHeapNumber(masm, &call_runtime,
-                                                    rcx, rax);
+            __ AllocateHeapNumber(rax, rcx, &call_runtime);
             __ bind(&skip_allocation);
             break;
           default: UNREACHABLE();
@@ -7616,7 +7678,7 @@
         __ movq(Operand(rsp, 1 * kPointerSize), rbx);
         __ fild_s(Operand(rsp, 1 * kPointerSize));
         __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
-        __ ret(2 * kPointerSize);
+        GenerateReturn(masm);
       }
 
       // Clear the FPU exception flag and reset the stack before calling
@@ -7647,8 +7709,20 @@
   }
 
   // If all else fails, use the runtime system to get the correct
-  // result.
+  // result. If arguments were passed in registers, place them on the
+  // stack in the correct order below the return address.
   __ bind(&call_runtime);
+  if (HasArgumentsInRegisters()) {
+    __ pop(rcx);
+    if (HasArgumentsReversed()) {
+      __ push(rax);
+      __ push(rdx);
+    } else {
+      __ push(rdx);
+      __ push(rax);
+    }
+    __ push(rcx);
+  }
   switch (op_) {
     case Token::ADD:
       __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
@@ -7689,12 +7763,124 @@
 }
 
 
+void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
+  // If arguments are not passed in registers read them from the stack.
+  if (!HasArgumentsInRegisters()) {
+    __ movq(rax, Operand(rsp, 1 * kPointerSize));
+    __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+  }
+}
+
+
+void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
+  // If arguments are not passed in registers remove them from the stack before
+  // returning.
+  if (!HasArgumentsInRegisters()) {
+    __ ret(2 * kPointerSize);  // Remove both operands
+  } else {
+    __ ret(0);
+  }
+}
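
Note: GenerateReturn relies on the two forms of ret. The ret imm16 form pops imm16 extra bytes after the return address, so the stub removes its two stack-passed operands on return, while the register path has nothing on the stack to clean up. Encodings for reference:

    // ret (0xC3) vs. ret imm16 (0xC2 iw); 0x0010 = 16 = 2 * kPointerSize on x64.
    const unsigned char ret_plain[] = {0xC3};
    const unsigned char ret_pop16[] = {0xC2, 0x10, 0x00};

    int main() { return ret_pop16[1] == 16 ? 0 : 1; }
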
+
+
 int CompareStub::MinorKey() {
   // Encode the two parameters in a unique 16 bit value.
   ASSERT(static_cast<unsigned>(cc_) < (1 << 15));
   return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0);
 }
 
+#undef __
+
+#define __ masm.
+
+#ifdef _WIN64
+typedef double (*ModuloFunction)(double, double);
+// Define custom fmod implementation.
+ModuloFunction CreateModuloFunction() {
+  size_t actual_size;
+  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                                 &actual_size,
+                                                 true));
+  CHECK(buffer);
+  Assembler masm(buffer, actual_size);
+  // Generated code is put into a fixed, unmovable buffer, and not into
+  // the V8 heap. We can't, and don't, refer to any relocatable addresses
+  // (e.g. the JavaScript NaN object).
+
+  // The Windows 64 ABI passes double arguments in xmm0 and xmm1 and
+  // returns the result in xmm0.
+  // Argument backing space is allocated on the stack above
+  // the return address.
+
+  // Compute x mod y.
+  // Load y and x (use argument backing store as temporary storage).
+  __ movsd(Operand(rsp, kPointerSize * 2), xmm1);
+  __ movsd(Operand(rsp, kPointerSize), xmm0);
+  __ fld_d(Operand(rsp, kPointerSize * 2));
+  __ fld_d(Operand(rsp, kPointerSize));
+
+  // Clear exception flags before operation.
+  {
+    Label no_exceptions;
+    __ fwait();
+    __ fnstsw_ax();
+    // Clear if the Invalid Operation or Zero Divide exceptions are set.
+    __ testb(rax, Immediate(5));
+    __ j(zero, &no_exceptions);
+    __ fnclex();
+    __ bind(&no_exceptions);
+  }
+
+  // Compute st(0) % st(1)
+  {
+    Label partial_remainder_loop;
+    __ bind(&partial_remainder_loop);
+    __ fprem();
+    __ fwait();
+    __ fnstsw_ax();
+    __ testl(rax, Immediate(0x400 /* C2 */));
+    // If C2 is set, the computation has only produced a partial result.
+    // Loop to continue the reduction.
+    __ j(not_zero, &partial_remainder_loop);
+  }
+
+  Label valid_result;
+  Label return_result;
+  // If the Invalid Operation or Zero Divide exceptions are set,
+  // return NaN.
+  __ testb(rax, Immediate(5));
+  __ j(zero, &valid_result);
+  __ fstp(0);  // Drop result in st(0).
+  int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
+  __ movq(rcx, kNaNValue, RelocInfo::NONE);
+  __ movq(Operand(rsp, kPointerSize), rcx);
+  __ movsd(xmm0, Operand(rsp, kPointerSize));
+  __ jmp(&return_result);
+
+  // If result is valid, return that.
+  __ bind(&valid_result);
+  __ fstp_d(Operand(rsp, kPointerSize));
+  __ movsd(xmm0, Operand(rsp, kPointerSize));
+
+  // Clean up the FPU stack and exceptions, and return the result in xmm0.
+  __ bind(&return_result);
+  __ fstp(0);  // Unload y.
+
+  Label clear_exceptions;
+  __ testb(rax, Immediate(0x3f /* Any Exception */));
+  __ j(not_zero, &clear_exceptions);
+  __ ret(0);
+  __ bind(&clear_exceptions);
+  __ fnclex();
+  __ ret(0);
+
+  CodeDesc desc;
+  masm.GetCode(&desc);
+  // Return a pointer to the generated code, callable from C++.
+  return FUNCTION_CAST<ModuloFunction>(buffer);
+}
+
+#endif
 
 #undef __
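
Note: CreateModuloFunction hand-assembles an fmod routine at startup, likely because 64-bit MSVC has no inline assembly and the x87 fprem instruction is the convenient way to get the required remainder semantics. fprem only produces a partial remainder and raises the C2 status flag while further reduction is needed, hence the loop above; the final value agrees with C's fmod. A reference model of what the generated code computes:

    #include <cmath>
    #include <cstdio>

    // The generated stub is invoked through the ModuloFunction pointer; its
    // expected result is ordinary floating-point remainder.
    double Modulo(double x, double y) { return std::fmod(x, y); }

    int main() {
      std::printf("%g\n", Modulo(10.5, 3.0));  // prints 1.5
      return 0;
    }
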
 
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 5fa6583..0721d52 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -398,7 +398,7 @@
   void LoadReference(Reference* ref);
   void UnloadReference(Reference* ref);
 
-  Operand ContextOperand(Register context, int index) const {
+  static Operand ContextOperand(Register context, int index) {
     return Operand(context, Context::SlotOffset(index));
   }
 
@@ -409,7 +409,7 @@
                                             JumpTarget* slow);
 
   // Expressions
-  Operand GlobalObject() const {
+  static Operand GlobalObject() {
     return ContextOperand(rsi, Context::GLOBAL_INDEX);
   }
 
@@ -511,10 +511,11 @@
   static bool PatchInlineRuntimeEntry(Handle<String> name,
                                       const InlineRuntimeLUT& new_entry,
                                       InlineRuntimeLUT* old_entry);
+  static Handle<Code> ComputeLazyCompile(int argc);
   Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
   void ProcessDeclarations(ZoneList<Declaration*>* declarations);
 
-  Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
+  static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
 
   // Declare global variables and functions in the given array of
   // name/value pairs.
@@ -616,6 +617,8 @@
   friend class JumpTarget;
   friend class Reference;
   friend class Result;
+  friend class FastCodeGenerator;
+  friend class CodeGenSelector;
 
   friend class CodeGeneratorPatcher;  // Used in test-log-stack-tracer.cc
 
@@ -632,11 +635,22 @@
 // which is declared in code-stubs.h.
 
 
-// Flag that indicates whether or not the code that handles smi arguments
-// should be placed in the stub, inlined, or omitted entirely.
+class ToBooleanStub: public CodeStub {
+ public:
+  ToBooleanStub() { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Major MajorKey() { return ToBoolean; }
+  int MinorKey() { return 0; }
+};
+
+
+// Flags that indicate how to generate code for GenericBinaryOpStub.
 enum GenericBinaryFlags {
-  SMI_CODE_IN_STUB,
-  SMI_CODE_INLINED
+  NO_GENERIC_BINARY_FLAGS = 0,
+  NO_SMI_CODE_IN_STUB = 1 << 0  // Omit smi code in stub.
 };
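
Note: GenericBinaryFlags changes from a two-value exclusive enum to a bit set (with a single bit so far), which is why the call sites earlier in this patch now mask — (flags & NO_SMI_CODE_IN_STUB) != 0 — rather than compare for equality. The pattern in miniature:

    enum GenericBinaryFlags {
      NO_GENERIC_BINARY_FLAGS = 0,
      NO_SMI_CODE_IN_STUB = 1 << 0  // independent bit, tested by masking
    };

    int main() {
      int flags = NO_SMI_CODE_IN_STUB;
      bool smi_code_in_stub = (flags & NO_SMI_CODE_IN_STUB) == 0;
      return smi_code_in_stub ? 1 : 0;
    }
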
 
 
@@ -645,45 +659,82 @@
   GenericBinaryOpStub(Token::Value op,
                       OverwriteMode mode,
                       GenericBinaryFlags flags)
-      : op_(op), mode_(mode), flags_(flags) {
+      : op_(op),
+        mode_(mode),
+        flags_(flags),
+        args_in_registers_(false),
+        args_reversed_(false) {
     use_sse3_ = CpuFeatures::IsSupported(CpuFeatures::SSE3);
     ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
   }
 
-  void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+  // Generate code to call the stub with the supplied arguments. This adds
+  // code at the call site that puts the arguments either in registers or on
+  // the stack, followed by the actual call.
+  void GenerateCall(MacroAssembler* masm, Register left, Register right);
+  void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
+  void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
 
  private:
   Token::Value op_;
   OverwriteMode mode_;
   GenericBinaryFlags flags_;
+  bool args_in_registers_;  // Arguments passed in registers, not on the stack.
+  bool args_reversed_;  // Left and right arguments are swapped.
   bool use_sse3_;
 
   const char* GetName();
 
 #ifdef DEBUG
   void Print() {
-    PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
+    PrintF("GenericBinaryOpStub (op %s), "
+           "(mode %d, flags %d, registers %d, reversed %d)\n",
            Token::String(op_),
            static_cast<int>(mode_),
-           static_cast<int>(flags_));
+           static_cast<int>(flags_),
+           static_cast<int>(args_in_registers_),
+           static_cast<int>(args_reversed_));
   }
 #endif
 
-  // Minor key encoding in 16 bits FSOOOOOOOOOOOOMM.
+  // Minor key encoding in 16 bits FRASOOOOOOOOOOMM.
   class ModeBits: public BitField<OverwriteMode, 0, 2> {};
-  class OpBits: public BitField<Token::Value, 2, 12> {};
-  class SSE3Bits: public BitField<bool, 14, 1> {};
+  class OpBits: public BitField<Token::Value, 2, 10> {};
+  class SSE3Bits: public BitField<bool, 12, 1> {};
+  class ArgsInRegistersBits: public BitField<bool, 13, 1> {};
+  class ArgsReversedBits: public BitField<bool, 14, 1> {};
   class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
 
   Major MajorKey() { return GenericBinaryOp; }
   int MinorKey() {
     // Encode the parameters in a unique 16 bit value.
     return OpBits::encode(op_)
-        | ModeBits::encode(mode_)
-        | FlagBits::encode(flags_)
-        | SSE3Bits::encode(use_sse3_);
+           | ModeBits::encode(mode_)
+           | FlagBits::encode(flags_)
+           | SSE3Bits::encode(use_sse3_)
+           | ArgsInRegistersBits::encode(args_in_registers_)
+           | ArgsReversedBits::encode(args_reversed_);
   }
+
   void Generate(MacroAssembler* masm);
+  void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+  void GenerateLoadArguments(MacroAssembler* masm);
+  void GenerateReturn(MacroAssembler* masm);
+
+  bool ArgsInRegistersSupported() {
+    return ((op_ == Token::ADD) || (op_ == Token::SUB)
+             || (op_ == Token::MUL) || (op_ == Token::DIV))
+            && flags_ != NO_SMI_CODE_IN_STUB;
+  }
+  bool IsOperationCommutative() {
+    return (op_ == Token::ADD) || (op_ == Token::MUL);
+  }
+
+  void SetArgsInRegisters() { args_in_registers_ = true; }
+  void SetArgsReversed() { args_reversed_ = true; }
+  bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
+  bool HasArgumentsInRegisters() { return args_in_registers_; }
+  bool HasArgumentsReversed() { return args_reversed_; }
 };
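
Note: MinorKey() packs all five stub parameters into one 16-bit key using V8's BitField helpers; the comment's FRASOOOOOOOOOOMM picture reads flag, reversed, args-in-registers, SSE3, ten op bits and two mode bits from high to low. A stripped-down sketch of the encoding (the real BitField also offers decoding and validity checks):

    #include <cstdint>

    template <class T, int shift, int size>
    struct BitField {
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
    };

    using ModeBits = BitField<int, 0, 2>;    // MM
    using OpBits   = BitField<int, 2, 10>;   // OOOOOOOOOO
    using SSE3Bits = BitField<bool, 12, 1>;  // S
    using ArgsBits = BitField<bool, 13, 1>;  // A
    using RevBits  = BitField<bool, 14, 1>;  // R
    using FlagBits = BitField<int, 15, 1>;   // F

    int main() {
      uint32_t key = ModeBits::encode(1) | OpBits::encode(5) |
                     SSE3Bits::encode(true) | FlagBits::encode(1);
      return key == (1u | (5u << 2) | (1u << 12) | (1u << 15)) ? 0 : 1;
    }
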
 
 
diff --git a/src/x64/cpu-x64.cc b/src/x64/cpu-x64.cc
index 8df0ab7..cc20c58 100644
--- a/src/x64/cpu-x64.cc
+++ b/src/x64/cpu-x64.cc
@@ -27,6 +27,10 @@
 
 // CPU specific code for x64 independent of OS goes here.
 
+#ifdef __GNUC__
+#include "third_party/valgrind/valgrind.h"
+#endif
+
 #include "v8.h"
 
 #include "cpu.h"
@@ -49,6 +53,15 @@
 
   // If flushing of the instruction cache becomes necessary Windows has the
   // API function FlushInstructionCache.
+
+  // By default, valgrind only checks the stack for writes that might need to
+  // invalidate already cached translated code.  This leads to random
+  // instability when code patches or moves go unnoticed.  One solution is
+  // to run valgrind with --smc-check=all, but this comes at a big
+  // performance cost.  Instead, we can notify valgrind that the code in
+  // this range has changed and its cached translations must be discarded.
+#ifdef VALGRIND_DISCARD_TRANSLATIONS
+  VALGRIND_DISCARD_TRANSLATIONS(start, size);
+#endif
 }
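
Note: the notification added above follows the usual valgrind client-request pattern. The macro is only defined when valgrind.h has been included (GCC builds, per the guard at the top of the file), and at run time it is a cheap no-op unless the process actually runs under valgrind. The same guarded shape, standalone:

    #include <cstddef>

    void NotifyCodeChanged(void* start, size_t size) {
    #ifdef VALGRIND_DISCARD_TRANSLATIONS
      VALGRIND_DISCARD_TRANSLATIONS(start, size);  // drop cached translations
    #else
      (void)start;  // compiles to nothing without valgrind.h
      (void)size;
    #endif
    }

    int main() {
      char buffer[16];
      NotifyCodeChanged(buffer, sizeof buffer);
      return 0;
    }
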
 
 
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index d8d6dbb..9fd581d 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -218,7 +218,7 @@
     OperandType op_order = bm[i].op_order_;
     id->op_order_ =
         static_cast<OperandType>(op_order & ~BYTE_SIZE_OPERAND_FLAG);
-    assert(id->type == NO_INSTR);  // Information not already entered
+    ASSERT_EQ(NO_INSTR, id->type);  // Information not already entered
     id->type = type;
     id->byte_size_operation = ((op_order & BYTE_SIZE_OPERAND_FLAG) != 0);
   }
@@ -232,7 +232,7 @@
                                      const char* mnem) {
   for (byte b = start; b <= end; b++) {
     InstructionDesc* id = &instructions_[b];
-    assert(id->type == NO_INSTR);  // Information already entered
+    ASSERT_EQ(NO_INSTR, id->type);  // Information not already entered
     id->mnem = mnem;
     id->type = type;
     id->byte_size_operation = byte_size;
@@ -243,7 +243,7 @@
 void InstructionTable::AddJumpConditionalShort() {
   for (byte b = 0x70; b <= 0x7F; b++) {
     InstructionDesc* id = &instructions_[b];
-    assert(id->type == NO_INSTR);  // Information already entered
+    ASSERT_EQ(NO_INSTR, id->type);  // Information not already entered
     id->mnem = NULL;  // Computed depending on condition code.
     id->type = JUMP_CONDITIONAL_SHORT_INSTR;
   }
@@ -393,6 +393,7 @@
                               RegisterNameMapping register_name);
   int PrintRightOperand(byte* modrmp);
   int PrintRightByteOperand(byte* modrmp);
+  int PrintRightXMMOperand(byte* modrmp);
   int PrintOperands(const char* mnem,
                     OperandType op_order,
                     byte* data);
@@ -400,13 +401,15 @@
   int PrintImmediateOp(byte* data);
   const char* TwoByteMnemonic(byte opcode);
   int TwoByteOpcodeInstruction(byte* data);
-  int F7Instruction(byte* data);
+  int F6F7Instruction(byte* data);
   int ShiftInstruction(byte* data);
   int JumpShort(byte* data);
   int JumpConditional(byte* data);
   int JumpConditionalShort(byte* data);
   int SetCC(byte* data);
   int FPUInstruction(byte* data);
+  int MemoryFPUInstruction(int escape_opcode, int regop, byte* modrm_start);
+  int RegisterFPUInstruction(int escape_opcode, byte modrm_byte);
   void AppendToBuffer(const char* format, ...);
 
   void UnimplementedInstruction() {
@@ -568,6 +571,12 @@
 }
 
 
+int DisassemblerX64::PrintRightXMMOperand(byte* modrmp) {
+  return PrintRightOperandHelper(modrmp,
+                                 &DisassemblerX64::NameOfXMMRegister);
+}
+
+
 // Returns number of bytes used including the current *data.
 // Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'.
 int DisassemblerX64::PrintOperands(const char* mnem,
@@ -648,8 +657,8 @@
 
 
 // Returns number of bytes used, including *data.
-int DisassemblerX64::F7Instruction(byte* data) {
-  assert(*data == 0xF7);
+int DisassemblerX64::F6F7Instruction(byte* data) {
+  ASSERT(*data == 0xF7 || *data == 0xF6);
   byte modrm = *(data + 1);
   int mod, regop, rm;
   get_modrm(modrm, &mod, &regop, &rm);
@@ -676,19 +685,12 @@
                    operand_size_code(),
                    NameOfCPURegister(rm));
     return 2;
-  } else if (mod == 3 && regop == 0) {
-    int32_t imm = *reinterpret_cast<int32_t*>(data + 2);
-    AppendToBuffer("test%c %s,0x%x",
-                   operand_size_code(),
-                   NameOfCPURegister(rm),
-                   imm);
-    return 6;
   } else if (regop == 0) {
     AppendToBuffer("test%c ", operand_size_code());
-    int count = PrintRightOperand(data + 1);
-    int32_t imm = *reinterpret_cast<int32_t*>(data + 1 + count);
-    AppendToBuffer(",0x%x", imm);
-    return 1 + count + 4 /*int32_t*/;
+    int count = PrintRightOperand(data + 1);  // Use name of 64-bit register.
+    AppendToBuffer(",0x");
+    count += PrintImmediate(data + 1 + count, operand_size());
+    return 1 + count;
   } else {
     UnimplementedInstruction();
     return 2;
@@ -739,7 +741,7 @@
       UnimplementedInstruction();
       return num_bytes;
   }
-  assert(mnem != NULL);
+  ASSERT_NE(NULL, mnem);
   if (op == 0xD0) {
     imm8 = 1;
   } else if (op == 0xC0) {
@@ -762,7 +764,7 @@
 
 // Returns number of bytes used, including *data.
 int DisassemblerX64::JumpShort(byte* data) {
-  assert(*data == 0xEB);
+  ASSERT_EQ(0xEB, *data);
   byte b = *(data + 1);
   byte* dest = data + static_cast<int8_t>(b) + 2;
   AppendToBuffer("jmp %s", NameOfAddress(dest));
@@ -772,7 +774,7 @@
 
 // Returns number of bytes used, including *data.
 int DisassemblerX64::JumpConditional(byte* data) {
-  assert(*data == 0x0F);
+  ASSERT_EQ(0x0F, *data);
   byte cond = *(data + 1) & 0x0F;
   byte* dest = data + *reinterpret_cast<int32_t*>(data + 2) + 6;
   const char* mnem = conditional_code_suffix[cond];
@@ -794,7 +796,7 @@
 
 // Returns number of bytes used, including *data.
 int DisassemblerX64::SetCC(byte* data) {
-  assert(*data == 0x0F);
+  ASSERT_EQ(0x0F, *data);
   byte cond = *(data + 1) & 0x0F;
   const char* mnem = conditional_code_suffix[cond];
   AppendToBuffer("set%s%c ", mnem, operand_size_code());
@@ -805,158 +807,170 @@
 
 // Returns number of bytes used, including *data.
 int DisassemblerX64::FPUInstruction(byte* data) {
-  byte b1 = *data;
-  byte b2 = *(data + 1);
-  if (b1 == 0xD9) {
-    const char* mnem = NULL;
-    switch (b2) {
-      case 0xE0:
-        mnem = "fchs";
-        break;
-      case 0xE1:
-        mnem = "fabs";
-        break;
-      case 0xE4:
-        mnem = "ftst";
-        break;
-      case 0xF5:
-        mnem = "fprem1";
-        break;
-      case 0xF7:
-        mnem = "fincstp";
-        break;
-      case 0xE8:
-        mnem = "fld1";
-        break;
-      case 0xEE:
-        mnem = "fldz";
-        break;
-      case 0xF8:
-        mnem = "fprem";
-        break;
-    }
-    if (mnem != NULL) {
-      AppendToBuffer("%s", mnem);
-      return 2;
-    } else if ((b2 & 0xF8) == 0xC8) {
-      AppendToBuffer("fxch st%d", b2 & 0x7);
-      return 2;
-    } else {
-      int mod, regop, rm;
-      get_modrm(*(data + 1), &mod, &regop, &rm);
-      const char* mnem = "?";
-      switch (regop) {
-        case 0:
-          mnem = "fld_s";
-          break;
-        case 3:
-          mnem = "fstp_s";
-          break;
-        default:
-          UnimplementedInstruction();
-      }
-      AppendToBuffer("%s ", mnem);
-      int count = PrintRightOperand(data + 1);
-      return count + 1;
-    }
-  } else if (b1 == 0xDD) {
-    if ((b2 & 0xF8) == 0xC0) {
-      AppendToBuffer("ffree st%d", b2 & 0x7);
-      return 2;
-    } else {
-      int mod, regop, rm;
-      get_modrm(*(data + 1), &mod, &regop, &rm);
-      const char* mnem = "?";
-      switch (regop) {
-        case 0:
-          mnem = "fld_d";
-          break;
-        case 3:
-          mnem = "fstp_d";
-          break;
-        default:
-          UnimplementedInstruction();
-      }
-      AppendToBuffer("%s ", mnem);
-      int count = PrintRightOperand(data + 1);
-      return count + 1;
-    }
-  } else if (b1 == 0xDB) {
-    int mod, regop, rm;
-    get_modrm(*(data + 1), &mod, &regop, &rm);
-    const char* mnem = "?";
-    switch (regop) {
-      case 0:
-        mnem = "fild_s";
-        break;
-      case 2:
-        mnem = "fist_s";
-        break;
-      case 3:
-        mnem = "fistp_s";
-        break;
-      default:
-        UnimplementedInstruction();
-    }
-    AppendToBuffer("%s ", mnem);
-    int count = PrintRightOperand(data + 1);
-    return count + 1;
-  } else if (b1 == 0xDF) {
-    if (b2 == 0xE0) {
-      AppendToBuffer("fnstsw_ax");
-      return 2;
-    }
-    int mod, regop, rm;
-    get_modrm(*(data + 1), &mod, &regop, &rm);
-    const char* mnem = "?";
-    switch (regop) {
-      case 5:
-        mnem = "fild_d";
-        break;
-      case 7:
-        mnem = "fistp_d";
-        break;
-      default:
-        UnimplementedInstruction();
-    }
-    AppendToBuffer("%s ", mnem);
-    int count = PrintRightOperand(data + 1);
-    return count + 1;
-  } else if (b1 == 0xDC || b1 == 0xDE) {
-    bool is_pop = (b1 == 0xDE);
-    if (is_pop && b2 == 0xD9) {
-      AppendToBuffer("fcompp");
-      return 2;
-    }
-    const char* mnem = "FP0xDC";
-    switch (b2 & 0xF8) {
-      case 0xC0:
-        mnem = "fadd";
-        break;
-      case 0xE8:
-        mnem = "fsub";
-        break;
-      case 0xC8:
-        mnem = "fmul";
-        break;
-      case 0xF8:
-        mnem = "fdiv";
-        break;
-      default:
-        UnimplementedInstruction();
-    }
-    AppendToBuffer("%s%s st%d", mnem, is_pop ? "p" : "", b2 & 0x7);
-    return 2;
-  } else if (b1 == 0xDA && b2 == 0xE9) {
-    const char* mnem = "fucompp";
-    AppendToBuffer("%s", mnem);
-    return 2;
+  byte escape_opcode = *data;
+  ASSERT_EQ(0xD8, escape_opcode & 0xF8);
+  byte modrm_byte = *(data + 1);
+
+  if (modrm_byte >= 0xC0) {
+    return RegisterFPUInstruction(escape_opcode, modrm_byte);
+  } else {
+    return MemoryFPUInstruction(escape_opcode, modrm_byte, data + 1);
   }
-  AppendToBuffer("Unknown FP instruction");
+}
+
+
+int DisassemblerX64::MemoryFPUInstruction(int escape_opcode,
+                                          int modrm_byte,
+                                          byte* modrm_start) {
+  const char* mnem = "?";
+  int regop = (modrm_byte >> 3) & 0x7;  // reg/op field of modrm byte.
+  switch (escape_opcode) {
+    case 0xD9: switch (regop) {
+        case 0: mnem = "fld_s"; break;
+        case 3: mnem = "fstp_s"; break;
+        case 7: mnem = "fstcw"; break;
+        default: UnimplementedInstruction();
+      }
+      break;
+
+    case 0xDB: switch (regop) {
+        case 0: mnem = "fild_s"; break;
+        case 1: mnem = "fisttp_s"; break;
+        case 2: mnem = "fist_s"; break;
+        case 3: mnem = "fistp_s"; break;
+        default: UnimplementedInstruction();
+      }
+      break;
+
+    case 0xDD: switch (regop) {
+        case 0: mnem = "fld_d"; break;
+        case 3: mnem = "fstp_d"; break;
+        default: UnimplementedInstruction();
+      }
+      break;
+
+    case 0xDF: switch (regop) {
+        case 5: mnem = "fild_d"; break;
+        case 7: mnem = "fistp_d"; break;
+        default: UnimplementedInstruction();
+      }
+      break;
+
+    default: UnimplementedInstruction();
+  }
+  AppendToBuffer("%s ", mnem);
+  int count = PrintRightOperand(modrm_start);
+  return count + 1;
+}
+
+
+int DisassemblerX64::RegisterFPUInstruction(int escape_opcode,
+                                            byte modrm_byte) {
+  bool has_register = false;  // Is the FPU register encoded in modrm_byte?
+  const char* mnem = "?";
+
+  switch (escape_opcode) {
+    case 0xD8:
+      UnimplementedInstruction();
+      break;
+
+    case 0xD9:
+      switch (modrm_byte & 0xF8) {
+        case 0xC8:
+          mnem = "fxch";
+          has_register = true;
+          break;
+        default:
+          switch (modrm_byte) {
+            case 0xE0: mnem = "fchs"; break;
+            case 0xE1: mnem = "fabs"; break;
+            case 0xE4: mnem = "ftst"; break;
+            case 0xE8: mnem = "fld1"; break;
+            case 0xEE: mnem = "fldz"; break;
+            case 0xF5: mnem = "fprem1"; break;
+            case 0xF7: mnem = "fincstp"; break;
+            case 0xF8: mnem = "fprem"; break;
+            case 0xFE: mnem = "fsin"; break;
+            case 0xFF: mnem = "fcos"; break;
+            default: UnimplementedInstruction();
+          }
+      }
+      break;
+
+    case 0xDA:
+      if (modrm_byte == 0xE9) {
+        mnem = "fucompp";
+      } else {
+        UnimplementedInstruction();
+      }
+      break;
+
+    case 0xDB:
+      if ((modrm_byte & 0xF8) == 0xE8) {
+        mnem = "fucomi";
+        has_register = true;
+      } else if (modrm_byte == 0xE2) {
+        mnem = "fclex";
+      } else {
+        UnimplementedInstruction();
+      }
+      break;
+
+    case 0xDC:
+      has_register = true;
+      switch (modrm_byte & 0xF8) {
+        case 0xC0: mnem = "fadd"; break;
+        case 0xE8: mnem = "fsub"; break;
+        case 0xC8: mnem = "fmul"; break;
+        case 0xF8: mnem = "fdiv"; break;
+        default: UnimplementedInstruction();
+      }
+      break;
+
+    case 0xDD:
+      has_register = true;
+      switch (modrm_byte & 0xF8) {
+        case 0xC0: mnem = "ffree"; break;
+        case 0xD8: mnem = "fstp"; break;
+        default: UnimplementedInstruction();
+      }
+      break;
+
+    case 0xDE:
+      if (modrm_byte == 0xD9) {
+        mnem = "fcompp";
+      } else {
+        has_register = true;
+        switch (modrm_byte & 0xF8) {
+          case 0xC0: mnem = "faddp"; break;
+          case 0xE8: mnem = "fsubp"; break;
+          case 0xC8: mnem = "fmulp"; break;
+          case 0xF8: mnem = "fdivp"; break;
+          default: UnimplementedInstruction();
+        }
+      }
+      break;
+
+    case 0xDF:
+      if (modrm_byte == 0xE0) {
+        mnem = "fnstsw_ax";
+      } else if ((modrm_byte & 0xF8) == 0xE8) {
+        mnem = "fucomip";
+        has_register = true;
+      }
+      break;
+
+    default: UnimplementedInstruction();
+  }
+
+  if (has_register) {
+    AppendToBuffer("%s st%d", mnem, modrm_byte & 0x7);
+  } else {
+    AppendToBuffer("%s", mnem);
+  }
   return 2;
 }
 
 
 // Handle all two-byte opcodes, which start with 0x0F.
 // These instructions may be affected by an 0x66, 0xF2, or 0xF3 prefix.
 // We do not use any three-byte opcodes, which start with 0x0F38 or 0x0F3A.
@@ -1035,13 +1049,13 @@
       int mod, regop, rm;
       get_modrm(*current, &mod, &regop, &rm);
       AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
-      data += PrintRightOperand(data);
+      current += PrintRightOperand(current);
     } else if ((opcode & 0xF8) == 0x58) {
       // XMM arithmetic. Mnemonic was retrieved at the start of this function.
       int mod, regop, rm;
       get_modrm(*current, &mod, &regop, &rm);
-      AppendToBuffer("%s %s,%s", mnemonic, NameOfXMMRegister(regop),
-                     NameOfXMMRegister(rm));
+      AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
+      current += PrintRightXMMOperand(current);
     } else {
       UnimplementedInstruction();
     }
@@ -1050,7 +1064,7 @@
 
     // CVTTSS2SI: Convert scalar single-precision FP to dword integer.
     // Assert that mod is not 3, so source is memory, not an XMM register.
-    ASSERT((*current & 0xC0) != 0xC0);
+    ASSERT_NE(0xC0, *current & 0xC0);
     current += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, current);
   } else {
     UnimplementedInstruction();
@@ -1226,18 +1240,6 @@
         break;
       }
 
-      case 0xF6: {
-        int mod, regop, rm;
-        get_modrm(*(data + 1), &mod, &regop, &rm);
-        if (mod == 3 && regop == 0) {
-          AppendToBuffer("testb %s,%d", NameOfCPURegister(rm), *(data + 2));
-        } else {
-          UnimplementedInstruction();
-        }
-        data += 3;
-        break;
-      }
-
       case 0x81:  // fall through
       case 0x83:  // 0x81 with sign extension bit set
         data += PrintImmediateOp(data);
@@ -1334,7 +1336,7 @@
       case 0x95:
       case 0x96:
       case 0x97: {
-        int reg = (current & 0x7) | (rex_b() ? 8 : 0);
+        int reg = (*data & 0x7) | (rex_b() ? 8 : 0);
         if (reg == 0) {
           AppendToBuffer("nop");  // Common name for xchg rax,rax.
         } else {
@@ -1342,8 +1344,9 @@
                          operand_size_code(),
                          NameOfCPURegister(reg));
         }
+        data++;
       }
-
+        break;
 
       case 0xFE: {
         data++;
@@ -1455,8 +1458,10 @@
         data += JumpShort(data);
         break;
 
+      case 0xF6:
+        byte_size_operand_ = true;  // fall through
       case 0xF7:
-        data += F7Instruction(data);
+        data += F6F7Instruction(data);
         break;
 
       default:
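
Note: the FPUInstruction rewrite dispatches on a single predicate: x87 escape opcodes occupy 0xD8 through 0xDF, and a ModR/M byte of 0xC0 or above selects the register form instead of the memory form. A compilable sketch of just that predicate:

    #include <cassert>

    enum FpuForm { FPU_MEMORY, FPU_REGISTER };

    static FpuForm ClassifyFpu(unsigned char escape_opcode,
                               unsigned char modrm_byte) {
      assert((escape_opcode & 0xF8) == 0xD8);  // Must be an x87 escape opcode.
      return (modrm_byte >= 0xC0) ? FPU_REGISTER : FPU_MEMORY;
    }

    int main() {
      assert(ClassifyFpu(0xD9, 0xE8) == FPU_REGISTER);  // fld1
      assert(ClassifyFpu(0xD9, 0x45) == FPU_MEMORY);    // fld_s [rbp+disp8]
      return 0;
    }

The 0xC0 threshold is exactly the mod == 3 test, since the two mod bits are the top bits of the ModR/M byte.
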
diff --git a/src/x64/fast-codegen-x64.cc b/src/x64/fast-codegen-x64.cc
index c433836..53ee357 100644
--- a/src/x64/fast-codegen-x64.cc
+++ b/src/x64/fast-codegen-x64.cc
@@ -30,6 +30,7 @@
 #include "codegen-inl.h"
 #include "debug.h"
 #include "fast-codegen.h"
+#include "parser.h"
 
 namespace v8 {
 namespace internal {
@@ -74,6 +75,14 @@
     __ bind(&ok);
   }
 
+  { Comment cmnt(masm_, "[ Declarations");
+    VisitDeclarations(fun->scope()->declarations());
+  }
+
+  if (FLAG_trace) {
+    __ CallRuntime(Runtime::kTraceEnter, 0);
+  }
+
   { Comment cmnt(masm_, "[ Body");
     VisitStatements(fun->body());
   }
@@ -83,7 +92,12 @@
     // body.
     __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
     SetReturnPosition(fun);
+    if (FLAG_trace) {
+      __ push(rax);
+      __ CallRuntime(Runtime::kTraceExit, 1);
+    }
     __ RecordJSReturn();
+
     // Do not use the leave instruction here because it is too short to
     // patch with the code required by the debugger.
     __ movq(rsp, rbp);
@@ -102,18 +116,78 @@
 }
 
 
-void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
-  Comment cmnt(masm_, "[ ExpressionStatement");
-  SetStatementPosition(stmt);
-  Visit(stmt->expression());
+void FastCodeGenerator::Move(Location destination, Slot* source) {
+  switch (destination.type()) {
+    case Location::NOWHERE:
+      break;
+    case Location::TEMP:
+      __ push(Operand(rbp, SlotOffset(source)));
+      break;
+  }
+}
+
+
+void FastCodeGenerator::Move(Location destination, Literal* expr) {
+  switch (destination.type()) {
+    case Location::NOWHERE:
+      break;
+    case Location::TEMP:
+      __ Push(expr->handle());
+      break;
+  }
+}
+
+
+void FastCodeGenerator::Move(Slot* destination, Location source) {
+  switch (source.type()) {
+    case Location::NOWHERE:
+      UNREACHABLE();
+    case Location::TEMP:
+      __ pop(Operand(rbp, SlotOffset(destination)));
+      break;
+  }
+}
+
+
+void FastCodeGenerator::DropAndMove(Location destination, Register source) {
+  switch (destination.type()) {
+    case Location::NOWHERE:
+      __ addq(rsp, Immediate(kPointerSize));
+      break;
+    case Location::TEMP:
+      __ movq(Operand(rsp, 0), source);
+      break;
+  }
+}
+
+
+void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+  // Call the runtime to declare the globals.
+  __ push(rsi);  // The context is the first argument.
+  __ Push(pairs);
+  __ Push(Smi::FromInt(is_eval_ ? 1 : 0));
+  __ CallRuntime(Runtime::kDeclareGlobals, 3);
+  // Return value is ignored.
 }
 
 
 void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
   Comment cmnt(masm_, "[ ReturnStatement");
   SetStatementPosition(stmt);
-  Visit(stmt->expression());
-  __ pop(rax);
+  Expression* expr = stmt->expression();
+  // Complete the statement based on the type of the subexpression.
+  if (expr->AsLiteral() != NULL) {
+    __ Move(rax, expr->AsLiteral()->handle());
+  } else {
+    Visit(expr);
+    Move(rax, expr->location());
+  }
+
+  if (FLAG_trace) {
+    __ push(rax);
+    __ CallRuntime(Runtime::kTraceExit, 1);
+  }
+
   __ RecordJSReturn();
   // Do not use the leave instruction here because it is too short to
   // patch with the code required by the debugger.
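
Note: the Move and DropAndMove helpers above encode the compiler's location contract: an expression with a TEMP location leaves its value on top of the stack, while NOWHERE discards it. A toy model of that contract, with simplified stand-in types rather than V8's Location class:

    #include <cassert>
    #include <vector>

    enum LocationType { NOWHERE, TEMP };

    static void MoveTo(std::vector<int>* stack, LocationType destination,
                       int value) {
      if (destination == TEMP) stack->push_back(value);  // Materialize on TOS.
      // NOWHERE: the value is simply dropped.
    }

    int main() {
      std::vector<int> stack;
      MoveTo(&stack, TEMP, 42);
      assert(stack.size() == 1 && stack.back() == 42);
      MoveTo(&stack, NOWHERE, 7);
      assert(stack.size() == 1);  // A discarded value leaves the stack alone.
      return 0;
    }
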
@@ -132,29 +206,235 @@
 }
 
 
+void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+  Comment cmnt(masm_, "[ FunctionLiteral");
+
+  // Build the function boilerplate and instantiate it.
+  Handle<JSFunction> boilerplate = BuildBoilerplate(expr);
+  if (HasStackOverflow()) return;
+
+  ASSERT(boilerplate->IsBoilerplate());
+
+  // Create a new closure.
+  __ push(rsi);
+  __ Push(boilerplate);
+  __ CallRuntime(Runtime::kNewClosure, 2);
+  Move(expr->location(), rax);
+}
+
+
 void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
   Comment cmnt(masm_, "[ VariableProxy");
   Expression* rewrite = expr->var()->rewrite();
-  ASSERT(rewrite != NULL);
+  if (rewrite == NULL) {
+    Comment cmnt(masm_, "Global variable");
+    // Use inline caching. Variable name is passed in rcx and the global
+    // object on the stack.
+    __ push(CodeGenerator::GlobalObject());
+    __ Move(rcx, expr->name());
+    Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+    __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
 
-  Slot* slot = rewrite->AsSlot();
-  ASSERT(slot != NULL);
-  { Comment cmnt(masm_, "[ Slot");
-    if (expr->location().is_temporary()) {
-      __ push(Operand(rbp, SlotOffset(slot)));
-    } else {
-      ASSERT(expr->location().is_nowhere());
-    }
+    // A test rax instruction following the call is used by the IC to
+    // indicate that the inobject property case was inlined.  Ensure there
+    // is no test rax instruction here.
+    DropAndMove(expr->location(), rax);
+  } else {
+    Comment cmnt(masm_, "Stack slot");
+    Move(expr->location(), rewrite->AsSlot());
   }
 }
 
 
-void FastCodeGenerator::VisitLiteral(Literal* expr) {
-  Comment cmnt(masm_, "[ Literal");
-  if (expr->location().is_temporary()) {
-    __ Push(expr->handle());
+void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+  Comment cmnt(masm_, "[ ObjectLiteral");
+  Label boilerplate_exists;
+
+  __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+  __ movq(rbx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
+  int literal_offset =
+    FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+  __ movq(rax, FieldOperand(rbx, literal_offset));
+  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+  __ j(not_equal, &boilerplate_exists);
+  // Create boilerplate if it does not exist.
+  // Literal array (0).
+  __ push(rbx);
+  // Literal index (1).
+  __ Push(Smi::FromInt(expr->literal_index()));
+  // Constant properties (2).
+  __ Push(expr->constant_properties());
+  __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+  __ bind(&boilerplate_exists);
+  // rax contains boilerplate.
+  // Clone boilerplate.
+  __ push(rax);
+  if (expr->depth() == 1) {
+    __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
   } else {
-    ASSERT(expr->location().is_nowhere());
+    __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+  }
+
+  // If result_saved == true: the result is saved on top of the stack.
+  // If result_saved == false: the result is not on the stack, just in rax.
+  bool result_saved = false;
+
+  for (int i = 0; i < expr->properties()->length(); i++) {
+    ObjectLiteral::Property* property = expr->properties()->at(i);
+    if (property->IsCompileTimeValue()) continue;
+
+    Literal* key = property->key();
+    Expression* value = property->value();
+    if (!result_saved) {
+      __ push(rax);  // Save result on the stack
+      result_saved = true;
+    }
+    switch (property->kind()) {
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:  // fall through
+        ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
+      case ObjectLiteral::Property::COMPUTED:
+        if (key->handle()->IsSymbol()) {
+          Visit(value);
+          ASSERT(value->location().is_temporary());
+          __ pop(rax);
+          __ Move(rcx, key->handle());
+          Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+          __ call(ic, RelocInfo::CODE_TARGET);
+          // StoreIC leaves the receiver on the stack.
+          break;
+        }
+        // fall through
+      case ObjectLiteral::Property::PROTOTYPE:
+        __ push(rax);
+        Visit(key);
+        ASSERT(key->location().is_temporary());
+        Visit(value);
+        ASSERT(value->location().is_temporary());
+        __ CallRuntime(Runtime::kSetProperty, 3);
+        __ movq(rax, Operand(rsp, 0));  // Restore result into rax.
+        break;
+      case ObjectLiteral::Property::SETTER:  // fall through
+      case ObjectLiteral::Property::GETTER:
+        __ push(rax);
+        Visit(key);
+        ASSERT(key->location().is_temporary());
+        __ Push(property->kind() == ObjectLiteral::Property::SETTER ?
+                Smi::FromInt(1) :
+                Smi::FromInt(0));
+        Visit(value);
+        ASSERT(value->location().is_temporary());
+        __ CallRuntime(Runtime::kDefineAccessor, 4);
+        __ movq(rax, Operand(rsp, 0));  // Restore result into rax.
+        break;
+      default: UNREACHABLE();
+    }
+  }
+  switch (expr->location().type()) {
+    case Location::NOWHERE:
+      if (result_saved) __ addq(rsp, Immediate(kPointerSize));
+      break;
+    case Location::TEMP:
+      if (!result_saved) __ push(rax);
+      break;
+  }
+}
+
+
+void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+  Comment cmnt(masm_, "[ RegExp Literal");
+  Label done;
+  // Registers will be used as follows:
+  // rdi = JS function.
+  // rbx = literals array.
+  // rax = regexp literal.
+  __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+  __ movq(rbx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
+  int literal_offset =
+    FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+  __ movq(rax, FieldOperand(rbx, literal_offset));
+  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+  __ j(not_equal, &done);
+  // Create regexp literal using runtime function
+  // Result will be in rax.
+  __ push(rbx);
+  __ Push(Smi::FromInt(expr->literal_index()));
+  __ Push(expr->pattern());
+  __ Push(expr->flags());
+  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+  // In either case the regexp literal is now in rax.
+  __ bind(&done);
+  Move(expr->location(), rax);
+}
+
+
+void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+  Comment cmnt(masm_, "[ ArrayLiteral");
+  Label make_clone;
+
+  // Fetch the function's literals array.
+  __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+  __ movq(rbx, FieldOperand(rbx, JSFunction::kLiteralsOffset));
+  // Check if the literal's boilerplate has been instantiated.
+  int offset =
+      FixedArray::kHeaderSize + (expr->literal_index() * kPointerSize);
+  __ movq(rax, FieldOperand(rbx, offset));
+  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+  __ j(not_equal, &make_clone);
+
+  // Instantiate the boilerplate.
+  __ push(rbx);
+  __ Push(Smi::FromInt(expr->literal_index()));
+  __ Push(expr->literals());
+  __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
+
+  __ bind(&make_clone);
+  // Clone the boilerplate.
+  __ push(rax);
+  if (expr->depth() > 1) {
+    __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+  } else {
+    __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+  }
+
+  bool result_saved = false;  // Is the result saved to the stack?
+
+  // Emit code to evaluate all the non-constant subexpressions and to store
+  // them into the newly cloned array.
+  ZoneList<Expression*>* subexprs = expr->values();
+  for (int i = 0, len = subexprs->length(); i < len; i++) {
+    Expression* subexpr = subexprs->at(i);
+    // If the subexpression is a literal or a simple materialized literal it
+    // is already set in the cloned array.
+    if (subexpr->AsLiteral() != NULL ||
+        CompileTimeValue::IsCompileTimeValue(subexpr)) {
+      continue;
+    }
+
+    if (!result_saved) {
+      __ push(rax);
+      result_saved = true;
+    }
+    Visit(subexpr);
+    ASSERT(subexpr->location().is_temporary());
+
+    // Store the subexpression value in the array's elements.
+    __ pop(rax);  // Subexpression value.
+    __ movq(rbx, Operand(rsp, 0));  // Copy of array literal.
+    __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
+    int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+    __ movq(FieldOperand(rbx, offset), rax);
+
+    // Update the write barrier for the array store.
+    __ RecordWrite(rbx, offset, rax, rcx);
+  }
+
+  switch (expr->location().type()) {
+    case Location::NOWHERE:
+      if (result_saved) __ addq(rsp, Immediate(kPointerSize));
+      break;
+    case Location::TEMP:
+      if (!result_saved) __ push(rax);
+      break;
   }
 }
 
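
Note: VisitObjectLiteral and VisitArrayLiteral share one shape: look up the literal's slot in the function's literals array, build the boilerplate through the runtime on first use, then clone it (shallow when the nesting depth allows). A scalar sketch of that caching pattern, with illustrative names only:

    #include <assert.h>
    #include <stddef.h>
    #include <map>

    struct Boilerplate { int depth; };
    static std::map<int, Boilerplate*> literals_array;  // Per-function stand-in.

    static Boilerplate* GetClonedLiteral(int literal_index, int depth) {
      Boilerplate*& slot = literals_array[literal_index];
      if (slot == NULL) {            // First use: materialize the boilerplate
        slot = new Boilerplate();    // (stand-in for the runtime call).
        slot->depth = depth;
      }
      return new Boilerplate(*slot);  // Clone; shallow vs. deep elided here.
    }

    int main() {
      Boilerplate* a = GetClonedLiteral(0, 1);
      Boilerplate* b = GetClonedLiteral(0, 1);  // Same boilerplate, fresh clone.
      assert(a != b);
      delete a;
      delete b;
      return 0;
    }
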
@@ -163,19 +443,268 @@
   Comment cmnt(masm_, "[ Assignment");
   ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR);
 
-  Visit(expr->value());
-
+  // Left-hand side can only be a global or a (parameter or local) slot.
   Variable* var = expr->target()->AsVariableProxy()->AsVariable();
-  ASSERT(var != NULL && var->slot() != NULL);
+  ASSERT(var != NULL);
+  ASSERT(var->is_global() || var->slot() != NULL);
 
-  if (expr->location().is_temporary()) {
-    __ movq(rax, Operand(rsp, 0));
-    __ movq(Operand(rbp, SlotOffset(var->slot())), rax);
+  Expression* rhs = expr->value();
+  Location destination = expr->location();
+  if (var->is_global()) {
+    // Assignment to a global variable, use inline caching.  Right-hand-side
+    // value is passed in rax, variable name in rcx, and the global object
+    // on the stack.
+
+    // Code for the right-hand-side expression depends on its type.
+    if (rhs->AsLiteral() != NULL) {
+      __ Move(rax, rhs->AsLiteral()->handle());
+    } else {
+      ASSERT(rhs->location().is_temporary());
+      Visit(rhs);
+      __ pop(rax);
+    }
+    __ Move(rcx, var->name());
+    __ push(CodeGenerator::GlobalObject());
+    Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+    __ Call(ic, RelocInfo::CODE_TARGET);
+    // Overwrite the global object on the stack with the result if needed.
+    DropAndMove(expr->location(), rax);
   } else {
-    ASSERT(expr->location().is_nowhere());
-    __ pop(Operand(rbp, SlotOffset(var->slot())));
+    // Local or parameter assignment.
+
+    // Code for the right-hand-side expression depends on its type.
+    if (rhs->AsLiteral() != NULL) {
+      // Two cases: 'temp <- (var = constant)', or 'var = constant' with a
+      // discarded result.  Always perform the assignment.
+      __ Move(kScratchRegister, rhs->AsLiteral()->handle());
+      __ movq(Operand(rbp, SlotOffset(var->slot())), kScratchRegister);
+      Move(expr->location(), kScratchRegister);
+    } else {
+      ASSERT(rhs->location().is_temporary());
+      Visit(rhs);
+      switch (expr->location().type()) {
+        case Location::NOWHERE:
+          // Case 'var = temp'.  Discard right-hand-side temporary.
+          Move(var->slot(), rhs->location());
+          break;
+        case Location::TEMP:
+          // Case 'temp1 <- (var = temp0)'.  Preserve right-hand-side
+          // temporary on the stack.
+          __ movq(kScratchRegister, Operand(rsp, 0));
+          __ movq(Operand(rbp, SlotOffset(var->slot())), kScratchRegister);
+          break;
+      }
+    }
   }
 }
 
 
+void FastCodeGenerator::VisitProperty(Property* expr) {
+  Comment cmnt(masm_, "[ Property");
+  Expression* key = expr->key();
+  uint32_t dummy;
+
+  // Record the source position for the property load.
+  SetSourcePosition(expr->position());
+
+  // Evaluate receiver.
+  Visit(expr->obj());
+
+  if (key->AsLiteral() != NULL && key->AsLiteral()->handle()->IsSymbol() &&
+      !String::cast(*(key->AsLiteral()->handle()))->AsArrayIndex(&dummy)) {
+    // Do a NAMED property load.
+    // The IC expects the property name in rcx and the receiver on the stack.
+    __ Move(rcx, key->AsLiteral()->handle());
+    Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+    __ call(ic, RelocInfo::CODE_TARGET);
+    // By emitting a nop we make sure that there is no "test rax,..."
+    // instruction after the call, which the LoadIC code treats specially.
+    __ nop();
+  } else {
+    // Do a KEYED property load.
+    Visit(expr->key());
+    Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+    __ call(ic, RelocInfo::CODE_TARGET);
+    // By emitting a nop we make sure that there is no "test ..."
+    // instruction after the call, which the IC code treats specially.
+    __ nop();
+    // Drop key left on the stack by IC.
+    __ addq(rsp, Immediate(kPointerSize));
+  }
+  switch (expr->location().type()) {
+    case Location::TEMP:
+      __ movq(Operand(rsp, 0), rax);
+      break;
+    case Location::NOWHERE:
+      __ addq(rsp, Immediate(kPointerSize));
+      break;
+  }
+}
+
+
+void FastCodeGenerator::VisitCall(Call* expr) {
+  Expression* fun = expr->expression();
+  ZoneList<Expression*>* args = expr->arguments();
+  Variable* var = fun->AsVariableProxy()->AsVariable();
+  ASSERT(var != NULL && !var->is_this() && var->is_global());
+  ASSERT(!var->is_possibly_eval());
+
+  __ Push(var->name());
+  // Push global object (receiver).
+  __ push(CodeGenerator::GlobalObject());
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Visit(args->at(i));
+    ASSERT(args->at(i)->location().is_temporary());
+  }
+  // Record the source position for the debugger.
+  SetSourcePosition(expr->position());
+  // Call the IC initialization code.
+  Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
+                                                         NOT_IN_LOOP);
+  __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+  // Restore context register.
+  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  // Discard the function left on TOS.
+  DropAndMove(expr->location(), rax);
+}
+
+
+void FastCodeGenerator::VisitCallNew(CallNew* node) {
+  Comment cmnt(masm_, "[ CallNew");
+  // According to ECMA-262, section 11.2.2, page 44, the function
+  // expression in new calls must be evaluated before the
+  // arguments.
+  // Push function on the stack.
+  Visit(node->expression());
+  ASSERT(node->expression()->location().is_temporary());
+  // If the location is temporary, the value is already on the stack.
+
+  // Push global object (receiver).
+  __ push(CodeGenerator::GlobalObject());
+
+  // Push the arguments ("left-to-right") on the stack.
+  ZoneList<Expression*>* args = node->arguments();
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Visit(args->at(i));
+    ASSERT(args->at(i)->location().is_temporary());
+    // If location is temporary, it is already on the stack,
+    // so nothing to do here.
+  }
+
+  // Call the construct call builtin that handles allocation and
+  // constructor invocation.
+  SetSourcePosition(node->position());
+
+  // Load function, arg_count into rdi and rax.
+  __ Set(rax, arg_count);
+  // Function is in rsp[arg_count + 1].
+  __ movq(rdi, Operand(rsp, rax, times_pointer_size, kPointerSize));
+
+  Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
+  __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+
+  // Replace function on TOS with result in rax, or pop it.
+  DropAndMove(node->location(), rax);
+}
+
+
+void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+  Comment cmnt(masm_, "[ CallRuntime");
+  ZoneList<Expression*>* args = expr->arguments();
+  Runtime::Function* function = expr->function();
+
+  ASSERT(function != NULL);
+
+  // Push the arguments ("left-to-right").
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Visit(args->at(i));
+    ASSERT(args->at(i)->location().is_temporary());
+  }
+
+  __ CallRuntime(function, arg_count);
+  Move(expr->location(), rax);
+}
+
+
+void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+  // Compile a short-circuited boolean OR operation in a non-test
+  // context.
+  ASSERT(expr->op() == Token::OR);
+  // Compile (e0 || e1) as if it were
+  // (let (temp = e0) temp ? temp : e1).
+
+  Label eval_right, done;
+  Location destination = expr->location();
+  Expression* left = expr->left();
+  Expression* right = expr->right();
+
+  // Use the shared ToBoolean stub to find the boolean value of the
+  // left-hand subexpression.  Load the value into rax to perform some
+  // inlined checks assumed by the stub.
+
+  // Compile the left-hand value into rax.  Put it on the stack if we may
+  // need it as the value of the whole expression.
+  if (left->AsLiteral() != NULL) {
+    __ Move(rax, left->AsLiteral()->handle());
+    if (destination.is_temporary()) __ push(rax);
+  } else {
+    Visit(left);
+    ASSERT(left->location().is_temporary());
+    switch (destination.type()) {
+      case Location::NOWHERE:
+        // Pop the left-hand value into rax because we will not need it as the
+        // final result.
+        __ pop(rax);
+        break;
+      case Location::TEMP:
+        // Copy the left-hand value into rax because we may need it as the
+        // final result.
+        __ movq(rax, Operand(rsp, 0));
+        break;
+    }
+  }
+  // The left-hand value is in rax.  It is also on the stack iff the
+  // destination location is temporary.
+
+  // Perform fast checks assumed by the stub.
+  // The undefined value is false.
+  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+  __ j(equal, &eval_right);
+  __ CompareRoot(rax, Heap::kTrueValueRootIndex);  // True is true.
+  __ j(equal, &done);
+  __ CompareRoot(rax, Heap::kFalseValueRootIndex);  // False is false.
+  __ j(equal, &eval_right);
+  ASSERT(kSmiTag == 0);
+  __ SmiCompare(rax, Smi::FromInt(0));  // The smi zero is false.
+  __ j(equal, &eval_right);
+  Condition is_smi = masm_->CheckSmi(rax);  // All other smis are true.
+  __ j(is_smi, &done);
+
+  // Call the stub for all other cases.
+  __ push(rax);
+  ToBooleanStub stub;
+  __ CallStub(&stub);
+  __ testq(rax, rax);  // The stub returns nonzero for true.
+  __ j(not_zero, &done);
+
+  __ bind(&eval_right);
+  // Discard the left-hand value if present on the stack.
+  if (destination.is_temporary()) {
+    __ addq(rsp, Immediate(kPointerSize));
+  }
+  // Save or discard the right-hand value as needed.
+  if (right->AsLiteral() != NULL) {
+    Move(destination, right->AsLiteral());
+  } else {
+    Visit(right);
+    Move(destination, right->location());
+  }
+
+  __ bind(&done);
+}
+
+
 } }  // namespace v8::internal
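
Note: VisitBinaryOperation compiles (e0 || e1) as if it were (let (temp = e0) temp ? temp : e1). The same lowering in plain C++, over a toy Value type:

    #include <assert.h>

    struct Value { bool is_truthy; int payload; };

    // Evaluate e0 exactly once; if it is truthy it is the result,
    // otherwise evaluate and return e1.
    static Value EvalOr(Value (*eval_left)(), Value (*eval_right)()) {
      Value temp = eval_left();
      if (temp.is_truthy) return temp;  // Short-circuit: keep e0's value.
      return eval_right();
    }

    static Value Falsy() { Value v = { false, 0 }; return v; }
    static Value Seven() { Value v = { true, 7 }; return v; }

    int main() {
      assert(EvalOr(Falsy, Seven).payload == 7);
      assert(EvalOr(Seven, Falsy).payload == 7);
      return 0;
    }
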
diff --git a/src/x64/frames-x64.cc b/src/x64/frames-x64.cc
index fe224ad..6a0527c 100644
--- a/src/x64/frames-x64.cc
+++ b/src/x64/frames-x64.cc
@@ -57,11 +57,7 @@
   state->sp = sp;
   state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
   // Determine frame type.
-  if (Memory::Address_at(fp + ExitFrameConstants::kDebugMarkOffset) != 0) {
-    return EXIT_DEBUG;
-  } else {
-    return EXIT;
-  }
+  return EXIT;
 }
 
 int JavaScriptFrame::GetProvidedParametersCount() const {
@@ -69,10 +65,10 @@
 }
 
 
-void ExitFrame::Iterate(ObjectVisitor* a) const {
-  // Exit frames on X64 do not contain any pointers. The arguments
-  // are traversed as part of the expression stack of the calling
-  // frame.
+void ExitFrame::Iterate(ObjectVisitor* v) const {
+  v->VisitPointer(&code_slot());
+  // The arguments are traversed as part of the expression stack of
+  // the calling frame.
 }
 
 byte* InternalFrame::GetCallerStackPointer() const {
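
Note: with the debug mark gone, the only GC-visible pointer in an x64 exit frame is the code slot at kCodeOffset, two words below the frame pointer. A sketch of that addressing, under the constants from frames-x64.h below:

    #include <assert.h>
    #include <stdint.h>

    static const int kPointerSize = 8;                 // x64.
    static const int kCodeOffset = -2 * kPointerSize;  // From frames-x64.h.

    // Address of the exit frame's code slot; this is the one pointer
    // that Iterate now has to visit.
    static uintptr_t* CodeSlot(uintptr_t fp) {
      return reinterpret_cast<uintptr_t*>(fp + kCodeOffset);
    }

    int main() {
      uintptr_t frame[4] = { 0, 0, 0, 0 };
      uintptr_t fp = reinterpret_cast<uintptr_t>(&frame[2]);
      assert(CodeSlot(fp) == &frame[0]);
      return 0;
    }
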
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
index eefaa0a..a92b248 100644
--- a/src/x64/frames-x64.h
+++ b/src/x64/frames-x64.h
@@ -63,7 +63,7 @@
 
 class ExitFrameConstants : public AllStatic {
  public:
-  static const int kDebugMarkOffset = -2 * kPointerSize;
+  static const int kCodeOffset      = -2 * kPointerSize;
   static const int kSPOffset        = -1 * kPointerSize;
 
   static const int kCallerFPOffset  = +0 * kPointerSize;
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 7108025..2812df1 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -320,7 +320,7 @@
   // Slow case: Load name and receiver from stack and jump to runtime.
   __ bind(&slow);
   __ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
-  KeyedLoadIC::Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+  Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
   __ bind(&check_string);
   // The key is not a smi.
   // Is it a string?
@@ -360,6 +360,146 @@
 }
 
 
+void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
+                                        ExternalArrayType array_type) {
+  // ----------- S t a t e -------------
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : name
+  //  -- rsp[16] : receiver
+  // -----------------------------------
+  Label slow, failed_allocation;
+
+  // Load name and receiver.
+  __ movq(rax, Operand(rsp, kPointerSize));
+  __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(rcx, &slow);
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(rax, &slow);
+
+  // Check that the object is a JS object.
+  __ CmpObjectType(rcx, JS_OBJECT_TYPE, rdx);
+  __ j(not_equal, &slow);
+  // Check that the receiver does not require access checks.  We need
+  // to check this explicitly since this generic stub does not perform
+  // map checks.  The map is already in rdx.
+  __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
+           Immediate(1 << Map::kIsAccessCheckNeeded));
+  __ j(not_zero, &slow);
+
+  // Check that the elements array is the appropriate type of
+  // ExternalArray.
+  // rax: index (as a smi)
+  // rcx: JSObject
+  __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+                 Heap::RootIndexForExternalArrayType(array_type));
+  __ j(not_equal, &slow);
+
+  // Check that the index is in range.
+  __ SmiToInteger32(rax, rax);
+  __ cmpl(rax, FieldOperand(rcx, ExternalArray::kLengthOffset));
+  // Unsigned comparison catches both negative and too-large values.
+  __ j(above_equal, &slow);
+
+  // rax: untagged index
+  // rcx: elements array
+  __ movq(rcx, FieldOperand(rcx, ExternalArray::kExternalPointerOffset));
+  // rcx: base pointer of external storage
+  switch (array_type) {
+    case kExternalByteArray:
+      __ movsxbq(rax, Operand(rcx, rax, times_1, 0));
+      break;
+    case kExternalUnsignedByteArray:
+      __ movb(rax, Operand(rcx, rax, times_1, 0));
+      break;
+    case kExternalShortArray:
+      __ movsxwq(rax, Operand(rcx, rax, times_2, 0));
+      break;
+    case kExternalUnsignedShortArray:
+      __ movzxwq(rax, Operand(rcx, rax, times_2, 0));
+      break;
+    case kExternalIntArray:
+      __ movsxlq(rax, Operand(rcx, rax, times_4, 0));
+      break;
+    case kExternalUnsignedIntArray:
+      __ movl(rax, Operand(rcx, rax, times_4, 0));
+      break;
+    case kExternalFloatArray:
+      __ fld_s(Operand(rcx, rax, times_4, 0));
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+
+  // For integer array types:
+  // rax: value
+  // For floating-point array type:
+  // FP(0): value
+
+  if (array_type == kExternalIntArray ||
+      array_type == kExternalUnsignedIntArray) {
+    // For the Int and UnsignedInt array types, we need to see whether
+    // the value can be represented in a Smi. If not, we need to convert
+    // it to a HeapNumber.
+    Label box_int;
+    if (array_type == kExternalIntArray) {
+      __ JumpIfNotValidSmiValue(rax, &box_int);
+    } else {
+      ASSERT_EQ(array_type, kExternalUnsignedIntArray);
+      __ JumpIfUIntNotValidSmiValue(rax, &box_int);
+    }
+
+    __ Integer32ToSmi(rax, rax);
+    __ ret(0);
+
+    __ bind(&box_int);
+
+    // Allocate a HeapNumber for the int and perform int-to-double
+    // conversion.
+    __ push(rax);
+    if (array_type == kExternalIntArray) {
+      __ fild_s(Operand(rsp, 0));
+    } else {
+      ASSERT(array_type == kExternalUnsignedIntArray);
+      // The value was zero-extended to 64 bits, so loading it as a
+      // 64-bit signed integer yields the correct unsigned 32-bit value.
+      __ fild_d(Operand(rsp, 0));
+    }
+    __ pop(rax);
+    // FP(0): value
+    __ AllocateHeapNumber(rax, rbx, &failed_allocation);
+    // Set the value.
+    __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
+    __ ret(0);
+  } else if (array_type == kExternalFloatArray) {
+    // For the floating-point array type, we need to always allocate a
+    // HeapNumber.
+    __ AllocateHeapNumber(rax, rbx, &failed_allocation);
+    // Set the value.
+    __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
+    __ ret(0);
+  } else {
+    __ Integer32ToSmi(rax, rax);
+    __ ret(0);
+  }
+
+  // If we fail allocation of the HeapNumber, we still have a value on
+  // top of the FPU stack. Remove it.
+  __ bind(&failed_allocation);
+  __ ffree();
+  __ fincstp();
+  // Fall through to slow case.
+
+  // Slow case: Load name and receiver from stack and jump to runtime.
+  __ bind(&slow);
+  __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
+  Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+}
+
+
 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rsp[0] : return address
@@ -458,15 +598,9 @@
   // rbx: index (as a smi)
   __ j(below, &fast);
 
-  // Slow case: Push extra copies of the arguments (3).
+  // Slow case: call runtime.
   __ bind(&slow);
-  __ pop(rcx);
-  __ push(Operand(rsp, 1 * kPointerSize));
-  __ push(Operand(rsp, 1 * kPointerSize));
-  __ push(rax);
-  __ push(rcx);
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
+  Generate(masm, ExternalReference(Runtime::kSetProperty));
 
   // Check whether the elements is a pixel array.
   // rax: value
@@ -558,6 +692,180 @@
 }
 
 
+void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
+                                         ExternalArrayType array_type) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : key
+  //  -- rsp[16] : receiver
+  // -----------------------------------
+  Label slow, check_heap_number;
+
+  // Get the receiver from the stack.
+  __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(rdx, &slow);
+  // Get the map from the receiver.
+  __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+  // Check that the receiver does not require access checks.  We need
+  // to do this because this generic stub does not perform map checks.
+  __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+           Immediate(1 << Map::kIsAccessCheckNeeded));
+  __ j(not_zero, &slow);
+  // Get the key from the stack.
+  __ movq(rbx, Operand(rsp, 1 * kPointerSize));  // 1 ~ return address
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(rbx, &slow);
+
+  // Check that the object is a JS object.
+  __ CmpInstanceType(rcx, JS_OBJECT_TYPE);
+  __ j(not_equal, &slow);
+
+  // Check that the elements array is the appropriate type of
+  // ExternalArray.
+  // rax: value
+  // rdx: JSObject
+  // rbx: index (as a smi)
+  __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+                 Heap::RootIndexForExternalArrayType(array_type));
+  __ j(not_equal, &slow);
+
+  // Check that the index is in range.
+  __ SmiToInteger32(rbx, rbx);  // Untag the index.
+  __ cmpl(rbx, FieldOperand(rcx, ExternalArray::kLengthOffset));
+  // Unsigned comparison catches both negative and too-large values.
+  __ j(above_equal, &slow);
+
+  // Handle both smis and HeapNumbers in the fast path. Go to the
+  // runtime for all other kinds of values.
+  // rax: value
+  // rcx: elements array
+  // rbx: untagged index
+  __ JumpIfNotSmi(rax, &check_heap_number);
+  __ movq(rdx, rax);  // Save the value.
+  __ SmiToInteger32(rax, rax);
+  __ movq(rcx, FieldOperand(rcx, ExternalArray::kExternalPointerOffset));
+  // rcx: base pointer of external storage
+  switch (array_type) {
+    case kExternalByteArray:
+    case kExternalUnsignedByteArray:
+      __ movb(Operand(rcx, rbx, times_1, 0), rax);
+      break;
+    case kExternalShortArray:
+    case kExternalUnsignedShortArray:
+      __ movw(Operand(rcx, rbx, times_2, 0), rax);
+      break;
+    case kExternalIntArray:
+    case kExternalUnsignedIntArray:
+      __ movl(Operand(rcx, rbx, times_4, 0), rax);
+      break;
+    case kExternalFloatArray:
+      // Need to perform int-to-float conversion.
+      __ push(rax);
+      __ fild_s(Operand(rsp, 0));
+      __ pop(rax);
+      __ fstp_s(Operand(rcx, rbx, times_4, 0));
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  __ movq(rax, rdx);  // Return the original value.
+  __ ret(0);
+
+  __ bind(&check_heap_number);
+  __ CmpObjectType(rax, HEAP_NUMBER_TYPE, rdx);
+  __ j(not_equal, &slow);
+
+  // The WebGL specification leaves the behavior of storing NaN and
+  // +/-Infinity into integer arrays basically undefined. For more
+  // reproducible behavior, convert these to zero.
+  __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
+  __ movq(rdx, rax);  // Save the value.
+  __ movq(rcx, FieldOperand(rcx, ExternalArray::kExternalPointerOffset));
+  // rbx: untagged index
+  // rcx: base pointer of external storage
+  // top of FPU stack: value
+  if (array_type == kExternalFloatArray) {
+    __ fstp_s(Operand(rcx, rbx, times_4, 0));
+  } else {
+    // Need to perform float-to-int conversion.
+    // Test the top of the FP stack for NaN.
+    Label is_nan;
+    __ fucomi(0);
+    __ j(parity_even, &is_nan);
+
+    __ push(rax);  // Make room on stack
+    __ fistp_d(Operand(rsp, 0));
+    __ pop(rax);
+    // rax: untagged integer value
+    switch (array_type) {
+      case kExternalByteArray:
+      case kExternalUnsignedByteArray:
+        __ movb(Operand(rcx, rbx, times_1, 0), rax);
+        break;
+      case kExternalShortArray:
+      case kExternalUnsignedShortArray:
+        __ movw(Operand(rcx, rbx, times_2, 0), rax);
+        break;
+      case kExternalIntArray:
+      case kExternalUnsignedIntArray: {
+        // We also need to check explicitly for +/-Infinity; the
+        // hardware converts these to MIN_INT, and we must be careful
+        // not to confuse them with legal uses of MIN_INT.
+        Label not_infinity;
+        // The exponent test below detects both NaN and Infinity, but
+        // NaN has already been handled by the FPU check above, so
+        // only Infinity can reach this point.
+        __ movzxwq(rdi, FieldOperand(rdx, HeapNumber::kValueOffset + 6));
+        __ and_(rdi, Immediate(0x7FF0));
+        __ cmpw(rdi, Immediate(0x7FF0));
+        __ j(not_equal, &not_infinity);
+        __ movq(rax, Immediate(0));
+        __ bind(&not_infinity);
+        __ movl(Operand(rcx, rbx, times_4, 0), rax);
+        break;
+      }
+      default:
+        UNREACHABLE();
+        break;
+    }
+    __ movq(rax, rdx);  // Return the original value.
+    __ ret(0);
+
+    __ bind(&is_nan);
+    __ ffree();
+    __ fincstp();
+    __ movq(rax, Immediate(0));
+    switch (array_type) {
+      case kExternalByteArray:
+      case kExternalUnsignedByteArray:
+        __ movb(Operand(rcx, rbx, times_1, 0), rax);
+        break;
+      case kExternalShortArray:
+      case kExternalUnsignedShortArray:
+        __ movw(Operand(rcx, rbx, times_2, 0), rax);
+        break;
+      case kExternalIntArray:
+      case kExternalUnsignedIntArray:
+        __ movl(Operand(rcx, rbx, times_4, 0), rax);
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+    __ movq(rax, rdx);  // Return the original value.
+    __ ret(0);
+  }
+
+  // Slow case: call runtime.
+  __ bind(&slow);
+  Generate(masm, ExternalReference(Runtime::kSetProperty));
+}
+
+
 void CallIC::Generate(MacroAssembler* masm,
                       int argc,
                       ExternalReference const& f) {
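
Note: the external-array store above clamps NaN and +/-Infinity to zero before writing into integer arrays, per the WebGL-motivated comment in the code. A scalar model of the intended semantics (simplified: finite values are truncated here, while the emitted fistp honors the FPU rounding mode):

    #include <assert.h>
    #include <math.h>
    #include <stdint.h>

    // NaN and +/-Infinity become 0; other values are converted to int.
    static int32_t ClampedConvert(double value) {
      if (isnan(value) || isinf(value)) return 0;
      return static_cast<int32_t>(value);
    }

    int main() {
      assert(ClampedConvert(3.7) == 3);
      assert(ClampedConvert(0.0 / 0.0) == 0);  // NaN.
      assert(ClampedConvert(1.0 / 0.0) == 0);  // +Infinity.
      return 0;
    }
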
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 614f67b..6bf6e6a 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -580,6 +580,14 @@
 }
 
 
+Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
+  // An unsigned 32-bit integer value is valid as long as the high bit
+  // is not set.
+  testq(src, Immediate(0x80000000));
+  return zero;
+}
+
+
 void MacroAssembler::SmiNeg(Register dst, Register src, Label* on_smi_result) {
   if (dst.is(src)) {
     ASSERT(!dst.is(kScratchRegister));
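
Note: CheckUInteger32ValidSmiValue above relies on x64 smis carrying a 32-bit payload, so an unsigned 32-bit value is only representable when bit 31 is clear. A sketch of the predicate:

    #include <assert.h>
    #include <stdint.h>

    // An unsigned 32-bit value only fits a 32-bit smi payload if its
    // high bit is clear; otherwise it would read back as negative.
    static bool UIntIsValidSmiValue(uint32_t value) {
      return (value & 0x80000000u) == 0;  // Mirrors the testq above.
    }

    int main() {
      assert(UIntIsValidSmiValue(0x7fffffffu));
      assert(!UIntIsValidSmiValue(0x80000000u));  // Needs a HeapNumber instead.
      return 0;
    }
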
@@ -699,7 +707,7 @@
                                        Smi* constant,
                                        Label* on_not_smi_result) {
   // Does not assume that src is a smi.
-  ASSERT_EQ(static_cast<intptr_t>(1), kSmiTagMask);
+  ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
   ASSERT_EQ(0, kSmiTag);
   ASSERT(!dst.is(kScratchRegister));
   ASSERT(!src.is(kScratchRegister));
@@ -1243,40 +1251,19 @@
 }
 
 
+void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
+                                                Label* on_invalid) {
+  Condition is_valid = CheckUInteger32ValidSmiValue(src);
+  j(NegateCondition(is_valid), on_invalid);
+}
+
+
 void MacroAssembler::JumpIfNotBothSmi(Register src1, Register src2,
                                       Label* on_not_both_smi) {
   Condition both_smi = CheckBothSmi(src1, src2);
   j(NegateCondition(both_smi), on_not_both_smi);
 }
 
-bool MacroAssembler::IsUnsafeSmi(Smi* value) {
-  return false;
-}
-
-
-void MacroAssembler::LoadUnsafeSmi(Register dst, Smi* source) {
-  UNIMPLEMENTED();
-}
-
-
-void MacroAssembler::Move(Register dst, Smi* source) {
-  if (IsUnsafeSmi(source)) {
-    LoadUnsafeSmi(dst, source);
-  } else {
-    Set(dst, reinterpret_cast<int64_t>(source));
-  }
-}
-
-
-void MacroAssembler::Move(const Operand& dst, Smi* source) {
-  if (IsUnsafeSmi(source)) {
-    LoadUnsafeSmi(kScratchRegister, source);
-    movq(dst, kScratchRegister);
-  } else {
-    Set(dst, reinterpret_cast<int64_t>(source));
-  }
-}
-
 
 void MacroAssembler::Move(Register dst, Handle<Object> source) {
   ASSERT(!source->IsFailure());
@@ -1332,33 +1319,23 @@
 
 
 void MacroAssembler::Push(Smi* source) {
-  if (IsUnsafeSmi(source)) {
-    LoadUnsafeSmi(kScratchRegister, source);
-    push(kScratchRegister);
+  intptr_t smi = reinterpret_cast<intptr_t>(source);
+  if (is_int32(smi)) {
+    push(Immediate(static_cast<int32_t>(smi)));
   } else {
-    intptr_t smi = reinterpret_cast<intptr_t>(source);
-    if (is_int32(smi)) {
-      push(Immediate(static_cast<int32_t>(smi)));
-    } else {
-      Set(kScratchRegister, smi);
-      push(kScratchRegister);
-    }
+    Set(kScratchRegister, smi);
+    push(kScratchRegister);
   }
 }
 
 
 void MacroAssembler::Test(const Operand& src, Smi* source) {
-  if (IsUnsafeSmi(source)) {
-    LoadUnsafeSmi(kScratchRegister, source);
-    testq(src, kScratchRegister);
+  intptr_t smi = reinterpret_cast<intptr_t>(source);
+  if (is_int32(smi)) {
+    testl(src, Immediate(static_cast<int32_t>(smi)));
   } else {
-    intptr_t smi = reinterpret_cast<intptr_t>(source);
-    if (is_int32(smi)) {
-      testl(src, Immediate(static_cast<int32_t>(smi)));
-    } else {
-      Move(kScratchRegister, source);
-      testq(src, kScratchRegister);
-    }
+    Move(kScratchRegister, source);
+    testq(src, kScratchRegister);
   }
 }
 
@@ -1444,18 +1421,9 @@
 
 
 void MacroAssembler::FCmp() {
-  fucompp();
-  push(rax);
-  fnstsw_ax();
-  if (CpuFeatures::IsSupported(CpuFeatures::SAHF)) {
-    sahf();
-  } else {
-    shrl(rax, Immediate(8));
-    and_(rax, Immediate(0xFF));
-    push(rax);
-    popfq();
-  }
-  pop(rax);
+  fucomip();
+  ffree(0);
+  fincstp();
 }
 
 
@@ -1819,9 +1787,7 @@
 }
 
 
-void MacroAssembler::EnterExitFrame(StackFrame::Type type, int result_size) {
-  ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
-
+void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode, int result_size) {
   // Setup the frame structure on the stack.
   // All constants are relative to the frame pointer of the exit frame.
   ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
@@ -1833,7 +1799,12 @@
   // Reserve room for entry stack pointer and push the debug marker.
   ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
   push(Immediate(0));  // saved entry sp, patched before call
-  push(Immediate(type == StackFrame::EXIT_DEBUG ? 1 : 0));
+  if (mode == ExitFrame::MODE_DEBUG) {
+    push(Immediate(0));
+  } else {
+    movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
+    push(kScratchRegister);
+  }
 
   // Save the frame pointer and the context in top.
   ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
@@ -1853,7 +1824,7 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Save the state of all registers to the stack from the memory
   // location. This is needed to allow nested break points.
-  if (type == StackFrame::EXIT_DEBUG) {
+  if (mode == ExitFrame::MODE_DEBUG) {
     // TODO(1243899): This should be symmetric to
     // CopyRegistersFromStackToMemory() but it isn't! esp is assumed
     // correct here, but computed for the other call. Very error
@@ -1892,17 +1863,17 @@
 }
 
 
-void MacroAssembler::LeaveExitFrame(StackFrame::Type type, int result_size) {
+void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode, int result_size) {
   // Registers:
   // r15 : argv
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Restore the memory copy of the registers by digging them out from
   // the stack. This is needed to allow nested break points.
-  if (type == StackFrame::EXIT_DEBUG) {
+  if (mode == ExitFrame::MODE_DEBUG) {
     // It's okay to clobber register rbx below because we don't need
     // the function pointer after this.
     const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
-    int kOffset = ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize;
+    int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize;
     lea(rbx, Operand(rbp, kOffset));
     CopyRegistersFromStackToMemory(rbx, rcx, kJSCallerSaved);
   }
@@ -1912,16 +1883,6 @@
   movq(rcx, Operand(rbp, 1 * kPointerSize));
   movq(rbp, Operand(rbp, 0 * kPointerSize));
 
-#ifdef _WIN64
-  // If return value is on the stack, pop it to registers.
-  if (result_size > 1) {
-    ASSERT_EQ(2, result_size);
-    // Position above 4 argument mirrors and arguments object.
-    movq(rax, Operand(rsp, 6 * kPointerSize));
-    movq(rdx, Operand(rsp, 7 * kPointerSize));
-  }
-#endif
-
   // Pop everything up to and including the arguments and the receiver
   // from the caller stack.
   lea(rsp, Operand(r15, 1 * kPointerSize));
@@ -2251,6 +2212,23 @@
 }
 
 
+void MacroAssembler::AllocateHeapNumber(Register result,
+                                        Register scratch,
+                                        Label* gc_required) {
+  // Allocate heap number in new space.
+  AllocateInNewSpace(HeapNumber::kSize,
+                     result,
+                     scratch,
+                     no_reg,
+                     gc_required,
+                     TAG_OBJECT);
+
+  // Set the map.
+  LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
+  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+}
+
+
 CodePatcher::CodePatcher(byte* address, int size)
     : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
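
Note: AllocateHeapNumber above pairs a new-space allocation with stamping the heap number map, bailing to gc_required when space runs out. A toy bump-allocator model of that flow (stand-in types, not V8's heap):

    #include <assert.h>
    #include <stddef.h>

    static char new_space[1024];  // Toy stand-in for new space.
    static size_t new_space_top = 0;

    struct HeapNumber { const char* map; double value; };

    static HeapNumber* AllocateHeapNumber(bool* gc_required) {
      if (new_space_top + sizeof(HeapNumber) > sizeof(new_space)) {
        *gc_required = true;  // Analogue of jumping to the gc_required label.
        return NULL;
      }
      HeapNumber* result =
          reinterpret_cast<HeapNumber*>(new_space + new_space_top);
      new_space_top += sizeof(HeapNumber);
      result->map = "heap_number_map";  // Stamp the map so GC can parse it.
      return result;
    }

    int main() {
      bool gc_required = false;
      HeapNumber* n = AllocateHeapNumber(&gc_required);
      assert(n != NULL && !gc_required);
      return 0;
    }
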
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 2aa4ce0..11cdfc3 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -106,16 +106,16 @@
   void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
   void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
 
-  // Enter specific kind of exit frame; either EXIT or
-  // EXIT_DEBUG. Expects the number of arguments in register rax and
+  // Enter a specific kind of exit frame, in either normal or
+  // debug mode. Expects the number of arguments in register rax and
   // sets up the number of arguments in register rdi and the pointer
   // to the first argument in register rsi.
-  void EnterExitFrame(StackFrame::Type type, int result_size = 1);
+  void EnterExitFrame(ExitFrame::Mode mode, int result_size = 1);
 
   // Leave the current exit frame. Expects/provides the return value in
   // register rax:rdx (untouched) and the pointer to the first
   // argument in register rsi.
-  void LeaveExitFrame(StackFrame::Type type, int result_size = 1);
+  void LeaveExitFrame(ExitFrame::Mode mode, int result_size = 1);
 
 
   // ---------------------------------------------------------------------------
@@ -207,12 +207,19 @@
   // to a smi.
   Condition CheckInteger32ValidSmiValue(Register src);
 
+  // Checks whether an unsigned 32-bit integer value is valid for
+  // conversion to a smi.
+  Condition CheckUInteger32ValidSmiValue(Register src);
+
   // Test-and-jump functions. Typically combines a check function
   // above with a conditional jump.
 
   // Jump if the value cannot be represented by a smi.
   void JumpIfNotValidSmiValue(Register src, Label* on_invalid);
 
+  // Jump if the unsigned integer value cannot be represented by a smi.
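+  // (A uint32 with the top bit set exceeds the positive smi range even
+  //  though it fits in 32 bits, so the signed check above is not enough.)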
+  void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid);
+
   // Jump to label if the value is a tagged smi.
   void JumpIfSmi(Register src, Label* on_smi);
 
@@ -374,12 +381,15 @@
   // Converts a positive smi to a negative index.
   SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);
 
-  bool IsUnsafeSmi(Smi* value);
-  void LoadUnsafeSmi(Register dst, Smi* source);
-
   // Basic Smi operations.
-  void Move(Register dst, Smi* source);
-  void Move(const Operand& dst, Smi* source);
+  void Move(Register dst, Smi* source) {
+    Set(dst, reinterpret_cast<int64_t>(source));
+  }
+
+  void Move(const Operand& dst, Smi* source) {
+    Set(dst, reinterpret_cast<int64_t>(source));
+  }
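+
+  // (A Smi* is a tagged immediate value rather than a heap pointer, so
+  //  moving one just materializes the 64-bit tagged value via Set.)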
+
   void Push(Smi* smi);
   void Test(const Operand& dst, Smi* source);
 
@@ -391,14 +401,6 @@
   void Set(const Operand& dst, int64_t x);
 
   // Handle support
-  bool IsUnsafeSmi(Handle<Object> value) {
-    return IsUnsafeSmi(Smi::cast(*value));
-  }
-
-  void LoadUnsafeSmi(Register dst, Handle<Object> source) {
-    LoadUnsafeSmi(dst, Smi::cast(*source));
-  }
-
   void Move(Register dst, Handle<Object> source);
   void Move(const Operand& dst, Handle<Object> source);
   void Cmp(Register dst, Handle<Object> source);
@@ -503,6 +505,13 @@
   // un-done.
   void UndoAllocationInNewSpace(Register object);
 
+  // Allocate a heap number in new space, leaving its value field
+  // uninitialized. Returns a tagged pointer in the result register, or
+  // jumps to gc_required if new space is full.
+  void AllocateHeapNumber(Register result,
+                          Register scratch,
+                          Label* gc_required);
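+  // (Sketch of intended use, assuming the usual SSE2 helpers: allocate,
+  //  then store the payload yourself, e.g.
+  //    AllocateHeapNumber(rax, rcx, &gc_required);
+  //    movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
+  //  since the value field is left uninitialized.)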
+
   // ---------------------------------------------------------------------------
   // Support functions.
 
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index 5d17a2d..88636f8 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -1209,18 +1209,16 @@
 
 
 void RegExpMacroAssemblerX64::CheckStackLimit() {
-  if (FLAG_check_stack) {
-    Label no_stack_overflow;
-    ExternalReference stack_limit =
-        ExternalReference::address_of_regexp_stack_limit();
-    __ load_rax(stack_limit);
-    __ cmpq(backtrack_stackpointer(), rax);
-    __ j(above, &no_stack_overflow);
+  Label no_stack_overflow;
+  ExternalReference stack_limit =
+      ExternalReference::address_of_regexp_stack_limit();
+  __ load_rax(stack_limit);
+  __ cmpq(backtrack_stackpointer(), rax);
+  __ j(above, &no_stack_overflow);
 
-    SafeCall(&stack_overflow_label_);
+  SafeCall(&stack_overflow_label_);
 
-    __ bind(&no_stack_overflow);
-  }
+  __ bind(&no_stack_overflow);
 }
 
 
@@ -1287,11 +1285,6 @@
   }
 }
 
-
-void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
-  __ int3();  // Unused on x64.
-}
-
 #undef __
 
 #endif  // V8_NATIVE_REGEXP
diff --git a/test/cctest/SConscript b/test/cctest/SConscript
index f041041..9deefa5 100644
--- a/test/cctest/SConscript
+++ b/test/cctest/SConscript
@@ -34,6 +34,7 @@
 
 SOURCES = {
   'all': [
+    'test-accessors.cc',
     'test-alloc.cc',
     'test-api.cc',
     'test-ast.cc',
diff --git a/test/cctest/cctest.cc b/test/cctest/cctest.cc
index 82a33e6..f638ed4 100644
--- a/test/cctest/cctest.cc
+++ b/test/cctest/cctest.cc
@@ -121,3 +121,6 @@
   v8::V8::Dispose();
   return 0;
 }
+
+RegisterThreadedTest* RegisterThreadedTest::first_ = NULL;
+int RegisterThreadedTest::count_ = 0;
diff --git a/test/cctest/cctest.h b/test/cctest/cctest.h
index a95645e..404b692 100644
--- a/test/cctest/cctest.h
+++ b/test/cctest/cctest.h
@@ -28,6 +28,8 @@
 #ifndef CCTEST_H_
 #define CCTEST_H_
 
+#include "v8.h"
+
 #ifndef TEST
 #define TEST(Name)                                                       \
   static void Test##Name();                                              \
@@ -72,4 +74,138 @@
   CcTest* prev_;
 };
 
+// Switches between all the Api tests using the threading support.
+// In order to get a surprising but repeatable pattern of thread
+// switching it has extra semaphores to control the order in which
+// the tests alternate, not relying solely on the big V8 lock.
+//
+// A test is augmented with calls to ApiTestFuzzer::Fuzz() in its
+// callbacks.  This will have no effect when we are not running the
+// thread fuzzing test.  In the thread fuzzing test it will
+// pseudorandomly select a successor thread and switch execution
+// to that thread, suspending the current test.
+class ApiTestFuzzer: public v8::internal::Thread {
+ public:
+  void CallTest();
+  explicit ApiTestFuzzer(int num)
+      : test_number_(num),
+        gate_(v8::internal::OS::CreateSemaphore(0)),
+        active_(true) {
+  }
+  ~ApiTestFuzzer() { delete gate_; }
+
+  // The ApiTestFuzzer is also a Thread, so it has a Run method.
+  virtual void Run();
+
+  enum PartOfTest { FIRST_PART, SECOND_PART };
+
+  static void Setup(PartOfTest part);
+  static void RunAllTests();
+  static void TearDown();
+  // This method switches threads if we are running the Threading test.
+  // Otherwise it does nothing.
+  static void Fuzz();
+ private:
+  static bool fuzzing_;
+  static int tests_being_run_;
+  static int current_;
+  static int active_tests_;
+  static bool NextThread();
+  int test_number_;
+  v8::internal::Semaphore* gate_;
+  bool active_;
+  void ContextSwitch();
+  static int GetNextTestNumber();
+  static v8::internal::Semaphore* all_tests_done_;
+};
+
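+// Example (taken from the accessor tests): a callback yields to the fuzzer
+// before doing its real work, so a thread switch can happen inside a V8
+// callback:
+//
+//   static v8::Handle<Value> handle_property(Local<String> name,
+//                                            const AccessorInfo&) {
+//     ApiTestFuzzer::Fuzz();  // No-op unless the threading test runs.
+//     return v8_num(900);
+//   }
+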
+
+#define THREADED_TEST(Name)                                          \
+  static void Test##Name();                                          \
+  RegisterThreadedTest register_##Name(Test##Name, #Name);           \
+  /* */ TEST(Name)
+
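+// THREADED_TEST registers the test with the fuzzing machinery above (by
+// name) and also declares it as a plain TEST, so it runs both standalone
+// and under the thread-switching configuration.
+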
+
+class RegisterThreadedTest {
+ public:
+  explicit RegisterThreadedTest(CcTest::TestFunction* callback,
+                                const char* name)
+      : fuzzer_(NULL), callback_(callback), name_(name) {
+    prev_ = first_;
+    first_ = this;
+    count_++;
+  }
+  static int count() { return count_; }
+  static RegisterThreadedTest* nth(int i) {
+    CHECK(i < count());
+    RegisterThreadedTest* current = first_;
+    while (i > 0) {
+      i--;
+      current = current->prev_;
+    }
+    return current;
+  }
+  CcTest::TestFunction* callback() { return callback_; }
+  ApiTestFuzzer* fuzzer_;
+  const char* name() { return name_; }
+
+ private:
+  static RegisterThreadedTest* first_;
+  static int count_;
+  CcTest::TestFunction* callback_;
+  RegisterThreadedTest* prev_;
+  const char* name_;
+};
+
+
+// A LocalContext holds a reference to a v8::Context.
+class LocalContext {
+ public:
+  LocalContext(v8::ExtensionConfiguration* extensions = 0,
+               v8::Handle<v8::ObjectTemplate> global_template =
+                   v8::Handle<v8::ObjectTemplate>(),
+               v8::Handle<v8::Value> global_object = v8::Handle<v8::Value>())
+    : context_(v8::Context::New(extensions, global_template, global_object)) {
+    context_->Enter();
+  }
+
+  virtual ~LocalContext() {
+    context_->Exit();
+    context_.Dispose();
+  }
+
+  v8::Context* operator->() { return *context_; }
+  v8::Context* operator*() { return *context_; }
+  bool IsReady() { return !context_.IsEmpty(); }
+
+  v8::Local<v8::Context> local() {
+    return v8::Local<v8::Context>::New(context_);
+  }
+
+ private:
+  v8::Persistent<v8::Context> context_;
+};
+
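+// Typical usage: the context is entered on construction and exited on
+// destruction, so a stack-allocated LocalContext scopes itself, e.g.
+//   LocalContext env;
+//   env->Global()->Set(v8_str("obj"), obj->NewInstance());
+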
+
+static inline v8::Local<v8::Value> v8_num(double x) {
+  return v8::Number::New(x);
+}
+
+
+static inline v8::Local<v8::String> v8_str(const char* x) {
+  return v8::String::New(x);
+}
+
+
+static inline v8::Local<v8::Script> v8_compile(const char* x) {
+  return v8::Script::Compile(v8_str(x));
+}
+
+
+// Helper function that compiles and runs the source.
+static inline v8::Local<v8::Value> CompileRun(const char* source) {
+  return v8::Script::Compile(v8::String::New(source))->Run();
+}
+
+
 #endif  // ifndef CCTEST_H_
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index 8fff769..6ce241f 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -33,9 +33,24 @@
 # BUG(382): Weird test. Can't guarantee that it never times out.
 test-api/ApplyInterruption: PASS || TIMEOUT
 
+# This is about to go away anyway since new snapshot code is on the way.
+test-serialize/Deserialize: FAIL
+test-serialize/DeserializeAndRunScript: FAIL
+test-serialize/DeserializeNatives: FAIL
+test-serialize/DeserializeExtensions: FAIL
+
+# These tests always fail.  They are here to test test.py.  If
+# they don't fail then test.py has failed.
+test-serialize/TestThatAlwaysFails: FAIL
+test-serialize/DependentTestThatAlwaysFails: FAIL
+
 
 [ $arch == arm ]
 
+# New serialization doesn't work on ARM yet.
+test-serialize/Deserialize2: SKIP
+test-serialize/DeserializeAndRunScript2: SKIP
+
 # BUG(113): Test seems flaky on ARM.
 test-spaces/LargeObjectSpace: PASS || FAIL
 
diff --git a/test/cctest/test-accessors.cc b/test/cctest/test-accessors.cc
new file mode 100644
index 0000000..b56238a
--- /dev/null
+++ b/test/cctest/test-accessors.cc
@@ -0,0 +1,424 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "api.h"
+#include "cctest.h"
+#include "frames-inl.h"
+#include "string-stream.h"
+
+using ::v8::ObjectTemplate;
+using ::v8::Value;
+using ::v8::Context;
+using ::v8::Local;
+using ::v8::String;
+using ::v8::Script;
+using ::v8::Function;
+using ::v8::AccessorInfo;
+using ::v8::Extension;
+
+namespace i = ::v8::internal;
+
+static v8::Handle<Value> handle_property(Local<String> name,
+                                         const AccessorInfo&) {
+  ApiTestFuzzer::Fuzz();
+  return v8_num(900);
+}
+
+
+THREADED_TEST(PropertyHandler) {
+  v8::HandleScope scope;
+  Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
+  fun_templ->InstanceTemplate()->SetAccessor(v8_str("foo"), handle_property);
+  LocalContext env;
+  Local<Function> fun = fun_templ->GetFunction();
+  env->Global()->Set(v8_str("Fun"), fun);
+  Local<Script> getter = v8_compile("var obj = new Fun(); obj.foo;");
+  CHECK_EQ(900, getter->Run()->Int32Value());
+  Local<Script> setter = v8_compile("obj.foo = 901;");
+  CHECK_EQ(901, setter->Run()->Int32Value());
+}
+
+
+static v8::Handle<Value> GetIntValue(Local<String> property,
+                                     const AccessorInfo& info) {
+  ApiTestFuzzer::Fuzz();
+  int* value =
+      static_cast<int*>(v8::Handle<v8::External>::Cast(info.Data())->Value());
+  return v8_num(*value);
+}
+
+
+static void SetIntValue(Local<String> property,
+                        Local<Value> value,
+                        const AccessorInfo& info) {
+  int* field =
+      static_cast<int*>(v8::Handle<v8::External>::Cast(info.Data())->Value());
+  *field = value->Int32Value();
+}
+
+int foo, bar, baz;
+
+THREADED_TEST(GlobalVariableAccess) {
+  foo = 0;
+  bar = -4;
+  baz = 10;
+  v8::HandleScope scope;
+  v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
+  templ->InstanceTemplate()->SetAccessor(v8_str("foo"),
+                                         GetIntValue,
+                                         SetIntValue,
+                                         v8::External::New(&foo));
+  templ->InstanceTemplate()->SetAccessor(v8_str("bar"),
+                                         GetIntValue,
+                                         SetIntValue,
+                                         v8::External::New(&bar));
+  templ->InstanceTemplate()->SetAccessor(v8_str("baz"),
+                                         GetIntValue,
+                                         SetIntValue,
+                                         v8::External::New(&baz));
+  LocalContext env(0, templ->InstanceTemplate());
+  v8_compile("foo = (++bar) + baz")->Run();
+  CHECK_EQ(bar, -3);
+  CHECK_EQ(foo, 7);
+}
+
+
+static int x_register = 0;
+static v8::Handle<v8::Object> x_receiver;
+static v8::Handle<v8::Object> x_holder;
+
+
+static v8::Handle<Value> XGetter(Local<String> name, const AccessorInfo& info) {
+  ApiTestFuzzer::Fuzz();
+  CHECK_EQ(x_receiver, info.This());
+  CHECK_EQ(x_holder, info.Holder());
+  return v8_num(x_register);
+}
+
+
+static void XSetter(Local<String> name,
+                    Local<Value> value,
+                    const AccessorInfo& info) {
+  CHECK_EQ(x_holder, info.This());
+  CHECK_EQ(x_holder, info.Holder());
+  x_register = value->Int32Value();
+}
+
+
+THREADED_TEST(AccessorIC) {
+  v8::HandleScope scope;
+  v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+  obj->SetAccessor(v8_str("x"), XGetter, XSetter);
+  LocalContext context;
+  x_holder = obj->NewInstance();
+  context->Global()->Set(v8_str("holder"), x_holder);
+  x_receiver = v8::Object::New();
+  context->Global()->Set(v8_str("obj"), x_receiver);
+  v8::Handle<v8::Array> array = v8::Handle<v8::Array>::Cast(CompileRun(
+    "obj.__proto__ = holder;"
+    "var result = [];"
+    "for (var i = 0; i < 10; i++) {"
+    "  holder.x = i;"
+    "  result.push(obj.x);"
+    "}"
+    "result"));
+  CHECK_EQ(10, array->Length());
+  for (int i = 0; i < 10; i++) {
+    v8::Handle<Value> entry = array->Get(v8::Integer::New(i));
+    CHECK_EQ(v8::Integer::New(i), entry);
+  }
+}
+
+
+static v8::Handle<Value> AccessorProhibitsOverwritingGetter(
+    Local<String> name,
+    const AccessorInfo& info) {
+  ApiTestFuzzer::Fuzz();
+  return v8::True();
+}
+
+
+THREADED_TEST(AccessorProhibitsOverwriting) {
+  v8::HandleScope scope;
+  LocalContext context;
+  Local<ObjectTemplate> templ = ObjectTemplate::New();
+  templ->SetAccessor(v8_str("x"),
+                     AccessorProhibitsOverwritingGetter,
+                     0,
+                     v8::Handle<Value>(),
+                     v8::PROHIBITS_OVERWRITING,
+                     v8::ReadOnly);
+  Local<v8::Object> instance = templ->NewInstance();
+  context->Global()->Set(v8_str("obj"), instance);
+  Local<Value> value = CompileRun(
+      "obj.__defineGetter__('x', function() { return false; });"
+      "obj.x");
+  CHECK(value->BooleanValue());
+  value = CompileRun(
+      "var setter_called = false;"
+      "obj.__defineSetter__('x', function() { setter_called = true; });"
+      "obj.x = 42;"
+      "setter_called");
+  CHECK(!value->BooleanValue());
+  value = CompileRun(
+      "obj2 = {};"
+      "obj2.__proto__ = obj;"
+      "obj2.__defineGetter__('x', function() { return false; });"
+      "obj2.x");
+  CHECK(value->BooleanValue());
+  value = CompileRun(
+      "var setter_called = false;"
+      "obj2 = {};"
+      "obj2.__proto__ = obj;"
+      "obj2.__defineSetter__('x', function() { setter_called = true; });"
+      "obj2.x = 42;"
+      "setter_called");
+  CHECK(!value->BooleanValue());
+}
+
+
+template <int C>
+static v8::Handle<Value> HandleAllocatingGetter(Local<String> name,
+                                                const AccessorInfo& info) {
+  ApiTestFuzzer::Fuzz();
+  for (int i = 0; i < C; i++)
+    v8::String::New("foo");
+  return v8::String::New("foo");
+}
+
+
+THREADED_TEST(HandleScopePop) {
+  v8::HandleScope scope;
+  v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+  obj->SetAccessor(v8_str("one"), HandleAllocatingGetter<1>);
+  obj->SetAccessor(v8_str("many"), HandleAllocatingGetter<1024>);
+  LocalContext context;
+  v8::Handle<v8::Object> inst = obj->NewInstance();
+  context->Global()->Set(v8::String::New("obj"), inst);
+  int count_before = i::HandleScope::NumberOfHandles();
+  {
+    v8::HandleScope scope;
+    CompileRun(
+        "for (var i = 0; i < 1000; i++) {"
+        "  obj.one;"
+        "  obj.many;"
+        "}");
+  }
+  int count_after = i::HandleScope::NumberOfHandles();
+  CHECK_EQ(count_before, count_after);
+}
+
+static v8::Handle<Value> CheckAccessorArgsCorrect(Local<String> name,
+                                                  const AccessorInfo& info) {
+  CHECK(info.This() == info.Holder());
+  CHECK(info.Data()->Equals(v8::String::New("data")));
+  ApiTestFuzzer::Fuzz();
+  CHECK(info.This() == info.Holder());
+  CHECK(info.Data()->Equals(v8::String::New("data")));
+  i::Heap::CollectAllGarbage(true);
+  CHECK(info.This() == info.Holder());
+  CHECK(info.Data()->Equals(v8::String::New("data")));
+  return v8::Integer::New(17);
+}
+
+THREADED_TEST(DirectCall) {
+  v8::HandleScope scope;
+  v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+  obj->SetAccessor(v8_str("xxx"),
+                   CheckAccessorArgsCorrect,
+                   NULL,
+                   v8::String::New("data"));
+  LocalContext context;
+  v8::Handle<v8::Object> inst = obj->NewInstance();
+  context->Global()->Set(v8::String::New("obj"), inst);
+  Local<Script> scr = v8::Script::Compile(v8::String::New("obj.xxx"));
+  for (int i = 0; i < 10; i++) {
+    Local<Value> result = scr->Run();
+    CHECK(!result.IsEmpty());
+    CHECK_EQ(17, result->Int32Value());
+  }
+}
+
+static v8::Handle<Value> EmptyGetter(Local<String> name,
+                                     const AccessorInfo& info) {
+  CheckAccessorArgsCorrect(name, info);
+  ApiTestFuzzer::Fuzz();
+  CheckAccessorArgsCorrect(name, info);
+  return v8::Handle<v8::Value>();
+}
+
+THREADED_TEST(EmptyResult) {
+  v8::HandleScope scope;
+  v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+  obj->SetAccessor(v8_str("xxx"), EmptyGetter, NULL, v8::String::New("data"));
+  LocalContext context;
+  v8::Handle<v8::Object> inst = obj->NewInstance();
+  context->Global()->Set(v8::String::New("obj"), inst);
+  Local<Script> scr = v8::Script::Compile(v8::String::New("obj.xxx"));
+  for (int i = 0; i < 10; i++) {
+    Local<Value> result = scr->Run();
+    CHECK(result == v8::Undefined());
+  }
+}
+
+
+THREADED_TEST(NoReuseRegress) {
+  // Check that the IC generated for one test doesn't get reused
+  // for the other.
+  v8::HandleScope scope;
+  {
+    v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+    obj->SetAccessor(v8_str("xxx"), EmptyGetter, NULL, v8::String::New("data"));
+    LocalContext context;
+    v8::Handle<v8::Object> inst = obj->NewInstance();
+    context->Global()->Set(v8::String::New("obj"), inst);
+    Local<Script> scr = v8::Script::Compile(v8::String::New("obj.xxx"));
+    for (int i = 0; i < 2; i++) {
+      Local<Value> result = scr->Run();
+      CHECK(result == v8::Undefined());
+    }
+  }
+  {
+    v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+    obj->SetAccessor(v8_str("xxx"),
+                     CheckAccessorArgsCorrect,
+                     NULL,
+                     v8::String::New("data"));
+    LocalContext context;
+    v8::Handle<v8::Object> inst = obj->NewInstance();
+    context->Global()->Set(v8::String::New("obj"), inst);
+    Local<Script> scr = v8::Script::Compile(v8::String::New("obj.xxx"));
+    for (int i = 0; i < 10; i++) {
+      Local<Value> result = scr->Run();
+      CHECK(!result.IsEmpty());
+      CHECK_EQ(17, result->Int32Value());
+    }
+  }
+}
+
+static v8::Handle<Value> ThrowingGetAccessor(Local<String> name,
+                                             const AccessorInfo& info) {
+  ApiTestFuzzer::Fuzz();
+  return v8::ThrowException(v8_str("g"));
+}
+
+
+static void ThrowingSetAccessor(Local<String> name,
+                                Local<Value> value,
+                                const AccessorInfo& info) {
+  v8::ThrowException(value);
+}
+
+
+THREADED_TEST(Regress1054726) {
+  v8::HandleScope scope;
+  v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+  obj->SetAccessor(v8_str("x"),
+                   ThrowingGetAccessor,
+                   ThrowingSetAccessor,
+                   Local<Value>());
+
+  LocalContext env;
+  env->Global()->Set(v8_str("obj"), obj->NewInstance());
+
+  // Use the throwing property setter/getter in a loop to force
+  // the accessor ICs to be initialized.
+  v8::Handle<Value> result;
+  result = Script::Compile(v8_str(
+      "var result = '';"
+      "for (var i = 0; i < 5; i++) {"
+      "  try { obj.x; } catch (e) { result += e; }"
+      "}; result"))->Run();
+  CHECK_EQ(v8_str("ggggg"), result);
+
+  result = Script::Compile(String::New(
+      "var result = '';"
+      "for (var i = 0; i < 5; i++) {"
+      "  try { obj.x = i; } catch (e) { result += e; }"
+      "}; result"))->Run();
+  CHECK_EQ(v8_str("01234"), result);
+}
+
+
+static v8::Handle<Value> AllocGetter(Local<String> name,
+                                     const AccessorInfo& info) {
+  ApiTestFuzzer::Fuzz();
+  return v8::Array::New(1000);
+}
+
+
+THREADED_TEST(Gc) {
+  v8::HandleScope scope;
+  v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+  obj->SetAccessor(v8_str("xxx"), AllocGetter);
+  LocalContext env;
+  env->Global()->Set(v8_str("obj"), obj->NewInstance());
+  Script::Compile(String::New(
+      "var last = [];"
+      "for (var i = 0; i < 2048; i++) {"
+      "  var result = obj.xxx;"
+      "  result[0] = last;"
+      "  last = result;"
+      "}"))->Run();
+}
+
+
+static v8::Handle<Value> StackCheck(Local<String> name,
+                                    const AccessorInfo& info) {
+  i::StackFrameIterator iter;
+  for (int i = 0; !iter.done(); i++) {
+    i::StackFrame* frame = iter.frame();
+    CHECK(i != 0 || (frame->type() == i::StackFrame::EXIT));
+    CHECK(frame->code()->IsCode());
+    i::Address pc = frame->pc();
+    i::Code* code = frame->code();
+    CHECK(code->contains(pc));
+    iter.Advance();
+  }
+  return v8::Undefined();
+}
+
+
+THREADED_TEST(StackIteration) {
+  v8::HandleScope scope;
+  v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+  i::StringStream::ClearMentionedObjectCache();
+  obj->SetAccessor(v8_str("xxx"), StackCheck);
+  LocalContext env;
+  env->Global()->Set(v8_str("obj"), obj->NewInstance());
+  Script::Compile(String::New(
+      "function foo() {"
+      "  return obj.xxx;"
+      "}"
+      "for (var i = 0; i < 100; i++) {"
+      "  foo();"
+      "}"))->Run();
+}
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index c63ba31..83038ae 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -25,6 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#include <limits.h>
+
 #include "v8.h"
 
 #include "api.h"
@@ -33,8 +35,11 @@
 #include "snapshot.h"
 #include "platform.h"
 #include "top.h"
+#include "utils.h"
 #include "cctest.h"
 
+static const bool kLogThreading = false;
+
 static bool IsNaN(double x) {
 #ifdef WIN32
   return _isnan(x);
@@ -55,131 +60,6 @@
 
 namespace i = ::v8::internal;
 
-static Local<Value> v8_num(double x) {
-  return v8::Number::New(x);
-}
-
-
-static Local<String> v8_str(const char* x) {
-  return String::New(x);
-}
-
-
-static Local<Script> v8_compile(const char* x) {
-  return Script::Compile(v8_str(x));
-}
-
-
-// A LocalContext holds a reference to a v8::Context.
-class LocalContext {
- public:
-  LocalContext(v8::ExtensionConfiguration* extensions = 0,
-               v8::Handle<ObjectTemplate> global_template =
-                   v8::Handle<ObjectTemplate>(),
-               v8::Handle<Value> global_object = v8::Handle<Value>())
-    : context_(Context::New(extensions, global_template, global_object)) {
-    context_->Enter();
-  }
-
-  virtual ~LocalContext() {
-    context_->Exit();
-    context_.Dispose();
-  }
-
-  Context* operator->() { return *context_; }
-  Context* operator*() { return *context_; }
-  Local<Context> local() { return Local<Context>::New(context_); }
-  bool IsReady() { return !context_.IsEmpty(); }
-
- private:
-  v8::Persistent<Context> context_;
-};
-
-
-// Switches between all the Api tests using the threading support.
-// In order to get a surprising but repeatable pattern of thread
-// switching it has extra semaphores to control the order in which
-// the tests alternate, not relying solely on the big V8 lock.
-//
-// A test is augmented with calls to ApiTestFuzzer::Fuzz() in its
-// callbacks.  This will have no effect when we are not running the
-// thread fuzzing test.  In the thread fuzzing test it will
-// pseudorandomly select a successor thread and switch execution
-// to that thread, suspending the current test.
-class ApiTestFuzzer: public v8::internal::Thread {
- public:
-  void CallTest();
-  explicit ApiTestFuzzer(int num)
-      : test_number_(num),
-        gate_(v8::internal::OS::CreateSemaphore(0)),
-        active_(true) {
-  }
-  ~ApiTestFuzzer() { delete gate_; }
-
-  // The ApiTestFuzzer is also a Thread, so it has a Run method.
-  virtual void Run();
-
-  enum PartOfTest { FIRST_PART, SECOND_PART };
-
-  static void Setup(PartOfTest part);
-  static void RunAllTests();
-  static void TearDown();
-  // This method switches threads if we are running the Threading test.
-  // Otherwise it does nothing.
-  static void Fuzz();
- private:
-  static bool fuzzing_;
-  static int tests_being_run_;
-  static int current_;
-  static int active_tests_;
-  static bool NextThread();
-  int test_number_;
-  v8::internal::Semaphore* gate_;
-  bool active_;
-  void ContextSwitch();
-  static int GetNextTestNumber();
-  static v8::internal::Semaphore* all_tests_done_;
-};
-
-
-#define THREADED_TEST(Name)                                          \
-  static void Test##Name();                                          \
-  RegisterThreadedTest register_##Name(Test##Name);                  \
-  /* */ TEST(Name)
-
-
-class RegisterThreadedTest {
- public:
-  explicit RegisterThreadedTest(CcTest::TestFunction* callback)
-      : fuzzer_(NULL), callback_(callback) {
-    prev_ = first_;
-    first_ = this;
-    count_++;
-  }
-  static int count() { return count_; }
-  static RegisterThreadedTest* nth(int i) {
-    CHECK(i < count());
-    RegisterThreadedTest* current = first_;
-    while (i > 0) {
-      i--;
-      current = current->prev_;
-    }
-    return current;
-  }
-  CcTest::TestFunction* callback() { return callback_; }
-  ApiTestFuzzer* fuzzer_;
-
- private:
-  static RegisterThreadedTest* first_;
-  static int count_;
-  CcTest::TestFunction* callback_;
-  RegisterThreadedTest* prev_;
-};
-
-
-RegisterThreadedTest *RegisterThreadedTest::first_ = NULL;
-int RegisterThreadedTest::count_ = 0;
-
 
 static int signature_callback_count;
 static v8::Handle<Value> IncrementingSignatureCallback(
@@ -228,11 +108,6 @@
 }
 
 
-// Helper function that compiles and runs the source.
-static Local<Value> CompileRun(const char* source) {
-  return Script::Compile(String::New(source))->Run();
-}
-
 THREADED_TEST(ReceiverSignature) {
   v8::HandleScope scope;
   LocalContext env;
@@ -717,27 +592,6 @@
 }
 
 
-static v8::Handle<Value> handle_property(Local<String> name,
-                                         const AccessorInfo&) {
-  ApiTestFuzzer::Fuzz();
-  return v8_num(900);
-}
-
-
-THREADED_TEST(PropertyHandler) {
-  v8::HandleScope scope;
-  Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
-  fun_templ->InstanceTemplate()->SetAccessor(v8_str("foo"), handle_property);
-  LocalContext env;
-  Local<Function> fun = fun_templ->GetFunction();
-  env->Global()->Set(v8_str("Fun"), fun);
-  Local<Script> getter = v8_compile("var obj = new Fun(); obj.foo;");
-  CHECK_EQ(900, getter->Run()->Int32Value());
-  Local<Script> setter = v8_compile("obj.foo = 901;");
-  CHECK_EQ(901, setter->Run()->Int32Value());
-}
-
-
 THREADED_TEST(TinyInteger) {
   v8::HandleScope scope;
   LocalContext env;
@@ -904,49 +758,6 @@
 }
 
 
-static v8::Handle<Value> GetIntValue(Local<String> property,
-                                     const AccessorInfo& info) {
-  ApiTestFuzzer::Fuzz();
-  int* value =
-      static_cast<int*>(v8::Handle<v8::External>::Cast(info.Data())->Value());
-  return v8_num(*value);
-}
-
-static void SetIntValue(Local<String> property,
-                        Local<Value> value,
-                        const AccessorInfo& info) {
-  int* field =
-      static_cast<int*>(v8::Handle<v8::External>::Cast(info.Data())->Value());
-  *field = value->Int32Value();
-}
-
-int foo, bar, baz;
-
-THREADED_TEST(GlobalVariableAccess) {
-  foo = 0;
-  bar = -4;
-  baz = 10;
-  v8::HandleScope scope;
-  v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
-  templ->InstanceTemplate()->SetAccessor(v8_str("foo"),
-                                         GetIntValue,
-                                         SetIntValue,
-                                         v8::External::New(&foo));
-  templ->InstanceTemplate()->SetAccessor(v8_str("bar"),
-                                         GetIntValue,
-                                         SetIntValue,
-                                         v8::External::New(&bar));
-  templ->InstanceTemplate()->SetAccessor(v8_str("baz"),
-                                         GetIntValue,
-                                         SetIntValue,
-                                         v8::External::New(&baz));
-  LocalContext env(0, templ->InstanceTemplate());
-  v8_compile("foo = (++bar) + baz")->Run();
-  CHECK_EQ(bar, -3);
-  CHECK_EQ(foo, 7);
-}
-
-
 THREADED_TEST(ObjectTemplate) {
   v8::HandleScope scope;
   Local<ObjectTemplate> templ1 = ObjectTemplate::New();
@@ -1362,50 +1173,6 @@
 }
 
 
-static v8::Handle<Value> ThrowingGetAccessor(Local<String> name,
-                                             const AccessorInfo& info) {
-  ApiTestFuzzer::Fuzz();
-  return v8::ThrowException(v8_str("g"));
-}
-
-
-static void ThrowingSetAccessor(Local<String> name,
-                                Local<Value> value,
-                                const AccessorInfo& info) {
-  v8::ThrowException(value);
-}
-
-
-THREADED_TEST(Regress1054726) {
-  v8::HandleScope scope;
-  v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
-  obj->SetAccessor(v8_str("x"),
-                   ThrowingGetAccessor,
-                   ThrowingSetAccessor,
-                   Local<Value>());
-
-  LocalContext env;
-  env->Global()->Set(v8_str("obj"), obj->NewInstance());
-
-  // Use the throwing property setter/getter in a loop to force
-  // the accessor ICs to be initialized.
-  v8::Handle<Value> result;
-  result = Script::Compile(v8_str(
-      "var result = '';"
-      "for (var i = 0; i < 5; i++) {"
-      "  try { obj.x; } catch (e) { result += e; }"
-      "}; result"))->Run();
-  CHECK_EQ(v8_str("ggggg"), result);
-
-  result = Script::Compile(String::New(
-      "var result = '';"
-      "for (var i = 0; i < 5; i++) {"
-      "  try { obj.x = i; } catch (e) { result += e; }"
-      "}; result"))->Run();
-  CHECK_EQ(v8_str("01234"), result);
-}
-
-
 THREADED_TEST(FunctionPrototype) {
   v8::HandleScope scope;
   Local<v8::FunctionTemplate> Foo = v8::FunctionTemplate::New();
@@ -3181,53 +2948,6 @@
 }
 
 
-static int x_register = 0;
-static v8::Handle<v8::Object> x_receiver;
-static v8::Handle<v8::Object> x_holder;
-
-
-static v8::Handle<Value> XGetter(Local<String> name, const AccessorInfo& info) {
-  ApiTestFuzzer::Fuzz();
-  CHECK_EQ(x_receiver, info.This());
-  CHECK_EQ(x_holder, info.Holder());
-  return v8_num(x_register);
-}
-
-
-static void XSetter(Local<String> name,
-                    Local<Value> value,
-                    const AccessorInfo& info) {
-  CHECK_EQ(x_holder, info.This());
-  CHECK_EQ(x_holder, info.Holder());
-  x_register = value->Int32Value();
-}
-
-
-THREADED_TEST(AccessorIC) {
-  v8::HandleScope scope;
-  v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
-  obj->SetAccessor(v8_str("x"), XGetter, XSetter);
-  LocalContext context;
-  x_holder = obj->NewInstance();
-  context->Global()->Set(v8_str("holder"), x_holder);
-  x_receiver = v8::Object::New();
-  context->Global()->Set(v8_str("obj"), x_receiver);
-  v8::Handle<v8::Array> array = v8::Handle<v8::Array>::Cast(CompileRun(
-    "obj.__proto__ = holder;"
-    "var result = [];"
-    "for (var i = 0; i < 10; i++) {"
-    "  holder.x = i;"
-    "  result.push(obj.x);"
-    "}"
-    "result"));
-  CHECK_EQ(10, array->Length());
-  for (int i = 0; i < 10; i++) {
-    v8::Handle<Value> entry = array->Get(v8::Integer::New(i));
-    CHECK_EQ(v8::Integer::New(i), entry);
-  }
-}
-
-
 static v8::Handle<Value> NoBlockGetterX(Local<String> name,
                                         const AccessorInfo&) {
   return v8::Handle<Value>();
@@ -6091,13 +5811,17 @@
 // not start immediately.
 bool ApiTestFuzzer::NextThread() {
   int test_position = GetNextTestNumber();
-  int test_number = RegisterThreadedTest::nth(current_)->fuzzer_->test_number_;
+  const char* test_name = RegisterThreadedTest::nth(current_)->name();
   if (test_position == current_) {
-    printf("Stay with %d\n", test_number);
+    if (kLogThreading)
+      printf("Stay with %s\n", test_name);
     return false;
   }
-  printf("Switch from %d to %d\n",
-         current_ < 0 ? 0 : test_number, test_position < 0 ? 0 : test_number);
+  if (kLogThreading) {
+    printf("Switch from %s to %s\n",
+           test_name,
+           RegisterThreadedTest::nth(test_position)->name());
+  }
   current_ = test_position;
   RegisterThreadedTest::nth(current_)->fuzzer_->gate_->Signal();
   return true;
@@ -6206,9 +5930,11 @@
 
 
 void ApiTestFuzzer::CallTest() {
-  printf("Start test %d\n", test_number_);
+  if (kLogThreading)
+    printf("Start test %d\n", test_number_);
   CallTestNumber(test_number_);
-  printf("End test %d\n", test_number_);
+  if (kLogThreading)
+    printf("End test %d\n", test_number_);
 }
 
 
@@ -6696,53 +6422,6 @@
 }
 
 
-static v8::Handle<Value> AccessorProhibitsOverwritingGetter(
-    Local<String> name,
-    const AccessorInfo& info) {
-  ApiTestFuzzer::Fuzz();
-  return v8::True();
-}
-
-
-THREADED_TEST(AccessorProhibitsOverwriting) {
-  v8::HandleScope scope;
-  LocalContext context;
-  Local<ObjectTemplate> templ = ObjectTemplate::New();
-  templ->SetAccessor(v8_str("x"),
-                     AccessorProhibitsOverwritingGetter,
-                     0,
-                     v8::Handle<Value>(),
-                     v8::PROHIBITS_OVERWRITING,
-                     v8::ReadOnly);
-  Local<v8::Object> instance = templ->NewInstance();
-  context->Global()->Set(v8_str("obj"), instance);
-  Local<Value> value = CompileRun(
-      "obj.__defineGetter__('x', function() { return false; });"
-      "obj.x");
-  CHECK(value->BooleanValue());
-  value = CompileRun(
-      "var setter_called = false;"
-      "obj.__defineSetter__('x', function() { setter_called = true; });"
-      "obj.x = 42;"
-      "setter_called");
-  CHECK(!value->BooleanValue());
-  value = CompileRun(
-      "obj2 = {};"
-      "obj2.__proto__ = obj;"
-      "obj2.__defineGetter__('x', function() { return false; });"
-      "obj2.x");
-  CHECK(value->BooleanValue());
-  value = CompileRun(
-      "var setter_called = false;"
-      "obj2 = {};"
-      "obj2.__proto__ = obj;"
-      "obj2.__defineSetter__('x', function() { setter_called = true; });"
-      "obj2.x = 42;"
-      "setter_called");
-  CHECK(!value->BooleanValue());
-}
-
-
 static bool NamedSetAccessBlocker(Local<v8::Object> obj,
                                   Local<Value> name,
                                   v8::AccessType type,
@@ -7279,7 +6958,7 @@
     CHECK(string->map() == i::Heap::short_external_ascii_string_map() ||
           string->map() == i::Heap::medium_external_ascii_string_map());
     // Morph external string to be TwoByte string.
-    if (string->length() <= i::String::kMaxShortStringSize) {
+    if (string->length() <= i::String::kMaxShortSize) {
       string->set_map(i::Heap::short_external_string_map());
     } else {
       string->set_map(i::Heap::medium_external_string_map());
@@ -7292,7 +6971,7 @@
     CHECK(string->map() == i::Heap::short_external_string_map() ||
           string->map() == i::Heap::medium_external_string_map());
     // Morph external string to be ASCII string.
-    if (string->length() <= i::String::kMaxShortStringSize) {
+    if (string->length() <= i::String::kMaxShortSize) {
       string->set_map(i::Heap::short_external_ascii_string_map());
     } else {
       string->set_map(i::Heap::medium_external_ascii_string_map());
@@ -8039,6 +7718,333 @@
 }
 
 
+template <class ExternalArrayClass, class ElementType>
+static void ExternalArrayTestHelper(v8::ExternalArrayType array_type,
+                                    int64_t low,
+                                    int64_t high) {
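+  // Shared body for the External*Array tests below: exercises an external
+  // array of the given element type from JavaScript; low and high are the
+  // type's representable boundary values.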
+  v8::HandleScope scope;
+  LocalContext context;
+  const int kElementCount = 40;
+  int element_size = 0;
+  switch (array_type) {
+    case v8::kExternalByteArray:
+    case v8::kExternalUnsignedByteArray:
+      element_size = 1;
+      break;
+    case v8::kExternalShortArray:
+    case v8::kExternalUnsignedShortArray:
+      element_size = 2;
+      break;
+    case v8::kExternalIntArray:
+    case v8::kExternalUnsignedIntArray:
+    case v8::kExternalFloatArray:
+      element_size = 4;
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  ElementType* array_data =
+      static_cast<ElementType*>(malloc(kElementCount * element_size));
+  i::Handle<ExternalArrayClass> array =
+      i::Handle<ExternalArrayClass>::cast(
+          i::Factory::NewExternalArray(kElementCount, array_type, array_data));
+  i::Heap::CollectAllGarbage(false);  // Force GC to trigger verification.
+  for (int i = 0; i < kElementCount; i++) {
+    array->set(i, static_cast<ElementType>(i));
+  }
+  i::Heap::CollectAllGarbage(false);  // Force GC to trigger verification.
+  for (int i = 0; i < kElementCount; i++) {
+    CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(array->get(i)));
+    CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(array_data[i]));
+  }
+
+  v8::Handle<v8::Object> obj = v8::Object::New();
+  i::Handle<i::JSObject> jsobj = v8::Utils::OpenHandle(*obj);
+  // Set the elements to be the external array.
+  obj->SetIndexedPropertiesToExternalArrayData(array_data,
+                                               array_type,
+                                               kElementCount);
+  CHECK_EQ(1, static_cast<int>(jsobj->GetElement(1)->Number()));
+  obj->Set(v8_str("field"), v8::Int32::New(1503));
+  context->Global()->Set(v8_str("ext_array"), obj);
+  v8::Handle<v8::Value> result = CompileRun("ext_array.field");
+  CHECK_EQ(1503, result->Int32Value());
+  result = CompileRun("ext_array[1]");
+  CHECK_EQ(1, result->Int32Value());
+
+  // Check pass through of assigned smis
+  result = CompileRun("var sum = 0;"
+                      "for (var i = 0; i < 8; i++) {"
+                      "  sum += ext_array[i] = ext_array[i] = -i;"
+                      "}"
+                      "sum;");
+  CHECK_EQ(-28, result->Int32Value());
+
+  // Check assigned smis
+  result = CompileRun("for (var i = 0; i < 8; i++) {"
+                      "  ext_array[i] = i;"
+                      "}"
+                      "var sum = 0;"
+                      "for (var i = 0; i < 8; i++) {"
+                      "  sum += ext_array[i];"
+                      "}"
+                      "sum;");
+  CHECK_EQ(28, result->Int32Value());
+
+  // Check assigned smis in reverse order
+  result = CompileRun("for (var i = 8; --i >= 0; ) {"
+                      "  ext_array[i] = i;"
+                      "}"
+                      "var sum = 0;"
+                      "for (var i = 0; i < 8; i++) {"
+                      "  sum += ext_array[i];"
+                      "}"
+                      "sum;");
+  CHECK_EQ(28, result->Int32Value());
+
+  // Check pass through of assigned HeapNumbers
+  result = CompileRun("var sum = 0;"
+                      "for (var i = 0; i < 16; i+=2) {"
+                      "  sum += ext_array[i] = ext_array[i] = (-i * 0.5);"
+                      "}"
+                      "sum;");
+  CHECK_EQ(-28, result->Int32Value());
+
+  // Check assigned HeapNumbers
+  result = CompileRun("for (var i = 0; i < 16; i+=2) {"
+                      "  ext_array[i] = (i * 0.5);"
+                      "}"
+                      "var sum = 0;"
+                      "for (var i = 0; i < 16; i+=2) {"
+                      "  sum += ext_array[i];"
+                      "}"
+                      "sum;");
+  CHECK_EQ(28, result->Int32Value());
+
+  // Check assigned HeapNumbers in reverse order
+  result = CompileRun("for (var i = 14; i >= 0; i-=2) {"
+                      "  ext_array[i] = (i * 0.5);"
+                      "}"
+                      "var sum = 0;"
+                      "for (var i = 0; i < 16; i+=2) {"
+                      "  sum += ext_array[i];"
+                      "}"
+                      "sum;");
+  CHECK_EQ(28, result->Int32Value());
+
+  i::ScopedVector<char> test_buf(1024);
+
+  // Check legal boundary conditions.
+  // The repeated loads and stores ensure the ICs are exercised.
+  const char* boundary_program =
+      "var res = 0;"
+      "for (var i = 0; i < 16; i++) {"
+      "  ext_array[i] = %lld;"
+      "  if (i > 8) {"
+      "    res = ext_array[i];"
+      "  }"
+      "}"
+      "res;";
+  i::OS::SNPrintF(test_buf,
+                  boundary_program,
+                  low);
+  result = CompileRun(test_buf.start());
+  CHECK_EQ(low, result->IntegerValue());
+
+  i::OS::SNPrintF(test_buf,
+                  boundary_program,
+                  high);
+  result = CompileRun(test_buf.start());
+  CHECK_EQ(high, result->IntegerValue());
+
+  // Check misprediction of type in IC.
+  result = CompileRun("var tmp_array = ext_array;"
+                      "var sum = 0;"
+                      "for (var i = 0; i < 8; i++) {"
+                      "  tmp_array[i] = i;"
+                      "  sum += tmp_array[i];"
+                      "  if (i == 4) {"
+                      "    tmp_array = {};"
+                      "  }"
+                      "}"
+                      "sum;");
+  i::Heap::CollectAllGarbage(false);  // Force GC to trigger verification.
+  CHECK_EQ(28, result->Int32Value());
+
+  // Make sure out-of-range loads do not throw.
+  i::OS::SNPrintF(test_buf,
+                  "var caught_exception = false;"
+                  "try {"
+                  "  ext_array[%d];"
+                  "} catch (e) {"
+                  "  caught_exception = true;"
+                  "}"
+                  "caught_exception;",
+                  kElementCount);
+  result = CompileRun(test_buf.start());
+  CHECK_EQ(false, result->BooleanValue());
+
+  // Make sure out-of-range stores do not throw.
+  i::OS::SNPrintF(test_buf,
+                  "var caught_exception = false;"
+                  "try {"
+                  "  ext_array[%d] = 1;"
+                  "} catch (e) {"
+                  "  caught_exception = true;"
+                  "}"
+                  "caught_exception;",
+                  kElementCount);
+  result = CompileRun(test_buf.start());
+  CHECK_EQ(false, result->BooleanValue());
+
+  // Check other boundary conditions, values and operations.
+  result = CompileRun("for (var i = 0; i < 8; i++) {"
+                      "  ext_array[7] = undefined;"
+                      "}"
+                      "ext_array[7];");
+  CHECK_EQ(0, result->Int32Value());
+  CHECK_EQ(0, static_cast<int>(jsobj->GetElement(7)->Number()));
+
+  result = CompileRun("for (var i = 0; i < 8; i++) {"
+                      "  ext_array[6] = '2.3';"
+                      "}"
+                      "ext_array[6];");
+  CHECK_EQ(2, result->Int32Value());
+  CHECK_EQ(2, static_cast<int>(jsobj->GetElement(6)->Number()));
+
+  if (array_type != v8::kExternalFloatArray) {
+    // Though the specification doesn't state it, be explicit about
+    // converting NaNs and +/-Infinity to zero.
+    result = CompileRun("for (var i = 0; i < 8; i++) {"
+                        "  ext_array[i] = 5;"
+                        "}"
+                        "for (var i = 0; i < 8; i++) {"
+                        "  ext_array[i] = NaN;"
+                        "}"
+                        "ext_array[5];");
+    CHECK_EQ(0, result->Int32Value());
+    CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(5))->value());
+
+    result = CompileRun("for (var i = 0; i < 8; i++) {"
+                        "  ext_array[i] = 5;"
+                        "}"
+                        "for (var i = 0; i < 8; i++) {"
+                        "  ext_array[i] = Infinity;"
+                        "}"
+                        "ext_array[5];");
+    CHECK_EQ(0, result->Int32Value());
+    CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(5))->value());
+
+    result = CompileRun("for (var i = 0; i < 8; i++) {"
+                        "  ext_array[i] = 5;"
+                        "}"
+                        "for (var i = 0; i < 8; i++) {"
+                        "  ext_array[i] = -Infinity;"
+                        "}"
+                        "ext_array[5];");
+    CHECK_EQ(0, result->Int32Value());
+    CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(5))->value());
+  }
+
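+  // delete should leave external array elements untouched.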
+  result = CompileRun("ext_array[3] = 33;"
+                      "delete ext_array[3];"
+                      "ext_array[3];");
+  CHECK_EQ(33, result->Int32Value());
+
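+  // A JS accessor defined on an indexed property should not shadow the
+  // external element.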
+  result = CompileRun("ext_array[0] = 10; ext_array[1] = 11;"
+                      "ext_array[2] = 12; ext_array[3] = 13;"
+                      "ext_array.__defineGetter__('2',"
+                      "function() { return 120; });"
+                      "ext_array[2];");
+  CHECK_EQ(12, result->Int32Value());
+
+  result = CompileRun("var js_array = new Array(40);"
+                      "js_array[0] = 77;"
+                      "js_array;");
+  CHECK_EQ(77, v8::Object::Cast(*result)->Get(v8_str("0"))->Int32Value());
+
+  result = CompileRun("ext_array[1] = 23;"
+                      "ext_array.__proto__ = [];"
+                      "js_array.__proto__ = ext_array;"
+                      "js_array.concat(ext_array);");
+  CHECK_EQ(77, v8::Object::Cast(*result)->Get(v8_str("0"))->Int32Value());
+  CHECK_EQ(23, v8::Object::Cast(*result)->Get(v8_str("1"))->Int32Value());
+
+  result = CompileRun("ext_array[1] = 23;");
+  CHECK_EQ(23, result->Int32Value());
+
+  free(array_data);
+}
+
+
+THREADED_TEST(ExternalByteArray) {
+  ExternalArrayTestHelper<v8::internal::ExternalByteArray, int8_t>(
+      v8::kExternalByteArray,
+      -128,
+      127);
+}
+
+
+THREADED_TEST(ExternalUnsignedByteArray) {
+  ExternalArrayTestHelper<v8::internal::ExternalUnsignedByteArray, uint8_t>(
+      v8::kExternalUnsignedByteArray,
+      0,
+      255);
+}
+
+
+THREADED_TEST(ExternalShortArray) {
+  ExternalArrayTestHelper<v8::internal::ExternalShortArray, int16_t>(
+      v8::kExternalShortArray,
+      -32768,
+      32767);
+}
+
+
+THREADED_TEST(ExternalUnsignedShortArray) {
+  ExternalArrayTestHelper<v8::internal::ExternalUnsignedShortArray, uint16_t>(
+      v8::kExternalUnsignedShortArray,
+      0,
+      65535);
+}
+
+
+THREADED_TEST(ExternalIntArray) {
+  ExternalArrayTestHelper<v8::internal::ExternalIntArray, int32_t>(
+      v8::kExternalIntArray,
+      INT_MIN,   // -2147483648
+      INT_MAX);  //  2147483647
+}
+
+
+THREADED_TEST(ExternalUnsignedIntArray) {
+  ExternalArrayTestHelper<v8::internal::ExternalUnsignedIntArray, uint32_t>(
+      v8::kExternalUnsignedIntArray,
+      0,
+      UINT_MAX);  // 4294967295
+}
+
+
+THREADED_TEST(ExternalFloatArray) {
+  ExternalArrayTestHelper<v8::internal::ExternalFloatArray, float>(
+      v8::kExternalFloatArray,
+      -500,
+      500);
+}
+
+
+THREADED_TEST(ExternalArrays) {
+  TestExternalByteArray();
+  TestExternalUnsignedByteArray();
+  TestExternalShortArray();
+  TestExternalUnsignedShortArray();
+  TestExternalIntArray();
+  TestExternalUnsignedIntArray();
+  TestExternalFloatArray();
+}
+
+
 THREADED_TEST(ScriptContextDependence) {
   v8::HandleScope scope;
   LocalContext c1;
@@ -8153,3 +8159,130 @@
     CHECK(stack_limit == set_limit);
   }
 }
+
+
+THREADED_TEST(GetHeapStatistics) {
+  v8::HandleScope scope;
+  LocalContext c1;
+  v8::HeapStatistics heap_statistics;
+  CHECK_EQ(heap_statistics.total_heap_size(), 0);
+  CHECK_EQ(heap_statistics.used_heap_size(), 0);
+  v8::V8::GetHeapStatistics(&heap_statistics);
+  CHECK_NE(heap_statistics.total_heap_size(), 0);
+  CHECK_NE(heap_statistics.used_heap_size(), 0);
+}
+
+
+static double DoubleFromBits(uint64_t value) {
+  double target;
+#ifdef BIG_ENDIAN_FLOATING_POINT
+  const int kIntSize = 4;
+  // Somebody swapped the lower and higher half of doubles.
+  memcpy(&target, reinterpret_cast<char*>(&value) + kIntSize, kIntSize);
+  memcpy(reinterpret_cast<char*>(&target) + kIntSize, &value, kIntSize);
+#else
+  memcpy(&target, &value, sizeof(target));
+#endif
+  return target;
+}
+
+
+static uint64_t DoubleToBits(double value) {
+  uint64_t target;
+#ifdef BIG_ENDIAN_FLOATING_POINT
+  const int kIntSize = 4;
+  // Somebody swapped the lower and higher half of doubles.
+  memcpy(&target, reinterpret_cast<char*>(&value) + kIntSize, kIntSize);
+  memcpy(reinterpret_cast<char*>(&target) + kIntSize, &value, kIntSize);
+#else
+  memcpy(&target, &value, sizeof(target));
+#endif
+  return target;
+}
+
+
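+// Mirrors ECMA-262 date clamping: times more than 8.64e15 ms from the
+// epoch become NaN, and fractional milliseconds truncate toward zero.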
+static double DoubleToDateTime(double input) {
+  double date_limit = 864e13;
+  if (IsNaN(input) || input < -date_limit || input > date_limit) {
+    return i::OS::nan_value();
+  }
+  return (input < 0) ? -(floor(-input)) : floor(input);
+}
+
+// We don't have a consistent way to write 64-bit constants syntactically,
+// so we split them into two 32-bit constants and combine them
+// programmatically.
+static double DoubleFromBits(uint32_t high_bits, uint32_t low_bits) {
+  return DoubleFromBits((static_cast<uint64_t>(high_bits) << 32) | low_bits);
+}
+
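+// For example, DoubleFromBits(0x7ff00000, 0x00000000) is +Infinity, while
+// DoubleFromBits(0x7ff00000, 0x00000001) is a signaling NaN: exponent bits
+// all ones with the quiet bit (bit 51) clear.
+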
+
+THREADED_TEST(QuietSignalingNaNs) {
+  v8::HandleScope scope;
+  LocalContext context;
+  v8::TryCatch try_catch;
+
+  // Special double values.
+  double snan = DoubleFromBits(0x7ff00000, 0x00000001);
+  double qnan = DoubleFromBits(0x7ff80000, 0x00000000);
+  double infinity = DoubleFromBits(0x7ff00000, 0x00000000);
+  double max_normal = DoubleFromBits(0x7fefffff, 0xffffffffu);
+  double min_normal = DoubleFromBits(0x00100000, 0x00000000);
+  double max_denormal = DoubleFromBits(0x000fffff, 0xffffffffu);
+  double min_denormal = DoubleFromBits(0x00000000, 0x00000001);
+
+  // Date values are capped at +/-100000000 days (times 864e5 ms per day)
+  // on either side of the epoch.
+  double date_limit = 864e13;
+
+  double test_values[] = {
+      snan,
+      qnan,
+      infinity,
+      max_normal,
+      date_limit + 1,
+      date_limit,
+      min_normal,
+      max_denormal,
+      min_denormal,
+      0,
+      -0.0,
+      -min_denormal,
+      -max_denormal,
+      -min_normal,
+      -date_limit,
+      -date_limit - 1,
+      -max_normal,
+      -infinity,
+      -qnan,
+      -snan
+  };
+  int num_test_values = 20;
+
+  for (int i = 0; i < num_test_values; i++) {
+    double test_value = test_values[i];
+
+    // Check that Number::New preserves non-NaNs and quiets SNaNs.
+    v8::Handle<v8::Value> number = v8::Number::New(test_value);
+    double stored_number = number->NumberValue();
+    if (!IsNaN(test_value)) {
+      CHECK_EQ(test_value, stored_number);
+    } else {
+      uint64_t stored_bits = DoubleToBits(stored_number);
+      // Check for a quiet NaN (bits 51..62 all set).
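+      // (Bits 62..52 are the exponent, all ones for any NaN; bit 51 is
+      //  the quiet bit, so 0xfff means the SNaN was quieted.)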
+      CHECK_EQ(0xfff, static_cast<int>((stored_bits >> 51) & 0xfff));
+    }
+
+    // Check that Date::New preserves non-NaNs in the date range and
+    // quiets SNaNs.
+    v8::Handle<v8::Value> date = v8::Date::New(test_value);
+    double expected_stored_date = DoubleToDateTime(test_value);
+    double stored_date = date->NumberValue();
+    if (!IsNaN(expected_stored_date)) {
+      CHECK_EQ(expected_stored_date, stored_date);
+    } else {
+      uint64_t stored_bits = DoubleToBits(stored_date);
+      // Check for a quiet NaN (bits 51..62 all set).
+      CHECK_EQ(0xfff, static_cast<int>((stored_bits >> 51) & 0xfff));
+    }
+  }
+}
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index 4ffcee3..656a456 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -178,12 +178,6 @@
 }
 
 
-// Helper function that compiles and runs the source.
-static v8::Local<v8::Value> CompileRun(const char* source) {
-  return v8::Script::Compile(v8::String::New(source))->Run();
-}
-
-
 // Is there any debug info for the function?
 static bool HasDebugInfo(v8::Handle<v8::Function> fun) {
   Handle<v8::internal::JSFunction> f = v8::Utils::OpenHandle(*fun);
diff --git a/test/cctest/test-log-stack-tracer.cc b/test/cctest/test-log-stack-tracer.cc
index 43df6ba..68cbc26 100644
--- a/test/cctest/test-log-stack-tracer.cc
+++ b/test/cctest/test-log-stack-tracer.cc
@@ -163,11 +163,6 @@
 }
 
 
-static void CompileRun(const char* source) {
-  Script::Compile(String::New(source))->Run();
-}
-
-
 v8::Handle<v8::Value> TraceExtension::JSEntrySPLevel2(
     const v8::Arguments& args) {
   v8::HandleScope scope;
@@ -329,17 +324,16 @@
 }
 
 
-static void CFuncDoTrace() {
+static void CFuncDoTrace(byte dummy_parameter) {
   Address fp;
 #ifdef __GNUC__
   fp = reinterpret_cast<Address>(__builtin_frame_address(0));
-#elif defined _MSC_VER && defined V8_TARGET_ARCH_IA32
-  __asm mov [fp], ebp  // NOLINT
-#elif defined _MSC_VER && defined V8_TARGET_ARCH_X64
-  // TODO(X64): __asm extension is not supported by the Microsoft Visual C++
-  // 64-bit compiler.
-  fp = 0;
-  UNIMPLEMENTED();
+#elif defined _MSC_VER
+  // Approximate a frame pointer address. We compile without base pointers,
+  // so we can't trust ebp/rbp.
+  fp = &dummy_parameter - 2 * sizeof(void*);  // NOLINT
+#else
+#error Unexpected platform.
 #endif
   DoTrace(fp);
 }
@@ -347,7 +341,7 @@
 
 static int CFunc(int depth) {
   if (depth <= 0) {
-    CFuncDoTrace();
+    CFuncDoTrace(0);
     return 0;
   } else {
     return CFunc(depth - 1) + 1;
diff --git a/test/cctest/test-log.cc b/test/cctest/test-log.cc
index 3983215..b1cb63c 100644
--- a/test/cctest/test-log.cc
+++ b/test/cctest/test-log.cc
@@ -256,11 +256,10 @@
   // No sampling should happen prior to resuming profiler.
   CHECK(!LoggerTestHelper::IsSamplerActive());
 
-  // Read initial logged data (static libs map).
   EmbeddedVector<char, 102400> buffer;
+  // Nothing must be logged until profiling is resumed.
   int log_pos = GetLogLines(0, &buffer);
-  CHECK_GT(log_pos, 0);
-  CHECK_GT(buffer.length(), log_pos);
+  CHECK_EQ(0, log_pos);
 
   CompileAndRunScript("var a = (function(x) { return x + 1; })(10);");
 
diff --git a/test/cctest/test-macro-assembler-x64.cc b/test/cctest/test-macro-assembler-x64.cc
index 9c1197f..f344ac8 100755
--- a/test/cctest/test-macro-assembler-x64.cc
+++ b/test/cctest/test-macro-assembler-x64.cc
@@ -57,7 +57,7 @@
 using v8::internal::r8;
 using v8::internal::r9;
 using v8::internal::r11;
-using v8::internal::r12;
+using v8::internal::r12;  // Remember: r12..r15 are callee-saved!
 using v8::internal::r13;
 using v8::internal::r14;
 using v8::internal::r15;
@@ -1144,6 +1144,8 @@
   masm->set_allow_stub_calls(false);
   Label exit;
 
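+  // r12 and r15 are callee-saved in both x64 ABIs, and the Smi tests below
+  // clobber them, so save and restore them around the test body.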
+  __ push(r12);
+  __ push(r15);
   TestSmiDiv(masm, &exit, 0x10, 1, 1);
   TestSmiDiv(masm, &exit, 0x20, 1, 0);
   TestSmiDiv(masm, &exit, 0x30, -1, 0);
@@ -1168,6 +1170,8 @@
   __ xor_(r15, r15);  // Success.
   __ bind(&exit);
   __ movq(rax, r15);
+  __ pop(r15);
+  __ pop(r12);
   __ ret(0);
 
   CodeDesc desc;
@@ -1247,6 +1251,8 @@
   masm->set_allow_stub_calls(false);
   Label exit;
 
+  __ push(r12);
+  __ push(r15);
   TestSmiMod(masm, &exit, 0x10, 1, 1);
   TestSmiMod(masm, &exit, 0x20, 1, 0);
   TestSmiMod(masm, &exit, 0x30, -1, 0);
@@ -1271,6 +1277,8 @@
   __ xor_(r15, r15);  // Success.
   __ bind(&exit);
   __ movq(rax, r15);
+  __ pop(r15);
+  __ pop(r12);
   __ ret(0);
 
   CodeDesc desc;
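
The push/pop pairs added in both hunks exist because r12..r15 are
callee-saved registers in both the System V and Windows x64 calling
conventions: generated test code that clobbers them must restore their
original values before returning to the C++ caller. The discipline, in
the macro-assembler style used above:

    __ push(r12);       // Save callee-saved registers the test body
    __ push(r15);       // is about to clobber.
    // ... test body using r12 and r15 as scratch ...
    __ movq(rax, r15);  // Move the result to the return register first,
    __ pop(r15);        // then restore in reverse order of the pushes
    __ pop(r12);        // so the stack stays balanced.
    __ ret(0);
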
diff --git a/test/cctest/test-mark-compact.cc b/test/cctest/test-mark-compact.cc
index 743375d..e56f0f4 100644
--- a/test/cctest/test-mark-compact.cc
+++ b/test/cctest/test-mark-compact.cc
@@ -71,10 +71,6 @@
 
 
 TEST(Promotion) {
-  // Test the situation that some objects in new space are promoted to the
-  // old space
-  if (Snapshot::IsEnabled()) return;
-
   // Ensure that we get a compacting collection so that objects are promoted
   // from new space.
   FLAG_gc_global = true;
@@ -106,7 +102,6 @@
 
 
 TEST(NoPromotion) {
-  if (Snapshot::IsEnabled()) return;
   Heap::ConfigureHeap(2*256*KB, 4*MB);
 
   // Test the situation that some objects in new space are promoted to
diff --git a/test/cctest/test-serialize.cc b/test/cctest/test-serialize.cc
index 6939a80..01e0715 100644
--- a/test/cctest/test-serialize.cc
+++ b/test/cctest/test-serialize.cc
@@ -185,6 +185,18 @@
 }
 
 
+static void Serialize2() {
+  Serializer::Enable();
+  // We have to create one context.  Creating a context loads the builtins
+  // from v8natives.js and processes their addresses, which clears the
+  // pending fixups array.  That array would otherwise contain GC roots
+  // that would confuse the serialization/deserialization process.
+  v8::Persistent<v8::Context> env = v8::Context::New();
+  env.Dispose();
+  Snapshot::WriteToFile2(FLAG_testing_serialization_file);
+}
+
+
 // Test that the whole heap can be serialized when running from the
 // internal snapshot.
 // (Smoke test.)
@@ -203,6 +215,13 @@
 }
 
 
+// Test that the whole heap can be serialized.
+TEST(Serialize2) {
+  v8::V8::Initialize();
+  Serialize2();
+}
+
+
 // Test that the heap isn't destroyed after a serialization.
 TEST(SerializeNondestructive) {
   if (Snapshot::IsEnabled()) return;
@@ -230,6 +249,11 @@
 }
 
 
+static void Deserialize2() {
+  CHECK(Snapshot::Initialize2(FLAG_testing_serialization_file));
+}
+
+
 static void SanityCheck() {
   v8::HandleScope scope;
 #ifdef DEBUG
@@ -251,6 +275,21 @@
   SanityCheck();
 }
 
+
+DEPENDENT_TEST(Deserialize2, Serialize2) {
+  v8::HandleScope scope;
+
+  Deserialize2();
+
+  fflush(stdout);
+
+  v8::Persistent<v8::Context> env = v8::Context::New();
+  env->Enter();
+
+  SanityCheck();
+}
+
+
 DEPENDENT_TEST(DeserializeAndRunScript, Serialize) {
   v8::HandleScope scope;
 
@@ -263,6 +302,21 @@
 }
 
 
+DEPENDENT_TEST(DeserializeAndRunScript2, Serialize2) {
+  v8::HandleScope scope;
+
+  Deserialize2();
+
+  v8::Persistent<v8::Context> env = v8::Context::New();
+  env->Enter();
+
+  const char* c_source = "\"1234\".length";
+  v8::Local<v8::String> source = v8::String::New(c_source);
+  v8::Local<v8::Script> script = v8::Script::Compile(source);
+  CHECK_EQ(4, script->Run()->Int32Value());
+}
+
+
 DEPENDENT_TEST(DeserializeNatives, Serialize) {
   v8::HandleScope scope;
 
@@ -286,3 +340,19 @@
   v8::Local<v8::Value> value = script->Run();
   CHECK(value->IsUndefined());
 }
+
+
+TEST(TestThatAlwaysSucceeds) {
+}
+
+
+TEST(TestThatAlwaysFails) {
+  bool ArtificialFailure = false;
+  CHECK(ArtificialFailure);
+}
+
+
+DEPENDENT_TEST(DependentTestThatAlwaysFails, TestThatAlwaysSucceeds) {
+  bool ArtificialFailure2 = false;
+  CHECK(ArtificialFailure2);
+}
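
Taken together, the new tests exercise a full round trip through the
second serializer. Relying only on the entry points shown above
(Serializer::Enable, Snapshot::WriteToFile2, Snapshot::Initialize2), the
flow is roughly:

    // Writer side (TEST(Serialize2)): create and dispose one context so
    // the builtins are loaded and the pending fixups array is cleared,
    // then write the whole heap out.
    Serializer::Enable();
    v8::Persistent<v8::Context> env = v8::Context::New();
    env.Dispose();
    Snapshot::WriteToFile2(FLAG_testing_serialization_file);

    // Reader side (the DEPENDENT_TESTs): read the heap back and check
    // that it is actually usable, e.g. by running a script.
    CHECK(Snapshot::Initialize2(FLAG_testing_serialization_file));
    v8::Persistent<v8::Context> env2 = v8::Context::New();
    env2->Enter();

The TestThatAlwaysFails entries at the end are deliberate failures,
presumably there to confirm that the DEPENDENT_TEST machinery actually
reports failing tests rather than silently skipping them.
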
diff --git a/test/cctest/test-spaces.cc b/test/cctest/test-spaces.cc
index d946a7f..1a26883 100644
--- a/test/cctest/test-spaces.cc
+++ b/test/cctest/test-spaces.cc
@@ -99,9 +99,9 @@
 
 TEST(MemoryAllocator) {
   CHECK(Heap::ConfigureHeapDefault());
-  CHECK(MemoryAllocator::Setup(Heap::MaxCapacity()));
+  CHECK(MemoryAllocator::Setup(Heap::MaxReserved()));
 
-  OldSpace faked_space(Heap::MaxCapacity(), OLD_POINTER_SPACE, NOT_EXECUTABLE);
+  OldSpace faked_space(Heap::MaxReserved(), OLD_POINTER_SPACE, NOT_EXECUTABLE);
   int total_pages = 0;
   int requested = 2;
   int allocated;
@@ -155,16 +155,16 @@
 
 TEST(NewSpace) {
   CHECK(Heap::ConfigureHeapDefault());
-  CHECK(MemoryAllocator::Setup(Heap::MaxCapacity()));
+  CHECK(MemoryAllocator::Setup(Heap::MaxReserved()));
 
   NewSpace new_space;
 
   void* chunk =
-      MemoryAllocator::ReserveInitialChunk(2 * Heap::YoungGenerationSize());
+      MemoryAllocator::ReserveInitialChunk(4 * Heap::ReservedSemiSpaceSize());
   CHECK(chunk != NULL);
   Address start = RoundUp(static_cast<Address>(chunk),
-                          Heap::YoungGenerationSize());
-  CHECK(new_space.Setup(start, Heap::YoungGenerationSize()));
+                          2 * Heap::ReservedSemiSpaceSize());
+  CHECK(new_space.Setup(start, 2 * Heap::ReservedSemiSpaceSize()));
   CHECK(new_space.HasBeenSetup());
 
   while (new_space.Available() >= Page::kMaxHeapObjectSize) {
@@ -180,18 +180,18 @@
 
 TEST(OldSpace) {
   CHECK(Heap::ConfigureHeapDefault());
-  CHECK(MemoryAllocator::Setup(Heap::MaxCapacity()));
+  CHECK(MemoryAllocator::Setup(Heap::MaxReserved()));
 
-  OldSpace* s = new OldSpace(Heap::OldGenerationSize(),
+  OldSpace* s = new OldSpace(Heap::MaxOldGenerationSize(),
                              OLD_POINTER_SPACE,
                              NOT_EXECUTABLE);
   CHECK(s != NULL);
 
   void* chunk =
-      MemoryAllocator::ReserveInitialChunk(2 * Heap::YoungGenerationSize());
+      MemoryAllocator::ReserveInitialChunk(4 * Heap::ReservedSemiSpaceSize());
   CHECK(chunk != NULL);
   Address start = static_cast<Address>(chunk);
-  size_t size = RoundUp(start, Heap::YoungGenerationSize()) - start;
+  size_t size = RoundUp(start, 2 * Heap::ReservedSemiSpaceSize()) - start;
 
   CHECK(s->Setup(start, size));
 
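
The switch from YoungGenerationSize to ReservedSemiSpaceSize also changes
the reservation arithmetic: to obtain a region of 2 * ReservedSemiSpaceSize()
bytes aligned to that same size, the tests reserve twice as much
(4 * ReservedSemiSpaceSize()) and round the start up. A small sketch of why
the over-reservation always suffices (helper names are illustrative):

    #include <assert.h>
    #include <stdint.h>

    // Round x up to the next multiple of a positive alignment.
    static uintptr_t RoundUpTo(uintptr_t x, uintptr_t alignment) {
      return ((x + alignment - 1) / alignment) * alignment;
    }

    // Reserving 2 * needed bytes guarantees the aligned region fits:
    // rounding up wastes at most needed - 1 bytes of the reservation.
    static void CheckAlignedRegionFits(uintptr_t start, uintptr_t needed) {
      uintptr_t aligned = RoundUpTo(start, needed);
      assert(aligned - start < needed);
      assert(aligned + needed <= start + 2 * needed);
    }
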
diff --git a/test/mjsunit/compiler/globals.js b/test/mjsunit/compiler/globals.js
new file mode 100644
index 0000000..066f927
--- /dev/null
+++ b/test/mjsunit/compiler/globals.js
@@ -0,0 +1,55 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test references and assignments to global variables.
+var g = 0;
+
+// Test compilation of a global variable store.
+assertEquals(1, eval('g = 1'));
+// Test that the store worked.
+assertEquals(1, g);
+
+// Test that patching the IC in the compiled code works.
+assertEquals(1, eval('g = 1'));
+assertEquals(1, g);
+assertEquals(1, eval('g = 1'));
+assertEquals(1, g);
+
+// Test a second store.
+assertEquals("2", eval('g = "2"'));
+assertEquals("2", g);
+
+// Test a load.
+assertEquals("2", eval('g'));
+
+// Test that patching the IC in the compiled code works.
+assertEquals("2", eval('g'));
+assertEquals("2", eval('g'));
+
+// Test a second load.
+g = 3;
+assertEquals(3, eval('g'));
diff --git a/test/mjsunit/compiler/literals-assignment.js b/test/mjsunit/compiler/literals-assignment.js
index 932bfa7..d2996c7 100644
--- a/test/mjsunit/compiler/literals-assignment.js
+++ b/test/mjsunit/compiler/literals-assignment.js
@@ -69,3 +69,36 @@
          })()";
 assertEquals(8, eval(code));
 
+// Test object literals.
+var a, b;
+code = "a = {x:8}";
+eval(code);
+assertEquals(8, a.x);
+
+code = "b = {x:a, y:'abc'}";
+eval(code);
+assertEquals(a, b.x);
+assertEquals(8, b.x.x);
+assertEquals("abc", b.y);
+
+code = "({x:8, y:9}); 10";
+assertEquals(10, eval(code));
+
+code = "({x:8, y:9})";
+eval(code);
+assertEquals(9, eval(code + ".y"));
+
+code = "a = {2:8, x:9}";
+eval(code);
+assertEquals(8, a[2]);
+assertEquals(8, a["2"]);
+assertEquals(9, a["x"]);
+
+// Test regexp literals.
+
+a = /abc/;
+
+assertEquals(/abc/, a);
+
+code = "/abc/; 8";
+assertEquals(8, eval(code));
diff --git a/test/mjsunit/compiler/literals.js b/test/mjsunit/compiler/literals.js
index e0e532f..6775401 100644
--- a/test/mjsunit/compiler/literals.js
+++ b/test/mjsunit/compiler/literals.js
@@ -33,3 +33,20 @@
 assertEquals("abc", eval("'abc'"));
 
 assertEquals(8, eval("6;'abc';8"));
+
+// Test some materialized array literals.
+assertEquals([1,2,3,4], eval('[1,2,3,4]'));
+assertEquals([[1,2],3,4], eval('[[1,2],3,4]'));
+assertEquals([1,[2,3,4]], eval('[1,[2,3,4]]'));
+
+assertEquals([1,2,3,4], eval('var a=1, b=2; [a,b,3,4]'));
+assertEquals([1,2,3,4], eval('var a=1, b=2, c = [a,b,3,4]; c'));
+
+function double(x) { return x + x; }
+var s = 'var a = 1, b = 2; [double(a), double(b), double(3), double(4)]';
+assertEquals([2,4,6,8], eval(s));
+
+// Test array literals in effect context.
+assertEquals(17, eval('[1,2,3,4]; 17'));
+assertEquals(19, eval('var a=1, b=2; [a,b,3,4]; 19'));
+assertEquals(23, eval('var a=1, b=2; c=23; [a,b,3,4]; c'));
diff --git a/test/mjsunit/compiler/property-simple.js b/test/mjsunit/compiler/property-simple.js
new file mode 100644
index 0000000..b0f0ffa
--- /dev/null
+++ b/test/mjsunit/compiler/property-simple.js
@@ -0,0 +1,39 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test for property access
+
+var a;
+var b;
+
+code = "a = {x:8, y:9}; a.x";
+
+assertEquals(8, eval(code));
+
+code = "b = {z:a}; b.z.y";
+
+assertEquals(9, eval(code));
diff --git a/test/mjsunit/debug-version.js b/test/mjsunit/debug-version.js
new file mode 100644
index 0000000..b1bc1e8
--- /dev/null
+++ b/test/mjsunit/debug-version.js
@@ -0,0 +1,90 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+// State set by the debug event listener below.
+listenerComplete = false;
+exception = false;
+
+var base_version_request = '"seq":0,"type":"request","command":"version"';
+
+function safeEval(code) {
+  try {
+    return eval('(' + code + ')');
+  } catch (e) {
+    assertEquals(void 0, e);
+    return undefined;
+  }
+}
+
+function testArguments(exec_state) {
+  // Get the debug command processor in running state.
+  var dcp = exec_state.debugCommandProcessor(true);
+
+  assertTrue(dcp.isRunning());
+
+  var version_request = '{' + base_version_request + '}';
+  var version_response = safeEval(dcp.processDebugJSONRequest(version_request));
+
+  assertTrue(version_response.success);
+
+  var version_string = version_response.body.V8Version;
+
+  assertTrue(!!version_string, version_request + ' -> expected version string');
+
+  var version_pattern = /^\d*\.\d*\.\d*/;
+
+  assertTrue(!!(version_string.match(version_pattern)), "unexpected format of version: " + version_string);
+}
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Break) {
+
+      // Test simple suspend request.
+      testArguments(exec_state);
+
+      // Indicate that all was processed.
+      listenerComplete = true;
+    }
+  } catch (e) {
+    exception = e;
+  }
+}
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+// Stop debugger and check that suspend command changes running flag.
+debugger;
+
+assertFalse(exception, "exception in listener");
+// Make sure that the debug event listener was invoked.
+assertTrue(listenerComplete, "listener did not run to completion");
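
For context, the request assembled above is plain JSON sent through the
debug command processor, and the assertions depend only on the success
flag and body.V8Version. An illustrative exchange (response fields other
than those two are an assumption about the protocol, not asserted by the
test):

    // Request, built from base_version_request:
    {"seq":0,"type":"request","command":"version"}

    // Response shape the test checks: success must be true and
    // body.V8Version must match /^\d*\.\d*\.\d*/.
    {"type":"response","command":"version","success":true,
     "body":{"V8Version":"1.3.17"},"running":true}
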
diff --git a/test/mjsunit/div-mod.js b/test/mjsunit/div-mod.js
index a8a19b3..b3c77e1 100644
--- a/test/mjsunit/div-mod.js
+++ b/test/mjsunit/div-mod.js
@@ -86,3 +86,72 @@
 for (var i = 0; i < divisors.length; i++) {
   run_tests_for(divisors[i]);
 }
+
+// Test extreme corner cases of modulo.
+
+// Computes the modulo by slow but lossless operations.
+function compute_mod(dividend, divisor) {
+  // Return NaN if either operand is NaN, if divisor is 0 or
+  // dividend is an infinity. Return dividend if divisor is an infinity.
+  if (isNaN(dividend) || isNaN(divisor) || divisor == 0) { return NaN; }
+  var sign = 1;
+  if (dividend < 0) { dividend = -dividend; sign = -1; }
+  if (dividend == Infinity) { return NaN; }
+  if (divisor < 0) { divisor = -divisor; }
+  if (divisor == Infinity) { return sign * dividend; }
+  function rec_mod(a, b) {
+    // Subtracts the largest possible multiple of b from a.
+    if (a >= b) {
+      a = rec_mod(a, 2 * b);
+      if (a >= b) { a -= b; }
+    }
+    return a;
+  }
+  return sign * rec_mod(dividend, divisor);
+}
+
+(function () {
+  var large_non_smi = 1234567891234.12245;
+  var small_non_smi = 43.2367243;
+  var repeating_decimal = 0.3;
+  var finite_decimal = 0.5;
+  var smi = 43;
+  var power_of_two = 64;
+  var min_normal = Number.MIN_VALUE * Math.pow(2, 52);
+  var max_denormal = Number.MIN_VALUE * (Math.pow(2, 52) - 1);
+
+  // All combinations of NaN, Infinity, normal, denormal and zero.
+  var example_numbers = [
+    NaN,
+    0,
+    Number.MIN_VALUE,
+    3 * Number.MIN_VALUE,
+    max_denormal,
+    min_normal,
+    repeating_decimal,
+    finite_decimal,
+    smi,
+    power_of_two,
+    small_non_smi,
+    large_non_smi,
+    Number.MAX_VALUE,
+    Infinity
+  ];
+
+  function doTest(a, b) {
+    var exp = compute_mod(a, b);
+    var act = a % b;
+    assertEquals(exp, act, a + " % " + b);
+  }
+
+  for (var i = 0; i < example_numbers.length; i++) {
+    for (var j = 0; j < example_numbers.length; j++) {
+      var a = example_numbers[i];
+      var b = example_numbers[j];
+      doTest(a, b);
+      doTest(-a, b);
+      doTest(a, -b);
+      doTest(-a, -b);
+    }
+  }
+})();
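
As a worked example of the lossless helper: rec_mod doubles the divisor
on the way down and subtracts on the way back up, and each step (a
multiply by two, or a subtraction where b <= a < 2b) is exact in IEEE
doubles barring overflow to infinity, so no precision is lost. Tracing
compute_mod(7, 3):

    rec_mod(7, 3)
      rec_mod(7, 6)
        rec_mod(7, 12)  // 7 < 12: nothing to subtract, returns 7
      7 >= 6, so 7 - 6 = 1
    1 < 3, returns 1    // 7 % 3 == 1
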
diff --git a/test/mjsunit/fuzz-natives.js b/test/mjsunit/fuzz-natives.js
index c653b18..f495c72 100644
--- a/test/mjsunit/fuzz-natives.js
+++ b/test/mjsunit/fuzz-natives.js
@@ -127,8 +127,11 @@
   "IS_VAR": true,
   "ResolvePossiblyDirectEval": true,
   "Log": true,
+  "DeclareGlobals": true,
 
-  "CollectStackTrace": true
+  "CollectStackTrace": true,
+  "PromoteScheduledException": true,
+  "DeleteHandleScopeExtensions": true
 };
 
 var currentlyUncallable = {
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index 0b069cc..15f62b0 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -36,6 +36,9 @@
 
 big-object-literal: PASS, SKIP if ($arch == arm)
 
+# Issue 488: this test sometimes times out.
+array-constructor: PASS || TIMEOUT
+
 [ $arch == arm ]
 
 # Slow tests which times out in debug mode.
diff --git a/test/mjsunit/regress/regress-485.js b/test/mjsunit/regress/regress-485.js
new file mode 100644
index 0000000..62c6fb9
--- /dev/null
+++ b/test/mjsunit/regress/regress-485.js
@@ -0,0 +1,64 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// See: http://code.google.com/p/v8/issues/detail?id=485
+
+// Ensure that we don't expose the builtins object when calling
+// builtin functions that use or return "this".
+
+var global = this;
+var global2 = (function(){return this;})();
+assertEquals(global, global2, "direct call to local function returns global");
+
+var builtin = Object.prototype.valueOf;  // Builtin function that returns this.
+
+assertEquals(global, builtin(), "Direct call to builtin");
+
+assertEquals(global, builtin.call(), "call() to builtin");
+assertEquals(global, builtin.call(null), "call(null) to builtin");
+assertEquals(global, builtin.call(undefined), "call(undefined) to builtin");
+
+assertEquals(global, builtin.apply(), "apply() to builtin");
+assertEquals(global, builtin.apply(null), "apply(null) to builtin");
+assertEquals(global, builtin.apply(undefined), "apply(undefined) to builtin");
+
+assertEquals(global, builtin.call.call(builtin), "call.call() to builtin");
+assertEquals(global, builtin.call.apply(builtin), "call.apply() to builtin");
+assertEquals(global, builtin.apply.call(builtin), "apply.call() to builtin");
+assertEquals(global, builtin.apply.apply(builtin), "apply.apply() to builtin");
+
+
+// Builtin that depends on value of this to compute result.
+var builtin2 = Object.prototype.toString;
+
+// Global object has class "Object" according to Object.prototype.toString.
+// Builtins object displays as "[object builtins]".
+assertTrue("[object builtins]" != builtin2(), "Direct call to toString");
+assertTrue("[object builtins]" != builtin2.call(), "call() to toString");
+assertTrue("[object builtins]" != builtin2.apply(), "call() to toString");
+assertTrue("[object builtins]" != builtin2.call.call(builtin2),
+           "call.call() to toString");
diff --git a/tools/test.py b/tools/test.py
index 3a60c59..d206e33 100755
--- a/tools/test.py
+++ b/tools/test.py
@@ -326,6 +326,7 @@
     self.timed_out = timed_out
     self.stdout = stdout
     self.stderr = stderr
+    self.failed = None
 
 
 class TestCase(object):
@@ -333,7 +334,6 @@
   def __init__(self, context, path):
     self.path = path
     self.context = context
-    self.failed = None
     self.duration = None
 
   def IsNegative(self):
@@ -343,9 +343,9 @@
     return cmp(other.duration, self.duration)
 
   def DidFail(self, output):
-    if self.failed is None:
-      self.failed = self.IsFailureOutput(output)
-    return self.failed
+    if output.failed is None:
+      output.failed = self.IsFailureOutput(output)
+    return output.failed
 
   def IsFailureOutput(self, output):
     return output.exit_code != 0
@@ -1094,6 +1094,8 @@
       default=60, type="int")
   result.add_option("--arch", help='The architecture to run tests for',
       default='none')
+  result.add_option("--snapshot", help="Run the tests with snapshot turned on",
+      default=False, action="store_true")
   result.add_option("--simulator", help="Run tests with architecture simulator",
       default='none')
   result.add_option("--special-command", default=None)
@@ -1139,6 +1141,8 @@
     if options.arch == 'none':
       options.arch = ARCH_GUESS
     options.scons_flags.append("arch=" + options.arch)
+  if options.snapshot:
+    options.scons_flags.append("snapshot=on")
   return True
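
With this in place, the suites can be run against a snapshot build in one
step; assuming the usual invocation style of tools/test.py (the positional
suite names here are illustrative), something like:

    # Appends snapshot=on to the scons flags and runs the given suites.
    tools/test.py --snapshot cctest mjsunit
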