Push version 1.2.3 to trunk.

Fixed bug in reporting of out-of-memory situations.

Introduced hidden prototypes on certain builtin prototype objects such as String.prototype to emulate JSC's behavior of restoring the original function when deleting functions from those prototype objects.

Fixed crash bug in the register allocator.




git-svn-id: http://v8.googlecode.com/svn/trunk@1909 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 3ff239c..2ff9936 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,15 @@
+2009-05-11: Version 1.2.3
+
+        Fixed bug in reporting of out-of-memory situations.
+
+        Introduced hidden prototypes on certain builtin prototype objects
+        such as String.prototype to emulate JSC's behavior of restoring
+        the original function when deleting functions from those prototype
+        objects.
+
+        Fixed crash bug in the register allocator.
+
+
 2009-05-04: Version 1.2.2
 
         Fixed bug in array sorting for sparse arrays (issue 326).
diff --git a/SConstruct b/SConstruct
index ac210d5..0c50679 100644
--- a/SConstruct
+++ b/SConstruct
@@ -149,13 +149,13 @@
       }
     },
     'arch:ia32': {
-      'CPPDEFINES':   ['V8_ARCH_IA32', 'ILP32']
+      'CPPDEFINES':   ['V8_TARGET_ARCH_IA32']
     },
     'arch:arm': {
-      'CPPDEFINES':   ['V8_ARCH_ARM', 'ILP32']
+      'CPPDEFINES':   ['V8_TARGET_ARCH_ARM']
     },
     'arch:x64': {
-      'CPPDEFINES':   ['V8_ARCH_X64', 'LP64']
+      'CPPDEFINES':   ['V8_TARGET_ARCH_X64']
     },
     'prof:oprofile': {
       'CPPDEFINES':   ['ENABLE_OPROFILE_AGENT']
@@ -173,7 +173,7 @@
       'CCPDBFLAGS':   ['/Zi']
     },
     'arch:ia32': {
-      'CPPDEFINES':   ['V8_ARCH_IA32']
+      'CPPDEFINES':   ['V8_TARGET_ARCH_IA32']
     },
     'mode:debug': {
       'CCFLAGS':      ['/Od', '/Gm'],
@@ -239,7 +239,7 @@
       'LIBS': ['winmm', 'ws2_32']
     },
     'arch:arm': {
-      'CPPDEFINES':   ['V8_ARCH_ARM'],
+      'CPPDEFINES':   ['V8_TARGET_ARCH_ARM'],
       # /wd4996 is to silence the warning about sscanf
       # used by the arm simulator.
       'WARNINGFLAGS': ['/wd4996']
@@ -348,7 +348,7 @@
       'CPPDEFINES': ['USING_V8_SHARED']
     },
     'arch:ia32': {
-      'CPPDEFINES': ['V8_ARCH_IA32']
+      'CPPDEFINES': ['V8_TARGET_ARCH_IA32']
     }
   }
 }
@@ -442,7 +442,7 @@
       }
     },
     'arch:ia32': {
-      'CPPDEFINES':     ['V8_ARCH_IA32']
+      'CPPDEFINES':     ['V8_TARGET_ARCH_IA32']
     },
     'mode:debug': {
       'CCFLAGS':   ['/Od'],
diff --git a/include/v8.h b/include/v8.h
index dde78a1..9f59e4e 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -77,9 +77,7 @@
 #endif  // BUILDING_V8_SHARED
 
 #else  // _WIN32
-#ifndef __STDC_CONSTANT_MACROS
-#define __STDC_CONSTANT_MACROS
-#endif
+
 #include <stdint.h>
 
 // Setup for Linux shared library export. There is no need to destinguish
@@ -829,14 +827,14 @@
   };
 
   /**
-   * Get the ExternalStringResource for an external string.  Only
-   * valid if IsExternal() returns true.
+   * Get the ExternalStringResource for an external string.  Returns
+   * NULL if IsExternal() doesn't return true.
    */
   ExternalStringResource* GetExternalStringResource() const;
 
   /**
    * Get the ExternalAsciiStringResource for an external ascii string.
-   * Only valid if IsExternalAscii() returns true.
+   * Returns NULL if IsExternalAscii() doesn't return true.
    */
   ExternalAsciiStringResource* GetExternalAsciiStringResource() const;
 
@@ -1128,9 +1126,9 @@
 
   /**
    * Returns the identity hash for this object. The current implemenation uses
-   * a hidden property on the object to store the identity hash. 
+   * a hidden property on the object to store the identity hash.
    *
-   * The return value will never be 0. Also, it is not guaranteed to be 
+   * The return value will never be 0. Also, it is not guaranteed to be
    * unique.
    */
   int GetIdentityHash();
@@ -2082,6 +2080,24 @@
   static void ResumeProfiler();
 
   /**
+   * If logging is performed into a memory buffer (via --logfile=*), allows to
+   * retrieve previously written messages. This can be used for retrieving
+   * profiler log data in the application. This function is thread-safe.
+   *
+   * Caller provides a destination buffer that must exist during GetLogLines
+   * call. Only whole log lines are copied into the buffer.
+   *
+   * \param from_pos specified a point in a buffer to read from, 0 is the
+   *   beginning of a buffer. It is assumed that caller updates its current
+   *   position using returned size value from the previous call.
+   * \param dest_buf destination buffer for log data.
+   * \param max_size size of the destination buffer.
+   * \returns actual size of log data copied into buffer.
+   */
+  static int GetLogLines(int from_pos, char* dest_buf, int max_size);
+
+
+  /**
    * Releases any resources used by v8 and stops any utility threads
    * that may be running.  Note that disposing v8 is permanent, it
    * cannot be reinitialized.
@@ -2262,6 +2278,14 @@
   static bool InContext();
 
   /**
+   * Associate an additional data object with the context. This is mainly used
+   * with the debugger to provide additional information on the context through
+   * the debugger API.
+   */
+  void SetData(Handle<Value> data);
+  Local<Value> GetData();
+
+  /**
    * Stack-allocated class which sets the execution context for all
    * operations executed within a local scope.
    */
diff --git a/src/accessors.cc b/src/accessors.cc
index 2d6a3a1..4cd93be 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -308,6 +308,25 @@
 
 
 //
+// Accessors::ScriptGetContextData
+//
+
+
+Object* Accessors::ScriptGetContextData(Object* object, void*) {
+  HandleScope scope;
+  Handle<Script> script(Script::cast(JSValue::cast(object)->value()));
+  return script->context_data();
+}
+
+
+const AccessorDescriptor Accessors::ScriptContextData = {
+  ScriptGetContextData,
+  IllegalSetter,
+  0
+};
+
+
+//
 // Accessors::FunctionPrototype
 //
 
diff --git a/src/accessors.h b/src/accessors.h
index d174c90..1dd8fdd 100644
--- a/src/accessors.h
+++ b/src/accessors.h
@@ -48,6 +48,7 @@
   V(ScriptData)          \
   V(ScriptType)          \
   V(ScriptLineEnds)      \
+  V(ScriptContextData)   \
   V(ObjectPrototype)
 
 // Accessors contains all predefined proxy accessors.
@@ -88,6 +89,7 @@
   static Object* ScriptGetData(Object* object, void*);
   static Object* ScriptGetType(Object* object, void*);
   static Object* ScriptGetLineEnds(Object* object, void*);
+  static Object* ScriptGetContextData(Object* object, void*);
   static Object* ObjectGetPrototype(Object* receiver, void*);
   static Object* ObjectSetPrototype(JSObject* receiver, Object* value, void*);
 
diff --git a/src/api.cc b/src/api.cc
index 058b246..c250412 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -445,6 +445,40 @@
 }
 
 
+void Context::SetData(v8::Handle<Value> data) {
+  if (IsDeadCheck("v8::Context::SetData()")) return;
+  ENTER_V8;
+  {
+    HandleScope scope;
+    i::Handle<i::Context> env = Utils::OpenHandle(this);
+    i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
+    ASSERT(env->IsGlobalContext());
+    if (env->IsGlobalContext()) {
+      env->set_data(*raw_data);
+    }
+  }
+}
+
+
+v8::Local<v8::Value> Context::GetData() {
+  if (IsDeadCheck("v8::Context::GetData()")) return v8::Local<Value>();
+  ENTER_V8;
+  i::Object* raw_result = NULL;
+  {
+    HandleScope scope;
+    i::Handle<i::Context> env = Utils::OpenHandle(this);
+    ASSERT(env->IsGlobalContext());
+    if (env->IsGlobalContext()) {
+      raw_result = env->data();
+    } else {
+      return Local<Value>();
+    }
+  }
+  i::Handle<i::Object> result(raw_result);
+  return Utils::ToLocal(result);
+}
+
+
 void** v8::HandleScope::RawClose(void** value) {
   if (!ApiCheck(!is_closed_,
                 "v8::HandleScope::Close()",
@@ -2321,9 +2355,12 @@
 v8::String::GetExternalStringResource() const {
   EnsureInitialized("v8::String::GetExternalStringResource()");
   i::Handle<i::String> str = Utils::OpenHandle(this);
-  ASSERT(str->IsExternalTwoByteString());
-  void* resource = i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
-  return reinterpret_cast<ExternalStringResource*>(resource);
+  if (i::StringShape(*str).IsExternalTwoByte()) {
+    void* resource = i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
+    return reinterpret_cast<ExternalStringResource*>(resource);
+  } else {
+    return NULL;
+  }
 }
 
 
@@ -2331,9 +2368,12 @@
       v8::String::GetExternalAsciiStringResource() const {
   EnsureInitialized("v8::String::GetExternalAsciiStringResource()");
   i::Handle<i::String> str = Utils::OpenHandle(this);
-  ASSERT(str->IsExternalAsciiString());
-  void* resource = i::Handle<i::ExternalAsciiString>::cast(str)->resource();
-  return reinterpret_cast<ExternalAsciiStringResource*>(resource);
+  if (i::StringShape(*str).IsExternalAscii()) {
+    void* resource = i::Handle<i::ExternalAsciiString>::cast(str)->resource();
+    return reinterpret_cast<ExternalAsciiStringResource*>(resource);
+  } else {
+    return NULL;
+  }
 }
 
 
@@ -2646,10 +2686,13 @@
   ENTER_V8;
   if ((reinterpret_cast<intptr_t>(data) & kAlignedPointerMask) == 0) {
     uintptr_t data_ptr = reinterpret_cast<uintptr_t>(data);
-    int data_value = static_cast<int>(data_ptr >> kAlignedPointerShift);
+    intptr_t data_value =
+        static_cast<intptr_t>(data_ptr >> kAlignedPointerShift);
     STATIC_ASSERT(sizeof(data_ptr) == sizeof(data_value));
-    i::Handle<i::Object> obj(i::Smi::FromInt(data_value));
-    return Utils::ToLocal(obj);
+    if (i::Smi::IsIntptrValid(data_value)) {
+      i::Handle<i::Object> obj(i::Smi::FromIntptr(data_value));
+      return Utils::ToLocal(obj);
+    }
   }
   return ExternalNewImpl(data);
 }
@@ -2660,7 +2703,8 @@
   i::Handle<i::Object> obj = Utils::OpenHandle(*value);
   if (obj->IsSmi()) {
     // The external value was an aligned pointer.
-    uintptr_t result = i::Smi::cast(*obj)->value() << kAlignedPointerShift;
+    uintptr_t result = static_cast<uintptr_t>(
+        i::Smi::cast(*obj)->value()) << kAlignedPointerShift;
     return reinterpret_cast<void*>(result);
   }
   return ExternalValueImpl(obj);
@@ -3078,6 +3122,11 @@
 #endif
 }
 
+int V8::GetLogLines(int from_pos, char* dest_buf, int max_size) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  return i::Logger::GetLogLines(from_pos, dest_buf, max_size);
+#endif
+}
 
 String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj) {
   EnsureInitialized("v8::String::Utf8Value::Utf8Value()");
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 519c04a..9c7a42a 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -58,6 +58,16 @@
   //  -- sp[...]: constructor arguments
   // -----------------------------------
 
+  Label non_function_call;
+  // Check that the function is not a smi.
+  __ tst(r1, Operand(kSmiTagMask));
+  __ b(eq, &non_function_call);
+  // Check that the function is a JSFunction.
+  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+  __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+  __ cmp(r2, Operand(JS_FUNCTION_TYPE));
+  __ b(ne, &non_function_call);
+
   // Enter a construct frame.
   __ EnterConstructFrame();
 
@@ -169,7 +179,17 @@
   __ LeaveConstructFrame();
   __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
   __ add(sp, sp, Operand(kPointerSize));
-  __ mov(pc, Operand(lr));
+  __ Jump(lr);
+
+  // r0: number of arguments
+  // r1: called object
+  __ bind(&non_function_call);
+
+  // Set expected number of arguments to zero (not changing r0).
+  __ mov(r2, Operand(0));
+  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
+  __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+          RelocInfo::CODE_TARGET);
 }
 
 
@@ -235,7 +255,7 @@
   // Exit the JS frame and remove the parameters (except function), and return.
   // Respect ABI stack constraint.
   __ LeaveInternalFrame();
-  __ mov(pc, lr);
+  __ Jump(lr);
 
   // r0: result
 }
@@ -544,7 +564,7 @@
   // Tear down the internal frame and remove function, receiver and args.
   __ LeaveInternalFrame();
   __ add(sp, sp, Operand(3 * kPointerSize));
-  __ mov(pc, lr);
+  __ Jump(lr);
 }
 
 
@@ -663,14 +683,14 @@
 
   // Exit frame and return.
   LeaveArgumentsAdaptorFrame(masm);
-  __ mov(pc, lr);
+  __ Jump(lr);
 
 
   // -------------------------------------------
   // Dont adapt arguments.
   // -------------------------------------------
   __ bind(&dont_adapt_arguments);
-  __ mov(pc, r3);
+  __ Jump(r3);
 }
 
 
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 57e98c1..1930a7c 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -277,7 +277,7 @@
     frame_->Exit();
 
     __ add(sp, sp, Operand((scope_->num_parameters() + 1) * kPointerSize));
-    __ mov(pc, lr);
+    __ Jump(lr);
   }
 
   // Code generation state must be reset.
@@ -5034,13 +5034,13 @@
 
   // Nothing to do: The formal number of parameters has already been
   // passed in register r0 by calling function. Just return it.
-  __ mov(pc, lr);
+  __ Jump(lr);
 
   // Arguments adaptor case: Read the arguments length from the
   // adaptor frame and return it.
   __ bind(&adaptor);
   __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ mov(pc, lr);
+  __ Jump(lr);
 }
 
 
@@ -5072,7 +5072,7 @@
   __ sub(r3, r0, r1);
   __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
   __ ldr(r0, MemOperand(r3, kDisplacement));
-  __ mov(pc, lr);
+  __ Jump(lr);
 
   // Arguments adaptor case: Check index against actual arguments
   // limit found in the arguments adaptor frame. Use unsigned
@@ -5086,7 +5086,7 @@
   __ sub(r3, r0, r1);
   __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
   __ ldr(r0, MemOperand(r3, kDisplacement));
-  __ mov(pc, lr);
+  __ Jump(lr);
 
   // Slow-case: Handle non-smi or out-of-bounds access to arguments
   // by calling the runtime system.
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index a5f77fe..c098acd 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -303,7 +303,17 @@
   void Branch(bool if_true, JumpTarget* target);
   void CheckStack();
 
+  struct InlineRuntimeLUT {
+    void (CodeGenerator::*method)(ZoneList<Expression*>*);
+    const char* name;
+  };
+
+  static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
   bool CheckForInlineRuntimeCall(CallRuntime* node);
+  static bool PatchInlineRuntimeEntry(Handle<String> name,
+                                      const InlineRuntimeLUT& new_entry,
+                                      InlineRuntimeLUT* old_entry);
+
   Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
   void ProcessDeclarations(ZoneList<Declaration*>* declarations);
 
@@ -433,6 +443,8 @@
   // in a spilled state.
   bool in_spilled_code_;
 
+  static InlineRuntimeLUT kInlineRuntimeLUT[];
+
   friend class VirtualFrame;
   friend class JumpTarget;
   friend class Reference;
diff --git a/src/arm/register-allocator-arm.cc b/src/arm/register-allocator-arm.cc
index 36a4d20..d468c84 100644
--- a/src/arm/register-allocator-arm.cc
+++ b/src/arm/register-allocator-arm.cc
@@ -66,6 +66,14 @@
 }
 
 
+bool RegisterAllocator::IsReserved(int reg_code) {
+  return (reg_code == sp.code())
+      || (reg_code == fp.code())
+      || (reg_code == cp.code())
+      || (reg_code == pc.code());
+}
+
+
 void RegisterAllocator::Initialize() {
   Reset();
   // The following registers are live on function entry, saved in the
diff --git a/src/array.js b/src/array.js
index bee73e4..ed84b5f 100644
--- a/src/array.js
+++ b/src/array.js
@@ -1005,7 +1005,6 @@
 
 
 // -------------------------------------------------------------------
-
 function SetupArray() {
   // Setup non-enumerable constructor property on the Array.prototype
   // object.
@@ -1013,7 +1012,7 @@
 
   // Setup non-enumerable functions of the Array.prototype object and
   // set their names.
-  InstallFunctions($Array.prototype, DONT_ENUM, $Array(
+  InstallFunctionsOnHiddenPrototype($Array.prototype, DONT_ENUM, $Array(
     "toString", ArrayToString,
     "toLocaleString", ArrayToLocaleString,
     "join", ArrayJoin,
@@ -1034,8 +1033,7 @@
     "indexOf", ArrayIndexOf,
     "lastIndexOf", ArrayLastIndexOf,
     "reduce", ArrayReduce,
-    "reduceRight", ArrayReduceRight
-  ));
+    "reduceRight", ArrayReduceRight));
 
   // Manipulate the length of some of the functions to meet
   // expectations set by ECMA-262 or Mozilla.
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index e4962ef..09cf68d 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -833,6 +833,9 @@
 
   // Initialize the out of memory slot.
   global_context()->set_out_of_memory(Heap::false_value());
+
+  // Initialize the data slot.
+  global_context()->set_data(Heap::undefined_value());
 }
 
 
@@ -1052,6 +1055,14 @@
             Factory::LookupAsciiSymbol("line_ends"),
             proxy_line_ends,
             common_attributes);
+    Handle<Proxy> proxy_context_data =
+        Factory::NewProxy(&Accessors::ScriptContextData);
+    script_descriptors =
+        Factory::CopyAppendProxyDescriptor(
+            script_descriptors,
+            Factory::LookupAsciiSymbol("context_data"),
+            proxy_context_data,
+            common_attributes);
 
     Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
     script_map->set_instance_descriptors(*script_descriptors);
@@ -1470,11 +1481,20 @@
   Handle<JSFunction> function =
       Handle<JSFunction>(
           JSFunction::cast(global->GetProperty(Heap::Array_symbol())));
-  Handle<JSObject> prototype =
+  Handle<JSObject> visible_prototype =
       Handle<JSObject>(JSObject::cast(function->prototype()));
-  AddSpecialFunction(prototype, "pop",
+  // Remember to put push and pop on the hidden prototype if it's there.
+  Handle<JSObject> push_and_pop_prototype;
+  Handle<Object> superproto(visible_prototype->GetPrototype());
+  if (superproto->IsJSObject() &&
+      JSObject::cast(*superproto)->map()->is_hidden_prototype()) {
+    push_and_pop_prototype = Handle<JSObject>::cast(superproto);
+  } else {
+    push_and_pop_prototype = visible_prototype;
+  }
+  AddSpecialFunction(push_and_pop_prototype, "pop",
                      Handle<Code>(Builtins::builtin(Builtins::ArrayPop)));
-  AddSpecialFunction(prototype, "push",
+  AddSpecialFunction(push_and_pop_prototype, "push",
                      Handle<Code>(Builtins::builtin(Builtins::ArrayPush)));
 }
 
diff --git a/src/code-stubs.h b/src/code-stubs.h
index d21b0ee..67634aa 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -72,7 +72,7 @@
 
  protected:
   static const int kMajorBits = 5;
-  static const int kMinorBits = kBitsPerPointer - kMajorBits - kSmiTagSize;
+  static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;
 
  private:
   // Generates the assembler code for the stub.
diff --git a/src/codegen.cc b/src/codegen.cc
index ed7ee2f..40c2583 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -386,57 +386,69 @@
 }
 
 
-struct InlineRuntimeLUT {
-  void (CodeGenerator::*method)(ZoneList<Expression*>*);
-  const char* name;
+
+// Special cases: These 'runtime calls' manipulate the current
+// frame and are only used 1 or two places, so we generate them
+// inline instead of generating calls to them.  They are used
+// for implementing Function.prototype.call() and
+// Function.prototype.apply().
+CodeGenerator::InlineRuntimeLUT CodeGenerator::kInlineRuntimeLUT[] = {
+  {&CodeGenerator::GenerateIsSmi, "_IsSmi"},
+  {&CodeGenerator::GenerateIsNonNegativeSmi, "_IsNonNegativeSmi"},
+  {&CodeGenerator::GenerateIsArray, "_IsArray"},
+  {&CodeGenerator::GenerateArgumentsLength, "_ArgumentsLength"},
+  {&CodeGenerator::GenerateArgumentsAccess, "_Arguments"},
+  {&CodeGenerator::GenerateValueOf, "_ValueOf"},
+  {&CodeGenerator::GenerateSetValueOf, "_SetValueOf"},
+  {&CodeGenerator::GenerateFastCharCodeAt, "_FastCharCodeAt"},
+  {&CodeGenerator::GenerateObjectEquals, "_ObjectEquals"},
+  {&CodeGenerator::GenerateLog, "_Log"}
 };
 
 
+CodeGenerator::InlineRuntimeLUT* CodeGenerator::FindInlineRuntimeLUT(
+    Handle<String> name) {
+  const int entries_count =
+      sizeof(kInlineRuntimeLUT) / sizeof(InlineRuntimeLUT);
+  for (int i = 0; i < entries_count; i++) {
+    InlineRuntimeLUT* entry = &kInlineRuntimeLUT[i];
+    if (name->IsEqualTo(CStrVector(entry->name))) {
+      return entry;
+    }
+  }
+  return NULL;
+}
+
+
 bool CodeGenerator::CheckForInlineRuntimeCall(CallRuntime* node) {
   ZoneList<Expression*>* args = node->arguments();
-  // Special cases: These 'runtime calls' manipulate the current
-  // frame and are only used 1 or two places, so we generate them
-  // inline instead of generating calls to them.  They are used
-  // for implementing Function.prototype.call() and
-  // Function.prototype.apply().
-  static const InlineRuntimeLUT kInlineRuntimeLUT[] = {
-    {&v8::internal::CodeGenerator::GenerateIsSmi,
-     "_IsSmi"},
-    {&v8::internal::CodeGenerator::GenerateIsNonNegativeSmi,
-     "_IsNonNegativeSmi"},
-    {&v8::internal::CodeGenerator::GenerateIsArray,
-     "_IsArray"},
-    {&v8::internal::CodeGenerator::GenerateArgumentsLength,
-     "_ArgumentsLength"},
-    {&v8::internal::CodeGenerator::GenerateArgumentsAccess,
-     "_Arguments"},
-    {&v8::internal::CodeGenerator::GenerateValueOf,
-     "_ValueOf"},
-    {&v8::internal::CodeGenerator::GenerateSetValueOf,
-     "_SetValueOf"},
-    {&v8::internal::CodeGenerator::GenerateFastCharCodeAt,
-     "_FastCharCodeAt"},
-    {&v8::internal::CodeGenerator::GenerateObjectEquals,
-     "_ObjectEquals"},
-    {&v8::internal::CodeGenerator::GenerateLog,
-     "_Log"}
-  };
   Handle<String> name = node->name();
   if (name->length() > 0 && name->Get(0) == '_') {
-    for (unsigned i = 0;
-         i < sizeof(kInlineRuntimeLUT) / sizeof(InlineRuntimeLUT);
-         i++) {
-      const InlineRuntimeLUT* entry = kInlineRuntimeLUT + i;
-      if (name->IsEqualTo(CStrVector(entry->name))) {
-        ((*this).*(entry->method))(args);
-        return true;
-      }
+    InlineRuntimeLUT* entry = FindInlineRuntimeLUT(name);
+    if (entry != NULL) {
+      ((*this).*(entry->method))(args);
+      return true;
     }
   }
   return false;
 }
 
 
+bool CodeGenerator::PatchInlineRuntimeEntry(Handle<String> name,
+    const CodeGenerator::InlineRuntimeLUT& new_entry,
+    CodeGenerator::InlineRuntimeLUT* old_entry) {
+  InlineRuntimeLUT* entry = FindInlineRuntimeLUT(name);
+  if (entry == NULL) return false;
+  if (old_entry != NULL) {
+    old_entry->name = entry->name;
+    old_entry->method = entry->method;
+  }
+  entry->name = new_entry.name;
+  entry->method = new_entry.method;
+  return true;
+}
+
+
 void CodeGenerator::GenerateFastCaseSwitchStatement(SwitchStatement* node,
                                                     int min_index,
                                                     int range,
diff --git a/src/codegen.h b/src/codegen.h
index 3d45995..a6cd693 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -59,7 +59,9 @@
 //   ComputeCallInitializeInLoop
 //   ProcessDeclarations
 //   DeclareGlobals
+//   FindInlineRuntimeLUT
 //   CheckForInlineRuntimeCall
+//   PatchInlineRuntimeEntry
 //   GenerateFastCaseSwitchStatement
 //   GenerateFastCaseSwitchCases
 //   TryGenerateFastCaseSwitchStatement
@@ -76,16 +78,12 @@
 enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
 
 
-#ifdef V8_ARCH_ARM
-#include "arm/codegen-arm.h"
-#endif
-
-#ifdef V8_ARCH_X64
-#include "x64/codegen-x64.h"
-#endif
-
-#ifdef V8_ARCH_IA32
+#if V8_TARGET_ARCH_IA32
 #include "ia32/codegen-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/codegen-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/codegen-arm.h"
 #endif
 
 namespace v8 { namespace internal {
diff --git a/src/compiler.cc b/src/compiler.cc
index c16b938..256c696 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -104,6 +104,8 @@
   StackGuard guard;
   PostponeInterruptsScope postpone;
 
+  ASSERT(!i::Top::global_context().is_null());
+  script->set_context_data((*i::Top::global_context())->data());
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Notify debugger
   Debugger::OnBeforeCompile(script);
diff --git a/src/contexts.h b/src/contexts.h
index ed4b1cf..f561431 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -94,7 +94,8 @@
   V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \
   V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
   V(OUT_OF_MEMORY_INDEX, Object, out_of_memory) \
-  V(MAP_CACHE_INDEX, Object, map_cache)
+  V(MAP_CACHE_INDEX, Object, map_cache) \
+  V(CONTEXT_DATA_INDEX, Object, data)
 
 // JSFunctions are pairs (context, function code), sometimes also called
 // closures. A Context object is used to represent function contexts and
@@ -213,6 +214,7 @@
     CONTEXT_EXTENSION_FUNCTION_INDEX,
     OUT_OF_MEMORY_INDEX,
     MAP_CACHE_INDEX,
+    CONTEXT_DATA_INDEX,
     GLOBAL_CONTEXT_SLOTS
   };
 
diff --git a/src/date-delay.js b/src/date-delay.js
index dbb9c2c..f06e8b7 100644
--- a/src/date-delay.js
+++ b/src/date-delay.js
@@ -1019,7 +1019,7 @@
 
   // Setup non-enumerable functions of the Date prototype object and
   // set their names.
-  InstallFunctions($Date.prototype, DONT_ENUM, $Array(
+  InstallFunctionsOnHiddenPrototype($Date.prototype, DONT_ENUM, $Array(
     "toString", DateToString,
     "toDateString", DateToDateString,
     "toTimeString", DateToTimeString,
diff --git a/src/debug-delay.js b/src/debug-delay.js
index 4a8663b..ff7d6fb 100644
--- a/src/debug-delay.js
+++ b/src/debug-delay.js
@@ -1062,6 +1062,14 @@
 }
 
 
+ProtocolMessage.prototype.setOption = function(name, value) {
+  if (!this.options_) {
+    this.options_ = {};
+  }
+  this.options_[name] = value;
+}
+
+
 ProtocolMessage.prototype.failed = function(message) {
   this.success = false;
   this.message = message;
@@ -1090,7 +1098,7 @@
   if (this.body) {
     json += ',"body":';
     // Encode the body part.
-    var serializer = MakeMirrorSerializer(true);
+    var serializer = MakeMirrorSerializer(true, this.options_);
     if (this.body instanceof Mirror) {
       json += serializer.serializeValue(this.body);
     } else if (this.body instanceof Array) {
@@ -1680,6 +1688,7 @@
     
     if (!IS_UNDEFINED(request.arguments.includeSource)) {
       includeSource = %ToBoolean(request.arguments.includeSource);
+      response.setOption('includeSource', includeSource);
     }
   }
 
@@ -1690,25 +1699,7 @@
 
   for (var i = 0; i < scripts.length; i++) {
     if (types & ScriptTypeFlag(scripts[i].type)) {
-      var script = {};
-      if (scripts[i].name) {
-        script.name = scripts[i].name;
-      }
-      script.id = scripts[i].id;
-      script.lineOffset = scripts[i].line_offset;
-      script.columnOffset = scripts[i].column_offset;
-      script.lineCount = scripts[i].lineCount();
-      if (scripts[i].data) {
-        script.data = scripts[i].data;
-      }
-      if (includeSource) {
-        script.source = scripts[i].source;
-      } else {
-        script.sourceStart = scripts[i].source.substring(0, 80);
-      }
-      script.sourceLength = scripts[i].source.length;
-      script.type = scripts[i].type;
-      response.body.push(script);
+      response.body.push(MakeMirror(scripts[i]));
     }
   }
 };
diff --git a/src/debug.cc b/src/debug.cc
index 1688443..8422a67 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -2172,7 +2172,7 @@
 
 
 v8::Handle<v8::Context> MessageImpl::GetEventContext() const {
-  return v8::Handle<v8::Context>();
+  return v8::Utils::ToLocal(Debug::debugger_entry()->GetContext());
 }
 
 
diff --git a/src/debug.h b/src/debug.h
index cc76567..35336cb 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -687,6 +687,9 @@
   // Check whether there are any JavaScript frames on the stack.
   inline bool HasJavaScriptFrames() { return has_js_frames_; }
 
+  // Get the active context from before entering the debugger.
+  inline Handle<Context> GetContext() { return save_.context(); }
+
  private:
   EnterDebugger* prev_;  // Previous debugger entry if entered recursively.
   JavaScriptFrameIterator it_;
diff --git a/src/execution.cc b/src/execution.cc
index eb39d64..32dde9e 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -32,16 +32,12 @@
 #include "api.h"
 #include "codegen-inl.h"
 
-#ifdef V8_ARCH_ARM
-#include "arm/simulator-arm.h"
-#endif
-
-#ifdef V8_ARCH_X64
-#include "x64/simulator-x64.h"
-#endif
-
-#ifdef V8_ARCH_IA32
+#if V8_TARGET_ARCH_IA32
 #include "ia32/simulator-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/simulator-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/simulator-arm.h"
 #endif
 
 #include "debug.h"
diff --git a/src/factory.cc b/src/factory.cc
index c6b1d17..4b0b7f5 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -167,6 +167,7 @@
   Heap::SetLastScriptId(Smi::FromInt(id));
 
   // Create and initialize script object.
+  Handle<Proxy> wrapper = Factory::NewProxy(0, TENURED);
   Handle<Script> script = Handle<Script>::cast(NewStruct(SCRIPT_TYPE));
   script->set_source(*source);
   script->set_name(Heap::undefined_value());
@@ -174,8 +175,9 @@
   script->set_line_offset(Smi::FromInt(0));
   script->set_column_offset(Smi::FromInt(0));
   script->set_data(Heap::undefined_value());
+  script->set_context_data(Heap::undefined_value());
   script->set_type(Smi::FromInt(SCRIPT_TYPE_NORMAL));
-  script->set_wrapper(*Factory::NewProxy(0, TENURED));
+  script->set_wrapper(*wrapper);
   script->set_line_ends(Heap::undefined_value());
 
   return script;
diff --git a/src/frames-inl.h b/src/frames-inl.h
index 8244c67..bf46f6b 100644
--- a/src/frames-inl.h
+++ b/src/frames-inl.h
@@ -29,19 +29,15 @@
 #define V8_FRAMES_INL_H_
 
 #include "frames.h"
-#ifdef V8_ARCH_ARM
+
+#if V8_TARGET_ARCH_IA32
+#include "ia32/frames-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/frames-x64.h"
+#elif V8_TARGET_ARCH_ARM
 #include "arm/frames-arm.h"
 #endif
 
-#ifdef V8_ARCH_X64
-#include "x64/frames-x64.h"
-#endif
-
-#ifdef V8_ARCH_IA32
-#include "ia32/frames-ia32.h"
-#endif
-
-
 namespace v8 { namespace internal {
 
 
diff --git a/src/global-handles.cc b/src/global-handles.cc
index c2194fc..46b7db3 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -258,7 +258,7 @@
 }
 
 
-void GlobalHandles::MarkWeakRoots(WeakSlotCallback f) {
+void GlobalHandles::IdentifyWeakHandles(WeakSlotCallback f) {
   for (Node* current = head_; current != NULL; current = current->next()) {
     if (current->state_ == Node::WEAK) {
       if (f(&current->object_)) {
diff --git a/src/global-handles.h b/src/global-handles.h
index c5f4450..e6e9de1 100644
--- a/src/global-handles.h
+++ b/src/global-handles.h
@@ -98,8 +98,9 @@
   // Iterates over all weak roots in heap.
   static void IterateWeakRoots(ObjectVisitor* v);
 
-  // Mark the weak pointers based on the callback.
-  static void MarkWeakRoots(WeakSlotCallback f);
+  // Find all weak handles satisfying the callback predicate, mark
+  // them as pending.
+  static void IdentifyWeakHandles(WeakSlotCallback f);
 
   // Add an object group.
   // Should only used in GC callback function before a collection.
diff --git a/src/globals.h b/src/globals.h
index 71bb78e..a0b5ac3 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -30,6 +30,25 @@
 
 namespace v8 { namespace internal {
 
+// Processor architecture detection.  For more info on what's defined, see:
+//   http://msdn.microsoft.com/en-us/library/b0084kay.aspx
+//   http://www.agner.org/optimize/calling_conventions.pdf
+//   or with gcc, run: "echo | gcc -E -dM -"
+#if defined(_M_X64) || defined(__x86_64__)
+#define V8_HOST_ARCH_X64 1
+#define V8_HOST_ARCH_64_BIT 1
+#define V8_HOST_CAN_READ_UNALIGNED 1
+#elif defined(_M_IX86) || defined(__i386__)
+#define V8_HOST_ARCH_IA32 1
+#define V8_HOST_ARCH_32_BIT 1
+#define V8_HOST_CAN_READ_UNALIGNED 1
+#elif defined(__ARMEL__)
+#define V8_HOST_ARCH_ARM 1
+#define V8_HOST_ARCH_32_BIT 1
+#else
+#error Your architecture was not detected as supported by v8
+#endif
+
 // Support for alternative bool type. This is only enabled if the code is
 // compiled with USE_MYBOOL defined. This catches some nasty type bugs.
 // For instance, 'bool b = "false";' results in b == true! This is a hidden
@@ -50,20 +69,29 @@
 typedef uint8_t byte;
 typedef byte* Address;
 
-// Define macros for writing 64-bit constants and pointer-size constants.
+// Define our own macros for writing 64-bit constants.  This is less fragile
+// than defining __STDC_CONSTANT_MACROS before including <stdint.h>, and it
+// works on compilers that don't have it (like MSVC).
+#if V8_HOST_ARCH_64_BIT
 #ifdef _MSC_VER
-#define UINT64_C(x)  (x ## UI64)
-#define INT64_C(x)   (x ## I64)
+#define V8_UINT64_C(x)  (x ## UI64)
+#define V8_INT64_C(x)   (x ## I64)
+#define V8_PTR_PREFIX "ll"
+#else
+#define V8_UINT64_C(x)  (x ## UL)
+#define V8_INT64_C(x)   (x ## L)
+#define V8_PTR_PREFIX "l"
 #endif
+#else  // V8_HOST_ARCH_64_BIT
+#define V8_PTR_PREFIX ""
+#endif
+
+#define V8PRIp V8_PTR_PREFIX "x"
 
 // Code-point values in Unicode 4.0 are 21 bits wide.
 typedef uint16_t uc16;
 typedef int32_t uc32;
 
-#if defined(V8_ARCH_IA32) || defined(V8_ARCH_X64)
-#define CAN_READ_UNALIGNED 1
-#endif
-
 // -----------------------------------------------------------------------------
 // Constants
 
@@ -81,7 +109,7 @@
 const int kDoubleSize   = sizeof(double);  // NOLINT
 const int kPointerSize  = sizeof(void*);   // NOLINT
 
-#ifdef V8_ARCH_X64
+#if V8_HOST_ARCH_64_BIT
 const int kPointerSizeLog2 = 3;
 #else
 const int kPointerSizeLog2 = 2;
@@ -118,13 +146,13 @@
 
 // Zap-value: The value used for zapping dead objects.
 // Should be a recognizable hex value tagged as a heap object pointer.
-#ifdef V8_ARCH_X64
+#ifdef V8_HOST_ARCH_64_BIT
 const Address kZapValue =
-    reinterpret_cast<Address>(UINT64_C(0xdeadbeedbeadbeed));
+    reinterpret_cast<Address>(V8_UINT64_C(0xdeadbeedbeadbeed));
 const Address kHandleZapValue =
-    reinterpret_cast<Address>(UINT64_C(0x1baddead0baddead));
+    reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddead));
 const Address kFromSpaceZapValue =
-    reinterpret_cast<Address>(UINT64_C(0x1beefdad0beefdad));
+    reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdad));
 #else
 const Address kZapValue = reinterpret_cast<Address>(0xdeadbeed);
 const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddead);
@@ -488,7 +516,7 @@
 // exception'.
 //
 // Bit_cast uses the memcpy exception to move the bits from a variable of one
-// type o a variable of another type.  Of course the end result is likely to
+// type to a variable of another type.  Of course the end result is likely to
 // be implementation dependent.  Most compilers (gcc-4.2 and MSVC 2005)
 // will completely optimize bit_cast away.
 //
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 6090dd4..86165ee 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -251,11 +251,11 @@
       __object__ = FUNCTION_CALL;                                         \
     }                                                                     \
     if (!__object__->IsFailure()) RETURN_VALUE;                           \
-    if (__object__->IsOutOfMemoryFailure()) {                             \
+    if (__object__->IsOutOfMemoryFailure() ||                             \
+        __object__->IsRetryAfterGC()) {                                   \
       /* TODO(1181417): Fix this. */                                      \
       v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_2");      \
     }                                                                     \
-    ASSERT(!__object__->IsRetryAfterGC());                                \
     RETURN_EMPTY;                                                         \
   } while (false)
 
diff --git a/src/heap.cc b/src/heap.cc
index fa225f7..6d60015 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -538,7 +538,7 @@
 
 
 // Shared state read by the scavenge collector and set by ScavengeObject.
-static Address promoted_top = NULL;
+static Address promoted_rear = NULL;
 
 
 #ifdef DEBUG
@@ -554,24 +554,34 @@
     }
   }
 };
+
+
+static void VerifyNonPointerSpacePointers() {
+  // Verify that there are no pointers to new space in spaces where we
+  // do not expect them.
+  VerifyNonPointerSpacePointersVisitor v;
+  HeapObjectIterator code_it(Heap::code_space());
+  while (code_it.has_next()) {
+    HeapObject* object = code_it.next();
+    if (object->IsCode()) {
+      Code::cast(object)->ConvertICTargetsFromAddressToObject();
+      object->Iterate(&v);
+      Code::cast(object)->ConvertICTargetsFromObjectToAddress();
+    } else {
+      // If we find non-code objects in code space (e.g., free list
+      // nodes) we want to verify them as well.
+      object->Iterate(&v);
+    }
+  }
+
+  HeapObjectIterator data_it(Heap::old_data_space());
+  while (data_it.has_next()) data_it.next()->Iterate(&v);
+}
 #endif
 
 void Heap::Scavenge() {
 #ifdef DEBUG
-  if (FLAG_enable_slow_asserts) {
-    VerifyNonPointerSpacePointersVisitor v;
-    HeapObjectIterator it(code_space_);
-    while (it.has_next()) {
-      HeapObject* object = it.next();
-      if (object->IsCode()) {
-        Code::cast(object)->ConvertICTargetsFromAddressToObject();
-      }
-      object->Iterate(&v);
-      if (object->IsCode()) {
-        Code::cast(object)->ConvertICTargetsFromObjectToAddress();
-      }
-    }
-  }
+  if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
 #endif
 
   gc_state_ = SCAVENGE;
@@ -596,72 +606,70 @@
   new_space_.Flip();
   new_space_.ResetAllocationInfo();
 
-  // We need to sweep newly copied objects which can be in either the to space
-  // or the old space.  For to space objects, we use a mark.  Newly copied
-  // objects lie between the mark and the allocation top.  For objects
-  // promoted to old space, we write their addresses downward from the top of
-  // the new space.  Sweeping newly promoted objects requires an allocation
-  // pointer and a mark.  Note that the allocation pointer 'top' actually
-  // moves downward from the high address in the to space.
+  // We need to sweep newly copied objects which can be either in the
+  // to space or promoted to the old generation.  For to-space
+  // objects, we treat the bottom of the to space as a queue.  Newly
+  // copied and unswept objects lie between a 'front' mark and the
+  // allocation pointer.
   //
-  // There is guaranteed to be enough room at the top of the to space for the
-  // addresses of promoted objects: every object promoted frees up its size in
-  // bytes from the top of the new space, and objects are at least one pointer
-  // in size.  Using the new space to record promoted addresses makes the
-  // scavenge collector agnostic to the allocation strategy (eg, linear or
-  // free-list) used in old space.
-  Address new_mark = new_space_.ToSpaceLow();
-  Address promoted_mark = new_space_.ToSpaceHigh();
-  promoted_top = new_space_.ToSpaceHigh();
+  // Promoted objects can go into various old-generation spaces, and
+  // can be allocated internally in the spaces (from the free list).
+  // We treat the top of the to space as a queue of addresses of
+  // promoted objects.  The addresses of newly promoted and unswept
+  // objects lie between a 'front' mark and a 'rear' mark that is
+  // updated as a side effect of promoting an object.
+  //
+  // There is guaranteed to be enough room at the top of the to space
+  // for the addresses of promoted objects: every object promoted
+  // frees up its size in bytes from the top of the new space, and
+  // objects are at least one pointer in size.
+  Address new_space_front = new_space_.ToSpaceLow();
+  Address promoted_front = new_space_.ToSpaceHigh();
+  promoted_rear = new_space_.ToSpaceHigh();
 
   ScavengeVisitor scavenge_visitor;
   // Copy roots.
   IterateRoots(&scavenge_visitor);
 
-  // Copy objects reachable from the old generation.  By definition, there
-  // are no intergenerational pointers in code or data spaces.
+  // Copy objects reachable from weak pointers.
+  GlobalHandles::IterateWeakRoots(&scavenge_visitor);
+
+  // Copy objects reachable from the old generation.  By definition,
+  // there are no intergenerational pointers in code or data spaces.
   IterateRSet(old_pointer_space_, &ScavengePointer);
   IterateRSet(map_space_, &ScavengePointer);
   lo_space_->IterateRSet(&ScavengePointer);
 
-  bool has_processed_weak_pointers = false;
+  do {
+    ASSERT(new_space_front <= new_space_.top());
+    ASSERT(promoted_front >= promoted_rear);
 
-  while (true) {
-    ASSERT(new_mark <= new_space_.top());
-    ASSERT(promoted_mark >= promoted_top);
-
-    // Copy objects reachable from newly copied objects.
-    while (new_mark < new_space_.top() || promoted_mark > promoted_top) {
-      // Sweep newly copied objects in the to space.  The allocation pointer
-      // can change during sweeping.
-      Address previous_top = new_space_.top();
-      SemiSpaceIterator new_it(new_space(), new_mark);
-      while (new_it.has_next()) {
-        new_it.next()->Iterate(&scavenge_visitor);
-      }
-      new_mark = previous_top;
-
-      // Sweep newly copied objects in the old space.  The promotion 'top'
-      // pointer could change during sweeping.
-      previous_top = promoted_top;
-      for (Address current = promoted_mark - kPointerSize;
-           current >= previous_top;
-           current -= kPointerSize) {
-        HeapObject* object = HeapObject::cast(Memory::Object_at(current));
-        object->Iterate(&scavenge_visitor);
-        UpdateRSet(object);
-      }
-      promoted_mark = previous_top;
+    // The addresses new_space_front and new_space_.top() define a
+    // queue of unprocessed copied objects.  Process them until the
+    // queue is empty.
+    while (new_space_front < new_space_.top()) {
+      HeapObject* object = HeapObject::FromAddress(new_space_front);
+      object->Iterate(&scavenge_visitor);
+      new_space_front += object->Size();
     }
 
-    if (has_processed_weak_pointers) break;  // We are done.
-    // Copy objects reachable from weak pointers.
-    GlobalHandles::IterateWeakRoots(&scavenge_visitor);
-    has_processed_weak_pointers = true;
-  }
+    // The addresses promoted_front and promoted_rear define a queue
+    // of unprocessed addresses of promoted objects.  Process them
+    // until the queue is empty.
+    while (promoted_front > promoted_rear) {
+      promoted_front -= kPointerSize;
+      HeapObject* object =
+          HeapObject::cast(Memory::Object_at(promoted_front));
+      object->Iterate(&scavenge_visitor);
+      UpdateRSet(object);
+    }
+
+    // Take another spin if there are now unswept objects in new space
+    // (there are currently no more unswept promoted objects).
+  } while (new_space_front < new_space_.top());
 
   // Set age mark.
-  new_space_.set_age_mark(new_mark);
+  new_space_.set_age_mark(new_space_.top());
 
   LOG(ResourceEvent("scavenge", "end"));
 
@@ -882,8 +890,8 @@
       if (target_space == Heap::old_pointer_space_) {
         // Record the object's address at the top of the to space, to allow
         // it to be swept by the scavenger.
-        promoted_top -= kPointerSize;
-        Memory::Object_at(promoted_top) = *p;
+        promoted_rear -= kPointerSize;
+        Memory::Object_at(promoted_rear) = *p;
       } else {
 #ifdef DEBUG
         // Objects promoted to the data space should not have pointers to
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 8135e85..e260ab2 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -4544,6 +4544,17 @@
 }
 
 
+void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
+  ASSERT(args->length() == 0);
+  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);  // shifting code depends on this
+  Result ebp_as_smi = allocator_->Allocate();
+  ASSERT(ebp_as_smi.is_valid());
+  __ mov(ebp_as_smi.reg(), Operand(ebp));
+  __ shr(ebp_as_smi.reg(), kSmiTagSize);
+  frame_->Push(&ebp_as_smi);
+}
+
+
 void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
   if (CheckForInlineRuntimeCall(node)) {
     return;
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index caa293e..0e01957 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -473,7 +473,17 @@
 
   void CheckStack();
 
+  struct InlineRuntimeLUT {
+    void (CodeGenerator::*method)(ZoneList<Expression*>*);
+    const char* name;
+  };
+
+  static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
   bool CheckForInlineRuntimeCall(CallRuntime* node);
+  static bool PatchInlineRuntimeEntry(Handle<String> name,
+                                      const InlineRuntimeLUT& new_entry,
+                                      InlineRuntimeLUT* old_entry);
+
   Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
   void ProcessDeclarations(ZoneList<Declaration*>* declarations);
 
@@ -508,6 +518,7 @@
 
   void GenerateLog(ZoneList<Expression*>* args);
 
+  void GenerateGetFramePointer(ZoneList<Expression*>* args);
 
   // Methods and constants for fast case switch statement support.
   //
@@ -604,11 +615,15 @@
   // in a spilled state.
   bool in_spilled_code_;
 
+  static InlineRuntimeLUT kInlineRuntimeLUT[];
+
   friend class VirtualFrame;
   friend class JumpTarget;
   friend class Reference;
   friend class Result;
 
+  friend class CodeGeneratorPatcher;  // Used in test-log-ia32.cc
+
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
 
diff --git a/src/ia32/register-allocator-ia32.cc b/src/ia32/register-allocator-ia32.cc
index a898827..b72d765 100644
--- a/src/ia32/register-allocator-ia32.cc
+++ b/src/ia32/register-allocator-ia32.cc
@@ -97,6 +97,12 @@
 }
 
 
+bool RegisterAllocator::IsReserved(int reg_code) {
+  // Test below relies on the order of register codes.
+  return reg_code >= esp.code() && reg_code <= esi.code();
+}
+
+
 void RegisterAllocator::Initialize() {
   Reset();
   // The following register is live on function entry, saved in the
diff --git a/src/ia32/virtual-frame-ia32.cc b/src/ia32/virtual-frame-ia32.cc
index 619010b..ff9f60c 100644
--- a/src/ia32/virtual-frame-ia32.cc
+++ b/src/ia32/virtual-frame-ia32.cc
@@ -623,6 +623,12 @@
 
   InvalidateFrameSlotAt(index);
 
+  // InvalidateFrameSlotAt can potentially change any frame element, due
+  // to spilling registers to allocate temporaries in order to preserve
+  // the copy-on-write semantics of aliased elements.  Reload top from
+  // the frame.
+  top = elements_[top_index];
+
   if (top.is_copy()) {
     // There are two cases based on the relative positions of the
     // stored-to slot and the backing slot of the top element.
diff --git a/src/interpreter-irregexp.cc b/src/interpreter-irregexp.cc
index 70680a8..77bcc90 100644
--- a/src/interpreter-irregexp.cc
+++ b/src/interpreter-irregexp.cc
@@ -130,13 +130,13 @@
 
 
 static int32_t Load32Aligned(const byte* pc) {
-  ASSERT((reinterpret_cast<int>(pc) & 3) == 0);
+  ASSERT((reinterpret_cast<intptr_t>(pc) & 3) == 0);
   return *reinterpret_cast<const int32_t *>(pc);
 }
 
 
 static int32_t Load16Aligned(const byte* pc) {
-  ASSERT((reinterpret_cast<int>(pc) & 1) == 0);
+  ASSERT((reinterpret_cast<intptr_t>(pc) & 1) == 0);
   return *reinterpret_cast<const uint16_t *>(pc);
 }
 
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index 1757f52..ebaefc0 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -42,18 +42,14 @@
 #include "regexp-macro-assembler-irregexp.h"
 #include "regexp-stack.h"
 
-#ifdef V8_ARCH_ARM
-#include "arm/regexp-macro-assembler-arm.h"
-#endif
-
-#ifdef V8_ARCH_X64
-#include "x64/macro-assembler-x64.h"
-#include "x64/regexp-macro-assembler-x64.h"
-#endif
-
-#ifdef V8_ARCH_IA32
+#ifdef V8_TARGET_ARCH_IA32
 #include "ia32/macro-assembler-ia32.h"
 #include "ia32/regexp-macro-assembler-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/macro-assembler-x64.h"
+#include "x64/regexp-macro-assembler-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/regexp-macro-assembler-arm.h"
 #endif
 
 #include "interpreter-irregexp.h"
@@ -431,13 +427,7 @@
   Handle<String> original_subject = subject;
   Handle<FixedArray> regexp(FixedArray::cast(jsregexp->data()));
   if (UseNativeRegexp()) {
-#ifdef V8_ARCH_ARM
-    UNREACHABLE();
-#endif
-#ifdef V8_ARCH_X64
-    UNIMPLEMENTED();
-#endif
-#ifdef V8_ARCH_IA32
+#if V8_TARGET_ARCH_IA32
     RegExpMacroAssemblerIA32::Result res;
     do {
       bool is_ascii = subject->IsAsciiRepresentation();
@@ -461,6 +451,8 @@
         || res == RegExpMacroAssemblerIA32::FAILURE);
 
     rc = (res == RegExpMacroAssemblerIA32::SUCCESS);
+#else
+    UNREACHABLE();
 #endif
   } else {
     bool is_ascii = subject->IsAsciiRepresentation();
@@ -2521,7 +2513,7 @@
 
 int ChoiceNode::CalculatePreloadCharacters(RegExpCompiler* compiler) {
   int preload_characters = EatsAtLeast(4, 0);
-#ifdef CAN_READ_UNALIGNED
+#ifdef V8_HOST_CAN_READ_UNALIGNED
   bool ascii = compiler->ascii();
   if (ascii) {
     if (preload_characters > 4) preload_characters = 4;
@@ -4445,13 +4437,13 @@
   NodeInfo info = *node->info();
 
   if (RegExpImpl::UseNativeRegexp()) {
-#ifdef V8_ARCH_ARM
+#ifdef V8_TARGET_ARCH_ARM
     UNREACHABLE();
 #endif
-#ifdef V8_ARCH_X64
+#ifdef V8_TARGET_ARCH_X64
     UNREACHABLE();
 #endif
-#ifdef V8_ARCH_IA32
+#ifdef V8_TARGET_ARCH_IA32
     RegExpMacroAssemblerIA32::Mode mode;
     if (is_ascii) {
       mode = RegExpMacroAssemblerIA32::ASCII;
diff --git a/src/jsregexp.h b/src/jsregexp.h
index c0d50a3..9fa0ece 100644
--- a/src/jsregexp.h
+++ b/src/jsregexp.h
@@ -37,14 +37,10 @@
 class RegExpImpl {
  public:
   static inline bool UseNativeRegexp() {
-#ifdef V8_ARCH_ARM
-    return false;
-#endif
-#ifdef V8_ARCH_X64
-    return false;
-#endif
-#ifdef V8_ARCH_IA32
+#ifdef V8_TARGET_ARCH_IA32
     return FLAG_regexp_native;
+#else
+    return false;
 #endif
   }
   // Creates a regular expression literal in the old space.
diff --git a/src/jump-target.cc b/src/jump-target.cc
index 047588b..6e41270 100644
--- a/src/jump-target.cc
+++ b/src/jump-target.cc
@@ -189,28 +189,44 @@
     }
   }
 
-  // Compute the registers already reserved by values in the frame.
-  // Count the reserved registers to avoid using them.
-  RegisterFile frame_registers = RegisterAllocator::Reserved();
-  for (int i = 0; i < length; i++) {
-    FrameElement* element = elements[i];
-    if (element != NULL && element->is_register()) {
-      frame_registers.Use(element->reg());
+  // Build the new frame.  A freshly allocated frame has memory elements
+  // for the parameters and some platform-dependent elements (e.g.,
+  // return address).  Replace those first.
+  entry_frame_ = new VirtualFrame(cgen_);
+  int index = 0;
+  for (; index < entry_frame_->elements_.length(); index++) {
+    // If the element is determined, set it now.  Count registers.  Mark
+    // elements as copied exactly when they have a copy.  Undetermined
+    // elements are initially recorded as if in memory.
+    if (elements[index] != NULL) {
+      entry_frame_->elements_[index] = *elements[index];
+      entry_frame_->elements_[index].clear_copied();
+      if (elements[index]->is_register()) {
+        entry_frame_->register_locations_[elements[index]->reg().code()] =
+            index;
+      } else if (elements[index]->is_copy()) {
+        entry_frame_->elements_[elements[index]->index()].set_copied();
+      }
+    }
+  }
+  // Then fill in the rest of the frame with new elements.
+  for (; index < length; index++) {
+    if (elements[index] == NULL) {
+      entry_frame_->elements_.Add(FrameElement::MemoryElement());
+    } else {
+      entry_frame_->elements_.Add(*elements[index]);
+      entry_frame_->elements_[index].clear_copied();
+      if (elements[index]->is_register()) {
+        entry_frame_->register_locations_[elements[index]->reg().code()] =
+            index;
+      } else if (elements[index]->is_copy()) {
+        entry_frame_->elements_[elements[index]->index()].set_copied();
+      }
     }
   }
 
-  // Build the new frame.  The frame already has memory elements for
-  // the parameters (including the receiver) and the return address.
-  // We will fill it up with memory elements.
-  entry_frame_ = new VirtualFrame(cgen_);
-  while (entry_frame_->elements_.length() < length) {
-    entry_frame_->elements_.Add(FrameElement::MemoryElement());
-  }
-
-
-  // Copy the already-determined frame elements to the entry frame,
-  // and allocate any still-undetermined frame elements to registers
-  // or memory, from the top down.
+  // Allocate any still-undetermined frame elements to registers or
+  // memory, from the top down.
   for (int i = length - 1; i >= 0; i--) {
     if (elements[i] == NULL) {
       // If the value is synced on all frames, put it in memory.  This
@@ -234,7 +250,7 @@
       for (int j = 0; j < reaching_frames_.length(); j++) {
         FrameElement element = reaching_frames_[j]->elements_[i];
         if (element.is_register() &&
-            !frame_registers.is_used(element.reg())) {
+            !entry_frame_->is_used(element.reg())) {
           candidate_registers.Use(element.reg());
           if (candidate_registers.count(element.reg()) > max_count) {
             max_count = candidate_registers.count(element.reg());
@@ -245,40 +261,32 @@
       // If there was no preferred choice consider any free register.
       if (best_reg_code == no_reg.code_) {
         for (int j = 0; j < kNumRegisters; j++) {
-          if (!frame_registers.is_used(j)) {
+          if (!entry_frame_->is_used(j) && !RegisterAllocator::IsReserved(j)) {
             best_reg_code = j;
             break;
           }
         }
       }
 
-      // If there was a register choice, use it.  If not do nothing
-      // (the element is already recorded as in memory)
       if (best_reg_code != no_reg.code_) {
+        // If there was a register choice, use it.  Preserve the copied
+        // flag on the element.
+        bool is_copied = entry_frame_->elements_[i].is_copied();
         Register reg = { best_reg_code };
-        frame_registers.Use(reg);
         entry_frame_->elements_[i] =
             FrameElement::RegisterElement(reg,
                                           FrameElement::NOT_SYNCED);
+        if (is_copied) entry_frame_->elements_[i].set_copied();
+        entry_frame_->register_locations_[best_reg_code] = i;
       }
-    } else {
-      // The element is already determined.
-      entry_frame_->elements_[i] = *elements[i];
+      // If there was no register found, the element is already
+      // recorded as in memory.
     }
   }
 
-  // Set the copied flags in the frame to be exact.  This assumes that
-  // the backing store of copies is always lower in the frame.
-  // Set the register locations to their index in the frame.
+  // Set the static type of frame elements.
   for (int i = 0; i < length; i++) {
     FrameElement* current = &entry_frame_->elements_[i];
-    current->clear_copied();
-    if (current->is_copy()) {
-      entry_frame_->elements_[current->index()].set_copied();
-    } else if (current->is_register()) {
-      entry_frame_->register_locations_[current->reg().code()] = i;
-    }
-
     if (direction_ == BIDIRECTIONAL && i >= high_water_mark) {
       current->set_static_type(StaticType::unknown());
     } else {
diff --git a/src/log.cc b/src/log.cc
index 4d4dfa7..5297a30 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -164,7 +164,7 @@
 //
 class Ticker: public Sampler {
  public:
-  explicit Ticker(int interval, unsigned int low_stack_bound):
+  explicit Ticker(int interval, uintptr_t low_stack_bound):
       Sampler(interval, FLAG_prof), window_(NULL), profiler_(NULL),
       stack_tracer_(low_stack_bound) {}
 
@@ -285,8 +285,179 @@
 
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
+
+// Functions and data for performing output of log messages.
+class Log : public AllStatic {
+ public:
+  // Opens stdout for logging.
+  static void OpenStdout();
+
+  // Opens file for logging.
+  static void OpenFile(const char* name);
+
+  // Opens memory buffer for logging.
+  static void OpenMemoryBuffer();
+
+  // Frees all resources acquired in Open... functions.
+  static void Close();
+
+  // See description in v8.h.
+  static int GetLogLines(int from_pos, char* dest_buf, int max_size);
+
+  static bool is_enabled() { return output_.handle != NULL; }
+
+  typedef int (*WritePtr)(const char* msg, int length);
+ private:
+  static void Init();
+
+  // Write functions assume that mutex_ is acquired by the caller.
+  static WritePtr Write;
+
+  static int WriteToFile(const char* msg, int length) {
+    ASSERT(output_.handle != NULL);
+    int rv = fwrite(msg, 1, length, output_.handle);
+    ASSERT(length == rv);
+    return rv;
+  }
+
+  static int WriteToMemory(const char* msg, int length) {
+    ASSERT(output_.buffer != NULL);
+    ASSERT(output_buffer_write_pos_ >= output_.buffer);
+    if (output_buffer_write_pos_ + length
+        <= output_.buffer + kOutputBufferSize) {
+      memcpy(output_buffer_write_pos_, msg, length);
+      output_buffer_write_pos_ += length;
+      return length;
+    } else {
+      // Memory buffer is full, ignore write.
+      return 0;
+    }
+  }
+
+  // When logging is active, output_ refers to the file or memory buffer
+  // events are written to.
+  // mutex_ should be acquired before using output_.
+  union Output {
+    FILE* handle;
+    char* buffer;
+  };
+  static Output output_;
+
+  // mutex_ is a Mutex used for enforcing exclusive
+  // access to the formatting buffer and the log file or log memory buffer.
+  static Mutex* mutex_;
+
+  // Size of buffer used for memory logging.
+  static const int kOutputBufferSize = 2 * 1024 * 1024;
+
+  // Writing position in a memory buffer.
+  static char* output_buffer_write_pos_;
+
+  // Size of buffer used for formatting log messages.
+  static const int kMessageBufferSize = 2048;
+
+  // Buffer used for formatting log messages. This is a singleton buffer and
+  // mutex_ should be acquired before using it.
+  static char* message_buffer_;
+
+  friend class LogMessageBuilder;
+};
+
+
+Log::WritePtr Log::Write = NULL;
+Log::Output Log::output_ = {NULL};
+Mutex* Log::mutex_ = NULL;
+char* Log::output_buffer_write_pos_ = NULL;
+char* Log::message_buffer_ = NULL;
+
+
+void Log::Init() {
+  mutex_ = OS::CreateMutex();
+  message_buffer_ = NewArray<char>(kMessageBufferSize);
+}
+
+
+void Log::OpenStdout() {
+  ASSERT(output_.handle == NULL);
+  output_.handle = stdout;
+  Write = WriteToFile;
+  Init();
+}
+
+
+void Log::OpenFile(const char* name) {
+  ASSERT(output_.handle == NULL);
+  output_.handle = OS::FOpen(name, OS::LogFileOpenMode);
+  Write = WriteToFile;
+  Init();
+}
+
+
+void Log::OpenMemoryBuffer() {
+  ASSERT(output_.buffer == NULL);
+  output_.buffer = NewArray<char>(kOutputBufferSize);
+  output_buffer_write_pos_ = output_.buffer;
+  Write = WriteToMemory;
+  Init();
+}
+
+
+void Log::Close() {
+  if (Write == WriteToFile) {
+    fclose(output_.handle);
+    output_.handle = NULL;
+  } else if (Write == WriteToMemory) {
+    DeleteArray(output_.buffer);
+    output_.buffer = NULL;
+  } else {
+    ASSERT(Write == NULL);
+  }
+  Write = NULL;
+
+  delete mutex_;
+  mutex_ = NULL;
+
+  DeleteArray(message_buffer_);
+  message_buffer_ = NULL;
+}
+
+
+int Log::GetLogLines(int from_pos, char* dest_buf, int max_size) {
+  if (Write != WriteToMemory) return 0;
+  ASSERT(output_.buffer != NULL);
+  ASSERT(output_buffer_write_pos_ >= output_.buffer);
+  ASSERT(from_pos >= 0);
+  ASSERT(max_size >= 0);
+  int actual_size = max_size;
+  char* buffer_read_pos = output_.buffer + from_pos;
+  ScopedLock sl(mutex_);
+  if (actual_size == 0
+      || output_buffer_write_pos_ == output_.buffer
+      || buffer_read_pos >= output_buffer_write_pos_) {
+    // No data requested or can be returned.
+    return 0;
+  }
+  if (buffer_read_pos + actual_size > output_buffer_write_pos_) {
+    // Requested size overlaps with current writing position and
+    // needs to be truncated.
+    actual_size = output_buffer_write_pos_ - buffer_read_pos;
+    ASSERT(actual_size == 0 || buffer_read_pos[actual_size - 1] == '\n');
+  } else {
+    // Find previous log line boundary.
+    char* end_pos = buffer_read_pos + actual_size - 1;
+    while (end_pos >= buffer_read_pos && *end_pos != '\n') --end_pos;
+    actual_size = end_pos - buffer_read_pos + 1;
+  }
+  ASSERT(actual_size <= max_size);
+  if (actual_size > 0) {
+    memcpy(dest_buf, buffer_read_pos, actual_size);
+  }
+  return actual_size;
+}
+
+
 // Utility class for formatting log messages. It fills the message into the
-// static buffer in Logger.
+// static buffer in Log.
 class LogMessageBuilder BASE_EMBEDDED {
  public:
   explicit LogMessageBuilder();
@@ -309,45 +480,45 @@
 
 // Create a message builder starting from position 0. This acquires the mutex
 // in the logger as well.
-LogMessageBuilder::LogMessageBuilder(): sl(Logger::mutex_), pos_(0) {
-  ASSERT(Logger::message_buffer_ != NULL);
+LogMessageBuilder::LogMessageBuilder(): sl(Log::mutex_), pos_(0) {
+  ASSERT(Log::message_buffer_ != NULL);
 }
 
 
 // Append string data to the log message.
 void LogMessageBuilder::Append(const char* format, ...) {
-  Vector<char> buf(Logger::message_buffer_ + pos_,
-                   Logger::kMessageBufferSize - pos_);
+  Vector<char> buf(Log::message_buffer_ + pos_,
+                   Log::kMessageBufferSize - pos_);
   va_list args;
   va_start(args, format);
   Append(format, args);
   va_end(args);
-  ASSERT(pos_ <= Logger::kMessageBufferSize);
+  ASSERT(pos_ <= Log::kMessageBufferSize);
 }
 
 
 // Append string data to the log message.
 void LogMessageBuilder::Append(const char* format, va_list args) {
-  Vector<char> buf(Logger::message_buffer_ + pos_,
-                   Logger::kMessageBufferSize - pos_);
+  Vector<char> buf(Log::message_buffer_ + pos_,
+                   Log::kMessageBufferSize - pos_);
   int result = v8::internal::OS::VSNPrintF(buf, format, args);
 
   // Result is -1 if output was truncated.
   if (result >= 0) {
     pos_ += result;
   } else {
-    pos_ = Logger::kMessageBufferSize;
+    pos_ = Log::kMessageBufferSize;
   }
-  ASSERT(pos_ <= Logger::kMessageBufferSize);
+  ASSERT(pos_ <= Log::kMessageBufferSize);
 }
 
 
 // Append a character to the log message.
 void LogMessageBuilder::Append(const char c) {
-  if (pos_ < Logger::kMessageBufferSize) {
-    Logger::message_buffer_[pos_++] = c;
+  if (pos_ < Log::kMessageBufferSize) {
+    Log::message_buffer_[pos_++] = c;
   }
-  ASSERT(pos_ <= Logger::kMessageBufferSize);
+  ASSERT(pos_ <= Log::kMessageBufferSize);
 }
 
 
@@ -391,18 +562,14 @@
 
 // Write the log message to the log file currently opened.
 void LogMessageBuilder::WriteToLogFile() {
-  ASSERT(pos_ <= Logger::kMessageBufferSize);
-  size_t rv = fwrite(Logger::message_buffer_, 1, pos_, Logger::logfile_);
-  ASSERT(rv == static_cast<size_t>(pos_));
-  USE(rv);
+  ASSERT(pos_ <= Log::kMessageBufferSize);
+  Log::Write(Log::message_buffer_, pos_);
 }
 
 // Write a null-terminated string to to the log file currently opened.
 void LogMessageBuilder::WriteCStringToLogFile(const char* str) {
-  size_t len = strlen(str);
-  size_t rv = fwrite(str, 1, len, Logger::logfile_);
-  ASSERT(rv == len);
-  USE(rv);
+  int len = strlen(str);
+  Log::Write(str, len);
 }
 #endif
 
@@ -411,20 +578,22 @@
 // Logger class implementation.
 //
 Ticker* Logger::ticker_ = NULL;
-char* Logger::message_buffer_ = NULL;
-FILE* Logger::logfile_ = NULL;
 Profiler* Logger::profiler_ = NULL;
-Mutex* Logger::mutex_ = NULL;
 VMState* Logger::current_state_ = NULL;
 VMState Logger::bottom_state_(EXTERNAL);
 SlidingStateWindow* Logger::sliding_state_window_ = NULL;
 
+
+bool Logger::is_enabled() {
+  return Log::is_enabled();
+}
+
 #endif  // ENABLE_LOGGING_AND_PROFILING
 
 
 void Logger::Preamble(const char* content) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (logfile_ == NULL || !FLAG_log_code) return;
+  if (!Log::is_enabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
   msg.WriteCStringToLogFile(content);
 #endif
@@ -440,7 +609,7 @@
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
 void Logger::UncheckedStringEvent(const char* name, const char* value) {
-  if (logfile_ == NULL) return;
+  if (!Log::is_enabled()) return;
   LogMessageBuilder msg;
   msg.Append("%s,\"%s\"\n", name, value);
   msg.WriteToLogFile();
@@ -450,7 +619,7 @@
 
 void Logger::IntEvent(const char* name, int value) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (logfile_ == NULL || !FLAG_log) return;
+  if (!Log::is_enabled() || !FLAG_log) return;
   LogMessageBuilder msg;
   msg.Append("%s,%d\n", name, value);
   msg.WriteToLogFile();
@@ -460,10 +629,9 @@
 
 void Logger::HandleEvent(const char* name, Object** location) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (logfile_ == NULL || !FLAG_log_handles) return;
+  if (!Log::is_enabled() || !FLAG_log_handles) return;
   LogMessageBuilder msg;
-  msg.Append("%s,0x%x\n", name,
-             reinterpret_cast<unsigned int>(location));
+  msg.Append("%s,0x%%"V8PRIp"\n", name, location);
   msg.WriteToLogFile();
 #endif
 }
@@ -471,10 +639,10 @@
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
 // ApiEvent is private so all the calls come from the Logger class.  It is the
-// caller's responsibility to ensure that logfile_ is not NULL and that
+// caller's responsibility to ensure that log is enabled and that
 // FLAG_log_api is true.
 void Logger::ApiEvent(const char* format, ...) {
-  ASSERT(logfile_ != NULL && FLAG_log_api);
+  ASSERT(Log::is_enabled() && FLAG_log_api);
   LogMessageBuilder msg;
   va_list ap;
   va_start(ap, format);
@@ -487,7 +655,7 @@
 
 void Logger::ApiNamedSecurityCheck(Object* key) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (logfile_ == NULL || !FLAG_log_api) return;
+  if (!Log::is_enabled() || !FLAG_log_api) return;
   if (key->IsString()) {
     SmartPointer<char> str =
         String::cast(key)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
@@ -505,7 +673,7 @@
                                 unsigned start,
                                 unsigned end) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (logfile_ == NULL || !FLAG_prof) return;
+  if (!Log::is_enabled() || !FLAG_prof) return;
   LogMessageBuilder msg;
   msg.Append("shared-library,\"%s\",0x%08x,0x%08x\n", library_path,
              start, end);
@@ -518,7 +686,7 @@
                                 unsigned start,
                                 unsigned end) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (logfile_ == NULL || !FLAG_prof) return;
+  if (!Log::is_enabled() || !FLAG_prof) return;
   LogMessageBuilder msg;
   msg.Append("shared-library,\"%ls\",0x%08x,0x%08x\n", library_path,
              start, end);
@@ -573,7 +741,7 @@
 
 void Logger::RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (logfile_ == NULL || !FLAG_log_regexp) return;
+  if (!Log::is_enabled() || !FLAG_log_regexp) return;
   LogMessageBuilder msg;
   msg.Append("regexp-compile,");
   LogRegExpSource(regexp);
@@ -585,7 +753,7 @@
 
 void Logger::LogRuntime(Vector<const char> format, JSArray* args) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (logfile_ == NULL || !FLAG_log_runtime) return;
+  if (!Log::is_enabled() || !FLAG_log_runtime) return;
   HandleScope scope;
   LogMessageBuilder msg;
   for (int i = 0; i < format.length(); i++) {
@@ -626,7 +794,7 @@
 
 void Logger::ApiIndexedSecurityCheck(uint32_t index) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (logfile_ == NULL || !FLAG_log_api) return;
+  if (!Log::is_enabled() || !FLAG_log_api) return;
   ApiEvent("api,check-security,%u\n", index);
 #endif
 }
@@ -637,7 +805,7 @@
                                     Object* name) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   ASSERT(name->IsString());
-  if (logfile_ == NULL || !FLAG_log_api) return;
+  if (!Log::is_enabled() || !FLAG_log_api) return;
   String* class_name_obj = holder->class_name();
   SmartPointer<char> class_name =
       class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
@@ -651,7 +819,7 @@
                                       JSObject* holder,
                                       uint32_t index) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (logfile_ == NULL || !FLAG_log_api) return;
+  if (!Log::is_enabled() || !FLAG_log_api) return;
   String* class_name_obj = holder->class_name();
   SmartPointer<char> class_name =
       class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
@@ -661,7 +829,7 @@
 
 void Logger::ApiObjectAccess(const char* tag, JSObject* object) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (logfile_ == NULL || !FLAG_log_api) return;
+  if (!Log::is_enabled() || !FLAG_log_api) return;
   String* class_name_obj = object->class_name();
   SmartPointer<char> class_name =
       class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
@@ -672,7 +840,7 @@
 
 void Logger::ApiEntryCall(const char* name) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (logfile_ == NULL || !FLAG_log_api) return;
+  if (!Log::is_enabled() || !FLAG_log_api) return;
   Logger::ApiEvent("api,%s\n", name);
 #endif
 }
@@ -680,10 +848,9 @@
 
 void Logger::NewEvent(const char* name, void* object, size_t size) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (logfile_ == NULL || !FLAG_log) return;
+  if (!Log::is_enabled() || !FLAG_log) return;
   LogMessageBuilder msg;
-  msg.Append("new,%s,0x%x,%u\n", name,
-             reinterpret_cast<unsigned int>(object),
+  msg.Append("new,%s,0x%%"V8PRIp",%u\n", name, object,
              static_cast<unsigned int>(size));
   msg.WriteToLogFile();
 #endif
@@ -692,10 +859,9 @@
 
 void Logger::DeleteEvent(const char* name, void* object) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (logfile_ == NULL || !FLAG_log) return;
+  if (!Log::is_enabled() || !FLAG_log) return;
   LogMessageBuilder msg;
-  msg.Append("delete,%s,0x%x\n", name,
-             reinterpret_cast<unsigned int>(object));
+  msg.Append("delete,%s,0x%%"V8PRIp"\n", name, object);
   msg.WriteToLogFile();
 #endif
 }
@@ -703,10 +869,9 @@
 
 void Logger::CodeCreateEvent(const char* tag, Code* code, const char* comment) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (logfile_ == NULL || !FLAG_log_code) return;
+  if (!Log::is_enabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
-  msg.Append("code-creation,%s,0x%x,%d,\"", tag,
-             reinterpret_cast<unsigned int>(code->address()),
+  msg.Append("code-creation,%s,0x%"V8PRIp",%d,\"", tag, code->address(),
              code->ExecutableSize());
   for (const char* p = comment; *p != '\0'; p++) {
     if (*p == '"') {
@@ -723,12 +888,11 @@
 
 void Logger::CodeCreateEvent(const char* tag, Code* code, String* name) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (logfile_ == NULL || !FLAG_log_code) return;
+  if (!Log::is_enabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
   SmartPointer<char> str =
       name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-  msg.Append("code-creation,%s,0x%x,%d,\"%s\"\n", tag,
-             reinterpret_cast<unsigned int>(code->address()),
+  msg.Append("code-creation,%s,0x%"V8PRIp",%d,\"%s\"\n", tag, code->address(),
              code->ExecutableSize(), *str);
   msg.WriteToLogFile();
 #endif
@@ -738,14 +902,14 @@
 void Logger::CodeCreateEvent(const char* tag, Code* code, String* name,
                              String* source, int line) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (logfile_ == NULL || !FLAG_log_code) return;
+  if (!Log::is_enabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
   SmartPointer<char> str =
       name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
   SmartPointer<char> sourcestr =
       source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-  msg.Append("code-creation,%s,0x%x,%d,\"%s %s:%d\"\n", tag,
-             reinterpret_cast<unsigned int>(code->address()),
+  msg.Append("code-creation,%s,0x%"V8PRIp",%d,\"%s %s:%d\"\n",
+             tag, code->address(),
              code->ExecutableSize(),
              *str, *sourcestr, line);
   msg.WriteToLogFile();
@@ -755,10 +919,10 @@
 
 void Logger::CodeCreateEvent(const char* tag, Code* code, int args_count) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (logfile_ == NULL || !FLAG_log_code) return;
+  if (!Log::is_enabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
-  msg.Append("code-creation,%s,0x%x,%d,\"args_count: %d\"\n", tag,
-             reinterpret_cast<unsigned int>(code->address()),
+  msg.Append("code-creation,%s,0x%"V8PRIp",%d,\"args_count: %d\"\n", tag,
+             code->address(),
              code->ExecutableSize(),
              args_count);
   msg.WriteToLogFile();
@@ -768,10 +932,10 @@
 
 void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (logfile_ == NULL || !FLAG_log_code) return;
+  if (!Log::is_enabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
-  msg.Append("code-creation,%s,0x%x,%d,\"", "RegExp",
-             reinterpret_cast<unsigned int>(code->address()),
+  msg.Append("code-creation,%s,0x%"V8PRIp",%d,\"", "RegExp",
+             code->address(),
              code->ExecutableSize());
   msg.AppendDetailed(source, false);
   msg.Append("\"\n");
@@ -782,11 +946,9 @@
 
 void Logger::CodeAllocateEvent(Code* code, Assembler* assem) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (logfile_ == NULL || !FLAG_log_code) return;
+  if (!Log::is_enabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
-  msg.Append("code-allocate,0x%x,0x%x\n",
-             reinterpret_cast<unsigned int>(code->address()),
-             reinterpret_cast<unsigned int>(assem));
+  msg.Append("code-allocate,0x%"V8PRIp",0x%"V8PRIp"\n", code->address(), assem);
   msg.WriteToLogFile();
 #endif
 }
@@ -794,11 +956,9 @@
 
 void Logger::CodeMoveEvent(Address from, Address to) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (logfile_ == NULL || !FLAG_log_code) return;
+  if (!Log::is_enabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
-  msg.Append("code-move,0x%x,0x%x\n",
-             reinterpret_cast<unsigned int>(from),
-             reinterpret_cast<unsigned int>(to));
+  msg.Append("code-move,0x%"V8PRIp",0x%"V8PRIp"\n", from, to);
   msg.WriteToLogFile();
 #endif
 }
@@ -806,9 +966,9 @@
 
 void Logger::CodeDeleteEvent(Address from) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (logfile_ == NULL || !FLAG_log_code) return;
+  if (!Log::is_enabled() || !FLAG_log_code) return;
   LogMessageBuilder msg;
-  msg.Append("code-delete,0x%x\n", reinterpret_cast<unsigned int>(from));
+  msg.Append("code-delete,0x%"V8PRIp"\n", from);
   msg.WriteToLogFile();
 #endif
 }
@@ -816,7 +976,7 @@
 
 void Logger::ResourceEvent(const char* name, const char* tag) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (logfile_ == NULL || !FLAG_log) return;
+  if (!Log::is_enabled() || !FLAG_log) return;
   LogMessageBuilder msg;
   msg.Append("%s,%s,", name, tag);
 
@@ -834,12 +994,11 @@
 
 void Logger::SuspectReadEvent(String* name, Object* obj) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (logfile_ == NULL || !FLAG_log_suspect) return;
+  if (!Log::is_enabled() || !FLAG_log_suspect) return;
   LogMessageBuilder msg;
   String* class_name = obj->IsJSObject()
                        ? JSObject::cast(obj)->class_name()
                        : Heap::empty_string();
-  ScopedLock sl(mutex_);
   msg.Append("suspect-read,");
   msg.Append(class_name);
   msg.Append(',');
@@ -854,7 +1013,7 @@
 
 void Logger::HeapSampleBeginEvent(const char* space, const char* kind) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (logfile_ == NULL || !FLAG_log_gc) return;
+  if (!Log::is_enabled() || !FLAG_log_gc) return;
   LogMessageBuilder msg;
   msg.Append("heap-sample-begin,\"%s\",\"%s\"\n", space, kind);
   msg.WriteToLogFile();
@@ -864,7 +1023,7 @@
 
 void Logger::HeapSampleEndEvent(const char* space, const char* kind) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (logfile_ == NULL || !FLAG_log_gc) return;
+  if (!Log::is_enabled() || !FLAG_log_gc) return;
   LogMessageBuilder msg;
   msg.Append("heap-sample-end,\"%s\",\"%s\"\n", space, kind);
   msg.WriteToLogFile();
@@ -874,7 +1033,7 @@
 
 void Logger::HeapSampleItemEvent(const char* type, int number, int bytes) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (logfile_ == NULL || !FLAG_log_gc) return;
+  if (!Log::is_enabled() || !FLAG_log_gc) return;
   LogMessageBuilder msg;
   msg.Append("heap-sample-item,%s,%d,%d\n", type, number, bytes);
   msg.WriteToLogFile();
@@ -884,7 +1043,7 @@
 
 void Logger::DebugTag(const char* call_site_tag) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (logfile_ == NULL || !FLAG_log) return;
+  if (!Log::is_enabled() || !FLAG_log) return;
   LogMessageBuilder msg;
   msg.Append("debug-tag,%s\n", call_site_tag);
   msg.WriteToLogFile();
@@ -894,7 +1053,7 @@
 
 void Logger::DebugEvent(const char* event_type, Vector<uint16_t> parameter) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  if (logfile_ == NULL || !FLAG_log) return;
+  if (!Log::is_enabled() || !FLAG_log) return;
   StringBuilder s(parameter.length() + 1);
   for (int i = 0; i < parameter.length(); ++i) {
     s.AddCharacter(static_cast<char>(parameter[i]));
@@ -913,15 +1072,15 @@
 
 #ifdef ENABLE_LOGGING_AND_PROFILING
 void Logger::TickEvent(TickSample* sample, bool overflow) {
-  if (logfile_ == NULL || !FLAG_prof) return;
+  if (!Log::is_enabled() || !FLAG_prof) return;
   LogMessageBuilder msg;
-  msg.Append("tick,0x%x,0x%x,%d", sample->pc, sample->sp,
+  msg.Append("tick,0x%"V8PRIp",0x%"V8PRIp",%d", sample->pc, sample->sp,
              static_cast<int>(sample->state));
   if (overflow) {
     msg.Append(",overflow");
   }
   for (int i = 0; i < sample->frames_count; ++i) {
-    msg.Append(",0x%x", reinterpret_cast<uint32_t>(sample->stack[i]));
+    msg.Append(",0x%"V8PRIp, sample->stack[i]);
   }
   msg.Append('\n');
   msg.WriteToLogFile();
@@ -941,6 +1100,12 @@
 void Logger::ResumeProfiler() {
   profiler_->resume();
 }
+
+
+int Logger::GetLogLines(int from_pos, char* dest_buf, int max_size) {
+  return Log::GetLogLines(from_pos, dest_buf, max_size);
+}
+
 #endif
 
 
@@ -967,7 +1132,9 @@
   // If we're logging anything, we need to open the log file.
   if (open_log_file) {
     if (strcmp(FLAG_logfile, "-") == 0) {
-      logfile_ = stdout;
+      Log::OpenStdout();
+    } else if (strcmp(FLAG_logfile, "*") == 0) {
+      Log::OpenMemoryBuffer();
     } else if (strchr(FLAG_logfile, '%') != NULL) {
       // If there's a '%' in the log file name we have to expand
       // placeholders.
@@ -1003,12 +1170,10 @@
         }
       }
       SmartPointer<const char> expanded = stream.ToCString();
-      logfile_ = OS::FOpen(*expanded, OS::LogFileOpenMode);
+      Log::OpenFile(*expanded);
     } else {
-      logfile_ = OS::FOpen(FLAG_logfile, OS::LogFileOpenMode);
+      Log::OpenFile(FLAG_logfile);
     }
-    message_buffer_ = NewArray<char>(kMessageBufferSize);
-    mutex_ = OS::CreateMutex();
   }
 
   current_state_ = &bottom_state_;
@@ -1016,7 +1181,7 @@
   // as log is initialized early with V8, we can assume that JS execution
   // frames can never reach this point on stack
   int stack_var;
-  ticker_ = new Ticker(1, reinterpret_cast<unsigned int>(&stack_var));
+  ticker_ = new Ticker(1, reinterpret_cast<uintptr_t>(&stack_var));
 
   if (FLAG_sliding_state_window && sliding_state_window_ == NULL) {
     sliding_state_window_ = new SlidingStateWindow();
@@ -1050,13 +1215,7 @@
 
   delete ticker_;
 
-  if (logfile_ != NULL) {
-    fclose(logfile_);
-    logfile_ = NULL;
-    delete mutex_;
-    mutex_ = NULL;
-    DeleteArray(message_buffer_);
-  }
+  Log::Close();
 #endif
 }
 
diff --git a/src/log.h b/src/log.h
index 44c1957..5f3c188 100644
--- a/src/log.h
+++ b/src/log.h
@@ -103,10 +103,10 @@
 
 class Logger {
  public:
-  // Opens the file for logging if the right flags are set.
+  // Acquires resources for logging if the right flags are set.
   static bool Setup();
 
-  // Closes file opened in Setup.
+  // Frees resources acquired in Setup.
   static void TearDown();
 
   // Enable the computation of a sliding window of states.
@@ -201,7 +201,7 @@
     return current_state_ ? current_state_->state() : OTHER;
   }
 
-  static bool is_enabled() { return logfile_ != NULL; }
+  static bool is_enabled();
 
   // Pause/Resume collection of profiling data.
   // When data collection is paused, Tick events are discarded until
@@ -210,6 +210,10 @@
   static void PauseProfiler();
   static void ResumeProfiler();
 
+  // If logging is performed into a memory buffer, allows retrieval
+  // of previously written messages. See v8.h.
+  static int GetLogLines(int from_pos, char* dest_buf, int max_size);
+
  private:
 
   // Emits the source code of a regexp. Used by regexp events.
@@ -223,17 +227,6 @@
   // Logs a StringEvent regardless of whether FLAG_log is true.
   static void UncheckedStringEvent(const char* name, const char* value);
 
-  // Size of buffer used for formatting log messages.
-  static const int kMessageBufferSize = 2048;
-
-  // Buffer used for formatting log messages. This is a singleton buffer and
-  // mutex_ should be acquired before using it.
-  static char* message_buffer_;
-
-  // When logging is active, logfile_ refers the file events are written to.
-  // mutex_ should be acquired before using logfile_.
-  static FILE* logfile_;
-
   // The sampler used by the profiler and the sliding state window.
   static Ticker* ticker_;
 
@@ -242,10 +235,6 @@
   // of samples.
   static Profiler* profiler_;
 
-  // mutex_ is a Mutex used for enforcing exclusive
-  // access to the formatting buffer and the log file.
-  static Mutex* mutex_;
-
   // A stack of VM states.
   static VMState* current_state_;
 
@@ -258,7 +247,6 @@
 
   // Internal implementation classes with access to
   // private members.
-  friend class LogMessageBuilder;
   friend class EventLog;
   friend class TimeLog;
   friend class Profiler;
@@ -273,12 +261,12 @@
 // Class that extracts stack trace, used for profiling.
 class StackTracer BASE_EMBEDDED {
  public:
-  explicit StackTracer(unsigned int low_stack_bound)
+  explicit StackTracer(uintptr_t low_stack_bound)
       : low_stack_bound_(low_stack_bound) { }
   void Trace(TickSample* sample);
  private:
 
-  unsigned int low_stack_bound_;
+  uintptr_t low_stack_bound_;
 };
 
 
diff --git a/src/macro-assembler.h b/src/macro-assembler.h
index 2d93dac..116381b 100644
--- a/src/macro-assembler.h
+++ b/src/macro-assembler.h
@@ -28,7 +28,19 @@
 #ifndef V8_MACRO_ASSEMBLER_H_
 #define V8_MACRO_ASSEMBLER_H_
 
-#ifdef V8_ARCH_ARM
+#if V8_TARGET_ARCH_IA32
+#include "assembler.h"
+#include "ia32/assembler-ia32.h"
+#include "ia32/assembler-ia32-inl.h"
+#include "code.h"  // must be after assembler_*.h
+#include "ia32/macro-assembler-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "assembler.h"
+#include "x64/assembler-x64.h"
+#include "x64/assembler-x64-inl.h"
+#include "code.h"  // must be after assembler_*.h
+#include "x64/macro-assembler-x64.h"
+#elif V8_TARGET_ARCH_ARM
 #include "arm/constants-arm.h"
 #include "assembler.h"
 #include "arm/assembler-arm.h"
@@ -37,20 +49,4 @@
 #include "arm/macro-assembler-arm.h"
 #endif
 
-#ifdef V8_ARCH_X64
-#include "assembler.h"
-#include "x64/assembler-x64.h"
-#include "x64/assembler-x64-inl.h"
-#include "code.h"  // must be after assembler_*.h
-#include "x64/macro-assembler-x64.h"
-#endif
-
-#ifdef V8_ARCH_IA32
-#include "assembler.h"
-#include "ia32/assembler-ia32.h"
-#include "ia32/assembler-ia32-inl.h"
-#include "code.h"  // must be after assembler_*.h
-#include "ia32/macro-assembler-ia32.h"
-#endif
-
 #endif  // V8_MACRO_ASSEMBLER_H_
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 94cf315..48774ec 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -555,10 +555,8 @@
 }
 
 
-bool MarkCompactCollector::MustBeMarked(Object** p) {
-  // Check whether *p is a HeapObject pointer.
-  if (!(*p)->IsHeapObject()) return false;
-  return !HeapObject::cast(*p)->IsMarked();
+bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
+  return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked();
 }
 
 
@@ -756,19 +754,20 @@
   RootMarkingVisitor root_visitor;
   MarkRoots(&root_visitor);
 
-  // The objects reachable from the roots are marked black, unreachable
-  // objects are white.  Mark objects reachable from object groups with at
-  // least one marked object, and continue until no new objects are
-  // reachable from the object groups.
+  // The objects reachable from the roots are marked, yet unreachable
+  // objects are unmarked.  Mark objects reachable from object groups
+  // containing at least one marked object, and continue until no new
+  // objects are reachable from the object groups.
   ProcessObjectGroups(root_visitor.stack_visitor());
 
-  // The objects reachable from the roots or object groups are marked black,
-  // unreachable objects are white.  Process objects reachable only from
-  // weak global handles.
+  // The objects reachable from the roots or object groups are marked,
+  // yet unreachable objects are unmarked.  Mark objects reachable
+  // only from weak global handles.
   //
-  // First we mark weak pointers not yet reachable.
-  GlobalHandles::MarkWeakRoots(&MustBeMarked);
-  // Then we process weak pointers and process the transitive closure.
+  // First we identify nonlive weak handles and mark them as pending
+  // destruction.
+  GlobalHandles::IdentifyWeakHandles(&IsUnmarkedHeapObject);
+  // Then we mark the objects and process the transitive closure.
   GlobalHandles::IterateWeakRoots(&root_visitor);
   while (marking_stack.overflowed()) {
     RefillMarkingStack();
@@ -801,22 +800,21 @@
 
 
 #ifdef DEBUG
-void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj, int scale) {
-  ASSERT(scale == -1 || scale == 1);
-  live_bytes_ += obj->Size() * scale;
+void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
+  live_bytes_ += obj->Size();
   if (Heap::new_space()->Contains(obj)) {
-    live_young_objects_ += scale;
+    live_young_objects_++;
   } else if (Heap::map_space()->Contains(obj)) {
     ASSERT(obj->IsMap());
-    live_map_objects_ += scale;
+    live_map_objects_++;
   } else if (Heap::old_pointer_space()->Contains(obj)) {
-    live_old_pointer_objects_ += scale;
+    live_old_pointer_objects_++;
   } else if (Heap::old_data_space()->Contains(obj)) {
-    live_old_data_objects_ += scale;
+    live_old_data_objects_++;
   } else if (Heap::code_space()->Contains(obj)) {
-    live_code_objects_ += scale;
+    live_code_objects_++;
   } else if (Heap::lo_space()->Contains(obj)) {
-    live_lo_objects_ +=scale;
+    live_lo_objects_++;
   } else {
     UNREACHABLE();
   }
diff --git a/src/mark-compact.h b/src/mark-compact.h
index 432b8cc..bfa2c3c 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -142,7 +142,6 @@
 
   friend class RootMarkingVisitor;
   friend class MarkingVisitor;
-  friend class UnmarkingVisitor;
 
   // Marking operations for objects reachable from roots.
   static void MarkLiveObjects();
@@ -156,7 +155,7 @@
   static inline void SetMark(HeapObject* obj) {
     tracer_->increment_marked_count();
 #ifdef DEBUG
-    UpdateLiveObjectCount(obj, 1);
+    UpdateLiveObjectCount(obj);
 #endif
     obj->SetMark();
   }
@@ -203,14 +202,12 @@
   // flag on the marking stack.
   static void RefillMarkingStack();
 
-  // Callback function for telling whether the object *p must be marked.
-  static bool MustBeMarked(Object** p);
+  // Callback function for telling whether the object *p is an unmarked
+  // heap object.
+  static bool IsUnmarkedHeapObject(Object** p);
 
 #ifdef DEBUG
-  // The scale argument is positive 1 if we are marking an object and
-  // -1 if we are clearing the mark bit of an object that we didn't
-  // actually want marked.
-  static void UpdateLiveObjectCount(HeapObject* obj, int scale);
+  static void UpdateLiveObjectCount(HeapObject* obj);
 #endif
 
   // We sweep the large object space in the same way whether we are
diff --git a/src/math.js b/src/math.js
index 5b7c396..86d6dd1 100644
--- a/src/math.js
+++ b/src/math.js
@@ -164,7 +164,7 @@
 
   // Setup non-enumerable functions of the Math object and
   // set their names.
-  InstallFunctions($Math, DONT_ENUM, $Array(
+  InstallFunctionsOnHiddenPrototype($Math, DONT_ENUM, $Array(
     "random", MathRandom,
     "abs", MathAbs,
     "acos", MathAcos,
diff --git a/src/mirror-delay.js b/src/mirror-delay.js
index dc4d7eb..30f19f0 100644
--- a/src/mirror-delay.js
+++ b/src/mirror-delay.js
@@ -155,6 +155,7 @@
 const PROPERTY_TYPE = 'property';
 const FRAME_TYPE = 'frame';
 const SCRIPT_TYPE = 'script';
+const CONTEXT_TYPE = 'context';
 
 // Maximum length when sending strings through the JSON protocol.
 const kMaxProtocolStringLength = 80;
@@ -364,6 +365,15 @@
 
 
 /**
+ * Check whether the mirror reflects a context.
+ * @returns {boolean} True if the mirror reflects a context
+ */
+Mirror.prototype.isContext = function() {
+  return this instanceof ContextMirror;
+}
+
+
+/**
  * Allocate a handle id for this object.
  */
 Mirror.prototype.allocateHandle_ = function() {
@@ -756,6 +766,15 @@
 
 
 /**
+ * Returns the inferred name of the function.
+ * @return {string} Name of the function
+ */
+FunctionMirror.prototype.inferredName = function() {
+  return %FunctionGetInferredName(this.value_);
+};
+
+
+/**
  * Returns the source code for the function.
  * @return {string or undefined} The source code for the function. If the
  *     function is not resolved undefined will be returned.
@@ -857,6 +876,11 @@
 };
 
 
+UnresolvedFunctionMirror.prototype.inferredName = function() {
+  return undefined;
+};
+
+
 UnresolvedFunctionMirror.prototype.propertyNames = function(kind, limit) {
   return [];
 }
@@ -1547,6 +1571,7 @@
 function ScriptMirror(script) {
   Mirror.call(this, SCRIPT_TYPE);
   this.script_ = script;
+  this.context_ = new ContextMirror(script.context_data);
   this.allocateHandle_();
 }
 inherits(ScriptMirror, Mirror);
@@ -1608,6 +1633,11 @@
 }
 
 
+ScriptMirror.prototype.context = function() {
+  return this.context_;
+};
+
+
 ScriptMirror.prototype.toText = function() {
   var result = '';
   result += this.name();
@@ -1625,13 +1655,35 @@
 
 
 /**
+ * Mirror object for context.
+ * @param {Object} data The context data
+ * @constructor
+ * @extends Mirror
+ */
+function ContextMirror(data) {
+  Mirror.call(this, CONTEXT_TYPE);
+  this.data_ = data;
+  this.allocateHandle_();
+}
+inherits(ContextMirror, Mirror);
+
+
+ContextMirror.prototype.data = function() {
+  return this.data_;
+};
+
+
+/**
  * Returns a mirror serializer
  *
  * @param {boolean} details Set to true to include details
+ * @param {Object} options Options controlling the serialization
+ *     The following options can be set:
+ *       includeSource: include the full source of scripts
  * @returns {MirrorSerializer} mirror serializer
  */
-function MakeMirrorSerializer(details) {
-  return new JSONProtocolSerializer(details);
+function MakeMirrorSerializer(details, options) {
+  return new JSONProtocolSerializer(details, options);
 }
 
 
@@ -1641,8 +1693,9 @@
  *     serialized
  * @constructor
  */
-function JSONProtocolSerializer(details) {
+function JSONProtocolSerializer(details, options) {
   this.details_ = details;
+  this.options_ = options;
   this.mirrors_ = [ ];
 }
 
@@ -1694,6 +1747,11 @@
 }
 
 
+JSONProtocolSerializer.prototype.includeSource_ = function() {
+  return this.options_ && this.options_.includeSource;
+}
+
+
 JSONProtocolSerializer.prototype.add_ = function(mirror) {
   // If this mirror is already in the list just return.
   for (var i = 0; i < this.mirrors_.length; i++) {
@@ -1712,7 +1770,7 @@
   // If serializing a reference to a mirror just return the reference and add
   // the mirror to the referenced mirrors.
   if (reference &&
-      (mirror.isValue() || mirror.isScript())) {
+      (mirror.isValue() || mirror.isScript() || mirror.isContext())) {
     this.add_(mirror);
     return '{"ref":' + mirror.handle() + '}';
   }
@@ -1721,7 +1779,7 @@
   var content = new Array();
 
   // Add the mirror handle.
-  if (mirror.isValue() || mirror.isScript()) {
+  if (mirror.isValue() || mirror.isScript() || mirror.isContext()) {
     content.push(MakeJSONPair_('handle', NumberToJSON_(mirror.handle())));
   }
 
@@ -1787,8 +1845,29 @@
                                  NumberToJSON_(mirror.columnOffset())));
       content.push(MakeJSONPair_('lineCount',
                                  NumberToJSON_(mirror.lineCount())));
+      if (mirror.data()) {
+        content.push(MakeJSONPair_('data', JSON.stringify(mirror.data())));
+      }
+      if (this.includeSource_()) {
+        content.push(MakeJSONPair_('source',
+                                   StringToJSON_(mirror.source())));
+      } else {
+        var sourceStart = mirror.source().substring(0, 80);
+        content.push(MakeJSONPair_('sourceStart',
+                                   StringToJSON_(sourceStart)));
+      }
+      content.push(MakeJSONPair_('sourceLength',
+                                 NumberToJSON_(mirror.source().length)));
       content.push(MakeJSONPair_('scriptType',
                                  NumberToJSON_(mirror.scriptType())));
+      if (mirror.context()) {
+        content.push(MakeJSONPair_('context',
+                                   this.serializeReference(mirror.context())));
+      }
+      break;
+
+    case CONTEXT_TYPE:
+      content.push(MakeJSONPair_('data', JSON.stringify(mirror.data())));
       break;
   }
 
@@ -1835,6 +1914,10 @@
   if (mirror.isFunction()) {
     // Add function specific properties.
     content.push(MakeJSONPair_('name', StringToJSON_(mirror.name())));
+    if (!IS_UNDEFINED(mirror.inferredName())) {
+      content.push(MakeJSONPair_('inferredName',
+                                 StringToJSON_(mirror.inferredName())));
+    }
     content.push(MakeJSONPair_('resolved', BooleanToJSON_(mirror.resolved())));
     if (mirror.resolved()) {
       content.push(MakeJSONPair_('source', StringToJSON_(mirror.source())));
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 58e4f7c..7821178 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -689,6 +689,14 @@
 
 Smi* Smi::FromInt(int value) {
   ASSERT(Smi::IsValid(value));
+  intptr_t tagged_value =
+      (static_cast<intptr_t>(value) << kSmiTagSize) | kSmiTag;
+  return reinterpret_cast<Smi*>(tagged_value);
+}
+
+
+Smi* Smi::FromIntptr(intptr_t value) {
+  ASSERT(Smi::IsValid(value));
   return reinterpret_cast<Smi*>((value << kSmiTagSize) | kSmiTag);
 }
 
@@ -784,6 +792,18 @@
 }
 
 
+bool Smi::IsIntptrValid(intptr_t value) {
+#ifdef DEBUG
+  bool in_range = (value >= kMinValue) && (value <= kMaxValue);
+#endif
+  // See Smi::IsValid(int) for description.
+  bool result =
+      ((static_cast<uintptr_t>(value) + 0x40000000U) < 0x80000000U);
+  ASSERT(result == in_range);
+  return result;
+}
+
+
 MapWord MapWord::FromMap(Map* map) {
   return MapWord(reinterpret_cast<uintptr_t>(map));
 }
@@ -2071,6 +2091,7 @@
 ACCESSORS(Script, line_offset, Smi, kLineOffsetOffset)
 ACCESSORS(Script, column_offset, Smi, kColumnOffsetOffset)
 ACCESSORS(Script, data, Object, kDataOffset)
+ACCESSORS(Script, context_data, Object, kContextOffset)
 ACCESSORS(Script, wrapper, Proxy, kWrapperOffset)
 ACCESSORS(Script, type, Smi, kTypeOffset)
 ACCESSORS(Script, line_ends, Object, kLineEndsOffset)
diff --git a/src/objects.cc b/src/objects.cc
index 80977c1..9a7f7aa 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -3305,13 +3305,6 @@
   }
   ASSERT(string_tag == kExternalStringTag);
   ExternalTwoByteString* ext = ExternalTwoByteString::cast(string);
-  // This is a workaround for Chromium bug 9746: http://crbug.com/9746
-  // For external strings with a deleted resource we return a special
-  // Vector which will not compare to any string when doing SymbolTable
-  // lookups.
-  if (ext->resource() == NULL) {
-    return Vector<const uc16>(NULL, length);
-  }
   const uc16* start =
       reinterpret_cast<const uc16*>(ext->resource()->data());
   return Vector<const uc16>(start + offset, length);
@@ -4096,7 +4089,7 @@
   const Char* pa = a.start();
   const Char* pb = b.start();
   int i = 0;
-#ifndef CAN_READ_UNALIGNED
+#ifndef V8_HOST_CAN_READ_UNALIGNED
   // If this architecture isn't comfortable reading unaligned ints
   // then we have to check that the strings are aligned before
   // comparing them blockwise.
@@ -4115,7 +4108,7 @@
         return false;
       }
     }
-#ifndef CAN_READ_UNALIGNED
+#ifndef V8_HOST_CAN_READ_UNALIGNED
   }
 #endif
   // Compare the remaining characters that didn't fit into a block.
@@ -4128,18 +4121,6 @@
 }
 
 
-// This is a workaround for Chromium bug 9746: http://crbug.com/9746
-// Returns true if this Vector matches the problem exposed in the bug.
-template <typename T>
-static bool CheckVectorForBug9746(Vector<T> vec) {
-  // The problem is that somehow external string entries in the symbol
-  // table can have their resources collected while they are still in the
-  // table. This should not happen according to the test in the function
-  // DisposeExternalString in api.cc, but we have evidence that it does.
-  return (vec.start() == NULL) ? true : false;
-}
-
-
 static StringInputBuffer string_compare_buffer_b;
 
 
@@ -4150,9 +4131,7 @@
       VectorIterator<char> ib(b->ToAsciiVector());
       return CompareStringContents(ia, &ib);
     } else {
-      Vector<const uc16> vb = b->ToUC16Vector();
-      if (CheckVectorForBug9746(vb)) return false;
-      VectorIterator<uc16> ib(vb);
+      VectorIterator<uc16> ib(b->ToUC16Vector());
       return CompareStringContents(ia, &ib);
     }
   } else {
@@ -4194,9 +4173,7 @@
           return CompareRawStringContents(vec1, vec2);
         } else {
           VectorIterator<char> buf1(vec1);
-          Vector<const uc16> vec2 = other->ToUC16Vector();
-          if (CheckVectorForBug9746(vec2)) return false;
-          VectorIterator<uc16> ib(vec2);
+          VectorIterator<uc16> ib(other->ToUC16Vector());
           return CompareStringContents(&buf1, &ib);
         }
       } else {
@@ -4206,15 +4183,13 @@
       }
     } else {
       Vector<const uc16> vec1 = this->ToUC16Vector();
-      if (CheckVectorForBug9746(vec1)) return false;
       if (other->IsFlat()) {
         if (other->IsAsciiRepresentation()) {
           VectorIterator<uc16> buf1(vec1);
           VectorIterator<char> ib(other->ToAsciiVector());
           return CompareStringContents(&buf1, &ib);
         } else {
-          Vector<const uc16> vec2 = other->ToUC16Vector();
-          if (CheckVectorForBug9746(vec2)) return false;
+          Vector<const uc16> vec2(other->ToUC16Vector());
           return CompareRawStringContents(vec1, vec2);
         }
       } else {
@@ -4259,18 +4234,6 @@
 
 
 bool String::IsEqualTo(Vector<const char> str) {
-  // This is a workaround for Chromium bug 9746: http://crbug.com/9746
-  // The problem is that somehow external string entries in the symbol
-  // table can have their resources deleted while they are still in the
-  // table. This should not happen according to the test in the function
-  // DisposeExternalString in api.cc but we have evidence that it does.
-  // Thus we add this bailout here.
-  StringShape shape(this);
-  if (shape.IsExternalTwoByte()) {
-    ExternalTwoByteString* ext = ExternalTwoByteString::cast(this);
-    if (ext->resource() == NULL) return false;
-  }
-
   int slen = length();
   Access<Scanner::Utf8Decoder> decoder(Scanner::utf8_decoder());
   decoder->Reset(str.start(), str.length());
@@ -5198,7 +5161,8 @@
   }
 
   // Only attempt to find the hidden properties in the local object and not
-  // in the prototype chain.
+  // in the prototype chain.  Note that HasLocalProperty() can cause a GC in
+  // the general case, but in this case we know it won't hit an interceptor.
   if (!this->HasLocalProperty(key)) {
     // Hidden properties object not found. Allocate a new hidden properties
     // object if requested. Otherwise return the undefined value.
diff --git a/src/objects.h b/src/objects.h
index b5b7cbe..3e132ff 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -782,9 +782,13 @@
   // Convert a value to a Smi object.
   static inline Smi* FromInt(int value);
 
+  static inline Smi* FromIntptr(intptr_t value);
+
   // Returns whether value can be represented in a Smi.
   static inline bool IsValid(int value);
 
+  static inline bool IsIntptrValid(intptr_t);
+
   // Casting.
   static inline Smi* cast(Object* object);
 
@@ -1258,6 +1262,7 @@
     return GetPropertyAttribute(name) != ABSENT;
   }
 
+  // Can cause a GC if it hits an interceptor.
   bool HasLocalProperty(String* name) {
     return GetLocalPropertyAttribute(name) != ABSENT;
   }
@@ -2638,6 +2643,9 @@
   // [data]: additional data associated with this script.
   DECL_ACCESSORS(data, Object)
 
+  // [context_data]: context data for the context this script was compiled in.
+  DECL_ACCESSORS(context_data, Object)
+
   // [wrapper]: the wrapper cache.
   DECL_ACCESSORS(wrapper, Proxy)
 
@@ -2659,7 +2667,8 @@
   static const int kLineOffsetOffset = kNameOffset + kPointerSize;
   static const int kColumnOffsetOffset = kLineOffsetOffset + kPointerSize;
   static const int kDataOffset = kColumnOffsetOffset + kPointerSize;
-  static const int kWrapperOffset = kDataOffset + kPointerSize;
+  static const int kContextOffset = kDataOffset + kPointerSize;
+  static const int kWrapperOffset = kContextOffset + kPointerSize;
   static const int kTypeOffset = kWrapperOffset + kPointerSize;
   static const int kLineEndsOffset = kTypeOffset + kPointerSize;
   static const int kIdOffset = kLineEndsOffset + kPointerSize;
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index c735ceb..c02eebc 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -600,14 +600,18 @@
     // Extracting the sample from the context is extremely machine dependent.
     ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
     mcontext_t& mcontext = ucontext->uc_mcontext;
-#if defined(__arm__) || defined(__thumb__)
-    sample.pc = mcontext.gregs[R15];
-    sample.sp = mcontext.gregs[R13];
-    sample.fp = mcontext.gregs[R11];
-#else
+#if V8_HOST_ARCH_IA32
     sample.pc = mcontext.gregs[REG_EIP];
     sample.sp = mcontext.gregs[REG_ESP];
     sample.fp = mcontext.gregs[REG_EBP];
+#elif V8_HOST_ARCH_X64
+    sample.pc = mcontext.gregs[REG_RIP];
+    sample.sp = mcontext.gregs[REG_RSP];
+    sample.fp = mcontext.gregs[REG_RBP];
+#elif V8_HOST_ARCH_ARM
+    sample.pc = mcontext.gregs[R15];
+    sample.sp = mcontext.gregs[R13];
+    sample.fp = mcontext.gregs[R11];
 #endif
   }
 
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index 597a217..6c4e67a 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -1775,9 +1775,16 @@
         context.ContextFlags = CONTEXT_FULL;
         GetThreadContext(profiled_thread_, &context);
         // Invoke tick handler with program counter and stack pointer.
+#if V8_HOST_ARCH_X64
+        UNIMPLEMENTED();
+        sample.pc = context.Rip;
+        sample.sp = context.Rsp;
+        sample.fp = context.Rbp;
+#else
         sample.pc = context.Eip;
         sample.sp = context.Esp;
         sample.fp = context.Ebp;
+#endif
       }
 
       // We always sample the VM state.
diff --git a/src/platform.h b/src/platform.h
index b70095b..e23abfc 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -350,7 +350,16 @@
   static LocalStorageKey CreateThreadLocalKey();
   static void DeleteThreadLocalKey(LocalStorageKey key);
   static void* GetThreadLocal(LocalStorageKey key);
+  static int GetThreadLocalInt(LocalStorageKey key) {
+    return static_cast<int>(reinterpret_cast<intptr_t>(GetThreadLocal(key)));
+  }
   static void SetThreadLocal(LocalStorageKey key, void* value);
+  static void SetThreadLocalInt(LocalStorageKey key, int value) {
+    SetThreadLocal(key, reinterpret_cast<void*>(static_cast<intptr_t>(value)));
+  }
+  static bool HasThreadLocal(LocalStorageKey key) {
+    return GetThreadLocal(key) != NULL;
+  }
 
   // A hint to the scheduler to let another thread run.
   static void YieldCPU();
@@ -483,9 +492,9 @@
 class TickSample {
  public:
   TickSample() : pc(0), sp(0), fp(0), state(OTHER) {}
-  unsigned int pc;  // Instruction pointer.
-  unsigned int sp;  // Stack pointer.
-  unsigned int fp;  // Frame pointer.
+  uintptr_t pc;  // Instruction pointer.
+  uintptr_t sp;  // Stack pointer.
+  uintptr_t fp;  // Frame pointer.
   StateTag state;   // The state of the VM.
   static const int kMaxFramesCount = 100;
   EmbeddedVector<Address, kMaxFramesCount> stack;  // Call stack.
diff --git a/src/register-allocator.h b/src/register-allocator.h
index dcc2eb7..f79d6cf 100644
--- a/src/register-allocator.h
+++ b/src/register-allocator.h
@@ -275,6 +275,11 @@
   // Unuse all the reserved registers in a register file.
   static void UnuseReserved(RegisterFile* register_file);
 
+  // True if the register is reserved by the code generator, false if it
+  // can be freely used by the allocator.
+  static bool IsReserved(int reg_code);
+  static bool IsReserved(Register reg) { return IsReserved(reg.code()); }
+
   // Predicates and accessors for the registers' reference counts.
   bool is_used(int reg_code) const { return registers_.is_used(reg_code); }
   bool is_used(Register reg) const { return registers_.is_used(reg.code()); }
diff --git a/src/runtime.cc b/src/runtime.cc
index 3a738df..bf6286f 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -474,6 +474,42 @@
 }
 
 
+// Inserts an object as the hidden prototype of another object.
+static Object* Runtime_SetHiddenPrototype(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 2);
+  CONVERT_CHECKED(JSObject, jsobject, args[0]);
+  CONVERT_CHECKED(JSObject, proto, args[1]);
+
+  // Sanity checks.  The old prototype (that we are replacing) could
+  // theoretically be null, but if it is not null then check that we
+  // didn't already install a hidden prototype here.
+  RUNTIME_ASSERT(!jsobject->GetPrototype()->IsHeapObject() ||
+    !HeapObject::cast(jsobject->GetPrototype())->map()->is_hidden_prototype());
+  RUNTIME_ASSERT(!proto->map()->is_hidden_prototype());
+
+  // Allocate up front before we start altering state in case we get a GC.
+  Object* map_or_failure = proto->map()->CopyDropTransitions();
+  if (map_or_failure->IsFailure()) return map_or_failure;
+  Map* new_proto_map = Map::cast(map_or_failure);
+
+  map_or_failure = jsobject->map()->CopyDropTransitions();
+  if (map_or_failure->IsFailure()) return map_or_failure;
+  Map* new_map = Map::cast(map_or_failure);
+
+  // Set proto's prototype to be the old prototype of the object.
+  new_proto_map->set_prototype(jsobject->GetPrototype());
+  proto->set_map(new_proto_map);
+  new_proto_map->set_is_hidden_prototype();
+
+  // Set the object's prototype to proto.
+  new_map->set_prototype(proto);
+  jsobject->set_map(new_map);
+
+  return Heap::undefined_value();
+}
+
+
 static Object* Runtime_IsConstructCall(Arguments args) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 0);
@@ -2796,20 +2832,42 @@
 }
 
 
+static Object* HasLocalPropertyImplementation(Handle<JSObject> object,
+                                              Handle<String> key) {
+  if (object->HasLocalProperty(*key)) return Heap::true_value();
+  // Handle hidden prototypes.  If there's a hidden prototype above this thing
+  // then we have to check it for properties, because they are supposed to
+  // look like they are on this object.
+  Handle<Object> proto(object->GetPrototype());
+  if (proto->IsJSObject() &&
+      Handle<JSObject>::cast(proto)->map()->is_hidden_prototype()) {
+    return HasLocalPropertyImplementation(Handle<JSObject>::cast(proto), key);
+  }
+  return Heap::false_value();
+}
+
+
 static Object* Runtime_HasLocalProperty(Arguments args) {
   NoHandleAllocation ha;
   ASSERT(args.length() == 2);
   CONVERT_CHECKED(String, key, args[1]);
 
+  Object* obj = args[0];
   // Only JS objects can have properties.
-  if (args[0]->IsJSObject()) {
-    JSObject* object = JSObject::cast(args[0]);
-    if (object->HasLocalProperty(key)) return Heap::true_value();
-  } else if (args[0]->IsString()) {
+  if (obj->IsJSObject()) {
+    JSObject* object = JSObject::cast(obj);
+    // Fast case - no interceptors.
+    if (object->HasRealNamedProperty(key)) return Heap::true_value();
+    // Slow case.  Either it's not there or we have an interceptor.  We should
+    // have handles for this kind of deal.
+    HandleScope scope;
+    return HasLocalPropertyImplementation(Handle<JSObject>(object),
+                                          Handle<String>(key));
+  } else if (obj->IsString()) {
     // Well, there is one exception:  Handle [] on strings.
     uint32_t index;
     if (key->AsArrayIndex(&index)) {
-      String* string = String::cast(args[0]);
+      String* string = String::cast(obj);
       if (index < static_cast<uint32_t>(string->length()))
         return Heap::true_value();
     }
@@ -6860,6 +6918,15 @@
 #endif  // DEBUG
   return Heap::undefined_value();
 }
+
+
+static Object* Runtime_FunctionGetInferredName(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 1);
+
+  CONVERT_CHECKED(JSFunction, f, args[0]);
+  return f->shared()->inferred_name();
+}
 #endif  // ENABLE_DEBUGGER_SUPPORT
 
 
diff --git a/src/runtime.h b/src/runtime.h
index 9430073..2041295 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -53,6 +53,7 @@
   F(ToSlowProperties, 1) \
   \
   F(IsInPrototypeChain, 2) \
+  F(SetHiddenPrototype, 2) \
   \
   F(IsConstructCall, 0) \
   \
@@ -298,7 +299,8 @@
   F(DebugConstructedBy, 2) \
   F(DebugGetPrototype, 1) \
   F(SystemBreak, 0) \
-  F(FunctionGetAssemblerCode, 1)
+  F(FunctionGetAssemblerCode, 1) \
+  F(FunctionGetInferredName, 1)
 #else
 #define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F)
 #endif
diff --git a/src/serialize.cc b/src/serialize.cc
index e15c003..62287bc 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -78,8 +78,8 @@
 
 
 static inline AllocationSpace GetSpace(Address addr) {
-  const int encoded = reinterpret_cast<int>(addr);
-  int space_number = ((encoded >> kSpaceShift) & kSpaceMask);
+  const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
+  int space_number = (static_cast<int>(encoded >> kSpaceShift) & kSpaceMask);
   if (space_number == kLOSpaceExecutable) space_number = LO_SPACE;
   else if (space_number == kLOSpacePointer) space_number = LO_SPACE;
   return static_cast<AllocationSpace>(space_number);
@@ -87,43 +87,45 @@
 
 
 static inline bool IsLargeExecutableObject(Address addr) {
-  const int encoded = reinterpret_cast<int>(addr);
-  const int space_number = ((encoded >> kSpaceShift) & kSpaceMask);
-  if (space_number == kLOSpaceExecutable) return true;
-  return false;
+  const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
+  const int space_number =
+      (static_cast<int>(encoded >> kSpaceShift) & kSpaceMask);
+  return (space_number == kLOSpaceExecutable);
 }
 
 
 static inline bool IsLargeFixedArray(Address addr) {
-  const int encoded = reinterpret_cast<int>(addr);
-  const int space_number = ((encoded >> kSpaceShift) & kSpaceMask);
-  if (space_number == kLOSpacePointer) return true;
-  return false;
+  const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
+  const int space_number =
+      (static_cast<int>(encoded >> kSpaceShift) & kSpaceMask);
+  return (space_number == kLOSpacePointer);
 }
 
 
 static inline int PageIndex(Address addr) {
-  const int encoded = reinterpret_cast<int>(addr);
-  return (encoded >> kPageShift) & kPageMask;
+  const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
+  return static_cast<int>(encoded >> kPageShift) & kPageMask;
 }
 
 
 static inline int PageOffset(Address addr) {
-  const int encoded = reinterpret_cast<int>(addr);
-  return ((encoded >> kOffsetShift) & kOffsetMask) << kObjectAlignmentBits;
+  const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
+  const int offset = static_cast<int>(encoded >> kOffsetShift) & kOffsetMask;
+  return offset << kObjectAlignmentBits;
 }
 
 
 static inline int NewSpaceOffset(Address addr) {
-  const int encoded = reinterpret_cast<int>(addr);
-  return ((encoded >> kPageAndOffsetShift) & kPageAndOffsetMask) <<
-      kObjectAlignmentBits;
+  const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
+  const int page_offset =
+      static_cast<int>(encoded >> kPageAndOffsetShift) & kPageAndOffsetMask;
+  return page_offset << kObjectAlignmentBits;
 }
 
 
 static inline int LargeObjectIndex(Address addr) {
-  const int encoded = reinterpret_cast<int>(addr);
-  return (encoded >> kPageAndOffsetShift) & kPageAndOffsetMask;
+  const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
+  return static_cast<int>(encoded >> kPageAndOffsetShift) & kPageAndOffsetMask;
 }
 
 
@@ -728,7 +730,9 @@
   if (key == NULL) return -1;
   HashMap::Entry* entry =
       const_cast<HashMap &>(encodings_).Lookup(key, Hash(key), false);
-  return entry == NULL ? -1 : reinterpret_cast<int>(entry->value);
+  return entry == NULL
+      ? -1
+      : static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
 }
 
 
@@ -794,6 +798,10 @@
     InsertInt(i, len_);
   }
 
+  void PutAddress(Address p) {
+    PutBytes(reinterpret_cast<byte*>(&p), sizeof(p));
+  }
+
   void PutBytes(const byte* a, int size) {
     InsertBytes(a, len_, size);
   }
@@ -914,7 +922,8 @@
 
 // Helper functions for a map of encoded heap object addresses.
 static uint32_t HeapObjectHash(HeapObject* key) {
-  return reinterpret_cast<uint32_t>(key) >> 2;
+  uint32_t low32bits = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key));
+  return low32bits >> 2;
 }
 
 
@@ -1153,7 +1162,7 @@
 
 void Serializer::PutEncodedAddress(Address addr) {
   writer_->PutC('P');
-  writer_->PutInt(reinterpret_cast<int>(addr));
+  writer_->PutAddress(addr);
 }
 
 
@@ -1336,7 +1345,7 @@
         *p = GetObject();  // embedded object
       } else {
         ASSERT(c == 'P');  // pointer to previously serialized object
-        *p = Resolve(reinterpret_cast<Address>(reader_.GetInt()));
+        *p = Resolve(reader_.GetAddress());
       }
     } else {
       // A pointer internal to a HeapObject that we've already
@@ -1350,7 +1359,7 @@
 
 void Deserializer::VisitExternalReferences(Address* start, Address* end) {
   for (Address* p = start; p < end; ++p) {
-    uint32_t code = reinterpret_cast<uint32_t>(*p);
+    uint32_t code = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*p));
     *p = reference_decoder_->Decode(code);
   }
 }
@@ -1476,7 +1485,7 @@
 
 Address Deserializer::GetEncodedAddress() {
   reader_.ExpectC('P');
-  return reinterpret_cast<Address>(reader_.GetInt());
+  return reader_.GetAddress();
 }
 
 
diff --git a/src/serialize.h b/src/serialize.h
index ce7d947..f6594ac 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -70,7 +70,7 @@
  private:
   HashMap encodings_;
   static uint32_t Hash(Address key) {
-    return reinterpret_cast<uint32_t>(key) >> 2;
+    return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key) >> 2);
   }
 
   int IndexOf(Address key) const;
@@ -231,6 +231,12 @@
     return result;
   }
 
+  Address GetAddress() {
+    Address result;
+    GetBytes(reinterpret_cast<Address>(&result), sizeof(result));
+    return result;
+  }
+
   void GetBytes(Address a, int size) {
     ASSERT(str_ + size <= end_);
     memcpy(a, str_, size);
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index d7cddb4..3973658 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -64,15 +64,16 @@
 // PageIterator
 
 bool PageIterator::has_next() {
-  return cur_page_ != stop_page_;
+  return prev_page_ != stop_page_;
 }
 
 
 Page* PageIterator::next() {
   ASSERT(has_next());
-  Page* result = cur_page_;
-  cur_page_ = cur_page_->next_page();
-  return result;
+  prev_page_ = (prev_page_ == NULL)
+               ? space_->first_page_
+               : prev_page_->next_page();
+  return prev_page_;
 }
 
 
diff --git a/src/spaces.cc b/src/spaces.cc
index ea40d52..f15af9e 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -111,17 +111,17 @@
 // -----------------------------------------------------------------------------
 // PageIterator
 
-PageIterator::PageIterator(PagedSpace* space, Mode mode) {
-  cur_page_ = space->first_page_;
+PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
+  prev_page_ = NULL;
   switch (mode) {
     case PAGES_IN_USE:
-      stop_page_ = space->AllocationTopPage()->next_page();
+      stop_page_ = space->AllocationTopPage();
       break;
     case PAGES_USED_BY_MC:
-      stop_page_ = space->MCRelocationTopPage()->next_page();
+      stop_page_ = space->MCRelocationTopPage();
       break;
     case ALL_PAGES:
-      stop_page_ = Page::FromAddress(NULL);
+      stop_page_ = space->last_page_;
       break;
     default:
       UNREACHABLE();
@@ -496,8 +496,11 @@
   accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
   ASSERT(Capacity() <= max_capacity_);
 
+  // Sequentially initialize remembered sets in the newly allocated
+  // pages and cache the current last page in the space.
   for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
     p->ClearRSet();
+    last_page_ = p;
   }
 
   // Use first_page_ for allocation.
@@ -676,9 +679,11 @@
 
   MemoryAllocator::SetNextPage(last_page, p);
 
-  // Clear remembered set of new pages.
+  // Sequentially clear remembered set of new pages and cache the
+  // new last page in the space.
   while (p->is_valid()) {
     p->ClearRSet();
+    last_page_ = p;
     p = p->next_page();
   }
 
@@ -723,10 +728,12 @@
   Page* p = MemoryAllocator::FreePages(last_page_to_keep->next_page());
   MemoryAllocator::SetNextPage(last_page_to_keep, p);
 
-  // Since pages are only freed in whole chunks, we may have kept more than
-  // pages_to_keep.
+  // Since pages are only freed in whole chunks, we may have kept more
+  // than pages_to_keep.  Count the extra pages and cache the new last
+  // page in the space.
   while (p->is_valid()) {
     pages_to_keep++;
+    last_page_ = p;
     p = p->next_page();
   }
 
@@ -811,7 +818,7 @@
   start_ = start;
   address_mask_ = ~(size - 1);
   object_mask_ = address_mask_ | kHeapObjectTag;
-  object_expected_ = reinterpret_cast<uint32_t>(start) | kHeapObjectTag;
+  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
 
   allocation_info_.top = to_space_.low();
   allocation_info_.limit = to_space_.high();
@@ -970,7 +977,7 @@
   start_ = start;
   address_mask_ = ~(maximum_capacity - 1);
   object_mask_ = address_mask_ | kHeapObjectTag;
-  object_expected_ = reinterpret_cast<uint32_t>(start) | kHeapObjectTag;
+  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
 
   age_mark_ = start_;
   return true;
@@ -1890,7 +1897,7 @@
 
   // If the range starts on on odd numbered word (eg, for large object extra
   // remembered set ranges), print some spaces.
-  if ((reinterpret_cast<uint32_t>(start) / kIntSize) % 2 == 1) {
+  if ((reinterpret_cast<uintptr_t>(start) / kIntSize) % 2 == 1) {
     PrintF("                                    ");
   }
 
@@ -1929,7 +1936,7 @@
     }
 
     // Print a newline after every odd numbered word, otherwise a space.
-    if ((reinterpret_cast<uint32_t>(rset_address) / kIntSize) % 2 == 1) {
+    if ((reinterpret_cast<uintptr_t>(rset_address) / kIntSize) % 2 == 1) {
       PrintF("\n");
     } else {
       PrintF(" ");
diff --git a/src/spaces.h b/src/spaces.h
index 843981b..e8504a4 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -511,11 +511,22 @@
 //
 // A HeapObjectIterator iterates objects from a given address to the
 // top of a space. The given address must be below the current
-// allocation pointer (space top). If the space top changes during
-// iteration (because of allocating new objects), the iterator does
-// not iterate new objects. The caller function must create a new
-// iterator starting from the old top in order to visit these new
-// objects. Heap::Scavenage() is such an example.
+// allocation pointer (space top). There are some caveats.
+//
+// (1) If the space top changes upward during iteration (because of
+//     allocating new objects), the iterator does not iterate objects
+//     above the original space top. The caller must create a new
+//     iterator starting from the old top in order to visit these new
+//     objects.
+//
+// (2) If new objects are allocated below the original allocation top
+//     (e.g., free-list allocation in paged spaces), the new objects
+//     may or may not be iterated depending on their position with
+//     respect to the current point of iteration.
+//
+// (3) The space top should not change downward during iteration,
+//     otherwise the iterator will return not-necessarily-valid
+//     objects.
 
 class HeapObjectIterator: public ObjectIterator {
  public:
@@ -559,17 +570,35 @@
 
 
 // -----------------------------------------------------------------------------
-// A PageIterator iterates pages in a space.
+// A PageIterator iterates the pages in a paged space.
 //
 // The PageIterator class provides three modes for iterating pages in a space:
-//   PAGES_IN_USE iterates pages that are in use by the allocator;
-//   PAGES_USED_BY_GC iterates pages that hold relocated objects during a
-//                    mark-compact collection;
+//   PAGES_IN_USE iterates pages containing allocated objects.
+//   PAGES_USED_BY_MC iterates pages that hold relocated objects during a
+//                    mark-compact collection.
 //   ALL_PAGES iterates all pages in the space.
+//
+// There are some caveats.
+//
+// (1) If the space expands during iteration, new pages will not be
+//     returned by the iterator in any mode.
+//
+// (2) If new objects are allocated during iteration, they will appear
+//     in pages returned by the iterator.  Allocation may cause the
+//     allocation pointer or MC allocation pointer in the last page to
+//     change between constructing the iterator and iterating the last
+//     page.
+//
+// (3) The space should not shrink during iteration, otherwise the
+//     iterator will return deallocated pages.
 
 class PageIterator BASE_EMBEDDED {
  public:
-  enum Mode {PAGES_IN_USE, PAGES_USED_BY_MC, ALL_PAGES};
+  enum Mode {
+    PAGES_IN_USE,
+    PAGES_USED_BY_MC,
+    ALL_PAGES
+  };
 
   PageIterator(PagedSpace* space, Mode mode);
 
@@ -577,8 +606,9 @@
   inline Page* next();
 
  private:
-  Page* cur_page_;  // next page to return
-  Page* stop_page_;  // page where to stop
+  PagedSpace* space_;
+  Page* prev_page_;  // Previous page returned.
+  Page* stop_page_;  // Page to stop at (last page returned by the iterator).
 };
 
 
@@ -809,6 +839,10 @@
   // The first page in this space.
   Page* first_page_;
 
+  // The last page in this space.  Initially set in Setup, updated in
+  // Expand and Shrink.
+  Page* last_page_;
+
   // Normal allocation information.
   AllocationInfo allocation_info_;
 
@@ -1183,9 +1217,9 @@
 
   // Start address and bit mask for containment testing.
   Address start_;
-  uint32_t address_mask_;
-  uint32_t object_mask_;
-  uint32_t object_expected_;
+  uintptr_t address_mask_;
+  uintptr_t object_mask_;
+  uintptr_t object_expected_;
 
   // Allocation pointer and limit for normal allocation and allocation during
   // mark-compact collection.
diff --git a/src/string-stream.h b/src/string-stream.h
index 901f376..fa20064 100644
--- a/src/string-stream.h
+++ b/src/string-stream.h
@@ -72,13 +72,36 @@
 
 class FmtElm {
  public:
-  FmtElm(int value) : type_(INT) { data_.u_int_ = value; }  // NOLINT
-  explicit FmtElm(double value) : type_(DOUBLE) { data_.u_double_ = value; }  // NOLINT
-  FmtElm(const char* value) : type_(C_STR) { data_.u_c_str_ = value; }  // NOLINT
-  FmtElm(const Vector<const uc16>& value) : type_(LC_STR) { data_.u_lc_str_ = &value; } // NOLINT
-  FmtElm(Object* value) : type_(OBJ) { data_.u_obj_ = value; }  // NOLINT
-  FmtElm(Handle<Object> value) : type_(HANDLE) { data_.u_handle_ = value.location(); }  // NOLINT
-  FmtElm(void* value) : type_(INT) { data_.u_int_ = reinterpret_cast<int>(value); }  // NOLINT
+  FmtElm(int value) : type_(INT) {  // NOLINT
+    data_.u_int_ = value;
+  }
+  explicit FmtElm(double value) : type_(DOUBLE) {
+    data_.u_double_ = value;
+  }
+  FmtElm(const char* value) : type_(C_STR) {  // NOLINT
+    data_.u_c_str_ = value;
+  }
+  FmtElm(const Vector<const uc16>& value) : type_(LC_STR) {  // NOLINT
+    data_.u_lc_str_ = &value;
+  }
+  FmtElm(Object* value) : type_(OBJ) {  // NOLINT
+    data_.u_obj_ = value;
+  }
+  FmtElm(Handle<Object> value) : type_(HANDLE) {  // NOLINT
+    data_.u_handle_ = value.location();
+  }
+  FmtElm(void* value) : type_(INT) {  // NOLINT
+#if V8_HOST_ARCH_64_BIT
+    // TODO(x64): FmtElm needs to treat pointers as pointers, and not as
+    // ints.  This will require adding a pointer type, etc.  For now just
+    // hack it and truncate the pointer.
+    // http://code.google.com/p/v8/issues/detail?id=335
+    data_.u_int_ = 0;
+    UNIMPLEMENTED();
+#else
+    data_.u_int_ = reinterpret_cast<int>(value);
+#endif
+  }
  private:
   friend class StringStream;
   enum Type { INT, DOUBLE, C_STR, LC_STR, OBJ, HANDLE };
diff --git a/src/string.js b/src/string.js
index c7a838e..0bcabc9 100644
--- a/src/string.js
+++ b/src/string.js
@@ -831,7 +831,7 @@
 
 
   // Setup the non-enumerable functions on the String prototype object.
-  InstallFunctions($String.prototype, DONT_ENUM, $Array(
+  InstallFunctionsOnHiddenPrototype($String.prototype, DONT_ENUM, $Array(
     "valueOf", StringValueOf,
     "toString", StringToString,
     "charAt", StringCharAt,
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 824f4ff..369b15d 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -203,14 +203,21 @@
     // Compute the hash of the name (use entire length field).
     ASSERT(name->HasHashCode());
     uint32_t field = name->length_field();
+    // Using only the low bits in 64-bit mode is unlikely to increase the
+    // risk of collision even if the heap is spread over an area larger than
+    // 4Gb (and not at all if it isn't).
+    uint32_t map_low32bits =
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
     // Base the offset on a simple combination of name, flags, and map.
-    uint32_t key = (reinterpret_cast<uint32_t>(map) + field) ^ flags;
+    uint32_t key = (map_low32bits + field) ^ flags;
     return key & ((kPrimaryTableSize - 1) << kHeapObjectTagSize);
   }
 
   static int SecondaryOffset(String* name, Code::Flags flags, int seed) {
     // Use the seed from the primary cache in the secondary cache.
-    uint32_t key = seed - reinterpret_cast<uint32_t>(name) + flags;
+    uint32_t string_low32bits =
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
+    uint32_t key = seed - string_low32bits + flags;
     return key & ((kSecondaryTableSize - 1) << kHeapObjectTagSize);
   }
 
diff --git a/src/utils.h b/src/utils.h
index e008c85..0febe4a 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -527,7 +527,7 @@
 template <typename sourcechar, typename sinkchar>
 static inline void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
   sinkchar* limit = dest + chars;
-#ifdef CAN_READ_UNALIGNED
+#ifdef V8_HOST_CAN_READ_UNALIGNED
   if (sizeof(*dest) == sizeof(*src)) {
     // Number of characters in a uint32_t.
     static const int kStepSize = sizeof(uint32_t) / sizeof(*dest);  // NOLINT
diff --git a/src/v8natives.js b/src/v8natives.js
index 29a24b4..55bc9f8 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -54,6 +54,16 @@
   }
 }
 
+// Emulates JSC by installing functions on a hidden prototype that
+// lies above the current object/prototype.  This lets you override
+// functions on String.prototype etc. and then restore the old function
+// with delete.  See http://code.google.com/p/chromium/issues/detail?id=1717
+function InstallFunctionsOnHiddenPrototype(object, attributes, functions) {
+  var hidden_prototype = new $Object();
+  %SetHiddenPrototype(object, hidden_prototype);
+  InstallFunctions(hidden_prototype, attributes, functions);
+}
+
 
 // ----------------------------------------------------------------------------
 
diff --git a/src/v8threads.cc b/src/v8threads.cc
index 2439476..838cae7 100644
--- a/src/v8threads.cc
+++ b/src/v8threads.cc
@@ -309,13 +309,13 @@
 
 
 int ThreadManager::CurrentId() {
-  return bit_cast<int, void*>(Thread::GetThreadLocal(thread_id_key));
+  return Thread::GetThreadLocalInt(thread_id_key);
 }
 
 
 void ThreadManager::AssignId() {
-  if (Thread::GetThreadLocal(thread_id_key) == NULL) {
-    Thread::SetThreadLocal(thread_id_key, bit_cast<void*, int>(next_id_++));
+  if (!Thread::HasThreadLocal(thread_id_key)) {
+    Thread::SetThreadLocalInt(thread_id_key, next_id_++);
   }
 }
 
diff --git a/src/version.cc b/src/version.cc
index 3785c37..0d2840d 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     1
 #define MINOR_VERSION     2
-#define BUILD_NUMBER      2
-#define PATCH_LEVEL       1
+#define BUILD_NUMBER      3
+#define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false
 
 // Define SONAME to have the SCons build the put a specific SONAME into the
diff --git a/src/virtual-frame.h b/src/virtual-frame.h
index 8c76f8a..794f156 100644
--- a/src/virtual-frame.h
+++ b/src/virtual-frame.h
@@ -202,16 +202,12 @@
 
 } }  // namespace v8::internal
 
-#ifdef V8_ARCH_ARM
-#include "arm/virtual-frame-arm.h"
-#endif
-
-#ifdef V8_ARCH_X64
-#include "x64/virtual-frame-x64.h"
-#endif
-
-#ifdef V8_ARCH_IA32
+#if V8_TARGET_ARCH_IA32
 #include "ia32/virtual-frame-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/virtual-frame-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/virtual-frame-arm.h"
 #endif
 
 #endif  // V8_VIRTUAL_FRAME_H_
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index 209aa2d..0b01849 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -25,3 +25,44 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#ifndef V8_X64_ASSEMBLER_X64_INL_H_
+#define V8_X64_ASSEMBLER_X64_INL_H_
+
+namespace v8 { namespace internal {
+
+Condition NegateCondition(Condition cc) {
+  return static_cast<Condition>(cc ^ 1);
+}
+
+
+Address RelocInfo::target_address() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  return Assembler::target_address_at(pc_);
+}
+
+
+Address RelocInfo::target_address_address() {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  return reinterpret_cast<Address>(pc_);
+}
+
+
+void RelocInfo::set_target_address(Address target) {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+  Assembler::set_target_address_at(pc_, target);
+}
+
+
+void Assembler::set_target_address_at(byte* location, byte* value) {
+  UNIMPLEMENTED();
+}
+
+
+byte* Assembler::target_address_at(byte* location) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+} }  // namespace v8::internal
+
+#endif  // V8_X64_ASSEMBLER_X64_INL_H_
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 209aa2d..6e2c42a 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -25,3 +25,12 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#include "v8.h"
+
+#include "macro-assembler.h"
+
+namespace v8 { namespace internal {
+
+Register no_reg = { -1 };
+
+} }  // namespace v8::internal
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 209aa2d..40fcdd3 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -1,27 +1,925 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
 //
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+
+// A lightweight X64 Assembler.
+
+#ifndef V8_X64_ASSEMBLER_X64_H_
+#define V8_X64_ASSEMBLER_X64_H_
+
+namespace v8 { namespace internal {
+
+// CPU Registers.
+//
+// 1) We would prefer to use an enum, but enum values are assignment-
+// compatible with int, which has caused code-generation bugs.
+//
+// 2) We would prefer to use a class instead of a struct but we don't like
+// the register initialization to depend on the particular initialization
+// order (which appears to be different on OS X, Linux, and Windows for the
+// installed versions of C++ we tried). Using a struct permits C-style
+// "initialization". Also, the Register objects cannot be const as this
+// forces initialization stubs in MSVC, making us dependent on initialization
+// order.
+//
+// 3) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the struct in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
+//
+const int kNumRegisters = 16;
+
+struct Register {
+  bool is_valid() const  { return 0 <= code_ && code_ < kNumRegisters; }
+  bool is(Register reg) const  { return code_ == reg.code_; }
+  // The byte-register distinction of ai32 has dissapeared.
+  bool is_byte_register() const  { return false; }
+  int code() const  {
+    ASSERT(is_valid());
+    return code_;
+  }
+  int bit() const  {
+    UNIMPLEMENTED();
+    return 0;
+  }
+
+  // (unfortunately we can't make this private in a struct)
+  int code_;
+};
+
+extern Register rax;
+extern Register rcx;
+extern Register rdx;
+extern Register rbx;
+extern Register rsp;
+extern Register rbp;
+extern Register rsi;
+extern Register rdi;
+extern Register r8;
+extern Register r9;
+extern Register r10;
+extern Register r11;
+extern Register r12;
+extern Register r13;
+extern Register r14;
+extern Register r15;
+extern Register no_reg;
+
+
+struct XMMRegister {
+  bool is_valid() const  { return 0 <= code_ && code_ < 2; }
+  int code() const  {
+    ASSERT(is_valid());
+    return code_;
+  }
+
+  int code_;
+};
+
+extern XMMRegister xmm0;
+extern XMMRegister xmm1;
+extern XMMRegister xmm2;
+extern XMMRegister xmm3;
+extern XMMRegister xmm4;
+extern XMMRegister xmm5;
+extern XMMRegister xmm6;
+extern XMMRegister xmm7;
+
+enum Condition {
+  // any value < 0 is considered no_condition
+  no_condition  = -1,
+
+  overflow      =  0,
+  no_overflow   =  1,
+  below         =  2,
+  above_equal   =  3,
+  equal         =  4,
+  not_equal     =  5,
+  below_equal   =  6,
+  above         =  7,
+  negative      =  8,
+  positive      =  9,
+  parity_even   = 10,
+  parity_odd    = 11,
+  less          = 12,
+  greater_equal = 13,
+  less_equal    = 14,
+  greater       = 15,
+
+  // aliases
+  carry         = below,
+  not_carry     = above_equal,
+  zero          = equal,
+  not_zero      = not_equal,
+  sign          = negative,
+  not_sign      = positive
+};
+
+
+// Returns the equivalent of !cc.
+// Negation of the default no_condition (-1) results in a non-default
+// no_condition value (-2). As long as tests for no_condition check
+// for condition < 0, this will work as expected.
+inline Condition NegateCondition(Condition cc);
+
+// Corresponds to transposing the operands of a comparison.
+inline Condition ReverseCondition(Condition cc) {
+  switch (cc) {
+    case below:
+      return above;
+    case above:
+      return below;
+    case above_equal:
+      return below_equal;
+    case below_equal:
+      return above_equal;
+    case less:
+      return greater;
+    case greater:
+      return less;
+    case greater_equal:
+      return less_equal;
+    case less_equal:
+      return greater_equal;
+    default:
+      return cc;
+  };
+}
+
+enum Hint {
+  no_hint = 0,
+  not_taken = 0x2e,
+  taken = 0x3e
+};
+
+// The result of negating a hint is as if the corresponding condition
+// were negated by NegateCondition.  That is, no_hint is mapped to
+// itself and not_taken and taken are mapped to each other.
+inline Hint NegateHint(Hint hint) {
+  return (hint == no_hint)
+      ? no_hint
+      : ((hint == not_taken) ? taken : not_taken);
+}
+
+
+// -----------------------------------------------------------------------------
+// Machine instruction Immediates
+
+class Immediate BASE_EMBEDDED {
+ public:
+  inline explicit Immediate(int64_t x);
+  inline explicit Immediate(const char* s);
+  inline explicit Immediate(const ExternalReference& ext);
+  inline explicit Immediate(Handle<Object> handle);
+  inline explicit Immediate(Smi* value);
+
+  static Immediate CodeRelativeOffset(Label* label) {
+    return Immediate(label);
+  }
+
+  bool is_zero() const { return x_ == 0 && rmode_ == RelocInfo::NONE; }
+  bool is_int8() const {
+    return -128 <= x_ && x_ < 128 && rmode_ == RelocInfo::NONE;
+  }
+  bool is_int16() const {
+    return -32768 <= x_ && x_ < 32768 && rmode_ == RelocInfo::NONE;
+  }
+  bool is_int32() const {
+    return V8_INT64_C(-2147483648) <= x_
+        && x_ < V8_INT64_C(2147483648)
+        && rmode_ == RelocInfo::NONE;
+  }
+
+ private:
+  inline explicit Immediate(Label* value) { UNIMPLEMENTED(); }
+
+  int64_t x_;
+  RelocInfo::Mode rmode_;
+
+  friend class Assembler;
+};
+
+
+// -----------------------------------------------------------------------------
+// Machine instruction Operands
+
+enum ScaleFactor {
+  times_1 = 0,
+  times_2 = 1,
+  times_4 = 2,
+  times_8 = 3
+};
+
+
+class Operand BASE_EMBEDDED {
+ public:
+  // reg
+  INLINE(explicit Operand(Register reg));
+
+  // MemoryOperand
+  INLINE(explicit Operand()) { UNIMPLEMENTED(); }
+
+  // Returns true if this Operand is a wrapper for the specified register.
+  bool is_reg(Register reg) const;
+
+  // These constructors have been moved to MemOperand, and should
+  // be removed from Operand as soon as all their uses use MemOperands instead.
+    // [disp/r]
+  INLINE(explicit Operand(intptr_t disp, RelocInfo::Mode rmode)) {
+    UNIMPLEMENTED();
+  }
+  // disp only must always be relocated
+
+  // [base + disp/r]
+  explicit Operand(Register base, intptr_t disp,
+                   RelocInfo::Mode rmode = RelocInfo::NONE);
+
+  // [base + index*scale + disp/r]
+  explicit Operand(Register base,
+                   Register index,
+                   ScaleFactor scale,
+                   intptr_t disp,
+                   RelocInfo::Mode rmode = RelocInfo::NONE);
+
+  // [index*scale + disp/r]
+  explicit Operand(Register index,
+                   ScaleFactor scale,
+                   intptr_t disp,
+                   RelocInfo::Mode rmode = RelocInfo::NONE);
+
+  static Operand StaticVariable(const ExternalReference& ext) {
+    return Operand(reinterpret_cast<intptr_t>(ext.address()),
+                   RelocInfo::EXTERNAL_REFERENCE);
+  }
+
+  static Operand StaticArray(Register index,
+                             ScaleFactor scale,
+                             const ExternalReference& arr) {
+    return Operand(index, scale, reinterpret_cast<intptr_t>(arr.address()),
+                   RelocInfo::EXTERNAL_REFERENCE);
+  }
+
+  // End of constructors and methods that have been moved to MemOperand.
+
+ private:
+  byte rex_;
+  byte buf_[10];
+  // The number of bytes in buf_.
+  unsigned int len_;
+  // Only valid if len_ > 4.
+  RelocInfo::Mode rmode_;
+
+  // Set the ModRM byte without an encoded 'reg' register. The
+  // register is encoded later as part of the emit_operand operation.
+  inline void set_modrm(int mod, Register rm);
+
+  inline void set_sib(ScaleFactor scale, Register index, Register base);
+  inline void set_disp8(int8_t disp);
+  inline void set_disp32(int32_t disp);
+  inline void set_dispr(intptr_t disp, RelocInfo::Mode rmode);
+
+  friend class Assembler;
+};
+
+class MemOperand : public Operand {
+ public:
+  // [disp/r]
+  INLINE(explicit MemOperand(intptr_t disp, RelocInfo::Mode rmode)) :
+      Operand() {
+    UNIMPLEMENTED();
+  }
+  // disp only must always be relocated
+
+  // [base + disp/r]
+  explicit MemOperand(Register base, intptr_t disp,
+                   RelocInfo::Mode rmode = RelocInfo::NONE);
+
+  // [base + index*scale + disp/r]
+  explicit MemOperand(Register base,
+                   Register index,
+                   ScaleFactor scale,
+                   intptr_t disp,
+                   RelocInfo::Mode rmode = RelocInfo::NONE);
+
+  // [index*scale + disp/r]
+  explicit MemOperand(Register index,
+                   ScaleFactor scale,
+                   intptr_t disp,
+                   RelocInfo::Mode rmode = RelocInfo::NONE);
+
+  static MemOperand StaticVariable(const ExternalReference& ext) {
+    return MemOperand(reinterpret_cast<intptr_t>(ext.address()),
+                   RelocInfo::EXTERNAL_REFERENCE);
+  }
+
+  static MemOperand StaticArray(Register index,
+                             ScaleFactor scale,
+                             const ExternalReference& arr) {
+    return MemOperand(index, scale, reinterpret_cast<intptr_t>(arr.address()),
+                   RelocInfo::EXTERNAL_REFERENCE);
+  }
+};
+
+// -----------------------------------------------------------------------------
+// A Displacement describes the 32bit immediate field of an instruction which
+// may be used together with a Label in order to refer to a yet unknown code
+// position. Displacements stored in the instruction stream are used to describe
+// the instruction and to chain a list of instructions using the same Label.
+// A Displacement contains 2 different fields:
+//
+// next field: position of next displacement in the chain (0 = end of list)
+// type field: instruction type
+//
+// A next value of null (0) indicates the end of a chain (note that there can
+// be no displacement at position zero, because there is always at least one
+// instruction byte before the displacement).
+//
+// Displacement _data field layout
+//
+// |31.....2|1......0|
+// [  next  |  type  |
+
+class Displacement BASE_EMBEDDED {
+ public:
+  enum Type {
+    UNCONDITIONAL_JUMP,
+    CODE_RELATIVE,
+    OTHER
+  };
+
+  int data() const { return data_; }
+  Type type() const { return TypeField::decode(data_); }
+  void next(Label* L) const {
+    int n = NextField::decode(data_);
+    n > 0 ? L->link_to(n) : L->Unuse();
+  }
+  void link_to(Label* L) { init(L, type()); }
+
+  explicit Displacement(int data) { data_ = data; }
+
+  Displacement(Label* L, Type type) { init(L, type); }
+
+  void print() {
+    PrintF("%s (%x) ", (type() == UNCONDITIONAL_JUMP ? "jmp" : "[other]"),
+                       NextField::decode(data_));
+  }
+
+ private:
+  int data_;
+
+  class TypeField: public BitField<Type, 0, 2> {};
+  class NextField: public BitField<int,  2, 32-2> {};
+
+  void init(Label* L, Type type);
+};
+
+
+
+// CpuFeatures keeps track of which features are supported by the target CPU.
+// Supported features must be enabled by a Scope before use.
+// Example:
+//   if (CpuFeatures::IsSupported(SSE2)) {
+//     CpuFeatures::Scope fscope(SSE2);
+//     // Generate SSE2 floating point code.
+//   } else {
+//     // Generate standard x87 floating point code.
+//   }
+class CpuFeatures : public AllStatic {
+ public:
+  // Feature flags bit positions. They are mostly based on the CPUID spec.
+  // (We assign CPUID itself to one of the currently reserved bits --
+  // feel free to change this if needed.)
+  enum Feature { SSE3 = 32, SSE2 = 26, CMOV = 15, RDTSC = 4, CPUID = 10 };
+  // Detect features of the target CPU. Set safe defaults if the serializer
+  // is enabled (snapshots must be portable).
+  static void Probe();
+  // Check whether a feature is supported by the target CPU.
+  static bool IsSupported(Feature f) {
+    return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
+  }
+  // Check whether a feature is currently enabled.
+  static bool IsEnabled(Feature f) {
+    return (enabled_ & (static_cast<uint64_t>(1) << f)) != 0;
+  }
+  // Enable a specified feature within a scope.
+  class Scope BASE_EMBEDDED {
+#ifdef DEBUG
+   public:
+    explicit Scope(Feature f) {
+      ASSERT(CpuFeatures::IsSupported(f));
+      old_enabled_ = CpuFeatures::enabled_;
+      CpuFeatures::enabled_ |= (static_cast<uint64_t>(1) << f);
+    }
+    ~Scope() { CpuFeatures::enabled_ = old_enabled_; }
+   private:
+    uint64_t old_enabled_;
+#else
+   public:
+    explicit Scope(Feature f) {}
+#endif
+  };
+ private:
+  static uint64_t supported_;
+  static uint64_t enabled_;
+};
+
+
+class Assembler : public Malloced {
+ private:
+  // The relocation writer's position is kGap bytes below the end of
+  // the generated instructions. This leaves enough space for the
+  // longest possible ia32 instruction (17 bytes as of 9/26/06) and
+  // allows for a single, fast space check per instruction.
+  static const int kGap = 32;
+
+ public:
+  // Create an assembler. Instructions and relocation information are emitted
+  // into a buffer, with the instructions starting from the beginning and the
+  // relocation information starting from the end of the buffer. See CodeDesc
+  // for a detailed comment on the layout (globals.h).
+  //
+  // If the provided buffer is NULL, the assembler allocates and grows its own
+  // buffer, and buffer_size determines the initial buffer size. The buffer is
+  // owned by the assembler and deallocated upon destruction of the assembler.
+  //
+  // If the provided buffer is not NULL, the assembler uses the provided buffer
+  // for code generation and assumes its size to be buffer_size. If the buffer
+  // is too small, a fatal error occurs. No deallocation of the buffer is done
+  // upon destruction of the assembler.
+  Assembler(void* buffer, int buffer_size);
+  ~Assembler();
+
+  // GetCode emits any pending (non-emitted) code and fills the descriptor
+  // desc. GetCode() is idempotent; it returns the same result if no other
+  // Assembler functions are invoked in between GetCode() calls.
+  void GetCode(CodeDesc* desc);
+
+  // Read/Modify the code target in the branch/call instruction at pc.
+  inline static Address target_address_at(Address pc);
+  inline static void set_target_address_at(Address pc, Address target);
+
+  // Distance between the address of the code target in the call instruction
+  // and the return address
+  static const int kTargetAddrToReturnAddrDist = kPointerSize;
+
+
+  // ---------------------------------------------------------------------------
+  // Code generation
+  //
+  // - function names correspond one-to-one to ia32 instruction mnemonics
+  // - unless specified otherwise, instructions operate on 32bit operands
+  // - instructions on 8bit (byte) operands/registers have a trailing '_b'
+  // - instructions on 16bit (word) operands/registers have a trailing '_w'
+  // - naming conflicts with C++ keywords are resolved via a trailing '_'
+
+  // NOTE ON INTERFACE: Currently, the interface is not very consistent
+  // in the sense that some operations (e.g. mov()) can be called in more
+  // the one way to generate the same instruction: The Register argument
+  // can in some cases be replaced with an Operand(Register) argument.
+  // This should be cleaned up and made more orthogonal. The questions
+  // is: should we always use Operands instead of Registers where an
+  // Operand is possible, or should we have a Register (overloaded) form
+  // instead? We must be careful to make sure that the selected instruction
+  // is obvious from the parameters to avoid hard-to-find code generation
+  // bugs.
+
+  // Insert the smallest number of nop instructions
+  // possible to align the pc offset to a multiple
+  // of m. m must be a power of 2.
+  void Align(int m);
+
+  // Stack
+  void pushad();
+  void popad();
+
+  void pushfd();
+  void popfd();
+
+  void push(const Immediate& x);
+  void push(Register src);
+  void push(const Operand& src);
+  void push(Label* label, RelocInfo::Mode relocation_mode);
+
+  void pop(Register dst);
+  void pop(const Operand& dst);
+
+  void enter(const Immediate& size);
+  void leave();
+
+  // Moves
+  void mov_b(Register dst, const Operand& src);
+  void mov_b(const Operand& dst, int8_t imm8);
+  void mov_b(const Operand& dst, Register src);
+
+  void mov_w(Register dst, const Operand& src);
+  void mov_w(const Operand& dst, Register src);
+
+  void mov(Register dst, int32_t imm32);
+  void mov(Register dst, const Immediate& x);
+  void mov(Register dst, Handle<Object> handle);
+  void mov(Register dst, const Operand& src);
+  void mov(Register dst, Register src);
+  void mov(const Operand& dst, const Immediate& x);
+  void mov(const Operand& dst, Handle<Object> handle);
+  void mov(const Operand& dst, Register src);
+
+  void movsx_b(Register dst, const Operand& src);
+
+  void movsx_w(Register dst, const Operand& src);
+
+  void movzx_b(Register dst, const Operand& src);
+
+  void movzx_w(Register dst, const Operand& src);
+
+  // Conditional moves
+  void cmov(Condition cc, Register dst, int32_t imm32);
+  void cmov(Condition cc, Register dst, Handle<Object> handle);
+  void cmov(Condition cc, Register dst, const Operand& src);
+
+  // Exchange two registers
+  void xchg(Register dst, Register src);
+
+  // Arithmetics
+  void adc(Register dst, int32_t imm32);
+  void adc(Register dst, const Operand& src);
+
+  void add(Register dst, const Operand& src);
+  void add(const Operand& dst, const Immediate& x);
+
+  void and_(Register dst, int32_t imm32);
+  void and_(Register dst, const Operand& src);
+  void and_(const Operand& src, Register dst);
+  void and_(const Operand& dst, const Immediate& x);
+
+  void cmpb(const Operand& op, int8_t imm8);
+  void cmpb_al(const Operand& op);
+  void cmpw_ax(const Operand& op);
+  void cmpw(const Operand& op, Immediate imm16);
+  void cmp(Register reg, int32_t imm32);
+  void cmp(Register reg, Handle<Object> handle);
+  void cmp(Register reg, const Operand& op);
+  void cmp(const Operand& op, const Immediate& imm);
+
+  void dec_b(Register dst);
+
+  void dec(Register dst);
+  void dec(const Operand& dst);
+
+  void cdq();
+
+  void idiv(Register src);
+
+  void imul(Register dst, const Operand& src);
+  void imul(Register dst, Register src, int32_t imm32);
+
+  void inc(Register dst);
+  void inc(const Operand& dst);
+
+  void lea(Register dst, const Operand& src);
+
+  void mul(Register src);
+
+  void neg(Register dst);
+
+  void not_(Register dst);
+
+  void or_(Register dst, int32_t imm32);
+  void or_(Register dst, const Operand& src);
+  void or_(const Operand& dst, Register src);
+  void or_(const Operand& dst, const Immediate& x);
+
+  void rcl(Register dst, uint8_t imm8);
+
+  void sar(Register dst, uint8_t imm8);
+  void sar(Register dst);
+
+  void sbb(Register dst, const Operand& src);
+
+  void shld(Register dst, const Operand& src);
+
+  void shl(Register dst, uint8_t imm8);
+  void shl(Register dst);
+
+  void shrd(Register dst, const Operand& src);
+
+  void shr(Register dst, uint8_t imm8);
+  void shr(Register dst);
+  void shr_cl(Register dst);
+
+  void sub(const Operand& dst, const Immediate& x);
+  void sub(Register dst, const Operand& src);
+  void sub(const Operand& dst, Register src);
+
+  void test(Register reg, const Immediate& imm);
+  void test(Register reg, const Operand& op);
+  void test(const Operand& op, const Immediate& imm);
+
+  void xor_(Register dst, int32_t imm32);
+  void xor_(Register dst, const Operand& src);
+  void xor_(const Operand& src, Register dst);
+  void xor_(const Operand& dst, const Immediate& x);
+
+  // Bit operations.
+  void bt(const Operand& dst, Register src);
+  void bts(const Operand& dst, Register src);
+
+  // Miscellaneous
+  void hlt();
+  void int3();
+  void nop();
+  void rdtsc();
+  void ret(int imm16);
+
+  // Label operations & relative jumps (PPUM Appendix D)
+  //
+  // Takes a branch opcode (cc) and a label (L) and generates
+  // either a backward branch or a forward branch and links it
+  // to the label fixup chain. Usage:
+  //
+  // Label L;    // unbound label
+  // j(cc, &L);  // forward branch to unbound label
+  // bind(&L);   // bind label to the current pc
+  // j(cc, &L);  // backward branch to bound label
+  // bind(&L);   // illegal: a label may be bound only once
+  //
+  // Note: The same Label can be used for forward and backward branches
+  // but it may be bound only once.
+
+  void bind(Label* L);  // binds an unbound label L to the current code position
+
+  // Calls
+  void call(Label* L);
+  void call(byte* entry, RelocInfo::Mode rmode);
+  void call(const Operand& adr);
+  void call(Handle<Code> code, RelocInfo::Mode rmode);
+
+  // Jumps
+  void jmp(Label* L);  // unconditional jump to L
+  void jmp(byte* entry, RelocInfo::Mode rmode);
+  void jmp(const Operand& adr);
+  void jmp(Handle<Code> code, RelocInfo::Mode rmode);
+
+  // Conditional jumps
+  void j(Condition cc, Label* L, Hint hint = no_hint);
+  void j(Condition cc, byte* entry, RelocInfo::Mode rmode, Hint hint = no_hint);
+  void j(Condition cc, Handle<Code> code, Hint hint = no_hint);
+
+  // Floating-point operations
+  void fld(int i);
+
+  void fld1();
+  void fldz();
+
+  void fld_s(const Operand& adr);
+  void fld_d(const Operand& adr);
+
+  void fstp_s(const Operand& adr);
+  void fstp_d(const Operand& adr);
+
+  void fild_s(const Operand& adr);
+  void fild_d(const Operand& adr);
+
+  void fist_s(const Operand& adr);
+
+  void fistp_s(const Operand& adr);
+  void fistp_d(const Operand& adr);
+
+  void fisttp_s(const Operand& adr);
+
+  void fabs();
+  void fchs();
+
+  void fadd(int i);
+  void fsub(int i);
+  void fmul(int i);
+  void fdiv(int i);
+
+  void fisub_s(const Operand& adr);
+
+  void faddp(int i = 1);
+  void fsubp(int i = 1);
+  void fsubrp(int i = 1);
+  void fmulp(int i = 1);
+  void fdivp(int i = 1);
+  void fprem();
+  void fprem1();
+
+  void fxch(int i = 1);
+  void fincstp();
+  void ffree(int i = 0);
+
+  void ftst();
+  void fucomp(int i);
+  void fucompp();
+  void fcompp();
+  void fnstsw_ax();
+  void fwait();
+  void fnclex();
+
+  void frndint();
+
+  void sahf();
+  void setcc(Condition cc, Register reg);
+
+  void cpuid();
+
+  // SSE2 instructions
+  void cvttss2si(Register dst, const Operand& src);
+  void cvttsd2si(Register dst, const Operand& src);
+
+  void cvtsi2sd(XMMRegister dst, const Operand& src);
+
+  void addsd(XMMRegister dst, XMMRegister src);
+  void subsd(XMMRegister dst, XMMRegister src);
+  void mulsd(XMMRegister dst, XMMRegister src);
+  void divsd(XMMRegister dst, XMMRegister src);
+
+  // Use either movsd or movlpd.
+  void movdbl(XMMRegister dst, const Operand& src);
+  void movdbl(const Operand& dst, XMMRegister src);
+
+  // Debugging
+  void Print();
+
+  // Check the code size generated from label to here.
+  int SizeOfCodeGeneratedSince(Label* l) { return pc_offset() - l->pos(); }
+
+  // Mark address of the ExitJSFrame code.
+  void RecordJSReturn();
+
+  // Record a comment relocation entry that can be used by a disassembler.
+  // Use --debug_code to enable.
+  void RecordComment(const char* msg);
+
+  void RecordPosition(int pos);
+  void RecordStatementPosition(int pos);
+  void WriteRecordedPositions();
+
+  // Writes a single word of data in the code stream.
+  // Used for inline tables, e.g., jump-tables.
+  void dd(uint32_t data, RelocInfo::Mode reloc_info);
+
+  // Writes the absolute address of a bound label at the given position in
+  // the generated code. That positions should have the relocation mode
+  // internal_reference!
+  void WriteInternalReference(int position, const Label& bound_label);
+
+  int pc_offset() const  { return pc_ - buffer_; }
+  int current_statement_position() const { return current_statement_position_; }
+  int current_position() const  { return current_position_; }
+
+  // Check if there is less than kGap bytes available in the buffer.
+  // If this is the case, we need to grow the buffer before emitting
+  // an instruction or relocation information.
+  inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
+
+  // Get the number of bytes available in the buffer.
+  inline int available_space() const { return reloc_info_writer.pos() - pc_; }
+
+  // Avoid overflows for displacements etc.
+  static const int kMaximalBufferSize = 512*MB;
+  static const int kMinimalBufferSize = 4*KB;
+
+ protected:
+  void movsd(XMMRegister dst, const Operand& src);
+  void movsd(const Operand& dst, XMMRegister src);
+
+  void emit_sse_operand(XMMRegister reg, const Operand& adr);
+  void emit_sse_operand(XMMRegister dst, XMMRegister src);
+
+
+ private:
+  byte* addr_at(int pos)  { return buffer_ + pos; }
+  byte byte_at(int pos)  { return buffer_[pos]; }
+  uint32_t long_at(int pos)  {
+    return *reinterpret_cast<uint32_t*>(addr_at(pos));
+  }
+  void long_at_put(int pos, uint32_t x)  {
+    *reinterpret_cast<uint32_t*>(addr_at(pos)) = x;
+  }
+
+  // code emission
+  void GrowBuffer();
+  inline void emit(uint32_t x);
+  inline void emit(Handle<Object> handle);
+  inline void emit(uint32_t x, RelocInfo::Mode rmode);
+  inline void emit(const Immediate& x);
+  inline void emit_w(const Immediate& x);
+
+  // Emit the code-object-relative offset of the label's position
+  inline void emit_code_relative_offset(Label* label);
+
+  // instruction generation
+  void emit_arith_b(int op1, int op2, Register dst, int imm8);
+
+  // Emit a basic arithmetic instruction (i.e. first byte of the family is 0x81)
+  // with a given destination expression and an immediate operand.  It attempts
+  // to use the shortest encoding possible.
+  // sel specifies the /n in the modrm byte (see the Intel PRM).
+  void emit_arith(int sel, Operand dst, const Immediate& x);
+
+  void emit_operand(Register reg, const Operand& adr);
+
+  void emit_farith(int b1, int b2, int i);
+
+  // labels
+  void print(Label* L);
+  void bind_to(Label* L, int pos);
+  void link_to(Label* L, Label* appendix);
+
+  // displacements
+  inline Displacement disp_at(Label* L);
+  inline void disp_at_put(Label* L, Displacement disp);
+  inline void emit_disp(Label* L, Displacement::Type type);
+
+  // record reloc info for current pc_
+  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+
+  friend class CodePatcher;
+  friend class EnsureSpace;
+
+  // Code buffer:
+  // The buffer into which code and relocation info are generated.
+  byte* buffer_;
+  int buffer_size_;
+  // True if the assembler owns the buffer, false if buffer is external.
+  bool own_buffer_;
+
+  // code generation
+  byte* pc_;  // the program counter; moves forward
+  RelocInfoWriter reloc_info_writer;
+
+  // push-pop elimination
+  byte* last_pc_;
+
+  // source position information
+  int current_statement_position_;
+  int current_position_;
+  int written_statement_position_;
+  int written_position_;
+};
+
+
+// Helper class that ensures that there is enough space for generating
+// instructions and relocation information.  The constructor makes
+// sure that there is enough space and (in debug mode) the destructor
+// checks that we did not generate too much.
+class EnsureSpace BASE_EMBEDDED {
+ public:
+  explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
+    if (assembler_->overflow()) assembler_->GrowBuffer();
+#ifdef DEBUG
+    space_before_ = assembler_->available_space();
+#endif
+  }
+
+#ifdef DEBUG
+  ~EnsureSpace() {
+    int bytes_generated = space_before_ - assembler_->available_space();
+    ASSERT(bytes_generated < assembler_->kGap);
+  }
+#endif
+
+ private:
+  Assembler* assembler_;
+#ifdef DEBUG
+  int space_before_;
+#endif
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_X64_ASSEMBLER_X64_H_
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 209aa2d..4acb0cb 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -25,3 +25,594 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#ifndef V8_X64_CODEGEN_X64_H_
+#define V8_X64_CODEGEN_X64_H_
+
+namespace v8 { namespace internal {
+
+// Forward declarations
+class DeferredCode;
+class RegisterAllocator;
+class RegisterFile;
+
+enum InitState { CONST_INIT, NOT_CONST_INIT };
+enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
+
+
+// -------------------------------------------------------------------------
+// Reference support
+
+// A reference is a C++ stack-allocated object that keeps an ECMA
+// reference on the execution stack while in scope. For variables
+// the reference is empty, indicating that it isn't necessary to
+// store state on the stack for keeping track of references to those.
+// For properties, we keep either one (named) or two (indexed) values
+// on the execution stack to represent the reference.
+
+class Reference BASE_EMBEDDED {
+ public:
+  // The values of the types is important, see size().
+  enum Type { ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
+  Reference(CodeGenerator* cgen, Expression* expression);
+  ~Reference();
+
+  Expression* expression() const { return expression_; }
+  Type type() const { return type_; }
+  void set_type(Type value) {
+    ASSERT(type_ == ILLEGAL);
+    type_ = value;
+  }
+
+  // The size the reference takes up on the stack.
+  int size() const { return (type_ == ILLEGAL) ? 0 : type_; }
+
+  bool is_illegal() const { return type_ == ILLEGAL; }
+  bool is_slot() const { return type_ == SLOT; }
+  bool is_property() const { return type_ == NAMED || type_ == KEYED; }
+
+  // Return the name.  Only valid for named property references.
+  Handle<String> GetName();
+
+  // Generate code to push the value of the reference on top of the
+  // expression stack.  The reference is expected to be already on top of
+  // the expression stack, and it is left in place with its value above it.
+  void GetValue(TypeofState typeof_state);
+
+  // Like GetValue except that the slot is expected to be written to before
+  // being read from again.  The value of the reference may be invalidated,
+  // causing subsequent attempts to read it to fail.
+  void TakeValue(TypeofState typeof_state);
+
+  // Generate code to store the value on top of the expression stack in the
+  // reference.  The reference is expected to be immediately below the value
+  // on the expression stack.  The stored value is left in place (with the
+  // reference intact below it) to support chained assignments.
+  void SetValue(InitState init_state);
+
+ private:
+  CodeGenerator* cgen_;
+  Expression* expression_;
+  Type type_;
+};
+
+
+// -------------------------------------------------------------------------
+// Control destinations.
+
+// A control destination encapsulates a pair of jump targets and a
+// flag indicating which one is the preferred fall-through.  The
+// preferred fall-through must be unbound, the other may be already
+// bound (ie, a backward target).
+//
+// The true and false targets may be jumped to unconditionally or
+// control may split conditionally.  Unconditional jumping and
+// splitting should be emitted in tail position (as the last thing
+// when compiling an expression) because they can cause either label
+// to be bound or the non-fall through to be jumped to leaving an
+// invalid virtual frame.
+//
+// The labels in the control destination can be extracted and
+// manipulated normally without affecting the state of the
+// destination.
+
+class ControlDestination BASE_EMBEDDED {
+ public:
+  ControlDestination(JumpTarget* true_target,
+                     JumpTarget* false_target,
+                     bool true_is_fall_through)
+      : true_target_(true_target),
+        false_target_(false_target),
+        true_is_fall_through_(true_is_fall_through),
+        is_used_(false) {
+    ASSERT(true_is_fall_through ? !true_target->is_bound()
+                                : !false_target->is_bound());
+  }
+
+  // Accessors for the jump targets.  Directly jumping or branching to
+  // or binding the targets will not update the destination's state.
+  JumpTarget* true_target() const { return true_target_; }
+  JumpTarget* false_target() const { return false_target_; }
+
+  // True if the destination has been jumped to unconditionally or
+  // control has been split to both targets.  This predicate does not
+  // test whether the targets have been extracted and manipulated as
+  // raw jump targets.
+  bool is_used() const { return is_used_; }
+
+  // True if the destination is used and the true target (respectively
+  // false target) was the fall through.  If the target is backward,
+  // "fall through" included jumping unconditionally to it.
+  bool true_was_fall_through() const {
+    return is_used_ && true_is_fall_through_;
+  }
+
+  bool false_was_fall_through() const {
+    return is_used_ && !true_is_fall_through_;
+  }
+
+  // Emit a branch to one of the true or false targets, and bind the
+  // other target.  Because this binds the fall-through target, it
+  // should be emitted in tail position (as the last thing when
+  // compiling an expression).
+  void Split(Condition cc) {
+    ASSERT(!is_used_);
+    if (true_is_fall_through_) {
+      false_target_->Branch(NegateCondition(cc));
+      true_target_->Bind();
+    } else {
+      true_target_->Branch(cc);
+      false_target_->Bind();
+    }
+    is_used_ = true;
+  }
+
+  // Emit an unconditional jump in tail position, to the true target
+  // (if the argument is true) or the false target.  The "jump" will
+  // actually bind the jump target if it is forward, jump to it if it
+  // is backward.
+  void Goto(bool where) {
+    ASSERT(!is_used_);
+    JumpTarget* target = where ? true_target_ : false_target_;
+    if (target->is_bound()) {
+      target->Jump();
+    } else {
+      target->Bind();
+    }
+    is_used_ = true;
+    true_is_fall_through_ = where;
+  }
+
+  // Mark this jump target as used as if Goto had been called, but
+  // without generating a jump or binding a label (the control effect
+  // should have already happened).  This is used when the left
+  // subexpression of the short-circuit boolean operators are
+  // compiled.
+  void Use(bool where) {
+    ASSERT(!is_used_);
+    ASSERT((where ? true_target_ : false_target_)->is_bound());
+    is_used_ = true;
+    true_is_fall_through_ = where;
+  }
+
+  // Swap the true and false targets but keep the same actual label as
+  // the fall through.  This is used when compiling negated
+  // expressions, where we want to swap the targets but preserve the
+  // state.
+  void Invert() {
+    JumpTarget* temp_target = true_target_;
+    true_target_ = false_target_;
+    false_target_ = temp_target;
+
+    true_is_fall_through_ = !true_is_fall_through_;
+  }
+
+ private:
+  // True and false jump targets.
+  JumpTarget* true_target_;
+  JumpTarget* false_target_;
+
+  // Before using the destination: true if the true target is the
+  // preferred fall through, false if the false target is.  After
+  // using the destination: true if the true target was actually used
+  // as the fall through, false if the false target was.
+  bool true_is_fall_through_;
+
+  // True if the Split or Goto functions have been called.
+  bool is_used_;
+};
+
+
+// -------------------------------------------------------------------------
+// Code generation state
+
+// The state is passed down the AST by the code generator (and back up, in
+// the form of the state of the jump target pair).  It is threaded through
+// the call stack.  Constructing a state implicitly pushes it on the owning
+// code generator's stack of states, and destroying one implicitly pops it.
+//
+// The code generator state is only used for expressions, so statements have
+// the initial state.
+
+class CodeGenState BASE_EMBEDDED {
+ public:
+  // Create an initial code generator state.  Destroying the initial state
+  // leaves the code generator with a NULL state.
+  explicit CodeGenState(CodeGenerator* owner);
+
+  // Create a code generator state based on a code generator's current
+  // state.  The new state may or may not be inside a typeof, and has its
+  // own control destination.
+  CodeGenState(CodeGenerator* owner,
+               TypeofState typeof_state,
+               ControlDestination* destination);
+
+  // Destroy a code generator state and restore the owning code generator's
+  // previous state.
+  ~CodeGenState();
+
+  // Accessors for the state.
+  TypeofState typeof_state() const { return typeof_state_; }
+  ControlDestination* destination() const { return destination_; }
+
+ private:
+  // The owning code generator.
+  CodeGenerator* owner_;
+
+  // A flag indicating whether we are compiling the immediate subexpression
+  // of a typeof expression.
+  TypeofState typeof_state_;
+
+  // A control destination in case the expression has a control-flow
+  // effect.
+  ControlDestination* destination_;
+
+  // The previous state of the owning code generator, restored when
+  // this state is destroyed.
+  CodeGenState* previous_;
+};
+
+
+
+
+// -------------------------------------------------------------------------
+// CodeGenerator
+
+class CodeGenerator: public AstVisitor {
+ public:
+  // Takes a function literal, generates code for it. This function should only
+  // be called by compiler.cc.
+  static Handle<Code> MakeCode(FunctionLiteral* fun,
+                               Handle<Script> script,
+                               bool is_eval);
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  static bool ShouldGenerateLog(Expression* type);
+#endif
+
+  static void SetFunctionInfo(Handle<JSFunction> fun,
+                              int length,
+                              int function_token_position,
+                              int start_position,
+                              int end_position,
+                              bool is_expression,
+                              bool is_toplevel,
+                              Handle<Script> script,
+                              Handle<String> inferred_name);
+
+  // Accessors
+  MacroAssembler* masm() { return masm_; }
+
+  VirtualFrame* frame() const { return frame_; }
+
+  bool has_valid_frame() const { return frame_ != NULL; }
+
+  // Set the virtual frame to be new_frame, with non-frame register
+  // reference counts given by non_frame_registers.  The non-frame
+  // register reference counts of the old frame are returned in
+  // non_frame_registers.
+  void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
+
+  void DeleteFrame();
+
+  RegisterAllocator* allocator() const { return allocator_; }
+
+  CodeGenState* state() { return state_; }
+  void set_state(CodeGenState* state) { state_ = state; }
+
+  void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
+
+  bool in_spilled_code() const { return in_spilled_code_; }
+  void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
+
+ private:
+  // Construction/Destruction
+  CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
+  virtual ~CodeGenerator() { delete masm_; }
+
+  // Accessors
+  Scope* scope() const { return scope_; }
+
+  // Clearing and generating deferred code.
+  void ClearDeferred();
+  void ProcessDeferred();
+
+  bool is_eval() { return is_eval_; }
+
+  // State
+  TypeofState typeof_state() const { return state_->typeof_state(); }
+  ControlDestination* destination() const { return state_->destination(); }
+
+  // Track loop nesting level.
+  int loop_nesting() const { return loop_nesting_; }
+  void IncrementLoopNesting() { loop_nesting_++; }
+  void DecrementLoopNesting() { loop_nesting_--; }
+
+
+  // Node visitors.
+  void VisitStatements(ZoneList<Statement*>* statements);
+
+#define DEF_VISIT(type) \
+  void Visit##type(type* node);
+  NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+  // Visit a statement and then spill the virtual frame if control flow can
+  // reach the end of the statement (ie, it does not exit via break,
+  // continue, return, or throw).  This function is used temporarily while
+  // the code generator is being transformed.
+  void VisitAndSpill(Statement* statement);
+
+  // Visit a list of statements and then spill the virtual frame if control
+  // flow can reach the end of the list.
+  void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
+
+  // Main code generation function
+  void GenCode(FunctionLiteral* fun);
+
+  // Generate the return sequence code.  Should be called no more than
+  // once per compiled function, immediately after binding the return
+  // target (which can not be done more than once).
+  void GenerateReturnSequence(Result* return_value);
+
+  // The following are used by class Reference.
+  void LoadReference(Reference* ref);
+  void UnloadReference(Reference* ref);
+
+  Operand ContextOperand(Register context, int index) const {
+    return Operand(context, Context::SlotOffset(index));
+  }
+
+  Operand SlotOperand(Slot* slot, Register tmp);
+
+  Operand ContextSlotOperandCheckExtensions(Slot* slot,
+                                            Result tmp,
+                                            JumpTarget* slow);
+
+  // Expressions
+  Operand GlobalObject() const {
+    return ContextOperand(rsi, Context::GLOBAL_INDEX);
+  }
+
+  void LoadCondition(Expression* x,
+                     TypeofState typeof_state,
+                     ControlDestination* destination,
+                     bool force_control);
+  void Load(Expression* x, TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+  void LoadGlobal();
+  void LoadGlobalReceiver();
+
+  // Generate code to push the value of an expression on top of the frame
+  // and then spill the frame fully to memory.  This function is used
+  // temporarily while the code generator is being transformed.
+  void LoadAndSpill(Expression* expression,
+                    TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+
+  // Read a value from a slot and leave it on top of the expression stack.
+  void LoadFromSlot(Slot* slot, TypeofState typeof_state);
+  Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
+                                           TypeofState typeof_state,
+                                           JumpTarget* slow);
+
+  // Store the value on top of the expression stack into a slot, leaving the
+  // value in place.
+  void StoreToSlot(Slot* slot, InitState init_state);
+
+  // Special code for typeof expressions: Unfortunately, we must
+  // be careful when loading the expression in 'typeof'
+  // expressions. We are not allowed to throw reference errors for
+  // non-existing properties of the global object, so we must make it
+  // look like an explicit property access, instead of an access
+  // through the context chain.
+  void LoadTypeofExpression(Expression* x);
+
+  // Translate the value on top of the frame into control flow to the
+  // control destination.
+  void ToBoolean(ControlDestination* destination);
+
+  void GenericBinaryOperation(
+      Token::Value op,
+      SmiAnalysis* type,
+      OverwriteMode overwrite_mode);
+
+  // If possible, combine two constant smi values using op to produce
+  // a smi result, and push it on the virtual frame, all at compile time.
+  // Returns true if it succeeds.  Otherwise it has no effect.
+  bool FoldConstantSmis(Token::Value op, int left, int right);
+
+  // Emit code to perform a binary operation on a constant
+  // smi and a likely smi.  Consumes the Result *operand.
+  void ConstantSmiBinaryOperation(Token::Value op,
+                                  Result* operand,
+                                  Handle<Object> constant_operand,
+                                  SmiAnalysis* type,
+                                  bool reversed,
+                                  OverwriteMode overwrite_mode);
+
+  // Emit code to perform a binary operation on two likely smis.
+  // The code to handle smi arguments is produced inline.
+  // Consumes the Results *left and *right.
+  void LikelySmiBinaryOperation(Token::Value op,
+                                Result* left,
+                                Result* right,
+                                OverwriteMode overwrite_mode);
+
+  void Comparison(Condition cc,
+                  bool strict,
+                  ControlDestination* destination);
+
+  // To prevent long attacker-controlled byte sequences, integer constants
+  // from the JavaScript source are loaded in two parts if they are larger
+  // than 16 bits.
+  static const int kMaxSmiInlinedBits = 16;
+  bool IsUnsafeSmi(Handle<Object> value);
+  // Load an integer constant x into a register target using
+  // at most 16 bits of user-controlled data per assembly operation.
+  void LoadUnsafeSmi(Register target, Handle<Object> value);
+
+  void CallWithArguments(ZoneList<Expression*>* arguments, int position);
+
+  void CheckStack();
+
+  bool CheckForInlineRuntimeCall(CallRuntime* node);
+  Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
+  void ProcessDeclarations(ZoneList<Declaration*>* declarations);
+
+  Handle<Code> ComputeCallInitialize(int argc);
+  Handle<Code> ComputeCallInitializeInLoop(int argc);
+
+  // Declare global variables and functions in the given array of
+  // name/value pairs.
+  void DeclareGlobals(Handle<FixedArray> pairs);
+
+  // Instantiate the function boilerplate.
+  void InstantiateBoilerplate(Handle<JSFunction> boilerplate);
+
+  // Support for type checks.
+  void GenerateIsSmi(ZoneList<Expression*>* args);
+  void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
+  void GenerateIsArray(ZoneList<Expression*>* args);
+
+  // Support for arguments.length and arguments[?].
+  void GenerateArgumentsLength(ZoneList<Expression*>* args);
+  void GenerateArgumentsAccess(ZoneList<Expression*>* args);
+
+  // Support for accessing the value field of an object (used by Date).
+  void GenerateValueOf(ZoneList<Expression*>* args);
+  void GenerateSetValueOf(ZoneList<Expression*>* args);
+
+  // Fast support for charCodeAt(n).
+  void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
+
+  // Fast support for object equality testing.
+  void GenerateObjectEquals(ZoneList<Expression*>* args);
+
+  void GenerateLog(ZoneList<Expression*>* args);
+
+
+  // Methods and constants for fast case switch statement support.
+  //
+  // Only allow fast-case switch if the range of labels is at most
+  // this factor times the number of case labels.
+  // Value is derived from comparing the size of code generated by the normal
+  // switch code for Smi-labels to the size of a single pointer. If code
+  // quality increases this number should be decreased to match.
+  static const int kFastSwitchMaxOverheadFactor = 5;
+
+  // Minimal number of switch cases required before we allow jump-table
+  // optimization.
+  static const int kFastSwitchMinCaseCount = 5;
+
+  // The limit of the range of a fast-case switch, as a factor of the number
+  // of cases of the switch. Each platform should return a value that
+  // is optimal compared to the default code generated for a switch statement
+  // on that platform.
+  int FastCaseSwitchMaxOverheadFactor();
+
+  // The minimal number of cases in a switch before the fast-case switch
+  // optimization is enabled. Each platform should return a value that
+  // is optimal compared to the default code generated for a switch statement
+  // on that platform.
+  int FastCaseSwitchMinCaseCount();
+
+  // Allocate a jump table and create code to jump through it.
+  // Should call GenerateFastCaseSwitchCases to generate the code for
+  // all the cases at the appropriate point.
+  void GenerateFastCaseSwitchJumpTable(SwitchStatement* node,
+                                       int min_index,
+                                       int range,
+                                       Label* fail_label,
+                                       Vector<Label*> case_targets,
+                                       Vector<Label> case_labels);
+
+  // Generate the code for cases for the fast case switch.
+  // Called by GenerateFastCaseSwitchJumpTable.
+  void GenerateFastCaseSwitchCases(SwitchStatement* node,
+                                   Vector<Label> case_labels,
+                                   VirtualFrame* start_frame);
+
+  // Fast support for constant-Smi switches.
+  void GenerateFastCaseSwitchStatement(SwitchStatement* node,
+                                       int min_index,
+                                       int range,
+                                       int default_index);
+
+  // Fast support for constant-Smi switches. Tests whether switch statement
+  // permits optimization and calls GenerateFastCaseSwitch if it does.
+  // Returns true if the fast-case switch was generated, and false if not.
+  bool TryGenerateFastCaseSwitchStatement(SwitchStatement* node);
+
+  // Methods used to indicate which source code is generated for. Source
+  // positions are collected by the assembler and emitted with the relocation
+  // information.
+  void CodeForFunctionPosition(FunctionLiteral* fun);
+  void CodeForReturnPosition(FunctionLiteral* fun);
+  void CodeForStatementPosition(Node* node);
+  void CodeForSourcePosition(int pos);
+
+#ifdef DEBUG
+  // True if the registers are valid for entry to a block.  There should be
+  // no frame-external references to eax, ebx, ecx, edx, or edi.
+  bool HasValidEntryRegisters();
+#endif
+
+  bool is_eval_;  // Tells whether code is generated for eval.
+  Handle<Script> script_;
+  List<DeferredCode*> deferred_;
+
+  // Assembler
+  MacroAssembler* masm_;  // to generate code
+
+  // Code generation state
+  Scope* scope_;
+  VirtualFrame* frame_;
+  RegisterAllocator* allocator_;
+  CodeGenState* state_;
+  int loop_nesting_;
+
+  // Jump targets.
+  // The target of the return from the function.
+  BreakTarget function_return_;
+
+  // True if the function return is shadowed (ie, jumping to the target
+  // function_return_ does not jump to the true function return, but rather
+  // to some unlinking code).
+  bool function_return_is_shadowed_;
+
+  // True when we are in code that expects the virtual frame to be fully
+  // spilled.  Some virtual frame functions are disabled in DEBUG builds when
+  // called from spilled code, because they do not leave the virtual frame
+  // in a spilled state.
+  bool in_spilled_code_;
+
+  friend class VirtualFrame;
+  friend class JumpTarget;
+  friend class Reference;
+  friend class Result;
+
+  DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_X64_CODEGEN_X64_H_
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
index 209aa2d..f4468f6 100644
--- a/src/x64/frames-x64.h
+++ b/src/x64/frames-x64.h
@@ -25,3 +25,97 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#ifndef V8_X64_FRAMES_X64_H_
+#define V8_X64_FRAMES_X64_H_
+
+namespace v8 { namespace internal {
+
+// TODO(x64): This is a stub, mostly just a copy of the ia32 bit version.
+// This will all need to change to be correct for x64.
+
+static const int kNumRegs = 8;
+static const RegList kJSCallerSaved = 0;
+static const int kNumJSCallerSaved = 5;
+typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
+
+class StackHandlerConstants : public AllStatic {
+ public:
+  static const int kNextOffset  = -1 * kPointerSize;
+  static const int kPPOffset    = -1 * kPointerSize;
+  static const int kFPOffset    = -1 * kPointerSize;
+
+  static const int kCodeOffset  = -1 * kPointerSize;
+
+  static const int kStateOffset = -1 * kPointerSize;
+  static const int kPCOffset    = -1 * kPointerSize;
+
+  static const int kAddressDisplacement = -1 * kPointerSize;
+  static const int kSize = kPCOffset + kPointerSize;
+};
+
+
+class EntryFrameConstants : public AllStatic {
+ public:
+  static const int kCallerFPOffset      = -1 * kPointerSize;
+
+  static const int kFunctionArgOffset   = -1 * kPointerSize;
+  static const int kReceiverArgOffset   = -1 * kPointerSize;
+  static const int kArgcOffset          = -1 * kPointerSize;
+  static const int kArgvOffset          = -1 * kPointerSize;
+};
+
+
+class ExitFrameConstants : public AllStatic {
+ public:
+  static const int kDebugMarkOffset = -1 * kPointerSize;
+  static const int kSPOffset        = -1 * kPointerSize;
+
+  static const int kPPDisplacement = -1 * kPointerSize;
+
+  static const int kCallerFPOffset = -1 * kPointerSize;
+  static const int kCallerPCOffset = -1 * kPointerSize;
+};
+
+
+class StandardFrameConstants : public AllStatic {
+ public:
+  static const int kExpressionsOffset = -1 * kPointerSize;
+  static const int kMarkerOffset      = -1 * kPointerSize;
+  static const int kContextOffset     = -1 * kPointerSize;
+  static const int kCallerFPOffset    = -1 * kPointerSize;
+  static const int kCallerPCOffset    = -1 * kPointerSize;
+  static const int kCallerSPOffset    = -1 * kPointerSize;
+};
+
+
+class JavaScriptFrameConstants : public AllStatic {
+ public:
+  static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
+  static const int kSavedRegistersOffset = -1 * kPointerSize;
+  static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+
+  static const int kParam0Offset   = -1 * kPointerSize;
+  static const int kReceiverOffset = -1 * kPointerSize;
+};
+
+
+class ArgumentsAdaptorFrameConstants : public AllStatic {
+ public:
+  static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+};
+
+
+class InternalFrameConstants : public AllStatic {
+ public:
+  static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+};
+
+
+inline Object* JavaScriptFrame::function_slot_object() const {
+  const int offset = JavaScriptFrameConstants::kFunctionOffset;
+  return Memory::Object_at(fp() + offset);
+}
+
+} }  // namespace v8::internal
+
+#endif  // V8_X64_FRAMES_X64_H_
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 209aa2d..159d0c4 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,3 +25,347 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#ifndef V8_X64_MACRO_ASSEMBLER_X64_H_
+#define V8_X64_MACRO_ASSEMBLER_X64_H_
+
+#include "assembler.h"
+
+namespace v8 { namespace internal {
+
+// Forward declaration.
+class JumpTarget;
+
+
+// Helper types to make flags easier to read at call sites.
+enum InvokeFlag {
+  CALL_FUNCTION,
+  JUMP_FUNCTION
+};
+
+enum CodeLocation {
+  IN_JAVASCRIPT,
+  IN_JS_ENTRY,
+  IN_C_ENTRY
+};
+
+enum HandlerType {
+  TRY_CATCH_HANDLER,
+  TRY_FINALLY_HANDLER,
+  JS_ENTRY_HANDLER
+};
+
+
+// MacroAssembler implements a collection of frequently used macros.
+class MacroAssembler: public Assembler {
+ public:
+  MacroAssembler(void* buffer, int size);
+
+  // ---------------------------------------------------------------------------
+  // GC Support
+
+  // Set the remembered set bit for [object+offset].
+  // object is the object being stored into, value is the object being stored.
+  // If offset is zero, then the scratch register contains the array index into
+  // the elements array represented as a Smi.
+  // All registers are clobbered by the operation.
+  void RecordWrite(Register object,
+                   int offset,
+                   Register value,
+                   Register scratch);
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // ---------------------------------------------------------------------------
+  // Debugger Support
+
+  void SaveRegistersToMemory(RegList regs);
+  void RestoreRegistersFromMemory(RegList regs);
+  void PushRegistersFromMemory(RegList regs);
+  void PopRegistersToMemory(RegList regs);
+  void CopyRegistersFromStackToMemory(Register base,
+                                      Register scratch,
+                                      RegList regs);
+#endif
+
+  // ---------------------------------------------------------------------------
+  // Activation frames
+
+  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
+  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
+
+  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
+  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
+
+  // Enter specific kind of exit frame; either EXIT or
+  // EXIT_DEBUG. Expects the number of arguments in register eax and
+  // sets up the number of arguments in register edi and the pointer
+  // to the first argument in register esi.
+  void EnterExitFrame(StackFrame::Type type);
+
+  // Leave the current exit frame. Expects the return value in
+  // register eax:edx (untouched) and the pointer to the first
+  // argument in register esi.
+  void LeaveExitFrame(StackFrame::Type type);
+
+
+  // ---------------------------------------------------------------------------
+  // JavaScript invokes
+
+  // Invoke the JavaScript function code by either calling or jumping.
+  void InvokeCode(const Operand& code,
+                  const ParameterCount& expected,
+                  const ParameterCount& actual,
+                  InvokeFlag flag);
+
+  void InvokeCode(Handle<Code> code,
+                  const ParameterCount& expected,
+                  const ParameterCount& actual,
+                  RelocInfo::Mode rmode,
+                  InvokeFlag flag);
+
+  // Invoke the JavaScript function in the given register. Changes the
+  // current context to the context in the function before invoking.
+  void InvokeFunction(Register function,
+                      const ParameterCount& actual,
+                      InvokeFlag flag);
+
+  // Invoke specified builtin JavaScript function. Adds an entry to
+  // the unresolved list if the name does not resolve.
+  void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag);
+
+  // Store the code object for the given builtin in the target register.
+  void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+
+  // Expression support
+  void Set(Register dst, const Immediate& x);
+  void Set(const Operand& dst, const Immediate& x);
+
+  // Compare object type for heap object.
+  // Incoming register is heap_object and outgoing register is map.
+  void CmpObjectType(Register heap_object, InstanceType type, Register map);
+
+  // Compare instance type for map.
+  void CmpInstanceType(Register map, InstanceType type);
+
+  // FCmp is similar to integer cmp, but requires unsigned
+  // jcc instructions (je, ja, jae, jb, jbe, je, and jz).
+  void FCmp();
+
+  // ---------------------------------------------------------------------------
+  // Exception handling
+
+  // Push a new try handler and link into try handler chain.
+  // The return address must be pushed before calling this helper.
+  // On exit, eax contains TOS (next_sp).
+  void PushTryHandler(CodeLocation try_location, HandlerType type);
+
+
+  // ---------------------------------------------------------------------------
+  // Inline caching support
+
+  // Generates code that verifies that the maps of objects in the
+  // prototype chain of object hasn't changed since the code was
+  // generated and branches to the miss label if any map has. If
+  // necessary the function also generates code for security check
+  // in case of global object holders. The scratch and holder
+  // registers are always clobbered, but the object register is only
+  // clobbered if it is the same as the holder register. The function
+  // returns a register containing the holder - either object_reg or
+  // holder_reg.
+  Register CheckMaps(JSObject* object, Register object_reg,
+                     JSObject* holder, Register holder_reg,
+                     Register scratch, Label* miss);
+
+  // Generate code for checking access rights - used for security checks
+  // on access to global objects across environments. The holder register
+  // is left untouched, but the scratch register is clobbered.
+  void CheckAccessGlobalProxy(Register holder_reg,
+                              Register scratch,
+                              Label* miss);
+
+
+  // ---------------------------------------------------------------------------
+  // Support functions.
+
+  // Check if result is zero and op is negative.
+  void NegativeZeroTest(Register result, Register op, Label* then_label);
+
+  // Check if result is zero and op is negative in code using jump targets.
+  void NegativeZeroTest(CodeGenerator* cgen,
+                        Register result,
+                        Register op,
+                        JumpTarget* then_target);
+
+  // Check if result is zero and any of op1 and op2 are negative.
+  // Register scratch is destroyed, and it must be different from op2.
+  void NegativeZeroTest(Register result, Register op1, Register op2,
+                        Register scratch, Label* then_label);
+
+  // Try to get function prototype of a function and puts the value in
+  // the result register. Checks that the function really is a
+  // function and jumps to the miss label if the fast checks fail. The
+  // function register will be untouched; the other registers may be
+  // clobbered.
+  void TryGetFunctionPrototype(Register function,
+                               Register result,
+                               Register scratch,
+                               Label* miss);
+
+  // Generates code for reporting that an illegal operation has
+  // occurred.
+  void IllegalOperation(int num_arguments);
+
+  // ---------------------------------------------------------------------------
+  // Runtime calls
+
+  // Call a code stub.
+  void CallStub(CodeStub* stub);
+
+  // Return from a code stub after popping its arguments.
+  void StubReturn(int argc);
+
+  // Call a runtime routine.
+  // Eventually this should be used for all C calls.
+  void CallRuntime(Runtime::Function* f, int num_arguments);
+
+  // Convenience function: Same as above, but takes the fid instead.
+  void CallRuntime(Runtime::FunctionId id, int num_arguments);
+
+  // Tail call of a runtime routine (jump).
+  // Like JumpToBuiltin, but also takes care of passing the number
+  // of arguments.
+  void TailCallRuntime(const ExternalReference& ext, int num_arguments);
+
+  // Jump to the builtin routine.
+  void JumpToBuiltin(const ExternalReference& ext);
+
+
+  // ---------------------------------------------------------------------------
+  // Utilities
+
+  void Ret();
+
+  struct Unresolved {
+    int pc;
+    uint32_t flags;  // see Bootstrapper::FixupFlags decoders/encoders.
+    const char* name;
+  };
+  List<Unresolved>* unresolved() { return &unresolved_; }
+
+  Handle<Object> CodeObject() { return code_object_; }
+
+
+  // ---------------------------------------------------------------------------
+  // StatsCounter support
+
+  void SetCounter(StatsCounter* counter, int value);
+  void IncrementCounter(StatsCounter* counter, int value);
+  void DecrementCounter(StatsCounter* counter, int value);
+
+
+  // ---------------------------------------------------------------------------
+  // Debugging
+
+  // Calls Abort(msg) if the condition cc is not satisfied.
+  // Use --debug_code to enable.
+  void Assert(Condition cc, const char* msg);
+
+  // Like Assert(), but always enabled.
+  void Check(Condition cc, const char* msg);
+
+  // Print a message to stdout and abort execution.
+  void Abort(const char* msg);
+
+  // Verify restrictions about code generated in stubs.
+  void set_generating_stub(bool value) { generating_stub_ = value; }
+  bool generating_stub() { return generating_stub_; }
+  void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
+  bool allow_stub_calls() { return allow_stub_calls_; }
+
+ private:
+  List<Unresolved> unresolved_;
+  bool generating_stub_;
+  bool allow_stub_calls_;
+  Handle<Object> code_object_;  // This handle will be patched with the
+                                // code object on installation.
+
+  // Helper functions for generating invokes.
+  void InvokePrologue(const ParameterCount& expected,
+                      const ParameterCount& actual,
+                      Handle<Code> code_constant,
+                      const Operand& code_operand,
+                      Label* done,
+                      InvokeFlag flag);
+
+  // Get the code for the given builtin. Returns if able to resolve
+  // the function in the 'resolved' flag.
+  Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
+
+  // Activation support.
+  void EnterFrame(StackFrame::Type type);
+  void LeaveFrame(StackFrame::Type type);
+};
+
+
+// The code patcher is used to patch (typically) small parts of code e.g. for
+// debugging and other types of instrumentation. When using the code patcher
+// the exact number of bytes specified must be emitted. Is not legal to emit
+// relocation information. If any of these constraints are violated it causes
+// an assertion.
+class CodePatcher {
+ public:
+  CodePatcher(byte* address, int size);
+  virtual ~CodePatcher();
+
+  // Macro assembler to emit code.
+  MacroAssembler* masm() { return &masm_; }
+
+ private:
+  byte* address_;  // The address of the code being patched.
+  int size_;  // Number of bytes of the expected patch size.
+  MacroAssembler masm_;  // Macro assembler used to generate the code.
+};
+
+
+// -----------------------------------------------------------------------------
+// Static helper functions.
+
+// Generate an Operand for loading a field from an object.
+static inline Operand FieldOperand(Register object, int offset) {
+  return Operand(object, offset - kHeapObjectTag);
+}
+
+
+// Generate an Operand for loading an indexed field from an object.
+static inline Operand FieldOperand(Register object,
+                                   Register index,
+                                   ScaleFactor scale,
+                                   int offset) {
+  return Operand(object, index, scale, offset - kHeapObjectTag);
+}
+
+
+#ifdef GENERATED_CODE_COVERAGE
+extern void LogGeneratedCodeCoverage(const char* file_line);
+#define CODE_COVERAGE_STRINGIFY(x) #x
+#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
+#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
+#define ACCESS_MASM(masm) {                                               \
+    byte* x64_coverage_function =                                        \
+        reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
+    masm->pushfd();                                                       \
+    masm->pushad();                                                       \
+    masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__)));         \
+    masm->call(x64_coverage_function, RelocInfo::RUNTIME_ENTRY);         \
+    masm->pop(rax);                                                       \
+    masm->popad();                                                        \
+    masm->popfd();                                                        \
+  }                                                                       \
+  masm->
+#else
+#define ACCESS_MASM(masm) masm->
+#endif
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_X64_MACRO_ASSEMBLER_X64_H_
diff --git a/src/x64/simulator-x64.h b/src/x64/simulator-x64.h
index 209aa2d..8160e53 100644
--- a/src/x64/simulator-x64.h
+++ b/src/x64/simulator-x64.h
@@ -25,3 +25,23 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#ifndef V8_X64_SIMULATOR_X64_H_
+#define V8_X64_SIMULATOR_X64_H_
+
+
+// Since there is no simulator for the x64 architecture the only thing we can
+// do is to call the entry directly.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+  entry(p0, p1, p2, p3, p4);
+
+// Calculates the stack limit beyond which we will throw stack overflow errors.
+// This macro must be called from a C++ method. It relies on being able to take
+// the address of "this" to get a value on the current execution stack and then
+// calculates the stack limit based on that value.
+// NOTE: The check for overflow is not safe as there is no guarantee that the
+// running thread has its stack in all memory up to address 0x00000000.
+#define GENERATED_CODE_STACK_LIMIT(limit) \
+  (reinterpret_cast<uintptr_t>(this) >= limit ? \
+      reinterpret_cast<uintptr_t>(this) - limit : 0)
+
+#endif  // V8_X64_SIMULATOR_X64_H_
diff --git a/src/x64/virtual-frame-x64.h b/src/x64/virtual-frame-x64.h
index 209aa2d..f71766d 100644
--- a/src/x64/virtual-frame-x64.h
+++ b/src/x64/virtual-frame-x64.h
@@ -25,3 +25,469 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#ifndef V8_X64_VIRTUAL_FRAME_X64_H_
+#define V8_X64_VIRTUAL_FRAME_X64_H_
+
+#include "register-allocator.h"
+
+namespace v8 { namespace internal {
+
+// -------------------------------------------------------------------------
+// Virtual frames
+//
+// The virtual frame is an abstraction of the physical stack frame.  It
+// encapsulates the parameters, frame-allocated locals, and the expression
+// stack.  It supports push/pop operations on the expression stack, as well
+// as random access to the expression stack elements, locals, and
+// parameters.
+
+class VirtualFrame : public Malloced {
+ public:
+  // A utility class to introduce a scope where the virtual frame is
+  // expected to remain spilled.  The constructor spills the code
+  // generator's current frame, but no attempt is made to require it
+  // to stay spilled.  It is intended as documentation while the code
+  // generator is being transformed.
+  class SpilledScope BASE_EMBEDDED {
+   public:
+    explicit SpilledScope(CodeGenerator* cgen);
+
+    ~SpilledScope();
+
+   private:
+    CodeGenerator* cgen_;
+    bool previous_state_;
+  };
+
+  // An illegal index into the virtual frame.
+  static const int kIllegalIndex = -1;
+
+  // Construct an initial virtual frame on entry to a JS function.
+  explicit VirtualFrame(CodeGenerator* cgen);
+
+  // Construct a virtual frame as a clone of an existing one.
+  explicit VirtualFrame(VirtualFrame* original);
+
+  // Create a duplicate of an existing valid frame element.
+  FrameElement CopyElementAt(int index);
+
+  // The height of the virtual expression stack.
+  int height() const {
+    return elements_.length() - expression_base_index();
+  }
+
+  int register_index(Register reg) {
+    return register_locations_[reg.code()];
+  }
+
+  bool is_used(int reg_code) {
+    return register_locations_[reg_code] != kIllegalIndex;
+  }
+
+  bool is_used(Register reg) {
+    return is_used(reg.code());
+  }
+
+  // Add extra in-memory elements to the top of the frame to match an actual
+  // frame (eg, the frame after an exception handler is pushed).  No code is
+  // emitted.
+  void Adjust(int count);
+
+  // Forget count elements from the top of the frame all in-memory
+  // (including synced) and adjust the stack pointer downward, to
+  // match an external frame effect (examples include a call removing
+  // its arguments, and exiting a try/catch removing an exception
+  // handler).  No code will be emitted.
+  void Forget(int count);
+
+  // Forget count elements from the top of the frame without adjusting
+  // the stack pointer downward.  This is used, for example, before
+  // merging frames at break, continue, and return targets.
+  void ForgetElements(int count);
+
+  // Spill all values from the frame to memory.
+  void SpillAll();
+
+  // Spill all occurrences of a specific register from the frame.
+  void Spill(Register reg);
+
+  // Spill all occurrences of an arbitrary register if possible.  Return the
+  // register spilled or no_reg if it was not possible to free any register
+  // (ie, they all have frame-external references).
+  Register SpillAnyRegister();
+
+  // Prepare this virtual frame for merging to an expected frame by
+  // performing some state changes that do not require generating
+  // code.  It is guaranteed that no code will be generated.
+  void PrepareMergeTo(VirtualFrame* expected);
+
+  // Make this virtual frame have a state identical to an expected virtual
+  // frame.  As a side effect, code may be emitted to make this frame match
+  // the expected one.
+  void MergeTo(VirtualFrame* expected);
+
+  // Detach a frame from its code generator, perhaps temporarily.  This
+  // tells the register allocator that it is free to use frame-internal
+  // registers.  Used when the code generator's frame is switched from this
+  // one to NULL by an unconditional jump.
+  void DetachFromCodeGenerator();
+
+  // (Re)attach a frame to its code generator.  This informs the register
+  // allocator that the frame-internal register references are active again.
+  // Used when a code generator's frame is switched from NULL to this one by
+  // binding a label.
+  void AttachToCodeGenerator();
+
+  // Emit code for the physical JS entry and exit frame sequences.  After
+  // calling Enter, the virtual frame is ready for use; and after calling
+  // Exit it should not be used.  Note that Enter does not allocate space in
+  // the physical frame for storing frame-allocated locals.
+  void Enter();
+  void Exit();
+
+  // Prepare for returning from the frame by spilling locals.  This
+  // avoids generating unnecessary merge code when jumping to the
+  // shared return site.  Emits code for spills.
+  void PrepareForReturn();
+
+  // Allocate and initialize the frame-allocated locals.
+  void AllocateStackSlots(int count);
+
+  // An element of the expression stack as an assembly operand.
+  Operand ElementAt(int index) const {
+    return Operand(rsp, index * kPointerSize);
+  }
+
+  // Random-access store to a frame-top relative frame element.  The result
+  // becomes owned by the frame and is invalidated.
+  void SetElementAt(int index, Result* value);
+
+  // Set a frame element to a constant.  The index is frame-top relative.
+  void SetElementAt(int index, Handle<Object> value) {
+    Result temp(value, cgen_);
+    SetElementAt(index, &temp);
+  }
+
+  void PushElementAt(int index) {
+    PushFrameSlotAt(elements_.length() - index - 1);
+  }
+
+  void StoreToElementAt(int index) {
+    StoreToFrameSlotAt(elements_.length() - index - 1);
+  }
+
+  // A frame-allocated local as an assembly operand.
+  Operand LocalAt(int index) const {
+    ASSERT(0 <= index);
+    ASSERT(index < local_count_);
+    return Operand(rbp, kLocal0Offset - index * kPointerSize);
+  }
+
+  // Push a copy of the value of a local frame slot on top of the frame.
+  void PushLocalAt(int index) {
+    PushFrameSlotAt(local0_index() + index);
+  }
+
+  // Push the value of a local frame slot on top of the frame and invalidate
+  // the local slot.  The slot should be written to before trying to read
+  // from it again.
+  void TakeLocalAt(int index) {
+    TakeFrameSlotAt(local0_index() + index);
+  }
+
+  // Store the top value on the virtual frame into a local frame slot.  The
+  // value is left in place on top of the frame.
+  void StoreToLocalAt(int index) {
+    StoreToFrameSlotAt(local0_index() + index);
+  }
+
+  // Push the address of the receiver slot on the frame.
+  void PushReceiverSlotAddress();
+
+  // Push the function on top of the frame.
+  void PushFunction() { PushFrameSlotAt(function_index()); }
+
+  // Save the value of the esi register to the context frame slot.
+  void SaveContextRegister();
+
+  // Restore the esi register from the value of the context frame
+  // slot.
+  void RestoreContextRegister();
+
+  // A parameter as an assembly operand.
+  Operand ParameterAt(int index) const {
+    ASSERT(-1 <= index);  // -1 is the receiver.
+    ASSERT(index < parameter_count_);
+    return Operand(rbp, (1 + parameter_count_ - index) * kPointerSize);
+  }
+
+  // Push a copy of the value of a parameter frame slot on top of the frame.
+  void PushParameterAt(int index) {
+    PushFrameSlotAt(param0_index() + index);
+  }
+
+  // Push the value of a parameter frame slot on top of the frame and
+  // invalidate the parameter slot.  The slot should be written to before
+  // trying to read from it again.
+  void TakeParameterAt(int index) {
+    TakeFrameSlotAt(param0_index() + index);
+  }
+
+  // Store the top value on the virtual frame into a parameter frame slot.
+  // The value is left in place on top of the frame.
+  void StoreToParameterAt(int index) {
+    StoreToFrameSlotAt(param0_index() + index);
+  }
+
+  // The receiver frame slot.
+  Operand Receiver() const { return ParameterAt(-1); }
+
+  // Push a try-catch or try-finally handler on top of the virtual frame.
+  void PushTryHandler(HandlerType type);
+
+  // Call stub given the number of arguments it expects on (and
+  // removes from) the stack.
+  Result CallStub(CodeStub* stub, int arg_count);
+
+  // Call stub that takes a single argument passed in eax.  The
+  // argument is given as a result which does not have to be eax or
+  // even a register.  The argument is consumed by the call.
+  Result CallStub(CodeStub* stub, Result* arg);
+
+  // Call stub that takes a pair of arguments passed in edx (arg0) and
+  // eax (arg1).  The arguments are given as results which do not have
+  // to be in the proper registers or even in registers.  The
+  // arguments are consumed by the call.
+  Result CallStub(CodeStub* stub, Result* arg0, Result* arg1);
+
+  // Call runtime given the number of arguments expected on (and
+  // removed from) the stack.
+  Result CallRuntime(Runtime::Function* f, int arg_count);
+  Result CallRuntime(Runtime::FunctionId id, int arg_count);
+
+  // Invoke builtin given the number of arguments it expects on (and
+  // removes from) the stack.
+  Result InvokeBuiltin(Builtins::JavaScript id,
+                       InvokeFlag flag,
+                       int arg_count);
+
+  // Call load IC.  Name and receiver are found on top of the frame.
+  // Receiver is not dropped.
+  Result CallLoadIC(RelocInfo::Mode mode);
+
+  // Call keyed load IC.  Key and receiver are found on top of the
+  // frame.  They are not dropped.
+  Result CallKeyedLoadIC(RelocInfo::Mode mode);
+
+  // Call store IC.  Name, value, and receiver are found on top of the
+  // frame.  Receiver is not dropped.
+  Result CallStoreIC();
+
+  // Call keyed store IC.  Value, key, and receiver are found on top
+  // of the frame.  Key and receiver are not dropped.
+  Result CallKeyedStoreIC();
+
+  // Call call IC.  Arguments, receiver, and function name are found
+  // on top of the frame.  Function name slot is not dropped.  The
+  // argument count does not include the receiver.
+  Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
+
+  // Allocate and call JS function as constructor.  Arguments,
+  // receiver (global object), and function are found on top of the
+  // frame.  Function is not dropped.  The argument count does not
+  // include the receiver.
+  Result CallConstructor(int arg_count);
+
+  // Drop a number of elements from the top of the expression stack.  May
+  // emit code to affect the physical frame.  Does not clobber any registers
+  // excepting possibly the stack pointer.
+  void Drop(int count);
+
+  // Drop one element.
+  void Drop() { Drop(1); }
+
+  // Duplicate the top element of the frame.
+  void Dup() { PushFrameSlotAt(elements_.length() - 1); }
+
+  // Pop an element from the top of the expression stack.  Returns a
+  // Result, which may be a constant or a register.
+  Result Pop();
+
+  // Pop and save an element from the top of the expression stack and
+  // emit a corresponding pop instruction.
+  void EmitPop(Register reg);
+  void EmitPop(Operand operand);
+
+  // Push an element on top of the expression stack and emit a
+  // corresponding push instruction.
+  void EmitPush(Register reg);
+  void EmitPush(Operand operand);
+  void EmitPush(Immediate immediate);
+
+  // Push an element on the virtual frame.
+  void Push(Register reg, StaticType static_type = StaticType());
+  void Push(Handle<Object> value);
+  void Push(Smi* value) { Push(Handle<Object>(value)); }
+
+  // Pushing a result invalidates it (its contents become owned by the
+  // frame).
+  void Push(Result* result);
+
+  // Nip removes zero or more elements from immediately below the top
+  // of the frame, leaving the previous top-of-frame value on top of
+  // the frame.  Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
+  void Nip(int num_dropped);
+
+ private:
+  static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
+  static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
+  static const int kContextOffset = StandardFrameConstants::kContextOffset;
+
+  static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
+  static const int kPreallocatedElements = 5 + 8;  // 8 expression stack slots.
+
+  CodeGenerator* cgen_;
+  MacroAssembler* masm_;
+
+  List<FrameElement> elements_;
+
+  // The number of frame-allocated locals and parameters respectively.
+  int parameter_count_;
+  int local_count_;
+
+  // The index of the element that is at the processor's stack pointer
+  // (the esp register).
+  int stack_pointer_;
+
+  // The index of the element that is at the processor's frame pointer
+  // (the ebp register).
+  int frame_pointer_;
+
+  // The index of the register frame element using each register, or
+  // kIllegalIndex if a register is not on the frame.
+  int register_locations_[kNumRegisters];
+
+  // The index of the first parameter.  The receiver lies below the first
+  // parameter.
+  int param0_index() const { return 1; }
+
+  // The index of the context slot in the frame.
+  int context_index() const {
+    ASSERT(frame_pointer_ != kIllegalIndex);
+    return frame_pointer_ + 1;
+  }
+
+  // The index of the function slot in the frame.  It lies above the context
+  // slot.
+  int function_index() const {
+    ASSERT(frame_pointer_ != kIllegalIndex);
+    return frame_pointer_ + 2;
+  }
+
+  // The index of the first local.  Between the parameters and the locals
+  // lie the return address, the saved frame pointer, the context, and the
+  // function.
+  int local0_index() const {
+    ASSERT(frame_pointer_ != kIllegalIndex);
+    return frame_pointer_ + 3;
+  }
+
+  // The index of the base of the expression stack.
+  int expression_base_index() const { return local0_index() + local_count_; }
+
+  // Convert a frame index into a frame pointer relative offset into the
+  // actual stack.
+  int fp_relative(int index) const {
+    return (frame_pointer_ - index) * kPointerSize;
+  }
+
+  // Record an occurrence of a register in the virtual frame.  This has the
+  // effect of incrementing the register's external reference count and
+  // of updating the index of the register's location in the frame.
+  void Use(Register reg, int index);
+
+  // Record that a register reference has been dropped from the frame.  This
+  // decrements the register's external reference count and invalidates the
+  // index of the register's location in the frame.
+  void Unuse(Register reg);
+
+  // Spill the element at a particular index---write it to memory if
+  // necessary, free any associated register, and forget its value if
+  // constant.
+  void SpillElementAt(int index);
+
+  // Sync the element at a particular index.  If it is a register or
+  // constant that disagrees with the value on the stack, write it to memory.
+  // Keep the element type as register or constant, and clear the dirty bit.
+  void SyncElementAt(int index);
+
+  // Sync the range of elements in [begin, end).
+  void SyncRange(int begin, int end);
+
+  // Sync a single unsynced element that lies beneath or at the stack pointer.
+  void SyncElementBelowStackPointer(int index);
+
+  // Sync a single unsynced element that lies just above the stack pointer.
+  void SyncElementByPushing(int index);
+
+  // Push a copy of a frame slot (typically a local or parameter) on top of
+  // the frame.
+  void PushFrameSlotAt(int index);
+
+  // Push the value of a frame slot (typically a local or parameter) on
+  // top of the frame and invalidate the slot.
+  void TakeFrameSlotAt(int index);
+
+  // Store the value on top of the frame to a frame slot (typically a local
+  // or parameter).
+  void StoreToFrameSlotAt(int index);
+
+  // Spill all elements in registers. Spill the top spilled_args elements
+  // on the frame.  Sync all other frame elements.
+  // Then drop dropped_args elements from the virtual frame, to match
+  // the effect of an upcoming call that will drop them from the stack.
+  void PrepareForCall(int spilled_args, int dropped_args);
+
+  // Move frame elements currently in registers or constants, that
+  // should be in memory in the expected frame, to memory.
+  void MergeMoveRegistersToMemory(VirtualFrame* expected);
+
+  // Make the register-to-register moves necessary to
+  // merge this frame with the expected frame.
+  // Register to memory moves must already have been made,
+  // and memory to register moves must follow this call.
+  // This is because some new memory-to-register moves are
+  // created in order to break cycles of register moves.
+  // Used in the implementation of MergeTo().
+  void MergeMoveRegistersToRegisters(VirtualFrame* expected);
+
+  // Make the memory-to-register and constant-to-register moves
+  // needed to make this frame equal the expected frame.
+  // Called after all register-to-memory and register-to-register
+  // moves have been made.  After this function returns, the frames
+  // should be equal.
+  void MergeMoveMemoryToRegisters(VirtualFrame* expected);
+
+  // Invalidates a frame slot (puts an invalid frame element in it).
+  // Copies on the frame are correctly handled, and if this slot was
+  // the backing store of copies, the index of the new backing store
+  // is returned.  Otherwise, returns kIllegalIndex.
+  // Register counts are correctly updated.
+  int InvalidateFrameSlotAt(int index);
+
+  // Call a code stub that has already been prepared for calling (via
+  // PrepareForCall).
+  Result RawCallStub(CodeStub* stub);
+
+  // Calls a code object which has already been prepared for calling
+  // (via PrepareForCall).
+  Result RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
+
+  bool Equals(VirtualFrame* other);
+
+  friend class JumpTarget;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_X64_VIRTUAL_FRAME_X64_H_
diff --git a/test/cctest/SConscript b/test/cctest/SConscript
index eb94fc5..740acba 100644
--- a/test/cctest/SConscript
+++ b/test/cctest/SConscript
@@ -47,6 +47,7 @@
     'test-heap.cc',
     'test-list.cc',
     'test-lock.cc',
+    'test-log.cc',
     'test-mark-compact.cc',
     'test-regexp.cc',
     'test-serialize.cc',
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index a59b1d4..4b55b6b 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -4627,35 +4627,42 @@
   Local<Value> value;
   CHECK(!try_catch.HasCaught());
 
-  value = Script::Compile(v8_str("obj(42)"))->Run();
+  value = CompileRun("obj(42)");
   CHECK(!try_catch.HasCaught());
   CHECK_EQ(42, value->Int32Value());
 
-  value = Script::Compile(v8_str("(function(o){return o(49)})(obj)"))->Run();
+  value = CompileRun("(function(o){return o(49)})(obj)");
   CHECK(!try_catch.HasCaught());
   CHECK_EQ(49, value->Int32Value());
 
   // test special case of call as function
-  value = Script::Compile(v8_str("[obj]['0'](45)"))->Run();
+  value = CompileRun("[obj]['0'](45)");
   CHECK(!try_catch.HasCaught());
   CHECK_EQ(45, value->Int32Value());
 
-  value = Script::Compile(v8_str("obj.call = Function.prototype.call;"
-                                 "obj.call(null, 87)"))->Run();
+  value = CompileRun("obj.call = Function.prototype.call;"
+                     "obj.call(null, 87)");
   CHECK(!try_catch.HasCaught());
   CHECK_EQ(87, value->Int32Value());
 
   // Regression tests for bug #1116356: Calling call through call/apply
   // must work for non-function receivers.
   const char* apply_99 = "Function.prototype.call.apply(obj, [this, 99])";
-  value = Script::Compile(v8_str(apply_99))->Run();
+  value = CompileRun(apply_99);
   CHECK(!try_catch.HasCaught());
   CHECK_EQ(99, value->Int32Value());
 
   const char* call_17 = "Function.prototype.call.call(obj, this, 17)";
-  value = Script::Compile(v8_str(call_17))->Run();
+  value = CompileRun(call_17);
   CHECK(!try_catch.HasCaught());
   CHECK_EQ(17, value->Int32Value());
+
+  // Check that the call-as-function handler can be called through
+  // new.  Currently, there is no way to check in the call-as-function
+  // handler if it has been called through new or not.
+  value = CompileRun("new obj(42)");
+  CHECK(!try_catch.HasCaught());
+  CHECK_EQ(42, value->Int32Value());
 }
 
 
@@ -4732,6 +4739,44 @@
 }
 
 
+static v8::Handle<Value> InterceptorHasOwnPropertyGetterGC(
+    Local<String> name,
+    const AccessorInfo& info) {
+  ApiTestFuzzer::Fuzz();
+  i::Heap::CollectAllGarbage();
+  return v8::Handle<Value>();
+}
+
+
+THREADED_TEST(InterceptorHasOwnPropertyCausingGC) {
+  v8::HandleScope scope;
+  LocalContext context;
+  Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
+  Local<v8::ObjectTemplate> instance_templ = fun_templ->InstanceTemplate();
+  instance_templ->SetNamedPropertyHandler(InterceptorHasOwnPropertyGetterGC);
+  Local<Function> function = fun_templ->GetFunction();
+  context->Global()->Set(v8_str("constructor"), function);
+  // Let's first make some stuff so we can be sure to get a good GC.
+  CompileRun(
+      "function makestr(size) {"
+      "  switch (size) {"
+      "    case 1: return 'f';"
+      "    case 2: return 'fo';"
+      "    case 3: return 'foo';"
+      "  }"
+      "  return makestr(size >> 1) + makestr((size + 1) >> 1);"
+      "}"
+      "var x = makestr(12345);"
+      "x = makestr(31415);"
+      "x = makestr(23456);");
+  v8::Handle<Value> value = CompileRun(
+      "var o = new constructor();"
+      "o.__proto__ = new String(x);"
+      "o.hasOwnProperty('ostehaps');");
+  CHECK_EQ(false, value->BooleanValue());
+}
+
+
 static v8::Handle<Value> InterceptorLoadICGetter(Local<String> name,
                                                  const AccessorInfo& info) {
   ApiTestFuzzer::Fuzz();
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index 93286eb..288efba 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -167,11 +167,22 @@
       (*env)->Global()->Get(v8::String::New(function_name)));
 }
 
+
+// Compile and run the supplied source and return the requested function.
+static v8::Local<v8::Function> CompileFunction(const char* source,
+                                               const char* function_name) {
+  v8::Script::Compile(v8::String::New(source))->Run();
+  return v8::Local<v8::Function>::Cast(
+    v8::Context::GetCurrent()->Global()->Get(v8::String::New(function_name)));
+}
+
+
 // Helper function that compiles and runs the source.
 static v8::Local<v8::Value> CompileRun(const char* source) {
   return v8::Script::Compile(v8::String::New(source))->Run();
 }
 
+
 // Is there any debug info for the function?
 static bool HasDebugInfo(v8::Handle<v8::Function> fun) {
   Handle<v8::internal::JSFunction> f = v8::Utils::OpenHandle(*fun);
@@ -4588,3 +4599,84 @@
   CHECK_EQ("new name", last_script_name_hit);
   CHECK_EQ("abc 123", last_script_data_hit);
 }
+
+
+static v8::Persistent<v8::Context> expected_context;
+static v8::Handle<v8::Value> expected_context_data;
+
+
+// Check that the expected context is the one generating the debug event.
+static void ContextCheckMessageHandler(const v8::Debug::Message& message) {
+  CHECK(message.GetEventContext() == expected_context);
+  CHECK(message.GetEventContext()->GetData()->StrictEquals(
+      expected_context_data));
+  message_handler_hit_count++;
+
+  const int kBufferSize = 1000;
+  uint16_t buffer[kBufferSize];
+  const char* command_continue =
+    "{\"seq\":0,"
+     "\"type\":\"request\","
+     "\"command\":\"continue\"}";
+
+  // Send a continue command for break events.
+  if (message.GetEvent() == v8::Break) {
+    v8::Debug::SendCommand(buffer, AsciiToUtf16(command_continue, buffer));
+  }
+}
+
+
+// Test which creates two contexts and sets different embedder data on each.
+// Checks that this data is set correctly and that when the debug message
+// handler is called the expected context is the one active.
+TEST(ContextData) {
+  v8::HandleScope scope;
+
+  v8::Debug::SetMessageHandler2(ContextCheckMessageHandler);
+
+  // Create two contexts.
+  v8::Persistent<v8::Context> context_1;
+  v8::Persistent<v8::Context> context_2;
+  v8::Handle<v8::ObjectTemplate> global_template =
+      v8::Handle<v8::ObjectTemplate>();
+  v8::Handle<v8::Value> global_object = v8::Handle<v8::Value>();
+  context_1 = v8::Context::New(NULL, global_template, global_object);
+  context_2 = v8::Context::New(NULL, global_template, global_object);
+
+  // Default data value is undefined.
+  CHECK(context_1->GetData()->IsUndefined());
+  CHECK(context_2->GetData()->IsUndefined());
+
+  // Set and check different data values.
+  v8::Handle<v8::Value> data_1 = v8::Number::New(1);
+  v8::Handle<v8::Value> data_2 = v8::String::New("2");
+  context_1->SetData(data_1);
+  context_2->SetData(data_2);
+  CHECK(context_1->GetData()->StrictEquals(data_1));
+  CHECK(context_2->GetData()->StrictEquals(data_2));
+
+  // Simple test function which causes a break.
+  const char* source = "function f() { debugger; }";
+
+  // Enter and run function in the first context.
+  {
+    v8::Context::Scope context_scope(context_1);
+    expected_context = context_1;
+    expected_context_data = data_1;
+    v8::Local<v8::Function> f = CompileFunction(source, "f");
+    f->Call(context_1->Global(), 0, NULL);
+  }
+
+
+  // Enter and run function in the second context.
+  {
+    v8::Context::Scope context_scope(context_2);
+    expected_context = context_2;
+    expected_context_data = data_2;
+    v8::Local<v8::Function> f = CompileFunction(source, "f");
+    f->Call(context_2->Global(), 0, NULL);
+  }
+
+  // Two times compile event and two times break event.
+  CHECK_GT(message_handler_hit_count, 4);
+}
diff --git a/test/cctest/test-log-ia32.cc b/test/cctest/test-log-ia32.cc
index dde8512..43cb294 100644
--- a/test/cctest/test-log-ia32.cc
+++ b/test/cctest/test-log-ia32.cc
@@ -8,9 +8,11 @@
 
 #include "v8.h"
 
+#include "codegen.h"
 #include "log.h"
 #include "top.h"
 #include "cctest.h"
+#include "disassembler.h"
 
 using v8::Function;
 using v8::Local;
@@ -20,12 +22,15 @@
 using v8::Value;
 
 using v8::internal::byte;
+using v8::internal::Address;
 using v8::internal::Handle;
 using v8::internal::JSFunction;
 using v8::internal::StackTracer;
 using v8::internal::TickSample;
 using v8::internal::Top;
 
+namespace i = v8::internal;
+
 
 static v8::Persistent<v8::Context> env;
 
@@ -42,8 +47,8 @@
 }
 
 
-static void DoTrace(unsigned int fp) {
-  trace_env.sample->fp = fp;
+static void DoTrace(Address fp) {
+  trace_env.sample->fp = reinterpret_cast<uintptr_t>(fp);
   // sp is only used to define stack high bound
   trace_env.sample->sp =
       reinterpret_cast<unsigned int>(trace_env.sample) - 10240;
@@ -53,7 +58,7 @@
 
 // Hide c_entry_fp to emulate situation when sampling is done while
 // pure JS code is being executed
-static void DoTraceHideCEntryFPAddress(unsigned int fp) {
+static void DoTraceHideCEntryFPAddress(Address fp) {
   v8::internal::Address saved_c_frame_fp = *(Top::c_entry_fp_address());
   CHECK(saved_c_frame_fp);
   *(Top::c_entry_fp_address()) = 0;
@@ -62,6 +67,28 @@
 }
 
 
+static void CheckRetAddrIsInFunction(const char* func_name,
+                                     Address ret_addr,
+                                     Address func_start_addr,
+                                     unsigned int func_len) {
+  printf("CheckRetAddrIsInFunction \"%s\": %p %p %p\n",
+         func_name, func_start_addr, ret_addr, func_start_addr + func_len);
+  CHECK_GE(ret_addr, func_start_addr);
+  CHECK_GE(func_start_addr + func_len, ret_addr);
+}
+
+
+static void CheckRetAddrIsInJSFunction(const char* func_name,
+                                       Address ret_addr,
+                                       Handle<JSFunction> func) {
+  v8::internal::Code* func_code = func->code();
+  CheckRetAddrIsInFunction(
+      func_name, ret_addr,
+      func_code->instruction_start(),
+      func_code->ExecutableSize());
+}
+
+
 // --- T r a c e   E x t e n s i o n ---
 
 class TraceExtension : public v8::Extension {
@@ -72,7 +99,7 @@
   static v8::Handle<v8::Value> Trace(const v8::Arguments& args);
   static v8::Handle<v8::Value> JSTrace(const v8::Arguments& args);
  private:
-  static unsigned int GetFP(const v8::Arguments& args);
+  static Address GetFP(const v8::Arguments& args);
   static const char* kSource;
 };
 
@@ -95,10 +122,10 @@
 }
 
 
-unsigned int TraceExtension::GetFP(const v8::Arguments& args) {
+Address TraceExtension::GetFP(const v8::Arguments& args) {
   CHECK_EQ(1, args.Length());
-  unsigned int fp = args[0]->Int32Value() << 2;
-  printf("Trace: %08x\n", fp);
+  Address fp = reinterpret_cast<Address>(args[0]->Int32Value() << 2);
+  printf("Trace: %p\n", fp);
   return fp;
 }
 
@@ -119,10 +146,162 @@
 v8::DeclareExtension kTraceExtensionDeclaration(&kTraceExtension);
 
 
+static void InitializeVM() {
+  if (env.IsEmpty()) {
+    v8::HandleScope scope;
+    const char* extensions[] = { "v8/trace" };
+    v8::ExtensionConfiguration config(1, extensions);
+    env = v8::Context::New(&config);
+  }
+  v8::HandleScope scope;
+  env->Enter();
+}
+
+
+static Handle<JSFunction> CompileFunction(const char* source) {
+  return v8::Utils::OpenHandle(*Script::Compile(String::New(source)));
+}
+
+
+static void CompileRun(const char* source) {
+  Script::Compile(String::New(source))->Run();
+}
+
+
+static Local<Value> GetGlobalProperty(const char* name) {
+  return env->Global()->Get(String::New(name));
+}
+
+
+static Handle<JSFunction> GetGlobalJSFunction(const char* name) {
+  Handle<JSFunction> js_func(JSFunction::cast(
+                                 *(v8::Utils::OpenHandle(
+                                       *GetGlobalProperty(name)))));
+  return js_func;
+}
+
+
+static void CheckRetAddrIsInJSFunction(const char* func_name,
+                                       Address ret_addr) {
+  CheckRetAddrIsInJSFunction(func_name, ret_addr,
+                             GetGlobalJSFunction(func_name));
+}
+
+
+static void SetGlobalProperty(const char* name, Local<Value> value) {
+  env->Global()->Set(String::New(name), value);
+}
+
+
+static Handle<v8::internal::String> NewString(const char* s) {
+  return i::Factory::NewStringFromAscii(i::CStrVector(s));
+}
+
+
+namespace v8 { namespace internal {
+
+class CodeGeneratorPatcher {
+ public:
+  CodeGeneratorPatcher() {
+    CodeGenerator::InlineRuntimeLUT genGetFramePointer =
+        {&CodeGenerator::GenerateGetFramePointer, "_GetFramePointer"};
+    // _FastCharCodeAt is not used in our tests.
+    bool result = CodeGenerator::PatchInlineRuntimeEntry(
+        NewString("_FastCharCodeAt"),
+        genGetFramePointer, &oldInlineEntry);
+    CHECK(result);
+  }
+
+  ~CodeGeneratorPatcher() {
+    CHECK(CodeGenerator::PatchInlineRuntimeEntry(
+        NewString("_GetFramePointer"),
+        oldInlineEntry, NULL));
+  }
+
+ private:
+  CodeGenerator::InlineRuntimeLUT oldInlineEntry;
+};
+
+} }  // namespace v8::internal
+
+
+// Creates a global function named 'func_name' that calls the tracing
+// function 'trace_func_name' with an actual EBP register value,
+// shifted right to be presented as Smi.
+static void CreateTraceCallerFunction(const char* func_name,
+                                      const char* trace_func_name) {
+  i::EmbeddedVector<char, 256> trace_call_buf;
+  i::OS::SNPrintF(trace_call_buf, "%s(%%_GetFramePointer());", trace_func_name);
+
+  // Compile the script.
+  i::CodeGeneratorPatcher patcher;
+  bool allow_natives_syntax = i::FLAG_allow_natives_syntax;
+  i::FLAG_allow_natives_syntax = true;
+  Handle<JSFunction> func = CompileFunction(trace_call_buf.start());
+  CHECK(!func.is_null());
+  i::FLAG_allow_natives_syntax = allow_natives_syntax;
+
+#ifdef DEBUG
+  v8::internal::Code* func_code = func->code();
+  CHECK(func_code->IsCode());
+  func_code->Print();
+#endif
+
+  SetGlobalProperty(func_name, v8::ToApi<Value>(func));
+}
+
+
+TEST(CFromJSStackTrace) {
+  TickSample sample;
+  StackTracer tracer(reinterpret_cast<uintptr_t>(&sample));
+  InitTraceEnv(&tracer, &sample);
+
+  InitializeVM();
+  v8::HandleScope scope;
+  CreateTraceCallerFunction("JSFuncDoTrace", "trace");
+  CompileRun(
+      "function JSTrace() {"
+      "         JSFuncDoTrace();"
+      "};\n"
+      "JSTrace();");
+  CHECK_GT(sample.frames_count, 1);
+  // Stack sampling will start from the first JS function, i.e. "JSFuncDoTrace"
+  CheckRetAddrIsInJSFunction("JSFuncDoTrace",
+                             sample.stack[0]);
+  CheckRetAddrIsInJSFunction("JSTrace",
+                             sample.stack[1]);
+}
+
+
+TEST(PureJSStackTrace) {
+  TickSample sample;
+  StackTracer tracer(reinterpret_cast<uintptr_t>(&sample));
+  InitTraceEnv(&tracer, &sample);
+
+  InitializeVM();
+  v8::HandleScope scope;
+  CreateTraceCallerFunction("JSFuncDoTrace", "js_trace");
+  CompileRun(
+      "function JSTrace() {"
+      "         JSFuncDoTrace();"
+      "};\n"
+      "function OuterJSTrace() {"
+      "         JSTrace();"
+      "};\n"
+      "OuterJSTrace();");
+  CHECK_GT(sample.frames_count, 1);
+  // Stack sampling will start from the caller of JSFuncDoTrace, i.e. "JSTrace"
+  CheckRetAddrIsInJSFunction("JSTrace",
+                             sample.stack[0]);
+  CheckRetAddrIsInJSFunction("OuterJSTrace",
+                             sample.stack[1]);
+}
+
+
 static void CFuncDoTrace() {
-  unsigned int fp;
+  Address fp;
 #ifdef __GNUC__
-  fp = reinterpret_cast<unsigned int>(__builtin_frame_address(0));
+  fp = reinterpret_cast<Address>(__builtin_frame_address(0));
 #elif defined _MSC_VER
   __asm mov [fp], ebp  // NOLINT
 #endif
@@ -142,7 +321,7 @@
 
 TEST(PureCStackTrace) {
   TickSample sample;
-  StackTracer tracer(reinterpret_cast<unsigned int>(&sample));
+  StackTracer tracer(reinterpret_cast<uintptr_t>(&sample));
   InitTraceEnv(&tracer, &sample);
   // Check that sampler doesn't crash
   CHECK_EQ(10, CFunc(10));
diff --git a/test/cctest/test-log.cc b/test/cctest/test-log.cc
new file mode 100644
index 0000000..6a7e54f
--- /dev/null
+++ b/test/cctest/test-log.cc
@@ -0,0 +1,117 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+//
+// Tests of logging functions from log.h
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+
+#include "v8.h"
+
+#include "log.h"
+
+#include "cctest.h"
+
+using v8::internal::Logger;
+
+static void SetUp() {
+  // Log to memory buffer.
+  v8::internal::FLAG_logfile = "*";
+  v8::internal::FLAG_log = true;
+  Logger::Setup();
+}
+
+static void TearDown() {
+  Logger::TearDown();
+}
+
+
+TEST(EmptyLog) {
+  SetUp();
+  CHECK_EQ(0, Logger::GetLogLines(0, NULL, 0));
+  CHECK_EQ(0, Logger::GetLogLines(100, NULL, 0));
+  CHECK_EQ(0, Logger::GetLogLines(0, NULL, 100));
+  CHECK_EQ(0, Logger::GetLogLines(100, NULL, 100));
+  TearDown();
+}
+
+
+TEST(GetMessages) {
+  SetUp();
+  Logger::StringEvent("aaa", "bbb");
+  Logger::StringEvent("cccc", "dddd");
+  CHECK_EQ(0, Logger::GetLogLines(0, NULL, 0));
+  char log_lines[100];
+  memset(log_lines, 0, sizeof(log_lines));
+  // Requesting data size which is smaller than first log message length.
+  CHECK_EQ(0, Logger::GetLogLines(0, log_lines, 3));
+  // See Logger::StringEvent.
+  const char* line_1 = "aaa,\"bbb\"\n";
+  const int line_1_len = strlen(line_1);
+  // Still smaller than log message length.
+  CHECK_EQ(0, Logger::GetLogLines(0, log_lines, line_1_len - 1));
+  // The exact size.
+  CHECK_EQ(line_1_len, Logger::GetLogLines(0, log_lines, line_1_len));
+  CHECK_EQ(line_1, log_lines);
+  memset(log_lines, 0, sizeof(log_lines));
+  // A bit more than the first line length.
+  CHECK_EQ(line_1_len, Logger::GetLogLines(0, log_lines, line_1_len + 3));
+  CHECK_EQ(line_1, log_lines);
+  memset(log_lines, 0, sizeof(log_lines));
+  const char* line_2 = "cccc,\"dddd\"\n";
+  const int line_2_len = strlen(line_2);
+  // Now start with line_2 beginning.
+  CHECK_EQ(0, Logger::GetLogLines(line_1_len, log_lines, 0));
+  CHECK_EQ(0, Logger::GetLogLines(line_1_len, log_lines, 3));
+  CHECK_EQ(0, Logger::GetLogLines(line_1_len, log_lines, line_2_len - 1));
+  CHECK_EQ(line_2_len, Logger::GetLogLines(line_1_len, log_lines, line_2_len));
+  CHECK_EQ(line_2, log_lines);
+  memset(log_lines, 0, sizeof(log_lines));
+  CHECK_EQ(line_2_len,
+           Logger::GetLogLines(line_1_len, log_lines, line_2_len + 3));
+  CHECK_EQ(line_2, log_lines);
+  memset(log_lines, 0, sizeof(log_lines));
+  // Now get entire buffer contents.
+  const char* all_lines = "aaa,\"bbb\"\ncccc,\"dddd\"\n";
+  const int all_lines_len = strlen(all_lines);
+  CHECK_EQ(all_lines_len, Logger::GetLogLines(0, log_lines, all_lines_len));
+  CHECK_EQ(all_lines, log_lines);
+  memset(log_lines, 0, sizeof(log_lines));
+  CHECK_EQ(all_lines_len, Logger::GetLogLines(0, log_lines, all_lines_len + 3));
+  CHECK_EQ(all_lines, log_lines);
+  memset(log_lines, 0, sizeof(log_lines));
+  TearDown();
+}
+
+
+TEST(BeyondWritePosition) {
+  SetUp();
+  Logger::StringEvent("aaa", "bbb");
+  Logger::StringEvent("cccc", "dddd");
+  // See Logger::StringEvent.
+  const char* all_lines = "aaa,\"bbb\"\ncccc,\"dddd\"\n";
+  const int all_lines_len = strlen(all_lines);
+  CHECK_EQ(0, Logger::GetLogLines(all_lines_len, NULL, 1));
+  CHECK_EQ(0, Logger::GetLogLines(all_lines_len, NULL, 100));
+  CHECK_EQ(0, Logger::GetLogLines(all_lines_len + 1, NULL, 1));
+  CHECK_EQ(0, Logger::GetLogLines(all_lines_len + 1, NULL, 100));
+  CHECK_EQ(0, Logger::GetLogLines(all_lines_len + 100, NULL, 1));
+  CHECK_EQ(0, Logger::GetLogLines(all_lines_len + 100, NULL, 100));
+  CHECK_EQ(0, Logger::GetLogLines(10 * 1024 * 1024, NULL, 1));
+  CHECK_EQ(0, Logger::GetLogLines(10 * 1024 * 1024, NULL, 100));
+  TearDown();
+}
+
+
+TEST(MemoryLoggingTurnedOff) {
+  // Log to stdout
+  v8::internal::FLAG_logfile = "-";
+  v8::internal::FLAG_log = true;
+  Logger::Setup();
+  CHECK_EQ(0, Logger::GetLogLines(0, NULL, 0));
+  CHECK_EQ(0, Logger::GetLogLines(100, NULL, 0));
+  CHECK_EQ(0, Logger::GetLogLines(0, NULL, 100));
+  CHECK_EQ(0, Logger::GetLogLines(100, NULL, 100));
+  Logger::TearDown();
+}
+
+
+#endif  // ENABLE_LOGGING_AND_PROFILING
diff --git a/test/cctest/test-regexp.cc b/test/cctest/test-regexp.cc
index ac75999..8761cf5 100644
--- a/test/cctest/test-regexp.cc
+++ b/test/cctest/test-regexp.cc
@@ -1,4 +1,4 @@
-/// Copyright 2008 the V8 project authors. All rights reserved.
+// Copyright 2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -38,13 +38,13 @@
 #include "jsregexp-inl.h"
 #include "regexp-macro-assembler.h"
 #include "regexp-macro-assembler-irregexp.h"
-#ifdef V8_ARCH_ARM
+#ifdef V8_TARGET_ARCH_ARM
 #include "arm/regexp-macro-assembler-arm.h"
 #endif
-#ifdef V8_ARCH_X64
+#ifdef V8_TARGET_ARCH_X64
 // No X64-implementation yet.
 #endif
-#ifdef V8_ARCH_IA32
+#ifdef V8_TARGET_ARCH_IA32
 #include "ia32/macro-assembler-ia32.h"
 #include "ia32/regexp-macro-assembler-ia32.h"
 #endif
@@ -661,7 +661,7 @@
 }
 
 
-#ifdef V8_ARCH_IA32  // IA32 only tests.
+#ifdef V8_TARGET_ARCH_IA32  // IA32 only tests.
 
 class ContextInitializer {
  public:
diff --git a/test/cctest/test-strings.cc b/test/cctest/test-strings.cc
index 6f42639..3065ba1 100644
--- a/test/cctest/test-strings.cc
+++ b/test/cctest/test-strings.cc
@@ -394,16 +394,14 @@
  public:
   TwoByteResource(const uint16_t* data, size_t length, bool* destructed)
       : data_(data), length_(length), destructed_(destructed) {
-    if (destructed_ != NULL) {
-      *destructed_ = false;
-    }
+    CHECK_NE(destructed, NULL);
+    *destructed_ = false;
   }
 
   virtual ~TwoByteResource() {
-    if (destructed_ != NULL) {
-      CHECK(!*destructed_);
-      *destructed_ = true;
-    }
+    CHECK_NE(destructed_, NULL);
+    CHECK(!*destructed_);
+    *destructed_ = true;
   }
 
   const uint16_t* data() const { return data_; }
@@ -416,49 +414,6 @@
 };
 
 
-TEST(ExternalCrBug9746) {
-  InitializeVM();
-  v8::HandleScope handle_scope;
-
-  // This set of tests verifies that the workaround for Chromium bug 9746
-  // works correctly. In certain situations the external resource of a symbol
-  // is collected while the symbol is still part of the symbol table.
-  static uint16_t two_byte_data[] = {
-    't', 'w', 'o', '-', 'b', 'y', 't', 'e', ' ', 'd', 'a', 't', 'a'
-  };
-  static size_t two_byte_length =
-      sizeof(two_byte_data) / sizeof(two_byte_data[0]);
-  static const char* one_byte_data = "two-byte data";
-
-  // Allocate an external string resource and external string.
-  TwoByteResource* resource = new TwoByteResource(two_byte_data,
-                                                  two_byte_length,
-                                                  NULL);
-  Handle<String> string = Factory::NewExternalStringFromTwoByte(resource);
-  Vector<const char> one_byte_vec = CStrVector(one_byte_data);
-  Handle<String> compare = Factory::NewStringFromAscii(one_byte_vec);
-
-  // Verify the correct behaviour before "collecting" the external resource.
-  CHECK(string->IsEqualTo(one_byte_vec));
-  CHECK(string->Equals(*compare));
-
-  // "Collect" the external resource manually by setting the external resource
-  // pointer to NULL. Then redo the comparisons, they should not match AND
-  // not crash.
-  Handle<ExternalTwoByteString> external(ExternalTwoByteString::cast(*string));
-  external->set_resource(NULL);
-  CHECK_EQ(false, string->IsEqualTo(one_byte_vec));
-#if !defined(DEBUG)
-  // These tests only work in non-debug as there are ASSERTs in the code that
-  // do prevent the ability to even get into the broken code when running the
-  // debug version of V8.
-  CHECK_EQ(false, string->Equals(*compare));
-  CHECK_EQ(false, compare->Equals(*string));
-  CHECK_EQ(false, string->Equals(Heap::empty_string()));
-#endif  // !defined(DEBUG)
-}
-
-
 // Regression test case for http://crbug.com/9746. The problem was
 // that when we marked objects reachable only through weak pointers,
 // we ended up keeping a sliced symbol alive, even though we already
@@ -476,6 +431,7 @@
   for (int i = 0; i < kFullStringLength; i++) source[i] = '1';
   char* key = new char[kSliceStringLength];
   for (int i = 0; i < kSliceStringLength; i++) key[i] = '1';
+  Vector<const char> key_vector(key, kSliceStringLength);
 
   // Allocate an external string resource that keeps track of when it
   // is destructed.
@@ -514,7 +470,7 @@
     CHECK(buffer->IsTwoByteRepresentation());
 
     // Finally, base a script on the slice of the external string and
-    // get its wrapper. This allocated yet another weak handle that
+    // get its wrapper. This allocates yet another weak handle that
     // indirectly refers to the external string.
     Handle<Script> script = Factory::NewScript(slice);
     Handle<JSObject> wrapper = GetScriptWrapper(script);
@@ -527,16 +483,33 @@
   Heap::CollectAllGarbage();
   CHECK(!resource_destructed);
 
-  // Make sure the sliced symbol is still in the table.
-  v8::HandleScope scope;
-  Vector<const char> vector(key, kSliceStringLength);
-  Handle<String> symbol = Factory::LookupSymbol(vector);
-  CHECK(StringShape(*symbol).IsSliced());
+  {
+    v8::HandleScope scope;
 
-  // Make sure the buffer is still a two-byte external string.
-  Handle<String> buffer(Handle<SlicedString>::cast(symbol)->buffer());
-  CHECK(StringShape(*buffer).IsExternal());
-  CHECK(buffer->IsTwoByteRepresentation());
+    // Make sure the sliced symbol is still in the table.
+    Handle<String> symbol = Factory::LookupSymbol(key_vector);
+    CHECK(StringShape(*symbol).IsSliced());
+
+    // Make sure the buffer is still a two-byte external string.
+    Handle<String> buffer(Handle<SlicedString>::cast(symbol)->buffer());
+    CHECK(StringShape(*buffer).IsExternal());
+    CHECK(buffer->IsTwoByteRepresentation());
+  }
+
+  // Forcing another garbage collection should let us get rid of the
+  // slice from the symbol table. The external string remains in the
+  // heap until the next GC.
+  Heap::CollectAllGarbage();
+  CHECK(!resource_destructed);
+  v8::HandleScope scope;
+  Handle<String> key_string = Factory::NewStringFromAscii(key_vector);
+  String* out;
+  CHECK(!Heap::LookupSymbolIfExists(*key_string, &out));
+
+  // Forcing yet another garbage collection must allow us to finally
+  // get rid of the external string.
+  Heap::CollectAllGarbage();
+  CHECK(resource_destructed);
 
   delete[] source;
   delete[] key;
diff --git a/test/mjsunit/bugs/bug-334.js b/test/mjsunit/bugs/bug-334.js
new file mode 100644
index 0000000..024fc9e
--- /dev/null
+++ b/test/mjsunit/bugs/bug-334.js
@@ -0,0 +1,90 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Test for http://code.google.com/p/v8/issues/detail?id=334
+
+var READ_ONLY   = 1;
+var DONT_ENUM   = 2;
+var DONT_DELETE = 4;
+
+function func1(){}
+function func2(){}
+
+var object = {__proto__:{}};
+%SetProperty(object, "foo", func1, DONT_ENUM | DONT_DELETE);
+%SetProperty(object, "bar", func1, DONT_ENUM | READ_ONLY);
+%SetProperty(object, "baz", func1, DONT_DELETE | READ_ONLY);
+%SetProperty(object.__proto__, "bif", func1, DONT_ENUM | DONT_DELETE | READ_ONLY);
+object.bif = func2;
+
+function enumerable(obj) {
+  var res = [];
+  for (var i in obj) {
+    res.push(i);
+  }
+  res.sort();
+  return res;
+}
+
+// Sanity check: expected initial state.
+assertArrayEquals(["baz", "bif"], enumerable(object), "enum0");
+assertFalse(delete object.foo, "delete foo");
+assertFalse(delete object.baz, "delete baz");
+assertEquals(func1, object.foo, "read foo");
+assertEquals(func1, object.bar, "read bar");
+assertEquals(func1, object.baz, "read baz");
+assertEquals(func2, object.bif, "read bif");
+
+// Can't assign to READ_ONLY.
+object.bar = "NO WAY";
+assertEquals(func1, object.bar, "read bar 2");
+assertArrayEquals(["baz", "bif"], enumerable(object), "enum1");
+
+// Assignment to non-readonly. Assignment shouldn't change attributes!
+object.foo = func2;
+assertArrayEquals(["baz", "bif"], enumerable(object), "enum2");
+assertFalse(delete object.foo, "delete foo 2");
+
+// Delete should erase attributes if value set again.
+assertTrue(delete object.bar, "delete bar");
+assertFalse("bar" in object, "has bar");
+object.bar = func2;
+assertTrue("bar" in object, "has bar 2");
+assertEquals(func2, object.bar, "read bar 3");
+
+assertArrayEquals(["bar", "baz", "bif"], enumerable(object), "enum3");
+
+// Unshadowing a prototype property exposes its attributes.
+assertTrue(delete object.bif, "delete bif");
+assertArrayEquals(["bar", "baz"], enumerable(object), "enum4");
+assertEquals(func1, object.bif, "read bif 2");
+// Deleting has no effect on the prototype's DONT_DELETE property (delete still returns true).
+assertTrue(delete object.bif, "delete bif 2");
+assertArrayEquals(["bar", "baz"], enumerable(object), "enum5");
+assertEquals(func1, object.bif, "read bif3");
diff --git a/test/mjsunit/debug-backtrace.js b/test/mjsunit/debug-backtrace.js
index 940c4cb..f08f639 100644
--- a/test/mjsunit/debug-backtrace.js
+++ b/test/mjsunit/debug-backtrace.js
@@ -32,10 +32,14 @@
   a=1;
 };
 
-function g() {
+var m = function() {
   new f(1);
 };
 
+function g() {
+ m();
+};
+
 
 // Get the Debug object exposed from the debug context global object.
 Debug = debug.Debug
@@ -90,22 +94,26 @@
     // Get the backtrace.
     var json;
     json = '{"seq":0,"type":"request","command":"backtrace"}'
-    response = new ParsedResponse(dcp.processDebugJSONRequest(json));
+    var resp = dcp.processDebugJSONRequest(json);
+    response = new ParsedResponse(resp);
     backtrace = response.body();
     assertEquals(0, backtrace.fromFrame);
-    assertEquals(3, backtrace.toFrame);
-    assertEquals(3, backtrace.totalFrames);
+    assertEquals(4, backtrace.toFrame);
+    assertEquals(4, backtrace.totalFrames);
     var frames = backtrace.frames;
-    assertEquals(3, frames.length);
+    assertEquals(4, frames.length);
     for (var i = 0; i < frames.length; i++) {
       assertEquals('frame', frames[i].type);
     }
     assertEquals(0, frames[0].index);
     assertEquals("f", response.lookup(frames[0].func.ref).name);
     assertEquals(1, frames[1].index);
-    assertEquals("g", response.lookup(frames[1].func.ref).name);
+    assertEquals("", response.lookup(frames[1].func.ref).name);
+    assertEquals("m", response.lookup(frames[1].func.ref).inferredName);
     assertEquals(2, frames[2].index);
-    assertEquals("", response.lookup(frames[2].func.ref).name);
+    assertEquals("g", response.lookup(frames[2].func.ref).name);
+    assertEquals(3, frames[3].index);
+    assertEquals("", response.lookup(frames[3].func.ref).name);
 
     // Get backtrace with two frames.
     json = '{"seq":0,"type":"request","command":"backtrace","arguments":{"fromFrame":1,"toFrame":3}}'
@@ -113,16 +121,17 @@
     backtrace = response.body();
     assertEquals(1, backtrace.fromFrame);
     assertEquals(3, backtrace.toFrame);
-    assertEquals(3, backtrace.totalFrames);
+    assertEquals(4, backtrace.totalFrames);
     var frames = backtrace.frames;
     assertEquals(2, frames.length);
     for (var i = 0; i < frames.length; i++) {
       assertEquals('frame', frames[i].type);
     }
     assertEquals(1, frames[0].index);
-    assertEquals("g", response.lookup(frames[0].func.ref).name);
+    assertEquals("", response.lookup(frames[0].func.ref).name);
+    assertEquals("m", response.lookup(frames[0].func.ref).inferredName);
     assertEquals(2, frames[1].index);
-    assertEquals("", response.lookup(frames[1].func.ref).name);
+    assertEquals("g", response.lookup(frames[1].func.ref).name);
 
     // Get the individual frames.
     json = '{"seq":0,"type":"request","command":"frame"}'
@@ -158,16 +167,17 @@
     response = new ParsedResponse(dcp.processDebugJSONRequest(json));
     frame = response.body();
     assertEquals(1, frame.index);
-    assertEquals("g", response.lookup(frame.func.ref).name);
+    assertEquals("", response.lookup(frame.func.ref).name);
+    assertEquals("m", response.lookup(frame.func.ref).inferredName);
     assertFalse(frame.constructCall);
     assertEquals(35, frame.line);
     assertEquals(2, frame.column);
     assertEquals(0, frame.arguments.length);
 
-    json = '{"seq":0,"type":"request","command":"frame","arguments":{"number":2}}'
+    json = '{"seq":0,"type":"request","command":"frame","arguments":{"number":3}}'
     response = new ParsedResponse(dcp.processDebugJSONRequest(json));
     frame = response.body();
-    assertEquals(2, frame.index);
+    assertEquals(3, frame.index);
     assertEquals("", response.lookup(frame.func.ref).name);
 
     // Source slices for the individual frames (they all refer to this script).
diff --git a/test/mjsunit/mirror-unresolved-function.js b/test/mjsunit/mirror-unresolved-function.js
index 21d0e56..8d8ca37 100644
--- a/test/mjsunit/mirror-unresolved-function.js
+++ b/test/mjsunit/mirror-unresolved-function.js
@@ -57,6 +57,7 @@
 assertFalse(mirror.isPrimitive());
 assertEquals("Function", mirror.className());
 assertEquals("f", mirror.name());
+assertEquals('undefined', typeof mirror.inferredName());
 assertFalse(mirror.resolved());
 assertEquals(void 0, mirror.source());
 assertEquals('undefined', mirror.constructorFunction().type());
@@ -75,4 +76,5 @@
 assertEquals('undefined', refs.lookup(fromJSON.prototypeObject.ref).type, 'Unexpected prototype object type in JSON');
 assertFalse(fromJSON.resolved);
 assertEquals("f", fromJSON.name);
+assertFalse('inferredName' in fromJSON);
 assertEquals(void 0, fromJSON.source);
diff --git a/test/mjsunit/tools/codemap.js b/test/mjsunit/tools/codemap.js
index fdad819..55b8758 100644
--- a/test/mjsunit/tools/codemap.js
+++ b/test/mjsunit/tools/codemap.js
@@ -123,4 +123,36 @@
   codeMap.addCode(0x1700, newCodeEntry(0x100, 'code'));
   assertEntry(codeMap, 'code', 0x1500);
   assertEntry(codeMap, 'code {1}', 0x1700);
+  // Test name stability.
+  assertEntry(codeMap, 'code', 0x1500);
+  assertEntry(codeMap, 'code {1}', 0x1700);
+})();
+
+
+(function testStaticEntriesExport() {
+  var codeMap = new devtools.profiler.CodeMap();
+  codeMap.addStaticCode(0x1500, newCodeEntry(0x3000, 'lib1'));
+  codeMap.addStaticCode(0x15500, newCodeEntry(0x5000, 'lib2'));
+  codeMap.addStaticCode(0x155500, newCodeEntry(0x10000, 'lib3'));
+  var allStatics = codeMap.getAllStaticEntries();
+  allStatics.sort();
+  assertEquals(['lib1: 3000', 'lib2: 5000', 'lib3: 10000'], allStatics);
+})();
+
+
+(function testDynamicEntriesExport() {
+  var codeMap = new devtools.profiler.CodeMap();
+  codeMap.addCode(0x1500, newCodeEntry(0x200, 'code1'));
+  codeMap.addCode(0x1700, newCodeEntry(0x100, 'code2'));
+  codeMap.addCode(0x1900, newCodeEntry(0x50, 'code3'));
+  var allDynamics = codeMap.getAllDynamicEntries();
+  allDynamics.sort();
+  assertEquals(['code1: 200', 'code2: 100', 'code3: 50'], allDynamics);
+  codeMap.deleteCode(0x1700);
+  var allDynamics2 = codeMap.getAllDynamicEntries();
+  allDynamics2.sort();
+  assertEquals(['code1: 200', 'code3: 50'], allDynamics2);
+  codeMap.deleteCode(0x1500);
+  var allDynamics3 = codeMap.getAllDynamicEntries();
+  assertEquals(['code3: 50'], allDynamics3);
 })();
diff --git a/test/mjsunit/undeletable-functions.js b/test/mjsunit/undeletable-functions.js
new file mode 100644
index 0000000..86a7426
--- /dev/null
+++ b/test/mjsunit/undeletable-functions.js
@@ -0,0 +1,181 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that we match JSC in making some functions undeletable.
+// See http://code.google.com/p/chromium/issues/detail?id=1717
+// The functions on these prototypes are not just undeletable.  It is
+// possible to override them with new definitions, then get the old
+// version back by deleting the new definition.
+
+var array;
+
+array = [
+  "toString", "toLocaleString", "join", "pop", "push", "concat", "reverse",
+  "shift", "unshift", "slice", "splice", "sort", "filter", "forEach", "some",
+  "every", "map", "indexOf", "lastIndexOf", "reduce", "reduceRight"];
+CheckJSCSemantics(Array.prototype, array, "Array prototype");
+
+array = [
+  "toString", "toDateString", "toTimeString", "toLocaleString",
+  "toLocaleDateString", "toLocaleTimeString", "valueOf", "getTime",
+  "getFullYear", "getUTCFullYear", "getMonth", "getUTCMonth", "getDate",
+  "getUTCDate", "getDay", "getUTCDay", "getHours", "getUTCHours", "getMinutes",
+  "getUTCMinutes", "getSeconds", "getUTCSeconds", "getMilliseconds",
+  "getUTCMilliseconds", "getTimezoneOffset", "setTime", "setMilliseconds",
+  "setUTCMilliseconds", "setSeconds", "setUTCSeconds", "setMinutes",
+  "setUTCMinutes", "setHours", "setUTCHours", "setDate", "setUTCDate",
+  "setMonth", "setUTCMonth", "setFullYear", "setUTCFullYear", "toGMTString",
+  "toUTCString", "getYear", "setYear", "toISOString", "toJSON"];
+CheckJSCSemantics(Date.prototype, array, "Date prototype");
+
+array = [
+  "random", "abs", "acos", "asin", "atan", "ceil", "cos", "exp", "floor", "log",
+  "round", "sin", "sqrt", "tan", "atan2", "pow", "max", "min"];
+CheckJSCSemantics(Math, array, "Math1");
+
+CheckEcmaSemantics(Date, ["UTC", "parse", "now"], "Date");
+
+array = [
+  "E", "LN10", "LN2", "LOG2E", "LOG10E", "PI", "SQRT1_2", "SQRT2"];
+CheckDontDelete(Math, array, "Math2");
+
+array = [
+  "escape", "unescape", "decodeURI", "decodeURIComponent", "encodeURI",
+  "encodeURIComponent", "isNaN", "isFinite", "parseInt", "parseFloat", "eval",
+  "execScript"];
+CheckEcmaSemantics(this, array, "Global");
+CheckReadOnlyAttr(this, "Infinity");
+
+array = ["exec", "test", "toString", "compile"];
+CheckEcmaSemantics(RegExp.prototype, array, "RegExp prototype");
+
+array = [
+  "toString", "toLocaleString", "valueOf", "hasOwnProperty",
+  "isPrototypeOf", "propertyIsEnumerable", "__defineGetter__",
+  "__lookupGetter__", "__defineSetter__", "__lookupSetter__"];
+CheckEcmaSemantics(Object.prototype, array, "Object prototype");
+
+array = [
+  "toString", "valueOf", "toJSON"];
+CheckEcmaSemantics(Boolean.prototype, array, "Boolean prototype");
+
+array = [
+  "toString", "toLocaleString", "valueOf", "toFixed", "toExponential",
+  "toPrecision", "toJSON"];
+CheckEcmaSemantics(Number.prototype, array, "Number prototype");
+
+CheckEcmaSemantics(Function.prototype, ["toString"], "Function prototype");
+CheckEcmaSemantics(Date.prototype, ["constructor"], "Date prototype constructor");
+
+array = [
+  "charAt", "charCodeAt", "concat", "indexOf",
+  "lastIndexOf", "localeCompare", "match", "replace", "search", "slice",
+  "split", "substring", "substr", "toLowerCase", "toLocaleLowerCase",
+  "toUpperCase", "toLocaleUpperCase", "link", "anchor", "fontcolor", "fontsize",
+  "big", "blink", "bold", "fixed", "italics", "small", "strike", "sub", "sup",
+  "toJSON", "toString", "valueOf"];
+CheckJSCSemantics(String.prototype, array, "String prototype");
+CheckEcmaSemantics(String, ["fromCharCode"], "String");
+
+
+function CheckEcmaSemantics(type, props, name) {
+  print(name);
+  for (var i = 0; i < props.length; i++) {
+    CheckDeletable(type, props[i]);
+  }
+}
+
+
+function CheckJSCSemantics(type, props, name) {
+  print(name);
+  for (var i = 0; i < props.length; i++) {
+    CheckNotDeletable(type, props[i]);
+  }
+}
+
+
+function CheckDontDelete(type, props, name) {
+  print(name);
+  for (var i = 0; i < props.length; i++) {
+    CheckDontDeleteAttr(type, props[i]);
+  }
+}
+
+
+function CheckDeletable(type, prop) {
+  var old = type[prop];
+  var hasOwnProperty = Object.prototype.hasOwnProperty;
+  if (!type[prop]) return;
+  assertTrue(type.hasOwnProperty(prop), "inherited: " + prop);
+  var deleted = delete type[prop];
+  assertTrue(deleted, "delete operator returned false: " + prop);
+  assertFalse(hasOwnProperty.call(type, prop), "still there after delete: " + prop);
+  type[prop] = "foo";
+  assertEquals("foo", type[prop], "not overwritable: " + prop);
+  type[prop] = old;
+}
+
+
+function CheckNotDeletable(type, prop) {
+  var old = type[prop];
+  if (!type[prop]) return;
+  assertTrue(type.hasOwnProperty(prop), "inherited: " + prop);
+  var deleted = delete type[prop];
+  assertTrue(deleted, "delete operator returned false: " + prop);
+  assertTrue(type.hasOwnProperty(prop), "not there after delete: " + prop);
+  type[prop] = "foo";
+  assertEquals("foo", type[prop], "not overwritable: " + prop);
+  deleted = delete type[prop];
+  assertTrue(deleted, "delete operator returned false 2nd time: " + prop);
+  assertEquals(old.toString(), type[prop].toString(), "delete didn't restore the old value: " + prop);
+}
+
+
+function CheckDontDeleteAttr(type, prop) {
+  var old = type[prop];
+  if (!type[prop]) return;
+  assertTrue(type.hasOwnProperty(prop), "inherited: " + prop);
+  var deleted = delete type[prop];
+  assertFalse(deleted, "delete operator returned true: " + prop);
+  assertTrue(type.hasOwnProperty(prop), "not there after delete: " + prop);
+  type[prop] = "foo";
+  assertFalse("foo" == type[prop], "overwritable: " + prop);
+}
+
+
+function CheckReadOnlyAttr(type, prop) {
+  var old = type[prop];
+  if (!type[prop]) return;
+  assertTrue(type.hasOwnProperty(prop), "inherited: " + prop);
+  var deleted = delete type[prop];
+  assertFalse(deleted, "delete operator returned true: " + prop);
+  assertTrue(type.hasOwnProperty(prop), "not there after delete: " + prop);
+  type[prop] = "foo";
+  assertEquals("foo", type[prop], "overwritable: " + prop);
+}
+
+print("OK");
diff --git a/tools/codemap.js b/tools/codemap.js
index 32e03d6..3766db0 100644
--- a/tools/codemap.js
+++ b/tools/codemap.js
@@ -43,10 +43,8 @@
   this.dynamics_ = new goog.structs.SplayTree();
 
   /**
-   * Deleted code entries. Used for code collected by the GC.
+   * Name generator for entries having duplicate names.
    */
-  this.deleted_ = [];
-
   this.dynamicsNameGen_ = new devtools.profiler.CodeMap.NameGenerator();
 
   /**
@@ -81,8 +79,6 @@
  * @param {devtools.profiler.CodeMap.CodeEntry} codeEntry Code entry object.
  */
 devtools.profiler.CodeMap.prototype.addCode = function(start, codeEntry) {
-  var entryName = this.dynamicsNameGen_.getName(codeEntry.name);
-  codeEntry.name = entryName;
   this.dynamics_.insert(start, codeEntry);
 };
 
@@ -102,14 +98,12 @@
 
 /**
  * Discards a dynamic code entry. Throws an exception if there is no dynamic
- * code entry with the specified starting address. The entry will still be
- * returned from the 'getAllDynamicEntries' method.
+ * code entry with the specified starting address.
  *
  * @param {number} start The starting address of the entry being deleted.
  */
 devtools.profiler.CodeMap.prototype.deleteCode = function(start) {
   var removedNode = this.dynamics_.remove(start);
-  this.deleted_.push(removedNode.value);
 };
 
 
@@ -168,7 +162,14 @@
   var min = this.dynamics_.findMin();
   var max = this.dynamics_.findMax();
   if (max != null && addr < (max.key + max.value.size) && addr >= min.key) {
-    return this.findInTree_(this.dynamics_, addr);
+    var dynaEntry = this.findInTree_(this.dynamics_, addr);
+    if (dynaEntry == null) return null;
+    // Dedupe entry name.
+    if (!dynaEntry.nameUpdated_) {
+      dynaEntry.name = this.dynamicsNameGen_.getName(dynaEntry.name);
+      dynaEntry.nameUpdated_ = true;
+    }
+    return dynaEntry;
   }
   return null;
 };
@@ -178,8 +179,7 @@
  * Returns an array of all dynamic code entries, including deleted ones.
  */
 devtools.profiler.CodeMap.prototype.getAllDynamicEntries = function() {
-  var dynamicEntries = this.dynamics_.exportValues();
-  return dynamicEntries.concat(this.deleted_);
+  return this.dynamics_.exportValues();
 };
 
 
@@ -201,6 +201,7 @@
 devtools.profiler.CodeMap.CodeEntry = function(size, opt_name) {
   this.size = size;
   this.name = opt_name || '';
+  this.nameUpdated_ = false;
 };
 
 
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index f8d9043..e980ff1 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -403,7 +403,7 @@
       'target_name': 'v8_base',
       'type': '<(library)',
       'defines': [
-        'V8_ARCH_IA32'
+        'V8_TARGET_ARCH_IA32'
       ],
       'include_dirs+': [
         '../../src',
@@ -461,7 +461,7 @@
       'target_name': 'v8_nosnapshot',
       'type': '<(library)',
       'defines': [
-        'V8_ARCH_IA32'
+        'V8_TARGET_ARCH_IA32'
       ],
       'dependencies': [
         'js2c',
@@ -493,7 +493,7 @@
       'target_name': 'v8',
       'type': '<(library)',
       'defines': [
-        'V8_ARCH_IA32'
+        'V8_TARGET_ARCH_IA32'
       ],
       'dependencies': [
         'js2c',
@@ -533,7 +533,7 @@
       'target_name': 'v8_shell',
       'type': 'executable',
       'defines': [
-        'V8_ARCH_IA32'
+        'V8_TARGET_ARCH_IA32'
       ],
       'dependencies': [
         'v8',
@@ -563,7 +563,7 @@
         'v8',
       ],
       'defines': [
-        'V8_ARCH_IA32'
+        'V8_TARGET_ARCH_IA32'
       ],
       'include_dirs': [
         '../../src',
@@ -602,7 +602,7 @@
         'js2c',
       ],
       'defines': [
-        'V8_ARCH_ARM',
+        'V8_TARGET_ARCH_ARM',
       ],
       'include_dirs+': [
         '../../src',
@@ -660,7 +660,7 @@
         'v8_arm',
       ],
       'defines': [
-        'V8_ARCH_ARM',
+        'V8_TARGET_ARCH_ARM',
       ],
       'sources': [
         '../../samples/shell.cc',
@@ -680,7 +680,7 @@
         'v8_arm',
       ],
       'defines': [
-        'V8_ARCH_ARM',
+        'V8_TARGET_ARCH_ARM',
       ],
       'include_dirs': [
         '../../src',
diff --git a/tools/tickprocessor.js b/tools/tickprocessor.js
index 64020ca..196daa9 100644
--- a/tools/tickprocessor.js
+++ b/tools/tickprocessor.js
@@ -99,10 +99,11 @@
 
 
 TickProcessor.CodeTypes = {
-  JS: 0,
-  CPP: 1,
-  SHARED_LIB: 2
+  CPP: 0,
+  SHARED_LIB: 1
 };
+// Otherwise, this is JS-related code. We are not adding it to
+// codeTypes_ map because there can be zillions of them.
 
 
 TickProcessor.RecordsDispatch = {
@@ -142,7 +143,7 @@
 
 
 TickProcessor.prototype.isJsCode = function(name) {
-  return this.codeTypes_[name] == TickProcessor.CodeTypes.JS;
+  return !(name in this.codeTypes_);
 };
 
 
@@ -220,7 +221,6 @@
 TickProcessor.prototype.processCodeCreation = function(
     type, start, size, name) {
   var entry = this.profile_.addCode(type, name, start, size);
-  this.setCodeType(entry.getName(), 'JS');
 };
 
 
@@ -415,8 +415,7 @@
 
 CppEntriesProvider.prototype.parseVmSymbols = function(
     libName, libStart, libEnd, processorFunc) {
-  var syms = this.loadSymbols(libName);
-  if (syms.length == 0) return;
+  this.loadSymbols(libName);
 
   var prevEntry;
 
@@ -428,11 +427,12 @@
     }
   }
 
-  for (var i = 0, n = syms.length; i < n; ++i) {
-    var line = syms[i];
-    var funcInfo = this.parseLine(line);
-    if (!funcInfo) {
+  while (true) {
+    var funcInfo = this.parseNextLine();
+    if (funcInfo === null) {
       continue;
+    } else if (funcInfo === false) {
+      break;
     }
     if (funcInfo.start < libStart && funcInfo.start < libEnd - libStart) {
       funcInfo.start += libStart;
@@ -445,12 +445,11 @@
 
 
 CppEntriesProvider.prototype.loadSymbols = function(libName) {
-  return [];
 };
 
 
-CppEntriesProvider.prototype.parseLine = function(line) {
-  return { name: '', start: 0 };
+CppEntriesProvider.prototype.parseNextLine = function() {
+  return false;
 };
 
 
@@ -462,6 +461,8 @@
 
 
 function UnixCppEntriesProvider() {
+  this.symbols = [];
+  this.parsePos = 0;
 };
 inherits(UnixCppEntriesProvider, CppEntriesProvider);
 
@@ -470,20 +471,35 @@
 
 
 UnixCppEntriesProvider.prototype.loadSymbols = function(libName) {
-  var normalSyms = os.system('nm', ['-C', '-n', libName], -1, -1);
-  var dynaSyms = os.system('nm', ['-C', '-n', '-D', libName], -1, -1);
-  var syms = (normalSyms + dynaSyms).split('\n');
-  return syms;
+  this.symbols = [
+    os.system('nm', ['-C', '-n', libName], -1, -1),
+    os.system('nm', ['-C', '-n', '-D', libName], -1, -1)
+  ];
+  this.parsePos = 0;
 };
 
 
-UnixCppEntriesProvider.prototype.parseLine = function(line) {
+UnixCppEntriesProvider.prototype.parseNextLine = function() {
+  if (this.symbols.length == 0) {
+    return false;
+  }
+  var lineEndPos = this.symbols[0].indexOf('\n', this.parsePos);
+  if (lineEndPos == -1) {
+    this.symbols.shift();
+    this.parsePos = 0;
+    return this.parseNextLine();
+  }
+
+  var line = this.symbols[0].substring(this.parsePos, lineEndPos);
+  this.parsePos = lineEndPos + 1;
   var fields = line.match(UnixCppEntriesProvider.FUNC_RE);
   return fields ? { name: fields[2], start: parseInt(fields[1], 16) } : null;
 };
 
 
 function WindowsCppEntriesProvider() {
+  this.symbols = '';
+  this.parsePos = 0;
 };
 inherits(WindowsCppEntriesProvider, CppEntriesProvider);
 
@@ -498,13 +514,20 @@
 WindowsCppEntriesProvider.prototype.loadSymbols = function(libName) {
   var fileNameFields = libName.match(WindowsCppEntriesProvider.FILENAME_RE);
   // Only try to load symbols for the .exe file.
-  if (!fileNameFields) return [];
+  if (!fileNameFields) return;
   var mapFileName = fileNameFields[1] + '.map';
-  return readFile(mapFileName).split('\r\n');
+  this.symbols = readFile(mapFileName);
 };
 
 
-WindowsCppEntriesProvider.prototype.parseLine = function(line) {
+WindowsCppEntriesProvider.prototype.parseNextLine = function() {
+  var lineEndPos = this.symbols.indexOf('\r\n', this.parsePos);
+  if (lineEndPos == -1) {
+    return false;
+  }
+
+  var line = this.symbols.substring(this.parsePos, lineEndPos);
+  this.parsePos = lineEndPos + 2;
   var fields = line.match(WindowsCppEntriesProvider.FUNC_RE);
   return fields ?
       { name: this.unmangleName(fields[1]), start: parseInt(fields[2], 16) } :
diff --git a/tools/v8.xcodeproj/project.pbxproj b/tools/v8.xcodeproj/project.pbxproj
index c868487..83212fb 100755
--- a/tools/v8.xcodeproj/project.pbxproj
+++ b/tools/v8.xcodeproj/project.pbxproj
@@ -1438,7 +1438,7 @@
 			buildSettings = {
 				GCC_PREPROCESSOR_DEFINITIONS = (
 					"$(GCC_PREPROCESSOR_DEFINITIONS)",
-					V8_ARCH_IA32,
+					V8_TARGET_ARCH_IA32,
 					DEBUG,
 				);
 				HEADER_SEARCH_PATHS = ../src;
@@ -1451,7 +1451,7 @@
 			buildSettings = {
 				GCC_PREPROCESSOR_DEFINITIONS = (
 					"$(GCC_PREPROCESSOR_DEFINITIONS)",
-					V8_ARCH_IA32,
+					V8_TARGET_ARCH_IA32,
 					NDEBUG,
 				);
 				HEADER_SEARCH_PATHS = ../src;
@@ -1466,7 +1466,7 @@
 				GCC_PREPROCESSOR_DEFINITIONS = (
 					"$(GCC_PREPROCESSOR_DEFINITIONS)",
 					ENABLE_DISASSEMBLER,
-					V8_ARCH_IA32,
+					V8_TARGET_ARCH_IA32,
 					ENABLE_LOGGING_AND_PROFILING,
 				);
 				HEADER_SEARCH_PATHS = ../src;
@@ -1481,7 +1481,7 @@
 				DEPLOYMENT_POSTPROCESSING = NO;
 				GCC_PREPROCESSOR_DEFINITIONS = (
 					"$(GCC_PREPROCESSOR_DEFINITIONS)",
-					V8_ARCH_IA32,
+					V8_TARGET_ARCH_IA32,
 					NDEBUG,
 				);
 				HEADER_SEARCH_PATHS = ../src;
@@ -1512,7 +1512,7 @@
 				DEPLOYMENT_POSTPROCESSING = NO;
 				GCC_PREPROCESSOR_DEFINITIONS = (
 					"$(GCC_PREPROCESSOR_DEFINITIONS)",
-					V8_ARCH_ARM,
+					V8_TARGET_ARCH_ARM,
 					ENABLE_DISASSEMBLER,
 					ENABLE_LOGGING_AND_PROFILING,
 				);
@@ -1528,7 +1528,7 @@
 				DEPLOYMENT_POSTPROCESSING = NO;
 				GCC_PREPROCESSOR_DEFINITIONS = (
 					"$(GCC_PREPROCESSOR_DEFINITIONS)",
-					V8_ARCH_ARM,
+					V8_TARGET_ARCH_ARM,
 				);
 				HEADER_SEARCH_PATHS = ../src;
 				PRODUCT_NAME = "v8-arm";
diff --git a/tools/visual_studio/arm.vsprops b/tools/visual_studio/arm.vsprops
index 4f0f4e3..3aa9374 100644
--- a/tools/visual_studio/arm.vsprops
+++ b/tools/visual_studio/arm.vsprops
@@ -6,7 +6,7 @@
 	>
 	<Tool
 		Name="VCCLCompilerTool"
-		PreprocessorDefinitions="V8_ARCH_ARM"
+		PreprocessorDefinitions="V8_TARGET_ARCH_ARM"
 		DisableSpecificWarnings="4996"
 	/>
 </VisualStudioPropertySheet>
diff --git a/tools/visual_studio/ia32.vsprops b/tools/visual_studio/ia32.vsprops
index 8e02c94..fda6c32 100644
--- a/tools/visual_studio/ia32.vsprops
+++ b/tools/visual_studio/ia32.vsprops
@@ -6,6 +6,6 @@
 	>
 	<Tool
 		Name="VCCLCompilerTool"
-		PreprocessorDefinitions="V8_ARCH_IA32"
+		PreprocessorDefinitions="V8_TARGET_ARCH_IA32"
 	/>
 </VisualStudioPropertySheet>
diff --git a/tools/visual_studio/v8_cctest.vcproj b/tools/visual_studio/v8_cctest.vcproj
index 29023f8..6aa090a 100644
--- a/tools/visual_studio/v8_cctest.vcproj
+++ b/tools/visual_studio/v8_cctest.vcproj
@@ -202,6 +202,10 @@
 			>
 		</File>
 		<File
+			RelativePath="..\..\test\cctest\test-log.cc"
+			>
+		</File>
+		<File
 			RelativePath="..\..\test\cctest\test-log-ia32.cc"
 			>
 		</File>
diff --git a/tools/visual_studio/v8_cctest_arm.vcproj b/tools/visual_studio/v8_cctest_arm.vcproj
index 44ffbc7..566d75e 100644
--- a/tools/visual_studio/v8_cctest_arm.vcproj
+++ b/tools/visual_studio/v8_cctest_arm.vcproj
@@ -196,6 +196,10 @@
 			>
 		</File>
 		<File
+			RelativePath="..\..\test\cctest\test-log.cc"
+			>
+		</File>
+		<File
 			RelativePath="..\..\test\cctest\test-mark-compact.cc"
 			>
 		</File>