Version 1.3.0.

Allowed RegExp objects to be called as functions (issue 132).

Fixed issue where global property cells would escape after detaching the global object; see http://crbug.com/16276.

Added support for stepping into setters and getters in the debugger.

Changed the debugger to avoid stopping in its own JavaScript code and in the code of built-in functions.

Fixed issue 345 by avoiding duplicate escaping labels.

Fixed ARM code generator crash in short-circuited boolean expressions and added regression tests.

Added an external allocation limit to avoid issues where small V8 objects would hold on to large amounts of external memory without causing garbage collections.

Finished more of the inline caching stubs for x64 targets.


git-svn-id: http://v8.googlecode.com/svn/trunk@2537 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 83ebc02..e603b8a 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,28 @@
+2009-07-27: Version 1.3.0
+
+        Allowed RegExp objects to be called as functions (issue 132).
+
+        Fixed issue where global property cells would escape after
+        detaching the global object; see http://crbug.com/16276.
+
+        Added support for stepping into setters and getters in the
+        debugger.
+
+        Changed the debugger to avoid stopping in its own JavaScript code
+        and in the code of built-in functions.
+
+        Fixed issue 345 by avoiding duplicate escaping labels.
+
+        Fixed ARM code generator crash in short-circuited boolean
+        expressions and added regression tests.
+
+        Added an external allocation limit to avoid issues where small V8
+        objects would hold on to large amounts of external memory without
+        causing garbage collections.
+ 
+        Finished more of the inline caching stubs for x64 targets. 
+
+
 2009-07-13: Version 1.2.14
 
         Added separate paged heap space for global property cells and
diff --git a/SConstruct b/SConstruct
index 78b050d..dbcd616 100644
--- a/SConstruct
+++ b/SConstruct
@@ -149,31 +149,22 @@
                        '-Wstrict-aliasing=2'],
       'CPPPATH':      ANDROID_INCLUDES,
     },
-    'wordsize:32': {
-      'arch:x64': {
-        'CCFLAGS':      ['-m64'],
-        'LINKFLAGS':    ['-m64']
-      }
-    },
-    'wordsize:64': {
-      'arch:ia32': {
-        'CCFLAGS':      ['-m32'],
-        'LINKFLAGS':    ['-m32']
-      },
-      'arch:arm': {
-        'CCFLAGS':      ['-m32'],
-        'LINKFLAGS':    ['-m32']
-      }
-    },
     'arch:ia32': {
-      'CPPDEFINES':   ['V8_TARGET_ARCH_IA32']
+      'CPPDEFINES':   ['V8_TARGET_ARCH_IA32'],
+      'CCFLAGS':      ['-m32'],
+      'LINKFLAGS':    ['-m32']
     },
     'arch:arm': {
       'CPPDEFINES':   ['V8_TARGET_ARCH_ARM']
     },
+    'simulator:arm': {
+      'CCFLAGS':      ['-m32'],
+      'LINKFLAGS':    ['-m32']
+    },
     'arch:x64': {
-      'CCFLAGS':      ['-fno-strict-aliasing'],
-      'CPPDEFINES':   ['V8_TARGET_ARCH_X64']
+      'CPPDEFINES':   ['V8_TARGET_ARCH_X64'],
+      'CCFLAGS':      ['-fno-strict-aliasing', '-m64'],
+      'LINKFLAGS':    ['-m64'],
     },
     'prof:oprofile': {
       'CPPDEFINES':   ['ENABLE_OPROFILE_AGENT']
@@ -341,22 +332,6 @@
         'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
       }
     },
-    'wordsize:32': {
-      'arch:x64': {
-        'CCFLAGS':      ['-m64'],
-        'LINKFLAGS':    ['-m64']
-      }
-    },
-    'wordsize:64': {
-      'arch:ia32': {
-        'CCFLAGS':      ['-m32'],
-        'LINKFLAGS':    ['-m32']
-      },
-      'arch:arm': {
-        'CCFLAGS':      ['-m32'],
-        'LINKFLAGS':    ['-m32']
-      }
-    }
   },
   'msvc': {
     'all': {
@@ -408,21 +383,17 @@
         'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
       }
     },
-    'wordsize:32': {
-      'arch:x64': {
-        'CCFLAGS':      ['-m64'],
-        'LINKFLAGS':    ['-m64']
-      }
+    'arch:ia32': {
+      'CCFLAGS':      ['-m32'],
+      'LINKFLAGS':    ['-m32']
     },
-    'wordsize:64': {
-      'arch:ia32': {
-        'CCFLAGS':      ['-m32'],
-        'LINKFLAGS':    ['-m32']
-      },
-      'arch:arm': {
-        'CCFLAGS':      ['-m32'],
-        'LINKFLAGS':    ['-m32']
-      }
+    'arch:x64': {
+      'CCFLAGS':      ['-m64'],
+      'LINKFLAGS':    ['-m64']
+    },
+    'simulator:arm': {
+      'CCFLAGS':      ['-m32'],
+      'LINKFLAGS':    ['-m32']
     },
     'mode:release': {
       'CCFLAGS':      ['-O2']
@@ -533,7 +504,6 @@
 OS_GUESS = utils.GuessOS()
 TOOLCHAIN_GUESS = GuessToolchain(OS_GUESS)
 ARCH_GUESS = utils.GuessArchitecture()
-WORDSIZE_GUESS = utils.GuessWordsize()
 
 
 SIMPLE_OPTIONS = {
@@ -587,11 +557,6 @@
     'default': 'on',
     'help': 'use Microsoft Visual C++ link-time code generation'
   },
-  'wordsize': {
-    'values': ['64', '32'],
-    'default': WORDSIZE_GUESS,
-    'help': 'the word size'
-  },
   'simulator': {
     'values': ['arm', 'none'],
     'default': 'none',
diff --git a/benchmarks/run.html b/benchmarks/run.html
old mode 100755
new mode 100644
index 050764e..ef2c186
--- a/benchmarks/run.html
+++ b/benchmarks/run.html
@@ -55,9 +55,35 @@
                              NotifyScore: AddScore }); 
 }
 
+function ShowWarningIfObsolete() {
+  // If anything goes wrong we will just catch the exception and no 
+  // warning is shown, i.e., no harm is done.
+  try {
+    var xmlhttp;
+    var next_version = parseInt(BenchmarkSuite.version) + 1; 
+    var next_version_url = "../v" + next_version + "/run.html";  
+    if (window.XMLHttpRequest) {
+      xmlhttp = new window.XMLHttpRequest();
+    } else if (window.ActiveXObject) {
+      xmlhttp = new window.ActiveXObject("Microsoft.XMLHTTP");
+    }
+    xmlhttp.open('GET', next_version_url, true);
+    xmlhttp.onreadystatechange = function() {
+      if (xmlhttp.readyState == 4 && xmlhttp.status == 200) {
+        document.getElementById('obsolete').style.display="block";
+      }
+    };
+    xmlhttp.send(null);
+  } catch(e) {
+    // Ignore exception if check for next version fails. 
+    // Hence no warning is displayed.
+  }
+}
+
 function Load() {
   var version = BenchmarkSuite.version;
   document.getElementById("version").innerHTML = version;
+  ShowWarningIfObsolete();  
   setTimeout(Run, 200);
 }
 </script>
@@ -65,6 +91,12 @@
 <body onload="Load()">
 <div>
   <div class="title"><h1>V8 Benchmark Suite - version <span id="version">?</span></h1></div>
+  <div class="warning" id="obsolete"> 
+Warning! This is not the latest version of the V8 benchmark
+suite. Consider running the   
+<a href="http://v8.googlecode.com/svn/data/benchmarks/current/run.html">
+latest version</a>.  
+  </div>
   <table>
     <tr>
       <td class="contents">
diff --git a/benchmarks/style.css b/benchmarks/style.css
old mode 100755
new mode 100644
index 46320c1..d9f4dbf
--- a/benchmarks/style.css
+++ b/benchmarks/style.css
@@ -55,6 +55,15 @@
   border: 1px solid rgb(51, 102, 204);
 }
 
+div.warning { 
+  background: #ffffd9;
+  border: 1px solid #d2d26a;
+  display: none;
+  margin: 1em 0 2em;
+  padding: 8px;
+  text-align: center; 
+}
+
 #status {
   text-align: center;
   margin-top: 50px;
diff --git a/include/v8.h b/include/v8.h
index 8f22c81..cf8a3bf 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -180,7 +180,7 @@
   /**
    * Creates an empty handle.
    */
-  Handle();
+  inline Handle();
 
   /**
    * Creates a new handle for the specified value.
@@ -264,7 +264,7 @@
  */
 template <class T> class V8EXPORT_INLINE Local : public Handle<T> {
  public:
-  Local();
+  inline Local();
   template <class S> inline Local(Local<S> that)
       : Handle<T>(reinterpret_cast<T*>(*that)) {
     /**
@@ -284,7 +284,7 @@
    *  The referee is kept alive by the local handle even when
    *  the original handle is destroyed/disposed.
    */
-  static Local<T> New(Handle<T> that);
+  inline static Local<T> New(Handle<T> that);
 };
 
 
@@ -312,7 +312,7 @@
    * Creates an empty persistent handle that doesn't point to any
    * storage cell.
    */
-  Persistent();
+  inline Persistent();
 
   /**
    * Creates a persistent handle for the same storage cell as the
@@ -353,7 +353,7 @@
    * Creates a new persistent handle for an existing local or
    * persistent handle.
    */
-  static Persistent<T> New(Handle<T> that);
+  inline static Persistent<T> New(Handle<T> that);
 
   /**
    * Releases the storage cell referenced by this persistent handle.
@@ -361,7 +361,7 @@
    * This handle's reference, and any any other references to the storage
    * cell remain and IsEmpty will still return false.
    */
-  void Dispose();
+  inline void Dispose();
 
   /**
    * Make the reference to this object weak.  When only weak handles
@@ -369,20 +369,20 @@
    * callback to the given V8::WeakReferenceCallback function, passing
    * it the object reference and the given parameters.
    */
-  void MakeWeak(void* parameters, WeakReferenceCallback callback);
+  inline void MakeWeak(void* parameters, WeakReferenceCallback callback);
 
   /** Clears the weak reference to this object.*/
-  void ClearWeak();
+  inline void ClearWeak();
 
   /**
    *Checks if the handle holds the only reference to an object.
    */
-  bool IsNearDeath() const;
+  inline bool IsNearDeath() const;
 
   /**
    * Returns true if the handle's reference is weak.
    */
-  bool IsWeak() const;
+  inline bool IsWeak() const;
 
  private:
   friend class ImplementationUtilities;
@@ -1113,6 +1113,13 @@
   /** Sets the value in an internal field. */
   void SetInternalField(int index, Handle<Value> value);
 
+  // The two functions below do not perform index bounds checks and
+  // they do not check that the VM is still running. Use with caution.
+  /** Gets a native pointer from an internal field. */
+  void* GetPointerFromInternalField(int index);
+  /** Sets a native pointer in an internal field. */
+  void SetPointerInInternalField(int index, void* value);
+
   // Testers for local properties.
   bool HasRealNamedProperty(Handle<String> key);
   bool HasRealIndexedProperty(uint32_t index);
diff --git a/src/SConscript b/src/SConscript
index f1ca875..f9f9634 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -40,7 +40,7 @@
     'codegen.cc', 'compilation-cache.cc', 'compiler.cc', 'contexts.cc',
     'conversions.cc', 'counters.cc', 'dateparser.cc', 'debug.cc',
     'debug-agent.cc', 'disassembler.cc', 'execution.cc', 'factory.cc',
-    'flags.cc', 'frames.cc', 'func-name-inferrer.cc',
+    'flags.cc', 'frame-element.cc', 'frames.cc', 'func-name-inferrer.cc',
     'global-handles.cc', 'handles.cc', 'hashmap.cc',
     'heap.cc', 'ic.cc', 'interpreter-irregexp.cc', 'jsregexp.cc',
     'jump-target.cc', 'log.cc', 'log-utils.cc', 'mark-compact.cc', 'messages.cc',
diff --git a/src/api.cc b/src/api.cc
index b9e0cec..9e3ca9b 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -1085,8 +1085,9 @@
   // handle it if it turns out not to be in release mode.
   ASSERT(pre_data == NULL || pre_data->SanityCheck());
   // If the pre-data isn't sane we simply ignore it
-  if (pre_data != NULL && !pre_data->SanityCheck())
+  if (pre_data != NULL && !pre_data->SanityCheck()) {
     pre_data = NULL;
+  }
   i::Handle<i::JSFunction> boilerplate = i::Compiler::Compile(str,
                                                               name_obj,
                                                               line_offset,
@@ -2464,6 +2465,44 @@
 }
 
 
+void* v8::Object::GetPointerFromInternalField(int index) {
+  i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+  i::Object* pointer = obj->GetInternalField(index);
+  if (pointer->IsSmi()) {
+    // Fast case, aligned native pointer.
+    return pointer;
+  }
+
+  // Read from uninitialized field.
+  if (!pointer->IsProxy()) {
+    // Play safe even if it's something unexpected.
+    ASSERT(pointer->IsUndefined());
+    return NULL;
+  }
+
+  // Unaligned native pointer.
+  return reinterpret_cast<void*>(i::Proxy::cast(pointer)->proxy());
+}
+
+
+void v8::Object::SetPointerInInternalField(int index, void* value) {
+  i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+  i::Object* as_object = reinterpret_cast<i::Object*>(value);
+  if (as_object->IsSmi()) {
+    // Aligned pointer, store as is.
+    obj->SetInternalField(index, as_object);
+  } else {
+    // Currently internal fields are used by DOM wrappers which only
+    // get garbage collected by the mark-sweep collector, so we
+    // pretenure the proxy.
+    HandleScope scope;
+    i::Handle<i::Proxy> proxy =
+        i::Factory::NewProxy(reinterpret_cast<i::Address>(value), i::TENURED);
+    if (!proxy.is_null()) obj->SetInternalField(index, *proxy);
+  }
+}
+
+
 // --- E n v i r o n m e n t ---
 
 bool v8::V8::Initialize() {
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 3f7ccf5..5f8149e 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -2897,7 +2897,7 @@
     __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
 
     // Write to the indexed properties array.
-    int offset = i * kPointerSize + Array::kHeaderSize;
+    int offset = i * kPointerSize + FixedArray::kHeaderSize;
     __ str(r0, FieldMemOperand(r1, offset));
 
     // Update the write barrier for the array address.
@@ -3737,7 +3737,8 @@
     }
     frame_->EmitPush(r0);  // r0 has result
   }
-  ASSERT((has_cc() && frame_->height() == original_height) ||
+  ASSERT(!has_valid_frame() ||
+         (has_cc() && frame_->height() == original_height) ||
          (!has_cc() && frame_->height() == original_height + 1));
 }
 
@@ -3871,22 +3872,12 @@
                           &is_true,
                           false_target(),
                           false);
-    if (has_cc()) {
-      Branch(false, false_target());
-
-      // Evaluate right side expression.
-      is_true.Bind();
-      LoadConditionAndSpill(node->right(),
-                            NOT_INSIDE_TYPEOF,
-                            true_target(),
-                            false_target(),
-                            false);
-
-    } else {
+    if (has_valid_frame() && !has_cc()) {
+      // The left-hand side result is on top of the virtual frame.
       JumpTarget pop_and_continue;
       JumpTarget exit;
 
-      __ ldr(r0, frame_->Top());  // dup the stack top
+      __ ldr(r0, frame_->Top());  // Duplicate the stack top.
       frame_->EmitPush(r0);
       // Avoid popping the result if it converts to 'false' using the
       // standard ToBoolean() conversion as described in ECMA-262,
@@ -3904,6 +3895,22 @@
 
       // Exit (always with a materialized value).
       exit.Bind();
+    } else if (has_cc() || is_true.is_linked()) {
+      // The left-hand side is either (a) partially compiled to
+      // control flow with a final branch left to emit or (b) fully
+      // compiled to control flow and possibly true.
+      if (has_cc()) {
+        Branch(false, false_target());
+      }
+      is_true.Bind();
+      LoadConditionAndSpill(node->right(),
+                            NOT_INSIDE_TYPEOF,
+                            true_target(),
+                            false_target(),
+                            false);
+    } else {
+      // Nothing to do.
+      ASSERT(!has_valid_frame() && !has_cc() && !is_true.is_linked());
     }
 
   } else if (op == Token::OR) {
@@ -3913,18 +3920,8 @@
                           true_target(),
                           &is_false,
                           false);
-    if (has_cc()) {
-      Branch(true, true_target());
-
-      // Evaluate right side expression.
-      is_false.Bind();
-      LoadConditionAndSpill(node->right(),
-                            NOT_INSIDE_TYPEOF,
-                            true_target(),
-                            false_target(),
-                            false);
-
-    } else {
+    if (has_valid_frame() && !has_cc()) {
+      // The left-hand side result is on top of the virtual frame.
       JumpTarget pop_and_continue;
       JumpTarget exit;
 
@@ -3946,6 +3943,22 @@
 
       // Exit (always with a materialized value).
       exit.Bind();
+    } else if (has_cc() || is_false.is_linked()) {
+      // The left-hand side is either (a) partially compiled to
+      // control flow with a final branch left to emit or (b) fully
+      // compiled to control flow and possibly false.
+      if (has_cc()) {
+        Branch(true, true_target());
+      }
+      is_false.Bind();
+      LoadConditionAndSpill(node->right(),
+                            NOT_INSIDE_TYPEOF,
+                            true_target(),
+                            false_target(),
+                            false);
+    } else {
+      // Nothing to do.
+      ASSERT(!has_valid_frame() && !has_cc() && !is_false.is_linked());
     }
 
   } else {
@@ -3989,7 +4002,8 @@
     }
     frame_->EmitPush(r0);
   }
-  ASSERT((has_cc() && frame_->height() == original_height) ||
+  ASSERT(!has_valid_frame() ||
+         (has_cc() && frame_->height() == original_height) ||
          (!has_cc() && frame_->height() == original_height + 1));
 }
 
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 2ca74a9..82a2bec 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -77,6 +77,13 @@
   __ cmp(r3, Operand(JS_GLOBAL_PROXY_TYPE));
   __ b(eq, miss);
 
+  // Possible work-around for http://crbug.com/16276.
+  // See also: http://codereview.chromium.org/155418.
+  __ cmp(r3, Operand(JS_GLOBAL_OBJECT_TYPE));
+  __ b(eq, miss);
+  __ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE));
+  __ b(eq, miss);
+
   // Check that the properties array is a dictionary.
   __ ldr(t0, FieldMemOperand(t1, JSObject::kPropertiesOffset));
   __ ldr(r3, FieldMemOperand(t0, HeapObject::kMapOffset));
@@ -84,14 +91,14 @@
   __ b(ne, miss);
 
   // Compute the capacity mask.
-  const int kCapacityOffset =
-      Array::kHeaderSize + StringDictionary::kCapacityIndex * kPointerSize;
+  const int kCapacityOffset = StringDictionary::kHeaderSize +
+      StringDictionary::kCapacityIndex * kPointerSize;
   __ ldr(r3, FieldMemOperand(t0, kCapacityOffset));
   __ mov(r3, Operand(r3, ASR, kSmiTagSize));  // convert smi to int
   __ sub(r3, r3, Operand(1));
 
-  const int kElementsStartOffset =
-      Array::kHeaderSize + StringDictionary::kElementsStartIndex * kPointerSize;
+  const int kElementsStartOffset = StringDictionary::kHeaderSize +
+      StringDictionary::kElementsStartIndex * kPointerSize;
 
   // Generate an unrolled loop that performs a few probes before
   // giving up. Measurements done on Gmail indicate that 2 probes
@@ -592,7 +599,7 @@
 
   // Fast case: Do the load.
   __ bind(&fast);
-  __ add(r3, r1, Operand(Array::kHeaderSize - kHeapObjectTag));
+  __ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2));
   __ cmp(r0, Operand(Factory::the_hole_value()));
   // In case the loaded value is the_hole we have to consult GetProperty
@@ -659,9 +666,9 @@
   // Untag the key (for checking against untagged length in the fixed array).
   __ mov(r1, Operand(r1, ASR, kSmiTagSize));
   // Compute address to store into and check array bounds.
-  __ add(r2, r3, Operand(Array::kHeaderSize - kHeapObjectTag));
+  __ add(r2, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2));
-  __ ldr(ip, FieldMemOperand(r3, Array::kLengthOffset));
+  __ ldr(ip, FieldMemOperand(r3, FixedArray::kLengthOffset));
   __ cmp(r1, Operand(ip));
   __ b(lo, &fast);
 
@@ -689,7 +696,7 @@
   __ mov(r3, Operand(r2));
   // NOTE: Computing the address to store into must take the fact
   // that the key has been incremented into account.
-  int displacement = Array::kHeaderSize - kHeapObjectTag -
+  int displacement = FixedArray::kHeaderSize - kHeapObjectTag -
       ((1 << kSmiTagSize) * 2);
   __ add(r2, r2, Operand(displacement));
   __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
@@ -714,7 +721,7 @@
   __ cmp(r1, Operand(ip));
   __ b(hs, &extra);
   __ mov(r3, Operand(r2));
-  __ add(r2, r2, Operand(Array::kHeaderSize - kHeapObjectTag));
+  __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
   __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
 
 
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 47e2749..875c91e 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -226,7 +226,7 @@
   // Add the page header (including remembered set), array header, and array
   // body size to the page address.
   add(object, object, Operand(Page::kObjectStartOffset
-                              + Array::kHeaderSize));
+                              + FixedArray::kHeaderSize));
   add(object, object, Operand(scratch));
 
   bind(&fast);
diff --git a/src/arm/register-allocator-arm-inl.h b/src/arm/register-allocator-arm-inl.h
index d98818f..4691f29 100644
--- a/src/arm/register-allocator-arm-inl.h
+++ b/src/arm/register-allocator-arm-inl.h
@@ -60,7 +60,7 @@
 
 int RegisterAllocator::ToNumber(Register reg) {
   ASSERT(reg.is_valid() && !IsReserved(reg));
-  static int numbers[] = {
+  const int kNumbers[] = {
     0,   // r0
     1,   // r1
     2,   // r2
@@ -78,15 +78,15 @@
     11,  // lr
     -1   // pc
   };
-  return numbers[reg.code()];
+  return kNumbers[reg.code()];
 }
 
 
 Register RegisterAllocator::ToRegister(int num) {
   ASSERT(num >= 0 && num < kNumRegisters);
-  static Register registers[] =
+  const Register kRegisters[] =
       { r0, r1, r2, r3, r4, r5, r6, r7, r9, r10, ip, lr };
-  return registers[num];
+  return kRegisters[num];
 }
 
 
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 6d9ace8..d6650c9 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -164,7 +164,7 @@
     __ ldr(dst, FieldMemOperand(src, offset));
   } else {
     // Calculate the offset into the properties array.
-    int offset = index * kPointerSize + Array::kHeaderSize;
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
     __ ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
     __ ldr(dst, FieldMemOperand(dst, offset));
   }
@@ -330,7 +330,7 @@
     __ RecordWrite(receiver_reg, name_reg, scratch);
   } else {
     // Write to the properties array.
-    int offset = index * kPointerSize + Array::kHeaderSize;
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
     // Get the properties array
     __ ldr(scratch, FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
     __ str(r0, FieldMemOperand(scratch, offset));
@@ -1121,8 +1121,6 @@
 }
 
 
-// TODO(1224671): IC stubs for keyed loads have not been implemented
-// for ARM.
 Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
                                                 JSObject* receiver,
                                                 JSObject* holder,
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index ad5396e..a2c4562 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -47,14 +47,10 @@
 // generate an index for each native JS file.
 class SourceCodeCache BASE_EMBEDDED {
  public:
-  explicit SourceCodeCache(Script::Type type): type_(type) { }
+  explicit SourceCodeCache(Script::Type type): type_(type), cache_(NULL) { }
 
   void Initialize(bool create_heap_objects) {
-    if (create_heap_objects) {
-      cache_ = Heap::empty_fixed_array();
-    } else {
-      cache_ = NULL;
-    }
+    cache_ = create_heap_objects ? Heap::empty_fixed_array() : NULL;
   }
 
   void Iterate(ObjectVisitor* v) {
@@ -1107,12 +1103,6 @@
     global_context()->set_empty_script(*script);
   }
 
-#ifdef V8_HOST_ARCH_64_BIT
-  // TODO(X64): Remove this when inline caches work.
-  FLAG_use_ic = false;
-#endif  // V8_HOST_ARCH_64_BIT
-
-
   if (FLAG_natives_file == NULL) {
     // Without natives file, install default natives.
     for (int i = Natives::GetDelayCount();
diff --git a/src/compilation-cache.cc b/src/compilation-cache.cc
index 0951af1..ec5b39c 100644
--- a/src/compilation-cache.cc
+++ b/src/compilation-cache.cc
@@ -37,10 +37,17 @@
 static const int kSubCacheCount = 4;
 
 // The number of generations for each sub cache.
+#if defined(ANDROID)
+static const int kScriptGenerations = 1;
+static const int kEvalGlobalGenerations = 1;
+static const int kEvalContextualGenerations = 1;
+static const int kRegExpGenerations = 1;
+#else
 static const int kScriptGenerations = 5;
 static const int kEvalGlobalGenerations = 2;
 static const int kEvalContextualGenerations = 2;
 static const int kRegExpGenerations = 2;
+#endif
 
 // Initial of each compilation cache table allocated.
 static const int kInitialCacheSize = 64;
@@ -56,6 +63,8 @@
     tables_ = NewArray<Object*>(generations);
   }
 
+  ~CompilationSubCache() { DeleteArray(tables_); }
+
   // Get the compilation cache tables for a specific generation.
   Handle<CompilationCacheTable> GetTable(int generation);
 
diff --git a/src/debug.cc b/src/debug.cc
index 52be930..64f98c7 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -334,8 +334,11 @@
       rinfo()->set_target_address(stub->entry());
     }
   } else {
-    // Step in through constructs call requires no changes to the running code.
-    ASSERT(RelocInfo::IsConstructCall(rmode()));
+    // Step in through construct call requires no changes to the running code.
+    // Step in through getters/setters should already be prepared as well
+    // because caller of this function (Debug::PrepareStep) is expected to
+    // flood the top frame's function with one shot breakpoints.
+    ASSERT(RelocInfo::IsConstructCall(rmode()) || code->is_inline_cache_stub());
   }
 }
 
@@ -1087,10 +1090,18 @@
 
   // Compute whether or not the target is a call target.
   bool is_call_target = false;
+  bool is_load_or_store = false;
+  bool is_inline_cache_stub = false;
   if (RelocInfo::IsCodeTarget(it.rinfo()->rmode())) {
     Address target = it.rinfo()->target_address();
     Code* code = Code::GetCodeFromTargetAddress(target);
-    if (code->is_call_stub()) is_call_target = true;
+    if (code->is_call_stub()) {
+      is_call_target = true;
+    }
+    if (code->is_inline_cache_stub()) {
+      is_inline_cache_stub = true;
+      is_load_or_store = !is_call_target;
+    }
   }
 
   // If this is the last break code target step out is the only possibility.
@@ -1103,8 +1114,8 @@
       JSFunction* function = JSFunction::cast(frames_it.frame()->function());
       FloodWithOneShot(Handle<SharedFunctionInfo>(function->shared()));
     }
-  } else if (!(is_call_target || RelocInfo::IsConstructCall(it.rmode())) ||
-             step_action == StepNext || step_action == StepMin) {
+  } else if (!(is_inline_cache_stub || RelocInfo::IsConstructCall(it.rmode()))
+             || step_action == StepNext || step_action == StepMin) {
     // Step next or step min.
 
     // Fill the current function with one-shot break points.
@@ -1117,9 +1128,20 @@
   } else {
     // Fill the current function with one-shot break points even for step in on
     // a call target as the function called might be a native function for
-    // which step in will not stop.
+    // which step in will not stop. It also prepares for stepping in
+    // getters/setters.
     FloodWithOneShot(shared);
 
+    if (is_load_or_store) {
+      // Remember source position and frame to handle step in getter/setter. If
+      // there is a custom getter/setter it will be handled in
+      // Object::Get/SetPropertyWithCallback, otherwise the step action will be
+      // propagated on the next Debug::Break.
+      thread_local_.last_statement_position_ =
+          debug_info->code()->SourceStatementPosition(frame->pc());
+      thread_local_.last_fp_ = frame->fp();
+    }
+
     // Step in or Step in min
     it.PrepareStepIn();
     ActivateStepIn(frame);
diff --git a/src/execution.cc b/src/execution.cc
index adc1872..7d163cb 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -164,19 +164,16 @@
   // If you return a function from here, it will be called when an
   // attempt is made to call the given object as a function.
 
-  // The regular expression code here is really meant more as an
-  // example than anything else. KJS does not support calling regular
-  // expressions as functions, but SpiderMonkey does.
-  if (FLAG_call_regexp) {
-    bool is_regexp =
-        object->IsHeapObject() &&
-        (HeapObject::cast(*object)->map()->constructor() ==
-         *Top::regexp_function());
+  // Regular expressions can be called as functions in both Firefox
+  // and Safari so we allow it too.
+  bool is_regexp =
+      object->IsHeapObject() &&
+      (HeapObject::cast(*object)->map()->constructor() ==
+       *Top::regexp_function());
 
-    if (is_regexp) {
-      Handle<String> exec = Factory::exec_symbol();
-      return Handle<Object>(object->GetProperty(*exec));
-    }
+  if (is_regexp) {
+    Handle<String> exec = Factory::exec_symbol();
+    return Handle<Object>(object->GetProperty(*exec));
   }
 
   // Objects created through the API can have an instance-call handler
@@ -590,6 +587,23 @@
     return Heap::undefined_value();
   }
 
+  {
+    JavaScriptFrameIterator it;
+    ASSERT(!it.done());
+    Object* fun = it.frame()->function();
+    if (fun && fun->IsJSFunction()) {
+      GlobalObject* global = JSFunction::cast(fun)->context()->global();
+      // Don't stop in builtin functions.
+      if (global == Top::context()->builtins()) {
+       return Heap::undefined_value();
+      }
+      // Don't stop in debugger functions.
+      if (Debug::IsDebugGlobal(global)) {
+       return Heap::undefined_value();
+      }
+    }
+  }
+
   // Collect the break state before clearing the flags.
   bool debug_command_only =
       StackGuard::IsDebugCommand() && !StackGuard::IsDebugBreak();
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 9c696ed..b0770b0 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -144,9 +144,6 @@
             "automatically set the debug break flag when debugger commands are "
             "in the queue (experimental)")
 
-// execution.cc
-DEFINE_bool(call_regexp, false, "allow calls to RegExp objects")
-
 // frames.cc
 DEFINE_int(max_stack_trace_source_length, 300,
            "maximum length of function source code printed in a stack trace.")
@@ -158,6 +155,8 @@
 DEFINE_int(gc_interval, -1, "garbage collect after <n> allocations")
 DEFINE_bool(trace_gc, false,
             "print one trace line following each garbage collection")
+DEFINE_bool(trace_gc_verbose, false,
+            "print more details following each garbage collection")
 DEFINE_bool(collect_maps, true,
             "garbage collect maps from which no objects can be reached")
 
diff --git a/src/frame-element.cc b/src/frame-element.cc
new file mode 100644
index 0000000..e6bc2ea
--- /dev/null
+++ b/src/frame-element.cc
@@ -0,0 +1,45 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "frame-element.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// FrameElement implementation.
+
+
+FrameElement::ZoneObjectList* FrameElement::ConstantList() {
+  static ZoneObjectList list(10);
+  return &list;
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/frame-element.h b/src/frame-element.h
index 666aabb..ccdecf1 100644
--- a/src/frame-element.h
+++ b/src/frame-element.h
@@ -91,10 +91,7 @@
   // this table of handles to the actual constants.
   typedef ZoneList<Handle<Object> > ZoneObjectList;
 
-  static ZoneObjectList* ConstantList() {
-    static ZoneObjectList list(10);
-    return &list;
-  }
+  static ZoneObjectList* ConstantList();
 
   // Clear the constants indirection table.
   static void ClearConstantList() {
diff --git a/src/hashmap.cc b/src/hashmap.cc
index b717312..3c4e5cd 100644
--- a/src/hashmap.cc
+++ b/src/hashmap.cc
@@ -194,7 +194,10 @@
 void HashMap::Initialize(uint32_t capacity) {
   ASSERT(IsPowerOf2(capacity));
   map_ = reinterpret_cast<Entry*>(allocator_->New(capacity * sizeof(Entry)));
-  if (map_ == NULL) V8::FatalProcessOutOfMemory("HashMap::Initialize");
+  if (map_ == NULL) {
+    V8::FatalProcessOutOfMemory("HashMap::Initialize");
+    return;
+  }
   capacity_ = capacity;
   Clear();
 }
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 36c6f4b..d27f14f 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -228,6 +228,31 @@
 }
 
 
+int Heap::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
+  ASSERT(HasBeenSetup());
+  int amount = amount_of_external_allocated_memory_ + change_in_bytes;
+  if (change_in_bytes >= 0) {
+    // Avoid overflow.
+    if (amount > amount_of_external_allocated_memory_) {
+      amount_of_external_allocated_memory_ = amount;
+    }
+    int amount_since_last_global_gc =
+        amount_of_external_allocated_memory_ -
+        amount_of_external_allocated_memory_at_last_global_gc_;
+    if (amount_since_last_global_gc > external_allocation_limit_) {
+      CollectAllGarbage();
+    }
+  } else {
+    // Avoid underflow.
+    if (amount >= 0) {
+      amount_of_external_allocated_memory_ = amount;
+    }
+  }
+  ASSERT(amount_of_external_allocated_memory_ >= 0);
+  return amount_of_external_allocated_memory_;
+}
+
+
 void Heap::SetLastScriptId(Object* last_script_id) {
   roots_[kLastScriptIdRootIndex] = last_script_id;
 }
diff --git a/src/heap.cc b/src/heap.cc
index 0af3d90..213eec5 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -69,7 +69,7 @@
 
 // semispace_size_ should be a power of 2 and old_generation_size_ should be
 // a multiple of Page::kPageSize.
-#if V8_TARGET_ARCH_ARM
+#if defined(ANDROID)
 int Heap::semispace_size_  = 512*KB;
 int Heap::old_generation_size_ = 128*MB;
 int Heap::initial_semispace_size_ = 128*KB;
@@ -85,8 +85,8 @@
 // Variables set based on semispace_size_ and old_generation_size_ in
 // ConfigureHeap.
 int Heap::young_generation_size_ = 0;  // Will be 2 * semispace_size_.
-
 int Heap::survived_since_last_expansion_ = 0;
+int Heap::external_allocation_limit_ = 0;
 
 Heap::HeapState Heap::gc_state_ = NOT_IN_GC;
 
@@ -207,6 +207,27 @@
 }
 
 
+#if defined(ENABLE_LOGGING_AND_PROFILING)
+void Heap::PrintShortHeapStatistics() {
+  if (!FLAG_trace_gc_verbose) return;
+  PrintF("Memory allocator,   used: %8d, available: %8d\n",
+         MemoryAllocator::Size(), MemoryAllocator::Available());
+  PrintF("New space,          used: %8d, available: %8d\n",
+         Heap::new_space_.Size(), new_space_.Available());
+  PrintF("Old pointers,       used: %8d, available: %8d\n",
+         old_pointer_space_->Size(), old_pointer_space_->Available());
+  PrintF("Old data space,     used: %8d, available: %8d\n",
+         old_data_space_->Size(), old_data_space_->Available());
+  PrintF("Code space,         used: %8d, available: %8d\n",
+         code_space_->Size(), code_space_->Available());
+  PrintF("Map space,          used: %8d, available: %8d\n",
+         map_space_->Size(), map_space_->Available());
+  PrintF("Large object space, used: %8d, available: %8d\n",
+         lo_space_->Size(), lo_space_->Available());
+}
+#endif
+
+
 // TODO(1238405): Combine the infrastructure for --heap-stats and
 // --log-gc to avoid the complicated preprocessor and flag testing.
 void Heap::ReportStatisticsAfterGC() {
@@ -1166,7 +1187,7 @@
   set_undetectable_long_ascii_string_map(Map::cast(obj));
   Map::cast(obj)->set_is_undetectable();
 
-  obj = AllocateMap(BYTE_ARRAY_TYPE, Array::kAlignedSize);
+  obj = AllocateMap(BYTE_ARRAY_TYPE, ByteArray::kAlignedSize);
   if (obj->IsFailure()) return false;
   set_byte_array_map(Map::cast(obj));
 
@@ -2967,6 +2988,7 @@
   semispace_size_ = RoundUpToPowerOf2(semispace_size_);
   initial_semispace_size_ = Min(initial_semispace_size_, semispace_size_);
   young_generation_size_ = 2 * semispace_size_;
+  external_allocation_limit_ = 10 * semispace_size_;
 
   // The old generation is paged.
   old_generation_size_ = RoundUp(old_generation_size_, Page::kPageSize);
@@ -3385,6 +3407,8 @@
 #ifdef ENABLE_LOGGING_AND_PROFILING
 void HeapProfiler::WriteSample() {
   LOG(HeapSampleBeginEvent("Heap", "allocated"));
+  LOG(HeapSampleStats(
+      "Heap", "allocated", Heap::Capacity(), Heap::SizeOfObjects()));
 
   HistogramInfo info[LAST_TYPE+1];
 #define DEF_TYPE_NAME(name) info[name].set_name(#name);
@@ -3620,6 +3644,10 @@
          CollectorString(),
          start_size_, SizeOfHeapObjects(),
          static_cast<int>(OS::TimeCurrentMillis() - start_time_));
+
+#if defined(ENABLE_LOGGING_AND_PROFILING)
+  Heap::PrintShortHeapStatistics();
+#endif
 }
 
 
diff --git a/src/heap.h b/src/heap.h
index f395988..4e2c64c 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -733,6 +733,11 @@
   static void ZapFromSpace();
 #endif
 
+#if defined(ENABLE_LOGGING_AND_PROFILING)
+  // Print short heap statistics.
+  static void PrintShortHeapStatistics();
+#endif
+
   // Makes a new symbol object
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed.
@@ -741,7 +746,7 @@
   static Object* CreateSymbol(String* str);
 
   // Write barrier support for address[offset] = o.
-  inline static void RecordWrite(Address address, int offset);
+  static inline void RecordWrite(Address address, int offset);
 
   // Given an address occupied by a live code object, return that object.
   static Object* FindCodeObject(Address a);
@@ -797,22 +802,7 @@
 
   // Adjusts the amount of registered external memory.
   // Returns the adjusted value.
-  static int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
-    int amount = amount_of_external_allocated_memory_ + change_in_bytes;
-    if (change_in_bytes >= 0) {
-      // Avoid overflow.
-      if (amount > amount_of_external_allocated_memory_) {
-        amount_of_external_allocated_memory_ = amount;
-      }
-    } else {
-      // Avoid underflow.
-      if (amount >= 0) {
-        amount_of_external_allocated_memory_ = amount;
-      }
-    }
-    ASSERT(amount_of_external_allocated_memory_ >= 0);
-    return amount_of_external_allocated_memory_;
-  }
+  static inline int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);
 
   // Allocate unitialized fixed array (pretenure == NON_TENURE).
   static Object* AllocateRawFixedArray(int length);
@@ -896,6 +886,10 @@
   // every allocation in large object space.
   static int old_gen_allocation_limit_;
 
+  // Limit on the amount of externally allocated memory allowed
+  // between global GCs. If reached a global GC is forced.
+  static int external_allocation_limit_;
+
   // The amount of external memory registered through the API kept alive
   // by global handles
   static int amount_of_external_allocated_memory_;
@@ -1225,7 +1219,7 @@
   // Clear the cache.
   static void Clear();
  private:
-  inline static int Hash(Map* map, String* name);
+  static inline int Hash(Map* map, String* name);
   static const int kLength = 64;
   struct Key {
     Map* map;
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 6d1dc2d..457b22f 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -3857,7 +3857,7 @@
     s = s->outer_scope();
   }
 
-  if (s->is_eval_scope()) {
+  if (s != NULL && s->is_eval_scope()) {
     // Loop up the context chain.  There is no frame effect so it is
     // safe to use raw labels here.
     Label next, fast;
@@ -4351,7 +4351,7 @@
            FieldOperand(elements.reg(), JSObject::kElementsOffset));
 
     // Write to the indexed properties array.
-    int offset = i * kPointerSize + Array::kHeaderSize;
+    int offset = i * kPointerSize + FixedArray::kHeaderSize;
     __ mov(FieldOperand(elements.reg(), offset), prop_value.reg());
 
     // Update the write barrier for the array address.
@@ -5388,12 +5388,6 @@
   } else {
     Load(node->expression());
     switch (op) {
-      case Token::NOT:
-      case Token::DELETE:
-      case Token::TYPEOF:
-        UNREACHABLE();  // handled above
-        break;
-
       case Token::SUB: {
         bool overwrite =
             (node->AsBinaryOperation() != NULL &&
@@ -5448,6 +5442,8 @@
       }
 
       default:
+        // NOT, DELETE, TYPEOF, and VOID are handled outside the
+        // switch.
         UNREACHABLE();
     }
   }
@@ -6309,7 +6305,7 @@
         __ mov(index.reg(), key.reg());
         __ sar(index.reg(), kSmiTagSize);
         __ cmp(index.reg(),
-               FieldOperand(elements.reg(), Array::kLengthOffset));
+               FieldOperand(elements.reg(), FixedArray::kLengthOffset));
         deferred->Branch(above_equal);
 
         // Load and check that the result is not the hole.  We could
@@ -6323,7 +6319,7 @@
         __ mov(value.reg(), Operand(elements.reg(),
                                     index.reg(),
                                     times_4,
-                                    Array::kHeaderSize - kHeapObjectTag));
+                                    FixedArray::kHeaderSize - kHeapObjectTag));
         elements.Unuse();
         index.Unuse();
         __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
@@ -6495,7 +6491,7 @@
         __ mov(Operand(tmp.reg(),
                        key.reg(),
                        times_2,
-                       Array::kHeaderSize - kHeapObjectTag),
+                       FixedArray::kHeaderSize - kHeapObjectTag),
                value.reg());
         __ IncrementCounter(&Counters::keyed_store_inline, 1);
 
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 90e0fd1..f7d0797 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -43,6 +43,10 @@
 
 
 // Helper function used to load a property from a dictionary backing storage.
+// This function may return false negatives, so miss_label
+// must always call a backup property load that is complete.
+// This function is safe to call if the receiver has fast properties,
+// or if name is not a symbol, and will jump to the miss_label in that case.
 static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
                                    Register r0, Register r1, Register r2,
                                    Register name) {
@@ -56,7 +60,7 @@
   //
   // r2   - used to hold the capacity of the property dictionary.
   //
-  // name - holds the name of the property and is unchanges.
+  // name - holds the name of the property and is unchanged.
 
   Label done;
 
@@ -89,7 +93,8 @@
 
   // Compute the capacity mask.
   const int kCapacityOffset =
-      Array::kHeaderSize + StringDictionary::kCapacityIndex * kPointerSize;
+      StringDictionary::kHeaderSize +
+      StringDictionary::kCapacityIndex * kPointerSize;
   __ mov(r2, FieldOperand(r0, kCapacityOffset));
   __ shr(r2, kSmiTagSize);  // convert smi to int
   __ dec(r2);
@@ -99,7 +104,8 @@
   // cover ~93% of loads from dictionaries.
   static const int kProbes = 4;
   const int kElementsStartOffset =
-      Array::kHeaderSize + StringDictionary::kElementsStartIndex * kPointerSize;
+      StringDictionary::kHeaderSize +
+      StringDictionary::kElementsStartIndex * kPointerSize;
   for (int i = 0; i < kProbes; i++) {
     // Compute the masked index: (hash + i + i * i) & mask.
     __ mov(r1, FieldOperand(name, String::kLengthOffset));
@@ -153,6 +159,9 @@
 }
 
 
+// The offset from the inlined patch site to the start of the
+// inlined load instruction.  It is 7 bytes (test eax, imm) plus
+// 6 bytes (jne slow_label).
 const int LoadIC::kOffsetToLoadInstruction = 13;
 
 
@@ -228,8 +237,8 @@
   Label slow, fast, check_string, index_int, index_string;
 
   // Load name and receiver.
-  __ mov(eax, (Operand(esp, kPointerSize)));
-  __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+  __ mov(eax, Operand(esp, kPointerSize));
+  __ mov(ecx, Operand(esp, 2 * kPointerSize));
 
   // Check that the object isn't a smi.
   __ test(ecx, Immediate(kSmiTagMask));
@@ -263,21 +272,28 @@
          Immediate(Factory::hash_table_map()));
   __ j(equal, &slow, not_taken);
   // Check that the key (index) is within bounds.
-  __ cmp(eax, FieldOperand(ecx, Array::kLengthOffset));
+  __ cmp(eax, FieldOperand(ecx, FixedArray::kLengthOffset));
   __ j(below, &fast, taken);
   // Slow case: Load name and receiver from stack and jump to runtime.
   __ bind(&slow);
   __ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
   KeyedLoadIC::Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
-  // Check if the key is a symbol that is not an array index.
+
   __ bind(&check_string);
+  // The key is not a smi.
+  // Is it a string?
+  __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
+  __ j(above_equal, &slow);
+  // Is the string an array index, with cached numeric value?
   __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
   __ test(ebx, Immediate(String::kIsArrayIndexMask));
   __ j(not_zero, &index_string, not_taken);
-  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
-  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+
+  // If the string is a symbol, do a quick inline probe of the receiver's
+  // dictionary, if it exists.
+  __ movzx_b(ebx, FieldOperand(edx, Map::kInstanceTypeOffset));
   __ test(ebx, Immediate(kIsSymbolMask));
-  __ j(not_zero, &slow, not_taken);
+  __ j(zero, &slow, not_taken);
   // Probe the dictionary leaving result in ecx.
   GenerateDictionaryLoad(masm, &slow, ebx, ecx, edx, eax);
   GenerateCheckNonObjectOrLoaded(masm, &slow, ecx, edx);
@@ -301,7 +317,8 @@
   __ jmp(&index_int);
   // Fast case: Do the load.
   __ bind(&fast);
-  __ mov(eax, Operand(ecx, eax, times_4, Array::kHeaderSize - kHeapObjectTag));
+  __ mov(eax,
+         Operand(ecx, eax, times_4, FixedArray::kHeaderSize - kHeapObjectTag));
   __ cmp(Operand(eax), Immediate(Factory::the_hole_value()));
   // In case the loaded value is the_hole we have to consult GetProperty
   // to ensure the prototype chain is searched.
@@ -419,7 +436,8 @@
   // eax: value
   // ecx: FixedArray
   // ebx: index (as a smi)
-  __ mov(Operand(ecx, ebx, times_2, Array::kHeaderSize - kHeapObjectTag), eax);
+  __ mov(Operand(ecx, ebx, times_2, FixedArray::kHeaderSize - kHeapObjectTag),
+         eax);
   // Update write barrier for the elements array address.
   __ mov(edx, Operand(eax));
   __ RecordWrite(ecx, 0, edx, ebx);
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 479b8ca..fae1525 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -79,7 +79,7 @@
   // Add the page header, array header, and array body size to the page
   // address.
   masm->add(Operand(object), Immediate(Page::kObjectStartOffset
-                                       + Array::kHeaderSize));
+                                       + FixedArray::kHeaderSize));
   masm->add(object, Operand(scratch));
 
 
@@ -199,9 +199,10 @@
       lea(dst, Operand(object, offset));
     } else {
       // array access: calculate the destination address in the same manner as
-      // KeyedStoreIC::GenerateGeneric
-      lea(dst,
-          Operand(object, dst, times_2, Array::kHeaderSize - kHeapObjectTag));
+      // KeyedStoreIC::GenerateGeneric.  Multiply a smi by 2 to get an offset
+      // into an array of words.
+      lea(dst, Operand(object, dst, times_2,
+                       FixedArray::kHeaderSize - kHeapObjectTag));
     }
     // If we are already generating a shared stub, not inlining the
     // record write code isn't going to save us any memory.
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index 04a5390..2129fd1 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -634,11 +634,9 @@
   __ push(Immediate(0));  // Make room for "input start - 1" constant.
 
   // Check if we have space on the stack for registers.
-  Label retry_stack_check;
   Label stack_limit_hit;
   Label stack_ok;
 
-  __ bind(&retry_stack_check);
   ExternalReference stack_guard_limit =
       ExternalReference::address_of_stack_guard_limit();
   __ mov(ecx, esp);
@@ -658,10 +656,7 @@
   CallCheckStackGuardState(ebx);
   __ or_(eax, Operand(eax));
   // If returned value is non-zero, we exit with the returned value as result.
-  // Otherwise it was a preemption and we just check the limit again.
-  __ j(equal, &retry_stack_check);
-  // Return value was non-zero. Exit with exception or retry.
-  __ jmp(&exit_label_);
+  __ j(not_zero, &exit_label_);
 
   __ bind(&stack_ok);
 
@@ -762,19 +757,11 @@
     __ push(backtrack_stackpointer());
     __ push(edi);
 
-    Label retry;
-
-    __ bind(&retry);
     CallCheckStackGuardState(ebx);
     __ or_(eax, Operand(eax));
     // If returning non-zero, we should end execution with the given
     // result as return value.
     __ j(not_zero, &exit_label_);
-    // Check if we are still preempted.
-    ExternalReference stack_guard_limit =
-        ExternalReference::address_of_stack_guard_limit();
-    __ cmp(esp, Operand::StaticVariable(stack_guard_limit));
-    __ j(below_equal, &retry);
 
     __ pop(edi);
     __ pop(backtrack_stackpointer());
@@ -1073,10 +1060,12 @@
     unibrow::uchar c1 = substring1[i];
     unibrow::uchar c2 = substring2[i];
     if (c1 != c2) {
-      canonicalize.get(c1, '\0', &c1);
-      if (c1 != c2) {
-        canonicalize.get(c2, '\0', &c2);
-        if (c1 != c2) {
+      unibrow::uchar s1[1] = { c1 };
+      canonicalize.get(c1, '\0', s1);
+      if (s1[0] != c2) {
+        unibrow::uchar s2[1] = { c2 };
+        canonicalize.get(c2, '\0', s2);
+        if (s1[0] != s2[0]) {
           return 0;
         }
       }
diff --git a/src/ia32/register-allocator-ia32-inl.h b/src/ia32/register-allocator-ia32-inl.h
index ddee472..99ae6eb 100644
--- a/src/ia32/register-allocator-ia32-inl.h
+++ b/src/ia32/register-allocator-ia32-inl.h
@@ -49,7 +49,7 @@
 
 int RegisterAllocator::ToNumber(Register reg) {
   ASSERT(reg.is_valid() && !IsReserved(reg));
-  static int numbers[] = {
+  const int kNumbers[] = {
     0,   // eax
     2,   // ecx
     3,   // edx
@@ -59,14 +59,14 @@
     -1,  // esi
     4    // edi
   };
-  return numbers[reg.code()];
+  return kNumbers[reg.code()];
 }
 
 
 Register RegisterAllocator::ToRegister(int num) {
   ASSERT(num >= 0 && num < kNumRegisters);
-  static Register registers[] = { eax, ebx, ecx, edx, edi };
-  return registers[num];
+  const Register kRegisters[] = { eax, ebx, ecx, edx, edi };
+  return kRegisters[num];
 }
 
 
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index 0a887d5..4f5b3e0 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -266,15 +266,13 @@
     __ mov(dst, FieldOperand(src, offset));
   } else {
     // Calculate the offset into the properties array.
-    int offset = index * kPointerSize + Array::kHeaderSize;
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
     __ mov(dst, FieldOperand(src, JSObject::kPropertiesOffset));
     __ mov(dst, FieldOperand(dst, offset));
   }
 }
 
 
-
-
 void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
   ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
   Code* code = NULL;
@@ -349,7 +347,7 @@
     __ RecordWrite(receiver_reg, offset, name_reg, scratch);
   } else {
     // Write to the properties array.
-    int offset = index * kPointerSize + Array::kHeaderSize;
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
     // Get the properties array (optimistically).
     __ mov(scratch, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
     __ mov(FieldOperand(scratch, offset), eax);
@@ -1012,7 +1010,7 @@
   __ IncrementCounter(&Counters::named_store_global_inline, 1);
 
   // Check that the map of the global has not changed.
-  __ mov(ebx, (Operand(esp, kPointerSize)));
+  __ mov(ebx, Operand(esp, kPointerSize));
   __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
          Immediate(Handle<Map>(object->map())));
   __ j(not_equal, &miss, not_taken);
@@ -1091,7 +1089,7 @@
   // -----------------------------------
   Label miss;
 
-  __ mov(eax, (Operand(esp, kPointerSize)));
+  __ mov(eax, Operand(esp, kPointerSize));
   GenerateLoadField(object, holder, eax, ebx, edx, index, name, &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1112,7 +1110,7 @@
   // -----------------------------------
   Label miss;
 
-  __ mov(eax, (Operand(esp, kPointerSize)));
+  __ mov(eax, Operand(esp, kPointerSize));
   GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
                        callback, name, &miss);
   __ bind(&miss);
@@ -1134,7 +1132,7 @@
   // -----------------------------------
   Label miss;
 
-  __ mov(eax, (Operand(esp, kPointerSize)));
+  __ mov(eax, Operand(esp, kPointerSize));
   GenerateLoadConstant(object, holder, eax, ebx, edx, value, name, &miss);
   __ bind(&miss);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1154,7 +1152,7 @@
   // -----------------------------------
   Label miss;
 
-  __ mov(eax, (Operand(esp, kPointerSize)));
+  __ mov(eax, Operand(esp, kPointerSize));
   // TODO(368): Compile in the whole chain: all the interceptors in
   // prototypes and ultimate answer.
   GenerateLoadInterceptor(receiver,
@@ -1190,7 +1188,7 @@
   __ IncrementCounter(&Counters::named_load_global_inline, 1);
 
   // Get the receiver from the stack.
-  __ mov(eax, (Operand(esp, kPointerSize)));
+  __ mov(eax, Operand(esp, kPointerSize));
 
   // If the object is the holder then we know that it's a global
   // object which can only happen for contextual loads. In this case,
@@ -1239,8 +1237,8 @@
   // -----------------------------------
   Label miss;
 
-  __ mov(eax, (Operand(esp, kPointerSize)));
-  __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+  __ mov(eax, Operand(esp, kPointerSize));
+  __ mov(ecx, Operand(esp, 2 * kPointerSize));
   __ IncrementCounter(&Counters::keyed_load_field, 1);
 
   // Check that the name has not changed.
@@ -1269,8 +1267,8 @@
   // -----------------------------------
   Label miss;
 
-  __ mov(eax, (Operand(esp, kPointerSize)));
-  __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+  __ mov(eax, Operand(esp, kPointerSize));
+  __ mov(ecx, Operand(esp, 2 * kPointerSize));
   __ IncrementCounter(&Counters::keyed_load_callback, 1);
 
   // Check that the name has not changed.
@@ -1299,8 +1297,8 @@
   // -----------------------------------
   Label miss;
 
-  __ mov(eax, (Operand(esp, kPointerSize)));
-  __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+  __ mov(eax, Operand(esp, kPointerSize));
+  __ mov(ecx, Operand(esp, 2 * kPointerSize));
   __ IncrementCounter(&Counters::keyed_load_constant_function, 1);
 
   // Check that the name has not changed.
@@ -1328,8 +1326,8 @@
   // -----------------------------------
   Label miss;
 
-  __ mov(eax, (Operand(esp, kPointerSize)));
-  __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+  __ mov(eax, Operand(esp, kPointerSize));
+  __ mov(ecx, Operand(esp, 2 * kPointerSize));
   __ IncrementCounter(&Counters::keyed_load_interceptor, 1);
 
   // Check that the name has not changed.
@@ -1364,8 +1362,8 @@
   // -----------------------------------
   Label miss;
 
-  __ mov(eax, (Operand(esp, kPointerSize)));
-  __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+  __ mov(eax, Operand(esp, kPointerSize));
+  __ mov(ecx, Operand(esp, 2 * kPointerSize));
   __ IncrementCounter(&Counters::keyed_load_array_length, 1);
 
   // Check that the name has not changed.
@@ -1390,8 +1388,8 @@
   // -----------------------------------
   Label miss;
 
-  __ mov(eax, (Operand(esp, kPointerSize)));
-  __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+  __ mov(eax, Operand(esp, kPointerSize));
+  __ mov(ecx, Operand(esp, 2 * kPointerSize));
   __ IncrementCounter(&Counters::keyed_load_string_length, 1);
 
   // Check that the name has not changed.
@@ -1416,8 +1414,8 @@
   // -----------------------------------
   Label miss;
 
-  __ mov(eax, (Operand(esp, kPointerSize)));
-  __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+  __ mov(eax, Operand(esp, kPointerSize));
+  __ mov(ecx, Operand(esp, 2 * kPointerSize));
   __ IncrementCounter(&Counters::keyed_load_function_prototype, 1);
 
   // Check that the name has not changed.
diff --git a/src/ic.cc b/src/ic.cc
index 7e82295..090d7a3 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -273,28 +273,39 @@
 static void LookupForRead(Object* object,
                           String* name,
                           LookupResult* lookup) {
-  object->Lookup(name, lookup);
-  if (lookup->IsNotFound() || lookup->type() != INTERCEPTOR) {
-    return;
-  }
+  AssertNoAllocation no_gc;  // pointers must stay valid
 
-  JSObject* holder = lookup->holder();
-  if (HasInterceptorGetter(holder)) {
-    return;
-  }
+  // Skip all the objects with named interceptors, but
+  // without actual getter.
+  while (true) {
+    object->Lookup(name, lookup);
+    // Besides normal conditions (property not found or it's not
+    // an interceptor), bail out if the lookup is not cacheable: we won't
+    // be able to IC it anyway and regular lookup should work fine.
+    if (lookup->IsNotFound() || lookup->type() != INTERCEPTOR ||
+        !lookup->IsCacheable()) {
+      return;
+    }
 
-  // There is no getter, just skip it and lookup down the proto chain
-  holder->LocalLookupRealNamedProperty(name, lookup);
-  if (lookup->IsValid()) {
-    return;
-  }
+    JSObject* holder = lookup->holder();
+    if (HasInterceptorGetter(holder)) {
+      return;
+    }
 
-  Object* proto = holder->GetPrototype();
-  if (proto == Heap::null_value()) {
-    return;
-  }
+    holder->LocalLookupRealNamedProperty(name, lookup);
+    if (lookup->IsValid()) {
+      ASSERT(lookup->type() != INTERCEPTOR);
+      return;
+    }
 
-  LookupForRead(proto, name, lookup);
+    Object* proto = holder->GetPrototype();
+    if (proto->IsNull()) {
+      lookup->NotFound();
+      return;
+    }
+
+    object = proto;
+  }
 }
 
 
@@ -726,7 +737,9 @@
       return TypeError("non_object_property_load", object, name);
     }
 
-    if (FLAG_use_ic) {
+    // TODO(X64): Enable specialized stubs for length and prototype lookup.
+#ifndef V8_TARGET_ARCH_X64
+    if (false && FLAG_use_ic) {
       // Use specialized code for getting the length of strings.
       if (object->IsString() && name->Equals(Heap::length_symbol())) {
         Handle<String> string = Handle<String>::cast(object);
@@ -736,7 +749,7 @@
         set_target(Code::cast(code));
 #ifdef DEBUG
         TraceIC("KeyedLoadIC", name, state, target());
-#endif
+#endif  // DEBUG
         return Smi::FromInt(string->length());
       }
 
@@ -748,7 +761,7 @@
         set_target(Code::cast(code));
 #ifdef DEBUG
         TraceIC("KeyedLoadIC", name, state, target());
-#endif
+#endif  // DEBUG
         return JSArray::cast(*object)->length();
       }
 
@@ -761,10 +774,11 @@
         set_target(Code::cast(code));
 #ifdef DEBUG
         TraceIC("KeyedLoadIC", name, state, target());
-#endif
+#endif  // DEBUG
         return Accessors::FunctionGetPrototype(*object, 0);
       }
     }
+#endif  // !V8_TARGET_ARCH_X64
 
     // Check if the name is trivially convertible to an index and get
     // the element or char if so.
@@ -787,10 +801,13 @@
       }
     }
 
+    // TODO(X64): Enable inline caching for load.
+#ifndef V8_TARGET_ARCH_X64
     // Update the inline cache.
     if (FLAG_use_ic && lookup.IsLoaded()) {
       UpdateCaches(&lookup, state, object, name);
     }
+#endif
 
     PropertyAttributes attr;
     if (lookup.IsValid() && lookup.type() == INTERCEPTOR) {
@@ -961,6 +978,10 @@
     return *value;
   }
 
+  // TODO(X64): Enable inline cache for StoreIC.
+#ifdef V8_TARGET_ARCH_X64
+  USE(&LookupForWrite);  // The compiler complains otherwise.
+#else
   // Lookup the property locally in the receiver.
   if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
     LookupResult lookup;
@@ -968,6 +989,7 @@
       UpdateCaches(&lookup, state, receiver, name, value);
     }
   }
+#endif
 
   // Set the property.
   return receiver->SetProperty(*name, *value, NONE);
@@ -1086,10 +1108,13 @@
     LookupResult lookup;
     receiver->LocalLookup(*name, &lookup);
 
+    // TODO(X64): Enable inline cache for KeyedStoreIC.
+#ifndef V8_TARGET_ARCH_X64
     // Update inline cache and stub cache.
     if (FLAG_use_ic && lookup.IsLoaded()) {
       UpdateCaches(&lookup, state, receiver, name, value);
     }
+#endif
 
     // Set the property.
     return receiver->SetProperty(*name, *value, NONE);
@@ -1221,11 +1246,6 @@
 }
 
 
-void CallIC::GeneratePreMonomorphic(MacroAssembler* masm, int argc) {
-  Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
-}
-
-
 void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
   Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
 }
diff --git a/src/ic.h b/src/ic.h
index 7d03377..593519b 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -49,7 +49,8 @@
   ICU(StoreInterceptorProperty)
 
 //
-// IC is the base class for LoadIC, StoreIC and CallIC.
+// IC is the base class for LoadIC, StoreIC, CallIC, KeyedLoadIC,
+// and KeyedStoreIC.
 //
 class IC {
  public:
@@ -173,7 +174,6 @@
 
   // Code generator routines.
   static void GenerateInitialize(MacroAssembler* masm, int argc);
-  static void GeneratePreMonomorphic(MacroAssembler* masm, int argc);
   static void GenerateMiss(MacroAssembler* masm, int argc);
   static void GenerateMegamorphic(MacroAssembler* masm, int argc);
   static void GenerateNormal(MacroAssembler* masm, int argc);
@@ -219,8 +219,8 @@
   static void GenerateFunctionPrototype(MacroAssembler* masm);
 
   // The offset from the inlined patch site to the start of the
-  // inlined load instruction.  It is 7 bytes (test eax, imm) plus
-  // 6 bytes (jne slow_label).
+  // inlined load instruction.  It is architecture-dependent, and not
+  // used on ARM.
   static const int kOffsetToLoadInstruction;
 
  private:
diff --git a/src/interpreter-irregexp.cc b/src/interpreter-irregexp.cc
index 0a8ae8c..ae914d3 100644
--- a/src/interpreter-irregexp.cc
+++ b/src/interpreter-irregexp.cc
@@ -51,9 +51,11 @@
     unibrow::uchar old_char = subject[from++];
     unibrow::uchar new_char = subject[current++];
     if (old_char == new_char) continue;
-    interp_canonicalize.get(old_char, '\0', &old_char);
-    interp_canonicalize.get(new_char, '\0', &new_char);
-    if (old_char != new_char) {
+    unibrow::uchar old_string[1] = { old_char };
+    unibrow::uchar new_string[1] = { new_char };
+    interp_canonicalize.get(old_char, '\0', old_string);
+    interp_canonicalize.get(new_char, '\0', new_string);
+    if (old_string[0] != new_string[0]) {
       return false;
     }
   }
diff --git a/src/log.cc b/src/log.cc
index 2ca89dd..33cf8e2 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -843,7 +843,22 @@
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (!Log::IsEnabled() || !FLAG_log_gc) return;
   LogMessageBuilder msg;
-  msg.Append("heap-sample-begin,\"%s\",\"%s\"\n", space, kind);
+  // Using non-relative system time in order to be able to synchronize with
+  // external memory profiling events (e.g. DOM memory size).
+  msg.Append("heap-sample-begin,\"%s\",\"%s\",%.0f\n",
+             space, kind, OS::TimeCurrentMillis());
+  msg.WriteToLogFile();
+#endif
+}
+
+
+void Logger::HeapSampleStats(const char* space, const char* kind,
+                             int capacity, int used) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log_gc) return;
+  LogMessageBuilder msg;
+  msg.Append("heap-sample-stats,\"%s\",\"%s\",%d,%d\n",
+             space, kind, capacity, used);
   msg.WriteToLogFile();
 #endif
 }
diff --git a/src/log.h b/src/log.h
index f68234f..95c9cde 100644
--- a/src/log.h
+++ b/src/log.h
@@ -219,6 +219,8 @@
   static void HeapSampleBeginEvent(const char* space, const char* kind);
   static void HeapSampleEndEvent(const char* space, const char* kind);
   static void HeapSampleItemEvent(const char* type, int number, int bytes);
+  static void HeapSampleStats(const char* space, const char* kind,
+                              int capacity, int used);
 
   static void SharedLibraryEvent(const char* library_path,
                                  uintptr_t start,
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index d54f741..8c57afd 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -958,6 +958,7 @@
 }
 
 
+#ifdef ENABLE_DEBUGGER_SUPPORT
 void DebugInfo::DebugInfoVerify() {
   CHECK(IsDebugInfo());
   VerifyPointer(shared());
@@ -997,6 +998,7 @@
   PrintF("\n - break_point_objects: ");
   break_point_objects()->ShortPrint();
 }
+#endif
 
 
 void JSObject::IncrementSpillStatistics(SpillInformation* info) {
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 37c9b8b..7abc7c3 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -1075,7 +1075,12 @@
 
 
 int JSObject::GetHeaderSize() {
-  switch (map()->instance_type()) {
+  InstanceType type = map()->instance_type();
+  // Check for the most common kind of JavaScript object before
+  // falling into the generic switch. This speeds up the internal
+  // field operations considerably on average.
+  if (type == JS_OBJECT_TYPE) return JSObject::kHeaderSize;
+  switch (type) {
     case JS_GLOBAL_PROXY_TYPE:
       return JSGlobalProxy::kSize;
     case JS_GLOBAL_OBJECT_TYPE:
@@ -1090,7 +1095,6 @@
       return JSValue::kSize;
     case JS_REGEXP_TYPE:
       return JSValue::kSize;
-    case JS_OBJECT_TYPE:
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
       return JSObject::kHeaderSize;
     default:
diff --git a/src/objects.cc b/src/objects.cc
index a9004c9..72412c1 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -216,6 +216,12 @@
   HandleScope scope;
   Handle<JSFunction> fun(JSFunction::cast(getter));
   Handle<Object> self(receiver);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Handle stepping into a getter if step into is active.
+  if (Debug::StepInActive()) {
+    Debug::HandleStepIn(fun, Handle<Object>::null(), 0, false);
+  }
+#endif
   bool has_pending_exception;
   Handle<Object> result =
       Execution::Call(fun, self, 0, NULL, &has_pending_exception);
@@ -1624,6 +1630,12 @@
   Handle<Object> value_handle(value);
   Handle<JSFunction> fun(JSFunction::cast(setter));
   Handle<JSObject> self(this);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Handle stepping into a setter if step into is active.
+  if (Debug::StepInActive()) {
+    Debug::HandleStepIn(fun, Handle<Object>::null(), 0, false);
+  }
+#endif
   bool has_pending_exception;
   Object** argv[] = { value_handle.location() };
   Execution::Call(fun, self, 1, argv, &has_pending_exception);
diff --git a/src/objects.h b/src/objects.h
index 5c76e4a..5e5eb6b 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1718,6 +1718,10 @@
 
   // Layout descriptor.
   static const int kLengthOffset = HeapObject::kHeaderSize;
+
+ protected:
+  // No code should use the Array class directly, only its subclasses.
+  // Use the kHeaderSize of the appropriate subclass, which may be aligned.
   static const int kHeaderSize = kLengthOffset + kIntSize;
   static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
 
@@ -2427,6 +2431,10 @@
   void ByteArrayVerify();
 #endif
 
+  // ByteArray headers are not quadword aligned.
+  static const int kHeaderSize = Array::kHeaderSize;
+  static const int kAlignedSize = Array::kAlignedSize;
+
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(ByteArray);
 };
diff --git a/src/parser.cc b/src/parser.cc
index 89d6d5b..da2b286 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -834,12 +834,7 @@
     return new CallEval(expression, arguments, pos);
   }
 
-  virtual Statement* EmptyStatement() {
-    // Use a statically allocated empty statement singleton to avoid
-    // allocating lots and lots of empty statements.
-    static v8::internal::EmptyStatement empty;
-    return &empty;
-  }
+  virtual Statement* EmptyStatement();
 };
 
 
@@ -1032,6 +1027,14 @@
 }
 
 
+Statement* AstBuildingParserFactory::EmptyStatement() {
+  // Use a statically allocated empty statement singleton to avoid
+  // allocating lots and lots of empty statements.
+  static v8::internal::EmptyStatement empty;
+  return &empty;
+}
+
+
 Scope* ParserFactory::NewScope(Scope* parent, Scope::Type type,
                                bool inside_with) {
   ASSERT(parent != NULL);
@@ -2367,7 +2370,7 @@
       result = NEW(TryFinally(try_block, finally_block));
       // Add the jump targets of the try block and the catch block.
       for (int i = 0; i < collector.targets()->length(); i++) {
-        catch_collector.targets()->Add(collector.targets()->at(i));
+        catch_collector.AddTarget(collector.targets()->at(i));
       }
       result->set_escaping_targets(catch_collector.targets());
     }
@@ -3928,7 +3931,7 @@
     case '*':
     case '+':
     case '?':
-      ReportError(CStrVector("Nothing to repeat") CHECK_FAILED);
+      return ReportError(CStrVector("Nothing to repeat"));
     case '^': {
       Advance();
       if (multiline_) {
@@ -4003,7 +4006,7 @@
     case '\\':
       switch (Next()) {
       case kEndMarker:
-        ReportError(CStrVector("\\ at end of pattern") CHECK_FAILED);
+        return ReportError(CStrVector("\\ at end of pattern"));
       case 'b':
         Advance(2);
         builder->AddAssertion(
@@ -4490,7 +4493,7 @@
         return CharacterRange::Singleton(0);  // Return dummy value.
       }
       case kEndMarker:
-        ReportError(CStrVector("\\ at end of pattern") CHECK_FAILED);
+        return ReportError(CStrVector("\\ at end of pattern"));
       default:
         uc32 c = ParseClassCharacterEscape(CHECK_FAILED);
         return CharacterRange::Singleton(c);
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index 880931e..445f588 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -28,10 +28,11 @@
 // Platform specific code for MacOS goes here. For the POSIX comaptible parts
 // the implementation is in platform-posix.cc.
 
-#include <ucontext.h>
 #include <unistd.h>
 #include <sys/mman.h>
 #include <mach/mach_init.h>
+#include <mach-o/dyld.h>
+#include <mach-o/getsect.h>
 
 #include <AvailabilityMacros.h>
 
@@ -205,7 +206,19 @@
 
 
 void OS::LogSharedLibraryAddresses() {
-  // TODO(1233579): Implement.
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  unsigned int images_count = _dyld_image_count();
+  for (unsigned int i = 0; i < images_count; ++i) {
+    const mach_header* header = _dyld_get_image_header(i);
+    if (header == NULL) continue;
+    unsigned int size;
+    char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
+    if (code_ptr == NULL) continue;
+    const uintptr_t slide = _dyld_get_image_vmaddr_slide(i);
+    const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
+    LOG(SharedLibraryEvent(_dyld_get_image_name(i), start, start + size));
+  }
+#endif  // ENABLE_LOGGING_AND_PROFILING
 }
 
 
@@ -411,14 +424,10 @@
  public:
 
   MacOSMutex() {
-    // For some reason the compiler doesn't allow you to write
-    // "this->mutex_ = PTHREAD_..." directly on mac.
-    pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
     pthread_mutexattr_t attr;
     pthread_mutexattr_init(&attr);
     pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
-    pthread_mutex_init(&m, &attr);
-    mutex_ = m;
+    pthread_mutex_init(&mutex_, &attr);
   }
 
   ~MacOSMutex() { pthread_mutex_destroy(&mutex_); }
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index d628a51..6174522 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -42,11 +42,15 @@
 #include <netinet/in.h>
 #include <netdb.h>
 
+#if defined(ANDROID)
+#define LOG_TAG "v8"
+#include <utils/Log.h>  // LOG_PRI_VA
+#endif
+
 #include "v8.h"
 
 #include "platform.h"
 
-
 namespace v8 {
 namespace internal {
 
@@ -126,7 +130,11 @@
 
 
 void OS::VPrint(const char* format, va_list args) {
+#if defined(ANDROID)
+  LOG_PRI_VA(ANDROID_LOG_INFO, LOG_TAG, format, args);
+#else
   vprintf(format, args);
+#endif
 }
 
 
@@ -139,7 +147,11 @@
 
 
 void OS::VPrintError(const char* format, va_list args) {
+#if defined(ANDROID)
+  LOG_PRI_VA(ANDROID_LOG_ERROR, LOG_TAG, format, args);
+#else
   vfprintf(stderr, format, args);
+#endif
 }
 
 
diff --git a/src/register-allocator.cc b/src/register-allocator.cc
index d1b08bb..d55f949 100644
--- a/src/register-allocator.cc
+++ b/src/register-allocator.cc
@@ -44,6 +44,12 @@
 }
 
 
+Result::ZoneObjectList* Result::ConstantList() {
+  static ZoneObjectList list(10);
+  return &list;
+}
+
+
 // -------------------------------------------------------------------------
 // RegisterAllocator implementation.
 
diff --git a/src/register-allocator.h b/src/register-allocator.h
index f7167d9..1765633 100644
--- a/src/register-allocator.h
+++ b/src/register-allocator.h
@@ -92,10 +92,7 @@
   // of handles to the actual constants.
   typedef ZoneList<Handle<Object> > ZoneObjectList;
 
-  static ZoneObjectList* ConstantList() {
-    static ZoneObjectList list(10);
-    return &list;
-  }
+  static ZoneObjectList* ConstantList();
 
   // Clear the constants indirection table.
   static void ClearConstantList() {
diff --git a/src/rewriter.cc b/src/rewriter.cc
index 4d1fbd9..8a7267a 100644
--- a/src/rewriter.cc
+++ b/src/rewriter.cc
@@ -38,8 +38,9 @@
 
 class AstOptimizer: public AstVisitor {
  public:
-  explicit AstOptimizer() {}
-  explicit AstOptimizer(Handle<String> enclosing_name) {
+  explicit AstOptimizer() : has_function_literal_(false) {}
+  explicit AstOptimizer(Handle<String> enclosing_name)
+      : has_function_literal_(false) {
     func_name_inferrer_.PushEnclosingName(enclosing_name);
   }
 
diff --git a/src/serialize.cc b/src/serialize.cc
index 592cf5a..963138e 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -1454,9 +1454,9 @@
 static void InitPagedSpace(PagedSpace* space,
                            int capacity,
                            List<Page*>* page_list) {
-  space->EnsureCapacity(capacity);
-  // TODO(1240712): PagedSpace::EnsureCapacity can return false due to
-  // a failure to allocate from the OS to expand the space.
+  if (!space->EnsureCapacity(capacity)) {
+    V8::FatalProcessOutOfMemory("InitPagedSpace");
+  }
   PageIterator it(space, PageIterator::ALL_PAGES);
   while (it.has_next()) page_list->Add(it.next());
 }
diff --git a/src/spaces.cc b/src/spaces.cc
index 2393281..4f8119f 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -133,8 +133,6 @@
 #endif
       stop_page_ = space->last_page_;
       break;
-    default:
-      UNREACHABLE();
   }
 }
 
@@ -725,11 +723,15 @@
   Page* current_page = top_page->next_page();
   // Loop over the pages to the end of the space.
   while (current_page->is_valid()) {
+#if defined(ANDROID)
+    // Free all chunks if possible
+#else
     // Advance last_page_to_keep every other step to end up at the midpoint.
     if ((free_pages & 0x1) == 1) {
       pages_to_keep++;
       last_page_to_keep = last_page_to_keep->next_page();
     }
+#endif
     free_pages++;
     current_page = current_page->next_page();
   }
diff --git a/src/spaces.h b/src/spaces.h
index ccd1d27..94f7a91 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -393,6 +393,9 @@
   // Returns the maximum available bytes of heaps.
   static int Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
 
+  // Returns allocated spaces in bytes.
+  static int Size() { return size_; }
+
   // Returns maximum available bytes that the old space can have.
   static int MaxAvailable() {
     return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
@@ -434,7 +437,11 @@
   static const int kMaxNofChunks = 1 << Page::kPageSizeBits;
   // If a chunk has at least 32 pages, the maximum heap size is about
   // 8 * 1024 * 32 * 8K = 2G bytes.
+#if defined(ANDROID)
+  static const int kPagesPerChunk = 16;
+#else
   static const int kPagesPerChunk = 64;
+#endif
   static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
 
  private:
diff --git a/src/string-stream.cc b/src/string-stream.cc
index 9a137e3..ee343a5 100644
--- a/src/string-stream.cc
+++ b/src/string-stream.cc
@@ -44,12 +44,6 @@
 }
 
 
-NoAllocationStringAllocator::NoAllocationStringAllocator(unsigned bytes) {
-  size_ = bytes;
-  space_ = NewArray<char>(bytes);
-}
-
-
 NoAllocationStringAllocator::NoAllocationStringAllocator(char* memory,
                                                          unsigned size) {
   size_ = size;
diff --git a/src/string-stream.h b/src/string-stream.h
index 15a72e0..5732944 100644
--- a/src/string-stream.h
+++ b/src/string-stream.h
@@ -57,11 +57,10 @@
 
 
 // Allocator for use when no new c++ heap allocation is allowed.
-// Allocates all space up front and does no allocation while building
-// message.
+// Given a preallocated buffer up front and does no allocation while
+// building message.
 class NoAllocationStringAllocator: public StringAllocator {
  public:
-  explicit NoAllocationStringAllocator(unsigned bytes);
   NoAllocationStringAllocator(char* memory, unsigned size);
   char* allocate(unsigned bytes) { return space_; }
   char* grow(unsigned* bytes);
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index 7eb8cd3..7ca2677 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -863,6 +863,8 @@
 Object* StubCompiler::CompileCallPreMonomorphic(Code::Flags flags) {
   HandleScope scope;
   int argc = Code::ExtractArgumentsCountFromFlags(flags);
+  // The code of the PreMonomorphic stub is the same as the code
+  // of the Initialized stub.  They just differ on the code object flags.
   CallIC::GenerateInitialize(masm(), argc);
   Object* result = GetCodeWithFlags(flags, "CompileCallPreMonomorphic");
   if (!result->IsFailure()) {
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 8bee370..c6b002b 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -256,11 +256,14 @@
   }
 
   // Compute the entry for a given offset in exactly the same way as
-  // we done in generated code. This makes it a lot easier to avoid
-  // making mistakes in the hashed offset computations.
+  // we do in generated code.  We generate an hash code that already
+  // ends in String::kHashShift 0s.  Then we shift it so it is a multiple
+  // of sizeof(Entry).  This makes it easier to avoid making mistakes
+  // in the hashed offset computations.
   static Entry* entry(Entry* table, int offset) {
+    const int shift_amount = kPointerSizeLog2 + 1 - String::kHashShift;
     return reinterpret_cast<Entry*>(
-        reinterpret_cast<Address>(table) + (offset << 1));
+        reinterpret_cast<Address>(table) + (offset << shift_amount));
   }
 };
 
diff --git a/src/version.cc b/src/version.cc
index f1833a5..505d3fa 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -33,8 +33,8 @@
 // NOTE these macros are used by the SCons build script so their names
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     1
-#define MINOR_VERSION     2
-#define BUILD_NUMBER      14
+#define MINOR_VERSION     3
+#define BUILD_NUMBER      0
 #define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false
 
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index c4ee454..f511145 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -456,13 +456,13 @@
 
 
 void Assembler::arithmetic_op_32(byte opcode,
-                                 const Operand& dst,
-                                 Register src) {
+                                 Register reg,
+                                 const Operand& rm_reg) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  emit_optional_rex_32(src, dst);
+  emit_optional_rex_32(reg, rm_reg);
   emit(opcode);
-  emit_operand(src, dst);
+  emit_operand(reg, rm_reg);
 }
 
 
@@ -773,6 +773,15 @@
 }
 
 
+void Assembler::decl(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst);
+  emit(0xFF);
+  emit_modrm(0x1, dst);
+}
+
+
 void Assembler::decl(const Operand& dst) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1521,7 +1530,7 @@
 
 
 void Assembler::testb(Register reg, Immediate mask) {
-  ASSERT(is_int8(mask.value_));
+  ASSERT(is_int8(mask.value_) || is_uint8(mask.value_));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   if (reg.is(rax)) {
@@ -1540,7 +1549,7 @@
 
 
 void Assembler::testb(const Operand& op, Immediate mask) {
-  ASSERT(is_int8(mask.value_));
+  ASSERT(is_int8(mask.value_) || is_uint8(mask.value_));
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   emit_optional_rex_32(rax, op);
@@ -2183,48 +2192,3 @@
 
 
 } }  // namespace v8::internal
-
-
-// TODO(x64): Implement and move these to their correct cc-files:
-#include "ast.h"
-#include "bootstrapper.h"
-#include "codegen-inl.h"
-#include "cpu.h"
-#include "debug.h"
-#include "disasm.h"
-#include "disassembler.h"
-#include "frames-inl.h"
-#include "x64/macro-assembler-x64.h"
-#include "x64/regexp-macro-assembler-x64.h"
-#include "ic-inl.h"
-#include "log.h"
-#include "macro-assembler.h"
-#include "parser.h"
-#include "regexp-macro-assembler.h"
-#include "regexp-stack.h"
-#include "register-allocator-inl.h"
-#include "register-allocator.h"
-#include "runtime.h"
-#include "scopes.h"
-#include "serialize.h"
-#include "stub-cache.h"
-#include "unicode.h"
-
-namespace v8 {
-namespace internal {
-
-
-void BreakLocationIterator::ClearDebugBreakAtReturn() {
-  UNIMPLEMENTED();
-}
-
-bool BreakLocationIterator::IsDebugBreakAtReturn()  {
-  UNIMPLEMENTED();
-  return false;
-}
-
-void BreakLocationIterator::SetDebugBreakAtReturn()  {
-  UNIMPLEMENTED();
-}
-
-} }  // namespace v8::internal
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index e895332..4b1eb7a 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -521,10 +521,6 @@
   void xchg(Register dst, Register src);
 
   // Arithmetics
-  void addq(Register dst, Register src) {
-    arithmetic_op(0x03, dst, src);
-  }
-
   void addl(Register dst, Register src) {
     arithmetic_op_32(0x03, dst, src);
   }
@@ -533,15 +529,22 @@
     immediate_arithmetic_op_32(0x0, dst, src);
   }
 
+  void addl(Register dst, const Operand& src) {
+    arithmetic_op_32(0x03, dst, src);
+  }
+
   void addl(const Operand& dst, Immediate src) {
     immediate_arithmetic_op_32(0x0, dst, src);
   }
 
+  void addq(Register dst, Register src) {
+    arithmetic_op(0x03, dst, src);
+  }
+
   void addq(Register dst, const Operand& src) {
     arithmetic_op(0x03, dst, src);
   }
 
-
   void addq(const Operand& dst, Register src) {
     arithmetic_op(0x01, src, dst);
   }
@@ -567,11 +570,11 @@
   }
 
   void cmpl(Register dst, const Operand& src) {
-    arithmetic_op_32(0x3B, src, dst);
+    arithmetic_op_32(0x3B, dst, src);
   }
 
   void cmpl(const Operand& dst, Register src) {
-    arithmetic_op_32(0x39, dst, src);
+    arithmetic_op_32(0x39, src, dst);
   }
 
   void cmpl(Register dst, Immediate src) {
@@ -624,6 +627,7 @@
 
   void decq(Register dst);
   void decq(const Operand& dst);
+  void decl(Register dst);
   void decl(const Operand& dst);
 
   // Sign-extends rax into rdx:rax.
@@ -718,6 +722,10 @@
     shift_32(dst, 0x4);
   }
 
+  void shll(Register dst, Immediate shift_amount) {
+    shift_32(dst, shift_amount, 0x4);
+  }
+
   void shr(Register dst, Immediate shift_amount) {
     shift(dst, shift_amount, 0x5);
   }
@@ -730,6 +738,10 @@
     shift_32(dst, 0x5);
   }
 
+  void shrl(Register dst, Immediate shift_amount) {
+    shift_32(dst, shift_amount, 0x5);
+  }
+
   void store_rax(void* dst, RelocInfo::Mode mode);
   void store_rax(ExternalReference ref);
 
@@ -1114,8 +1126,8 @@
   // ModR/M byte.
   void arithmetic_op(byte opcode, Register dst, Register src);
   void arithmetic_op_32(byte opcode, Register dst, Register src);
-  void arithmetic_op_32(byte opcode, const Operand& dst, Register src);
-  void arithmetic_op(byte opcode, Register reg, const Operand& op);
+  void arithmetic_op_32(byte opcode, Register reg, const Operand& rm_reg);
+  void arithmetic_op(byte opcode, Register reg, const Operand& rm_reg);
   void immediate_arithmetic_op(byte subcode, Register dst, Immediate src);
   void immediate_arithmetic_op(byte subcode, const Operand& dst, Immediate src);
   // Operate on a 32-bit word in memory or register.
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 459921c..08f8338 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -394,9 +394,9 @@
   // If given receiver is already a JavaScript object then there's no
   // reason for converting it.
   __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
-  __ j(less, &call_to_object);
+  __ j(below, &call_to_object);
   __ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE);
-  __ j(less_equal, &push_receiver);
+  __ j(below_equal, &push_receiver);
 
   // Convert the receiver to an object.
   __ bind(&call_to_object);
@@ -562,7 +562,7 @@
   // If the type of the result (stored in its map) is less than
   // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
   __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
-  __ j(greater_equal, &exit);
+  __ j(above_equal, &exit);
 
   // Throw away the result of the constructor invocation and use the
   // on-stack receiver as the result.
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index e3e32e6..66e4d39 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -3379,7 +3379,7 @@
   // functions to make sure they have 'Function' as their class.
 
   __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
-  null.Branch(less);
+  null.Branch(below);
 
   // As long as JS_FUNCTION_TYPE is the last instance type and it is
   // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
@@ -5321,6 +5321,7 @@
       // patch the map check if appropriate.
 
       // TODO(x64): Implement inlined loads for keyed properties.
+      // Make sure to load length field as a 32-bit quantity.
       //      Comment cmnt(masm, "[ Load from keyed Property");
 
       RelocInfo::Mode mode = is_global
diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc
index e94e781..177eb90 100644
--- a/src/x64/debug-x64.cc
+++ b/src/x64/debug-x64.cc
@@ -80,6 +80,21 @@
   masm->int3();  // UNIMPLEMENTED
 }
 
+void BreakLocationIterator::ClearDebugBreakAtReturn() {
+  // TODO(X64): Implement this when we start setting Debug breaks.
+  UNIMPLEMENTED();
+}
+
+bool BreakLocationIterator::IsDebugBreakAtReturn()  {
+  // TODO(X64): Implement this when we start setting Debug breaks.
+  UNIMPLEMENTED();
+  return false;
+}
+
+void BreakLocationIterator::SetDebugBreakAtReturn()  {
+  UNIMPLEMENTED();
+}
+
 #endif  // ENABLE_DEBUGGER_SUPPORT
 
 } }  // namespace v8::internal
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index f962c01..8b746c4 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -34,8 +34,15 @@
 
 namespace disasm {
 
-enum OperandOrder {
-  UNSET_OP_ORDER = 0, REG_OPER_OP_ORDER, OPER_REG_OP_ORDER
+enum OperandType {
+  UNSET_OP_ORDER = 0,
+  // Operand size decides between 16, 32 and 64 bit operands.
+  REG_OPER_OP_ORDER = 1,  // Register destination, operand source.
+  OPER_REG_OP_ORDER = 2,  // Operand destination, register source.
+  // Fixed 8-bit operands.
+  BYTE_SIZE_OPERAND_FLAG = 4,
+  BYTE_REG_OPER_OP_ORDER = REG_OPER_OP_ORDER | BYTE_SIZE_OPERAND_FLAG,
+  BYTE_OPER_REG_OP_ORDER = OPER_REG_OP_ORDER | BYTE_SIZE_OPERAND_FLAG
 };
 
 //------------------------------------------------------------------
@@ -43,28 +50,53 @@
 //------------------------------------------------------------------
 struct ByteMnemonic {
   int b;  // -1 terminates, otherwise must be in range (0..255)
-  OperandOrder op_order_;
+  OperandType op_order_;
   const char* mnem;
 };
 
 
 static ByteMnemonic two_operands_instr[] = {
-  { 0x03, REG_OPER_OP_ORDER, "add" },
-  { 0x21, OPER_REG_OP_ORDER, "and" },
-  { 0x23, REG_OPER_OP_ORDER, "and" },
-  { 0x3B, REG_OPER_OP_ORDER, "cmp" },
-  { 0x8D, REG_OPER_OP_ORDER, "lea" },
-  { 0x09, OPER_REG_OP_ORDER, "or" },
-  { 0x0B, REG_OPER_OP_ORDER, "or" },
-  { 0x1B, REG_OPER_OP_ORDER, "sbb" },
-  { 0x29, OPER_REG_OP_ORDER, "sub" },
-  { 0x2B, REG_OPER_OP_ORDER, "sub" },
-  { 0x85, REG_OPER_OP_ORDER, "test" },
-  { 0x31, OPER_REG_OP_ORDER, "xor" },
-  { 0x33, REG_OPER_OP_ORDER, "xor" },
-  { 0x87, REG_OPER_OP_ORDER, "xchg" },
-  { 0x8A, REG_OPER_OP_ORDER, "movb" },
-  { 0x8B, REG_OPER_OP_ORDER, "mov" },
+  { 0x00, BYTE_OPER_REG_OP_ORDER, "add" },
+  { 0x01, OPER_REG_OP_ORDER,      "add" },
+  { 0x02, BYTE_REG_OPER_OP_ORDER, "add" },
+  { 0x03, REG_OPER_OP_ORDER,      "add" },
+  { 0x08, BYTE_OPER_REG_OP_ORDER, "or" },
+  { 0x09, OPER_REG_OP_ORDER,      "or" },
+  { 0x0A, BYTE_REG_OPER_OP_ORDER, "or" },
+  { 0x0B, REG_OPER_OP_ORDER,      "or" },
+  { 0x10, BYTE_OPER_REG_OP_ORDER, "adc" },
+  { 0x11, OPER_REG_OP_ORDER,      "adc" },
+  { 0x12, BYTE_REG_OPER_OP_ORDER, "adc" },
+  { 0x13, REG_OPER_OP_ORDER,      "adc" },
+  { 0x18, BYTE_OPER_REG_OP_ORDER, "sbb" },
+  { 0x19, OPER_REG_OP_ORDER,      "sbb" },
+  { 0x1A, BYTE_REG_OPER_OP_ORDER, "sbb" },
+  { 0x1B, REG_OPER_OP_ORDER,      "sbb" },
+  { 0x20, BYTE_OPER_REG_OP_ORDER, "and" },
+  { 0x21, OPER_REG_OP_ORDER,      "and" },
+  { 0x22, BYTE_REG_OPER_OP_ORDER, "and" },
+  { 0x23, REG_OPER_OP_ORDER,      "and" },
+  { 0x28, BYTE_OPER_REG_OP_ORDER, "sub" },
+  { 0x29, OPER_REG_OP_ORDER,      "sub" },
+  { 0x2A, BYTE_REG_OPER_OP_ORDER, "sub" },
+  { 0x2B, REG_OPER_OP_ORDER,      "sub" },
+  { 0x30, BYTE_OPER_REG_OP_ORDER, "xor" },
+  { 0x31, OPER_REG_OP_ORDER,      "xor" },
+  { 0x32, BYTE_REG_OPER_OP_ORDER, "xor" },
+  { 0x33, REG_OPER_OP_ORDER,      "xor" },
+  { 0x38, BYTE_OPER_REG_OP_ORDER, "cmp" },
+  { 0x39, OPER_REG_OP_ORDER,      "cmp" },
+  { 0x3A, BYTE_REG_OPER_OP_ORDER, "cmp" },
+  { 0x3B, REG_OPER_OP_ORDER,      "cmp" },
+  { 0x8D, REG_OPER_OP_ORDER,      "lea" },
+  { 0x84, BYTE_REG_OPER_OP_ORDER, "test" },
+  { 0x85, REG_OPER_OP_ORDER,      "test" },
+  { 0x86, BYTE_REG_OPER_OP_ORDER, "xchg" },
+  { 0x87, REG_OPER_OP_ORDER,      "xchg" },
+  { 0x88, BYTE_OPER_REG_OP_ORDER, "mov" },
+  { 0x89, OPER_REG_OP_ORDER,      "mov" },
+  { 0x8A, BYTE_REG_OPER_OP_ORDER, "mov" },
+  { 0x8B, REG_OPER_OP_ORDER,      "mov" },
   { -1, UNSET_OP_ORDER, "" }
 };
 
@@ -97,6 +129,7 @@
   { 0x05, UNSET_OP_ORDER, "add" },
   { 0x0D, UNSET_OP_ORDER, "or" },
   { 0x15, UNSET_OP_ORDER, "adc" },
+  { 0x1D, UNSET_OP_ORDER, "sbb" },
   { 0x25, UNSET_OP_ORDER, "and" },
   { 0x2D, UNSET_OP_ORDER, "sub" },
   { 0x35, UNSET_OP_ORDER, "xor" },
@@ -127,7 +160,8 @@
 struct InstructionDesc {
   const char* mnem;
   InstructionType type;
-  OperandOrder op_order_;
+  OperandType op_order_;
+  bool byte_size_operation;  // Fixed 8-bit operation.
 };
 
 
@@ -143,7 +177,7 @@
   void Clear();
   void Init();
   void CopyTable(ByteMnemonic bm[], InstructionType type);
-  void SetTableRange(InstructionType type, byte start, byte end,
+  void SetTableRange(InstructionType type, byte start, byte end, bool byte_size,
                      const char* mnem);
   void AddJumpConditionalShort();
 };
@@ -157,9 +191,10 @@
 
 void InstructionTable::Clear() {
   for (int i = 0; i < 256; i++) {
-    instructions_[i].mnem = "";
+    instructions_[i].mnem = "(bad)";
     instructions_[i].type = NO_INSTR;
     instructions_[i].op_order_ = UNSET_OP_ORDER;
+    instructions_[i].byte_size_operation = false;
   }
 }
 
@@ -170,9 +205,9 @@
   CopyTable(call_jump_instr, CALL_JUMP_INSTR);
   CopyTable(short_immediate_instr, SHORT_IMMEDIATE_INSTR);
   AddJumpConditionalShort();
-  SetTableRange(PUSHPOP_INSTR, 0x50, 0x57, "push");
-  SetTableRange(PUSHPOP_INSTR, 0x58, 0x5F, "pop");
-  SetTableRange(MOVE_REG_INSTR, 0xB8, 0xBF, "mov");
+  SetTableRange(PUSHPOP_INSTR, 0x50, 0x57, false, "push");
+  SetTableRange(PUSHPOP_INSTR, 0x58, 0x5F, false, "pop");
+  SetTableRange(MOVE_REG_INSTR, 0xB8, 0xBF, false, "mov");
 }
 
 
@@ -180,20 +215,27 @@
   for (int i = 0; bm[i].b >= 0; i++) {
     InstructionDesc* id = &instructions_[bm[i].b];
     id->mnem = bm[i].mnem;
-    id->op_order_ = bm[i].op_order_;
-    assert(id->type == NO_INSTR);  // Information already entered
+    OperandType op_order = bm[i].op_order_;
+    id->op_order_ =
+        static_cast<OperandType>(op_order & ~BYTE_SIZE_OPERAND_FLAG);
+    assert(id->type == NO_INSTR);  // Information not already entered
     id->type = type;
+    id->byte_size_operation = ((op_order & BYTE_SIZE_OPERAND_FLAG) != 0);
   }
 }
 
 
-void InstructionTable::SetTableRange(InstructionType type, byte start,
-                                     byte end, const char* mnem) {
+void InstructionTable::SetTableRange(InstructionType type,
+                                     byte start,
+                                     byte end,
+                                     bool byte_size,
+                                     const char* mnem) {
   for (byte b = start; b <= end; b++) {
     InstructionDesc* id = &instructions_[b];
     assert(id->type == NO_INSTR);  // Information already entered
     id->mnem = mnem;
     id->type = type;
+    id->byte_size_operation = byte_size;
   }
 }
 
@@ -211,13 +253,16 @@
 static InstructionTable instruction_table;
 
 
-// The X64 disassembler implementation.
+//------------------------------------------------------------------------------
+// DisassemblerX64 implementation.
+
 enum UnimplementedOpcodeAction {
   CONTINUE_ON_UNIMPLEMENTED_OPCODE,
   ABORT_ON_UNIMPLEMENTED_OPCODE
 };
 
-
+// A new DisassemblerX64 object is created to disassemble each instruction.
+// The object can only disassemble a single instruction.
 class DisassemblerX64 {
  public:
   DisassemblerX64(const NameConverter& converter,
@@ -228,7 +273,9 @@
         abort_on_unimplemented_(
             unimplemented_action == ABORT_ON_UNIMPLEMENTED_OPCODE),
         rex_(0),
-        operand_size_(0) {
+        operand_size_(0),
+        group_1_prefix_(0),
+        byte_size_operand_(false) {
     tmp_buffer_[0] = '\0';
   }
 
@@ -240,6 +287,12 @@
   int InstructionDecode(v8::internal::Vector<char> buffer, byte* instruction);
 
  private:
+  enum OperandSize {
+    BYTE_SIZE = 0,
+    WORD_SIZE = 1,
+    DOUBLEWORD_SIZE = 2,
+    QUADWORD_SIZE = 3
+  };
 
   const NameConverter& converter_;
   v8::internal::EmbeddedVector<char, 128> tmp_buffer_;
@@ -247,12 +300,10 @@
   bool abort_on_unimplemented_;
   // Prefixes parsed
   byte rex_;
-  byte operand_size_;
-
-  void setOperandSizePrefix(byte prefix) {
-    ASSERT_EQ(0x66, prefix);
-    operand_size_ = prefix;
-  }
+  byte operand_size_;  // 0x66 or (if no group 3 prefix is present) 0x0.
+  byte group_1_prefix_;  // 0xF2, 0xF3, or (if no group 1 prefix is present) 0.
+  // Byte size operand override.
+  bool byte_size_operand_;
 
   void setRex(byte rex) {
     ASSERT_EQ(0x40, rex & 0xF0);
@@ -272,12 +323,15 @@
 
   bool rex_w() { return (rex_ & 0x08) != 0; }
 
-  int operand_size() {
-    return rex_w() ? 64 : (operand_size_ != 0) ? 16 : 32;
+  OperandSize operand_size() {
+    if (byte_size_operand_) return BYTE_SIZE;
+    if (rex_w()) return QUADWORD_SIZE;
+    if (operand_size_ != 0) return WORD_SIZE;
+    return DOUBLEWORD_SIZE;
   }
 
   char operand_size_code() {
-    return rex_w() ? 'q' : (operand_size_ != 0) ? 'w' : 'l';
+    return "bwlq"[operand_size()];
   }
 
   const char* NameOfCPURegister(int reg) const {
@@ -312,7 +366,7 @@
                int* base) {
     *scale = (data >> 6) & 3;
     *index = ((data >> 3) & 7) | (rex_x() ? 8 : 0);
-    *base = data & 7 | (rex_b() ? 8 : 0);
+    *base = (data & 7) | (rex_b() ? 8 : 0);
   }
 
   typedef const char* (DisassemblerX64::*RegisterNameMapping)(int reg) const;
@@ -322,11 +376,14 @@
   int PrintRightOperand(byte* modrmp);
   int PrintRightByteOperand(byte* modrmp);
   int PrintOperands(const char* mnem,
-                    OperandOrder op_order,
+                    OperandType op_order,
                     byte* data);
+  int PrintImmediate(byte* data, OperandSize size);
   int PrintImmediateOp(byte* data);
+  const char* TwoByteMnemonic(byte opcode);
+  int TwoByteOpcodeInstruction(byte* data);
   int F7Instruction(byte* data);
-  int D1D3C1Instruction(byte* data);
+  int ShiftInstruction(byte* data);
   int JumpShort(byte* data);
   int JumpConditional(byte* data);
   int JumpConditionalShort(byte* data);
@@ -336,7 +393,7 @@
 
   void UnimplementedInstruction() {
     if (abort_on_unimplemented_) {
-      UNIMPLEMENTED();
+      CHECK(false);
     } else {
       AppendToBuffer("'Unimplemented Instruction'");
     }
@@ -451,6 +508,36 @@
 }
 
 
+int DisassemblerX64::PrintImmediate(byte* data, OperandSize size) {
+  int64_t value;
+  int count;
+  switch (size) {
+    case BYTE_SIZE:
+      value = *data;
+      count = 1;
+      break;
+    case WORD_SIZE:
+      value = *reinterpret_cast<int16_t*>(data);
+      count = 2;
+      break;
+    case DOUBLEWORD_SIZE:
+      value = *reinterpret_cast<uint32_t*>(data);
+      count = 4;
+      break;
+    case QUADWORD_SIZE:
+      value = *reinterpret_cast<int32_t*>(data);
+      count = 4;
+      break;
+    default:
+      UNREACHABLE();
+      value = 0;  // Initialize variables on all paths to satisfy the compiler.
+      count = 0;
+  }
+  AppendToBuffer(V8_PTR_PREFIX"x", value);
+  return count;
+}
+
+
 int DisassemblerX64::PrintRightOperand(byte* modrmp) {
   return PrintRightOperandHelper(modrmp,
                                  &DisassemblerX64::NameOfCPURegister);
@@ -466,25 +553,30 @@
 // Returns number of bytes used including the current *data.
 // Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'.
 int DisassemblerX64::PrintOperands(const char* mnem,
-                                   OperandOrder op_order,
+                                   OperandType op_order,
                                    byte* data) {
   byte modrm = *data;
   int mod, regop, rm;
   get_modrm(modrm, &mod, &regop, &rm);
   int advance = 0;
+  const char* register_name =
+      byte_size_operand_ ? NameOfByteCPURegister(regop)
+                         : NameOfCPURegister(regop);
   switch (op_order) {
     case REG_OPER_OP_ORDER: {
       AppendToBuffer("%s%c %s,",
                      mnem,
                      operand_size_code(),
-                     NameOfCPURegister(regop));
-      advance = PrintRightOperand(data);
+                     register_name);
+      advance = byte_size_operand_ ? PrintRightByteOperand(data)
+                                   : PrintRightOperand(data);
       break;
     }
     case OPER_REG_OP_ORDER: {
       AppendToBuffer("%s%c ", mnem, operand_size_code());
-      advance = PrintRightOperand(data);
-      AppendToBuffer(",%s", NameOfCPURegister(regop));
+      advance = byte_size_operand_ ? PrintRightByteOperand(data)
+                                   : PrintRightOperand(data);
+      AppendToBuffer(",%s", register_name);
       break;
     }
     default:
@@ -498,7 +590,7 @@
 // Returns number of bytes used by machine instruction, including *data byte.
 // Writes immediate instructions to 'tmp_buffer_'.
 int DisassemblerX64::PrintImmediateOp(byte* data) {
-  bool sign_extension_bit = (*data & 0x02) != 0;
+  bool byte_size_immediate = (*data & 0x02) != 0;
   byte modrm = *(data + 1);
   int mod, regop, rm;
   get_modrm(modrm, &mod, &regop, &rm);
@@ -528,15 +620,12 @@
     default:
       UnimplementedInstruction();
   }
-  AppendToBuffer("%s ", mnem);
+  AppendToBuffer("%s%c ", mnem, operand_size_code());
   int count = PrintRightOperand(data + 1);
-  if (sign_extension_bit) {
-    AppendToBuffer(",0x%x", *(data + 1 + count));
-    return 1 + count + 1 /*int8*/;
-  } else {
-    AppendToBuffer(",0x%x", *reinterpret_cast<int32_t*>(data + 1 + count));
-    return 1 + count + 4 /*int32_t*/;
-  }
+  AppendToBuffer(",0x");
+  OperandSize immediate_size = byte_size_immediate ? BYTE_SIZE : operand_size();
+  count += PrintImmediate(data + 1 + count, immediate_size);
+  return 1 + count;
 }
 
 
@@ -589,78 +678,65 @@
 }
 
 
-int DisassemblerX64::D1D3C1Instruction(byte* data) {
-  byte op = *data;
-  assert(op == 0xD1 || op == 0xD3 || op == 0xC1);
+int DisassemblerX64::ShiftInstruction(byte* data) {
+  byte op = *data & (~1);
+  if (op != 0xD0 && op != 0xD2 && op != 0xC0) {
+    UnimplementedInstruction();
+    return 1;
+  }
   byte modrm = *(data + 1);
   int mod, regop, rm;
   get_modrm(modrm, &mod, &regop, &rm);
-  ASSERT(regop < 8);
+  regop &= 0x7;  // The REX.R bit does not affect the operation.
   int imm8 = -1;
   int num_bytes = 2;
-  if (mod == 3) {
-    const char* mnem = NULL;
-    if (op == 0xD1) {
-      imm8 = 1;
-      switch (regop) {
-        case 2:
-          mnem = "rcl";
-          break;
-        case 7:
-          mnem = "sar";
-          break;
-        case 4:
-          mnem = "shl";
-          break;
-        default:
-          UnimplementedInstruction();
-      }
-    } else if (op == 0xC1) {
-      imm8 = *(data + 2);
-      num_bytes = 3;
-      switch (regop) {
-        case 2:
-          mnem = "rcl";
-          break;
-        case 4:
-          mnem = "shl";
-          break;
-        case 5:
-          mnem = "shr";
-          break;
-        case 7:
-          mnem = "sar";
-          break;
-        default:
-          UnimplementedInstruction();
-      }
-    } else if (op == 0xD3) {
-      switch (regop) {
-        case 4:
-          mnem = "shl";
-          break;
-        case 5:
-          mnem = "shr";
-          break;
-        case 7:
-          mnem = "sar";
-          break;
-        default:
-          UnimplementedInstruction();
-      }
-    }
-    assert(mnem != NULL);
-    AppendToBuffer("%s%c %s,",
-                   mnem,
-                   operand_size_code(),
-                   NameOfCPURegister(rm));
-    if (imm8 > 0) {
-      AppendToBuffer("%d", imm8);
-    } else {
-      AppendToBuffer("cl");
-    }
-  } else {
+  if (mod != 3) {
     UnimplementedInstruction();
+    return num_bytes;
+  }
+  const char* mnem = NULL;
+  switch (regop) {
+    case 0:
+      mnem = "rol";
+      break;
+    case 1:
+      mnem = "ror";
+      break;
+    case 2:
+      mnem = "rcl";
+      break;
+    case 3:
+      mnem = "rcr";
+      break;
+    case 4:
+      mnem = "shl";
+      break;
+    case 5:
+      mnem = "shr";
+      break;
+    case 7:
+      mnem = "sar";
+      break;
+    default:
+      UnimplementedInstruction();
+      return num_bytes;
+  }
+  assert(mnem != NULL);
+  if (op == 0xD0) {
+    imm8 = 1;
+  } else if (op == 0xC0) {
+    imm8 = *(data + 2);
+    num_bytes = 3;
+  }
+  AppendToBuffer("%s%c %s,",
+                 mnem,
+                 operand_size_code(),
+                 byte_size_operand_ ? NameOfByteCPURegister(rm)
+                                    : NameOfCPURegister(rm));
+  if (op == 0xD2) {
+    AppendToBuffer("cl");
+  } else {
+    AppendToBuffer("%d", imm8);
   }
   return num_bytes;
 }
@@ -716,20 +792,14 @@
   if (b1 == 0xD9) {
     const char* mnem = NULL;
     switch (b2) {
-      case 0xE8:
-        mnem = "fld1";
-        break;
-      case 0xEE:
-        mnem = "fldz";
+      case 0xE0:
+        mnem = "fchs";
         break;
       case 0xE1:
         mnem = "fabs";
         break;
-      case 0xE0:
-        mnem = "fchs";
-        break;
-      case 0xF8:
-        mnem = "fprem";
+      case 0xE4:
+        mnem = "ftst";
         break;
       case 0xF5:
         mnem = "fprem1";
@@ -737,8 +807,14 @@
       case 0xF7:
         mnem = "fincstp";
         break;
-      case 0xE4:
-        mnem = "ftst";
+      case 0xE8:
+        mnem = "fld1";
+        break;
+      case 0xEE:
+        mnem = "fldz";
+        break;
+      case 0xF8:
+        mnem = "fprem";
         break;
     }
     if (mnem != NULL) {
@@ -862,38 +938,146 @@
   return 2;
 }
 
-// Mnemonics for instructions 0xF0 byte.
+
+// Handle all two-byte opcodes, which start with 0x0F.
+// These instructions may be affected by an 0x66, 0xF2, or 0xF3 prefix.
+// We do not use any three-byte opcodes, which start with 0x0F38 or 0x0F3A.
+int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
+  byte opcode = *(data + 1);
+  byte* current = data + 2;
+  // At return, "current" points to the start of the next instruction.
+  const char* mnemonic = TwoByteMnemonic(opcode);
+  if (opcode == 0x1F) {
+    // NOP
+    int mod, regop, rm;
+    get_modrm(*current, &mod, &regop, &rm);
+    current++;
+    if (regop == 4) {  // SIB byte present.
+      current++;
+    }
+    if (mod == 1) {  // Byte displacement.
+      current += 1;
+    } else if (mod == 2) {  // 32-bit displacement.
+      current += 4;
+    }  // else no immediate displacement.
+    AppendToBuffer("nop");
+
+  } else  if (opcode == 0xA2 || opcode == 0x31) {
+    // RDTSC or CPUID
+    AppendToBuffer("%s", mnemonic);
+
+  } else if ((opcode & 0xF0) == 0x80) {
+    // Jcc: Conditional jump (branch).
+    current = data + JumpConditional(data);
+
+  } else if (opcode == 0xBE || opcode == 0xBF || opcode == 0xB6 ||
+             opcode == 0xB7 || opcode == 0xAF) {
+    // Size-extending moves, IMUL.
+    current += PrintOperands(mnemonic, REG_OPER_OP_ORDER, current);
+
+  } else if ((opcode & 0xF0) == 0x90) {
+    // SETcc: Set byte on condition. Needs pointer to beginning of instruction.
+    current = data + SetCC(data);
+
+  } else if (opcode == 0xAB || opcode == 0xA5 || opcode == 0xAD) {
+    // SHLD, SHRD (double-precision shift), BTS (bit set).
+    AppendToBuffer("%s ", mnemonic);
+    int mod, regop, rm;
+    get_modrm(*current, &mod, &regop, &rm);
+    current += PrintRightOperand(current);
+    if (opcode == 0xAB) {
+      AppendToBuffer(",%s", NameOfCPURegister(regop));
+    } else {
+      AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
+    }
+  } else if (group_1_prefix_ == 0xF2) {
+    // Beginning of instructions with prefix 0xF2.
+
+    if (opcode == 0x11 || opcode == 0x10) {
+      // MOVSD: Move scalar double-precision fp to/from/between XMM registers.
+      AppendToBuffer("movsd ");
+      int mod, regop, rm;
+      get_modrm(*current, &mod, &regop, &rm);
+      if (opcode == 0x11) {
+        current += PrintRightOperand(current);
+        AppendToBuffer(",%s", NameOfXMMRegister(regop));
+      } else {
+        AppendToBuffer("%s,", NameOfXMMRegister(regop));
+        current += PrintRightOperand(current);
+      }
+    } else if (opcode == 0x2A) {
+      // CVTSI2SD: integer to XMM double conversion.
+      int mod, regop, rm;
+      get_modrm(*current, &mod, &regop, &rm);
+      AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
+      data += PrintRightOperand(data);
+    } else if ((opcode & 0xF8) == 0x58) {
+      // XMM arithmetic. Mnemonic was retrieved at the start of this function.
+      int mod, regop, rm;
+      get_modrm(*current, &mod, &regop, &rm);
+      AppendToBuffer("%s %s,%s", mnemonic, NameOfXMMRegister(regop),
+                     NameOfXMMRegister(rm));
+    } else {
+      UnimplementedInstruction();
+    }
+  } else if (opcode == 0x2C && group_1_prefix_ == 0xF3) {
+    // Instruction with prefix 0xF3.
+
+    // CVTTSS2SI: Convert scalar single-precision FP to dword integer.
+    // Assert that mod is not 3, so source is memory, not an XMM register.
+    ASSERT((*current & 0xC0) != 0xC0);
+    current += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, current);
+  } else {
+    UnimplementedInstruction();
+  }
+  return current - data;
+}
+
+
+// Mnemonics for two-byte opcode instructions starting with 0x0F.
+// The argument is the second byte of the two-byte opcode.
 // Returns NULL if the instruction is not handled here.
-static const char* F0Mnem(byte f0byte) {
-  switch (f0byte) {
+const char* DisassemblerX64::TwoByteMnemonic(byte opcode) {
+  switch (opcode) {
     case 0x1F:
       return "nop";
+    case 0x2A:  // F2 prefix.
+      return "cvtsi2sd";
     case 0x31:
       return "rdtsc";
+    case 0x58:  // F2 prefix.
+      return "addsd";
+    case 0x59:  // F2 prefix.
+      return "mulsd";
+    case 0x5C:  // F2 prefix.
+      return "subsd";
+    case 0x5E:  // F2 prefix.
+      return "divsd";
     case 0xA2:
       return "cpuid";
-    case 0xBE:
-      return "movsxb";
-    case 0xBF:
-      return "movsxw";
+    case 0xA5:
+      return "shld";
+    case 0xAB:
+      return "bts";
+    case 0xAD:
+      return "shrd";
+    case 0xAF:
+      return "imul";
     case 0xB6:
       return "movzxb";
     case 0xB7:
       return "movzxw";
-    case 0xAF:
-      return "imul";
-    case 0xA5:
-      return "shld";
-    case 0xAD:
-      return "shrd";
-    case 0xAB:
-      return "bts";
+    case 0xBE:
+      return "movsxb";
+    case 0xBF:
+      return "movsxw";
     default:
       return NULL;
   }
 }
 
-// Disassembled instruction '*instr' and writes it into 'out_buffer'.
+
+// Disassembles the instruction at instr, and writes it into out_buffer.
 int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
                                        byte* instr) {
   tmp_buffer_pos_ = 0;  // starting to write as position 0
@@ -905,19 +1089,21 @@
   // Scan for prefixes.
   while (true) {
     current = *data;
-    if (current == 0x66) {
-      setOperandSizePrefix(current);
-      data++;
-    } else if ((current & 0xF0) == 0x40) {
+    if (current == 0x66) {  // Group 3 prefix.
+      operand_size_ = current;
+    } else if ((current & 0xF0) == 0x40) {  // REX prefix.
       setRex(current);
       if (rex_w()) AppendToBuffer("REX.W ");
-      data++;
-    } else {
+    } else if ((current & 0xFE) == 0xF2) {  // Group 1 prefix.
+      group_1_prefix_ = current;
+    } else {  // Not a prefix - an opcode.
       break;
     }
+    data++;
   }
 
   const InstructionDesc& idesc = instruction_table.Get(current);
+  byte_size_operand_ = idesc.byte_size_operation;
   switch (idesc.type) {
     case ZERO_OPERANDS_INSTR:
       AppendToBuffer(idesc.mnem);
@@ -949,15 +1135,15 @@
     case MOVE_REG_INSTR: {
       byte* addr = NULL;
       switch (operand_size()) {
-        case 16:
+        case WORD_SIZE:
           addr = reinterpret_cast<byte*>(*reinterpret_cast<int16_t*>(data + 1));
           data += 3;
           break;
-        case 32:
+        case DOUBLEWORD_SIZE:
           addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
           data += 5;
           break;
-        case 64:
+        case QUADWORD_SIZE:
           addr = reinterpret_cast<byte*>(*reinterpret_cast<int64_t*>(data + 1));
           data += 9;
           break;
@@ -1012,8 +1198,8 @@
         AppendToBuffer("imul %s,%s,0x%x", NameOfCPURegister(regop),
                        NameOfCPURegister(rm), imm);
         data += 2 + (*data == 0x6B ? 1 : 4);
-      }
         break;
+      }
 
       case 0xF6: {
         int mod, regop, rm;
@@ -1024,63 +1210,16 @@
           UnimplementedInstruction();
         }
         data += 3;
-      }
         break;
+      }
 
       case 0x81:  // fall through
       case 0x83:  // 0x81 with sign extension bit set
         data += PrintImmediateOp(data);
         break;
 
-      case 0x0F: {
-        byte f0byte = *(data + 1);
-        const char* f0mnem = F0Mnem(f0byte);
-        if (f0byte == 0x1F) {
-          data += 1;
-          byte modrm = *data;
-          data += 1;
-          if (((modrm >> 3) & 7) == 4) {
-            // SIB byte present.
-            data += 1;
-          }
-          int mod = modrm >> 6;
-          if (mod == 1) {
-            // Byte displacement.
-            data += 1;
-          } else if (mod == 2) {
-            // 32-bit displacement.
-            data += 4;
-          }
-          AppendToBuffer("nop");
-        } else  if (f0byte == 0xA2 || f0byte == 0x31) {
-          AppendToBuffer("%s", f0mnem);
-          data += 2;
-        } else if ((f0byte & 0xF0) == 0x80) {
-          data += JumpConditional(data);
-        } else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 || f0byte
-            == 0xB7 || f0byte == 0xAF) {
-          data += 2;
-          data += PrintOperands(f0mnem, REG_OPER_OP_ORDER, data);
-        } else if ((f0byte & 0xF0) == 0x90) {
-          data += SetCC(data);
-        } else {
-          data += 2;
-          if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) {
-            // shrd, shld, bts
-            AppendToBuffer("%s ", f0mnem);
-            int mod, regop, rm;
-            get_modrm(*data, &mod, &regop, &rm);
-            data += PrintRightOperand(data);
-            if (f0byte == 0xAB) {
-              AppendToBuffer(",%s", NameOfCPURegister(regop));
-            } else {
-              AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
-            }
-          } else {
-            UnimplementedInstruction();
-          }
-        }
-      }
+      case 0x0F:
+        data += TwoByteOpcodeInstruction(data);
         break;
 
       case 0x8F: {
@@ -1170,13 +1309,13 @@
       case 0x95:
       case 0x96:
       case 0x97: {
-        int reg = current & 0x7 | (rex_b() ? 8 : 0);
+        int reg = (current & 0x7) | (rex_b() ? 8 : 0);
         if (reg == 0) {
           AppendToBuffer("nop");  // Common name for xchg rax,rax.
         } else {
           AppendToBuffer("xchg%c rax, %s",
                          operand_size_code(),
-                         NameOfByteCPURegister(reg));
+                         NameOfCPURegister(reg));
         }
       }
 
@@ -1209,17 +1348,39 @@
         data += 2;
         break;
 
-      case 0xA9:
-        AppendToBuffer("test%c rax,0x%x",  // CHECKME!
+      case 0xA9: {
+        int64_t value = 0;
+        switch (operand_size()) {
+          case WORD_SIZE:
+            value = *reinterpret_cast<uint16_t*>(data + 1);
+            data += 3;
+            break;
+          case DOUBLEWORD_SIZE:
+            value = *reinterpret_cast<uint32_t*>(data + 1);
+            data += 5;
+            break;
+          case QUADWORD_SIZE:
+            value = *reinterpret_cast<int32_t*>(data + 1);
+            data += 5;
+            break;
+          default:
+            UNREACHABLE();
+        }
+        AppendToBuffer("test%c rax,0x%"V8_PTR_PREFIX"ux",
                        operand_size_code(),
-                       *reinterpret_cast<int32_t*>(data + 1));
-        data += 5;
+                       value);
         break;
-
+      }
       case 0xD1:  // fall through
       case 0xD3:  // fall through
       case 0xC1:
-        data += D1D3C1Instruction(data);
+        data += ShiftInstruction(data);
+        break;
+      case 0xD0:  // fall through
+      case 0xD2:  // fall through
+      case 0xC0:
+        byte_size_operand_ = true;
+        data += ShiftInstruction(data);
         break;
 
       case 0xD9:  // fall through
@@ -1236,73 +1397,13 @@
         data += JumpShort(data);
         break;
 
-      case 0xF2:
-        if (*(data + 1) == 0x0F) {
-          byte b2 = *(data + 2);
-          if (b2 == 0x11) {
-            AppendToBuffer("movsd ");
-            data += 3;
-            int mod, regop, rm;
-            get_modrm(*data, &mod, &regop, &rm);
-            data += PrintRightOperand(data);
-            AppendToBuffer(",%s", NameOfXMMRegister(regop));
-          } else if (b2 == 0x10) {
-            data += 3;
-            int mod, regop, rm;
-            get_modrm(*data, &mod, &regop, &rm);
-            AppendToBuffer("movsd %s,", NameOfXMMRegister(regop));
-            data += PrintRightOperand(data);
-          } else {
-            const char* mnem = "?";
-            switch (b2) {
-              case 0x2A:
-                mnem = "cvtsi2sd";
-                break;
-              case 0x58:
-                mnem = "addsd";
-                break;
-              case 0x59:
-                mnem = "mulsd";
-                break;
-              case 0x5C:
-                mnem = "subsd";
-                break;
-              case 0x5E:
-                mnem = "divsd";
-                break;
-            }
-            data += 3;
-            int mod, regop, rm;
-            get_modrm(*data, &mod, &regop, &rm);
-            if (b2 == 0x2A) {
-              AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
-              data += PrintRightOperand(data);
-            } else {
-              AppendToBuffer("%s %s,%s", mnem, NameOfXMMRegister(regop),
-                             NameOfXMMRegister(rm));
-              data++;
-            }
-          }
-        } else {
-          UnimplementedInstruction();
-        }
-        break;
-
-      case 0xF3:
-        if (*(data + 1) == 0x0F && *(data + 2) == 0x2C) {
-          data += 3;
-          data += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, data);
-        } else {
-          UnimplementedInstruction();
-        }
-        break;
-
       case 0xF7:
         data += F7Instruction(data);
         break;
 
       default:
         UnimplementedInstruction();
+        data += 1;
     }
   }  // !processed
 
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 7b8699f..247e9e6 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -42,12 +42,130 @@
 #define __ ACCESS_MASM(masm)
 
 
+// Helper function used to load a property from a dictionary backing storage.
+// This function may return false negatives, so miss_label
+// must always call a backup property load that is complete.
+// This function is safe to call if the receiver has fast properties,
+// or if name is not a symbol, and will jump to the miss_label in that case.
+static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
+                                   Register r0, Register r1, Register r2,
+                                   Register name) {
+  // Register use:
+  //
+  // r0   - used to hold the property dictionary.
+  //
+  // r1   - initially the receiver
+  //      - used for the index into the property dictionary
+  //      - holds the result on exit.
+  //
+  // r2   - used to hold the capacity of the property dictionary.
+  //
+  // name - holds the name of the property and is unchanged.
+
+  Label done;
+
+  // Check for the absence of an interceptor.
+  // Load the map into r0.
+  __ movq(r0, FieldOperand(r1, JSObject::kMapOffset));
+  // Test the has_named_interceptor bit in the map.
+  __ testl(FieldOperand(r0, Map::kInstanceAttributesOffset),
+          Immediate(1 << (Map::kHasNamedInterceptor + (3 * 8))));
+
+  // Jump to miss if the interceptor bit is set.
+  __ j(not_zero, miss_label);
+
+  // Bail out if we have a JS global proxy object.
+  __ movzxbq(r0, FieldOperand(r0, Map::kInstanceTypeOffset));
+  __ cmpb(r0, Immediate(JS_GLOBAL_PROXY_TYPE));
+  __ j(equal, miss_label);
+
+  // Possible work-around for http://crbug.com/16276.
+  __ cmpb(r0, Immediate(JS_GLOBAL_OBJECT_TYPE));
+  __ j(equal, miss_label);
+  __ cmpb(r0, Immediate(JS_BUILTINS_OBJECT_TYPE));
+  __ j(equal, miss_label);
+
+  // Check that the properties array is a dictionary.
+  __ movq(r0, FieldOperand(r1, JSObject::kPropertiesOffset));
+  __ Cmp(FieldOperand(r0, HeapObject::kMapOffset),
+         Factory::hash_table_map());
+  __ j(not_equal, miss_label);
+
+  // Compute the capacity mask.
+  const int kCapacityOffset =
+      StringDictionary::kHeaderSize +
+      StringDictionary::kCapacityIndex * kPointerSize;
+  __ movq(r2, FieldOperand(r0, kCapacityOffset));
+  __ shrl(r2, Immediate(kSmiTagSize));  // convert smi to int
+  __ decl(r2);
+
+  // Generate an unrolled loop that performs a few probes before
+  // giving up. Measurements done on Gmail indicate that 2 probes
+  // cover ~93% of loads from dictionaries.
+  static const int kProbes = 4;
+  const int kElementsStartOffset =
+      StringDictionary::kHeaderSize +
+      StringDictionary::kElementsStartIndex * kPointerSize;
+  for (int i = 0; i < kProbes; i++) {
+    // Compute the masked index: (hash + i + i * i) & mask.
+    __ movl(r1, FieldOperand(name, String::kLengthOffset));
+    __ shrl(r1, Immediate(String::kHashShift));
+    if (i > 0) {
+      __ addl(r1, Immediate(StringDictionary::GetProbeOffset(i)));
+    }
+    __ and_(r1, r2);
+
+    // Scale the index by multiplying by the entry size.
+    ASSERT(StringDictionary::kEntrySize == 3);
+    __ lea(r1, Operand(r1, r1, times_2, 0));  // r1 = r1 * 3
+
+    // Check if the key is identical to the name.
+    __ cmpq(name, Operand(r0, r1, times_pointer_size,
+                          kElementsStartOffset - kHeapObjectTag));
+    if (i != kProbes - 1) {
+      __ j(equal, &done);
+    } else {
+      __ j(not_equal, miss_label);
+    }
+  }
+
+  // Check that the value is a normal property.
+  __ bind(&done);
+  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+  __ testl(Operand(r0, r1, times_pointer_size, kDetailsOffset - kHeapObjectTag),
+           Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
+  __ j(not_zero, miss_label);
+
+  // Get the value at the masked, scaled index.
+  const int kValueOffset = kElementsStartOffset + kPointerSize;
+  __ movq(r1,
+          Operand(r0, r1, times_pointer_size, kValueOffset - kHeapObjectTag));
+}
+
+
+// Helper function used to check that a value is either not an object
+// or is loaded if it is an object.
+static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, Label* miss,
+                                           Register value) {
+  Label done;
+  // Check if the value is a Smi.
+  __ testl(value, Immediate(kSmiTagMask));
+  __ j(zero, &done);
+  // Check if the object has been loaded.
+  __ movq(kScratchRegister, FieldOperand(value, JSFunction::kMapOffset));
+  __ testb(FieldOperand(kScratchRegister, Map::kBitField2Offset),
+           Immediate(1 << Map::kNeedsLoading));
+  __ j(not_zero, miss);
+  __ bind(&done);
+}
+
+
 void KeyedLoadIC::ClearInlinedVersion(Address address) {
-  UNIMPLEMENTED();
+  // TODO(X64): Implement this when LoadIC is enabled.
 }
 
 void KeyedStoreIC::ClearInlinedVersion(Address address) {
-  UNIMPLEMENTED();
+  // TODO(X64): Implement this when LoadIC is enabled.
 }
 
 void KeyedStoreIC::RestoreInlinedVersion(Address address) {
@@ -77,23 +195,130 @@
 }
 
 
+#ifdef DEBUG
+// For use in assert below.
+static int TenToThe(int exponent) {
+  ASSERT(exponent <= 9);
+  ASSERT(exponent >= 1);
+  int answer = 10;
+  for (int i = 1; i < exponent; i++) answer *= 10;
+  return answer;
+}
+#endif
+
+
 void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
-  masm->int3();  // UNIMPLEMENTED.
-  masm->movq(kScratchRegister, Immediate(0xC0AB));  // Debugging aid.
+  // ----------- S t a t e -------------
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : name
+  //  -- rsp[16] : receiver
+  // -----------------------------------
+  Label slow, fast, check_string, index_int, index_string;
+
+  // Load name and receiver.
+  __ movq(rax, Operand(rsp, kPointerSize));
+  __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+
+  // Check that the object isn't a smi.
+  __ testl(rcx, Immediate(kSmiTagMask));
+  __ j(zero, &slow);
+
+  // Check that the object is some kind of JS object EXCEPT JS Value type.
+  // In the case that the object is a value-wrapper object,
+  // we enter the runtime system to make sure that indexing
+  // into string objects work as intended.
+  ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+  __ CmpObjectType(rcx, JS_OBJECT_TYPE, rdx);
+  __ j(below, &slow);
+  // Check that the receiver does not require access checks.  We need
+  // to check this explicitly since this generic stub does not perform
+  // map checks.  The map is already in rdx.
+  __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
+           Immediate(1 << Map::kIsAccessCheckNeeded));
+  __ j(not_zero, &slow);
+
+  // Check that the key is a smi.
+  __ testl(rax, Immediate(kSmiTagMask));
+  __ j(not_zero, &check_string);
+  __ sarl(rax, Immediate(kSmiTagSize));
+  // Get the elements array of the object.
+  __ bind(&index_int);
+  __ movq(rcx, FieldOperand(rcx, JSObject::kElementsOffset));
+  // Check that the object is in fast mode (not dictionary).
+  __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), Factory::hash_table_map());
+  __ j(equal, &slow);
+  // Check that the key (index) is within bounds.
+  __ cmpl(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
+  __ j(below, &fast);  // Unsigned comparison rejects negative indices.
+  // Slow case: Load name and receiver from stack and jump to runtime.
+  __ bind(&slow);
+  __ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
+  KeyedLoadIC::Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+  __ bind(&check_string);
+  // The key is not a smi.
+  // Is it a string?
+  __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdx);
+  __ j(above_equal, &slow);
+  // Is the string an array index, with cached numeric value?
+  __ movl(rbx, FieldOperand(rax, String::kLengthOffset));
+  __ testl(rbx, Immediate(String::kIsArrayIndexMask));
+
+  // If the string is a symbol, do a quick inline probe of the receiver's
+  // dictionary, if it exists.
+  __ j(not_zero, &index_string);  // The value in rbx is used at jump target.
+  __ testb(FieldOperand(rdx, Map::kInstanceTypeOffset),
+           Immediate(kIsSymbolMask));
+  __ j(zero, &slow);
+  // Probe the dictionary leaving result in ecx.
+  GenerateDictionaryLoad(masm, &slow, rbx, rcx, rdx, rax);
+  GenerateCheckNonObjectOrLoaded(masm, &slow, rcx);
+  __ movq(rax, rcx);
+  __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
+  __ ret(0);
+  // Array index string: If short enough use cache in length/hash field (ebx).
+  // We assert that there are enough bits in an int32_t after the hash shift
+  // bits have been subtracted to allow space for the length and the cached
+  // array index.
+  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+         (1 << (String::kShortLengthShift - String::kHashShift)));
+  __ bind(&index_string);
+  const int kLengthFieldLimit =
+      (String::kMaxCachedArrayIndexLength + 1) << String::kShortLengthShift;
+  __ cmpl(rbx, Immediate(kLengthFieldLimit));
+  __ j(above_equal, &slow);
+  __ movl(rax, rbx);
+  __ and_(rax, Immediate((1 << String::kShortLengthShift) - 1));
+  __ shrl(rax, Immediate(String::kLongLengthShift));
+  __ jmp(&index_int);
+  // Fast case: Do the load.
+  __ bind(&fast);
+  __ movq(rax, Operand(rcx, rax, times_pointer_size,
+                      FixedArray::kHeaderSize - kHeapObjectTag));
+  __ Cmp(rax, Factory::the_hole_value());
+  // In case the loaded value is the_hole we have to consult GetProperty
+  // to ensure the prototype chain is searched.
+  __ j(equal, &slow);
+  __ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
+  __ ret(0);
 }
 
 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
-  masm->int3();  // UNIMPLEMENTED.
-  masm->movq(kScratchRegister, Immediate(0xC1AB));  // Debugging aid.
+  // ----------- S t a t e -------------
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : name
+  //  -- rsp[16] : receiver
+  // -----------------------------------
+
+  Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
 }
 
 bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
-  UNIMPLEMENTED();
+  // Never patch the map in the map check, so the check always fails.
   return false;
 }
 
 bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
-  UNIMPLEMENTED();
+  // Never patch the map in the map check, so the check always fails.
   return false;
 }
 
@@ -118,13 +343,6 @@
   return NULL;
 }
 
-Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
-                                                JSObject* object,
-                                                JSObject* holder,
-                                                int index) {
-  UNIMPLEMENTED();
-  return NULL;
-}
 
 Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
   UNIMPLEMENTED();
@@ -163,15 +381,123 @@
 }
 
 void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
-  masm->int3();  // UNIMPLEMENTED.
-  masm->movq(kScratchRegister, Immediate(0xC2AB));  // Debugging aid.
+  Generate(masm, ExternalReference(IC_Utility(kKeyedStoreIC_Miss)));
 }
 
+
 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
-  masm->int3();  // UNIMPLEMENTED.
-  masm->movq(kScratchRegister, Immediate(0xC3AB));  // Debugging aid.
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : key
+  //  -- rsp[16] : receiver
+  // -----------------------------------
+  Label slow, fast, array, extra;
+
+  // Get the receiver from the stack.
+  __ movq(rdx, Operand(rsp, 2 * kPointerSize));  // 2 ~ return address, key
+  // Check that the object isn't a smi.
+  __ testl(rdx, Immediate(kSmiTagMask));
+  __ j(zero, &slow);
+  // Get the map from the receiver.
+  __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+  // Check that the receiver does not require access checks.  We need
+  // to do this because this generic stub does not perform map checks.
+  __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+           Immediate(1 << Map::kIsAccessCheckNeeded));
+  __ j(not_zero, &slow);
+  // Get the key from the stack.
+  __ movq(rbx, Operand(rsp, 1 * kPointerSize));  // 1 ~ return address
+  // Check that the key is a smi.
+  __ testl(rbx, Immediate(kSmiTagMask));
+  __ j(not_zero, &slow);
+
+  __ CmpInstanceType(rcx, JS_ARRAY_TYPE);
+  __ j(equal, &array);
+  // Check that the object is some kind of JS object.
+  __ CmpInstanceType(rcx, FIRST_JS_OBJECT_TYPE);
+  __ j(below, &slow);
+
+  // Object case: Check key against length in the elements array.
+  // rax: value
+  // rdx: JSObject
+  // rbx: index (as a smi)
+  __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+  // Check that the object is in fast mode (not dictionary).
+  __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), Factory::hash_table_map());
+  __ j(equal, &slow);
+  // Untag the key (for checking against untagged length in the fixed array).
+  __ movl(rdx, rbx);
+  __ sarl(rdx, Immediate(kSmiTagSize));
+  __ cmpl(rdx, FieldOperand(rcx, Array::kLengthOffset));
+  // rax: value
+  // rcx: FixedArray
+  // rbx: index (as a smi)
+  __ j(below, &fast);
+
+
+  // Slow case: Push extra copies of the arguments (3).
+  __ bind(&slow);
+  __ pop(rcx);
+  __ push(Operand(rsp, 1 * kPointerSize));
+  __ push(Operand(rsp, 1 * kPointerSize));
+  __ push(rax);
+  __ push(rcx);
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3);
+
+
+  // Extra capacity case: Check if there is extra capacity to
+  // perform the store and update the length. Used for adding one
+  // element to the array by writing to array[array.length].
+  __ bind(&extra);
+  // rax: value
+  // rdx: JSArray
+  // rcx: FixedArray
+  // rbx: index (as a smi)
+  // flags: compare (rbx, rdx.length())
+  __ j(not_equal, &slow);  // do not leave holes in the array
+  __ sarl(rbx, Immediate(kSmiTagSize));  // untag
+  __ cmpl(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+  __ j(above_equal, &slow);
+  // Restore tag and increment.
+  __ lea(rbx, Operand(rbx, rbx, times_1, 1 << kSmiTagSize));
+  __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rbx);
+  __ subl(rbx, Immediate(1 << kSmiTagSize));  // decrement rbx again
+  __ jmp(&fast);
+
+
+  // Array case: Get the length and the elements array from the JS
+  // array. Check that the array is in fast mode; if it is the
+  // length is always a smi.
+  __ bind(&array);
+  // rax: value
+  // rdx: JSArray
+  // rbx: index (as a smi)
+  __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), Factory::hash_table_map());
+  __ j(equal, &slow);
+
+  // Check the key against the length in the array, compute the
+  // address to store into and fall through to fast case.
+  __ cmpl(rbx, FieldOperand(rdx, JSArray::kLengthOffset));
+  __ j(above_equal, &extra);
+
+
+  // Fast case: Do the store.
+  __ bind(&fast);
+  // rax: value
+  // rcx: FixedArray
+  // rbx: index (as a smi)
+  __ movq(Operand(rcx, rbx, times_4, FixedArray::kHeaderSize - kHeapObjectTag),
+         rax);
+  // Update write barrier for the elements array address.
+  __ movq(rdx, rax);
+  __ RecordWrite(rcx, 0, rdx, rbx);
+  __ ret(0);
 }
 
+
 Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
                                                   int index,
                                                   Map* transition,
@@ -228,20 +554,24 @@
   __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
 }
 
-void CallIC::GenerateMegamorphic(MacroAssembler* a, int b) {
-  UNIMPLEMENTED();
+void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+  // Cache miss: Jump to runtime.
+  Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
 }
 
-void CallIC::GenerateNormal(MacroAssembler* a, int b) {
-  UNIMPLEMENTED();
+void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+  // Cache miss: Jump to runtime.
+  Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
 }
 
 
+// The offset from the inlined patch site to the start of the
+// inlined load instruction.
 const int LoadIC::kOffsetToLoadInstruction = 20;
 
 
 void LoadIC::ClearInlinedVersion(Address address) {
-  UNIMPLEMENTED();
+  // TODO(X64): Implement this when LoadIC is enabled.
 }
 
 
@@ -266,37 +596,54 @@
 
 
 void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
-  masm->int3();  // UNIMPLEMENTED.
-  masm->movq(kScratchRegister, Immediate(0xC4AB));  // Debugging aid.
+  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
 }
 
 void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
-  masm->int3();  // UNIMPLEMENTED.
-  masm->movq(kScratchRegister, Immediate(0xC5AB));  // Debugging aid.
+  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
 }
 
+
 void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
-  masm->int3();  // UNIMPLEMENTED.
-  masm->movq(kScratchRegister, Immediate(0xC6AB));  // Debugging aid.
+  // ----------- S t a t e -------------
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+
+  __ movq(rax, Operand(rsp, kPointerSize));
+
+  // Probe the stub cache.
+  Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
+                                         NOT_IN_LOOP,
+                                         MONOMORPHIC);
+  StubCache::GenerateProbe(masm, flags, rax, rcx, rbx, rdx);
+
+  // Cache miss: Jump to runtime.
+  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
 }
 
+
 void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  masm->int3();  // UNIMPLEMENTED.
-  masm->movq(kScratchRegister, Immediate(0xC7AB));  // Debugging aid.
+  // ----------- S t a t e -------------
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+
+  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
 }
 
 void LoadIC::GenerateNormal(MacroAssembler* masm) {
-  masm->int3();  // UNIMPLEMENTED.
-  masm->movq(kScratchRegister, Immediate(0xC8AB));  // Debugging aid.
+  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
 }
 
 void LoadIC::GenerateStringLength(MacroAssembler* masm) {
-  masm->int3();  // UNIMPLEMENTED.
-  masm->movq(kScratchRegister, Immediate(0xC9AB));  // Debugging aid.
+  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
 }
 
 bool LoadIC::PatchInlinedLoad(Address address, Object* map, int index) {
-  UNIMPLEMENTED();
+  // TODO(X64): Implement this function.  Until then, the code is not patched.
   return false;
 }
 
@@ -319,13 +666,11 @@
 }
 
 void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
-  masm->int3();  // UNIMPLEMENTED.
-  masm->movq(kScratchRegister, Immediate(0xCAAB));  // Debugging aid.
+  Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
 }
 
 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
-  masm->int3();  // UNIMPLEMENTED.
-  masm->movq(kScratchRegister, Immediate(0xCBAB));  // Debugging aid.
+  Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
 }
 
 
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 099a461..457011b 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -882,4 +882,154 @@
 }
 
 
+Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
+                                   JSObject* holder, Register holder_reg,
+                                   Register scratch,
+                                   Label* miss) {
+  // Make sure there's no overlap between scratch and the other
+  // registers.
+  ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg));
+
+  // Keep track of the current object in register reg.  On the first
+  // iteration, reg is an alias for object_reg, on later iterations,
+  // it is an alias for holder_reg.
+  Register reg = object_reg;
+  int depth = 1;
+
+  // Check the maps in the prototype chain.
+  // Traverse the prototype chain from the object and do map checks.
+  while (object != holder) {
+    depth++;
+
+    // Only global objects and objects that do not require access
+    // checks are allowed in stubs.
+    ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+    JSObject* prototype = JSObject::cast(object->GetPrototype());
+    if (Heap::InNewSpace(prototype)) {
+      // Get the map of the current object.
+      movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+      Cmp(scratch, Handle<Map>(object->map()));
+      // Branch on the result of the map check.
+      j(not_equal, miss);
+      // Check access rights to the global object.  This has to happen
+      // after the map check so that we know that the object is
+      // actually a global object.
+      if (object->IsJSGlobalProxy()) {
+        CheckAccessGlobalProxy(reg, scratch, miss);
+
+        // Restore scratch register to be the map of the object.
+        // We load the prototype from the map in the scratch register.
+        movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+      }
+      // The prototype is in new space; we cannot store a reference
+      // to it in the code. Load it from the map.
+      reg = holder_reg;  // from now the object is in holder_reg
+      movq(reg, FieldOperand(scratch, Map::kPrototypeOffset));
+
+    } else {
+      // Check the map of the current object.
+      Cmp(FieldOperand(reg, HeapObject::kMapOffset),
+          Handle<Map>(object->map()));
+      // Branch on the result of the map check.
+      j(not_equal, miss);
+      // Check access rights to the global object.  This has to happen
+      // after the map check so that we know that the object is
+      // actually a global object.
+      if (object->IsJSGlobalProxy()) {
+        CheckAccessGlobalProxy(reg, scratch, miss);
+      }
+      // The prototype is in old space; load it directly.
+      reg = holder_reg;  // from now the object is in holder_reg
+      Move(reg, Handle<JSObject>(prototype));
+    }
+
+    // Go to the next object in the prototype chain.
+    object = prototype;
+  }
+
+  // Check the holder map.
+  Cmp(FieldOperand(reg, HeapObject::kMapOffset),
+      Handle<Map>(holder->map()));
+  j(not_equal, miss);
+
+  // Log the check depth.
+  LOG(IntEvent("check-maps-depth", depth));
+
+  // Perform security check for access to the global object and return
+  // the holder register.
+  ASSERT(object == holder);
+  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+  if (object->IsJSGlobalProxy()) {
+    CheckAccessGlobalProxy(reg, scratch, miss);
+  }
+  return reg;
+}
+
+
+
+
+void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
+                                            Register scratch,
+                                            Label* miss) {
+  Label same_contexts;
+
+  ASSERT(!holder_reg.is(scratch));
+  ASSERT(!scratch.is(kScratchRegister));
+  // Load current lexical context from the stack frame.
+  movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
+
+  // When generating debug code, make sure the lexical context is set.
+  if (FLAG_debug_code) {
+    cmpq(scratch, Immediate(0));
+    Check(not_equal, "we should not have an empty lexical context");
+  }
+  // Load the global context of the current context.
+  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+  movq(scratch, FieldOperand(scratch, offset));
+  movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));
+
+  // Check the context is a global context.
+  if (FLAG_debug_code) {
+    Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
+        Factory::global_context_map());
+    Check(equal, "JSGlobalObject::global_context should be a global context.");
+  }
+
+  // Check if both contexts are the same.
+  cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
+  j(equal, &same_contexts);
+
+  // Compare security tokens.
+  // Check that the security token in the calling global object is
+  // compatible with the security token in the receiving global
+  // object.
+
+  // Check the context is a global context.
+  if (FLAG_debug_code) {
+    // Preserve original value of holder_reg.
+    push(holder_reg);
+    movq(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
+    Cmp(holder_reg, Factory::null_value());
+    Check(not_equal, "JSGlobalProxy::context() should not be null.");
+
+    // Read the first word and compare to global_context_map().
+    movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
+    Cmp(holder_reg, Factory::global_context_map());
+    Check(equal, "JSGlobalObject::global_context should be a global context.");
+    pop(holder_reg);
+  }
+
+  movq(kScratchRegister,
+       FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
+  int token_offset = Context::kHeaderSize +
+                     Context::SECURITY_TOKEN_INDEX * kPointerSize;
+  movq(scratch, FieldOperand(scratch, token_offset));
+  cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
+  j(not_equal, miss);
+
+  bind(&same_contexts);
+}
+
+
 } }  // namespace v8::internal
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index f13a7ad..44a76a4 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -175,11 +175,13 @@
   void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
 
   // Compare object type for heap object.
+  // Always use unsigned comparisons: above and below, not less and greater.
   // Incoming register is heap_object and outgoing register is map.
   // They may be the same register, and may be kScratchRegister.
   void CmpObjectType(Register heap_object, InstanceType type, Register map);
 
   // Compare instance type for map.
+  // Always use unsigned comparisons: above and below, not less and greater.
   void CmpInstanceType(Register map, InstanceType type);
 
   // FCmp is similar to integer cmp, but requires unsigned
@@ -212,7 +214,8 @@
 
   // Generate code for checking access rights - used for security checks
   // on access to global objects across environments. The holder register
-  // is left untouched, but the scratch register is clobbered.
+  // is left untouched, but the scratch register and kScratchRegister,
+  // which must be different, are clobbered.
   void CheckAccessGlobalProxy(Register holder_reg,
                               Register scratch,
                               Label* miss);
diff --git a/src/x64/register-allocator-x64-inl.h b/src/x64/register-allocator-x64-inl.h
index 926dd64..54729d6 100644
--- a/src/x64/register-allocator-x64-inl.h
+++ b/src/x64/register-allocator-x64-inl.h
@@ -46,7 +46,7 @@
 // non-reserved assembler registers.
 int RegisterAllocator::ToNumber(Register reg) {
   ASSERT(reg.is_valid() && !IsReserved(reg));
-  static const int numbers[] = {
+  const int kNumbers[] = {
     0,   // rax
     2,   // rcx
     3,   // rdx
@@ -64,15 +64,15 @@
     8,   // r14
     9   // r15
   };
-  return numbers[reg.code()];
+  return kNumbers[reg.code()];
 }
 
 
 Register RegisterAllocator::ToRegister(int num) {
   ASSERT(num >= 0 && num < kNumRegisters);
-  static Register registers[] =
+  const Register kRegisters[] =
       { rax, rbx, rcx, rdx, rdi, r8, r9, r11, r14, r15, r13, r12 };
-  return registers[num];
+  return kRegisters[num];
 }
 
 
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index c577615..ba13996 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -36,32 +36,202 @@
 namespace v8 {
 namespace internal {
 
-#define __ ACCESS_MASM((&masm_))
+#define __ ACCESS_MASM((masm()))
 
 
-Object* CallStubCompiler::CompileCallConstant(Object* a,
-                                              JSObject* b,
-                                              JSFunction* c,
-                                              String* d,
-                                              StubCompiler::CheckType e) {
-  UNIMPLEMENTED();
-  return NULL;
+Object* CallStubCompiler::CompileCallConstant(Object* object,
+                                              JSObject* holder,
+                                              JSFunction* function,
+                                              String* name,
+                                              StubCompiler::CheckType check) {
+  // ----------- S t a t e -------------
+  // -----------------------------------
+  // rsp[0] return address
+  // rsp[8] argument argc
+  // rsp[16] argument argc - 1
+  // ...
+  // rsp[argc * 8] argument 1
+  // rsp[(argc + 1) * 8] argument 0 = receiver
+  // rsp[(argc + 2) * 8] function name
+
+  Label miss;
+
+  // Get the receiver from the stack.
+  const int argc = arguments().immediate();
+  __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  if (check != NUMBER_CHECK) {
+    __ testl(rdx, Immediate(kSmiTagMask));
+    __ j(zero, &miss);
+  }
+
+  // Make sure that it's okay not to patch the on stack receiver
+  // unless we're doing a receiver map check.
+  ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
+
+  switch (check) {
+    case RECEIVER_MAP_CHECK:
+      // Check that the maps haven't changed.
+      CheckPrototypes(JSObject::cast(object), rdx, holder,
+                      rbx, rcx, name, &miss);
+
+      // Patch the receiver on the stack with the global proxy if
+      // necessary.
+      if (object->IsGlobalObject()) {
+        __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
+        __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+      }
+      break;
+
+    case STRING_CHECK:
+      // Check that the object is a two-byte string or a symbol.
+      __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rcx);
+      __ j(above_equal, &miss);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateLoadGlobalFunctionPrototype(masm(),
+                                          Context::STRING_FUNCTION_INDEX,
+                                          rcx);
+      CheckPrototypes(JSObject::cast(object->GetPrototype()), rcx, holder,
+                      rbx, rdx, name, &miss);
+      break;
+
+    case NUMBER_CHECK: {
+      Label fast;
+      // Check that the object is a smi or a heap number.
+      __ testl(rdx, Immediate(kSmiTagMask));
+      __ j(zero, &fast);
+      __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
+      __ j(not_equal, &miss);
+      __ bind(&fast);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateLoadGlobalFunctionPrototype(masm(),
+                                          Context::NUMBER_FUNCTION_INDEX,
+                                          rcx);
+      CheckPrototypes(JSObject::cast(object->GetPrototype()), rcx, holder,
+                      rbx, rdx, name, &miss);
+      break;
+    }
+
+    case BOOLEAN_CHECK: {
+      Label fast;
+      // Check that the object is a boolean.
+      __ Cmp(rdx, Factory::true_value());
+      __ j(equal, &fast);
+      __ Cmp(rdx, Factory::false_value());
+      __ j(not_equal, &miss);
+      __ bind(&fast);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateLoadGlobalFunctionPrototype(masm(),
+                                          Context::BOOLEAN_FUNCTION_INDEX,
+                                          rcx);
+      CheckPrototypes(JSObject::cast(object->GetPrototype()), rcx, holder,
+                      rbx, rdx, name, &miss);
+      break;
+    }
+
+    case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
+      CheckPrototypes(JSObject::cast(object), rdx, holder,
+                      rbx, rcx, name, &miss);
+      // Make sure object->elements()->map() != Heap::dictionary_array_map()
+      // Get the elements array of the object.
+      __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
+      // Check that the object is in fast mode (not dictionary).
+      __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+             Factory::hash_table_map());
+      __ j(equal, &miss);
+      break;
+
+    default:
+      UNREACHABLE();
+  }
+
+  // Get the function and setup the context.
+  __ Move(rdi, Handle<JSFunction>(function));
+  __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+  // Jump to the cached code (tail call).
+  ASSERT(function->is_compiled());
+  Handle<Code> code(function->code());
+  ParameterCount expected(function->shared()->formal_parameter_count());
+  __ InvokeCode(code, expected, arguments(),
+                RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  String* function_name = NULL;
+  if (function->shared()->name()->IsString()) {
+    function_name = String::cast(function->shared()->name());
+  }
+  return GetCode(CONSTANT_FUNCTION, function_name);
 }
 
-Object* CallStubCompiler::CompileCallField(Object* a,
-                                           JSObject* b,
-                                           int c,
-                                           String* d) {
-  UNIMPLEMENTED();
-  return NULL;
+
+Object* CallStubCompiler::CompileCallField(Object* object,
+                                           JSObject* holder,
+                                           int index,
+                                           String* name) {
+  // ----------- S t a t e -------------
+  // -----------------------------------
+  // rsp[0] return address
+  // rsp[8] argument argc
+  // rsp[16] argument argc - 1
+  // ...
+  // rsp[argc * 8] argument 1
+  // rsp[(argc + 1) * 8] argument 0 = receiver
+  // rsp[(argc + 2) * 8] function name
+  Label miss;
+
+  // Get the receiver from the stack.
+  const int argc = arguments().immediate();
+  __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  __ testl(rdx, Immediate(kSmiTagMask));
+  __ j(zero, &miss);
+
+  // Do the right check and compute the holder register.
+  Register reg =
+      CheckPrototypes(JSObject::cast(object), rdx, holder,
+                      rbx, rcx, name, &miss);
+
+  GenerateFastPropertyLoad(masm(), rdi, reg, holder, index);
+
+  // Check that the function really is a function.
+  __ testl(rdi, Immediate(kSmiTagMask));
+  __ j(zero, &miss);
+  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rbx);
+  __ j(not_equal, &miss);
+
+  // Patch the receiver on the stack with the global proxy if
+  // necessary.
+  if (object->IsGlobalObject()) {
+    __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
+    __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+  }
+
+  // Invoke the function.
+  __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(FIELD, name);
 }
 
 
 Object* CallStubCompiler::CompileCallInterceptor(Object* a,
                                                  JSObject* b,
                                                  String* c) {
-  UNIMPLEMENTED();
-  return NULL;
+  // TODO(X64): Implement a real stub.
+  return Failure::InternalError();
 }
 
 
@@ -71,8 +241,69 @@
                                             JSGlobalPropertyCell* cell,
                                             JSFunction* function,
                                             String* name) {
-  UNIMPLEMENTED();
-  return NULL;
+  // ----------- S t a t e -------------
+  // -----------------------------------
+  // rsp[0] return address
+  // rsp[8] argument argc
+  // rsp[16] argument argc - 1
+  // ...
+  // rsp[argc * 8] argument 1
+  // rsp[(argc + 1) * 8] argument 0 = receiver
+  // rsp[(argc + 2) * 8] function name
+  Label miss;
+
+  __ IncrementCounter(&Counters::call_global_inline, 1);
+
+  // Get the number of arguments.
+  const int argc = arguments().immediate();
+
+  // Get the receiver from the stack.
+  __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+
+  // If the object is the holder then we know that it's a global
+  // object which can only happen for contextual calls. In this case,
+  // the receiver cannot be a smi.
+  if (object != holder) {
+    __ testl(rdx, Immediate(kSmiTagMask));
+    __ j(zero, &miss);
+  }
+
+  // Check that the maps haven't changed.
+  CheckPrototypes(object, rdx, holder, rbx, rcx, name, &miss);
+
+  // Get the value from the cell.
+  __ Move(rdi, Handle<JSGlobalPropertyCell>(cell));
+  __ movq(rdi, FieldOperand(rdi, JSGlobalPropertyCell::kValueOffset));
+
+  // Check that the cell contains the same function.
+  __ Cmp(rdi, Handle<JSFunction>(function));
+  __ j(not_equal, &miss);
+
+  // Patch the receiver on the stack with the global proxy.
+  if (object->IsGlobalObject()) {
+    __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
+    __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+  }
+
+  // Setup the context (function already in rdi).
+  __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+  // Jump to the cached code (tail call).
+  ASSERT(function->is_compiled());
+  Handle<Code> code(function->code());
+  ParameterCount expected(function->shared()->formal_parameter_count());
+  __ InvokeCode(code, expected, arguments(),
+                RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::call_global_inline, 1);
+  __ IncrementCounter(&Counters::call_global_inline_miss, 1);
+  Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(NORMAL, name);
 }
 
 
@@ -80,34 +311,58 @@
                                               JSObject* b,
                                               AccessorInfo* c,
                                               String* d) {
-  UNIMPLEMENTED();
-  return NULL;
+  // TODO(X64): Implement a real stub.
+  return Failure::InternalError();
 }
 
 
-Object* LoadStubCompiler::CompileLoadConstant(JSObject* a,
-                                              JSObject* b,
-                                              Object* c,
-                                              String* d) {
-  UNIMPLEMENTED();
-  return NULL;
+Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
+                                              JSObject* holder,
+                                              Object* value,
+                                              String* name) {
+  // ----------- S t a t e -------------
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ movq(rax, Operand(rsp, kPointerSize));
+  GenerateLoadConstant(object, holder, rax, rbx, rdx, value, name, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(CONSTANT_FUNCTION, name);
 }
 
 
-Object* LoadStubCompiler::CompileLoadField(JSObject* a,
-                                           JSObject* b,
-                                           int c,
-                                           String* d) {
-  UNIMPLEMENTED();
-  return NULL;
+Object* LoadStubCompiler::CompileLoadField(JSObject* object,
+                                           JSObject* holder,
+                                           int index,
+                                           String* name) {
+  // ----------- S t a t e -------------
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ movq(rax, Operand(rsp, kPointerSize));
+  GenerateLoadField(object, holder, rax, rbx, rdx, index, name, &miss);
+  __ bind(&miss);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(FIELD, name);
 }
 
 
 Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* a,
                                                  JSObject* b,
                                                  String* c) {
-  UNIMPLEMENTED();
-  return NULL;
+  // TODO(X64): Implement a real stub.
+  return Failure::InternalError();
 }
 
 
@@ -116,8 +371,51 @@
                                             JSGlobalPropertyCell* cell,
                                             String* name,
                                             bool is_dont_delete) {
-  UNIMPLEMENTED();
-  return NULL;
+  // ----------- S t a t e -------------
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ IncrementCounter(&Counters::named_load_global_inline, 1);
+
+  // Get the receiver from the stack.
+  __ movq(rax, Operand(rsp, kPointerSize));
+
+  // If the object is the holder then we know that it's a global
+  // object which can only happen for contextual loads. In this case,
+  // the receiver cannot be a smi.
+  if (object != holder) {
+    __ testl(rax, Immediate(kSmiTagMask));
+    __ j(zero, &miss);
+  }
+
+  // Check that the maps haven't changed.
+  CheckPrototypes(object, rax, holder, rbx, rdx, name, &miss);
+
+  // Get the value from the cell.
+  __ Move(rax, Handle<JSGlobalPropertyCell>(cell));
+  __ movq(rax, FieldOperand(rax, JSGlobalPropertyCell::kValueOffset));
+
+  // Check for deleted property if property can actually be deleted.
+  if (!is_dont_delete) {
+    __ Cmp(rax, Factory::the_hole_value());
+    __ j(equal, &miss);
+  } else if (FLAG_debug_code) {
+    __ Cmp(rax, Factory::the_hole_value());
+    __ Check(not_equal, "DontDelete cells can't contain the hole");
+  }
+
+  __ ret(0);
+
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::named_load_global_inline, 1);
+  __ IncrementCounter(&Counters::named_load_global_inline_miss, 1);
+  GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(NORMAL, name);
 }
 
 
@@ -129,12 +427,38 @@
 }
 
 
-Object* StoreStubCompiler::CompileStoreField(JSObject* a,
-                                             int b,
-                                             Map* c,
-                                             String* d) {
-  UNIMPLEMENTED();
-  return NULL;
+Object* StoreStubCompiler::CompileStoreField(JSObject* object,
+                                             int index,
+                                             Map* transition,
+                                             String* name) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+  Label miss;
+
+  // Get the object from the stack.
+  __ movq(rbx, Operand(rsp, 1 * kPointerSize));
+
+  // Generate store field code.  Trashes the name register.
+  GenerateStoreField(masm(),
+                     Builtins::StoreIC_ExtendStorage,
+                     object,
+                     index,
+                     transition,
+                     rbx, rcx, rdx,
+                     &miss);
+
+  // Handle store cache miss.
+  __ bind(&miss);
+  __ Move(rcx, Handle<String>(name));  // restore name
+  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
 }
 
 
@@ -152,6 +476,36 @@
 }
 
 
+Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
+                                                JSObject* receiver,
+                                                JSObject* holder,
+                                                int index) {
+  // ----------- S t a t e -------------
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : name
+  //  -- rsp[16] : receiver
+  // -----------------------------------
+  Label miss;
+
+  __ movq(rax, Operand(rsp, kPointerSize));
+  __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+  __ IncrementCounter(&Counters::keyed_load_field, 1);
+
+  // Check that the name has not changed.
+  __ Cmp(rax, Handle<String>(name));
+  __ j(not_equal, &miss);
+
+  GenerateLoadField(receiver, holder, rcx, rbx, rdx, index, name, &miss);
+
+  __ bind(&miss);
+  __ DecrementCounter(&Counters::keyed_load_field, 1);
+  GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+  // Return the generated code.
+  return GetCode(FIELD, name);
+}
+
+
 // TODO(1241006): Avoid having lazy compile stubs specialized by the
 // number of arguments. It is not needed anymore.
 Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
@@ -175,6 +529,308 @@
   return GetCodeWithFlags(flags, "LazyCompileStub");
 }
 
+
+Register StubCompiler::CheckPrototypes(JSObject* object,
+                                       Register object_reg,
+                                       JSObject* holder,
+                                       Register holder_reg,
+                                       Register scratch,
+                                       String* name,
+                                       Label* miss) {
+  // Check that the maps haven't changed.
+  Register result =
+      __ CheckMaps(object, object_reg, holder, holder_reg, scratch, miss);
+
+  // If we've skipped any global objects, it's not enough to verify
+  // that their maps haven't changed.
+  while (object != holder) {
+    if (object->IsGlobalObject()) {
+      GlobalObject* global = GlobalObject::cast(object);
+      Object* probe = global->EnsurePropertyCell(name);
+      if (probe->IsFailure()) {
+        set_failure(Failure::cast(probe));
+        return result;
+      }
+      JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe);
+      ASSERT(cell->value()->IsTheHole());
+      __ Move(scratch, Handle<Object>(cell));
+      __ Cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
+             Factory::the_hole_value());
+      __ j(not_equal, miss);
+    }
+    object = JSObject::cast(object->GetPrototype());
+  }
+
+  // Return the register containing the holder.
+  return result;
+}
+
+
+void StubCompiler::GenerateLoadField(JSObject* object,
+                                     JSObject* holder,
+                                     Register receiver,
+                                     Register scratch1,
+                                     Register scratch2,
+                                     int index,
+                                     String* name,
+                                     Label* miss) {
+  // Check that the receiver isn't a smi.
+  __ testl(receiver, Immediate(kSmiTagMask));
+  __ j(zero, miss);
+
+  // Check the prototype chain.
+  Register reg =
+      CheckPrototypes(object, receiver, holder,
+                      scratch1, scratch2, name, miss);
+
+  // Get the value from the properties.
+  GenerateFastPropertyLoad(masm(), rax, reg, holder, index);
+  __ ret(0);
+}
+
+
+void StubCompiler::GenerateLoadConstant(JSObject* object,
+                                        JSObject* holder,
+                                        Register receiver,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Object* value,
+                                        String* name,
+                                        Label* miss) {
+  // Check that the receiver isn't a smi.
+  __ testl(receiver, Immediate(kSmiTagMask));
+  __ j(zero, miss);
+
+  // Check that the maps haven't changed.
+  Register reg =
+      CheckPrototypes(object, receiver, holder,
+                      scratch1, scratch2, name, miss);
+
+  // Return the constant value.
+  __ Move(rax, Handle<Object>(value));
+  __ ret(0);
+}
+
+
+#undef __
+
+//-----------------------------------------------------------------------------
+// StubCompiler static helper functions
+
+#define __ ACCESS_MASM(masm)
+
+
+static void ProbeTable(MacroAssembler* masm,
+                       Code::Flags flags,
+                       StubCache::Table table,
+                       Register name,
+                       Register offset) {
+  ExternalReference key_offset(SCTableReference::keyReference(table));
+  Label miss;
+
+  __ movq(kScratchRegister, key_offset);
+  // Check that the key in the entry matches the name.
+  __ cmpl(name, Operand(kScratchRegister, offset, times_4, 0));
+  __ j(not_equal, &miss);
+  // Get the code entry from the cache.
+  // Use key_offset + kPointerSize, rather than loading value_offset.
+  __ movq(kScratchRegister,
+          Operand(kScratchRegister, offset, times_4, kPointerSize));
+  // Check that the flags match what we're looking for.
+  __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
+  __ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
+  __ cmpl(offset, Immediate(flags));
+  __ j(not_equal, &miss);
+
+  // Jump to the first instruction in the code stub.
+  __ addq(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
+  __ jmp(kScratchRegister);
+
+  __ bind(&miss);
+}
+
+
+void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
+  ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
+  Code* code = NULL;
+  if (kind == Code::LOAD_IC) {
+    code = Builtins::builtin(Builtins::LoadIC_Miss);
+  } else {
+    code = Builtins::builtin(Builtins::KeyedLoadIC_Miss);
+  }
+
+  Handle<Code> ic(code);
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+}
+
+
+void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
+                                                       int index,
+                                                       Register prototype) {
+  // Load the global or builtins object from the current context.
+  __ movq(prototype,
+             Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+  // Load the global context from the global or builtins object.
+  __ movq(prototype,
+             FieldOperand(prototype, GlobalObject::kGlobalContextOffset));
+  // Load the function from the global context.
+  __ movq(prototype, Operand(prototype, Context::SlotOffset(index)));
+  // Load the initial map.  The global functions all have initial maps.
+  __ movq(prototype,
+             FieldOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
+  // Load the prototype from the initial map.
+  __ movq(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+// Load a fast property out of a holder object (src). In-object properties
+// are loaded directly otherwise the property is loaded from the properties
+// fixed array.
+void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
+                                            Register dst, Register src,
+                                            JSObject* holder, int index) {
+  // Adjust for the number of properties stored in the holder.
+  index -= holder->map()->inobject_properties();
+  if (index < 0) {
+    // Get the property straight out of the holder.
+    int offset = holder->map()->instance_size() + (index * kPointerSize);
+    __ movq(dst, FieldOperand(src, offset));
+  } else {
+    // Calculate the offset into the properties array.
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
+    __ movq(dst, FieldOperand(src, JSObject::kPropertiesOffset));
+    __ movq(dst, FieldOperand(dst, offset));
+  }
+}
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm,
+                              Code::Flags flags,
+                              Register receiver,
+                              Register name,
+                              Register scratch,
+                              Register extra) {
+  Label miss;
+  USE(extra);  // The register extra is not used on the X64 platform.
+  // Make sure that code is valid. The shifting code relies on the
+  // entry size being 16.
+  ASSERT(sizeof(Entry) == 16);
+
+  // Make sure the flags do not name a specific type.
+  ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
+
+  // Make sure that there are no register conflicts.
+  ASSERT(!scratch.is(receiver));
+  ASSERT(!scratch.is(name));
+
+  // Check that the receiver isn't a smi.
+  __ testl(receiver, Immediate(kSmiTagMask));
+  __ j(zero, &miss);
+
+  // Get the map of the receiver and compute the hash.
+  __ movl(scratch, FieldOperand(name, String::kLengthOffset));
+  // Use only the low 32 bits of the map pointer.
+  __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ xor_(scratch, Immediate(flags));
+  __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+
+  // Probe the primary table.
+  ProbeTable(masm, flags, kPrimary, name, scratch);
+
+  // Primary miss: Compute hash for secondary probe.
+  __ movl(scratch, FieldOperand(name, String::kLengthOffset));
+  __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ xor_(scratch, Immediate(flags));
+  __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+  __ subl(scratch, name);
+  __ addl(scratch, Immediate(flags));
+  __ and_(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
+
+  // Probe the secondary table.
+  ProbeTable(masm, flags, kSecondary, name, scratch);
+
+  // Cache miss: Fall-through and let caller handle the miss by
+  // entering the runtime system.
+  __ bind(&miss);
+}
+
+
+void StubCompiler::GenerateStoreField(MacroAssembler* masm,
+                                      Builtins::Name storage_extend,
+                                      JSObject* object,
+                                      int index,
+                                      Map* transition,
+                                      Register receiver_reg,
+                                      Register name_reg,
+                                      Register scratch,
+                                      Label* miss_label) {
+  // Check that the object isn't a smi.
+  __ testl(receiver_reg, Immediate(kSmiTagMask));
+  __ j(zero, miss_label);
+
+  // Check that the map of the object hasn't changed.
+  __ Cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
+         Handle<Map>(object->map()));
+  __ j(not_equal, miss_label);
+
+  // Perform global security token check if needed.
+  if (object->IsJSGlobalProxy()) {
+    __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
+  }
+
+  // Stub never generated for non-global objects that require access
+  // checks.
+  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+  // Perform map transition for the receiver if necessary.
+  if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
+    // The properties must be extended before we can store the value.
+    // We jump to a runtime call that extends the properties array.
+    __ Move(rcx, Handle<Map>(transition));
+    Handle<Code> ic(Builtins::builtin(storage_extend));
+    __ Jump(ic, RelocInfo::CODE_TARGET);
+    return;
+  }
+
+  if (transition != NULL) {
+    // Update the map of the object; no write barrier updating is
+    // needed because the map is never in new space.
+    __ Move(FieldOperand(receiver_reg, HeapObject::kMapOffset),
+            Handle<Map>(transition));
+  }
+
+  // Adjust for the number of properties stored in the object. Even in the
+  // face of a transition we can use the old map here because the size of the
+  // object and the number of in-object properties is not going to change.
+  index -= object->map()->inobject_properties();
+
+  if (index < 0) {
+    // Set the property straight into the object.
+    int offset = object->map()->instance_size() + (index * kPointerSize);
+    __ movq(FieldOperand(receiver_reg, offset), rax);
+
+    // Update the write barrier for the array address.
+    // Pass the value being stored in the now unused name_reg.
+    __ movq(name_reg, rax);
+    __ RecordWrite(receiver_reg, offset, name_reg, scratch);
+  } else {
+    // Write to the properties array.
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
+    // Get the properties array (optimistically).
+    __ movq(scratch, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+    __ movq(FieldOperand(scratch, offset), rax);
+
+    // Update the write barrier for the array address.
+    // Pass the value being stored in the now unused name_reg.
+    __ movq(name_reg, rax);
+    __ RecordWrite(scratch, offset, name_reg, receiver_reg);
+  }
+
+  // Return the value (register rax).
+  __ ret(0);
+}
+
+
 #undef __
 
 
diff --git a/src/zone.cc b/src/zone.cc
index d78c19b..33fe557 100644
--- a/src/zone.cc
+++ b/src/zone.cc
@@ -176,7 +176,10 @@
     new_size = Max(kSegmentOverhead + size, kMaximumSegmentSize);
   }
   Segment* segment = Segment::New(new_size);
-  if (segment == NULL) V8::FatalProcessOutOfMemory("Zone");
+  if (segment == NULL) {
+    V8::FatalProcessOutOfMemory("Zone");
+    return NULL;
+  }
 
   // Recompute 'top' and 'limit' based on the new segment.
   Address result = RoundUp(segment->start(), kAlignment);
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index bb82fc8..fa33d32 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -63,7 +63,7 @@
 
 
 [ $arch == x64 ]
-test-regexp/Graph: CRASH || FAIL
+test-regexp/Graph: PASS || CRASH || FAIL
 test-decls/Present: CRASH || FAIL
 test-decls/Unknown: CRASH || FAIL
 test-decls/Appearing: CRASH || FAIL
@@ -108,17 +108,16 @@
 test-debug/DebugBreak: CRASH || FAIL
 test-debug/DisableBreak: CRASH || FAIL
 test-debug/MessageQueues: CRASH || FAIL
-test-debug/CallFunctionInDebugger: CRASH || FAIL
+test-debug/CallFunctionInDebugger: SKIP
 test-debug/RecursiveBreakpoints: CRASH || FAIL
 test-debug/DebuggerUnload: CRASH || FAIL
-test-debug/DebuggerClearMessageHandler: CRASH || FAIL
-test-debug/DebuggerClearMessageHandlerWhileActive: CRASH || FAIL
 test-debug/DebuggerHostDispatch: CRASH || FAIL
 test-debug/DebugBreakInMessageHandler: CRASH || FAIL
+test-debug/NoDebugBreakInAfterCompileMessageHandler: CRASH || FAIL
 test-api/HugeConsStringOutOfMemory: CRASH || FAIL
 test-api/OutOfMemory: CRASH || FAIL
 test-api/OutOfMemoryNested: CRASH || FAIL
 test-api/Threading: CRASH || FAIL
 test-api/TryCatchSourceInfo: CRASH || FAIL
-test-api/RegExpInterruption: CRASH || FAIL
-test-api/RegExpStringModification: CRASH || FAIL
+test-api/RegExpInterruption: PASS || TIMEOUT
+test-api/RegExpStringModification: PASS || TIMEOUT
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index 5b04b2c..806e711 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -1266,6 +1266,38 @@
 }
 
 
+THREADED_TEST(InternalFieldsNativePointers) {
+  v8::HandleScope scope;
+  LocalContext env;
+
+  Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
+  Local<v8::ObjectTemplate> instance_templ = templ->InstanceTemplate();
+  instance_templ->SetInternalFieldCount(1);
+  Local<v8::Object> obj = templ->GetFunction()->NewInstance();
+  CHECK_EQ(1, obj->InternalFieldCount());
+  CHECK(obj->GetPointerFromInternalField(0) == NULL);
+
+  char* data = new char[100];
+
+  void* aligned = data;
+  CHECK_EQ(0, reinterpret_cast<uintptr_t>(aligned) & 0x1);
+  void* unaligned = data + 1;
+  CHECK_EQ(1, reinterpret_cast<uintptr_t>(unaligned) & 0x1);
+
+  // Check reading and writing aligned pointers.
+  obj->SetPointerInInternalField(0, aligned);
+  i::Heap::CollectAllGarbage();
+  CHECK_EQ(aligned, obj->GetPointerFromInternalField(0));
+
+  // Check reading and writing unaligned pointers.
+  obj->SetPointerInInternalField(0, unaligned);
+  i::Heap::CollectAllGarbage();
+  CHECK_EQ(unaligned, obj->GetPointerFromInternalField(0));
+
+  delete[] data;
+}
+
+
 THREADED_TEST(IdentityHash) {
   v8::HandleScope scope;
   LocalContext env;
@@ -5024,6 +5056,236 @@
 }
 
 
+// Test the case when we stored field into
+// a stub, but interceptor produced value on its own.
+THREADED_TEST(InterceptorLoadICFieldNotNeeded) {
+  CheckInterceptorLoadIC(InterceptorLoadXICGetter,
+    "proto = new Object();"
+    "o.__proto__ = proto;"
+    "proto.x = 239;"
+    "for (var i = 0; i < 1000; i++) {"
+    "  o.x;"
+    // Now it should be ICed and keep a reference to x defined on proto
+    "}"
+    "var result = 0;"
+    "for (var i = 0; i < 1000; i++) {"
+    "  result += o.x;"
+    "}"
+    "result;",
+    42 * 1000);
+}
+
+
+// Test the case when we stored field into
+// a stub, but it got invalidated later on.
+THREADED_TEST(InterceptorLoadICInvalidatedField) {
+  CheckInterceptorLoadIC(InterceptorLoadXICGetter,
+    "proto1 = new Object();"
+    "proto2 = new Object();"
+    "o.__proto__ = proto1;"
+    "proto1.__proto__ = proto2;"
+    "proto2.y = 239;"
+    "for (var i = 0; i < 1000; i++) {"
+    "  o.y;"
+    // Now it should be ICed and keep a reference to y defined on proto2
+    "}"
+    "proto1.y = 42;"
+    "var result = 0;"
+    "for (var i = 0; i < 1000; i++) {"
+    "  result += o.y;"
+    "}"
+    "result;",
+    42 * 1000);
+}
+
+
+// Test the case when we stored field into
+// a stub, but it got invalidated later on due to override on
+// global object which is between interceptor and fields' holders.
+THREADED_TEST(InterceptorLoadICInvalidatedFieldViaGlobal) {
+  CheckInterceptorLoadIC(InterceptorLoadXICGetter,
+    "o.__proto__ = this;"  // set a global to be a proto of o.
+    "this.__proto__.y = 239;"
+    "for (var i = 0; i < 10; i++) {"
+    "  if (o.y != 239) throw 'oops: ' + o.y;"
+    // Now it should be ICed and keep a reference to y defined on field_holder.
+    "}"
+    "this.y = 42;"  // Assign on a global.
+    "var result = 0;"
+    "for (var i = 0; i < 10; i++) {"
+    "  result += o.y;"
+    "}"
+    "result;",
+    42 * 10);
+}
+
+
+static v8::Handle<Value> Return239(Local<String> name, const AccessorInfo&) {
+  ApiTestFuzzer::Fuzz();
+  return v8_num(239);
+}
+
+
+static void SetOnThis(Local<String> name,
+                      Local<Value> value,
+                      const AccessorInfo& info) {
+  info.This()->ForceSet(name, value);
+}
+
+
+THREADED_TEST(InterceptorLoadICWithCallbackOnHolder) {
+  v8::HandleScope scope;
+  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+  templ->SetNamedPropertyHandler(InterceptorLoadXICGetter);
+  templ->SetAccessor(v8_str("y"), Return239);
+  LocalContext context;
+  context->Global()->Set(v8_str("o"), templ->NewInstance());
+  v8::Handle<Value> value = CompileRun(
+      "var result = 0;"
+      "for (var i = 0; i < 7; i++) {"
+      "  result = o.y;"
+      "}");
+  CHECK_EQ(239, value->Int32Value());
+}
+
+
+THREADED_TEST(InterceptorLoadICWithCallbackOnProto) {
+  v8::HandleScope scope;
+  v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
+  templ_o->SetNamedPropertyHandler(InterceptorLoadXICGetter);
+  v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New();
+  templ_p->SetAccessor(v8_str("y"), Return239);
+
+  LocalContext context;
+  context->Global()->Set(v8_str("o"), templ_o->NewInstance());
+  context->Global()->Set(v8_str("p"), templ_p->NewInstance());
+
+  v8::Handle<Value> value = CompileRun(
+      "o.__proto__ = p;"
+      "var result = 0;"
+      "for (var i = 0; i < 7; i++) {"
+      "  result = o.x + o.y;"
+      "}");
+  CHECK_EQ(239 + 42, value->Int32Value());
+}
+
+
+THREADED_TEST(InterceptorLoadICForCallbackWithOverride) {
+  v8::HandleScope scope;
+  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+  templ->SetNamedPropertyHandler(InterceptorLoadXICGetter);
+  templ->SetAccessor(v8_str("y"), Return239);
+
+  LocalContext context;
+  context->Global()->Set(v8_str("o"), templ->NewInstance());
+
+  v8::Handle<Value> value = CompileRun(
+    "fst = new Object();  fst.__proto__ = o;"
+    "snd = new Object();  snd.__proto__ = fst;"
+    "var result1 = 0;"
+    "for (var i = 0; i < 7;  i++) {"
+    "  result1 = snd.x;"
+    "}"
+    "fst.x = 239;"
+    "var result = 0;"
+    "for (var i = 0; i < 7; i++) {"
+    "  result = snd.x;"
+    "}"
+    "result + result1");
+  CHECK_EQ(239 + 42, value->Int32Value());
+}
+
+
+// Test the case when we stored callback into
+// a stub, but interceptor produced value on its own.
+THREADED_TEST(InterceptorLoadICCallbackNotNeeded) {
+  v8::HandleScope scope;
+  v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
+  templ_o->SetNamedPropertyHandler(InterceptorLoadXICGetter);
+  v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New();
+  templ_p->SetAccessor(v8_str("y"), Return239);
+
+  LocalContext context;
+  context->Global()->Set(v8_str("o"), templ_o->NewInstance());
+  context->Global()->Set(v8_str("p"), templ_p->NewInstance());
+
+  v8::Handle<Value> value = CompileRun(
+    "o.__proto__ = p;"
+    "for (var i = 0; i < 7; i++) {"
+    "  o.x;"
+    // Now it should be ICed and keep a reference to x defined on p
+    "}"
+    "var result = 0;"
+    "for (var i = 0; i < 7; i++) {"
+    "  result += o.x;"
+    "}"
+    "result");
+  CHECK_EQ(42 * 7, value->Int32Value());
+}
+
+
+// Test the case when we stored callback into
+// a stub, but it got invalidated later on.
+THREADED_TEST(InterceptorLoadICInvalidatedCallback) {
+  v8::HandleScope scope;
+  v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
+  templ_o->SetNamedPropertyHandler(InterceptorLoadXICGetter);
+  v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New();
+  templ_p->SetAccessor(v8_str("y"), Return239, SetOnThis);
+
+  LocalContext context;
+  context->Global()->Set(v8_str("o"), templ_o->NewInstance());
+  context->Global()->Set(v8_str("p"), templ_p->NewInstance());
+
+  v8::Handle<Value> value = CompileRun(
+    "inbetween = new Object();"
+    "o.__proto__ = inbetween;"
+    "inbetween.__proto__ = p;"
+    "for (var i = 0; i < 10; i++) {"
+    "  o.y;"
+    // Now it should be ICed and keep a reference to y defined on p
+    "}"
+    "inbetween.y = 42;"
+    "var result = 0;"
+    "for (var i = 0; i < 10; i++) {"
+    "  result += o.y;"
+    "}"
+    "result");
+  CHECK_EQ(42 * 10, value->Int32Value());
+}
+
+
+// Test the case when we stored callback into
+// a stub, but it got invalidated later on due to override on
+// global object which is between interceptor and callbacks' holders.
+THREADED_TEST(InterceptorLoadICInvalidatedCallbackViaGlobal) {
+  v8::HandleScope scope;
+  v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
+  templ_o->SetNamedPropertyHandler(InterceptorLoadXICGetter);
+  v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New();
+  templ_p->SetAccessor(v8_str("y"), Return239, SetOnThis);
+
+  LocalContext context;
+  context->Global()->Set(v8_str("o"), templ_o->NewInstance());
+  context->Global()->Set(v8_str("p"), templ_p->NewInstance());
+
+  v8::Handle<Value> value = CompileRun(
+    "o.__proto__ = this;"
+    "this.__proto__ = p;"
+    "for (var i = 0; i < 10; i++) {"
+    "  if (o.y != 239) throw 'oops: ' + o.y;"
+    // Now it should be ICed and keep a reference to y defined on p
+    "}"
+    "this.y = 42;"
+    "var result = 0;"
+    "for (var i = 0; i < 10; i++) {"
+    "  result += o.y;"
+    "}"
+    "result");
+  CHECK_EQ(42 * 10, value->Int32Value());
+}
+
+
 static v8::Handle<Value> InterceptorLoadICGetter0(Local<String> name,
                                                   const AccessorInfo& info) {
   ApiTestFuzzer::Fuzz();
@@ -5108,6 +5370,192 @@
   CHECK_EQ(42, value->Int32Value());
 }
 
+
+// This test checks that if interceptor doesn't provide
+// a value, we can fetch regular value.
+THREADED_TEST(InterceptorCallICSeesOthers) {
+  v8::HandleScope scope;
+  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+  templ->SetNamedPropertyHandler(NoBlockGetterX);
+  LocalContext context;
+  context->Global()->Set(v8_str("o"), templ->NewInstance());
+  v8::Handle<Value> value = CompileRun(
+    "o.x = function f(x) { return x + 1; };"
+    "var result = 0;"
+    "for (var i = 0; i < 7; i++) {"
+    "  result = o.x(41);"
+    "}");
+  CHECK_EQ(42, value->Int32Value());
+}
+
+
+static v8::Handle<Value> call_ic_function4;
+static v8::Handle<Value> InterceptorCallICGetter4(Local<String> name,
+                                                  const AccessorInfo& info) {
+  ApiTestFuzzer::Fuzz();
+  CHECK(v8_str("x")->Equals(name));
+  return call_ic_function4;
+}
+
+
+// This test checks that if interceptor provides a function,
+// even if we cached shadowed variant, interceptor's function
+// is invoked
+THREADED_TEST(InterceptorCallICCacheableNotNeeded) {
+  v8::HandleScope scope;
+  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+  templ->SetNamedPropertyHandler(InterceptorCallICGetter4);
+  LocalContext context;
+  context->Global()->Set(v8_str("o"), templ->NewInstance());
+  call_ic_function4 =
+      v8_compile("function f(x) { return x - 1; }; f")->Run();
+  v8::Handle<Value> value = CompileRun(
+    "o.__proto__.x = function(x) { return x + 1; };"
+    "var result = 0;"
+    "for (var i = 0; i < 1000; i++) {"
+    "  result = o.x(42);"
+    "}");
+  CHECK_EQ(41, value->Int32Value());
+}
+
+
+// Test the case when we stored cacheable lookup into
+// a stub, but it got invalidated later on
+THREADED_TEST(InterceptorCallICInvalidatedCacheable) {
+  v8::HandleScope scope;
+  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+  templ->SetNamedPropertyHandler(NoBlockGetterX);
+  LocalContext context;
+  context->Global()->Set(v8_str("o"), templ->NewInstance());
+  v8::Handle<Value> value = CompileRun(
+    "proto1 = new Object();"
+    "proto2 = new Object();"
+    "o.__proto__ = proto1;"
+    "proto1.__proto__ = proto2;"
+    "proto2.y = function(x) { return x + 1; };"
+    // Invoke it many times to compile a stub
+    "for (var i = 0; i < 7; i++) {"
+    "  o.y(42);"
+    "}"
+    "proto1.y = function(x) { return x - 1; };"
+    "var result = 0;"
+    "for (var i = 0; i < 7; i++) {"
+    "  result += o.y(42);"
+    "}");
+  CHECK_EQ(41 * 7, value->Int32Value());
+}
+
+
+static v8::Handle<Value> call_ic_function5;
+static v8::Handle<Value> InterceptorCallICGetter5(Local<String> name,
+                                                  const AccessorInfo& info) {
+  ApiTestFuzzer::Fuzz();
+  if (v8_str("x")->Equals(name))
+    return call_ic_function5;
+  else
+    return Local<Value>();
+}
+
+
+// This test checks that if interceptor doesn't provide a function,
+// cached constant function is used
+THREADED_TEST(InterceptorCallICConstantFunctionUsed) {
+  v8::HandleScope scope;
+  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+  templ->SetNamedPropertyHandler(NoBlockGetterX);
+  LocalContext context;
+  context->Global()->Set(v8_str("o"), templ->NewInstance());
+  v8::Handle<Value> value = CompileRun(
+    "function inc(x) { return x + 1; };"
+    "inc(1);"
+    "o.x = inc;"
+    "var result = 0;"
+    "for (var i = 0; i < 1000; i++) {"
+    "  result = o.x(42);"
+    "}");
+  CHECK_EQ(43, value->Int32Value());
+}
+
+
+// This test checks that if interceptor provides a function,
+// even if we cached constant function, interceptor's function
+// is invoked
+THREADED_TEST(InterceptorCallICConstantFunctionNotNeeded) {
+  v8::HandleScope scope;
+  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+  templ->SetNamedPropertyHandler(InterceptorCallICGetter5);
+  LocalContext context;
+  context->Global()->Set(v8_str("o"), templ->NewInstance());
+  call_ic_function5 =
+      v8_compile("function f(x) { return x - 1; }; f")->Run();
+  v8::Handle<Value> value = CompileRun(
+    "function inc(x) { return x + 1; };"
+    "inc(1);"
+    "o.x = inc;"
+    "var result = 0;"
+    "for (var i = 0; i < 1000; i++) {"
+    "  result = o.x(42);"
+    "}");
+  CHECK_EQ(41, value->Int32Value());
+}
+
+
+// Test the case when we stored constant function into
+// a stub, but it got invalidated later on
+THREADED_TEST(InterceptorCallICInvalidatedConstantFunction) {
+  v8::HandleScope scope;
+  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+  templ->SetNamedPropertyHandler(NoBlockGetterX);
+  LocalContext context;
+  context->Global()->Set(v8_str("o"), templ->NewInstance());
+  v8::Handle<Value> value = CompileRun(
+    "function inc(x) { return x + 1; };"
+    "inc(1);"
+    "proto1 = new Object();"
+    "proto2 = new Object();"
+    "o.__proto__ = proto1;"
+    "proto1.__proto__ = proto2;"
+    "proto2.y = inc;"
+    // Invoke it many times to compile a stub
+    "for (var i = 0; i < 7; i++) {"
+    "  o.y(42);"
+    "}"
+    "proto1.y = function(x) { return x - 1; };"
+    "var result = 0;"
+    "for (var i = 0; i < 7; i++) {"
+    "  result += o.y(42);"
+    "}");
+  CHECK_EQ(41 * 7, value->Int32Value());
+}
+
+
+// Test the case when we stored constant function into
+// a stub, but it got invalidated later on due to override on
+// global object which is between interceptor and constant functions' holders.
+THREADED_TEST(InterceptorCallICInvalidatedConstantFunctionViaGlobal) {
+  v8::HandleScope scope;
+  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
+  templ->SetNamedPropertyHandler(NoBlockGetterX);
+  LocalContext context;
+  context->Global()->Set(v8_str("o"), templ->NewInstance());
+  v8::Handle<Value> value = CompileRun(
+    "function inc(x) { return x + 1; };"
+    "inc(1);"
+    "o.__proto__ = this;"
+    "this.__proto__.y = inc;"
+    // Invoke it many times to compile a stub
+    "for (var i = 0; i < 7; i++) {"
+    "  if (o.y(42) != 43) throw 'oops: ' + o.y(42);"
+    "}"
+    "this.y = function(x) { return x - 1; };"
+    "var result = 0;"
+    "for (var i = 0; i < 7; i++) {"
+    "  result += o.y(42);"
+    "}");
+  CHECK_EQ(41 * 7, value->Int32Value());
+}
+
+
 static int interceptor_call_count = 0;
 
 static v8::Handle<Value> InterceptorICRefErrorGetter(Local<String> name,
@@ -5768,6 +6216,7 @@
 
 THREADED_TEST(ExternalAllocatedMemory) {
   v8::HandleScope outer;
+  v8::Persistent<Context> env = Context::New();
   const int kSize = 1024*1024;
   CHECK_EQ(v8::V8::AdjustAmountOfExternalAllocatedMemory(kSize), kSize);
   CHECK_EQ(v8::V8::AdjustAmountOfExternalAllocatedMemory(-kSize), 0);
@@ -7081,3 +7530,21 @@
   obj_clone->Set(foo_string, v8::String::New("Hello"));
   CHECK(!obj->Get(foo_string)->IsUndefined());
 }
+
+
+// Regression test for http://crbug.com/16276.
+THREADED_TEST(Regress16276) {
+  v8::HandleScope scope;
+  LocalContext context;
+  // Force the IC in f to be a dictionary load IC.
+  CompileRun("function f(obj) { return obj.x; }\n"
+             "var obj = { x: { foo: 42 }, y: 87 };\n"
+             "var x = obj.x;\n"
+             "delete obj.y;\n"
+             "for (var i = 0; i < 5; i++) f(obj);");
+  // Detach the global object to make 'this' refer directly to the
+  // global object (not the proxy), and make sure that the dictionary
+  // load IC doesn't mess up loading directly from the global object.
+  context->DetachGlobal();
+  CHECK_EQ(42, CompileRun("f(this).foo")->Int32Value());
+}
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index fddd000..9e2c38d 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -4875,7 +4875,7 @@
   v8::Debug::SetMessageHandler2(DebugBreakMessageHandler);
 
   // Test functions.
-  const char* script = "function f() { debugger; } function g() { }";
+  const char* script = "function f() { debugger; g(); } function g() { }";
   CompileRun(script);
   v8::Local<v8::Function> f =
       v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
@@ -4954,8 +4954,10 @@
   v8::Debug::DebugBreak();
   result = f->Call(env->Global(), argc, argv);
 
-  CHECK_EQ(20, break_point_hit_count);
-  CHECK_EQ("exec", last_function_hit);
+  // Check that there was only one break event. Matching RegExp should not
+  // cause Break events.
+  CHECK_EQ(1, break_point_hit_count);
+  CHECK_EQ("f", last_function_hit);
 }
 #endif  // V8_NATIVE_REGEXP
 
@@ -5295,3 +5297,63 @@
   ClearBreakPointFromJS(sbp2);
   v8::Debug::SetMessageHandler2(NULL);
 }
+
+
+static void BreakMessageHandler(const v8::Debug::Message& message) {
+  if (message.IsEvent() && message.GetEvent() == v8::Break) {
+    // Count the number of breaks.
+    break_point_hit_count++;
+
+    v8::HandleScope scope;
+    v8::Handle<v8::String> json = message.GetJSON();
+
+    SendContinueCommand();
+  } else if (message.IsEvent() && message.GetEvent() == v8::AfterCompile) {
+    v8::HandleScope scope;
+
+    bool is_debug_break = i::StackGuard::IsDebugBreak();
+    // Force DebugBreak flag while serializer is working.
+    i::StackGuard::DebugBreak();
+
+    // Force serialization to trigger some internal JS execution.
+    v8::Handle<v8::String> json = message.GetJSON();
+
+    // Restore previous state.
+    if (is_debug_break) {
+      i::StackGuard::DebugBreak();
+    } else {
+      i::StackGuard::Continue(i::DEBUGBREAK);
+    }
+  }
+}
+
+
+// Test that if DebugBreak is forced it is ignored when code from
+// debug-delay.js is executed.
+TEST(NoDebugBreakInAfterCompileMessageHandler) {
+  v8::HandleScope scope;
+  DebugLocalContext env;
+
+  // Register a debug event listener which sets the break flag and counts.
+  v8::Debug::SetMessageHandler2(BreakMessageHandler);
+
+  // Set the debug break flag.
+  v8::Debug::DebugBreak();
+
+  // Create a function for testing stepping.
+  const char* src = "function f() { eval('var x = 10;'); } ";
+  v8::Local<v8::Function> f = CompileFunction(&env, src, "f");
+
+  // There should be only one break event.
+  CHECK_EQ(1, break_point_hit_count);
+
+  // Set the debug break flag again.
+  v8::Debug::DebugBreak();
+  f->Call(env->Global(), 0, NULL);
+  // There should be one more break event when the script is evaluated in 'f'.
+  CHECK_EQ(2, break_point_hit_count);
+
+  // Get rid of the debug message handler.
+  v8::Debug::SetMessageHandler2(NULL);
+  CheckDebuggerUnloaded();
+}
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index 396bcc5..5163ff9 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -36,7 +36,7 @@
   InitializeVM();
   CheckMap(Heap::meta_map(), MAP_TYPE, Map::kSize);
   CheckMap(Heap::heap_number_map(), HEAP_NUMBER_TYPE, HeapNumber::kSize);
-  CheckMap(Heap::fixed_array_map(), FIXED_ARRAY_TYPE, Array::kAlignedSize);
+  CheckMap(Heap::fixed_array_map(), FIXED_ARRAY_TYPE, FixedArray::kHeaderSize);
   CheckMap(Heap::long_string_map(), LONG_STRING_TYPE,
            SeqTwoByteString::kAlignedSize);
 }
diff --git a/test/cctest/test-mark-compact.cc b/test/cctest/test-mark-compact.cc
index 8db7339..743375d 100644
--- a/test/cctest/test-mark-compact.cc
+++ b/test/cctest/test-mark-compact.cc
@@ -86,7 +86,8 @@
   v8::HandleScope sc;
 
   // Allocate a fixed array in the new space.
-  int array_size = (Heap::MaxObjectSizeInPagedSpace() - Array::kHeaderSize) /
+  int array_size =
+      (Heap::MaxObjectSizeInPagedSpace() - FixedArray::kHeaderSize) /
       (kPointerSize * 4);
   Object* obj = Heap::AllocateFixedArray(array_size);
   CHECK(!obj->IsFailure());
@@ -118,7 +119,7 @@
   CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));
 
   // Allocate a big Fixed array in the new space.
-  int size = (Heap::MaxObjectSizeInPagedSpace() - Array::kHeaderSize) /
+  int size = (Heap::MaxObjectSizeInPagedSpace() - FixedArray::kHeaderSize) /
       kPointerSize;
   Object* obj = Heap::AllocateFixedArray(size);
 
diff --git a/test/message/message.status b/test/message/message.status
index d40151e..9afaa0f 100644
--- a/test/message/message.status
+++ b/test/message/message.status
@@ -32,13 +32,13 @@
 
 [ $arch == x64 ]
 
-simple-throw.js: FAIL
-try-catch-finally-throw-in-catch-and-finally.js: FAIL
-try-catch-finally-throw-in-catch.js: FAIL
-try-catch-finally-throw-in-finally.js: FAIL
-try-finally-throw-in-finally.js: FAIL
-try-finally-throw-in-try-and-finally.js: FAIL
-try-finally-throw-in-try.js: FAIL
-overwritten-builtins.js: FAIL
-regress-73.js: FAIL
-regress-75.js: FAIL
+simple-throw: FAIL
+try-catch-finally-throw-in-catch-and-finally: FAIL
+try-catch-finally-throw-in-catch: FAIL
+try-catch-finally-throw-in-finally: FAIL
+try-finally-throw-in-finally: FAIL
+try-finally-throw-in-try-and-finally: FAIL
+try-finally-throw-in-try: FAIL
+overwritten-builtins: FAIL
+regress/regress-73: FAIL
+regress/regress-75: FAIL
diff --git a/test/mjsunit/debug-stepin-accessor.js b/test/mjsunit/debug-stepin-accessor.js
new file mode 100644
index 0000000..8b24c3c
--- /dev/null
+++ b/test/mjsunit/debug-stepin-accessor.js
@@ -0,0 +1,248 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+var exception = null;
+var state = 1;
+var expected_source_line_text = null;
+var expected_function_name = null;
+
+// Simple debug event handler which first time will cause 'step in' action
+// to get into g.call and then check that execution is paused inside
+// function 'g'.
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Break) {
+      if (state == 1) {
+        exec_state.prepareStep(Debug.StepAction.StepIn, 2);
+        state = 2;
+      } else if (state == 2) {
+        assertEquals(expected_source_line_text,
+                     event_data.sourceLineText());
+        assertEquals(expected_function_name, event_data.func().name());
+        state = 3;
+      }
+    }
+  } catch(e) {
+    exception = e;
+  }
+};
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+
+var c = {
+  name: 'name ',
+  get getter1() {
+    return this.name;  // getter 1
+  },
+  get getter2() {
+    return {  // getter 2
+     'a': c.name
+    };
+  },
+  set setter1(n) {
+    this.name = n;  // setter 1
+  }
+};
+
+c.__defineGetter__('y', function getterY() {
+  return this.name;  // getter y
+});
+
+c.__defineGetter__(3, function getter3() {
+  return this.name;  // getter 3
+});
+
+c.__defineSetter__('y', function setterY(n) {
+  this.name = n;  // setter y
+});
+
+c.__defineSetter__(3, function setter3(n) {
+  this.name = n;  // setter 3
+});
+
+var d = {
+  'c': c,
+};
+
+function testGetter1_1() {
+  expected_function_name = 'getter1';
+  expected_source_line_text = '    return this.name;  // getter 1';
+  debugger;
+  var x = c.getter1;
+}
+
+function testGetter1_2() {
+  expected_function_name = 'getter1';
+  expected_source_line_text = '    return this.name;  // getter 1';
+  debugger;
+  var x = c['getter1'];
+}
+
+function testGetter1_3() {
+  expected_function_name = 'getter1';
+  expected_source_line_text = '    return this.name;  // getter 1';
+  debugger;
+  for (var i = 1; i < 2; i++) {
+    var x = c['getter' + i];
+  }
+}
+
+function testGetter1_4() {
+  expected_function_name = 'getter1';
+  expected_source_line_text = '    return this.name;  // getter 1';
+  debugger;
+  var x = d.c.getter1;
+}
+
+function testGetter1_5() {
+  expected_function_name = 'getter1';
+  expected_source_line_text = '    return this.name;  // getter 1';
+  for (var i = 2; i != 1; i--);
+  debugger;
+  var x = d.c['getter' + i];
+}
+
+function testGetter2_1() {
+  expected_function_name = 'getter2';
+  expected_source_line_text = '    return {  // getter 2';
+  for (var i = 2; i != 1; i--);
+  debugger;
+  var t = d.c.getter2.name;
+}
+
+
+function testGetterY_1() {
+  expected_function_name = 'getterY';
+  expected_source_line_text = '  return this.name;  // getter y';
+  debugger;
+  var t = d.c.y;
+}
+
+function testIndexedGetter3_1() {
+  expected_function_name = 'getter3';
+  expected_source_line_text = '  return this.name;  // getter 3';
+  debugger;
+  var r = d.c[3];
+}
+
+function testSetterY_1() {
+  expected_function_name = 'setterY';
+  expected_source_line_text = '  this.name = n;  // setter y';
+  debugger;
+  d.c.y = 'www';
+}
+
+function testIndexedSetter3_1() {
+  expected_function_name = 'setter3';
+  expected_source_line_text = '  this.name = n;  // setter 3';
+  var i = 3
+  debugger;
+  d.c[3] = 'www';
+}
+
+function testSetter1_1() {
+  expected_function_name = 'setter1';
+  expected_source_line_text = '    this.name = n;  // setter 1';
+  debugger;
+  d.c.setter1 = 'aa';
+}
+
+function testSetter1_2() {
+  expected_function_name = 'setter1';
+  expected_source_line_text = '    this.name = n;  // setter 1';
+  debugger;
+  d.c['setter1'] = 'bb';
+}
+
+function testSetter1_3() {
+  expected_function_name = 'setter1';
+  expected_source_line_text = '    this.name = n;  // setter 1';
+  for (var i = 2; i != 1; i--);
+  debugger;
+  d.c['setter' + i] = i;
+}
+
+var e = {
+  name: 'e'
+};
+e.__proto__ = c;
+
+function testProtoGetter1_1() {
+  expected_function_name = 'getter1';
+  expected_source_line_text = '    return this.name;  // getter 1';
+  debugger;
+  var x = e.getter1;
+}
+
+function testProtoSetter1_1() {
+  expected_function_name = 'setter1';
+  expected_source_line_text = '    this.name = n;  // setter 1';
+  debugger;
+  e.setter1 = 'aa';
+}
+
+function testProtoIndexedGetter3_1() {
+  expected_function_name = 'getter3';
+  expected_source_line_text = '  return this.name;  // getter 3';
+  debugger;
+  var x = e[3];
+}
+
+function testProtoIndexedSetter3_1() {
+  expected_function_name = 'setter3';
+  expected_source_line_text = '  this.name = n;  // setter 3';
+  debugger;
+  e[3] = 'new val';
+}
+
+function testProtoSetter1_2() {
+  expected_function_name = 'setter1';
+  expected_source_line_text = '    this.name = n;  // setter 1';
+  for (var i = 2; i != 1; i--);
+  debugger;
+  e['setter' + i] = 'aa';
+}
+
+for (var n in this) {
+  if (n.substr(0, 4) != 'test') {
+    continue;
+  }
+  state = 1;
+  this[n]();
+  assertNull(exception);
+  assertEquals(3, state);
+}
+
+// Get rid of the debug event listener.
+Debug.setListener(null);
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index 962e4d3..d30e78c 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -58,6 +58,7 @@
 debug-multiple-breakpoints: CRASH || FAIL
 debug-setbreakpoint: CRASH || FAIL || PASS
 debug-step-stub-callfunction: SKIP
+debug-stepin-accessor: CRASH || FAIL
 debug-stepin-constructor: CRASH, FAIL
 debug-stepin-function-call: CRASH || FAIL
 debug-step: SKIP
@@ -69,40 +70,44 @@
 # Fails on real ARM hardware but not on the simulator.
 string-compare-alignment: PASS || FAIL
 
+# Times out often in release mode on ARM.
+array-splice: PASS || TIMEOUT
 
 [ $arch == x64 ]
 
-debug-backtrace.js: CRASH || FAIL
-date-parse.js: CRASH || FAIL
-debug-backtrace-text.js: CRASH || FAIL
-debug-multiple-breakpoints.js: CRASH || FAIL
-debug-breakpoints.js: CRASH || FAIL
-debug-changebreakpoint.js: CRASH || FAIL
-debug-clearbreakpoint.js: CRASH || FAIL
-debug-conditional-breakpoints.js: CRASH || FAIL
-debug-constructor.js: CRASH || FAIL
-debug-continue.js: CRASH || FAIL
-debug-enable-disable-breakpoints.js: CRASH || FAIL
-debug-evaluate-recursive.js: CRASH || FAIL
-debug-event-listener.js: CRASH || FAIL
-debug-evaluate.js: CRASH || FAIL
-debug-ignore-breakpoints.js: CRASH || FAIL
-debug-setbreakpoint.js: CRASH || FAIL
-debug-step-stub-callfunction.js: CRASH || FAIL
-debug-step.js: CRASH || FAIL
-mirror-date.js: CRASH || FAIL
-invalid-lhs.js: CRASH || FAIL
-debug-stepin-constructor.js: CRASH || FAIL
-new.js: CRASH || FAIL
-fuzz-natives.js: CRASH || FAIL
-greedy.js: CRASH || FAIL
-debug-handle.js: CRASH || FAIL
-string-indexof.js: CRASH || FAIL
-debug-clearbreakpointgroup.js: CRASH || FAIL
-regress/regress-269.js: CRASH || FAIL
-div-mod.js: CRASH || FAIL
-unicode-test.js: CRASH || FAIL
-regress/regress-392.js: CRASH || FAIL
-regress/regress-1200351.js: CRASH || FAIL
-regress/regress-998565.js: CRASH || FAIL
-tools/tickprocessor.js: CRASH || FAIL
+debug-backtrace: CRASH || FAIL
+date-parse: CRASH || FAIL
+debug-backtrace-text: CRASH || FAIL
+debug-multiple-breakpoints: CRASH || FAIL
+debug-breakpoints: CRASH || FAIL
+debug-changebreakpoint: CRASH || FAIL
+debug-clearbreakpoint: CRASH || FAIL
+debug-conditional-breakpoints: CRASH || FAIL
+debug-constructor: CRASH || FAIL
+debug-continue: CRASH || FAIL
+debug-enable-disable-breakpoints: CRASH || FAIL
+debug-evaluate-recursive: CRASH || FAIL
+debug-event-listener: CRASH || FAIL
+debug-evaluate: CRASH || FAIL
+debug-ignore-breakpoints: CRASH || FAIL
+debug-setbreakpoint: CRASH || FAIL
+debug-step-stub-callfunction: CRASH || FAIL
+debug-step: CRASH || FAIL
+mirror-date: CRASH || FAIL
+invalid-lhs: PASS || CRASH || FAIL
+debug-stepin-constructor: CRASH || FAIL
+debug-stepin-function-call: CRASH || FAIL
+debug-stepin-accessor: CRASH || FAIL
+new: CRASH || FAIL
+fuzz-natives: PASS || TIMEOUT
+greedy: PASS || TIMEOUT
+debug-handle: CRASH || FAIL
+string-indexof: PASS || TIMEOUT
+debug-clearbreakpointgroup: CRASH || FAIL
+regress/regress-269: CRASH || FAIL
+div-mod: CRASH || FAIL
+unicode-test: PASS || TIMEOUT
+regress/regress-392: CRASH || FAIL
+regress/regress-1200351: CRASH || FAIL
+regress/regress-998565: CRASH || FAIL
+tools/tickprocessor: PASS || CRASH || FAIL
diff --git a/test/mjsunit/regexp-call-as-function.js b/test/mjsunit/regexp-call-as-function.js
new file mode 100644
index 0000000..4cbe7f9
--- /dev/null
+++ b/test/mjsunit/regexp-call-as-function.js
@@ -0,0 +1,36 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that regular expressions can be called as functions.  Calling
+// a regular expression as a function corresponds to calling its exec
+// method.
+
+var regexp = /a(b)(c)/;
+var subject = "xyzabcde";
+var expected = 'abc,b,c';
+assertEquals(expected, String(regexp.exec(subject)));
+assertEquals(expected, String(regexp(subject)));
diff --git a/test/mjsunit/regress/regress-155924.js b/test/mjsunit/regress/regress-155924.js
new file mode 100644
index 0000000..666e3ba
--- /dev/null
+++ b/test/mjsunit/regress/regress-155924.js
@@ -0,0 +1,46 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// A HeapNumber with certain bits in the mantissa of the floating point
+// value should not be able to masquerade as a string in a keyed lookup
+// inline cache stub.  See http://codereview.chromium.org/155924.
+
+A = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ];
+
+function foo() {
+  x = 1 << 26;
+  x = x * x;
+  // The following floating-point heap number has a second word similar
+  // to that of the string "5":
+  // 2^52 + index << cached_index_shift + cached_index_tag
+  x = x + (5 << 2) + (1 << 1);
+  return A[x];
+}
+
+assertEquals(undefined, foo(), "First lookup A[bad_float]");
+assertEquals(undefined, foo(), "Second lookup A[bad_float]");
+assertEquals(undefined, foo(), "Third lookup A[bad_float]");
diff --git a/test/mjsunit/regress/regress-345.js b/test/mjsunit/regress/regress-345.js
new file mode 100644
index 0000000..f7f28a1
--- /dev/null
+++ b/test/mjsunit/regress/regress-345.js
@@ -0,0 +1,51 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Escaping to the same target from both the try and catch blocks of
+// try...catch...finally should not fail at compile-time.
+//
+// Reported by nth10sd.
+// See http://code.google.com/p/v8/issues/detail?id=345
+
+do {
+  try {
+    continue;
+  } catch (e) {
+    continue;
+  } finally {
+  }
+} while (false);
+
+
+L: {
+  try {
+    break L;
+  } catch (e) {
+    break L;
+  } finally {
+  }
+}
diff --git a/test/mjsunit/regress/regress-406.js b/test/mjsunit/regress/regress-406.js
new file mode 100644
index 0000000..f48a5de
--- /dev/null
+++ b/test/mjsunit/regress/regress-406.js
@@ -0,0 +1,69 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test of constant folding of boolean-valued expressions.
+
+// See http://code.google.com/p/v8/issues/detail?id=406
+
+assertFalse(typeof(0) == "zero");
+assertTrue(typeof(0) != "zero");
+
+// The and and or truth tables with both operands constant.
+assertFalse(typeof(0) == "zero" && typeof(0) == "zero");
+assertFalse(typeof(0) == "zero" && typeof(0) != "zero");
+assertFalse(typeof(0) != "zero" && typeof(0) == "zero");
+assertTrue(typeof(0) != "zero" && typeof(0) != "zero");
+
+assertFalse(typeof(0) == "zero" || typeof(0) == "zero");
+assertTrue(typeof(0) == "zero" || typeof(0) != "zero");
+assertTrue(typeof(0) != "zero" || typeof(0) == "zero");
+assertTrue(typeof(0) != "zero" || typeof(0) != "zero");
+
+// Same with just the left operand constant.
+// Helper function to prevent simple constant folding.
+function one() { return 1; }
+
+assertFalse(typeof(0) == "zero" && one() < 0);
+assertFalse(typeof(0) == "zero" && one() > 0);
+assertFalse(typeof(0) != "zero" && one() < 0);
+assertTrue(typeof(0) != "zero" && one() > 0);
+
+assertFalse(typeof(0) == "zero" || one() < 0);
+assertTrue(typeof(0) == "zero" || one() > 0);
+assertTrue(typeof(0) != "zero" || one() < 0);
+assertTrue(typeof(0) != "zero" || one() > 0);
+
+// Same with just the right operand constant.
+assertFalse(one() < 0 && typeof(0) == "zero");
+assertFalse(one() < 0 && typeof(0) != "zero");
+assertFalse(one() > 0 && typeof(0) == "zero");
+assertTrue(one() > 0 && typeof(0) != "zero");
+
+assertFalse(one() < 0 || typeof(0) == "zero");
+assertTrue(one() < 0 || typeof(0) != "zero");
+assertTrue(one() > 0 || typeof(0) == "zero");
+assertTrue(one() > 0 || typeof(0) != "zero");
diff --git a/test/mjsunit/tools/codemap.js b/test/mjsunit/tools/codemap.js
index 55b8758..06a91e8 100644
--- a/test/mjsunit/tools/codemap.js
+++ b/test/mjsunit/tools/codemap.js
@@ -46,11 +46,11 @@
 };
 
 
-(function testStaticCode() {
+(function testLibrariesAndStaticCode() {
   var codeMap = new devtools.profiler.CodeMap();
-  codeMap.addStaticCode(0x1500, newCodeEntry(0x3000, 'lib1'));
-  codeMap.addStaticCode(0x15500, newCodeEntry(0x5000, 'lib2'));
-  codeMap.addStaticCode(0x155500, newCodeEntry(0x10000, 'lib3'));
+  codeMap.addLibrary(0x1500, newCodeEntry(0x3000, 'lib1'));
+  codeMap.addLibrary(0x15500, newCodeEntry(0x5000, 'lib2'));
+  codeMap.addLibrary(0x155500, newCodeEntry(0x10000, 'lib3'));
   assertNoEntry(codeMap, 0);
   assertNoEntry(codeMap, 0x1500 - 1);
   assertEntry(codeMap, 'lib1', 0x1500);
@@ -71,6 +71,28 @@
   assertEntry(codeMap, 'lib3', 0x155500 + 0x10000 - 1);
   assertNoEntry(codeMap, 0x155500 + 0x10000);
   assertNoEntry(codeMap, 0xFFFFFFFF);
+
+  codeMap.addStaticCode(0x1510, newCodeEntry(0x30, 'lib1-f1'));
+  codeMap.addStaticCode(0x1600, newCodeEntry(0x50, 'lib1-f2'));
+  codeMap.addStaticCode(0x15520, newCodeEntry(0x100, 'lib2-f1'));
+  assertEntry(codeMap, 'lib1', 0x1500);
+  assertEntry(codeMap, 'lib1', 0x1510 - 1);
+  assertEntry(codeMap, 'lib1-f1', 0x1510);
+  assertEntry(codeMap, 'lib1-f1', 0x1510 + 0x15);
+  assertEntry(codeMap, 'lib1-f1', 0x1510 + 0x30 - 1);
+  assertEntry(codeMap, 'lib1', 0x1510 + 0x30);
+  assertEntry(codeMap, 'lib1', 0x1600 - 1);
+  assertEntry(codeMap, 'lib1-f2', 0x1600);
+  assertEntry(codeMap, 'lib1-f2', 0x1600 + 0x30);
+  assertEntry(codeMap, 'lib1-f2', 0x1600 + 0x50 - 1);
+  assertEntry(codeMap, 'lib1', 0x1600 + 0x50);
+  assertEntry(codeMap, 'lib2', 0x15500);
+  assertEntry(codeMap, 'lib2', 0x15520 - 1);
+  assertEntry(codeMap, 'lib2-f1', 0x15520);
+  assertEntry(codeMap, 'lib2-f1', 0x15520 + 0x80);
+  assertEntry(codeMap, 'lib2-f1', 0x15520 + 0x100 - 1);
+  assertEntry(codeMap, 'lib2', 0x15520 + 0x100);
+
 })();
 
 
diff --git a/test/mjsunit/tools/profile.js b/test/mjsunit/tools/profile.js
index 49eef3b..9ed851b 100644
--- a/test/mjsunit/tools/profile.js
+++ b/test/mjsunit/tools/profile.js
@@ -72,10 +72,10 @@
 
 
 ProfileTestDriver.prototype.addFunctions_ = function() {
-  this.profile.addStaticCode('lib1', 0x11000, 0x12000);
+  this.profile.addLibrary('lib1', 0x11000, 0x12000);
   this.profile.addStaticCode('lib1-f1', 0x11100, 0x11900);
   this.profile.addStaticCode('lib1-f2', 0x11200, 0x11500);
-  this.profile.addStaticCode('lib2', 0x21000, 0x22000);
+  this.profile.addLibrary('lib2', 0x21000, 0x22000);
   this.profile.addStaticCode('lib2-f1', 0x21100, 0x21900);
   this.profile.addStaticCode('lib2-f2', 0x21200, 0x21500);
   this.profile.addCode('T', 'F1', 0x50100, 0x100);
diff --git a/test/mjsunit/tools/tickprocessor-test.default b/test/mjsunit/tools/tickprocessor-test.default
index a689ea8..702f4bc 100644
--- a/test/mjsunit/tools/tickprocessor-test.default
+++ b/test/mjsunit/tools/tickprocessor-test.default
@@ -6,20 +6,19 @@
 
  [Shared libraries]:
    ticks  total  nonlib   name
-      2   15.4%    0.0%  /lib32/libm-2.7.so
+      3   23.1%    0.0%  /lib32/libm-2.7.so
       1    7.7%    0.0%  ffffe000-fffff000
 
  [JavaScript]:
    ticks  total  nonlib   name
-      1    7.7%   10.0%  LazyCompile: exp native math.js:41
+      1    7.7%   11.1%  LazyCompile: exp native math.js:41
 
  [C++]:
    ticks  total  nonlib   name
-      2   15.4%   20.0%  v8::internal::Runtime_Math_exp(v8::internal::Arguments)
-      1    7.7%   10.0%  v8::internal::JSObject::LocalLookupRealNamedProperty(v8::internal::String*, v8::internal::LookupResult*)
-      1    7.7%   10.0%  v8::internal::HashTable<v8::internal::StringDictionaryShape, v8::internal::String*>::FindEntry(v8::internal::String*)
-      1    7.7%   10.0%  fegetexcept
-      1    7.7%   10.0%  exp
+      2   15.4%   22.2%  v8::internal::Runtime_Math_exp(v8::internal::Arguments)
+      1    7.7%   11.1%  v8::internal::JSObject::LocalLookupRealNamedProperty(v8::internal::String*, v8::internal::LookupResult*)
+      1    7.7%   11.1%  v8::internal::HashTable<v8::internal::StringDictionaryShape, v8::internal::String*>::FindEntry(v8::internal::String*)
+      1    7.7%   11.1%  exp
 
  [GC]:
    ticks  total  nonlib   name
@@ -31,11 +30,11 @@
   Callers occupying less than 2.0% are not shown.
 
    ticks parent  name
-      2   15.4%  v8::internal::Runtime_Math_exp(v8::internal::Arguments)
-      2  100.0%    LazyCompile: exp native math.js:41
-      2  100.0%      Script: exp.js
+      3   23.1%  /lib32/libm-2.7.so
+      3  100.0%    LazyCompile: exp native math.js:41
+      3  100.0%      Script: exp.js
 
-      2   15.4%  /lib32/libm-2.7.so
+      2   15.4%  v8::internal::Runtime_Math_exp(v8::internal::Arguments)
       2  100.0%    LazyCompile: exp native math.js:41
       2  100.0%      Script: exp.js
 
@@ -47,10 +46,6 @@
 
       1    7.7%  ffffe000-fffff000
 
-      1    7.7%  fegetexcept
-      1  100.0%    LazyCompile: exp native math.js:41
-      1  100.0%      Script: exp.js
-
       1    7.7%  exp
       1  100.0%    LazyCompile: exp native math.js:41
       1  100.0%      Script: exp.js
diff --git a/test/mjsunit/tools/tickprocessor-test.ignore-unknown b/test/mjsunit/tools/tickprocessor-test.ignore-unknown
index 87beb08..306d646 100644
--- a/test/mjsunit/tools/tickprocessor-test.ignore-unknown
+++ b/test/mjsunit/tools/tickprocessor-test.ignore-unknown
@@ -2,20 +2,19 @@
 
  [Shared libraries]:
    ticks  total  nonlib   name
-      2   18.2%    0.0%  /lib32/libm-2.7.so
+      3   27.3%    0.0%  /lib32/libm-2.7.so
       1    9.1%    0.0%  ffffe000-fffff000
 
  [JavaScript]:
    ticks  total  nonlib   name
-      1    9.1%   12.5%  LazyCompile: exp native math.js:41
+      1    9.1%   14.3%  LazyCompile: exp native math.js:41
 
  [C++]:
    ticks  total  nonlib   name
-      2   18.2%   25.0%  v8::internal::Runtime_Math_exp(v8::internal::Arguments)
-      1    9.1%   12.5%  v8::internal::JSObject::LocalLookupRealNamedProperty(v8::internal::String*, v8::internal::LookupResult*)
-      1    9.1%   12.5%  v8::internal::HashTable<v8::internal::StringDictionaryShape, v8::internal::String*>::FindEntry(v8::internal::String*)
-      1    9.1%   12.5%  fegetexcept
-      1    9.1%   12.5%  exp
+      2   18.2%   28.6%  v8::internal::Runtime_Math_exp(v8::internal::Arguments)
+      1    9.1%   14.3%  v8::internal::JSObject::LocalLookupRealNamedProperty(v8::internal::String*, v8::internal::LookupResult*)
+      1    9.1%   14.3%  v8::internal::HashTable<v8::internal::StringDictionaryShape, v8::internal::String*>::FindEntry(v8::internal::String*)
+      1    9.1%   14.3%  exp
 
  [GC]:
    ticks  total  nonlib   name
@@ -27,11 +26,11 @@
   Callers occupying less than 2.0% are not shown.
 
    ticks parent  name
-      2   18.2%  v8::internal::Runtime_Math_exp(v8::internal::Arguments)
-      2  100.0%    LazyCompile: exp native math.js:41
-      2  100.0%      Script: exp.js
+      3   27.3%  /lib32/libm-2.7.so
+      3  100.0%    LazyCompile: exp native math.js:41
+      3  100.0%      Script: exp.js
 
-      2   18.2%  /lib32/libm-2.7.so
+      2   18.2%  v8::internal::Runtime_Math_exp(v8::internal::Arguments)
       2  100.0%    LazyCompile: exp native math.js:41
       2  100.0%      Script: exp.js
 
@@ -43,10 +42,6 @@
 
       1    9.1%  ffffe000-fffff000
 
-      1    9.1%  fegetexcept
-      1  100.0%    LazyCompile: exp native math.js:41
-      1  100.0%      Script: exp.js
-
       1    9.1%  exp
       1  100.0%    LazyCompile: exp native math.js:41
       1  100.0%      Script: exp.js
diff --git a/test/mjsunit/tools/tickprocessor-test.separate-ic b/test/mjsunit/tools/tickprocessor-test.separate-ic
index 7eb3d9a..3a2041b 100644
--- a/test/mjsunit/tools/tickprocessor-test.separate-ic
+++ b/test/mjsunit/tools/tickprocessor-test.separate-ic
@@ -6,22 +6,21 @@
 
  [Shared libraries]:
    ticks  total  nonlib   name
-      2   15.4%    0.0%  /lib32/libm-2.7.so
+      3   23.1%    0.0%  /lib32/libm-2.7.so
       1    7.7%    0.0%  ffffe000-fffff000
 
  [JavaScript]:
    ticks  total  nonlib   name
-      1    7.7%   10.0%  LoadIC: j
-      1    7.7%   10.0%  LoadIC: i
-      1    7.7%   10.0%  LazyCompile: exp native math.js:41
+      1    7.7%   11.1%  LoadIC: j
+      1    7.7%   11.1%  LoadIC: i
+      1    7.7%   11.1%  LazyCompile: exp native math.js:41
 
  [C++]:
    ticks  total  nonlib   name
-      2   15.4%   20.0%  v8::internal::Runtime_Math_exp(v8::internal::Arguments)
-      1    7.7%   10.0%  v8::internal::JSObject::LocalLookupRealNamedProperty(v8::internal::String*, v8::internal::LookupResult*)
-      1    7.7%   10.0%  v8::internal::HashTable<v8::internal::StringDictionaryShape, v8::internal::String*>::FindEntry(v8::internal::String*)
-      1    7.7%   10.0%  fegetexcept
-      1    7.7%   10.0%  exp
+      2   15.4%   22.2%  v8::internal::Runtime_Math_exp(v8::internal::Arguments)
+      1    7.7%   11.1%  v8::internal::JSObject::LocalLookupRealNamedProperty(v8::internal::String*, v8::internal::LookupResult*)
+      1    7.7%   11.1%  v8::internal::HashTable<v8::internal::StringDictionaryShape, v8::internal::String*>::FindEntry(v8::internal::String*)
+      1    7.7%   11.1%  exp
 
  [GC]:
    ticks  total  nonlib   name
@@ -33,11 +32,11 @@
   Callers occupying less than 2.0% are not shown.
 
    ticks parent  name
-      2   15.4%  v8::internal::Runtime_Math_exp(v8::internal::Arguments)
-      2  100.0%    LazyCompile: exp native math.js:41
-      2  100.0%      Script: exp.js
+      3   23.1%  /lib32/libm-2.7.so
+      3  100.0%    LazyCompile: exp native math.js:41
+      3  100.0%      Script: exp.js
 
-      2   15.4%  /lib32/libm-2.7.so
+      2   15.4%  v8::internal::Runtime_Math_exp(v8::internal::Arguments)
       2  100.0%    LazyCompile: exp native math.js:41
       2  100.0%      Script: exp.js
 
@@ -49,10 +48,6 @@
 
       1    7.7%  ffffe000-fffff000
 
-      1    7.7%  fegetexcept
-      1  100.0%    LazyCompile: exp native math.js:41
-      1  100.0%      Script: exp.js
-
       1    7.7%  exp
       1  100.0%    LazyCompile: exp native math.js:41
       1  100.0%      Script: exp.js
diff --git a/test/mjsunit/tools/tickprocessor.js b/test/mjsunit/tools/tickprocessor.js
index 587106a..00c3fb1 100644
--- a/test/mjsunit/tools/tickprocessor.js
+++ b/test/mjsunit/tools/tickprocessor.js
@@ -31,6 +31,7 @@
 // Files: tools/logreader.js tools/tickprocessor.js
 // Env: TEST_FILE_NAME
 
+
 (function testArgumentsProcessor() {
   var p_default = new ArgumentsProcessor([]);
   assertTrue(p_default.parse());
@@ -69,12 +70,12 @@
       '         U operator delete[](void*)@@GLIBCXX_3.4',
       '08049790 T _init',
       '08049f50 T _start',
-      '08139150 t v8::internal::Runtime_StringReplaceRegExpWithString(v8::internal::Arguments)',
-      '08139ca0 T v8::internal::Runtime::GetElementOrCharAt(v8::internal::Handle<v8::internal::Object>, unsigned int)',
-      '0813a0b0 t v8::internal::Runtime_DebugGetPropertyDetails(v8::internal::Arguments)',
-      '08181d30 W v8::internal::RegExpMacroAssemblerIrregexp::stack_limit_slack()',
+      '08139150 00000b4b t v8::internal::Runtime_StringReplaceRegExpWithString(v8::internal::Arguments)',
+      '08139ca0 000003f1 T v8::internal::Runtime::GetElementOrCharAt(v8::internal::Handle<v8::internal::Object>, unsigned int)',
+      '0813a0b0 00000855 t v8::internal::Runtime_DebugGetPropertyDetails(v8::internal::Arguments)',
+      '0818b220 00000036 W v8::internal::RegExpMacroAssembler::CheckPosition(int, v8::internal::Label*)',
       '         w __gmon_start__',
-      '081f08a0 B stdout'
+      '081f08a0 00000004 B stdout\n'
     ].join('\n'), ''];
   };
 
@@ -87,22 +88,22 @@
   assertEquals(
       [['_init', 0x08049790, 0x08049f50],
        ['_start', 0x08049f50, 0x08139150],
-       ['v8::internal::Runtime_StringReplaceRegExpWithString(v8::internal::Arguments)', 0x08139150, 0x08139ca0],
-       ['v8::internal::Runtime::GetElementOrCharAt(v8::internal::Handle<v8::internal::Object>, unsigned int)', 0x08139ca0, 0x0813a0b0],
-       ['v8::internal::Runtime_DebugGetPropertyDetails(v8::internal::Arguments)', 0x0813a0b0, 0x08181d30],
-       ['v8::internal::RegExpMacroAssemblerIrregexp::stack_limit_slack()', 0x08181d30, 0x081ee000]],
+       ['v8::internal::Runtime_StringReplaceRegExpWithString(v8::internal::Arguments)', 0x08139150, 0x08139150 + 0xb4b],
+       ['v8::internal::Runtime::GetElementOrCharAt(v8::internal::Handle<v8::internal::Object>, unsigned int)', 0x08139ca0, 0x08139ca0 + 0x3f1],
+       ['v8::internal::Runtime_DebugGetPropertyDetails(v8::internal::Arguments)', 0x0813a0b0, 0x0813a0b0 + 0x855],
+       ['v8::internal::RegExpMacroAssembler::CheckPosition(int, v8::internal::Label*)', 0x0818b220, 0x0818b220 + 0x36]],
       shell_syms);
 
   // libc library
   UnixCppEntriesProvider.prototype.loadSymbols = function(libName) {
     this.symbols = [[
-        '000162a0 T __libc_init_first',
-        '0002a5f0 T __isnan',
-        '0002a5f0 W isnan',
-        '0002aaa0 W scalblnf',
-        '0002aaa0 W scalbnf',
-        '0011a340 T __libc_thread_freeres',
-        '00128860 R _itoa_lower_digits'].join('\n'), ''];
+        '000162a0 00000005 T __libc_init_first',
+        '0002a5f0 0000002d T __isnan',
+        '0002a5f0 0000002d W isnan',
+        '0002aaa0 0000000d W scalblnf',
+        '0002aaa0 0000000d W scalbnf',
+        '0011a340 00000048 T __libc_thread_freeres',
+        '00128860 00000024 R _itoa_lower_digits\n'].join('\n'), ''];
   };
   var libc_prov = new UnixCppEntriesProvider();
   var libc_syms = [];
@@ -110,17 +111,81 @@
       function (name, start, end) {
         libc_syms.push(Array.prototype.slice.apply(arguments, [0]));
       });
-  assertEquals(
-      [['__libc_init_first', 0xf7c5c000 + 0x000162a0, 0xf7c5c000 + 0x0002a5f0],
-       ['isnan', 0xf7c5c000 + 0x0002a5f0, 0xf7c5c000 + 0x0002aaa0],
-       ['scalbnf', 0xf7c5c000 + 0x0002aaa0, 0xf7c5c000 + 0x0011a340],
-       ['__libc_thread_freeres', 0xf7c5c000 + 0x0011a340, 0xf7da5000]],
-      libc_syms);
+  var libc_ref_syms = [['__libc_init_first', 0x000162a0, 0x000162a0 + 0x5],
+       ['__isnan', 0x0002a5f0, 0x0002a5f0 + 0x2d],
+       ['scalblnf', 0x0002aaa0, 0x0002aaa0 + 0xd],
+       ['__libc_thread_freeres', 0x0011a340, 0x0011a340 + 0x48]];
+  for (var i = 0; i < libc_ref_syms.length; ++i) {
+    libc_ref_syms[i][1] += 0xf7c5c000;
+    libc_ref_syms[i][2] += 0xf7c5c000;
+  }
+  assertEquals(libc_ref_syms, libc_syms);
 
   UnixCppEntriesProvider.prototype.loadSymbols = oldLoadSymbols;
 })();
 
 
+(function testMacCppEntriesProvider() {
+  var oldLoadSymbols = MacCppEntriesProvider.prototype.loadSymbols;
+
+  // shell executable
+  MacCppEntriesProvider.prototype.loadSymbols = function(libName) {
+    this.symbols = [[
+      '         U operator delete[]',
+      '00001000 A __mh_execute_header',
+      '00001b00 T start',
+      '00001b40 t dyld_stub_binding_helper',
+      '0011b710 T v8::internal::RegExpMacroAssembler::CheckPosition',
+      '00134250 t v8::internal::Runtime_StringReplaceRegExpWithString',
+      '00137220 T v8::internal::Runtime::GetElementOrCharAt',
+      '00137400 t v8::internal::Runtime_DebugGetPropertyDetails',
+      '001c1a80 b _private_mem\n'
+    ].join('\n'), ''];
+  };
+
+  var shell_prov = new MacCppEntriesProvider();
+  var shell_syms = [];
+  shell_prov.parseVmSymbols('shell', 0x00001b00, 0x00163156,
+      function (name, start, end) {
+        shell_syms.push(Array.prototype.slice.apply(arguments, [0]));
+      });
+  assertEquals(
+      [['start', 0x00001b00, 0x00001b40],
+       ['dyld_stub_binding_helper', 0x00001b40, 0x0011b710],
+       ['v8::internal::RegExpMacroAssembler::CheckPosition', 0x0011b710, 0x00134250],
+       ['v8::internal::Runtime_StringReplaceRegExpWithString', 0x00134250, 0x00137220],
+       ['v8::internal::Runtime::GetElementOrCharAt', 0x00137220, 0x00137400],
+       ['v8::internal::Runtime_DebugGetPropertyDetails', 0x00137400, 0x00163156]],
+      shell_syms);
+
+  // stdc++ library
+  MacCppEntriesProvider.prototype.loadSymbols = function(libName) {
+    this.symbols = [[
+        '0000107a T __gnu_cxx::balloc::__mini_vector<std::pair<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*, __gnu_cxx::bitmap_allocator<char>::_Alloc_block*> >::__mini_vector',
+        '0002c410 T std::basic_streambuf<char, std::char_traits<char> >::pubseekoff',
+        '0002c488 T std::basic_streambuf<char, std::char_traits<char> >::pubseekpos',
+        '000466aa T ___cxa_pure_virtual\n'].join('\n'), ''];
+  };
+  var stdc_prov = new MacCppEntriesProvider();
+  var stdc_syms = [];
+  stdc_prov.parseVmSymbols('stdc++', 0x95728fb4, 0x95770005,
+      function (name, start, end) {
+        stdc_syms.push(Array.prototype.slice.apply(arguments, [0]));
+      });
+  var stdc_ref_syms = [['__gnu_cxx::balloc::__mini_vector<std::pair<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*, __gnu_cxx::bitmap_allocator<char>::_Alloc_block*> >::__mini_vector', 0x0000107a, 0x0002c410],
+       ['std::basic_streambuf<char, std::char_traits<char> >::pubseekoff', 0x0002c410, 0x0002c488],
+       ['std::basic_streambuf<char, std::char_traits<char> >::pubseekpos', 0x0002c488, 0x000466aa],
+       ['___cxa_pure_virtual', 0x000466aa, 0x95770005 - 0x95728fb4]];
+  for (var i = 0; i < stdc_ref_syms.length; ++i) {
+    stdc_ref_syms[i][1] += 0x95728fb4;
+    stdc_ref_syms[i][2] += 0x95728fb4;
+  }
+  assertEquals(stdc_ref_syms, stdc_syms);
+
+  MacCppEntriesProvider.prototype.loadSymbols = oldLoadSymbols;
+})();
+
+
 (function testWindowsCppEntriesProvider() {
   var oldLoadSymbols = WindowsCppEntriesProvider.prototype.loadSymbols;
 
@@ -174,8 +239,8 @@
          ['v8::internal::HashTable<v8::internal::StringDictionaryShape, v8::internal::String*>::FindEntry(v8::internal::String*)', 0x080f8210, 0x080f8800],
          ['v8::internal::Runtime_Math_exp(v8::internal::Arguments)', 0x08123b20, 0x08123b80]],
     '/lib32/libm-2.7.so':
-        [['exp', startAddr + 0x00009e80, startAddr + 0x00009f30],
-         ['fegetexcept', startAddr + 0x000061e0, startAddr + 0x00008b10]],
+        [['exp', startAddr + 0x00009e80, startAddr + 0x00009e80 + 0xa3],
+         ['fegetexcept', startAddr + 0x000061e0, startAddr + 0x000061e0 + 0x15]],
     'ffffe000-fffff000': []};
   assertTrue(name in symbols);
   var syms = symbols[name];
@@ -191,6 +256,7 @@
   var outputPos = 0;
   var diffs = this.diffs = [];
   var realOut = this.realOut = [];
+  var unexpectedOut = this.unexpectedOut = null;
 
   this.oldPrint = print;
   print = function(str) {
@@ -198,13 +264,15 @@
     for (var i = 0; i < strSplit.length; ++i) {
       s = strSplit[i];
       realOut.push(s);
-      assertTrue(outputPos < expectedOut.length,
-          'unexpected output: "' + s + '"');
-      if (expectedOut[outputPos] != s) {
-        diffs.push('line ' + outputPos + ': expected <' +
-                   expectedOut[outputPos] + '> found <' + s + '>\n');
+      if (outputPos < expectedOut.length) {
+        if (expectedOut[outputPos] != s) {
+          diffs.push('line ' + outputPos + ': expected <' +
+                     expectedOut[outputPos] + '> found <' + s + '>\n');
+        }
+        outputPos++;
+      } else {
+        unexpectedOut = true;
       }
-      outputPos++;
     }
   };
 };
@@ -218,9 +286,10 @@
 
 PrintMonitor.prototype.finish = function() {
   print = this.oldPrint;
-  if (this.diffs.length > 0) {
+  if (this.diffs.length > 0 || this.unexpectedOut != null) {
     print(this.realOut.join('\n'));
     assertEquals([], this.diffs);
+    assertNull(this.unexpectedOut);
   }
 };
 
diff --git a/test/mozilla/mozilla.status b/test/mozilla/mozilla.status
index 13ae29c..538b0a8 100644
--- a/test/mozilla/mozilla.status
+++ b/test/mozilla/mozilla.status
@@ -321,10 +321,6 @@
 js1_5/Regress/regress-172699: FAIL_OK
 
 
-# Calls regexp objects with function call syntax; non-ECMA behavior.
-js1_2/Objects/toString-001: FAIL_OK
-
-
 # Assumes that the prototype of a function is enumerable. Non-ECMA,
 # see section 15.3.3.1, page 86.
 ecma/GlobalObject/15.1.2.2-1: FAIL_OK
@@ -338,6 +334,7 @@
 # Tests that rely on specific details of function decompilation or
 # print strings for errors. Non-ECMA behavior.
 js1_2/function/tostring-2: FAIL_OK
+js1_2/Objects/toString-001: FAIL_OK
 js1_5/Exceptions/regress-332472: FAIL_OK
 js1_5/Regress/regress-173067: FAIL_OK
 js1_5/Regress/regress-355556: FAIL_OK
@@ -561,23 +558,11 @@
 ecma_3/Function/regress-137181: FAIL
 
 
-# Calls regexp objects with function call syntax; non-ECMA behavior.
-ecma_2/RegExp/regress-001: FAIL
-js1_2/regexp/regress-6359: FAIL
-js1_2/regexp/regress-9141: FAIL
-js1_5/Regress/regress-224956: FAIL
-js1_5/Regress/regress-325925: FAIL
-js1_2/regexp/simple_form: FAIL
-
-
 # Tests that rely on specific details of function decompilation or
 # print strings for errors. Non-ECMA behavior.
 js1_4/Regress/function-003: FAIL
 
 
-# Relies on JavaScript 1.2 / 1.3 deprecated features.
-js1_2/function/regexparg-1: FAIL
-
 # 'export' and 'import' are not keywords in V8.
 ecma_2/Exceptions/lexical-010: FAIL
 ecma_2/Exceptions/lexical-022: FAIL
diff --git a/tools/codemap.js b/tools/codemap.js
index d6df7fa..404127f 100644
--- a/tools/codemap.js
+++ b/tools/codemap.js
@@ -48,11 +48,16 @@
   this.dynamicsNameGen_ = new devtools.profiler.CodeMap.NameGenerator();
 
   /**
-   * Static code entries. Used for libraries code.
+   * Static code entries. Used for statically compiled code.
    */
   this.statics_ = new goog.structs.SplayTree();
 
   /**
+   * Library entries. Used for entire statically linked libraries.
+   */
+  this.libraries_ = new goog.structs.SplayTree();
+
+  /**
    * Map of memory pages occupied with static code.
    */
   this.pages_ = [];
@@ -108,6 +113,19 @@
 
 
 /**
+ * Adds a library entry.
+ *
+ * @param {number} start The starting address.
+ * @param {devtools.profiler.CodeMap.CodeEntry} codeEntry Code entry object.
+ */
+devtools.profiler.CodeMap.prototype.addLibrary = function(
+    start, codeEntry) {
+  this.markPages_(start, start + codeEntry.size);
+  this.libraries_.insert(start, codeEntry);
+};
+
+
+/**
  * Adds a static code entry.
  *
  * @param {number} start The starting address.
@@ -115,7 +133,6 @@
  */
 devtools.profiler.CodeMap.prototype.addStaticCode = function(
     start, codeEntry) {
-  this.markPages_(start, start + codeEntry.size);
   this.statics_.insert(start, codeEntry);
 };
 
@@ -157,7 +174,10 @@
 devtools.profiler.CodeMap.prototype.findEntry = function(addr) {
   var pageAddr = addr >>> devtools.profiler.CodeMap.PAGE_ALIGNMENT;
   if (pageAddr in this.pages_) {
-    return this.findInTree_(this.statics_, addr);
+    // Static code entries can contain "holes" of unnamed code.
+    // In this case, the whole library is assigned to this address.
+    return this.findInTree_(this.statics_, addr) ||
+        this.findInTree_(this.libraries_, addr);
   }
   var min = this.dynamics_.findMin();
   var max = this.dynamics_.findMax();
@@ -176,7 +196,7 @@
 
 
 /**
- * Returns an array of all dynamic code entries, including deleted ones.
+ * Returns an array of all dynamic code entries.
  */
 devtools.profiler.CodeMap.prototype.getAllDynamicEntries = function() {
   return this.dynamics_.exportValues();
@@ -192,6 +212,14 @@
 
 
 /**
+ * Returns an array of all library entries.
+ */
+devtools.profiler.CodeMap.prototype.getAllLibrariesEntries = function() {
+  return this.libraries_.exportValues();
+};
+
+
+/**
  * Creates a code entry object.
  *
  * @param {number} size Code entry size in bytes.
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index b11a7ff..fc49620 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -254,6 +254,7 @@
         '../../src/frames-inl.h',
         '../../src/frames.cc',
         '../../src/frames.h',
+        '../../src/frame-element.cc',
         '../../src/frame-element.h',
         '../../src/func-name-inferrer.cc',
         '../../src/func-name-inferrer.h',
diff --git a/tools/mac-nm b/tools/mac-nm
new file mode 100755
index 0000000..9c18177
--- /dev/null
+++ b/tools/mac-nm
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+# This script is a wrapper for the OS X nm(1) tool. nm(1) doesn't perform C++
+# function names demangling, so we're piping its output to c++filt(1), which does.
+# But c++filt(1) comes with XCode (as a part of GNU binutils), so it isn't
+# guaranteed to exist on a system.
+#
+# An alternative approach is to perform demangling in tick processor, but
+# for GNU C++ ABI this is a complex process (see cp-demangle.c sources), and
+# can't be done partially, because term boundaries are plain text symbols, such
+# as 'N', 'E', so one can't just do a search through a function name, it really
+# needs to be parsed, which requires a lot of knowledge to be coded in.
+
+if [ "`which c++filt`" == "" ]; then
+  nm $@
+else
+  nm $@ | c++filt -p -i
+fi
diff --git a/tools/mac-tick-processor b/tools/mac-tick-processor
new file mode 100755
index 0000000..5fba622
--- /dev/null
+++ b/tools/mac-tick-processor
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+# A wrapper script to call 'linux-tick-processor' with Mac-specific settings.
+
+tools_path=`cd $(dirname "$0");pwd`
+$tools_path/linux-tick-processor --mac --nm=$tools_path/mac-nm $@
diff --git a/tools/process-heap-prof.py b/tools/process-heap-prof.py
new file mode 100755
index 0000000..b8ab2d3
--- /dev/null
+++ b/tools/process-heap-prof.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+#
+# Copyright 2009 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This is a utility for converting V8 heap logs into .hp files that can
+# be further processed using 'hp2ps' tool (bundled with GHC and Valgrind)
+# to produce heap usage histograms.
+
+# Sample usage:
+# $ ./shell --log-gc script.js
+# $ tools/process-heap-prof.py v8.log | hp2ps -c > script-heap-graph.ps
+# ('-c' enables color, see hp2ps manual page for more options)
+
+import csv, sys, time
+
+def process_logfile(filename):
+  first_call_time = None
+  sample_time = 0.0
+  sampling = False
+  try:
+    logfile = open(filename, 'rb')
+    try:
+      logreader = csv.reader(logfile)
+
+      print('JOB "v8"')
+      print('DATE "%s"' % time.asctime(time.localtime()))
+      print('SAMPLE_UNIT "seconds"')
+      print('VALUE_UNIT "bytes"')
+
+      for row in logreader:
+        if row[0] == 'heap-sample-begin' and row[1] == 'Heap':
+          sample_time = float(row[3])/1000.0
+          if first_call_time == None:
+            first_call_time = sample_time
+          sample_time -= first_call_time
+          print('BEGIN_SAMPLE %.2f' % sample_time)
+          sampling = True
+        elif row[0] == 'heap-sample-end' and row[1] == 'Heap':
+          print('END_SAMPLE %.2f' % sample_time)
+          sampling = False
+        elif row[0] == 'heap-sample-item' and sampling:
+          print('%s %d' % (row[1], int(row[3])))
+    finally:
+      logfile.close()
+  except:
+    sys.exit('can\'t open %s' % filename)
+
+process_logfile(sys.argv[1])
diff --git a/tools/profile.js b/tools/profile.js
index 614c635..db4b542 100644
--- a/tools/profile.js
+++ b/tools/profile.js
@@ -86,7 +86,23 @@
 
 
 /**
- * Registers static (library) code entry.
+ * Registers a library.
+ *
+ * @param {string} name Code entry name.
+ * @param {number} startAddr Starting address.
+ * @param {number} endAddr Ending address.
+ */
+devtools.profiler.Profile.prototype.addLibrary = function(
+    name, startAddr, endAddr) {
+  var entry = new devtools.profiler.CodeMap.CodeEntry(
+      endAddr - startAddr, name);
+  this.codeMap_.addLibrary(startAddr, entry);
+  return entry;
+};
+
+
+/**
+ * Registers statically compiled code entry.
  *
  * @param {string} name Code entry name.
  * @param {number} startAddr Starting address.
diff --git a/tools/test.py b/tools/test.py
index 05eb9fd..c1b8b80 100755
--- a/tools/test.py
+++ b/tools/test.py
@@ -1136,6 +1136,7 @@
     # was found, set the arch to the guess.
     if options.arch == 'none':
       options.arch = ARCH_GUESS
+    options.scons_flags.append("arch=" + options.arch)
   return True
 
 
diff --git a/tools/tickprocessor-driver.js b/tools/tickprocessor-driver.js
index f7cfd13..dc67796 100644
--- a/tools/tickprocessor-driver.js
+++ b/tools/tickprocessor-driver.js
@@ -37,11 +37,15 @@
   }
 }
 
+var entriesProviders = {
+  'unix': UnixCppEntriesProvider,
+  'windows': WindowsCppEntriesProvider,
+  'mac': MacCppEntriesProvider
+};
 
 var params = processArguments(arguments);
 var tickProcessor = new TickProcessor(
-  params.platform == 'unix' ? new UnixCppEntriesProvider(params.nm) :
-    new WindowsCppEntriesProvider(),
+  new (entriesProviders[params.platform])(params.nm),
   params.separateIc,
   params.ignoreUnknown,
   params.stateFilter);
diff --git a/tools/tickprocessor.js b/tools/tickprocessor.js
index c95a4e6..efd9750 100644
--- a/tools/tickprocessor.js
+++ b/tools/tickprocessor.js
@@ -174,7 +174,7 @@
 
 TickProcessor.prototype.processSharedLibrary = function(
     name, startAddr, endAddr) {
-  var entry = this.profile_.addStaticCode(name, startAddr, endAddr);
+  var entry = this.profile_.addLibrary(name, startAddr, endAddr);
   this.setCodeType(entry.getName(), 'SHARED_LIB');
 
   var self = this;
@@ -380,14 +380,21 @@
 
   var prevEntry;
 
-  function addPrevEntry(end) {
+  function addEntry(funcInfo) {
     // Several functions can be mapped onto the same address. To avoid
     // creating zero-sized entries, skip such duplicates.
     // Also double-check that function belongs to the library address space.
-    if (prevEntry && prevEntry.start < end &&
-        prevEntry.start >= libStart && end <= libEnd) {
-      processorFunc(prevEntry.name, prevEntry.start, end);
+    if (prevEntry && !prevEntry.end &&
+        prevEntry.start < funcInfo.start &&
+        prevEntry.start >= libStart && funcInfo.start <= libEnd) {
+      processorFunc(prevEntry.name, prevEntry.start, funcInfo.start);
     }
+    if (funcInfo.end &&
+        (!prevEntry || prevEntry.start != funcInfo.start) &&
+        funcInfo.start >= libStart && funcInfo.end <= libEnd) {
+      processorFunc(funcInfo.name, funcInfo.start, funcInfo.end);
+    }
+    prevEntry = funcInfo;
   }
 
   while (true) {
@@ -400,10 +407,12 @@
     if (funcInfo.start < libStart && funcInfo.start < libEnd - libStart) {
       funcInfo.start += libStart;
     }
-    addPrevEntry(funcInfo.start);
-    prevEntry = funcInfo;
+    if (funcInfo.size) {
+      funcInfo.end = funcInfo.start + funcInfo.size;
+    }
+    addEntry(funcInfo);
   }
-  addPrevEntry(libEnd);
+  addEntry({name: '', start: libEnd});
 };
 
 
@@ -420,19 +429,17 @@
   this.symbols = [];
   this.parsePos = 0;
   this.nmExec = nmExec;
+  this.FUNC_RE = /^([0-9a-fA-F]{8}) ([0-9a-fA-F]{8} )?[tTwW] (.*)$/;
 };
 inherits(UnixCppEntriesProvider, CppEntriesProvider);
 
 
-UnixCppEntriesProvider.FUNC_RE = /^([0-9a-fA-F]{8}) [tTwW] (.*)$/;
-
-
 UnixCppEntriesProvider.prototype.loadSymbols = function(libName) {
   this.parsePos = 0;
   try {
     this.symbols = [
-      os.system(this.nmExec, ['-C', '-n', libName], -1, -1),
-      os.system(this.nmExec, ['-C', '-n', '-D', libName], -1, -1)
+      os.system(this.nmExec, ['-C', '-n', '-S', libName], -1, -1),
+      os.system(this.nmExec, ['-C', '-n', '-S', '-D', libName], -1, -1)
     ];
   } catch (e) {
     // If the library cannot be found on this system let's not panic.
@@ -454,8 +461,34 @@
 
   var line = this.symbols[0].substring(this.parsePos, lineEndPos);
   this.parsePos = lineEndPos + 1;
-  var fields = line.match(UnixCppEntriesProvider.FUNC_RE);
-  return fields ? { name: fields[2], start: parseInt(fields[1], 16) } : null;
+  var fields = line.match(this.FUNC_RE);
+  var funcInfo = null;
+  if (fields) {
+    funcInfo = { name: fields[3], start: parseInt(fields[1], 16) };
+    if (fields[2]) {
+      funcInfo.size = parseInt(fields[2], 16);
+    }
+  }
+  return funcInfo;
+};
+
+
+function MacCppEntriesProvider(nmExec) {
+  UnixCppEntriesProvider.call(this, nmExec);
+  // Note an empty group. It is required, as UnixCppEntriesProvider expects 3 groups.
+  this.FUNC_RE = /^([0-9a-fA-F]{8}) ()[iItT] (.*)$/;
+};
+inherits(MacCppEntriesProvider, UnixCppEntriesProvider);
+
+
+MacCppEntriesProvider.prototype.loadSymbols = function(libName) {
+  this.parsePos = 0;
+  try {
+    this.symbols = [os.system(this.nmExec, ['-n', '-f', libName], -1, -1), ''];
+  } catch (e) {
+    // If the library cannot be found on this system let's not panic.
+    this.symbols = '';
+  }
 };
 
 
@@ -538,6 +571,8 @@
         'Specify that we are running on *nix platform'],
     '--windows': ['platform', 'windows',
         'Specify that we are running on Windows platform'],
+    '--mac': ['platform', 'mac',
+        'Specify that we are running on Mac OS X platform'],
     '--nm': ['nm', 'nm',
         'Specify the \'nm\' executable to use (e.g. --nm=/my_dir/nm)']
   };
diff --git a/tools/v8.xcodeproj/project.pbxproj b/tools/v8.xcodeproj/project.pbxproj
old mode 100755
new mode 100644
index 368ba3f..da155b8
--- a/tools/v8.xcodeproj/project.pbxproj
+++ b/tools/v8.xcodeproj/project.pbxproj
@@ -59,6 +59,8 @@
 		896FD03A0E78D717003DFB6A /* libv8-arm.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 89F23C870E78D5B2006B2466 /* libv8-arm.a */; };
 		897F767F0E71B690007ACF34 /* shell.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1B50E719C0900D62E90 /* shell.cc */; };
 		897F76850E71B6B1007ACF34 /* libv8.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 8970F2F00E719FB2006AE7B5 /* libv8.a */; };
+		8981F6001010501900D1520E /* frame-element.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8981F5FE1010500F00D1520E /* frame-element.cc */; };
+		8981F6011010502800D1520E /* frame-element.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8981F5FE1010500F00D1520E /* frame-element.cc */; };
 		898BD20E0EF6CC930068B00A /* debug-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 898BD20D0EF6CC850068B00A /* debug-ia32.cc */; };
 		898BD20F0EF6CC9A0068B00A /* debug-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 898BD20C0EF6CC850068B00A /* debug-arm.cc */; };
 		89A15C7B0EE466EB00B48DEB /* regexp-macro-assembler-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C720EE466D000B48DEB /* regexp-macro-assembler-ia32.cc */; };
@@ -503,6 +505,8 @@
 		897FF1B70E719C2E00D62E90 /* macros.py */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.python; name = macros.py; path = ../src/macros.py; sourceTree = "<group>"; };
 		897FF32F0FAA0ED200136CF6 /* version.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = version.cc; sourceTree = "<group>"; };
 		897FF3300FAA0ED200136CF6 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = "<group>"; };
+		8981F5FE1010500F00D1520E /* frame-element.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "frame-element.cc"; sourceTree = "<group>"; };
+		8981F5FF1010500F00D1520E /* frame-element.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "frame-element.h"; sourceTree = "<group>"; };
 		898BD20C0EF6CC850068B00A /* debug-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "debug-arm.cc"; path = "arm/debug-arm.cc"; sourceTree = "<group>"; };
 		898BD20D0EF6CC850068B00A /* debug-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "debug-ia32.cc"; path = "ia32/debug-ia32.cc"; sourceTree = "<group>"; };
 		89A15C630EE4661A00B48DEB /* bytecodes-irregexp.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "bytecodes-irregexp.h"; sourceTree = "<group>"; };
@@ -700,6 +704,8 @@
 				89471C7F0EB23EE400B6874B /* flag-definitions.h */,
 				897FF1350E719B8F00D62E90 /* flags.cc */,
 				897FF1360E719B8F00D62E90 /* flags.h */,
+				8981F5FE1010500F00D1520E /* frame-element.cc */,
+				8981F5FF1010500F00D1520E /* frame-element.h */,
 				897FF1370E719B8F00D62E90 /* frames-arm.cc */,
 				897FF1380E719B8F00D62E90 /* frames-arm.h */,
 				897FF1390E719B8F00D62E90 /* frames-ia32.cc */,
@@ -1196,6 +1202,7 @@
 				58950D670F5551C400F3E8BA /* virtual-frame-ia32.cc in Sources */,
 				89A88E2E0E71A6D60043BA31 /* zone.cc in Sources */,
 				9F4B7B890FCC877A00DC4117 /* log-utils.cc in Sources */,
+				8981F6001010501900D1520E /* frame-element.cc in Sources */,
 			);
 			runOnlyForDeploymentPostprocessing = 0;
 		};
@@ -1300,6 +1307,7 @@
 				58950D690F5551CE00F3E8BA /* virtual-frame-arm.cc in Sources */,
 				89F23C820E78D5B2006B2466 /* zone.cc in Sources */,
 				9F4B7B8A0FCC877A00DC4117 /* log-utils.cc in Sources */,
+				8981F6011010502800D1520E /* frame-element.cc in Sources */,
 			);
 			runOnlyForDeploymentPostprocessing = 0;
 		};
diff --git a/tools/visual_studio/v8_base.vcproj b/tools/visual_studio/v8_base.vcproj
index bfdcec9..ece631a 100644
--- a/tools/visual_studio/v8_base.vcproj
+++ b/tools/visual_studio/v8_base.vcproj
@@ -397,6 +397,14 @@
 				>
 			</File>
 			<File
+				RelativePath="..\..\src\frame-element.cc"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\frame-element.h"
+				>
+			</File>
+			<File
 				RelativePath="..\..\src\ia32\frames-ia32.cc"
 				>
 			</File>
@@ -409,10 +417,6 @@
 				>
 			</File>
 			<File
-				RelativePath="..\..\src\frame-element.h"
-				>
-			</File>
-			<File
 				RelativePath="..\..\src\frames.cc"
 				>
 			</File>
diff --git a/tools/visual_studio/v8_base_arm.vcproj b/tools/visual_studio/v8_base_arm.vcproj
index 8ebe386..d73747e 100644
--- a/tools/visual_studio/v8_base_arm.vcproj
+++ b/tools/visual_studio/v8_base_arm.vcproj
@@ -401,6 +401,14 @@
 				>
 			</File>
 			<File
+				RelativePath="..\..\src\frame-element.cc"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\frame-element.h"
+				>
+			</File>
+			<File
 				RelativePath="..\..\src\arm\frames-arm.cc"
 				>
 			</File>