Update V8 to r3121 as required for the WebKit update.
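
This roll adds several embedder-visible APIs (String::Concat,
Integer::NewFromUnsigned, V8::GetHeapStatistics, and a no-argument
IdleNotification). As a quick orientation, a minimal sketch of the two
new value constructors, assuming an initialized VM and an open
HandleScope:

    v8::HandleScope scope;
    v8::Local<v8::String> ab = v8::String::Concat(v8::String::New("a"),
                                                  v8::String::New("b"));
    // Values >= 2^31 no longer truncate; they come back as heap numbers.
    v8::Local<v8::Integer> big = v8::Integer::NewFromUnsigned(3000000000u);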

Change-Id: Ic53e0aef9a9eb9b71ee7d25a8aef61520bba899c
diff --git a/Android.v8common.mk b/Android.v8common.mk
index b8a8e06..2650155 100644
--- a/Android.v8common.mk
+++ b/Android.v8common.mk
@@ -20,6 +20,7 @@
 	src/disassembler.cc \
 	src/execution.cc \
 	src/factory.cc \
+	src/fast-codegen.cc \
 	src/flags.cc \
 	src/frame-element.cc \
 	src/frames.cc \
@@ -74,6 +75,7 @@
 		src/arm/codegen-arm.cc \
 		src/arm/cpu-arm.cc \
 		src/arm/disasm-arm.cc \
+		src/arm/fast-codegen-arm.cc \
 		src/arm/frames-arm.cc \
 		src/arm/ic-arm.cc \
 		src/arm/jump-target-arm.cc \
diff --git a/ChangeLog b/ChangeLog
index 8c74591..d13d74f 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,75 @@
+2009-10-16: Version 1.3.16
+
+        X64: Convert smis to hold 32 bits of payload.
+
+        Introduce v8::Integer::NewFromUnsigned method.
+
+        Add missing null check in Context::GetCurrent.
+
+        Add trim, trimLeft and trimRight methods to String.
+        Patch by Jan de Mooij <jandemooij@gmail.com>
+
+        Implement ES5 Array.isArray.
+        Patch by Jan de Mooij <jandemooij@gmail.com>
+
+        Skip access checks for hidden properties.
+
+        Add String::Concat(Handle<String> left, Handle<String> right) to the V8 API.
+
+        Fix GYP-based builds of V8.
+
+
+2009-10-07: Version 1.3.15
+
+        Expanded the maximum size of the code space to 512MB for 64-bit mode.
+
+        Fixed a crash bug that happened when starting profiling (issue
+        http://crbug.com/23768).
+
+
+2009-10-07: Version 1.3.14
+
+        Added GetRealNamedProperty to the API to look up real properties
+        located on the object or in the prototype chain, skipping any
+        interceptors.
+
+        Fixed the stack limits setting API to work correctly with threads. The
+        stack limit now needs to be set for each thread which is used with V8.
+
+        Removed the high-priority flag from IdleNotification().
+
+        Ensure V8 is initialized before locking and unlocking threads.
+
+        Implemented a new JavaScript minifier for compressing the source of
+        the built-in JavaScript. This removes non-open-source code by Douglas
+        Crockford from the project.
+
+        Added a missing optimization in StringCharAt.
+
+        Fixed some flaky socket tests.
+
+        Change by Alexander Botero-Lowry to fix profiler sampling on FreeBSD
+        in 64-bit mode.
+
+        Fixed memory leaks in the thread management code.
+
+        Fixed the result of assignment to a pixel array. The assigned value
+        is now the result.
+
+        Error reporting for invalid left-hand sides in for-in statements, pre-
+        and postfix count expressions, and assignments now matches the JSC
+        behavior in Safari 4.
+
+        Follow the spec in disallowing function declarations without a name.
+
+        Always allocate code objects within a 2 GB range. On the x64
+        architecture this makes it possible to use near calls (32-bit
+        displacement) in Code objects.
+
+        Optimized array construction ported to x64 and ARM architectures.
+
+        [ES5] Changed Object.keys to return strings for element indices.
+
+
 2009-09-23: Version 1.3.13
 
         Fixed uninitialized memory problem.
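
The trim/trimLeft/trimRight and Array.isArray additions are visible to
script rather than to the API; a sketch of exercising them through the
embedding API, assuming a freshly created context:

    v8::HandleScope scope;
    v8::Persistent<v8::Context> context = v8::Context::New();
    v8::Context::Scope context_scope(context);
    v8::Handle<v8::Script> script = v8::Script::Compile(v8::String::New(
        "Array.isArray([1, 2, 3]) && '  x  '.trim() === 'x'"));
    bool both_present = script->Run()->BooleanValue();
    context.Dispose();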
diff --git a/SConstruct b/SConstruct
old mode 100644
new mode 100755
index b5aa7ab..2b2ce1d
--- a/SConstruct
+++ b/SConstruct
@@ -373,7 +373,8 @@
       'CPPDEFINES': ['V8_TARGET_ARCH_IA32']
     },
     'arch:x64': {
-      'CPPDEFINES':   ['V8_TARGET_ARCH_X64']
+      'CPPDEFINES':   ['V8_TARGET_ARCH_X64'],
+      'LINKFLAGS': ['/STACK:2091752']
     },
   }
 }
@@ -474,7 +475,7 @@
     },
     'arch:x64': {
       'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
-      'LINKFLAGS': ['/MACHINE:X64']
+      'LINKFLAGS': ['/MACHINE:X64', '/STACK:2091752']
     },
     'mode:debug': {
       'CCFLAGS':   ['/Od'],
diff --git a/V8_MERGE_REVISION b/V8_MERGE_REVISION
index f85fd48..613ecd4 100644
--- a/V8_MERGE_REVISION
+++ b/V8_MERGE_REVISION
@@ -1,4 +1,4 @@
 We sync with Chromium release revision, which has both webkit revision and V8 revision.
 
-http://src.chromium.org/svn/branches/187/src@18043
-http://v8.googlecode.com/svn/branches/bleeding_edge@3018
+http://src.chromium.org/svn/branches/229/src/DEPS@30923
+http://v8.googlecode.com/svn/branches/bleeding_edge@3121
diff --git a/include/v8.h b/include/v8.h
index 4992d75..b2a3fb7 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -756,7 +756,7 @@
   /** JS == */
   bool Equals(Handle<Value> that) const;
   bool StrictEquals(Handle<Value> that) const;
-  
+
  private:
   inline bool QuickIsString() const;
   bool FullIsString() const;
@@ -919,6 +919,12 @@
   static Local<String> NewSymbol(const char* data, int length = -1);
 
   /**
+   * Creates a new string by concatenating the left and the right strings
+   * passed in as parameters.
+   */
+  static Local<String> Concat(Handle<String> left, Handle<String> right);
+
+  /**
    * Creates a new external string using the data defined in the given
    * resource. The resource is deleted when the external string is no
    * longer live on V8's heap. The caller of this function should not
@@ -1036,7 +1042,7 @@
     Value(const Value&);
     void operator=(const Value&);
   };
-  
+
  private:
   void VerifyExternalStringResource(ExternalStringResource* val) const;
   static void CheckCast(v8::Value* obj);
@@ -1063,6 +1069,7 @@
 class V8EXPORT Integer : public Number {
  public:
   static Local<Integer> New(int32_t value);
+  static Local<Integer> NewFromUnsigned(uint32_t value);
   int64_t Value() const;
   static inline Integer* Cast(v8::Value* obj);
  private:
@@ -1119,6 +1126,16 @@
   DontDelete = 1 << 2
 };
 
+enum ExternalArrayType {
+  kExternalByteArray = 1,
+  kExternalUnsignedByteArray,
+  kExternalShortArray,
+  kExternalUnsignedShortArray,
+  kExternalIntArray,
+  kExternalUnsignedIntArray,
+  kExternalFloatArray
+};
+
 /**
  * A JavaScript object (ECMA-262, 4.3.3)
  */
@@ -1193,7 +1210,7 @@
 
   /** Gets a native pointer from an internal field. */
   inline void* GetPointerFromInternalField(int index);
-  
+
   /** Sets a native pointer in an internal field. */
   void SetPointerInInternalField(int index, void* value);
 
@@ -1246,7 +1263,7 @@
   bool SetHiddenValue(Handle<String> key, Handle<Value> value);
   Local<Value> GetHiddenValue(Handle<String> key);
   bool DeleteHiddenValue(Handle<String> key);
-  
+
   /**
    * Returns true if this is an instance of an api function (one
    * created from a function created from a function template) and has
@@ -1271,16 +1288,28 @@
    */
   void SetIndexedPropertiesToPixelData(uint8_t* data, int length);
 
+  /**
+   * Set the backing store of the indexed properties to be managed by the
+   * embedding layer. Access to the indexed properties will follow the rules
+   * spelled out for the CanvasArray subtypes in the WebGL specification.
+   * Note: The embedding program still owns the data and needs to ensure that
+   *       the backing store is preserved while V8 has a reference.
+   */
+  void SetIndexedPropertiesToExternalArrayData(void* data,
+                                               ExternalArrayType array_type,
+                                               int number_of_elements);
+
   static Local<Object> New();
   static inline Object* Cast(Value* obj);
  private:
   Object();
   static void CheckCast(Value* obj);
   Local<Value> CheckedGetInternalField(int index);
+  void* SlowGetPointerFromInternalField(int index);
 
   /**
    * If quick access to the internal field is possible this method
-   * returns the value.  Otherwise an empty handle is returned. 
+   * returns the value.  Otherwise an empty handle is returned.
    */
   inline Local<Value> UncheckedGetInternalField(int index);
 };
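
Note on the external-array API above: a sketch of its intended use with a
hypothetical embedder-owned buffer; as the comment says, the embedder keeps
ownership and must keep the buffer alive while V8 can still reach the object:

    static float buffer[256];  // hypothetical backing store
    v8::Handle<v8::Object> holder = v8::Object::New();
    holder->SetIndexedPropertiesToExternalArrayData(
        buffer, v8::kExternalFloatArray, 256);
    // Indexed loads and stores on `holder` from script now go straight
    // to `buffer`, following the WebGL CanvasArray rules.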
@@ -2095,6 +2124,29 @@
 
 
 /**
+ * Collection of V8 heap information.
+ *
+ * Instances of this class can be passed to v8::V8::GetHeapStatistics to
+ * get heap statistics from V8.
+ */
+class V8EXPORT HeapStatistics {
+ public:
+  HeapStatistics();
+  size_t total_heap_size() { return total_heap_size_; }
+  size_t used_heap_size() { return used_heap_size_; }
+
+ private:
+  void set_total_heap_size(size_t size) { total_heap_size_ = size; }
+  void set_used_heap_size(size_t size) { used_heap_size_ = size; }
+
+  size_t total_heap_size_;
+  size_t used_heap_size_;
+
+  friend class V8;
+};
+
+
+/**
  * Container class for static utility functions.
  */
 class V8EXPORT V8 {
@@ -2344,17 +2396,20 @@
    */
   static bool Dispose();
 
+  /**
+   * Get statistics about the heap memory usage.
+   */
+  static void GetHeapStatistics(HeapStatistics* heap_statistics);
 
   /**
    * Optional notification that the embedder is idle.
    * V8 uses the notification to reduce memory footprint.
    * This call can be used repeatedly if the embedder remains idle.
-   * \param is_high_priority tells whether the embedder is high priority.
    * Returns true if the embedder should stop calling IdleNotification
    * until real work has been done.  This indicates that V8 has done
    * as much cleanup as it will be able to do.
    */
-  static bool IdleNotification(bool is_high_priority);
+  static bool IdleNotification();
 
   /**
    * Optional notification that the system is running low on memory.
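
A sketch of how an embedder might combine the heap-statistics call with the
new flagless IdleNotification in its idle handler (threshold name is
hypothetical):

    v8::HeapStatistics stats;
    v8::V8::GetHeapStatistics(&stats);  // committed vs. used bytes
    if (stats.used_heap_size() > kIdleGCThreshold) {  // hypothetical constant
      // No priority flag anymore: call until V8 reports no more useful work.
      while (!v8::V8::IdleNotification()) {
        // remain idle
      }
    }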
@@ -2720,12 +2775,37 @@
 const int kHeapObjectTagSize = 2;
 const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1;
 
-
 // Tag information for Smi.
 const int kSmiTag = 0;
 const int kSmiTagSize = 1;
 const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;
 
+template <size_t ptr_size> struct SmiConstants;
+
+// Smi constants for 32-bit systems.
+template <> struct SmiConstants<4> {
+  static const int kSmiShiftSize = 0;
+  static const int kSmiValueSize = 31;
+  static inline int SmiToInt(internal::Object* value) {
+    int shift_bits = kSmiTagSize + kSmiShiftSize;
+    // Throw away top 32 bits and shift down (requires >> to be sign extending).
+    return static_cast<int>(reinterpret_cast<intptr_t>(value)) >> shift_bits;
+  }
+};
+
+// Smi constants for 64-bit systems.
+template <> struct SmiConstants<8> {
+  static const int kSmiShiftSize = 31;
+  static const int kSmiValueSize = 32;
+  static inline int SmiToInt(internal::Object* value) {
+    int shift_bits = kSmiTagSize + kSmiShiftSize;
+    // Shift down and throw away top 32 bits.
+    return static_cast<int>(reinterpret_cast<intptr_t>(value) >> shift_bits);
+  }
+};
+
+const int kSmiShiftSize = SmiConstants<sizeof(void*)>::kSmiShiftSize;
+const int kSmiValueSize = SmiConstants<sizeof(void*)>::kSmiValueSize;
 
 /**
  * This class exports constants and functionality from within v8 that
@@ -2744,7 +2824,6 @@
   static const int kJSObjectHeaderSize = 3 * sizeof(void*);
   static const int kFullStringRepresentationMask = 0x07;
   static const int kExternalTwoByteRepresentationTag = 0x03;
-  static const int kAlignedPointerShift = 2;
 
   // These constants are compiler dependent so their values must be
   // defined within the implementation.
@@ -2762,7 +2841,23 @@
   }
 
   static inline int SmiValue(internal::Object* value) {
-    return static_cast<int>(reinterpret_cast<intptr_t>(value)) >> kSmiTagSize;
+    return SmiConstants<sizeof(void*)>::SmiToInt(value);
+  }
+
+  static inline int GetInstanceType(internal::Object* obj) {
+    typedef internal::Object O;
+    O* map = ReadField<O*>(obj, kHeapObjectMapOffset);
+    return ReadField<uint8_t>(map, kMapInstanceTypeOffset);
+  }
+
+  static inline void* GetExternalPointer(internal::Object* obj) {
+    if (HasSmiTag(obj)) {
+      return obj;
+    } else if (GetInstanceType(obj) == kProxyType) {
+      return ReadField<void*>(obj, kProxyProxyOffset);
+    } else {
+      return NULL;
+    }
   }
 
   static inline bool IsExternalTwoByteString(int instance_type) {
@@ -2922,9 +3017,7 @@
   typedef internal::Object O;
   typedef internal::Internals I;
   O* obj = *reinterpret_cast<O**>(this);
-  O* map = I::ReadField<O*>(obj, I::kHeapObjectMapOffset);
-  int instance_type = I::ReadField<uint8_t>(map, I::kMapInstanceTypeOffset);
-  if (instance_type == I::kJSObjectType) {
+  if (I::GetInstanceType(obj) == I::kJSObjectType) {
     // If the object is a plain JSObject, which is the common case,
     // we know where to find the internal fields and can return the
     // value directly.
@@ -2949,25 +3042,27 @@
 
 void* External::QuickUnwrap(Handle<v8::Value> wrapper) {
   typedef internal::Object O;
-  typedef internal::Internals I;
   O* obj = *reinterpret_cast<O**>(const_cast<v8::Value*>(*wrapper));
-  if (I::HasSmiTag(obj)) {
-    int value = I::SmiValue(obj) << I::kAlignedPointerShift;
-    return reinterpret_cast<void*>(value);
-  } else {
-    O* map = I::ReadField<O*>(obj, I::kHeapObjectMapOffset);
-    int instance_type = I::ReadField<uint8_t>(map, I::kMapInstanceTypeOffset);
-    if (instance_type == I::kProxyType) {
-      return I::ReadField<void*>(obj, I::kProxyProxyOffset);
-    } else {
-      return NULL;
-    }
-  }
+  return internal::Internals::GetExternalPointer(obj);
 }
 
 
 void* Object::GetPointerFromInternalField(int index) {
-  return External::Unwrap(GetInternalField(index));
+  typedef internal::Object O;
+  typedef internal::Internals I;
+
+  O* obj = *reinterpret_cast<O**>(this);
+
+  if (I::GetInstanceType(obj) == I::kJSObjectType) {
+    // If the object is a plain JSObject, which is the common case,
+    // we know where to find the internal fields and can return the
+    // value directly.
+    int offset = I::kJSObjectHeaderSize + (sizeof(void*) * index);
+    O* value = I::ReadField<O*>(obj, offset);
+    return I::GetExternalPointer(value);
+  }
+
+  return SlowGetPointerFromInternalField(index);
 }
 
 
@@ -2983,10 +3078,8 @@
   typedef internal::Object O;
   typedef internal::Internals I;
   O* obj = *reinterpret_cast<O**>(const_cast<String*>(this));
-  O* map = I::ReadField<O*>(obj, I::kHeapObjectMapOffset);
-  int instance_type = I::ReadField<uint8_t>(map, I::kMapInstanceTypeOffset);
   String::ExternalStringResource* result;
-  if (I::IsExternalTwoByteString(instance_type)) {
+  if (I::IsExternalTwoByteString(I::GetInstanceType(obj))) {
     void* value = I::ReadField<void*>(obj, I::kStringResourceOffset);
     result = reinterpret_cast<String::ExternalStringResource*>(value);
   } else {
@@ -3012,9 +3105,7 @@
   typedef internal::Internals I;
   O* obj = *reinterpret_cast<O**>(const_cast<Value*>(this));
   if (!I::HasHeapObjectTag(obj)) return false;
-  O* map = I::ReadField<O*>(obj, I::kHeapObjectMapOffset);
-  int instance_type = I::ReadField<uint8_t>(map, I::kMapInstanceTypeOffset);
-  return (instance_type < I::kFirstNonstringType);
+  return (I::GetInstanceType(obj) < I::kFirstNonstringType);
 }
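
A worked example of the two smi encodings selected by SmiConstants (values
illustrative; this mirrors the SmiToInt arithmetic above):

    // 32-bit: kSmiShiftSize == 0; the payload sits in bits [31..1], tag in bit 0.
    int32_t enc32 = 42 << 1;                 // encoded word
    int32_t dec32 = enc32 >> 1;              // == 42 (arithmetic shift)
    // 64-bit: kSmiShiftSize == 31, total shift 32; the payload fills the upper
    // word and the whole lower word carries only the tag.
    int64_t enc64 = static_cast<int64_t>(42) << 32;
    int32_t dec64 = static_cast<int32_t>(enc64 >> 32);  // == 42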
 
 
diff --git a/src/SConscript b/src/SConscript
index b6c2b4d..85fd724 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -34,51 +34,129 @@
 
 
 SOURCES = {
-  'all': [
-    'accessors.cc', 'allocation.cc', 'api.cc', 'assembler.cc', 'ast.cc',
-    'bootstrapper.cc', 'builtins.cc', 'checks.cc', 'code-stubs.cc',
-    'codegen.cc', 'compilation-cache.cc', 'compiler.cc', 'contexts.cc',
-    'conversions.cc', 'counters.cc', 'dateparser.cc', 'debug.cc',
-    'debug-agent.cc', 'disassembler.cc', 'execution.cc', 'factory.cc',
-    'flags.cc', 'frame-element.cc', 'frames.cc', 'func-name-inferrer.cc',
-    'global-handles.cc', 'handles.cc', 'hashmap.cc', 'heap.cc',
-    'heap-profiler.cc', 'ic.cc', 'interpreter-irregexp.cc', 'jsregexp.cc',
-    'jump-target.cc', 'log.cc', 'log-utils.cc', 'mark-compact.cc',
-    'messages.cc', 'objects.cc', 'oprofile-agent.cc', 'parser.cc',
-    'property.cc', 'regexp-macro-assembler.cc',
-    'regexp-macro-assembler-irregexp.cc', 'regexp-stack.cc',
-    'register-allocator.cc', 'rewriter.cc', 'runtime.cc', 'scanner.cc',
-    'scopeinfo.cc', 'scopes.cc', 'serialize.cc', 'snapshot-common.cc',
-    'spaces.cc', 'string-stream.cc', 'stub-cache.cc', 'token.cc', 'top.cc',
-    'unicode.cc', 'usage-analyzer.cc', 'utils.cc', 'v8-counters.cc',
-    'v8.cc', 'v8threads.cc', 'variables.cc', 'version.cc',
-    'virtual-frame.cc', 'zone.cc'
-  ],
-  'arch:arm': [
-    'arm/assembler-arm.cc', 'arm/builtins-arm.cc', 'arm/codegen-arm.cc',
-    'arm/constants-arm.cc', 'arm/cpu-arm.cc', 'arm/disasm-arm.cc',
-    'arm/debug-arm.cc', 'arm/frames-arm.cc', 'arm/ic-arm.cc',
-    'arm/jump-target-arm.cc', 'arm/macro-assembler-arm.cc',
-    'arm/regexp-macro-assembler-arm.cc', 'arm/register-allocator-arm.cc',
-    'arm/stub-cache-arm.cc', 'arm/virtual-frame-arm.cc'
-  ],
-  'arch:ia32': [
-    'ia32/assembler-ia32.cc', 'ia32/builtins-ia32.cc',
-    'ia32/codegen-ia32.cc', 'ia32/cpu-ia32.cc', 'ia32/disasm-ia32.cc',
-    'ia32/debug-ia32.cc', 'ia32/frames-ia32.cc', 'ia32/ic-ia32.cc',
-    'ia32/jump-target-ia32.cc', 'ia32/macro-assembler-ia32.cc',
-    'ia32/regexp-macro-assembler-ia32.cc',
-    'ia32/register-allocator-ia32.cc', 'ia32/stub-cache-ia32.cc',
-    'ia32/virtual-frame-ia32.cc'
-  ],
-  'arch:x64': [
-    'x64/assembler-x64.cc', 'x64/builtins-x64.cc', 'x64/codegen-x64.cc',
-    'x64/cpu-x64.cc', 'x64/disasm-x64.cc', 'x64/debug-x64.cc',
-    'x64/frames-x64.cc', 'x64/ic-x64.cc', 'x64/jump-target-x64.cc',
-    'x64/macro-assembler-x64.cc', 'x64/regexp-macro-assembler-x64.cc',
-    'x64/register-allocator-x64.cc', 'x64/stub-cache-x64.cc',
-    'x64/virtual-frame-x64.cc'
-  ],
+  'all': Split("""
+    accessors.cc
+    allocation.cc
+    api.cc
+    assembler.cc
+    ast.cc
+    bootstrapper.cc
+    builtins.cc
+    checks.cc
+    code-stubs.cc
+    codegen.cc
+    compilation-cache.cc
+    compiler.cc
+    contexts.cc
+    conversions.cc
+    counters.cc
+    dateparser.cc
+    debug-agent.cc
+    debug.cc
+    disassembler.cc
+    execution.cc
+    factory.cc
+    fast-codegen.cc
+    flags.cc
+    frame-element.cc
+    frames.cc
+    func-name-inferrer.cc
+    global-handles.cc
+    handles.cc
+    hashmap.cc
+    heap-profiler.cc
+    heap.cc
+    ic.cc
+    interpreter-irregexp.cc
+    jsregexp.cc
+    jump-target.cc
+    log-utils.cc
+    log.cc
+    mark-compact.cc
+    messages.cc
+    objects.cc
+    oprofile-agent.cc
+    parser.cc
+    property.cc
+    regexp-macro-assembler-irregexp.cc
+    regexp-macro-assembler.cc
+    regexp-stack.cc
+    register-allocator.cc
+    rewriter.cc
+    runtime.cc
+    scanner.cc
+    scopeinfo.cc
+    scopes.cc
+    serialize.cc
+    snapshot-common.cc
+    spaces.cc
+    string-stream.cc
+    stub-cache.cc
+    token.cc
+    top.cc
+    unicode.cc
+    usage-analyzer.cc
+    utils.cc
+    v8-counters.cc
+    v8.cc
+    v8threads.cc
+    variables.cc
+    version.cc
+    virtual-frame.cc
+    zone.cc
+    """),
+  'arch:arm': Split("""
+    arm/assembler-arm.cc
+    arm/builtins-arm.cc
+    arm/codegen-arm.cc
+    arm/constants-arm.cc
+    arm/cpu-arm.cc
+    arm/debug-arm.cc
+    arm/disasm-arm.cc
+    arm/fast-codegen-arm.cc
+    arm/frames-arm.cc
+    arm/ic-arm.cc
+    arm/jump-target-arm.cc
+    arm/macro-assembler-arm.cc
+    arm/regexp-macro-assembler-arm.cc
+    arm/register-allocator-arm.cc
+    arm/stub-cache-arm.cc
+    arm/virtual-frame-arm.cc
+    """),
+  'arch:ia32': Split("""
+    ia32/assembler-ia32.cc
+    ia32/builtins-ia32.cc
+    ia32/codegen-ia32.cc
+    ia32/cpu-ia32.cc
+    ia32/debug-ia32.cc
+    ia32/disasm-ia32.cc
+    ia32/fast-codegen-ia32.cc
+    ia32/frames-ia32.cc
+    ia32/ic-ia32.cc
+    ia32/jump-target-ia32.cc
+    ia32/macro-assembler-ia32.cc
+    ia32/regexp-macro-assembler-ia32.cc
+    ia32/register-allocator-ia32.cc
+    ia32/stub-cache-ia32.cc
+    ia32/virtual-frame-ia32.cc
+    """),
+  'arch:x64': Split("""
+    x64/assembler-x64.cc
+    x64/builtins-x64.cc
+    x64/codegen-x64.cc
+    x64/cpu-x64.cc
+    x64/debug-x64.cc
+    x64/disasm-x64.cc
+    x64/fast-codegen-x64.cc
+    x64/frames-x64.cc
+    x64/ic-x64.cc
+    x64/jump-target-x64.cc
+    x64/macro-assembler-x64.cc
+    x64/regexp-macro-assembler-x64.cc
+    x64/register-allocator-x64.cc
+    x64/stub-cache-x64.cc
+    x64/virtual-frame-x64.cc
+    """),
   'simulator:arm': ['arm/simulator-arm.cc'],
   'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'],
   'os:linux':   ['platform-linux.cc', 'platform-posix.cc'],
diff --git a/src/api.cc b/src/api.cc
index fd3d921..b457aad 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -342,10 +342,10 @@
 
 
 bool SetResourceConstraints(ResourceConstraints* constraints) {
-  int semispace_size = constraints->max_young_space_size();
+  int young_space_size = constraints->max_young_space_size();
   int old_gen_size = constraints->max_old_space_size();
-  if (semispace_size != 0 || old_gen_size != 0) {
-    bool result = i::Heap::ConfigureHeap(semispace_size, old_gen_size);
+  if (young_space_size != 0 || old_gen_size != 0) {
+    bool result = i::Heap::ConfigureHeap(young_space_size / 2, old_gen_size);
     if (!result) return false;
   }
   if (constraints->stack_limit() != NULL) {
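
The division by two above reflects that max_young_space_size describes the
whole young generation while Heap::ConfigureHeap expects the size of one of
its two semispaces. From the embedder's side (sizes illustrative):

    v8::ResourceConstraints constraints;
    constraints.set_max_young_space_size(8 * 1024 * 1024);  // both semispaces
    v8::SetResourceConstraints(&constraints);
    // ConfigureHeap then receives 8 MB / 2 = 4 MB per semispace.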
@@ -2290,7 +2290,7 @@
   ON_BAILOUT("v8::SetElementsToPixelData()", return);
   ENTER_V8;
   HandleScope scope;
-  if (!ApiCheck(i::Smi::IsValid(length),
+  if (!ApiCheck(length <= i::PixelArray::kMaxLength,
                 "v8::Object::SetIndexedPropertiesToPixelData()",
                 "length exceeds max acceptable value")) {
     return;
@@ -2306,6 +2306,30 @@
 }
 
 
+void v8::Object::SetIndexedPropertiesToExternalArrayData(
+    void* data,
+    ExternalArrayType array_type,
+    int length) {
+  ON_BAILOUT("v8::SetIndexedPropertiesToExternalArrayData()", return);
+  ENTER_V8;
+  HandleScope scope;
+  if (!ApiCheck(length <= i::ExternalArray::kMaxLength,
+                "v8::Object::SetIndexedPropertiesToExternalArrayData()",
+                "length exceeds max acceptable value")) {
+    return;
+  }
+  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+  if (!ApiCheck(!self->IsJSArray(),
+                "v8::Object::SetIndexedPropertiesToExternalArrayData()",
+                "JSArray is not supported")) {
+    return;
+  }
+  i::Handle<i::ExternalArray> array =
+      i::Factory::NewExternalArray(length, array_type, data);
+  self->set_elements(*array);
+}
+
+
 Local<v8::Object> Function::NewInstance() const {
   return NewInstance(0, NULL);
 }
@@ -2578,7 +2602,16 @@
 
 
 void v8::Object::SetPointerInInternalField(int index, void* value) {
-  SetInternalField(index, External::Wrap(value));
+  i::Object* as_object = reinterpret_cast<i::Object*>(value);
+  if (as_object->IsSmi()) {
+    Utils::OpenHandle(this)->SetInternalField(index, as_object);
+    return;
+  }
+  HandleScope scope;
+  i::Handle<i::Proxy> proxy =
+      i::Factory::NewProxy(reinterpret_cast<i::Address>(value), i::TENURED);
+  if (!proxy.is_null())
+      Utils::OpenHandle(this)->SetInternalField(index, *proxy);
 }
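
Why the IsSmi() check on a raw pointer is sound (reasoning, not code from the
patch): with kSmiTagSize == 1 a word whose low bit is clear carries the smi
tag, and ordinary allocators return at least word-aligned addresses, so most
embedder pointers can be stored verbatim without allocating a Proxy:

    void* ptr = GetSomeEmbedderPointer();  // hypothetical
    bool stored_as_smi =
        (reinterpret_cast<intptr_t>(ptr) & i::kSmiTagMask) == i::kSmiTag;
    // Aligned pointers go in directly; odd addresses take the tenured
    // Proxy path above.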
 
 
@@ -2602,11 +2635,20 @@
 }
 
 
-bool v8::V8::IdleNotification(bool is_high_priority) {
+HeapStatistics::HeapStatistics(): total_heap_size_(0), used_heap_size_(0) { }
+
+
+void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
+  heap_statistics->set_total_heap_size(i::Heap::CommittedMemory());
+  heap_statistics->set_used_heap_size(i::Heap::SizeOfObjects());
+}
+
+
+bool v8::V8::IdleNotification() {
   // Returning true tells the caller that it need not
   // continue to call IdleNotification.
   if (!i::V8::IsRunning()) return true;
-  return i::V8::IdleNotification(is_high_priority);
+  return i::V8::IdleNotification();
 }
 
 
@@ -2760,7 +2802,9 @@
 
 v8::Local<v8::Context> Context::GetCurrent() {
   if (IsDeadCheck("v8::Context::GetCurrent()")) return Local<Context>();
-  i::Handle<i::Context> context(i::Top::global_context());
+  i::Handle<i::Object> current = i::Top::global_context();
+  if (current.is_null()) return Local<Context>();
+  i::Handle<i::Context> context = i::Handle<i::Context>::cast(current);
   return Utils::ToLocal(context);
 }
 
@@ -2837,36 +2881,39 @@
 }
 
 
-static const intptr_t kAlignedPointerMask = 3;
-
 Local<Value> v8::External::Wrap(void* data) {
   STATIC_ASSERT(sizeof(data) == sizeof(i::Address));
   LOG_API("External::Wrap");
   EnsureInitialized("v8::External::Wrap()");
   ENTER_V8;
-  if ((reinterpret_cast<intptr_t>(data) & kAlignedPointerMask) == 0) {
-    uintptr_t data_ptr = reinterpret_cast<uintptr_t>(data);
-    intptr_t data_value =
-        static_cast<intptr_t>(data_ptr >> i::Internals::kAlignedPointerShift);
-    STATIC_ASSERT(sizeof(data_ptr) == sizeof(data_value));
-    if (i::Smi::IsIntptrValid(data_value)) {
-      i::Handle<i::Object> obj(i::Smi::FromIntptr(data_value));
-      return Utils::ToLocal(obj);
-    }
+  i::Object* as_object = reinterpret_cast<i::Object*>(data);
+  if (as_object->IsSmi()) {
+    return Utils::ToLocal(i::Handle<i::Object>(as_object));
   }
   return ExternalNewImpl(data);
 }
 
 
+void* v8::Object::SlowGetPointerFromInternalField(int index) {
+  i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+  i::Object* value = obj->GetInternalField(index);
+  if (value->IsSmi()) {
+    return value;
+  } else if (value->IsProxy()) {
+    return reinterpret_cast<void*>(i::Proxy::cast(value)->proxy());
+  } else {
+    return NULL;
+  }
+}
+
+
 void* v8::External::FullUnwrap(v8::Handle<v8::Value> wrapper) {
   if (IsDeadCheck("v8::External::Unwrap()")) return 0;
   i::Handle<i::Object> obj = Utils::OpenHandle(*wrapper);
   void* result;
   if (obj->IsSmi()) {
     // The external value was an aligned pointer.
-    uintptr_t value = static_cast<uintptr_t>(
-        i::Smi::cast(*obj)->value()) << i::Internals::kAlignedPointerShift;
-    result = reinterpret_cast<void*>(value);
+    result = *obj;
   } else if (obj->IsProxy()) {
     result = ExternalValueImpl(obj);
   } else {
@@ -2912,6 +2959,18 @@
 }
 
 
+Local<String> v8::String::Concat(Handle<String> left, Handle<String> right) {
+  EnsureInitialized("v8::String::New()");
+  LOG_API("String::New(char)");
+  ENTER_V8;
+  i::Handle<i::String> left_string = Utils::OpenHandle(*left);
+  i::Handle<i::String> right_string = Utils::OpenHandle(*right);
+  i::Handle<i::String> result = i::Factory::NewConsString(left_string,
+                                                          right_string);
+  return Utils::ToLocal(result);
+}
+
+
 Local<String> v8::String::NewUndetectable(const char* data, int length) {
   EnsureInitialized("v8::String::NewUndetectable()");
   LOG_API("String::NewUndetectable(char)");
@@ -3215,6 +3274,17 @@
 }
 
 
+Local<Integer> Integer::NewFromUnsigned(uint32_t value) {
+  bool fits_into_int32_t = (value & (1 << 31)) == 0;
+  if (fits_into_int32_t) {
+    return Integer::New(static_cast<int32_t>(value));
+  }
+  ENTER_V8;
+  i::Handle<i::Object> result = i::Factory::NewNumber(value);
+  return Utils::IntegerToLocal(result);
+}
+
+
 void V8::IgnoreOutOfMemoryException() {
   thread_local.set_ignore_out_of_memory(true);
 }
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index cd5a1bb..48cc090 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -81,7 +81,13 @@
 
 Object* RelocInfo::target_object() {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+  return Memory::Object_at(Assembler::target_address_address_at(pc_));
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_));
 }
 
 
@@ -104,7 +110,7 @@
 
 
 Address RelocInfo::call_address() {
-  ASSERT(IsCallInstruction());
+  ASSERT(IsPatchedReturnSequence());
   // The 2 instructions offset assumes patched return sequence.
   ASSERT(IsJSReturn(rmode()));
   return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
@@ -112,7 +118,7 @@
 
 
 void RelocInfo::set_call_address(Address target) {
-  ASSERT(IsCallInstruction());
+  ASSERT(IsPatchedReturnSequence());
   // The 2 instructions offset assumes patched return sequence.
   ASSERT(IsJSReturn(rmode()));
   Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
@@ -125,7 +131,7 @@
 
 
 Object** RelocInfo::call_object_address() {
-  ASSERT(IsCallInstruction());
+  ASSERT(IsPatchedReturnSequence());
   // The 2 instructions offset assumes patched return sequence.
   ASSERT(IsJSReturn(rmode()));
   return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
@@ -137,7 +143,7 @@
 }
 
 
-bool RelocInfo::IsCallInstruction() {
+bool RelocInfo::IsPatchedReturnSequence() {
   // On ARM a "call instruction" is actually two instructions.
   //   mov lr, pc
   //   ldr pc, [pc, #XXX]
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index cdd32f3..47f0e96 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -1172,9 +1172,9 @@
 
 void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
   VirtualFrame::SpilledScope spilled_scope;
+  frame_->EmitPush(cp);
   __ mov(r0, Operand(pairs));
   frame_->EmitPush(r0);
-  frame_->EmitPush(cp);
   __ mov(r0, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
   frame_->EmitPush(r0);
   frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
@@ -1539,191 +1539,200 @@
 }
 
 
-void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
+void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
   VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ LoopStatement");
+  Comment cmnt(masm_, "[ DoWhileStatement");
   CodeForStatementPosition(node);
   node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  JumpTarget body(JumpTarget::BIDIRECTIONAL);
 
-  // Simple condition analysis.  ALWAYS_TRUE and ALWAYS_FALSE represent a
-  // known result for the test expression, with no side effects.
-  enum { ALWAYS_TRUE, ALWAYS_FALSE, DONT_KNOW } info = DONT_KNOW;
-  if (node->cond() == NULL) {
-    ASSERT(node->type() == LoopStatement::FOR_LOOP);
-    info = ALWAYS_TRUE;
-  } else {
-    Literal* lit = node->cond()->AsLiteral();
-    if (lit != NULL) {
-      if (lit->IsTrue()) {
-        info = ALWAYS_TRUE;
-      } else if (lit->IsFalse()) {
-        info = ALWAYS_FALSE;
-      }
-    }
-  }
-
-  switch (node->type()) {
-    case LoopStatement::DO_LOOP: {
-      JumpTarget body(JumpTarget::BIDIRECTIONAL);
-
-      // Label the top of the loop for the backward CFG edge.  If the test
-      // is always true we can use the continue target, and if the test is
-      // always false there is no need.
-      if (info == ALWAYS_TRUE) {
-        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-        node->continue_target()->Bind();
-      } else if (info == ALWAYS_FALSE) {
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-      } else {
-        ASSERT(info == DONT_KNOW);
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-        body.Bind();
-      }
-
-      CheckStack();  // TODO(1222600): ignore if body contains calls.
-      VisitAndSpill(node->body());
-
-      // Compile the test.
-      if (info == ALWAYS_TRUE) {
-        if (has_valid_frame()) {
-          // If control can fall off the end of the body, jump back to the
-          // top.
-          node->continue_target()->Jump();
-        }
-      } else if (info == ALWAYS_FALSE) {
-        // If we have a continue in the body, we only have to bind its jump
-        // target.
-        if (node->continue_target()->is_linked()) {
-          node->continue_target()->Bind();
-        }
-      } else {
-        ASSERT(info == DONT_KNOW);
-        // We have to compile the test expression if it can be reached by
-        // control flow falling out of the body or via continue.
-        if (node->continue_target()->is_linked()) {
-          node->continue_target()->Bind();
-        }
-        if (has_valid_frame()) {
-          LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
-                                &body, node->break_target(), true);
-          if (has_valid_frame()) {
-            // A invalid frame here indicates that control did not
-            // fall out of the test expression.
-            Branch(true, &body);
-          }
-        }
-      }
-      break;
-    }
-
-    case LoopStatement::WHILE_LOOP: {
-      // If the test is never true and has no side effects there is no need
-      // to compile the test or body.
-      if (info == ALWAYS_FALSE) break;
-
-      // Label the top of the loop with the continue target for the backward
-      // CFG edge.
+  // Label the top of the loop for the backward CFG edge.  If the test
+  // is always true we can use the continue target, and if the test is
+  // always false there is no need.
+  ConditionAnalysis info = AnalyzeCondition(node->cond());
+  switch (info) {
+    case ALWAYS_TRUE:
       node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
       node->continue_target()->Bind();
+      break;
+    case ALWAYS_FALSE:
+      node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+      break;
+    case DONT_KNOW:
+      node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+      body.Bind();
+      break;
+  }
 
-      if (info == DONT_KNOW) {
-        JumpTarget body;
-        LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
-                              &body, node->break_target(), true);
-        if (has_valid_frame()) {
-          // A NULL frame indicates that control did not fall out of the
-          // test expression.
-          Branch(false, node->break_target());
-        }
-        if (has_valid_frame() || body.is_linked()) {
-          body.Bind();
-        }
-      }
+  CheckStack();  // TODO(1222600): ignore if body contains calls.
+  VisitAndSpill(node->body());
 
+  // Compile the test.
+  switch (info) {
+    case ALWAYS_TRUE:
+      // If control can fall off the end of the body, jump back to the
+      // top.
       if (has_valid_frame()) {
-        CheckStack();  // TODO(1222600): ignore if body contains calls.
-        VisitAndSpill(node->body());
-
-        // If control flow can fall out of the body, jump back to the top.
-        if (has_valid_frame()) {
-          node->continue_target()->Jump();
-        }
+        node->continue_target()->Jump();
       }
       break;
-    }
-
-    case LoopStatement::FOR_LOOP: {
-      JumpTarget loop(JumpTarget::BIDIRECTIONAL);
-
-      if (node->init() != NULL) {
-        VisitAndSpill(node->init());
-      }
-
-      // There is no need to compile the test or body.
-      if (info == ALWAYS_FALSE) break;
-
-      // If there is no update statement, label the top of the loop with the
-      // continue target, otherwise with the loop target.
-      if (node->next() == NULL) {
-        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+    case ALWAYS_FALSE:
+      // If we have a continue in the body, we only have to bind its
+      // jump target.
+      if (node->continue_target()->is_linked()) {
         node->continue_target()->Bind();
-      } else {
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-        loop.Bind();
       }
-
-      // If the test is always true, there is no need to compile it.
-      if (info == DONT_KNOW) {
-        JumpTarget body;
+      break;
+    case DONT_KNOW:
+      // We have to compile the test expression if it can be reached by
+      // control flow falling out of the body or via continue.
+      if (node->continue_target()->is_linked()) {
+        node->continue_target()->Bind();
+      }
+      if (has_valid_frame()) {
         LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
                               &body, node->break_target(), true);
         if (has_valid_frame()) {
-          Branch(false, node->break_target());
-        }
-        if (has_valid_frame() || body.is_linked()) {
-          body.Bind();
-        }
-      }
-
-      if (has_valid_frame()) {
-        CheckStack();  // TODO(1222600): ignore if body contains calls.
-        VisitAndSpill(node->body());
-
-        if (node->next() == NULL) {
-          // If there is no update statement and control flow can fall out
-          // of the loop, jump directly to the continue label.
-          if (has_valid_frame()) {
-            node->continue_target()->Jump();
-          }
-        } else {
-          // If there is an update statement and control flow can reach it
-          // via falling out of the body of the loop or continuing, we
-          // compile the update statement.
-          if (node->continue_target()->is_linked()) {
-            node->continue_target()->Bind();
-          }
-          if (has_valid_frame()) {
-            // Record source position of the statement as this code which is
-            // after the code for the body actually belongs to the loop
-            // statement and not the body.
-            CodeForStatementPosition(node);
-            VisitAndSpill(node->next());
-            loop.Jump();
-          }
+          // An invalid frame here indicates that control did not
+          // fall out of the test expression.
+          Branch(true, &body);
         }
       }
       break;
-    }
   }
 
   if (node->break_target()->is_linked()) {
     node->break_target()->Bind();
   }
-  node->continue_target()->Unuse();
-  node->break_target()->Unuse();
+  ASSERT(!has_valid_frame() || frame_->height() == original_height);
+}
+
+
+void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ WhileStatement");
+  CodeForStatementPosition(node);
+
+  // If the test is never true and has no side effects there is no need
+  // to compile the test or body.
+  ConditionAnalysis info = AnalyzeCondition(node->cond());
+  if (info == ALWAYS_FALSE) return;
+
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+
+  // Label the top of the loop with the continue target for the backward
+  // CFG edge.
+  node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+  node->continue_target()->Bind();
+
+  if (info == DONT_KNOW) {
+    JumpTarget body;
+    LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
+                          &body, node->break_target(), true);
+    if (has_valid_frame()) {
+      // A NULL frame indicates that control did not fall out of the
+      // test expression.
+      Branch(false, node->break_target());
+    }
+    if (has_valid_frame() || body.is_linked()) {
+      body.Bind();
+    }
+  }
+
+  if (has_valid_frame()) {
+    CheckStack();  // TODO(1222600): ignore if body contains calls.
+    VisitAndSpill(node->body());
+
+    // If control flow can fall out of the body, jump back to the top.
+    if (has_valid_frame()) {
+      node->continue_target()->Jump();
+    }
+  }
+  if (node->break_target()->is_linked()) {
+    node->break_target()->Bind();
+  }
+  ASSERT(!has_valid_frame() || frame_->height() == original_height);
+}
+
+
+void CodeGenerator::VisitForStatement(ForStatement* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ ForStatement");
+  CodeForStatementPosition(node);
+  if (node->init() != NULL) {
+    VisitAndSpill(node->init());
+  }
+
+  // If the test is never true there is no need to compile the test or
+  // body.
+  ConditionAnalysis info = AnalyzeCondition(node->cond());
+  if (info == ALWAYS_FALSE) return;
+
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+
+  // If there is no update statement, label the top of the loop with the
+  // continue target, otherwise with the loop target.
+  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
+  if (node->next() == NULL) {
+    node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+    node->continue_target()->Bind();
+  } else {
+    node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+    loop.Bind();
+  }
+
+  // If the test is always true, there is no need to compile it.
+  if (info == DONT_KNOW) {
+    JumpTarget body;
+    LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
+                          &body, node->break_target(), true);
+    if (has_valid_frame()) {
+      Branch(false, node->break_target());
+    }
+    if (has_valid_frame() || body.is_linked()) {
+      body.Bind();
+    }
+  }
+
+  if (has_valid_frame()) {
+    CheckStack();  // TODO(1222600): ignore if body contains calls.
+    VisitAndSpill(node->body());
+
+    if (node->next() == NULL) {
+      // If there is no update statement and control flow can fall out
+      // of the loop, jump directly to the continue label.
+      if (has_valid_frame()) {
+        node->continue_target()->Jump();
+      }
+    } else {
+      // If there is an update statement and control flow can reach it
+      // via falling out of the body of the loop or continuing, we
+      // compile the update statement.
+      if (node->continue_target()->is_linked()) {
+        node->continue_target()->Bind();
+      }
+      if (has_valid_frame()) {
+        // Record source position of the statement as this code which is
+        // after the code for the body actually belongs to the loop
+        // statement and not the body.
+        CodeForStatementPosition(node);
+        VisitAndSpill(node->next());
+        loop.Jump();
+      }
+    }
+  }
+  if (node->break_target()->is_linked()) {
+    node->break_target()->Bind();
+  }
   ASSERT(!has_valid_frame() || frame_->height() == original_height);
 }
 
@@ -1918,12 +1927,12 @@
 }
 
 
-void CodeGenerator::VisitTryCatch(TryCatch* node) {
+void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
   VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ TryCatch");
+  Comment cmnt(masm_, "[ TryCatchStatement");
   CodeForStatementPosition(node);
 
   JumpTarget try_block;
@@ -2043,12 +2052,12 @@
 }
 
 
-void CodeGenerator::VisitTryFinally(TryFinally* node) {
+void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
   VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ TryFinally");
+  Comment cmnt(masm_, "[ TryFinallyStatement");
   CodeForStatementPosition(node);
 
   // State: Used to keep track of reason for entering the finally
@@ -2246,12 +2255,10 @@
   VirtualFrame::SpilledScope spilled_scope;
   ASSERT(boilerplate->IsBoilerplate());
 
-  // Push the boilerplate on the stack.
-  __ mov(r0, Operand(boilerplate));
-  frame_->EmitPush(r0);
-
   // Create a new closure.
   frame_->EmitPush(cp);
+  __ mov(r0, Operand(boilerplate));
+  frame_->EmitPush(r0);
   frame_->CallRuntime(Runtime::kNewClosure, 2);
   frame_->EmitPush(r0);
 }
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 1eb0932..e079950 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -147,6 +147,15 @@
                                Handle<Script> script,
                                bool is_eval);
 
+  // Printing of AST, etc. as requested by flags.
+  static void MakeCodePrologue(FunctionLiteral* fun);
+
+  // Allocate and install the code.
+  static Handle<Code> MakeCodeEpilogue(FunctionLiteral* fun,
+                                       MacroAssembler* masm,
+                                       Code::Flags flags,
+                                       Handle<Script> script);
+
 #ifdef ENABLE_LOGGING_AND_PROFILING
   static bool ShouldGenerateLog(Expression* type);
 #endif
@@ -156,6 +165,8 @@
                               bool is_toplevel,
                               Handle<Script> script);
 
+  static void RecordPositions(MacroAssembler* masm, int pos);
+
   // Accessors
   MacroAssembler* masm() { return masm_; }
 
@@ -231,7 +242,7 @@
   void LoadReference(Reference* ref);
   void UnloadReference(Reference* ref);
 
-  MemOperand ContextOperand(Register context, int index) const {
+  static MemOperand ContextOperand(Register context, int index) {
     return MemOperand(context, Context::SlotOffset(index));
   }
 
@@ -243,7 +254,7 @@
                                                JumpTarget* slow);
 
   // Expressions
-  MemOperand GlobalObject() const  {
+  static MemOperand GlobalObject()  {
     return ContextOperand(cp, Context::GLOBAL_INDEX);
   }
 
@@ -319,10 +330,11 @@
                                       const InlineRuntimeLUT& new_entry,
                                       InlineRuntimeLUT* old_entry);
 
+  static Handle<Code> ComputeLazyCompile(int argc);
   Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
   void ProcessDeclarations(ZoneList<Declaration*>* declarations);
 
-  Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
+  static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
 
   // Declare global variables and functions in the given array of
   // name/value pairs.
@@ -365,6 +377,14 @@
   inline void GenerateMathSin(ZoneList<Expression*>* args);
   inline void GenerateMathCos(ZoneList<Expression*>* args);
 
+  // Simple condition analysis.
+  enum ConditionAnalysis {
+    ALWAYS_TRUE,
+    ALWAYS_FALSE,
+    DONT_KNOW
+  };
+  ConditionAnalysis AnalyzeCondition(Expression* cond);
+
   // Methods used to indicate which source code is generated for. Source
   // positions are collected by the assembler and emitted with the relocation
   // information.
@@ -406,6 +426,8 @@
   friend class VirtualFrame;
   friend class JumpTarget;
   friend class Reference;
+  friend class FastCodeGenerator;
+  friend class CodeGenSelector;
 
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
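
AnalyzeCondition's body is not part of this hunk, but the inline analysis
removed from the old VisitLoopStatement (see codegen-arm.cc above) shows what
it computes; a sketch reconstructed from those removed lines:

    CodeGenerator::ConditionAnalysis CodeGenerator::AnalyzeCondition(
        Expression* cond) {
      if (cond == NULL) return ALWAYS_TRUE;   // e.g. for (;;) has no test
      Literal* lit = cond->AsLiteral();
      if (lit == NULL) return DONT_KNOW;
      if (lit->IsTrue()) return ALWAYS_TRUE;
      if (lit->IsFalse()) return ALWAYS_FALSE;
      return DONT_KNOW;
    }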
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
index 4f45175..ef33653 100644
--- a/src/arm/debug-arm.cc
+++ b/src/arm/debug-arm.cc
@@ -68,7 +68,7 @@
 // A debug break in the exit code is identified by a call.
 bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
   ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
-  return rinfo->IsCallInstruction();
+  return rinfo->IsPatchedReturnSequence();
 }
 
 
diff --git a/src/arm/fast-codegen-arm.cc b/src/arm/fast-codegen-arm.cc
new file mode 100644
index 0000000..97feae5
--- /dev/null
+++ b/src/arm/fast-codegen-arm.cc
@@ -0,0 +1,539 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "fast-codegen.h"
+#include "parser.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+// Generate code for a JS function.  On entry to the function the receiver
+// and arguments have been pushed on the stack left to right.  The actual
+// argument count matches the formal parameter count expected by the
+// function.
+//
+// The live registers are:
+//   o r1: the JS function object being called (i.e., ourselves)
+//   o cp: our context
+//   o fp: our caller's frame pointer
+//   o sp: stack pointer
+//   o lr: return address
+//
+// The function builds a JS frame.  Please see JavaScriptFrameConstants in
+// frames-arm.h for its layout.
+void FastCodeGenerator::Generate(FunctionLiteral* fun) {
+  function_ = fun;
+  // ARM does NOT call SetFunctionPosition.
+
+  __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+  // Adjust fp to point to caller's fp.
+  __ add(fp, sp, Operand(2 * kPointerSize));
+
+  { Comment cmnt(masm_, "[ Allocate locals");
+    int locals_count = fun->scope()->num_stack_slots();
+    if (locals_count > 0) {
+      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+    }
+    if (FLAG_check_stack) {
+      __ LoadRoot(r2, Heap::kStackLimitRootIndex);
+    }
+    for (int i = 0; i < locals_count; i++) {
+      __ push(ip);
+    }
+  }
+
+  if (FLAG_check_stack) {
+    // Put the lr setup instruction in the delay slot.  The kInstrSize is
+    // added to the implicit 8 byte offset that always applies to operations
+    // with pc and gives a return address 12 bytes down.
+    Comment cmnt(masm_, "[ Stack check");
+    __ add(lr, pc, Operand(Assembler::kInstrSize));
+    __ cmp(sp, Operand(r2));
+    StackCheckStub stub;
+    __ mov(pc,
+           Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
+                   RelocInfo::CODE_TARGET),
+           LeaveCC,
+           lo);
+  }
+
+  { Comment cmnt(masm_, "[ Declarations");
+    VisitDeclarations(fun->scope()->declarations());
+  }
+
+  if (FLAG_trace) {
+    __ CallRuntime(Runtime::kTraceEnter, 0);
+  }
+
+  { Comment cmnt(masm_, "[ Body");
+    VisitStatements(fun->body());
+  }
+
+  { Comment cmnt(masm_, "[ return <undefined>;");
+    // Emit a 'return undefined' in case control fell off the end of the
+    // body.
+    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+    SetReturnPosition(fun);
+    if (FLAG_trace) {
+      // Push the return value on the stack as the parameter.
+      // Runtime::TraceExit returns its parameter in r0.
+      __ push(r0);
+      __ CallRuntime(Runtime::kTraceExit, 1);
+    }
+
+    __ RecordJSReturn();
+    __ mov(sp, fp);
+    __ ldm(ia_w, sp, fp.bit() | lr.bit());
+    int num_parameters = function_->scope()->num_parameters();
+    __ add(sp, sp, Operand((num_parameters + 1) * kPointerSize));
+    __ Jump(lr);
+  }
+}
+
+
+void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+  // Call the runtime to declare the globals.
+  // The context is the first argument.
+  __ mov(r1, Operand(pairs));
+  __ mov(r0, Operand(Smi::FromInt(is_eval_ ? 1 : 0)));
+  __ stm(db_w, sp, cp.bit() | r1.bit() | r0.bit());
+  __ CallRuntime(Runtime::kDeclareGlobals, 3);
+  // Return value is ignored.
+}
+
+
+void FastCodeGenerator::VisitBlock(Block* stmt) {
+  Comment cmnt(masm_, "[ Block");
+  SetStatementPosition(stmt);
+  VisitStatements(stmt->statements());
+}
+
+
+void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
+  Comment cmnt(masm_, "[ ExpressionStatement");
+  SetStatementPosition(stmt);
+  Visit(stmt->expression());
+}
+
+
+void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
+  Comment cmnt(masm_, "[ ReturnStatement");
+  SetStatementPosition(stmt);
+  Expression* expr = stmt->expression();
+  Visit(expr);
+
+  // Complete the statement based on the location of the subexpression.
+  Location source = expr->location();
+  ASSERT(!source.is_nowhere());
+  if (source.is_temporary()) {
+    __ pop(r0);
+  } else {
+    ASSERT(source.is_constant());
+    ASSERT(expr->AsLiteral() != NULL);
+    __ mov(r0, Operand(expr->AsLiteral()->handle()));
+  }
+
+  if (FLAG_trace) {
+    // Push the return value on the stack as the parameter.
+    // Runtime::TraceExit returns its parameter in r0.
+    __ push(r0);
+    __ CallRuntime(Runtime::kTraceExit, 1);
+  }
+
+  __ RecordJSReturn();
+  __ mov(sp, fp);
+  __ ldm(ia_w, sp, fp.bit() | lr.bit());
+  int num_parameters = function_->scope()->num_parameters();
+  __ add(sp, sp, Operand((num_parameters + 1) * kPointerSize));
+  __ Jump(lr);
+}
+
+
+void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+  Comment cmnt(masm_, "[ FunctionLiteral");
+
+  // Build the function boilerplate and instantiate it.
+  Handle<JSFunction> boilerplate = BuildBoilerplate(expr);
+  if (HasStackOverflow()) return;
+
+  ASSERT(boilerplate->IsBoilerplate());
+
+  // Create a new closure.
+  __ mov(r0, Operand(boilerplate));
+  __ stm(db_w, sp, cp.bit() | r0.bit());
+  __ CallRuntime(Runtime::kNewClosure, 2);
+
+  if (expr->location().is_temporary()) {
+    __ push(r0);
+  } else {
+    ASSERT(expr->location().is_nowhere());
+  }
+}
+
+
+void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+  Comment cmnt(masm_, "[ VariableProxy");
+  Expression* rewrite = expr->var()->rewrite();
+  if (rewrite == NULL) {
+    Comment cmnt(masm_, "Global variable");
+    // Use inline caching. Variable name is passed in r2 and the global
+    // object on the stack.
+    __ ldr(ip, CodeGenerator::GlobalObject());
+    __ push(ip);
+    __ mov(r2, Operand(expr->name()));
+    Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+    __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+    if (expr->location().is_temporary()) {
+      // Replace the global object with the result.
+      __ str(r0, MemOperand(sp));
+    } else {
+      ASSERT(expr->location().is_nowhere());
+      __ pop();
+    }
+
+  } else {
+    Comment cmnt(masm_, "Stack slot");
+    Slot* slot = rewrite->AsSlot();
+    ASSERT(slot != NULL);
+    if (expr->location().is_temporary()) {
+      __ ldr(ip, MemOperand(fp, SlotOffset(slot)));
+      __ push(ip);
+    } else {
+      ASSERT(expr->location().is_nowhere());
+    }
+  }
+}
+
+
+void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+  Comment cmnt(masm_, "[ RegExp Literal");
+  Label done;
+  // Registers will be used as follows:
+  // r4 = JS function, literals array
+  // r3 = literal index
+  // r2 = RegExp pattern
+  // r1 = RegExp flags
+  // r0 = temp + return value (RegExp literal)
+  __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ ldr(r4, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
+  int literal_offset =
+    FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+  __ ldr(r0, FieldMemOperand(r4, literal_offset));
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(r0, ip);
+  __ b(ne, &done);
+  __ mov(r3, Operand(Smi::FromInt(expr->literal_index())));
+  __ mov(r2, Operand(expr->pattern()));
+  __ mov(r1, Operand(expr->flags()));
+  __ stm(db_w, sp, r4.bit() | r3.bit() | r2.bit() | r1.bit());
+  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+  __ bind(&done);
+  if (expr->location().is_temporary()) {
+    __ push(r0);
+  } else {
+    ASSERT(expr->location().is_nowhere());
+  }
+}
+
+
+void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+  Comment cmnt(masm_, "[ ArrayLiteral");
+  Label make_clone;
+
+  // Fetch the function's literals array.
+  __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
+  // Check if the literal's boilerplate has been instantiated.
+  int offset =
+      FixedArray::kHeaderSize + (expr->literal_index() * kPointerSize);
+  __ ldr(r0, FieldMemOperand(r3, offset));
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(r0, ip);
+  __ b(&make_clone, ne);
+
+  // Instantiate the boilerplate.
+  __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
+  __ mov(r1, Operand(expr->literals()));
+  __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
+  __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
+
+  __ bind(&make_clone);
+  // Clone the boilerplate.
+  __ push(r0);
+  if (expr->depth() > 1) {
+    __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+  } else {
+    __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+  }
+
+  bool result_saved = false;  // Is the result saved to the stack?
+
+  // Emit code to evaluate all the non-constant subexpressions and to store
+  // them into the newly cloned array.
+  ZoneList<Expression*>* subexprs = expr->values();
+  for (int i = 0, len = subexprs->length(); i < len; i++) {
+    Expression* subexpr = subexprs->at(i);
+    // If the subexpression is a literal or a simple materialized literal it
+    // is already set in the cloned array.
+    if (subexpr->AsLiteral() != NULL ||
+        CompileTimeValue::IsCompileTimeValue(subexpr)) {
+      continue;
+    }
+
+    if (!result_saved) {
+      __ push(r0);
+      result_saved = true;
+    }
+    Visit(subexpr);
+    ASSERT(subexpr->location().is_temporary());
+
+    // Store the subexpression value in the array's elements.
+    __ pop(r0);  // Subexpression value.
+    __ ldr(r1, MemOperand(sp));  // Copy of array literal.
+    __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
+    int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+    __ str(r0, FieldMemOperand(r1, offset));
+
+    // Update the write barrier for the array store with r0 as the scratch
+    // register.
+    __ mov(r2, Operand(offset));
+    __ RecordWrite(r1, r2, r0);
+  }
+
+  Location destination = expr->location();
+  if (destination.is_nowhere() && result_saved) {
+    __ pop();
+  } else if (destination.is_temporary() && !result_saved) {
+    __ push(r0);
+  }
+}
+
+
+void FastCodeGenerator::VisitAssignment(Assignment* expr) {
+  Comment cmnt(masm_, "[ Assignment");
+  ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR);
+  Expression* rhs = expr->value();
+  Visit(rhs);
+
+  // Left-hand side can only be a global or a (parameter or local) slot.
+  Variable* var = expr->target()->AsVariableProxy()->AsVariable();
+  ASSERT(var != NULL);
+  ASSERT(var->is_global() || var->slot() != NULL);
+
+  // Complete the assignment based on the location of the right-hand-side
+  // value and the desired location of the assignment value.
+  Location destination = expr->location();
+  Location source = rhs->location();
+  ASSERT(!destination.is_constant());
+  ASSERT(!source.is_nowhere());
+
+  if (var->is_global()) {
+    // Assignment to a global variable, use inline caching.  Right-hand-side
+    // value is passed in r0, variable name in r2, and the global object on
+    // the stack.
+    if (source.is_temporary()) {
+      __ pop(r0);
+    } else {
+      ASSERT(source.is_constant());
+      ASSERT(rhs->AsLiteral() != NULL);
+      __ mov(r0, Operand(rhs->AsLiteral()->handle()));
+    }
+    __ mov(r2, Operand(var->name()));
+    __ ldr(ip, CodeGenerator::GlobalObject());
+    __ push(ip);
+    Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+    __ Call(ic, RelocInfo::CODE_TARGET);
+    // Overwrite the global object on the stack with the result if needed.
+    if (destination.is_temporary()) {
+      __ str(r0, MemOperand(sp));
+    } else {
+      ASSERT(destination.is_nowhere());
+      __ pop();
+    }
+
+  } else {
+    if (source.is_temporary()) {
+      if (destination.is_temporary()) {
+        // Case 'temp1 <- (var = temp0)'.  Preserve right-hand-side
+        // temporary on the stack.
+        __ ldr(ip, MemOperand(sp));
+      } else {
+        ASSERT(destination.is_nowhere());
+        // Case 'var = temp'.  Discard right-hand-side temporary.
+        __ pop(ip);
+      }
+      __ str(ip, MemOperand(fp, SlotOffset(var->slot())));
+    } else {
+      ASSERT(source.is_constant());
+      ASSERT(rhs->AsLiteral() != NULL);
+      // Two cases: 'temp <- (var = constant)', or 'var = constant' with a
+      // discarded result.  Always perform the assignment.
+      __ mov(ip, Operand(rhs->AsLiteral()->handle()));
+      __ str(ip, MemOperand(fp, SlotOffset(var->slot())));
+      if (destination.is_temporary()) {
+        // Case 'temp <- (var = constant)'.  Save result.
+        __ push(ip);
+      }
+    }
+  }
+}
+
+
+void FastCodeGenerator::VisitCall(Call* expr) {
+  Comment cmnt(masm_, "[ Call");
+  Expression* fun = expr->expression();
+  ZoneList<Expression*>* args = expr->arguments();
+  Variable* var = fun->AsVariableProxy()->AsVariable();
+  ASSERT(var != NULL && !var->is_this() && var->is_global());
+  ASSERT(!var->is_possibly_eval());
+
+  __ mov(r1, Operand(var->name()));
+  // Push global object as receiver.
+  __ ldr(r0, CodeGenerator::GlobalObject());
+  __ stm(db_w, sp, r1.bit() | r0.bit());
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Visit(args->at(i));
+    ASSERT(!args->at(i)->location().is_nowhere());
+    if (args->at(i)->location().is_constant()) {
+      ASSERT(args->at(i)->AsLiteral() != NULL);
+      __ mov(r0, Operand(args->at(i)->AsLiteral()->handle()));
+      __ push(r0);
+    }
+  }
+  // Record source position for debugger
+  SetSourcePosition(expr->position());
+  // Call the IC initialization code.
+  Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
+                                                         NOT_IN_LOOP);
+  __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+  // Restore context register.
+  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  if (expr->location().is_temporary()) {
+    __ str(r0, MemOperand(sp));
+  } else {
+    ASSERT(expr->location().is_nowhere());
+    __ pop();
+  }
+}
+
+
+void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+  Comment cmnt(masm_, "[ CallRuntime");
+  ZoneList<Expression*>* args = expr->arguments();
+  Runtime::Function* function = expr->function();
+
+  ASSERT(function != NULL);
+
+  // Push the arguments ("left-to-right").
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Visit(args->at(i));
+    ASSERT(!args->at(i)->location().is_nowhere());
+    if (args->at(i)->location().is_constant()) {
+      ASSERT(args->at(i)->AsLiteral() != NULL);
+      __ mov(r0, Operand(args->at(i)->AsLiteral()->handle()));
+      __ push(r0);
+    } else {
+      ASSERT(args->at(i)->location().is_temporary());
+      // If location is temporary, it is already on the stack,
+      // so nothing to do here.
+    }
+  }
+
+  __ CallRuntime(function, arg_count);
+  if (expr->location().is_temporary()) {
+    __ push(r0);
+  } else {
+    ASSERT(expr->location().is_nowhere());
+  }
+}
+
+
+void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+  // Compile a short-circuited boolean or operation in a non-test
+  // context.
+  ASSERT(expr->op() == Token::OR);
+  // Compile (e0 || e1) as if it were
+  // (let (temp = e0) temp ? temp : e1).
+
+  Label done;
+  Location destination = expr->location();
+  ASSERT(!destination.is_constant());
+
+  Expression* left = expr->left();
+  Location left_source = left->location();
+  ASSERT(!left_source.is_nowhere());
+
+  Expression* right = expr->right();
+  Location right_source = right->location();
+  ASSERT(!right_source.is_nowhere());
+
+  Visit(left);
+  // Call the runtime to find the boolean value of the left-hand
+  // subexpression.  Duplicate the value if it may be needed as the final
+  // result.
+  if (left_source.is_temporary()) {
+    if (destination.is_temporary()) {
+      __ ldr(r0, MemOperand(sp));
+      __ push(r0);
+    }
+  } else {
+    ASSERT(left->AsLiteral() != NULL);
+    __ mov(r0, Operand(left->AsLiteral()->handle()));
+    __ push(r0);
+    if (destination.is_temporary()) __ push(r0);
+  }
+  // The left-hand value is on top of the stack.  It is duplicated on the
+  // stack iff the destination location is temporary.
+  __ CallRuntime(Runtime::kToBool, 1);
+  __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+  __ cmp(r0, ip);
+  __ b(eq, &done);
+
+  // Discard the left-hand value if present on the stack.
+  if (destination.is_temporary()) __ pop();
+  Visit(right);
+
+  // Save or discard the right-hand value as needed.
+  if (destination.is_temporary() && right_source.is_constant()) {
+    ASSERT(right->AsLiteral() != NULL);
+    __ mov(ip, Operand(right->AsLiteral()->handle()));
+    __ push(ip);
+  } else if (destination.is_nowhere() && right_source.is_temporary()) {
+    __ pop();
+  }
+
+  __ bind(&done);
+}
+
+} }  // namespace v8::internal
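
A note on the VisitBinaryOperation case above: it handles only short-circuited '||' outside of test contexts, evaluating the left operand once and the right operand only when the left value is falsy. A minimal JavaScript sketch of that evaluation order (orValue and the sample callbacks are illustrative names, not part of this patch):

function orValue(left, right) {
  var temp = left();             // evaluate e0 exactly once
  return temp ? temp : right();  // e1 runs only when temp is falsy
}

var calls = 0;
var r = orValue(function() { return 0; },
                function() { calls++; return "fallback"; });
// r == "fallback" and calls == 1, matching the semantics of (0 || "fallback").
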
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index d230b45..ba83645 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -615,6 +615,13 @@
 }
 
 
+void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
+                                        ExternalArrayType array_type) {
+  // TODO(476): port specialized code.
+  GenerateGeneric(masm);
+}
+
+
 void KeyedStoreIC::Generate(MacroAssembler* masm,
                             const ExternalReference& f) {
   // ---------- S t a t e --------------
@@ -748,6 +755,13 @@
 }
 
 
+void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
+                                         ExternalArrayType array_type) {
+  // TODO(476): port specialized code.
+  GenerateGeneric(masm);
+}
+
+
 void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
   // ---------- S t a t e --------------
   //  -- r0     : value
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index cf46773..45c6540 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1051,7 +1051,6 @@
     int argc = Builtins::GetArgumentsCount(id);
     uint32_t flags =
         Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
-        Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
         Bootstrapper::FixupFlagsUseCodeObject::encode(false);
     Unresolved entry = { pc_offset() - kInstrSize, flags, name };
     unresolved_.Add(entry);
@@ -1069,7 +1068,6 @@
     int argc = Builtins::GetArgumentsCount(id);
     uint32_t flags =
         Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
-        Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
         Bootstrapper::FixupFlagsUseCodeObject::encode(true);
     Unresolved entry = { pc_offset() - kInstrSize, flags, name };
     unresolved_.Add(entry);
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index ee9d70d..e37bb5e 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -246,7 +246,6 @@
 
   // Call a code stub.
   void CallStub(CodeStub* stub, Condition cond = al);
-  void CallJSExitStub(CodeStub* stub);
 
   // Return from a code stub after popping its arguments.
   void StubReturn(int argc);
diff --git a/src/arm/virtual-frame-arm.cc b/src/arm/virtual-frame-arm.cc
index 2d5b140..97d164e 100644
--- a/src/arm/virtual-frame-arm.cc
+++ b/src/arm/virtual-frame-arm.cc
@@ -255,7 +255,7 @@
 
 
 void VirtualFrame::RawCallCodeObject(Handle<Code> code,
-                                       RelocInfo::Mode rmode) {
+                                     RelocInfo::Mode rmode) {
   ASSERT(cgen()->HasValidEntryRegisters());
   __ Call(code, rmode);
 }
diff --git a/src/array.js b/src/array.js
index f8e63d0..94d74a5 100644
--- a/src/array.js
+++ b/src/array.js
@@ -1058,6 +1058,10 @@
   return current;
 }
 
+// ES5, 15.4.3.2
+function ArrayIsArray(obj) {
+  return IS_ARRAY(obj);
+}
 
 // -------------------------------------------------------------------
 
@@ -1075,6 +1079,11 @@
   // object.
   %SetProperty($Array.prototype, "constructor", $Array, DONT_ENUM);
 
+  // Setup non-enumerable functions on the Array object.
+  InstallFunctions($Array, DONT_ENUM, $Array(
+    "isArray", ArrayIsArray
+  ));
+
   // Setup non-enumerable functions of the Array.prototype object and
   // set their names.
   InstallFunctionsOnHiddenPrototype($Array.prototype, DONT_ENUM, $Array(
@@ -1098,8 +1107,9 @@
     "indexOf", ArrayIndexOf,
     "lastIndexOf", ArrayLastIndexOf,
     "reduce", ArrayReduce,
-    "reduceRight", ArrayReduceRight));
-
+    "reduceRight", ArrayReduceRight
+  ));
+
   // Manipulate the length of some of the functions to meet
   // expectations set by ECMA-262 or Mozilla.
   UpdateFunctionLengths({
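
For reference, the ArrayIsArray function installed above gives the standard ES5 Array.isArray behavior; a short usage sketch:

Array.isArray([1, 2, 3]);      // true
Array.isArray(new Array(4));   // true
Array.isArray({ length: 0 });  // false: array-like, but not an Array
Array.isArray("abc");          // false
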
diff --git a/src/assembler.cc b/src/assembler.cc
index d81b4b0..34595f8 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -343,9 +343,6 @@
       if (SetMode(RelocInfo::EMBEDDED_OBJECT)) return;
     } else if (tag == kCodeTargetTag) {
       ReadTaggedPC();
-      if (*(reinterpret_cast<int*>(rinfo_.pc())) == 0x61) {
-        tag = 0;
-      }
       if (SetMode(RelocInfo::CODE_TARGET)) return;
     } else if (tag == kPositionTag) {
       ReadTaggedPC();
diff --git a/src/assembler.h b/src/assembler.h
index 827389a..21a66dd 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -191,6 +191,7 @@
   INLINE(Address target_address());
   INLINE(void set_target_address(Address target));
   INLINE(Object* target_object());
+  INLINE(Handle<Object> target_object_handle(Assembler* origin));
   INLINE(Object** target_object_address());
   INLINE(void set_target_object(Object* target));
 
@@ -216,10 +217,10 @@
 
   // Patch the code with a call.
   void PatchCodeWithCall(Address target, int guard_bytes);
-  // Check whether the current instruction is currently a call
-  // sequence (whether naturally or a return sequence overwritten
-  // to enter the debugger).
-  INLINE(bool IsCallInstruction());
+
+  // Check whether this return sequence has been patched
+  // with a call to the debugger.
+  INLINE(bool IsPatchedReturnSequence());
 
 #ifdef ENABLE_DISASSEMBLER
   // Printing
diff --git a/src/ast.cc b/src/ast.cc
index 692bec0..f6864b8 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -91,20 +91,6 @@
 }
 
 
-#ifdef DEBUG
-
-const char* LoopStatement::OperatorString() const {
-  switch (type()) {
-    case DO_LOOP: return "DO";
-    case FOR_LOOP: return "FOR";
-    case WHILE_LOOP: return "WHILE";
-  }
-  return NULL;
-}
-
-#endif  // DEBUG
-
-
 Token::Value Assignment::binary_op() const {
   switch (op_) {
     case Token::ASSIGN_BIT_OR: return Token::BIT_OR;
@@ -187,6 +173,13 @@
 // Implementation of AstVisitor
 
 
+void AstVisitor::VisitDeclarations(ZoneList<Declaration*>* declarations) {
+  for (int i = 0; i < declarations->length(); i++) {
+    Visit(declarations->at(i));
+  }
+}
+
+
 void AstVisitor::VisitStatements(ZoneList<Statement*>* statements) {
   for (int i = 0; i < statements->length(); i++) {
     Visit(statements->at(i));
diff --git a/src/ast.h b/src/ast.h
index 6a1cdf5..42154f6 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -28,14 +28,14 @@
 #ifndef V8_AST_H_
 #define V8_AST_H_
 
+#include "location.h"
 #include "execution.h"
 #include "factory.h"
+#include "jsregexp.h"
+#include "jump-target.h"
 #include "runtime.h"
 #include "token.h"
 #include "variables.h"
-#include "macro-assembler.h"
-#include "jsregexp.h"
-#include "jump-target.h"
 
 namespace v8 {
 namespace internal {
@@ -64,10 +64,12 @@
   V(WithEnterStatement)                         \
   V(WithExitStatement)                          \
   V(SwitchStatement)                            \
-  V(LoopStatement)                              \
+  V(DoWhileStatement)                           \
+  V(WhileStatement)                             \
+  V(ForStatement)                               \
   V(ForInStatement)                             \
-  V(TryCatch)                                   \
-  V(TryFinally)                                 \
+  V(TryCatchStatement)                          \
+  V(TryFinallyStatement)                        \
   V(DebuggerStatement)
 
 #define EXPRESSION_NODE_LIST(V)                 \
@@ -160,6 +162,8 @@
 
 class Expression: public AstNode {
  public:
+  Expression() : location_(Location::Temporary()) {}
+
   virtual Expression* AsExpression()  { return this; }
 
   virtual bool IsValidJSON() { return false; }
@@ -173,8 +177,12 @@
   // Static type information for this expression.
   SmiAnalysis* type() { return &type_; }
 
+  Location location() { return location_; }
+  void set_location(Location loc) { location_ = loc; }
+
  private:
   SmiAnalysis type_;
+  Location location_;
 };
 
 
@@ -294,13 +302,59 @@
 };
 
 
-class LoopStatement: public IterationStatement {
+class DoWhileStatement: public IterationStatement {
  public:
-  enum Type { DO_LOOP, FOR_LOOP, WHILE_LOOP };
+  explicit DoWhileStatement(ZoneStringList* labels)
+      : IterationStatement(labels), cond_(NULL) {
+  }
 
-  LoopStatement(ZoneStringList* labels, Type type)
+  void Initialize(Expression* cond, Statement* body) {
+    IterationStatement::Initialize(body);
+    cond_ = cond;
+  }
+
+  virtual void Accept(AstVisitor* v);
+
+  Expression* cond() const { return cond_; }
+
+ private:
+  Expression* cond_;
+};
+
+
+class WhileStatement: public IterationStatement {
+ public:
+  explicit WhileStatement(ZoneStringList* labels)
       : IterationStatement(labels),
-        type_(type),
+        cond_(NULL),
+        may_have_function_literal_(true) {
+  }
+
+  void Initialize(Expression* cond, Statement* body) {
+    IterationStatement::Initialize(body);
+    cond_ = cond;
+  }
+
+  virtual void Accept(AstVisitor* v);
+
+  Expression* cond() const { return cond_; }
+  bool may_have_function_literal() const {
+    return may_have_function_literal_;
+  }
+
+ private:
+  Expression* cond_;
+  // True if there is a function literal subexpression in the condition.
+  bool may_have_function_literal_;
+
+  friend class AstOptimizer;
+};
+
+
+class ForStatement: public IterationStatement {
+ public:
+  explicit ForStatement(ZoneStringList* labels)
+      : IterationStatement(labels),
         init_(NULL),
         cond_(NULL),
         next_(NULL),
@@ -311,8 +365,6 @@
                   Expression* cond,
                   Statement* next,
                   Statement* body) {
-    ASSERT(init == NULL || type_ == FOR_LOOP);
-    ASSERT(next == NULL || type_ == FOR_LOOP);
     IterationStatement::Initialize(body);
     init_ = init;
     cond_ = cond;
@@ -321,7 +373,6 @@
 
   virtual void Accept(AstVisitor* v);
 
-  Type type() const  { return type_; }
   Statement* init() const  { return init_; }
   Expression* cond() const  { return cond_; }
   Statement* next() const  { return next_; }
@@ -329,12 +380,7 @@
     return may_have_function_literal_;
   }
 
-#ifdef DEBUG
-  const char* OperatorString() const;
-#endif
-
  private:
-  Type type_;
   Statement* init_;
   Expression* cond_;
   Statement* next_;
@@ -569,9 +615,11 @@
 };
 
 
-class TryCatch: public TryStatement {
+class TryCatchStatement: public TryStatement {
  public:
-  TryCatch(Block* try_block, Expression* catch_var, Block* catch_block)
+  TryCatchStatement(Block* try_block,
+                    Expression* catch_var,
+                    Block* catch_block)
       : TryStatement(try_block),
         catch_var_(catch_var),
         catch_block_(catch_block) {
@@ -589,9 +637,9 @@
 };
 
 
-class TryFinally: public TryStatement {
+class TryFinallyStatement: public TryStatement {
  public:
-  TryFinally(Block* try_block, Block* finally_block)
+  TryFinallyStatement(Block* try_block, Block* finally_block)
       : TryStatement(try_block),
         finally_block_(finally_block) { }
 
@@ -1212,7 +1260,6 @@
                   Scope* scope,
                   ZoneList<Statement*>* body,
                   int materialized_literal_count,
-                  bool contains_array_literal,
                   int expected_property_count,
                   bool has_only_this_property_assignments,
                   bool has_only_simple_this_property_assignments,
@@ -1225,7 +1272,6 @@
         scope_(scope),
         body_(body),
         materialized_literal_count_(materialized_literal_count),
-        contains_array_literal_(contains_array_literal),
         expected_property_count_(expected_property_count),
         has_only_this_property_assignments_(has_only_this_property_assignments),
         has_only_simple_this_property_assignments_(
@@ -1258,7 +1304,6 @@
   bool is_expression() const { return is_expression_; }
 
   int materialized_literal_count() { return materialized_literal_count_; }
-  bool contains_array_literal() { return contains_array_literal_; }
   int expected_property_count() { return expected_property_count_; }
   bool has_only_this_property_assignments() {
       return has_only_this_property_assignments_;
@@ -1293,7 +1338,6 @@
   Scope* scope_;
   ZoneList<Statement*>* body_;
   int materialized_literal_count_;
-  bool contains_array_literal_;
   int expected_property_count_;
   bool has_only_this_property_assignments_;
   bool has_only_simple_this_property_assignments_;
@@ -1690,6 +1734,7 @@
   void Visit(AstNode* node) { node->Accept(this); }
 
   // Iteration
+  virtual void VisitDeclarations(ZoneList<Declaration*>* declarations);
   virtual void VisitStatements(ZoneList<Statement*>* statements);
   virtual void VisitExpressions(ZoneList<Expression*>* expressions);
 
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index e2d23ef..43aa1a3 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -201,20 +201,13 @@
     }
     Code* code = Code::cast(code_[i]);
     Address pc = code->instruction_start() + pc_[i];
-    bool is_pc_relative = Bootstrapper::FixupFlagsIsPCRelative::decode(flags);
+    RelocInfo target(pc, RelocInfo::CODE_TARGET, 0);
     bool use_code_object = Bootstrapper::FixupFlagsUseCodeObject::decode(flags);
-
     if (use_code_object) {
-      if (is_pc_relative) {
-        Assembler::set_target_address_at(
-            pc, reinterpret_cast<Address>(f->code()));
-      } else {
-        *reinterpret_cast<Object**>(pc) = f->code();
-      }
+      target.set_target_object(f->code());
     } else {
-      Assembler::set_target_address_at(pc, f->code()->instruction_start());
+      target.set_target_address(f->code()->instruction_start());
     }
-
     LOG(StringEvent("resolved", name));
   }
   Clear();
diff --git a/src/bootstrapper.h b/src/bootstrapper.h
index 809cd41..15fc88d 100644
--- a/src/bootstrapper.h
+++ b/src/bootstrapper.h
@@ -66,9 +66,8 @@
   static bool IsActive();
 
   // Encoding/decoding support for fixup flags.
-  class FixupFlagsIsPCRelative: public BitField<bool, 0, 1> {};
-  class FixupFlagsUseCodeObject: public BitField<bool, 1, 1> {};
-  class FixupFlagsArgumentsCount: public BitField<uint32_t, 2, 32-2> {};
+  class FixupFlagsUseCodeObject: public BitField<bool, 0, 1> {};
+  class FixupFlagsArgumentsCount: public BitField<uint32_t, 1, 32-1> {};
 
   // Support for thread preemption.
   static int ArchiveSpacePerThread();
diff --git a/src/builtins.cc b/src/builtins.cc
index afb5427..fa1b34e 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -538,6 +538,44 @@
 }
 
 
+static void Generate_KeyedLoadIC_ExternalByteArray(MacroAssembler* masm) {
+  KeyedLoadIC::GenerateExternalArray(masm, kExternalByteArray);
+}
+
+
+static void Generate_KeyedLoadIC_ExternalUnsignedByteArray(
+    MacroAssembler* masm) {
+  KeyedLoadIC::GenerateExternalArray(masm, kExternalUnsignedByteArray);
+}
+
+
+static void Generate_KeyedLoadIC_ExternalShortArray(MacroAssembler* masm) {
+  KeyedLoadIC::GenerateExternalArray(masm, kExternalShortArray);
+}
+
+
+static void Generate_KeyedLoadIC_ExternalUnsignedShortArray(
+    MacroAssembler* masm) {
+  KeyedLoadIC::GenerateExternalArray(masm, kExternalUnsignedShortArray);
+}
+
+
+static void Generate_KeyedLoadIC_ExternalIntArray(MacroAssembler* masm) {
+  KeyedLoadIC::GenerateExternalArray(masm, kExternalIntArray);
+}
+
+
+static void Generate_KeyedLoadIC_ExternalUnsignedIntArray(
+    MacroAssembler* masm) {
+  KeyedLoadIC::GenerateExternalArray(masm, kExternalUnsignedIntArray);
+}
+
+
+static void Generate_KeyedLoadIC_ExternalFloatArray(MacroAssembler* masm) {
+  KeyedLoadIC::GenerateExternalArray(masm, kExternalFloatArray);
+}
+
+
 static void Generate_KeyedLoadIC_PreMonomorphic(MacroAssembler* masm) {
   KeyedLoadIC::GeneratePreMonomorphic(masm);
 }
@@ -567,6 +605,44 @@
 }
 
 
+static void Generate_KeyedStoreIC_ExternalByteArray(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateExternalArray(masm, kExternalByteArray);
+}
+
+
+static void Generate_KeyedStoreIC_ExternalUnsignedByteArray(
+    MacroAssembler* masm) {
+  KeyedStoreIC::GenerateExternalArray(masm, kExternalUnsignedByteArray);
+}
+
+
+static void Generate_KeyedStoreIC_ExternalShortArray(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateExternalArray(masm, kExternalShortArray);
+}
+
+
+static void Generate_KeyedStoreIC_ExternalUnsignedShortArray(
+    MacroAssembler* masm) {
+  KeyedStoreIC::GenerateExternalArray(masm, kExternalUnsignedShortArray);
+}
+
+
+static void Generate_KeyedStoreIC_ExternalIntArray(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateExternalArray(masm, kExternalIntArray);
+}
+
+
+static void Generate_KeyedStoreIC_ExternalUnsignedIntArray(
+    MacroAssembler* masm) {
+  KeyedStoreIC::GenerateExternalArray(masm, kExternalUnsignedIntArray);
+}
+
+
+static void Generate_KeyedStoreIC_ExternalFloatArray(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateExternalArray(masm, kExternalFloatArray);
+}
+
+
 static void Generate_KeyedStoreIC_ExtendStorage(MacroAssembler* masm) {
   KeyedStoreIC::GenerateExtendStorage(masm);
 }
diff --git a/src/builtins.h b/src/builtins.h
index 141d5b7..bc32c49 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -48,44 +48,58 @@
 
 
 // Define list of builtins implemented in assembly.
-#define BUILTIN_LIST_A(V)                                      \
-  V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED)        \
-  V(JSConstructCall,            BUILTIN, UNINITIALIZED)        \
-  V(JSConstructStubGeneric,     BUILTIN, UNINITIALIZED)        \
-  V(JSEntryTrampoline,          BUILTIN, UNINITIALIZED)        \
-  V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED)        \
-                                                               \
-  V(LoadIC_Miss,                BUILTIN, UNINITIALIZED)        \
-  V(KeyedLoadIC_Miss,           BUILTIN, UNINITIALIZED)        \
-  V(StoreIC_Miss,               BUILTIN, UNINITIALIZED)        \
-  V(KeyedStoreIC_Miss,          BUILTIN, UNINITIALIZED)        \
-                                                               \
-  V(StoreIC_ExtendStorage,      BUILTIN, UNINITIALIZED)        \
-  V(KeyedStoreIC_ExtendStorage, BUILTIN, UNINITIALIZED)        \
-                                                               \
-  V(LoadIC_Initialize,          LOAD_IC, UNINITIALIZED)        \
-  V(LoadIC_PreMonomorphic,      LOAD_IC, PREMONOMORPHIC)       \
-  V(LoadIC_Normal,              LOAD_IC, MONOMORPHIC)          \
-  V(LoadIC_ArrayLength,         LOAD_IC, MONOMORPHIC)          \
-  V(LoadIC_StringLength,        LOAD_IC, MONOMORPHIC)          \
-  V(LoadIC_FunctionPrototype,   LOAD_IC, MONOMORPHIC)          \
-  V(LoadIC_Megamorphic,         LOAD_IC, MEGAMORPHIC)          \
-                                                               \
-  V(KeyedLoadIC_Initialize,     KEYED_LOAD_IC, UNINITIALIZED)  \
-  V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC) \
-  V(KeyedLoadIC_Generic,        KEYED_LOAD_IC, MEGAMORPHIC)    \
-                                                               \
-  V(StoreIC_Initialize,         STORE_IC, UNINITIALIZED)       \
-  V(StoreIC_Megamorphic,        STORE_IC, MEGAMORPHIC)         \
-                                                               \
-  V(KeyedStoreIC_Initialize,    KEYED_STORE_IC, UNINITIALIZED) \
-  V(KeyedStoreIC_Generic,       KEYED_STORE_IC, MEGAMORPHIC)   \
-                                                               \
-  /* Uses KeyedLoadIC_Initialize; must be after in list. */    \
-  V(FunctionCall,               BUILTIN, UNINITIALIZED)        \
-  V(FunctionApply,              BUILTIN, UNINITIALIZED)        \
-                                                               \
-  V(ArrayCode,                  BUILTIN, UNINITIALIZED)        \
+#define BUILTIN_LIST_A(V)                                                 \
+  V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED)                   \
+  V(JSConstructCall,            BUILTIN, UNINITIALIZED)                   \
+  V(JSConstructStubGeneric,     BUILTIN, UNINITIALIZED)                   \
+  V(JSEntryTrampoline,          BUILTIN, UNINITIALIZED)                   \
+  V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED)                   \
+                                                                          \
+  V(LoadIC_Miss,                BUILTIN, UNINITIALIZED)                   \
+  V(KeyedLoadIC_Miss,           BUILTIN, UNINITIALIZED)                   \
+  V(StoreIC_Miss,               BUILTIN, UNINITIALIZED)                   \
+  V(KeyedStoreIC_Miss,          BUILTIN, UNINITIALIZED)                   \
+                                                                          \
+  V(StoreIC_ExtendStorage,      BUILTIN, UNINITIALIZED)                   \
+  V(KeyedStoreIC_ExtendStorage, BUILTIN, UNINITIALIZED)                   \
+                                                                          \
+  V(LoadIC_Initialize,          LOAD_IC, UNINITIALIZED)                   \
+  V(LoadIC_PreMonomorphic,      LOAD_IC, PREMONOMORPHIC)                  \
+  V(LoadIC_Normal,              LOAD_IC, MONOMORPHIC)                     \
+  V(LoadIC_ArrayLength,         LOAD_IC, MONOMORPHIC)                     \
+  V(LoadIC_StringLength,        LOAD_IC, MONOMORPHIC)                     \
+  V(LoadIC_FunctionPrototype,   LOAD_IC, MONOMORPHIC)                     \
+  V(LoadIC_Megamorphic,         LOAD_IC, MEGAMORPHIC)                     \
+                                                                          \
+  V(KeyedLoadIC_Initialize,     KEYED_LOAD_IC, UNINITIALIZED)             \
+  V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC)            \
+  V(KeyedLoadIC_Generic,        KEYED_LOAD_IC, MEGAMORPHIC)               \
+  V(KeyedLoadIC_ExternalByteArray,          KEYED_LOAD_IC, MEGAMORPHIC)   \
+  V(KeyedLoadIC_ExternalUnsignedByteArray,  KEYED_LOAD_IC, MEGAMORPHIC)   \
+  V(KeyedLoadIC_ExternalShortArray,         KEYED_LOAD_IC, MEGAMORPHIC)   \
+  V(KeyedLoadIC_ExternalUnsignedShortArray, KEYED_LOAD_IC, MEGAMORPHIC)   \
+  V(KeyedLoadIC_ExternalIntArray,           KEYED_LOAD_IC, MEGAMORPHIC)   \
+  V(KeyedLoadIC_ExternalUnsignedIntArray,   KEYED_LOAD_IC, MEGAMORPHIC)   \
+  V(KeyedLoadIC_ExternalFloatArray,         KEYED_LOAD_IC, MEGAMORPHIC)   \
+                                                                          \
+  V(StoreIC_Initialize,         STORE_IC, UNINITIALIZED)                  \
+  V(StoreIC_Megamorphic,        STORE_IC, MEGAMORPHIC)                    \
+                                                                          \
+  V(KeyedStoreIC_Initialize,    KEYED_STORE_IC, UNINITIALIZED)            \
+  V(KeyedStoreIC_Generic,       KEYED_STORE_IC, MEGAMORPHIC)              \
+  V(KeyedStoreIC_ExternalByteArray,          KEYED_STORE_IC, MEGAMORPHIC) \
+  V(KeyedStoreIC_ExternalUnsignedByteArray,  KEYED_STORE_IC, MEGAMORPHIC) \
+  V(KeyedStoreIC_ExternalShortArray,         KEYED_STORE_IC, MEGAMORPHIC) \
+  V(KeyedStoreIC_ExternalUnsignedShortArray, KEYED_STORE_IC, MEGAMORPHIC) \
+  V(KeyedStoreIC_ExternalIntArray,           KEYED_STORE_IC, MEGAMORPHIC) \
+  V(KeyedStoreIC_ExternalUnsignedIntArray,   KEYED_STORE_IC, MEGAMORPHIC) \
+  V(KeyedStoreIC_ExternalFloatArray,         KEYED_STORE_IC, MEGAMORPHIC) \
+                                                                          \
+  /* Uses KeyedLoadIC_Initialize; must be after in list. */               \
+  V(FunctionCall,               BUILTIN, UNINITIALIZED)                   \
+  V(FunctionApply,              BUILTIN, UNINITIALIZED)                   \
+                                                                          \
+  V(ArrayCode,                  BUILTIN, UNINITIALIZED)                   \
   V(ArrayConstructCode,         BUILTIN, UNINITIALIZED)
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
diff --git a/src/checks.h b/src/checks.h
index b302e5b..3b0c851 100644
--- a/src/checks.h
+++ b/src/checks.h
@@ -80,6 +80,27 @@
   }
 }
 
+// Helper function used by the CHECK_EQ function when given int64_t
+// arguments.  Should not be called directly.
+static inline void CheckEqualsHelper(const char* file, int line,
+                                     const char* expected_source,
+                                     int64_t expected,
+                                     const char* value_source,
+                                     int64_t value) {
+  if (expected != value) {
+    // Print int64_t values in hex, as two int32s,
+    // to avoid platform dependencies.
+    V8_Fatal(file, line,
+             "CHECK_EQ(%s, %s) failed\n#"
+             "   Expected: 0x%08x%08x\n#   Found: 0x%08x%08x",
+             expected_source, value_source,
+             static_cast<uint32_t>(expected >> 32),
+             static_cast<uint32_t>(expected),
+             static_cast<uint32_t>(value >> 32),
+             static_cast<uint32_t>(value));
+  }
+}
+
 
 // Helper function used by the CHECK_NE function when given int
 // arguments.  Should not be called directly.
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 9c24c60..586c948 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -132,8 +132,6 @@
       return "SetProperty";
     case InvokeBuiltin:
       return "InvokeBuiltin";
-    case JSExit:
-      return "JSExit";
     case ConvertToDouble:
       return "ConvertToDouble";
     case WriteInt32ToHeapNumber:
diff --git a/src/code-stubs.h b/src/code-stubs.h
index ae86c20..91d951f 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -56,7 +56,6 @@
     GetProperty,   // ARM only
     SetProperty,   // ARM only
     InvokeBuiltin,  // ARM only
-    JSExit,        // ARM only
     RegExpCEntry,  // ARM only
     NUMBER_OF_IDS
   };
diff --git a/src/codegen.cc b/src/codegen.cc
index a18fa0f..28c0ba5 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -125,102 +125,114 @@
 }
 
 
-// Generate the code. Takes a function literal, generates code for it, assemble
-// all the pieces into a Code object. This function is only to be called by
-// the compiler.cc code.
-Handle<Code> CodeGenerator::MakeCode(FunctionLiteral* flit,
-                                     Handle<Script> script,
-                                     bool is_eval) {
-#ifdef ENABLE_DISASSEMBLER
-  bool print_code = Bootstrapper::IsActive()
-      ? FLAG_print_builtin_code
-      : FLAG_print_code;
-#endif
-
+void CodeGenerator::MakeCodePrologue(FunctionLiteral* fun) {
 #ifdef DEBUG
   bool print_source = false;
   bool print_ast = false;
+  bool print_json_ast = false;
   const char* ftype;
 
   if (Bootstrapper::IsActive()) {
     print_source = FLAG_print_builtin_source;
     print_ast = FLAG_print_builtin_ast;
+    print_json_ast = FLAG_print_builtin_json_ast;
     ftype = "builtin";
   } else {
     print_source = FLAG_print_source;
     print_ast = FLAG_print_ast;
+    print_json_ast = FLAG_print_json_ast;
     ftype = "user-defined";
   }
 
   if (FLAG_trace_codegen || print_source || print_ast) {
     PrintF("*** Generate code for %s function: ", ftype);
-    flit->name()->ShortPrint();
+    fun->name()->ShortPrint();
     PrintF(" ***\n");
   }
 
   if (print_source) {
-    PrintF("--- Source from AST ---\n%s\n", PrettyPrinter().PrintProgram(flit));
+    PrintF("--- Source from AST ---\n%s\n", PrettyPrinter().PrintProgram(fun));
   }
 
   if (print_ast) {
-    PrintF("--- AST ---\n%s\n", AstPrinter().PrintProgram(flit));
+    PrintF("--- AST ---\n%s\n", AstPrinter().PrintProgram(fun));
+  }
+
+  if (print_json_ast) {
+    JsonAstBuilder builder;
+    PrintF("%s", builder.BuildProgram(fun));
   }
 #endif  // DEBUG
+}
 
-  // Generate code.
-  const int initial_buffer_size = 4 * KB;
-  CodeGenerator cgen(initial_buffer_size, script, is_eval);
-  CodeGeneratorScope scope(&cgen);
-  cgen.GenCode(flit);
-  if (cgen.HasStackOverflow()) {
-    ASSERT(!Top::has_pending_exception());
-    return Handle<Code>::null();
-  }
 
-  // Allocate and install the code.  Time the rest of this function as
-  // code creation.
-  HistogramTimerScope timer(&Counters::code_creation);
+Handle<Code> CodeGenerator::MakeCodeEpilogue(FunctionLiteral* fun,
+                                             MacroAssembler* masm,
+                                             Code::Flags flags,
+                                             Handle<Script> script) {
+  // Allocate and install the code.
   CodeDesc desc;
-  cgen.masm()->GetCode(&desc);
-  ZoneScopeInfo sinfo(flit->scope());
-  InLoopFlag in_loop = (cgen.loop_nesting() != 0) ? IN_LOOP : NOT_IN_LOOP;
-  Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, in_loop);
-  Handle<Code> code = Factory::NewCode(desc,
-                                       &sinfo,
-                                       flags,
-                                       cgen.masm()->CodeObject());
+  masm->GetCode(&desc);
+  ZoneScopeInfo sinfo(fun->scope());
+  Handle<Code> code =
+      Factory::NewCode(desc, &sinfo, flags, masm->CodeObject());
 
   // Add unresolved entries in the code to the fixup list.
-  Bootstrapper::AddFixup(*code, cgen.masm());
+  Bootstrapper::AddFixup(*code, masm);
 
 #ifdef ENABLE_DISASSEMBLER
+  bool print_code = Bootstrapper::IsActive()
+      ? FLAG_print_builtin_code
+      : FLAG_print_code;
   if (print_code) {
     // Print the source code if available.
     if (!script->IsUndefined() && !script->source()->IsUndefined()) {
       PrintF("--- Raw source ---\n");
       StringInputBuffer stream(String::cast(script->source()));
-      stream.Seek(flit->start_position());
-      // flit->end_position() points to the last character in the stream. We
+      stream.Seek(fun->start_position());
+      // fun->end_position() points to the last character in the stream. We
       // need to compensate by adding one to calculate the length.
-      int source_len = flit->end_position() - flit->start_position() + 1;
+      int source_len = fun->end_position() - fun->start_position() + 1;
       for (int i = 0; i < source_len; i++) {
         if (stream.has_more()) PrintF("%c", stream.GetNext());
       }
       PrintF("\n\n");
     }
     PrintF("--- Code ---\n");
-    code->Disassemble(*flit->name()->ToCString());
+    code->Disassemble(*fun->name()->ToCString());
   }
 #endif  // ENABLE_DISASSEMBLER
 
   if (!code.is_null()) {
     Counters::total_compiled_code_size.Increment(code->instruction_size());
   }
-
   return code;
 }
 
 
+// Generate the code. Takes a function literal, generates code for it, and
+// assembles all the pieces into a Code object. This function is only to be
+// called by the compiler.cc code.
+Handle<Code> CodeGenerator::MakeCode(FunctionLiteral* fun,
+                                     Handle<Script> script,
+                                     bool is_eval) {
+  MakeCodePrologue(fun);
+  // Generate code.
+  const int kInitialBufferSize = 4 * KB;
+  CodeGenerator cgen(kInitialBufferSize, script, is_eval);
+  CodeGeneratorScope scope(&cgen);
+  cgen.GenCode(fun);
+  if (cgen.HasStackOverflow()) {
+    ASSERT(!Top::has_pending_exception());
+    return Handle<Code>::null();
+  }
+
+  InLoopFlag in_loop = (cgen.loop_nesting() != 0) ? IN_LOOP : NOT_IN_LOOP;
+  Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, in_loop);
+  return MakeCodeEpilogue(fun, cgen.masm(), flags, script);
+}
+
+
 #ifdef ENABLE_LOGGING_AND_PROFILING
 
 bool CodeGenerator::ShouldGenerateLog(Expression* type) {
@@ -262,7 +274,7 @@
 }
 
 
-static Handle<Code> ComputeLazyCompile(int argc) {
+Handle<Code> CodeGenerator::ComputeLazyCompile(int argc) {
   CALL_HEAP_FUNCTION(StubCache::ComputeLazyCompile(argc), Code);
 }
 
@@ -314,7 +326,6 @@
   Handle<JSFunction> function =
       Factory::NewFunctionBoilerplate(node->name(),
                                       node->materialized_literal_count(),
-                                      node->contains_array_literal(),
                                       code);
   CodeGenerator::SetFunctionInfo(function, node, false, script_);
 
@@ -469,26 +480,45 @@
 }
 
 
-static inline void RecordPositions(CodeGenerator* cgen, int pos) {
+// Simple condition analysis.  ALWAYS_TRUE and ALWAYS_FALSE represent a
+// known result for the test expression, with no side effects.
+CodeGenerator::ConditionAnalysis CodeGenerator::AnalyzeCondition(
+    Expression* cond) {
+  if (cond == NULL) return ALWAYS_TRUE;
+
+  Literal* lit = cond->AsLiteral();
+  if (lit == NULL) return DONT_KNOW;
+
+  if (lit->IsTrue()) {
+    return ALWAYS_TRUE;
+  } else if (lit->IsFalse()) {
+    return ALWAYS_FALSE;
+  }
+
+  return DONT_KNOW;
+}
+
+
+void CodeGenerator::RecordPositions(MacroAssembler* masm, int pos) {
   if (pos != RelocInfo::kNoPosition) {
-    cgen->masm()->RecordStatementPosition(pos);
-    cgen->masm()->RecordPosition(pos);
+    masm->RecordStatementPosition(pos);
+    masm->RecordPosition(pos);
   }
 }
 
 
 void CodeGenerator::CodeForFunctionPosition(FunctionLiteral* fun) {
-  if (FLAG_debug_info) RecordPositions(this, fun->start_position());
+  if (FLAG_debug_info) RecordPositions(masm(), fun->start_position());
 }
 
 
 void CodeGenerator::CodeForReturnPosition(FunctionLiteral* fun) {
-  if (FLAG_debug_info) RecordPositions(this, fun->end_position());
+  if (FLAG_debug_info) RecordPositions(masm(), fun->end_position());
 }
 
 
 void CodeGenerator::CodeForStatementPosition(Statement* stmt) {
-  if (FLAG_debug_info) RecordPositions(this, stmt->statement_pos());
+  if (FLAG_debug_info) RecordPositions(masm(), stmt->statement_pos());
 }
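
The new AnalyzeCondition helper classifies test expressions whose value is known at compile time. Illustrative JavaScript inputs for the three classifications (a sketch; the variable x exists only to force the DONT_KNOW case):

for (;;) { break; }      // missing condition: ALWAYS_TRUE
while (true) { break; }  // literal true: ALWAYS_TRUE
do {} while (false);     // literal false: ALWAYS_FALSE
var x = 0;
while (x) {}             // anything non-literal: DONT_KNOW
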
 
 
diff --git a/src/codegen.h b/src/codegen.h
index d03f4b6..8c1b733 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -36,6 +36,8 @@
 // The contract to the shared code is that the CodeGenerator is a subclass
 // of Visitor and that the following methods are available publicly:
 //   MakeCode
+//   MakeCodePrologue
+//   MakeCodeEpilogue
 //   SetFunctionInfo
 //   masm
 //   frame
@@ -46,6 +48,7 @@
 //   AddDeferred
 //   in_spilled_code
 //   set_in_spilled_code
+//   RecordPositions
 //
 // These methods are either used privately by the shared code or implemented as
 // shared code:
@@ -53,6 +56,7 @@
 //   ~CodeGenerator
 //   ProcessDeferred
 //   GenCode
+//   ComputeLazyCompile
 //   BuildBoilerplate
 //   ComputeCallInitialize
 //   ComputeCallInitializeInLoop
@@ -61,6 +65,7 @@
 //   FindInlineRuntimeLUT
 //   CheckForInlineRuntimeCall
 //   PatchInlineRuntimeEntry
+//   AnalyzeCondition
 //   CodeForFunctionPosition
 //   CodeForReturnPosition
 //   CodeForStatementPosition
diff --git a/src/compilation-cache.cc b/src/compilation-cache.cc
index 8dd9ec1..5427367 100644
--- a/src/compilation-cache.cc
+++ b/src/compilation-cache.cc
@@ -43,20 +43,22 @@
 static const int kEvalContextualGenerations = 1;
 static const int kRegExpGenerations = 1;
 #else
+// The number of ScriptGenerations is carefully chosen based on histograms.
+// See issue 458: http://code.google.com/p/v8/issues/detail?id=458
 static const int kScriptGenerations = 5;
 static const int kEvalGlobalGenerations = 2;
 static const int kEvalContextualGenerations = 2;
 static const int kRegExpGenerations = 2;
 #endif
 
-// Initial of each compilation cache table allocated.
+// Initial size of each compilation cache table allocated.
 static const int kInitialCacheSize = 64;
 
 // The compilation cache consists of several generational sub-caches which use
 // this class as a base class. A sub-cache contains a compilation cache table
-// for each generation of the sub-cache. As the same source code string has
-// different compiled code for scripts and evals. Internally, we use separate
-// sub-caches to avoid getting the wrong kind of result when looking up.
+// for each generation of the sub-cache. Since the same source code string has
+// different compiled code for scripts and evals, we use separate sub-caches
+// for different compilation modes, to avoid retrieving the wrong result.
 class CompilationSubCache {
  public:
   explicit CompilationSubCache(int generations): generations_(generations) {
diff --git a/src/compiler.cc b/src/compiler.cc
index 6ba7a9a..e422bf7 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -32,6 +32,7 @@
 #include "compilation-cache.h"
 #include "compiler.h"
 #include "debug.h"
+#include "fast-codegen.h"
 #include "oprofile-agent.h"
 #include "rewriter.h"
 #include "scopes.h"
@@ -40,6 +41,30 @@
 namespace v8 {
 namespace internal {
 
+
+class CodeGenSelector: public AstVisitor {
+ public:
+  enum CodeGenTag { NORMAL, FAST };
+
+  CodeGenSelector() : has_supported_syntax_(true) {}
+
+  CodeGenTag Select(FunctionLiteral* fun);
+
+ private:
+  void VisitDeclarations(ZoneList<Declaration*>* decls);
+  void VisitStatements(ZoneList<Statement*>* stmts);
+
+  // AST node visit functions.
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+  bool has_supported_syntax_;
+
+  DISALLOW_COPY_AND_ASSIGN(CodeGenSelector);
+};
+
+
 static Handle<Code> MakeCode(FunctionLiteral* literal,
                              Handle<Script> script,
                              Handle<Context> context,
@@ -79,8 +104,15 @@
   }
 
   // Generate code and return it.
-  Handle<Code> result = CodeGenerator::MakeCode(literal, script, is_eval);
-  return result;
+  if (FLAG_fast_compiler) {
+    CodeGenSelector selector;
+    CodeGenSelector::CodeGenTag code_gen = selector.Select(literal);
+    if (code_gen == CodeGenSelector::FAST) {
+      return FastCodeGenerator::MakeCode(literal, script, is_eval);
+    }
+    ASSERT(code_gen == CodeGenSelector::NORMAL);
+  }
+  return CodeGenerator::MakeCode(literal, script, is_eval);
 }
 
 
@@ -197,7 +229,6 @@
   Handle<JSFunction> fun =
       Factory::NewFunctionBoilerplate(lit->name(),
                                       lit->materialized_literal_count(),
-                                      lit->contains_array_literal(),
                                       code);
 
   ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
@@ -417,4 +448,332 @@
 }
 
 
+CodeGenSelector::CodeGenTag CodeGenSelector::Select(FunctionLiteral* fun) {
+  Scope* scope = fun->scope();
+
+  if (!scope->is_global_scope()) {
+    if (FLAG_trace_bailout) PrintF("Non-global scope\n");
+    return NORMAL;
+  }
+  ASSERT(scope->num_heap_slots() == 0);
+  ASSERT(scope->arguments() == NULL);
+
+  has_supported_syntax_ = true;
+  VisitDeclarations(fun->scope()->declarations());
+  if (!has_supported_syntax_) return NORMAL;
+
+  VisitStatements(fun->body());
+  return has_supported_syntax_ ? FAST : NORMAL;
+}
+
+
+#define BAILOUT(reason)                         \
+  do {                                          \
+    if (FLAG_trace_bailout) {                   \
+      PrintF("%s\n", reason);                   \
+    }                                           \
+    has_supported_syntax_ = false;              \
+    return;                                     \
+  } while (false)
+
+
+#define CHECK_BAILOUT                           \
+  do {                                          \
+    if (!has_supported_syntax_) return;         \
+  } while (false)
+
+
+void CodeGenSelector::VisitDeclarations(ZoneList<Declaration*>* decls) {
+  for (int i = 0; i < decls->length(); i++) {
+    Visit(decls->at(i));
+    CHECK_BAILOUT;
+  }
+}
+
+
+void CodeGenSelector::VisitStatements(ZoneList<Statement*>* stmts) {
+  for (int i = 0, len = stmts->length(); i < len; i++) {
+    Visit(stmts->at(i));
+    CHECK_BAILOUT;
+  }
+}
+
+
+void CodeGenSelector::VisitDeclaration(Declaration* decl) {
+  Variable* var = decl->proxy()->var();
+  if (!var->is_global() || var->mode() == Variable::CONST) {
+    BAILOUT("Non-global declaration");
+  }
+}
+
+
+void CodeGenSelector::VisitBlock(Block* stmt) {
+  VisitStatements(stmt->statements());
+}
+
+
+void CodeGenSelector::VisitExpressionStatement(ExpressionStatement* stmt) {
+  Expression* expr = stmt->expression();
+  Visit(expr);
+  CHECK_BAILOUT;
+  expr->set_location(Location::Nowhere());
+}
+
+
+void CodeGenSelector::VisitEmptyStatement(EmptyStatement* stmt) {
+  // EmptyStatement is supported.
+}
+
+
+void CodeGenSelector::VisitIfStatement(IfStatement* stmt) {
+  BAILOUT("IfStatement");
+}
+
+
+void CodeGenSelector::VisitContinueStatement(ContinueStatement* stmt) {
+  BAILOUT("ContinueStatement");
+}
+
+
+void CodeGenSelector::VisitBreakStatement(BreakStatement* stmt) {
+  BAILOUT("BreakStatement");
+}
+
+
+void CodeGenSelector::VisitReturnStatement(ReturnStatement* stmt) {
+  Visit(stmt->expression());
+}
+
+
+void CodeGenSelector::VisitWithEnterStatement(WithEnterStatement* stmt) {
+  BAILOUT("WithEnterStatement");
+}
+
+
+void CodeGenSelector::VisitWithExitStatement(WithExitStatement* stmt) {
+  BAILOUT("WithExitStatement");
+}
+
+
+void CodeGenSelector::VisitSwitchStatement(SwitchStatement* stmt) {
+  BAILOUT("SwitchStatement");
+}
+
+
+void CodeGenSelector::VisitDoWhileStatement(DoWhileStatement* stmt) {
+  BAILOUT("DoWhileStatement");
+}
+
+
+void CodeGenSelector::VisitWhileStatement(WhileStatement* stmt) {
+  BAILOUT("WhileStatement");
+}
+
+
+void CodeGenSelector::VisitForStatement(ForStatement* stmt) {
+  BAILOUT("ForStatement");
+}
+
+
+void CodeGenSelector::VisitForInStatement(ForInStatement* stmt) {
+  BAILOUT("ForInStatement");
+}
+
+
+void CodeGenSelector::VisitTryCatchStatement(TryCatchStatement* stmt) {
+  BAILOUT("TryCatchStatement");
+}
+
+
+void CodeGenSelector::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+  BAILOUT("TryFinallyStatement");
+}
+
+
+void CodeGenSelector::VisitDebuggerStatement(DebuggerStatement* stmt) {
+  BAILOUT("DebuggerStatement");
+}
+
+
+void CodeGenSelector::VisitFunctionLiteral(FunctionLiteral* expr) {
+  if (!expr->AllowsLazyCompilation()) {
+    BAILOUT("FunctionLiteral does not allow lazy compilation");
+  }
+}
+
+
+void CodeGenSelector::VisitFunctionBoilerplateLiteral(
+    FunctionBoilerplateLiteral* expr) {
+  BAILOUT("FunctionBoilerplateLiteral");
+}
+
+
+void CodeGenSelector::VisitConditional(Conditional* expr) {
+  BAILOUT("Conditional");
+}
+
+
+void CodeGenSelector::VisitSlot(Slot* expr) {
+  Slot::Type type = expr->type();
+  if (type != Slot::PARAMETER && type != Slot::LOCAL) {
+    BAILOUT("non-parameter/non-local slot reference");
+  }
+}
+
+
+void CodeGenSelector::VisitVariableProxy(VariableProxy* expr) {
+  Expression* rewrite = expr->var()->rewrite();
+  if (rewrite != NULL) Visit(rewrite);
+}
+
+
+void CodeGenSelector::VisitLiteral(Literal* expr) {
+  // All literals are supported.
+  expr->set_location(Location::Constant());
+}
+
+
+void CodeGenSelector::VisitRegExpLiteral(RegExpLiteral* expr) {
+  // RegExpLiterals are supported.
+}
+
+
+void CodeGenSelector::VisitObjectLiteral(ObjectLiteral* expr) {
+  BAILOUT("ObjectLiteral");
+}
+
+
+void CodeGenSelector::VisitArrayLiteral(ArrayLiteral* expr) {
+  ZoneList<Expression*>* subexprs = expr->values();
+  for (int i = 0, len = subexprs->length(); i < len; i++) {
+    Expression* subexpr = subexprs->at(i);
+    if (subexpr->AsLiteral() != NULL) continue;
+    if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+    Visit(subexpr);
+    CHECK_BAILOUT;
+  }
+}
+
+
+void CodeGenSelector::VisitCatchExtensionObject(CatchExtensionObject* expr) {
+  BAILOUT("CatchExtensionObject");
+}
+
+
+void CodeGenSelector::VisitAssignment(Assignment* expr) {
+  // We support plain non-compound assignments to parameters and
+  // non-context (stack-allocated) locals.
+  if (expr->starts_initialization_block()) BAILOUT("initialization block");
+
+  Token::Value op = expr->op();
+  if (op == Token::INIT_CONST) BAILOUT("initialize constant");
+  if (op != Token::ASSIGN && op != Token::INIT_VAR) {
+    BAILOUT("compound assignment");
+  }
+
+  Variable* var = expr->target()->AsVariableProxy()->AsVariable();
+  if (var == NULL) BAILOUT("non-variable assignment");
+
+  if (!var->is_global()) {
+    ASSERT(var->slot() != NULL);
+    Slot::Type type = var->slot()->type();
+    if (type != Slot::PARAMETER && type != Slot::LOCAL) {
+      BAILOUT("non-parameter/non-local slot assignment");
+    }
+  }
+
+  Visit(expr->value());
+}
+
+
+void CodeGenSelector::VisitThrow(Throw* expr) {
+  BAILOUT("Throw");
+}
+
+
+void CodeGenSelector::VisitProperty(Property* expr) {
+  BAILOUT("Property");
+}
+
+
+void CodeGenSelector::VisitCall(Call* expr) {
+  Expression* fun = expr->expression();
+  ZoneList<Expression*>* args = expr->arguments();
+  Variable* var = fun->AsVariableProxy()->AsVariable();
+
+  // Check for supported calls
+  if (var != NULL && var->is_possibly_eval()) {
+    BAILOUT("Call to a function named 'eval'");
+  } else if (var != NULL && !var->is_this() && var->is_global()) {
+    // ----------------------------------
+    // JavaScript example: 'foo(1, 2, 3)'  // foo is global
+    // ----------------------------------
+  } else {
+    BAILOUT("Call to a non-global function");
+  }
+  // Check all arguments to the call
+  for (int i = 0; i < args->length(); i++) {
+    Visit(args->at(i));
+    CHECK_BAILOUT;
+  }
+}
+
+
+void CodeGenSelector::VisitCallNew(CallNew* expr) {
+  BAILOUT("CallNew");
+}
+
+
+void CodeGenSelector::VisitCallRuntime(CallRuntime* expr) {
+  // Bail out in the case of a JS runtime function.
+  if (expr->function() == NULL) BAILOUT("CallRuntime");
+  // Check for an inline runtime call.
+  if (expr->name()->Get(0) == '_' &&
+      CodeGenerator::FindInlineRuntimeLUT(expr->name()) != NULL) {
+    BAILOUT("InlineRuntimeCall");
+  }
+  for (int i = 0; i < expr->arguments()->length(); i++) {
+    Visit(expr->arguments()->at(i));
+    CHECK_BAILOUT;
+  }
+}
+
+
+void CodeGenSelector::VisitUnaryOperation(UnaryOperation* expr) {
+  BAILOUT("UnaryOperation");
+}
+
+
+void CodeGenSelector::VisitCountOperation(CountOperation* expr) {
+  BAILOUT("CountOperation");
+}
+
+
+void CodeGenSelector::VisitBinaryOperation(BinaryOperation* expr) {
+  switch (expr->op()) {
+    case Token::OR:
+      Visit(expr->left());
+      CHECK_BAILOUT;
+      Visit(expr->right());
+      break;
+
+    default:
+      BAILOUT("Unsupported binary operation");
+  }
+}
+
+
+void CodeGenSelector::VisitCompareOperation(CompareOperation* expr) {
+  BAILOUT("CompareOperation");
+}
+
+
+void CodeGenSelector::VisitThisFunction(ThisFunction* expr) {
+  BAILOUT("ThisFunction");
+}
+
+#undef BAILOUT
+#undef CHECK_BAILOUT
+
+
 } }  // namespace v8::internal
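
Read together, the CodeGenSelector visitors above delimit the fast compiler's input language: roughly, top-level code built from global declarations, simple non-compound assignments, calls to global functions, literals, and short-circuited '||'. A hypothetical script that stays on the FAST path, assuming a d8-style global print function:

var x = 0;   // global declaration and INIT_VAR assignment: supported
x = x || 1;  // plain assignment with '||': supported
print(x);    // call to a global function: supported

// Each of the following would trigger a BAILOUT to the classic code
// generator instead:
//   if (x) {}     // IfStatement
//   while (x) {}  // WhileStatement
//   x.y;          // Property
//   x++;          // CountOperation
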
diff --git a/src/conversions-inl.h b/src/conversions-inl.h
index 8c875d7..ba7220a 100644
--- a/src/conversions-inl.h
+++ b/src/conversions-inl.h
@@ -84,7 +84,7 @@
   static const double two32 = 4294967296.0;
   static const double two31 = 2147483648.0;
   if (!isfinite(x) || x == 0) return 0;
-  if (x < 0 || x >= two32) x = fmod(x, two32);
+  if (x < 0 || x >= two32) x = modulo(x, two32);
   x = (x >= 0) ? floor(x) : ceil(x) + two32;
   return (int32_t) ((x >= two31) ? x - two32 : x);
 }
diff --git a/src/conversions.cc b/src/conversions.cc
index 2a3db7b..3e66d28 100644
--- a/src/conversions.cc
+++ b/src/conversions.cc
@@ -664,7 +664,7 @@
   int integer_pos = kBufferSize - 2;
   do {
     integer_buffer[integer_pos--] =
-        chars[static_cast<int>(fmod(integer_part, radix))];
+        chars[static_cast<int>(modulo(integer_part, radix))];
     integer_part /= radix;
   } while (integer_part >= 1.0);
   // Sanity check.
diff --git a/src/conversions.h b/src/conversions.h
index b6589cb..67f7d53 100644
--- a/src/conversions.h
+++ b/src/conversions.h
@@ -31,6 +31,7 @@
 namespace v8 {
 namespace internal {
 
+
 // The fast double-to-int conversion routine does not guarantee
 // rounding towards zero.
 // The result is unspecified if x is infinite or NaN, or if the rounded
diff --git a/src/d8-posix.cc b/src/d8-posix.cc
index fe130ce..2535ce0 100644
--- a/src/d8-posix.cc
+++ b/src/d8-posix.cc
@@ -311,7 +311,7 @@
                                int read_timeout,
                                int total_timeout) {
   Handle<String> accumulator = String::Empty();
-  const char* source = "function(a, b) { return a + b; }";
+  const char* source = "(function(a, b) { return a + b; })";
   Handle<Value> cons_as_obj(Script::Compile(String::New(source))->Run());
   Handle<Function> cons_function(Function::Cast(*cons_as_obj));
   Handle<Value> cons_args[2];
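
The added parentheses in the d8 source string matter: a bare 'function' keyword at statement position parses as a function declaration, and an anonymous declaration is a syntax error, whereas the parenthesized form is an expression whose value the compiled script yields. A small sketch:

var cons = eval("(function(a, b) { return a + b; })");
cons("foo", "bar");  // "foobar"

// Without the parentheses the same source is an anonymous function
// declaration and fails to compile:
//   eval("function(a, b) { return a + b; }");  // SyntaxError
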
diff --git a/src/dateparser-inl.h b/src/dateparser-inl.h
index 3d4161d..d5921d5 100644
--- a/src/dateparser-inl.h
+++ b/src/dateparser-inl.h
@@ -28,6 +28,8 @@
 #ifndef V8_DATEPARSER_INL_H_
 #define V8_DATEPARSER_INL_H_
 
+#include "dateparser.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/debug-delay.js b/src/debug-delay.js
index cb789be..35f7fcd 100644
--- a/src/debug-delay.js
+++ b/src/debug-delay.js
@@ -795,8 +795,8 @@
   return this.selected_frame;
 };
 
-ExecutionState.prototype.debugCommandProcessor = function(protocol) {
-  return new DebugCommandProcessor(this, protocol);
+ExecutionState.prototype.debugCommandProcessor = function(opt_is_running) {
+  return new DebugCommandProcessor(this, opt_is_running);
 };
 
 
@@ -1081,9 +1081,9 @@
 };
 
 
-function DebugCommandProcessor(exec_state) {
+function DebugCommandProcessor(exec_state, opt_is_running) {
   this.exec_state_ = exec_state;
-  this.running_ = false;
+  this.running_ = opt_is_running || false;
 };
 
 
@@ -1107,7 +1107,8 @@
     this.type = 'event';
   }
   this.success = true;
-  this.running = false;
+  // A handler may set this field to control the debugger's running state.
+  this.running = undefined;
 }
 
 
@@ -1168,11 +1169,7 @@
   if (this.message) {
     json.message = this.message;
   }
-  if (this.running) {
-    json.running = true;
-  } else {
-    json.running = false;
-  }
+  json.running = this.running;
   return JSON.stringify(json);
 }
 
@@ -1244,6 +1241,10 @@
         this.scriptsRequest_(request, response);
       } else if (request.command == 'threads') {
         this.threadsRequest_(request, response);
+      } else if (request.command == 'suspend') {
+        this.suspendRequest_(request, response);
+      } else if (request.command == 'version') {
+        this.versionRequest_(request, response);
       } else {
         throw new Error('Unknown command "' + request.command + '" in request');
       }
@@ -1258,7 +1259,11 @@
 
     // Return the response as a JSON encoded string.
     try {
-      this.running_ = response.running;  // Store the running state.
+      if (!IS_UNDEFINED(response.running)) {
+        // Response controls running state.
+        this.running_ = response.running;
+      }
+      response.running = this.running_;
       return response.toJSONProtocol();
     } catch (e) {
       // Failed to generate response - return generic error.
@@ -1907,6 +1912,18 @@
 };
 
 
+DebugCommandProcessor.prototype.suspendRequest_ = function(request, response) {
+  response.running = false;
+};
+
+
+DebugCommandProcessor.prototype.versionRequest_ = function(request, response) {
+  response.body = {
+    V8Version: %GetV8Version()
+  };
+};
+
+
 // Check whether the previously processed command caused the VM to become
 // running.
 DebugCommandProcessor.prototype.isRunning = function() {
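For reference, a hypothetical exchange for the two new commands in the JSON protocol this processor speaks (seq numbers and the version string are illustrative, not from a real session):

    --> {"seq":117,"type":"request","command":"suspend"}
    <-- {"seq":5,"request_seq":117,"type":"response","command":"suspend",
         "success":true,"running":false}
    --> {"seq":118,"type":"request","command":"version"}
    <-- {"seq":6,"request_seq":118,"type":"response","command":"version",
         "success":true,"body":{"V8Version":"..."},"running":true}

Every response now reports the processor's running state, and a handler can flip it by assigning response.running, as suspendRequest_ does above.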
diff --git a/src/debug.cc b/src/debug.cc
index ec658d6..d3a6b5b 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -1614,7 +1614,7 @@
     if (RelocInfo::IsJSReturn(it.rinfo()->rmode())) {
       at_js_return = (it.rinfo()->pc() ==
           addr - Assembler::kPatchReturnSequenceAddressOffset);
-      break_at_js_return_active = it.rinfo()->IsCallInstruction();
+      break_at_js_return_active = it.rinfo()->IsPatchedReturnSequence();
     }
     it.next();
   }
@@ -1677,22 +1677,6 @@
 }
 
 
-// If an object given is an external string, check that the underlying
-// resource is accessible. For other kinds of objects, always return true.
-static bool IsExternalStringValid(Object* str) {
-  if (!str->IsString() || !StringShape(String::cast(str)).IsExternal()) {
-    return true;
-  }
-  if (String::cast(str)->IsAsciiRepresentation()) {
-    return ExternalAsciiString::cast(str)->resource() != NULL;
-  } else if (String::cast(str)->IsTwoByteRepresentation()) {
-    return ExternalTwoByteString::cast(str)->resource() != NULL;
-  } else {
-    return true;
-  }
-}
-
-
 void Debug::CreateScriptCache() {
   HandleScope scope;
 
@@ -1711,7 +1695,7 @@
   while (iterator.has_next()) {
     HeapObject* obj = iterator.next();
     ASSERT(obj != NULL);
-    if (obj->IsScript() && IsExternalStringValid(Script::cast(obj)->source())) {
+    if (obj->IsScript() && Script::cast(obj)->HasValidSource()) {
       script_cache_->Add(Handle<Script>(Script::cast(obj)));
       count++;
     }
@@ -2228,21 +2212,31 @@
     return;
   }
 
-  // Get the DebugCommandProcessor.
-  v8::Local<v8::Object> api_exec_state =
-      v8::Utils::ToLocal(Handle<JSObject>::cast(exec_state));
-  v8::Local<v8::String> fun_name =
-      v8::String::New("debugCommandProcessor");
-  v8::Local<v8::Function> fun =
-      v8::Function::Cast(*api_exec_state->Get(fun_name));
   v8::TryCatch try_catch;
-  v8::Local<v8::Object> cmd_processor =
-      v8::Object::Cast(*fun->Call(api_exec_state, 0, NULL));
-  if (try_catch.HasCaught()) {
-    PrintLn(try_catch.Exception());
-    return;
+
+  // Create the DebugCommandProcessor, passing the initial running state.
+  v8::Local<v8::Object> cmd_processor;
+  {
+    v8::Local<v8::Object> api_exec_state =
+        v8::Utils::ToLocal(Handle<JSObject>::cast(exec_state));
+    v8::Local<v8::String> fun_name =
+        v8::String::New("debugCommandProcessor");
+    v8::Local<v8::Function> fun =
+        v8::Function::Cast(*api_exec_state->Get(fun_name));
+
+    v8::Handle<v8::Boolean> running =
+        auto_continue ? v8::True() : v8::False();
+    static const int kArgc = 1;
+    v8::Handle<Value> argv[kArgc] = { running };
+    cmd_processor = v8::Object::Cast(*fun->Call(api_exec_state, kArgc, argv));
+    if (try_catch.HasCaught()) {
+      PrintLn(try_catch.Exception());
+      return;
+    }
   }
 
+  bool running = auto_continue;
+
   // Process requests from the debugger.
   while (true) {
     // Wait for new command in the queue.
@@ -2283,7 +2277,6 @@
 
     // Get the response.
     v8::Local<v8::String> response;
-    bool running = false;
     if (!try_catch.HasCaught()) {
       // Get response string.
       if (!response_val->IsUndefined()) {
@@ -2326,7 +2319,7 @@
     // Return from debug event processing if either the VM is put into the
     // running state (through a continue command) or auto continue is active
     // and there are no more commands queued.
-    if (running || (auto_continue && !HasCommands())) {
+    if (running && !HasCommands()) {
       return;
     }
   }
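The loop's exit test collapses two conditions into one because running is now seeded from auto_continue and updated by each response. A condensed sketch of the resulting rule:

    // Leave the message loop only when the VM is running and no debugger
    // commands remain queued; a queued command is still processed even if
    // a previous response set the VM running.
    static bool ShouldLeaveMessageLoop(bool running, bool has_commands) {
      return running && !has_commands;
    }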
diff --git a/src/execution.cc b/src/execution.cc
index 8bc6b74..229b8df 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -96,8 +96,11 @@
     JSEntryFunction entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
 
     // Call the function through the right JS entry stub.
-    value = CALL_GENERATED_CODE(entry, func->code()->entry(), *func,
-                                *receiver, argc, args);
+    byte* entry_address = func->code()->entry();
+    JSFunction* function = *func;
+    Object* receiver_pointer = *receiver;
+    value = CALL_GENERATED_CODE(entry, entry_address, function,
+                                receiver_pointer, argc, args);
   }
 
 #ifdef DEBUG
@@ -383,7 +386,8 @@
   if (initial_climit_ == kIllegalLimit) {
     // Takes the address of the limit variable in order to find out where
     // the top of stack is right now.
-    intptr_t limit = reinterpret_cast<intptr_t>(&limit) - kLimitSize;
+    uintptr_t limit = reinterpret_cast<uintptr_t>(&limit) - kLimitSize;
+    ASSERT(reinterpret_cast<uintptr_t>(&limit) > kLimitSize);
     initial_jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
     jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
     initial_climit_ = limit;
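The probe works because the address of a local variable approximates the current stack top; using uintptr_t keeps the subtraction well-defined, and the new assert catches underflow. A standalone illustration, assuming a downward-growing stack (true for every architecture targeted here):

    #include <stdint.h>

    // Returns a conservative lower bound the stack may grow down to.
    uintptr_t EstimateStackLimit(uintptr_t limit_size) {
      uintptr_t probe = reinterpret_cast<uintptr_t>(&probe);  // ~ stack top
      return probe - limit_size;
    }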
diff --git a/src/execution.h b/src/execution.h
index 55307f7..ac00aa4 100644
--- a/src/execution.h
+++ b/src/execution.h
@@ -216,6 +216,7 @@
   static void DisableInterrupts();
 
   static const uintptr_t kLimitSize = kPointerSize * 128 * KB;
+
 #ifdef V8_TARGET_ARCH_X64
   static const uintptr_t kInterruptLimit = V8_UINT64_C(0xfffffffffffffffe);
   static const uintptr_t kIllegalLimit = V8_UINT64_C(0xfffffffffffffff8);
diff --git a/src/factory.cc b/src/factory.cc
index 622055c..32b69db 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -222,6 +222,18 @@
 }
 
 
+Handle<ExternalArray> Factory::NewExternalArray(int length,
+                                                ExternalArrayType array_type,
+                                                void* external_pointer,
+                                                PretenureFlag pretenure) {
+  ASSERT(0 <= length);
+  CALL_HEAP_FUNCTION(Heap::AllocateExternalArray(length,
+                                                 array_type,
+                                                 external_pointer,
+                                                 pretenure), ExternalArray);
+}
+
+
 Handle<Map> Factory::NewMap(InstanceType type, int instance_size) {
   CALL_HEAP_FUNCTION(Heap::AllocateMap(type, instance_size), Map);
 }
@@ -477,7 +489,6 @@
 
 Handle<JSFunction> Factory::NewFunctionBoilerplate(Handle<String> name,
                                                    int number_of_literals,
-                                                   bool contains_array_literal,
                                                    Handle<Code> code) {
   Handle<JSFunction> function = NewFunctionBoilerplate(name);
   function->set_code(*code);
@@ -485,7 +496,7 @@
   // If the function contains object, regexp or array literals,
   // allocate extra space for a literals array prefix containing the
   // object, regexp and array constructor functions.
-  if (number_of_literals > 0 || contains_array_literal) {
+  if (number_of_literals > 0) {
     literals_array_size += JSFunction::kLiteralsPrefixSize;
   }
   Handle<FixedArray> literals =
diff --git a/src/factory.h b/src/factory.h
index 0596fbf..cb438e9 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -155,10 +155,17 @@
   static Handle<ByteArray> NewByteArray(int length,
                                         PretenureFlag pretenure = NOT_TENURED);
 
-  static Handle<PixelArray> NewPixelArray(int length,
+  static Handle<PixelArray> NewPixelArray(
+      int length,
       uint8_t* external_pointer,
       PretenureFlag pretenure = NOT_TENURED);
 
+  static Handle<ExternalArray> NewExternalArray(
+      int length,
+      ExternalArrayType array_type,
+      void* external_pointer,
+      PretenureFlag pretenure = NOT_TENURED);
+
   static Handle<Map> NewMap(InstanceType type, int instance_size);
 
   static Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
@@ -264,7 +271,6 @@
 
   static Handle<JSFunction> NewFunctionBoilerplate(Handle<String> name,
                                                    int number_of_literals,
-                                                   bool contains_array_literal,
                                                    Handle<Code> code);
 
   static Handle<JSFunction> NewFunctionBoilerplate(Handle<String> name);
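A hypothetical use of the new factory entry point (inside namespace v8::internal, with all names as declared in this patch); the embedder retains ownership of the backing store, which the GC never scans or frees:

    static const int kLength = 16;
    float* backing = new float[kLength];  // embedder-owned storage
    Handle<ExternalArray> array =
        Factory::NewExternalArray(kLength, kExternalFloatArray, backing);
    // array now views `backing`; free it only after the array is dead.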
diff --git a/src/fast-codegen.cc b/src/fast-codegen.cc
new file mode 100644
index 0000000..d0c264a
--- /dev/null
+++ b/src/fast-codegen.cc
@@ -0,0 +1,332 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "fast-codegen.h"
+#include "stub-cache.h"
+#include "debug.h"
+
+namespace v8 {
+namespace internal {
+
+Handle<Code> FastCodeGenerator::MakeCode(FunctionLiteral* fun,
+                                         Handle<Script> script,
+                                         bool is_eval) {
+  CodeGenerator::MakeCodePrologue(fun);
+  const int kInitialBufferSize = 4 * KB;
+  MacroAssembler masm(NULL, kInitialBufferSize);
+  FastCodeGenerator cgen(&masm, script, is_eval);
+  cgen.Generate(fun);
+  if (cgen.HasStackOverflow()) {
+    ASSERT(!Top::has_pending_exception());
+    return Handle<Code>::null();
+  }
+  Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, NOT_IN_LOOP);
+  return CodeGenerator::MakeCodeEpilogue(fun, &masm, flags, script);
+}
+
+
+int FastCodeGenerator::SlotOffset(Slot* slot) {
+  // Offset is negative because higher indexes are at lower addresses.
+  int offset = -slot->index() * kPointerSize;
+  // Adjust by a (parameter or local) base offset.
+  switch (slot->type()) {
+    case Slot::PARAMETER:
+      offset += (function_->scope()->num_parameters() + 1) * kPointerSize;
+      break;
+    case Slot::LOCAL:
+      offset += JavaScriptFrameConstants::kLocal0Offset;
+      break;
+    default:
+      UNREACHABLE();
+  }
+  return offset;
+}
+
+
+void FastCodeGenerator::VisitDeclarations(
+    ZoneList<Declaration*>* declarations) {
+  int length = declarations->length();
+  int globals = 0;
+  for (int i = 0; i < length; i++) {
+    Declaration* node = declarations->at(i);
+    Variable* var = node->proxy()->var();
+    Slot* slot = var->slot();
+
+    // If it was not possible to allocate the variable at compile
+    // time, it would have to be "declared" at runtime to ensure it
+    // exists in the local context. The fast code generator is never
+    // selected for such functions, so this case is unreachable here.
+    if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
+      UNREACHABLE();
+    } else {
+      // Count global variables and functions for later processing
+      globals++;
+    }
+  }
+
+  // Return early if there are no declared global functions or variables.
+  if (globals == 0) return;
+
+  // Compute array of global variable and function declarations.
+  Handle<FixedArray> array = Factory::NewFixedArray(2 * globals, TENURED);
+  for (int j = 0, i = 0; i < length; i++) {
+    Declaration* node = declarations->at(i);
+    Variable* var = node->proxy()->var();
+    Slot* slot = var->slot();
+
+    if ((slot == NULL || slot->type() != Slot::LOOKUP) && var->is_global()) {
+      array->set(j++, *(var->name()));
+      if (node->fun() == NULL) {
+        if (var->mode() == Variable::CONST) {
+          // If this is a const property, use the hole.
+          array->set_the_hole(j++);
+        } else {
+          array->set_undefined(j++);
+        }
+      } else {
+        Handle<JSFunction> function = BuildBoilerplate(node->fun());
+        // Check for stack-overflow exception.
+        if (HasStackOverflow()) return;
+        array->set(j++, *function);
+      }
+    }
+  }
+
+  // Invoke the platform-dependent code generator to do the actual
+  // declaration of the global variables and functions.
+  DeclareGlobals(array);
+}
+
+
+Handle<JSFunction> FastCodeGenerator::BuildBoilerplate(FunctionLiteral* fun) {
+#ifdef DEBUG
+  // We should not try to compile the same function literal more than
+  // once.
+  fun->mark_as_compiled();
+#endif
+
+  // Generate code.
+  Handle<Code> code = CodeGenerator::ComputeLazyCompile(fun->num_parameters());
+  // Check for stack-overflow exception.
+  if (code.is_null()) {
+    SetStackOverflow();
+    return Handle<JSFunction>::null();
+  }
+
+  // Create a boilerplate function.
+  Handle<JSFunction> function =
+      Factory::NewFunctionBoilerplate(fun->name(),
+                                      fun->materialized_literal_count(),
+                                      code);
+  CodeGenerator::SetFunctionInfo(function, fun, false, script_);
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Notify debugger that a new function has been added.
+  Debugger::OnNewFunction(function);
+#endif
+
+  // Set the expected number of properties for instances and return
+  // the resulting function.
+  SetExpectedNofPropertiesFromEstimate(function,
+                                       fun->expected_property_count());
+  return function;
+}
+
+
+void FastCodeGenerator::SetFunctionPosition(FunctionLiteral* fun) {
+  if (FLAG_debug_info) {
+    CodeGenerator::RecordPositions(masm_, fun->start_position());
+  }
+}
+
+
+void FastCodeGenerator::SetReturnPosition(FunctionLiteral* fun) {
+  if (FLAG_debug_info) {
+    CodeGenerator::RecordPositions(masm_, fun->end_position());
+  }
+}
+
+
+void FastCodeGenerator::SetStatementPosition(Statement* stmt) {
+  if (FLAG_debug_info) {
+    CodeGenerator::RecordPositions(masm_, stmt->statement_pos());
+  }
+}
+
+
+void FastCodeGenerator::SetSourcePosition(int pos) {
+  if (FLAG_debug_info && pos != RelocInfo::kNoPosition) {
+    masm_->RecordPosition(pos);
+  }
+}
+
+
+void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
+  Comment cmnt(masm_, "[ EmptyStatement");
+  SetStatementPosition(stmt);
+}
+
+
+void FastCodeGenerator::VisitIfStatement(IfStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitWithEnterStatement(WithEnterStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitForStatement(ForStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitFunctionBoilerplateLiteral(
+    FunctionBoilerplateLiteral* expr) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitConditional(Conditional* expr) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitSlot(Slot* expr) {
+  // Slots do not appear directly in the AST.
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitLiteral(Literal* expr) {
+  // No code is emitted (here) for simple literals.
+}
+
+
+void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitThrow(Throw* expr) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitProperty(Property* expr) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitCallNew(CallNew* expr) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+  UNREACHABLE();
+}
+
+
+} }  // namespace v8::internal
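MakeCode returns a null handle on stack overflow, which lets a caller fall back to the classic compiler. A hedged sketch of the dispatch implied by the new --fast-compiler flag; the real call site is in compiler.cc, which this patch does not show:

    Handle<Code> MakeCodeWithFallback(FunctionLiteral* fun,
                                      Handle<Script> script,
                                      bool is_eval) {
      if (FLAG_fast_compiler) {
        Handle<Code> code = FastCodeGenerator::MakeCode(fun, script, is_eval);
        if (!code.is_null()) return code;  // fast path succeeded
      }
      return CodeGenerator::MakeCode(fun, script, is_eval);  // classic path
    }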
diff --git a/src/fast-codegen.h b/src/fast-codegen.h
new file mode 100644
index 0000000..42d6cde
--- /dev/null
+++ b/src/fast-codegen.h
@@ -0,0 +1,79 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_FAST_CODEGEN_H_
+#define V8_FAST_CODEGEN_H_
+
+#include "v8.h"
+
+#include "ast.h"
+
+namespace v8 {
+namespace internal {
+
+
+class FastCodeGenerator: public AstVisitor {
+ public:
+  FastCodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval)
+    : masm_(masm), function_(NULL), script_(script), is_eval_(is_eval) {
+  }
+
+  static Handle<Code> MakeCode(FunctionLiteral* fun,
+                               Handle<Script> script,
+                               bool is_eval);
+
+  void Generate(FunctionLiteral* fun);
+
+ private:
+  int SlotOffset(Slot* slot);
+
+  void VisitDeclarations(ZoneList<Declaration*>* declarations);
+  Handle<JSFunction> BuildBoilerplate(FunctionLiteral* fun);
+  void DeclareGlobals(Handle<FixedArray> pairs);
+
+  void SetFunctionPosition(FunctionLiteral* fun);
+  void SetReturnPosition(FunctionLiteral* fun);
+  void SetStatementPosition(Statement* stmt);
+  void SetSourcePosition(int pos);
+
+  // AST node visit functions.
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+  MacroAssembler* masm_;
+  FunctionLiteral* function_;
+  Handle<Script> script_;
+  bool is_eval_;
+
+  DISALLOW_COPY_AND_ASSIGN(FastCodeGenerator);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_FAST_CODEGEN_H_
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 91c5bca..42c96b6 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -96,7 +96,7 @@
 //
 #define FLAG FLAG_FULL
 
-// assembler-ia32.cc / assembler-arm.cc
+// assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc
 DEFINE_bool(debug_code, false,
             "generate extra code (comments, assertions) for debugging")
 DEFINE_bool(emit_branch_hints, false, "emit branch hints")
@@ -104,6 +104,16 @@
             "eliminate redundant push/pops in assembly code")
 DEFINE_bool(print_push_pop_elimination, false,
             "print elimination of redundant push/pops in assembly code")
+DEFINE_bool(enable_sse2, true,
+            "enable use of SSE2 instructions if available")
+DEFINE_bool(enable_sse3, true,
+            "enable use of SSE3 instructions if available")
+DEFINE_bool(enable_cmov, true,
+            "enable use of CMOV instruction if available")
+DEFINE_bool(enable_rdtsc, true,
+            "enable use of RDTSC instruction if available")
+DEFINE_bool(enable_sahf, true,
+            "enable use of SAHF instruction if available (X64 only)")
 
 // bootstrapper.cc
 DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
@@ -132,7 +142,11 @@
 // compiler.cc
 DEFINE_bool(strict, false, "strict error checking")
 DEFINE_int(min_preparse_length, 1024,
-           "Minimum length for automatic enable preparsing")
+           "minimum length for automatic enable preparsing")
+DEFINE_bool(fast_compiler, true,
+            "use the fast-mode compiler for some top-level code")
+DEFINE_bool(trace_bailout, false,
+            "print reasons for failing to use fast compilation")
 
 // compilation-cache.cc
 DEFINE_bool(compilation_cache, true, "enable compilation cache")
@@ -149,8 +163,8 @@
            "maximum length of function source code printed in a stack trace.")
 
 // heap.cc
-DEFINE_int(new_space_size, 0, "size of (each semispace in) the new generation")
-DEFINE_int(old_space_size, 0, "size of the old generation")
+DEFINE_int(max_new_space_size, 0, "max size of the new generation")
+DEFINE_int(max_old_space_size, 0, "max size of the old generation")
 DEFINE_bool(gc_global, false, "always perform global GCs")
 DEFINE_int(gc_interval, -1, "garbage collect after <n> allocations")
 DEFINE_bool(trace_gc, false,
@@ -263,6 +277,9 @@
             "pretty print source code for builtins")
 DEFINE_bool(print_ast, false, "print source AST")
 DEFINE_bool(print_builtin_ast, false, "print source AST for builtins")
+DEFINE_bool(print_json_ast, false, "print source AST as JSON")
+DEFINE_bool(print_builtin_json_ast, false,
+            "print source AST for builtins as JSON")
 DEFINE_bool(trace_calls, false, "trace calls")
 DEFINE_bool(trace_builtin_calls, false, "trace builtins calls")
 DEFINE_string(stop_at, "", "function name where to insert a breakpoint")
@@ -333,6 +350,7 @@
 DEFINE_bool(log_handles, false, "Log global handle events.")
 DEFINE_bool(log_state_changes, false, "Log state changes.")
 DEFINE_bool(log_suspect, false, "Log suspect operations.")
+DEFINE_bool(log_producers, false, "Log stack traces of JS objects allocations.")
 DEFINE_bool(compress_log, false,
             "Compress log to save space (makes log less human-readable).")
 DEFINE_bool(prof, false,
diff --git a/src/global-handles.cc b/src/global-handles.cc
index e51c4aa..f4b69fc 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -264,6 +264,16 @@
 }
 
 
+void GlobalHandles::IterateWeakRoots(WeakReferenceGuest f,
+                                     WeakReferenceCallback callback) {
+  for (Node* current = head_; current != NULL; current = current->next()) {
+    if (current->IsWeak() && current->callback() == callback) {
+      f(current->object_, current->parameter());
+    }
+  }
+}
+
+
 void GlobalHandles::IdentifyWeakHandles(WeakSlotCallback f) {
   for (Node* current = head_; current != NULL; current = current->next()) {
     if (current->state_ == Node::WEAK) {
diff --git a/src/global-handles.h b/src/global-handles.h
index 9e63ba7..feb95bf 100644
--- a/src/global-handles.h
+++ b/src/global-handles.h
@@ -54,6 +54,8 @@
 };
 
 
+typedef void (*WeakReferenceGuest)(Object* object, void* parameter);
+
 class GlobalHandles : public AllStatic {
  public:
   // Creates a new global handle that is alive until Destroy is called.
@@ -99,6 +101,10 @@
   // Iterates over all weak roots in heap.
   static void IterateWeakRoots(ObjectVisitor* v);
 
+  // Iterates over weak roots that are bound to a given callback.
+  static void IterateWeakRoots(WeakReferenceGuest f,
+                               WeakReferenceCallback callback);
+
   // Find all weak handles satisfying the callback predicate, mark
   // them as pending.
   static void IdentifyWeakHandles(WeakSlotCallback f);
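A guest function receives each weak handle's object and parameter, filtered to handles registered with the given weak callback. A hypothetical guest matching the new typedef (inside namespace v8::internal; the heap profiler below uses exactly this pattern):

    static void CountProducer(Object* object, void* parameter) {
      // e.g. tally producer records; `parameter` is whatever was passed
      // to GlobalHandles::MakeWeak for this handle.
    }
    // GlobalHandles::IterateWeakRoots(CountProducer, StackWeakReferenceCallback);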
diff --git a/src/handles.cc b/src/handles.cc
index b43ec53..b764334 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -345,7 +345,7 @@
 Handle<Object> SetElement(Handle<JSObject> object,
                           uint32_t index,
                           Handle<Object> value) {
-  if (object->HasPixelElements()) {
+  if (object->HasPixelElements() || object->HasExternalArrayElements()) {
     if (!value->IsSmi() && !value->IsHeapNumber() && !value->IsUndefined()) {
       bool has_exception;
       Handle<Object> number = Execution::ToNumber(value, &has_exception);
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index ecb6919..bfd378d 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -28,6 +28,8 @@
 #include "v8.h"
 
 #include "heap-profiler.h"
+#include "frames-inl.h"
+#include "global-handles.h"
 #include "string-stream.h"
 
 namespace v8 {
@@ -327,6 +329,11 @@
 }
 
 
+static const char* GetConstructorName(const char* name) {
+  return name[0] != '\0' ? name : "(anonymous)";
+}
+
+
 void JSObjectsCluster::Print(StringStream* accumulator) const {
   ASSERT(!is_null());
   if (constructor_ == FromSpecialCase(ROOTS)) {
@@ -338,7 +345,7 @@
   } else {
     SmartPointer<char> s_name(
         constructor_->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL));
-    accumulator->Add("%s", (*s_name)[0] != '\0' ? *s_name : "(anonymous)");
+    accumulator->Add("%s", GetConstructorName(*s_name));
     if (instance_ != NULL) {
       accumulator->Add(":%p", static_cast<void*>(instance_));
     }
@@ -569,15 +576,34 @@
 void HeapProfiler::CollectStats(HeapObject* obj, HistogramInfo* info) {
   InstanceType type = obj->map()->instance_type();
   ASSERT(0 <= type && type <= LAST_TYPE);
-  info[type].increment_number(1);
-  info[type].increment_bytes(obj->Size());
+  if (!FreeListNode::IsFreeListNode(obj)) {
+    info[type].increment_number(1);
+    info[type].increment_bytes(obj->Size());
+  }
+}
+
+
+static void StackWeakReferenceCallback(Persistent<Value> object,
+                                       void* trace) {
+  DeleteArray(static_cast<Address*>(trace));
+  object.Dispose();
+}
+
+
+static void PrintProducerStackTrace(Object* obj, void* trace) {
+  if (!obj->IsJSObject()) return;
+  String* constructor = JSObject::cast(obj)->constructor_name();
+  SmartPointer<char> s_name(
+      constructor->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL));
+  LOG(HeapSampleJSProducerEvent(GetConstructorName(*s_name),
+                                reinterpret_cast<Address*>(trace)));
 }
 
 
 void HeapProfiler::WriteSample() {
   LOG(HeapSampleBeginEvent("Heap", "allocated"));
   LOG(HeapSampleStats(
-      "Heap", "allocated", Heap::Capacity(), Heap::SizeOfObjects()));
+      "Heap", "allocated", Heap::CommittedMemory(), Heap::SizeOfObjects()));
 
   HistogramInfo info[LAST_TYPE+1];
 #define DEF_TYPE_NAME(name) info[name].set_name(#name);
@@ -616,10 +642,40 @@
   js_cons_profile.PrintStats();
   js_retainer_profile.PrintStats();
 
+  GlobalHandles::IterateWeakRoots(PrintProducerStackTrace,
+                                  StackWeakReferenceCallback);
+
   LOG(HeapSampleEndEvent("Heap", "allocated"));
 }
 
 
+bool ProducerHeapProfile::can_log_ = false;
+
+void ProducerHeapProfile::Setup() {
+  can_log_ = true;
+}
+
+void ProducerHeapProfile::RecordJSObjectAllocation(Object* obj) {
+  if (!can_log_ || !FLAG_log_producers) return;
+  int framesCount = 0;
+  for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
+    ++framesCount;
+  }
+  if (framesCount == 0) return;
+  ++framesCount;  // Reserve a slot for the terminating NULL entry.
+  Vector<Address> stack(NewArray<Address>(framesCount), framesCount);
+  int i = 0;
+  for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
+    stack[i++] = it.frame()->pc();
+  }
+  stack[i] = NULL;
+  Handle<Object> handle = GlobalHandles::Create(obj);
+  GlobalHandles::MakeWeak(handle.location(),
+                          static_cast<void*>(stack.start()),
+                          StackWeakReferenceCallback);
+}
+
+
 #endif  // ENABLE_LOGGING_AND_PROFILING
 
 
diff --git a/src/heap-profiler.h b/src/heap-profiler.h
index 7fda883..bd875df 100644
--- a/src/heap-profiler.h
+++ b/src/heap-profiler.h
@@ -256,6 +256,14 @@
 };
 
 
+class ProducerHeapProfile : public AllStatic {
+ public:
+  static void Setup();
+  static void RecordJSObjectAllocation(Object* obj);
+ private:
+  static bool can_log_;
+};
+
 #endif  // ENABLE_LOGGING_AND_PROFILING
 
 } }  // namespace v8::internal
diff --git a/src/heap.cc b/src/heap.cc
index dcc25a3..5084058 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -39,6 +39,7 @@
 #include "natives.h"
 #include "scanner.h"
 #include "scopeinfo.h"
+#include "snapshot.h"
 #include "v8threads.h"
 #if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP
 #include "regexp-macro-assembler.h"
@@ -74,28 +75,35 @@
 // semispace_size_ should be a power of 2 and old_generation_size_ should be
 // a multiple of Page::kPageSize.
 #if defined(ANDROID)
-int Heap::semispace_size_  = 512*KB;
-int Heap::old_generation_size_ = 128*MB;
+int Heap::max_semispace_size_  = 512*KB;
+int Heap::max_old_generation_size_ = 128*MB;
 int Heap::initial_semispace_size_ = 128*KB;
 size_t Heap::code_range_size_ = 0;
 #elif defined(V8_TARGET_ARCH_X64)
-int Heap::semispace_size_  = 16*MB;
-int Heap::old_generation_size_ = 1*GB;
+int Heap::max_semispace_size_  = 16*MB;
+int Heap::max_old_generation_size_ = 1*GB;
 int Heap::initial_semispace_size_ = 1*MB;
-size_t Heap::code_range_size_ = 256*MB;
+size_t Heap::code_range_size_ = 512*MB;
 #else
-int Heap::semispace_size_  = 8*MB;
-int Heap::old_generation_size_ = 512*MB;
+int Heap::max_semispace_size_  = 8*MB;
+int Heap::max_old_generation_size_ = 512*MB;
 int Heap::initial_semispace_size_ = 512*KB;
 size_t Heap::code_range_size_ = 0;
 #endif
 
+// The reserved semispace size is the default semispace size when a
+// snapshot is in use, and otherwise the semispace size requested
+// through ConfigureHeap.
+int Heap::reserved_semispace_size_ = Heap::max_semispace_size_;
+
 GCCallback Heap::global_gc_prologue_callback_ = NULL;
 GCCallback Heap::global_gc_epilogue_callback_ = NULL;
 
 // Variables set based on semispace_size_ and old_generation_size_ in
 // ConfigureHeap.
-int Heap::young_generation_size_ = 0;  // Will be 2 * semispace_size_.
+
+// The initial reservation for new space will be
+// 4 * reserved_semispace_size_ so that the young generation can be
+// aligned to its size.
 int Heap::survived_since_last_expansion_ = 0;
 int Heap::external_allocation_limit_ = 0;
 
@@ -127,6 +135,19 @@
 }
 
 
+int Heap::CommittedMemory() {
+  if (!HasBeenSetup()) return 0;
+
+  return new_space_.CommittedMemory() +
+      old_pointer_space_->CommittedMemory() +
+      old_data_space_->CommittedMemory() +
+      code_space_->CommittedMemory() +
+      map_space_->CommittedMemory() +
+      cell_space_->CommittedMemory() +
+      lo_space_->Size();
+}
+
+
 int Heap::Available() {
   if (!HasBeenSetup()) return 0;
 
@@ -222,19 +243,34 @@
 void Heap::PrintShortHeapStatistics() {
   if (!FLAG_trace_gc_verbose) return;
   PrintF("Memory allocator,   used: %8d, available: %8d\n",
-         MemoryAllocator::Size(), MemoryAllocator::Available());
+         MemoryAllocator::Size(),
+         MemoryAllocator::Available());
   PrintF("New space,          used: %8d, available: %8d\n",
-         Heap::new_space_.Size(), new_space_.Available());
-  PrintF("Old pointers,       used: %8d, available: %8d\n",
-         old_pointer_space_->Size(), old_pointer_space_->Available());
-  PrintF("Old data space,     used: %8d, available: %8d\n",
-         old_data_space_->Size(), old_data_space_->Available());
-  PrintF("Code space,         used: %8d, available: %8d\n",
-         code_space_->Size(), code_space_->Available());
-  PrintF("Map space,          used: %8d, available: %8d\n",
-         map_space_->Size(), map_space_->Available());
+         Heap::new_space_.Size(),
+         new_space_.Available());
+  PrintF("Old pointers,       used: %8d, available: %8d, waste: %8d\n",
+         old_pointer_space_->Size(),
+         old_pointer_space_->Available(),
+         old_pointer_space_->Waste());
+  PrintF("Old data space,     used: %8d, available: %8d, waste: %8d\n",
+         old_data_space_->Size(),
+         old_data_space_->Available(),
+         old_data_space_->Waste());
+  PrintF("Code space,         used: %8d, available: %8d, waste: %8d\n",
+         code_space_->Size(),
+         code_space_->Available(),
+         code_space_->Waste());
+  PrintF("Map space,          used: %8d, available: %8d, waste: %8d\n",
+         map_space_->Size(),
+         map_space_->Available(),
+         map_space_->Waste());
+  PrintF("Cell space,         used: %8d, available: %8d, waste: %8d\n",
+         cell_space_->Size(),
+         cell_space_->Available(),
+         cell_space_->Waste());
   PrintF("Large object space, used: %8d, avaialble: %8d\n",
-         lo_space_->Size(), lo_space_->Available());
+         lo_space_->Size(),
+         lo_space_->Available());
 }
 #endif
 
@@ -478,7 +514,13 @@
 
   Counters::objs_since_last_young.Set(0);
 
-  PostGarbageCollectionProcessing();
+  if (collector == MARK_COMPACTOR) {
+    DisableAssertNoAllocation allow_allocation;
+    GlobalHandles::PostGarbageCollectionProcessing();
+  }
+
+  // Update relocatables.
+  Relocatable::PostGarbageCollectionProcessing();
 
   if (collector == MARK_COMPACTOR) {
     // Register the amount of external allocated memory.
@@ -494,17 +536,6 @@
 }
 
 
-void Heap::PostGarbageCollectionProcessing() {
-  // Process weak handles post gc.
-  {
-    DisableAssertNoAllocation allow_allocation;
-    GlobalHandles::PostGarbageCollectionProcessing();
-  }
-  // Update relocatables.
-  Relocatable::PostGarbageCollectionProcessing();
-}
-
-
 void Heap::MarkCompact(GCTracer* tracer) {
   gc_state_ = MARK_COMPACT;
   mc_count_++;
@@ -1195,6 +1226,41 @@
   if (obj->IsFailure()) return false;
   set_pixel_array_map(Map::cast(obj));
 
+  obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
+                    ExternalArray::kAlignedSize);
+  if (obj->IsFailure()) return false;
+  set_external_byte_array_map(Map::cast(obj));
+
+  obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
+                    ExternalArray::kAlignedSize);
+  if (obj->IsFailure()) return false;
+  set_external_unsigned_byte_array_map(Map::cast(obj));
+
+  obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
+                    ExternalArray::kAlignedSize);
+  if (obj->IsFailure()) return false;
+  set_external_short_array_map(Map::cast(obj));
+
+  obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
+                    ExternalArray::kAlignedSize);
+  if (obj->IsFailure()) return false;
+  set_external_unsigned_short_array_map(Map::cast(obj));
+
+  obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
+                    ExternalArray::kAlignedSize);
+  if (obj->IsFailure()) return false;
+  set_external_int_array_map(Map::cast(obj));
+
+  obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
+                    ExternalArray::kAlignedSize);
+  if (obj->IsFailure()) return false;
+  set_external_unsigned_int_array_map(Map::cast(obj));
+
+  obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
+                    ExternalArray::kAlignedSize);
+  if (obj->IsFailure()) return false;
+  set_external_float_array_map(Map::cast(obj));
+
   obj = AllocateMap(CODE_TYPE, Code::kHeaderSize);
   if (obj->IsFailure()) return false;
   set_code_map(Map::cast(obj));
@@ -1615,6 +1681,35 @@
 }
 
 
+Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
+  return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
+}
+
+
+Heap::RootListIndex Heap::RootIndexForExternalArrayType(
+    ExternalArrayType array_type) {
+  switch (array_type) {
+    case kExternalByteArray:
+      return kExternalByteArrayMapRootIndex;
+    case kExternalUnsignedByteArray:
+      return kExternalUnsignedByteArrayMapRootIndex;
+    case kExternalShortArray:
+      return kExternalShortArrayMapRootIndex;
+    case kExternalUnsignedShortArray:
+      return kExternalUnsignedShortArrayMapRootIndex;
+    case kExternalIntArray:
+      return kExternalIntArrayMapRootIndex;
+    case kExternalUnsignedIntArray:
+      return kExternalUnsignedIntArrayMapRootIndex;
+    case kExternalFloatArray:
+      return kExternalFloatArrayMapRootIndex;
+    default:
+      UNREACHABLE();
+      return kUndefinedValueRootIndex;
+  }
+}
+
+
 Object* Heap::NewNumberFromDouble(double value, PretenureFlag pretenure) {
   return SmiOrNumberFromDouble(value,
                                true /* number object must be new */,
@@ -1679,8 +1774,8 @@
       && second->IsAsciiRepresentation();
 
   // Make sure that an out of memory exception is thrown if the length
-  // of the new cons string is too large to fit in a Smi.
-  if (length > Smi::kMaxValue || length < -0) {
+  // of the new cons string is too large.
+  if (length > String::kMaxLength || length < 0) {
     Top::context()->mark_out_of_memory();
     return Failure::OutOfMemoryException();
   }
@@ -1940,6 +2035,31 @@
 }
 
 
+Object* Heap::AllocateExternalArray(int length,
+                                    ExternalArrayType array_type,
+                                    void* external_pointer,
+                                    PretenureFlag pretenure) {
+  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+
+  // New space can't cope with forced allocation.
+  if (always_allocate()) space = OLD_DATA_SPACE;
+
+  Object* result = AllocateRaw(ExternalArray::kAlignedSize,
+                               space,
+                               OLD_DATA_SPACE);
+
+  if (result->IsFailure()) return result;
+
+  reinterpret_cast<ExternalArray*>(result)->set_map(
+      MapForExternalArrayType(array_type));
+  reinterpret_cast<ExternalArray*>(result)->set_length(length);
+  reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
+      external_pointer);
+
+  return result;
+}
+
+
 Object* Heap::CreateCode(const CodeDesc& desc,
                          ZoneScopeInfo* sinfo,
                          Code::Flags flags,
@@ -2021,6 +2141,9 @@
                                TargetSpaceId(map->instance_type()));
   if (result->IsFailure()) return result;
   HeapObject::cast(result)->set_map(map);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  ProducerHeapProfile::RecordJSObjectAllocation(result);
+#endif
   return result;
 }
 
@@ -2134,7 +2257,8 @@
   // constructed without having these properties.
   ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
   if (fun->shared()->has_only_this_property_assignments() &&
-      fun->shared()->this_property_assignments_count() > 0) {
+      fun->shared()->this_property_assignments_count() > 0 &&
+      fun->shared()->has_only_simple_this_property_assignments()) {
     int count = fun->shared()->this_property_assignments_count();
     if (count > in_object_properties) {
       count = in_object_properties;
@@ -2342,6 +2466,9 @@
     JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
   }
   // Return the new clone.
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  ProducerHeapProfile::RecordJSObjectAllocation(clone);
+#endif
   return clone;
 }
 
@@ -3179,21 +3306,37 @@
 // TODO(1236194): Since the heap size is configurable on the command line
 // and through the API, we should gracefully handle the case that the heap
 // size is not big enough to fit all the initial objects.
-bool Heap::ConfigureHeap(int semispace_size, int old_gen_size) {
+bool Heap::ConfigureHeap(int max_semispace_size, int max_old_gen_size) {
   if (HasBeenSetup()) return false;
 
-  if (semispace_size > 0) semispace_size_ = semispace_size;
-  if (old_gen_size > 0) old_generation_size_ = old_gen_size;
+  if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;
+
+  if (Snapshot::IsEnabled()) {
+    // If we are using a snapshot we always reserve the default amount
+    // of memory for each semispace because code in the snapshot has
+    // write-barrier code that relies on the size and alignment of new
+    // space.  We therefore cannot use a larger max semispace size
+    // than the default reserved semispace size.
+    if (max_semispace_size_ > reserved_semispace_size_) {
+      max_semispace_size_ = reserved_semispace_size_;
+    }
+  } else {
+    // If we are not using snapshots we reserve space for the actual
+    // max semispace size.
+    reserved_semispace_size_ = max_semispace_size_;
+  }
+
+  if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
 
   // The new space size must be a power of two to support single-bit testing
   // for containment.
-  semispace_size_ = RoundUpToPowerOf2(semispace_size_);
-  initial_semispace_size_ = Min(initial_semispace_size_, semispace_size_);
-  young_generation_size_ = 2 * semispace_size_;
-  external_allocation_limit_ = 10 * semispace_size_;
+  max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
+  reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
+  initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
+  external_allocation_limit_ = 10 * max_semispace_size_;
 
   // The old generation is paged.
-  old_generation_size_ = RoundUp(old_generation_size_, Page::kPageSize);
+  max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);
 
   heap_configured = true;
   return true;
@@ -3201,7 +3344,7 @@
 
 
 bool Heap::ConfigureHeapDefault() {
-  return ConfigureHeap(FLAG_new_space_size, FLAG_old_space_size);
+  return ConfigureHeap(FLAG_max_new_space_size / 2, FLAG_max_old_space_size);
 }
 
 
@@ -3237,30 +3380,31 @@
   }
 
   // Setup memory allocator and reserve a chunk of memory for new
-  // space.  The chunk is double the size of the new space to ensure
-  // that we can find a pair of semispaces that are contiguous and
-  // aligned to their size.
-  if (!MemoryAllocator::Setup(MaxCapacity())) return false;
+  // space.  The chunk is double the size of the requested reserved
+  // new space size to ensure that we can find a pair of semispaces that
+  // are contiguous and aligned to their size.
+  if (!MemoryAllocator::Setup(MaxReserved())) return false;
   void* chunk =
-      MemoryAllocator::ReserveInitialChunk(2 * young_generation_size_);
+      MemoryAllocator::ReserveInitialChunk(4 * reserved_semispace_size_);
   if (chunk == NULL) return false;
 
   // Align the pair of semispaces to their size, which must be a power
   // of 2.
-  ASSERT(IsPowerOf2(young_generation_size_));
   Address new_space_start =
-      RoundUp(reinterpret_cast<byte*>(chunk), young_generation_size_);
-  if (!new_space_.Setup(new_space_start, young_generation_size_)) return false;
+      RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
+  if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
+    return false;
+  }
 
   // Initialize old pointer space.
   old_pointer_space_ =
-      new OldSpace(old_generation_size_, OLD_POINTER_SPACE, NOT_EXECUTABLE);
+      new OldSpace(max_old_generation_size_, OLD_POINTER_SPACE, NOT_EXECUTABLE);
   if (old_pointer_space_ == NULL) return false;
   if (!old_pointer_space_->Setup(NULL, 0)) return false;
 
   // Initialize old data space.
   old_data_space_ =
-      new OldSpace(old_generation_size_, OLD_DATA_SPACE, NOT_EXECUTABLE);
+      new OldSpace(max_old_generation_size_, OLD_DATA_SPACE, NOT_EXECUTABLE);
   if (old_data_space_ == NULL) return false;
   if (!old_data_space_->Setup(NULL, 0)) return false;
 
@@ -3275,7 +3419,7 @@
   }
 
   code_space_ =
-      new OldSpace(old_generation_size_, CODE_SPACE, EXECUTABLE);
+      new OldSpace(max_old_generation_size_, CODE_SPACE, EXECUTABLE);
   if (code_space_ == NULL) return false;
   if (!code_space_->Setup(NULL, 0)) return false;
 
@@ -3285,7 +3429,7 @@
   if (!map_space_->Setup(NULL, 0)) return false;
 
   // Initialize global property cell space.
-  cell_space_ = new CellSpace(old_generation_size_, CELL_SPACE);
+  cell_space_ = new CellSpace(max_old_generation_size_, CELL_SPACE);
   if (cell_space_ == NULL) return false;
   if (!cell_space_->Setup(NULL, 0)) return false;
 
@@ -3308,6 +3452,11 @@
   LOG(IntEvent("heap-capacity", Capacity()));
   LOG(IntEvent("heap-available", Available()));
 
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  // This should be called only after initial objects have been created.
+  ProducerHeapProfile::Setup();
+#endif
+
   return true;
 }
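Worked numbers for the reservation logic above, using the non-Android 32-bit defaults from this patch (8 MB max semispace, snapshots disabled):

    static const int MB = 1024 * 1024;
    int reserved_semispace = 8 * MB;              // reserved == max here
    int new_space_size = 2 * reserved_semispace;  // two semispaces: 16 MB
    int initial_chunk = 4 * reserved_semispace;   // 32 MB reserved
    // A chunk of size 2*N always contains an N-aligned region of size N,
    // so the 16 MB new space can be aligned to its size inside the chunk.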
 
diff --git a/src/heap.h b/src/heap.h
index e878efc..cd49a8d 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -111,6 +111,13 @@
   V(Map, undetectable_long_ascii_string_map, UndetectableLongAsciiStringMap)   \
   V(Map, byte_array_map, ByteArrayMap)                                         \
   V(Map, pixel_array_map, PixelArrayMap)                                       \
+  V(Map, external_byte_array_map, ExternalByteArrayMap)                        \
+  V(Map, external_unsigned_byte_array_map, ExternalUnsignedByteArrayMap)       \
+  V(Map, external_short_array_map, ExternalShortArrayMap)                      \
+  V(Map, external_unsigned_short_array_map, ExternalUnsignedShortArrayMap)     \
+  V(Map, external_int_array_map, ExternalIntArrayMap)                          \
+  V(Map, external_unsigned_int_array_map, ExternalUnsignedIntArrayMap)         \
+  V(Map, external_float_array_map, ExternalFloatArrayMap)                      \
   V(Map, context_map, ContextMap)                                              \
   V(Map, catch_context_map, CatchContextMap)                                   \
   V(Map, code_map, CodeMap)                                                    \
@@ -228,7 +235,7 @@
  public:
   // Configure heap size before setup. Return false if the heap has been
   // setup already.
-  static bool ConfigureHeap(int semispace_size, int old_gen_size);
+  static bool ConfigureHeap(int max_semispace_size, int max_old_gen_size);
   static bool ConfigureHeapDefault();
 
   // Initializes the global object heap. If create_heap_objects is true,
@@ -247,19 +254,26 @@
   // Returns whether Setup has been called.
   static bool HasBeenSetup();
 
-  // Returns the maximum heap capacity.
-  static int MaxCapacity() {
-    return young_generation_size_ + old_generation_size_;
+  // Returns the maximum amount of memory reserved for the heap.  For
+  // the young generation, we reserve 4 times the amount needed for a
+  // semispace.  The young generation consists of two semispaces, and
+  // we reserve twice the amount needed for those in order to ensure
+  // that new space can be aligned to its size.
+  static int MaxReserved() {
+    return 4 * reserved_semispace_size_ + max_old_generation_size_;
   }
-  static int SemiSpaceSize() { return semispace_size_; }
+  static int MaxSemiSpaceSize() { return max_semispace_size_; }
+  static int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
   static int InitialSemiSpaceSize() { return initial_semispace_size_; }
-  static int YoungGenerationSize() { return young_generation_size_; }
-  static int OldGenerationSize() { return old_generation_size_; }
+  static int MaxOldGenerationSize() { return max_old_generation_size_; }
 
   // Returns the capacity of the heap in bytes w/o growing. Heap grows when
   // more spaces are needed until it reaches the limit.
   static int Capacity();
 
+  // Returns the amount of memory currently committed for the heap.
+  static int CommittedMemory();
+
   // Returns the available bytes in space w/o growing.
   // Heap doesn't guarantee that it can allocate an object that requires
   // all available bytes. Check MaxHeapObjectSize() instead.
@@ -449,6 +463,15 @@
                                     uint8_t* external_pointer,
                                     PretenureFlag pretenure);
 
+  // Allocates an external array of the specified length and type.
+  // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+  // failed.
+  // Please note this does not perform a garbage collection.
+  static Object* AllocateExternalArray(int length,
+                                       ExternalArrayType array_type,
+                                       void* external_pointer,
+                                       PretenureFlag pretenure);
+
   // Allocate a tenured JS global property cell.
   // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
   // failed.
@@ -637,9 +660,6 @@
   static void GarbageCollectionPrologue();
   static void GarbageCollectionEpilogue();
 
-  // Code that should be executed after the garbage collection proper.
-  static void PostGarbageCollectionProcessing();
-
   // Performs garbage collection operation.
   // Returns whether required_space bytes are available after the collection.
   static bool CollectGarbage(int required_space, AllocationSpace space);
@@ -884,11 +904,15 @@
 
   static Object* NumberToString(Object* number);
 
+  static Map* MapForExternalArrayType(ExternalArrayType array_type);
+  static RootListIndex RootIndexForExternalArrayType(
+      ExternalArrayType array_type);
+
  private:
-  static int semispace_size_;
+  static int reserved_semispace_size_;
+  static int max_semispace_size_;
   static int initial_semispace_size_;
-  static int young_generation_size_;
-  static int old_generation_size_;
+  static int max_old_generation_size_;
   static size_t code_range_size_;
 
   // For keeping track of how much data has survived
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index 9a5352b..5fa75ec 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -52,7 +52,7 @@
   if (rmode_ == RUNTIME_ENTRY || IsCodeTarget(rmode_)) {
     int32_t* p = reinterpret_cast<int32_t*>(pc_);
     *p -= delta;  // relocate entry
-  } else if (rmode_ == JS_RETURN && IsCallInstruction()) {
+  } else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
     // Special handling of js_return when a break point is set (call
     // instruction has been inserted).
     int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
@@ -85,19 +85,25 @@
 
 Object* RelocInfo::target_object() {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  return *reinterpret_cast<Object**>(pc_);
+  return Memory::Object_at(pc_);
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler *origin) {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  return Memory::Object_Handle_at(pc_);
 }
 
 
 Object** RelocInfo::target_object_address() {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  return reinterpret_cast<Object**>(pc_);
+  return &Memory::Object_at(pc_);
 }
 
 
 void RelocInfo::set_target_object(Object* target) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  *reinterpret_cast<Object**>(pc_) = target;
+  Memory::Object_at(pc_) = target;
 }
 
 
@@ -108,36 +114,36 @@
 
 
 Address RelocInfo::call_address() {
-  ASSERT(IsCallInstruction());
+  ASSERT(IsPatchedReturnSequence());
   return Assembler::target_address_at(pc_ + 1);
 }
 
 
 void RelocInfo::set_call_address(Address target) {
-  ASSERT(IsCallInstruction());
+  ASSERT(IsPatchedReturnSequence());
   Assembler::set_target_address_at(pc_ + 1, target);
 }
 
 
 Object* RelocInfo::call_object() {
-  ASSERT(IsCallInstruction());
+  ASSERT(IsPatchedReturnSequence());
   return *call_object_address();
 }
 
 
 Object** RelocInfo::call_object_address() {
-  ASSERT(IsCallInstruction());
+  ASSERT(IsPatchedReturnSequence());
   return reinterpret_cast<Object**>(pc_ + 1);
 }
 
 
 void RelocInfo::set_call_object(Object* target) {
-  ASSERT(IsCallInstruction());
+  ASSERT(IsPatchedReturnSequence());
   *call_object_address() = target;
 }
 
 
-bool RelocInfo::IsCallInstruction() {
+bool RelocInfo::IsPatchedReturnSequence() {
   return *pc_ == 0xE8;
 }
 
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index b8dda17..698377a 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -1166,6 +1166,19 @@
 }
 
 
+void Assembler::subb(const Operand& op, int8_t imm8) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (op.is_reg(eax)) {
+    EMIT(0x2c);
+  } else {
+    EMIT(0x80);
+    emit_operand(ebp, op);  // ebp encodes the /5 opcode-extension digit.
+  }
+  EMIT(imm8);
+}
+
+
 void Assembler::sub(const Operand& dst, const Immediate& x) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1837,6 +1850,22 @@
 }
 
 
+void Assembler::fucomi(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDB);
+  EMIT(0xE8 + i);
+}
+
+
+void Assembler::fucomip() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  EMIT(0xDF);
+  EMIT(0xE9);
+}
+
+
 void Assembler::fcompp() {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -2096,7 +2125,7 @@
   // Some internal data structures overflow for very large buffers,
   // they must ensure that kMaximalBufferSize is not too large.
   if ((desc.buffer_size > kMaximalBufferSize) ||
-      (desc.buffer_size > Heap::OldGenerationSize())) {
+      (desc.buffer_size > Heap::MaxOldGenerationSize())) {
     V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
   }
 
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 610017b..4d9f08b 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -367,6 +367,10 @@
   static void Probe();
   // Check whether a feature is supported by the target CPU.
   static bool IsSupported(Feature f) {
+    if (f == SSE2 && !FLAG_enable_sse2) return false;
+    if (f == SSE3 && !FLAG_enable_sse3) return false;
+    if (f == CMOV && !FLAG_enable_cmov) return false;
+    if (f == RDTSC && !FLAG_enable_rdtsc) return false;
     return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
   }
   // Check whether a feature is currently enabled.
@@ -590,6 +594,7 @@
   void shr(Register dst);
   void shr_cl(Register dst);
 
+  void subb(const Operand& dst, int8_t imm8);
   void sub(const Operand& dst, const Immediate& x);
   void sub(Register dst, const Operand& src);
   void sub(const Operand& dst, Register src);
@@ -697,6 +702,8 @@
   void ftst();
   void fucomp(int i);
   void fucompp();
+  void fucomi(int i);
+  void fucomip();
   void fcompp();
   void fnstsw_ax();
   void fwait();
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 0e314b9..a339e90 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -697,18 +697,6 @@
 }
 
 
-class ToBooleanStub: public CodeStub {
- public:
-  ToBooleanStub() { }
-
-  void Generate(MacroAssembler* masm);
-
- private:
-  Major MajorKey() { return ToBoolean; }
-  int MinorKey() { return 0; }
-};
-
-
 // ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
 // convert it to a boolean in the condition code register or jump to
 // 'false_target'/'true_target' as appropriate.
@@ -773,13 +761,6 @@
   // either operand is not a number.  Operands are in edx and eax.
   // Leaves operands unchanged.
   static void LoadSse2Operands(MacroAssembler* masm, Label* not_numbers);
-  // Allocate a heap number in new space with undefined value.
-  // Returns tagged pointer in eax, or jumps to need_gc if new space is full.
-  static void AllocateHeapNumber(MacroAssembler* masm,
-                                 Label* need_gc,
-                                 Register scratch1,
-                                 Register scratch2,
-                                 Register result);
 };
 
 
@@ -824,10 +805,8 @@
 
 
 void DeferredInlineBinaryOperation::Generate() {
-  __ push(left_);
-  __ push(right_);
-  GenericBinaryOpStub stub(op_, mode_, SMI_CODE_INLINED);
-  __ CallStub(&stub);
+  GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
+  stub.GenerateCall(masm_, left_, right_);
   if (!dst_.is(eax)) __ mov(dst_, eax);
 }
 
@@ -856,16 +835,16 @@
       // Bit operations always assume they likely operate on Smis. Still only
       // generate the inline Smi check code if this operation is part of a loop.
       flags = (loop_nesting() > 0)
-              ? SMI_CODE_INLINED
-              : SMI_CODE_IN_STUB;
+              ? NO_SMI_CODE_IN_STUB
+              : NO_GENERIC_BINARY_FLAGS;
       break;
 
     default:
       // By default only inline the Smi check code for likely smis if this
       // operation is part of a loop.
       flags = ((loop_nesting() > 0) && type->IsLikelySmi())
-              ? SMI_CODE_INLINED
-              : SMI_CODE_IN_STUB;
+              ? NO_SMI_CODE_IN_STUB
+              : NO_GENERIC_BINARY_FLAGS;
       break;
   }
 
@@ -924,16 +903,15 @@
     return;
   }
 
-  if (flags == SMI_CODE_INLINED && !generate_no_smi_code) {
+  if (((flags & NO_SMI_CODE_IN_STUB) != 0) && !generate_no_smi_code) {
     LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
   } else {
     frame_->Push(&left);
     frame_->Push(&right);
     // If we know the arguments aren't smis, use the binary operation stub
     // that does not check for the fast smi case.
-    // The same stub is used for NO_SMI_CODE and SMI_CODE_INLINED.
     if (generate_no_smi_code) {
-      flags = SMI_CODE_INLINED;
+      flags = NO_SMI_CODE_IN_STUB;
     }
     GenericBinaryOpStub stub(op, overwrite_mode, flags);
     Result answer = frame_->CallStub(&stub, 2);
@@ -1376,14 +1354,12 @@
 
 
 void DeferredInlineSmiOperation::Generate() {
-  __ push(src_);
-  __ push(Immediate(value_));
   // For mod we don't generate all the Smi code inline.
   GenericBinaryOpStub stub(
       op_,
       overwrite_mode_,
-      (op_ == Token::MOD) ? SMI_CODE_IN_STUB : SMI_CODE_INLINED);
-  __ CallStub(&stub);
+      (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
+  stub.GenerateCall(masm_, src_, value_);
   if (!dst_.is(eax)) __ mov(dst_, eax);
 }
 
@@ -1417,10 +1393,8 @@
 
 
 void DeferredInlineSmiOperationReversed::Generate() {
-  __ push(Immediate(value_));
-  __ push(src_);
-  GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED);
-  __ CallStub(&igostub);
+  GenericBinaryOpStub igostub(op_, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+  igostub.GenerateCall(masm_, value_, src_);
   if (!dst_.is(eax)) __ mov(dst_, eax);
 }
 
@@ -1449,10 +1423,8 @@
 void DeferredInlineSmiAdd::Generate() {
   // Undo the optimistic add operation and call the shared stub.
   __ sub(Operand(dst_), Immediate(value_));
-  __ push(dst_);
-  __ push(Immediate(value_));
-  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
-  __ CallStub(&igostub);
+  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+  igostub.GenerateCall(masm_, dst_, value_);
   if (!dst_.is(eax)) __ mov(dst_, eax);
 }
 
@@ -1481,10 +1453,8 @@
 void DeferredInlineSmiAddReversed::Generate() {
   // Undo the optimistic add operation and call the shared stub.
   __ sub(Operand(dst_), Immediate(value_));
-  __ push(Immediate(value_));
-  __ push(dst_);
-  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
-  __ CallStub(&igostub);
+  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+  igostub.GenerateCall(masm_, value_, dst_);
   if (!dst_.is(eax)) __ mov(dst_, eax);
 }
 
@@ -1514,10 +1484,8 @@
 void DeferredInlineSmiSub::Generate() {
   // Undo the optimistic sub operation and call the shared stub.
   __ add(Operand(dst_), Immediate(value_));
-  __ push(dst_);
-  __ push(Immediate(value_));
-  GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
-  __ CallStub(&igostub);
+  GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+  igostub.GenerateCall(masm_, dst_, value_);
   if (!dst_.is(eax)) __ mov(dst_, eax);
 }
 
@@ -2295,8 +2263,8 @@
   // allow us to push the arguments directly into place.
   frame_->SyncRange(0, frame_->element_count() - 1);
 
+  frame_->EmitPush(esi);  // The context is the first argument.
   frame_->EmitPush(Immediate(pairs));
-  frame_->EmitPush(esi);  // The context is the second argument.
   frame_->EmitPush(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
   Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
   // Return value is ignored.
@@ -2711,288 +2679,332 @@
 }
 
 
-void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
+void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
   ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ LoopStatement");
+  Comment cmnt(masm_, "[ DoWhileStatement");
   CodeForStatementPosition(node);
   node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  JumpTarget body(JumpTarget::BIDIRECTIONAL);
+  IncrementLoopNesting();
 
-  // Simple condition analysis.  ALWAYS_TRUE and ALWAYS_FALSE represent a
-  // known result for the test expression, with no side effects.
-  enum { ALWAYS_TRUE, ALWAYS_FALSE, DONT_KNOW } info = DONT_KNOW;
-  if (node->cond() == NULL) {
-    ASSERT(node->type() == LoopStatement::FOR_LOOP);
-    info = ALWAYS_TRUE;
-  } else {
-    Literal* lit = node->cond()->AsLiteral();
-    if (lit != NULL) {
-      if (lit->IsTrue()) {
-        info = ALWAYS_TRUE;
-      } else if (lit->IsFalse()) {
-        info = ALWAYS_FALSE;
-      }
-    }
+  ConditionAnalysis info = AnalyzeCondition(node->cond());
+  // Label the top of the loop for the backward jump if necessary.
+  switch (info) {
+    case ALWAYS_TRUE:
+      // Use the continue target.
+      node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+      node->continue_target()->Bind();
+      break;
+    case ALWAYS_FALSE:
+      // No need to label it.
+      node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+      break;
+    case DONT_KNOW:
+      // Continue is the test, so use the backward body target.
+      node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+      body.Bind();
+      break;
   }
 
-  switch (node->type()) {
-    case LoopStatement::DO_LOOP: {
-      JumpTarget body(JumpTarget::BIDIRECTIONAL);
-      IncrementLoopNesting();
+  CheckStack();  // TODO(1222600): ignore if body contains calls.
+  Visit(node->body());
 
-      // Label the top of the loop for the backward jump if necessary.
-      if (info == ALWAYS_TRUE) {
-        // Use the continue target.
-        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-        node->continue_target()->Bind();
-      } else if (info == ALWAYS_FALSE) {
-        // No need to label it.
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-      } else {
-        // Continue is the test, so use the backward body target.
-        ASSERT(info == DONT_KNOW);
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-        body.Bind();
+  // Compile the test.
+  switch (info) {
+    case ALWAYS_TRUE:
+      // If control flow can fall off the end of the body, jump back to
+      // the top and bind the break target at the exit.
+      if (has_valid_frame()) {
+        node->continue_target()->Jump();
       }
-
-      CheckStack();  // TODO(1222600): ignore if body contains calls.
-      Visit(node->body());
-
-      // Compile the test.
-      if (info == ALWAYS_TRUE) {
-        // If control flow can fall off the end of the body, jump back
-        // to the top and bind the break target at the exit.
-        if (has_valid_frame()) {
-          node->continue_target()->Jump();
-        }
-        if (node->break_target()->is_linked()) {
-          node->break_target()->Bind();
-        }
-
-      } else if (info == ALWAYS_FALSE) {
-        // We may have had continues or breaks in the body.
-        if (node->continue_target()->is_linked()) {
-          node->continue_target()->Bind();
-        }
-        if (node->break_target()->is_linked()) {
-          node->break_target()->Bind();
-        }
-
-      } else {
-        ASSERT(info == DONT_KNOW);
-        // We have to compile the test expression if it can be reached by
-        // control flow falling out of the body or via continue.
-        if (node->continue_target()->is_linked()) {
-          node->continue_target()->Bind();
-        }
-        if (has_valid_frame()) {
-          ControlDestination dest(&body, node->break_target(), false);
-          LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
-        }
-        if (node->break_target()->is_linked()) {
-          node->break_target()->Bind();
-        }
-      }
-      break;
-    }
-
-    case LoopStatement::WHILE_LOOP: {
-      // Do not duplicate conditions that may have function literal
-      // subexpressions.  This can cause us to compile the function
-      // literal twice.
-      bool test_at_bottom = !node->may_have_function_literal();
-
-      IncrementLoopNesting();
-
-      // If the condition is always false and has no side effects, we
-      // do not need to compile anything.
-      if (info == ALWAYS_FALSE) break;
-
-      JumpTarget body;
-      if (test_at_bottom) {
-        body.set_direction(JumpTarget::BIDIRECTIONAL);
-      }
-
-      // Based on the condition analysis, compile the test as necessary.
-      if (info == ALWAYS_TRUE) {
-        // We will not compile the test expression.  Label the top of
-        // the loop with the continue target.
-        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-        node->continue_target()->Bind();
-      } else {
-        ASSERT(info == DONT_KNOW);  // ALWAYS_FALSE cannot reach here.
-        if (test_at_bottom) {
-          // Continue is the test at the bottom, no need to label the
-          // test at the top.  The body is a backward target.
-          node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-        } else {
-          // Label the test at the top as the continue target.  The
-          // body is a forward-only target.
-          node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-          node->continue_target()->Bind();
-        }
-        // Compile the test with the body as the true target and
-        // preferred fall-through and with the break target as the
-        // false target.
-        ControlDestination dest(&body, node->break_target(), true);
-        LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
-
-        if (dest.false_was_fall_through()) {
-          // If we got the break target as fall-through, the test may
-          // have been unconditionally false (if there are no jumps to
-          // the body).
-          if (!body.is_linked()) break;
-
-          // Otherwise, jump around the body on the fall through and
-          // then bind the body target.
-          node->break_target()->Unuse();
-          node->break_target()->Jump();
-          body.Bind();
-        }
-      }
-
-      CheckStack();  // TODO(1222600): ignore if body contains calls.
-      Visit(node->body());
-
-      // Based on the condition analysis, compile the backward jump as
-      // necessary.
-      if (info == ALWAYS_TRUE) {
-        // The loop body has been labeled with the continue target.
-        if (has_valid_frame()) {
-          node->continue_target()->Jump();
-        }
-      } else {
-        ASSERT(info == DONT_KNOW);  // ALWAYS_FALSE cannot reach here.
-        if (test_at_bottom) {
-          // If we have chosen to recompile the test at the bottom,
-          // then it is the continue target.
-          if (node->continue_target()->is_linked()) {
-            node->continue_target()->Bind();
-          }
-          if (has_valid_frame()) {
-            // The break target is the fall-through (body is a backward
-            // jump from here and thus an invalid fall-through).
-            ControlDestination dest(&body, node->break_target(), false);
-            LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
-          }
-        } else {
-          // If we have chosen not to recompile the test at the
-          // bottom, jump back to the one at the top.
-          if (has_valid_frame()) {
-            node->continue_target()->Jump();
-          }
-        }
-      }
-
-      // The break target may be already bound (by the condition), or
-      // there may not be a valid frame.  Bind it only if needed.
       if (node->break_target()->is_linked()) {
         node->break_target()->Bind();
       }
       break;
-    }
-
-    case LoopStatement::FOR_LOOP: {
-      // Do not duplicate conditions that may have function literal
-      // subexpressions.  This can cause us to compile the function
-      // literal twice.
-      bool test_at_bottom = !node->may_have_function_literal();
-
-      // Compile the init expression if present.
-      if (node->init() != NULL) {
-        Visit(node->init());
+    case ALWAYS_FALSE:
+      // We may have had continues or breaks in the body.
+      if (node->continue_target()->is_linked()) {
+        node->continue_target()->Bind();
       }
-
-      IncrementLoopNesting();
-
-      // If the condition is always false and has no side effects, we
-      // do not need to compile anything else.
-      if (info == ALWAYS_FALSE) break;
-
-      // Target for backward edge if no test at the bottom, otherwise
-      // unused.
-      JumpTarget loop(JumpTarget::BIDIRECTIONAL);
-
-      // Target for backward edge if there is a test at the bottom,
-      // otherwise used as target for test at the top.
-      JumpTarget body;
-      if (test_at_bottom) {
-        body.set_direction(JumpTarget::BIDIRECTIONAL);
+      if (node->break_target()->is_linked()) {
+        node->break_target()->Bind();
       }
-
-      // Based on the condition analysis, compile the test as necessary.
-      if (info == ALWAYS_TRUE) {
-        // We will not compile the test expression.  Label the top of
-        // the loop.
-        if (node->next() == NULL) {
-          // Use the continue target if there is no update expression.
-          node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-          node->continue_target()->Bind();
-        } else {
-          // Otherwise use the backward loop target.
-          node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-          loop.Bind();
-        }
-      } else {
-        ASSERT(info == DONT_KNOW);
-        if (test_at_bottom) {
-          // Continue is either the update expression or the test at
-          // the bottom, no need to label the test at the top.
-          node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-        } else if (node->next() == NULL) {
-          // We are not recompiling the test at the bottom and there
-          // is no update expression.
-          node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-          node->continue_target()->Bind();
-        } else {
-          // We are not recompiling the test at the bottom and there
-          // is an update expression.
-          node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-          loop.Bind();
-        }
-
-        // Compile the test with the body as the true target and
-        // preferred fall-through and with the break target as the
-        // false target.
-        ControlDestination dest(&body, node->break_target(), true);
+      break;
+    case DONT_KNOW:
+      // We have to compile the test expression if it can be reached by
+      // control flow falling out of the body or via continue.
+      if (node->continue_target()->is_linked()) {
+        node->continue_target()->Bind();
+      }
+      if (has_valid_frame()) {
+        ControlDestination dest(&body, node->break_target(), false);
         LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
-
-        if (dest.false_was_fall_through()) {
-          // If we got the break target as fall-through, the test may
-          // have been unconditionally false (if there are no jumps to
-          // the body).
-          if (!body.is_linked()) break;
-
-          // Otherwise, jump around the body on the fall through and
-          // then bind the body target.
-          node->break_target()->Unuse();
-          node->break_target()->Jump();
-          body.Bind();
-        }
       }
+      if (node->break_target()->is_linked()) {
+        node->break_target()->Bind();
+      }
+      break;
+  }
 
-      CheckStack();  // TODO(1222600): ignore if body contains calls.
-      Visit(node->body());
+  DecrementLoopNesting();
+}
 
-      // If there is an update expression, compile it if necessary.
-      if (node->next() != NULL) {
+
+void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ WhileStatement");
+  CodeForStatementPosition(node);
+
+  // If the condition is always false and has no side effects, we do not
+  // need to compile anything.
+  ConditionAnalysis info = AnalyzeCondition(node->cond());
+  if (info == ALWAYS_FALSE) return;
+
+  // Do not duplicate conditions that may have function literal
+  // subexpressions.  This can cause us to compile the function literal
+  // twice.
+  bool test_at_bottom = !node->may_have_function_literal();
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  IncrementLoopNesting();
+  JumpTarget body;
+  if (test_at_bottom) {
+    body.set_direction(JumpTarget::BIDIRECTIONAL);
+  }
+
+  // Based on the condition analysis, compile the test as necessary.
+  switch (info) {
+    case ALWAYS_TRUE:
+      // We will not compile the test expression.  Label the top of the
+      // loop with the continue target.
+      node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+      node->continue_target()->Bind();
+      break;
+    case DONT_KNOW: {
+      if (test_at_bottom) {
+        // Continue is the test at the bottom, no need to label the test
+        // at the top.  The body is a backward target.
+        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+      } else {
+        // Label the test at the top as the continue target.  The body
+        // is a forward-only target.
+        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+        node->continue_target()->Bind();
+      }
+      // Compile the test with the body as the true target and preferred
+      // fall-through and with the break target as the false target.
+      ControlDestination dest(&body, node->break_target(), true);
+      LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+
+      if (dest.false_was_fall_through()) {
+        // If we got the break target as fall-through, the test may have
+        // been unconditionally false (if there are no jumps to the
+        // body).
+        if (!body.is_linked()) {
+          DecrementLoopNesting();
+          return;
+        }
+
+        // Otherwise, jump around the body on the fall through and then
+        // bind the body target.
+        node->break_target()->Unuse();
+        node->break_target()->Jump();
+        body.Bind();
+      }
+      break;
+    }
+    case ALWAYS_FALSE:
+      UNREACHABLE();
+      break;
+  }
+
+  CheckStack();  // TODO(1222600): ignore if body contains calls.
+  Visit(node->body());
+
+  // Based on the condition analysis, compile the backward jump as
+  // necessary.
+  switch (info) {
+    case ALWAYS_TRUE:
+      // The loop body has been labeled with the continue target.
+      if (has_valid_frame()) {
+        node->continue_target()->Jump();
+      }
+      break;
+    case DONT_KNOW:
+      if (test_at_bottom) {
+        // If we have chosen to recompile the test at the bottom, then
+        // it is the continue target.
         if (node->continue_target()->is_linked()) {
           node->continue_target()->Bind();
         }
-
-        // Control can reach the update by falling out of the body or
-        // by a continue.
         if (has_valid_frame()) {
-          // Record the source position of the statement as this code
-          // which is after the code for the body actually belongs to
-          // the loop statement and not the body.
-          CodeForStatementPosition(node);
-          Visit(node->next());
+          // The break target is the fall-through (body is a backward
+          // jump from here and thus an invalid fall-through).
+          ControlDestination dest(&body, node->break_target(), false);
+          LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+        }
+      } else {
+        // If we have chosen not to recompile the test at the bottom,
+        // jump back to the one at the top.
+        if (has_valid_frame()) {
+          node->continue_target()->Jump();
         }
       }
+      break;
+    case ALWAYS_FALSE:
+      UNREACHABLE();
+      break;
+  }
 
-      // Based on the condition analysis, compile the backward jump as
-      // necessary.
-      if (info == ALWAYS_TRUE) {
+  // The break target may be already bound (by the condition), or there
+  // may not be a valid frame.  Bind it only if needed.
+  if (node->break_target()->is_linked()) {
+    node->break_target()->Bind();
+  }
+  DecrementLoopNesting();
+}
+
+
+void CodeGenerator::VisitForStatement(ForStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ ForStatement");
+  CodeForStatementPosition(node);
+
+  // Compile the init expression if present.
+  if (node->init() != NULL) {
+    Visit(node->init());
+  }
+
+  // If the condition is always false and has no side effects, we do not
+  // need to compile anything else.
+  ConditionAnalysis info = AnalyzeCondition(node->cond());
+  if (info == ALWAYS_FALSE) return;
+
+  // Do not duplicate conditions that may have function literal
+  // subexpressions.  This can cause us to compile the function literal
+  // twice.
+  bool test_at_bottom = !node->may_have_function_literal();
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  IncrementLoopNesting();
+
+  // Target for backward edge if no test at the bottom, otherwise
+  // unused.
+  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
+
+  // Target for backward edge if there is a test at the bottom,
+  // otherwise used as target for test at the top.
+  JumpTarget body;
+  if (test_at_bottom) {
+    body.set_direction(JumpTarget::BIDIRECTIONAL);
+  }
+
+  // Based on the condition analysis, compile the test as necessary.
+  switch (info) {
+    case ALWAYS_TRUE:
+      // We will not compile the test expression.  Label the top of the
+      // loop.
+      if (node->next() == NULL) {
+        // Use the continue target if there is no update expression.
+        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+        node->continue_target()->Bind();
+      } else {
+        // Otherwise use the backward loop target.
+        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+        loop.Bind();
+      }
+      break;
+    case DONT_KNOW: {
+      if (test_at_bottom) {
+        // Continue is either the update expression or the test at the
+        // bottom, no need to label the test at the top.
+        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+      } else if (node->next() == NULL) {
+        // We are not recompiling the test at the bottom and there is no
+        // update expression.
+        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+        node->continue_target()->Bind();
+      } else {
+        // We are not recompiling the test at the bottom and there is an
+        // update expression.
+        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+        loop.Bind();
+      }
+      // Compile the test with the body as the true target and preferred
+      // fall-through and with the break target as the false target.
+      ControlDestination dest(&body, node->break_target(), true);
+      LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+
+      if (dest.false_was_fall_through()) {
+        // If we got the break target as fall-through, the test may have
+        // been unconditionally false (if there are no jumps to the
+        // body).
+        if (!body.is_linked()) {
+          DecrementLoopNesting();
+          return;
+        }
+
+        // Otherwise, jump around the body on the fall through and then
+        // bind the body target.
+        node->break_target()->Unuse();
+        node->break_target()->Jump();
+        body.Bind();
+      }
+      break;
+    }
+    case ALWAYS_FALSE:
+      UNREACHABLE();
+      break;
+  }
+
+  CheckStack();  // TODO(1222600): ignore if body contains calls.
+  Visit(node->body());
+
+  // If there is an update expression, compile it if necessary.
+  if (node->next() != NULL) {
+    if (node->continue_target()->is_linked()) {
+      node->continue_target()->Bind();
+    }
+
+    // Control can reach the update by falling out of the body or by a
+    // continue.
+    if (has_valid_frame()) {
+      // Record the source position of the statement as this code which
+      // is after the code for the body actually belongs to the loop
+      // statement and not the body.
+      CodeForStatementPosition(node);
+      Visit(node->next());
+    }
+  }
+
+  // Based on the condition analysis, compile the backward jump as
+  // necessary.
+  switch (info) {
+    case ALWAYS_TRUE:
+      if (has_valid_frame()) {
+        if (node->next() == NULL) {
+          node->continue_target()->Jump();
+        } else {
+          loop.Jump();
+        }
+      }
+      break;
+    case DONT_KNOW:
+      if (test_at_bottom) {
+        if (node->continue_target()->is_linked()) {
+          // We can have dangling jumps to the continue target if there
+          // was no update expression.
+          node->continue_target()->Bind();
+        }
+        // Control can reach the test at the bottom by falling out of
+        // the body, by a continue in the body, or from the update
+        // expression.
+        if (has_valid_frame()) {
+          // The break target is the fall-through (body is a backward
+          // jump from here).
+          ControlDestination dest(&body, node->break_target(), false);
+          LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+        }
+      } else {
+        // Otherwise, jump back to the test at the top.
         if (has_valid_frame()) {
           if (node->next() == NULL) {
             node->continue_target()->Jump();
@@ -3000,47 +3012,19 @@
             loop.Jump();
           }
         }
-      } else {
-        ASSERT(info == DONT_KNOW);  // ALWAYS_FALSE cannot reach here.
-        if (test_at_bottom) {
-          if (node->continue_target()->is_linked()) {
-            // We can have dangling jumps to the continue target if
-            // there was no update expression.
-            node->continue_target()->Bind();
-          }
-          // Control can reach the test at the bottom by falling out
-          // of the body, by a continue in the body, or from the
-          // update expression.
-          if (has_valid_frame()) {
-            // The break target is the fall-through (body is a
-            // backward jump from here).
-            ControlDestination dest(&body, node->break_target(), false);
-            LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
-          }
-        } else {
-          // Otherwise, jump back to the test at the top.
-          if (has_valid_frame()) {
-            if (node->next() == NULL) {
-              node->continue_target()->Jump();
-            } else {
-              loop.Jump();
-            }
-          }
-        }
-      }
-
-      // The break target may be already bound (by the condition), or
-      // there may not be a valid frame.  Bind it only if needed.
-      if (node->break_target()->is_linked()) {
-        node->break_target()->Bind();
       }
       break;
-    }
+    case ALWAYS_FALSE:
+      UNREACHABLE();
+      break;
   }
 
+  // The break target may be already bound (by the condition), or
+  // there may not be a valid frame.  Bind it only if needed.
+  if (node->break_target()->is_linked()) {
+    node->break_target()->Bind();
+  }
   DecrementLoopNesting();
-  node->continue_target()->Unuse();
-  node->break_target()->Unuse();
 }
 
 
@@ -3234,10 +3218,10 @@
 }
 
 
-void CodeGenerator::VisitTryCatch(TryCatch* node) {
+void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
   ASSERT(!in_spilled_code());
   VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ TryCatch");
+  Comment cmnt(masm_, "[ TryCatchStatement");
   CodeForStatementPosition(node);
 
   JumpTarget try_block;
@@ -3370,10 +3354,10 @@
 }
 
 
-void CodeGenerator::VisitTryFinally(TryFinally* node) {
+void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
   ASSERT(!in_spilled_code());
   VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ TryFinally");
+  Comment cmnt(masm_, "[ TryFinallyStatement");
   CodeForStatementPosition(node);
 
   // State: Used to keep track of reason for entering the finally
@@ -3580,11 +3564,9 @@
   ASSERT(boilerplate->IsBoilerplate());
   frame_->SyncRange(0, frame_->element_count() - 1);
 
-  // Push the boilerplate on the stack.
-  frame_->EmitPush(Immediate(boilerplate));
-
   // Create a new closure.
   frame_->EmitPush(esi);
+  frame_->EmitPush(Immediate(boilerplate));
   Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
   frame_->Push(&result);
 }
@@ -5172,11 +5154,10 @@
   Result scratch1 = allocator()->Allocate();
   Result scratch2 = allocator()->Allocate();
   Result heap_number = allocator()->Allocate();
-  FloatingPointHelper::AllocateHeapNumber(masm_,
-                                          call_runtime.entry_label(),
-                                          scratch1.reg(),
-                                          scratch2.reg(),
-                                          heap_number.reg());
+  __ AllocateHeapNumber(heap_number.reg(),
+                        scratch1.reg(),
+                        scratch2.reg(),
+                        call_runtime.entry_label());
   scratch1.Unuse();
   scratch2.Unuse();
 
@@ -5338,8 +5319,8 @@
     switch (op) {
       case Token::SUB: {
         bool overwrite =
-            (node->AsBinaryOperation() != NULL &&
-             node->AsBinaryOperation()->ResultOverwriteAllowed());
+          (node->expression()->AsBinaryOperation() != NULL &&
+           node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
         UnarySubStub stub(overwrite);
         // TODO(1222589): remove dependency of TOS being cached inside stub
         Result operand = frame_->Pop();
@@ -6505,11 +6486,7 @@
   __ j(not_equal, &true_result);
   __ fldz();
   __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
-  __ fucompp();
-  __ push(eax);
-  __ fnstsw_ax();
-  __ sahf();
-  __ pop(eax);
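+  // FCmp() compares st(0) with st(1), pops both, and sets EFLAGS.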
+  __ FCmp();
   __ j(zero, &false_result);
   // Fall through to |true_result|.
 
@@ -6523,6 +6500,116 @@
 }
 
 
+void GenericBinaryOpStub::GenerateCall(
+    MacroAssembler* masm,
+    Register left,
+    Register right) {
+  if (!ArgsInRegistersSupported()) {
+    // Pass arguments on the stack.
+    __ push(left);
+    __ push(right);
+  } else {
+    // The calling convention with registers is left in edx and right in eax.
+    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+    if (!(left.is(edx) && right.is(eax))) {
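+      // Move the operands into edx/eax; for commutative operations a swap
+      // can be avoided by marking the arguments as reversed.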
+      if (left.is(eax) && right.is(edx)) {
+        if (IsOperationCommutative()) {
+          SetArgsReversed();
+        } else {
+          __ xchg(left, right);
+        }
+      } else if (left.is(edx)) {
+        __ mov(eax, right);
+      } else if (left.is(eax)) {
+        if (IsOperationCommutative()) {
+          __ mov(edx, right);
+          SetArgsReversed();
+        } else {
+          __ mov(edx, left);
+          __ mov(eax, right);
+        }
+      } else if (right.is(edx)) {
+        if (IsOperationCommutative()) {
+          __ mov(eax, left);
+          SetArgsReversed();
+        } else {
+          __ mov(eax, right);
+          __ mov(edx, left);
+        }
+      } else if (right.is(eax)) {
+        __ mov(edx, left);
+      } else {
+        __ mov(edx, left);
+        __ mov(eax, right);
+      }
+    }
+
+    // Update flags to indicate that arguments are in registers.
+    SetArgsInRegisters();
+  }
+
+  // Call the stub.
+  __ CallStub(this);
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+    MacroAssembler* masm,
+    Register left,
+    Smi* right) {
+  if (!ArgsInRegistersSupported()) {
+    // Pass arguments on the stack.
+    __ push(left);
+    __ push(Immediate(right));
+  } else {
+    // Adapt arguments to the calling convention: left in edx, right in eax.
+    if (left.is(edx)) {
+      __ mov(eax, Immediate(right));
+    } else if (left.is(eax) && IsOperationCommutative()) {
+      __ mov(edx, Immediate(right));
+      SetArgsReversed();
+    } else {
+      __ mov(edx, left);
+      __ mov(eax, Immediate(right));
+    }
+
+    // Update flags to indicate that arguments are in registers.
+    SetArgsInRegisters();
+  }
+
+  // Call the stub.
+  __ CallStub(this);
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+    MacroAssembler* masm,
+    Smi* left,
+    Register right) {
+  if (!ArgsInRegistersSupported()) {
+    // Pass arguments on the stack.
+    __ push(Immediate(left));
+    __ push(right);
+  } else {
+    // Adapt arguments to the calling convention: left in edx, right in eax.
+    bool is_commutative = (op_ == Token::ADD) || (op_ == Token::MUL);
+    if (right.is(eax)) {
+      __ mov(edx, Immediate(left));
+    } else if (right.is(edx) && is_commutative) {
+      __ mov(eax, Immediate(left));
+    } else {
+      __ mov(edx, Immediate(left));
+      __ mov(eax, right);
+    }
+    // Update flags to indicate that arguments are in registers.
+    SetArgsInRegisters();
+  }
+
+  // Call the stub.
+  __ CallStub(this);
+}
+
+
 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
   // Perform fast-case smi code for the operation (eax <op> ebx) and
   // leave result in register eax.
@@ -6670,22 +6757,23 @@
 void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
   Label call_runtime;
 
-  if (flags_ == SMI_CODE_IN_STUB) {
-    // The fast case smi code wasn't inlined in the stub caller
-    // code. Generate it here to speed up common operations.
-    Label slow;
-    __ mov(ebx, Operand(esp, 1 * kPointerSize));  // get y
-    __ mov(eax, Operand(esp, 2 * kPointerSize));  // get x
-    GenerateSmiCode(masm, &slow);
-    __ ret(2 * kPointerSize);  // remove both operands
+  __ IncrementCounter(&Counters::generic_binary_stub_calls, 1);
 
+  // Generate fast case smi code if requested, i.e. when the caller has not
+  // already generated it inline. Generating it here speeds up common
+  // operations.
+  if (HasSmiCodeInStub()) {
+    Label slow;
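+    // Load the operands: right (y) into ebx, left (x) into eax.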
+    __ mov(ebx, Operand(esp, 1 * kPointerSize));
+    __ mov(eax, Operand(esp, 2 * kPointerSize));
+    GenerateSmiCode(masm, &slow);
+    GenerateReturn(masm);
     // Too bad. The fast case smi code didn't succeed.
     __ bind(&slow);
   }
 
-  // Setup registers.
-  __ mov(eax, Operand(esp, 1 * kPointerSize));  // get y
-  __ mov(edx, Operand(esp, 2 * kPointerSize));  // get x
+  // Make sure the arguments are in edx and eax.
+  GenerateLoadArguments(masm);
 
   // Floating point case.
   switch (op_) {
@@ -6719,19 +6807,20 @@
             __ test(eax, Immediate(kSmiTagMask));
             __ j(not_zero, &skip_allocation, not_taken);
             // Fall through!
-          case NO_OVERWRITE:
-            FloatingPointHelper::AllocateHeapNumber(masm,
-                                                    &call_runtime,
-                                                    ecx,
-                                                    edx,
-                                                    eax);
+          case NO_OVERWRITE: {
+            // Allocate a heap number for the result. Keep eax and edx intact
+            // for the possible runtime call.
+            __ AllocateHeapNumber(ebx, ecx, no_reg, &call_runtime);
+            // Now eax can be overwritten, losing one of the arguments, as
+            // we are done with it and will not need it any more.
+            __ mov(eax, ebx);
             __ bind(&skip_allocation);
             break;
+          }
           default: UNREACHABLE();
         }
         __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
-        __ ret(2 * kPointerSize);
-
+        GenerateReturn(masm);
       } else {  // SSE2 not available, use FPU.
         FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
         // Allocate a heap number, if needed.
@@ -6747,11 +6836,12 @@
             __ j(not_zero, &skip_allocation, not_taken);
             // Fall through!
           case NO_OVERWRITE:
-            FloatingPointHelper::AllocateHeapNumber(masm,
-                                                    &call_runtime,
-                                                    ecx,
-                                                    edx,
-                                                    eax);
+            // Allocate a heap number for the result. Keep eax and edx intact
+            // for the possible runtime call.
+            __ AllocateHeapNumber(ebx, ecx, no_reg, &call_runtime);
+            // Now eax can be overwritten, losing one of the arguments, as
+            // we are done with it and will not need it any more.
+            __ mov(eax, ebx);
             __ bind(&skip_allocation);
             break;
           default: UNREACHABLE();
@@ -6766,7 +6856,7 @@
           default: UNREACHABLE();
         }
         __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
-        __ ret(2 * kPointerSize);
+        GenerateReturn(masm);
       }
     }
     case Token::MOD: {
@@ -6800,18 +6890,14 @@
         // Check if right operand is int32.
         __ fist_s(Operand(esp, 0 * kPointerSize));
         __ fild_s(Operand(esp, 0 * kPointerSize));
-        __ fucompp();
-        __ fnstsw_ax();
-        __ sahf();
+        __ FCmp();
         __ j(not_zero, &operand_conversion_failure);
         __ j(parity_even, &operand_conversion_failure);
 
         // Check if left operand is int32.
         __ fist_s(Operand(esp, 1 * kPointerSize));
         __ fild_s(Operand(esp, 1 * kPointerSize));
-        __ fucompp();
-        __ fnstsw_ax();
-        __ sahf();
+        __ FCmp();
         __ j(not_zero, &operand_conversion_failure);
         __ j(parity_even, &operand_conversion_failure);
       }
@@ -6858,8 +6944,7 @@
             __ j(not_zero, &skip_allocation, not_taken);
             // Fall through!
           case NO_OVERWRITE:
-            FloatingPointHelper::AllocateHeapNumber(masm, &call_runtime,
-                                                    ecx, edx, eax);
+            __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
             __ bind(&skip_allocation);
             break;
           default: UNREACHABLE();
@@ -6899,8 +6984,20 @@
   }
 
   // If all else fails, use the runtime system to get the correct
-  // result.
+  // result. If the arguments were passed in registers, place them on the
+  // stack in the correct order first.
   __ bind(&call_runtime);
+  if (HasArgumentsInRegisters()) {
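+    // Pop the return address into ecx so the arguments can be pushed below
+    // it, then push it back on top.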
+    __ pop(ecx);
+    if (HasArgumentsReversed()) {
+      __ push(eax);
+      __ push(edx);
+    } else {
+      __ push(edx);
+      __ push(eax);
+    }
+    __ push(ecx);
+  }
   switch (op_) {
     case Token::ADD: {
       // Test for string arguments before calling runtime.
@@ -6977,22 +7074,23 @@
 }
 
 
-void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
-                                             Label* need_gc,
-                                             Register scratch1,
-                                             Register scratch2,
-                                             Register result) {
-  // Allocate heap number in new space.
-  __ AllocateInNewSpace(HeapNumber::kSize,
-                        result,
-                        scratch1,
-                        scratch2,
-                        need_gc,
-                        TAG_OBJECT);
+void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
+  // If the arguments are not passed in registers, read them from the stack.
+  if (!HasArgumentsInRegisters()) {
+    __ mov(eax, Operand(esp, 1 * kPointerSize));
+    __ mov(edx, Operand(esp, 2 * kPointerSize));
+  }
+}
 
-  // Set the map.
-  __ mov(FieldOperand(result, HeapObject::kMapOffset),
-         Immediate(Factory::heap_number_map()));
+
+void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
+  // If the arguments are not passed in registers, remove them from the
+  // stack before returning.
+  if (!HasArgumentsInRegisters()) {
+    __ ret(2 * kPointerSize);  // Remove both operands.
+  } else {
+    __ ret(0);
+  }
 }
 
 
@@ -7152,7 +7250,7 @@
   } else {
     __ mov(edx, Operand(eax));
     // edx: operand
-    FloatingPointHelper::AllocateHeapNumber(masm, &undo, ebx, ecx, eax);
+    __ AllocateHeapNumber(eax, ebx, ecx, &undo);
     // eax: allocated 'empty' number
     __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
     __ xor_(ecx, HeapNumber::kSignMask);  // Flip sign.
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index 142a5a1..a37bffe 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -294,6 +294,15 @@
                                Handle<Script> script,
                                bool is_eval);
 
+  // Printing of AST, etc. as requested by flags.
+  static void MakeCodePrologue(FunctionLiteral* fun);
+
+  // Allocate and install the code.
+  static Handle<Code> MakeCodeEpilogue(FunctionLiteral* fun,
+                                       MacroAssembler* masm,
+                                       Code::Flags flags,
+                                       Handle<Script> script);
+
 #ifdef ENABLE_LOGGING_AND_PROFILING
   static bool ShouldGenerateLog(Expression* type);
 #endif
@@ -303,6 +312,8 @@
                               bool is_toplevel,
                               Handle<Script> script);
 
+  static void RecordPositions(MacroAssembler* masm, int pos);
+
   // Accessors
   MacroAssembler* masm() { return masm_; }
 
@@ -385,7 +396,7 @@
   void LoadReference(Reference* ref);
   void UnloadReference(Reference* ref);
 
-  Operand ContextOperand(Register context, int index) const {
+  static Operand ContextOperand(Register context, int index) {
     return Operand(context, Context::SlotOffset(index));
   }
 
@@ -396,7 +407,7 @@
                                             JumpTarget* slow);
 
   // Expressions
-  Operand GlobalObject() const {
+  static Operand GlobalObject() {
     return ContextOperand(esi, Context::GLOBAL_INDEX);
   }
 
@@ -500,10 +511,11 @@
                                       const InlineRuntimeLUT& new_entry,
                                       InlineRuntimeLUT* old_entry);
 
+  static Handle<Code> ComputeLazyCompile(int argc);
   Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
   void ProcessDeclarations(ZoneList<Declaration*>* declarations);
 
-  Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
+  static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
 
   // Declare global variables and functions in the given array of
   // name/value pairs.
@@ -548,6 +560,14 @@
   inline void GenerateMathSin(ZoneList<Expression*>* args);
   inline void GenerateMathCos(ZoneList<Expression*>* args);
 
+  // Simple condition analysis.
+  enum ConditionAnalysis {
+    ALWAYS_TRUE,
+    ALWAYS_FALSE,
+    DONT_KNOW
+  };
+  ConditionAnalysis AnalyzeCondition(Expression* cond);
+
   // Methods used to indicate which source code is generated for. Source
   // positions are collected by the assembler and emitted with the relocation
   // information.
@@ -597,6 +617,8 @@
   friend class JumpTarget;
   friend class Reference;
   friend class Result;
+  friend class FastCodeGenerator;
+  friend class CodeGenSelector;
 
   friend class CodeGeneratorPatcher;  // Used in test-log-stack-tracer.cc
 
@@ -604,47 +626,74 @@
 };
 
 
-// Flag that indicates whether or not the code that handles smi arguments
-// should be placed in the stub, inlined, or omitted entirely.
+class ToBooleanStub: public CodeStub {
+ public:
+  ToBooleanStub() { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Major MajorKey() { return ToBoolean; }
+  int MinorKey() { return 0; }
+};
+
+
+// Flags that indicate how to generate code for the stub.
 enum GenericBinaryFlags {
-  SMI_CODE_IN_STUB,
-  SMI_CODE_INLINED
+  NO_GENERIC_BINARY_FLAGS = 0,
+  NO_SMI_CODE_IN_STUB = 1 << 0  // Omit smi code in stub.
 };
 
 
 class GenericBinaryOpStub: public CodeStub {
  public:
-  GenericBinaryOpStub(Token::Value op,
+  GenericBinaryOpStub(Token::Value operation,
                       OverwriteMode mode,
                       GenericBinaryFlags flags)
-      : op_(op), mode_(mode), flags_(flags) {
+      : op_(operation),
+        mode_(mode),
+        flags_(flags),
+        args_in_registers_(false),
+        args_reversed_(false) {
     use_sse3_ = CpuFeatures::IsSupported(CpuFeatures::SSE3);
     ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
   }
 
-  void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+  // Generate code to call the stub with the supplied arguments. This adds
+  // code at the call site to place the arguments either in registers or on
+  // the stack, followed by the actual call.
+  void GenerateCall(MacroAssembler* masm, Register left, Register right);
+  void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
+  void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
 
  private:
   Token::Value op_;
   OverwriteMode mode_;
   GenericBinaryFlags flags_;
+  bool args_in_registers_;  // Arguments passed in registers, not on the stack.
+  bool args_reversed_;  // Left and right arguments are swapped.
   bool use_sse3_;
 
   const char* GetName();
 
 #ifdef DEBUG
   void Print() {
-    PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
+    PrintF("GenericBinaryOpStub (op %s), "
+           "(mode %d, flags %d, registers %d, reversed %d)\n",
            Token::String(op_),
            static_cast<int>(mode_),
-           static_cast<int>(flags_));
+           static_cast<int>(flags_),
+           static_cast<int>(args_in_registers_),
+           static_cast<int>(args_reversed_));
   }
 #endif
 
-  // Minor key encoding in 16 bits FSOOOOOOOOOOOOMM.
+  // Minor key encoding in 16 bits FRASOOOOOOOOOOMM.
   class ModeBits: public BitField<OverwriteMode, 0, 2> {};
-  class OpBits: public BitField<Token::Value, 2, 12> {};
-  class SSE3Bits: public BitField<bool, 14, 1> {};
+  class OpBits: public BitField<Token::Value, 2, 10> {};
+  class SSE3Bits: public BitField<bool, 12, 1> {};
+  class ArgsInRegistersBits: public BitField<bool, 13, 1> {};
+  class ArgsReversedBits: public BitField<bool, 14, 1> {};
   class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
 
   Major MajorKey() { return GenericBinaryOp; }
@@ -653,9 +702,30 @@
     return OpBits::encode(op_)
            | ModeBits::encode(mode_)
            | FlagBits::encode(flags_)
-           | SSE3Bits::encode(use_sse3_);
+           | SSE3Bits::encode(use_sse3_)
+           | ArgsInRegistersBits::encode(args_in_registers_)
+           | ArgsReversedBits::encode(args_reversed_);
   }
+
   void Generate(MacroAssembler* masm);
+  void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+  void GenerateLoadArguments(MacroAssembler* masm);
+  void GenerateReturn(MacroAssembler* masm);
+
+  bool ArgsInRegistersSupported() {
+    return ((op_ == Token::ADD) || (op_ == Token::SUB)
+             || (op_ == Token::MUL) || (op_ == Token::DIV))
+            && flags_ != NO_SMI_CODE_IN_STUB;
+  }
+  bool IsOperationCommutative() {
+    return (op_ == Token::ADD) || (op_ == Token::MUL);
+  }
+
+  void SetArgsInRegisters() { args_in_registers_ = true; }
+  void SetArgsReversed() { args_reversed_ = true; }
+  bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
+  bool HasArgumentsInRegisters() { return args_in_registers_; }
+  bool HasArgumentsReversed() { return args_reversed_; }
 };
 
 
diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc
index 7e0dfd1..2d20117 100644
--- a/src/ia32/debug-ia32.cc
+++ b/src/ia32/debug-ia32.cc
@@ -63,7 +63,7 @@
 // having been patched with a call instruction.
 bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
   ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
-  return rinfo->IsCallInstruction();
+  return rinfo->IsPatchedReturnSequence();
 }
 
 
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index 458844e..adedf34 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -124,6 +124,14 @@
 };
 
 
+static const char* conditional_move_mnem[] = {
+  /*0*/ "cmovo", "cmovno", "cmovc", "cmovnc",
+  /*4*/ "cmovz", "cmovnz", "cmovna", "cmova",
+  /*8*/ "cmovs", "cmovns", "cmovpe", "cmovpo",
+  /*12*/ "cmovl", "cmovnl", "cmovng", "cmovg"
+};
+
+
 enum InstructionType {
   NO_INSTR,
   ZERO_OPERANDS_INSTR,
@@ -311,6 +319,7 @@
   int JumpConditional(byte* data, const char* comment);
   int JumpConditionalShort(byte* data, const char* comment);
   int SetCC(byte* data);
+  int CMov(byte* data);
   int FPUInstruction(byte* data);
   void AppendToBuffer(const char* format, ...);
 
@@ -615,6 +624,16 @@
 
 
 // Returns number of bytes used, including *data.
+int DisassemblerIA32::CMov(byte* data) {
+  assert(*data == 0x0F);
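+  // The condition code is the low nibble of the second opcode byte.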
+  byte cond = *(data + 1) & 0x0F;
+  const char* mnem = conditional_move_mnem[cond];
+  int op_size = PrintOperands(mnem, REG_OPER_OP_ORDER, data + 2);
+  return 2 + op_size;  // includes 0x0F
+}
+
+
+// Returns number of bytes used, including *data.
 int DisassemblerIA32::FPUInstruction(byte* data) {
   byte b1 = *data;
   byte b2 = *(data + 1);
@@ -861,6 +880,8 @@
             data += PrintOperands(f0mnem, REG_OPER_OP_ORDER, data);
           } else if ((f0byte & 0xF0) == 0x90) {
             data += SetCC(data);
+          } else if ((f0byte & 0xF0) == 0x40) {
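+            // Opcodes 0x0F 0x40..0x4F are the cmovcc instructions.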
+            data += CMov(data);
           } else {
             data += 2;
             if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) {
@@ -956,6 +977,19 @@
           AppendToBuffer("mov_w ");
           data += PrintRightOperand(data);
           AppendToBuffer(",%s", NameOfCPURegister(regop));
+        } else if (*data == 0x0F) {
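+          // 0x66 0x0F 0x2F encodes comisd xmm, xmm/m64.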
+          data++;
+          if (*data == 0x2F) {
+            data++;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("comisd %s,%s",
+                           NameOfXMMRegister(regop),
+                           NameOfXMMRegister(rm));
+            data++;
+          } else {
+            UnimplementedInstruction();
+          }
         } else {
           UnimplementedInstruction();
         }
diff --git a/src/ia32/fast-codegen-ia32.cc b/src/ia32/fast-codegen-ia32.cc
new file mode 100644
index 0000000..663d136
--- /dev/null
+++ b/src/ia32/fast-codegen-ia32.cc
@@ -0,0 +1,545 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "fast-codegen.h"
+#include "parser.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+// Generate code for a JS function.  On entry to the function the receiver
+// and arguments have been pushed on the stack left to right, with the
+// return address on top of them.  The actual argument count matches the
+// formal parameter count expected by the function.
+//
+// The live registers are:
+//   o edi: the JS function object being called (i.e. ourselves)
+//   o esi: our context
+//   o ebp: our caller's frame pointer
+//   o esp: stack pointer (pointing to return address)
+//
+// The function builds a JS frame.  Please see JavaScriptFrameConstants in
+// frames-ia32.h for its layout.
+void FastCodeGenerator::Generate(FunctionLiteral* fun) {
+  function_ = fun;
+  SetFunctionPosition(fun);
+
+  __ push(ebp);  // Caller's frame pointer.
+  __ mov(ebp, esp);
+  __ push(esi);  // Callee's context.
+  __ push(edi);  // Callee's JS Function.
+
+  { Comment cmnt(masm_, "[ Allocate locals");
+    int locals_count = fun->scope()->num_stack_slots();
+    for (int i = 0; i < locals_count; i++) {
+      __ push(Immediate(Factory::undefined_value()));
+    }
+  }
+
+  { Comment cmnt(masm_, "[ Stack check");
+    Label ok;
+    ExternalReference stack_guard_limit =
+        ExternalReference::address_of_stack_guard_limit();
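+    // Call the stack-check stub if esp has grown below the guard limit.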
+    __ cmp(esp, Operand::StaticVariable(stack_guard_limit));
+    __ j(above_equal, &ok, taken);
+    StackCheckStub stub;
+    __ CallStub(&stub);
+    __ bind(&ok);
+  }
+
+  { Comment cmnt(masm_, "[ Declarations");
+    VisitDeclarations(fun->scope()->declarations());
+  }
+
+  if (FLAG_trace) {
+    __ CallRuntime(Runtime::kTraceEnter, 0);
+  }
+
+  { Comment cmnt(masm_, "[ Body");
+    VisitStatements(fun->body());
+  }
+
+  { Comment cmnt(masm_, "[ return <undefined>;");
+    // Emit a 'return undefined' in case control fell off the end of the
+    // body.
+    __ mov(eax, Factory::undefined_value());
+    SetReturnPosition(fun);
+
+    if (FLAG_trace) {
+      __ push(eax);
+      __ CallRuntime(Runtime::kTraceExit, 1);
+    }
+    __ RecordJSReturn();
+    // Do not use the leave instruction here because it is too short to
+    // patch with the code required by the debugger.
+    __ mov(esp, ebp);
+    __ pop(ebp);
+    __ ret((fun->scope()->num_parameters() + 1) * kPointerSize);
+  }
+}
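For orientation, a sketch of the frame this prologue builds (lower addresses at the top; local and argument counts vary per function, and the labels are illustrative, not from the source):

        esp ->  local N-1 (undefined)   N = scope()->num_stack_slots()
                ...
                local 0   (undefined)
                JS function  (from edi)
                context      (from esi)
        ebp ->  caller's frame pointer
                return address
                argument n-1
                ...
                argument 0
                receiver     (freed, with the arguments, by the 'ret' above)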
+
+
+void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+  // Call the runtime to declare the globals.
+  __ push(esi);  // The context is the first argument.
+  __ push(Immediate(pairs));
+  __ push(Immediate(Smi::FromInt(is_eval_ ? 1 : 0)));
+  __ CallRuntime(Runtime::kDeclareGlobals, 3);
+  // Return value is ignored.
+}
+
+
+void FastCodeGenerator::VisitBlock(Block* stmt) {
+  Comment cmnt(masm_, "[ Block");
+  SetStatementPosition(stmt);
+  VisitStatements(stmt->statements());
+}
+
+
+void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
+  Comment cmnt(masm_, "[ ExpressionStatement");
+  SetStatementPosition(stmt);
+  Visit(stmt->expression());
+}
+
+
+void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
+  Comment cmnt(masm_, "[ ReturnStatement");
+  SetStatementPosition(stmt);
+  Expression* expr = stmt->expression();
+  Visit(expr);
+
+  // Complete the statement based on the location of the subexpression.
+  Location source = expr->location();
+  ASSERT(!source.is_nowhere());
+  if (source.is_temporary()) {
+    __ pop(eax);
+  } else {
+    ASSERT(source.is_constant());
+    ASSERT(expr->AsLiteral() != NULL);
+    __ mov(eax, expr->AsLiteral()->handle());
+  }
+  if (FLAG_trace) {
+    __ push(eax);
+    __ CallRuntime(Runtime::kTraceExit, 1);
+  }
+  __ RecordJSReturn();
+
+  // Do not use the leave instruction here because it is too short to
+  // patch with the code required by the debugger.
+  __ mov(esp, ebp);
+  __ pop(ebp);
+  __ ret((function_->scope()->num_parameters() + 1) * kPointerSize);
+}
+
+
+void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+  Comment cmnt(masm_, "[ FunctionLiteral");
+
+  // Build the function boilerplate and instantiate it.
+  Handle<JSFunction> boilerplate = BuildBoilerplate(expr);
+  if (HasStackOverflow()) return;
+
+  ASSERT(boilerplate->IsBoilerplate());
+
+  // Create a new closure.
+  __ push(esi);
+  __ push(Immediate(boilerplate));
+  __ CallRuntime(Runtime::kNewClosure, 2);
+
+  if (expr->location().is_temporary()) {
+    __ push(eax);
+  } else {
+    ASSERT(expr->location().is_nowhere());
+  }
+}
+
+
+void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+  Comment cmnt(masm_, "[ VariableProxy");
+  Expression* rewrite = expr->var()->rewrite();
+  if (rewrite == NULL) {
+    Comment cmnt(masm_, "Global variable");
+    // Use inline caching. The variable name is passed in ecx and the
+    // global object on the stack.
+    __ push(CodeGenerator::GlobalObject());
+    __ mov(ecx, expr->name());
+    Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+    __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+
+    // A test eax instruction following the call is used by the IC to
+    // indicate that the inobject property case was inlined.  Ensure there
+    // is no test eax instruction here.  Remember that the assembler may
+    // choose to do peephole optimization (e.g., push/pop elimination).
+    if (expr->location().is_temporary()) {
+      // Replace the global object with the result.
+      __ mov(Operand(esp, 0), eax);
+    } else {
+      ASSERT(expr->location().is_nowhere());
+      __ add(Operand(esp), Immediate(kPointerSize));
+    }
+
+  } else {
+    Comment cmnt(masm_, "Stack slot");
+    Slot* slot = rewrite->AsSlot();
+    ASSERT(slot != NULL);
+    if (expr->location().is_temporary()) {
+      __ push(Operand(ebp, SlotOffset(slot)));
+    } else {
+      ASSERT(expr->location().is_nowhere());
+    }
+  }
+}
+
+
+void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+  Comment cmnt(masm_, "[ RegExp Literal");
+  Label done;
+  // Registers will be used as follows:
+  // edi = JS function.
+  // ebx = literals array.
+  // eax = regexp literal.
+  __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ mov(ebx, FieldOperand(edi, JSFunction::kLiteralsOffset));
+  int literal_offset =
+      FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+  __ mov(eax, FieldOperand(ebx, literal_offset));
+  __ cmp(eax, Factory::undefined_value());
+  __ j(not_equal, &done);
+  // Create the regexp literal using the runtime function.
+  // Result will be in eax.
+  __ push(ebx);
+  __ push(Immediate(Smi::FromInt(expr->literal_index())));
+  __ push(Immediate(expr->pattern()));
+  __ push(Immediate(expr->flags()));
+  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+  // Label done:
+  __ bind(&done);
+  if (expr->location().is_temporary()) {
+    __ push(eax);
+  } else {
+    ASSERT(expr->location().is_nowhere());
+  }
+}
+
+
+void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+  Comment cmnt(masm_, "[ ArrayLiteral");
+  Label make_clone;
+
+  // Fetch the function's literals array.
+  __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ mov(ebx, FieldOperand(ebx, JSFunction::kLiteralsOffset));
+  // Check if the literal's boilerplate has been instantiated.
+  int offset =
+      FixedArray::kHeaderSize + (expr->literal_index() * kPointerSize);
+  __ mov(eax, FieldOperand(ebx, offset));
+  __ cmp(eax, Factory::undefined_value());
+  __ j(not_equal, &make_clone);
+
+  // Instantiate the boilerplate.
+  __ push(ebx);
+  __ push(Immediate(Smi::FromInt(expr->literal_index())));
+  __ push(Immediate(expr->literals()));
+  __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
+
+  __ bind(&make_clone);
+  // Clone the boilerplate.
+  __ push(eax);
+  if (expr->depth() > 1) {
+    __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+  } else {
+    __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+  }
+
+  bool result_saved = false;  // Is the result saved to the stack?
+
+  // Emit code to evaluate all the non-constant subexpressions and to store
+  // them into the newly cloned array.
+  ZoneList<Expression*>* subexprs = expr->values();
+  for (int i = 0, len = subexprs->length(); i < len; i++) {
+    Expression* subexpr = subexprs->at(i);
+    // If the subexpression is a literal or a simple materialized literal it
+    // is already set in the cloned array.
+    if (subexpr->AsLiteral() != NULL ||
+        CompileTimeValue::IsCompileTimeValue(subexpr)) {
+      continue;
+    }
+
+    if (!result_saved) {
+      __ push(eax);
+      result_saved = true;
+    }
+    Visit(subexpr);
+    ASSERT(subexpr->location().is_temporary());
+
+    // Store the subexpression value in the array's elements.
+    __ pop(eax);  // Subexpression value.
+    __ mov(ebx, Operand(esp, 0));  // Copy of array literal.
+    __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
+    int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+    __ mov(FieldOperand(ebx, offset), eax);
+
+    // Update the write barrier for the array store.
+    __ RecordWrite(ebx, offset, eax, ecx);
+  }
+
+  Location destination = expr->location();
+  if (destination.is_nowhere() && result_saved) {
+    __ add(Operand(esp), Immediate(kPointerSize));
+  } else if (destination.is_temporary() && !result_saved) {
+    __ push(eax);
+  }
+}
+
+
+void FastCodeGenerator::VisitAssignment(Assignment* expr) {
+  Comment cmnt(masm_, "[ Assignment");
+  ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR);
+  Expression* rhs = expr->value();
+  Visit(rhs);
+
+  // Left-hand side can only be a global or a (parameter or local) slot.
+  Variable* var = expr->target()->AsVariableProxy()->AsVariable();
+  ASSERT(var != NULL);
+  ASSERT(var->is_global() || var->slot() != NULL);
+
+  // Complete the assignment based on the location of the right-hand-side
+  // value and the desired location of the assignment value.
+  Location destination = expr->location();
+  Location source = rhs->location();
+  ASSERT(!destination.is_constant());
+  ASSERT(!source.is_nowhere());
+
+  if (var->is_global()) {
+    // Assignment to a global variable, use inline caching.  Right-hand-side
+    // value is passed in eax, variable name in ecx, and the global object
+    // on the stack.
+    if (source.is_temporary()) {
+      __ pop(eax);
+    } else {
+      ASSERT(source.is_constant());
+      ASSERT(rhs->AsLiteral() != NULL);
+      __ mov(eax, rhs->AsLiteral()->handle());
+    }
+    __ mov(ecx, var->name());
+    __ push(CodeGenerator::GlobalObject());
+    Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+    __ call(ic, RelocInfo::CODE_TARGET);
+    // Overwrite the global object on the stack with the result if needed.
+    if (destination.is_temporary()) {
+      __ mov(Operand(esp, 0), eax);
+    } else {
+      ASSERT(destination.is_nowhere());
+      __ add(Operand(esp), Immediate(kPointerSize));
+    }
+
+  } else {
+    // Local or parameter assignment.
+    if (source.is_temporary()) {
+      if (destination.is_temporary()) {
+        // Case 'temp1 <- (var = temp0)'.  Preserve right-hand-side
+        // temporary on the stack.
+        __ mov(eax, Operand(esp, 0));
+        __ mov(Operand(ebp, SlotOffset(var->slot())), eax);
+      } else {
+        ASSERT(destination.is_nowhere());
+        // Case 'var = temp'.  Discard right-hand-side temporary.
+        __ pop(Operand(ebp, SlotOffset(var->slot())));
+      }
+    } else {
+      ASSERT(source.is_constant());
+      ASSERT(rhs->AsLiteral() != NULL);
+      // Two cases: 'temp <- (var = constant)', or 'var = constant' with a
+      // discarded result.  Always perform the assignment.
+      __ mov(eax, rhs->AsLiteral()->handle());
+      __ mov(Operand(ebp, SlotOffset(var->slot())), eax);
+      if (destination.is_temporary()) {
+        // Case 'temp <- (var = constant)'.  Save result.
+        __ push(eax);
+      }
+    }
+  }
+}
+
+
+void FastCodeGenerator::VisitCall(Call* expr) {
+  Expression* fun = expr->expression();
+  ZoneList<Expression*>* args = expr->arguments();
+  Variable* var = fun->AsVariableProxy()->AsVariable();
+  ASSERT(var != NULL && !var->is_this() && var->is_global());
+  ASSERT(!var->is_possibly_eval());
+
+  __ push(Immediate(var->name()));
+  // Push global object (receiver).
+  __ push(CodeGenerator::GlobalObject());
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Visit(args->at(i));
+    ASSERT(!args->at(i)->location().is_nowhere());
+    if (args->at(i)->location().is_constant()) {
+      ASSERT(args->at(i)->AsLiteral() != NULL);
+      __ push(Immediate(args->at(i)->AsLiteral()->handle()));
+    }
+  }
+  // Record the source position for the debugger.
+  SetSourcePosition(expr->position());
+  // Call the IC initialization code.
+  Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
+                                                         NOT_IN_LOOP);
+  __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+  // Restore context register.
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  // Discard the function left on TOS.
+  if (expr->location().is_temporary()) {
+    __ mov(Operand(esp, 0), eax);
+  } else {
+    ASSERT(expr->location().is_nowhere());
+    __ add(Operand(esp), Immediate(kPointerSize));
+  }
+}
+
+
+void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+  Comment cmnt(masm_, "[ CallRuntime");
+  ZoneList<Expression*>* args = expr->arguments();
+  Runtime::Function* function = expr->function();
+
+  ASSERT(function != NULL);
+
+  // Push the arguments ("left-to-right").
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Visit(args->at(i));
+    ASSERT(!args->at(i)->location().is_nowhere());
+    if (args->at(i)->location().is_constant()) {
+      ASSERT(args->at(i)->AsLiteral() != NULL);
+      __ push(Immediate(args->at(i)->AsLiteral()->handle()));
+    } else {
+      ASSERT(args->at(i)->location().is_temporary());
+      // If the location is temporary, the value is already on the
+      // stack, so there is nothing to do here.
+    }
+  }
+
+  __ CallRuntime(function, arg_count);
+  if (expr->location().is_temporary()) {
+    __ push(eax);
+  } else {
+    ASSERT(expr->location().is_nowhere());
+  }
+}
+
+
+void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+  // Compile a short-circuited boolean OR operation in a non-test
+  // context.
+  ASSERT(expr->op() == Token::OR);
+  // Compile (e0 || e1) as if it were
+  // (let (temp = e0) temp ? temp : e1).
+
+  Label eval_right, done;
+  Location destination = expr->location();
+  ASSERT(!destination.is_constant());
+
+  Expression* left = expr->left();
+  Location left_source = left->location();
+  ASSERT(!left_source.is_nowhere());
+
+  Expression* right = expr->right();
+  Location right_source = right->location();
+  ASSERT(!right_source.is_nowhere());
+
+  Visit(left);
+  // Use the shared ToBoolean stub to find the boolean value of the
+  // left-hand subexpression.  Load the value into eax to perform some
+  // inlined checks assumed by the stub.
+  if (left_source.is_temporary()) {
+    if (destination.is_temporary()) {
+      // Copy the left-hand value into eax because we may need it as the
+      // final result.
+      __ mov(eax, Operand(esp, 0));
+    } else {
+      // Pop the left-hand value into eax because we will not need it as the
+      // final result.
+      __ pop(eax);
+    }
+  } else {
+    // Load the left-hand value into eax.  Put it on the stack if we may
+    // need it.
+    ASSERT(left->AsLiteral() != NULL);
+    __ mov(eax, left->AsLiteral()->handle());
+    if (destination.is_temporary()) __ push(eax);
+  }
+  // The left-hand value is in eax.  It is also on the stack iff the
+  // destination location is temporary.
+
+  // Perform fast checks assumed by the stub.
+  __ cmp(eax, Factory::undefined_value());  // The undefined value is false.
+  __ j(equal, &eval_right);
+  __ cmp(eax, Factory::true_value());  // True is true.
+  __ j(equal, &done);
+  __ cmp(eax, Factory::false_value());  // False is false.
+  __ j(equal, &eval_right);
+  ASSERT(kSmiTag == 0);
+  __ test(eax, Operand(eax));  // The smi zero is false.
+  __ j(zero, &eval_right);
+  __ test(eax, Immediate(kSmiTagMask));  // All other smis are true.
+  __ j(zero, &done);
+
+  // Call the stub for all other cases.
+  __ push(eax);
+  ToBooleanStub stub;
+  __ CallStub(&stub);
+  __ test(eax, Operand(eax));  // The stub returns nonzero for true.
+  __ j(not_zero, &done);
+
+  __ bind(&eval_right);
+  // Discard the left-hand value if present on the stack.
+  if (destination.is_temporary()) {
+    __ add(Operand(esp), Immediate(kPointerSize));
+  }
+  Visit(right);
+
+  // Save or discard the right-hand value as needed.
+  if (destination.is_temporary() && right_source.is_constant()) {
+    ASSERT(right->AsLiteral() != NULL);
+    __ push(Immediate(right->AsLiteral()->handle()));
+  } else if (destination.is_nowhere() && right_source.is_temporary()) {
+    __ add(Operand(esp), Immediate(kPointerSize));
+  }
+
+  __ bind(&done);
+}
+
+
+} }  // namespace v8::internal
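The sequence of compares above implements an inlined ToBoolean fast path before falling back to the stub. A minimal C++ sketch of the same decision order, over a hypothetical 32-bit tagged word with a one-bit smi tag (the parameter names are assumptions, not V8 API):

#include <cstdint>

enum FastToBoolean { FAST_TRUE, FAST_FALSE, NEEDS_STUB };

// Hypothetical root words standing in for Factory::undefined_value() etc.
FastToBoolean ToBooleanFastPath(uint32_t word, uint32_t undefined_word,
                                uint32_t true_word, uint32_t false_word) {
  if (word == undefined_word) return FAST_FALSE;  // undefined is false
  if (word == true_word) return FAST_TRUE;        // true is true
  if (word == false_word) return FAST_FALSE;      // false is false
  if (word == 0) return FAST_FALSE;               // the smi zero is false
  if ((word & 1) == 0) return FAST_TRUE;          // all other smis are true
  return NEEDS_STUB;  // heap objects: call the shared ToBooleanStub
}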
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index f7369a8..3aa3c34 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -298,11 +298,10 @@
   __ shl(eax, kSmiTagSize);
   __ ret(0);
 
-
   // Slow case: Load name and receiver from stack and jump to runtime.
   __ bind(&slow);
   __ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
-  KeyedLoadIC::Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+  Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
 
   __ bind(&check_string);
   // The key is not a smi.
@@ -343,6 +342,166 @@
 }
 
 
+void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
+                                        ExternalArrayType array_type) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  //  -- esp[4] : key
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  Label slow, failed_allocation;
+
+  // Load name and receiver.
+  __ mov(eax, Operand(esp, kPointerSize));
+  __ mov(ecx, Operand(esp, 2 * kPointerSize));
+
+  // Check that the object isn't a smi.
+  __ test(ecx, Immediate(kSmiTagMask));
+  __ j(zero, &slow, not_taken);
+
+  // Check that the key is a smi.
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(not_zero, &slow, not_taken);
+
+  // Get the map of the receiver.
+  __ mov(edx, FieldOperand(ecx, HeapObject::kMapOffset));
+  // Check that the receiver does not require access checks.  We need
+  // to check this explicitly since this generic stub does not perform
+  // map checks.
+  __ movzx_b(ebx, FieldOperand(edx, Map::kBitFieldOffset));
+  __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
+  __ j(not_zero, &slow, not_taken);
+
+  // Get the instance type from the map of the receiver.
+  __ movzx_b(edx, FieldOperand(edx, Map::kInstanceTypeOffset));
+  // Check that the object is a JS object.
+  __ cmp(edx, JS_OBJECT_TYPE);
+  __ j(not_equal, &slow, not_taken);
+
+  // Check that the elements array is the appropriate type of
+  // ExternalArray.
+  // eax: index (as a smi)
+  // ecx: JSObject
+  __ mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
+  Handle<Map> map(Heap::MapForExternalArrayType(array_type));
+  __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+         Immediate(map));
+  __ j(not_equal, &slow, not_taken);
+
+  // Check that the index is in range.
+  __ sar(eax, kSmiTagSize);  // Untag the index.
+  __ cmp(eax, FieldOperand(ecx, ExternalArray::kLengthOffset));
+  // Unsigned comparison catches both negative and too-large values.
+  __ j(above_equal, &slow);
+
+  // eax: untagged index
+  // ecx: elements array
+  __ mov(ecx, FieldOperand(ecx, ExternalArray::kExternalPointerOffset));
+  // ecx: base pointer of external storage
+  switch (array_type) {
+    case kExternalByteArray:
+      __ movsx_b(eax, Operand(ecx, eax, times_1, 0));
+      break;
+    case kExternalUnsignedByteArray:
+      __ mov_b(eax, Operand(ecx, eax, times_1, 0));
+      break;
+    case kExternalShortArray:
+      __ movsx_w(eax, Operand(ecx, eax, times_2, 0));
+      break;
+    case kExternalUnsignedShortArray:
+      __ mov_w(eax, Operand(ecx, eax, times_2, 0));
+      break;
+    case kExternalIntArray:
+    case kExternalUnsignedIntArray:
+      __ mov(eax, Operand(ecx, eax, times_4, 0));
+      break;
+    case kExternalFloatArray:
+      __ fld_s(Operand(ecx, eax, times_4, 0));
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+
+  // For integer array types:
+  // eax: value
+  // For floating-point array type:
+  // FP(0): value
+
+  if (array_type == kExternalIntArray ||
+      array_type == kExternalUnsignedIntArray) {
+    // For the Int and UnsignedInt array types, we need to see whether
+    // the value can be represented in a Smi. If not, we need to convert
+    // it to a HeapNumber.
+    Label box_int;
+    if (array_type == kExternalIntArray) {
+      // See Smi::IsValid for why this works.
+      __ mov(ebx, eax);
+      __ add(Operand(ebx), Immediate(0x40000000));
+      __ cmp(ebx, 0x80000000);
+      __ j(above_equal, &box_int);
+    } else {
+      ASSERT_EQ(array_type, kExternalUnsignedIntArray);
+      // The test is different for unsigned int values. Since we need
+      // the Smi-encoded result to be treated as unsigned, we cannot
+      // allow either of the top two bits to be set in the value.
+      __ test(eax, Immediate(0xC0000000));
+      __ j(not_zero, &box_int);
+    }
+
+    __ shl(eax, kSmiTagSize);
+    __ ret(0);
+
+    __ bind(&box_int);
+
+    // Allocate a HeapNumber for the int and perform int-to-double
+    // conversion.
+    if (array_type == kExternalIntArray) {
+      __ push(eax);
+      __ fild_s(Operand(esp, 0));
+      __ pop(eax);
+    } else {
+      ASSERT(array_type == kExternalUnsignedIntArray);
+      // There is no fild variant for unsigned values, so zero-extend
+      // the value to a 64-bit int manually.
+      __ push(Immediate(0));
+      __ push(eax);
+      __ fild_d(Operand(esp, 0));
+      __ pop(eax);
+      __ pop(eax);
+    }
+    // FP(0): value
+    __ AllocateHeapNumber(eax, ebx, ecx, &failed_allocation);
+    // Set the value.
+    __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+    __ ret(0);
+  } else if (array_type == kExternalFloatArray) {
+    // For the floating-point array type, we need to always allocate a
+    // HeapNumber.
+    __ AllocateHeapNumber(eax, ebx, ecx, &failed_allocation);
+    // Set the value.
+    __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+    __ ret(0);
+  } else {
+    __ shl(eax, kSmiTagSize);
+    __ ret(0);
+  }
+
+  // If allocation of the HeapNumber fails, there is still a value on
+  // top of the FPU stack. Remove it.
+  __ bind(&failed_allocation);
+  __ ffree();
+  __ fincstp();
+  // Fall through to slow case.
+
+  // Slow case: Load name and receiver from stack and jump to runtime.
+  __ bind(&slow);
+  __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
+  Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+}
+
+
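Both smi range checks in the int/unsigned-int load path above are compact bit tricks; a minimal standalone C++ sketch, assuming the ia32 layout of a 31-bit smi payload:

#include <cstdint>

// Signed case: adding 0x40000000 maps the representable range
// [-2^30, 2^30) onto [0, 2^31), so a single unsigned comparison
// rejects everything else (see Smi::IsValid).
bool SignedFitsInSmi(int32_t value) {
  return static_cast<uint32_t>(value) + 0x40000000u < 0x80000000u;
}

// Unsigned case: the smi-encoded result is later reinterpreted as
// unsigned, so neither of the top two bits may be set.
bool UnsignedFitsInSmi(uint32_t value) {
  return (value & 0xC0000000u) == 0;
}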
 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- eax    : value
@@ -396,15 +555,9 @@
   // ebx: index (as a smi)
   __ j(below, &fast, taken);
 
-  // Slow case: Push extra copies of the arguments (3).
+  // Slow case: call the runtime.
   __ bind(&slow);
-  __ pop(ecx);
-  __ push(Operand(esp, 1 * kPointerSize));
-  __ push(Operand(esp, 1 * kPointerSize));
-  __ push(eax);
-  __ push(ecx);
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
+  Generate(masm, ExternalReference(Runtime::kSetProperty));
 
   // Check whether the elements is a pixel array.
   // eax: value
@@ -424,14 +577,11 @@
   __ mov(edx, eax);  // Save the value.
   __ sar(eax, kSmiTagSize);  // Untag the value.
   {  // Clamp the value to [0..255].
-    Label done, is_negative;
+    Label done;
     __ test(eax, Immediate(0xFFFFFF00));
     __ j(zero, &done);
-    __ j(negative, &is_negative);
-    __ mov(eax, Immediate(255));
-    __ jmp(&done);
-    __ bind(&is_negative);
-    __ xor_(eax, Operand(eax));  // Clear eax.
+    __ setcc(negative, eax);  // 1 if negative, 0 if positive.
+    __ dec_b(eax);  // 0 if negative, 255 if positive.
     __ bind(&done);
   }
   __ mov(ecx, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
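The new setcc/dec_b pair replaces the two-branch clamp; a hedged standalone C++ sketch of what it computes:

#include <cstdint>

// Clamp a value to [0..255] without branching on the slow path:
// setcc(negative) yields 1 for negative inputs and 0 otherwise, and
// decrementing that byte gives 0x00 (negative) or 0xFF (too large).
uint8_t ClampToByte(int32_t value) {
  if ((value & ~0xFF) == 0) return static_cast<uint8_t>(value);  // in range
  uint8_t is_negative = value < 0 ? 1 : 0;        // setcc(negative, eax)
  return static_cast<uint8_t>(is_negative - 1);   // dec_b(eax)
}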
@@ -458,7 +608,6 @@
   __ sub(Operand(ebx), Immediate(1 << kSmiTagSize));  // decrement ebx again
   __ jmp(&fast);
 
-
   // Array case: Get the length and the elements array from the JS
   // array. Check that the array is in fast mode; if it is the
   // length is always a smi.
@@ -490,6 +639,201 @@
 }
 
 
+void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
+                                         ExternalArrayType array_type) {
+  // ----------- S t a t e -------------
+  //  -- eax    : value
+  //  -- esp[0] : return address
+  //  -- esp[4] : key
+  //  -- esp[8] : receiver
+  // -----------------------------------
+  Label slow, check_heap_number;
+
+  // Get the receiver from the stack.
+  __ mov(edx, Operand(esp, 2 * kPointerSize));
+  // Check that the object isn't a smi.
+  __ test(edx, Immediate(kSmiTagMask));
+  __ j(zero, &slow);
+  // Get the map from the receiver.
+  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+  // Check that the receiver does not require access checks.  We need
+  // to do this because this generic stub does not perform map checks.
+  __ movzx_b(ebx, FieldOperand(ecx, Map::kBitFieldOffset));
+  __ test(ebx, Immediate(1 << Map::kIsAccessCheckNeeded));
+  __ j(not_zero, &slow);
+  // Get the key from the stack.
+  __ mov(ebx, Operand(esp, 1 * kPointerSize));  // 1 ~ return address
+  // Check that the key is a smi.
+  __ test(ebx, Immediate(kSmiTagMask));
+  __ j(not_zero, &slow);
+  // Get the instance type from the map of the receiver.
+  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+  // Check that the object is a JS object.
+  __ cmp(ecx, JS_OBJECT_TYPE);
+  __ j(not_equal, &slow);
+
+  // Check that the elements array is the appropriate type of
+  // ExternalArray.
+  // eax: value
+  // edx: JSObject
+  // ebx: index (as a smi)
+  __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
+  Handle<Map> map(Heap::MapForExternalArrayType(array_type));
+  __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+         Immediate(map));
+  __ j(not_equal, &slow);
+
+  // Check that the index is in range.
+  __ sar(ebx, kSmiTagSize);  // Untag the index.
+  __ cmp(ebx, FieldOperand(ecx, ExternalArray::kLengthOffset));
+  // Unsigned comparison catches both negative and too-large values.
+  __ j(above_equal, &slow);
+
+  // Handle both smis and HeapNumbers in the fast path. Go to the
+  // runtime for all other kinds of values.
+  // eax: value
+  // ecx: elements array
+  // ebx: untagged index
+  __ test(eax, Immediate(kSmiTagMask));
+  __ j(not_equal, &check_heap_number);
+  // smi case
+  __ mov(edx, eax);  // Save the value.
+  __ sar(eax, kSmiTagSize);  // Untag the value.
+  __ mov(ecx, FieldOperand(ecx, ExternalArray::kExternalPointerOffset));
+  // ecx: base pointer of external storage
+  switch (array_type) {
+    case kExternalByteArray:
+    case kExternalUnsignedByteArray:
+      __ mov_b(Operand(ecx, ebx, times_1, 0), eax);
+      break;
+    case kExternalShortArray:
+    case kExternalUnsignedShortArray:
+      __ mov_w(Operand(ecx, ebx, times_2, 0), eax);
+      break;
+    case kExternalIntArray:
+    case kExternalUnsignedIntArray:
+      __ mov(Operand(ecx, ebx, times_4, 0), eax);
+      break;
+    case kExternalFloatArray:
+      // Need to perform int-to-float conversion.
+      __ push(eax);
+      __ fild_s(Operand(esp, 0));
+      __ pop(eax);
+      __ fstp_s(Operand(ecx, ebx, times_4, 0));
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  __ mov(eax, edx);  // Return the original value.
+  __ ret(0);
+
+  __ bind(&check_heap_number);
+  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+         Immediate(Factory::heap_number_map()));
+  __ j(not_equal, &slow);
+
+  // The WebGL specification leaves the behavior of storing NaN and
+  // +/-Infinity into integer arrays basically undefined. For more
+  // reproducible behavior, convert these to zero.
+  __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+  __ mov(edx, eax);  // Save the value.
+  __ mov(ecx, FieldOperand(ecx, ExternalArray::kExternalPointerOffset));
+  // ebx: untagged index
+  // ecx: base pointer of external storage
+  // top of FPU stack: value
+  if (array_type == kExternalFloatArray) {
+    __ fstp_s(Operand(ecx, ebx, times_4, 0));
+    __ mov(eax, edx);  // Return the original value.
+    __ ret(0);
+  } else {
+    // Need to perform float-to-int conversion.
+    // Test the top of the FP stack for NaN.
+    Label is_nan;
+    __ fucomi(0);
+    __ j(parity_even, &is_nan);
+
+    if (array_type != kExternalUnsignedIntArray) {
+      __ push(eax);  // Make room on the stack.
+      __ fistp_s(Operand(esp, 0));
+      __ pop(eax);
+    } else {
+      // fistp stores values as signed integers.
+      // To represent the entire range, we need to store as a 64-bit
+      // int and discard the high 32 bits.
+      __ push(eax);  // Make room on the stack for a 64-bit value.
+      __ push(eax);
+      __ fistp_d(Operand(esp, 0));
+      __ pop(eax);
+      __ mov(Operand(esp, 0), eax);
+      __ pop(eax);
+    }
+    // eax: untagged integer value
+    switch (array_type) {
+      case kExternalByteArray:
+      case kExternalUnsignedByteArray:
+        __ mov_b(Operand(ecx, ebx, times_1, 0), eax);
+        break;
+      case kExternalShortArray:
+      case kExternalUnsignedShortArray:
+        __ mov_w(Operand(ecx, ebx, times_2, 0), eax);
+        break;
+      case kExternalIntArray:
+      case kExternalUnsignedIntArray: {
+        // We also need to check explicitly for +/-Infinity. These are
+        // converted to MIN_INT, and we must be careful not to confuse
+        // them with legal uses of MIN_INT.
+        Label not_infinity;
+        // This test detects both NaN and Infinity, but NaN has
+        // already been handled by the FPU hardware check above.
+        __ mov_w(edi, FieldOperand(edx, HeapNumber::kValueOffset + 6));
+        __ and_(edi, 0x7FF0);
+        __ cmp(edi, 0x7FF0);
+        __ j(not_equal, &not_infinity);
+        __ mov(eax, 0);
+        __ bind(&not_infinity);
+        __ mov(Operand(ecx, ebx, times_4, 0), eax);
+        break;
+      }
+      default:
+        UNREACHABLE();
+        break;
+    }
+    __ mov(eax, edx);  // Return the original value.
+    __ ret(0);
+
+    __ bind(&is_nan);
+    __ ffree();
+    __ fincstp();
+    switch (array_type) {
+      case kExternalByteArray:
+      case kExternalUnsignedByteArray:
+        __ mov_b(Operand(ecx, ebx, times_1, 0), 0);
+        break;
+      case kExternalShortArray:
+      case kExternalUnsignedShortArray:
+        __ mov(eax, 0);
+        __ mov_w(Operand(ecx, ebx, times_2, 0), eax);
+        break;
+      case kExternalIntArray:
+      case kExternalUnsignedIntArray:
+        __ mov(Operand(ecx, ebx, times_4, 0), Immediate(0));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+    __ mov(eax, edx);  // Return the original value.
+    __ ret(0);
+  }
+
+  // Slow case: call the runtime.
+  __ bind(&slow);
+  Generate(masm, ExternalReference(Runtime::kSetProperty));
+}
+
+
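The 0x7FF0 mask in the store path above isolates the IEEE 754 exponent field from the double's top 16 bits; an all-ones exponent means Infinity or NaN, and NaN was already filtered by fucomi. A standalone C++ sketch:

#include <cstdint>
#include <cstring>

// True when the exponent field of 'value' is all ones, i.e. the value
// is +Infinity, -Infinity, or NaN (the callers above have already
// ruled out NaN via the FPU comparison).
bool HasMaxExponent(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));           // like reading the
  uint16_t high = static_cast<uint16_t>(bits >> 48);  // word at offset +6
  return (high & 0x7FF0) == 0x7FF0;
}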
 // Defined in ic.cc.
 Object* CallIC_Miss(Arguments args);
 
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index e83bb92..08c4c0c 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -319,11 +319,17 @@
 
 
 void MacroAssembler::FCmp() {
-  fucompp();
-  push(eax);
-  fnstsw_ax();
-  sahf();
-  pop(eax);
+  if (CpuFeatures::IsSupported(CpuFeatures::CMOV)) {
+    fucomip();
+    ffree(0);
+    fincstp();
+  } else {
+    fucompp();
+    push(eax);
+    fnstsw_ax();
+    sahf();
+    pop(eax);
+  }
 }
 
 
@@ -767,6 +773,24 @@
 }
 
 
+void MacroAssembler::AllocateHeapNumber(Register result,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Label* gc_required) {
+  // Allocate heap number in new space.
+  AllocateInNewSpace(HeapNumber::kSize,
+                     result,
+                     scratch1,
+                     scratch2,
+                     gc_required,
+                     TAG_OBJECT);
+
+  // Set the map.
+  mov(FieldOperand(result, HeapObject::kMapOffset),
+      Immediate(Factory::heap_number_map()));
+}
+
+
 void MacroAssembler::NegativeZeroTest(CodeGenerator* cgen,
                                       Register result,
                                       Register op,
@@ -1049,7 +1073,6 @@
   if (!resolved) {
     uint32_t flags =
         Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
-        Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
         Bootstrapper::FixupFlagsUseCodeObject::encode(false);
     Unresolved entry = { pc_offset() - sizeof(int32_t), flags, name };
     unresolved_.Add(entry);
@@ -1068,7 +1091,6 @@
   if (!resolved) {
     uint32_t flags =
         Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
-        Bootstrapper::FixupFlagsIsPCRelative::encode(false) |
         Bootstrapper::FixupFlagsUseCodeObject::encode(true);
     Unresolved entry = { pc_offset() - sizeof(int32_t), flags, name };
     unresolved_.Add(entry);
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index ed72c96..a0a2428 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -206,6 +206,15 @@
   // un-done.
   void UndoAllocationInNewSpace(Register object);
 
+  // Allocate a heap number in new space with undefined value. The
+  // register scratch2 can be passed as no_reg; the others must be
+  // valid registers. Returns a tagged pointer in the result register,
+  // or jumps to gc_required if new space is full.
+  void AllocateHeapNumber(Register result,
+                          Register scratch1,
+                          Register scratch2,
+                          Label* gc_required);
+
   // ---------------------------------------------------------------------------
   // Support functions.
 
diff --git a/src/ia32/virtual-frame-ia32.cc b/src/ia32/virtual-frame-ia32.cc
index 1b8232f..980cec8 100644
--- a/src/ia32/virtual-frame-ia32.cc
+++ b/src/ia32/virtual-frame-ia32.cc
@@ -161,15 +161,16 @@
   // on the stack.
   int start = Min(begin, stack_pointer_ + 1);
 
-  // If positive we have to adjust the stack pointer.
-  int delta = end - stack_pointer_;
-  if (delta > 0) {
-    stack_pointer_ = end;
-    __ sub(Operand(esp), Immediate(delta * kPointerSize));
-  }
-
+  // Emit normal 'push' instructions for elements above the stack
+  // pointer and 'mov' instructions for elements at or below it.
   for (int i = start; i <= end; i++) {
-    if (!elements_[i].is_synced()) SyncElementBelowStackPointer(i);
+    if (!elements_[i].is_synced()) {
+      if (i <= stack_pointer_) {
+        SyncElementBelowStackPointer(i);
+      } else {
+        SyncElementByPushing(i);
+      }
+    }
   }
 }
 
@@ -454,14 +455,16 @@
   Comment cmnt(masm(), "[ Enter JS frame");
 
 #ifdef DEBUG
-  // Verify that edi contains a JS function.  The following code
-  // relies on eax being available for use.
-  __ test(edi, Immediate(kSmiTagMask));
-  __ Check(not_zero,
-           "VirtualFrame::Enter - edi is not a function (smi check).");
-  __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
-  __ Check(equal,
-           "VirtualFrame::Enter - edi is not a function (map check).");
+  if (FLAG_debug_code) {
+    // Verify that edi contains a JS function.  The following code
+    // relies on eax being available for use.
+    __ test(edi, Immediate(kSmiTagMask));
+    __ Check(not_zero,
+             "VirtualFrame::Enter - edi is not a function (smi check).");
+    __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
+    __ Check(equal,
+             "VirtualFrame::Enter - edi is not a function (map check).");
+  }
 #endif
 
   EmitPush(ebp);
diff --git a/src/ic.cc b/src/ic.cc
index 264b99c..c12dba7 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -265,6 +265,55 @@
 }
 
 
+Code* KeyedLoadIC::external_array_stub(JSObject::ElementsKind elements_kind) {
+  switch (elements_kind) {
+    case JSObject::EXTERNAL_BYTE_ELEMENTS:
+      return Builtins::builtin(Builtins::KeyedLoadIC_ExternalByteArray);
+    case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+      return Builtins::builtin(Builtins::KeyedLoadIC_ExternalUnsignedByteArray);
+    case JSObject::EXTERNAL_SHORT_ELEMENTS:
+      return Builtins::builtin(Builtins::KeyedLoadIC_ExternalShortArray);
+    case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+      return Builtins::builtin(
+          Builtins::KeyedLoadIC_ExternalUnsignedShortArray);
+    case JSObject::EXTERNAL_INT_ELEMENTS:
+      return Builtins::builtin(Builtins::KeyedLoadIC_ExternalIntArray);
+    case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+      return Builtins::builtin(Builtins::KeyedLoadIC_ExternalUnsignedIntArray);
+    case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+      return Builtins::builtin(Builtins::KeyedLoadIC_ExternalFloatArray);
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+Code* KeyedStoreIC::external_array_stub(JSObject::ElementsKind elements_kind) {
+  switch (elements_kind) {
+    case JSObject::EXTERNAL_BYTE_ELEMENTS:
+      return Builtins::builtin(Builtins::KeyedStoreIC_ExternalByteArray);
+    case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+      return Builtins::builtin(
+          Builtins::KeyedStoreIC_ExternalUnsignedByteArray);
+    case JSObject::EXTERNAL_SHORT_ELEMENTS:
+      return Builtins::builtin(Builtins::KeyedStoreIC_ExternalShortArray);
+    case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+      return Builtins::builtin(
+          Builtins::KeyedStoreIC_ExternalUnsignedShortArray);
+    case JSObject::EXTERNAL_INT_ELEMENTS:
+      return Builtins::builtin(Builtins::KeyedStoreIC_ExternalIntArray);
+    case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+      return Builtins::builtin(Builtins::KeyedStoreIC_ExternalUnsignedIntArray);
+    case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+      return Builtins::builtin(Builtins::KeyedStoreIC_ExternalFloatArray);
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
 static bool HasInterceptorGetter(JSObject* object) {
   return !object->GetNamedInterceptor()->getter()->IsUndefined();
 }
@@ -823,7 +872,14 @@
   bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
 
   if (use_ic) {
-    set_target(generic_stub());
+    Code* stub = generic_stub();
+    if (object->IsJSObject()) {
+      Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+      if (receiver->HasExternalArrayElements()) {
+        stub = external_array_stub(receiver->GetElementsKind());
+      }
+    }
+    set_target(stub);
     // For JSObjects that are not value wrappers and that do not have
     // indexed interceptors, we initialize the inlined fast case (if
     // present) by patching the inlined map check.
@@ -1110,7 +1166,16 @@
   bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
   ASSERT(!(use_ic && object->IsJSGlobalProxy()));
 
-  if (use_ic) set_target(generic_stub());
+  if (use_ic) {
+    Code* stub = generic_stub();
+    if (object->IsJSObject()) {
+      Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+      if (receiver->HasExternalArrayElements()) {
+        stub = external_array_stub(receiver->GetElementsKind());
+      }
+    }
+    set_target(stub);
+  }
 
   // Set the property.
   return Runtime::SetObjectProperty(object, key, value, NONE);
diff --git a/src/ic.h b/src/ic.h
index fcf1ec0..8709088 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -269,6 +269,13 @@
   static void GeneratePreMonomorphic(MacroAssembler* masm);
   static void GenerateGeneric(MacroAssembler* masm);
 
+  // Generators for external array types. See objects.h.
+  // These are similar to the generic IC; they optimize the case of
+  // operating upon external array types but fall back to the runtime
+  // for all other types.
+  static void GenerateExternalArray(MacroAssembler* masm,
+                                    ExternalArrayType array_type);
+
   // Clear the use of the inlined version.
   static void ClearInlinedVersion(Address address);
 
@@ -294,6 +301,7 @@
   static Code* pre_monomorphic_stub() {
     return Builtins::builtin(Builtins::KeyedLoadIC_PreMonomorphic);
   }
+  static Code* external_array_stub(JSObject::ElementsKind elements_kind);
 
   static void Clear(Address address, Code* target);
 
@@ -358,6 +366,13 @@
   static void GenerateGeneric(MacroAssembler* masm);
   static void GenerateExtendStorage(MacroAssembler* masm);
 
+  // Generators for external array types. See objects.h.
+  // These are similar to the generic IC; they optimize the case of
+  // operating upon external array types but fall back to the runtime
+  // for all other types.
+  static void GenerateExternalArray(MacroAssembler* masm,
+                                    ExternalArrayType array_type);
+
   // Clear the inlined version so the IC is always hit.
   static void ClearInlinedVersion(Address address);
 
@@ -384,6 +399,7 @@
   static Code* generic_stub() {
     return Builtins::builtin(Builtins::KeyedStoreIC_Generic);
   }
+  static Code* external_array_stub(JSObject::ElementsKind elements_kind);
 
   static void Clear(Address address, Code* target);
 
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index e518662..c77f32d 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -45,13 +45,10 @@
 
 #ifdef V8_NATIVE_REGEXP
 #if V8_TARGET_ARCH_IA32
-#include "ia32/macro-assembler-ia32.h"
 #include "ia32/regexp-macro-assembler-ia32.h"
 #elif V8_TARGET_ARCH_X64
-#include "x64/macro-assembler-x64.h"
 #include "x64/regexp-macro-assembler-x64.h"
 #elif V8_TARGET_ARCH_ARM
-#include "arm/macro-assembler-arm.h"
 #include "arm/regexp-macro-assembler-arm.h"
 #else
 #error Unsupported target architecture.
diff --git a/src/jsregexp.h b/src/jsregexp.h
index 3bc30b6..84f8d98 100644
--- a/src/jsregexp.h
+++ b/src/jsregexp.h
@@ -28,6 +28,8 @@
 #ifndef V8_JSREGEXP_H_
 #define V8_JSREGEXP_H_
 
+#include "macro-assembler.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/jump-target.h b/src/jump-target.h
index 0c42f1b..0933ee7 100644
--- a/src/jump-target.h
+++ b/src/jump-target.h
@@ -28,6 +28,8 @@
 #ifndef V8_JUMP_TARGET_H_
 #define V8_JUMP_TARGET_H_
 
+#include "macro-assembler.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/location.h b/src/location.h
new file mode 100644
index 0000000..9702ce4
--- /dev/null
+++ b/src/location.h
@@ -0,0 +1,57 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_LOCATION_H_
+#define V8_LOCATION_H_
+
+#include "utils.h"
+
+namespace v8 {
+namespace internal {
+
+class Location BASE_EMBEDDED {
+ public:
+  static Location Temporary() { return Location(TEMP); }
+  static Location Nowhere() { return Location(NOWHERE); }
+  static Location Constant() { return Location(CONSTANT); }
+
+  bool is_temporary() { return type_ == TEMP; }
+  bool is_nowhere() { return type_ == NOWHERE; }
+  bool is_constant() { return type_ == CONSTANT; }
+
+ private:
+  enum Type { TEMP, NOWHERE, CONSTANT };
+
+  explicit Location(Type type) : type_(type) {}
+
+  Type type_;
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_LOCATION_H_
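A minimal sketch of how the fast code generator dispatches on these locations (the surrounding function is hypothetical; only the Location API comes from this header):

#include "location.h"

namespace v8 { namespace internal {

// Three-way dispatch on an expression's location, mirroring
// VisitReturnStatement in fast-codegen-ia32.cc above.
void CompleteExpression(Location loc) {
  if (loc.is_temporary()) {
    // The value was pushed on the stack: pop it into a register.
  } else if (loc.is_constant()) {
    // The value is a literal: materialize the handle directly.
  } else {
    // is_nowhere(): the result is discarded, so emit nothing.
  }
}

} }  // namespace v8::internal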
diff --git a/src/log.cc b/src/log.cc
index d225c3b..d1d9a31 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -934,6 +934,21 @@
 }
 
 
+void Logger::HeapSampleJSProducerEvent(const char* constructor,
+                                       Address* stack) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log_gc) return;
+  LogMessageBuilder msg;
+  msg.Append("heap-js-prod-item,%s", constructor);
+  while (*stack != NULL) {
+    msg.Append(",0x%" V8PRIxPTR, *stack++);
+  }
+  msg.Append("\n");
+  msg.WriteToLogFile();
+#endif
+}
+
+
 void Logger::DebugTag(const char* call_site_tag) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (!Log::IsEnabled() || !FLAG_log) return;
@@ -1070,37 +1085,33 @@
 }
 
 
-void Logger::LogCompiledFunctions() {
-  HandleScope scope;
-  Handle<SharedFunctionInfo>* sfis = NULL;
+static int EnumerateCompiledFunctions(Handle<SharedFunctionInfo>* sfis) {
+  AssertNoAllocation no_alloc;
   int compiled_funcs_count = 0;
-
-  {
-    AssertNoAllocation no_alloc;
-
-    HeapIterator iterator;
-    while (iterator.has_next()) {
-      HeapObject* obj = iterator.next();
-      ASSERT(obj != NULL);
-      if (obj->IsSharedFunctionInfo()
-          && SharedFunctionInfo::cast(obj)->is_compiled()) {
-        ++compiled_funcs_count;
-      }
-    }
-
-    sfis = NewArray< Handle<SharedFunctionInfo> >(compiled_funcs_count);
-    iterator.reset();
-
-    int i = 0;
-    while (iterator.has_next()) {
-      HeapObject* obj = iterator.next();
-      ASSERT(obj != NULL);
-      if (obj->IsSharedFunctionInfo()
-          && SharedFunctionInfo::cast(obj)->is_compiled()) {
-        sfis[i++] = Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(obj));
-      }
+  HeapIterator iterator;
+  while (iterator.has_next()) {
+    HeapObject* obj = iterator.next();
+    ASSERT(obj != NULL);
+    if (!obj->IsSharedFunctionInfo()) continue;
+    SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
+    if (sfi->is_compiled()
+        && (!sfi->script()->IsScript()
+            || Script::cast(sfi->script())->HasValidSource())) {
+      if (sfis != NULL)
+        sfis[compiled_funcs_count] = Handle<SharedFunctionInfo>(sfi);
+      ++compiled_funcs_count;
     }
   }
+  return compiled_funcs_count;
+}
+
+
+void Logger::LogCompiledFunctions() {
+  HandleScope scope;
+  const int compiled_funcs_count = EnumerateCompiledFunctions(NULL);
+  Handle<SharedFunctionInfo>* sfis =
+      NewArray< Handle<SharedFunctionInfo> >(compiled_funcs_count);
+  EnumerateCompiledFunctions(sfis);
 
   // During iteration, there can be heap allocation due to the
   // GetScriptLineNumber call.
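EnumerateCompiledFunctions is now a count-then-fill helper: a first call with NULL sizes the buffer, a second call fills it. A generic standalone sketch of the idiom (the predicate is a stand-in, not V8 code):

#include <cstddef>

// Count-then-fill enumeration: pass out == NULL to size the buffer,
// then call again with a buffer of at least the returned length.
template <typename T, typename Pred>
int Enumerate(T* out, const T* items, int n, Pred matches) {
  int count = 0;
  for (int i = 0; i < n; i++) {
    if (matches(items[i])) {
      if (out != NULL) out[count] = items[i];
      ++count;
    }
  }
  return count;
}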
diff --git a/src/log.h b/src/log.h
index 07a0429..13d45d2 100644
--- a/src/log.h
+++ b/src/log.h
@@ -223,6 +223,8 @@
                                            int number, int bytes);
   static void HeapSampleJSRetainersEvent(const char* constructor,
                                          const char* event);
+  static void HeapSampleJSProducerEvent(const char* constructor,
+                                        Address* stack);
   static void HeapSampleStats(const char* space, const char* kind,
                               int capacity, int used);
 
diff --git a/src/macros.py b/src/macros.py
index c75f0ea..ddd2f13 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -118,9 +118,7 @@
 # a type error is thrown.
 macro DATE_VALUE(arg) = (%_ClassOf(arg) === 'Date' ? %_ValueOf(arg) : ThrowDateTypeError());
 
-# Last input and last subject are after the captures so we can omit them on
-# results returned from global searches.  Beware - these evaluate their
-# arguments twice.
+# Last input and last subject of regexp matches.
 macro LAST_SUBJECT(array) = ((array)[1]);
 macro LAST_INPUT(array) = ((array)[2]);
 
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index cbd47a8..5a3ab89 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -279,11 +279,9 @@
 
   void VisitDebugTarget(RelocInfo* rinfo) {
     ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) &&
-           rinfo->IsCallInstruction());
+           rinfo->IsPatchedReturnSequence());
     HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
     MarkCompactCollector::MarkObject(code);
-    // When compacting we convert the call to a real object pointer.
-    if (IsCompacting()) rinfo->set_call_object(code);
   }
 
  private:
@@ -1383,6 +1381,15 @@
         reinterpret_cast<Code*>(target)->instruction_start());
   }
 
+  void VisitDebugTarget(RelocInfo* rinfo) {
+    ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) &&
+           rinfo->IsPatchedReturnSequence());
+    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
+    VisitPointer(&target);
+    rinfo->set_call_address(
+        reinterpret_cast<Code*>(target)->instruction_start());
+  }
+
  private:
   void UpdatePointer(Object** p) {
     if (!(*p)->IsHeapObject()) return;
diff --git a/src/memory.h b/src/memory.h
index c64699e..503492a 100644
--- a/src/memory.h
+++ b/src/memory.h
@@ -63,6 +63,10 @@
   static Object*& Object_at(Address addr)  {
     return *reinterpret_cast<Object**>(addr);
   }
+
+  static Handle<Object>& Object_Handle_at(Address addr)  {
+    return *reinterpret_cast<Handle<Object>*>(addr);
+  }
 };
 
 } }  // namespace v8::internal
diff --git a/src/mirror-delay.js b/src/mirror-delay.js
index c4ab7b8..cde5534 100644
--- a/src/mirror-delay.js
+++ b/src/mirror-delay.js
@@ -764,7 +764,7 @@
 ObjectMirror.prototype.toText = function() {
   var name;
   var ctor = this.constructorFunction();
-  if (ctor.isUndefined()) {
+  if (!ctor.isFunction()) {
     name = this.className();
   } else {
     name = ctor.name();
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 288cc21..0188134 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -29,7 +29,6 @@
 
 #include "disassembler.h"
 #include "disasm.h"
-#include "macro-assembler.h"
 #include "jsregexp.h"
 
 namespace v8 {
@@ -118,6 +117,27 @@
     case PIXEL_ARRAY_TYPE:
       PixelArray::cast(this)->PixelArrayPrint();
       break;
+    case EXTERNAL_BYTE_ARRAY_TYPE:
+      ExternalByteArray::cast(this)->ExternalByteArrayPrint();
+      break;
+    case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
+      ExternalUnsignedByteArray::cast(this)->ExternalUnsignedByteArrayPrint();
+      break;
+    case EXTERNAL_SHORT_ARRAY_TYPE:
+      ExternalShortArray::cast(this)->ExternalShortArrayPrint();
+      break;
+    case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
+      ExternalUnsignedShortArray::cast(this)->ExternalUnsignedShortArrayPrint();
+      break;
+    case EXTERNAL_INT_ARRAY_TYPE:
+      ExternalIntArray::cast(this)->ExternalIntArrayPrint();
+      break;
+    case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
+      ExternalUnsignedIntArray::cast(this)->ExternalUnsignedIntArrayPrint();
+      break;
+    case EXTERNAL_FLOAT_ARRAY_TYPE:
+      ExternalFloatArray::cast(this)->ExternalFloatArrayPrint();
+      break;
     case FILLER_TYPE:
       PrintF("filler");
       break;
@@ -197,6 +217,28 @@
     case PIXEL_ARRAY_TYPE:
       PixelArray::cast(this)->PixelArrayVerify();
       break;
+    case EXTERNAL_BYTE_ARRAY_TYPE:
+      ExternalByteArray::cast(this)->ExternalByteArrayVerify();
+      break;
+    case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
+      ExternalUnsignedByteArray::cast(this)->ExternalUnsignedByteArrayVerify();
+      break;
+    case EXTERNAL_SHORT_ARRAY_TYPE:
+      ExternalShortArray::cast(this)->ExternalShortArrayVerify();
+      break;
+    case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
+      ExternalUnsignedShortArray::cast(this)->
+          ExternalUnsignedShortArrayVerify();
+      break;
+    case EXTERNAL_INT_ARRAY_TYPE:
+      ExternalIntArray::cast(this)->ExternalIntArrayVerify();
+      break;
+    case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
+      ExternalUnsignedIntArray::cast(this)->ExternalUnsignedIntArrayVerify();
+      break;
+    case EXTERNAL_FLOAT_ARRAY_TYPE:
+      ExternalFloatArray::cast(this)->ExternalFloatArrayVerify();
+      break;
     case CODE_TYPE:
       Code::cast(this)->CodeVerify();
       break;
@@ -275,6 +317,41 @@
 }
 
 
+void ExternalByteArray::ExternalByteArrayPrint() {
+  PrintF("external byte array");
+}
+
+
+void ExternalUnsignedByteArray::ExternalUnsignedByteArrayPrint() {
+  PrintF("external unsigned byte array");
+}
+
+
+void ExternalShortArray::ExternalShortArrayPrint() {
+  PrintF("external short array");
+}
+
+
+void ExternalUnsignedShortArray::ExternalUnsignedShortArrayPrint() {
+  PrintF("external unsigned short array");
+}
+
+
+void ExternalIntArray::ExternalIntArrayPrint() {
+  PrintF("external int array");
+}
+
+
+void ExternalUnsignedIntArray::ExternalUnsignedIntArrayPrint() {
+  PrintF("external unsigned int array");
+}
+
+
+void ExternalFloatArray::ExternalFloatArrayPrint() {
+  PrintF("external float array");
+}
+
+
 void ByteArray::ByteArrayVerify() {
   ASSERT(IsByteArray());
 }
@@ -285,6 +362,41 @@
 }
 
 
+void ExternalByteArray::ExternalByteArrayVerify() {
+  ASSERT(IsExternalByteArray());
+}
+
+
+void ExternalUnsignedByteArray::ExternalUnsignedByteArrayVerify() {
+  ASSERT(IsExternalUnsignedByteArray());
+}
+
+
+void ExternalShortArray::ExternalShortArrayVerify() {
+  ASSERT(IsExternalShortArray());
+}
+
+
+void ExternalUnsignedShortArray::ExternalUnsignedShortArrayVerify() {
+  ASSERT(IsExternalUnsignedShortArray());
+}
+
+
+void ExternalIntArray::ExternalIntArrayVerify() {
+  ASSERT(IsExternalIntArray());
+}
+
+
+void ExternalUnsignedIntArray::ExternalUnsignedIntArrayVerify() {
+  ASSERT(IsExternalUnsignedIntArray());
+}
+
+
+void ExternalFloatArray::ExternalFloatArrayVerify() {
+  ASSERT(IsExternalFloatArray());
+}
+
+
 void JSObject::PrintProperties() {
   if (HasFastProperties()) {
     DescriptorArray* descs = map()->instance_descriptors();
@@ -346,6 +458,58 @@
       }
       break;
     }
+    case EXTERNAL_BYTE_ELEMENTS: {
+      ExternalByteArray* p = ExternalByteArray::cast(elements());
+      for (int i = 0; i < p->length(); i++) {
+        PrintF("   %d: %d\n", i, static_cast<int>(p->get(i)));
+      }
+      break;
+    }
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
+      ExternalUnsignedByteArray* p =
+          ExternalUnsignedByteArray::cast(elements());
+      for (int i = 0; i < p->length(); i++) {
+        PrintF("   %d: %d\n", i, static_cast<int>(p->get(i)));
+      }
+      break;
+    }
+    case EXTERNAL_SHORT_ELEMENTS: {
+      ExternalShortArray* p = ExternalShortArray::cast(elements());
+      for (int i = 0; i < p->length(); i++) {
+        PrintF("   %d: %d\n", i, static_cast<int>(p->get(i)));
+      }
+      break;
+    }
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
+      ExternalUnsignedShortArray* p =
+          ExternalUnsignedShortArray::cast(elements());
+      for (int i = 0; i < p->length(); i++) {
+        PrintF("   %d: %d\n", i, static_cast<int>(p->get(i)));
+      }
+      break;
+    }
+    case EXTERNAL_INT_ELEMENTS: {
+      ExternalIntArray* p = ExternalIntArray::cast(elements());
+      for (int i = 0; i < p->length(); i++) {
+        PrintF("   %d: %d\n", i, static_cast<int>(p->get(i)));
+      }
+      break;
+    }
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
+      ExternalUnsignedIntArray* p =
+          ExternalUnsignedIntArray::cast(elements());
+      for (int i = 0; i < p->length(); i++) {
+        PrintF("   %d: %d\n", i, static_cast<int>(p->get(i)));
+      }
+      break;
+    }
+    case EXTERNAL_FLOAT_ELEMENTS: {
+      ExternalFloatArray* p = ExternalFloatArray::cast(elements());
+      for (int i = 0; i < p->length(); i++) {
+        PrintF("   %d: %f\n", i, p->get(i));
+      }
+      break;
+    }
     case DICTIONARY_ELEMENTS:
       elements()->Print();
       break;
@@ -434,6 +598,16 @@
     case FIXED_ARRAY_TYPE: return "FIXED_ARRAY";
     case BYTE_ARRAY_TYPE: return "BYTE_ARRAY";
     case PIXEL_ARRAY_TYPE: return "PIXEL_ARRAY";
+    case EXTERNAL_BYTE_ARRAY_TYPE: return "EXTERNAL_BYTE_ARRAY";
+    case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
+      return "EXTERNAL_UNSIGNED_BYTE_ARRAY";
+    case EXTERNAL_SHORT_ARRAY_TYPE: return "EXTERNAL_SHORT_ARRAY";
+    case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
+      return "EXTERNAL_UNSIGNED_SHORT_ARRAY";
+    case EXTERNAL_INT_ARRAY_TYPE: return "EXTERNAL_INT_ARRAY";
+    case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
+      return "EXTERNAL_UNSIGNED_INT_ARRAY";
+    case EXTERNAL_FLOAT_ARRAY_TYPE: return "EXTERNAL_FLOAT_ARRAY";
     case FILLER_TYPE: return "FILLER";
     case JS_OBJECT_TYPE: return "JS_OBJECT";
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE: return "JS_CONTEXT_EXTENSION_OBJECT";
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 29b886d..1ada583 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -360,6 +360,65 @@
 }
 
 
+bool Object::IsExternalArray() {
+  if (!Object::IsHeapObject())
+    return false;
+  InstanceType instance_type =
+      HeapObject::cast(this)->map()->instance_type();
+  return (instance_type >= EXTERNAL_BYTE_ARRAY_TYPE &&
+          instance_type <= EXTERNAL_FLOAT_ARRAY_TYPE);
+}
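
The range test above only works because the external array instance types are declared contiguously and in exactly this order (objects.h carries a note to that effect). A standalone sketch of the same idiom, with illustrative enum values rather than V8's actual numbering:

#include <cstdio>

// Illustrative subset of the instance-type enum; the external array
// types must stay contiguous for the range test to be valid.
enum InstanceType {
  EXTERNAL_BYTE_ARRAY_TYPE,
  EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
  EXTERNAL_SHORT_ARRAY_TYPE,
  EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
  EXTERNAL_INT_ARRAY_TYPE,
  EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
  EXTERNAL_FLOAT_ARRAY_TYPE
};

// One pair of comparisons covers all seven element types.
static bool IsExternalArrayType(InstanceType t) {
  return t >= EXTERNAL_BYTE_ARRAY_TYPE && t <= EXTERNAL_FLOAT_ARRAY_TYPE;
}

int main() {
  printf("%d\n", IsExternalArrayType(EXTERNAL_SHORT_ARRAY_TYPE));  // prints 1
  return 0;
}
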
+
+
+bool Object::IsExternalByteArray() {
+  return Object::IsHeapObject() &&
+      HeapObject::cast(this)->map()->instance_type() ==
+      EXTERNAL_BYTE_ARRAY_TYPE;
+}
+
+
+bool Object::IsExternalUnsignedByteArray() {
+  return Object::IsHeapObject() &&
+      HeapObject::cast(this)->map()->instance_type() ==
+      EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE;
+}
+
+
+bool Object::IsExternalShortArray() {
+  return Object::IsHeapObject() &&
+      HeapObject::cast(this)->map()->instance_type() ==
+      EXTERNAL_SHORT_ARRAY_TYPE;
+}
+
+
+bool Object::IsExternalUnsignedShortArray() {
+  return Object::IsHeapObject() &&
+      HeapObject::cast(this)->map()->instance_type() ==
+      EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE;
+}
+
+
+bool Object::IsExternalIntArray() {
+  return Object::IsHeapObject() &&
+      HeapObject::cast(this)->map()->instance_type() ==
+      EXTERNAL_INT_ARRAY_TYPE;
+}
+
+
+bool Object::IsExternalUnsignedIntArray() {
+  return Object::IsHeapObject() &&
+      HeapObject::cast(this)->map()->instance_type() ==
+      EXTERNAL_UNSIGNED_INT_ARRAY_TYPE;
+}
+
+
+bool Object::IsExternalFloatArray() {
+  return Object::IsHeapObject() &&
+      HeapObject::cast(this)->map()->instance_type() ==
+      EXTERNAL_FLOAT_ARRAY_TYPE;
+}
+
+
 bool Object::IsFailure() {
   return HAS_FAILURE_TAG(this);
 }
@@ -744,15 +803,17 @@
 
 Smi* Smi::FromInt(int value) {
   ASSERT(Smi::IsValid(value));
+  int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
   intptr_t tagged_value =
-      (static_cast<intptr_t>(value) << kSmiTagSize) | kSmiTag;
+      (static_cast<intptr_t>(value) << smi_shift_bits) | kSmiTag;
   return reinterpret_cast<Smi*>(tagged_value);
 }
 
 
 Smi* Smi::FromIntptr(intptr_t value) {
   ASSERT(Smi::IsValid(value));
-  return reinterpret_cast<Smi*>((value << kSmiTagSize) | kSmiTag);
+  int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
+  return reinterpret_cast<Smi*>((value << smi_shift_bits) | kSmiTag);
 }
 
 
@@ -776,7 +837,7 @@
       kFailureTypeTagSize + kSpaceTagSize - kObjectAlignmentBits;
   STATIC_ASSERT(kShiftBits >= 0);
   ASSERT(type() == RETRY_AFTER_GC);
-  return value() >> kShiftBits;
+  return static_cast<int>(value() >> kShiftBits);
 }
 
 
@@ -802,29 +863,31 @@
 }
 
 
-int Failure::value() const {
-  return static_cast<int>(reinterpret_cast<intptr_t>(this) >> kFailureTagSize);
+intptr_t Failure::value() const {
+  return reinterpret_cast<intptr_t>(this) >> kFailureTagSize;
 }
 
 
 Failure* Failure::RetryAfterGC(int requested_bytes) {
   // Assert that the space encoding fits in the three bits allotted for it.
   ASSERT((LAST_SPACE & ~kSpaceTagMask) == 0);
-  int requested = requested_bytes >> kObjectAlignmentBits;
+  intptr_t requested = requested_bytes >> kObjectAlignmentBits;
+  int tag_bits = kSpaceTagSize + kFailureTypeTagSize;
+  if (((requested << tag_bits) >> tag_bits) != requested) {
+    // No room for the entire requested size in the bits. Round down to
+    // the maximally representable size.
+    requested = static_cast<intptr_t>(
+                    (~static_cast<uintptr_t>(0)) >> (tag_bits + 1));
+  }
   int value = (requested << kSpaceTagSize) | NEW_SPACE;
-  ASSERT(value >> kSpaceTagSize == requested);
-  ASSERT(Smi::IsValid(value));
-  ASSERT(value == ((value << kFailureTypeTagSize) >> kFailureTypeTagSize));
-  ASSERT(Smi::IsValid(value << kFailureTypeTagSize));
   return Construct(RETRY_AFTER_GC, value);
 }
 
 
-Failure* Failure::Construct(Type type, int value) {
-  int info = (value << kFailureTypeTagSize) | type;
+Failure* Failure::Construct(Type type, intptr_t value) {
+  intptr_t info = (static_cast<intptr_t>(value) << kFailureTypeTagSize) | type;
   ASSERT(((info << kFailureTagSize) >> kFailureTagSize) == info);
-  return reinterpret_cast<Failure*>(
-      (static_cast<intptr_t>(info) << kFailureTagSize) | kFailureTag);
+  return reinterpret_cast<Failure*>((info << kFailureTagSize) | kFailureTag);
 }
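
A worked example may make the failure-word layout concrete. This is a standalone sketch: the tag widths (kFailureTagSize == 2, kFailureTypeTagSize == 2, kSpaceTagSize == 3) follow the 'sss|tt|11' diagram later in objects.h, and the type and space tag values used here are placeholders, not V8's actual constants.

#include <cstdint>
#include <cstdio>

int main() {
  const intptr_t kFailureTag = 3;     // binary 11, the low two bits
  const int kFailureTagSize = 2;
  const int kFailureTypeTagSize = 2;  // 'tt'
  const int kSpaceTagSize = 3;        // 'sss'
  const intptr_t kRetryAfterGC = 0;   // placeholder type tag value
  const intptr_t kNewSpace = 0;       // placeholder space tag value

  // Pack: requested size, then space, then type, then the failure tag.
  intptr_t requested = 5;
  intptr_t value = (requested << kSpaceTagSize) | kNewSpace;
  intptr_t info = (value << kFailureTypeTagSize) | kRetryAfterGC;
  intptr_t word = (info << kFailureTagSize) | kFailureTag;

  // Unpack by shifting all three tag fields back out.
  int total = kFailureTagSize + kFailureTypeTagSize + kSpaceTagSize;
  printf("word = %ld, requested = %ld\n",
         static_cast<long>(word), static_cast<long>(word >> total));
  return 0;
}
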
 
 
@@ -832,6 +895,11 @@
 #ifdef DEBUG
   bool in_range = (value >= kMinValue) && (value <= kMaxValue);
 #endif
+
+#ifdef V8_TARGET_ARCH_X64
+  // To be representable as a long smi, the value must be a 32-bit integer.
+  bool result = (value == static_cast<int32_t>(value));
+#else
   // To be representable as a tagged small integer, the two
   // most-significant bits of 'value' must be either 00 or 11 due to
   // sign-extension. To check this we add 01 to the two
@@ -843,20 +911,8 @@
   // in fact doesn't work correctly with gcc4.1.1 in some cases: The
   // compiler may produce undefined results in case of signed integer
   // overflow. The computation must be done w/ unsigned ints.
-  bool result =
-      ((static_cast<unsigned int>(value) + 0x40000000U) & 0x80000000U) == 0;
-  ASSERT(result == in_range);
-  return result;
-}
-
-
-bool Smi::IsIntptrValid(intptr_t value) {
-#ifdef DEBUG
-  bool in_range = (value >= kMinValue) && (value <= kMaxValue);
+  bool result = (static_cast<uintptr_t>(value + 0x40000000U) < 0x80000000U);
 #endif
-  // See Smi::IsValid(int) for description.
-  bool result =
-      ((static_cast<uintptr_t>(value) + 0x40000000U) < 0x80000000U);
   ASSERT(result == in_range);
   return result;
 }
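
The unsigned-add trick in the 32-bit branch is easy to verify in isolation. A standalone sketch: adding 0x40000000 maps the valid smi range [-2^30, 2^30) onto [0, 2^31), so one unsigned comparison replaces two signed ones and no signed overflow can occur.

#include <cstdint>
#include <cstdio>

// True iff the two most-significant bits of 'value' agree (00 or 11),
// i.e. the value survives the 31-bit tag shift.
static bool SmiIsValid32(int32_t value) {
  return static_cast<uint32_t>(value) + 0x40000000U < 0x80000000U;
}

int main() {
  printf("%d\n", SmiIsValid32(0));               // 1
  printf("%d\n", SmiIsValid32((1 << 30) - 1));   // 1: kMaxValue
  printf("%d\n", SmiIsValid32(1 << 30));         // 0: kMaxValue + 1
  printf("%d\n", SmiIsValid32(-(1 << 30)));      // 1: kMinValue
  printf("%d\n", SmiIsValid32(-(1 << 30) - 1));  // 0: kMinValue - 1
  return 0;
}
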
@@ -1087,14 +1143,16 @@
 Array* JSObject::elements() {
   Object* array = READ_FIELD(this, kElementsOffset);
   // In the assert below Dictionary is covered under FixedArray.
-  ASSERT(array->IsFixedArray() || array->IsPixelArray());
+  ASSERT(array->IsFixedArray() || array->IsPixelArray() ||
+         array->IsExternalArray());
   return reinterpret_cast<Array*>(array);
 }
 
 
 void JSObject::set_elements(Array* value, WriteBarrierMode mode) {
   // In the assert below Dictionary is covered under FixedArray.
-  ASSERT(value->IsFixedArray() || value->IsPixelArray());
+  ASSERT(value->IsFixedArray() || value->IsPixelArray() ||
+         value->IsExternalArray());
   WRITE_FIELD(this, kElementsOffset, value);
   CONDITIONAL_WRITE_BARRIER(this, kElementsOffset, mode);
 }
@@ -1557,6 +1615,14 @@
 CAST_ACCESSOR(Proxy)
 CAST_ACCESSOR(ByteArray)
 CAST_ACCESSOR(PixelArray)
+CAST_ACCESSOR(ExternalArray)
+CAST_ACCESSOR(ExternalByteArray)
+CAST_ACCESSOR(ExternalUnsignedByteArray)
+CAST_ACCESSOR(ExternalShortArray)
+CAST_ACCESSOR(ExternalUnsignedShortArray)
+CAST_ACCESSOR(ExternalIntArray)
+CAST_ACCESSOR(ExternalUnsignedIntArray)
+CAST_ACCESSOR(ExternalFloatArray)
 CAST_ACCESSOR(Struct)
 
 
@@ -1941,6 +2007,116 @@
 }
 
 
+void* ExternalArray::external_pointer() {
+  intptr_t ptr = READ_INTPTR_FIELD(this, kExternalPointerOffset);
+  return reinterpret_cast<void*>(ptr);
+}
+
+
+void ExternalArray::set_external_pointer(void* value, WriteBarrierMode mode) {
+  intptr_t ptr = reinterpret_cast<intptr_t>(value);
+  WRITE_INTPTR_FIELD(this, kExternalPointerOffset, ptr);
+}
+
+
+int8_t ExternalByteArray::get(int index) {
+  ASSERT((index >= 0) && (index < this->length()));
+  int8_t* ptr = static_cast<int8_t*>(external_pointer());
+  return ptr[index];
+}
+
+
+void ExternalByteArray::set(int index, int8_t value) {
+  ASSERT((index >= 0) && (index < this->length()));
+  int8_t* ptr = static_cast<int8_t*>(external_pointer());
+  ptr[index] = value;
+}
+
+
+uint8_t ExternalUnsignedByteArray::get(int index) {
+  ASSERT((index >= 0) && (index < this->length()));
+  uint8_t* ptr = static_cast<uint8_t*>(external_pointer());
+  return ptr[index];
+}
+
+
+void ExternalUnsignedByteArray::set(int index, uint8_t value) {
+  ASSERT((index >= 0) && (index < this->length()));
+  uint8_t* ptr = static_cast<uint8_t*>(external_pointer());
+  ptr[index] = value;
+}
+
+
+int16_t ExternalShortArray::get(int index) {
+  ASSERT((index >= 0) && (index < this->length()));
+  int16_t* ptr = static_cast<int16_t*>(external_pointer());
+  return ptr[index];
+}
+
+
+void ExternalShortArray::set(int index, int16_t value) {
+  ASSERT((index >= 0) && (index < this->length()));
+  int16_t* ptr = static_cast<int16_t*>(external_pointer());
+  ptr[index] = value;
+}
+
+
+uint16_t ExternalUnsignedShortArray::get(int index) {
+  ASSERT((index >= 0) && (index < this->length()));
+  uint16_t* ptr = static_cast<uint16_t*>(external_pointer());
+  return ptr[index];
+}
+
+
+void ExternalUnsignedShortArray::set(int index, uint16_t value) {
+  ASSERT((index >= 0) && (index < this->length()));
+  uint16_t* ptr = static_cast<uint16_t*>(external_pointer());
+  ptr[index] = value;
+}
+
+
+int32_t ExternalIntArray::get(int index) {
+  ASSERT((index >= 0) && (index < this->length()));
+  int32_t* ptr = static_cast<int32_t*>(external_pointer());
+  return ptr[index];
+}
+
+
+void ExternalIntArray::set(int index, int32_t value) {
+  ASSERT((index >= 0) && (index < this->length()));
+  int32_t* ptr = static_cast<int32_t*>(external_pointer());
+  ptr[index] = value;
+}
+
+
+uint32_t ExternalUnsignedIntArray::get(int index) {
+  ASSERT((index >= 0) && (index < this->length()));
+  uint32_t* ptr = static_cast<uint32_t*>(external_pointer());
+  return ptr[index];
+}
+
+
+void ExternalUnsignedIntArray::set(int index, uint32_t value) {
+  ASSERT((index >= 0) && (index < this->length()));
+  uint32_t* ptr = static_cast<uint32_t*>(external_pointer());
+  ptr[index] = value;
+}
+
+
+float ExternalFloatArray::get(int index) {
+  ASSERT((index >= 0) && (index < this->length()));
+  float* ptr = static_cast<float*>(external_pointer());
+  return ptr[index];
+}
+
+
+void ExternalFloatArray::set(int index, float value) {
+  ASSERT((index >= 0) && (index < this->length()));
+  float* ptr = static_cast<float*>(external_pointer());
+  ptr[index] = value;
+}
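
All fourteen accessors above follow the same pattern: a bounds assert plus a typed load or store through external_pointer(). Stripped of the heap-object machinery, the idea reduces to the following standalone sketch (the struct is a stand-in for illustration, not the V8 class):

#include <cstdint>
#include <cassert>
#include <cstdio>

// Stand-in for the V8 object: a length plus a raw pointer into memory
// owned by the embedder (for example a WebGL buffer), not by the heap.
struct ExternalInt32Array {
  int length;
  void* external_pointer;

  int32_t get(int index) {
    assert(index >= 0 && index < length);
    return static_cast<int32_t*>(external_pointer)[index];
  }
  void set(int index, int32_t value) {
    assert(index >= 0 && index < length);
    static_cast<int32_t*>(external_pointer)[index] = value;
  }
};

int main() {
  int32_t backing[4] = {0, 0, 0, 0};  // storage outside the managed heap
  ExternalInt32Array arr = {4, backing};
  arr.set(2, 42);
  printf("%d\n", arr.get(2));  // 42, read straight out of 'backing'
  return 0;
}
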
+
+
 int Map::instance_size() {
   return READ_BYTE_FIELD(this, kInstanceSizeOffset) << kPointerSizeLog2;
 }
@@ -2361,6 +2537,20 @@
               kThisPropertyAssignmentsCountOffset)
 
 
+bool Script::HasValidSource() {
+  Object* src = this->source();
+  if (!src->IsString()) return true;
+  String* src_str = String::cast(src);
+  if (!StringShape(src_str).IsExternal()) return true;
+  if (src_str->IsAsciiRepresentation()) {
+    return ExternalAsciiString::cast(src)->resource() != NULL;
+  } else if (src_str->IsTwoByteRepresentation()) {
+    return ExternalTwoByteString::cast(src)->resource() != NULL;
+  }
+  return true;
+}
+
+
 void SharedFunctionInfo::DontAdaptArguments() {
   ASSERT(code()->kind() == Code::BUILTIN);
   set_formal_parameter_count(kDontAdaptArgumentsSentinel);
@@ -2635,6 +2825,25 @@
     ASSERT(array->IsDictionary());
     return DICTIONARY_ELEMENTS;
   }
+  if (array->IsExternalArray()) {
+    switch (array->map()->instance_type()) {
+      case EXTERNAL_BYTE_ARRAY_TYPE:
+        return EXTERNAL_BYTE_ELEMENTS;
+      case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
+        return EXTERNAL_UNSIGNED_BYTE_ELEMENTS;
+      case EXTERNAL_SHORT_ARRAY_TYPE:
+        return EXTERNAL_SHORT_ELEMENTS;
+      case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
+        return EXTERNAL_UNSIGNED_SHORT_ELEMENTS;
+      case EXTERNAL_INT_ARRAY_TYPE:
+        return EXTERNAL_INT_ELEMENTS;
+      case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
+        return EXTERNAL_UNSIGNED_INT_ELEMENTS;
+      default:
+        ASSERT(array->map()->instance_type() == EXTERNAL_FLOAT_ARRAY_TYPE);
+        return EXTERNAL_FLOAT_ELEMENTS;
+    }
+  }
   ASSERT(array->IsPixelArray());
   return PIXEL_ELEMENTS;
 }
@@ -2655,6 +2864,52 @@
 }
 
 
+bool JSObject::HasExternalArrayElements() {
+  return (HasExternalByteElements() ||
+          HasExternalUnsignedByteElements() ||
+          HasExternalShortElements() ||
+          HasExternalUnsignedShortElements() ||
+          HasExternalIntElements() ||
+          HasExternalUnsignedIntElements() ||
+          HasExternalFloatElements());
+}
+
+
+bool JSObject::HasExternalByteElements() {
+  return GetElementsKind() == EXTERNAL_BYTE_ELEMENTS;
+}
+
+
+bool JSObject::HasExternalUnsignedByteElements() {
+  return GetElementsKind() == EXTERNAL_UNSIGNED_BYTE_ELEMENTS;
+}
+
+
+bool JSObject::HasExternalShortElements() {
+  return GetElementsKind() == EXTERNAL_SHORT_ELEMENTS;
+}
+
+
+bool JSObject::HasExternalUnsignedShortElements() {
+  return GetElementsKind() == EXTERNAL_UNSIGNED_SHORT_ELEMENTS;
+}
+
+
+bool JSObject::HasExternalIntElements() {
+  return GetElementsKind() == EXTERNAL_INT_ELEMENTS;
+}
+
+
+bool JSObject::HasExternalUnsignedIntElements() {
+  return GetElementsKind() == EXTERNAL_UNSIGNED_INT_ELEMENTS;
+}
+
+
+bool JSObject::HasExternalFloatElements() {
+  return GetElementsKind() == EXTERNAL_FLOAT_ELEMENTS;
+}
+
+
 bool JSObject::HasNamedInterceptor() {
   return map()->has_named_interceptor();
 }
diff --git a/src/objects.cc b/src/objects.cc
index e2fa3b5..af1a0e5 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -618,12 +618,12 @@
 
 
 void Failure::FailurePrint(StringStream* accumulator) {
-  accumulator->Add("Failure(%d)", value());
+  accumulator->Add("Failure(%p)", reinterpret_cast<void*>(value()));
 }
 
 
 void Failure::FailurePrint() {
-  PrintF("Failure(%d)", value());
+  PrintF("Failure(%p)", reinterpret_cast<void*>(value()));
 }
 
 
@@ -751,10 +751,11 @@
 
 bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
 #ifdef DEBUG
-  {  // NOLINT (presubmit.py gets confused about if and braces)
+  if (FLAG_enable_slow_asserts) {
     // Assert that the resource and the string are equivalent.
     ASSERT(static_cast<size_t>(this->length()) == resource->length());
-    SmartPointer<uc16> smart_chars = this->ToWideCString();
+    SmartPointer<uc16> smart_chars(NewArray<uc16>(this->length()));
+    String::WriteToFlat(this, *smart_chars, 0, this->length());
     ASSERT(memcmp(*smart_chars,
                   resource->data(),
                   resource->length() * sizeof(**smart_chars)) == 0);
@@ -794,10 +795,11 @@
 
 bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
 #ifdef DEBUG
-  {  // NOLINT (presubmit.py gets confused about if and braces)
+  if (FLAG_enable_slow_asserts) {
     // Assert that the resource and the string are equivalent.
     ASSERT(static_cast<size_t>(this->length()) == resource->length());
-    SmartPointer<char> smart_chars = this->ToCString();
+    SmartPointer<char> smart_chars(NewArray<char>(this->length()));
+    String::WriteToFlat(this, *smart_chars, 0, this->length());
     ASSERT(memcmp(*smart_chars,
                   resource->data(),
                   resource->length()*sizeof(**smart_chars)) == 0);
@@ -1005,6 +1007,34 @@
     case PIXEL_ARRAY_TYPE:
       accumulator->Add("<PixelArray[%u]>", PixelArray::cast(this)->length());
       break;
+    case EXTERNAL_BYTE_ARRAY_TYPE:
+      accumulator->Add("<ExternalByteArray[%u]>",
+                       ExternalByteArray::cast(this)->length());
+      break;
+    case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
+      accumulator->Add("<ExternalUnsignedByteArray[%u]>",
+                       ExternalUnsignedByteArray::cast(this)->length());
+      break;
+    case EXTERNAL_SHORT_ARRAY_TYPE:
+      accumulator->Add("<ExternalShortArray[%u]>",
+                       ExternalShortArray::cast(this)->length());
+      break;
+    case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
+      accumulator->Add("<ExternalUnsignedShortArray[%u]>",
+                       ExternalUnsignedShortArray::cast(this)->length());
+      break;
+    case EXTERNAL_INT_ARRAY_TYPE:
+      accumulator->Add("<ExternalIntArray[%u]>",
+                       ExternalIntArray::cast(this)->length());
+      break;
+    case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
+      accumulator->Add("<ExternalUnsignedIntArray[%u]>",
+                       ExternalUnsignedIntArray::cast(this)->length());
+      break;
+    case EXTERNAL_FLOAT_ARRAY_TYPE:
+      accumulator->Add("<ExternalFloatArray[%u]>",
+                       ExternalFloatArray::cast(this)->length());
+      break;
     case SHARED_FUNCTION_INFO_TYPE:
       accumulator->Add("<SharedFunctionInfo>");
       break;
@@ -1147,6 +1177,13 @@
     case FILLER_TYPE:
     case BYTE_ARRAY_TYPE:
     case PIXEL_ARRAY_TYPE:
+    case EXTERNAL_BYTE_ARRAY_TYPE:
+    case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
+    case EXTERNAL_SHORT_ARRAY_TYPE:
+    case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
+    case EXTERNAL_INT_ARRAY_TYPE:
+    case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
+    case EXTERNAL_FLOAT_ARRAY_TYPE:
       break;
     case SHARED_FUNCTION_INFO_TYPE: {
       SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(this);
@@ -2237,7 +2274,7 @@
 
 
 Object* JSObject::NormalizeElements() {
-  ASSERT(!HasPixelElements());
+  ASSERT(!HasPixelElements() && !HasExternalArrayElements());
   if (HasDictionaryElements()) return this;
 
   // Get number of entries.
@@ -2322,7 +2359,7 @@
 
 Object* JSObject::DeleteElementPostInterceptor(uint32_t index,
                                                DeleteMode mode) {
-  ASSERT(!HasPixelElements());
+  ASSERT(!HasPixelElements() && !HasExternalArrayElements());
   switch (GetElementsKind()) {
     case FAST_ELEMENTS: {
       uint32_t length = IsJSArray() ?
@@ -2413,10 +2450,17 @@
       }
       break;
     }
-    case PIXEL_ELEMENTS: {
-      // Pixel elements cannot be deleted. Just silently ignore here.
+    case PIXEL_ELEMENTS:
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS:
+      // Pixel and external array elements cannot be deleted. Just
+      // silently ignore here.
       break;
-    }
     case DICTIONARY_ELEMENTS: {
       NumberDictionary* dictionary = element_dictionary();
       int entry = dictionary->FindEntry(index);
@@ -2507,7 +2551,15 @@
   // Check if the object is among the indexed properties.
   switch (GetElementsKind()) {
     case PIXEL_ELEMENTS:
-      // Raw pixels do not reference other objects.
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS:
+      // Raw pixels and external arrays do not reference other
+      // objects.
       break;
     case FAST_ELEMENTS: {
       int length = IsJSArray() ?
@@ -2752,7 +2804,15 @@
       case FAST_ELEMENTS:
         break;
       case PIXEL_ELEMENTS:
-        // Ignore getters and setters on pixel elements.
+      case EXTERNAL_BYTE_ELEMENTS:
+      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+      case EXTERNAL_SHORT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+      case EXTERNAL_INT_ELEMENTS:
+      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+      case EXTERNAL_FLOAT_ELEMENTS:
+        // Ignore getters and setters on pixel and external array
+        // elements.
         return Heap::undefined_value();
       case DICTIONARY_ELEMENTS: {
         // Lookup the index.
@@ -3087,7 +3147,7 @@
 
 
 Object* FixedArray::AddKeysFromJSArray(JSArray* array) {
-  ASSERT(!array->HasPixelElements());
+  ASSERT(!array->HasPixelElements() && !array->HasExternalArrayElements());
   switch (array->GetElementsKind()) {
     case JSObject::FAST_ELEMENTS:
       return UnionOfKeys(FixedArray::cast(array->elements()));
@@ -4983,7 +5043,8 @@
 
 
 void ObjectVisitor::VisitDebugTarget(RelocInfo* rinfo) {
-  ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) && rinfo->IsCallInstruction());
+  ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) &&
+         rinfo->IsPatchedReturnSequence());
   Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
   Object* old_target = target;
   VisitPointer(&target);
@@ -5009,7 +5070,7 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
     } else if (Debug::has_break_points() &&
                RelocInfo::IsJSReturn(rmode) &&
-               it.rinfo()->IsCallInstruction()) {
+               it.rinfo()->IsPatchedReturnSequence()) {
       v->VisitDebugTarget(it.rinfo());
 #endif
     } else if (rmode == RelocInfo::RUNTIME_ENTRY) {
@@ -5047,19 +5108,20 @@
           desc.reloc_size);
 
   // unbox handles and relocate
-  int delta = instruction_start() - desc.buffer;
+  intptr_t delta = instruction_start() - desc.buffer;
   int mode_mask = RelocInfo::kCodeTargetMask |
                   RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
                   RelocInfo::kApplyMask;
+  Assembler* origin = desc.origin;  // Needed to find target_object on X64.
   for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
     RelocInfo::Mode mode = it.rinfo()->rmode();
     if (mode == RelocInfo::EMBEDDED_OBJECT) {
-      Object** p = reinterpret_cast<Object**>(it.rinfo()->target_object());
+      Handle<Object> p = it.rinfo()->target_object_handle(origin);
       it.rinfo()->set_target_object(*p);
     } else if (RelocInfo::IsCodeTarget(mode)) {
       // rewrite code handles in inline cache targets to direct
       // pointers to the first instruction in the code object
-      Object** p = reinterpret_cast<Object**>(it.rinfo()->target_object());
+      Handle<Object> p = it.rinfo()->target_object_handle(origin);
       Code* code = Code::cast(*p);
       it.rinfo()->set_target_address(code->instruction_start());
     } else {
@@ -5199,8 +5261,8 @@
 
 
 void JSObject::SetFastElements(FixedArray* elems) {
-  // We should never end in here with a pixel array.
-  ASSERT(!HasPixelElements());
+  // We should never end in here with a pixel or external array.
+  ASSERT(!HasPixelElements() && !HasExternalArrayElements());
 #ifdef DEBUG
   // Check the provided array is filled with the_hole.
   uint32_t len = static_cast<uint32_t>(elems->length());
@@ -5237,8 +5299,8 @@
 
 
 Object* JSObject::SetSlowElements(Object* len) {
-  // We should never end in here with a pixel array.
-  ASSERT(!HasPixelElements());
+  // We should never end in here with a pixel or external array.
+  ASSERT(!HasPixelElements() && !HasExternalArrayElements());
 
   uint32_t new_length = static_cast<uint32_t>(len->Number());
 
@@ -5316,8 +5378,8 @@
 
 
 Object* JSObject::SetElementsLength(Object* len) {
-  // We should never end in here with a pixel array.
-  ASSERT(!HasPixelElements());
+  // We should never end in here with a pixel or external array.
+  ASSERT(!HasPixelElements() && !HasExternalArrayElements());
 
   Object* smi_length = len->ToSmi();
   if (smi_length->IsSmi()) {
@@ -5418,6 +5480,20 @@
       }
       break;
     }
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS: {
+      // TODO(kbr): Add testcase.
+      ExternalArray* array = ExternalArray::cast(elements());
+      if (index < static_cast<uint32_t>(array->length())) {
+        return true;
+      }
+      break;
+    }
     case DICTIONARY_ELEMENTS: {
       if (element_dictionary()->FindEntry(index)
           != NumberDictionary::kNotFound) {
@@ -5505,6 +5581,16 @@
       PixelArray* pixels = PixelArray::cast(elements());
       return (index < static_cast<uint32_t>(pixels->length()));
     }
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS: {
+      ExternalArray* array = ExternalArray::cast(elements());
+      return (index < static_cast<uint32_t>(array->length()));
+    }
     case DICTIONARY_ELEMENTS: {
       return element_dictionary()->FindEntry(index)
           != NumberDictionary::kNotFound;
@@ -5548,6 +5634,19 @@
       }
       break;
     }
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS: {
+      ExternalArray* array = ExternalArray::cast(elements());
+      if (index < static_cast<uint32_t>(array->length())) {
+        return true;
+      }
+      break;
+    }
     case DICTIONARY_ELEMENTS: {
       if (element_dictionary()->FindEntry(index)
           != NumberDictionary::kNotFound) {
@@ -5688,6 +5787,37 @@
       PixelArray* pixels = PixelArray::cast(elements());
       return pixels->SetValue(index, value);
     }
+    case EXTERNAL_BYTE_ELEMENTS: {
+      ExternalByteArray* array = ExternalByteArray::cast(elements());
+      return array->SetValue(index, value);
+    }
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
+      ExternalUnsignedByteArray* array =
+          ExternalUnsignedByteArray::cast(elements());
+      return array->SetValue(index, value);
+    }
+    case EXTERNAL_SHORT_ELEMENTS: {
+      ExternalShortArray* array = ExternalShortArray::cast(elements());
+      return array->SetValue(index, value);
+    }
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
+      ExternalUnsignedShortArray* array =
+          ExternalUnsignedShortArray::cast(elements());
+      return array->SetValue(index, value);
+    }
+    case EXTERNAL_INT_ELEMENTS: {
+      ExternalIntArray* array = ExternalIntArray::cast(elements());
+      return array->SetValue(index, value);
+    }
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
+      ExternalUnsignedIntArray* array =
+          ExternalUnsignedIntArray::cast(elements());
+      return array->SetValue(index, value);
+    }
+    case EXTERNAL_FLOAT_ELEMENTS: {
+      ExternalFloatArray* array = ExternalFloatArray::cast(elements());
+      return array->SetValue(index, value);
+    }
     case DICTIONARY_ELEMENTS: {
       // Insert element in the dictionary.
       FixedArray* elms = FixedArray::cast(elements());
@@ -5805,6 +5935,17 @@
       UNIMPLEMENTED();
       break;
     }
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS: {
+      // TODO(kbr): Add testcase and implement.
+      UNIMPLEMENTED();
+      break;
+    }
     case DICTIONARY_ELEMENTS: {
       NumberDictionary* dictionary = element_dictionary();
       int entry = dictionary->FindEntry(index);
@@ -5903,6 +6044,65 @@
       }
       break;
     }
+    case EXTERNAL_BYTE_ELEMENTS: {
+      ExternalByteArray* array = ExternalByteArray::cast(elements());
+      if (index < static_cast<uint32_t>(array->length())) {
+        int8_t value = array->get(index);
+        return Smi::FromInt(value);
+      }
+      break;
+    }
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
+      ExternalUnsignedByteArray* array =
+          ExternalUnsignedByteArray::cast(elements());
+      if (index < static_cast<uint32_t>(array->length())) {
+        uint8_t value = array->get(index);
+        return Smi::FromInt(value);
+      }
+      break;
+    }
+    case EXTERNAL_SHORT_ELEMENTS: {
+      ExternalShortArray* array = ExternalShortArray::cast(elements());
+      if (index < static_cast<uint32_t>(array->length())) {
+        int16_t value = array->get(index);
+        return Smi::FromInt(value);
+      }
+      break;
+    }
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
+      ExternalUnsignedShortArray* array =
+          ExternalUnsignedShortArray::cast(elements());
+      if (index < static_cast<uint32_t>(array->length())) {
+        uint16_t value = array->get(index);
+        return Smi::FromInt(value);
+      }
+      break;
+    }
+    case EXTERNAL_INT_ELEMENTS: {
+      ExternalIntArray* array = ExternalIntArray::cast(elements());
+      if (index < static_cast<uint32_t>(array->length())) {
+        int32_t value = array->get(index);
+        return Heap::NumberFromInt32(value);
+      }
+      break;
+    }
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
+      ExternalUnsignedIntArray* array =
+          ExternalUnsignedIntArray::cast(elements());
+      if (index < static_cast<uint32_t>(array->length())) {
+        uint32_t value = array->get(index);
+        return Heap::NumberFromUint32(value);
+      }
+      break;
+    }
+    case EXTERNAL_FLOAT_ELEMENTS: {
+      ExternalFloatArray* array = ExternalFloatArray::cast(elements());
+      if (index < static_cast<uint32_t>(array->length())) {
+        float value = array->get(index);
+        return Heap::AllocateHeapNumber(value);
+      }
+      break;
+    }
     case DICTIONARY_ELEMENTS: {
       NumberDictionary* dictionary = element_dictionary();
       int entry = dictionary->FindEntry(index);
@@ -5946,7 +6146,14 @@
       }
       break;
     }
-    case PIXEL_ELEMENTS: {
+    case PIXEL_ELEMENTS:
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS: {
       return true;
     }
     case DICTIONARY_ELEMENTS: {
@@ -6170,6 +6377,16 @@
       PixelArray* pixels = PixelArray::cast(elements());
       return index < static_cast<uint32_t>(pixels->length());
     }
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS: {
+      ExternalArray* array = ExternalArray::cast(elements());
+      return index < static_cast<uint32_t>(array->length());
+    }
     case DICTIONARY_ELEMENTS: {
       return element_dictionary()->FindEntry(index)
           != NumberDictionary::kNotFound;
@@ -6390,6 +6607,23 @@
       ASSERT(!storage || storage->length() >= counter);
       break;
     }
+    case EXTERNAL_BYTE_ELEMENTS:
+    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+    case EXTERNAL_SHORT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+    case EXTERNAL_INT_ELEMENTS:
+    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+    case EXTERNAL_FLOAT_ELEMENTS: {
+      int length = ExternalArray::cast(elements())->length();
+      while (counter < length) {
+        if (storage != NULL) {
+          storage->set(counter, Smi::FromInt(counter), SKIP_WRITE_BARRIER);
+        }
+        counter++;
+      }
+      ASSERT(!storage || storage->length() >= counter);
+      break;
+    }
     case DICTIONARY_ELEMENTS: {
       if (storage != NULL) {
         element_dictionary()->CopyKeysTo(storage, filter);
@@ -6561,6 +6795,10 @@
       : string_(string),
         flags_(Smi::FromInt(flags.value())) { }
 
+  // Rather than storing the key in the hash table, a pointer to the
+  // stored value is kept where the key would normally be.  IsMatch then
+  // compares the search key to the found object, rather than comparing
+  // a key to a key.
   bool IsMatch(Object* obj) {
     FixedArray* val = FixedArray::cast(obj);
     return string_->Equals(String::cast(val->get(JSRegExp::kSourceIndex)))
@@ -6932,7 +7170,7 @@
 // If the object is in dictionary mode, it is converted to fast elements
 // mode.
 Object* JSObject::PrepareElementsForSort(uint32_t limit) {
-  ASSERT(!HasPixelElements());
+  ASSERT(!HasPixelElements() && !HasExternalArrayElements());
 
   if (HasDictionaryElements()) {
     // Convert to fast elements containing only the existing properties.
@@ -7064,6 +7302,99 @@
 }
 
 
+template<typename ExternalArrayClass, typename ValueType>
+static Object* ExternalArrayIntSetter(ExternalArrayClass* receiver,
+                                      uint32_t index,
+                                      Object* value) {
+  ValueType cast_value = 0;
+  if (index < static_cast<uint32_t>(receiver->length())) {
+    if (value->IsSmi()) {
+      int int_value = Smi::cast(value)->value();
+      cast_value = static_cast<ValueType>(int_value);
+    } else if (value->IsHeapNumber()) {
+      double double_value = HeapNumber::cast(value)->value();
+      cast_value = static_cast<ValueType>(DoubleToInt32(double_value));
+    } else {
+      // Clamp undefined to zero (default). All other types have been
+      // converted to a number type further up in the call chain.
+      ASSERT(value->IsUndefined());
+    }
+    receiver->set(index, cast_value);
+  }
+  return Heap::NumberFromInt32(cast_value);
+}
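
The template above dispatches on the value's representation: a smi passes its integer through, a heap number is truncated with DoubleToInt32, and undefined falls back to zero. A standalone sketch of that three-way dispatch (the tagged union is a toy stand-in, and the plain static_cast only matches DoubleToInt32 for in-range doubles):

#include <cstdint>
#include <cstdio>

// Toy tagged value: just enough to mirror the setter's three cases.
struct Value {
  enum Kind { SMI, HEAP_NUMBER, UNDEFINED } kind;
  int smi;        // valid when kind == SMI
  double number;  // valid when kind == HEAP_NUMBER
};

static int32_t ToElement(const Value& v) {
  switch (v.kind) {
    case Value::SMI:         return v.smi;
    case Value::HEAP_NUMBER: return static_cast<int32_t>(v.number);  // truncate
    case Value::UNDEFINED:   return 0;  // "clamp undefined to zero"
  }
  return 0;
}

int main() {
  Value a = {Value::SMI, 41, 0.0};
  Value b = {Value::HEAP_NUMBER, 0, 3.9};
  Value c = {Value::UNDEFINED, 0, 0.0};
  printf("%d %d %d\n", ToElement(a), ToElement(b), ToElement(c));  // 41 3 0
  return 0;
}
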
+
+
+Object* ExternalByteArray::SetValue(uint32_t index, Object* value) {
+  return ExternalArrayIntSetter<ExternalByteArray, int8_t>
+      (this, index, value);
+}
+
+
+Object* ExternalUnsignedByteArray::SetValue(uint32_t index, Object* value) {
+  return ExternalArrayIntSetter<ExternalUnsignedByteArray, uint8_t>
+      (this, index, value);
+}
+
+
+Object* ExternalShortArray::SetValue(uint32_t index, Object* value) {
+  return ExternalArrayIntSetter<ExternalShortArray, int16_t>
+      (this, index, value);
+}
+
+
+Object* ExternalUnsignedShortArray::SetValue(uint32_t index, Object* value) {
+  return ExternalArrayIntSetter<ExternalUnsignedShortArray, uint16_t>
+      (this, index, value);
+}
+
+
+Object* ExternalIntArray::SetValue(uint32_t index, Object* value) {
+  return ExternalArrayIntSetter<ExternalIntArray, int32_t>
+      (this, index, value);
+}
+
+
+Object* ExternalUnsignedIntArray::SetValue(uint32_t index, Object* value) {
+  uint32_t cast_value = 0;
+  if (index < static_cast<uint32_t>(length())) {
+    if (value->IsSmi()) {
+      int int_value = Smi::cast(value)->value();
+      cast_value = static_cast<uint32_t>(int_value);
+    } else if (value->IsHeapNumber()) {
+      double double_value = HeapNumber::cast(value)->value();
+      cast_value = static_cast<uint32_t>(DoubleToUint32(double_value));
+    } else {
+      // Clamp undefined to zero (default). All other types have been
+      // converted to a number type further up in the call chain.
+      ASSERT(value->IsUndefined());
+    }
+    set(index, cast_value);
+  }
+  return Heap::NumberFromUint32(cast_value);
+}
+
+
+Object* ExternalFloatArray::SetValue(uint32_t index, Object* value) {
+  float cast_value = 0;
+  if (index < static_cast<uint32_t>(length())) {
+    if (value->IsSmi()) {
+      int int_value = Smi::cast(value)->value();
+      cast_value = static_cast<float>(int_value);
+    } else if (value->IsHeapNumber()) {
+      double double_value = HeapNumber::cast(value)->value();
+      cast_value = static_cast<float>(double_value);
+    } else {
+      // Clamp undefined to zero (default). All other types have been
+      // converted to a number type further up in the call chain.
+      ASSERT(value->IsUndefined());
+    }
+    set(index, cast_value);
+  }
+  return Heap::AllocateHeapNumber(cast_value);
+}
+
+
 Object* GlobalObject::GetPropertyCell(LookupResult* result) {
   ASSERT(!HasFastProperties());
   Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
@@ -7220,6 +7551,8 @@
   CompilationCacheTable* cache =
       reinterpret_cast<CompilationCacheTable*>(obj);
   int entry = cache->FindInsertionEntry(key.Hash());
+  // We store the value in the key slot, and compare the search key
+  // to the stored value with a custom IsMatch function during lookups.
   cache->set(EntryToIndex(entry), value);
   cache->set(EntryToIndex(entry) + 1, value);
   cache->ElementAdded();
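
Both comments describe the same trick: the table stores the value in the slot where a key would normally live, and the key object's IsMatch compares a probe key against a stored entry rather than key to key. A standalone sketch of the pattern (not V8 code):

#include <cstring>
#include <cstdio>

// An entry holds the source together with its compiled form; there is
// no separate key column in the table.
struct Entry { const char* source; const char* compiled; };

// The lookup key lives outside the table and knows how to compare
// itself against a stored entry.
struct StringKey {
  const char* source;
  bool IsMatch(const Entry& stored) const {
    return strcmp(source, stored.source) == 0;
  }
};

int main() {
  Entry table[2] = {{"a+b", "<code for a+b>"}, {"x*y", "<code for x*y>"}};
  StringKey key = {"x*y"};
  for (int i = 0; i < 2; i++) {
    if (key.IsMatch(table[i])) printf("hit: %s\n", table[i].compiled);
  }
  return 0;
}
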
diff --git a/src/objects.h b/src/objects.h
index e9430f5..68bed6c 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -32,6 +32,9 @@
 #include "code-stubs.h"
 #include "smart-pointer.h"
 #include "unicode-inl.h"
+#if V8_TARGET_ARCH_ARM
+#include "arm/constants-arm.h"
+#endif
 
 //
 // All object types in the V8 JavaScript are described in this file.
@@ -53,6 +56,14 @@
 //       - Array
 //         - ByteArray
 //         - PixelArray
+//         - ExternalArray
+//           - ExternalByteArray
+//           - ExternalUnsignedByteArray
+//           - ExternalShortArray
+//           - ExternalUnsignedShortArray
+//           - ExternalIntArray
+//           - ExternalUnsignedIntArray
+//           - ExternalFloatArray
 //         - FixedArray
 //           - DescriptorArray
 //           - HashTable
@@ -271,6 +282,16 @@
   V(PROXY_TYPE)                                 \
   V(BYTE_ARRAY_TYPE)                            \
   V(PIXEL_ARRAY_TYPE)                           \
+  /* Note: the order of these external array */ \
+  /* types is relied upon in */                 \
+  /* Object::IsExternalArray(). */              \
+  V(EXTERNAL_BYTE_ARRAY_TYPE)                   \
+  V(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE)          \
+  V(EXTERNAL_SHORT_ARRAY_TYPE)                  \
+  V(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE)         \
+  V(EXTERNAL_INT_ARRAY_TYPE)                    \
+  V(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE)           \
+  V(EXTERNAL_FLOAT_ARRAY_TYPE)                  \
   V(FILLER_TYPE)                                \
                                                 \
   V(ACCESSOR_INFO_TYPE)                         \
@@ -670,6 +691,13 @@
   PROXY_TYPE,
   BYTE_ARRAY_TYPE,
   PIXEL_ARRAY_TYPE,
+  EXTERNAL_BYTE_ARRAY_TYPE,
+  EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
+  EXTERNAL_SHORT_ARRAY_TYPE,
+  EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
+  EXTERNAL_INT_ARRAY_TYPE,
+  EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
+  EXTERNAL_FLOAT_ARRAY_TYPE,
   FILLER_TYPE,
   SMI_TYPE,
 
@@ -777,6 +805,14 @@
   inline bool IsNumber();
   inline bool IsByteArray();
   inline bool IsPixelArray();
+  inline bool IsExternalArray();
+  inline bool IsExternalByteArray();
+  inline bool IsExternalUnsignedByteArray();
+  inline bool IsExternalShortArray();
+  inline bool IsExternalUnsignedShortArray();
+  inline bool IsExternalIntArray();
+  inline bool IsExternalUnsignedIntArray();
+  inline bool IsExternalFloatArray();
   inline bool IsFailure();
   inline bool IsRetryAfterGC();
   inline bool IsOutOfMemoryFailure();
@@ -904,10 +940,10 @@
 
 // Smi represents integer Numbers that can be stored in 31 bits.
 // Smis are immediate which means they are NOT allocated in the heap.
-// Smi stands for small integer.
 // The this pointer has the following format: [31 bit signed int] 0
-// On 64-bit, the top 32 bits of the pointer is allowed to have any
-// value.
+// For long smis it has the following format:
+//     [32 bit signed int] [31 bits zero padding] 0
+// Smi stands for small integer.
 class Smi: public Object {
  public:
   // Returns the integer value.
@@ -921,8 +957,6 @@
   // Returns whether value can be represented in a Smi.
   static inline bool IsValid(intptr_t value);
 
-  static inline bool IsIntptrValid(intptr_t);
-
   // Casting.
   static inline Smi* cast(Object* object);
 
@@ -933,10 +967,8 @@
   void SmiVerify();
 #endif
 
-  static const int kSmiNumBits = 31;
-  // Min and max limits for Smi values.
-  static const int kMinValue = -(1 << (kSmiNumBits - 1));
-  static const int kMaxValue = (1 << (kSmiNumBits - 1)) - 1;
+  static const int kMinValue = (-1 << (kSmiValueSize - 1));
+  static const int kMaxValue = -(kMinValue + 1);
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(Smi);
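
The two layouts in the comment above differ only in the shift distance. A standalone sketch of the 64-bit ("long smi") encoding, assuming kSmiTagSize == 1 and kSmiShiftSize == 31 and run on a 64-bit host:

#include <cstdint>
#include <cstdio>

int main() {
  const int kSmiTagSize = 1;
  const int kSmiShiftSize = 31;  // 0 on 32-bit targets
  const intptr_t kSmiTag = 0;

  int32_t value = -7;
  // Layout: [32-bit signed payload][31 zero bits][tag bit 0].
  intptr_t tagged =
      (static_cast<intptr_t>(value) << (kSmiTagSize + kSmiShiftSize)) |
      kSmiTag;
  int32_t decoded =
      static_cast<int32_t>(tagged >> (kSmiTagSize + kSmiShiftSize));
  printf("tagged = %lld, decoded = %d\n",
         static_cast<long long>(tagged), decoded);  // decoded == -7
  return 0;
}
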
@@ -949,10 +981,10 @@
 //
 // Failures are a single word, encoded as follows:
 // +-------------------------+---+--+--+
-// |rrrrrrrrrrrrrrrrrrrrrrrrr|sss|tt|11|
+// |...rrrrrrrrrrrrrrrrrrrrrr|sss|tt|11|
 // +-------------------------+---+--+--+
-//  3                       7 6 4 32 10
-//  1
+//                          7 6 4 32 10
+//
 //
 // The low two bits, 0-1, are the failure tag, 11.  The next two bits,
 // 2-3, are a failure type tag 'tt' with possible values:
@@ -1014,8 +1046,8 @@
 #endif
 
  private:
-  inline int value() const;
-  static inline Failure* Construct(Type type, int value = 0);
+  inline intptr_t value() const;
+  static inline Failure* Construct(Type type, intptr_t value = 0);
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(Failure);
 };
@@ -1291,7 +1323,7 @@
   // is a mixture of sign, exponent and mantissa.  Our current platforms are all
   // little endian apart from non-EABI arm which is little endian with big
   // endian floating point word ordering!
-#if !defined(V8_HOST_ARCH_ARM) || __ARM_EABI__
+#if !defined(V8_HOST_ARCH_ARM) || defined(USE_ARM_EABI)
   static const int kMantissaOffset = kValueOffset;
   static const int kExponentOffset = kValueOffset + 4;
 #else
@@ -1324,7 +1356,14 @@
   enum ElementsKind {
     FAST_ELEMENTS,
     DICTIONARY_ELEMENTS,
-    PIXEL_ELEMENTS
+    PIXEL_ELEMENTS,
+    EXTERNAL_BYTE_ELEMENTS,
+    EXTERNAL_UNSIGNED_BYTE_ELEMENTS,
+    EXTERNAL_SHORT_ELEMENTS,
+    EXTERNAL_UNSIGNED_SHORT_ELEMENTS,
+    EXTERNAL_INT_ELEMENTS,
+    EXTERNAL_UNSIGNED_INT_ELEMENTS,
+    EXTERNAL_FLOAT_ELEMENTS
   };
 
   // [properties]: Backing storage for properties.
@@ -1344,6 +1383,14 @@
   inline bool HasFastElements();
   inline bool HasDictionaryElements();
   inline bool HasPixelElements();
+  inline bool HasExternalArrayElements();
+  inline bool HasExternalByteElements();
+  inline bool HasExternalUnsignedByteElements();
+  inline bool HasExternalShortElements();
+  inline bool HasExternalUnsignedShortElements();
+  inline bool HasExternalIntElements();
+  inline bool HasExternalUnsignedIntElements();
+  inline bool HasExternalFloatElements();
   inline NumberDictionary* element_dictionary();  // Gets slow elements.
 
   // Collects elements starting at index 0.
@@ -2036,33 +2083,33 @@
 //     // The Element size indicates number of elements per entry.
 //     static const int kEntrySize = ..;
 //   };
-// table.  The prefix size indicates an amount of memory in the
+// The prefix size indicates an amount of memory in the
 // beginning of the backing storage that can be used for non-element
 // information by subclasses.
 
 template<typename Shape, typename Key>
 class HashTable: public FixedArray {
  public:
-  // Returns the number of elements in the dictionary.
+  // Returns the number of elements in the hash table.
   int NumberOfElements() {
     return Smi::cast(get(kNumberOfElementsIndex))->value();
   }
 
-  // Returns the capacity of the dictionary.
+  // Returns the capacity of the hash table.
   int Capacity() {
     return Smi::cast(get(kCapacityIndex))->value();
   }
 
   // ElementAdded should be called whenever an element is added to a
-  // dictionary.
+  // hash table.
   void ElementAdded() { SetNumberOfElements(NumberOfElements() + 1); }
 
   // ElementRemoved should be called whenever an element is removed from
-  // a dictionary.
+  // a hash table.
   void ElementRemoved() { SetNumberOfElements(NumberOfElements() - 1); }
   void ElementsRemoved(int n) { SetNumberOfElements(NumberOfElements() - n); }
 
-  // Returns a new array for dictionary usage. Might return Failure.
+  // Returns a new HashTable object. Might return Failure.
   static Object* Allocate(int at_least_space_for);
 
   // Returns the key at entry.
@@ -2112,7 +2159,7 @@
     return (entry * kEntrySize) + kElementsStartIndex;
   }
 
-  // Update the number of elements in the dictionary.
+  // Update the number of elements in the hash table.
   void SetNumberOfElements(int nof) {
     fast_set(this, kNumberOfElementsIndex, Smi::FromInt(nof));
   }
@@ -2148,7 +2195,7 @@
   virtual uint32_t Hash() = 0;
   // Returns the hash value for object.
   virtual uint32_t HashForObject(Object* key) = 0;
-  // Returns the key object for storing into the dictionary.
+  // Returns the key object for storing into the hash table.
   // If allocations fails a failure object is returned.
   virtual Object* AsObject() = 0;
   // Required.
@@ -2495,6 +2542,9 @@
   void PixelArrayVerify();
 #endif  // DEBUG
 
+  // Maximal acceptable length for a pixel array.
+  static const int kMaxLength = 0x3fffffff;
+
   // PixelArray headers are not quadword aligned.
   static const int kExternalPointerOffset = Array::kAlignedSize;
   static const int kHeaderSize = kExternalPointerOffset + kPointerSize;
@@ -2505,6 +2555,200 @@
 };
 
 
+// An ExternalArray represents a fixed-size array of primitive values
+// which live outside the JavaScript heap. Its subclasses are used to
+// implement the CanvasArray types being defined in the WebGL
+// specification. As of this writing the first public draft is not yet
+// available, but Khronos members can access the draft at:
+//   https://cvs.khronos.org/svn/repos/3dweb/trunk/doc/spec/WebGL-spec.html
+//
+// The semantics of these arrays differ from CanvasPixelArray.
+// Out-of-range values passed to the setter are converted via a C
+// cast, not clamping. Out-of-range indices cause exceptions to be
+// raised rather than being silently ignored.
+class ExternalArray: public Array {
+ public:
+  // [external_pointer]: The pointer to the external memory area backing this
+  // external array.
+  DECL_ACCESSORS(external_pointer, void)  // Pointer to the data store.
+
+  // Casting.
+  static inline ExternalArray* cast(Object* obj);
+
+  // Maximal acceptable length for an external array.
+  static const int kMaxLength = 0x3fffffff;
+
+  // ExternalArray headers are not quadword aligned.
+  static const int kExternalPointerOffset = Array::kAlignedSize;
+  static const int kHeaderSize = kExternalPointerOffset + kPointerSize;
+  static const int kAlignedSize = OBJECT_SIZE_ALIGN(kHeaderSize);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalArray);
+};
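
The contrast with CanvasPixelArray mentioned above is easiest to see side by side. A standalone sketch: a pixel store clamps to [0, 255], while an external-array store converts with a plain C cast and wraps:

#include <cstdint>
#include <cstdio>

static uint8_t ClampedStore(int v) {  // CanvasPixelArray behavior
  if (v < 0) return 0;
  if (v > 255) return 255;
  return static_cast<uint8_t>(v);
}

static uint8_t CastStore(int v) {     // external array behavior
  return static_cast<uint8_t>(v);     // wraps modulo 256
}

int main() {
  printf("300 -> clamp %u, cast %u\n", ClampedStore(300), CastStore(300));
  printf(" -1 -> clamp %u, cast %u\n", ClampedStore(-1), CastStore(-1));
  // 300 -> clamp 255, cast 44;  -1 -> clamp 0, cast 255
  return 0;
}
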
+
+
+class ExternalByteArray: public ExternalArray {
+ public:
+  // Setter and getter.
+  inline int8_t get(int index);
+  inline void set(int index, int8_t value);
+
+  // This accessor applies the correct conversion from Smi, HeapNumber
+  // and undefined.
+  Object* SetValue(uint32_t index, Object* value);
+
+  // Casting.
+  static inline ExternalByteArray* cast(Object* obj);
+
+#ifdef DEBUG
+  void ExternalByteArrayPrint();
+  void ExternalByteArrayVerify();
+#endif  // DEBUG
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalByteArray);
+};
+
+
+class ExternalUnsignedByteArray: public ExternalArray {
+ public:
+  // Setter and getter.
+  inline uint8_t get(int index);
+  inline void set(int index, uint8_t value);
+
+  // This accessor applies the correct conversion from Smi, HeapNumber
+  // and undefined.
+  Object* SetValue(uint32_t index, Object* value);
+
+  // Casting.
+  static inline ExternalUnsignedByteArray* cast(Object* obj);
+
+#ifdef DEBUG
+  void ExternalUnsignedByteArrayPrint();
+  void ExternalUnsignedByteArrayVerify();
+#endif  // DEBUG
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUnsignedByteArray);
+};
+
+
+class ExternalShortArray: public ExternalArray {
+ public:
+  // Setter and getter.
+  inline int16_t get(int index);
+  inline void set(int index, int16_t value);
+
+  // This accessor applies the correct conversion from Smi, HeapNumber
+  // and undefined.
+  Object* SetValue(uint32_t index, Object* value);
+
+  // Casting.
+  static inline ExternalShortArray* cast(Object* obj);
+
+#ifdef DEBUG
+  void ExternalShortArrayPrint();
+  void ExternalShortArrayVerify();
+#endif  // DEBUG
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalShortArray);
+};
+
+
+class ExternalUnsignedShortArray: public ExternalArray {
+ public:
+  // Setter and getter.
+  inline uint16_t get(int index);
+  inline void set(int index, uint16_t value);
+
+  // This accessor applies the correct conversion from Smi, HeapNumber
+  // and undefined.
+  Object* SetValue(uint32_t index, Object* value);
+
+  // Casting.
+  static inline ExternalUnsignedShortArray* cast(Object* obj);
+
+#ifdef DEBUG
+  void ExternalUnsignedShortArrayPrint();
+  void ExternalUnsignedShortArrayVerify();
+#endif  // DEBUG
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUnsignedShortArray);
+};
+
+
+class ExternalIntArray: public ExternalArray {
+ public:
+  // Setter and getter.
+  inline int32_t get(int index);
+  inline void set(int index, int32_t value);
+
+  // This accessor applies the correct conversion from Smi, HeapNumber
+  // and undefined.
+  Object* SetValue(uint32_t index, Object* value);
+
+  // Casting.
+  static inline ExternalIntArray* cast(Object* obj);
+
+#ifdef DEBUG
+  void ExternalIntArrayPrint();
+  void ExternalIntArrayVerify();
+#endif  // DEBUG
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalIntArray);
+};
+
+
+class ExternalUnsignedIntArray: public ExternalArray {
+ public:
+  // Setter and getter.
+  inline uint32_t get(int index);
+  inline void set(int index, uint32_t value);
+
+  // This accessor applies the correct conversion from Smi, HeapNumber
+  // and undefined.
+  Object* SetValue(uint32_t index, Object* value);
+
+  // Casting.
+  static inline ExternalUnsignedIntArray* cast(Object* obj);
+
+#ifdef DEBUG
+  void ExternalUnsignedIntArrayPrint();
+  void ExternalUnsignedIntArrayVerify();
+#endif  // DEBUG
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalUnsignedIntArray);
+};
+
+
+class ExternalFloatArray: public ExternalArray {
+ public:
+  // Setter and getter.
+  inline float get(int index);
+  inline void set(int index, float value);
+
+  // This accessor applies the correct conversion from Smi, HeapNumber
+  // and undefined.
+  Object* SetValue(uint32_t index, Object* value);
+
+  // Casting.
+  static inline ExternalFloatArray* cast(Object* obj);
+
+#ifdef DEBUG
+  void ExternalFloatArrayPrint();
+  void ExternalFloatArrayVerify();
+#endif  // DEBUG
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalFloatArray);
+};
+
+
 // Code describes objects with on-the-fly generated machine code.
 class Code: public HeapObject {
  public:
@@ -2998,6 +3242,10 @@
 
   static inline Script* cast(Object* obj);
 
+  // If script source is an external string, check that the underlying
+  // resource is accessible. Otherwise, always return true.
+  inline bool HasValidSource();
+
 #ifdef DEBUG
   void ScriptPrint();
   void ScriptVerify();
@@ -3572,6 +3820,7 @@
   static const int kEntrySize = 2;
 };
 
+
 class CompilationCacheTable: public HashTable<CompilationCacheShape,
                                               HashTableKey*> {
  public:
@@ -3845,6 +4094,8 @@
   static const int kShortLengthShift = kHashShift + kShortStringTag;
   static const int kMediumLengthShift = kHashShift + kMediumStringTag;
   static const int kLongLengthShift = kHashShift + kLongStringTag;
+  // Maximal string length that can be stored in the hash/length field.
+  static const int kMaxLength = (1 << (32 - kLongLengthShift)) - 1;
 
   // Limit for truncation in short printing.
   static const int kMaxShortPrintLength = 1024;
diff --git a/src/parser.cc b/src/parser.cc
index 3b24687..02fcfdc 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -177,8 +177,8 @@
   Statement* ParseWithStatement(ZoneStringList* labels, bool* ok);
   CaseClause* ParseCaseClause(bool* default_seen_ptr, bool* ok);
   SwitchStatement* ParseSwitchStatement(ZoneStringList* labels, bool* ok);
-  LoopStatement* ParseDoStatement(ZoneStringList* labels, bool* ok);
-  LoopStatement* ParseWhileStatement(ZoneStringList* labels, bool* ok);
+  DoWhileStatement* ParseDoWhileStatement(ZoneStringList* labels, bool* ok);
+  WhileStatement* ParseWhileStatement(ZoneStringList* labels, bool* ok);
   Statement* ParseForStatement(ZoneStringList* labels, bool* ok);
   Statement* ParseThrowStatement(bool* ok);
   Expression* MakeCatchContext(Handle<String> id, VariableProxy* value);
@@ -675,9 +675,6 @@
   }
   int materialized_literal_count() { return materialized_literal_count_; }
 
-  void set_contains_array_literal() { contains_array_literal_ = true; }
-  bool contains_array_literal() { return contains_array_literal_; }
-
   void SetThisPropertyAssignmentInfo(
       bool only_this_property_assignments,
       bool only_simple_this_property_assignments,
@@ -700,17 +697,11 @@
   void AddProperty() { expected_property_count_++; }
   int expected_property_count() { return expected_property_count_; }
  private:
-  // Captures the number of nodes that need materialization in the
-  // function.  regexp literals, and boilerplate for object literals.
+  // Captures the number of literals that need materialization in the
+  // function.  Includes regexp literals and boilerplate for object
+  // and array literals.
   int materialized_literal_count_;
 
-  // Captures whether or not the function contains array literals.  If
-  // the function contains array literals, we have to allocate space
-  // for the array constructor in the literals array of the function.
-  // This array constructor is used when creating the actual array
-  // literals.
-  bool contains_array_literal_;
-
   // Properties count estimation.
   int expected_property_count_;
 
@@ -728,7 +719,6 @@
 
 TemporaryScope::TemporaryScope(Parser* parser)
   : materialized_literal_count_(0),
-    contains_array_literal_(false),
     expected_property_count_(0),
     only_this_property_assignments_(false),
     only_simple_this_property_assignments_(false),
@@ -1236,7 +1226,6 @@
           top_scope_,
           body.elements(),
           temp_scope.materialized_literal_count(),
-          temp_scope.contains_array_literal(),
           temp_scope.expected_property_count(),
           temp_scope.only_this_property_assignments(),
           temp_scope.only_simple_this_property_assignments(),
@@ -1692,7 +1681,7 @@
       break;
 
     case Token::DO:
-      stmt = ParseDoStatement(labels, ok);
+      stmt = ParseDoWhileStatement(labels, ok);
       break;
 
     case Token::WHILE:
@@ -1903,7 +1892,7 @@
   const int literals = fun->NumberOfLiterals();
   Handle<Code> code = Handle<Code>(fun->shared()->code());
   Handle<JSFunction> boilerplate =
-      Factory::NewFunctionBoilerplate(name, literals, false, code);
+      Factory::NewFunctionBoilerplate(name, literals, code);
 
   // Copy the function data to the boilerplate. Used by
   // builtins.cc:HandleApiCall to perform argument type checks and to
@@ -2361,7 +2350,7 @@
     exit->AddStatement(NEW(WithExitStatement()));
 
     // Return a try-finally statement.
-    TryFinally* wrapper = NEW(TryFinally(body, exit));
+    TryFinallyStatement* wrapper = NEW(TryFinallyStatement(body, exit));
     wrapper->set_escaping_targets(collector.targets());
     result->AddStatement(wrapper);
   }
@@ -2537,7 +2526,8 @@
   //   'try { try { } catch { } } finally { }'
 
   if (!is_pre_parsing_ && catch_block != NULL && finally_block != NULL) {
-    TryCatch* statement = NEW(TryCatch(try_block, catch_var, catch_block));
+    TryCatchStatement* statement =
+        NEW(TryCatchStatement(try_block, catch_var, catch_block));
     statement->set_escaping_targets(collector.targets());
     try_block = NEW(Block(NULL, 1, false));
     try_block->AddStatement(statement);
@@ -2548,11 +2538,11 @@
   if (!is_pre_parsing_) {
     if (catch_block != NULL) {
       ASSERT(finally_block == NULL);
-      result = NEW(TryCatch(try_block, catch_var, catch_block));
+      result = NEW(TryCatchStatement(try_block, catch_var, catch_block));
       result->set_escaping_targets(collector.targets());
     } else {
       ASSERT(finally_block != NULL);
-      result = NEW(TryFinally(try_block, finally_block));
+      result = NEW(TryFinallyStatement(try_block, finally_block));
       // Add the jump targets of the try block and the catch block.
       for (int i = 0; i < collector.targets()->length(); i++) {
         catch_collector.AddTarget(collector.targets()->at(i));
@@ -2565,11 +2555,12 @@
 }
 
 
-LoopStatement* Parser::ParseDoStatement(ZoneStringList* labels, bool* ok) {
+DoWhileStatement* Parser::ParseDoWhileStatement(ZoneStringList* labels,
+                                                bool* ok) {
   // DoStatement ::
   //   'do' Statement 'while' '(' Expression ')' ';'
 
-  LoopStatement* loop = NEW(LoopStatement(labels, LoopStatement::DO_LOOP));
+  DoWhileStatement* loop = NEW(DoWhileStatement(labels));
   Target target(this, loop);
 
   Expect(Token::DO, CHECK_OK);
@@ -2585,16 +2576,16 @@
   // ExpectSemicolon() functionality here.
   if (peek() == Token::SEMICOLON) Consume(Token::SEMICOLON);
 
-  if (loop) loop->Initialize(NULL, cond, NULL, body);
+  if (loop != NULL) loop->Initialize(cond, body);
   return loop;
 }
 
 
-LoopStatement* Parser::ParseWhileStatement(ZoneStringList* labels, bool* ok) {
+WhileStatement* Parser::ParseWhileStatement(ZoneStringList* labels, bool* ok) {
   // WhileStatement ::
   //   'while' '(' Expression ')' Statement
 
-  LoopStatement* loop = NEW(LoopStatement(labels, LoopStatement::WHILE_LOOP));
+  WhileStatement* loop = NEW(WhileStatement(labels));
   Target target(this, loop);
 
   Expect(Token::WHILE, CHECK_OK);
@@ -2603,7 +2594,7 @@
   Expect(Token::RPAREN, CHECK_OK);
   Statement* body = ParseStatement(NULL, CHECK_OK);
 
-  if (loop) loop->Initialize(NULL, cond, NULL, body);
+  if (loop != NULL) loop->Initialize(cond, body);
   return loop;
 }
 
@@ -2676,7 +2667,7 @@
   }
 
   // Standard 'for' loop
-  LoopStatement* loop = NEW(LoopStatement(labels, LoopStatement::FOR_LOOP));
+  ForStatement* loop = NEW(ForStatement(labels));
   Target target(this, loop);
 
   // Parsed initializer at this point.
@@ -3304,7 +3295,6 @@
   Expect(Token::RBRACK, CHECK_OK);
 
   // Update the scope information before the pre-parsing bailout.
-  temp_scope_->set_contains_array_literal();
   int literal_index = temp_scope_->NextMaterializedLiteralIndex();
 
   if (is_pre_parsing_) return NULL;
@@ -3634,7 +3624,6 @@
 
     int materialized_literal_count;
     int expected_property_count;
-    bool contains_array_literal;
     bool only_this_property_assignments;
     bool only_simple_this_property_assignments;
     Handle<FixedArray> this_property_assignments;
@@ -3648,12 +3637,10 @@
       only_this_property_assignments = false;
       only_simple_this_property_assignments = false;
       this_property_assignments = Factory::empty_fixed_array();
-      contains_array_literal = entry.contains_array_literal();
     } else {
       ParseSourceElements(&body, Token::RBRACE, CHECK_OK);
       materialized_literal_count = temp_scope.materialized_literal_count();
       expected_property_count = temp_scope.expected_property_count();
-      contains_array_literal = temp_scope.contains_array_literal();
       only_this_property_assignments =
           temp_scope.only_this_property_assignments();
       only_simple_this_property_assignments =
@@ -3669,7 +3656,6 @@
       entry.set_end_pos(end_pos);
       entry.set_literal_count(materialized_literal_count);
       entry.set_property_count(expected_property_count);
-      entry.set_contains_array_literal(contains_array_literal);
     }
 
     FunctionLiteral* function_literal =
@@ -3677,7 +3663,6 @@
                             top_scope_,
                             body.elements(),
                             materialized_literal_count,
-                            contains_array_literal,
                             expected_property_count,
                             only_this_property_assignments,
                             only_simple_this_property_assignments,
diff --git a/src/parser.h b/src/parser.h
index 86e1f74..7328e81 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -70,16 +70,9 @@
   int property_count() { return backing_[kPropertyCountOffset]; }
   void set_property_count(int value) { backing_[kPropertyCountOffset] = value; }
 
-  bool contains_array_literal() {
-    return backing_[kContainsArrayLiteralOffset] != 0;
-  }
-  void set_contains_array_literal(bool value) {
-    backing_[kContainsArrayLiteralOffset] = value ? 1 : 0;
-  }
-
   bool is_valid() { return backing_.length() > 0; }
 
-  static const int kSize = 5;
+  static const int kSize = 4;
 
  private:
   Vector<unsigned> backing_;
@@ -87,7 +80,6 @@
   static const int kEndPosOffset = 1;
   static const int kLiteralCountOffset = 2;
   static const int kPropertyCountOffset = 3;
-  static const int kContainsArrayLiteralOffset = 4;
 };
 
 
diff --git a/src/platform-nullos.cc b/src/platform-nullos.cc
index c0cf7f4..084880e 100644
--- a/src/platform-nullos.cc
+++ b/src/platform-nullos.cc
@@ -47,6 +47,13 @@
 }
 
 
+// Give V8 the opportunity to override the default fmod behavior.
+double modulo(double x, double y) {
+  UNIMPLEMENTED();
+  return 0;
+}
+
+
 // Initialize OS class early in the V8 startup.
 void OS::Setup() {
   // Seed the random number generator.
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index b8fe967..1e1245c 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -54,6 +54,12 @@
 namespace v8 {
 namespace internal {
 
+// ----------------------------------------------------------------------------
+// Math functions
+
+double modulo(double x, double y) {
+  return fmod(x, y);
+}
 
 // ----------------------------------------------------------------------------
 // POSIX date/time support.
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index d4a183d..54d7b37 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -223,6 +223,31 @@
   return ceil(x);
 }
 
+#ifdef _WIN64
+typedef double (*ModuloFunction)(double, double);
+
+// Defined in codegen-x64.cc.
+ModuloFunction CreateModuloFunction();
+
+double modulo(double x, double y) {
+  static ModuloFunction function = CreateModuloFunction();
+  return function(x, y);
+}
+#else  // Win32
+
+double modulo(double x, double y) {
+  // Workaround MS fmod bugs. ECMA-262 says:
+  // dividend is finite and divisor is an infinity => result equals dividend
+  // dividend is a zero and divisor is nonzero finite => result equals dividend
+  if (!(isfinite(x) && (!isfinite(y) && !isnan(y))) &&
+      !(x == 0 && (y != 0 && isfinite(y)))) {
+    x = fmod(x, y);
+  }
+  return x;
+}
+
+#endif  // _WIN64
+
 // ----------------------------------------------------------------------------
 // The Time class represents time on win32. A timestamp is represented as
 // a 64-bit integer in 100 nano-seconds since January 1, 1601 (UTC). JavaScript
@@ -1794,7 +1819,6 @@
         context.ContextFlags = CONTEXT_FULL;
         if (GetThreadContext(profiled_thread_, &context) != 0) {
 #if V8_HOST_ARCH_X64
-          UNIMPLEMENTED();
           sample.pc = context.Rip;
           sample.sp = context.Rsp;
           sample.fp = context.Rbp;
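
The Win32 modulo workaround above encodes two ECMA-262 requirements that the
Microsoft CRT's fmod violated at the time. A standalone check of the expected
behavior, assuming a C99-conformant fmod (illustrative, not part of the patch):

#include <cassert>
#include <cmath>
#include <limits>

int main() {
  const double inf = std::numeric_limits<double>::infinity();
  // Finite dividend, infinite divisor => result equals the dividend.
  assert(std::fmod(3.5, inf) == 3.5);
  // Zero dividend, nonzero finite divisor => result equals the dividend.
  assert(std::fmod(0.0, 2.0) == 0.0);
  return 0;
}
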
diff --git a/src/platform.h b/src/platform.h
index 76bf891..fefe4b8 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -111,6 +111,7 @@
 class Semaphore;
 
 double ceiling(double x);
+double modulo(double x, double y);
 
 // Forward declarations.
 class Socket;
diff --git a/src/prettyprinter.cc b/src/prettyprinter.cc
index bf66c4b..10c1ea8 100644
--- a/src/prettyprinter.cc
+++ b/src/prettyprinter.cc
@@ -147,46 +147,42 @@
 }
 
 
-void PrettyPrinter::VisitLoopStatement(LoopStatement* node) {
+void PrettyPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
   PrintLabels(node->labels());
-  switch (node->type()) {
-    case LoopStatement::DO_LOOP:
-      ASSERT(node->init() == NULL);
-      ASSERT(node->next() == NULL);
-      Print("do ");
-      Visit(node->body());
-      Print(" while (");
-      Visit(node->cond());
-      Print(");");
-      break;
+  Print("do ");
+  Visit(node->body());
+  Print(" while (");
+  Visit(node->cond());
+  Print(");");
+}
 
-    case LoopStatement::FOR_LOOP:
-      Print("for (");
-      if (node->init() != NULL) {
-        Visit(node->init());
-        Print(" ");
-      } else {
-        Print("; ");
-      }
-      if (node->cond() != NULL)
-        Visit(node->cond());
-      Print("; ");
-      if (node->next() != NULL)
-        Visit(node->next());  // prints extra ';', unfortunately
-      // to fix: should use Expression for next
-      Print(") ");
-      Visit(node->body());
-      break;
 
-    case LoopStatement::WHILE_LOOP:
-      ASSERT(node->init() == NULL);
-      ASSERT(node->next() == NULL);
-      Print("while (");
-      Visit(node->cond());
-      Print(") ");
-      Visit(node->body());
-      break;
+void PrettyPrinter::VisitWhileStatement(WhileStatement* node) {
+  PrintLabels(node->labels());
+  Print("while (");
+  Visit(node->cond());
+  Print(") ");
+  Visit(node->body());
+}
+
+
+void PrettyPrinter::VisitForStatement(ForStatement* node) {
+  PrintLabels(node->labels());
+  Print("for (");
+  if (node->init() != NULL) {
+    Visit(node->init());
+    Print(" ");
+  } else {
+    Print("; ");
   }
+  if (node->cond() != NULL) Visit(node->cond());
+  Print("; ");
+  if (node->next() != NULL) {
+    Visit(node->next());  // prints extra ';', unfortunately
+    // to fix: should use Expression for next
+  }
+  Print(") ");
+  Visit(node->body());
 }
 
 
@@ -201,7 +197,7 @@
 }
 
 
-void PrettyPrinter::VisitTryCatch(TryCatch* node) {
+void PrettyPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
   Print("try ");
   Visit(node->try_block());
   Print(" catch (");
@@ -211,7 +207,7 @@
 }
 
 
-void PrettyPrinter::VisitTryFinally(TryFinally* node) {
+void PrettyPrinter::VisitTryFinallyStatement(TryFinallyStatement* node) {
   Print("try ");
   Visit(node->try_block());
   Print(" finally ");
@@ -841,12 +837,28 @@
 }
 
 
-void AstPrinter::VisitLoopStatement(LoopStatement* node) {
-  IndentedScope indent(node->OperatorString());
+void AstPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
+  IndentedScope indent("DO");
+  PrintLabelsIndented(NULL, node->labels());
+  PrintIndentedVisit("BODY", node->body());
+  PrintIndentedVisit("COND", node->cond());
+}
+
+
+void AstPrinter::VisitWhileStatement(WhileStatement* node) {
+  IndentedScope indent("WHILE");
+  PrintLabelsIndented(NULL, node->labels());
+  PrintIndentedVisit("COND", node->cond());
+  PrintIndentedVisit("BODY", node->body());
+}
+
+
+void AstPrinter::VisitForStatement(ForStatement* node) {
+  IndentedScope indent("FOR");
   PrintLabelsIndented(NULL, node->labels());
   if (node->init()) PrintIndentedVisit("INIT", node->init());
   if (node->cond()) PrintIndentedVisit("COND", node->cond());
-  if (node->body()) PrintIndentedVisit("BODY", node->body());
+  PrintIndentedVisit("BODY", node->body());
   if (node->next()) PrintIndentedVisit("NEXT", node->next());
 }
 
@@ -859,7 +871,7 @@
 }
 
 
-void AstPrinter::VisitTryCatch(TryCatch* node) {
+void AstPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
   IndentedScope indent("TRY CATCH");
   PrintIndentedVisit("TRY", node->try_block());
   PrintIndentedVisit("CATCHVAR", node->catch_var());
@@ -867,7 +879,7 @@
 }
 
 
-void AstPrinter::VisitTryFinally(TryFinally* node) {
+void AstPrinter::VisitTryFinallyStatement(TryFinallyStatement* node) {
   IndentedScope indent("TRY FINALLY");
   PrintIndentedVisit("TRY", node->try_block());
   PrintIndentedVisit("FINALLY", node->finally_block());
@@ -1088,6 +1100,414 @@
 }
 
 
+TagScope::TagScope(JsonAstBuilder* builder, const char* name)
+    : builder_(builder), next_(builder->tag()), has_body_(false) {
+  if (next_ != NULL) {
+    next_->use();
+    builder->Print(",\n");
+  }
+  builder->set_tag(this);
+  builder->PrintIndented("[");
+  builder->Print("\"%s\"", name);
+  builder->increase_indent(JsonAstBuilder::kTagIndentSize);
+}
+
+
+TagScope::~TagScope() {
+  builder_->decrease_indent(JsonAstBuilder::kTagIndentSize);
+  if (has_body_) {
+    builder_->Print("\n");
+    builder_->PrintIndented("]");
+  } else {
+    builder_->Print("]");
+  }
+  builder_->set_tag(next_);
+}
+
+
+AttributesScope::AttributesScope(JsonAstBuilder* builder)
+    : builder_(builder), attribute_count_(0) {
+  builder->set_attributes(this);
+  builder->tag()->use();
+  builder->Print(",\n");
+  builder->PrintIndented("{");
+  builder->increase_indent(JsonAstBuilder::kAttributesIndentSize);
+}
+
+
+AttributesScope::~AttributesScope() {
+  builder_->decrease_indent(JsonAstBuilder::kAttributesIndentSize);
+  if (attribute_count_ > 1) {
+    builder_->Print("\n");
+    builder_->PrintIndented("}");
+  } else {
+    builder_->Print("}");
+  }
+  builder_->set_attributes(NULL);
+}
+
+
+const char* JsonAstBuilder::BuildProgram(FunctionLiteral* program) {
+  Init();
+  Visit(program);
+  Print("\n");
+  return Output();
+}
+
+
+void JsonAstBuilder::AddAttributePrefix(const char* name) {
+  if (attributes()->is_used()) {
+    Print(",\n");
+    PrintIndented("\"");
+  } else {
+    Print("\"");
+  }
+  Print("%s\":", name);
+  attributes()->use();
+}
+
+
+void JsonAstBuilder::AddAttribute(const char* name, Handle<String> value) {
+  SmartPointer<char> value_string = value->ToCString();
+  AddAttributePrefix(name);
+  Print("\"%s\"", *value_string);
+}
+
+
+void JsonAstBuilder::AddAttribute(const char* name, const char* value) {
+  AddAttributePrefix(name);
+  Print("\"%s\"", value);
+}
+
+
+void JsonAstBuilder::AddAttribute(const char* name, int value) {
+  AddAttributePrefix(name);
+  Print("%d", value);
+}
+
+
+void JsonAstBuilder::AddAttribute(const char* name, bool value) {
+  AddAttributePrefix(name);
+  Print(value ? "true" : "false");
+}
+
+
+void JsonAstBuilder::VisitBlock(Block* stmt) {
+  TagScope tag(this, "Block");
+  VisitStatements(stmt->statements());
+}
+
+
+void JsonAstBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
+  TagScope tag(this, "ExpressionStatement");
+  Visit(stmt->expression());
+}
+
+
+void JsonAstBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
+  TagScope tag(this, "EmptyStatement");
+}
+
+
+void JsonAstBuilder::VisitIfStatement(IfStatement* stmt) {
+  TagScope tag(this, "IfStatement");
+  Visit(stmt->condition());
+  Visit(stmt->then_statement());
+  Visit(stmt->else_statement());
+}
+
+
+void JsonAstBuilder::VisitContinueStatement(ContinueStatement* stmt) {
+  TagScope tag(this, "ContinueStatement");
+}
+
+
+void JsonAstBuilder::VisitBreakStatement(BreakStatement* stmt) {
+  TagScope tag(this, "BreakStatement");
+}
+
+
+void JsonAstBuilder::VisitReturnStatement(ReturnStatement* stmt) {
+  TagScope tag(this, "ReturnStatement");
+  Visit(stmt->expression());
+}
+
+
+void JsonAstBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) {
+  TagScope tag(this, "WithEnterStatement");
+  Visit(stmt->expression());
+}
+
+
+void JsonAstBuilder::VisitWithExitStatement(WithExitStatement* stmt) {
+  TagScope tag(this, "WithExitStatement");
+}
+
+
+void JsonAstBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
+  TagScope tag(this, "SwitchStatement");
+}
+
+
+void JsonAstBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
+  TagScope tag(this, "DoWhileStatement");
+  Visit(stmt->body());
+  Visit(stmt->cond());
+}
+
+
+void JsonAstBuilder::VisitWhileStatement(WhileStatement* stmt) {
+  TagScope tag(this, "WhileStatement");
+  Visit(stmt->cond());
+  Visit(stmt->body());
+}
+
+
+void JsonAstBuilder::VisitForStatement(ForStatement* stmt) {
+  TagScope tag(this, "ForStatement");
+  if (stmt->init() != NULL) Visit(stmt->init());
+  if (stmt->cond() != NULL) Visit(stmt->cond());
+  Visit(stmt->body());
+  if (stmt->next() != NULL) Visit(stmt->next());
+}
+
+
+void JsonAstBuilder::VisitForInStatement(ForInStatement* stmt) {
+  TagScope tag(this, "ForInStatement");
+  Visit(stmt->each());
+  Visit(stmt->enumerable());
+  Visit(stmt->body());
+}
+
+
+void JsonAstBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
+  TagScope tag(this, "TryCatchStatement");
+  Visit(stmt->try_block());
+  Visit(stmt->catch_var());
+  Visit(stmt->catch_block());
+}
+
+
+void JsonAstBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+  TagScope tag(this, "TryFinallyStatement");
+  Visit(stmt->try_block());
+  Visit(stmt->finally_block());
+}
+
+
+void JsonAstBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
+  TagScope tag(this, "DebuggerStatement");
+}
+
+
+void JsonAstBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
+  TagScope tag(this, "FunctionLiteral");
+  {
+    AttributesScope attributes(this);
+    AddAttribute("name", expr->name());
+  }
+  VisitDeclarations(expr->scope()->declarations());
+  VisitStatements(expr->body());
+}
+
+
+void JsonAstBuilder::VisitFunctionBoilerplateLiteral(
+    FunctionBoilerplateLiteral* expr) {
+  TagScope tag(this, "FunctionBoilerplateLiteral");
+}
+
+
+void JsonAstBuilder::VisitConditional(Conditional* expr) {
+  TagScope tag(this, "Conditional");
+}
+
+
+void JsonAstBuilder::VisitSlot(Slot* expr) {
+  TagScope tag(this, "Slot");
+  {
+    AttributesScope attributes(this);
+    switch (expr->type()) {
+      case Slot::PARAMETER:
+        AddAttribute("type", "PARAMETER");
+        break;
+      case Slot::LOCAL:
+        AddAttribute("type", "LOCAL");
+        break;
+      case Slot::CONTEXT:
+        AddAttribute("type", "CONTEXT");
+        break;
+      case Slot::LOOKUP:
+        AddAttribute("type", "LOOKUP");
+        break;
+      case Slot::GLOBAL:
+        AddAttribute("type", "GLOBAL");
+        break;
+    }
+    AddAttribute("index", expr->index());
+  }
+}
+
+
+void JsonAstBuilder::VisitVariableProxy(VariableProxy* expr) {
+  if (expr->var()->rewrite() == NULL) {
+    TagScope tag(this, "VariableProxy");
+    {
+      AttributesScope attributes(this);
+      AddAttribute("name", expr->name());
+      AddAttribute("mode", Variable::Mode2String(expr->var()->mode()));
+    }
+  } else {
+    Visit(expr->var()->rewrite());
+  }
+}
+
+
+void JsonAstBuilder::VisitLiteral(Literal* expr) {
+  TagScope tag(this, "Literal");
+  {
+    AttributesScope attributes(this);
+    Handle<Object> handle = expr->handle();
+    if (handle->IsString()) {
+      AddAttribute("handle", Handle<String>(String::cast(*handle)));
+    } else if (handle->IsSmi()) {
+      AddAttribute("handle", Smi::cast(*handle)->value());
+    }
+  }
+}
+
+
+void JsonAstBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
+  TagScope tag(this, "RegExpLiteral");
+}
+
+
+void JsonAstBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
+  TagScope tag(this, "ObjectLiteral");
+}
+
+
+void JsonAstBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
+  TagScope tag(this, "ArrayLiteral");
+}
+
+
+void JsonAstBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) {
+  TagScope tag(this, "CatchExtensionObject");
+  Visit(expr->key());
+  Visit(expr->value());
+}
+
+
+void JsonAstBuilder::VisitAssignment(Assignment* expr) {
+  TagScope tag(this, "Assignment");
+  {
+    AttributesScope attributes(this);
+    AddAttribute("op", Token::Name(expr->op()));
+  }
+  Visit(expr->target());
+  Visit(expr->value());
+}
+
+
+void JsonAstBuilder::VisitThrow(Throw* expr) {
+  TagScope tag(this, "Throw");
+  Visit(expr->exception());
+}
+
+
+void JsonAstBuilder::VisitProperty(Property* expr) {
+  TagScope tag(this, "Property");
+  {
+    AttributesScope attributes(this);
+    AddAttribute("type", expr->is_synthetic() ? "SYNTHETIC" : "NORMAL");
+  }
+  Visit(expr->obj());
+  Visit(expr->key());
+}
+
+
+void JsonAstBuilder::VisitCall(Call* expr) {
+  TagScope tag(this, "Call");
+  Visit(expr->expression());
+  VisitExpressions(expr->arguments());
+}
+
+
+void JsonAstBuilder::VisitCallNew(CallNew* expr) {
+  TagScope tag(this, "CallNew");
+  Visit(expr->expression());
+  VisitExpressions(expr->arguments());
+}
+
+
+void JsonAstBuilder::VisitCallRuntime(CallRuntime* expr) {
+  TagScope tag(this, "CallRuntime");
+  {
+    AttributesScope attributes(this);
+    AddAttribute("name", expr->name());
+  }
+  VisitExpressions(expr->arguments());
+}
+
+
+void JsonAstBuilder::VisitUnaryOperation(UnaryOperation* expr) {
+  TagScope tag(this, "UnaryOperation");
+  {
+    AttributesScope attributes(this);
+    AddAttribute("op", Token::Name(expr->op()));
+  }
+  Visit(expr->expression());
+}
+
+
+void JsonAstBuilder::VisitCountOperation(CountOperation* expr) {
+  TagScope tag(this, "CountOperation");
+  {
+    AttributesScope attributes(this);
+    AddAttribute("is_prefix", expr->is_prefix());
+    AddAttribute("op", Token::Name(expr->op()));
+  }
+  Visit(expr->expression());
+}
+
+
+void JsonAstBuilder::VisitBinaryOperation(BinaryOperation* expr) {
+  TagScope tag(this, "BinaryOperation");
+  {
+    AttributesScope attributes(this);
+    AddAttribute("op", Token::Name(expr->op()));
+  }
+  Visit(expr->left());
+  Visit(expr->right());
+}
+
+
+void JsonAstBuilder::VisitCompareOperation(CompareOperation* expr) {
+  TagScope tag(this, "CompareOperation");
+  {
+    AttributesScope attributes(this);
+    AddAttribute("op", Token::Name(expr->op()));
+  }
+  Visit(expr->left());
+  Visit(expr->right());
+}
+
+
+void JsonAstBuilder::VisitThisFunction(ThisFunction* expr) {
+  TagScope tag(this, "ThisFunction");
+}
+
+
+void JsonAstBuilder::VisitDeclaration(Declaration* decl) {
+  TagScope tag(this, "Declaration");
+  {
+    AttributesScope attributes(this);
+    AddAttribute("mode", Variable::Mode2String(decl->mode()));
+  }
+  Visit(decl->proxy());
+  if (decl->fun() != NULL) Visit(decl->fun());
+}
+
 
 #endif  // DEBUG
 
diff --git a/src/prettyprinter.h b/src/prettyprinter.h
index 8a6d1fb..f885cb3 100644
--- a/src/prettyprinter.h
+++ b/src/prettyprinter.h
@@ -46,14 +46,15 @@
   const char* PrintExpression(FunctionLiteral* program);
   const char* PrintProgram(FunctionLiteral* program);
 
+  void Print(const char* format, ...);
+
   // Print a node to stdout.
   static void PrintOut(AstNode* node);
 
   // Individual nodes
-#define DEF_VISIT(type)                         \
-  virtual void Visit##type(type* node);
-  AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
 
  private:
   char* output_;  // output string buffer
@@ -62,7 +63,6 @@
 
  protected:
   void Init();
-  void Print(const char* format, ...);
   const char* Output() const { return output_; }
 
   virtual void PrintStatements(ZoneList<Statement*>* statements);
@@ -85,10 +85,9 @@
   const char* PrintProgram(FunctionLiteral* program);
 
   // Individual nodes
-#define DEF_VISIT(type)                         \
-  virtual void Visit##type(type* node);
-  AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
  private:
   friend class IndentedScope;
   void PrintIndented(const char* txt);
@@ -112,6 +111,107 @@
   static int indent_;
 };
 
+
+// Forward declaration of helper classes.
+class TagScope;
+class AttributesScope;
+
+// Build a C string containing a JSON representation of a function's
+// AST. The representation is based on JsonML (www.jsonml.org).
+class JsonAstBuilder: public PrettyPrinter {
+ public:
+  JsonAstBuilder()
+      : indent_(0), top_tag_scope_(NULL), attributes_scope_(NULL) {
+  }
+  virtual ~JsonAstBuilder() {}
+
+  // Controls the indentation of subsequent lines of a tag body after
+  // the first line.
+  static const int kTagIndentSize = 2;
+
+  // Controls the indentation of subsequent lines of an attributes
+  // block's body after the first line.
+  static const int kAttributesIndentSize = 1;
+
+  // Construct a JSON representation of a function literal.
+  const char* BuildProgram(FunctionLiteral* program);
+
+  // Print text indented by the current indentation level.
+  void PrintIndented(const char* text) { Print("%*s%s", indent_, "", text); }
+
+  // Change the indentation level.
+  void increase_indent(int amount) { indent_ += amount; }
+  void decrease_indent(int amount) { indent_ -= amount; }
+
+  // The builder maintains a stack of opened AST node constructors.
+  // Each node constructor corresponds to a JsonML tag.
+  TagScope* tag() { return top_tag_scope_; }
+  void set_tag(TagScope* scope) { top_tag_scope_ = scope; }
+
+  // The builder maintains a pointer to the currently open attributes
+  // of the current AST node, or NULL if no attributes are open.
+  AttributesScope* attributes() { return attributes_scope_; }
+  void set_attributes(AttributesScope* scope) { attributes_scope_ = scope; }
+
+  // Add an attribute to the currently opened attributes.
+  void AddAttribute(const char* name, Handle<String> value);
+  void AddAttribute(const char* name, const char* value);
+  void AddAttribute(const char* name, int value);
+  void AddAttribute(const char* name, bool value);
+
+  // AST node visit functions.
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ private:
+  int indent_;
+  TagScope* top_tag_scope_;
+  AttributesScope* attributes_scope_;
+
+  // Utility function used by AddAttribute implementations.
+  void AddAttributePrefix(const char* name);
+};
+
+
+// The JSON AST builder keeps a stack of open element tags (AST node
+// constructors from the current iteration point to the root of the
+// AST).  TagScope is a helper class to manage the opening and closing
+// of tags, the indentation of their bodies, and the comma separation of
+// their contents.
+class TagScope BASE_EMBEDDED {
+ public:
+  TagScope(JsonAstBuilder* builder, const char* name);
+  ~TagScope();
+
+  void use() { has_body_ = true; }
+
+ private:
+  JsonAstBuilder* builder_;
+  TagScope* next_;
+  bool has_body_;
+};
+
+
+// AttributesScope is a helper class to manage the opening and closing
+// of attribute blocks, the indentation of their bodies, and the comma
+// separation of their contents. JsonAstBuilder::AddAttribute adds an
+// attribute to the currently open AttributesScope. They cannot be
+// nested so the builder keeps an optional single scope rather than a
+// stack.
+class AttributesScope BASE_EMBEDDED {
+ public:
+  explicit AttributesScope(JsonAstBuilder* builder);
+  ~AttributesScope();
+
+  bool is_used() { return attribute_count_ > 0; }
+  void use() { ++attribute_count_; }
+
+ private:
+  JsonAstBuilder* builder_;
+  int attribute_count_;
+};
+
 #endif  // DEBUG
 
 } }  // namespace v8::internal
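
To make the JsonML encoding concrete: building the one-statement program 1;
with this builder should yield roughly the following (derived from the
TagScope and AttributesScope printing rules above; treat the exact
indentation as approximate):

["FunctionLiteral",
  {"name":""},
  ["ExpressionStatement",
    ["Literal",
      {"handle":1}
    ]
  ]
]

Each AST node becomes a ["Tag", {attributes}, children...] element, which is
what the kTagIndentSize and kAttributesIndentSize constants above control.
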
diff --git a/src/rewriter.cc b/src/rewriter.cc
index 11fc071..de1b95b 100644
--- a/src/rewriter.cc
+++ b/src/rewriter.cc
@@ -100,7 +100,21 @@
 }
 
 
-void AstOptimizer::VisitLoopStatement(LoopStatement* node) {
+void AstOptimizer::VisitDoWhileStatement(DoWhileStatement* node) {
+  Visit(node->cond());
+  Visit(node->body());
+}
+
+
+void AstOptimizer::VisitWhileStatement(WhileStatement* node) {
+  has_function_literal_ = false;
+  Visit(node->cond());
+  node->may_have_function_literal_ = has_function_literal_;
+  Visit(node->body());
+}
+
+
+void AstOptimizer::VisitForStatement(ForStatement* node) {
   if (node->init() != NULL) {
     Visit(node->init());
   }
@@ -109,9 +123,7 @@
     Visit(node->cond());
     node->may_have_function_literal_ = has_function_literal_;
   }
-  if (node->body() != NULL) {
-    Visit(node->body());
-  }
+  Visit(node->body());
   if (node->next() != NULL) {
     Visit(node->next());
   }
@@ -125,14 +137,14 @@
 }
 
 
-void AstOptimizer::VisitTryCatch(TryCatch* node) {
+void AstOptimizer::VisitTryCatchStatement(TryCatchStatement* node) {
   Visit(node->try_block());
   Visit(node->catch_var());
   Visit(node->catch_block());
 }
 
 
-void AstOptimizer::VisitTryFinally(TryFinally* node) {
+void AstOptimizer::VisitTryFinallyStatement(TryFinallyStatement* node) {
   Visit(node->try_block());
   Visit(node->finally_block());
 }
@@ -553,6 +565,8 @@
   virtual void Visit##type(type* node);
   AST_NODE_LIST(DEF_VISIT)
 #undef DEF_VISIT
+
+  void VisitIterationStatement(IterationStatement* stmt);
 };
 
 
@@ -596,25 +610,35 @@
 }
 
 
-
-
-void Processor::VisitLoopStatement(LoopStatement* node) {
-  // Rewrite loop body statement.
+void Processor::VisitIterationStatement(IterationStatement* node) {
+  // Rewrite the body.
   bool set_after_loop = is_set_;
   Visit(node->body());
   is_set_ = is_set_ && set_after_loop;
 }
 
 
-void Processor::VisitForInStatement(ForInStatement* node) {
-  // Rewrite for-in body statement.
-  bool set_after_for = is_set_;
-  Visit(node->body());
-  is_set_ = is_set_ && set_after_for;
+void Processor::VisitDoWhileStatement(DoWhileStatement* node) {
+  VisitIterationStatement(node);
 }
 
 
-void Processor::VisitTryCatch(TryCatch* node) {
+void Processor::VisitWhileStatement(WhileStatement* node) {
+  VisitIterationStatement(node);
+}
+
+
+void Processor::VisitForStatement(ForStatement* node) {
+  VisitIterationStatement(node);
+}
+
+
+void Processor::VisitForInStatement(ForInStatement* node) {
+  VisitIterationStatement(node);
+}
+
+
+void Processor::VisitTryCatchStatement(TryCatchStatement* node) {
   // Rewrite both try and catch blocks (reversed order).
   bool set_after_catch = is_set_;
   Visit(node->catch_block());
@@ -626,7 +650,7 @@
 }
 
 
-void Processor::VisitTryFinally(TryFinally* node) {
+void Processor::VisitTryFinallyStatement(TryFinallyStatement* node) {
   // Rewrite both try and finally block (reversed order).
   Visit(node->finally_block());
   bool save = in_try_;
diff --git a/src/runtime.cc b/src/runtime.cc
index 4e1940d..8fd62c9 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -34,18 +34,17 @@
 #include "arguments.h"
 #include "compiler.h"
 #include "cpu.h"
-#include "dateparser.h"
 #include "dateparser-inl.h"
 #include "debug.h"
 #include "execution.h"
 #include "jsregexp.h"
+#include "parser.h"
 #include "platform.h"
 #include "runtime.h"
 #include "scopeinfo.h"
-#include "v8threads.h"
 #include "smart-pointer.h"
-#include "parser.h"
 #include "stub-cache.h"
+#include "v8threads.h"
 
 namespace v8 {
 namespace internal {
@@ -157,7 +156,7 @@
 
   // Deep copy local elements.
   // Pixel elements cannot be created using an object literal.
-  ASSERT(!copy->HasPixelElements());
+  ASSERT(!copy->HasPixelElements() && !copy->HasExternalArrayElements());
   switch (copy->GetElementsKind()) {
     case JSObject::FAST_ELEMENTS: {
       FixedArray* elements = FixedArray::cast(copy->elements());
@@ -522,7 +521,7 @@
   RUNTIME_ASSERT(type ==  FUNCTION_TEMPLATE_INFO_TYPE ||
                  type ==  OBJECT_TEMPLATE_INFO_TYPE);
   RUNTIME_ASSERT(offset > 0);
-  if (type ==  FUNCTION_TEMPLATE_INFO_TYPE) {
+  if (type == FUNCTION_TEMPLATE_INFO_TYPE) {
     RUNTIME_ASSERT(offset < FunctionTemplateInfo::kSize);
   } else {
     RUNTIME_ASSERT(offset < ObjectTemplateInfo::kSize);
@@ -578,8 +577,8 @@
   HandleScope scope;
   Handle<GlobalObject> global = Handle<GlobalObject>(Top::context()->global());
 
-  CONVERT_ARG_CHECKED(FixedArray, pairs, 0);
-  Handle<Context> context = args.at<Context>(1);
+  Handle<Context> context = args.at<Context>(0);
+  CONVERT_ARG_CHECKED(FixedArray, pairs, 1);
   bool is_eval = Smi::cast(args[2])->value() == 1;
 
   // Compute the property attributes. According to ECMA-262, section
@@ -3252,8 +3251,8 @@
       } else {
         escaped_length += 3;
       }
-      // We don't allow strings that are longer than Smi range.
-      if (!Smi::IsValid(escaped_length)) {
+      // We don't allow strings that are longer than the maximal string length.
+      if (escaped_length > String::kMaxLength) {
         Top::context()->mark_out_of_memory();
         return Failure::OutOfMemoryException();
       }
@@ -3584,6 +3583,36 @@
   return ConvertCase<unibrow::ToUppercase>(args, &to_upper_mapping);
 }
 
+static inline bool IsTrimWhiteSpace(unibrow::uchar c) {
+  return unibrow::WhiteSpace::Is(c) || c == 0x200b;
+}
+
+static Object* Runtime_StringTrim(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 3);
+
+  CONVERT_CHECKED(String, s, args[0]);
+  CONVERT_BOOLEAN_CHECKED(trimLeft, args[1]);
+  CONVERT_BOOLEAN_CHECKED(trimRight, args[2]);
+
+  s->TryFlattenIfNotFlat();
+  int length = s->length();
+
+  int left = 0;
+  if (trimLeft) {
+    while (left < length && IsTrimWhiteSpace(s->Get(left))) {
+      left++;
+    }
+  }
+
+  int right = length;
+  if (trimRight) {
+    while (right > left && IsTrimWhiteSpace(s->Get(right - 1))) {
+      right--;
+    }
+  }
+  return s->Slice(left, right);
+}
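
The trim loops above are a two-pointer scan over the flattened string. A
self-contained sketch of the same algorithm over std::u16string, with a
stand-in whitespace predicate (the runtime's defers to unibrow::WhiteSpace,
plus U+200B as above):

#include <string>

// Stand-in predicate; the runtime version defers to unibrow::WhiteSpace.
static bool IsTrimWhiteSpaceChar(char16_t c) {
  return c == u' ' || c == u'\t' || c == u'\n' || c == u'\r' || c == 0x200b;
}

std::u16string Trim(const std::u16string& s, bool trim_left, bool trim_right) {
  size_t left = 0;
  size_t right = s.size();
  if (trim_left) {
    while (left < right && IsTrimWhiteSpaceChar(s[left])) left++;
  }
  if (trim_right) {
    while (right > left && IsTrimWhiteSpaceChar(s[right - 1])) right--;
  }
  return s.substr(left, right - left);  // the runtime returns a slice instead
}
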
 
 bool Runtime::IsUpperCaseChar(uint16_t ch) {
   unibrow::uchar chars[unibrow::ToUppercase::kMaxWidth];
@@ -3713,14 +3742,7 @@
   CONVERT_DOUBLE_CHECKED(x, args[0]);
   CONVERT_DOUBLE_CHECKED(y, args[1]);
 
-#if defined WIN32 || defined _WIN64
-  // Workaround MS fmod bugs. ECMA-262 says:
-  // dividend is finite and divisor is an infinity => result equals dividend
-  // dividend is a zero and divisor is nonzero finite => result equals dividend
-  if (!(isfinite(x) && (!isfinite(y) && !isnan(y))) &&
-      !(x == 0 && (y != 0 && isfinite(y))))
-#endif
-  x = fmod(x, y);
+  x = modulo(x, y);
   // NewNumberFromDouble may return a Smi instead of a Number object
   return Heap::NewNumberFromDouble(x);
 }
@@ -3804,10 +3826,6 @@
     } else if (elt->IsString()) {
       String* element = String::cast(elt);
       int element_length = element->length();
-      if (!Smi::IsValid(element_length + position)) {
-        Top::context()->mark_out_of_memory();
-        return Failure::OutOfMemoryException();
-      }
       position += element_length;
       if (ascii && !element->IsAsciiRepresentation()) {
         ascii = false;
@@ -3815,6 +3833,10 @@
     } else {
       return Top::Throw(Heap::illegal_argument_symbol());
     }
+    if (position > String::kMaxLength) {
+      Top::context()->mark_out_of_memory();
+      return Failure::OutOfMemoryException();
+    }
   }
 
   int length = position;
@@ -4338,8 +4360,8 @@
 static Object* Runtime_NewClosure(Arguments args) {
   HandleScope scope;
   ASSERT(args.length() == 2);
-  CONVERT_ARG_CHECKED(JSFunction, boilerplate, 0);
-  CONVERT_ARG_CHECKED(Context, context, 1);
+  CONVERT_ARG_CHECKED(Context, context, 0);
+  CONVERT_ARG_CHECKED(JSFunction, boilerplate, 1);
 
   Handle<JSFunction> result =
       Factory::NewFunctionFromBoilerplate(boilerplate, context);
@@ -5244,6 +5266,47 @@
 };
 
 
+template<class ExternalArrayClass, class ElementType>
+static uint32_t IterateExternalArrayElements(Handle<JSObject> receiver,
+                                             bool elements_are_ints,
+                                             bool elements_are_guaranteed_smis,
+                                             uint32_t range,
+                                             ArrayConcatVisitor* visitor) {
+  Handle<ExternalArrayClass> array(
+      ExternalArrayClass::cast(receiver->elements()));
+  uint32_t len = Min(static_cast<uint32_t>(array->length()), range);
+
+  if (visitor != NULL) {
+    if (elements_are_ints) {
+      if (elements_are_guaranteed_smis) {
+        for (uint32_t j = 0; j < len; j++) {
+          Handle<Smi> e(Smi::FromInt(static_cast<int>(array->get(j))));
+          visitor->visit(j, e);
+        }
+      } else {
+        for (uint32_t j = 0; j < len; j++) {
+          int64_t val = static_cast<int64_t>(array->get(j));
+          if (Smi::IsValid(static_cast<intptr_t>(val))) {
+            Handle<Smi> e(Smi::FromInt(static_cast<int>(val)));
+            visitor->visit(j, e);
+          } else {
+            Handle<Object> e(
+                Heap::AllocateHeapNumber(static_cast<ElementType>(val)));
+            visitor->visit(j, e);
+          }
+        }
+      }
+    } else {
+      for (uint32_t j = 0; j < len; j++) {
+        Handle<Object> e(Heap::AllocateHeapNumber(array->get(j)));
+        visitor->visit(j, e);
+      }
+    }
+  }
+
+  return len;
+}
+
 /**
  * A helper function that visits elements of a JSObject. Only elements
  * whose index between 0 and range (exclusive) are visited.
@@ -5293,6 +5356,48 @@
       }
       break;
     }
+    case JSObject::EXTERNAL_BYTE_ELEMENTS: {
+      num_of_elements =
+          IterateExternalArrayElements<ExternalByteArray, int8_t>(
+              receiver, true, true, range, visitor);
+      break;
+    }
+    case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS: {
+      num_of_elements =
+          IterateExternalArrayElements<ExternalUnsignedByteArray, uint8_t>(
+              receiver, true, true, range, visitor);
+      break;
+    }
+    case JSObject::EXTERNAL_SHORT_ELEMENTS: {
+      num_of_elements =
+          IterateExternalArrayElements<ExternalShortArray, int16_t>(
+              receiver, true, true, range, visitor);
+      break;
+    }
+    case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS: {
+      num_of_elements =
+          IterateExternalArrayElements<ExternalUnsignedShortArray, uint16_t>(
+              receiver, true, true, range, visitor);
+      break;
+    }
+    case JSObject::EXTERNAL_INT_ELEMENTS: {
+      num_of_elements =
+          IterateExternalArrayElements<ExternalIntArray, int32_t>(
+              receiver, true, false, range, visitor);
+      break;
+    }
+    case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS: {
+      num_of_elements =
+          IterateExternalArrayElements<ExternalUnsignedIntArray, uint32_t>(
+              receiver, true, false, range, visitor);
+      break;
+    }
+    case JSObject::EXTERNAL_FLOAT_ELEMENTS: {
+      num_of_elements =
+          IterateExternalArrayElements<ExternalFloatArray, float>(
+              receiver, false, false, range, visitor);
+      break;
+    }
     case JSObject::DICTIONARY_ELEMENTS: {
       Handle<NumberDictionary> dict(receiver->element_dictionary());
       uint32_t capacity = dict->Capacity();
@@ -7630,6 +7735,18 @@
 }
 
 
+// Returns V8 version as a string.
+static Object* Runtime_GetV8Version(Arguments args) {
+  ASSERT_EQ(args.length(), 0);
+
+  NoHandleAllocation ha;
+
+  const char* version_string = v8::V8::GetVersion();
+
+  return Heap::AllocateStringFromAscii(CStrVector(version_string), NOT_TENURED);
+}
+
+
 static Object* Runtime_Abort(Arguments args) {
   ASSERT(args.length() == 2);
   OS::PrintError("abort: %s\n", reinterpret_cast<char*>(args[0]) +
diff --git a/src/runtime.h b/src/runtime.h
index afa278b..6b1ce48 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -152,6 +152,7 @@
   F(StringSlice, 3, 1) \
   F(StringReplaceRegExpWithString, 4, 1) \
   F(StringMatch, 3, 1) \
+  F(StringTrim, 3, 1) \
   \
   /* Numbers */ \
   F(NumberToRadixString, 2, 1) \
@@ -174,6 +175,7 @@
   F(FunctionIsBuiltin, 1, 1) \
   F(GetScript, 1, 1) \
   F(CollectStackTrace, 2, 1) \
+  F(GetV8Version, 0, 1) \
   \
   F(ClassOf, 1, 1) \
   F(SetCode, 2, 1) \
diff --git a/src/serialize.cc b/src/serialize.cc
index e0ee4bd..6ff1d7f 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -38,6 +38,7 @@
 #include "serialize.h"
 #include "stub-cache.h"
 #include "v8threads.h"
+#include "top.h"
 
 namespace v8 {
 namespace internal {
@@ -612,12 +613,23 @@
   }
 
   // Top addresses
-  const char* top_address_format = "Top::get_address_from_id(%i)";
-  size_t top_format_length = strlen(top_address_format);
+  const char* top_address_format = "Top::%s";
+
+  const char* AddressNames[] = {
+#define C(name) #name,
+    TOP_ADDRESS_LIST(C)
+    TOP_ADDRESS_LIST_PROF(C)
+    NULL
+#undef C
+  };
+
+  size_t top_format_length = strlen(top_address_format) - 2;
   for (uint16_t i = 0; i < Top::k_top_address_count; ++i) {
-    Vector<char> name = Vector<char>::New(top_format_length + 1);
+    const char* address_name = AddressNames[i];
+    Vector<char> name =
+        Vector<char>::New(top_format_length + strlen(address_name) + 1);
     const char* chars = name.start();
-    OS::SNPrintF(name, top_address_format, i);
+    OS::SNPrintF(name, top_address_format, address_name);
     Add(Top::get_address_from_id((Top::AddressId)i), TOP_ADDRESS, i, chars);
   }
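
The AddressNames table is built with the usual X-macro stringizing trick: each
list entry expands to its own name as a string literal. A compilable sketch
with a hypothetical two-entry list standing in for TOP_ADDRESS_LIST:

#define DEMO_ADDRESS_LIST(C) \
  C(handler_address)         \
  C(pending_exception_address)

static const char* kDemoNames[] = {
#define C(name) #name,
  DEMO_ADDRESS_LIST(C)
#undef C
  NULL
};
// kDemoNames is { "handler_address", "pending_exception_address", NULL }.
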
 
@@ -922,7 +934,9 @@
       serializer_(serializer),
       reference_encoder_(serializer->reference_encoder_),
       offsets_(8),
-      addresses_(8) {
+      addresses_(8),
+      offsets_32_bit_(0),
+      data_32_bit_(0) {
   }
 
   virtual void VisitPointers(Object** start, Object** end) {
@@ -939,8 +953,12 @@
     ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
     Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
     Address encoded_target = serializer_->GetSavedAddress(target);
-    offsets_.Add(rinfo->target_address_address() - obj_address_);
-    addresses_.Add(encoded_target);
+    // All calls and jumps are to code objects that encode into 32 bits.
+    offsets_32_bit_.Add(rinfo->target_address_address() - obj_address_);
+    uint32_t small_target =
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(encoded_target));
+    ASSERT(reinterpret_cast<uintptr_t>(encoded_target) == small_target);
+    data_32_bit_.Add(small_target);
   }
 
 
@@ -965,6 +983,10 @@
     for (int i = 0; i < offsets_.length(); i++) {
       memcpy(start_address + offsets_[i], &addresses_[i], sizeof(Address));
     }
+    for (int i = 0; i < offsets_32_bit_.length(); i++) {
+      memcpy(start_address + offsets_32_bit_[i], &data_32_bit_[i],
+             sizeof(uint32_t));
+    }
   }
 
  private:
@@ -973,6 +995,10 @@
   ExternalReferenceEncoder* reference_encoder_;
   List<int> offsets_;
   List<Address> addresses_;
+  // Some relocation updates are only 32 bits wide even on a 64-bit
+  // platform, so we keep a separate list of them.
+  List<int> offsets_32_bit_;
+  List<uint32_t> data_32_bit_;
 };
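
The ASSERT in VisitCodeTarget above is the standard check that a pointer-sized
encoded address narrows to 32 bits losslessly, which is possible because code
objects are kept within a 32-bit-reachable range. A minimal sketch of the
pattern:

#include <cassert>
#include <stdint.h>

uint32_t EncodeSmall(uintptr_t encoded) {
  uint32_t small_target = static_cast<uint32_t>(encoded);
  assert(static_cast<uintptr_t>(small_target) == encoded);  // fits in 32 bits
  return small_target;
}
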
 
 
@@ -1432,7 +1458,9 @@
 
 void Deserializer::VisitCodeTarget(RelocInfo* rinfo) {
   ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
-  Address encoded_address = reinterpret_cast<Address>(rinfo->target_object());
+  // On all platforms, the encoded code object address is only 32 bits.
+  Address encoded_address = reinterpret_cast<Address>(Memory::uint32_at(
+      reinterpret_cast<Address>(rinfo->target_object_address())));
   Code* target_object = reinterpret_cast<Code*>(Resolve(encoded_address));
   rinfo->set_target_address(target_object->instruction_start());
 }
@@ -1663,7 +1691,6 @@
 
   // Encoded addresses of HeapObjects always have 'HeapObject' tags.
   ASSERT(o->IsHeapObject());
-
   switch (GetSpace(encoded)) {
     // For Map space and Old space, we cache the known Pages in map_pages,
     // old_pointer_pages and old_data_pages. Even though MapSpace keeps a list
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
index da72497..847bb9a 100644
--- a/src/spaces-inl.h
+++ b/src/spaces-inl.h
@@ -360,6 +360,13 @@
   return obj;
 }
 
+
+bool FreeListNode::IsFreeListNode(HeapObject* object) {
+  return object->map() == Heap::raw_unchecked_byte_array_map()
+      || object->map() == Heap::raw_unchecked_one_pointer_filler_map()
+      || object->map() == Heap::raw_unchecked_two_pointer_filler_map();
+}
+
 } }  // namespace v8::internal
 
 #endif  // V8_SPACES_INL_H_
diff --git a/src/spaces.cc b/src/spaces.cc
index 43abaa4..bd58742 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -982,7 +982,7 @@
   // To support fast containment testing in the new space, the size of
   // this chunk must be a power of two and it must be aligned to its size.
   int initial_semispace_capacity = Heap::InitialSemiSpaceSize();
-  int maximum_semispace_capacity = Heap::SemiSpaceSize();
+  int maximum_semispace_capacity = Heap::MaxSemiSpaceSize();
 
   ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
   ASSERT(IsPowerOf2(maximum_semispace_capacity));
@@ -998,7 +998,7 @@
 #undef SET_NAME
 #endif
 
-  ASSERT(size == 2 * maximum_semispace_capacity);
+  ASSERT(size == 2 * Heap::ReservedSemiSpaceSize());
   ASSERT(IsAddressAligned(start, size, 0));
 
   if (!to_space_.Setup(start,
@@ -1540,8 +1540,7 @@
 
 
 Address FreeListNode::next() {
-  ASSERT(map() == Heap::raw_unchecked_byte_array_map() ||
-         map() == Heap::raw_unchecked_two_pointer_filler_map());
+  ASSERT(IsFreeListNode(this));
   if (map() == Heap::raw_unchecked_byte_array_map()) {
     ASSERT(Size() >= kNextOffset + kPointerSize);
     return Memory::Address_at(address() + kNextOffset);
@@ -1552,8 +1551,7 @@
 
 
 void FreeListNode::set_next(Address next) {
-  ASSERT(map() == Heap::raw_unchecked_byte_array_map() ||
-         map() == Heap::raw_unchecked_two_pointer_filler_map());
+  ASSERT(IsFreeListNode(this));
   if (map() == Heap::raw_unchecked_byte_array_map()) {
     ASSERT(Size() >= kNextOffset + kPointerSize);
     Memory::Address_at(address() + kNextOffset) = next;
diff --git a/src/spaces.h b/src/spaces.h
index 76b88ef..9e1d873 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -862,6 +862,10 @@
   // Current capacity without growing (Size() + Available() + Waste()).
   int Capacity() { return accounting_stats_.Capacity(); }
 
+  // Total amount of memory committed for this space.  For paged
+  // spaces this equals the capacity.
+  int CommittedMemory() { return Capacity(); }
+
   // Available bytes without growing.
   int Available() { return accounting_stats_.Available(); }
 
@@ -1252,11 +1256,19 @@
 
   // Return the allocated bytes in the active semispace.
   virtual int Size() { return top() - bottom(); }
+
   // Return the current capacity of a semispace.
   int Capacity() {
     ASSERT(to_space_.Capacity() == from_space_.Capacity());
     return to_space_.Capacity();
   }
+
+  // Return the total amount of memory committed for new space.
+  int CommittedMemory() {
+    if (from_space_.is_committed()) return 2 * Capacity();
+    return Capacity();
+  }
+
   // Return the available bytes without growing in the active semispace.
   int Available() { return Capacity() - Size(); }
 
@@ -1423,6 +1435,8 @@
     return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
   }
 
+  static inline bool IsFreeListNode(HeapObject* object);
+
   // Set the size in bytes, which can be read with HeapObject::Size().  This
   // function also writes a map to the first word of the block so that it
   // looks like a heap object to the garbage collector and heap iteration
diff --git a/src/string.js b/src/string.js
index fbdc307..d2d6e96 100644
--- a/src/string.js
+++ b/src/string.js
@@ -680,6 +680,18 @@
   return %StringToUpperCase(ToString(this));
 }
 
+// ES5, 15.5.4.20
+function StringTrim() {
+  return %StringTrim(ToString(this), true, true);
+}
+
+function StringTrimLeft() {
+  return %StringTrim(ToString(this), true, false);
+}
+
+function StringTrimRight() {
+  return %StringTrim(ToString(this), false, true);
+}
 
 // ECMA-262, section 15.5.3.2
 function StringFromCharCode(code) {
@@ -855,6 +867,9 @@
     "toLocaleLowerCase", StringToLocaleLowerCase,
     "toUpperCase", StringToUpperCase,
     "toLocaleUpperCase", StringToLocaleUpperCase,
+    "trim", StringTrim,
+    "trimLeft", StringTrimLeft,
+    "trimRight", StringTrimRight,
     "link", StringLink,
     "anchor", StringAnchor,
     "fontcolor", StringFontcolor,
diff --git a/src/top.cc b/src/top.cc
index aa7788e..bb2dea4 100644
--- a/src/top.cc
+++ b/src/top.cc
@@ -54,6 +54,7 @@
   return top_addresses[id];
 }
 
+
 char* Top::Iterate(ObjectVisitor* v, char* thread_storage) {
   ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(thread_storage);
   Iterate(v, thread);
@@ -493,11 +494,17 @@
 
 bool Top::MayNamedAccess(JSObject* receiver, Object* key, v8::AccessType type) {
   ASSERT(receiver->IsAccessCheckNeeded());
+
+  // The callers of this method are not expecting a GC.
+  AssertNoAllocation no_gc;
+
+  // Skip security checks for access to hidden properties.  Note that
+  // we do not require the existence of a context in this case.
+  if (key == Heap::hidden_symbol()) return true;
+
   // Check for compatibility between the security tokens in the
   // current lexical context and the accessed object.
   ASSERT(Top::context());
-  // The callers of this method are not expecting a GC.
-  AssertNoAllocation no_gc;
 
   MayAccessDecision decision = MayAccessPreCheck(receiver, type);
   if (decision != UNKNOWN) return decision == YES;
diff --git a/src/usage-analyzer.cc b/src/usage-analyzer.cc
index 23a4d9f..74cf982 100644
--- a/src/usage-analyzer.cc
+++ b/src/usage-analyzer.cc
@@ -159,14 +159,25 @@
 }
 
 
-void UsageComputer::VisitLoopStatement(LoopStatement* node) {
-  if (node->init() != NULL)
-    Visit(node->init());
+void UsageComputer::VisitDoWhileStatement(DoWhileStatement* node) {
+  WeightScaler ws(this, 10.0);
+  Read(node->cond());
+  Visit(node->body());
+}
+
+
+void UsageComputer::VisitWhileStatement(WhileStatement* node) {
+  WeightScaler ws(this, 10.0);
+  Read(node->cond());
+  Visit(node->body());
+}
+
+
+void UsageComputer::VisitForStatement(ForStatement* node) {
+  if (node->init() != NULL) Visit(node->init());
   { WeightScaler ws(this, 10.0);  // executed in each iteration
-    if (node->cond() != NULL)
-      Read(node->cond());
-    if (node->next() != NULL)
-      Visit(node->next());
+    if (node->cond() != NULL) Read(node->cond());
+    if (node->next() != NULL) Visit(node->next());
     Visit(node->body());
   }
 }
@@ -180,7 +191,7 @@
 }
 
 
-void UsageComputer::VisitTryCatch(TryCatch* node) {
+void UsageComputer::VisitTryCatchStatement(TryCatchStatement* node) {
   Visit(node->try_block());
   { WeightScaler ws(this, 0.25);
     Write(node->catch_var());
@@ -189,7 +200,7 @@
 }
 
 
-void UsageComputer::VisitTryFinally(TryFinally* node) {
+void UsageComputer::VisitTryFinallyStatement(TryFinallyStatement* node) {
   Visit(node->try_block());
   Visit(node->finally_block());
 }
diff --git a/src/utils.h b/src/utils.h
index 275dbb5..f4a0598 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -36,7 +36,8 @@
 // ----------------------------------------------------------------------------
 // General helper functions
 
-// Returns true iff x is a power of 2.  Does not work for zero.
+// Returns true iff x is a power of 2 (or zero). Cannot be used with the
+// maximally negative value of the type T (the -1 overflows).
 template <typename T>
 static inline bool IsPowerOf2(T x) {
   return (x & (x - 1)) == 0;
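
The identity behind IsPowerOf2: x & (x - 1) clears the lowest set bit, so the
expression is zero iff at most one bit is set. A quick standalone check
(illustrative):

#include <cassert>

int main() {
  assert((8 & (8 - 1)) == 0);   // 8 is a power of two
  assert((6 & (6 - 1)) != 0);   // 6 & 5 == 4, not a power of two
  assert((0 & (0 - 1)) == 0);   // zero also satisfies the test, as documented
  return 0;
}
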
diff --git a/src/v8-counters.h b/src/v8-counters.h
index e360b55..b3f29f5 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -52,8 +52,8 @@
   HT(variable_allocation, V8.VariableAllocation)                      \
   HT(ast_optimization, V8.ASTOptimization)                            \
   HT(code_generation, V8.CodeGeneration)                              \
-  HT(deferred_code_generation, V8.DeferredCodeGeneration)             \
-  HT(code_creation, V8.CodeCreation)
+  HT(deferred_code_generation, V8.DeferredCodeGeneration)
+
 
 // WARNING: STATS_COUNTER_LIST_* is a very large macro that is causing MSVC
 // Intellisense to crash.  It was broken into two macros (each of length 40
@@ -118,6 +118,7 @@
   SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi)                \
   SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol)          \
   SC(keyed_load_generic_slow, V8.KeyedLoadGenericSlow)              \
+  SC(keyed_load_external_array_slow, V8.KeyedLoadExternalArraySlow) \
   /* Count how much the monomorphic keyed-load stubs are hit. */    \
   SC(keyed_load_function_prototype, V8.KeyedLoadFunctionPrototype)  \
   SC(keyed_load_string_length, V8.KeyedLoadStringLength)            \
@@ -150,7 +151,9 @@
   SC(reloc_info_count, V8.RelocInfoCount)                           \
   SC(reloc_info_size, V8.RelocInfoSize)                             \
   SC(zone_segment_bytes, V8.ZoneSegmentBytes)                       \
-  SC(compute_entry_frame, V8.ComputeEntryFrame)
+  SC(compute_entry_frame, V8.ComputeEntryFrame)                     \
+  SC(generic_binary_stub_calls, V8.GenericBinaryStubCalls)          \
+  SC(generic_binary_stub_calls_regs, V8.GenericBinaryStubCallsRegs)
 
 
 // This file contains all the v8 counters that are in use.
diff --git a/src/v8.cc b/src/v8.cc
index f0115ec..3c70ee9 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -169,22 +169,23 @@
 }
 
 
-bool V8::IdleNotification(bool is_high_priority) {
+bool V8::IdleNotification() {
   // Returning true tells the caller that there is no need to call
   // IdleNotification again.
   if (!FLAG_use_idle_notification) return true;
-  // Ignore high priority instances of V8.
-  if (is_high_priority) return true;
 
   // Tell the heap that it may want to adjust.
   return Heap::IdleNotification();
 }
 
+static const uint32_t kRandomPositiveSmiMax = 0x3fffffff;
 
 Smi* V8::RandomPositiveSmi() {
   uint32_t random = Random();
-  ASSERT(IsPowerOf2(Smi::kMaxValue + 1));
-  return Smi::FromInt(random & Smi::kMaxValue);
+  ASSERT(static_cast<uint32_t>(Smi::kMaxValue) >= kRandomPositiveSmiMax);
+  // kRandomPositiveSmiMax must match the value being divided
+  // by in math.js.
+  return Smi::FromInt(random & kRandomPositiveSmiMax);
 }
 
 } }  // namespace v8::internal
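
The masking in RandomPositiveSmi clamps a 32-bit random value to a 30-bit
payload; math.js presumably divides by 0x40000000 (kRandomPositiveSmiMax + 1)
to map it into [0, 1). A minimal sketch:

#include <stdint.h>

int32_t RandomPositiveSmiPayload(uint32_t random) {
  const uint32_t kRandomPositiveSmiMax = 0x3fffffff;  // 2^30 - 1
  return static_cast<int32_t>(random & kRandomPositiveSmiMax);
}
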
diff --git a/src/v8.h b/src/v8.h
index 7786d66..106ae61 100644
--- a/src/v8.h
+++ b/src/v8.h
@@ -95,7 +95,7 @@
   static Smi* RandomPositiveSmi();
 
   // Idle notification directly from the API.
-  static bool IdleNotification(bool is_high_priority);
+  static bool IdleNotification();
 
  private:
   // True if engine is currently running
diff --git a/src/version.cc b/src/version.cc
index a36e17c..54e688b 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     1
 #define MINOR_VERSION     3
-#define BUILD_NUMBER      14
+#define BUILD_NUMBER      17
 #define PATCH_LEVEL       0
 #define CANDIDATE_VERSION true
 
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index f51a3ea..8f078ff 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -38,11 +38,6 @@
   return static_cast<Condition>(cc ^ 1);
 }
 
-// -----------------------------------------------------------------------------
-
-Immediate::Immediate(Smi* value) {
-  value_ = static_cast<int32_t>(reinterpret_cast<intptr_t>(value));
-}
 
 // -----------------------------------------------------------------------------
 // Implementation of Assembler
@@ -70,6 +65,20 @@
 }
 
 
+void Assembler::emit_code_target(Handle<Code> target, RelocInfo::Mode rmode) {
+  ASSERT(RelocInfo::IsCodeTarget(rmode));
+  RecordRelocInfo(rmode);
+  int current = code_targets_.length();
+  if (current > 0 && code_targets_.last().is_identical_to(target)) {
+    // Optimization: reuse the previous slot when the target repeats.
+    emitl(current - 1);
+  } else {
+    code_targets_.Add(target);
+    emitl(current);
+  }
+}
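
emit_code_target stores a 32-bit index into a side table of code targets
instead of an absolute address, reusing the last slot when the same target
repeats. A simplified sketch of that indexing logic (stand-in type; the real
table holds Handle<Code>):

#include <vector>

typedef const void* CodeTarget;  // stand-in for Handle<Code>

int EmitCodeTargetIndex(std::vector<CodeTarget>* targets, CodeTarget target) {
  if (!targets->empty() && targets->back() == target) {
    return static_cast<int>(targets->size()) - 1;  // reuse the last slot
  }
  targets->push_back(target);
  return static_cast<int>(targets->size()) - 1;
}
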
+
+
 void Assembler::emit_rex_64(Register reg, Register rm_reg) {
   emit(0x48 | reg.high_bit() << 2 | rm_reg.high_bit());
 }
@@ -162,15 +171,18 @@
 
 
 Address Assembler::target_address_at(Address pc) {
-  return Memory::Address_at(pc);
+  return Memory::int32_at(pc) + pc + 4;
 }
 
 
 void Assembler::set_target_address_at(Address pc, Address target) {
-  Memory::Address_at(pc) = target;
-  CPU::FlushICache(pc, sizeof(intptr_t));
+  Memory::int32_at(pc) = target - pc - 4;
+  CPU::FlushICache(pc, sizeof(int32_t));
 }
 
+Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
+  return code_targets_[Memory::int32_at(pc)];
+}
 
 // -----------------------------------------------------------------------------
 // Implementation of RelocInfo
@@ -179,15 +191,24 @@
 void RelocInfo::apply(intptr_t delta) {
   if (IsInternalReference(rmode_)) {
     // absolute code pointer inside code object moves with the code object.
-    intptr_t* p = reinterpret_cast<intptr_t*>(pc_);
-    *p += delta;  // relocate entry
+    Memory::Address_at(pc_) += delta;
+  } else if (IsCodeTarget(rmode_)) {
+    Memory::int32_at(pc_) -= delta;
+  } else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
+    // Special handling of js_return when a break point is set (call
+    // instruction has been inserted).
+    Memory::int32_at(pc_ + 1) -= delta;  // relocate entry
   }
 }
 
 
 Address RelocInfo::target_address() {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
-  return Assembler::target_address_at(pc_);
+  if (IsCodeTarget(rmode_)) {
+    return Assembler::target_address_at(pc_);
+  } else {
+    return Memory::Address_at(pc_);
+  }
 }
 
 
@@ -199,13 +220,27 @@
 
 void RelocInfo::set_target_address(Address target) {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
-  Assembler::set_target_address_at(pc_, target);
+  if (IsCodeTarget(rmode_)) {
+    Assembler::set_target_address_at(pc_, target);
+  } else {
+    Memory::Address_at(pc_) = target;
+  }
 }
 
 
 Object* RelocInfo::target_object() {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  return *reinterpret_cast<Object**>(pc_);
+  return Memory::Object_at(pc_);
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
+  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  if (rmode_ == EMBEDDED_OBJECT) {
+    return Memory::Object_Handle_at(pc_);
+  } else {
+    return origin->code_target_object_handle_at(pc_);
+  }
 }
 
 
@@ -227,46 +262,49 @@
 }
 
 
-bool RelocInfo::IsCallInstruction() {
+bool RelocInfo::IsPatchedReturnSequence() {
   // The recognized call sequence is:
   //  movq(kScratchRegister, immediate64); call(kScratchRegister);
   // It only needs to be distinguished from a return sequence
   //  movq(rsp, rbp); pop(rbp); ret(n); int3 *6
   // The 11th byte is int3 (0xCC) in the return sequence and
   // REX.WB (0x48+register bit) for the call sequence.
+#ifdef ENABLE_DEBUGGER_SUPPORT
   return pc_[10] != 0xCC;
+#else
+  return false;
+#endif
 }
 
 
 Address RelocInfo::call_address() {
-  ASSERT(IsCallInstruction());
-  return Assembler::target_address_at(
-      pc_ + Assembler::kPatchReturnSequenceAddressOffset);
+  ASSERT(IsPatchedReturnSequence());
+  return Memory::Address_at(
+      pc_ + Assembler::kRealPatchReturnSequenceAddressOffset);
 }
 
 
 void RelocInfo::set_call_address(Address target) {
-  ASSERT(IsCallInstruction());
-  Assembler::set_target_address_at(
-      pc_ + Assembler::kPatchReturnSequenceAddressOffset,
-      target);
+  ASSERT(IsPatchedReturnSequence());
+  Memory::Address_at(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset) =
+      target;
 }
 
 
 Object* RelocInfo::call_object() {
-  ASSERT(IsCallInstruction());
+  ASSERT(IsPatchedReturnSequence());
   return *call_object_address();
 }
 
 
 void RelocInfo::set_call_object(Object* target) {
-  ASSERT(IsCallInstruction());
+  ASSERT(IsPatchedReturnSequence());
   *call_object_address() = target;
 }
 
 
 Object** RelocInfo::call_object_address() {
-  ASSERT(IsCallInstruction());
+  ASSERT(IsPatchedReturnSequence());
   return reinterpret_cast<Object**>(
       pc_ + Assembler::kPatchReturnSequenceAddressOffset);
 }
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index b4204a9..61e8753 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -264,7 +264,8 @@
 
 byte* Assembler::spare_buffer_ = NULL;
 
-Assembler::Assembler(void* buffer, int buffer_size) {
+Assembler::Assembler(void* buffer, int buffer_size)
+    : code_targets_(100) {
   if (buffer == NULL) {
     // do our own buffer management
     if (buffer_size <= kMinimalBufferSize) {
@@ -392,7 +393,7 @@
   // Some internal data structures overflow for very large buffers, so
   // kMaximalBufferSize must be kept small enough to avoid this.
   if ((desc.buffer_size > kMaximalBufferSize) ||
-      (desc.buffer_size > Heap::OldGenerationSize())) {
+      (desc.buffer_size > Heap::MaxOldGenerationSize())) {
     V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
   }
 
@@ -573,11 +574,11 @@
     emit(src.value_);
   } else if (dst.is(rax)) {
     emit(0x05 | (subcode << 3));
-    emitl(src.value_);
+    emitw(src.value_);
   } else {
     emit(0x81);
     emit_modrm(subcode, dst);
-    emitl(src.value_);
+    emitw(src.value_);
   }
 }
 
@@ -596,7 +597,7 @@
   } else {
     emit(0x81);
     emit_operand(subcode, dst);
-    emitl(src.value_);
+    emitw(src.value_);
   }
 }
 
@@ -707,7 +708,7 @@
 void Assembler::shift_32(Register dst, Immediate shift_amount, int subcode) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  ASSERT(is_uint6(shift_amount.value_));  // illegal shift count
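+  // 32-bit shifts use only the low five bits of the count.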
+  ASSERT(is_uint5(shift_amount.value_));  // illegal shift count
   if (shift_amount.value_ == 1) {
     emit_optional_rex_32(dst);
     emit(0xD1);
@@ -762,6 +763,15 @@
 }
 
 
+void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // 1110 1000 #32-bit disp
+  emit(0xE8);
+  emit_code_target(target, rmode);
+}
+
+
 void Assembler::call(Register adr) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -784,6 +794,12 @@
 }
 
 
+void Assembler::clc() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
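+  // CLC (clear carry flag) is the single-byte opcode 0xF8.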
+  emit(0xF8);
+}
+
+
 void Assembler::cdq() {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -792,6 +808,11 @@
 
 
 void Assembler::cmovq(Condition cc, Register dst, Register src) {
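+  // 'always' and 'never' are pseudo-conditions (see assembler-x64.h); lower
+  // them to an unconditional move or a no-op instead of emitting a cmov.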
+  if (cc == always) {
+    movq(dst, src);
+    return;
+  } else if (cc == never) {
+    return;
+  }
   // No need to check CpuInfo for CMOV support, it's a required part of the
   // 64-bit architecture.
   ASSERT(cc >= 0);  // Use mov for unconditional moves.
@@ -806,6 +827,11 @@
 
 
 void Assembler::cmovq(Condition cc, Register dst, const Operand& src) {
+  if (cc == always) {
+    movq(dst, src);
+    return;
+  } else if (cc == never) {
+    return;
+  }
   ASSERT(cc >= 0);
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -818,6 +844,11 @@
 
 
 void Assembler::cmovl(Condition cc, Register dst, Register src) {
+  if (cc == always) {
+    movl(dst, src);
+    return;
+  } else if (cc == never) {
+    return;
+  }
   ASSERT(cc >= 0);
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -830,6 +861,11 @@
 
 
 void Assembler::cmovl(Condition cc, Register dst, const Operand& src) {
+  if (cc == always) {
+    movl(dst, src);
+    return;
+  } else if (cc == never) {
+    return;
+  }
   ASSERT(cc >= 0);
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -903,6 +939,27 @@
 }
 
 
+void Assembler::decb(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (dst.code() > 3) {
+    // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
+    emit_rex_32(dst);
+  }
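+  // DEC r/m8 is encoded as FE /1.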
+  emit(0xFE);
+  emit_modrm(0x1, dst);
+}
+
+
+void Assembler::decb(const Operand& dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst);
+  emit(0xFE);
+  emit_operand(1, dst);
+}
+
+
 void Assembler::enter(Immediate size) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1027,6 +1084,12 @@
 
 
 void Assembler::j(Condition cc, Label* L) {
+  if (cc == always) {
+    jmp(L);
+    return;
+  } else if (cc == never) {
+    return;
+  }
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   ASSERT(is_uint4(cc));
@@ -1062,6 +1125,19 @@
 }
 
 
+void Assembler::j(Condition cc,
+                  Handle<Code> target,
+                  RelocInfo::Mode rmode) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(is_uint4(cc));
+  // 0000 1111 1000 tttn #32-bit disp
+  emit(0x0F);
+  emit(0x80 | cc);
+  emit_code_target(target, rmode);
+}
+
+
 void Assembler::jmp(Label* L) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1093,6 +1169,15 @@
 }
 
 
+void Assembler::jmp(Handle<Code> target, RelocInfo::Mode rmode) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  // 1110 1001 #32-bit disp
+  emit(0xE9);
+  emit_code_target(target, rmode);
+}
+
+
 void Assembler::jmp(Register target) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1170,6 +1255,15 @@
   emit_operand(src, dst);
 }
 
+
+void Assembler::movw(const Operand& dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
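+  // The 0x66 operand-size prefix selects the 16-bit form of MOV (opcode 0x89).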
+  emit(0x66);
+  emit_optional_rex_32(src, dst);
+  emit(0x89);
+  emit_operand(src, dst);
+}
+
+
 void Assembler::movl(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1341,10 +1435,7 @@
     // There is no possible reason to store a heap pointer without relocation
     // info, so it must be a smi.
     ASSERT(value->IsSmi());
-    // Smis never have more than 32 significant bits, but they might
-    // have garbage in the high bits.
-    movq(dst,
-         Immediate(static_cast<int32_t>(reinterpret_cast<intptr_t>(*value))));
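+    // Smis now keep 32 bits of payload in the upper half of the word, so the
+    // value no longer fits in a 32-bit immediate; emit a full 64-bit move.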
+    movq(dst, reinterpret_cast<int64_t>(*value), RelocInfo::NONE);
   } else {
     EnsureSpace ensure_space(this);
     last_pc_ = pc_;
@@ -1357,6 +1448,26 @@
 }
 
 
+void Assembler::movsxbq(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_32(dst, src);
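+  // 0F BE is MOVSX from an 8-bit operand.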
+  emit(0x0F);
+  emit(0xBE);
+  emit_operand(dst, src);
+}
+
+
+void Assembler::movsxwq(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst, src);
+  emit(0x0F);
+  emit(0xBF);
+  emit_operand(dst, src);
+}
+
+
 void Assembler::movsxlq(Register dst, Register src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1395,6 +1506,16 @@
 }
 
 
+void Assembler::movzxwq(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_rex_64(dst, src);
+  emit(0x0F);
+  emit(0xB7);
+  emit_operand(dst, src);
+}
+
+
 void Assembler::movzxwl(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1618,22 +1739,6 @@
 }
 
 
-void Assembler::rcl(Register dst, uint8_t imm8) {
-  EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
-  ASSERT(is_uint6(imm8));  // illegal shift count
-  if (imm8 == 1) {
-    emit_rex_64(dst);
-    emit(0xD1);
-    emit_modrm(0x2, dst);
-  } else {
-    emit_rex_64(dst);
-    emit(0xC1);
-    emit_modrm(0x2, dst);
-    emit(imm8);
-  }
-}
-
 void Assembler::rdtsc() {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1657,6 +1762,10 @@
 
 
 void Assembler::setcc(Condition cc, Register reg) {
+  if (cc > last_condition) {
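+    // Pseudo-conditions have a known result; materialize it directly.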
+    movb(reg, Immediate(cc == always ? 1 : 0));
+    return;
+  }
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   ASSERT(is_uint4(cc));
@@ -1718,6 +1827,18 @@
 }
 
 
+void Assembler::testb(Register dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (dst.code() > 3 || src.code() > 3) {
+    // One of the registers is not al, bl, cl, or dl; its encoding needs REX.
+    emit_rex_32(dst, src);
+  }
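+  // TEST r/m8, r8 is encoded as 84 /r.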
+  emit(0x84);
+  emit_modrm(dst, src);
+}
+
+
 void Assembler::testb(Register reg, Immediate mask) {
   ASSERT(is_int8(mask.value_) || is_uint8(mask.value_));
   EnsureSpace ensure_space(this);
@@ -1888,6 +2009,14 @@
 }
 
 
+void Assembler::fstp(int index) {
+  ASSERT(is_uint3(index));
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
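+  // DD D8+i encodes FSTP ST(i).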
+  emit_farith(0xDD, 0xD8, index);
+}
+
+
 void Assembler::fild_s(const Operand& adr) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1939,7 +2068,7 @@
   last_pc_ = pc_;
   emit_optional_rex_32(adr);
   emit(0xDF);
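+  // The ModR/M reg field holds only three bits (digits /0 through /7), so
+  // /8 was never encodable.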
-  emit_operand(8, adr);
+  emit_operand(7, adr);
 }
 
 
@@ -2108,6 +2237,22 @@
 }
 
 
+void Assembler::fucomi(int i) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
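+  // DB E8+i encodes FUCOMI ST, ST(i).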
+  emit(0xDB);
+  emit(0xE8 + i);
+}
+
+
+void Assembler::fucomip() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xDF);
+  emit(0xE9);
+}
+
+
 void Assembler::fcompp() {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -2176,18 +2321,7 @@
 }
 
 
-void Assembler::movsd(Register dst, XMMRegister src) {
-  EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
-  emit(0xF2);  // double
-  emit_optional_rex_32(src, dst);
-  emit(0x0F);
-  emit(0x11);  // store
-  emit_sse_operand(src, dst);
-}
-
-
-void Assembler::movsd(XMMRegister dst, Register src) {
+void Assembler::movsd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   emit(0xF2);  // double
@@ -2387,7 +2521,8 @@
 }
 
 
-const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE;
-
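+// On x64, code targets and patched JS return sequences are pc-relative
+// displacements, so RelocInfo::apply must rewrite them when code moves.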
+const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
+                                  1 << RelocInfo::INTERNAL_REFERENCE |
+                                  1 << RelocInfo::JS_RETURN;
 
 } }  // namespace v8::internal
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index ff87286..4f514f2 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -222,13 +222,18 @@
   less_equal    = 14,
   greater       = 15,
 
+  // Fake conditions that are handled by the
+  // opcodes using them.
+  always        = 16,
+  never         = 17,
   // aliases
   carry         = below,
   not_carry     = above_equal,
   zero          = equal,
   not_zero      = not_equal,
   sign          = negative,
-  not_sign      = positive
+  not_sign      = positive,
+  last_condition = greater
 };
 
 
@@ -284,7 +289,6 @@
 class Immediate BASE_EMBEDDED {
  public:
   explicit Immediate(int32_t value) : value_(value) {}
-  inline explicit Immediate(Smi* value);
 
  private:
   int32_t value_;
@@ -372,6 +376,11 @@
   static void Probe();
   // Check whether a feature is supported by the target CPU.
   static bool IsSupported(Feature f) {
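+    // Runtime flags can disable a feature even when the CPU supports it.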
+    if (f == SSE2 && !FLAG_enable_sse2) return false;
+    if (f == SSE3 && !FLAG_enable_sse3) return false;
+    if (f == CMOV && !FLAG_enable_cmov) return false;
+    if (f == RDTSC && !FLAG_enable_rdtsc) return false;
+    if (f == SAHF && !FLAG_enable_sahf) return false;
     return (supported_ & (V8_UINT64_C(1) << f)) != 0;
   }
   // Check whether a feature is currently enabled.
@@ -440,18 +449,26 @@
   // Assembler functions are invoked in between GetCode() calls.
   void GetCode(CodeDesc* desc);
 
-  // Read/Modify the code target in the branch/call instruction at pc.
-  // On the x64 architecture, the address is absolute, not relative.
+  // Read/Modify the code target in the relative branch/call instruction at pc.
+  // On the x64 architecture, we use relative jumps with a 32-bit displacement
+  // to jump to other Code objects in the Code space in the heap.
+  // Jumps to C functions are done indirectly through a 64-bit register holding
+  // the absolute address of the target.
+  // These functions convert between absolute Addresses of Code objects and
+  // the relative displacements stored in the code.
   static inline Address target_address_at(Address pc);
   static inline void set_target_address_at(Address pc, Address target);
-
+  inline Handle<Object> code_target_object_handle_at(Address pc);
   // Distance between the address of the code target in the call instruction
-  // and the return address.  Checked in the debug build.
-  static const int kCallTargetAddressOffset = 3 + kPointerSize;
-  // Distance between start of patched return sequence and the emitted address
-  // to jump to (movq = REX.W 0xB8+r.).
-  static const int kPatchReturnSequenceAddressOffset = 2;
-
+  // and the return address pushed on the stack.
+  static const int kCallTargetAddressOffset = 4;  // Use 32-bit displacement.
+  // Distance between the start of the JS return sequence and where the
+  // 32-bit displacement of a near call would be, relative to the pushed
+  // return address.  TODO: Use return sequence length instead.
+  // Should equal Debug::kX64JSReturnSequenceLength - kCallTargetAddressOffset;
+  static const int kPatchReturnSequenceAddressOffset = 13 - 4;
+  // TODO(X64): Rename this, removing the "Real", after changing the above.
+  static const int kRealPatchReturnSequenceAddressOffset = 2;
   // ---------------------------------------------------------------------------
   // Code generation
   //
@@ -496,6 +513,10 @@
   void movb(Register dst, Immediate imm);
   void movb(const Operand& dst, Register src);
 
+  // Move the low 16 bits of a 64-bit register value to a 16-bit
+  // memory location.
+  void movw(const Operand& dst, Register src);
+
   void movl(Register dst, Register src);
   void movl(Register dst, const Operand& src);
   void movl(const Operand& dst, Register src);
@@ -525,10 +546,13 @@
   void movq(Register dst, ExternalReference ext);
   void movq(Register dst, Handle<Object> handle, RelocInfo::Mode rmode);
 
+  void movsxbq(Register dst, const Operand& src);
+  void movsxwq(Register dst, const Operand& src);
   void movsxlq(Register dst, Register src);
   void movsxlq(Register dst, const Operand& src);
   void movzxbq(Register dst, const Operand& src);
   void movzxbl(Register dst, const Operand& src);
+  void movzxwq(Register dst, const Operand& src);
   void movzxwl(Register dst, const Operand& src);
 
   // New x64 instruction to load from an immediate 64-bit pointer into RAX.
@@ -691,10 +715,17 @@
     immediate_arithmetic_op_32(0x4, dst, src);
   }
 
+  void andl(Register dst, Register src) {
+    arithmetic_op_32(0x23, dst, src);
+  }
+
   void decq(Register dst);
   void decq(const Operand& dst);
   void decl(Register dst);
   void decl(const Operand& dst);
+  void decb(Register dst);
+  void decb(const Operand& dst);
 
   // Sign-extends rax into rdx:rax.
   void cqo();
@@ -750,12 +781,34 @@
     immediate_arithmetic_op(0x1, dst, src);
   }
 
+  void orl(Register dst, Immediate src) {
+    immediate_arithmetic_op_32(0x1, dst, src);
+  }
+
   void or_(const Operand& dst, Immediate src) {
     immediate_arithmetic_op(0x1, dst, src);
   }
 
+  void orl(const Operand& dst, Immediate src) {
+    immediate_arithmetic_op_32(0x1, dst, src);
+  }
 
-  void rcl(Register dst, uint8_t imm8);
+
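+  // Rotates reuse the shift() encoding: C1 /0 is ROL, /1 ROR, /2 RCL, /3 RCR.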
+  void rcl(Register dst, Immediate imm8) {
+    shift(dst, imm8, 0x2);
+  }
+
+  void rol(Register dst, Immediate imm8) {
+    shift(dst, imm8, 0x0);
+  }
+
+  void rcr(Register dst, Immediate imm8) {
+    shift(dst, imm8, 0x3);
+  }
+
+  void ror(Register dst, Immediate imm8) {
+    shift(dst, imm8, 0x1);
+  }
 
   // Shifts dst:src left by cl bits, affecting only dst.
   void shld(Register dst, Register src);
@@ -856,6 +909,7 @@
     immediate_arithmetic_op_8(0x5, dst, src);
   }
 
+  void testb(Register dst, Register src);
   void testb(Register reg, Immediate mask);
   void testb(const Operand& op, Immediate mask);
   void testl(Register dst, Register src);
@@ -894,6 +948,7 @@
   void bts(const Operand& dst, Register src);
 
   // Miscellaneous
+  void clc();
   void cpuid();
   void hlt();
   void int3();
@@ -923,6 +978,7 @@
   // Calls
   // Call near relative 32-bit displacement, relative to next instruction.
   void call(Label* L);
+  void call(Handle<Code> target, RelocInfo::Mode rmode);
 
   // Call near absolute indirect, address in register
   void call(Register adr);
@@ -932,7 +988,9 @@
 
   // Jumps
   // Jump short or near relative.
+  // Use a 32-bit signed displacement.
   void jmp(Label* L);  // unconditional jump to L
+  void jmp(Handle<Code> target, RelocInfo::Mode rmode);
 
   // Jump near absolute indirect (r64)
   void jmp(Register adr);
@@ -942,6 +1000,7 @@
 
   // Conditional jumps
   void j(Condition cc, Label* L);
+  void j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode);
 
   // Floating-point operations
   void fld(int i);
@@ -954,6 +1013,7 @@
 
   void fstp_s(const Operand& adr);
   void fstp_d(const Operand& adr);
+  void fstp(int index);
 
   void fild_s(const Operand& adr);
   void fild_d(const Operand& adr);
@@ -990,6 +1050,9 @@
   void ftst();
   void fucomp(int i);
   void fucompp();
+  void fucomi(int i);
+  void fucomip();
+
   void fcompp();
   void fnstsw_ax();
   void fwait();
@@ -1004,8 +1067,7 @@
 
   // SSE2 instructions
   void movsd(const Operand& dst, XMMRegister src);
-  void movsd(Register src, XMMRegister dst);
-  void movsd(XMMRegister dst, Register src);
+  void movsd(XMMRegister dst, XMMRegister src);
   void movsd(XMMRegister src, const Operand& dst);
 
   void cvttss2si(Register dst, const Operand& src);
@@ -1047,14 +1109,6 @@
   void RecordStatementPosition(int pos);
   void WriteRecordedPositions();
 
-  // Writes a doubleword of data in the code stream.
-  // Used for inline tables, e.g., jump-tables.
-  // void dd(uint32_t data);
-
-  // Writes a quadword of data in the code stream.
-  // Used for inline tables, e.g., jump-tables.
-  // void dd(uint64_t data, RelocInfo::Mode reloc_info);
-
   int pc_offset() const  { return pc_ - buffer_; }
   int current_statement_position() const { return current_statement_position_; }
   int current_position() const  { return current_position_; }
@@ -1096,9 +1150,9 @@
 
   void emit(byte x) { *pc_++ = x; }
   inline void emitl(uint32_t x);
-  inline void emit(Handle<Object> handle);
   inline void emitq(uint64_t x, RelocInfo::Mode rmode);
   inline void emitw(uint16_t x);
+  inline void emit_code_target(Handle<Code> target, RelocInfo::Mode rmode);
   void emit(Immediate x) { emitl(x.value_); }
 
   // Emits a REX prefix that encodes a 64-bit operand size and
@@ -1276,6 +1330,7 @@
   byte* pc_;  // the program counter; moves forward
   RelocInfoWriter reloc_info_writer;
 
+  List< Handle<Code> > code_targets_;
   // push-pop elimination
   byte* last_pc_;
 
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 35eddc4..01992ce 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -53,7 +53,7 @@
   __ movq(rbp, rsp);
 
   // Store the arguments adaptor context sentinel.
-  __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
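+  // With 32 bits of smi payload, the value no longer fits in a 32-bit
+  // immediate push; the Push macro handles the full 64-bit smi.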
+  __ Push(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
 
   // Push the function on the stack.
   __ push(rdi);
@@ -75,14 +75,9 @@
   __ pop(rbp);
 
   // Remove caller arguments from the stack.
-  // rbx holds a Smi, so we convery to dword offset by multiplying by 4.
-  // TODO(smi): Find a way to abstract indexing by a smi.
-  ASSERT_EQ(kSmiTagSize, 1 && kSmiTag == 0);
-  ASSERT_EQ(kPointerSize, (1 << kSmiTagSize) * 4);
-  // TODO(smi): Find way to abstract indexing by a smi.
   __ pop(rcx);
-  // 1 * kPointerSize is offset of receiver.
-  __ lea(rsp, Operand(rsp, rbx, times_half_pointer_size, 1 * kPointerSize));
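+  // SmiToIndex converts the smi argument count in rbx into a register/scale
+  // pair for the address computation; 1 * kPointerSize skips the receiver.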
+  SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
+  __ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
   __ push(rcx);
 }
 
@@ -342,7 +337,7 @@
     // Because runtime functions always remove the receiver from the stack, we
     // have to fake one to avoid underflowing the stack.
     __ push(rax);
-    __ push(Immediate(Smi::FromInt(0)));
+    __ Push(Smi::FromInt(0));
 
     // Do call to runtime routine.
     __ CallRuntime(Runtime::kStackGuard, 1);
@@ -434,7 +429,7 @@
 
   // Update the index on the stack and in register rax.
   __ movq(rax, Operand(rbp, kIndexOffset));
-  __ addq(rax, Immediate(Smi::FromInt(1)));
+  __ SmiAddConstant(rax, rax, Smi::FromInt(1));
   __ movq(Operand(rbp, kIndexOffset), rax);
 
   __ bind(&entry);
@@ -507,7 +502,7 @@
   __ Move(FieldOperand(result, JSArray::kPropertiesOffset),
           Factory::empty_fixed_array());
   // Field JSArray::kElementsOffset is initialized later.
-  __ movq(FieldOperand(result, JSArray::kLengthOffset), Immediate(0));
+  __ Move(FieldOperand(result, JSArray::kLengthOffset), Smi::FromInt(0));
 
   // If no storage is requested for the elements array just set the empty
   // fixed array.
@@ -718,14 +713,12 @@
   __ cmpq(rax, Immediate(1));
   __ j(not_equal, &argc_two_or_more);
   __ movq(rdx, Operand(rsp, kPointerSize));  // Get the argument from the stack.
-  Condition not_positive_smi = __ CheckNotPositiveSmi(rdx);
-  __ j(not_positive_smi, call_generic_code);
+  __ JumpIfNotPositiveSmi(rdx, call_generic_code);
 
   // Handle construction of an empty array of a certain size. Bail out if size
   // is too large to actually allocate an elements array.
-  __ JumpIfSmiGreaterEqualsConstant(rdx,
-                                    JSObject::kInitialMaxFastElementArray,
-                                    call_generic_code);
+  __ SmiCompare(rdx, Smi::FromInt(JSObject::kInitialMaxFastElementArray));
+  __ j(greater_equal, call_generic_code);
 
   // rax: argc
   // rdx: array_size (smi)
@@ -825,10 +818,10 @@
     __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
     // A smi check will catch both a NULL and a Smi.
     ASSERT(kSmiTag == 0);
-    Condition not_smi = __ CheckNotSmi(rbx);
-    __ Assert(not_smi, "Unexpected initial map for Array function");
+    Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
+    __ Check(not_smi, "Unexpected initial map for Array function");
     __ CmpObjectType(rbx, MAP_TYPE, rcx);
-    __ Assert(equal, "Unexpected initial map for Array function");
+    __ Check(equal, "Unexpected initial map for Array function");
   }
 
   // Run the native code for the Array function called as a normal function.
@@ -857,15 +850,15 @@
     // does always have a map.
     GenerateLoadArrayFunction(masm, rbx);
     __ cmpq(rdi, rbx);
-    __ Assert(equal, "Unexpected Array function");
+    __ Check(equal, "Unexpected Array function");
     // Initial map for the builtin Array function should be a map.
     __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
     // A smi check will catch both a NULL and a Smi.
     ASSERT(kSmiTag == 0);
-    Condition not_smi = __ CheckNotSmi(rbx);
-    __ Assert(not_smi, "Unexpected initial map for Array function");
+    Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
+    __ Check(not_smi, "Unexpected initial map for Array function");
     __ CmpObjectType(rbx, MAP_TYPE, rcx);
-    __ Assert(equal, "Unexpected initial map for Array function");
+    __ Check(equal, "Unexpected initial map for Array function");
   }
 
   // Run the native code for the Array function called as constructor.
@@ -902,7 +895,6 @@
   // rdi: called object
   // rax: number of arguments
   __ bind(&non_function_call);
-
   // Set expected number of arguments to zero (not changing rax).
   __ movq(rbx, Immediate(0));
   __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
@@ -1143,11 +1135,9 @@
   __ LeaveConstructFrame();
 
   // Remove caller arguments from the stack and return.
-  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-  // TODO(smi): Find a way to abstract indexing by a smi.
   __ pop(rcx);
-  // 1 * kPointerSize is offset of receiver.
-  __ lea(rsp, Operand(rsp, rbx, times_half_pointer_size, 1 * kPointerSize));
+  SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
+  __ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
   __ push(rcx);
   __ IncrementCounter(&Counters::constructed_objects, 1);
   __ ret(0);
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 8e6dbef..0029b74 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -54,6 +54,7 @@
   }
 }
 
+
 void DeferredCode::RestoreRegisters() {
   // Restore registers in reverse order, matching the stack's LIFO order.
   for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
@@ -237,15 +238,8 @@
   // Test if operands are smi or number objects (fp). Requirements:
   // operand_1 in rax, operand_2 in rdx; falls through on float or smi
   // operands, jumps to the non_float label otherwise.
-  static void CheckFloatOperands(MacroAssembler* masm,
-                                 Label* non_float);
-
-  // Allocate a heap number in new space with undefined value.
-  // Returns tagged pointer in result, or jumps to need_gc if new space is full.
-  static void AllocateHeapNumber(MacroAssembler* masm,
-                                 Label* need_gc,
-                                 Register scratch,
-                                 Register result);
+  static void CheckNumberOperands(MacroAssembler* masm,
+                                  Label* non_float);
 };
 
 
@@ -276,9 +270,9 @@
   frame_->SyncRange(0, frame_->element_count() - 1);
 
   __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT);
+  frame_->EmitPush(rsi);  // The context is the first argument.
   frame_->EmitPush(kScratchRegister);
-  frame_->EmitPush(rsi);  // The context is the second argument.
-  frame_->EmitPush(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
+  frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0));
   Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
   // Return value is ignored.
 }
@@ -767,8 +761,8 @@
     // adaptor frame below it.
     Label invoke, adapted;
     __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-    __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
-    __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+    __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
+                  Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
     __ j(equal, &adapted);
 
     // No arguments adaptor frame. Copy fixed number of arguments.
@@ -793,12 +787,12 @@
     // have to worry about getting rid of the elements from the virtual
     // frame.
     Label loop;
-    __ bind(&loop);
     __ testl(rcx, rcx);
     __ j(zero, &invoke);
+    __ bind(&loop);
     __ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
     __ decl(rcx);
-    __ jmp(&loop);
+    __ j(not_zero, &loop);
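+    // The count is now tested once before entering the loop, and the
+    // backward branch reuses the flags set by decl, dropping the extra jmp.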
 
     // Invoke the function. The virtual frame knows about the receiver
     // so make sure to forget that explicitly.
@@ -933,7 +927,7 @@
     // Declaration nodes are always introduced in one of two modes.
     ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
     PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
-    frame_->EmitPush(Immediate(Smi::FromInt(attr)));
+    frame_->EmitPush(Smi::FromInt(attr));
     // Push initial value, if any.
     // Note: For variables we must not push an initial value (such as
     // 'undefined') because we may have a (legal) redeclaration and we
@@ -943,7 +937,7 @@
     } else if (node->fun() != NULL) {
       Load(node->fun());
     } else {
-      frame_->EmitPush(Immediate(Smi::FromInt(0)));  // no initial value!
+      frame_->EmitPush(Smi::FromInt(0));  // no initial value!
     }
     Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
     // Ignore the return value (declarations are statements).
@@ -1291,288 +1285,335 @@
 }
 
 
-void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
+void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
   ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ LoopStatement");
+  Comment cmnt(masm_, "[ DoWhileStatement");
   CodeForStatementPosition(node);
   node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  JumpTarget body(JumpTarget::BIDIRECTIONAL);
+  IncrementLoopNesting();
 
-  // Simple condition analysis.  ALWAYS_TRUE and ALWAYS_FALSE represent a
-  // known result for the test expression, with no side effects.
-  enum { ALWAYS_TRUE, ALWAYS_FALSE, DONT_KNOW } info = DONT_KNOW;
-  if (node->cond() == NULL) {
-    ASSERT(node->type() == LoopStatement::FOR_LOOP);
-    info = ALWAYS_TRUE;
-  } else {
-    Literal* lit = node->cond()->AsLiteral();
-    if (lit != NULL) {
-      if (lit->IsTrue()) {
-        info = ALWAYS_TRUE;
-      } else if (lit->IsFalse()) {
-        info = ALWAYS_FALSE;
-      }
-    }
+  ConditionAnalysis info = AnalyzeCondition(node->cond());
+  // Label the top of the loop for the backward jump if necessary.
+  switch (info) {
+    case ALWAYS_TRUE:
+      // Use the continue target.
+      node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+      node->continue_target()->Bind();
+      break;
+    case ALWAYS_FALSE:
+      // No need to label it.
+      node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+      break;
+    case DONT_KNOW:
+      // Continue is the test, so use the backward body target.
+      node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+      body.Bind();
+      break;
   }
 
-  switch (node->type()) {
-    case LoopStatement::DO_LOOP: {
-      JumpTarget body(JumpTarget::BIDIRECTIONAL);
-      IncrementLoopNesting();
+  CheckStack();  // TODO(1222600): ignore if body contains calls.
+  Visit(node->body());
 
-      // Label the top of the loop for the backward jump if necessary.
-      if (info == ALWAYS_TRUE) {
-        // Use the continue target.
-        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-        node->continue_target()->Bind();
-      } else if (info == ALWAYS_FALSE) {
-        // No need to label it.
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-      } else {
-        // Continue is the test, so use the backward body target.
-        ASSERT(info == DONT_KNOW);
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-        body.Bind();
+  // Compile the test.
+  switch (info) {
+    case ALWAYS_TRUE:
+      // If control flow can fall off the end of the body, jump back
+      // to the top and bind the break target at the exit.
+      if (has_valid_frame()) {
+        node->continue_target()->Jump();
       }
-
-      CheckStack();  // TODO(1222600): ignore if body contains calls.
-      Visit(node->body());
-
-      // Compile the test.
-      if (info == ALWAYS_TRUE) {
-        // If control flow can fall off the end of the body, jump back
-        // to the top and bind the break target at the exit.
-        if (has_valid_frame()) {
-          node->continue_target()->Jump();
-        }
-        if (node->break_target()->is_linked()) {
-          node->break_target()->Bind();
-        }
-
-      } else if (info == ALWAYS_FALSE) {
-        // We may have had continues or breaks in the body.
-        if (node->continue_target()->is_linked()) {
-          node->continue_target()->Bind();
-        }
-        if (node->break_target()->is_linked()) {
-          node->break_target()->Bind();
-        }
-
-      } else {
-        ASSERT(info == DONT_KNOW);
-        // We have to compile the test expression if it can be reached by
-        // control flow falling out of the body or via continue.
-        if (node->continue_target()->is_linked()) {
-          node->continue_target()->Bind();
-        }
-        if (has_valid_frame()) {
-          ControlDestination dest(&body, node->break_target(), false);
-          LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
-        }
-        if (node->break_target()->is_linked()) {
-          node->break_target()->Bind();
-        }
-      }
-      break;
-    }
-
-    case LoopStatement::WHILE_LOOP: {
-      // Do not duplicate conditions that may have function literal
-      // subexpressions.  This can cause us to compile the function
-      // literal twice.
-      bool test_at_bottom = !node->may_have_function_literal();
-
-      IncrementLoopNesting();
-
-      // If the condition is always false and has no side effects, we
-      // do not need to compile anything.
-      if (info == ALWAYS_FALSE) break;
-
-      JumpTarget body;
-      if (test_at_bottom) {
-        body.set_direction(JumpTarget::BIDIRECTIONAL);
-      }
-
-      // Based on the condition analysis, compile the test as necessary.
-      if (info == ALWAYS_TRUE) {
-        // We will not compile the test expression.  Label the top of
-        // the loop with the continue target.
-        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-        node->continue_target()->Bind();
-      } else {
-        ASSERT(info == DONT_KNOW);  // ALWAYS_FALSE cannot reach here.
-        if (test_at_bottom) {
-          // Continue is the test at the bottom, no need to label the
-          // test at the top.  The body is a backward target.
-          node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-        } else {
-          // Label the test at the top as the continue target.  The
-          // body is a forward-only target.
-          node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-          node->continue_target()->Bind();
-        }
-        // Compile the test with the body as the true target and
-        // preferred fall-through and with the break target as the
-        // false target.
-        ControlDestination dest(&body, node->break_target(), true);
-        LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
-
-        if (dest.false_was_fall_through()) {
-          // If we got the break target as fall-through, the test may
-          // have been unconditionally false (if there are no jumps to
-          // the body).
-          if (!body.is_linked()) break;
-
-          // Otherwise, jump around the body on the fall through and
-          // then bind the body target.
-          node->break_target()->Unuse();
-          node->break_target()->Jump();
-          body.Bind();
-        }
-      }
-
-      CheckStack();  // TODO(1222600): ignore if body contains calls.
-      Visit(node->body());
-
-      // Based on the condition analysis, compile the backward jump as
-      // necessary.
-      if (info == ALWAYS_TRUE) {
-        // The loop body has been labeled with the continue target.
-        if (has_valid_frame()) {
-          node->continue_target()->Jump();
-        }
-      } else {
-        ASSERT(info == DONT_KNOW);  // ALWAYS_FALSE cannot reach here.
-        if (test_at_bottom) {
-          // If we have chosen to recompile the test at the bottom,
-          // then it is the continue target.
-          if (node->continue_target()->is_linked()) {
-            node->continue_target()->Bind();
-          }
-          if (has_valid_frame()) {
-            // The break target is the fall-through (body is a backward
-            // jump from here and thus an invalid fall-through).
-            ControlDestination dest(&body, node->break_target(), false);
-            LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
-          }
-        } else {
-          // If we have chosen not to recompile the test at the
-          // bottom, jump back to the one at the top.
-          if (has_valid_frame()) {
-            node->continue_target()->Jump();
-          }
-        }
-      }
-
-      // The break target may be already bound (by the condition), or
-      // there may not be a valid frame.  Bind it only if needed.
       if (node->break_target()->is_linked()) {
         node->break_target()->Bind();
       }
       break;
-    }
-
-    case LoopStatement::FOR_LOOP: {
-      // Do not duplicate conditions that may have function literal
-      // subexpressions.  This can cause us to compile the function
-      // literal twice.
-      bool test_at_bottom = !node->may_have_function_literal();
-
-      // Compile the init expression if present.
-      if (node->init() != NULL) {
-        Visit(node->init());
+    case ALWAYS_FALSE:
+      // We may have had continues or breaks in the body.
+      if (node->continue_target()->is_linked()) {
+        node->continue_target()->Bind();
       }
-
-      IncrementLoopNesting();
-
-      // If the condition is always false and has no side effects, we
-      // do not need to compile anything else.
-      if (info == ALWAYS_FALSE) break;
-
-      // Target for backward edge if no test at the bottom, otherwise
-      // unused.
-      JumpTarget loop(JumpTarget::BIDIRECTIONAL);
-
-      // Target for backward edge if there is a test at the bottom,
-      // otherwise used as target for test at the top.
-      JumpTarget body;
-      if (test_at_bottom) {
-        body.set_direction(JumpTarget::BIDIRECTIONAL);
+      if (node->break_target()->is_linked()) {
+        node->break_target()->Bind();
       }
-
-      // Based on the condition analysis, compile the test as necessary.
-      if (info == ALWAYS_TRUE) {
-        // We will not compile the test expression.  Label the top of
-        // the loop.
-        if (node->next() == NULL) {
-          // Use the continue target if there is no update expression.
-          node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-          node->continue_target()->Bind();
-        } else {
-          // Otherwise use the backward loop target.
-          node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-          loop.Bind();
-        }
-      } else {
-        ASSERT(info == DONT_KNOW);
-        if (test_at_bottom) {
-          // Continue is either the update expression or the test at
-          // the bottom, no need to label the test at the top.
-          node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-        } else if (node->next() == NULL) {
-          // We are not recompiling the test at the bottom and there
-          // is no update expression.
-          node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-          node->continue_target()->Bind();
-        } else {
-          // We are not recompiling the test at the bottom and there
-          // is an update expression.
-          node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-          loop.Bind();
-        }
-
-        // Compile the test with the body as the true target and
-        // preferred fall-through and with the break target as the
-        // false target.
-        ControlDestination dest(&body, node->break_target(), true);
+      break;
+    case DONT_KNOW:
+      // We have to compile the test expression if it can be reached by
+      // control flow falling out of the body or via continue.
+      if (node->continue_target()->is_linked()) {
+        node->continue_target()->Bind();
+      }
+      if (has_valid_frame()) {
+        ControlDestination dest(&body, node->break_target(), false);
         LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
-
-        if (dest.false_was_fall_through()) {
-          // If we got the break target as fall-through, the test may
-          // have been unconditionally false (if there are no jumps to
-          // the body).
-          if (!body.is_linked()) break;
-
-          // Otherwise, jump around the body on the fall through and
-          // then bind the body target.
-          node->break_target()->Unuse();
-          node->break_target()->Jump();
-          body.Bind();
-        }
       }
+      if (node->break_target()->is_linked()) {
+        node->break_target()->Bind();
+      }
+      break;
+  }
 
-      CheckStack();  // TODO(1222600): ignore if body contains calls.
-      Visit(node->body());
+  DecrementLoopNesting();
+  node->continue_target()->Unuse();
+  node->break_target()->Unuse();
+}
 
-      // If there is an update expression, compile it if necessary.
-      if (node->next() != NULL) {
+
+void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ WhileStatement");
+  CodeForStatementPosition(node);
+
+  // If the condition is always false and has no side effects, we do not
+  // need to compile anything.
+  ConditionAnalysis info = AnalyzeCondition(node->cond());
+  if (info == ALWAYS_FALSE) return;
+
+  // Do not duplicate conditions that may have function literal
+  // subexpressions.  This can cause us to compile the function literal
+  // twice.
+  bool test_at_bottom = !node->may_have_function_literal();
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  IncrementLoopNesting();
+  JumpTarget body;
+  if (test_at_bottom) {
+    body.set_direction(JumpTarget::BIDIRECTIONAL);
+  }
+
+  // Based on the condition analysis, compile the test as necessary.
+  switch (info) {
+    case ALWAYS_TRUE:
+      // We will not compile the test expression.  Label the top of the
+      // loop with the continue target.
+      node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+      node->continue_target()->Bind();
+      break;
+    case DONT_KNOW: {
+      if (test_at_bottom) {
+        // Continue is the test at the bottom, no need to label the test
+        // at the top.  The body is a backward target.
+        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+      } else {
+        // Label the test at the top as the continue target.  The body
+        // is a forward-only target.
+        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+        node->continue_target()->Bind();
+      }
+      // Compile the test with the body as the true target and preferred
+      // fall-through and with the break target as the false target.
+      ControlDestination dest(&body, node->break_target(), true);
+      LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+
+      if (dest.false_was_fall_through()) {
+        // If we got the break target as fall-through, the test may have
+        // been unconditionally false (if there are no jumps to the
+        // body).
+        if (!body.is_linked()) {
+          DecrementLoopNesting();
+          return;
+        }
+
+        // Otherwise, jump around the body on the fall through and then
+        // bind the body target.
+        node->break_target()->Unuse();
+        node->break_target()->Jump();
+        body.Bind();
+      }
+      break;
+    }
+    case ALWAYS_FALSE:
+      UNREACHABLE();
+      break;
+  }
+
+  CheckStack();  // TODO(1222600): ignore if body contains calls.
+  Visit(node->body());
+
+  // Based on the condition analysis, compile the backward jump as
+  // necessary.
+  switch (info) {
+    case ALWAYS_TRUE:
+      // The loop body has been labeled with the continue target.
+      if (has_valid_frame()) {
+        node->continue_target()->Jump();
+      }
+      break;
+    case DONT_KNOW:
+      if (test_at_bottom) {
+        // If we have chosen to recompile the test at the bottom,
+        // then it is the continue target.
         if (node->continue_target()->is_linked()) {
           node->continue_target()->Bind();
         }
-
-        // Control can reach the update by falling out of the body or
-        // by a continue.
         if (has_valid_frame()) {
-          // Record the source position of the statement as this code
-          // which is after the code for the body actually belongs to
-          // the loop statement and not the body.
-          CodeForStatementPosition(node);
-          Visit(node->next());
+          // The break target is the fall-through (body is a backward
+          // jump from here and thus an invalid fall-through).
+          ControlDestination dest(&body, node->break_target(), false);
+          LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+        }
+      } else {
+        // If we have chosen not to recompile the test at the
+        // bottom, jump back to the one at the top.
+        if (has_valid_frame()) {
+          node->continue_target()->Jump();
         }
       }
+      break;
+    case ALWAYS_FALSE:
+      UNREACHABLE();
+      break;
+  }
 
-      // Based on the condition analysis, compile the backward jump as
-      // necessary.
-      if (info == ALWAYS_TRUE) {
+  // The break target may be already bound (by the condition), or there
+  // may not be a valid frame.  Bind it only if needed.
+  if (node->break_target()->is_linked()) {
+    node->break_target()->Bind();
+  }
+  DecrementLoopNesting();
+}
+
+
+void CodeGenerator::VisitForStatement(ForStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ ForStatement");
+  CodeForStatementPosition(node);
+
+  // Compile the init expression if present.
+  if (node->init() != NULL) {
+    Visit(node->init());
+  }
+
+  // If the condition is always false and has no side effects, we do not
+  // need to compile anything else.
+  ConditionAnalysis info = AnalyzeCondition(node->cond());
+  if (info == ALWAYS_FALSE) return;
+
+  // Do not duplicate conditions that may have function literal
+  // subexpressions.  This can cause us to compile the function literal
+  // twice.
+  bool test_at_bottom = !node->may_have_function_literal();
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  IncrementLoopNesting();
+
+  // Target for backward edge if no test at the bottom, otherwise
+  // unused.
+  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
+
+  // Target for backward edge if there is a test at the bottom,
+  // otherwise used as target for test at the top.
+  JumpTarget body;
+  if (test_at_bottom) {
+    body.set_direction(JumpTarget::BIDIRECTIONAL);
+  }
+
+  // Based on the condition analysis, compile the test as necessary.
+  switch (info) {
+    case ALWAYS_TRUE:
+      // We will not compile the test expression.  Label the top of the
+      // loop.
+      if (node->next() == NULL) {
+        // Use the continue target if there is no update expression.
+        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+        node->continue_target()->Bind();
+      } else {
+        // Otherwise use the backward loop target.
+        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+        loop.Bind();
+      }
+      break;
+    case DONT_KNOW: {
+      if (test_at_bottom) {
+        // Continue is either the update expression or the test at the
+        // bottom, no need to label the test at the top.
+        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+      } else if (node->next() == NULL) {
+        // We are not recompiling the test at the bottom and there is no
+        // update expression.
+        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+        node->continue_target()->Bind();
+      } else {
+        // We are not recompiling the test at the bottom and there is an
+        // update expression.
+        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+        loop.Bind();
+      }
+
+      // Compile the test with the body as the true target and preferred
+      // fall-through and with the break target as the false target.
+      ControlDestination dest(&body, node->break_target(), true);
+      LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+
+      if (dest.false_was_fall_through()) {
+        // If we got the break target as fall-through, the test may have
+        // been unconditionally false (if there are no jumps to the
+        // body).
+        if (!body.is_linked()) {
+          DecrementLoopNesting();
+          return;
+        }
+
+        // Otherwise, jump around the body on the fall through and then
+        // bind the body target.
+        node->break_target()->Unuse();
+        node->break_target()->Jump();
+        body.Bind();
+      }
+      break;
+    }
+    case ALWAYS_FALSE:
+      UNREACHABLE();
+      break;
+  }
+
+  CheckStack();  // TODO(1222600): ignore if body contains calls.
+  Visit(node->body());
+
+  // If there is an update expression, compile it if necessary.
+  if (node->next() != NULL) {
+    if (node->continue_target()->is_linked()) {
+      node->continue_target()->Bind();
+    }
+
+    // Control can reach the update by falling out of the body or by a
+    // continue.
+    if (has_valid_frame()) {
+      // Record the source position of the statement as this code which
+      // is after the code for the body actually belongs to the loop
+      // statement and not the body.
+      CodeForStatementPosition(node);
+      Visit(node->next());
+    }
+  }
+
+  // Based on the condition analysis, compile the backward jump as
+  // necessary.
+  switch (info) {
+    case ALWAYS_TRUE:
+      if (has_valid_frame()) {
+        if (node->next() == NULL) {
+          node->continue_target()->Jump();
+        } else {
+          loop.Jump();
+        }
+      }
+      break;
+    case DONT_KNOW:
+      if (test_at_bottom) {
+        if (node->continue_target()->is_linked()) {
+          // We can have dangling jumps to the continue target if there
+          // was no update expression.
+          node->continue_target()->Bind();
+        }
+        // Control can reach the test at the bottom by falling out of
+        // the body, by a continue in the body, or from the update
+        // expression.
+        if (has_valid_frame()) {
+          // The break target is the fall-through (body is a backward
+          // jump from here).
+          ControlDestination dest(&body, node->break_target(), false);
+          LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+        }
+      } else {
+        // Otherwise, jump back to the test at the top.
         if (has_valid_frame()) {
           if (node->next() == NULL) {
             node->continue_target()->Jump();
@@ -1580,47 +1621,19 @@
             loop.Jump();
           }
         }
-      } else {
-        ASSERT(info == DONT_KNOW);  // ALWAYS_FALSE cannot reach here.
-        if (test_at_bottom) {
-          if (node->continue_target()->is_linked()) {
-            // We can have dangling jumps to the continue target if
-            // there was no update expression.
-            node->continue_target()->Bind();
-          }
-          // Control can reach the test at the bottom by falling out
-          // of the body, by a continue in the body, or from the
-          // update expression.
-          if (has_valid_frame()) {
-            // The break target is the fall-through (body is a
-            // backward jump from here).
-            ControlDestination dest(&body, node->break_target(), false);
-            LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
-          }
-        } else {
-          // Otherwise, jump back to the test at the top.
-          if (has_valid_frame()) {
-            if (node->next() == NULL) {
-              node->continue_target()->Jump();
-            } else {
-              loop.Jump();
-            }
-          }
-        }
-      }
-
-      // The break target may be already bound (by the condition), or
-      // there may not be a valid frame.  Bind it only if needed.
-      if (node->break_target()->is_linked()) {
-        node->break_target()->Bind();
       }
       break;
-    }
+    case ALWAYS_FALSE:
+      UNREACHABLE();
+      break;
   }
 
+  // The break target may be already bound (by the condition), or there
+  // may not be a valid frame.  Bind it only if needed.
+  if (node->break_target()->is_linked()) {
+    node->break_target()->Bind();
+  }
   DecrementLoopNesting();
-  node->continue_target()->Unuse();
-  node->break_target()->Unuse();
 }
 
 
@@ -1700,19 +1713,19 @@
   __ movl(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
   __ Integer32ToSmi(rax, rax);
   frame_->EmitPush(rax);  // <- slot 1
-  frame_->EmitPush(Immediate(Smi::FromInt(0)));  // <- slot 0
+  frame_->EmitPush(Smi::FromInt(0));  // <- slot 0
   entry.Jump();
 
   fixed_array.Bind();
   // rax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
-  frame_->EmitPush(Immediate(Smi::FromInt(0)));  // <- slot 3
+  frame_->EmitPush(Smi::FromInt(0));  // <- slot 3
   frame_->EmitPush(rax);  // <- slot 2
 
   // Push the length of the array and the initial index onto the stack.
   __ movl(rax, FieldOperand(rax, FixedArray::kLengthOffset));
   __ Integer32ToSmi(rax, rax);
   frame_->EmitPush(rax);  // <- slot 1
-  frame_->EmitPush(Immediate(Smi::FromInt(0)));  // <- slot 0
+  frame_->EmitPush(Smi::FromInt(0));  // <- slot 0
 
   // Condition.
   entry.Bind();
@@ -1722,8 +1735,8 @@
   node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
 
   __ movq(rax, frame_->ElementAt(0));  // load the current count
-  __ cmpl(rax, frame_->ElementAt(1));  // compare to the array length
-  node->break_target()->Branch(above_equal);
+  __ SmiCompare(frame_->ElementAt(1), rax);  // compare to the array length
+  node->break_target()->Branch(below_equal);
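+  // The compare operands are swapped relative to the old cmpl, so the loop
+  // exits when length <= index.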
 
   // Get the i'th entry of the array.
   __ movq(rdx, frame_->ElementAt(2));
@@ -1796,7 +1809,7 @@
   node->continue_target()->Bind();
   frame_->SpillAll();
   frame_->EmitPop(rax);
-  __ addq(rax, Immediate(Smi::FromInt(1)));
+  __ SmiAddConstant(rax, rax, Smi::FromInt(1));
   frame_->EmitPush(rax);
   entry.Jump();
 
@@ -1812,10 +1825,10 @@
   node->break_target()->Unuse();
 }
 
-void CodeGenerator::VisitTryCatch(TryCatch* node) {
+void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
   ASSERT(!in_spilled_code());
   VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ TryCatch");
+  Comment cmnt(masm_, "[ TryCatchStatement");
   CodeForStatementPosition(node);
 
   JumpTarget try_block;
@@ -1951,10 +1964,10 @@
 }
 
 
-void CodeGenerator::VisitTryFinally(TryFinally* node) {
+void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
   ASSERT(!in_spilled_code());
   VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ TryFinally");
+  Comment cmnt(masm_, "[ TryFinallyStatement");
   CodeForStatementPosition(node);
 
   // State: Used to keep track of reason for entering the finally
@@ -1969,7 +1982,7 @@
 
   frame_->EmitPush(rax);
   // In case of thrown exceptions, this is where we continue.
-  __ movq(rcx, Immediate(Smi::FromInt(THROWING)));
+  __ Move(rcx, Smi::FromInt(THROWING));
   finally_block.Jump();
 
   // --- Try block ---
@@ -2028,7 +2041,7 @@
     // Fake a top of stack value (unneeded when FALLING) and set the
     // state in ecx, then jump around the unlink blocks if any.
     frame_->EmitPush(Heap::kUndefinedValueRootIndex);
-    __ movq(rcx, Immediate(Smi::FromInt(FALLING)));
+    __ Move(rcx, Smi::FromInt(FALLING));
     if (nof_unlinks > 0) {
       finally_block.Jump();
     }
@@ -2074,7 +2087,7 @@
         // Fake TOS for targets that shadowed breaks and continues.
         frame_->EmitPush(Heap::kUndefinedValueRootIndex);
       }
-      __ movq(rcx, Immediate(Smi::FromInt(JUMPING + i)));
+      __ Move(rcx, Smi::FromInt(JUMPING + i));
       if (--nof_unlinks > 0) {
         // If this is not the last unlink block, jump around the next.
         finally_block.Jump();
@@ -2105,7 +2118,7 @@
   for (int i = 0; i < shadows.length(); i++) {
     if (has_valid_frame() && shadows[i]->is_bound()) {
       BreakTarget* original = shadows[i]->other_target();
-      __ cmpq(rcx, Immediate(Smi::FromInt(JUMPING + i)));
+      __ SmiCompare(rcx, Smi::FromInt(JUMPING + i));
       if (i == kReturnShadowIndex) {
         // The return value is (already) in rax.
         Result return_value = allocator_->Allocate(rax);
@@ -2130,7 +2143,7 @@
   if (has_valid_frame()) {
     // Check if we need to rethrow the exception.
     JumpTarget exit;
-    __ cmpq(rcx, Immediate(Smi::FromInt(THROWING)));
+    __ SmiCompare(rcx, Smi::FromInt(THROWING));
     exit.Branch(not_equal);
 
     // Rethrow exception.
@@ -2164,12 +2177,10 @@
   ASSERT(boilerplate->IsBoilerplate());
   frame_->SyncRange(0, frame_->element_count() - 1);
 
-  // Push the boilerplate on the stack.
-  __ movq(kScratchRegister, boilerplate, RelocInfo::EMBEDDED_OBJECT);
-  frame_->EmitPush(kScratchRegister);
-
   // Create a new closure.
   frame_->EmitPush(rsi);
+  __ movq(kScratchRegister, boilerplate, RelocInfo::EMBEDDED_OBJECT);
+  frame_->EmitPush(kScratchRegister);
   Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
   frame_->Push(&result);
 }
@@ -2278,7 +2289,7 @@
   // Literal array (0).
   __ push(literals_);
   // Literal index (1).
-  __ push(Immediate(Smi::FromInt(node_->literal_index())));
+  __ Push(Smi::FromInt(node_->literal_index()));
   // RegExp pattern (2).
   __ Push(node_->pattern());
   // RegExp flags (3).
@@ -2351,7 +2362,7 @@
   // Literal array (0).
   __ push(literals_);
   // Literal index (1).
-  __ push(Immediate(Smi::FromInt(node_->literal_index())));
+  __ Push(Smi::FromInt(node_->literal_index()));
   // Constant properties (2).
   __ Push(node_->constant_properties());
   __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
@@ -2484,7 +2495,7 @@
   // Literal array (0).
   __ push(literals_);
   // Literal index (1).
-  __ push(Immediate(Smi::FromInt(node_->literal_index())));
+  __ Push(Smi::FromInt(node_->literal_index()));
   // Constant properties (2).
   __ Push(node_->literals());
   __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
@@ -3072,8 +3083,8 @@
 
       case Token::SUB: {
         bool overwrite =
-            (node->AsBinaryOperation() != NULL &&
-             node->AsBinaryOperation()->ResultOverwriteAllowed());
+          (node->expression()->AsBinaryOperation() != NULL &&
+           node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
         UnarySubStub stub(overwrite);
         // TODO(1222589): remove dependency of TOS being cached inside stub
         Result operand = frame_->Pop();
@@ -3151,7 +3162,7 @@
   __ push(dst_);
   __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
   __ push(rax);
-  __ push(Immediate(Smi::FromInt(1)));
+  __ Push(Smi::FromInt(1));
   if (is_increment_) {
     __ CallRuntime(Runtime::kNumberAdd, 2);
   } else {
@@ -3191,7 +3202,7 @@
 
   // Call the runtime for the addition or subtraction.
   __ push(rax);
-  __ push(Immediate(Smi::FromInt(1)));
+  __ Push(Smi::FromInt(1));
   if (is_increment_) {
     __ CallRuntime(Runtime::kNumberAdd, 2);
   } else {
@@ -3249,15 +3260,18 @@
                                                   is_increment);
     }
 
-    __ movq(kScratchRegister, new_value.reg());
+    __ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
     if (is_increment) {
-      __ addl(kScratchRegister, Immediate(Smi::FromInt(1)));
+      __ SmiAddConstant(kScratchRegister,
+                        new_value.reg(),
+                        Smi::FromInt(1),
+                        deferred->entry_label());
     } else {
-      __ subl(kScratchRegister, Immediate(Smi::FromInt(1)));
+      __ SmiSubConstant(kScratchRegister,
+                        new_value.reg(),
+                        Smi::FromInt(1),
+                        deferred->entry_label());
     }
-    // Smi test.
-    deferred->Branch(overflow);
-    __ JumpIfNotSmi(kScratchRegister, deferred->entry_label());
     __ movq(new_value.reg(), kScratchRegister);
     deferred->BindExit();
 
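The rewritten count operation above relies on the smi helpers to detect
overflow instead of branching on the CPU overflow flag.  A rough C++
analogue of what the emitted SmiAddConstant path computes, under the
same assumed upper-word encoding (names are illustrative):

    bool TrySmiAddConstant(int64_t smi, int32_t constant, int64_t* result) {
      int64_t sum = (smi >> 32) + constant;  // untag the 32-bit payloads
      if (sum != static_cast<int32_t>(sum)) return false;  // overflow: defer
      *result = static_cast<int64_t>(static_cast<uint64_t>(sum) << 32);
      return true;  // retagged smi result
    }

On overflow the generated code jumps to the deferred slow case with the
original operand left intact.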
@@ -3634,15 +3648,15 @@
 
   // Skip the arguments adaptor frame if it exists.
   Label check_frame_marker;
-  __ cmpq(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
-          Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
+                Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ j(not_equal, &check_frame_marker);
   __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
 
   // Check the marker in the calling frame.
   __ bind(&check_frame_marker);
-  __ cmpq(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
-          Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
+  __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
+                Smi::FromInt(StackFrame::CONSTRUCT));
   fp.Unuse();
   destination()->Split(equal);
 }
@@ -3878,7 +3892,7 @@
 void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 0);
   // RBP value is aligned, so it should be tagged as a smi (without necessarily
-  // being padded as a smi).
+  // being padded as a smi, so it should not be treated as a smi).
   ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
   Result rbp_as_smi = allocator_->Allocate();
   ASSERT(rbp_as_smi.is_valid());
@@ -3952,10 +3966,9 @@
   // Allocate heap number for result if possible.
   Result scratch = allocator()->Allocate();
   Result heap_number = allocator()->Allocate();
-  FloatingPointHelper::AllocateHeapNumber(masm_,
-                                          call_runtime.entry_label(),
-                                          scratch.reg(),
-                                          heap_number.reg());
+  __ AllocateHeapNumber(heap_number.reg(),
+                        scratch.reg(),
+                        call_runtime.entry_label());
   scratch.Unuse();
 
   // Store the result in the allocated heap number.
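This hunk, and several matching ones below, fold the old
FloatingPointHelper::AllocateHeapNumber into a MacroAssembler method
(the helper's body is deleted further down in this patch).  Note that
the argument order changes as well; schematically:

    // before:
    FloatingPointHelper::AllocateHeapNumber(masm, gc_required, scratch, result);
    // after: result register first, scratch second, bailout label last.
    __ AllocateHeapNumber(result, scratch, gc_required);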
@@ -4226,18 +4239,6 @@
 }
 
 
-class ToBooleanStub: public CodeStub {
- public:
-  ToBooleanStub() { }
-
-  void Generate(MacroAssembler* masm);
-
- private:
-  Major MajorKey() { return ToBoolean; }
-  int MinorKey() { return 0; }
-};
-
-
 // ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
 // convert it to a boolean in the condition code register or jump to
 // 'false_target'/'true_target' as appropriate.
@@ -4262,8 +4263,8 @@
   dest->false_target()->Branch(equal);
 
   // Smi => false iff zero.
-  Condition equals = masm_->CheckSmiEqualsConstant(value.reg(), 0);
-  dest->false_target()->Branch(equals);
+  __ SmiCompare(value.reg(), Smi::FromInt(0));
+  dest->false_target()->Branch(equal);
   Condition is_smi = masm_->CheckSmi(value.reg());
   dest->true_target()->Branch(is_smi);
 
@@ -4945,7 +4946,7 @@
       right_side = Result(right_val);
       // Test smi equality and comparison by signed int comparison.
       // Both sides are smis, so we can use an Immediate.
-      __ cmpl(left_side.reg(), Immediate(Smi::cast(*right_side.handle())));
+      __ SmiCompare(left_side.reg(), Smi::cast(*right_side.handle()));
       left_side.Unuse();
       right_side.Unuse();
       dest->Split(cc);
@@ -4978,7 +4979,7 @@
       Result temp = allocator()->Allocate();
       ASSERT(temp.is_valid());
       __ movq(temp.reg(),
-             FieldOperand(operand.reg(), HeapObject::kMapOffset));
+              FieldOperand(operand.reg(), HeapObject::kMapOffset));
       __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
                Immediate(1 << Map::kIsUndetectable));
       temp.Unuse();
@@ -4998,7 +4999,7 @@
       CompareStub stub(cc, strict);
       Result answer = frame_->CallStub(&stub, &left_side, &right_side);
       // The result is a Smi, which is negative, zero, or positive.
-      __ testl(answer.reg(), answer.reg());  // Both zero and sign flag right.
+      __ SmiTest(answer.reg());  // Sets both zero and sign flag.
       answer.Unuse();
       dest->Split(cc);
     } else {
@@ -5016,7 +5017,7 @@
       // When non-smi, call out to the compare stub.
       CompareStub stub(cc, strict);
       Result answer = frame_->CallStub(&stub, &left_side, &right_side);
-      __ testl(answer.reg(), answer.reg());  // Sets both zero and sign flags.
+      __ SmiTest(answer.reg());  // Sets both zero and sign flags.
       answer.Unuse();
       dest->true_target()->Branch(cc);
       dest->false_target()->Jump();
@@ -5024,7 +5025,7 @@
       is_smi.Bind();
       left_side = Result(left_reg);
       right_side = Result(right_reg);
-      __ cmpl(left_side.reg(), right_side.reg());
+      __ SmiCompare(left_side.reg(), right_side.reg());
       right_side.Unuse();
       left_side.Unuse();
       dest->Split(cc);
@@ -5221,7 +5222,7 @@
 
 void DeferredInlineSmiAdd::Generate() {
   __ push(dst_);
-  __ push(Immediate(value_));
+  __ Push(value_);
   GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
   __ CallStub(&igostub);
   if (!dst_.is(rax)) __ movq(dst_, rax);
@@ -5229,7 +5230,7 @@
 
 
 void DeferredInlineSmiAddReversed::Generate() {
-  __ push(Immediate(value_));  // Note: sign extended.
+  __ Push(value_);
   __ push(dst_);
   GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
   __ CallStub(&igostub);
@@ -5239,7 +5240,7 @@
 
 void DeferredInlineSmiSub::Generate() {
   __ push(dst_);
-  __ push(Immediate(value_));  // Note: sign extended.
+  __ Push(value_);
   GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
   __ CallStub(&igostub);
   if (!dst_.is(rax)) __ movq(dst_, rax);
@@ -5248,7 +5249,7 @@
 
 void DeferredInlineSmiOperation::Generate() {
   __ push(src_);
-  __ push(Immediate(value_));  // Note: sign extended.
+  __ Push(value_);
   // For mod we don't generate all the Smi code inline.
   GenericBinaryOpStub stub(
       op_,
@@ -5306,7 +5307,7 @@
       __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
       __ SmiAddConstant(operand->reg(),
                         operand->reg(),
-                        int_value,
+                        smi_value,
                         deferred->entry_label());
       deferred->BindExit();
       frame_->Push(operand);
@@ -5328,7 +5329,7 @@
         // Subtract the smi constant, deferring to the stub on overflow.
         __ SmiSubConstant(operand->reg(),
                           operand->reg(),
-                          int_value,
+                          smi_value,
                           deferred->entry_label());
         deferred->BindExit();
         frame_->Push(operand);
@@ -5382,9 +5383,9 @@
                                            overwrite_mode);
         __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
         __ SmiShiftLogicalRightConstant(answer.reg(),
-                                      operand->reg(),
-                                      shift_value,
-                                      deferred->entry_label());
+                                        operand->reg(),
+                                        shift_value,
+                                        deferred->entry_label());
         deferred->BindExit();
         operand->Unuse();
         frame_->Push(&answer);
@@ -5453,15 +5454,15 @@
                                                                overwrite_mode);
       __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
       if (op == Token::BIT_AND) {
-        __ SmiAndConstant(operand->reg(), operand->reg(), int_value);
+        __ SmiAndConstant(operand->reg(), operand->reg(), smi_value);
       } else if (op == Token::BIT_XOR) {
         if (int_value != 0) {
-          __ SmiXorConstant(operand->reg(), operand->reg(), int_value);
+          __ SmiXorConstant(operand->reg(), operand->reg(), smi_value);
         }
       } else {
         ASSERT(op == Token::BIT_OR);
         if (int_value != 0) {
-          __ SmiOrConstant(operand->reg(), operand->reg(), int_value);
+          __ SmiOrConstant(operand->reg(), operand->reg(), smi_value);
         }
       }
       deferred->BindExit();
@@ -5476,18 +5477,21 @@
           (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
         operand->ToRegister();
         frame_->Spill(operand->reg());
-        DeferredCode* deferred = new DeferredInlineSmiOperation(op,
-                                                                operand->reg(),
-                                                                operand->reg(),
-                                                                smi_value,
-                                                                overwrite_mode);
+        DeferredCode* deferred =
+            new DeferredInlineSmiOperation(op,
+                                           operand->reg(),
+                                           operand->reg(),
+                                           smi_value,
+                                           overwrite_mode);
         // Check for negative or non-Smi left hand side.
         __ JumpIfNotPositiveSmi(operand->reg(), deferred->entry_label());
         if (int_value < 0) int_value = -int_value;
         if (int_value == 1) {
-          __ movl(operand->reg(), Immediate(Smi::FromInt(0)));
+          __ Move(operand->reg(), Smi::FromInt(0));
         } else {
-          __ SmiAndConstant(operand->reg(), operand->reg(), int_value - 1);
+          __ SmiAndConstant(operand->reg(),
+                            operand->reg(),
+                            Smi::FromInt(int_value - 1));
         }
         deferred->BindExit();
         frame_->Push(operand);
@@ -6085,8 +6089,6 @@
 
         // Check that the key is a non-negative smi.
         __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
-        // Ensure that the smi is zero-extended.  This is not guaranteed.
-        __ movl(key.reg(), key.reg());
 
         // Check that the receiver is not a smi.
         __ JumpIfSmi(receiver.reg(), deferred->entry_label());
@@ -6096,10 +6098,10 @@
         deferred->Branch(not_equal);
 
         // Check that the key is within bounds.  Both the key and the
-        // length of the JSArray are smis, so compare only low 32 bits.
-        __ cmpl(key.reg(),
-                FieldOperand(receiver.reg(), JSArray::kLengthOffset));
-        deferred->Branch(greater_equal);
+        // length of the JSArray are smis.
+        __ SmiCompare(FieldOperand(receiver.reg(), JSArray::kLengthOffset),
+                      key.reg());
+        deferred->Branch(less_equal);
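+        // Note the compare operands are swapped relative to the old cmpl,
+        // so the bailout condition flips to less_equal: the deferred path
+        // is still taken exactly when key >= length.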
 
         // Get the elements array from the receiver and check that it
         // is a flat array (not a dictionary).
@@ -6190,16 +6192,11 @@
   // These three cases set C3 when compared to zero in the FPU.
   __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
   __ j(not_equal, &true_result);
-  // TODO(x64): Don't use fp stack, use MMX registers?
   __ fldz();  // Load zero onto fp stack
   // Load heap-number double value onto fp stack
   __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
-  __ fucompp();  // Compare and pop both values.
-  __ movq(kScratchRegister, rax);
-  __ fnstsw_ax();  // Store fp status word in ax, no checking for exceptions.
-  __ testl(rax, Immediate(0x4000));  // Test FP condition flag C3, bit 16.
-  __ movq(rax, kScratchRegister);
-  __ j(not_zero, &false_result);
+  __ FCmp();
+  __ j(zero, &false_result);
   // Fall through to |true_result|.
 
   // Return 1/0 for true/false in rax.
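This hunk, and the operand-conversion hunks further down, collapse the
hand-rolled x87 status-word sequences into a single FCmp() call.  A
plausible expansion of that macro, stated as an assumption rather than
a quote from the macro assembler:

    // FCmp() presumably compares st(0) with st(1), pops both, and moves
    // the x87 condition codes into the CPU flags, along the lines of:
    //   fucompp();    // compare st(0), st(1) and pop both
    //   fnstsw_ax();  // FPU status word -> ax
    //   sahf();       // C0/C2/C3 -> ZF/PF/CF, so j(zero)/j(parity_even) work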
@@ -6303,22 +6300,17 @@
   Label slow;
   Label done;
   Label try_float;
-  Label special;
   // Check whether the value is a smi.
   __ JumpIfNotSmi(rax, &try_float);
 
   // Enter runtime system if the value of the smi is zero
   // to make sure that we switch between 0 and -0.
-  // Also enter it if the value of the smi is Smi::kMinValue
-  __ testl(rax, Immediate(0x7FFFFFFE));
-  __ j(zero, &special);
-  __ negl(rax);
-  __ jmp(&done);
+  // Also enter it if the value of the smi is Smi::kMinValue.
+  __ SmiNeg(rax, rax, &done);
 
-  __ bind(&special);
-  // Either zero or -0x4000000, neither of which become a smi when negated.
-  __ testl(rax, rax);
-  __ j(not_zero, &slow);
+  // Either zero or Smi::kMinValue, neither of which become a smi when negated.
+  __ SmiCompare(rax, Smi::FromInt(0));
+  __ j(not_equal, &slow);
   __ Move(rax, Factory::minus_zero_value());
   __ jmp(&done);
 
@@ -6344,7 +6336,7 @@
   if (overwrite_) {
     __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
   } else {
-    FloatingPointHelper::AllocateHeapNumber(masm, &slow, rbx, rcx);
+    __ AllocateHeapNumber(rcx, rbx, &slow);
     // rcx: allocated 'empty' number
     __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
     __ movq(rax, rcx);
@@ -6470,7 +6462,7 @@
   // Call builtin if operands are not floating point or smi.
   Label check_for_symbols;
   // Push arguments on stack, for helper functions.
-  FloatingPointHelper::CheckFloatOperands(masm, &check_for_symbols);
+  FloatingPointHelper::CheckNumberOperands(masm, &check_for_symbols);
   FloatingPointHelper::LoadFloatOperands(masm, rax, rdx);
   __ FCmp();
 
@@ -6527,7 +6519,7 @@
       ASSERT(cc_ == greater || cc_ == greater_equal);  // remaining cases
       ncr = LESS;
     }
-    __ push(Immediate(Smi::FromInt(ncr)));
+    __ Push(Smi::FromInt(ncr));
   }
 
   // Restore return address on the stack.
@@ -6626,7 +6618,7 @@
   __ ret(2 * kPointerSize);
 
   __ bind(&is_not_instance);
-  __ movq(rax, Immediate(Smi::FromInt(1)));
+  __ Move(rax, Smi::FromInt(1));
   __ ret(2 * kPointerSize);
 
   // Slow-case: Go through the JavaScript implementation.
@@ -6644,8 +6636,8 @@
   // Check if the calling frame is an arguments adaptor frame.
   Label runtime;
   __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
-  __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
+                Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ j(not_equal, &runtime);
   // Value in rcx is Smi encoded.
 
@@ -6678,8 +6670,8 @@
   // Check if the calling frame is an arguments adaptor frame.
   Label adaptor;
   __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ movq(rcx, Operand(rbx, StandardFrameConstants::kContextOffset));
-  __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset),
+                Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ j(equal, &adaptor);
 
   // Check index against formal parameters count limit passed in
@@ -6726,8 +6718,8 @@
   // Check if the calling frame is an arguments adaptor frame.
   Label adaptor;
   __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
-  __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
+                Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ j(equal, &adaptor);
 
   // Nothing to do: The formal number of parameters has already been
@@ -6858,6 +6850,15 @@
   // Check for failure result.
   Label failure_returned;
   ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
+#ifdef _WIN64
+  // If return value is on the stack, pop it to registers.
+  if (result_size_ > 1) {
+    ASSERT_EQ(2, result_size_);
+    // Position above 4 argument mirrors and arguments object.
+    __ movq(rax, Operand(rsp, 6 * kPointerSize));
+    __ movq(rdx, Operand(rsp, 7 * kPointerSize));
+  }
+#endif
   __ lea(rcx, Operand(rax, 1));
   // Lower 2 bits of rcx are 0 iff rax has failure tag.
   __ testl(rcx, Immediate(kFailureTagMask));
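A note on the Win64 block in the hunk above: a 16-byte aggregate return
value is assumed to come back through caller-provided memory rather
than in rax:rdx, so the stub reloads the two words from the stack; the
6 and 7 slot offsets skip the four register-argument home slots plus
the state described in the comment.  Roughly:

    // Assumed shape of a two-word runtime result on Win64, returned via
    // a hidden pointer rather than in registers:
    struct ObjectPair { void* first; void* second; };  // 16 bytes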
@@ -7069,8 +7070,8 @@
 
   // Push the stack frame type marker twice.
   int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
-  __ push(Immediate(Smi::FromInt(marker)));  // context slot
-  __ push(Immediate(Smi::FromInt(marker)));  // function slot
+  __ Push(Smi::FromInt(marker));  // context slot
+  __ Push(Smi::FromInt(marker));  // function slot
   // Save callee-saved registers (X64 calling conventions).
   __ push(r12);
   __ push(r13);
@@ -7182,7 +7183,7 @@
   // must be inserted below the return address on the stack so we
   // temporarily store that in a register.
   __ pop(rax);
-  __ push(Immediate(Smi::FromInt(0)));
+  __ Push(Smi::FromInt(0));
   __ push(rax);
 
   // Do tail-call to runtime routine.
@@ -7191,24 +7192,6 @@
 }
 
 
-void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
-                                             Label* need_gc,
-                                             Register scratch,
-                                             Register result) {
-  // Allocate heap number in new space.
-  __ AllocateInNewSpace(HeapNumber::kSize,
-                        result,
-                        scratch,
-                        no_reg,
-                        need_gc,
-                        TAG_OBJECT);
-
-  // Set the map and tag the result.
-  __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
-  __ movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
-}
-
-
 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
                                            Register number) {
   Label load_smi, done;
@@ -7321,8 +7304,8 @@
 }
 
 
-void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
-                                             Label* non_float) {
+void FloatingPointHelper::CheckNumberOperands(MacroAssembler* masm,
+                                              Label* non_float) {
   Label test_other, done;
   // Test if both operands are numbers (heap_numbers or smis).
   // If not, jump to label non_float.
@@ -7403,17 +7386,17 @@
     case Token::SHR:
     case Token::SAR:
       // Move the second operand into register ecx.
-      __ movl(rcx, rbx);
+      __ movq(rcx, rbx);
       // Perform the operation.
       switch (op_) {
         case Token::SAR:
-          __ SmiShiftArithmeticRight(rax, rax, rbx);
+          __ SmiShiftArithmeticRight(rax, rax, rcx);
           break;
         case Token::SHR:
-          __ SmiShiftLogicalRight(rax, rax, rbx, slow);
+          __ SmiShiftLogicalRight(rax, rax, rcx, slow);
           break;
         case Token::SHL:
-          __ SmiShiftLeft(rax, rax, rbx, slow);
+          __ SmiShiftLeft(rax, rax, rcx, slow);
           break;
         default:
           UNREACHABLE();
@@ -7454,7 +7437,7 @@
     case Token::DIV: {
       // rax: y
       // rdx: x
-      FloatingPointHelper::CheckFloatOperands(masm, &call_runtime);
+      FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
       // Fast-case: Both operands are numbers.
       // Allocate a heap number, if needed.
       Label skip_allocation;
@@ -7468,10 +7451,7 @@
           __ JumpIfNotSmi(rax, &skip_allocation);
           // Fall through!
         case NO_OVERWRITE:
-          FloatingPointHelper::AllocateHeapNumber(masm,
-                                                  &call_runtime,
-                                                  rcx,
-                                                  rax);
+          __ AllocateHeapNumber(rax, rcx, &call_runtime);
           __ bind(&skip_allocation);
           break;
         default: UNREACHABLE();
@@ -7499,7 +7479,7 @@
     case Token::SAR:
     case Token::SHL:
     case Token::SHR: {
-      FloatingPointHelper::CheckFloatOperands(masm, &call_runtime);
+      FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
       // TODO(X64): Don't convert a Smi to float and then back to int32
       // afterwards.
       FloatingPointHelper::LoadFloatOperands(masm);
@@ -7522,60 +7502,43 @@
         // Check if right operand is int32.
         __ fist_s(Operand(rsp, 0 * kPointerSize));
         __ fild_s(Operand(rsp, 0 * kPointerSize));
-        __ fucompp();
-        __ fnstsw_ax();
-        if (CpuFeatures::IsSupported(CpuFeatures::SAHF)) {
-          __ sahf();
-          __ j(not_zero, &operand_conversion_failure);
-          __ j(parity_even, &operand_conversion_failure);
-        } else {
-          __ and_(rax, Immediate(0x4400));
-          __ cmpl(rax, Immediate(0x4000));
-          __ j(not_zero, &operand_conversion_failure);
-        }
+        __ FCmp();
+        __ j(not_zero, &operand_conversion_failure);
+        __ j(parity_even, &operand_conversion_failure);
+
         // Check if left operand is int32.
         __ fist_s(Operand(rsp, 1 * kPointerSize));
         __ fild_s(Operand(rsp, 1 * kPointerSize));
-        __ fucompp();
-        __ fnstsw_ax();
-        if (CpuFeatures::IsSupported(CpuFeatures::SAHF)) {
-          __ sahf();
-          __ j(not_zero, &operand_conversion_failure);
-          __ j(parity_even, &operand_conversion_failure);
-        } else {
-          __ and_(rax, Immediate(0x4400));
-          __ cmpl(rax, Immediate(0x4000));
-          __ j(not_zero, &operand_conversion_failure);
-        }
+        __ FCmp();
+        __ j(not_zero, &operand_conversion_failure);
+        __ j(parity_even, &operand_conversion_failure);
       }
 
       // Get int32 operands and perform bitop.
       __ pop(rcx);
       __ pop(rax);
       switch (op_) {
-        case Token::BIT_OR:  __ or_(rax, rcx); break;
-        case Token::BIT_AND: __ and_(rax, rcx); break;
-        case Token::BIT_XOR: __ xor_(rax, rcx); break;
+        case Token::BIT_OR:  __ orl(rax, rcx); break;
+        case Token::BIT_AND: __ andl(rax, rcx); break;
+        case Token::BIT_XOR: __ xorl(rax, rcx); break;
         case Token::SAR: __ sarl(rax); break;
         case Token::SHL: __ shll(rax); break;
         case Token::SHR: __ shrl(rax); break;
         default: UNREACHABLE();
       }
       if (op_ == Token::SHR) {
-        // Check if result is non-negative and fits in a smi.
-        __ testl(rax, Immediate(0xc0000000));
-        __ j(not_zero, &non_smi_result);
-      } else {
-        // Check if result fits in a smi.
-        __ cmpl(rax, Immediate(0xc0000000));
+        // Check that the result is non-negative.  A negative result can
+        // only come from a shift by zero, which also leaves the sign flag
+        // stale, hence the explicit test below.
+        __ testl(rax, rax);
         __ j(negative, &non_smi_result);
       }
-      // Tag smi result and return.
+      __ JumpIfNotValidSmiValue(rax, &non_smi_result);
+      // Tag smi result, if possible, and return.
       __ Integer32ToSmi(rax, rax);
       __ ret(2 * kPointerSize);
 
       // All ops except SHR return a signed int32 that we load in a HeapNumber.
-      if (op_ != Token::SHR) {
+      if (op_ != Token::SHR && non_smi_result.is_linked()) {
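+        // When every 32-bit result fits the smi range, the valid-smi check
+        // above is assumed to emit no jump, so the label can stay unlinked;
+        // the guard avoids binding (and emitting) an unreachable block.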
         __ bind(&non_smi_result);
         // Allocate a heap number if needed.
         __ movsxlq(rbx, rax);  // rbx: sign extended 32-bit result
@@ -7589,8 +7552,7 @@
             __ JumpIfNotSmi(rax, &skip_allocation);
             // Fall through!
           case NO_OVERWRITE:
-            FloatingPointHelper::AllocateHeapNumber(masm, &call_runtime,
-                                                    rcx, rax);
+            __ AllocateHeapNumber(rax, rcx, &call_runtime);
             __ bind(&skip_allocation);
             break;
           default: UNREACHABLE();
@@ -7678,6 +7640,98 @@
   return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0);
 }
 
+#undef __
+
+#define __ masm.
+
+#ifdef _WIN64
+typedef double (*ModuloFunction)(double, double);
+// Define custom fmod implementation.
+ModuloFunction CreateModuloFunction() {
+  size_t actual_size;
+  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                                 &actual_size,
+                                                 true));
+  CHECK(buffer);
+  Assembler masm(buffer, actual_size);
+  // Generated code is put into a fixed, unmovable buffer, and not into
+  // the V8 heap. We can't, and don't, refer to any relocatable addresses
+  // (e.g. the JavaScript nan-object).
+
+  // Windows 64 ABI passes double arguments in xmm0, xmm1 and
+  // returns result in xmm0.
+  // Argument backing space is allocated on the stack above
+  // the return address.
+
+  // Compute x mod y.
+  // Load y and x (use argument backing store as temporary storage).
+  __ movsd(Operand(rsp, kPointerSize * 2), xmm1);
+  __ movsd(Operand(rsp, kPointerSize), xmm0);
+  __ fld_d(Operand(rsp, kPointerSize * 2));
+  __ fld_d(Operand(rsp, kPointerSize));
+
+  // Clear exception flags before operation.
+  {
+    Label no_exceptions;
+    __ fwait();
+    __ fnstsw_ax();
+    // Clear if Invalid Operation or Zero Division exceptions are set.
+    __ testb(rax, Immediate(5));
+    __ j(zero, &no_exceptions);
+    __ fnclex();
+    __ bind(&no_exceptions);
+  }
+
+  // Compute st(0) % st(1)
+  {
+    Label partial_remainder_loop;
+    __ bind(&partial_remainder_loop);
+    __ fprem();
+    __ fwait();
+    __ fnstsw_ax();
+    __ testl(rax, Immediate(0x400 /* C2 */));
+    // If C2 is set, computation only has partial result. Loop to
+    // continue computation.
+    __ j(not_zero, &partial_remainder_loop);
+  }
+
+  Label valid_result;
+  Label return_result;
+  // If Invalid Operation or Zero Division exceptions are set,
+  // return NaN.
+  __ testb(rax, Immediate(5));
+  __ j(zero, &valid_result);
+  __ fstp(0);  // Drop result in st(0).
+  int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
+  __ movq(rcx, kNaNValue, RelocInfo::NONE);
+  __ movq(Operand(rsp, kPointerSize), rcx);
+  __ movsd(xmm0, Operand(rsp, kPointerSize));
+  __ jmp(&return_result);
+
+  // If result is valid, return that.
+  __ bind(&valid_result);
+  __ fstp_d(Operand(rsp, kPointerSize));
+  __ movsd(xmm0, Operand(rsp, kPointerSize));
+
+  // Clean up FPU stack and exceptions and return xmm0
+  __ bind(&return_result);
+  __ fstp(0);  // Unload y.
+
+  Label clear_exceptions;
+  __ testb(rax, Immediate(0x3f /* Any Exception */));
+  __ j(not_zero, &clear_exceptions);
+  __ ret(0);
+  __ bind(&clear_exceptions);
+  __ fnclex();
+  __ ret(0);
+
+  CodeDesc desc;
+  masm.GetCode(&desc);
+  // The generated code is now callable from C++ through the returned pointer.
+  return FUNCTION_CAST<ModuloFunction>(buffer);
+}
+
+#endif
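For orientation, the generated stub is meant to be callable from C++
exactly like fmod.  A hypothetical call site (variable names assumed):

    static ModuloFunction modulo_function = CreateModuloFunction();
    double remainder = modulo_function(5.5, 2.0);  // 1.5, via fprem semantics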
 
 #undef __
 
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 87db3a9..56b88b7 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -294,6 +294,15 @@
                                Handle<Script> script,
                                bool is_eval);
 
+  // Printing of AST, etc. as requested by flags.
+  static void MakeCodePrologue(FunctionLiteral* fun);
+
+  // Allocate and install the code.
+  static Handle<Code> MakeCodeEpilogue(FunctionLiteral* fun,
+                                       MacroAssembler* masm,
+                                       Code::Flags flags,
+                                       Handle<Script> script);
+
 #ifdef ENABLE_LOGGING_AND_PROFILING
   static bool ShouldGenerateLog(Expression* type);
 #endif
@@ -303,6 +312,8 @@
                               bool is_toplevel,
                               Handle<Script> script);
 
+  static void RecordPositions(MacroAssembler* masm, int pos);
+
   // Accessors
   MacroAssembler* masm() { return masm_; }
 
@@ -387,7 +398,7 @@
   void LoadReference(Reference* ref);
   void UnloadReference(Reference* ref);
 
-  Operand ContextOperand(Register context, int index) const {
+  static Operand ContextOperand(Register context, int index) {
     return Operand(context, Context::SlotOffset(index));
   }
 
@@ -398,7 +409,7 @@
                                             JumpTarget* slow);
 
   // Expressions
-  Operand GlobalObject() const {
+  static Operand GlobalObject() {
     return ContextOperand(rsi, Context::GLOBAL_INDEX);
   }
 
@@ -500,10 +511,11 @@
   static bool PatchInlineRuntimeEntry(Handle<String> name,
                                       const InlineRuntimeLUT& new_entry,
                                       InlineRuntimeLUT* old_entry);
+  static Handle<Code> ComputeLazyCompile(int argc);
   Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
   void ProcessDeclarations(ZoneList<Declaration*>* declarations);
 
-  Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
+  static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
 
   // Declare global variables and functions in the given array of
   // name/value pairs.
@@ -548,6 +560,14 @@
   inline void GenerateMathSin(ZoneList<Expression*>* args);
   inline void GenerateMathCos(ZoneList<Expression*>* args);
 
+  // Simple condition analysis.
+  enum ConditionAnalysis {
+    ALWAYS_TRUE,
+    ALWAYS_FALSE,
+    DONT_KNOW
+  };
+  ConditionAnalysis AnalyzeCondition(Expression* cond);
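+  // AnalyzeCondition is assumed to fold literal conditions (e.g. the
+  // constant in `while (true)`) so statically dead branches are skipped.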
+
   // Methods used to indicate which source code is generated for. Source
   // positions are collected by the assembler and emitted with the relocation
   // information.
@@ -597,6 +617,8 @@
   friend class JumpTarget;
   friend class Reference;
   friend class Result;
+  friend class FastCodeGenerator;
+  friend class CodeGenSelector;
 
   friend class CodeGeneratorPatcher;  // Used in test-log-stack-tracer.cc
 
@@ -613,6 +635,18 @@
 // which is declared in code-stubs.h.
 
 
+class ToBooleanStub: public CodeStub {
+ public:
+  ToBooleanStub() { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Major MajorKey() { return ToBoolean; }
+  int MinorKey() { return 0; }
+};
+
+
 // Flag that indicates whether or not the code that handles smi arguments
 // should be placed in the stub, inlined, or omitted entirely.
 enum GenericBinaryFlags {
diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc
index 10092c5..49240b4 100644
--- a/src/x64/debug-x64.cc
+++ b/src/x64/debug-x64.cc
@@ -39,10 +39,7 @@
 
 bool Debug::IsDebugBreakAtReturn(v8::internal::RelocInfo* rinfo) {
   ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
-  // 11th byte of patch is 0x49 (REX.WB byte of computed jump/call to r10),
-  // 11th byte of JS return is 0xCC (int3).
-  ASSERT(*(rinfo->pc() + 10) == 0x49 || *(rinfo->pc() + 10) == 0xCC);
-  return (*(rinfo->pc() + 10) != 0xCC);
+  return rinfo->IsPatchedReturnSequence();
 }
 
 #define __ ACCESS_MASM(masm)
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index d8d6dbb..19bcf66 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -860,12 +860,22 @@
       return count + 1;
     }
   } else if (b1 == 0xDD) {
-    if ((b2 & 0xF8) == 0xC0) {
-      AppendToBuffer("ffree st%d", b2 & 0x7);
+    int mod, regop, rm;
+    get_modrm(*(data + 1), &mod, &regop, &rm);
+    if (mod == 3) {
+      switch (regop) {
+        case 0:
+          AppendToBuffer("ffree st%d", rm & 7);
+          break;
+        case 2:
+          AppendToBuffer("fstp st%d", rm & 7);
+          break;
+        default:
+          UnimplementedInstruction();
+          break;
+      }
       return 2;
     } else {
-      int mod, regop, rm;
-      get_modrm(*(data + 1), &mod, &regop, &rm);
       const char* mnem = "?";
       switch (regop) {
         case 0:
diff --git a/src/x64/fast-codegen-x64.cc b/src/x64/fast-codegen-x64.cc
new file mode 100644
index 0000000..46d8dc4
--- /dev/null
+++ b/src/x64/fast-codegen-x64.cc
@@ -0,0 +1,559 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "debug.h"
+#include "fast-codegen.h"
+#include "parser.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+// Generate code for a JS function.  On entry to the function the receiver
+// and arguments have been pushed on the stack left to right, with the
+// return address on top of them.  The actual argument count matches the
+// formal parameter count expected by the function.
+//
+// The live registers are:
+//   o rdi: the JS function object being called (i.e. ourselves)
+//   o rsi: our context
+//   o rbp: our caller's frame pointer
+//   o rsp: stack pointer (pointing to return address)
+//
+// The function builds a JS frame.  Please see JavaScriptFrameConstants in
+// frames-x64.h for its layout.
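+//
+// For reference, the frame the prologue below builds is laid out as
+// follows (a sketch; offsets assume kPointerSize == 8):
+//   rbp + 8  : return address
+//   rbp + 0  : caller's rbp
+//   rbp - 8  : context (rsi)
+//   rbp - 16 : JS function (rdi)
+//   rbp - 24 : first stack local, initialized to undefined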
+void FastCodeGenerator::Generate(FunctionLiteral* fun) {
+  function_ = fun;
+  SetFunctionPosition(fun);
+
+  __ push(rbp);  // Caller's frame pointer.
+  __ movq(rbp, rsp);
+  __ push(rsi);  // Callee's context.
+  __ push(rdi);  // Callee's JS Function.
+
+  { Comment cmnt(masm_, "[ Allocate locals");
+    int locals_count = fun->scope()->num_stack_slots();
+    for (int i = 0; i < locals_count; i++) {
+      __ PushRoot(Heap::kUndefinedValueRootIndex);
+    }
+  }
+
+  { Comment cmnt(masm_, "[ Stack check");
+    Label ok;
+    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+    __ j(above_equal, &ok);
+    StackCheckStub stub;
+    __ CallStub(&stub);
+    __ bind(&ok);
+  }
+
+  { Comment cmnt(masm_, "[ Declarations");
+    VisitDeclarations(fun->scope()->declarations());
+  }
+
+  if (FLAG_trace) {
+    __ CallRuntime(Runtime::kTraceEnter, 0);
+  }
+
+  { Comment cmnt(masm_, "[ Body");
+    VisitStatements(fun->body());
+  }
+
+  { Comment cmnt(masm_, "[ return <undefined>;");
+    // Emit a 'return undefined' in case control fell off the end of the
+    // body.
+    __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+    SetReturnPosition(fun);
+    if (FLAG_trace) {
+      __ push(rax);
+      __ CallRuntime(Runtime::kTraceExit, 1);
+    }
+    __ RecordJSReturn();
+
+    // Do not use the leave instruction here because it is too short to
+    // patch with the code required by the debugger.
+    __ movq(rsp, rbp);
+    __ pop(rbp);
+    __ ret((fun->scope()->num_parameters() + 1) * kPointerSize);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+    // Add padding that will be overwritten by a debugger breakpoint.  We
+    // have just generated "movq rsp, rbp; pop rbp; ret k" with length 7
+    // (3 + 1 + 3).
+    const int kPadding = Debug::kX64JSReturnSequenceLength - 7;
+    for (int i = 0; i < kPadding; ++i) {
+      masm_->int3();
+    }
+#endif
+  }
+}
+
+
+void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+  // Call the runtime to declare the globals.
+  __ push(rsi);  // The context is the first argument.
+  __ Push(pairs);
+  __ Push(Smi::FromInt(is_eval_ ? 1 : 0));
+  __ CallRuntime(Runtime::kDeclareGlobals, 3);
+  // Return value is ignored.
+}
+
+
+void FastCodeGenerator::VisitBlock(Block* stmt) {
+  Comment cmnt(masm_, "[ Block");
+  SetStatementPosition(stmt);
+  VisitStatements(stmt->statements());
+}
+
+
+void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
+  Comment cmnt(masm_, "[ ExpressionStatement");
+  SetStatementPosition(stmt);
+  Visit(stmt->expression());
+}
+
+
+void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
+  Comment cmnt(masm_, "[ ReturnStatement");
+  SetStatementPosition(stmt);
+  Expression* expr = stmt->expression();
+  Visit(expr);
+
+  // Complete the statement based on the location of the subexpression.
+  Location source = expr->location();
+  ASSERT(!source.is_nowhere());
+  if (source.is_temporary()) {
+    __ pop(rax);
+  } else {
+    ASSERT(source.is_constant());
+    ASSERT(expr->AsLiteral() != NULL);
+    __ Move(rax, expr->AsLiteral()->handle());
+  }
+  if (FLAG_trace) {
+    __ push(rax);
+    __ CallRuntime(Runtime::kTraceExit, 1);
+  }
+
+  __ RecordJSReturn();
+  // Do not use the leave instruction here because it is too short to
+  // patch with the code required by the debugger.
+  __ movq(rsp, rbp);
+  __ pop(rbp);
+  __ ret((function_->scope()->num_parameters() + 1) * kPointerSize);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Add padding that will be overwritten by a debugger breakpoint.  We
+  // have just generated "movq rsp, rbp; pop rbp; ret k" with length 7
+  // (3 + 1 + 3).
+  const int kPadding = Debug::kX64JSReturnSequenceLength - 7;
+  for (int i = 0; i < kPadding; ++i) {
+    masm_->int3();
+  }
+#endif
+}
+
+
+void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+  Comment cmnt(masm_, "[ FunctionLiteral");
+
+  // Build the function boilerplate and instantiate it.
+  Handle<JSFunction> boilerplate = BuildBoilerplate(expr);
+  if (HasStackOverflow()) return;
+
+  ASSERT(boilerplate->IsBoilerplate());
+
+  // Create a new closure.
+  __ push(rsi);
+  __ Push(boilerplate);
+  __ CallRuntime(Runtime::kNewClosure, 2);
+
+  if (expr->location().is_temporary()) {
+    __ push(rax);
+  } else {
+    ASSERT(expr->location().is_nowhere());
+  }
+}
+
+
+void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+  Comment cmnt(masm_, "[ VariableProxy");
+  Expression* rewrite = expr->var()->rewrite();
+  if (rewrite == NULL) {
+    Comment cmnt(masm_, "Global variable");
+    // Use inline caching. Variable name is passed in rcx and the global
+    // object on the stack.
+    __ push(CodeGenerator::GlobalObject());
+    __ Move(rcx, expr->name());
+    Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+    __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+
+    // A test rax instruction following the call is used by the IC to
+    // indicate that the inobject property case was inlined.  Ensure there
+    // is no test rax instruction here.
+    if (expr->location().is_temporary()) {
+      // Replace the global object with the result.
+      __ movq(Operand(rsp, 0), rax);
+    } else {
+      ASSERT(expr->location().is_nowhere());
+      __ addq(rsp, Immediate(kPointerSize));
+    }
+
+  } else {
+    Comment cmnt(masm_, "Stack slot");
+    Slot* slot = rewrite->AsSlot();
+    ASSERT(slot != NULL);
+    if (expr->location().is_temporary()) {
+      __ push(Operand(rbp, SlotOffset(slot)));
+    } else {
+      ASSERT(expr->location().is_nowhere());
+    }
+  }
+}
+
+
+void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+  Comment cmnt(masm_, "[ RegExp Literal");
+  Label done;
+  // Registers will be used as follows:
+  // rdi = JS function.
+  // rbx = literals array.
+  // rax = regexp literal.
+  __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+  __ movq(rbx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
+  int literal_offset =
+      FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+  __ movq(rax, FieldOperand(rbx, literal_offset));
+  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+  __ j(not_equal, &done);
+  // Create regexp literal using runtime function
+  // Result will be in rax.
+  __ push(rbx);
+  __ Push(Smi::FromInt(expr->literal_index()));
+  __ Push(expr->pattern());
+  __ Push(expr->flags());
+  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+  // The regexp literal, cached or newly created, is now in rax.
+  __ bind(&done);
+  if (expr->location().is_temporary()) {
+    __ push(rax);
+  } else {
+    ASSERT(expr->location().is_nowhere());
+  }
+}
+
+
+void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+  Comment cmnt(masm_, "[ ArrayLiteral");
+  Label make_clone;
+
+  // Fetch the function's literals array.
+  __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+  __ movq(rbx, FieldOperand(rbx, JSFunction::kLiteralsOffset));
+  // Check if the literal's boilerplate has been instantiated.
+  int offset =
+      FixedArray::kHeaderSize + (expr->literal_index() * kPointerSize);
+  __ movq(rax, FieldOperand(rbx, offset));
+  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+  __ j(not_equal, &make_clone);
+
+  // Instantiate the boilerplate.
+  __ push(rbx);
+  __ Push(Smi::FromInt(expr->literal_index()));
+  __ Push(expr->literals());
+  __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
+
+  __ bind(&make_clone);
+  // Clone the boilerplate.
+  __ push(rax);
+  if (expr->depth() > 1) {
+    __ CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+  } else {
+    __ CallRuntime(Runtime::kCloneShallowLiteralBoilerplate, 1);
+  }
+
+  bool result_saved = false;  // Is the result saved to the stack?
+
+  // Emit code to evaluate all the non-constant subexpressions and to store
+  // them into the newly cloned array.
+  ZoneList<Expression*>* subexprs = expr->values();
+  for (int i = 0, len = subexprs->length(); i < len; i++) {
+    Expression* subexpr = subexprs->at(i);
+    // If the subexpression is a literal or a simple materialized literal it
+    // is already set in the cloned array.
+    if (subexpr->AsLiteral() != NULL ||
+        CompileTimeValue::IsCompileTimeValue(subexpr)) {
+      continue;
+    }
+
+    if (!result_saved) {
+      __ push(rax);
+      result_saved = true;
+    }
+    Visit(subexpr);
+    ASSERT(subexpr->location().is_temporary());
+
+    // Store the subexpression value in the array's elements.
+    __ pop(rax);  // Subexpression value.
+    __ movq(rbx, Operand(rsp, 0));  // Copy of array literal.
+    __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
+    int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+    __ movq(FieldOperand(rbx, offset), rax);
+
+    // Update the write barrier for the array store.
+    __ RecordWrite(rbx, offset, rax, rcx);
+  }
+
+  Location destination = expr->location();
+  if (destination.is_nowhere() && result_saved) {
+    __ addq(rsp, Immediate(kPointerSize));
+  } else if (destination.is_temporary() && !result_saved) {
+    __ push(rax);
+  }
+}
+
+
+void FastCodeGenerator::VisitAssignment(Assignment* expr) {
+  Comment cmnt(masm_, "[ Assignment");
+  ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR);
+  Expression* rhs = expr->value();
+  Visit(rhs);
+
+  // Left-hand side can only be a global or a (parameter or local) slot.
+  Variable* var = expr->target()->AsVariableProxy()->AsVariable();
+  ASSERT(var != NULL);
+  ASSERT(var->is_global() || var->slot() != NULL);
+
+  // Complete the assignment based on the location of the right-hand-side
+  // value and the desired location of the assignment value.
+  Location destination = expr->location();
+  Location source = rhs->location();
+  ASSERT(!destination.is_constant());
+  ASSERT(!source.is_nowhere());
+
+  if (var->is_global()) {
+    // Assignment to a global variable, use inline caching.  Right-hand-side
+    // value is passed in rax, variable name in rcx, and the global object
+    // on the stack.
+    if (source.is_temporary()) {
+      __ pop(rax);
+    } else {
+      ASSERT(source.is_constant());
+      ASSERT(rhs->AsLiteral() != NULL);
+      __ Move(rax, rhs->AsLiteral()->handle());
+    }
+    __ Move(rcx, var->name());
+    __ push(CodeGenerator::GlobalObject());
+    Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+    __ Call(ic, RelocInfo::CODE_TARGET);
+    // Overwrite the global object on the stack with the result if needed.
+    if (destination.is_temporary()) {
+      __ movq(Operand(rsp, 0), rax);
+    } else {
+      __ addq(rsp, Immediate(kPointerSize));
+    }
+  } else {
+    if (source.is_temporary()) {
+      if (destination.is_temporary()) {
+        // Case 'temp1 <- (var = temp0)'.  Preserve right-hand-side temporary
+        // on the stack.
+        __ movq(kScratchRegister, Operand(rsp, 0));
+        __ movq(Operand(rbp, SlotOffset(var->slot())), kScratchRegister);
+      } else {
+        ASSERT(destination.is_nowhere());
+        // Case 'var = temp'.  Discard right-hand-side temporary.
+        __ pop(Operand(rbp, SlotOffset(var->slot())));
+      }
+    } else {
+      ASSERT(source.is_constant());
+      ASSERT(rhs->AsLiteral() != NULL);
+      // Two cases: 'temp <- (var = constant)', or 'var = constant' with a
+      // discarded result.  Always perform the assignment.
+      __ Move(kScratchRegister, rhs->AsLiteral()->handle());
+      __ movq(Operand(rbp, SlotOffset(var->slot())), kScratchRegister);
+      if (destination.is_temporary()) {
+        // Case 'temp <- (var = constant)'.  Save result.
+        __ push(kScratchRegister);
+      }
+    }
+  }
+}
+
+
+void FastCodeGenerator::VisitCall(Call* expr) {
+  Expression* fun = expr->expression();
+  ZoneList<Expression*>* args = expr->arguments();
+  Variable* var = fun->AsVariableProxy()->AsVariable();
+  ASSERT(var != NULL && !var->is_this() && var->is_global());
+  ASSERT(!var->is_possibly_eval());
+
+  __ Push(var->name());
+  // Push global object (receiver).
+  __ push(CodeGenerator::GlobalObject());
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Visit(args->at(i));
+    ASSERT(!args->at(i)->location().is_nowhere());
+    if (args->at(i)->location().is_constant()) {
+      ASSERT(args->at(i)->AsLiteral() != NULL);
+      __ Push(args->at(i)->AsLiteral()->handle());
+    }
+  }
+  // Record source position for debugger
+  SetSourcePosition(expr->position());
+  // Call the IC initialization code.
+  Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count,
+                                                         NOT_IN_LOOP);
+  __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+  // Restore context register.
+  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  // Discard the function left on TOS.
+  if (expr->location().is_temporary()) {
+    __ movq(Operand(rsp, 0), rax);
+  } else {
+    ASSERT(expr->location().is_nowhere());
+    __ addq(rsp, Immediate(kPointerSize));
+  }
+}
+
+
+void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+  Comment cmnt(masm_, "[ CallRuntime");
+  ZoneList<Expression*>* args = expr->arguments();
+  Runtime::Function* function = expr->function();
+
+  ASSERT(function != NULL);
+
+  // Push the arguments ("left-to-right").
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    Visit(args->at(i));
+    ASSERT(!args->at(i)->location().is_nowhere());
+    if (args->at(i)->location().is_constant()) {
+      ASSERT(args->at(i)->AsLiteral() != NULL);
+      __ Push(args->at(i)->AsLiteral()->handle());
+    } else {
+      ASSERT(args->at(i)->location().is_temporary());
+      // If location is temporary, it is already on the stack,
+      // so nothing to do here.
+    }
+  }
+
+  __ CallRuntime(function, arg_count);
+  if (expr->location().is_temporary()) {
+    __ push(rax);
+  } else {
+    ASSERT(expr->location().is_nowhere());
+  }
+}
+
+
+void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+  // Compile a short-circuited boolean or operation in a non-test
+  // context.
+  ASSERT(expr->op() == Token::OR);
+  // Compile (e0 || e1) as if it were
+  // (let (temp = e0) temp ? temp : e1).
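+  // For example, `x = a || b` keeps the value of `a` when ToBoolean(a)
+  // is true and otherwise evaluates and keeps `b`; `a` is evaluated
+  // only once.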
+
+  Label eval_right, done;
+  Location destination = expr->location();
+  ASSERT(!destination.is_constant());
+
+  Expression* left = expr->left();
+  Location left_source = left->location();
+  ASSERT(!left_source.is_nowhere());
+
+  Expression* right = expr->right();
+  Location right_source = right->location();
+  ASSERT(!right_source.is_nowhere());
+
+  Visit(left);
+  // Use the shared ToBoolean stub to find the boolean value of the
+  // left-hand subexpression.  Load the value into rax to perform some
+  // inlined checks assumed by the stub.
+  if (left_source.is_temporary()) {
+    if (destination.is_temporary()) {
+      // Copy the left-hand value into rax because we may need it as the
+      // final result.
+      __ movq(rax, Operand(rsp, 0));
+    } else {
+      // Pop the left-hand value into rax because we will not need it as the
+      // final result.
+      __ pop(rax);
+    }
+  } else {
+    // Load the left-hand value into rax.  Put it on the stack if we may
+    // need it.
+    ASSERT(left->AsLiteral() != NULL);
+    __ Move(rax, left->AsLiteral()->handle());
+    if (destination.is_temporary()) __ push(rax);
+  }
+  // The left-hand value is in rax.  It is also on the stack iff the
+  // destination location is temporary.
+
+  // Perform fast checks assumed by the stub.
+  // The undefined value is false.
+  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+  __ j(equal, &eval_right);
+  __ CompareRoot(rax, Heap::kTrueValueRootIndex);  // True is true.
+  __ j(equal, &done);
+  __ CompareRoot(rax, Heap::kFalseValueRootIndex);  // False is false.
+  __ j(equal, &eval_right);
+  ASSERT(kSmiTag == 0);
+  __ SmiCompare(rax, Smi::FromInt(0));  // The smi zero is false.
+  __ j(equal, &eval_right);
+  Condition is_smi = masm_->CheckSmi(rax);  // All other smis are true.
+  __ j(is_smi, &done);
+
+  // Call the stub for all other cases.
+  __ push(rax);
+  ToBooleanStub stub;
+  __ CallStub(&stub);
+  __ testq(rax, rax);  // The stub returns nonzero for true.
+  __ j(not_zero, &done);
+
+  __ bind(&eval_right);
+  // Discard the left-hand value if present on the stack.
+  if (destination.is_temporary()) {
+    __ addq(rsp, Immediate(kPointerSize));
+  }
+  Visit(right);
+
+  // Save or discard the right-hand value as needed.
+  if (destination.is_temporary() && right_source.is_constant()) {
+    ASSERT(right->AsLiteral() != NULL);
+    __ Push(right->AsLiteral()->handle());
+  } else if (destination.is_nowhere() && right_source.is_temporary()) {
+    __ addq(rsp, Immediate(kPointerSize));
+  }
+
+  __ bind(&done);
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
index 5442be9..eefaa0a 100644
--- a/src/x64/frames-x64.h
+++ b/src/x64/frames-x64.h
@@ -31,9 +31,6 @@
 namespace v8 {
 namespace internal {
 
-// TODO(x64): This is a stub, mostly just a copy of the ia32 bit version.
-// This might all need to change to be correct for x64.
-
 static const int kNumRegs = 8;
 static const RegList kJSCallerSaved =
     1 << 0 |  // rax
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 8209091..2812df1 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -131,8 +131,8 @@
   // Check that the value is a normal property.
   __ bind(&done);
   const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  __ testl(Operand(r0, r1, times_pointer_size, kDetailsOffset - kHeapObjectTag),
-           Immediate(Smi::FromInt(PropertyDetails::TypeField::mask())));
+  __ Test(Operand(r0, r1, times_pointer_size, kDetailsOffset - kHeapObjectTag),
+          Smi::FromInt(PropertyDetails::TypeField::mask()));
   __ j(not_zero, miss_label);
 
   // Get the value at the masked, scaled index.
@@ -320,7 +320,7 @@
   // Slow case: Load name and receiver from stack and jump to runtime.
   __ bind(&slow);
   __ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
-  KeyedLoadIC::Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+  Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
   __ bind(&check_string);
   // The key is not a smi.
   // Is it a string?
@@ -336,13 +336,13 @@
   __ testb(FieldOperand(rdx, Map::kInstanceTypeOffset),
            Immediate(kIsSymbolMask));
   __ j(zero, &slow);
-  // Probe the dictionary leaving result in ecx.
+  // Probe the dictionary, leaving the result in rcx.
   GenerateDictionaryLoad(masm, &slow, rbx, rcx, rdx, rax);
   GenerateCheckNonObjectOrLoaded(masm, &slow, rcx);
   __ movq(rax, rcx);
   __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
   __ ret(0);
-  // Array index string: If short enough use cache in length/hash field (ebx).
+  // Array index string: If short enough use cache in length/hash field (rbx).
   // We assert that there are enough bits in an int32_t after the hash shift
   // bits have been subtracted to allow space for the length and the cached
   // array index.
@@ -360,6 +360,146 @@
 }
 
 
+void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
+                                        ExternalArrayType array_type) {
+  // ----------- S t a t e -------------
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : name
+  //  -- rsp[16] : receiver
+  // -----------------------------------
+  Label slow, failed_allocation;
+
+  // Load name and receiver.
+  __ movq(rax, Operand(rsp, kPointerSize));
+  __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(rcx, &slow);
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(rax, &slow);
+
+  // Check that the object is a JS object.
+  __ CmpObjectType(rcx, JS_OBJECT_TYPE, rdx);
+  __ j(not_equal, &slow);
+  // Check that the receiver does not require access checks.  We need
+  // to check this explicitly since this generic stub does not perform
+  // map checks.  The map is already in rdx.
+  __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
+           Immediate(1 << Map::kIsAccessCheckNeeded));
+  __ j(not_zero, &slow);
+
+  // Check that the elements array is the appropriate type of
+  // ExternalArray.
+  // rax: index (as a smi)
+  // rcx: JSObject
+  __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+                 Heap::RootIndexForExternalArrayType(array_type));
+  __ j(not_equal, &slow);
+
+  // Check that the index is in range.
+  __ SmiToInteger32(rax, rax);
+  __ cmpl(rax, FieldOperand(rcx, ExternalArray::kLengthOffset));
+  // Unsigned comparison catches both negative and too-large values.
+  __ j(above_equal, &slow);
+
+  // rax: untagged index
+  // rcx: elements array
+  __ movq(rcx, FieldOperand(rcx, ExternalArray::kExternalPointerOffset));
+  // rcx: base pointer of external storage
+  switch (array_type) {
+    case kExternalByteArray:
+      __ movsxbq(rax, Operand(rcx, rax, times_1, 0));
+      break;
+    case kExternalUnsignedByteArray:
+      __ movb(rax, Operand(rcx, rax, times_1, 0));
+      break;
+    case kExternalShortArray:
+      __ movsxwq(rax, Operand(rcx, rax, times_2, 0));
+      break;
+    case kExternalUnsignedShortArray:
+      __ movzxwq(rax, Operand(rcx, rax, times_2, 0));
+      break;
+    case kExternalIntArray:
+      __ movsxlq(rax, Operand(rcx, rax, times_4, 0));
+      break;
+    case kExternalUnsignedIntArray:
+      __ movl(rax, Operand(rcx, rax, times_4, 0));
+      break;
+    case kExternalFloatArray:
+      __ fld_s(Operand(rcx, rax, times_4, 0));
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+
+  // For integer array types:
+  // rax: value
+  // For floating-point array type:
+  // FP(0): value
+
+  if (array_type == kExternalIntArray ||
+      array_type == kExternalUnsignedIntArray) {
+    // For the Int and UnsignedInt array types, we need to see whether
+    // the value can be represented in a Smi. If not, we need to convert
+    // it to a HeapNumber.
+    Label box_int;
+    if (array_type == kExternalIntArray) {
+      __ JumpIfNotValidSmiValue(rax, &box_int);
+    } else {
+      ASSERT_EQ(array_type, kExternalUnsignedIntArray);
+      __ JumpIfUIntNotValidSmiValue(rax, &box_int);
+    }
+
+    __ Integer32ToSmi(rax, rax);
+    __ ret(0);
+
+    __ bind(&box_int);
+
+    // Allocate a HeapNumber for the int and perform int-to-double
+    // conversion.
+    __ push(rax);
+    if (array_type == kExternalIntArray) {
+      __ fild_s(Operand(rsp, 0));
+    } else {
+      ASSERT(array_type == kExternalUnsignedIntArray);
+      // The value is zero-extended, so load 64 bits to treat it as unsigned.
+      __ fild_d(Operand(rsp, 0));
+    }
+    __ pop(rax);
+    // FP(0): value
+    __ AllocateHeapNumber(rax, rbx, &failed_allocation);
+    // Set the value.
+    __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
+    __ ret(0);
+  } else if (array_type == kExternalFloatArray) {
+    // For the floating-point array type, we need to always allocate a
+    // HeapNumber.
+    __ AllocateHeapNumber(rax, rbx, &failed_allocation);
+    // Set the value.
+    __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
+    __ ret(0);
+  } else {
+    __ Integer32ToSmi(rax, rax);
+    __ ret(0);
+  }
+
+  // If we fail allocation of the HeapNumber, we still have a value on
+  // top of the FPU stack. Remove it.
+  __ bind(&failed_allocation);
+  __ ffree();
+  __ fincstp();
+  // Fall through to slow case.
+
+  // Slow case: Load name and receiver from stack and jump to runtime.
+  __ bind(&slow);
+  __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1);
+  Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+}
+
+
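
The int and unsigned-int load paths above box a value in a HeapNumber only when it cannot be represented as a smi. With the 32-bit smi payload this patch introduces (see the macro-assembler changes below), every signed 32-bit value fits, while an unsigned value with the high bit set does not. A C++ sketch of that decision (illustrative helpers, not code from this tree):

    #include <cstdint>

    // Any int32_t fits in a 32-bit smi payload; a uint32_t fits only
    // if its high bit is clear (cf. CheckUInteger32ValidSmiValue).
    bool Int32FitsInSmi(int32_t /*value*/) { return true; }
    bool UInt32FitsInSmi(uint32_t value) {
      return (value & 0x80000000u) == 0;
    }
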
 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- rsp[0] : return address
@@ -434,9 +574,6 @@
   __ movq(rbx, Operand(rsp, 1 * kPointerSize));  // 1 ~ return address
   // Check that the key is a smi.
   __ JumpIfNotSmi(rbx, &slow);
-  // If it is a smi, make sure it is zero-extended, so it can be
-  // used as an index in a memory operand.
-  __ movl(rbx, rbx);  // Clear the high bits of rbx.
 
   __ CmpInstanceType(rcx, JS_ARRAY_TYPE);
   __ j(equal, &array);
@@ -447,7 +584,7 @@
   // Object case: Check key against length in the elements array.
   // rax: value
   // rdx: JSObject
-  // rbx: index (as a smi), zero-extended.
+  // rbx: index (as a smi)
   __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
   // Check that the object is in fast mode (not dictionary).
   __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
@@ -461,15 +598,9 @@
   // rbx: index (as a smi)
   __ j(below, &fast);
 
-  // Slow case: Push extra copies of the arguments (3).
+  // Slow case: call runtime.
   __ bind(&slow);
-  __ pop(rcx);
-  __ push(Operand(rsp, 1 * kPointerSize));
-  __ push(Operand(rsp, 1 * kPointerSize));
-  __ push(rax);
-  __ push(rcx);
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3, 1);
+  Generate(masm, ExternalReference(Runtime::kSetProperty));
 
   // Check whether the elements is a pixel array.
   // rax: value
@@ -488,14 +619,11 @@
   __ movq(rdx, rax);  // Save the value.
   __ SmiToInteger32(rax, rax);
   {  // Clamp the value to [0..255].
-    Label done, is_negative;
+    Label done;
     __ testl(rax, Immediate(0xFFFFFF00));
     __ j(zero, &done);
-    __ j(negative, &is_negative);
-    __ movl(rax, Immediate(255));
-    __ jmp(&done);
-    __ bind(&is_negative);
-    __ xorl(rax, rax);  // Clear rax.
+    __ setcc(negative, rax);  // 1 if negative, 0 if positive.
+    __ decb(rax);  // 0 if negative, 255 if positive.
     __ bind(&done);
   }
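
The clamp block above avoids branches on the out-of-range path: setcc materializes the sign as 0 or 1, and decb turns that into 0 (for negative values) or 255 (for too-large ones). The same computation in C++, as a sketch:

    #include <cstdint>

    uint8_t ClampToByte(int32_t value) {
      if ((value & 0xFFFFFF00) == 0) {
        return static_cast<uint8_t>(value);          // already in [0..255]
      }
      uint8_t is_negative = value < 0 ? 1 : 0;       // setcc(negative, rax)
      return static_cast<uint8_t>(is_negative - 1);  // decb: 0 or 255
    }
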
   __ movq(rcx, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
@@ -511,15 +639,15 @@
   // rdx: JSArray
   // rcx: FixedArray
   // rbx: index (as a smi)
-  // flags: compare (rbx, rdx.length())
+  // flags: smicompare (rdx.length(), rbx)
   __ j(not_equal, &slow);  // do not leave holes in the array
   __ SmiToInteger64(rbx, rbx);
   __ cmpl(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
   __ j(above_equal, &slow);
   // Increment and restore smi-tag.
-  __ Integer64AddToSmi(rbx, rbx, 1);
+  __ Integer64PlusConstantToSmi(rbx, rbx, 1);
   __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rbx);
-  __ SmiSubConstant(rbx, rbx, 1, NULL);
+  __ SmiSubConstant(rbx, rbx, Smi::FromInt(1));
   __ jmp(&fast);
 
   // Array case: Get the length and the elements array from the JS
@@ -530,26 +658,211 @@
   // rdx: JSArray
   // rbx: index (as a smi)
   __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
-  __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), Factory::fixed_array_map());
+  __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+                 Heap::kFixedArrayMapRootIndex);
   __ j(not_equal, &slow);
 
   // Check the key against the length in the array, compute the
   // address to store into and fall through to fast case.
-  __ cmpl(rbx, FieldOperand(rdx, JSArray::kLengthOffset));
-  __ j(above_equal, &extra);
+  __ SmiCompare(FieldOperand(rdx, JSArray::kLengthOffset), rbx);
+  __ j(below_equal, &extra);
 
   // Fast case: Do the store.
   __ bind(&fast);
   // rax: value
   // rcx: FixedArray
   // rbx: index (as a smi)
-  __ movq(Operand(rcx, rbx, times_half_pointer_size,
+  Label non_smi_value;
+  __ JumpIfNotSmi(rax, &non_smi_value);
+  SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
+  __ movq(Operand(rcx, index.reg, index.scale,
                   FixedArray::kHeaderSize - kHeapObjectTag),
-         rax);
-  // Update write barrier for the elements array address.
-  __ movq(rdx, rax);
-  __ RecordWrite(rcx, 0, rdx, rbx);
+          rax);
   __ ret(0);
+  __ bind(&non_smi_value);
+  // Slow case that needs to retain rbx for use by RecordWrite.
+  // Update write barrier for the elements array address.
+  SmiIndex index2 = masm->SmiToIndex(kScratchRegister, rbx, kPointerSizeLog2);
+  __ movq(Operand(rcx, index2.reg, index2.scale,
+                  FixedArray::kHeaderSize - kHeapObjectTag),
+          rax);
+  __ movq(rdx, rax);
+  __ RecordWriteNonSmi(rcx, 0, rdx, rbx);
+  __ ret(0);
+}
+
+
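
The rewritten fast path stores smis without a write barrier and keeps rbx alive only on the heap-object path, where RecordWriteNonSmi needs the index. The barrier can be skipped for smis because they are immediates rather than pointers; a sketch of the decision using V8's tagging convention (heap objects carry tag bit 1):

    #include <cstdint>

    // Only a tagged heap pointer can create a reference the GC must
    // track, so smi stores (low bit clear) need no remembered-set update.
    bool NeedsWriteBarrier(uintptr_t tagged_value) {
      return (tagged_value & 1) != 0;  // kHeapObjectTag
    }
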
+void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm,
+                                         ExternalArrayType array_type) {
+  // ----------- S t a t e -------------
+  //  -- rax    : value
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : key
+  //  -- rsp[16] : receiver
+  // -----------------------------------
+  Label slow, check_heap_number;
+
+  // Get the receiver from the stack.
+  __ movq(rdx, Operand(rsp, 2 * kPointerSize));
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(rdx, &slow);
+  // Get the map from the receiver.
+  __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+  // Check that the receiver does not require access checks.  We need
+  // to do this because this generic stub does not perform map checks.
+  __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+           Immediate(1 << Map::kIsAccessCheckNeeded));
+  __ j(not_zero, &slow);
+  // Get the key from the stack.
+  __ movq(rbx, Operand(rsp, 1 * kPointerSize));  // 1 ~ return address
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(rbx, &slow);
+
+  // Check that the object is a JS object.
+  __ CmpInstanceType(rcx, JS_OBJECT_TYPE);
+  __ j(not_equal, &slow);
+
+  // Check that the elements array is the appropriate type of
+  // ExternalArray.
+  // rax: value
+  // rdx: JSObject
+  // rbx: index (as a smi)
+  __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+  __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+                 Heap::RootIndexForExternalArrayType(array_type));
+  __ j(not_equal, &slow);
+
+  // Check that the index is in range.
+  __ SmiToInteger32(rbx, rbx);  // Untag the index.
+  __ cmpl(rbx, FieldOperand(rcx, ExternalArray::kLengthOffset));
+  // Unsigned comparison catches both negative and too-large values.
+  __ j(above_equal, &slow);
+
+  // Handle both smis and HeapNumbers in the fast path. Go to the
+  // runtime for all other kinds of values.
+  // rax: value
+  // rcx: elements array
+  // rbx: untagged index
+  __ JumpIfNotSmi(rax, &check_heap_number);
+  __ movq(rdx, rax);  // Save the value.
+  __ SmiToInteger32(rax, rax);
+  __ movq(rcx, FieldOperand(rcx, ExternalArray::kExternalPointerOffset));
+  // rcx: base pointer of external storage
+  switch (array_type) {
+    case kExternalByteArray:
+    case kExternalUnsignedByteArray:
+      __ movb(Operand(rcx, rbx, times_1, 0), rax);
+      break;
+    case kExternalShortArray:
+    case kExternalUnsignedShortArray:
+      __ movw(Operand(rcx, rbx, times_2, 0), rax);
+      break;
+    case kExternalIntArray:
+    case kExternalUnsignedIntArray:
+      __ movl(Operand(rcx, rbx, times_4, 0), rax);
+      break;
+    case kExternalFloatArray:
+      // Need to perform int-to-float conversion.
+      __ push(rax);
+      __ fild_s(Operand(rsp, 0));
+      __ pop(rax);
+      __ fstp_s(Operand(rcx, rbx, times_4, 0));
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  __ movq(rax, rdx);  // Return the original value.
+  __ ret(0);
+
+  __ bind(&check_heap_number);
+  __ CmpObjectType(rax, HEAP_NUMBER_TYPE, rdx);
+  __ j(not_equal, &slow);
+
+  // The WebGL specification leaves the behavior of storing NaN and
+  // +/-Infinity into integer arrays basically undefined. For more
+  // reproducible behavior, convert these to zero.
+  __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
+  __ movq(rdx, rax);  // Save the value.
+  __ movq(rcx, FieldOperand(rcx, ExternalArray::kExternalPointerOffset));
+  // rbx: untagged index
+  // rcx: base pointer of external storage
+  // top of FPU stack: value
+  if (array_type == kExternalFloatArray) {
+    __ fstp_s(Operand(rcx, rbx, times_4, 0));
+  } else {
+    // Need to perform float-to-int conversion.
+    // Test the top of the FP stack for NaN.
+    Label is_nan;
+    __ fucomi(0);
+    __ j(parity_even, &is_nan);
+
+    __ push(rax);  // Make room on the stack.
+    __ fistp_d(Operand(rsp, 0));
+    __ pop(rax);
+    // rax: untagged integer value
+    switch (array_type) {
+      case kExternalByteArray:
+      case kExternalUnsignedByteArray:
+        __ movb(Operand(rcx, rbx, times_1, 0), rax);
+        break;
+      case kExternalShortArray:
+      case kExternalUnsignedShortArray:
+        __ movw(Operand(rcx, rbx, times_2, 0), rax);
+        break;
+      case kExternalIntArray:
+      case kExternalUnsignedIntArray: {
+        // We also need to explicitly check for +/-Infinity. These are
+        // converted to MIN_INT, but we need to be careful not to
+        // confuse this with legal uses of MIN_INT.
+        Label not_infinity;
+        // This test would apparently detect both NaN and Infinity,
+        // but we've already checked for NaN using the FPU hardware
+        // above.
+        __ movzxwq(rdi, FieldOperand(rdx, HeapNumber::kValueOffset + 6));
+        __ and_(rdi, Immediate(0x7FF0));
+        __ cmpw(rdi, Immediate(0x7FF0));
+        __ j(not_equal, &not_infinity);
+        __ movq(rax, Immediate(0));
+        __ bind(&not_infinity);
+        __ movl(Operand(rcx, rbx, times_4, 0), rax);
+        break;
+      }
+      default:
+        UNREACHABLE();
+        break;
+    }
+    __ movq(rax, rdx);  // Return the original value.
+    __ ret(0);
+
+    __ bind(&is_nan);
+    __ ffree();
+    __ fincstp();
+    __ movq(rax, Immediate(0));
+    switch (array_type) {
+      case kExternalByteArray:
+      case kExternalUnsignedByteArray:
+        __ movb(Operand(rcx, rbx, times_1, 0), rax);
+        break;
+      case kExternalShortArray:
+      case kExternalUnsignedShortArray:
+        __ movw(Operand(rcx, rbx, times_2, 0), rax);
+        break;
+      case kExternalIntArray:
+      case kExternalUnsignedIntArray:
+        __ movl(Operand(rcx, rbx, times_4, 0), rax);
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+    __ movq(rax, rdx);  // Return the original value.
+    __ ret(0);
+  }
+
+  // Slow case: call runtime.
+  __ bind(&slow);
+  Generate(masm, ExternalReference(Runtime::kSetProperty));
 }
 
 
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 38ada92..b2f69bb 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -38,16 +38,15 @@
 namespace internal {
 
 MacroAssembler::MacroAssembler(void* buffer, int size)
-  : Assembler(buffer, size),
-    unresolved_(0),
-    generating_stub_(false),
-    allow_stub_calls_(true),
-    code_object_(Heap::undefined_value()) {
+    : Assembler(buffer, size),
+      unresolved_(0),
+      generating_stub_(false),
+      allow_stub_calls_(true),
+      code_object_(Heap::undefined_value()) {
 }
 
 
-void MacroAssembler::LoadRoot(Register destination,
-                              Heap::RootListIndex index) {
+void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
   movq(destination, Operand(r13, index << kPointerSizeLog2));
 }
 
@@ -57,14 +56,12 @@
 }
 
 
-void MacroAssembler::CompareRoot(Register with,
-                                 Heap::RootListIndex index) {
+void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
   cmpq(with, Operand(r13, index << kPointerSizeLog2));
 }
 
 
-void MacroAssembler::CompareRoot(Operand with,
-                                 Heap::RootListIndex index) {
+void MacroAssembler::CompareRoot(Operand with, Heap::RootListIndex index) {
   LoadRoot(kScratchRegister, index);
   cmpq(with, kScratchRegister);
 }
@@ -144,9 +141,9 @@
 
   // Minor key encoding in 12 bits of three registers (object, address and
   // scratch) OOOOAAAASSSS.
-  class ScratchBits: public BitField<uint32_t, 0, 4> {};
-  class AddressBits: public BitField<uint32_t, 4, 4> {};
-  class ObjectBits: public BitField<uint32_t, 8, 4> {};
+  class ScratchBits : public BitField<uint32_t, 0, 4> {};
+  class AddressBits : public BitField<uint32_t, 4, 4> {};
+  class ObjectBits : public BitField<uint32_t, 8, 4> {};
 
   Major MajorKey() { return RecordWrite; }
 
@@ -167,33 +164,45 @@
 
 // Set the remembered set bit for [object+offset].
 // object is the object being stored into, value is the object being stored.
-// If offset is zero, then the scratch register contains the array index into
-// the elements array represented as a Smi.
+// If offset is zero, then the smi_index register contains the array index into
+// the elements array represented as a smi. Otherwise it can be used as a
+// scratch register.
 // All registers are clobbered by the operation.
 void MacroAssembler::RecordWrite(Register object,
                                  int offset,
                                  Register value,
-                                 Register scratch) {
+                                 Register smi_index) {
   // First, check if a remembered set write is even needed. The tests below
   // catch stores of Smis and stores into young gen (which does not have space
   // for the remembered set bits).
   Label done;
+  JumpIfSmi(value, &done);
 
+  RecordWriteNonSmi(object, offset, value, smi_index);
+  bind(&done);
+}
+
+
+void MacroAssembler::RecordWriteNonSmi(Register object,
+                                       int offset,
+                                       Register scratch,
+                                       Register smi_index) {
+  Label done;
   // Test that the object address is not in the new space.  We cannot
   // set remembered set bits in the new space.
-  movq(value, object);
+  movq(scratch, object);
   ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
-  and_(value, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
+  and_(scratch, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
   movq(kScratchRegister, ExternalReference::new_space_start());
-  cmpq(value, kScratchRegister);
+  cmpq(scratch, kScratchRegister);
   j(equal, &done);
 
   if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) {
     // Compute the bit offset in the remembered set, leave it in 'scratch'.
-    lea(value, Operand(object, offset));
+    lea(scratch, Operand(object, offset));
     ASSERT(is_int32(Page::kPageAlignmentMask));
-    and_(value, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask)));
-    shr(value, Immediate(kObjectAlignmentBits));
+    and_(scratch, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask)));
+    shr(scratch, Immediate(kObjectAlignmentBits));
 
     // Compute the page address from the heap object pointer, leave it in
     // 'object' (immediate value is sign extended).
@@ -203,24 +212,26 @@
     // to limit code size. We should probably evaluate this decision by
     // measuring the performance of an equivalent implementation using
     // "simpler" instructions.
-    bts(Operand(object, Page::kRSetOffset), value);
+    bts(Operand(object, Page::kRSetOffset), scratch);
   } else {
-    Register dst = scratch;
+    Register dst = smi_index;
     if (offset != 0) {
       lea(dst, Operand(object, offset));
     } else {
       // array access: calculate the destination address in the same manner as
-      // KeyedStoreIC::GenerateGeneric.  Multiply a smi by 4 to get an offset
-      // into an array of pointers.
-      lea(dst, Operand(object, dst, times_half_pointer_size,
+      // KeyedStoreIC::GenerateGeneric.
+      SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
+      lea(dst, Operand(object,
+                       index.reg,
+                       index.scale,
                        FixedArray::kHeaderSize - kHeapObjectTag));
     }
     // If we are already generating a shared stub, not inlining the
     // record write code isn't going to save us any memory.
     if (generating_stub()) {
-      RecordWriteHelper(this, object, dst, value);
+      RecordWriteHelper(this, object, dst, scratch);
     } else {
-      RecordWriteStub stub(object, dst, value);
+      RecordWriteStub stub(object, dst, scratch);
       CallStub(&stub);
     }
   }
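
For in-object stores the remembered-set bit is derived directly from the slot address: the page-offset bits select the bit index, and masking the address the other way yields the page holding the bitmap. A sketch with illustrative constants (the real values come from Page and the heap layout, not from this patch):

    #include <cstdint>

    const uintptr_t kPageAlignmentMask = (static_cast<uintptr_t>(1) << 13) - 1;
    const int kObjectAlignmentBits = 3;  // 8-byte aligned objects

    uintptr_t PageStart(uintptr_t slot) { return slot & ~kPageAlignmentMask; }
    int RememberedSetBit(uintptr_t slot) {
      return static_cast<int>(
          (slot & kPageAlignmentMask) >> kObjectAlignmentBits);
    }
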
@@ -348,8 +359,7 @@
   // Set the entry point and jump to the C entry runtime stub.
   movq(rbx, ext);
   CEntryStub ces(result_size);
-  movq(kScratchRegister, ces.GetCode(), RelocInfo::CODE_TARGET);
-  jmp(kScratchRegister);
+  jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
 }
 
 
@@ -364,7 +374,6 @@
   if (!resolved) {
     uint32_t flags =
         Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
-        Bootstrapper::FixupFlagsIsPCRelative::encode(false) |
         Bootstrapper::FixupFlagsUseCodeObject::encode(true);
     Unresolved entry = { pc_offset() - sizeof(intptr_t), flags, name };
     unresolved_.Add(entry);
@@ -372,7 +381,6 @@
   addq(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
 }
 
-
 Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
                                             bool* resolved) {
   // Move the builtin function into the temporary function slot by
@@ -386,7 +394,6 @@
       JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
   movq(rdi, FieldOperand(rdx, builtins_offset));
 
-
   return Builtins::GetCode(id, resolved);
 }
 
@@ -418,68 +425,96 @@
   }
 }
 
-
 // ----------------------------------------------------------------------------
 // Smi tagging, untagging and tag detection.
 
+static int kSmiShift = kSmiTagSize + kSmiShiftSize;
 
 void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
-  ASSERT_EQ(1, kSmiTagSize);
   ASSERT_EQ(0, kSmiTag);
-#ifdef DEBUG
-    cmpq(src, Immediate(0xC0000000u));
-    Check(positive, "Smi conversion overflow");
-#endif
-  if (dst.is(src)) {
-    addl(dst, src);
-  } else {
-    lea(dst, Operand(src, src, times_1, 0));
+  if (!dst.is(src)) {
+    movl(dst, src);
   }
+  shl(dst, Immediate(kSmiShift));
 }
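
kSmiShift is now 32: the payload occupies the upper half of the 64-bit word, tagging is a single shift, and no 32-bit value can overflow a smi (which is why the overflow variant of Integer32ToSmi below ignores its label). The encoding, sketched with hypothetical helpers:

    #include <cstdint>

    const int kSmiShiftBits = 32;  // kSmiTagSize (1) + kSmiShiftSize (31)

    int64_t TagSmi(int32_t value) {
      return static_cast<int64_t>(value) << kSmiShiftBits;
    }
    int32_t UntagSmi(int64_t tagged) {
      return static_cast<int32_t>(tagged >> kSmiShiftBits);  // like sar
    }
    // TagSmi(-1) has bit pattern 0xFFFFFFFF00000000; the low 32 bits of
    // every smi (tag bit plus padding) are zero.
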
 
 
 void MacroAssembler::Integer32ToSmi(Register dst,
                                     Register src,
                                     Label* on_overflow) {
-  ASSERT_EQ(1, kSmiTagSize);
   ASSERT_EQ(0, kSmiTag);
+  // A 32-bit integer always fits in a long smi.
   if (!dst.is(src)) {
     movl(dst, src);
   }
-  addl(dst, src);
-  j(overflow, on_overflow);
+  shl(dst, Immediate(kSmiShift));
 }
 
 
-void MacroAssembler::Integer64AddToSmi(Register dst,
-                                       Register src,
-                                       int constant) {
-#ifdef DEBUG
-  movl(kScratchRegister, src);
-  addl(kScratchRegister, Immediate(constant));
-  Check(no_overflow, "Add-and-smi-convert overflow");
-  Condition valid = CheckInteger32ValidSmiValue(kScratchRegister);
-  Check(valid, "Add-and-smi-convert overflow");
-#endif
-  lea(dst, Operand(src, src, times_1, constant << kSmiTagSize));
+void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
+                                                Register src,
+                                                int constant) {
+  if (dst.is(src)) {
+    addq(dst, Immediate(constant));
+  } else {
+    lea(dst, Operand(src, constant));
+  }
+  shl(dst, Immediate(kSmiShift));
 }
 
 
 void MacroAssembler::SmiToInteger32(Register dst, Register src) {
-  ASSERT_EQ(1, kSmiTagSize);
   ASSERT_EQ(0, kSmiTag);
   if (!dst.is(src)) {
-    movl(dst, src);
+    movq(dst, src);
   }
-  sarl(dst, Immediate(kSmiTagSize));
+  shr(dst, Immediate(kSmiShift));
 }
 
 
 void MacroAssembler::SmiToInteger64(Register dst, Register src) {
-  ASSERT_EQ(1, kSmiTagSize);
   ASSERT_EQ(0, kSmiTag);
-  movsxlq(dst, src);
-  sar(dst, Immediate(kSmiTagSize));
+  if (!dst.is(src)) {
+    movq(dst, src);
+  }
+  sar(dst, Immediate(kSmiShift));
+}
+
+
+void MacroAssembler::SmiTest(Register src) {
+  testq(src, src);
+}
+
+
+void MacroAssembler::SmiCompare(Register dst, Register src) {
+  cmpq(dst, src);
+}
+
+
+void MacroAssembler::SmiCompare(Register dst, Smi* src) {
+  ASSERT(!dst.is(kScratchRegister));
+  if (src->value() == 0) {
+    testq(dst, dst);
+  } else {
+    Move(kScratchRegister, src);
+    cmpq(dst, kScratchRegister);
+  }
+}
+
+
+void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
+  cmpq(dst, src);
+}
+
+
+void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
+  if (src->value() == 0) {
+    // Zero is the only tagged smi whose representation fits in 32 bits.
+    cmpq(dst, Immediate(0));
+  } else {
+    Move(kScratchRegister, src);
+    cmpq(dst, kScratchRegister);
+  }
 }
 
 
@@ -492,170 +527,83 @@
     SmiToInteger64(dst, src);
     return;
   }
-  movsxlq(dst, src);
-  shl(dst, Immediate(power - 1));
-}
-
-void MacroAssembler::JumpIfSmi(Register src, Label* on_smi) {
-  ASSERT_EQ(0, kSmiTag);
-  testl(src, Immediate(kSmiTagMask));
-  j(zero, on_smi);
-}
-
-
-void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi) {
-  Condition not_smi = CheckNotSmi(src);
-  j(not_smi, on_not_smi);
-}
-
-
-void MacroAssembler::JumpIfNotPositiveSmi(Register src,
-                                          Label* on_not_positive_smi) {
-  Condition not_positive_smi = CheckNotPositiveSmi(src);
-  j(not_positive_smi, on_not_positive_smi);
-}
-
-
-void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
-                                             int constant,
-                                             Label* on_equals) {
-  if (Smi::IsValid(constant)) {
-    Condition are_equal = CheckSmiEqualsConstant(src, constant);
-    j(are_equal, on_equals);
+  if (!dst.is(src)) {
+    movq(dst, src);
+  }
+  if (power < kSmiShift) {
+    sar(dst, Immediate(kSmiShift - power));
+  } else if (power > kSmiShift) {
+    shl(dst, Immediate(power - kSmiShift));
   }
 }
 
 
-void MacroAssembler::JumpIfSmiGreaterEqualsConstant(Register src,
-                                                    int constant,
-                                                    Label* on_greater_equals) {
-  if (Smi::IsValid(constant)) {
-    Condition are_greater_equal = CheckSmiGreaterEqualsConstant(src, constant);
-    j(are_greater_equal, on_greater_equals);
-  } else if (constant < Smi::kMinValue) {
-    jmp(on_greater_equals);
-  }
-}
-
-
-void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid) {
-  Condition is_valid = CheckInteger32ValidSmiValue(src);
-  j(ReverseCondition(is_valid), on_invalid);
-}
-
-
-
-void MacroAssembler::JumpIfNotBothSmi(Register src1,
-                                      Register src2,
-                                      Label* on_not_both_smi) {
-  Condition not_both_smi = CheckNotBothSmi(src1, src2);
-  j(not_both_smi, on_not_both_smi);
-}
-
 Condition MacroAssembler::CheckSmi(Register src) {
-  testb(src, Immediate(kSmiTagMask));
-  return zero;
-}
-
-
-Condition MacroAssembler::CheckNotSmi(Register src) {
   ASSERT_EQ(0, kSmiTag);
   testb(src, Immediate(kSmiTagMask));
-  return not_zero;
+  return zero;
 }
 
 
 Condition MacroAssembler::CheckPositiveSmi(Register src) {
   ASSERT_EQ(0, kSmiTag);
-  testl(src, Immediate(static_cast<uint32_t>(0x80000000u | kSmiTagMask)));
+  movq(kScratchRegister, src);
+  rol(kScratchRegister, Immediate(1));
+  testl(kScratchRegister, Immediate(0x03));
   return zero;
 }
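
CheckPositiveSmi folds the sign test and the tag test into one: rotating left by one moves the sign bit into bit 0, right next to the tag bit (now in bit 1), so a single test against 0x03 verifies both. A sketch:

    #include <cstdint>

    bool IsPositiveSmi(uint64_t word) {
      uint64_t rotated = (word << 1) | (word >> 63);  // rol(word, 1)
      return (rotated & 0x03) == 0;  // sign bit and tag bit both clear
    }
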
 
 
-Condition MacroAssembler::CheckNotPositiveSmi(Register src) {
-  ASSERT_EQ(0, kSmiTag);
-  testl(src, Immediate(static_cast<uint32_t>(0x80000000u | kSmiTagMask)));
-  return not_zero;
-}
-
-
 Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
   if (first.is(second)) {
     return CheckSmi(first);
   }
   movl(kScratchRegister, first);
   orl(kScratchRegister, second);
-  return CheckSmi(kScratchRegister);
-}
-
-
-Condition MacroAssembler::CheckNotBothSmi(Register first, Register second) {
-  ASSERT_EQ(0, kSmiTag);
-  if (first.is(second)) {
-    return CheckNotSmi(first);
-  }
-  movl(kScratchRegister, first);
-  or_(kScratchRegister, second);
-  return CheckNotSmi(kScratchRegister);
+  testb(kScratchRegister, Immediate(kSmiTagMask));
+  return zero;
 }
 
 
 Condition MacroAssembler::CheckIsMinSmi(Register src) {
   ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  cmpl(src, Immediate(0x40000000));
+  movq(kScratchRegister, src);
+  rol(kScratchRegister, Immediate(1));
+  cmpq(kScratchRegister, Immediate(1));
   return equal;
 }
 
-Condition MacroAssembler::CheckSmiEqualsConstant(Register src, int constant) {
-  if (constant == 0) {
-    testl(src, src);
-    return zero;
-  }
-  if (Smi::IsValid(constant)) {
-    cmpl(src, Immediate(Smi::FromInt(constant)));
-    return zero;
-  }
-  // Can't be equal.
-  UNREACHABLE();
-  return no_condition;
-}
-
-
-Condition MacroAssembler::CheckSmiGreaterEqualsConstant(Register src,
-                                                        int constant) {
-  if (constant == 0) {
-    testl(src, Immediate(static_cast<uint32_t>(0x80000000u)));
-    return positive;
-  }
-  if (Smi::IsValid(constant)) {
-    cmpl(src, Immediate(Smi::FromInt(constant)));
-    return greater_equal;
-  }
-  // Can't be equal.
-  UNREACHABLE();
-  return no_condition;
-}
-
 
 Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
-  // A 32-bit integer value can be converted to a smi if it is in the
-  // range [-2^30 .. 2^30-1]. That is equivalent to having its 32-bit
-  // representation have bits 30 and 31 be equal.
-  cmpl(src, Immediate(0xC0000000u));
-  return positive;
+  // A 32-bit integer value can always be converted to a smi.
+  return always;
 }
 
 
-void MacroAssembler::SmiNeg(Register dst,
-                            Register src,
-                            Label* on_not_smi_result) {
-  if (!dst.is(src)) {
-    movl(dst, src);
+Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
+  // An unsigned 32-bit integer value is valid as long as the high bit
+  // is not set.
+  testq(src, Immediate(0x80000000));
+  return zero;
+}
+
+
+void MacroAssembler::SmiNeg(Register dst, Register src, Label* on_smi_result) {
+  if (dst.is(src)) {
+    ASSERT(!dst.is(kScratchRegister));
+    movq(kScratchRegister, src);
+    neg(dst);  // Low 32 bits are retained as zero by negation.
+    // Test if result is zero or Smi::kMinValue.
+    cmpq(dst, kScratchRegister);
+    j(not_equal, on_smi_result);
+    movq(src, kScratchRegister);
+  } else {
+    movq(dst, src);
+    neg(dst);
+    cmpq(dst, src);
+    // If the result is zero or Smi::kMinValue, negation failed to create a smi.
+    j(not_equal, on_smi_result);
   }
-  negl(dst);
-  testl(dst, Immediate(0x7fffffff));
-  // If the result is zero or 0x80000000, negation failed to create a smi.
-  j(equal, on_not_smi_result);
 }
 
 
@@ -664,42 +612,39 @@
                             Register src2,
                             Label* on_not_smi_result) {
   ASSERT(!dst.is(src2));
-  if (!dst.is(src1)) {
-    movl(dst, src1);
-  }
-  addl(dst, src2);
-  if (!dst.is(src1)) {
-    j(overflow, on_not_smi_result);
-  } else {
+  if (dst.is(src1)) {
+    addq(dst, src2);
     Label smi_result;
     j(no_overflow, &smi_result);
     // Restore src1.
-    subl(src1, src2);
+    subq(src1, src2);
     jmp(on_not_smi_result);
     bind(&smi_result);
+  } else {
+    movq(dst, src1);
+    addq(dst, src2);
+    j(overflow, on_not_smi_result);
   }
 }
 
 
-
 void MacroAssembler::SmiSub(Register dst,
                             Register src1,
                             Register src2,
                             Label* on_not_smi_result) {
   ASSERT(!dst.is(src2));
-  if (!dst.is(src1)) {
-    movl(dst, src1);
-  }
-  subl(dst, src2);
-  if (!dst.is(src1)) {
-    j(overflow, on_not_smi_result);
-  } else {
+  if (dst.is(src1)) {
+    subq(dst, src2);
     Label smi_result;
     j(no_overflow, &smi_result);
     // Restore src1.
-    addl(src1, src2);
+    addq(src1, src2);
     jmp(on_not_smi_result);
     bind(&smi_result);
+  } else {
+    movq(dst, src1);
+    subq(dst, src2);
+    j(overflow, on_not_smi_result);
   }
 }
 
@@ -709,80 +654,137 @@
                             Register src2,
                             Label* on_not_smi_result) {
   ASSERT(!dst.is(src2));
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
 
   if (dst.is(src1)) {
+    Label failure, zero_correct_result;
+    movq(kScratchRegister, src1);  // Create backup for later testing.
+    SmiToInteger64(dst, src1);
+    imul(dst, src2);
+    j(overflow, &failure);
+
+    // Check for negative zero result.  If product is zero, and one
+    // argument is negative, go to slow case.
+    Label correct_result;
+    testq(dst, dst);
+    j(not_zero, &correct_result);
+
+    movq(dst, kScratchRegister);
+    xor_(dst, src2);
+    j(positive, &zero_correct_result);  // Result was positive zero.
+
+    bind(&failure);  // Reused failure exit, restores src1.
+    movq(src1, kScratchRegister);
+    jmp(on_not_smi_result);
+
+    bind(&zero_correct_result);
+    xor_(dst, dst);
+
+    bind(&correct_result);
+  } else {
+    SmiToInteger64(dst, src1);
+    imul(dst, src2);
+    j(overflow, on_not_smi_result);
+    // Check for negative zero result.  If product is zero, and one
+    // argument is negative, go to slow case.
+    Label correct_result;
+    testq(dst, dst);
+    j(not_zero, &correct_result);
+    // One of src1 and src2 is zero, so check whether the other one
+    // is negative.
     movq(kScratchRegister, src1);
+    xor_(kScratchRegister, src2);
+    j(negative, on_not_smi_result);
+    bind(&correct_result);
   }
-  SmiToInteger32(dst, src1);
-
-  imull(dst, src2);
-  j(overflow, on_not_smi_result);
-
-  // Check for negative zero result.  If product is zero, and one
-  // argument is negative, go to slow case.  The frame is unchanged
-  // in this block, so local control flow can use a Label rather
-  // than a JumpTarget.
-  Label non_zero_result;
-  testl(dst, dst);
-  j(not_zero, &non_zero_result);
-
-  // Test whether either operand is negative (the other must be zero).
-  orl(kScratchRegister, src2);
-  j(negative, on_not_smi_result);
-  bind(&non_zero_result);
 }
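
Both branches of SmiMul send a zero product to the slow path when exactly one operand is negative, because a smi cannot represent JavaScript's -0 and (-x) * 0 must evaluate to it. The check, sketched in C++ (overflow is caught separately via imul's overflow flag):

    #include <cstdint>

    bool ZeroProductNeedsHeapNumber(int64_t a, int64_t b) {
      return a * b == 0 && (a ^ b) < 0;  // one factor 0, the other negative
    }
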
 
 
 void MacroAssembler::SmiTryAddConstant(Register dst,
                                        Register src,
-                                       int32_t constant,
+                                       Smi* constant,
                                        Label* on_not_smi_result) {
   // Does not assume that src is a smi.
-  ASSERT_EQ(1, kSmiTagMask);
+  ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
   ASSERT_EQ(0, kSmiTag);
-  ASSERT(Smi::IsValid(constant));
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src.is(kScratchRegister));
 
-  Register tmp = (src.is(dst) ? kScratchRegister : dst);
-  movl(tmp, src);
-  addl(tmp, Immediate(Smi::FromInt(constant)));
-  if (tmp.is(kScratchRegister)) {
-    j(overflow, on_not_smi_result);
-    testl(tmp, Immediate(kSmiTagMask));
-    j(not_zero, on_not_smi_result);
-    movl(dst, tmp);
+  JumpIfNotSmi(src, on_not_smi_result);
+  Register tmp = (dst.is(src) ? kScratchRegister : dst);
+  Move(tmp, constant);
+  addq(tmp, src);
+  j(overflow, on_not_smi_result);
+  if (dst.is(src)) {
+    movq(dst, tmp);
+  }
+}
+
+
+void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
+  if (constant->value() == 0) {
+    if (!dst.is(src)) {
+      movq(dst, src);
+    }
+  } else if (dst.is(src)) {
+    ASSERT(!dst.is(kScratchRegister));
+
+    Move(kScratchRegister, constant);
+    addq(dst, kScratchRegister);
   } else {
-    movl(kScratchRegister, Immediate(kSmiTagMask));
-    cmovl(overflow, dst, kScratchRegister);
-    testl(dst, kScratchRegister);
-    j(not_zero, on_not_smi_result);
+    Move(dst, constant);
+    addq(dst, src);
   }
 }
 
 
 void MacroAssembler::SmiAddConstant(Register dst,
                                     Register src,
-                                    int32_t constant,
+                                    Smi* constant,
                                     Label* on_not_smi_result) {
-  ASSERT(Smi::IsValid(constant));
-  if (on_not_smi_result == NULL) {
-    if (dst.is(src)) {
-      movl(dst, src);
-    } else {
-      lea(dst, Operand(src, constant << kSmiTagSize));
+  if (constant->value() == 0) {
+    if (!dst.is(src)) {
+      movq(dst, src);
     }
+  } else if (dst.is(src)) {
+    ASSERT(!dst.is(kScratchRegister));
+
+    Move(kScratchRegister, constant);
+    addq(dst, kScratchRegister);
+    Label result_ok;
+    j(no_overflow, &result_ok);
+    subq(dst, kScratchRegister);
+    jmp(on_not_smi_result);
+    bind(&result_ok);
   } else {
+    Move(dst, constant);
+    addq(dst, src);
+    j(overflow, on_not_smi_result);
+  }
+}
+
+
+void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
+  if (constant->value() == 0) {
     if (!dst.is(src)) {
-      movl(dst, src);
+      movq(dst, src);
     }
-    addl(dst, Immediate(Smi::FromInt(constant)));
-    if (!dst.is(src)) {
-      j(overflow, on_not_smi_result);
+  } else if (dst.is(src)) {
+    ASSERT(!dst.is(kScratchRegister));
+
+    Move(kScratchRegister, constant);
+    subq(dst, kScratchRegister);
+  } else {
+    // Subtract by adding the negative, to do it in two operations.
+    if (constant->value() == Smi::kMinValue) {
+      Move(kScratchRegister, constant);
+      movq(dst, src);
+      subq(dst, kScratchRegister);
     } else {
-      Label result_ok;
-      j(no_overflow, &result_ok);
-      subl(dst, Immediate(Smi::FromInt(constant)));
-      jmp(on_not_smi_result);
-      bind(&result_ok);
+      Move(dst, Smi::FromInt(-constant->value()));
+      addq(dst, src);
     }
   }
 }
@@ -790,24 +792,33 @@
 
 void MacroAssembler::SmiSubConstant(Register dst,
                                     Register src,
-                                    int32_t constant,
+                                    Smi* constant,
                                     Label* on_not_smi_result) {
-  ASSERT(Smi::IsValid(constant));
-  Smi* smi_value = Smi::FromInt(constant);
-  if (dst.is(src)) {
-    // Optimistic subtract - may change value of dst register,
-    // if it has garbage bits in the higher half, but will not change
-    // the value as a tagged smi.
-    subl(dst, Immediate(smi_value));
-    if (on_not_smi_result != NULL) {
-      Label add_success;
-      j(no_overflow, &add_success);
-      addl(dst, Immediate(smi_value));
-      jmp(on_not_smi_result);
-      bind(&add_success);
+  if (constant->value() == 0) {
+    if (!dst.is(src)) {
+      movq(dst, src);
     }
+  } else if (dst.is(src)) {
+    ASSERT(!dst.is(kScratchRegister));
+
+    Move(kScratchRegister, constant);
+    subq(dst, kScratchRegister);
+    Label sub_success;
+    j(no_overflow, &sub_success);
+    addq(src, kScratchRegister);
+    jmp(on_not_smi_result);
+    bind(&sub_success);
   } else {
-    UNIMPLEMENTED();  // Not used yet.
+    if (constant->value() == Smi::kMinValue) {
+      Move(kScratchRegister, constant);
+      movq(dst, src);
+      subq(dst, kScratchRegister);
+      j(overflow, on_not_smi_result);
+    } else {
+      Move(dst, Smi::FromInt(-(constant->value())));
+      addq(dst, src);
+      j(overflow, on_not_smi_result);
+    }
   }
 }
 
@@ -816,38 +827,61 @@
                             Register src1,
                             Register src2,
                             Label* on_not_smi_result) {
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+  ASSERT(!dst.is(kScratchRegister));
   ASSERT(!src2.is(rax));
   ASSERT(!src2.is(rdx));
   ASSERT(!src1.is(rdx));
 
   // Check for 0 divisor (result is +/-Infinity).
   Label positive_divisor;
-  testl(src2, src2);
+  testq(src2, src2);
   j(zero, on_not_smi_result);
-  j(positive, &positive_divisor);
-  // Check for negative zero result.  If the dividend is zero, and the
-  // divisor is negative, return a floating point negative zero.
-  testl(src1, src1);
-  j(zero, on_not_smi_result);
-  bind(&positive_divisor);
 
-  // Sign extend src1 into edx:eax.
-  if (!src1.is(rax)) {
-    movl(rax, src1);
+  if (src1.is(rax)) {
+    movq(kScratchRegister, src1);
   }
-  cdq();
+  SmiToInteger32(rax, src1);
+  // We need to rule out dividing Smi::kMinValue by -1, since that would
+  // overflow in idiv and raise an exception.
+  // We combine this with the negative-zero test (negative zero only
+  // happens when dividing zero by a negative number).
+
+  // We overshoot a little and go to the slow case if we divide min-value
+  // by any negative value, not just -1.
+  Label safe_div;
+  testl(rax, Immediate(0x7fffffff));
+  j(not_zero, &safe_div);
+  testq(src2, src2);
+  if (src1.is(rax)) {
+    j(positive, &safe_div);
+    movq(src1, kScratchRegister);
+    jmp(on_not_smi_result);
+  } else {
+    j(negative, on_not_smi_result);
+  }
+  bind(&safe_div);
+
+  SmiToInteger32(src2, src2);
+  // Sign extend src1 into edx:eax.
+  cdq();
   idivl(src2);
-  // Check for the corner case of dividing the most negative smi by
-  // -1. We cannot use the overflow flag, since it is not set by
-  // idiv instruction.
-  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  cmpl(rax, Immediate(0x40000000));
-  j(equal, on_not_smi_result);
+  Integer32ToSmi(src2, src2);
   // Check that the remainder is zero.
   testl(rdx, rdx);
-  j(not_zero, on_not_smi_result);
-  // Tag the result and store it in the destination register.
+  if (src1.is(rax)) {
+    Label smi_result;
+    j(zero, &smi_result);
+    movq(src1, kScratchRegister);
+    jmp(on_not_smi_result);
+    bind(&smi_result);
+  } else {
+    j(not_zero, on_not_smi_result);
+  }
+  if (!dst.is(src1) && src1.is(rax)) {
+    movq(src1, kScratchRegister);
+  }
   Integer32ToSmi(dst, rax);
 }
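
SmiDiv bails out before idivl for the two inputs that raise #DE on x86: a zero divisor, and INT32_MIN / -1, whose quotient 2^31 is unrepresentable. (As the comments note, the code overshoots and also bails for min-value divided by any negative number.) The exact hazard, sketched:

    #include <cstdint>

    bool SafeForIdiv(int32_t dividend, int32_t divisor) {
      if (divisor == 0) return false;                            // divide error
      if (dividend == INT32_MIN && divisor == -1) return false;  // overflow
      return true;
    }
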
 
@@ -862,109 +896,136 @@
   ASSERT(!src2.is(rax));
   ASSERT(!src2.is(rdx));
   ASSERT(!src1.is(rdx));
+  ASSERT(!src1.is(src2));
 
-  testl(src2, src2);
+  testq(src2, src2);
   j(zero, on_not_smi_result);
 
   if (src1.is(rax)) {
-    // Mist remember the value to see if a zero result should
-    // be a negative zero.
-    movl(kScratchRegister, rax);
-  } else {
-    movl(rax, src1);
+    movq(kScratchRegister, src1);
   }
+  SmiToInteger32(rax, src1);
+  SmiToInteger32(src2, src2);
+
+  // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
+  Label safe_div;
+  cmpl(rax, Immediate(Smi::kMinValue));
+  j(not_equal, &safe_div);
+  cmpl(src2, Immediate(-1));
+  j(not_equal, &safe_div);
+  // Retag inputs and go slow case.
+  Integer32ToSmi(src2, src2);
+  if (src1.is(rax)) {
+    movq(src1, kScratchRegister);
+  }
+  jmp(on_not_smi_result);
+  bind(&safe_div);
+
   // Sign extend eax into edx:eax.
   cdq();
   idivl(src2);
-  // Check for a negative zero result.  If the result is zero, and the
-  // dividend is negative, return a floating point negative zero.
-  Label non_zero_result;
-  testl(rdx, rdx);
-  j(not_zero, &non_zero_result);
+  // Restore smi tags on inputs.
+  Integer32ToSmi(src2, src2);
   if (src1.is(rax)) {
-    testl(kScratchRegister, kScratchRegister);
-  } else {
-    testl(src1, src1);
+    movq(src1, kScratchRegister);
   }
+  // Check for a negative zero result.  If the result is zero, and the
+  // dividend is negative, go slow to return a floating point negative zero.
+  Label smi_result;
+  testl(rdx, rdx);
+  j(not_zero, &smi_result);
+  testq(src1, src1);
   j(negative, on_not_smi_result);
-  bind(&non_zero_result);
-  if (!dst.is(rdx)) {
-    movl(dst, rdx);
-  }
+  bind(&smi_result);
+  Integer32ToSmi(dst, rdx);
 }
 
 
 void MacroAssembler::SmiNot(Register dst, Register src) {
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src.is(kScratchRegister));
+  // Set the tag and padding bits before complementing, so that they
+  // are zero afterwards.
+  movl(kScratchRegister, Immediate(~0));
   if (dst.is(src)) {
-    not_(dst);
-    // Remove inverted smi-tag.  The mask is sign-extended to 64 bits.
-    xor_(src, Immediate(kSmiTagMask));
+    xor_(dst, kScratchRegister);
   } else {
-    ASSERT_EQ(0, kSmiTag);
-    lea(dst, Operand(src, kSmiTagMask));
-    not_(dst);
+    lea(dst, Operand(src, kScratchRegister, times_1, 0));
   }
+  not_(dst);
 }
 
 
 void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
+  ASSERT(!dst.is(src2));
   if (!dst.is(src1)) {
-    movl(dst, src1);
+    movq(dst, src1);
   }
   and_(dst, src2);
 }
 
 
-void MacroAssembler::SmiAndConstant(Register dst, Register src, int constant) {
-  ASSERT(Smi::IsValid(constant));
-  if (!dst.is(src)) {
-    movl(dst, src);
+void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
+  if (constant->value() == 0) {
+    xor_(dst, dst);
+  } else if (dst.is(src)) {
+    ASSERT(!dst.is(kScratchRegister));
+    Move(kScratchRegister, constant);
+    and_(dst, kScratchRegister);
+  } else {
+    Move(dst, constant);
+    and_(dst, src);
   }
-  and_(dst, Immediate(Smi::FromInt(constant)));
 }
 
 
 void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
   if (!dst.is(src1)) {
-    movl(dst, src1);
+    movq(dst, src1);
   }
   or_(dst, src2);
 }
 
 
-void MacroAssembler::SmiOrConstant(Register dst, Register src, int constant) {
-  ASSERT(Smi::IsValid(constant));
-  if (!dst.is(src)) {
-    movl(dst, src);
+void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
+  if (dst.is(src)) {
+    ASSERT(!dst.is(kScratchRegister));
+    Move(kScratchRegister, constant);
+    or_(dst, kScratchRegister);
+  } else {
+    Move(dst, constant);
+    or_(dst, src);
   }
-  or_(dst, Immediate(Smi::FromInt(constant)));
 }
 
+
 void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
   if (!dst.is(src1)) {
-    movl(dst, src1);
+    movq(dst, src1);
   }
   xor_(dst, src2);
 }
 
 
-void MacroAssembler::SmiXorConstant(Register dst, Register src, int constant) {
-  ASSERT(Smi::IsValid(constant));
-  if (!dst.is(src)) {
-    movl(dst, src);
+void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
+  if (dst.is(src)) {
+    ASSERT(!dst.is(kScratchRegister));
+    Move(kScratchRegister, constant);
+    xor_(dst, kScratchRegister);
+  } else {
+    Move(dst, constant);
+    xor_(dst, src);
   }
-  xor_(dst, Immediate(Smi::FromInt(constant)));
 }
 
 
-
 void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
                                                      Register src,
                                                      int shift_value) {
+  ASSERT(is_uint5(shift_value));
   if (shift_value > 0) {
     if (dst.is(src)) {
-      sarl(dst, Immediate(shift_value));
-      and_(dst, Immediate(~kSmiTagMask));
+      sar(dst, Immediate(shift_value + kSmiShift));
+      shl(dst, Immediate(kSmiShift));
     } else {
       UNIMPLEMENTED();  // Not used.
     }
@@ -980,20 +1041,13 @@
   if (dst.is(src)) {
     UNIMPLEMENTED();  // Not used.
   } else {
-    movl(dst, src);
-    // Untag the smi.
-    sarl(dst, Immediate(kSmiTagSize));
-    if (shift_value < 2) {
-      // A negative Smi shifted right two is in the positive Smi range,
-      // but if shifted only by zero or one, it never is.
+    movq(dst, src);
+    if (shift_value == 0) {
+      testq(dst, dst);
       j(negative, on_not_smi_result);
     }
-    if (shift_value > 0) {
-      // Do the right shift on the integer value.
-      shrl(dst, Immediate(shift_value));
-    }
-    // Re-tag the result.
-    addl(dst, dst);
+    shr(dst, Immediate(shift_value + kSmiShift));
+    shl(dst, Immediate(kSmiShift));
   }
 }
 
@@ -1002,20 +1056,11 @@
                                           Register src,
                                           int shift_value,
                                           Label* on_not_smi_result) {
-  if (dst.is(src)) {
-    UNIMPLEMENTED();  // Not used.
-  } else {
-    movl(dst, src);
-    if (shift_value > 0) {
-      // Treat dst as an untagged integer value equal to two times the
-      // smi value of src, i.e., already shifted left by one.
-      if (shift_value > 1) {
-        shll(dst, Immediate(shift_value - 1));
-      }
-      // Convert int result to Smi, checking that it is in smi range.
-      ASSERT(kSmiTagSize == 1);  // adjust code if not the case
-      Integer32ToSmi(dst, dst, on_not_smi_result);
-    }
+  if (!dst.is(src)) {
+    movq(dst, src);
+  }
+  if (shift_value > 0) {
+    shl(dst, Immediate(shift_value));
   }
 }
 
@@ -1026,23 +1071,14 @@
                                   Label* on_not_smi_result) {
   ASSERT(!dst.is(rcx));
   Label result_ok;
-  // Untag both operands.
-  SmiToInteger32(dst, src1);
-  SmiToInteger32(rcx, src2);
-  shll(dst);
-  // Check that the *signed* result fits in a smi.
-  Condition is_valid = CheckInteger32ValidSmiValue(dst);
-  j(is_valid, &result_ok);
-  // Restore the relevant bits of the source registers
-  // and call the slow version.
-  if (dst.is(src1)) {
-    shrl(dst);
-    Integer32ToSmi(dst, dst);
+  // Untag shift amount.
+  if (!dst.is(src1)) {
+    movq(dst, src1);
   }
-  Integer32ToSmi(rcx, rcx);
-  jmp(on_not_smi_result);
-  bind(&result_ok);
-  Integer32ToSmi(dst, dst);
+  SmiToInteger32(rcx, src2);
+  // The shift amount is given by the lower five bits, not six as in
+  // the shl opcode.
+  and_(rcx, Immediate(0x1f));
+  shl(dst);
 }
 
 
@@ -1050,48 +1086,62 @@
                                           Register src1,
                                           Register src2,
                                           Label* on_not_smi_result) {
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
   ASSERT(!dst.is(rcx));
   Label result_ok;
-  // Untag both operands.
-  SmiToInteger32(dst, src1);
-  SmiToInteger32(rcx, src2);
-
-  shrl(dst);
-  // Check that the *unsigned* result fits in a smi.
-  // I.e., that it is a valid positive smi value. The positive smi
-  // values are  0..0x3fffffff, i.e., neither of the top-most two
-  // bits can be set.
-  //
-  // These two cases can only happen with shifts by 0 or 1 when
-  // handed a valid smi.  If the answer cannot be represented by a
-  // smi, restore the left and right arguments, and jump to slow
-  // case.  The low bit of the left argument may be lost, but only
-  // in a case where it is dropped anyway.
-  testl(dst, Immediate(0xc0000000));
-  j(zero, &result_ok);
-  if (dst.is(src1)) {
-    shll(dst);
-    Integer32ToSmi(dst, dst);
+  if (src1.is(rcx) || src2.is(rcx)) {
+    movq(kScratchRegister, rcx);
   }
-  Integer32ToSmi(rcx, rcx);
-  jmp(on_not_smi_result);
-  bind(&result_ok);
-  // Smi-tag the result in answer.
-  Integer32ToSmi(dst, dst);
+  if (!dst.is(src1)) {
+    movq(dst, src1);
+  }
+  SmiToInteger32(rcx, src2);
+  orl(rcx, Immediate(kSmiShift));
+  shr(dst);  // Shift amount is (original rcx & 0x1f) + 32.
+  shl(dst, Immediate(kSmiShift));
+  testq(dst, dst);
+  if (src1.is(rcx) || src2.is(rcx)) {
+    Label positive_result;
+    j(positive, &positive_result);
+    if (src1.is(rcx)) {
+      movq(src1, kScratchRegister);
+    } else {
+      movq(src2, kScratchRegister);
+    }
+    jmp(on_not_smi_result);
+    bind(&positive_result);
+  } else {
+    j(negative, on_not_smi_result);  // src2 was zero and src1 negative.
+  }
 }
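
The orl(rcx, Immediate(kSmiShift)) trick shifts the tagged word right by 32 + n in one instruction: the payload is logically shifted by n and lands in the low half, and the following shl retags it, so dst is never untagged. A sketch for n in [0, 31]:

    #include <cstdint>

    uint64_t TaggedLogicalShiftRight(uint64_t tagged_smi, int n) {
      return (tagged_smi >> (32 + n)) << 32;  // shr by 32 + n, then shl by 32
    }
    // For n == 0 a negative payload yields an unsigned value >= 2^31,
    // which does not fit a smi; the sign test above sends that case to
    // the slow path.
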
 
 
 void MacroAssembler::SmiShiftArithmeticRight(Register dst,
                                              Register src1,
                                              Register src2) {
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
   ASSERT(!dst.is(rcx));
-  // Untag both operands.
-  SmiToInteger32(dst, src1);
+  if (src1.is(rcx)) {
+    movq(kScratchRegister, src1);
+  } else if (src2.is(rcx)) {
+    movq(kScratchRegister, src2);
+  }
+  if (!dst.is(src1)) {
+    movq(dst, src1);
+  }
   SmiToInteger32(rcx, src2);
-  // Shift as integer.
-  sarl(dst);
-  // Retag result.
-  Integer32ToSmi(dst, dst);
+  orl(rcx, Immediate(kSmiShift));
+  sar(dst);  // Shift by 32 + (original rcx & 0x1f).
+  shl(dst, Immediate(kSmiShift));
+  if (src1.is(rcx)) {
+    movq(src1, kScratchRegister);
+  } else if (src2.is(rcx)) {
+    movq(src2, kScratchRegister);
+  }
 }
 
 
@@ -1099,21 +1149,27 @@
                                   Register src1,
                                   Register src2,
                                   Label* on_not_smis) {
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
   ASSERT(!dst.is(src1));
   ASSERT(!dst.is(src2));
   // Both operands must not be smis.
 #ifdef DEBUG
-  Condition not_both_smis = CheckNotBothSmi(src1, src2);
-  Check(not_both_smis, "Both registers were smis.");
+  if (allow_stub_calls()) {  // Check contains a stub call.
+    Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
+    Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
+  }
 #endif
   ASSERT_EQ(0, kSmiTag);
   ASSERT_EQ(0, Smi::FromInt(0));
-  movq(kScratchRegister, Immediate(kSmiTagMask));
+  movl(kScratchRegister, Immediate(kSmiTagMask));
   and_(kScratchRegister, src1);
   testl(kScratchRegister, src2);
+  // If non-zero then both are smis.
   j(not_zero, on_not_smis);
-  // One operand is a smi.
 
+  // Exactly one operand is a smi.
   ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
   // kScratchRegister still holds src1 & kSmiTagMask, which is either zero or one.
   subq(kScratchRegister, Immediate(1));
@@ -1123,71 +1179,96 @@
   and_(dst, kScratchRegister);
   // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
   xor_(dst, src1);
-  // If src1 is a smi, dst is src2, else it is src1, i.e., a non-smi.
+  // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
 }
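
The selection in SelectNonSmi is branchless: with smi tag 0 and heap-object tag 1 in the low bit, (src1 & 1) - 1 is all ones exactly when src1 is a smi, and the masked xor then swaps in the other operand. In C++ terms, a sketch:

    #include <cstdint>

    uint64_t SelectNonSmi(uint64_t src1, uint64_t src2) {
      uint64_t mask = (src1 & 1) - 1;        // ~0 if src1 is a smi, else 0
      return ((src1 ^ src2) & mask) ^ src1;  // src2 if src1 is a smi, else src1
    }
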
 
-
-SmiIndex MacroAssembler::SmiToIndex(Register dst, Register src, int shift) {
+SmiIndex MacroAssembler::SmiToIndex(Register dst,
+                                    Register src,
+                                    int shift) {
   ASSERT(is_uint6(shift));
-  if (shift == 0) {  // times_1.
-    SmiToInteger32(dst, src);
-    return SmiIndex(dst, times_1);
+  // There is a possible optimization if shift is in the range 60-63, but that
+  // will (and must) never happen.
+  if (!dst.is(src)) {
+    movq(dst, src);
   }
-  if (shift <= 4) {  // 2 - 16 times multiplier is handled using ScaleFactor.
-    // We expect that all smis are actually zero-padded. If this holds after
-    // checking, this line can be omitted.
-    movl(dst, src);  // Ensure that the smi is zero-padded.
-    return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiTagSize));
+  if (shift < kSmiShift) {
+    sar(dst, Immediate(kSmiShift - shift));
+  } else {
+    shl(dst, Immediate(shift - kSmiShift));
   }
-  // Shift by shift-kSmiTagSize.
-  movl(dst, src);  // Ensure that the smi is zero-padded.
-  shl(dst, Immediate(shift - kSmiTagSize));
   return SmiIndex(dst, times_1);
 }
 
-
 SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
                                             Register src,
                                             int shift) {
   // Register src holds a positive smi.
   ASSERT(is_uint6(shift));
-  if (shift == 0) {  // times_1.
-    SmiToInteger32(dst, src);
-    neg(dst);
-    return SmiIndex(dst, times_1);
+  if (!dst.is(src)) {
+    movq(dst, src);
   }
-  if (shift <= 4) {  // 2 - 16 times multiplier is handled using ScaleFactor.
-    movl(dst, src);
-    neg(dst);
-    return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiTagSize));
-  }
-  // Shift by shift-kSmiTagSize.
-  movl(dst, src);
   neg(dst);
-  shl(dst, Immediate(shift - kSmiTagSize));
+  if (shift < kSmiShift) {
+    sar(dst, Immediate(kSmiShift - shift));
+  } else {
+    shl(dst, Immediate(shift - kSmiShift));
+  }
   return SmiIndex(dst, times_1);
 }
 
 
-
-bool MacroAssembler::IsUnsafeSmi(Smi* value) {
-  return false;
+void MacroAssembler::JumpIfSmi(Register src, Label* on_smi) {
+  ASSERT_EQ(0, kSmiTag);
+  Condition smi = CheckSmi(src);
+  j(smi, on_smi);
 }
 
-void MacroAssembler::LoadUnsafeSmi(Register dst, Smi* source) {
-  UNIMPLEMENTED();
+
+void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi) {
+  Condition smi = CheckSmi(src);
+  j(NegateCondition(smi), on_not_smi);
+}
+
+
+void MacroAssembler::JumpIfNotPositiveSmi(Register src,
+                                          Label* on_not_positive_smi) {
+  Condition positive_smi = CheckPositiveSmi(src);
+  j(NegateCondition(positive_smi), on_not_positive_smi);
+}
+
+
+void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
+                                             Smi* constant,
+                                             Label* on_equals) {
+  SmiCompare(src, constant);
+  j(equal, on_equals);
+}
+
+
+void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid) {
+  Condition is_valid = CheckInteger32ValidSmiValue(src);
+  j(NegateCondition(is_valid), on_invalid);
+}
+
+
+void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
+                                                Label* on_invalid) {
+  Condition is_valid = CheckUInteger32ValidSmiValue(src);
+  j(NegateCondition(is_valid), on_invalid);
+}
+
+
+void MacroAssembler::JumpIfNotBothSmi(Register src1, Register src2,
+                                      Label* on_not_both_smi) {
+  Condition both_smi = CheckBothSmi(src1, src2);
+  j(NegateCondition(both_smi), on_not_both_smi);
 }
 
 
 void MacroAssembler::Move(Register dst, Handle<Object> source) {
   ASSERT(!source->IsFailure());
   if (source->IsSmi()) {
-    if (IsUnsafeSmi(source)) {
-      LoadUnsafeSmi(dst, source);
-    } else {
-      int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source));
-      movq(dst, Immediate(smi));
-    }
+    Move(dst, Smi::cast(*source));
   } else {
     movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
   }
@@ -1195,9 +1276,9 @@
 
 
 void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
+  ASSERT(!source->IsFailure());
   if (source->IsSmi()) {
-    int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source));
-    movq(dst, Immediate(smi));
+    Move(dst, Smi::cast(*source));
   } else {
     movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
     movq(dst, kScratchRegister);
@@ -1206,21 +1287,18 @@
 
 
 void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
-  Move(kScratchRegister, source);
-  cmpq(dst, kScratchRegister);
+  if (source->IsSmi()) {
+    SmiCompare(dst, Smi::cast(*source));
+  } else {
+    Move(kScratchRegister, source);
+    cmpq(dst, kScratchRegister);
+  }
 }
 
 
 void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
   if (source->IsSmi()) {
-    if (IsUnsafeSmi(source)) {
-      LoadUnsafeSmi(kScratchRegister, source);
-      cmpl(dst, kScratchRegister);
-    } else {
-      // For smi-comparison, it suffices to compare the low 32 bits.
-      int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source));
-      cmpl(dst, Immediate(smi));
-    }
+    SmiCompare(dst, Smi::cast(*source));
   } else {
     ASSERT(source->IsHeapObject());
     movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
@@ -1231,13 +1309,7 @@
 
 void MacroAssembler::Push(Handle<Object> source) {
   if (source->IsSmi()) {
-    if (IsUnsafeSmi(source)) {
-      LoadUnsafeSmi(kScratchRegister, source);
-      push(kScratchRegister);
-    } else {
-      int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source));
-      push(Immediate(smi));
-    }
+    Push(Smi::cast(*source));
   } else {
     ASSERT(source->IsHeapObject());
     movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
@@ -1247,12 +1319,23 @@
 
 
 void MacroAssembler::Push(Smi* source) {
-  if (IsUnsafeSmi(source)) {
-    LoadUnsafeSmi(kScratchRegister, source);
-    push(kScratchRegister);
+  intptr_t smi = reinterpret_cast<intptr_t>(source);
+  if (is_int32(smi)) {
+    push(Immediate(static_cast<int32_t>(smi)));
   } else {
-    int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(source));
-    push(Immediate(smi));
+    Set(kScratchRegister, smi);
+    push(kScratchRegister);
+  }
+}
+
+
+void MacroAssembler::Test(const Operand& src, Smi* source) {
+  intptr_t smi = reinterpret_cast<intptr_t>(source);
+  if (is_int32(smi)) {
+    testl(src, Immediate(static_cast<int32_t>(smi)));
+  } else {
+    Move(kScratchRegister, source);
+    testq(src, kScratchRegister);
   }
 }
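+// Note on the is_int32 checks above: under the new encoding a smi's tagged
+// bit pattern is value << kSmiShift, so for kSmiShift == 32 only
+// Smi::FromInt(0) fits in a 32-bit immediate; all other smi constants take
+// the kScratchRegister path.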
 
@@ -1270,17 +1353,8 @@
 
 
 void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
-  ASSERT(RelocInfo::IsCodeTarget(rmode));
-  movq(kScratchRegister, code_object, rmode);
-#ifdef DEBUG
-  Label target;
-  bind(&target);
-#endif
-  jmp(kScratchRegister);
-#ifdef DEBUG
-  ASSERT_EQ(kCallTargetAddressOffset,
-            SizeOfCodeGeneratedSince(&target) + kPointerSize);
-#endif
+  // TODO(X64): Inline this
+  jmp(code_object, rmode);
 }
 
 
@@ -1299,17 +1373,7 @@
 void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
   ASSERT(RelocInfo::IsCodeTarget(rmode));
   WriteRecordedPositions();
-  movq(kScratchRegister, code_object, rmode);
-#ifdef DEBUG
-  // Patch target is kPointer size bytes *before* target label.
-  Label target;
-  bind(&target);
-#endif
-  call(kScratchRegister);
-#ifdef DEBUG
-  ASSERT_EQ(kCallTargetAddressOffset,
-            SizeOfCodeGeneratedSince(&target) + kPointerSize);
-#endif
+  call(code_object, rmode);
 }
 
 
@@ -1357,18 +1421,9 @@
 
 
 void MacroAssembler::FCmp() {
-  fucompp();
-  push(rax);
-  fnstsw_ax();
-  if (CpuFeatures::IsSupported(CpuFeatures::SAHF)) {
-    sahf();
-  } else {
-    shrl(rax, Immediate(8));
-    and_(rax, Immediate(0xFF));
-    push(rax);
-    popfq();
-  }
-  pop(rax);
+  fucomip();
+  ffree(0);
+  fincstp();
 }
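+// fucomip compares ST(0) with ST(1), sets ZF/PF/CF in EFLAGS directly and
+// pops one operand; ffree(0) followed by fincstp discards the other. This
+// leaves the x87 stack empty without the fnstsw/sahf shuffle used before.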
 
 
@@ -1467,7 +1522,6 @@
   }
 }
 
-
 #ifdef ENABLE_DEBUGGER_SUPPORT
 
 void MacroAssembler::PushRegistersFromMemory(RegList regs) {
@@ -1484,6 +1538,7 @@
   }
 }
 
+
 void MacroAssembler::SaveRegistersToMemory(RegList regs) {
   ASSERT((regs & ~kJSCallerSaved) == 0);
   // Copy the content of registers to memory location.
@@ -1566,8 +1621,11 @@
   // arguments match the expected number of arguments. Fake a
   // parameter count to avoid emitting code to do the check.
   ParameterCount expected(0);
-  InvokeCode(Handle<Code>(code), expected, expected,
-             RelocInfo::CODE_TARGET, flag);
+  InvokeCode(Handle<Code>(code),
+             expected,
+             expected,
+             RelocInfo::CODE_TARGET,
+             flag);
 
   const char* name = Builtins::GetName(id);
   int argc = Builtins::GetArgumentsCount(id);
@@ -1576,7 +1634,6 @@
   if (!resolved) {
     uint32_t flags =
         Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
-        Bootstrapper::FixupFlagsIsPCRelative::encode(false) |
         Bootstrapper::FixupFlagsUseCodeObject::encode(false);
     Unresolved entry =
         { pc_offset() - kCallTargetAddressOffset, flags, name };
@@ -1600,7 +1657,7 @@
     } else {
       movq(rax, Immediate(actual.immediate()));
       if (expected.immediate() ==
-          SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
+              SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
         // Don't worry about adapting arguments for built-ins that
         // don't want that done. Skip adaption code by making it look
         // like we have a match between expected and actual number of
@@ -1706,7 +1763,7 @@
   push(rbp);
   movq(rbp, rsp);
   push(rsi);  // Context.
-  push(Immediate(Smi::FromInt(type)));
+  Push(Smi::FromInt(type));
   movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
   push(kScratchRegister);
   if (FLAG_debug_code) {
@@ -1721,7 +1778,7 @@
 
 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
   if (FLAG_debug_code) {
-    movq(kScratchRegister, Immediate(Smi::FromInt(type)));
+    Move(kScratchRegister, Smi::FromInt(type));
     cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
     Check(equal, "stack frame types must match");
   }
@@ -1730,7 +1787,6 @@
 }
 
 
-
 void MacroAssembler::EnterExitFrame(StackFrame::Type type, int result_size) {
   ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
 
@@ -1743,7 +1799,7 @@
   movq(rbp, rsp);
 
   // Reserve room for entry stack pointer and push the debug marker.
-  ASSERT(ExitFrameConstants::kSPOffset  == -1 * kPointerSize);
+  ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
   push(Immediate(0));  // saved entry sp, patched before call
   push(Immediate(type == StackFrame::EXIT_DEBUG ? 1 : 0));
 
@@ -1824,16 +1880,6 @@
   movq(rcx, Operand(rbp, 1 * kPointerSize));
   movq(rbp, Operand(rbp, 0 * kPointerSize));
 
-#ifdef _WIN64
-  // If return value is on the stack, pop it to registers.
-  if (result_size > 1) {
-    ASSERT_EQ(2, result_size);
-    // Position above 4 argument mirrors and arguments object.
-    movq(rax, Operand(rsp, 6 * kPointerSize));
-    movq(rdx, Operand(rsp, 7 * kPointerSize));
-  }
-#endif
-
   // Pop everything up to and including the arguments and the receiver
   // from the caller stack.
   lea(rsp, Operand(r15, 1 * kPointerSize));
@@ -1856,8 +1902,10 @@
 }
 
 
-Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
-                                   JSObject* holder, Register holder_reg,
+Register MacroAssembler::CheckMaps(JSObject* object,
+                                   Register object_reg,
+                                   JSObject* holder,
+                                   Register holder_reg,
                                    Register scratch,
                                    Label* miss) {
   // Make sure there's no overlap between scratch and the other
@@ -1923,8 +1971,7 @@
   }
 
   // Check the holder map.
-  Cmp(FieldOperand(reg, HeapObject::kMapOffset),
-      Handle<Map>(holder->map()));
+  Cmp(FieldOperand(reg, HeapObject::kMapOffset), Handle<Map>(holder->map()));
   j(not_equal, miss);
 
   // Log the check depth.
@@ -1941,8 +1988,6 @@
 }
 
 
-
-
 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                             Register scratch,
                                             Label* miss) {
@@ -1996,8 +2041,8 @@
 
   movq(kScratchRegister,
        FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
-  int token_offset = Context::kHeaderSize +
-                     Context::SECURITY_TOKEN_INDEX * kPointerSize;
+  int token_offset =
+      Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
   movq(scratch, FieldOperand(scratch, token_offset));
   cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
   j(not_equal, miss);
@@ -2164,6 +2209,23 @@
 }
 
 
+void MacroAssembler::AllocateHeapNumber(Register result,
+                                        Register scratch,
+                                        Label* gc_required) {
+  // Allocate heap number in new space.
+  AllocateInNewSpace(HeapNumber::kSize,
+                     result,
+                     scratch,
+                     no_reg,
+                     gc_required,
+                     TAG_OBJECT);
+
+  // Set the map.
+  LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
+  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
+}
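+// Illustrative use (the register choices and the movsd store are
+// assumptions, not part of this change):
+//   Label gc_required;
+//   AllocateHeapNumber(rax, rcx, &gc_required);
+//   movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);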
+
+
 CodePatcher::CodePatcher(byte* address, int size)
     : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
@@ -2182,5 +2244,4 @@
   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }
 
-
 } }  // namespace v8::internal
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index adc136a..4c2f35b 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -72,6 +72,18 @@
                    Register value,
                    Register scratch);
 
+  // Set the remembered set bit for [object+offset].
+  // The value is known not to be a smi.
+  // object is the object being stored into, value is the object being stored.
+  // If offset is zero, then the scratch register contains the array index into
+  // the elements array represented as a Smi.
+  // All registers are clobbered by the operation.
+  void RecordWriteNonSmi(Register object,
+                         int offset,
+                         Register value,
+                         Register scratch);
+
+
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // ---------------------------------------------------------------------------
   // Debugger Support
@@ -146,11 +158,12 @@
 
   // Tag an integer value if possible, or jump if the integer value cannot
   // be represented as a smi. Only uses the low 32 bits of the src register.
+  // NOTICE: Destroys the dst register even if unsuccessful!
   void Integer32ToSmi(Register dst, Register src, Label* on_overflow);
 
   // Adds constant to src and tags the result as a smi.
   // Result must be a valid smi.
-  void Integer64AddToSmi(Register dst, Register src, int constant);
+  void Integer64PlusConstantToSmi(Register dst, Register src, int constant);
 
   // Convert smi to 32-bit integer. I.e., not sign extended into
   // high 32 bits of destination.
@@ -165,48 +178,48 @@
                                              Register src,
                                              int power);
 
+  // Simple comparison of smis.
+  void SmiCompare(Register dst, Register src);
+  void SmiCompare(Register dst, Smi* src);
+  void SmiCompare(const Operand& dst, Register src);
+  void SmiCompare(const Operand& dst, Smi* src);
+  // Sets sign and zero flags depending on value of smi in register.
+  void SmiTest(Register src);
+
   // Functions performing a check on a known or potential smi. Each returns
   // a condition that is satisfied if the check is successful.
 
   // Is the value a tagged smi.
   Condition CheckSmi(Register src);
 
-  // Is the value not a tagged smi.
-  Condition CheckNotSmi(Register src);
-
   // Is the value a positive tagged smi.
   Condition CheckPositiveSmi(Register src);
 
-  // Is the value not a positive tagged smi.
-  Condition CheckNotPositiveSmi(Register src);
-
   // Are both values tagged smis.
   Condition CheckBothSmi(Register first, Register second);
 
-  // Is one of the values not a tagged smi.
-  Condition CheckNotBothSmi(Register first, Register second);
-
   // Is the value the minimum smi value (since we are using
   // two's complement numbers, negating the value is known to yield
   // a non-smi value).
   Condition CheckIsMinSmi(Register src);
 
-  // Check whether a tagged smi is equal to a constant.
-  Condition CheckSmiEqualsConstant(Register src, int constant);
-
-  // Check whether a tagged smi is greater than or equal to a constant.
-  Condition CheckSmiGreaterEqualsConstant(Register src, int constant);
-
   // Checks whether a 32-bit integer value is valid for conversion
   // to a smi.
   Condition CheckInteger32ValidSmiValue(Register src);
 
+  // Checks whether a 32-bit unsigned integer value is valid for
+  // conversion to a smi.
+  Condition CheckUInteger32ValidSmiValue(Register src);
+
   // Test-and-jump functions. Typically combines a check function
   // above with a conditional jump.
 
   // Jump if the value cannot be represented by a smi.
   void JumpIfNotValidSmiValue(Register src, Label* on_invalid);
 
+  // Jump if the unsigned integer value cannot be represented by a smi.
+  void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid);
+
   // Jump to label if the value is a tagged smi.
   void JumpIfSmi(Register src, Label* on_smi);
 
@@ -216,15 +229,9 @@
   // Jump to label if the value is not a positive tagged smi.
   void JumpIfNotPositiveSmi(Register src, Label* on_not_smi);
 
-  // Jump to label if the value is a tagged smi with value equal
+  // Jump to label if the value, which must be a tagged smi, has value equal
   // to the constant.
-  void JumpIfSmiEqualsConstant(Register src, int constant, Label* on_equals);
-
-  // Jump to label if the value is a tagged smi with value greater than or equal
-  // to the constant.
-  void JumpIfSmiGreaterEqualsConstant(Register src,
-                                      int constant,
-                                      Label* on_equals);
+  void JumpIfSmiEqualsConstant(Register src, Smi* constant, Label* on_equals);
 
   // Jump if either or both register are not smi values.
   void JumpIfNotBothSmi(Register src1, Register src2, Label* on_not_both_smi);
@@ -239,29 +246,36 @@
   // the label.
   void SmiTryAddConstant(Register dst,
                          Register src,
-                         int32_t constant,
+                         Smi* constant,
                          Label* on_not_smi_result);
 
+  // Add an integer constant to a tagged smi, giving a tagged smi as result.
+  // No overflow testing on the result is done.
+  void SmiAddConstant(Register dst, Register src, Smi* constant);
+
   // Add an integer constant to a tagged smi, giving a tagged smi as result,
   // or jumping to a label if the result cannot be represented by a smi.
-  // If the label is NULL, no testing on the result is done.
   void SmiAddConstant(Register dst,
                       Register src,
-                      int32_t constant,
+                      Smi* constant,
                       Label* on_not_smi_result);
 
   // Subtract an integer constant from a tagged smi, giving a tagged smi as
+  // result. No testing on the result is done.
+  void SmiSubConstant(Register dst, Register src, Smi* constant);
+
+  // Subtract an integer constant from a tagged smi, giving a tagged smi as
   // result, or jumping to a label if the result cannot be represented by a smi.
-  // If the label is NULL, no testing on the result is done.
   void SmiSubConstant(Register dst,
                       Register src,
-                      int32_t constant,
+                      Smi* constant,
                       Label* on_not_smi_result);
 
   // Negating a smi can give a negative zero or a too-large positive value.
+  // NOTICE: This operation jumps on success, not failure!
   void SmiNeg(Register dst,
               Register src,
-              Label* on_not_smi_result);
+              Label* on_smi_result);
 
   // Adds smi values and returns the result as a smi.
   // If dst is src1, then src1 will be destroyed, even if
@@ -307,9 +321,9 @@
   void SmiAnd(Register dst, Register src1, Register src2);
   void SmiOr(Register dst, Register src1, Register src2);
   void SmiXor(Register dst, Register src1, Register src2);
-  void SmiAndConstant(Register dst, Register src1, int constant);
-  void SmiOrConstant(Register dst, Register src1, int constant);
-  void SmiXorConstant(Register dst, Register src1, int constant);
+  void SmiAndConstant(Register dst, Register src1, Smi* constant);
+  void SmiOrConstant(Register dst, Register src1, Smi* constant);
+  void SmiXorConstant(Register dst, Register src1, Smi* constant);
 
   void SmiShiftLeftConstant(Register dst,
                             Register src,
@@ -367,30 +381,31 @@
   // Converts a positive smi to a negative index.
   SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);
 
+  // Basic Smi operations.
+  void Move(Register dst, Smi* source) {
+    Set(dst, reinterpret_cast<int64_t>(source));
+  }
+
+  void Move(const Operand& dst, Smi* source) {
+    Set(dst, reinterpret_cast<int64_t>(source));
+  }
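+  // Note: reinterpret_cast yields the smi's full tagged bit pattern
+  // (value << kSmiShift under the new encoding), which Set loads as a
+  // single 64-bit constant.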
+
+  void Push(Smi* smi);
+  void Test(const Operand& dst, Smi* source);
+
   // ---------------------------------------------------------------------------
   // Macro instructions
 
-  // Expression support
+  // Load a register with a long value as efficiently as possible.
   void Set(Register dst, int64_t x);
   void Set(const Operand& dst, int64_t x);
 
   // Handle support
-  bool IsUnsafeSmi(Smi* value);
-  bool IsUnsafeSmi(Handle<Object> value) {
-    return IsUnsafeSmi(Smi::cast(*value));
-  }
-
-  void LoadUnsafeSmi(Register dst, Smi* source);
-  void LoadUnsafeSmi(Register dst, Handle<Object> source) {
-    LoadUnsafeSmi(dst, Smi::cast(*source));
-  }
-
   void Move(Register dst, Handle<Object> source);
   void Move(const Operand& dst, Handle<Object> source);
   void Cmp(Register dst, Handle<Object> source);
   void Cmp(const Operand& dst, Handle<Object> source);
   void Push(Handle<Object> source);
-  void Push(Smi* smi);
 
   // Control Flow
   void Jump(Address destination, RelocInfo::Mode rmode);
@@ -490,6 +505,13 @@
   // un-done.
   void UndoAllocationInNewSpace(Register object);
 
+  // Allocate a heap number in new space with undefined value. Returns
+  // tagged pointer in result register, or jumps to gc_required if new
+  // space is full.
+  void AllocateHeapNumber(Register result,
+                          Register scratch,
+                          Label* gc_required);
+
   // ---------------------------------------------------------------------------
   // Support functions.
 
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 0994230..58a3e0f 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -47,19 +47,24 @@
                        StubCache::Table table,
                        Register name,
                        Register offset) {
-  // The offset register must hold a *positive* smi.
+  ASSERT_EQ(8, kPointerSize);
+  ASSERT_EQ(16, sizeof(StubCache::Entry));
+  // The offset register holds the entry offset times four (due to masking
+  // and shifting optimizations).
   ExternalReference key_offset(SCTableReference::keyReference(table));
   Label miss;
 
   __ movq(kScratchRegister, key_offset);
-  SmiIndex index = masm->SmiToIndex(offset, offset, kPointerSizeLog2);
   // Check that the key in the entry matches the name.
-  __ cmpl(name, Operand(kScratchRegister, index.reg, index.scale, 0));
+  // Multiply entry offset by 16 to get the entry address. Since the
+  // offset register already holds the entry offset times four, multiply
+  // by a further four.
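+  // For example, entry index 3 is held as 12; scaling by times_4 gives 48,
+  // which equals 3 * sizeof(StubCache::Entry).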
+  __ cmpl(name, Operand(kScratchRegister, offset, times_4, 0));
   __ j(not_equal, &miss);
   // Get the code entry from the cache.
   // Use key_offset + kPointerSize, rather than loading value_offset.
   __ movq(kScratchRegister,
-          Operand(kScratchRegister, index.reg, index.scale, kPointerSize));
+          Operand(kScratchRegister, offset, times_4, kPointerSize));
   // Check that the flags match what we're looking for.
   __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
   __ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
diff --git a/src/x64/virtual-frame-x64.cc b/src/x64/virtual-frame-x64.cc
index 655f4c6..781efd1 100644
--- a/src/x64/virtual-frame-x64.cc
+++ b/src/x64/virtual-frame-x64.cc
@@ -63,14 +63,16 @@
   Comment cmnt(masm(), "[ Enter JS frame");
 
 #ifdef DEBUG
-  // Verify that rdi contains a JS function.  The following code
-  // relies on rax being available for use.
-  Condition not_smi = masm()->CheckNotSmi(rdi);
-  __ Check(not_smi,
-           "VirtualFrame::Enter - rdi is not a function (smi check).");
-  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
-  __ Check(equal,
-           "VirtualFrame::Enter - rdi is not a function (map check).");
+  if (FLAG_debug_code) {
+    // Verify that rdi contains a JS function.  The following code
+    // relies on rax being available for use.
+    Condition not_smi = NegateCondition(masm()->CheckSmi(rdi));
+    __ Check(not_smi,
+             "VirtualFrame::Enter - rdi is not a function (smi check).");
+    __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
+    __ Check(equal,
+             "VirtualFrame::Enter - rdi is not a function (map check).");
+  }
 #endif
 
   EmitPush(rbp);
@@ -197,6 +199,14 @@
 }
 
 
+void VirtualFrame::EmitPush(Smi* smi_value) {
+  ASSERT(stack_pointer_ == element_count() - 1);
+  elements_.Add(FrameElement::MemoryElement());
+  stack_pointer_++;
+  __ Push(smi_value);
+}
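+// This mirrors the other EmitPush overloads: record a memory element on the
+// virtual frame, then emit the push. MacroAssembler::Push(Smi*) is used
+// rather than push(Immediate(...)) because the tagged smi may not fit in a
+// 32-bit immediate.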
+
+
 void VirtualFrame::EmitPush(Handle<Object> value) {
   ASSERT(stack_pointer_ == element_count() - 1);
   elements_.Add(FrameElement::MemoryElement());
@@ -841,7 +851,7 @@
 
   switch (element.type()) {
     case FrameElement::INVALID:
-      __ push(Immediate(Smi::FromInt(0)));
+      __ Push(Smi::FromInt(0));
       break;
 
     case FrameElement::MEMORY:
@@ -883,15 +893,16 @@
   // on the stack.
   int start = Min(begin, stack_pointer_ + 1);
 
-  // If positive we have to adjust the stack pointer.
-  int delta = end - stack_pointer_;
-  if (delta > 0) {
-    stack_pointer_ = end;
-    __ subq(rsp, Immediate(delta * kPointerSize));
-  }
-
+  // Emit normal 'push' instructions for elements above the stack pointer
+  // and use mov instructions for elements below the stack pointer.
   for (int i = start; i <= end; i++) {
-    if (!elements_[i].is_synced()) SyncElementBelowStackPointer(i);
+    if (!elements_[i].is_synced()) {
+      if (i <= stack_pointer_) {
+        SyncElementBelowStackPointer(i);
+      } else {
+        SyncElementByPushing(i);
+      }
+    }
   }
 }
 
@@ -1004,7 +1015,7 @@
   function.ToRegister(rdi);
 
   // Constructors are called with the number of arguments in register
-  // eax for now. Another option would be to have separate construct
+  // rax for now. Another option would be to have separate construct
   // call trampolines per different arguments counts encountered.
   Result num_args = cgen()->allocator()->Allocate(rax);
   ASSERT(num_args.is_valid());
diff --git a/src/x64/virtual-frame-x64.h b/src/x64/virtual-frame-x64.h
index 006148d..e492305 100644
--- a/src/x64/virtual-frame-x64.h
+++ b/src/x64/virtual-frame-x64.h
@@ -377,6 +377,7 @@
   void EmitPush(const Operand& operand);
   void EmitPush(Heap::RootListIndex index);
   void EmitPush(Immediate immediate);
+  void EmitPush(Smi* value);
   // Uses kScratchRegister, emits appropriate relocation info.
   void EmitPush(Handle<Object> value);
 
diff --git a/test/cctest/SConscript b/test/cctest/SConscript
index 9103403..f041041 100644
--- a/test/cctest/SConscript
+++ b/test/cctest/SConscript
@@ -67,7 +67,9 @@
     'test-disasm-ia32.cc',
     'test-log-stack-tracer.cc'
   ],
-  'arch:x64': ['test-assembler-x64.cc', 'test-log-stack-tracer.cc'],
+  'arch:x64': ['test-assembler-x64.cc',
+               'test-macro-assembler-x64.cc',
+               'test-log-stack-tracer.cc'],
   'os:linux':  ['test-platform-linux.cc'],
   'os:macos':  ['test-platform-macos.cc'],
   'os:nullos': ['test-platform-nullos.cc'],
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index 8fff769..b43cd64 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -33,6 +33,17 @@
 # BUG(382): Weird test. Can't guarantee that it never times out.
 test-api/ApplyInterruption: PASS || TIMEOUT
 
+# This is about to go away anyway since new snapshot code is on the way.
+test-serialize/Deserialize: FAIL
+test-serialize/DeserializeAndRunScript: FAIL
+test-serialize/DeserializeNatives: FAIL
+test-serialize/DeserializeExtensions: FAIL
+
+# These tests always fail.  They are here to test test.py.  If
+# they don't fail then test.py has failed.
+test-serialize/TestThatAlwaysFails: FAIL
+test-serialize/DependentTestThatAlwaysFails: FAIL
+
 
 [ $arch == arm ]
 
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index f430cbd..a943f30 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include <stdlib.h>
+#include <limits.h>
 
 #include "v8.h"
 
@@ -35,6 +35,7 @@
 #include "snapshot.h"
 #include "platform.h"
 #include "top.h"
+#include "utils.h"
 #include "cctest.h"
 
 static bool IsNaN(double x) {
@@ -574,6 +575,44 @@
 }
 
 
+THREADED_TEST(StringConcat) {
+  {
+    v8::HandleScope scope;
+    LocalContext env;
+    const char* one_byte_string_1 = "function a_times_t";
+    const char* two_byte_string_1 = "wo_plus_b(a, b) {return ";
+    const char* one_byte_extern_1 = "a * 2 + b;} a_times_two_plus_b(4, 8) + ";
+    const char* two_byte_extern_1 = "a_times_two_plus_b(4, 8) + ";
+    const char* one_byte_string_2 = "a_times_two_plus_b(4, 8) + ";
+    const char* two_byte_string_2 = "a_times_two_plus_b(4, 8) + ";
+    const char* two_byte_extern_2 = "a_times_two_plus_b(1, 2);";
+    Local<String> left = v8_str(one_byte_string_1);
+    Local<String> right = String::New(AsciiToTwoByteString(two_byte_string_1));
+    Local<String> source = String::Concat(left, right);
+    right = String::NewExternal(
+        new TestAsciiResource(i::StrDup(one_byte_extern_1)));
+    source = String::Concat(source, right);
+    right = String::NewExternal(
+        new TestResource(AsciiToTwoByteString(two_byte_extern_1)));
+    source = String::Concat(source, right);
+    right = v8_str(one_byte_string_2);
+    source = String::Concat(source, right);
+    right = String::New(AsciiToTwoByteString(two_byte_string_2));
+    source = String::Concat(source, right);
+    right = String::NewExternal(
+        new TestResource(AsciiToTwoByteString(two_byte_extern_2)));
+    source = String::Concat(source, right);
+    Local<Script> script = Script::Compile(source);
+    Local<Value> value = script->Run();
+    CHECK(value->IsNumber());
+    CHECK_EQ(68, value->Int32Value());
+  }
+  v8::internal::CompilationCache::Clear();
+  i::Heap::CollectAllGarbage(false);
+  i::Heap::CollectAllGarbage(false);
+}
+
+
 THREADED_TEST(GlobalProperties) {
   v8::HandleScope scope;
   LocalContext env;
@@ -702,6 +741,88 @@
 }
 
 
+THREADED_TEST(TinyInteger) {
+  v8::HandleScope scope;
+  LocalContext env;
+  int32_t value = 239;
+  Local<v8::Integer> value_obj = v8::Integer::New(value);
+  CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
+}
+
+
+THREADED_TEST(BigSmiInteger) {
+  v8::HandleScope scope;
+  LocalContext env;
+  int32_t value = i::Smi::kMaxValue;
+  // We cannot add one to Smi::kMaxValue without wrapping.
+  if (i::kSmiValueSize < 32) {
+    CHECK(i::Smi::IsValid(value));
+    CHECK(!i::Smi::IsValid(value + 1));
+    Local<v8::Integer> value_obj = v8::Integer::New(value);
+    CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
+  }
+}
+
+
+THREADED_TEST(BigInteger) {
+  v8::HandleScope scope;
+  LocalContext env;
+  // We cannot add one to Smi::kMaxValue without wrapping.
+  if (i::kSmiValueSize < 32) {
+    // The casts allow this to compile, even if Smi::kMaxValue is 2^31-1.
+    // The code will not be run in that case, due to the "if" guard.
+    int32_t value =
+        static_cast<int32_t>(static_cast<uint32_t>(i::Smi::kMaxValue) + 1);
+    CHECK(value > i::Smi::kMaxValue);
+    CHECK(!i::Smi::IsValid(value));
+    Local<v8::Integer> value_obj = v8::Integer::New(value);
+    CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
+  }
+}
+
+
+THREADED_TEST(TinyUnsignedInteger) {
+  v8::HandleScope scope;
+  LocalContext env;
+  uint32_t value = 239;
+  Local<v8::Integer> value_obj = v8::Integer::NewFromUnsigned(value);
+  CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
+}
+
+
+THREADED_TEST(BigUnsignedSmiInteger) {
+  v8::HandleScope scope;
+  LocalContext env;
+  uint32_t value = static_cast<uint32_t>(i::Smi::kMaxValue);
+  CHECK(i::Smi::IsValid(value));
+  CHECK(!i::Smi::IsValid(value + 1));
+  Local<v8::Integer> value_obj = v8::Integer::NewFromUnsigned(value);
+  CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
+}
+
+
+THREADED_TEST(BigUnsignedInteger) {
+  v8::HandleScope scope;
+  LocalContext env;
+  uint32_t value = static_cast<uint32_t>(i::Smi::kMaxValue) + 1;
+  CHECK(value > static_cast<uint32_t>(i::Smi::kMaxValue));
+  CHECK(!i::Smi::IsValid(value));
+  Local<v8::Integer> value_obj = v8::Integer::NewFromUnsigned(value);
+  CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
+}
+
+
+THREADED_TEST(OutOfSignedRangeUnsignedInteger) {
+  v8::HandleScope scope;
+  LocalContext env;
+  uint32_t INT32_MAX_AS_UINT = (1U << 31) - 1;
+  uint32_t value = INT32_MAX_AS_UINT + 1;
+  CHECK(value > INT32_MAX_AS_UINT);  // No overflow.
+  Local<v8::Integer> value_obj = v8::Integer::NewFromUnsigned(value);
+  CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
+}
+
+
 THREADED_TEST(Number) {
   v8::HandleScope scope;
   LocalContext env;
@@ -1346,6 +1467,44 @@
 }
 
 
+THREADED_TEST(InternalFieldsNativePointersAndExternal) {
+  v8::HandleScope scope;
+  LocalContext env;
+
+  Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
+  Local<v8::ObjectTemplate> instance_templ = templ->InstanceTemplate();
+  instance_templ->SetInternalFieldCount(1);
+  Local<v8::Object> obj = templ->GetFunction()->NewInstance();
+  CHECK_EQ(1, obj->InternalFieldCount());
+  CHECK(obj->GetPointerFromInternalField(0) == NULL);
+
+  char* data = new char[100];
+
+  void* aligned = data;
+  CHECK_EQ(0, reinterpret_cast<uintptr_t>(aligned) & 0x1);
+  void* unaligned = data + 1;
+  CHECK_EQ(1, reinterpret_cast<uintptr_t>(unaligned) & 0x1);
+
+  obj->SetPointerInInternalField(0, aligned);
+  i::Heap::CollectAllGarbage(false);
+  CHECK_EQ(aligned, v8::External::Unwrap(obj->GetInternalField(0)));
+
+  obj->SetPointerInInternalField(0, unaligned);
+  i::Heap::CollectAllGarbage(false);
+  CHECK_EQ(unaligned, v8::External::Unwrap(obj->GetInternalField(0)));
+
+  obj->SetInternalField(0, v8::External::Wrap(aligned));
+  i::Heap::CollectAllGarbage(false);
+  CHECK_EQ(aligned, obj->GetPointerFromInternalField(0));
+
+  obj->SetInternalField(0, v8::External::Wrap(unaligned));
+  i::Heap::CollectAllGarbage(false);
+  CHECK_EQ(unaligned, obj->GetPointerFromInternalField(0));
+
+  delete[] data;
+}
+
+
 THREADED_TEST(IdentityHash) {
   v8::HandleScope scope;
   LocalContext env;
@@ -1810,7 +1969,7 @@
   // Build huge string. This should fail with out of memory exception.
   Local<Value> result = CompileRun(
     "var str = Array.prototype.join.call({length: 513}, \"A\").toUpperCase();"
-    "for (var i = 0; i < 21; i++) { str = str + str; }");
+    "for (var i = 0; i < 22; i++) { str = str + str; }");
 
   // Check for out of memory state.
   CHECK(result.IsEmpty());
@@ -7883,6 +8042,333 @@
 }
 
 
+template <class ExternalArrayClass, class ElementType>
+static void ExternalArrayTestHelper(v8::ExternalArrayType array_type,
+                                    int64_t low,
+                                    int64_t high) {
+  v8::HandleScope scope;
+  LocalContext context;
+  const int kElementCount = 40;
+  int element_size = 0;
+  switch (array_type) {
+    case v8::kExternalByteArray:
+    case v8::kExternalUnsignedByteArray:
+      element_size = 1;
+      break;
+    case v8::kExternalShortArray:
+    case v8::kExternalUnsignedShortArray:
+      element_size = 2;
+      break;
+    case v8::kExternalIntArray:
+    case v8::kExternalUnsignedIntArray:
+    case v8::kExternalFloatArray:
+      element_size = 4;
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  ElementType* array_data =
+      static_cast<ElementType*>(malloc(kElementCount * element_size));
+  i::Handle<ExternalArrayClass> array =
+      i::Handle<ExternalArrayClass>::cast(
+          i::Factory::NewExternalArray(kElementCount, array_type, array_data));
+  i::Heap::CollectAllGarbage(false);  // Force GC to trigger verification.
+  for (int i = 0; i < kElementCount; i++) {
+    array->set(i, static_cast<ElementType>(i));
+  }
+  i::Heap::CollectAllGarbage(false);  // Force GC to trigger verification.
+  for (int i = 0; i < kElementCount; i++) {
+    CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(array->get(i)));
+    CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(array_data[i]));
+  }
+
+  v8::Handle<v8::Object> obj = v8::Object::New();
+  i::Handle<i::JSObject> jsobj = v8::Utils::OpenHandle(*obj);
+  // Set the elements to be the external array.
+  obj->SetIndexedPropertiesToExternalArrayData(array_data,
+                                               array_type,
+                                               kElementCount);
+  CHECK_EQ(1, static_cast<int>(jsobj->GetElement(1)->Number()));
+  obj->Set(v8_str("field"), v8::Int32::New(1503));
+  context->Global()->Set(v8_str("ext_array"), obj);
+  v8::Handle<v8::Value> result = CompileRun("ext_array.field");
+  CHECK_EQ(1503, result->Int32Value());
+  result = CompileRun("ext_array[1]");
+  CHECK_EQ(1, result->Int32Value());
+
+  // Check pass through of assigned smis
+  result = CompileRun("var sum = 0;"
+                      "for (var i = 0; i < 8; i++) {"
+                      "  sum += ext_array[i] = ext_array[i] = -i;"
+                      "}"
+                      "sum;");
+  CHECK_EQ(-28, result->Int32Value());
+
+  // Check assigned smis
+  result = CompileRun("for (var i = 0; i < 8; i++) {"
+                      "  ext_array[i] = i;"
+                      "}"
+                      "var sum = 0;"
+                      "for (var i = 0; i < 8; i++) {"
+                      "  sum += ext_array[i];"
+                      "}"
+                      "sum;");
+  CHECK_EQ(28, result->Int32Value());
+
+  // Check assigned smis in reverse order
+  result = CompileRun("for (var i = 8; --i >= 0; ) {"
+                      "  ext_array[i] = i;"
+                      "}"
+                      "var sum = 0;"
+                      "for (var i = 0; i < 8; i++) {"
+                      "  sum += ext_array[i];"
+                      "}"
+                      "sum;");
+  CHECK_EQ(28, result->Int32Value());
+
+  // Check pass through of assigned HeapNumbers
+  result = CompileRun("var sum = 0;"
+                      "for (var i = 0; i < 16; i+=2) {"
+                      "  sum += ext_array[i] = ext_array[i] = (-i * 0.5);"
+                      "}"
+                      "sum;");
+  CHECK_EQ(-28, result->Int32Value());
+
+  // Check assigned HeapNumbers
+  result = CompileRun("for (var i = 0; i < 16; i+=2) {"
+                      "  ext_array[i] = (i * 0.5);"
+                      "}"
+                      "var sum = 0;"
+                      "for (var i = 0; i < 16; i+=2) {"
+                      "  sum += ext_array[i];"
+                      "}"
+                      "sum;");
+  CHECK_EQ(28, result->Int32Value());
+
+  // Check assigned HeapNumbers in reverse order
+  result = CompileRun("for (var i = 14; i >= 0; i-=2) {"
+                      "  ext_array[i] = (i * 0.5);"
+                      "}"
+                      "var sum = 0;"
+                      "for (var i = 0; i < 16; i+=2) {"
+                      "  sum += ext_array[i];"
+                      "}"
+                      "sum;");
+  CHECK_EQ(28, result->Int32Value());
+
+  i::ScopedVector<char> test_buf(1024);
+
+  // Check legal boundary conditions.
+  // The repeated loads and stores ensure the ICs are exercised.
+  const char* boundary_program =
+      "var res = 0;"
+      "for (var i = 0; i < 16; i++) {"
+      "  ext_array[i] = %lld;"
+      "  if (i > 8) {"
+      "    res = ext_array[i];"
+      "  }"
+      "}"
+      "res;";
+  i::OS::SNPrintF(test_buf,
+                  boundary_program,
+                  low);
+  result = CompileRun(test_buf.start());
+  CHECK_EQ(low, result->IntegerValue());
+
+  i::OS::SNPrintF(test_buf,
+                  boundary_program,
+                  high);
+  result = CompileRun(test_buf.start());
+  CHECK_EQ(high, result->IntegerValue());
+
+  // Check misprediction of type in IC.
+  result = CompileRun("var tmp_array = ext_array;"
+                      "var sum = 0;"
+                      "for (var i = 0; i < 8; i++) {"
+                      "  tmp_array[i] = i;"
+                      "  sum += tmp_array[i];"
+                      "  if (i == 4) {"
+                      "    tmp_array = {};"
+                      "  }"
+                      "}"
+                      "sum;");
+  i::Heap::CollectAllGarbage(false);  // Force GC to trigger verification.
+  CHECK_EQ(28, result->Int32Value());
+
+  // Make sure out-of-range loads do not throw.
+  i::OS::SNPrintF(test_buf,
+                  "var caught_exception = false;"
+                  "try {"
+                  "  ext_array[%d];"
+                  "} catch (e) {"
+                  "  caught_exception = true;"
+                  "}"
+                  "caught_exception;",
+                  kElementCount);
+  result = CompileRun(test_buf.start());
+  CHECK_EQ(false, result->BooleanValue());
+
+  // Make sure out-of-range stores do not throw.
+  i::OS::SNPrintF(test_buf,
+                  "var caught_exception = false;"
+                  "try {"
+                  "  ext_array[%d] = 1;"
+                  "} catch (e) {"
+                  "  caught_exception = true;"
+                  "}"
+                  "caught_exception;",
+                  kElementCount);
+  result = CompileRun(test_buf.start());
+  CHECK_EQ(false, result->BooleanValue());
+
+  // Check other boundary conditions, values and operations.
+  result = CompileRun("for (var i = 0; i < 8; i++) {"
+                      "  ext_array[7] = undefined;"
+                      "}"
+                      "ext_array[7];");
+  CHECK_EQ(0, result->Int32Value());
+  CHECK_EQ(0, static_cast<int>(jsobj->GetElement(7)->Number()));
+
+  result = CompileRun("for (var i = 0; i < 8; i++) {"
+                      "  ext_array[6] = '2.3';"
+                      "}"
+                      "ext_array[6];");
+  CHECK_EQ(2, result->Int32Value());
+  CHECK_EQ(2, static_cast<int>(jsobj->GetElement(6)->Number()));
+
+  if (array_type != v8::kExternalFloatArray) {
+    // Though the specification doesn't state it, be explicit about
+    // converting NaNs and +/-Infinity to zero.
+    result = CompileRun("for (var i = 0; i < 8; i++) {"
+                        "  ext_array[i] = 5;"
+                        "}"
+                        "for (var i = 0; i < 8; i++) {"
+                        "  ext_array[i] = NaN;"
+                        "}"
+                        "ext_array[5];");
+    CHECK_EQ(0, result->Int32Value());
+    CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(5))->value());
+
+    result = CompileRun("for (var i = 0; i < 8; i++) {"
+                        "  ext_array[i] = 5;"
+                        "}"
+                        "for (var i = 0; i < 8; i++) {"
+                        "  ext_array[i] = Infinity;"
+                        "}"
+                        "ext_array[5];");
+    CHECK_EQ(0, result->Int32Value());
+    CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(5))->value());
+
+    result = CompileRun("for (var i = 0; i < 8; i++) {"
+                        "  ext_array[i] = 5;"
+                        "}"
+                        "for (var i = 0; i < 8; i++) {"
+                        "  ext_array[i] = -Infinity;"
+                        "}"
+                        "ext_array[5];");
+    CHECK_EQ(0, result->Int32Value());
+    CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(5))->value());
+  }
+
+  result = CompileRun("ext_array[3] = 33;"
+                      "delete ext_array[3];"
+                      "ext_array[3];");
+  CHECK_EQ(33, result->Int32Value());
+
+  result = CompileRun("ext_array[0] = 10; ext_array[1] = 11;"
+                      "ext_array[2] = 12; ext_array[3] = 13;"
+                      "ext_array.__defineGetter__('2',"
+                      "function() { return 120; });"
+                      "ext_array[2];");
+  CHECK_EQ(12, result->Int32Value());
+
+  result = CompileRun("var js_array = new Array(40);"
+                      "js_array[0] = 77;"
+                      "js_array;");
+  CHECK_EQ(77, v8::Object::Cast(*result)->Get(v8_str("0"))->Int32Value());
+
+  result = CompileRun("ext_array[1] = 23;"
+                      "ext_array.__proto__ = [];"
+                      "js_array.__proto__ = ext_array;"
+                      "js_array.concat(ext_array);");
+  CHECK_EQ(77, v8::Object::Cast(*result)->Get(v8_str("0"))->Int32Value());
+  CHECK_EQ(23, v8::Object::Cast(*result)->Get(v8_str("1"))->Int32Value());
+
+  result = CompileRun("ext_array[1] = 23;");
+  CHECK_EQ(23, result->Int32Value());
+
+  free(array_data);
+}
+
+
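+// The instantiations below exercise the helper once per external array
+// type, using the extreme representable values of each element type as the
+// boundary-condition inputs.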
+THREADED_TEST(ExternalByteArray) {
+  ExternalArrayTestHelper<v8::internal::ExternalByteArray, int8_t>(
+      v8::kExternalByteArray,
+      -128,
+      127);
+}
+
+
+THREADED_TEST(ExternalUnsignedByteArray) {
+  ExternalArrayTestHelper<v8::internal::ExternalUnsignedByteArray, uint8_t>(
+      v8::kExternalUnsignedByteArray,
+      0,
+      255);
+}
+
+
+THREADED_TEST(ExternalShortArray) {
+  ExternalArrayTestHelper<v8::internal::ExternalShortArray, int16_t>(
+      v8::kExternalShortArray,
+      -32768,
+      32767);
+}
+
+
+THREADED_TEST(ExternalUnsignedShortArray) {
+  ExternalArrayTestHelper<v8::internal::ExternalUnsignedShortArray, uint16_t>(
+      v8::kExternalUnsignedShortArray,
+      0,
+      65535);
+}
+
+
+THREADED_TEST(ExternalIntArray) {
+  ExternalArrayTestHelper<v8::internal::ExternalIntArray, int32_t>(
+      v8::kExternalIntArray,
+      INT_MIN,   // -2147483648
+      INT_MAX);  //  2147483647
+}
+
+
+THREADED_TEST(ExternalUnsignedIntArray) {
+  ExternalArrayTestHelper<v8::internal::ExternalUnsignedIntArray, uint32_t>(
+      v8::kExternalUnsignedIntArray,
+      0,
+      UINT_MAX);  // 4294967295
+}
+
+
+THREADED_TEST(ExternalFloatArray) {
+  ExternalArrayTestHelper<v8::internal::ExternalFloatArray, float>(
+      v8::kExternalFloatArray,
+      -500,
+      500);
+}
+
+
+THREADED_TEST(ExternalArrays) {
+  TestExternalByteArray();
+  TestExternalUnsignedByteArray();
+  TestExternalShortArray();
+  TestExternalUnsignedShortArray();
+  TestExternalIntArray();
+  TestExternalUnsignedIntArray();
+  TestExternalFloatArray();
+}
+
+
 THREADED_TEST(ScriptContextDependence) {
   v8::HandleScope scope;
   LocalContext c1;
@@ -7913,11 +8399,15 @@
 }
 
 
-// Test that idle notification can be handled when V8 has not yet been
-// set up.
+// Test that idle notification can be handled and eventually returns true.
 THREADED_TEST(IdleNotification) {
-  for (int i = 0; i < 100; i++) v8::V8::IdleNotification(true);
-  for (int i = 0; i < 100; i++) v8::V8::IdleNotification(false);
+  bool rv = false;
+  for (int i = 0; i < 100; i++) {
+    rv = v8::V8::IdleNotification();
+    if (rv) break;
+  }
+  CHECK(rv);
 }
 
 
@@ -7993,3 +8483,15 @@
     CHECK(stack_limit == set_limit);
   }
 }
+
+
+THREADED_TEST(GetHeapStatistics) {
+  v8::HandleScope scope;
+  LocalContext c1;
+  v8::HeapStatistics heap_statistics;
+  CHECK_EQ(heap_statistics.total_heap_size(), 0);
+  CHECK_EQ(heap_statistics.used_heap_size(), 0);
+  v8::V8::GetHeapStatistics(&heap_statistics);
+  CHECK_NE(heap_statistics.total_heap_size(), 0);
+  CHECK_NE(heap_statistics.used_heap_size(), 0);
+}
diff --git a/test/cctest/test-assembler-x64.cc b/test/cctest/test-assembler-x64.cc
index cd750c5..81aa973 100644
--- a/test/cctest/test-assembler-x64.cc
+++ b/test/cctest/test-assembler-x64.cc
@@ -44,6 +44,7 @@
 using v8::internal::rax;
 using v8::internal::rsi;
 using v8::internal::rdi;
+using v8::internal::rcx;
 using v8::internal::rdx;
 using v8::internal::rbp;
 using v8::internal::rsp;
@@ -53,20 +54,28 @@
 using v8::internal::not_equal;
 using v8::internal::greater;
 
-
 // Test the x64 assembler by compiling some simple functions into
 // a buffer and executing them.  These tests do not initialize the
 // V8 library, create a context, or use any V8 objects.
-// The AMD64 calling convention is used, with the first five arguments
-// in RSI, RDI, RDX, RCX, R8, and R9, and floating point arguments in
+// The AMD64 calling convention is used, with the first six arguments
+// in RDI, RSI, RDX, RCX, R8, and R9, and floating point arguments in
 // the XMM registers.  The return value is in RAX.
 // This calling convention is used on Linux, with GCC, and on Mac OS,
-// with GCC.  A different convention is used on 64-bit windows.
+// with GCC.  A different convention is used on 64-bit windows,
+// where the first four integer arguments are passed in RCX, RDX, R8 and R9.
 
 typedef int (*F0)();
 typedef int (*F1)(int64_t x);
 typedef int (*F2)(int64_t x, int64_t y);
 
+#ifdef _WIN64
+static const v8::internal::Register arg1 = rcx;
+static const v8::internal::Register arg2 = rdx;
+#else
+static const v8::internal::Register arg1 = rdi;
+static const v8::internal::Register arg2 = rsi;
+#endif
+
 #define __ assm.
 
 
@@ -80,7 +89,7 @@
   Assembler assm(buffer, actual_size);
 
   // Assemble a simple function that copies argument 2 and returns it.
-  __ movq(rax, rsi);
+  __ movq(rax, arg2);
   __ nop();
   __ ret(0);
 
@@ -105,9 +114,9 @@
   // incorrect stack frames when debugging this function (which has them).
   __ push(rbp);
   __ movq(rbp, rsp);
-  __ push(rsi);  // Value at (rbp - 8)
-  __ push(rsi);  // Value at (rbp - 16)
-  __ push(rdi);  // Value at (rbp - 24)
+  __ push(arg2);  // Value at (rbp - 8)
+  __ push(arg2);  // Value at (rbp - 16)
+  __ push(arg1);  // Value at (rbp - 24)
   __ pop(rax);
   __ pop(rax);
   __ pop(rax);
@@ -132,8 +141,8 @@
   Assembler assm(buffer, actual_size);
 
   // Assemble a simple function that adds arguments returning the sum.
-  __ movq(rax, rsi);
-  __ addq(rax, rdi);
+  __ movq(rax, arg2);
+  __ addq(rax, arg1);
   __ ret(0);
 
   CodeDesc desc;
@@ -154,8 +163,8 @@
 
   // Assemble a simple function that multiplies arguments returning the high
   // word.
-  __ movq(rax, rsi);
-  __ imul(rdi);
+  __ movq(rax, arg2);
+  __ imul(arg1);
   __ movq(rax, rdx);
   __ ret(0);
 
@@ -182,14 +191,16 @@
   // Assemble a simple function that copies argument 2 and returns it.
   __ push(rbp);
   __ movq(rbp, rsp);
-  __ push(rsi);  // Value at (rbp - 8)
-  __ push(rsi);  // Value at (rbp - 16)
-  __ push(rdi);  // Value at (rbp - 24)
+
+  __ push(arg2);  // Value at (rbp - 8)
+  __ push(arg2);  // Value at (rbp - 16)
+  __ push(arg1);  // Value at (rbp - 24)
+
   const int kStackElementSize = 8;
   __ movq(rax, Operand(rbp, -3 * kStackElementSize));
-  __ pop(rsi);
-  __ pop(rsi);
-  __ pop(rsi);
+  __ pop(arg2);
+  __ pop(arg2);
+  __ pop(arg2);
   __ pop(rbp);
   __ nop();
   __ ret(0);
@@ -210,13 +221,14 @@
   CHECK(buffer);
   Assembler assm(buffer, actual_size);
 
-  // Assemble a simple function that copies argument 2 and returns it.
+  // Assemble a simple function that copies argument 1 and returns it.
   __ push(rbp);
+
   __ movq(rbp, rsp);
-  __ movq(rax, rdi);
+  __ movq(rax, arg1);
   Label target;
   __ jmp(&target);
-  __ movq(rax, rsi);
+  __ movq(rax, arg2);
   __ bind(&target);
   __ pop(rbp);
   __ ret(0);
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index 1da363c..4ffcee3 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -3539,6 +3539,52 @@
 }
 
 
+// We match parts of the message to decide whether it is an exception message.
+bool IsExceptionEventMessage(char* message) {
+  const char* type_event = "\"type\":\"event\"";
+  const char* event_exception = "\"event\":\"exception\"";
+  // Does the message contain both type:event and event:exception?
+  return strstr(message, type_event) != NULL &&
+      strstr(message, event_exception) != NULL;
+}
+
+
+// We check whether the message is an evaluate response message.
+bool IsEvaluateResponseMessage(char* message) {
+  const char* type_response = "\"type\":\"response\"";
+  const char* command_evaluate = "\"command\":\"evaluate\"";
+  // Does the message contain both type:response and command:evaluate?
+  return strstr(message, type_response) != NULL &&
+         strstr(message, command_evaluate) != NULL;
+}
+
+
+// We match parts of the message to extract the evaluate result int value.
+int GetEvaluateIntResult(char* message) {
+  const char* value = "\"value\":";
+  char* pos = strstr(message, value);
+  if (pos == NULL) {
+    return -1;
+  }
+  int res = atoi(pos + strlen(value));
+  return res;
+}
+
+
+// We match parts of the message to extract the id of the breakpoint hit.
+int GetBreakpointIdFromBreakEventMessage(char* message) {
+  const char* breakpoints = "\"breakpoints\":[";
+  char* pos = strstr(message, breakpoints);
+  if (pos == NULL) {
+    return -1;
+  }
+  int res = atoi(pos + strlen(breakpoints));
+  return res;
+}
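+// These helpers do plain substring scans over the JSON text. A break event
+// payload looks roughly like (abbreviated, illustrative only):
+//   {"seq":..,"type":"event","event":"break","body":{"breakpoints":[1],..}}
+// so atoi() applied just past "\"breakpoints\":[" yields the first id.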
+
+
 /* Test MessageQueues */
 /* Tests the message queues that hold debugger commands and
  * response messages to the debugger.  Fills queues and makes
@@ -3566,8 +3612,6 @@
   // Allow message handler to block on a semaphore, to test queueing of
   // messages while blocked.
   message_queue_barriers.semaphore_1->Wait();
-  printf("%s\n", print_buffer);
-  fflush(stdout);
 }
 
 void MessageQueueDebuggerThread::Run() {
@@ -3822,8 +3866,6 @@
   if (IsBreakEventMessage(print_buffer)) {
     threaded_debugging_barriers.barrier_2.Wait();
   }
-  printf("%s\n", print_buffer);
-  fflush(stdout);
 }
 
 
@@ -3911,16 +3953,20 @@
 
 
 Barriers* breakpoints_barriers;
+int break_event_breakpoint_id;
+int evaluate_int_result;
 
 static void BreakpointsMessageHandler(const v8::Debug::Message& message) {
   static char print_buffer[1000];
   v8::String::Value json(message.GetJSON());
   Utf16ToAscii(*json, json.length(), print_buffer);
-  printf("%s\n", print_buffer);
-  fflush(stdout);
 
-  // Is break_template a prefix of the message?
   if (IsBreakEventMessage(print_buffer)) {
+    break_event_breakpoint_id =
+        GetBreakpointIdFromBreakEventMessage(print_buffer);
+    breakpoints_barriers->semaphore_1->Signal();
+  } else if (IsEvaluateResponseMessage(print_buffer)) {
+    evaluate_int_result = GetEvaluateIntResult(print_buffer);
     breakpoints_barriers->semaphore_1->Signal();
   }
 }
@@ -3930,9 +3976,9 @@
   const char* source_1 = "var y_global = 3;\n"
     "function cat( new_value ) {\n"
     "  var x = new_value;\n"
-    "  y_global = 4;\n"
+    "  y_global = y_global + 4;\n"
     "  x = 3 * x + 1;\n"
-    "  y_global = 5;\n"
+    "  y_global = y_global + 5;\n"
     "  return x;\n"
     "}\n"
     "\n"
@@ -3970,59 +4016,76 @@
       "\"type\":\"request\","
       "\"command\":\"setbreakpoint\","
       "\"arguments\":{\"type\":\"function\",\"target\":\"dog\",\"line\":3}}";
-  const char* command_3 = "{\"seq\":104,"
+  const char* command_3 = "{\"seq\":103,"
       "\"type\":\"request\","
       "\"command\":\"evaluate\","
       "\"arguments\":{\"expression\":\"dog()\",\"disable_break\":false}}";
-  const char* command_4 = "{\"seq\":105,"
+  const char* command_4 = "{\"seq\":104,"
       "\"type\":\"request\","
       "\"command\":\"evaluate\","
-      "\"arguments\":{\"expression\":\"x\",\"disable_break\":true}}";
-  const char* command_5 = "{\"seq\":106,"
+      "\"arguments\":{\"expression\":\"x + 1\",\"disable_break\":true}}";
+  const char* command_5 = "{\"seq\":105,"
       "\"type\":\"request\","
       "\"command\":\"continue\"}";
-  const char* command_6 = "{\"seq\":107,"
+  const char* command_6 = "{\"seq\":106,"
       "\"type\":\"request\","
       "\"command\":\"continue\"}";
-  const char* command_7 = "{\"seq\":108,"
+  const char* command_7 = "{\"seq\":107,"
      "\"type\":\"request\","
      "\"command\":\"evaluate\","
      "\"arguments\":{\"expression\":\"dog()\",\"disable_break\":true}}";
-  const char* command_8 = "{\"seq\":109,"
+  const char* command_8 = "{\"seq\":108,"
       "\"type\":\"request\","
       "\"command\":\"continue\"}";
 
 
   // v8 thread initializes, runs source_1
   breakpoints_barriers->barrier_1.Wait();
-  // 1:Set breakpoint in cat().
+  // 1:Set breakpoint in cat() (will get id 1).
   v8::Debug::SendCommand(buffer, AsciiToUtf16(command_1, buffer));
-  // 2:Set breakpoint in dog()
+  // 2:Set breakpoint in dog() (will get id 2).
   v8::Debug::SendCommand(buffer, AsciiToUtf16(command_2, buffer));
   breakpoints_barriers->barrier_2.Wait();
-  // v8 thread starts compiling source_2.
+  // V8 thread starts compiling source_2.
   // Automatic break happens, to run queued commands
   // breakpoints_barriers->semaphore_1->Wait();
   // Commands 1 through 3 run, thread continues.
   // v8 thread runs source_2 to breakpoint in cat().
   // message callback receives break event.
   breakpoints_barriers->semaphore_1->Wait();
+  // Must have hit breakpoint #1.
+  CHECK_EQ(1, break_event_breakpoint_id);
   // 4:Evaluate dog() (which has a breakpoint).
   v8::Debug::SendCommand(buffer, AsciiToUtf16(command_3, buffer));
-  // v8 thread hits breakpoint in dog()
+  // V8 thread hits breakpoint in dog().
   breakpoints_barriers->semaphore_1->Wait();  // wait for break event
-  // 5:Evaluate x
+  // Must have hit breakpoint #2.
+  CHECK_EQ(2, break_event_breakpoint_id);
+  // 5:Evaluate (x + 1).
   v8::Debug::SendCommand(buffer, AsciiToUtf16(command_4, buffer));
-  // 6:Continue evaluation of dog()
+  // Evaluate (x + 1) finishes.
+  breakpoints_barriers->semaphore_1->Wait();
+  // Must have result 108.
+  CHECK_EQ(108, evaluate_int_result);
+  // 6:Continue evaluation of dog().
   v8::Debug::SendCommand(buffer, AsciiToUtf16(command_5, buffer));
-  // dog() finishes.
+  // Evaluate dog() finishes.
+  breakpoints_barriers->semaphore_1->Wait();
+  // Must have result 107.
+  CHECK_EQ(107, evaluate_int_result);
   // 7:Continue evaluation of source_2, finish cat(17), hit breakpoint
   // in cat(19).
   v8::Debug::SendCommand(buffer, AsciiToUtf16(command_6, buffer));
-  // message callback gets break event
+  // Message callback gets break event.
   breakpoints_barriers->semaphore_1->Wait();  // wait for break event
-  // 8: Evaluate dog() with breaks disabled
+  // Must have hit breakpoint #1.
+  CHECK_EQ(1, break_event_breakpoint_id);
+  // 8: Evaluate dog() with breaks disabled.
   v8::Debug::SendCommand(buffer, AsciiToUtf16(command_7, buffer));
+  // Evaluate dog() finishes.
+  breakpoints_barriers->semaphore_1->Wait();
+  // Must have result 116.
+  CHECK_EQ(116, evaluate_int_result);
   // 9: Continue evaluation of source2, reach end.
   v8::Debug::SendCommand(buffer, AsciiToUtf16(command_8, buffer));
 }
@@ -4325,7 +4388,13 @@
 static void MessageHandlerHitCount(const v8::Debug::Message& message) {
   message_handler_hit_count++;
 
-  SendContinueCommand();
+  static char print_buffer[1000];
+  v8::String::Value json(message.GetJSON());
+  Utf16ToAscii(*json, json.length(), print_buffer);
+  if (IsExceptionEventMessage(print_buffer)) {
+    // Send a continue command for exception events.
+    SendContinueCommand();
+  }
 }
 
 
@@ -4415,8 +4484,6 @@
   static char print_buffer[1000];
   v8::String::Value json(message.GetJSON());
   Utf16ToAscii(*json, json.length(), print_buffer);
-  printf("%s\n", print_buffer);
-  fflush(stdout);
 }
 
 
@@ -4776,8 +4843,12 @@
       expected_context_data));
   message_handler_hit_count++;
 
+  static char print_buffer[1000];
+  v8::String::Value json(message.GetJSON());
+  Utf16ToAscii(*json, json.length(), print_buffer);
+
   // Send a continue command for break events.
-  if (message.GetEvent() == v8::Break) {
+  if (IsBreakEventMessage(print_buffer)) {
     SendContinueCommand();
   }
 }
@@ -5016,7 +5087,11 @@
       expected_context_data));
   message_handler_hit_count++;
 
-  if (message.IsEvent() && message.GetEvent() == v8::Break) {
+  static char print_buffer[1000];
+  v8::String::Value json(message.GetJSON());
+  Utf16ToAscii(*json, json.length(), print_buffer);
+
+  if (IsBreakEventMessage(print_buffer)) {
     break_count++;
     if (!sent_eval) {
       sent_eval = true;
@@ -5038,7 +5113,8 @@
       SendContinueCommand();
       continue_command_send_count++;
     }
-  } else if (message.IsResponse() && continue_command_send_count < 2) {
+  } else if (IsEvaluateResponseMessage(print_buffer) &&
+      continue_command_send_count < 2) {
     // Response to the evaluation request. We're still on the breakpoint so
     // send continue.
     SendContinueCommand();
diff --git a/test/cctest/test-disasm-ia32.cc b/test/cctest/test-disasm-ia32.cc
index af9fb97..74db234 100644
--- a/test/cctest/test-disasm-ia32.cc
+++ b/test/cctest/test-disasm-ia32.cc
@@ -363,7 +363,31 @@
     __ divsd(xmm1, xmm0);
     __ movdbl(xmm1, Operand(ebx, ecx, times_4, 10000));
     __ movdbl(Operand(ebx, ecx, times_4, 10000), xmm1);
+    __ comisd(xmm0, xmm1);
   }
+
+  // cmov: one variant per condition code.
+  {
+    CHECK(CpuFeatures::IsSupported(CpuFeatures::CMOV));
+    CpuFeatures::Scope use_cmov(CpuFeatures::CMOV);
+    __ cmov(overflow, eax, Operand(eax, 0));
+    __ cmov(no_overflow, eax, Operand(eax, 1));
+    __ cmov(below, eax, Operand(eax, 2));
+    __ cmov(above_equal, eax, Operand(eax, 3));
+    __ cmov(equal, eax, Operand(ebx, 0));
+    __ cmov(not_equal, eax, Operand(ebx, 1));
+    __ cmov(below_equal, eax, Operand(ebx, 2));
+    __ cmov(above, eax, Operand(ebx, 3));
+    __ cmov(sign, eax, Operand(ecx, 0));
+    __ cmov(not_sign, eax, Operand(ecx, 1));
+    __ cmov(parity_even, eax, Operand(ecx, 2));
+    __ cmov(parity_odd, eax, Operand(ecx, 3));
+    __ cmov(less, eax, Operand(edx, 0));
+    __ cmov(greater_equal, eax, Operand(edx, 1));
+    __ cmov(less_equal, eax, Operand(edx, 2));
+    __ cmov(greater, eax, Operand(edx, 3));
+  }
+
   __ ret(0);
 
   CodeDesc desc;
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index eb32b65..9911ce4 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -132,15 +132,19 @@
   CHECK(value->IsNumber());
   CHECK_EQ(Smi::kMaxValue, Smi::cast(value)->value());
 
+#ifndef V8_TARGET_ARCH_X64
+  // TODO(lrn): We need a NumberFromIntptr function in order to test this.
   value = Heap::NumberFromInt32(Smi::kMinValue - 1);
   CHECK(value->IsHeapNumber());
   CHECK(value->IsNumber());
   CHECK_EQ(static_cast<double>(Smi::kMinValue - 1), value->Number());
+#endif
 
-  value = Heap::NumberFromInt32(Smi::kMaxValue + 1);
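+  // With smis now holding 32 bits of payload on x64, Smi::kMaxValue + 1
+  // can overflow int32, so go through NumberFromUint32 instead.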
+  value = Heap::NumberFromUint32(static_cast<uint32_t>(Smi::kMaxValue) + 1);
   CHECK(value->IsHeapNumber());
   CHECK(value->IsNumber());
-  CHECK_EQ(static_cast<double>(Smi::kMaxValue + 1), value->Number());
+  CHECK_EQ(static_cast<double>(static_cast<uint32_t>(Smi::kMaxValue) + 1),
+           value->Number());
 
   // nan oddball checks
   CHECK(Heap::nan_value()->IsNumber());
@@ -640,8 +644,9 @@
   CHECK_EQ(Smi::FromInt(1), array->length());
   CHECK_EQ(array->GetElement(0), name);
 
-  // Set array length with larger than smi value.
-  Object* length = Heap::NumberFromInt32(Smi::kMaxValue + 1);
+  // Set the array length to a value larger than the largest smi.
+  Object* length =
+      Heap::NumberFromUint32(static_cast<uint32_t>(Smi::kMaxValue) + 1);
   array->SetElementsLength(length);
 
   uint32_t int_length = 0;
diff --git a/test/cctest/test-log.cc b/test/cctest/test-log.cc
index 65ab50a..3983215 100644
--- a/test/cctest/test-log.cc
+++ b/test/cctest/test-log.cc
@@ -430,6 +430,50 @@
 #endif  // __linux__
 
 
+// Test for issue http://crbug.com/23768 in Chromium.
+// Heap can contain scripts with already disposed external sources.
+// We need to verify that LogCompiledFunctions doesn't crash on them.
+namespace {
+
+class SimpleExternalString : public v8::String::ExternalStringResource {
+ public:
+  explicit SimpleExternalString(const char* source)
+      : utf_source_(strlen(source)) {
+    for (int i = 0; i < utf_source_.length(); ++i)
+      utf_source_[i] = source[i];
+  }
+  virtual ~SimpleExternalString() {}
+  virtual size_t length() const { return utf_source_.length(); }
+  virtual const uint16_t* data() const { return utf_source_.start(); }
+ private:
+  i::ScopedVector<uint16_t> utf_source_;
+};
+
+}  // namespace
+
+TEST(Issue23768) {
+  v8::HandleScope scope;
+  v8::Handle<v8::Context> env = v8::Context::New();
+  env->Enter();
+
+  SimpleExternalString source_ext_str("(function ext() {})();");
+  v8::Local<v8::String> source = v8::String::NewExternal(&source_ext_str);
+  // Script needs to have a name in order to trigger InitLineEnds execution.
+  v8::Handle<v8::String> origin = v8::String::New("issue-23768-test");
+  v8::Handle<v8::Script> evil_script = v8::Script::Compile(source, origin);
+  CHECK(!evil_script.IsEmpty());
+  CHECK(!evil_script->Run().IsEmpty());
+  i::Handle<i::ExternalTwoByteString> i_source(
+      i::ExternalTwoByteString::cast(*v8::Utils::OpenHandle(*source)));
+  // This situation can happen if source was an external string disposed
+  // by its owner.
+  i_source->set_resource(NULL);
+
+  // Must not crash.
+  i::Logger::LogCompiledFunctions();
+}
+
+
 static inline bool IsStringEqualTo(const char* r, const char* s) {
   return strncmp(r, s, strlen(r)) == 0;
 }
diff --git a/test/cctest/test-macro-assembler-x64.cc b/test/cctest/test-macro-assembler-x64.cc
new file mode 100755
index 0000000..9c1197f
--- /dev/null
+++ b/test/cctest/test-macro-assembler-x64.cc
@@ -0,0 +1,2096 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "macro-assembler.h"
+#include "factory.h"
+#include "platform.h"
+#include "serialize.h"
+#include "cctest.h"
+
+using v8::internal::byte;
+using v8::internal::OS;
+using v8::internal::Assembler;
+using v8::internal::Condition;
+using v8::internal::MacroAssembler;
+using v8::internal::HandleScope;
+using v8::internal::Operand;
+using v8::internal::Immediate;
+using v8::internal::SmiIndex;
+using v8::internal::Label;
+using v8::internal::RelocInfo;
+using v8::internal::rax;
+using v8::internal::rbx;
+using v8::internal::rsi;
+using v8::internal::rdi;
+using v8::internal::rcx;
+using v8::internal::rdx;
+using v8::internal::rbp;
+using v8::internal::rsp;
+using v8::internal::r8;
+using v8::internal::r9;
+using v8::internal::r11;
+using v8::internal::r12;
+using v8::internal::r13;
+using v8::internal::r14;
+using v8::internal::r15;
+using v8::internal::FUNCTION_CAST;
+using v8::internal::CodeDesc;
+using v8::internal::less_equal;
+using v8::internal::not_equal;
+using v8::internal::not_zero;
+using v8::internal::greater;
+using v8::internal::greater_equal;
+using v8::internal::carry;
+using v8::internal::not_carry;
+using v8::internal::negative;
+using v8::internal::positive;
+using v8::internal::Smi;
+using v8::internal::kSmiTagMask;
+using v8::internal::kSmiValueSize;
+
+// Test the x64 macro assembler by compiling some simple functions into
+// a buffer and executing them.  These tests do not initialize the
+// V8 library, create a context, or use any V8 objects.
+// The AMD64 calling convention is used, with the first six integer
+// arguments in RDI, RSI, RDX, RCX, R8, and R9, and floating point
+// arguments in the XMM registers.  The return value is in RAX.
+// This calling convention is used on Linux, with GCC, and on Mac OS,
+// with GCC.  A different convention is used on 64-bit Windows.
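+//
+// A note on the representation exercised here: with the new 32-bit smi
+// payloads, the payload sits in the upper half of the 64-bit word and the
+// tag bit (bit 0) stays clear, so roughly
+//   Smi::FromIntptr(v) == reinterpret_cast<Smi*>(v << 32).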
+
+typedef int (*F0)();
+
+#define __ masm->
+
+TEST(Smi) {
+  // Check that C++ Smi operations work as expected.
+  intptr_t test_numbers[] = {
+      0, 1, -1, 127, 128, -128, -129, 255, 256, -256, -257,
+      Smi::kMaxValue, static_cast<intptr_t>(Smi::kMaxValue) + 1,
+      Smi::kMinValue, static_cast<intptr_t>(Smi::kMinValue) - 1
+  };
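+  // The values straddle byte boundaries and both ends of the smi range.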
+  int test_number_count = 15;
+  for (int i = 0; i < test_number_count; i++) {
+    intptr_t number = test_numbers[i];
+    bool is_valid = Smi::IsValid(number);
+    bool is_in_range = number >= Smi::kMinValue && number <= Smi::kMaxValue;
+    CHECK_EQ(is_in_range, is_valid);
+    if (is_valid) {
+      Smi* smi_from_intptr = Smi::FromIntptr(number);
+      if (static_cast<int>(number) == number) {  // Is a 32-bit int.
+        Smi* smi_from_int = Smi::FromInt(static_cast<int32_t>(number));
+        CHECK_EQ(smi_from_int, smi_from_intptr);
+      }
+      int smi_value = smi_from_intptr->value();
+      CHECK_EQ(number, static_cast<intptr_t>(smi_value));
+    }
+  }
+}
+
+
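+// Check that Move of a smi writes exactly the raw bits that Set writes
+// for the same value reinterpreted as an integer.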
+static void TestMoveSmi(MacroAssembler* masm, Label* exit, int id, Smi* value) {
+  __ movl(rax, Immediate(id));
+  __ Move(rcx, value);
+  __ Set(rdx, reinterpret_cast<intptr_t>(value));
+  __ cmpq(rcx, rdx);
+  __ j(not_equal, exit);
+}
+
+
+// Test that we can move a Smi value literally into a register.
+TEST(SmiMove) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                                   &actual_size,
+                                                   true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+  MacroAssembler* masm = &assembler;  // Create a pointer for the __ macro.
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  TestMoveSmi(masm, &exit, 1, Smi::FromInt(0));
+  TestMoveSmi(masm, &exit, 2, Smi::FromInt(127));
+  TestMoveSmi(masm, &exit, 3, Smi::FromInt(128));
+  TestMoveSmi(masm, &exit, 4, Smi::FromInt(255));
+  TestMoveSmi(masm, &exit, 5, Smi::FromInt(256));
+  TestMoveSmi(masm, &exit, 6, Smi::FromInt(Smi::kMaxValue));
+  TestMoveSmi(masm, &exit, 7, Smi::FromInt(-1));
+  TestMoveSmi(masm, &exit, 8, Smi::FromInt(-128));
+  TestMoveSmi(masm, &exit, 9, Smi::FromInt(-129));
+  TestMoveSmi(masm, &exit, 10, Smi::FromInt(-256));
+  TestMoveSmi(masm, &exit, 11, Smi::FromInt(-257));
+  TestMoveSmi(masm, &exit, 12, Smi::FromInt(Smi::kMinValue));
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
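+// Run SmiCompare on the smis x and y and branch to exit with a test id in
+// rax if any flag comes out wrong; also verify both operands are preserved.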
+void TestSmiCompare(MacroAssembler* masm, Label* exit, int id, int x, int y) {
+  __ Move(rcx, Smi::FromInt(x));
+  __ movq(r8, rcx);
+  __ Move(rdx, Smi::FromInt(y));
+  __ movq(r9, rdx);
+  __ SmiCompare(rcx, rdx);
+  if (x < y) {
+    __ movl(rax, Immediate(id + 1));
+    __ j(greater_equal, exit);
+  } else if (x > y) {
+    __ movl(rax, Immediate(id + 2));
+    __ j(less_equal, exit);
+  } else {
+    ASSERT_EQ(x, y);
+    __ movl(rax, Immediate(id + 3));
+    __ j(not_equal, exit);
+  }
+  __ movl(rax, Immediate(id + 4));
+  __ cmpq(rcx, r8);
+  __ j(not_equal, exit);
+  __ incq(rax);
+  __ cmpq(rdx, r9);
+  __ j(not_equal, exit);
+
+  if (x != y) {
+    __ SmiCompare(rdx, rcx);
+    if (y < x) {
+      __ movl(rax, Immediate(id + 9));
+      __ j(greater_equal, exit);
+    } else {
+      ASSERT(y > x);
+      __ movl(rax, Immediate(id + 10));
+      __ j(less_equal, exit);
+    }
+  } else {
+    __ SmiCompare(rcx, rcx);
+    __ movl(rax, Immediate(id + 11));
+    __ j(not_equal, exit);
+    __ incq(rax);
+    __ cmpq(rcx, r8);
+    __ j(not_equal, exit);
+  }
+}
+
+
+// Test that we can compare smis for equality (and more).
+TEST(SmiCompare) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                      &actual_size,
+                                      true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  TestSmiCompare(masm, &exit, 0x10, 0, 0);
+  TestSmiCompare(masm, &exit, 0x20, 0, 1);
+  TestSmiCompare(masm, &exit, 0x30, 1, 0);
+  TestSmiCompare(masm, &exit, 0x40, 1, 1);
+  TestSmiCompare(masm, &exit, 0x50, 0, -1);
+  TestSmiCompare(masm, &exit, 0x60, -1, 0);
+  TestSmiCompare(masm, &exit, 0x70, -1, -1);
+  TestSmiCompare(masm, &exit, 0x80, 0, Smi::kMinValue);
+  TestSmiCompare(masm, &exit, 0x90, Smi::kMinValue, 0);
+  TestSmiCompare(masm, &exit, 0xA0, 0, Smi::kMaxValue);
+  TestSmiCompare(masm, &exit, 0xB0, Smi::kMaxValue, 0);
+  TestSmiCompare(masm, &exit, 0xC0, -1, Smi::kMinValue);
+  TestSmiCompare(masm, &exit, 0xD0, Smi::kMinValue, -1);
+  TestSmiCompare(masm, &exit, 0xE0, -1, Smi::kMaxValue);
+  TestSmiCompare(masm, &exit, 0xF0, Smi::kMaxValue, -1);
+  TestSmiCompare(masm, &exit, 0x100, Smi::kMinValue, Smi::kMinValue);
+  TestSmiCompare(masm, &exit, 0x110, Smi::kMinValue, Smi::kMaxValue);
+  TestSmiCompare(masm, &exit, 0x120, Smi::kMaxValue, Smi::kMinValue);
+  TestSmiCompare(masm, &exit, 0x130, Smi::kMaxValue, Smi::kMaxValue);
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
+
+TEST(Integer32ToSmi) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                                 &actual_size,
+                                                 true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  __ movq(rax, Immediate(1));  // Test number.
+  __ movl(rcx, Immediate(0));
+  __ Integer32ToSmi(rcx, rcx);
+  __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(0)));
+  __ SmiCompare(rcx, rdx);
+  __ j(not_equal, &exit);
+
+  __ movq(rax, Immediate(2));  // Test number.
+  __ movl(rcx, Immediate(1024));
+  __ Integer32ToSmi(rcx, rcx);
+  __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(1024)));
+  __ SmiCompare(rcx, rdx);
+  __ j(not_equal, &exit);
+
+  __ movq(rax, Immediate(3));  // Test number.
+  __ movl(rcx, Immediate(-1));
+  __ Integer32ToSmi(rcx, rcx);
+  __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(-1)));
+  __ SmiCompare(rcx, rdx);
+  __ j(not_equal, &exit);
+
+  __ movq(rax, Immediate(4));  // Test number.
+  __ movl(rcx, Immediate(Smi::kMaxValue));
+  __ Integer32ToSmi(rcx, rcx);
+  __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(Smi::kMaxValue)));
+  __ SmiCompare(rcx, rdx);
+  __ j(not_equal, &exit);
+
+  __ movq(rax, Immediate(5));  // Test number.
+  __ movl(rcx, Immediate(Smi::kMinValue));
+  __ Integer32ToSmi(rcx, rcx);
+  __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(Smi::kMinValue)));
+  __ SmiCompare(rcx, rdx);
+  __ j(not_equal, &exit);
+
+  // Different target register.
+
+  __ movq(rax, Immediate(6));  // Test number.
+  __ movl(rcx, Immediate(0));
+  __ Integer32ToSmi(r8, rcx);
+  __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(0)));
+  __ SmiCompare(r8, rdx);
+  __ j(not_equal, &exit);
+
+  __ movq(rax, Immediate(7));  // Test number.
+  __ movl(rcx, Immediate(1024));
+  __ Integer32ToSmi(r8, rcx);
+  __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(1024)));
+  __ SmiCompare(r8, rdx);
+  __ j(not_equal, &exit);
+
+  __ movq(rax, Immediate(8));  // Test number.
+  __ movl(rcx, Immediate(-1));
+  __ Integer32ToSmi(r8, rcx);
+  __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(-1)));
+  __ SmiCompare(r8, rdx);
+  __ j(not_equal, &exit);
+
+  __ movq(rax, Immediate(9));  // Test number.
+  __ movl(rcx, Immediate(Smi::kMaxValue));
+  __ Integer32ToSmi(r8, rcx);
+  __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(Smi::kMaxValue)));
+  __ SmiCompare(r8, rdx);
+  __ j(not_equal, &exit);
+
+  __ movq(rax, Immediate(10));  // Test number.
+  __ movl(rcx, Immediate(Smi::kMinValue));
+  __ Integer32ToSmi(r8, rcx);
+  __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(Smi::kMinValue)));
+  __ SmiCompare(r8, rdx);
+  __ j(not_equal, &exit);
+
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
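+// Check Integer64PlusConstantToSmi: it adds the constant y to the 64-bit
+// value x and tags the (asserted valid) sum as a smi, leaving the source
+// register intact; the in-place variant aliases source and destination.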
+void TestI64PlusConstantToSmi(MacroAssembler* masm,
+                              Label* exit,
+                              int id,
+                              int64_t x,
+                              int y) {
+  int64_t result = x + y;
+  ASSERT(Smi::IsValid(result));
+  __ movl(rax, Immediate(id));
+  __ Move(r8, Smi::FromInt(result));
+  __ movq(rcx, x, RelocInfo::NONE);
+  __ movq(r11, rcx);
+  __ Integer64PlusConstantToSmi(rdx, rcx, y);
+  __ SmiCompare(rdx, r8);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiCompare(r11, rcx);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ Integer64PlusConstantToSmi(rcx, rcx, y);
+  __ SmiCompare(rcx, r8);
+  __ j(not_equal, exit);
+}
+
+
+TEST(Integer64PlusConstantToSmi) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                                 &actual_size,
+                                                 true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  int64_t twice_max = static_cast<int64_t>(Smi::kMaxValue) * 2;
+
+  TestI64PlusConstantToSmi(masm, &exit, 0x10, 0, 0);
+  TestI64PlusConstantToSmi(masm, &exit, 0x20, 0, 1);
+  TestI64PlusConstantToSmi(masm, &exit, 0x30, 1, 0);
+  TestI64PlusConstantToSmi(masm, &exit, 0x40, Smi::kMaxValue - 5, 5);
+  TestI64PlusConstantToSmi(masm, &exit, 0x50, Smi::kMinValue + 5, 5);
+  TestI64PlusConstantToSmi(masm, &exit, 0x60, twice_max, -Smi::kMaxValue);
+  TestI64PlusConstantToSmi(masm, &exit, 0x70, -twice_max, Smi::kMaxValue);
+  TestI64PlusConstantToSmi(masm, &exit, 0x80, 0, Smi::kMinValue);
+  TestI64PlusConstantToSmi(masm, &exit, 0x90, 0, Smi::kMaxValue);
+  TestI64PlusConstantToSmi(masm, &exit, 0xA0, Smi::kMinValue, 0);
+  TestI64PlusConstantToSmi(masm, &exit, 0xB0, Smi::kMaxValue, 0);
+  TestI64PlusConstantToSmi(masm, &exit, 0xC0, twice_max, Smi::kMinValue);
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
+TEST(SmiCheck) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                                   &actual_size,
+                                                   true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+  Condition cond;
+
+  __ movl(rax, Immediate(1));  // Test number.
+
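+  // Each case below loads a valid smi and checks it, then flips the tag
+  // bit with xor_(reg, Immediate(kSmiTagMask)) and checks the non-smi.
+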
+  // CheckSmi
+
+  __ movl(rcx, Immediate(0));
+  __ Integer32ToSmi(rcx, rcx);
+  cond = masm->CheckSmi(rcx);
+  __ j(NegateCondition(cond), &exit);
+
+  __ incq(rax);
+  __ xor_(rcx, Immediate(kSmiTagMask));
+  cond = masm->CheckSmi(rcx);
+  __ j(cond, &exit);
+
+  __ incq(rax);
+  __ movl(rcx, Immediate(-1));
+  __ Integer32ToSmi(rcx, rcx);
+  cond = masm->CheckSmi(rcx);
+  __ j(NegateCondition(cond), &exit);
+
+  __ incq(rax);
+  __ xor_(rcx, Immediate(kSmiTagMask));
+  cond = masm->CheckSmi(rcx);
+  __ j(cond, &exit);
+
+  __ incq(rax);
+  __ movl(rcx, Immediate(Smi::kMaxValue));
+  __ Integer32ToSmi(rcx, rcx);
+  cond = masm->CheckSmi(rcx);
+  __ j(NegateCondition(cond), &exit);
+
+  __ incq(rax);
+  __ xor_(rcx, Immediate(kSmiTagMask));
+  cond = masm->CheckSmi(rcx);
+  __ j(cond, &exit);
+
+  __ incq(rax);
+  __ movl(rcx, Immediate(Smi::kMinValue));
+  __ Integer32ToSmi(rcx, rcx);
+  cond = masm->CheckSmi(rcx);
+  __ j(NegateCondition(cond), &exit);
+
+  __ incq(rax);
+  __ xor_(rcx, Immediate(kSmiTagMask));
+  cond = masm->CheckSmi(rcx);
+  __ j(cond, &exit);
+
+  // CheckPositiveSmi
+
+  __ incq(rax);
+  __ movl(rcx, Immediate(0));
+  __ Integer32ToSmi(rcx, rcx);
+  cond = masm->CheckPositiveSmi(rcx);  // Zero counts as positive.
+  __ j(NegateCondition(cond), &exit);
+
+  __ incq(rax);
+  __ xor_(rcx, Immediate(kSmiTagMask));
+  cond = masm->CheckPositiveSmi(rcx);  // "zero" non-smi.
+  __ j(cond, &exit);
+
+  __ incq(rax);
+  __ movq(rcx, Immediate(-1));
+  __ Integer32ToSmi(rcx, rcx);
+  cond = masm->CheckPositiveSmi(rcx);  // Negative smis are not positive.
+  __ j(cond, &exit);
+
+  __ incq(rax);
+  __ movq(rcx, Immediate(Smi::kMinValue));
+  __ Integer32ToSmi(rcx, rcx);
+  cond = masm->CheckPositiveSmi(rcx);  // Most negative smi is not positive.
+  __ j(cond, &exit);
+
+  __ incq(rax);
+  __ xor_(rcx, Immediate(kSmiTagMask));
+  cond = masm->CheckPositiveSmi(rcx);  // "Negative" non-smi.
+  __ j(cond, &exit);
+
+  __ incq(rax);
+  __ movq(rcx, Immediate(Smi::kMaxValue));
+  __ Integer32ToSmi(rcx, rcx);
+  cond = masm->CheckPositiveSmi(rcx);  // Most positive smi is positive.
+  __ j(NegateCondition(cond), &exit);
+
+  __ incq(rax);
+  __ xor_(rcx, Immediate(kSmiTagMask));
+  cond = masm->CheckPositiveSmi(rcx);  // "Positive" non-smi.
+  __ j(cond, &exit);
+
+  // CheckIsMinSmi
+
+  __ incq(rax);
+  __ movq(rcx, Immediate(Smi::kMaxValue));
+  __ Integer32ToSmi(rcx, rcx);
+  cond = masm->CheckIsMinSmi(rcx);
+  __ j(cond, &exit);
+
+  __ incq(rax);
+  __ movq(rcx, Immediate(0));
+  __ Integer32ToSmi(rcx, rcx);
+  cond = masm->CheckIsMinSmi(rcx);
+  __ j(cond, &exit);
+
+  __ incq(rax);
+  __ movq(rcx, Immediate(Smi::kMinValue));
+  __ Integer32ToSmi(rcx, rcx);
+  cond = masm->CheckIsMinSmi(rcx);
+  __ j(NegateCondition(cond), &exit);
+
+  __ incq(rax);
+  __ movq(rcx, Immediate(Smi::kMinValue + 1));
+  __ Integer32ToSmi(rcx, rcx);
+  cond = masm->CheckIsMinSmi(rcx);
+  __ j(cond, &exit);
+
+  // CheckBothSmi
+
+  __ incq(rax);
+  __ movq(rcx, Immediate(Smi::kMaxValue));
+  __ Integer32ToSmi(rcx, rcx);
+  __ movq(rdx, Immediate(Smi::kMinValue));
+  __ Integer32ToSmi(rdx, rdx);
+  cond = masm->CheckBothSmi(rcx, rdx);
+  __ j(NegateCondition(cond), &exit);
+
+  __ incq(rax);
+  __ xor_(rcx, Immediate(kSmiTagMask));
+  cond = masm->CheckBothSmi(rcx, rdx);
+  __ j(cond, &exit);
+
+  __ incq(rax);
+  __ xor_(rdx, Immediate(kSmiTagMask));
+  cond = masm->CheckBothSmi(rcx, rdx);
+  __ j(cond, &exit);
+
+  __ incq(rax);
+  __ xor_(rcx, Immediate(kSmiTagMask));
+  cond = masm->CheckBothSmi(rcx, rdx);
+  __ j(cond, &exit);
+
+  __ incq(rax);
+  cond = masm->CheckBothSmi(rcx, rcx);
+  __ j(NegateCondition(cond), &exit);
+
+  __ incq(rax);
+  cond = masm->CheckBothSmi(rdx, rdx);
+  __ j(cond, &exit);
+
+  // CheckInteger32ValidSmiValue (check the loaded value, not the counter).
+  __ incq(rax);
+  __ movq(rcx, Immediate(0));
+  cond = masm->CheckInteger32ValidSmiValue(rcx);
+  __ j(NegateCondition(cond), &exit);
+
+  __ incq(rax);
+  __ movq(rcx, Immediate(-1));
+  cond = masm->CheckInteger32ValidSmiValue(rcx);
+  __ j(NegateCondition(cond), &exit);
+
+  __ incq(rax);
+  __ movq(rcx, Immediate(Smi::kMaxValue));
+  cond = masm->CheckInteger32ValidSmiValue(rcx);
+  __ j(NegateCondition(cond), &exit);
+
+  __ incq(rax);
+  __ movq(rcx, Immediate(Smi::kMinValue));
+  cond = masm->CheckInteger32ValidSmiValue(rcx);
+  __ j(NegateCondition(cond), &exit);
+
+  // Success
+  __ xor_(rax, rax);
+
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
+
+void TestSmiNeg(MacroAssembler* masm, Label* exit, int id, int x) {
+  __ Move(rcx, Smi::FromInt(x));
+  __ movq(r11, rcx);
+  if (x == Smi::kMinValue || x == 0) {
+    // Negation fails: -0 is not a smi, and negating kMinValue overflows.
+    __ movl(rax, Immediate(id + 8));
+    __ SmiNeg(r9, rcx, exit);
+
+    __ incq(rax);
+    __ SmiCompare(r11, rcx);
+    __ j(not_equal, exit);
+
+    __ incq(rax);
+    __ SmiNeg(rcx, rcx, exit);
+
+    __ incq(rax);
+    __ SmiCompare(r11, rcx);
+    __ j(not_equal, exit);
+  } else {
+    Label smi_ok, smi_ok2;
+    int result = -x;
+    __ movl(rax, Immediate(id));
+    __ Move(r8, Smi::FromInt(result));
+
+    __ SmiNeg(r9, rcx, &smi_ok);
+    __ jmp(exit);
+    __ bind(&smi_ok);
+    __ incq(rax);
+    __ SmiCompare(r9, r8);
+    __ j(not_equal, exit);
+
+    __ incq(rax);
+    __ SmiCompare(r11, rcx);
+    __ j(not_equal, exit);
+
+    __ incq(rax);
+    __ SmiNeg(rcx, rcx, &smi_ok2);
+    __ jmp(exit);
+    __ bind(&smi_ok2);
+    __ incq(rax);
+    __ SmiCompare(rcx, r8);
+    __ j(not_equal, exit);
+  }
+}
+
+
+TEST(SmiNeg) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                      &actual_size,
+                                      true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  TestSmiNeg(masm, &exit, 0x10, 0);
+  TestSmiNeg(masm, &exit, 0x20, 1);
+  TestSmiNeg(masm, &exit, 0x30, -1);
+  TestSmiNeg(masm, &exit, 0x40, 127);
+  TestSmiNeg(masm, &exit, 0x50, 65535);
+  TestSmiNeg(masm, &exit, 0x60, Smi::kMinValue);
+  TestSmiNeg(masm, &exit, 0x70, Smi::kMaxValue);
+  TestSmiNeg(masm, &exit, 0x80, -Smi::kMaxValue);
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
+
+
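+// Exercise SmiAdd and both SmiAddConstant variants (with and without an
+// overflow bailout label); r8 holds the expected smi sum throughout.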
+static void SmiAddTest(MacroAssembler* masm,
+                       Label* exit,
+                       int id,
+                       int first,
+                       int second) {
+  __ movl(rcx, Immediate(first));
+  __ Integer32ToSmi(rcx, rcx);
+  __ movl(rdx, Immediate(second));
+  __ Integer32ToSmi(rdx, rdx);
+  __ movl(r8, Immediate(first + second));
+  __ Integer32ToSmi(r8, r8);
+
+  __ movl(rax, Immediate(id));  // Test number.
+  __ SmiAdd(r9, rcx, rdx, exit);
+  __ SmiCompare(r9, r8);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiAdd(rcx, rcx, rdx, exit);
+  __ SmiCompare(rcx, r8);
+  __ j(not_equal, exit);
+
+  __ movl(rcx, Immediate(first));
+  __ Integer32ToSmi(rcx, rcx);
+
+  __ incq(rax);
+  __ SmiAddConstant(r9, rcx, Smi::FromInt(second));
+  __ SmiCompare(r9, r8);
+  __ j(not_equal, exit);
+
+  __ SmiAddConstant(rcx, rcx, Smi::FromInt(second));
+  __ SmiCompare(rcx, r8);
+  __ j(not_equal, exit);
+
+  __ movl(rcx, Immediate(first));
+  __ Integer32ToSmi(rcx, rcx);
+
+  __ incq(rax);
+  __ SmiAddConstant(r9, rcx, Smi::FromInt(second), exit);
+  __ SmiCompare(r9, r8);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiAddConstant(rcx, rcx, Smi::FromInt(second), exit);
+  __ SmiCompare(rcx, r8);
+  __ j(not_equal, exit);
+}
+
+TEST(SmiAdd) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                                 &actual_size,
+                                                 true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  // No-overflow tests.
+  SmiAddTest(masm, &exit, 0x10, 1, 2);
+  SmiAddTest(masm, &exit, 0x20, 1, -2);
+  SmiAddTest(masm, &exit, 0x30, -1, 2);
+  SmiAddTest(masm, &exit, 0x40, -1, -2);
+  SmiAddTest(masm, &exit, 0x50, 0x1000, 0x2000);
+  SmiAddTest(masm, &exit, 0x60, Smi::kMinValue, 5);
+  SmiAddTest(masm, &exit, 0x70, Smi::kMaxValue, -5);
+  SmiAddTest(masm, &exit, 0x80, Smi::kMaxValue, Smi::kMinValue);
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
+static void SmiSubTest(MacroAssembler* masm,
+                      Label* exit,
+                      int id,
+                      int first,
+                      int second) {
+  __ Move(rcx, Smi::FromInt(first));
+  __ Move(rdx, Smi::FromInt(second));
+  __ Move(r8, Smi::FromInt(first - second));
+
+  __ movl(rax, Immediate(id));  // Test 0.
+  __ SmiSub(r9, rcx, rdx, exit);
+  __ SmiCompare(r9, r8);
+  __ j(not_equal, exit);
+
+  __ incq(rax);  // Test 1.
+  __ SmiSub(rcx, rcx, rdx, exit);
+  __ SmiCompare(rcx, r8);
+  __ j(not_equal, exit);
+
+  __ Move(rcx, Smi::FromInt(first));
+
+  __ incq(rax);  // Test 2.
+  __ SmiSubConstant(r9, rcx, Smi::FromInt(second));
+  __ SmiCompare(r9, r8);
+  __ j(not_equal, exit);
+
+  __ incq(rax);  // Test 3.
+  __ SmiSubConstant(rcx, rcx, Smi::FromInt(second));
+  __ SmiCompare(rcx, r8);
+  __ j(not_equal, exit);
+
+  __ Move(rcx, Smi::FromInt(first));
+
+  __ incq(rax);  // Test 4.
+  __ SmiSubConstant(r9, rcx, Smi::FromInt(second), exit);
+  __ SmiCompare(r9, r8);
+  __ j(not_equal, exit);
+
+  __ incq(rax);  // Test 5.
+  __ SmiSubConstant(rcx, rcx, Smi::FromInt(second), exit);
+  __ SmiCompare(rcx, r8);
+  __ j(not_equal, exit);
+}
+
+static void SmiSubOverflowTest(MacroAssembler* masm,
+                               Label* exit,
+                               int id,
+                               int x) {
+  // Subtracts a Smi from x so that the subtraction overflows.
+  ASSERT(x != -1);  // Can't overflow by subtracting a Smi.
+  int y_max = (x < 0) ? Smi::kMaxValue : Smi::kMinValue;
+  int y_min = (x < 0) ? (Smi::kMaxValue + x + 2) : (Smi::kMinValue + x);
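+  // Both y_min and y_max make x - y fall outside the smi range, so every
+  // subtraction below must take the overflow bailout.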
+
+  __ movl(rax, Immediate(id));
+  __ Move(rcx, Smi::FromInt(x));
+  __ movq(r11, rcx);  // Store original Smi value of x in r11.
+  __ Move(rdx, Smi::FromInt(y_min));
+  {
+    Label overflow_ok;
+    __ SmiSub(r9, rcx, rdx, &overflow_ok);
+    __ jmp(exit);
+    __ bind(&overflow_ok);
+    __ incq(rax);
+    __ SmiCompare(rcx, r11);
+    __ j(not_equal, exit);
+  }
+
+  {
+    Label overflow_ok;
+    __ incq(rax);
+    __ SmiSub(rcx, rcx, rdx, &overflow_ok);
+    __ jmp(exit);
+    __ bind(&overflow_ok);
+    __ incq(rax);
+    __ SmiCompare(rcx, r11);
+    __ j(not_equal, exit);
+  }
+
+  __ movq(rcx, r11);
+  {
+    Label overflow_ok;
+    __ incq(rax);
+    __ SmiSubConstant(r9, rcx, Smi::FromInt(y_min), &overflow_ok);
+    __ jmp(exit);
+    __ bind(&overflow_ok);
+    __ incq(rax);
+    __ SmiCompare(rcx, r11);
+    __ j(not_equal, exit);
+  }
+
+  {
+    Label overflow_ok;
+    __ incq(rax);
+    __ SmiSubConstant(rcx, rcx, Smi::FromInt(y_min), &overflow_ok);
+    __ jmp(exit);
+    __ bind(&overflow_ok);
+    __ incq(rax);
+    __ SmiCompare(rcx, r11);
+    __ j(not_equal, exit);
+  }
+
+  __ Move(rdx, Smi::FromInt(y_max));
+
+  {
+    Label overflow_ok;
+    __ incq(rax);
+    __ SmiSub(r9, rcx, rdx, &overflow_ok);
+    __ jmp(exit);
+    __ bind(&overflow_ok);
+    __ incq(rax);
+    __ SmiCompare(rcx, r11);
+    __ j(not_equal, exit);
+  }
+
+  {
+    Label overflow_ok;
+    __ incq(rax);
+    __ SmiSub(rcx, rcx, rdx, &overflow_ok);
+    __ jmp(exit);
+    __ bind(&overflow_ok);
+    __ incq(rax);
+    __ SmiCompare(rcx, r11);
+    __ j(not_equal, exit);
+  }
+
+  __ movq(rcx, r11);
+  {
+    Label overflow_ok;
+    __ incq(rax);
+    __ SmiSubConstant(r9, rcx, Smi::FromInt(y_max), &overflow_ok);
+    __ jmp(exit);
+    __ bind(&overflow_ok);
+    __ incq(rax);
+    __ SmiCompare(rcx, r11);
+    __ j(not_equal, exit);
+  }
+
+  {
+    Label overflow_ok;
+    __ incq(rax);
+    __ SmiSubConstant(rcx, rcx, Smi::FromInt(y_max), &overflow_ok);
+    __ jmp(exit);
+    __ bind(&overflow_ok);
+    __ incq(rax);
+    __ SmiCompare(rcx, r11);
+    __ j(not_equal, exit);
+  }
+}
+
+
+TEST(SmiSub) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+                                      &actual_size,
+                                      true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  SmiSubTest(masm, &exit, 0x10, 1, 2);
+  SmiSubTest(masm, &exit, 0x20, 1, -2);
+  SmiSubTest(masm, &exit, 0x30, -1, 2);
+  SmiSubTest(masm, &exit, 0x40, -1, -2);
+  SmiSubTest(masm, &exit, 0x50, 0x1000, 0x2000);
+  SmiSubTest(masm, &exit, 0x60, Smi::kMinValue, -5);
+  SmiSubTest(masm, &exit, 0x70, Smi::kMaxValue, 5);
+  SmiSubTest(masm, &exit, 0x80, -Smi::kMaxValue, Smi::kMinValue);
+  SmiSubTest(masm, &exit, 0x90, 0, Smi::kMaxValue);
+
+  SmiSubOverflowTest(masm, &exit, 0xA0, 1);
+  SmiSubOverflowTest(masm, &exit, 0xB0, 1024);
+  SmiSubOverflowTest(masm, &exit, 0xC0, Smi::kMaxValue);
+  SmiSubOverflowTest(masm, &exit, 0xD0, -2);
+  SmiSubOverflowTest(masm, &exit, 0xE0, -42000);
+  SmiSubOverflowTest(masm, &exit, 0xF0, Smi::kMinValue);
+  SmiSubOverflowTest(masm, &exit, 0x100, 0);
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
+
+void TestSmiMul(MacroAssembler* masm, Label* exit, int id, int x, int y) {
+  int64_t result = static_cast<int64_t>(x) * static_cast<int64_t>(y);
+  bool negative_zero = (result == 0) && (x < 0 || y < 0);
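+  // A zero product with a negative factor is -0 in JavaScript, not a smi.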
+  __ Move(rcx, Smi::FromInt(x));
+  __ movq(r11, rcx);
+  __ Move(rdx, Smi::FromInt(y));
+  if (Smi::IsValid(result) && !negative_zero) {
+    __ movl(rax, Immediate(id));
+    __ Move(r8, Smi::FromIntptr(result));
+    __ SmiMul(r9, rcx, rdx, exit);
+    __ incq(rax);
+    __ SmiCompare(r11, rcx);
+    __ j(not_equal, exit);
+    __ incq(rax);
+    __ SmiCompare(r9, r8);
+    __ j(not_equal, exit);
+
+    __ incq(rax);
+    __ SmiMul(rcx, rcx, rdx, exit);
+    __ SmiCompare(rcx, r8);
+    __ j(not_equal, exit);
+  } else {
+    __ movl(rax, Immediate(id + 8));
+    Label overflow_ok, overflow_ok2;
+    __ SmiMul(r9, rcx, rdx, &overflow_ok);
+    __ jmp(exit);
+    __ bind(&overflow_ok);
+    __ incq(rax);
+    __ SmiCompare(r11, rcx);
+    __ j(not_equal, exit);
+    __ incq(rax);
+    __ SmiMul(rcx, rcx, rdx, &overflow_ok2);
+    __ jmp(exit);
+    __ bind(&overflow_ok2);
+    // 31-bit version doesn't preserve rcx on failure.
+    // __ incq(rax);
+    // __ SmiCompare(r11, rcx);
+    // __ j(not_equal, exit);
+  }
+}
+
+
+TEST(SmiMul) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                                 &actual_size,
+                                                 true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  TestSmiMul(masm, &exit, 0x10, 0, 0);
+  TestSmiMul(masm, &exit, 0x20, -1, 0);
+  TestSmiMul(masm, &exit, 0x30, 0, -1);
+  TestSmiMul(masm, &exit, 0x40, -1, -1);
+  TestSmiMul(masm, &exit, 0x50, 0x10000, 0x10000);
+  TestSmiMul(masm, &exit, 0x60, 0x10000, 0xffff);
+  TestSmiMul(masm, &exit, 0x70, 0x10000, 0xffff);
+  TestSmiMul(masm, &exit, 0x80, Smi::kMaxValue, -1);
+  TestSmiMul(masm, &exit, 0x90, Smi::kMaxValue, -2);
+  TestSmiMul(masm, &exit, 0xa0, Smi::kMaxValue, 2);
+  TestSmiMul(masm, &exit, 0xb0, (Smi::kMaxValue / 2), 2);
+  TestSmiMul(masm, &exit, 0xc0, (Smi::kMaxValue / 2) + 1, 2);
+  TestSmiMul(masm, &exit, 0xd0, (Smi::kMinValue / 2), 2);
+  TestSmiMul(masm, &exit, 0xe0, (Smi::kMinValue / 2) - 1, 2);
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
+void TestSmiDiv(MacroAssembler* masm, Label* exit, int id, int x, int y) {
+  bool division_by_zero = (y == 0);
+  bool negative_zero = (x == 0 && y < 0);
+#ifdef V8_TARGET_ARCH_X64
+  bool overflow = (x == Smi::kMinValue && y < 0);  // Safe approx. used.
+#else
+  bool overflow = (x == Smi::kMinValue && y == -1);
+#endif
+  bool fraction = !division_by_zero && !overflow && (x % y != 0);
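+  // SmiDiv must bail out whenever the quotient is not a smi: a fractional
+  // result, division by zero, -0, or overflow on kMinValue.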
+  __ Move(r11, Smi::FromInt(x));
+  __ Move(r12, Smi::FromInt(y));
+  if (!fraction && !overflow && !negative_zero && !division_by_zero) {
+    // Division succeeds.
+    __ movq(rcx, r11);
+    __ movq(r15, Immediate(id));
+    int result = x / y;
+    __ Move(r8, Smi::FromInt(result));
+    __ SmiDiv(r9, rcx, r12, exit);
+    // Might have destroyed rcx and r12.
+    __ incq(r15);
+    __ SmiCompare(r9, r8);
+    __ j(not_equal, exit);
+
+    __ incq(r15);
+    __ movq(rcx, r11);
+    __ Move(r12, Smi::FromInt(y));
+    __ SmiCompare(rcx, r11);
+    __ j(not_equal, exit);
+
+    __ incq(r15);
+    __ SmiDiv(rcx, rcx, r12, exit);
+
+    __ incq(r15);
+    __ SmiCompare(rcx, r8);
+    __ j(not_equal, exit);
+  } else {
+    // Division fails.
+    __ movq(r15, Immediate(id + 8));
+
+    Label fail_ok, fail_ok2;
+    __ movq(rcx, r11);
+    __ SmiDiv(r9, rcx, r12, &fail_ok);
+    __ jmp(exit);
+    __ bind(&fail_ok);
+
+    __ incq(r15);
+    __ SmiCompare(rcx, r11);
+    __ j(not_equal, exit);
+
+    __ incq(r15);
+    __ SmiDiv(rcx, rcx, r12, &fail_ok2);
+    __ jmp(exit);
+    __ bind(&fail_ok2);
+
+    __ incq(r15);
+    __ SmiCompare(rcx, r11);
+    __ j(not_equal, exit);
+  }
+}
+
+
+TEST(SmiDiv) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+                                      &actual_size,
+                                      true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  TestSmiDiv(masm, &exit, 0x10, 1, 1);
+  TestSmiDiv(masm, &exit, 0x20, 1, 0);
+  TestSmiDiv(masm, &exit, 0x30, -1, 0);
+  TestSmiDiv(masm, &exit, 0x40, 0, 1);
+  TestSmiDiv(masm, &exit, 0x50, 0, -1);
+  TestSmiDiv(masm, &exit, 0x60, 4, 2);
+  TestSmiDiv(masm, &exit, 0x70, -4, 2);
+  TestSmiDiv(masm, &exit, 0x80, 4, -2);
+  TestSmiDiv(masm, &exit, 0x90, -4, -2);
+  TestSmiDiv(masm, &exit, 0xa0, 3, 2);
+  TestSmiDiv(masm, &exit, 0xb0, 3, 4);
+  TestSmiDiv(masm, &exit, 0xc0, 1, Smi::kMaxValue);
+  TestSmiDiv(masm, &exit, 0xd0, -1, Smi::kMaxValue);
+  TestSmiDiv(masm, &exit, 0xe0, Smi::kMaxValue, 1);
+  TestSmiDiv(masm, &exit, 0xf0, Smi::kMaxValue, Smi::kMaxValue);
+  TestSmiDiv(masm, &exit, 0x100, Smi::kMaxValue, -Smi::kMaxValue);
+  TestSmiDiv(masm, &exit, 0x110, Smi::kMaxValue, -1);
+  TestSmiDiv(masm, &exit, 0x120, Smi::kMinValue, 1);
+  TestSmiDiv(masm, &exit, 0x130, Smi::kMinValue, Smi::kMinValue);
+  TestSmiDiv(masm, &exit, 0x140, Smi::kMinValue, -1);
+
+  __ xor_(r15, r15);  // Success.
+  __ bind(&exit);
+  __ movq(rax, r15);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
+void TestSmiMod(MacroAssembler* masm, Label* exit, int id, int x, int y) {
+  bool division_by_zero = (y == 0);
+  bool division_overflow = (x == Smi::kMinValue) && (y == -1);
+  bool fraction = !division_by_zero && !division_overflow && ((x % y) != 0);
+  bool negative_zero = (!fraction && x < 0);
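+  // A zero remainder with negative x is -0 in JavaScript, not a smi.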
+  __ Move(rcx, Smi::FromInt(x));
+  __ movq(r11, rcx);
+  __ Move(r12, Smi::FromInt(y));
+  if (!division_overflow && !negative_zero && !division_by_zero) {
+    // Modulo succeeds.
+    __ movq(r15, Immediate(id));
+    int result = x % y;
+    __ Move(r8, Smi::FromInt(result));
+    __ SmiMod(r9, rcx, r12, exit);
+
+    __ incq(r15);
+    __ SmiCompare(r9, r8);
+    __ j(not_equal, exit);
+
+    __ incq(r15);
+    __ SmiCompare(rcx, r11);
+    __ j(not_equal, exit);
+
+    __ incq(r15);
+    __ SmiMod(rcx, rcx, r12, exit);
+
+    __ incq(r15);
+    __ SmiCompare(rcx, r8);
+    __ j(not_equal, exit);
+  } else {
+    // Modulo fails.
+    __ movq(r15, Immediate(id + 8));
+
+    Label fail_ok, fail_ok2;
+    __ SmiMod(r9, rcx, r12, &fail_ok);
+    __ jmp(exit);
+    __ bind(&fail_ok);
+
+    __ incq(r15);
+    __ SmiCompare(rcx, r11);
+    __ j(not_equal, exit);
+
+    __ incq(r15);
+    __ SmiMod(rcx, rcx, r12, &fail_ok2);
+    __ jmp(exit);
+    __ bind(&fail_ok2);
+
+    __ incq(r15);
+    __ SmiCompare(rcx, r11);
+    __ j(not_equal, exit);
+  }
+}
+
+
+TEST(SmiMod) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+                                      &actual_size,
+                                      true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  TestSmiMod(masm, &exit, 0x10, 1, 1);
+  TestSmiMod(masm, &exit, 0x20, 1, 0);
+  TestSmiMod(masm, &exit, 0x30, -1, 0);
+  TestSmiMod(masm, &exit, 0x40, 0, 1);
+  TestSmiMod(masm, &exit, 0x50, 0, -1);
+  TestSmiMod(masm, &exit, 0x60, 4, 2);
+  TestSmiMod(masm, &exit, 0x70, -4, 2);
+  TestSmiMod(masm, &exit, 0x80, 4, -2);
+  TestSmiMod(masm, &exit, 0x90, -4, -2);
+  TestSmiMod(masm, &exit, 0xa0, 3, 2);
+  TestSmiMod(masm, &exit, 0xb0, 3, 4);
+  TestSmiMod(masm, &exit, 0xc0, 1, Smi::kMaxValue);
+  TestSmiMod(masm, &exit, 0xd0, -1, Smi::kMaxValue);
+  TestSmiMod(masm, &exit, 0xe0, Smi::kMaxValue, 1);
+  TestSmiMod(masm, &exit, 0xf0, Smi::kMaxValue, Smi::kMaxValue);
+  TestSmiMod(masm, &exit, 0x100, Smi::kMaxValue, -Smi::kMaxValue);
+  TestSmiMod(masm, &exit, 0x110, Smi::kMaxValue, -1);
+  TestSmiMod(masm, &exit, 0x120, Smi::kMinValue, 1);
+  TestSmiMod(masm, &exit, 0x130, Smi::kMinValue, Smi::kMinValue);
+  TestSmiMod(masm, &exit, 0x140, Smi::kMinValue, -1);
+
+  __ xor_(r15, r15);  // Success.
+  __ bind(&exit);
+  __ movq(rax, r15);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
+void TestSmiIndex(MacroAssembler* masm, Label* exit, int id, int x) {
+  __ movl(rax, Immediate(id));
+
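+  // SmiToIndex(dst, smi, shift) yields a register/scale pair such that
+  // (index.reg << index.scale) == x << shift, for shifts 0 through 7;
+  // SmiToNegativeIndex does the same for -x.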
+  for (int i = 0; i < 8; i++) {
+    __ Move(rcx, Smi::FromInt(x));
+    SmiIndex index = masm->SmiToIndex(rdx, rcx, i);
+    ASSERT(index.reg.is(rcx) || index.reg.is(rdx));
+    __ shl(index.reg, Immediate(index.scale));
+    __ Set(r8, static_cast<intptr_t>(x) << i);
+    __ SmiCompare(index.reg, r8);
+    __ j(not_equal, exit);
+    __ incq(rax);
+    __ Move(rcx, Smi::FromInt(x));
+    index = masm->SmiToIndex(rcx, rcx, i);
+    ASSERT(index.reg.is(rcx));
+    __ shl(rcx, Immediate(index.scale));
+    __ Set(r8, static_cast<intptr_t>(x) << i);
+    __ SmiCompare(rcx, r8);
+    __ j(not_equal, exit);
+    __ incq(rax);
+
+    __ Move(rcx, Smi::FromInt(x));
+    index = masm->SmiToNegativeIndex(rdx, rcx, i);
+    ASSERT(index.reg.is(rcx) || index.reg.is(rdx));
+    __ shl(index.reg, Immediate(index.scale));
+    __ Set(r8, static_cast<intptr_t>(-x) << i);
+    __ SmiCompare(index.reg, r8);
+    __ j(not_equal, exit);
+    __ incq(rax);
+    __ Move(rcx, Smi::FromInt(x));
+    index = masm->SmiToNegativeIndex(rcx, rcx, i);
+    ASSERT(index.reg.is(rcx));
+    __ shl(rcx, Immediate(index.scale));
+    __ Set(r8, static_cast<intptr_t>(-x) << i);
+    __ SmiCompare(rcx, r8);
+    __ j(not_equal, exit);
+    __ incq(rax);
+  }
+}
+
+TEST(SmiIndex) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+                                      &actual_size,
+                                      true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  TestSmiIndex(masm, &exit, 0x10, 0);
+  TestSmiIndex(masm, &exit, 0x20, 1);
+  TestSmiIndex(masm, &exit, 0x30, 100);
+  TestSmiIndex(masm, &exit, 0x40, 1000);
+  TestSmiIndex(masm, &exit, 0x50, Smi::kMaxValue);
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
+void TestSelectNonSmi(MacroAssembler* masm, Label* exit, int id, int x, int y) {
+  __ movl(rax, Immediate(id));
+  __ Move(rcx, Smi::FromInt(x));
+  __ Move(rdx, Smi::FromInt(y));
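+  // Flip rdx's tag bit: rdx becomes the one non-smi SelectNonSmi must pick.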
+  __ xor_(rdx, Immediate(kSmiTagMask));
+  __ SelectNonSmi(r9, rcx, rdx, exit);
+
+  __ incq(rax);
+  __ SmiCompare(r9, rdx);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ Move(rcx, Smi::FromInt(x));
+  __ Move(rdx, Smi::FromInt(y));
+  __ xor_(rcx, Immediate(kSmiTagMask));
+  __ SelectNonSmi(r9, rcx, rdx, exit);
+
+  __ incq(rax);
+  __ SmiCompare(r9, rcx);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  Label fail_ok;
+  __ Move(rcx, Smi::FromInt(x));
+  __ Move(rdx, Smi::FromInt(y));
+  __ xor_(rcx, Immediate(kSmiTagMask));
+  __ xor_(rdx, Immediate(kSmiTagMask));
+  __ SelectNonSmi(r9, rcx, rdx, &fail_ok);
+  __ jmp(exit);
+  __ bind(&fail_ok);
+}
+
+
+TEST(SmiSelectNonSmi) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                      &actual_size,
+                                      true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);  // Avoid inline checks.
+  Label exit;
+
+  TestSelectNonSmi(masm, &exit, 0x10, 0, 0);
+  TestSelectNonSmi(masm, &exit, 0x20, 0, 1);
+  TestSelectNonSmi(masm, &exit, 0x30, 1, 0);
+  TestSelectNonSmi(masm, &exit, 0x40, 0, -1);
+  TestSelectNonSmi(masm, &exit, 0x50, -1, 0);
+  TestSelectNonSmi(masm, &exit, 0x60, -1, -1);
+  TestSelectNonSmi(masm, &exit, 0x70, 1, 1);
+  TestSelectNonSmi(masm, &exit, 0x80, Smi::kMinValue, Smi::kMaxValue);
+  TestSelectNonSmi(masm, &exit, 0x90, Smi::kMinValue, Smi::kMinValue);
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
+void TestSmiAnd(MacroAssembler* masm, Label* exit, int id, int x, int y) {
+  int result = x & y;
+
+  __ movl(rax, Immediate(id));
+
+  __ Move(rcx, Smi::FromInt(x));
+  __ movq(r11, rcx);
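+  // r11 snapshots the source smi so we can check it is left untouched.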
+  __ Move(rdx, Smi::FromInt(y));
+  __ Move(r8, Smi::FromInt(result));
+  __ SmiAnd(r9, rcx, rdx);
+  __ SmiCompare(r8, r9);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiCompare(r11, rcx);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiAnd(rcx, rcx, rdx);
+  __ SmiCompare(r8, rcx);
+  __ j(not_equal, exit);
+
+  __ movq(rcx, r11);
+  __ incq(rax);
+  __ SmiAndConstant(r9, rcx, Smi::FromInt(y));
+  __ SmiCompare(r8, r9);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiCompare(r11, rcx);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiAndConstant(rcx, rcx, Smi::FromInt(y));
+  __ SmiCompare(r8, rcx);
+  __ j(not_equal, exit);
+}
+
+
+TEST(SmiAnd) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                      &actual_size,
+                                      true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  TestSmiAnd(masm, &exit, 0x10, 0, 0);
+  TestSmiAnd(masm, &exit, 0x20, 0, 1);
+  TestSmiAnd(masm, &exit, 0x30, 1, 0);
+  TestSmiAnd(masm, &exit, 0x40, 0, -1);
+  TestSmiAnd(masm, &exit, 0x50, -1, 0);
+  TestSmiAnd(masm, &exit, 0x60, -1, -1);
+  TestSmiAnd(masm, &exit, 0x70, 1, 1);
+  TestSmiAnd(masm, &exit, 0x80, Smi::kMinValue, Smi::kMaxValue);
+  TestSmiAnd(masm, &exit, 0x90, Smi::kMinValue, Smi::kMinValue);
+  TestSmiAnd(masm, &exit, 0xA0, Smi::kMinValue, -1);
+  TestSmiAnd(masm, &exit, 0xB0, Smi::kMinValue, -1);
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
+void TestSmiOr(MacroAssembler* masm, Label* exit, int id, int x, int y) {
+  int result = x | y;
+
+  __ movl(rax, Immediate(id));
+
+  __ Move(rcx, Smi::FromInt(x));
+  __ movq(r11, rcx);
+  __ Move(rdx, Smi::FromInt(y));
+  __ Move(r8, Smi::FromInt(result));
+  __ SmiOr(r9, rcx, rdx);
+  __ SmiCompare(r8, r9);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiCompare(r11, rcx);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiOr(rcx, rcx, rdx);
+  __ SmiCompare(r8, rcx);
+  __ j(not_equal, exit);
+
+  __ movq(rcx, r11);
+  __ incq(rax);
+  __ SmiOrConstant(r9, rcx, Smi::FromInt(y));
+  __ SmiCompare(r8, r9);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiCompare(r11, rcx);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiOrConstant(rcx, rcx, Smi::FromInt(y));
+  __ SmiCompare(r8, rcx);
+  __ j(not_equal, exit);
+}
+
+
+TEST(SmiOr) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                      &actual_size,
+                                      true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  TestSmiOr(masm, &exit, 0x10, 0, 0);
+  TestSmiOr(masm, &exit, 0x20, 0, 1);
+  TestSmiOr(masm, &exit, 0x30, 1, 0);
+  TestSmiOr(masm, &exit, 0x40, 0, -1);
+  TestSmiOr(masm, &exit, 0x50, -1, 0);
+  TestSmiOr(masm, &exit, 0x60, -1, -1);
+  TestSmiOr(masm, &exit, 0x70, 1, 1);
+  TestSmiOr(masm, &exit, 0x80, Smi::kMinValue, Smi::kMaxValue);
+  TestSmiOr(masm, &exit, 0x90, Smi::kMinValue, Smi::kMinValue);
+  TestSmiOr(masm, &exit, 0xA0, Smi::kMinValue, -1);
+  TestSmiOr(masm, &exit, 0xB0, 0x05555555, 0x01234567);
+  TestSmiOr(masm, &exit, 0xC0, 0x05555555, 0x0fedcba9);
+  TestSmiOr(masm, &exit, 0xD0, Smi::kMinValue, -1);
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
+void TestSmiXor(MacroAssembler* masm, Label* exit, int id, int x, int y) {
+  int result = x ^ y;
+
+  __ movl(rax, Immediate(id));
+
+  __ Move(rcx, Smi::FromInt(x));
+  __ movq(r11, rcx);
+  __ Move(rdx, Smi::FromInt(y));
+  __ Move(r8, Smi::FromInt(result));
+  __ SmiXor(r9, rcx, rdx);
+  __ SmiCompare(r8, r9);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiCompare(r11, rcx);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiXor(rcx, rcx, rdx);
+  __ SmiCompare(r8, rcx);
+  __ j(not_equal, exit);
+
+  __ movq(rcx, r11);
+  __ incq(rax);
+  __ SmiXorConstant(r9, rcx, Smi::FromInt(y));
+  __ SmiCompare(r8, r9);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiCompare(r11, rcx);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiXorConstant(rcx, rcx, Smi::FromInt(y));
+  __ SmiCompare(r8, rcx);
+  __ j(not_equal, exit);
+}
+
+
+TEST(SmiXor) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                      &actual_size,
+                                      true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  TestSmiXor(masm, &exit, 0x10, 0, 0);
+  TestSmiXor(masm, &exit, 0x20, 0, 1);
+  TestSmiXor(masm, &exit, 0x30, 1, 0);
+  TestSmiXor(masm, &exit, 0x40, 0, -1);
+  TestSmiXor(masm, &exit, 0x50, -1, 0);
+  TestSmiXor(masm, &exit, 0x60, -1, -1);
+  TestSmiXor(masm, &exit, 0x70, 1, 1);
+  TestSmiXor(masm, &exit, 0x80, Smi::kMinValue, Smi::kMaxValue);
+  TestSmiXor(masm, &exit, 0x90, Smi::kMinValue, Smi::kMinValue);
+  TestSmiXor(masm, &exit, 0xA0, Smi::kMinValue, -1);
+  TestSmiXor(masm, &exit, 0xB0, 0x5555555, 0x01234567);
+  TestSmiXor(masm, &exit, 0xC0, 0x5555555, 0x0fedcba9);
+  TestSmiXor(masm, &exit, 0xD0, Smi::kMinValue, -1);
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
+void TestSmiNot(MacroAssembler* masm, Label* exit, int id, int x) {
+  int result = ~x;
+  __ movl(rax, Immediate(id));
+
+  __ Move(r8, Smi::FromInt(result));
+  __ Move(rcx, Smi::FromInt(x));
+  __ movq(r11, rcx);
+
+  __ SmiNot(r9, rcx);
+  __ SmiCompare(r9, r8);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiCompare(r11, rcx);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiNot(rcx, rcx);
+  __ SmiCompare(rcx, r8);
+  __ j(not_equal, exit);
+}
+
+
+TEST(SmiNot) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                      &actual_size,
+                                      true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  TestSmiNot(masm, &exit, 0x10, 0);
+  TestSmiNot(masm, &exit, 0x20, 1);
+  TestSmiNot(masm, &exit, 0x30, -1);
+  TestSmiNot(masm, &exit, 0x40, 127);
+  TestSmiNot(masm, &exit, 0x50, 65535);
+  TestSmiNot(masm, &exit, 0x60, Smi::kMinValue);
+  TestSmiNot(masm, &exit, 0x70, Smi::kMaxValue);
+  TestSmiNot(masm, &exit, 0x80, 0x05555555);
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
+void TestSmiShiftLeft(MacroAssembler* masm, Label* exit, int id, int x) {
+  const int shifts[] = { 0, 1, 7, 24, kSmiValueSize - 1};
+  const int kNumShifts = 5;
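+  // Shift amounts cover no shift, a few mid-range shifts, and the maximum
+  // smi shift of kSmiValueSize - 1 bits.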
+  __ movl(rax, Immediate(id));
+  for (int i = 0; i < kNumShifts; i++) {
+    // rax == id + i * 10.
+    int shift = shifts[i];
+    int result = x << shift;
+    if (Smi::IsValid(result)) {
+      __ Move(r8, Smi::FromInt(result));
+      __ Move(rcx, Smi::FromInt(x));
+      __ SmiShiftLeftConstant(r9, rcx, shift, exit);
+
+      __ incq(rax);
+      __ SmiCompare(r9, r8);
+      __ j(not_equal, exit);
+
+      __ incq(rax);
+      __ Move(rcx, Smi::FromInt(x));
+      __ SmiShiftLeftConstant(rcx, rcx, shift, exit);
+
+      __ incq(rax);
+      __ SmiCompare(rcx, r8);
+      __ j(not_equal, exit);
+
+      __ incq(rax);
+      __ Move(rdx, Smi::FromInt(x));
+      __ Move(rcx, Smi::FromInt(shift));
+      __ SmiShiftLeft(r9, rdx, rcx, exit);
+
+      __ incq(rax);
+      __ SmiCompare(r9, r8);
+      __ j(not_equal, exit);
+
+      __ incq(rax);
+      __ Move(rdx, Smi::FromInt(x));
+      __ Move(r11, Smi::FromInt(shift));
+      __ SmiShiftLeft(r9, rdx, r11, exit);
+
+      __ incq(rax);
+      __ SmiCompare(r9, r8);
+      __ j(not_equal, exit);
+
+      __ incq(rax);
+      __ Move(rdx, Smi::FromInt(x));
+      __ Move(r11, Smi::FromInt(shift));
+      __ SmiShiftLeft(rdx, rdx, r11, exit);
+
+      __ incq(rax);
+      __ SmiCompare(rdx, r8);
+      __ j(not_equal, exit);
+
+      __ incq(rax);
+    } else {
+      // Cannot happen with long smis.
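+      // With 31-bit smis the shifted result overflows the smi range, so
+      // the macro must branch to the failure label and leave the source
+      // register untouched.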
+      Label fail_ok;
+      __ Move(rcx, Smi::FromInt(x));
+      __ movq(r11, rcx);
+      __ SmiShiftLeftConstant(r9, rcx, shift, &fail_ok);
+      __ jmp(exit);
+      __ bind(&fail_ok);
+
+      __ incq(rax);
+      __ SmiCompare(rcx, r11);
+      __ j(not_equal, exit);
+
+      __ incq(rax);
+      Label fail_ok2;
+      __ SmiShiftLeftConstant(rcx, rcx, shift, &fail_ok2);
+      __ jmp(exit);
+      __ bind(&fail_ok2);
+
+      __ incq(rax);
+      __ SmiCompare(rcx, r11);
+      __ j(not_equal, exit);
+
+      __ incq(rax);
+      __ Move(r8, Smi::FromInt(shift));
+      Label fail_ok3;
+      __ SmiShiftLeft(r9, rcx, r8, &fail_ok3);
+      __ jmp(exit);
+      __ bind(&fail_ok3);
+
+      __ incq(rax);
+      __ SmiCompare(rcx, r11);
+      __ j(not_equal, exit);
+
+      __ incq(rax);
+      __ Move(r8, Smi::FromInt(shift));
+      __ movq(rdx, r11);
+      Label fail_ok4;
+      __ SmiShiftLeft(rdx, rdx, r8, &fail_ok4);
+      __ jmp(exit);
+      __ bind(&fail_ok4);
+
+      __ incq(rax);
+      __ SmiCompare(rdx, r11);
+      __ j(not_equal, exit);
+
+      __ addq(rax, Immediate(3));
+    }
+  }
+}
+
+
+TEST(SmiShiftLeft) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 3,
+                                      &actual_size,
+                                      true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  TestSmiShiftLeft(masm, &exit, 0x10, 0);
+  TestSmiShiftLeft(masm, &exit, 0x50, 1);
+  TestSmiShiftLeft(masm, &exit, 0x90, 127);
+  TestSmiShiftLeft(masm, &exit, 0xD0, 65535);
+  TestSmiShiftLeft(masm, &exit, 0x110, Smi::kMaxValue);
+  TestSmiShiftLeft(masm, &exit, 0x150, Smi::kMinValue);
+  TestSmiShiftLeft(masm, &exit, 0x190, -1);
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
+void TestSmiShiftLogicalRight(MacroAssembler* masm,
+                              Label* exit,
+                              int id,
+                              int x) {
+  const int shifts[] = { 0, 1, 7, 24, kSmiValueSize - 1};
+  const int kNumShifts = 5;
+  __ movl(rax, Immediate(id));
+  for (int i = 0; i < kNumShifts; i++) {
+    int shift = shifts[i];
+    intptr_t result = static_cast<unsigned int>(x) >> shift;
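+    // The logical shift treats x as unsigned, so for negative x the result
+    // can exceed the smi range and take the bailout path below.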
+    if (Smi::IsValid(result)) {
+      __ Move(r8, Smi::FromInt(result));
+      __ Move(rcx, Smi::FromInt(x));
+      __ SmiShiftLogicalRightConstant(r9, rcx, shift, exit);
+
+      __ incq(rax);
+      __ SmiCompare(r9, r8);
+      __ j(not_equal, exit);
+
+      __ incq(rax);
+      __ Move(rdx, Smi::FromInt(x));
+      __ Move(rcx, Smi::FromInt(shift));
+      __ SmiShiftLogicalRight(r9, rdx, rcx, exit);
+
+      __ incq(rax);
+      __ SmiCompare(r9, r8);
+      __ j(not_equal, exit);
+
+      __ incq(rax);
+      __ Move(rdx, Smi::FromInt(x));
+      __ Move(r11, Smi::FromInt(shift));
+      __ SmiShiftLogicalRight(r9, rdx, r11, exit);
+
+      __ incq(rax);
+      __ SmiCompare(r9, r8);
+      __ j(not_equal, exit);
+
+      __ incq(rax);
+    } else {
+      // Cannot happen with long smis.
+      Label fail_ok;
+      __ Move(rcx, Smi::FromInt(x));
+      __ movq(r11, rcx);
+      __ SmiShiftLogicalRightConstant(r9, rcx, shift, &fail_ok);
+      __ jmp(exit);
+      __ bind(&fail_ok);
+
+      __ incq(rax);
+      __ SmiCompare(rcx, r11);
+      __ j(not_equal, exit);
+
+      __ incq(rax);
+      __ Move(r8, Smi::FromInt(shift));
+      Label fail_ok3;
+      __ SmiShiftLogicalRight(r9, rcx, r8, &fail_ok3);
+      __ jmp(exit);
+      __ bind(&fail_ok3);
+
+      __ incq(rax);
+      __ SmiCompare(rcx, r11);
+      __ j(not_equal, exit);
+
+      __ addq(rax, Immediate(3));
+    }
+  }
+}
+
+
+TEST(SmiShiftLogicalRight) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+                                      &actual_size,
+                                      true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  TestSmiShiftLogicalRight(masm, &exit, 0x10, 0);
+  TestSmiShiftLogicalRight(masm, &exit, 0x30, 1);
+  TestSmiShiftLogicalRight(masm, &exit, 0x50, 127);
+  TestSmiShiftLogicalRight(masm, &exit, 0x70, 65535);
+  TestSmiShiftLogicalRight(masm, &exit, 0x90, Smi::kMaxValue);
+  TestSmiShiftLogicalRight(masm, &exit, 0xB0, Smi::kMinValue);
+  TestSmiShiftLogicalRight(masm, &exit, 0xD0, -1);
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
+void TestSmiShiftArithmeticRight(MacroAssembler* masm,
+                                 Label* exit,
+                                 int id,
+                                 int x) {
+  const int shifts[] = { 0, 1, 7, 24, kSmiValueSize - 1};
+  const int kNumShifts = 5;
+  __ movl(rax, Immediate(id));
+  for (int i = 0; i < kNumShifts; i++) {
+    int shift = shifts[i];
+    // Guaranteed arithmetic shift.
+    int result = (x < 0) ? ~((~x) >> shift) : (x >> shift);
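+    // Rewriting x >> shift as ~((~x) >> shift) for negative x gives an
+    // arithmetic shift even if the C++ compiler shifts negative values
+    // logically.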
+    __ Move(r8, Smi::FromInt(result));
+    __ Move(rcx, Smi::FromInt(x));
+    __ SmiShiftArithmeticRightConstant(rcx, rcx, shift);
+
+    __ SmiCompare(rcx, r8);
+    __ j(not_equal, exit);
+
+    __ incq(rax);
+    __ Move(rdx, Smi::FromInt(x));
+    __ Move(r11, Smi::FromInt(shift));
+    __ SmiShiftArithmeticRight(rdx, rdx, r11);
+
+    __ SmiCompare(rdx, r8);
+    __ j(not_equal, exit);
+
+    __ incq(rax);
+  }
+}
+
+
+TEST(SmiShiftArithmeticRight) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+                                      &actual_size,
+                                      true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  TestSmiShiftArithmeticRight(masm, &exit, 0x10, 0);
+  TestSmiShiftArithmeticRight(masm, &exit, 0x20, 1);
+  TestSmiShiftArithmeticRight(masm, &exit, 0x30, 127);
+  TestSmiShiftArithmeticRight(masm, &exit, 0x40, 65535);
+  TestSmiShiftArithmeticRight(masm, &exit, 0x50, Smi::kMaxValue);
+  TestSmiShiftArithmeticRight(masm, &exit, 0x60, Smi::kMinValue);
+  TestSmiShiftArithmeticRight(masm, &exit, 0x70, -1);
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
+void TestPositiveSmiPowerUp(MacroAssembler* masm, Label* exit, int id, int x) {
+  ASSERT(x >= 0);
+  int powers[] = { 0, 1, 2, 3, 8, 16, 24, 31 };
+  int power_count = 8;
+  __ movl(rax, Immediate(id));
+  for (int i = 0; i  < power_count; i++) {
+    int power = powers[i];
+    intptr_t result = static_cast<intptr_t>(x) << power;
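+    // The expected value is an untagged 64-bit integer (up to x << 31),
+    // so it is loaded with Set rather than materialized as a smi.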
+    __ Set(r8, result);
+    __ Move(rcx, Smi::FromInt(x));
+    __ movq(r11, rcx);
+    __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rcx, power);
+    __ SmiCompare(rdx, r8);
+    __ j(not_equal, exit);
+    __ incq(rax);
+    __ SmiCompare(r11, rcx);  // rcx unchanged.
+    __ j(not_equal, exit);
+    __ incq(rax);
+    __ PositiveSmiTimesPowerOfTwoToInteger64(rcx, rcx, power);
+    __ SmiCompare(rdx, r8);
+    __ j(not_equal, exit);
+    __ incq(rax);
+  }
+}
+
+
+TEST(PositiveSmiTimesPowerOfTwoToInteger64) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+                                      &actual_size,
+                                      true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  TestPositiveSmiPowerUp(masm, &exit, 0x20, 0);
+  TestPositiveSmiPowerUp(masm, &exit, 0x40, 1);
+  TestPositiveSmiPowerUp(masm, &exit, 0x60, 127);
+  TestPositiveSmiPowerUp(masm, &exit, 0x80, 128);
+  TestPositiveSmiPowerUp(masm, &exit, 0xA0, 255);
+  TestPositiveSmiPowerUp(masm, &exit, 0xC0, 256);
+  TestPositiveSmiPowerUp(masm, &exit, 0x100, 65535);
+  TestPositiveSmiPowerUp(masm, &exit, 0x120, 65536);
+  TestPositiveSmiPowerUp(masm, &exit, 0x140, Smi::kMaxValue);
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
+#undef __
diff --git a/test/cctest/test-mark-compact.cc b/test/cctest/test-mark-compact.cc
index 743375d..e56f0f4 100644
--- a/test/cctest/test-mark-compact.cc
+++ b/test/cctest/test-mark-compact.cc
@@ -71,10 +71,6 @@
 
 
 TEST(Promotion) {
-  // Test the situation that some objects in new space are promoted to the
-  // old space
-  if (Snapshot::IsEnabled()) return;
-
   // Ensure that we get a compacting collection so that objects are promoted
   // from new space.
   FLAG_gc_global = true;
@@ -106,7 +102,6 @@
 
 
 TEST(NoPromotion) {
-  if (Snapshot::IsEnabled()) return;
   Heap::ConfigureHeap(2*256*KB, 4*MB);
 
   // Test the situation that some objects in new space are promoted to
diff --git a/test/cctest/test-serialize.cc b/test/cctest/test-serialize.cc
index 6939a80..db37eb3 100644
--- a/test/cctest/test-serialize.cc
+++ b/test/cctest/test-serialize.cc
@@ -286,3 +286,22 @@
   v8::Local<v8::Value> value = script->Run();
   CHECK(value->IsUndefined());
 }
+
+
+extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
+
+
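+// The two tests below fail by design, presumably to exercise the cctest
+// harness's handling of failing tests and DEPENDENT_TEST prerequisites.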
+TEST(TestThatAlwaysSucceeds) {
+}
+
+
+TEST(TestThatAlwaysFails) {
+  bool ArtificialFailure = false;
+  CHECK(ArtificialFailure);
+}
+
+
+DEPENDENT_TEST(DependentTestThatAlwaysFails, TestThatAlwaysSucceeds) {
+  bool ArtificialFailure2 = false;
+  CHECK(ArtificialFailure2);
+}
diff --git a/test/cctest/test-spaces.cc b/test/cctest/test-spaces.cc
index d946a7f..1a26883 100644
--- a/test/cctest/test-spaces.cc
+++ b/test/cctest/test-spaces.cc
@@ -99,9 +99,9 @@
 
 TEST(MemoryAllocator) {
   CHECK(Heap::ConfigureHeapDefault());
-  CHECK(MemoryAllocator::Setup(Heap::MaxCapacity()));
+  CHECK(MemoryAllocator::Setup(Heap::MaxReserved()));
 
-  OldSpace faked_space(Heap::MaxCapacity(), OLD_POINTER_SPACE, NOT_EXECUTABLE);
+  OldSpace faked_space(Heap::MaxReserved(), OLD_POINTER_SPACE, NOT_EXECUTABLE);
   int total_pages = 0;
   int requested = 2;
   int allocated;
@@ -155,16 +155,16 @@
 
 TEST(NewSpace) {
   CHECK(Heap::ConfigureHeapDefault());
-  CHECK(MemoryAllocator::Setup(Heap::MaxCapacity()));
+  CHECK(MemoryAllocator::Setup(Heap::MaxReserved()));
 
   NewSpace new_space;
 
   void* chunk =
-      MemoryAllocator::ReserveInitialChunk(2 * Heap::YoungGenerationSize());
+      MemoryAllocator::ReserveInitialChunk(4 * Heap::ReservedSemiSpaceSize());
   CHECK(chunk != NULL);
   Address start = RoundUp(static_cast<Address>(chunk),
-                          Heap::YoungGenerationSize());
-  CHECK(new_space.Setup(start, Heap::YoungGenerationSize()));
+                          2 * Heap::ReservedSemiSpaceSize());
+  CHECK(new_space.Setup(start, 2 * Heap::ReservedSemiSpaceSize()));
   CHECK(new_space.HasBeenSetup());
 
   while (new_space.Available() >= Page::kMaxHeapObjectSize) {
@@ -180,18 +180,18 @@
 
 TEST(OldSpace) {
   CHECK(Heap::ConfigureHeapDefault());
-  CHECK(MemoryAllocator::Setup(Heap::MaxCapacity()));
+  CHECK(MemoryAllocator::Setup(Heap::MaxReserved()));
 
-  OldSpace* s = new OldSpace(Heap::OldGenerationSize(),
+  OldSpace* s = new OldSpace(Heap::MaxOldGenerationSize(),
                              OLD_POINTER_SPACE,
                              NOT_EXECUTABLE);
   CHECK(s != NULL);
 
   void* chunk =
-      MemoryAllocator::ReserveInitialChunk(2 * Heap::YoungGenerationSize());
+      MemoryAllocator::ReserveInitialChunk(4 * Heap::ReservedSemiSpaceSize());
   CHECK(chunk != NULL);
   Address start = static_cast<Address>(chunk);
-  size_t size = RoundUp(start, Heap::YoungGenerationSize()) - start;
+  size_t size = RoundUp(start, 2 * Heap::ReservedSemiSpaceSize()) - start;
 
   CHECK(s->Setup(start, size));
 
diff --git a/test/mjsunit/compiler/globals.js b/test/mjsunit/compiler/globals.js
new file mode 100644
index 0000000..066f927
--- /dev/null
+++ b/test/mjsunit/compiler/globals.js
@@ -0,0 +1,55 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test references and assignments to global variables.
+var g = 0;
+
+// Test compilation of a global variable store.
+assertEquals(1, eval('g = 1'));
+// Test that the store worked.
+assertEquals(1, g);
+
+// Test that patching the IC in the compiled code works.
+assertEquals(1, eval('g = 1'));
+assertEquals(1, g);
+assertEquals(1, eval('g = 1'));
+assertEquals(1, g);
+
+// Test a second store.
+assertEquals("2", eval('g = "2"'));
+assertEquals("2", g);
+
+// Test a load.
+assertEquals("2", eval('g'));
+
+// Test that patching the IC in the compiled code works.
+assertEquals("2", eval('g'));
+assertEquals("2", eval('g'));
+
+// Test a second load.
+g = 3;
+assertEquals(3, eval('g'));
diff --git a/test/mjsunit/compiler/literals-assignment.js b/test/mjsunit/compiler/literals-assignment.js
new file mode 100644
index 0000000..932bfa7
--- /dev/null
+++ b/test/mjsunit/compiler/literals-assignment.js
@@ -0,0 +1,71 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Tests for simple assignments and literals inside an anonymous function
+
+// Test simple return statement.
+assertEquals(8, eval("(function() { return 8; })()"));
+
+// Test simple assignment
+var code = "(function() {\
+               var a;\
+               a = 8;\
+               return a;\
+             })()";
+assertEquals(8, eval(code));
+
+code = "(function() {\
+           var x;\
+           x = 'abc';\
+           return x;\
+         })()";
+assertEquals("abc", eval(code));
+
+// Test assignment as an RHS expression
+
+code = "(function() {\
+           var x, y;\
+           x = y = 8;\
+           return x;\
+         })()";
+assertEquals(8, eval(code));
+
+
+code = "(function() {\
+           var x, y;\
+           x = y = 8;\
+           return y;\
+         })()";
+assertEquals(8, eval(code));
+
+
+code = "(function() {\
+           var x,y,z;\
+           return x = y = z = 8;\
+         })()";
+assertEquals(8, eval(code));
+
diff --git a/test/mjsunit/compiler/literals.js b/test/mjsunit/compiler/literals.js
new file mode 100644
index 0000000..6775401
--- /dev/null
+++ b/test/mjsunit/compiler/literals.js
@@ -0,0 +1,52 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test simple literals.
+assertEquals(8, eval("8"));
+
+assertEquals(null, eval("null"));
+
+assertEquals("abc", eval("'abc'"));
+
+assertEquals(8, eval("6;'abc';8"));
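+// A statement sequence evaluates to the completion value of its last
+// statement; the earlier literals are evaluated only for effect.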
+
+// Test some materialized array literals.
+assertEquals([1,2,3,4], eval('[1,2,3,4]'));
+assertEquals([[1,2],3,4], eval('[[1,2],3,4]'));
+assertEquals([1,[2,3,4]], eval('[1,[2,3,4]]'));
+
+assertEquals([1,2,3,4], eval('var a=1, b=2; [a,b,3,4]'));
+assertEquals([1,2,3,4], eval('var a=1, b=2, c = [a,b,3,4]; c'));
+
+function double(x) { return x + x; }
+var s = 'var a = 1, b = 2; [double(a), double(b), double(3), double(4)]';
+assertEquals([2,4,6,8], eval(s));
+
+// Test array literals in effect context.
+assertEquals(17, eval('[1,2,3,4]; 17'));
+assertEquals(19, eval('var a=1, b=2; [a,b,3,4]; 19'));
+assertEquals(23, eval('var a=1, b=2; c=23; [a,b,3,4]; c'));
diff --git a/test/mjsunit/debug-backtrace.js b/test/mjsunit/debug-backtrace.js
index 0c200ae..d15b2d2 100644
--- a/test/mjsunit/debug-backtrace.js
+++ b/test/mjsunit/debug-backtrace.js
@@ -69,6 +69,11 @@
 }
 
 
+ParsedResponse.prototype.running = function() {
+  return this.response_.running;
+}
+
+
 ParsedResponse.prototype.lookup = function(handle) {
   return this.refs_[handle];
 }
@@ -88,8 +93,9 @@
       var frame;
       var source;
 
-      // Get the debug command processor.
-      var dcp = exec_state.debugCommandProcessor();
+      var dcp;
+      // New debug command processor in the paused state.
+      dcp = exec_state.debugCommandProcessor(false);
 
       // Get the backtrace.
       var json;
@@ -114,6 +120,7 @@
       assertEquals("g", response.lookup(frames[2].func.ref).name);
       assertEquals(3, frames[3].index);
       assertEquals("", response.lookup(frames[3].func.ref).name);
+      assertFalse(response.running(), "expected not running");
 
       // Get backtrace with two frames.
       json = '{"seq":0,"type":"request","command":"backtrace","arguments":{"fromFrame":1,"toFrame":3}}'
@@ -234,6 +241,17 @@
       source = response.body();
       assertEquals(Debug.findScript(f).source, source.source);
 
+      // New debug command processor in the running state.
+      dcp = exec_state.debugCommandProcessor(true);
+      // Get the backtrace.
+      json = '{"seq":0,"type":"request","command":"backtrace"}'
+      resp = dcp.processDebugJSONRequest(json);
+      response = new ParsedResponse(resp);
+      // It might be arguable, but we expect the response to have a body
+      // even when not suspended.
+      assertTrue(!!response.body(), "response body should not be null");
+      assertTrue(response.running(), "expected running");
+
       listenerCalled = true;
     }
   } catch (e) {
diff --git a/test/mjsunit/debug-changebreakpoint.js b/test/mjsunit/debug-changebreakpoint.js
index 477c908..936523a 100644
--- a/test/mjsunit/debug-changebreakpoint.js
+++ b/test/mjsunit/debug-changebreakpoint.js
@@ -59,7 +59,7 @@
   try {
   if (event == Debug.DebugEvent.Break) {
     // Get the debug command processor.
-    var dcp = exec_state.debugCommandProcessor();
+    var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
 
     // Test some illegal clearbreakpoint requests.
     var request = '{' + base_request + '}'
diff --git a/test/mjsunit/debug-clearbreakpoint.js b/test/mjsunit/debug-clearbreakpoint.js
index 28920c5..59479f2 100644
--- a/test/mjsunit/debug-clearbreakpoint.js
+++ b/test/mjsunit/debug-clearbreakpoint.js
@@ -59,7 +59,7 @@
   try {
   if (event == Debug.DebugEvent.Break) {
     // Get the debug command processor.
-    var dcp = exec_state.debugCommandProcessor();
+    var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
 
     // Test some illegal clearbreakpoint requests.
     var request = '{' + base_request + '}'
diff --git a/test/mjsunit/debug-clearbreakpointgroup.js b/test/mjsunit/debug-clearbreakpointgroup.js
index eca9378..aad6c3a 100644
--- a/test/mjsunit/debug-clearbreakpointgroup.js
+++ b/test/mjsunit/debug-clearbreakpointgroup.js
@@ -60,7 +60,7 @@
  try {
    if (event == Debug.DebugEvent.Break) {
      // Get the debug command processor.
-      var dcp = exec_state.debugCommandProcessor();
+      var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
 
      // Clear breakpoint group 1.
      testArguments(dcp, '{"groupId":1}', true);
diff --git a/test/mjsunit/debug-continue.js b/test/mjsunit/debug-continue.js
index 0c11abc..a501aa9 100644
--- a/test/mjsunit/debug-continue.js
+++ b/test/mjsunit/debug-continue.js
@@ -44,7 +44,10 @@
   }
 }
 
-function testArguments(dcp, arguments, success) {
+function testArguments(exec_state, arguments, success) {
+  // Get the debug command processor in paused state.
+  var dcp = exec_state.debugCommandProcessor(false);
+
   // Generate request with the supplied arguments
   var request;
   if (arguments) {
@@ -65,25 +68,23 @@
 function listener(event, exec_state, event_data, data) {
   try {
   if (event == Debug.DebugEvent.Break) {
-    // Get the debug command processor.
-    var dcp = exec_state.debugCommandProcessor();
 
     // Test simple continue request.
-    testArguments(dcp, void 0, true);
+    testArguments(exec_state, void 0, true);
 
     // Test some illegal continue requests.
-    testArguments(dcp, '{"stepaction":"maybe"}', false);
-    testArguments(dcp, '{"stepcount":-1}', false);
+    testArguments(exec_state, '{"stepaction":"maybe"}', false);
+    testArguments(exec_state, '{"stepcount":-1}', false);
 
     // Test some legal continue requests.
-    testArguments(dcp, '{"stepaction":"in"}', true);
-    testArguments(dcp, '{"stepaction":"min"}', true);
-    testArguments(dcp, '{"stepaction":"next"}', true);
-    testArguments(dcp, '{"stepaction":"out"}', true);
-    testArguments(dcp, '{"stepcount":1}', true);
-    testArguments(dcp, '{"stepcount":10}', true);
-    testArguments(dcp, '{"stepcount":"10"}', true);
-    testArguments(dcp, '{"stepaction":"next","stepcount":10}', true);
+    testArguments(exec_state, '{"stepaction":"in"}', true);
+    testArguments(exec_state, '{"stepaction":"min"}', true);
+    testArguments(exec_state, '{"stepaction":"next"}', true);
+    testArguments(exec_state, '{"stepaction":"out"}', true);
+    testArguments(exec_state, '{"stepcount":1}', true);
+    testArguments(exec_state, '{"stepcount":10}', true);
+    testArguments(exec_state, '{"stepcount":"10"}', true);
+    testArguments(exec_state, '{"stepaction":"next","stepcount":10}', true);
 
     // Indicate that all was processed.
     listenerComplete = true;
@@ -108,6 +109,6 @@
 Debug.setBreakPoint(g, 0, 0);
 g();
 
+assertFalse(exception, "exception in listener")
 // Make sure that the debug event listener vas invoked.
 assertTrue(listenerComplete, "listener did not run to completion");
-assertFalse(exception, "exception in listener")
diff --git a/test/mjsunit/debug-evaluate-bool-constructor.js b/test/mjsunit/debug-evaluate-bool-constructor.js
new file mode 100644
index 0000000..809a5cc
--- /dev/null
+++ b/test/mjsunit/debug-evaluate-bool-constructor.js
@@ -0,0 +1,80 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+var listenerComplete = false;
+var exception = false;
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Break) {
+      // Get the debug command processor.
+      var dcp = exec_state.debugCommandProcessor();
+
+      var request = {
+         seq: 0,
+         type: 'request',
+         command: 'evaluate',
+         arguments: {
+           expression: 'a',
+           frame: 0
+         }
+      };
+      request = JSON.stringify(request);
+
+      var resp = dcp.processDebugJSONRequest(request);
+      var response = JSON.parse(resp);
+      assertTrue(response.success, 'Command failed: ' + resp);
+      assertEquals('object', response.body.type);
+      assertEquals('Object', response.body.className);
+
+      // Indicate that all was processed.
+      listenerComplete = true;
+    }
+  } catch (e) {
+   exception = e
+  };
+};
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+function callDebugger() {
+  // Set the constructor field to a non-function value.
+  var a = {constructor:true};
+  debugger;
+}
+
+callDebugger();
+
+
+// Make sure that the debug event listener was invoked.
+assertFalse(exception, "exception in listener")
+assertTrue(listenerComplete, "listener did not run to completion");
diff --git a/test/mjsunit/debug-evaluate-recursive.js b/test/mjsunit/debug-evaluate-recursive.js
index 9f037e5..6ee391b 100644
--- a/test/mjsunit/debug-evaluate-recursive.js
+++ b/test/mjsunit/debug-evaluate-recursive.js
@@ -44,7 +44,10 @@
   }
 }
 
-function testRequest(dcp, arguments, success, result) {
+function testRequest(exec_state, arguments, success, result) {
+  // Get the debug command processor in paused state.
+  var dcp = exec_state.debugCommandProcessor(false);
+
   // Generate request with the supplied arguments.
   var request;
   if (arguments) {
@@ -74,23 +77,20 @@
       assertEquals(1, exec_state.frame(0).evaluate('f()', true).value());
       assertEquals(2, exec_state.frame(0).evaluate('g()', true).value());
 
-      // Get the debug command processor.
-      var dcp = exec_state.debugCommandProcessor();
-
       // Call functions with break using the JSON protocol. Tests that argument
       // disable_break is default true.
-      testRequest(dcp, '{"expression":"f()"}', true, 1);
-      testRequest(dcp, '{"expression":"f()","frame":0}',  true, 1);
-      testRequest(dcp, '{"expression":"g()"}', true, 2);
-      testRequest(dcp, '{"expression":"g()","frame":0}',  true, 2);
+      testRequest(exec_state, '{"expression":"f()"}', true, 1);
+      testRequest(exec_state, '{"expression":"f()","frame":0}',  true, 1);
+      testRequest(exec_state, '{"expression":"g()"}', true, 2);
+      testRequest(exec_state, '{"expression":"g()","frame":0}',  true, 2);
 
       // Call functions with break using the JSON protocol. Tests passing
       // argument disable_break is default true.
-      testRequest(dcp, '{"expression":"f()","disable_break":true}', true, 1);
-      testRequest(dcp, '{"expression":"f()","frame":0,"disable_break":true}',
+      testRequest(exec_state, '{"expression":"f()","disable_break":true}', true, 1);
+      testRequest(exec_state, '{"expression":"f()","frame":0,"disable_break":true}',
                   true, 1);
-      testRequest(dcp, '{"expression":"g()","disable_break":true}', true, 2);
-      testRequest(dcp, '{"expression":"g()","frame":0,"disable_break":true}',
+      testRequest(exec_state, '{"expression":"g()","disable_break":true}', true, 2);
+      testRequest(exec_state, '{"expression":"g()","frame":0,"disable_break":true}',
                   true, 2);
 
       // Indicate that all was processed.
@@ -146,9 +146,9 @@
 // Cause a debug break event.
 debugger;
 
+assertFalse(exception, "exception in listener")
 // Make sure that the debug event listener vas invoked.
 assertTrue(listenerComplete);
-assertFalse(exception, "exception in listener")
 
 // Remove the debug event listener.
 Debug.setListener(null);
@@ -161,7 +161,7 @@
 
 debugger;
 
+assertFalse(exception, "exception in listener")
 // Make sure that the debug event listener vas invoked.
 assertTrue(listenerComplete);
-assertFalse(exception, "exception in listener")
 assertEquals(2, break_count);
diff --git a/test/mjsunit/debug-evaluate.js b/test/mjsunit/debug-evaluate.js
index 5c5734f..c477907 100644
--- a/test/mjsunit/debug-evaluate.js
+++ b/test/mjsunit/debug-evaluate.js
@@ -59,14 +59,15 @@
   } else {
     assertFalse(response.success, request + ' -> ' + response.message);
   }
-  assertFalse(response.running, request + ' -> expected not running');
+  assertEquals(response.running, "unspecified_running_state",
+               request + ' -> expected not running');
 }
 
 function listener(event, exec_state, event_data, data) {
   try {
     if (event == Debug.DebugEvent.Break) {
       // Get the debug command processor.
-      var dcp = exec_state.debugCommandProcessor();
+      var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
 
       // Test some illegal evaluate requests.
       testRequest(dcp, void 0, false);
@@ -112,6 +113,6 @@
 Debug.setBreakPoint(f, 2, 0);
 g();
 
+assertFalse(exception, "exception in listener")
 // Make sure that the debug event listener vas invoked.
 assertTrue(listenerComplete, "listener did not run to completion");
-assertFalse(exception, "exception in listener")
diff --git a/test/mjsunit/debug-handle.js b/test/mjsunit/debug-handle.js
index c7ab76a..98875ce 100644
--- a/test/mjsunit/debug-handle.js
+++ b/test/mjsunit/debug-handle.js
@@ -43,7 +43,10 @@
 
 
 // Send an evaluation request and return the handle of the result.
-function evaluateRequest(dcp, arguments) {
+function evaluateRequest(exec_state, arguments) {
+  // Get the debug command processor.
+  var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
+
   // The base part of all evaluate requests.
   var base_request = '"seq":0,"type":"request","command":"evaluate"'
 
@@ -63,7 +66,10 @@
 
 
 // Send a lookup request and return the evaluated JSON response.
-function lookupRequest(dcp, arguments, success) {
+function lookupRequest(exec_state, arguments, success) {
+  // Get the debug command processor.
+  var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
+
   // The base part of all lookup requests.
   var base_request = '"seq":0,"type":"request","command":"lookup"'
   
@@ -81,7 +87,7 @@
   } else {
     assertFalse(response.success, request + ' -> ' + response.message);
   }
-  assertFalse(response.running, request + ' -> expected not running');
+  assertEquals(response.running, dcp.isRunning(), request + ' -> expected not running');
 
   return response;
 }
@@ -90,26 +96,23 @@
 function listener(event, exec_state, event_data, data) {
   try {
   if (event == Debug.DebugEvent.Break) {
-    // Get the debug command processor.
-    var dcp = exec_state.debugCommandProcessor();
-
     // Test some illegal lookup requests.
-    lookupRequest(dcp, void 0, false);
-    lookupRequest(dcp, '{"handles":["a"]}', false);
-    lookupRequest(dcp, '{"handles":[-1]}', false);
+    lookupRequest(exec_state, void 0, false);
+    lookupRequest(exec_state, '{"handles":["a"]}', false);
+    lookupRequest(exec_state, '{"handles":[-1]}', false);
 
     // Evaluate and get some handles.
-    var handle_o = evaluateRequest(dcp, '{"expression":"o"}');
-    var handle_p = evaluateRequest(dcp, '{"expression":"p"}');
-    var handle_b = evaluateRequest(dcp, '{"expression":"a"}');
-    var handle_a = evaluateRequest(dcp, '{"expression":"b","frame":1}');
+    var handle_o = evaluateRequest(exec_state, '{"expression":"o"}');
+    var handle_p = evaluateRequest(exec_state, '{"expression":"p"}');
+    var handle_b = evaluateRequest(exec_state, '{"expression":"a"}');
+    var handle_a = evaluateRequest(exec_state, '{"expression":"b","frame":1}');
     assertEquals(handle_o, handle_a);
     assertEquals(handle_a, handle_b);
     assertFalse(handle_o == handle_p, "o and p have he same handle");
 
     var response;
     var count;
-    response = lookupRequest(dcp, '{"handles":[' + handle_o + ']}', true);
+    response = lookupRequest(exec_state, '{"handles":[' + handle_o + ']}', true);
     var obj = response.body[handle_o];
     assertTrue(!!obj, 'Object not found: ' + handle_o);
     assertEquals(handle_o, obj.handle);
@@ -127,20 +130,20 @@
       }
     }
     assertEquals(2, count, 'Either "o" or "p" not found');
-    response = lookupRequest(dcp, '{"handles":[' + handle_p + ']}', true);
+    response = lookupRequest(exec_state, '{"handles":[' + handle_p + ']}', true);
     obj = response.body[handle_p];
     assertTrue(!!obj, 'Object not found: ' + handle_p);
     assertEquals(handle_p, obj.handle);
 
     // Check handles for functions on the stack.
-    var handle_f = evaluateRequest(dcp, '{"expression":"f"}');
-    var handle_g = evaluateRequest(dcp, '{"expression":"g"}');
-    var handle_caller = evaluateRequest(dcp, '{"expression":"f.caller"}');
+    var handle_f = evaluateRequest(exec_state, '{"expression":"f"}');
+    var handle_g = evaluateRequest(exec_state, '{"expression":"g"}');
+    var handle_caller = evaluateRequest(exec_state, '{"expression":"f.caller"}');
 
     assertFalse(handle_f == handle_g, "f and g have he same handle");
     assertEquals(handle_g, handle_caller, "caller for f should be g");
 
-    response = lookupRequest(dcp, '{"handles":[' + handle_f + ']}', true);
+    response = lookupRequest(exec_state, '{"handles":[' + handle_f + ']}', true);
     obj = response.body[handle_f];
     assertEquals(handle_f, obj.handle);
 
@@ -151,14 +154,14 @@
       switch (obj.properties[i].name) {
         case 'name':
           var response_name;
-          response_name = lookupRequest(dcp, arguments, true);
+          response_name = lookupRequest(exec_state, arguments, true);
           assertEquals('string', response_name.body[ref].type);
           assertEquals("f", response_name.body[ref].value);
           count++;
           break;
         case 'length':
           var response_length;
-          response_length = lookupRequest(dcp, arguments, true);
+          response_length = lookupRequest(exec_state, arguments, true);
           assertEquals('number', response_length.body[ref].type);
           assertEquals(1, response_length.body[ref].value);
           count++;
@@ -179,7 +182,7 @@
     }
 
     var arguments = '{"handles":[' + refs.join(',') + ']}';
-    response = lookupRequest(dcp, arguments, true);
+    response = lookupRequest(exec_state, arguments, true);
     count = 0;
     for (i in obj.properties) {
       var ref = obj.properties[i].ref;
@@ -244,6 +247,6 @@
 p.p = p;
 g(o);
 
+assertFalse(exception, "exception in listener")
 // Make sure that the debug event listener vas invoked.
 assertTrue(listenerComplete, "listener did not run to completion: " + exception);
-assertFalse(exception, "exception in listener")
diff --git a/test/mjsunit/debug-mirror-cache.js b/test/mjsunit/debug-mirror-cache.js
index d15146f..5b85306 100644
--- a/test/mjsunit/debug-mirror-cache.js
+++ b/test/mjsunit/debug-mirror-cache.js
@@ -41,7 +41,7 @@
 Debug = debug.Debug
 
 listenerCallCount = 0;
-listenerExceptionCount = 0;
+listenerExceptions = [];
 
 
 function listener(event, exec_state, event_data, data) {
@@ -54,8 +54,8 @@
     assertEquals(0, debug.next_handle_, "Mirror cache not cleared");
     assertEquals(0, debug.mirror_cache_.length, "Mirror cache not cleared");
 
-    // Get the debug command processor.
-    var dcp = exec_state.debugCommandProcessor();
+    // Get the debug command processor in paused state.
+    var dcp = exec_state.debugCommandProcessor(false);
 
     // Make a backtrace request to create some mirrors.
     var json;
@@ -68,7 +68,7 @@
   }
   } catch (e) {
     print(e);
-    listenerExceptionCount++;
+    listenerExceptions.push(e);
   };
 };
 
@@ -79,7 +79,7 @@
 debugger;
 debugger;
 
+assertEquals([], listenerExceptions, "Exception in listener");
 // Make sure that the debug event listener vas invoked.
 assertEquals(2, listenerCallCount, "Listener not called");
-assertEquals(0, listenerExceptionCount, "Exception in listener");
 
diff --git a/test/mjsunit/debug-references.js b/test/mjsunit/debug-references.js
index 1fde1ac..452761c 100644
--- a/test/mjsunit/debug-references.js
+++ b/test/mjsunit/debug-references.js
@@ -66,14 +66,14 @@
  } else {
    assertFalse(response.success, request + ' -> ' + response.message);
  }
-  assertFalse(response.running, request + ' -> expected not running');
+  assertEquals(response.running, dcp.isRunning(), request + ' -> expected not running');
 }
 
 function listener(event, exec_state, event_data, data) {
   try {
   if (event == Debug.DebugEvent.Break) {
     // Get the debug command processor.
-    var dcp = exec_state.debugCommandProcessor();
+    var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
 
    // Test some illegal references requests.
    testRequest(dcp, void 0, false);
diff --git a/test/mjsunit/debug-scopes.js b/test/mjsunit/debug-scopes.js
index e87cbb7..af29df9 100644
--- a/test/mjsunit/debug-scopes.js
+++ b/test/mjsunit/debug-scopes.js
@@ -92,7 +92,7 @@
   }
   
   // Get the debug command processor.
-  var dcp = exec_state.debugCommandProcessor();
+  var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
   
   // Send a scopes request and check the result.
   var json;
@@ -155,7 +155,7 @@
   assertEquals(count, scope_size);
 
   // Get the debug command processor.
-  var dcp = exec_state.debugCommandProcessor();
+  var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
   
   // Send a scope request for information on a single scope and check the
   // result.
diff --git a/test/mjsunit/debug-scripts-request.js b/test/mjsunit/debug-scripts-request.js
index 80b3bce..41bff0e 100644
--- a/test/mjsunit/debug-scripts-request.js
+++ b/test/mjsunit/debug-scripts-request.js
@@ -60,7 +60,7 @@
   try {
   if (event == Debug.DebugEvent.Break) {
     // Get the debug command processor.
-    var dcp = exec_state.debugCommandProcessor();
+    var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
 
     // Test illegal scripts requests.
     testArguments(dcp, '{"types":"xx"}', false);
diff --git a/test/mjsunit/debug-setbreakpoint.js b/test/mjsunit/debug-setbreakpoint.js
index f8d9b15..08492b4 100644
--- a/test/mjsunit/debug-setbreakpoint.js
+++ b/test/mjsunit/debug-setbreakpoint.js
@@ -69,7 +69,7 @@
   try {
   if (event == Debug.DebugEvent.Break) {
     // Get the debug command processor.
-    var dcp = exec_state.debugCommandProcessor();
+    var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
 
     // Test some illegal setbreakpoint requests.
     var request = '{' + base_request + '}'
diff --git a/test/mjsunit/debug-suspend.js b/test/mjsunit/debug-suspend.js
new file mode 100644
index 0000000..73a2e8c
--- /dev/null
+++ b/test/mjsunit/debug-suspend.js
@@ -0,0 +1,96 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+// Simple function which stores the last debug event.
+listenerComplete = false;
+exception = false;
+
+var base_backtrace_request = '"seq":0,"type":"request","command":"backtrace"'
+var base_suspend_request = '"seq":0,"type":"request","command":"suspend"'
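+// Requests are sent as plain JSON; the running flag in each response
+// reflects the processor's state, which the suspend command sets to false.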
+
+function safeEval(code) {
+  try {
+    return eval('(' + code + ')');
+  } catch (e) {
+    assertEquals(void 0, e);
+    return undefined;
+  }
+}
+
+function testArguments(exec_state) {
+  // Get the debug command processor in running state.
+  var dcp = exec_state.debugCommandProcessor(true);
+
+  assertTrue(dcp.isRunning());
+
+  var backtrace_request = '{' + base_backtrace_request + '}'
+  var backtrace_response = safeEval(dcp.processDebugJSONRequest(backtrace_request));
+
+  assertTrue(backtrace_response.success);
+
+  assertTrue(backtrace_response.running, backtrace_request + ' -> expected running');
+
+  assertTrue(dcp.isRunning());
+
+  var suspend_request = '{' + base_suspend_request + '}'
+  var suspend_response = safeEval(dcp.processDebugJSONRequest(suspend_request));
+
+  assertTrue(suspend_response.success);
+
+  assertFalse(suspend_response.running, suspend_request + ' -> expected not running');
+
+  assertFalse(dcp.isRunning());
+}
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Break) {
+
+      // Test simple suspend request.
+      testArguments(exec_state);
+
+      // Indicate that all was processed.
+      listenerComplete = true;
+    }
+  } catch (e) {
+    exception = e
+  };
+};
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+// Stop the debugger and check that the suspend command changes the running flag.
+debugger;
+
+assertFalse(exception, "exception in listener")
+// Make sure that the debug event listener was invoked.
+assertTrue(listenerComplete, "listener did not run to completion");
diff --git a/test/mjsunit/debug-version.js b/test/mjsunit/debug-version.js
new file mode 100644
index 0000000..b1bc1e8
--- /dev/null
+++ b/test/mjsunit/debug-version.js
@@ -0,0 +1,90 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+// Simple function which stores the last debug event.
+listenerComplete = false;
+exception = false;
+
+var base_version_request = '"seq":0,"type":"request","command":"version"'
+
+function safeEval(code) {
+  try {
+    return eval('(' + code + ')');
+  } catch (e) {
+    assertEquals(void 0, e);
+    return undefined;
+  }
+}
+
+function testArguments(exec_state) {
+  // Get the debug command processor in running state.
+  var dcp = exec_state.debugCommandProcessor(true);
+
+  assertTrue(dcp.isRunning());
+
+  var version_request = '{' + base_version_request + '}'
+  var version_response = safeEval(dcp.processDebugJSONRequest(version_request));
+
+  assertTrue(version_response.success);
+
+  var version_string = version_response.body.V8Version;
+
+  assertTrue(!!version_string, version_request + ' -> expected version string');
+
+  var version_pattern = /^\d*\.\d*\.\d*/;
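+  // Version strings look like "major.minor.build", e.g. "1.3.16".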
+
+  assertTrue(!!(version_string.match(version_pattern)), "unexpected format of version: " + version_string);
+}
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Break) {
+
+      // Test simple version request.
+      testArguments(exec_state);
+
+      // Indicate that all was processed.
+      listenerComplete = true;
+    }
+  } catch (e) {
+    exception = e
+  };
+};
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+// Stop the debugger so the listener can issue the version request.
+debugger;
+
+assertFalse(exception, "exception in listener")
+// Make sure that the debug event listener was invoked.
+assertTrue(listenerComplete, "listener did not run to completion");
diff --git a/test/mjsunit/div-mod.js b/test/mjsunit/div-mod.js
index a8a19b3..b3c77e1 100644
--- a/test/mjsunit/div-mod.js
+++ b/test/mjsunit/div-mod.js
@@ -86,3 +86,72 @@
 for (var i = 0; i < divisors.length; i++) {
   run_tests_for(divisors[i]);
 }
+
+// Test extreme corner cases of modulo.
+
+// Computes the modulo by slow but lossless operations.
+function compute_mod(dividend, divisor) {
+  // Return NaN if either operand is NaN, if divisor is 0 or
+  // dividend is an infinity. Return dividend if divisor is an infinity.
+  if (isNaN(dividend) || isNaN(divisor) || divisor == 0) { return NaN; }
+  var sign = 1;
+  if (dividend < 0) { dividend = -dividend; sign = -1; }
+  if (dividend == Infinity) { return NaN; }
+  if (divisor < 0) { divisor = -divisor; }
+  if (divisor == Infinity) { return sign * dividend; }
+  function rec_mod(a, b) {
+    // Subtracts the largest possible multiple of b from a, found by
+    // recursive doubling.
+    if (a >= b) {
+      a = rec_mod(a, 2 * b);
+      if (a >= b) { a -= b; }
+    }
+    return a;
+  }
+  return sign * rec_mod(dividend, divisor);
+}
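+// For example, compute_mod(7, 2.5) doubles the divisor up to 10, then on
+// the way back subtracts 5 (7 - 5 = 2) and stops since 2 < 2.5, matching
+// 7 % 2.5 == 2.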
+
+(function () {
+  var large_non_smi = 1234567891234.12245;
+  var small_non_smi = 43.2367243;
+  var repeating_decimal = 0.3;
+  var finite_decimal = 0.5;
+  var smi = 43;
+  var power_of_two = 64;
+  var min_normal = Number.MIN_VALUE * Math.pow(2, 52);
+  var max_denormal = Number.MIN_VALUE * (Math.pow(2, 52) - 1);
+
+  // All combinations of NaN, Infinity, normal, denormal and zero.
+  var example_numbers = [
+    NaN,
+    0,
+    Number.MIN_VALUE,
+    3 * Number.MIN_VALUE,
+    max_denormal,
+    min_normal,
+    repeating_decimal,
+    finite_decimal,
+    smi,
+    power_of_two,
+    small_non_smi,
+    large_non_smi,
+    Number.MAX_VALUE,
+    Infinity
+  ];
+
+  function doTest(a, b) {
+    var exp = compute_mod(a, b);
+    var act = a % b;
+    assertEquals(exp, act, a + " % " + b);
+  }
+
+  for (var i = 0; i < example_numbers.length; i++) {
+    for (var j = 0; j < example_numbers.length; j++) {
+      var a = example_numbers[i];
+      var b = example_numbers[j];
+      doTest(a,b);
+      doTest(-a,b);
+      doTest(a,-b);
+      doTest(-a,-b);
+    }
+  }
+})()
diff --git a/test/mjsunit/for-in.js b/test/mjsunit/for-in.js
index dfe721d..e3436ff 100644
--- a/test/mjsunit/for-in.js
+++ b/test/mjsunit/for-in.js
@@ -31,21 +31,21 @@
   return array.sort();
 }
 
-assertEquals(0, props({}).length);
-assertEquals(1, props({x:1}).length);
-assertEquals(2, props({x:1, y:2}).length);
+assertEquals(0, props({}).length, "olen0");
+assertEquals(1, props({x:1}).length, "olen1");
+assertEquals(2, props({x:1, y:2}).length, "olen2");
 
-assertArrayEquals(["x"], props({x:1}));
-assertArrayEquals(["x", "y"], props({x:1, y:2}));
-assertArrayEquals(["x", "y", "zoom"], props({x:1, y:2, zoom:3}));
+assertArrayEquals(["x"], props({x:1}), "x");
+assertArrayEquals(["x", "y"], props({x:1, y:2}), "xy");
+assertArrayEquals(["x", "y", "zoom"], props({x:1, y:2, zoom:3}), "xyzoom");
 
-assertEquals(0, props([]).length);
-assertEquals(1, props([1]).length);
-assertEquals(2, props([1,2]).length);
+assertEquals(0, props([]).length, "alen0");
+assertEquals(1, props([1]).length, "alen1");
+assertEquals(2, props([1,2]).length, "alen2");
 
-assertArrayEquals(["0"], props([1]));
-assertArrayEquals(["0", "1"], props([1,2]));
-assertArrayEquals(["0", "1", "2"], props([1,2,3]));
+assertArrayEquals(["0"], props([1]), "0");
+assertArrayEquals(["0", "1"], props([1,2]), "01");
+assertArrayEquals(["0", "1", "2"], props([1,2,3]), "012");
 
 var o = {};
 var a = [];
@@ -54,33 +54,33 @@
   a.push(s);
   o[s] = i;
 }
-assertArrayEquals(a, props(o));
+assertArrayEquals(a, props(o), "charcodes");
 
 var a = [];
-assertEquals(0, props(a).length);
+assertEquals(0, props(a).length, "proplen0");
 a[Math.pow(2,30)-1] = 0;
-assertEquals(1, props(a).length);
+assertEquals(1, props(a).length, "proplen1");
 a[Math.pow(2,31)-1] = 0;
-assertEquals(2, props(a).length);
+assertEquals(2, props(a).length, "proplen2");
 a[1] = 0;
-assertEquals(3, props(a).length);
+assertEquals(3, props(a).length, "proplen3");
 
 for (var hest = 'hest' in {}) { }
-assertEquals('hest', hest);
+assertEquals('hest', hest, "empty-no-override");
 
 var result = '';
 for (var p in {a : [0], b : 1}) { result += p; }
-assertEquals('ab', result);
+assertEquals('ab', result, "ab");
 
 var result = '';
 for (var p in {a : {v:1}, b : 1}) { result += p; }
-assertEquals('ab', result);
+assertEquals('ab', result, "ab-nodeep");
 
 var result = '';
 for (var p in { get a() {}, b : 1}) { result += p; }
-assertEquals('ab', result);
+assertEquals('ab', result, "abget");
 
 var result = '';
 for (var p in { get a() {}, set a(x) {}, b : 1}) { result += p; }
-assertEquals('ab', result);
+assertEquals('ab', result, "abgetset");
 
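The only substantive change in this file is threading a label through each assertion; mjsunit's assert helpers take an optional trailing name (assertEquals(expected, found, name_opt)) that is echoed in the failure message, so a broken case identifies itself:

```javascript
// On failure this reports the label "olen2" along with the values.
assertEquals(2, props({x:1, y:2}).length, "olen2");
```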
diff --git a/test/mjsunit/fuzz-natives.js b/test/mjsunit/fuzz-natives.js
index c653b18..cdf58a5 100644
--- a/test/mjsunit/fuzz-natives.js
+++ b/test/mjsunit/fuzz-natives.js
@@ -127,6 +127,7 @@
   "IS_VAR": true,
   "ResolvePossiblyDirectEval": true,
   "Log": true,
+  "DeclareGlobals": true,
 
   "CollectStackTrace": true
 };
diff --git a/test/mjsunit/regress/regress-1081309.js b/test/mjsunit/regress/regress-1081309.js
index a771ac0..009ede1 100644
--- a/test/mjsunit/regress/regress-1081309.js
+++ b/test/mjsunit/regress/regress-1081309.js
@@ -69,7 +69,7 @@
     // 0: [anonymous]
     
     // Get the debug command processor.
-    var dcp = exec_state.debugCommandProcessor();
+    var dcp = exec_state.debugCommandProcessor(false);
 
     // Get the backtrace.
     var json;
@@ -105,6 +105,6 @@
   // Ignore the exception "Cannot call method 'x' of undefined"
 }
 
+assertFalse(exception, "exception in listener", exception)
 // Make sure that the debug event listener was invoked.
 assertTrue(listenerCalled, "listener not called");
-assertFalse(exception, "exception in listener", exception)
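The new argument to debugCommandProcessor is presumably the processor's is-running flag; with it the test's JSON round trip looks roughly like the sketch below (field names follow the standard V8 debug protocol; treat the details as illustrative):

```javascript
var dcp = exec_state.debugCommandProcessor(false);  // false: VM treated as paused
var request = '{"seq":0,"type":"request","command":"backtrace"}';
var response = JSON.parse(dcp.processDebugJSONRequest(request));
assertTrue(response.success);  // the frames arrive under response.body
```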
diff --git a/test/mjsunit/regress/regress-1199401.js b/test/mjsunit/regress/regress-1199401.js
index 792faea..cc7985d 100644
--- a/test/mjsunit/regress/regress-1199401.js
+++ b/test/mjsunit/regress/regress-1199401.js
@@ -27,35 +27,49 @@
 
 // Ensure that we can correctly change the sign of the most negative smi.
 
-assertEquals(1073741824, -1073741824 * -1);
-assertEquals(1073741824, -1073741824 / -1);
-assertEquals(1073741824, -(-1073741824));
-assertEquals(1073741824, 0 - (-1073741824));
+// Possible Smi ranges.
+var ranges = [{min: -1073741824, max: 1073741823, bits: 31},
+              {min: -2147483648, max: 2147483647, bits: 32}];
 
-var min_smi = -1073741824;
+for (var i = 0; i < ranges.length; i++) {
+  var range = ranges[i];
+  var min_smi = range.min;
+  var max_smi = range.max;
+  var bits = range.bits;
+  var name = bits + "-bit";
 
-assertEquals(1073741824, min_smi * -1);
-assertEquals(1073741824, min_smi / -1);
-assertEquals(1073741824, -min_smi);
-assertEquals(1073741824, 0 - min_smi);
+  var result = max_smi + 1;
 
-var zero = 0;
-var minus_one = -1;
+  // Min smi as literal
+  assertEquals(result, eval(min_smi + " * -1"), name + "-litconmult");
+  assertEquals(result, eval(min_smi + " / -1"), name + "-litcondiv");
+  assertEquals(result, eval("-(" + min_smi + ")"), name + "-litneg");
+  assertEquals(result, eval("0 - (" + min_smi + ")")), name + "-conlitsub";
 
-assertEquals(1073741824, min_smi * minus_one);
-assertEquals(1073741824, min_smi / minus_one);
-assertEquals(1073741824, -min_smi);
-assertEquals(1073741824, zero - min_smi);
+  // As variable:
+  assertEquals(result, min_smi * -1, name + "-varconmult");
+  assertEquals(result, min_smi / -1, name + "-varcondiv");
+  assertEquals(result, -min_smi, name + "-varneg");
+  assertEquals(result, 0 - min_smi, name + "-convarsub");
 
-assertEquals(1073741824, -1073741824 * minus_one);
-assertEquals(1073741824, -1073741824 / minus_one);
-assertEquals(1073741824, -(-1073741824));
-assertEquals(1073741824, zero - (-1073741824));
+  // Only variables:
+  var zero = 0;
+  var minus_one = -1;
 
-var half_min_smi = -(1<<15);
-var half_max_smi = (1<<15);
+  assertEquals(result, min_smi * minus_one, name + "-varvarmult");
+  assertEquals(result, min_smi / minus_one, name + "-varvardiv");
+  assertEquals(result, zero - min_smi, name + "-varvarsub");
 
-assertEquals(1073741824, -half_min_smi * half_max_smi);
-assertEquals(1073741824, half_min_smi * -half_max_smi);
-assertEquals(1073741824, half_max_smi * -half_min_smi);
-assertEquals(1073741824, -half_max_smi * half_min_smi);
+  // Constants as variables
+  assertEquals(result, eval(min_smi + " * minus_one"), name + "-litvarmult");
+  assertEquals(result, eval(min_smi + " / minus_one"), name + "-litvarmdiv");
+  assertEquals(result, eval("0 - (" + min_smi + ")"), name + "-varlitsub");
+
+  var half_min_smi = -(1 << (bits >> 1));
+  var half_max_smi = 1 << ((bits - 1) >> 1);
+
+  assertEquals(max_smi + 1, -half_min_smi * half_max_smi, name + "-half1");
+  assertEquals(max_smi + 1, half_min_smi * -half_max_smi, name + "-half2");
+  assertEquals(max_smi + 1, half_max_smi * -half_min_smi, name + "-half3");
+  assertEquals(max_smi + 1, -half_max_smi * half_min_smi, name + "-half4");
+}
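The rewrite parameterizes the old hard-coded 2^30 constants over both smi layouts, since this release moves x64 smis to a 32-bit payload while 32-bit targets keep 31-bit smis. The invariant the loop exercises, in isolation:

```javascript
// For a two's-complement smi range, -min equals max + 1, which does not
// fit in a smi and must be materialized as a heap number.
var range31 = {min: -Math.pow(2, 30), max: Math.pow(2, 30) - 1};  // 31-bit smis
var range32 = {min: -Math.pow(2, 31), max: Math.pow(2, 31) - 1};  // 32-bit smis
assertEquals(range31.max + 1, -range31.min);
assertEquals(range32.max + 1, -range32.min);
```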
diff --git a/test/mjsunit/regress/regress-475.js b/test/mjsunit/regress/regress-475.js
new file mode 100644
index 0000000..4b7dbbd
--- /dev/null
+++ b/test/mjsunit/regress/regress-475.js
@@ -0,0 +1,28 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+assertEquals(1, (function (){return 1|-1%1})());
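The one-liner distills the regression: -1 % 1 must produce -0, and the ToInt32 conversion inside the bitwise OR must map -0 to 0, making the whole expression 1. The -0 intermediate can be observed directly:

```javascript
// Dividing into 1 is the ES3-era way to detect a negative zero.
assertEquals(-Infinity, 1 / (-1 % 1));
assertEquals(1, 1 | -0);  // ToInt32(-0) === 0
```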
diff --git a/test/mjsunit/regress/regress-483.js b/test/mjsunit/regress/regress-483.js
new file mode 100644
index 0000000..db93f59
--- /dev/null
+++ b/test/mjsunit/regress/regress-483.js
@@ -0,0 +1,35 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function X() {
+  this.x = this.x.x;
+}
+
+X.prototype.x = {x:1}
+
+new X()
+
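The constructor reads this.x before any own property exists, so the load must walk up to X.prototype and find the {x:1} object; the regression was presumably in that load-during-construction path. The observable outcome, for reference:

```javascript
var obj = new X();
assertEquals(1, obj.x);            // own property now shadows the prototype
assertEquals(1, X.prototype.x.x);  // prototype object is untouched
```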
diff --git a/test/mjsunit/smi-negative-zero.js b/test/mjsunit/smi-negative-zero.js
index 719ee49..6906443 100644
--- a/test/mjsunit/smi-negative-zero.js
+++ b/test/mjsunit/smi-negative-zero.js
@@ -37,64 +37,64 @@
 
 // variable op variable
 
-assertEquals(one / (-zero), -Infinity, "one / -0 I");
+assertEquals(-Infinity, one / (-zero), "one / -0 I");
 
-assertEquals(one / (zero * minus_one), -Infinity, "one / -1");
-assertEquals(one / (minus_one * zero), -Infinity, "one / -0 II");
-assertEquals(one / (zero * zero), Infinity, "one / 0 I");
-assertEquals(one / (minus_one * minus_one), 1, "one / 1");
+assertEquals(-Infinity, one / (zero * minus_one), "one / -1");
+assertEquals(-Infinity, one / (minus_one * zero), "one / -0 II");
+assertEquals(Infinity, one / (zero * zero), "one / 0 I");
+assertEquals(1, one / (minus_one * minus_one), "one / 1");
 
-assertEquals(one / (zero / minus_one), -Infinity, "one / -0 III");
-assertEquals(one / (zero / one), Infinity, "one / 0 II");
+assertEquals(-Infinity, one / (zero / minus_one), "one / -0 III");
+assertEquals(Infinity, one / (zero / one), "one / 0 II");
 
-assertEquals(one / (minus_four % two), -Infinity, "foo1");
-assertEquals(one / (minus_four % minus_two), -Infinity, "foo2");
-assertEquals(one / (four % two), Infinity, "foo3");
-assertEquals(one / (four % minus_two), Infinity, "foo4");
+assertEquals(-Infinity, one / (minus_four % two), "foo1");
+assertEquals(-Infinity, one / (minus_four % minus_two), "foo2");
+assertEquals(Infinity, one / (four % two), "foo3");
+assertEquals(Infinity, one / (four % minus_two), "foo4");
 
 // literal op variable
 
-assertEquals(one / (0 * minus_one), -Infinity, "bar1");
-assertEquals(one / (-1 * zero), -Infinity, "bar2");
-assertEquals(one / (0 * zero), Infinity, "bar3");
-assertEquals(one / (-1 * minus_one), 1, "bar4");
+assertEquals(-Infinity, one / (0 * minus_one), "bar1");
+assertEquals(-Infinity, one / (-1 * zero), "bar2");
+assertEquals(Infinity, one / (0 * zero), "bar3");
+assertEquals(1, one / (-1 * minus_one), "bar4");
 
-assertEquals(one / (0 / minus_one), -Infinity, "baz1");
-assertEquals(one / (0 / one), Infinity, "baz2");
+assertEquals(-Infinity, one / (0 / minus_one), "baz1");
+assertEquals(Infinity, one / (0 / one), "baz2");
 
-assertEquals(one / (-4 % two), -Infinity, "baz3");
-assertEquals(one / (-4 % minus_two), -Infinity, "baz4");
-assertEquals(one / (4 % two), Infinity, "baz5");
-assertEquals(one / (4 % minus_two), Infinity, "baz6");
+assertEquals(-Infinity, one / (-4 % two), "baz3");
+assertEquals(-Infinity, one / (-4 % minus_two), "baz4");
+assertEquals(Infinity, one / (4 % two), "baz5");
+assertEquals(Infinity, one / (4 % minus_two), "baz6");
 
 // variable op literal
 
-assertEquals(one / (zero * -1), -Infinity, "fizz1");
-assertEquals(one / (minus_one * 0), -Infinity, "fizz2");
-assertEquals(one / (zero * 0), Infinity, "fizz3");
-assertEquals(one / (minus_one * -1), 1, "fizz4");
+assertEquals(-Infinity, one / (zero * -1), "fizz1");
+assertEquals(-Infinity, one / (minus_one * 0), "fizz2");
+assertEquals(Infinity, one / (zero * 0), "fizz3");
+assertEquals(1, one / (minus_one * -1), "fizz4");
 
-assertEquals(one / (zero / -1), -Infinity, "buzz1");
-assertEquals(one / (zero / 1), Infinity, "buzz2");
+assertEquals(-Infinity, one / (zero / -1), "buzz1");
+assertEquals(Infinity, one / (zero / 1), "buzz2");
 
-assertEquals(one / (minus_four % 2), -Infinity, "buzz3");
-assertEquals(one / (minus_four % -2), -Infinity, "buzz4");
-assertEquals(one / (four % 2), Infinity, "buzz5");
-assertEquals(one / (four % -2), Infinity, "buzz6");
+assertEquals(-Infinity, one / (minus_four % 2), "buzz3");
+assertEquals(-Infinity, one / (minus_four % -2), "buzz4");
+assertEquals(Infinity, one / (four % 2), "buzz5");
+assertEquals(Infinity, one / (four % -2), "buzz6");
 
 // literal op literal
 
-assertEquals(one / (-0), -Infinity, "fisk1");
+assertEquals(-Infinity, one / (-0), "fisk1");
 
-assertEquals(one / (0 * -1), -Infinity, "fisk2");
-assertEquals(one / (-1 * 0), -Infinity, "fisk3");
-assertEquals(one / (0 * 0), Infinity, "fisk4");
-assertEquals(one / (-1 * -1), 1, "fisk5");
+assertEquals(-Infinity, one / (0 * -1), "fisk2");
+assertEquals(-Infinity, one / (-1 * 0), "fisk3");
+assertEquals(Infinity, one / (0 * 0), "fisk4");
+assertEquals(1, one / (-1 * -1), "fisk5");
 
-assertEquals(one / (0 / -1), -Infinity, "hest1");
-assertEquals(one / (0 / 1), Infinity, "hest2");
+assertEquals(-Infinity, one / (0 / -1), "hest1");
+assertEquals(Infinity, one / (0 / 1), "hest2");
 
-assertEquals(one / (-4 % 2), -Infinity, "fiskhest1");
-assertEquals(one / (-4 % -2), -Infinity, "fiskhest2");
-assertEquals(one / (4 % 2), Infinity, "fiskhest3");
-assertEquals(one / (4 % -2), Infinity, "fiskhest4");
+assertEquals(-Infinity, one / (-4 % 2), "fiskhest1");
+assertEquals(-Infinity, one / (-4 % -2), "fiskhest2");
+assertEquals(Infinity, one / (4 % 2), "fiskhest3");
+assertEquals(Infinity, one / (4 % -2), "fiskhest4");
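All of these assertions funnel the result through one / (...) because 0 === -0 under every JavaScript equality operator; only the sign of the infinity produced by division tells the two zeros apart. The argument swaps above are purely fixing the (expected, found) order. The core idiom:

```javascript
assertTrue(0 === -0);             // the zeros are indistinguishable directly
assertEquals(Infinity, 1 / 0);
assertEquals(-Infinity, 1 / -0);  // ...but not when divided into one
```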
diff --git a/test/mjsunit/testcfg.py b/test/mjsunit/testcfg.py
index e3f3fcd..49064b1 100644
--- a/test/mjsunit/testcfg.py
+++ b/test/mjsunit/testcfg.py
@@ -114,7 +114,8 @@
     bugs = [current_path + ['bugs', t] for t in self.Ls(join(self.root, 'bugs'))]
     third_party = [current_path + ['third_party', t] for t in self.Ls(join(self.root, 'third_party'))]
     tools = [current_path + ['tools', t] for t in self.Ls(join(self.root, 'tools'))]
-    all_tests = mjsunit + regress + bugs + third_party + tools
+    compiler = [current_path + ['compiler', t] for t in self.Ls(join(self.root, 'compiler'))]
+    all_tests = mjsunit + regress + bugs + third_party + tools + compiler
     result = []
     for test in all_tests:
       if self.Contains(path, test):
diff --git a/test/mjsunit/third_party/array-isarray.js b/test/mjsunit/third_party/array-isarray.js
new file mode 100644
index 0000000..0fc42a3
--- /dev/null
+++ b/test/mjsunit/third_party/array-isarray.js
@@ -0,0 +1,48 @@
+// Copyright (c) 2009 Apple Computer, Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// 3. Neither the name of the copyright holder(s) nor the names of any
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Based on LayoutTests/fast/js/resources/Array-isArray.js
+
+assertTrue(Array.isArray([]));
+assertTrue(Array.isArray(new Array));
+assertTrue(Array.isArray(Array()));
+assertTrue(Array.isArray('abc'.match(/(a)*/g)));
+assertFalse((function(){ return Array.isArray(arguments); })());
+assertFalse(Array.isArray());
+assertFalse(Array.isArray(null));
+assertFalse(Array.isArray(undefined));
+assertFalse(Array.isArray(true));
+assertFalse(Array.isArray(false));
+assertFalse(Array.isArray('a string'));
+assertFalse(Array.isArray({}));
+assertFalse(Array.isArray({length: 5}));
+assertFalse(Array.isArray({__proto__: Array.prototype, length:1, 0:1, 1:2}));
+
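The final two cases pin down that Array.isArray keys off the internal class, not the prototype chain or an array-like shape. A common userland fallback that agrees with it on every case in this file (a sketch, not part of the test):

```javascript
// [[Class]]-based check; array-likes and Array.prototype impostors fail it.
function isArrayFallback(o) {
  return Object.prototype.toString.call(o) === '[object Array]';
}
assertTrue(isArrayFallback([]));
assertFalse(isArrayFallback({__proto__: Array.prototype, length: 1, 0: 1}));
```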
diff --git a/test/mjsunit/third_party/string-trim.js b/test/mjsunit/third_party/string-trim.js
new file mode 100644
index 0000000..234dff6
--- /dev/null
+++ b/test/mjsunit/third_party/string-trim.js
@@ -0,0 +1,107 @@
+// Copyright (c) 2009 Apple Computer, Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// 3. Neither the name of the copyright holder(s) nor the names of any
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Based on LayoutTests/fast/js/script-tests/string-trim.js
+
+// References to trim(), trimLeft() and trimRight() functions for 
+// testing Function's *.call() and *.apply() methods.
+
+var trim            = String.prototype.trim;
+var trimLeft        = String.prototype.trimLeft;
+var trimRight       = String.prototype.trimRight;
+
+var testString      = 'foo bar';
+var trimString      = '';
+var leftTrimString  = '';
+var rightTrimString = '';
+var wsString        = '';
+
+var whitespace      = [
+  {s : '\u0009', t : 'HORIZONTAL TAB'},
+  {s : '\u000A', t : 'LINE FEED OR NEW LINE'},
+  {s : '\u000B', t : 'VERTICAL TAB'},
+  {s : '\u000C', t : 'FORMFEED'},
+  {s : '\u000D', t : 'CARRIAGE RETURN'},
+  {s : '\u0020', t : 'SPACE'},
+  {s : '\u00A0', t : 'NO-BREAK SPACE'},
+  {s : '\u2000', t : 'EN QUAD'},
+  {s : '\u2001', t : 'EM QUAD'},
+  {s : '\u2002', t : 'EN SPACE'},
+  {s : '\u2003', t : 'EM SPACE'},
+  {s : '\u2004', t : 'THREE-PER-EM SPACE'},
+  {s : '\u2005', t : 'FOUR-PER-EM SPACE'},
+  {s : '\u2006', t : 'SIX-PER-EM SPACE'},
+  {s : '\u2007', t : 'FIGURE SPACE'},
+  {s : '\u2008', t : 'PUNCTUATION SPACE'},
+  {s : '\u2009', t : 'THIN SPACE'},
+  {s : '\u200A', t : 'HAIR SPACE'},
+  {s : '\u3000', t : 'IDEOGRAPHIC SPACE'},
+  {s : '\u2028', t : 'LINE SEPARATOR'},
+  {s : '\u2029', t : 'PARAGRAPH SEPARATOR'},
+  {s : '\u200B', t : 'ZERO WIDTH SPACE (category Cf)'}
+];
+
+for (var i = 0; i < whitespace.length; i++) {
+  assertEquals(whitespace[i].s.trim(), '');
+  assertEquals(whitespace[i].s.trimLeft(), '');
+  assertEquals(whitespace[i].s.trimRight(), '');
+  wsString += whitespace[i].s;
+}
+
+trimString      = wsString   + testString + wsString;
+leftTrimString  = testString + wsString;  // Trimmed from the left.
+rightTrimString = wsString   + testString;  // Trimmed from the right.
+
+assertEquals(wsString.trim(),      '');
+assertEquals(wsString.trimLeft(),  '');
+assertEquals(wsString.trimRight(), '');
+
+assertEquals(trimString.trim(),      testString);
+assertEquals(trimString.trimLeft(),  leftTrimString);
+assertEquals(trimString.trimRight(), rightTrimString);
+
+assertEquals(leftTrimString.trim(),      testString);
+assertEquals(leftTrimString.trimLeft(),  leftTrimString);
+assertEquals(leftTrimString.trimRight(), testString);
+
+assertEquals(rightTrimString.trim(),      testString);
+assertEquals(rightTrimString.trimLeft(),  testString);
+assertEquals(rightTrimString.trimRight(), rightTrimString);
+
+var testValues = [0, Infinity, NaN, true, false, ({}), ['an','array'],
+  ({toString:function(){return 'wibble'}})
+];
+
+for (var i = 0; i < testValues.length; i++) {
+  assertEquals(trim.call(testValues[i]), String(testValues[i]));
+  assertEquals(trimLeft.call(testValues[i]), String(testValues[i]));
+  assertEquals(trimRight.call(testValues[i]), String(testValues[i]));
+}
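The last loop depends on trim() applying ToString to its receiver, which is why Function.call on numbers, booleans, and plain objects matches String(value). Two concrete instances using the references captured at the top of the file:

```javascript
assertEquals('42', trim.call(42));  // number receiver is coerced first
assertEquals('wibble',
             trim.call({toString: function() { return ' wibble '; }}));
```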
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index 46a00f4..5e2bb88 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -256,14 +256,16 @@
         '../../src/execution.h',
         '../../src/factory.cc',
         '../../src/factory.h',
+        '../../src/fast-codegen.cc',
+        '../../src/fast-codegen.h',
         '../../src/flag-definitions.h',
         '../../src/flags.cc',
         '../../src/flags.h',
+        '../../src/frame-element.cc',
+        '../../src/frame-element.h',
         '../../src/frames-inl.h',
         '../../src/frames.cc',
         '../../src/frames.h',
-        '../../src/frame-element.cc',
-        '../../src/frame-element.h',
         '../../src/func-name-inferrer.cc',
         '../../src/func-name-inferrer.h',
         '../../src/global-handles.cc',
@@ -291,11 +293,12 @@
         '../../src/jsregexp.h',
         '../../src/list-inl.h',
         '../../src/list.h',
-        '../../src/log.cc',
+        '../../src/location.h',
         '../../src/log-inl.h',
-        '../../src/log.h',
         '../../src/log-utils.cc',
         '../../src/log-utils.h',
+        '../../src/log.cc',
+        '../../src/log.h',
         '../../src/macro-assembler.h',
         '../../src/mark-compact.cc',
         '../../src/mark-compact.h',
@@ -394,6 +397,7 @@
             '../../src/arm/cpu-arm.cc',
             '../../src/arm/debug-arm.cc',
             '../../src/arm/disasm-arm.cc',
+            '../../src/arm/fast-codegen-arm.cc',
             '../../src/arm/frames-arm.cc',
             '../../src/arm/frames-arm.h',
             '../../src/arm/ic-arm.cc',
@@ -423,6 +427,7 @@
             '../../src/ia32/cpu-ia32.cc',
             '../../src/ia32/debug-ia32.cc',
             '../../src/ia32/disasm-ia32.cc',
+            '../../src/ia32/fast-codegen-ia32.cc',
             '../../src/ia32/frames-ia32.cc',
             '../../src/ia32/frames-ia32.h',
             '../../src/ia32/ic-ia32.cc',
@@ -451,6 +456,7 @@
             '../../src/x64/cpu-x64.cc',
             '../../src/x64/debug-x64.cc',
             '../../src/x64/disasm-x64.cc',
+            '../../src/x64/fast-codegen-x64.cc',
             '../../src/x64/frames-x64.cc',
             '../../src/x64/frames-x64.h',
             '../../src/x64/ic-x64.cc',
diff --git a/tools/test.py b/tools/test.py
index 3a60c59..586925a 100755
--- a/tools/test.py
+++ b/tools/test.py
@@ -326,6 +326,7 @@
     self.timed_out = timed_out
     self.stdout = stdout
     self.stderr = stderr
+    self.failed = None
 
 
 class TestCase(object):
@@ -333,7 +334,6 @@
   def __init__(self, context, path):
     self.path = path
     self.context = context
-    self.failed = None
     self.duration = None
 
   def IsNegative(self):
@@ -343,9 +343,9 @@
     return cmp(other.duration, self.duration)
 
   def DidFail(self, output):
-    if self.failed is None:
-      self.failed = self.IsFailureOutput(output)
-    return self.failed
+    if output.failed is None:
+      output.failed = self.IsFailureOutput(output)
+    return output.failed
 
   def IsFailureOutput(self, output):
     return output.exit_code != 0
diff --git a/tools/tickprocessor.js b/tools/tickprocessor.js
index 84f0eea..fd23987 100644
--- a/tools/tickprocessor.js
+++ b/tools/tickprocessor.js
@@ -75,7 +75,18 @@
       'tick': { parsers: [this.createAddressParser('code'),
           this.createAddressParser('stack'), parseInt, 'var-args'],
           processor: this.processTick, backrefs: true },
+      'heap-sample-begin': { parsers: [null, null, parseInt],
+          processor: this.processHeapSampleBegin },
+      'heap-sample-end': { parsers: [null, null],
+          processor: this.processHeapSampleEnd },
+      'heap-js-prod-item': { parsers: [null, 'var-args'],
+          processor: this.processJSProducer, backrefs: true },
+      // Ignored events.
       'profiler': null,
+      'heap-sample-stats': null,
+      'heap-sample-item': null,
+      'heap-js-cons-item': null,
+      'heap-js-ret-item': null,
       // Obsolete row types.
       'code-allocate': null,
       'begin-code-region': null,
@@ -113,6 +124,9 @@
   // Count each tick as a time unit.
   this.viewBuilder_ = new devtools.profiler.ViewBuilder(1);
   this.lastLogFileName_ = null;
+
+  this.generation_ = 1;
+  this.currentProducerProfile_ = null;
 };
 inherits(TickProcessor, devtools.profiler.LogReader);
 
@@ -220,6 +234,41 @@
 };
 
 
+TickProcessor.prototype.processHeapSampleBegin = function(space, state, ticks) {
+  if (space != 'Heap') return;
+  this.currentProducerProfile_ = new devtools.profiler.CallTree();
+};
+
+
+TickProcessor.prototype.processHeapSampleEnd = function(space, state) {
+  if (space != 'Heap' || !this.currentProducerProfile_) return;
+
+  print('Generation ' + this.generation_ + ':');
+  var tree = this.currentProducerProfile_;
+  tree.computeTotalWeights();
+  var producersView = this.viewBuilder_.buildView(tree);
+  // Sort by total time, desc, then by name, desc.
+  producersView.sort(function(rec1, rec2) {
+      return rec2.totalTime - rec1.totalTime ||
+          (rec2.internalFuncName < rec1.internalFuncName ? -1 : 1); });
+  this.printHeavyProfile(producersView.head.children);
+
+  this.currentProducerProfile_ = null;
+  this.generation_++;
+};
+
+
+TickProcessor.prototype.processJSProducer = function(constructor, stack) {
+  if (!this.currentProducerProfile_) return;
+  if (stack.length == 0) return;
+  var first = stack.shift();
+  var processedStack =
+      this.profile_.resolveAndFilterFuncs_(this.processStack(first, stack));
+  processedStack.unshift(constructor);
+  this.currentProducerProfile_.addPath(processedStack);
+};
+
+
 TickProcessor.prototype.printStatistics = function() {
   print('Statistical profiling result from ' + this.lastLogFileName_ +
         ', (' + this.ticks_.total +
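The three new handlers implement a per-generation heap producer profile: heap-sample-begin opens a call tree for the 'Heap' space, each heap-js-prod-item adds one constructor-rooted stack, and heap-sample-end prints the weighted view and bumps the generation. A self-contained miniature of that begin/item/end pattern (illustration only; the real code aggregates into devtools.profiler.CallTree and prints through the ViewBuilder):

```javascript
var producers = null;
var generation = 1;
function sampleBegin(space) { if (space == 'Heap') producers = {}; }
function producerItem(ctor) {
  if (producers) producers[ctor] = (producers[ctor] || 0) + 1;
}
function sampleEnd(space) {
  if (space != 'Heap' || !producers) return;
  var parts = [];
  for (var name in producers) parts.push(name + ': ' + producers[name]);
  print('Generation ' + generation++ + ': ' + parts.join(', '));
  producers = null;
}
sampleBegin('Heap'); producerItem('Array'); producerItem('Array'); sampleEnd('Heap');
// => Generation 1: Array: 2
```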
diff --git a/tools/v8.xcodeproj/project.pbxproj b/tools/v8.xcodeproj/project.pbxproj
index 2d38681..d2af626 100644
--- a/tools/v8.xcodeproj/project.pbxproj
+++ b/tools/v8.xcodeproj/project.pbxproj
@@ -56,6 +56,7 @@
 		89495E480E79FC23001F68C3 /* compilation-cache.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89495E460E79FC23001F68C3 /* compilation-cache.cc */; };
 		89495E490E79FC23001F68C3 /* compilation-cache.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89495E460E79FC23001F68C3 /* compilation-cache.cc */; };
 		8956B6CF0F5D86730033B5A2 /* debug-agent.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8956B6CD0F5D86570033B5A2 /* debug-agent.cc */; };
+		895FA753107FFED3006F39D4 /* constants-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 895FA748107FFE73006F39D4 /* constants-arm.cc */; };
 		896FD03A0E78D717003DFB6A /* libv8-arm.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 89F23C870E78D5B2006B2466 /* libv8-arm.a */; };
 		897F767F0E71B690007ACF34 /* shell.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1B50E719C0900D62E90 /* shell.cc */; };
 		897F76850E71B6B1007ACF34 /* libv8.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 8970F2F00E719FB2006AE7B5 /* libv8.a */; };
@@ -309,6 +310,14 @@
 		89495E470E79FC23001F68C3 /* compilation-cache.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "compilation-cache.h"; sourceTree = "<group>"; };
 		8956B6CD0F5D86570033B5A2 /* debug-agent.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "debug-agent.cc"; sourceTree = "<group>"; };
 		8956B6CE0F5D86570033B5A2 /* debug-agent.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "debug-agent.h"; sourceTree = "<group>"; };
+		895FA720107FFB15006F39D4 /* jump-target-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "jump-target-inl.h"; sourceTree = "<group>"; };
+		895FA725107FFB57006F39D4 /* codegen-ia32-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "codegen-ia32-inl.h"; path = "ia32/codegen-ia32-inl.h"; sourceTree = "<group>"; };
+		895FA72A107FFB85006F39D4 /* register-allocator-ia32-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "register-allocator-ia32-inl.h"; path = "ia32/register-allocator-ia32-inl.h"; sourceTree = "<group>"; };
+		895FA72B107FFB85006F39D4 /* register-allocator-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "register-allocator-ia32.h"; path = "ia32/register-allocator-ia32.h"; sourceTree = "<group>"; };
+		895FA748107FFE73006F39D4 /* constants-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "constants-arm.cc"; path = "arm/constants-arm.cc"; sourceTree = "<group>"; };
+		895FA74B107FFE82006F39D4 /* codegen-arm-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "codegen-arm-inl.h"; path = "arm/codegen-arm-inl.h"; sourceTree = "<group>"; };
+		895FA750107FFEAE006F39D4 /* register-allocator-arm-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "register-allocator-arm-inl.h"; path = "arm/register-allocator-arm-inl.h"; sourceTree = "<group>"; };
+		895FA751107FFEAE006F39D4 /* register-allocator-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "register-allocator-arm.h"; path = "arm/register-allocator-arm.h"; sourceTree = "<group>"; };
 		8964482B0E9C00F700E7C516 /* codegen-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "codegen-ia32.h"; path = "ia32/codegen-ia32.h"; sourceTree = "<group>"; };
 		896448BC0E9D530500E7C516 /* codegen-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "codegen-arm.h"; path = "arm/codegen-arm.h"; sourceTree = "<group>"; };
 		8970F2F00E719FB2006AE7B5 /* libv8.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libv8.a; sourceTree = BUILT_PRODUCTS_DIR; };
@@ -662,8 +671,10 @@
 				897FF1110E719B8F00D62E90 /* code-stubs.cc */,
 				897FF1120E719B8F00D62E90 /* code-stubs.h */,
 				897FF1130E719B8F00D62E90 /* code.h */,
+				895FA74B107FFE82006F39D4 /* codegen-arm-inl.h */,
 				897FF1140E719B8F00D62E90 /* codegen-arm.cc */,
 				896448BC0E9D530500E7C516 /* codegen-arm.h */,
+				895FA725107FFB57006F39D4 /* codegen-ia32-inl.h */,
 				897FF1150E719B8F00D62E90 /* codegen-ia32.cc */,
 				8964482B0E9C00F700E7C516 /* codegen-ia32.h */,
 				897FF1160E719B8F00D62E90 /* codegen-inl.h */,
@@ -673,6 +684,7 @@
 				89495E470E79FC23001F68C3 /* compilation-cache.h */,
 				897FF1190E719B8F00D62E90 /* compiler.cc */,
 				897FF11A0E719B8F00D62E90 /* compiler.h */,
+				895FA748107FFE73006F39D4 /* constants-arm.cc */,
 				897FF11B0E719B8F00D62E90 /* constants-arm.h */,
 				897FF11C0E719B8F00D62E90 /* contexts.cc */,
 				897FF11D0E719B8F00D62E90 /* contexts.h */,
@@ -739,6 +751,7 @@
 				89A15C670EE4665300B48DEB /* interpreter-irregexp.h */,
 				897FF14E0E719B8F00D62E90 /* jsregexp.cc */,
 				897FF14F0E719B8F00D62E90 /* jsregexp.h */,
+				895FA720107FFB15006F39D4 /* jump-target-inl.h */,
 				58950D4E0F55514900F3E8BA /* jump-target-arm.cc */,
 				58950D4F0F55514900F3E8BA /* jump-target-ia32.cc */,
 				58950D500F55514900F3E8BA /* jump-target.cc */,
@@ -794,8 +807,12 @@
 				89A15C7A0EE466D000B48DEB /* regexp-macro-assembler.h */,
 				8944AD0E0F1D4D3A0028D560 /* regexp-stack.cc */,
 				8944AD0F0F1D4D3A0028D560 /* regexp-stack.h */,
+				895FA750107FFEAE006F39D4 /* register-allocator-arm-inl.h */,
 				58950D520F55514900F3E8BA /* register-allocator-arm.cc */,
+				895FA751107FFEAE006F39D4 /* register-allocator-arm.h */,
+				895FA72A107FFB85006F39D4 /* register-allocator-ia32-inl.h */,
 				58950D530F55514900F3E8BA /* register-allocator-ia32.cc */,
+				895FA72B107FFB85006F39D4 /* register-allocator-ia32.h */,
 				893A722D0F7B4A7100303DD2 /* register-allocator-inl.h */,
 				58950D540F55514900F3E8BA /* register-allocator.cc */,
 				58950D550F55514900F3E8BA /* register-allocator.h */,
@@ -1238,6 +1255,7 @@
 				89F23C4B0E78D5B2006B2466 /* codegen.cc in Sources */,
 				89495E490E79FC23001F68C3 /* compilation-cache.cc in Sources */,
 				89F23C4C0E78D5B2006B2466 /* compiler.cc in Sources */,
+				895FA753107FFED3006F39D4 /* constants-arm.cc in Sources */,
 				89F23C4D0E78D5B2006B2466 /* contexts.cc in Sources */,
 				89F23C4E0E78D5B2006B2466 /* conversions.cc in Sources */,
 				89F23C4F0E78D5B2006B2466 /* counters.cc in Sources */,
@@ -1467,6 +1485,7 @@
 					V8_NATIVE_REGEXP,
 					DEBUG,
 					V8_ENABLE_CHECKS,
+					ENABLE_DEBUGGER_SUPPORT,
 				);
 				HEADER_SEARCH_PATHS = ../src;
 				PRODUCT_NAME = v8_shell;
@@ -1481,6 +1500,7 @@
 					V8_TARGET_ARCH_IA32,
 					V8_NATIVE_REGEXP,
 					NDEBUG,
+					ENABLE_DEBUGGER_SUPPORT,
 				);
 				HEADER_SEARCH_PATHS = ../src;
 				PRODUCT_NAME = v8_shell;
@@ -1514,6 +1534,7 @@
 					V8_TARGET_ARCH_IA32,
 					V8_NATIVE_REGEXP,
 					NDEBUG,
+					ENABLE_DEBUGGER_SUPPORT,
 				);
 				HEADER_SEARCH_PATHS = ../src;
 				PRODUCT_NAME = v8;
@@ -1524,6 +1545,10 @@
 		897F767C0E71B4CC007ACF34 /* Debug */ = {
 			isa = XCBuildConfiguration;
 			buildSettings = {
+				GCC_PREPROCESSOR_DEFINITIONS = (
+					"$(GCC_PREPROCESSOR_DEFINITIONS)",
+					V8_TARGET_ARCH_IA32,
+				);
 				HEADER_SEARCH_PATHS = ../src;
 				PRODUCT_NAME = v8_shell;
 			};
@@ -1532,6 +1557,10 @@
 		897F767D0E71B4CC007ACF34 /* Release */ = {
 			isa = XCBuildConfiguration;
 			buildSettings = {
+				GCC_PREPROCESSOR_DEFINITIONS = (
+					"$(GCC_PREPROCESSOR_DEFINITIONS)",
+					V8_TARGET_ARCH_IA32,
+				);
 				HEADER_SEARCH_PATHS = ../src;
 				PRODUCT_NAME = v8_shell;
 			};
@@ -1571,6 +1600,10 @@
 		89F23C930E78D5B6006B2466 /* Debug */ = {
 			isa = XCBuildConfiguration;
 			buildSettings = {
+				GCC_PREPROCESSOR_DEFINITIONS = (
+					"$(GCC_PREPROCESSOR_DEFINITIONS)",
+					V8_TARGET_ARCH_ARM,
+				);
 				HEADER_SEARCH_PATHS = ../src;
 				PRODUCT_NAME = "v8_shell-arm";
 			};
@@ -1579,6 +1612,10 @@
 		89F23C940E78D5B6006B2466 /* Release */ = {
 			isa = XCBuildConfiguration;
 			buildSettings = {
+				GCC_PREPROCESSOR_DEFINITIONS = (
+					"$(GCC_PREPROCESSOR_DEFINITIONS)",
+					V8_TARGET_ARCH_ARM,
+				);
 				HEADER_SEARCH_PATHS = ../src;
 				PRODUCT_NAME = "v8_shell-arm";
 			};
diff --git a/tools/visual_studio/v8_base.vcproj b/tools/visual_studio/v8_base.vcproj
index 7a013c0..fc7402a 100644
--- a/tools/visual_studio/v8_base.vcproj
+++ b/tools/visual_studio/v8_base.vcproj
@@ -388,6 +388,18 @@
 				RelativePath="..\..\src\factory.h"
 				>
 			</File>
+			<File
+				RelativePath="..\..\src\ia32\fast-codegen-ia32.cc"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\fast-codegen.cc"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\fast-codegen.h"
+				>
+			</File>
 			<File
 				RelativePath="..\..\src\flags.cc"
 				>
@@ -545,6 +557,10 @@
 				>
 			</File>
 			<File
+				RelativePath="..\..\src\location.h"
+				>
+			</File>
+			<File
 				RelativePath="..\..\src\log.cc"
 				>
 			</File>
@@ -945,7 +961,7 @@
 			Name="include"
 			>
 			<File
-				RelativePath="..\..\include\debug.h"
+				RelativePath="..\..\include\v8-debug.h"
 				>
 			</File>
 			<File
diff --git a/tools/visual_studio/v8_base_arm.vcproj b/tools/visual_studio/v8_base_arm.vcproj
index abdb418..fca4a96 100644
--- a/tools/visual_studio/v8_base_arm.vcproj
+++ b/tools/visual_studio/v8_base_arm.vcproj
@@ -396,6 +396,18 @@
 				RelativePath="..\..\src\factory.h"
 				>
 			</File>
+			<File
+				RelativePath="..\..\src\arm\fast-codegen-arm.cc"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\fast-codegen.cc"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\fast-codegen.h"
+				>
+			</File>
 			<File
 				RelativePath="..\..\src\flags.cc"
 				>
@@ -549,6 +561,10 @@
 				>
 			</File>
 			<File
+				RelativePath="..\..\src\location.h"
+				>
+			</File>
+			<File
 				RelativePath="..\..\src\log.cc"
 				>
 			</File>
@@ -957,7 +973,7 @@
 			Name="include"
 			>
 			<File
-				RelativePath="..\..\include\debug.h"
+				RelativePath="..\..\include\v8-debug.h"
 				>
 			</File>
 			<File
diff --git a/tools/visual_studio/v8_base_x64.vcproj b/tools/visual_studio/v8_base_x64.vcproj
index 7b8b4d3..a8c8b55 100644
--- a/tools/visual_studio/v8_base_x64.vcproj
+++ b/tools/visual_studio/v8_base_x64.vcproj
@@ -945,7 +945,7 @@
 			Name="include"
 			>
 			<File
-				RelativePath="..\..\include\debug.h"
+				RelativePath="..\..\include\v8-debug.h"
 				>
 			</File>
 			<File