Version 1.3.16

X64: Convert smis to hold 32 bits of payload.

Introduce v8::Integer::NewFromUnsigned method.

Add missing null check in Context::GetCurrent.

Add trim, trimLeft and trimRight methods to String.
Patch by Jan de Mooij <jandemooij@gmail.com>

Implement ES5 Array.isArray.
Patch by Jan de Mooij <jandemooij@gmail.com>

Skip access checks for hidden properties.

Add String::Concat(Handle<String> left, Handle<String> right) to the V8 API.

Fix GYP-based builds of V8.
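
A minimal embedder-side sketch of the new API entry points (assumes V8 is
initialized and a context has been entered; the values are illustrative):

    v8::HandleScope scope;
    // Context::GetCurrent now returns an empty handle instead of
    // dereferencing a null context when none has been entered.
    v8::Local<v8::Context> context = v8::Context::GetCurrent();

    // New: build an Integer from an unsigned value.  Values that do not
    // fit in an int32_t fall back to a heap number internally.
    v8::Local<v8::Integer> big = v8::Integer::NewFromUnsigned(0x80000000u);

    // New: concatenate two strings directly through the API.
    v8::Local<v8::String> greeting = v8::String::Concat(
        v8::String::New("Hello, "), v8::String::New("world"));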



git-svn-id: http://v8.googlecode.com/svn/trunk@3082 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/.gitignore b/.gitignore
index 685e9a2..e5687e7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,6 +10,10 @@
 *.suo
 *.user
 *.xcodeproj
+*.idb
+*.pdb
+#*#
+*~
 d8
 d8_g
 shell
diff --git a/ChangeLog b/ChangeLog
index d07c458..d13d74f 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,24 @@
+2009-10-16: Version 1.3.16
+
+        X64: Convert smis to hold 32 bits of payload.
+
+        Introduce v8::Integer::NewFromUnsigned method.
+
+        Add missing null check in Context::GetCurrent.
+
+        Add trim, trimLeft and trimRight methods to String.
+        Patch by Jan de Mooij <jandemooij@gmail.com>
+
+        Implement ES5 Array.isArray.
+        Patch by Jan de Mooij <jandemooij@gmail.com>
+
+        Skip access checks for hidden properties.
+
+        Add String::Concat(Handle<String> left, Handle<String> right) to the V8 API.
+
+        Fix GYP-based builds of V8.
+
+
 2009-10-07: Version 1.3.15
 
         Expand the maximum size of the code space to 512MB for 64-bit mode.
diff --git a/SConstruct b/SConstruct
old mode 100644
new mode 100755
index b5aa7ab..2b2ce1d
--- a/SConstruct
+++ b/SConstruct
@@ -373,7 +373,8 @@
       'CPPDEFINES': ['V8_TARGET_ARCH_IA32']
     },
     'arch:x64': {
-      'CPPDEFINES':   ['V8_TARGET_ARCH_X64']
+      'CPPDEFINES':   ['V8_TARGET_ARCH_X64'],
+      'LINKFLAGS': ['/STACK:2091752']
     },
   }
 }
@@ -474,7 +475,7 @@
     },
     'arch:x64': {
       'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
-      'LINKFLAGS': ['/MACHINE:X64']
+      'LINKFLAGS': ['/MACHINE:X64', '/STACK:2091752']
     },
     'mode:debug': {
       'CCFLAGS':   ['/Od'],
diff --git a/include/v8.h b/include/v8.h
index adb9f43..d923f97 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -756,7 +756,7 @@
   /** JS == */
   bool Equals(Handle<Value> that) const;
   bool StrictEquals(Handle<Value> that) const;
-  
+
  private:
   inline bool QuickIsString() const;
   bool FullIsString() const;
@@ -919,6 +919,12 @@
   static Local<String> NewSymbol(const char* data, int length = -1);
 
   /**
+   * Creates a new string by concatenating the left and the right strings
+   * passed in as parameters.
+   */
+  static Local<String> Concat(Handle<String> left, Handle<String> right);
+
+  /**
    * Creates a new external string using the data defined in the given
    * resource. The resource is deleted when the external string is no
    * longer live on V8's heap. The caller of this function should not
@@ -1036,7 +1042,7 @@
     Value(const Value&);
     void operator=(const Value&);
   };
-  
+
  private:
   void VerifyExternalStringResource(ExternalStringResource* val) const;
   static void CheckCast(v8::Value* obj);
@@ -1063,6 +1069,7 @@
 class V8EXPORT Integer : public Number {
  public:
   static Local<Integer> New(int32_t value);
+  static inline Local<Integer> NewFromUnsigned(uint32_t value);
   int64_t Value() const;
   static inline Integer* Cast(v8::Value* obj);
  private:
@@ -1193,7 +1200,7 @@
 
   /** Gets a native pointer from an internal field. */
   inline void* GetPointerFromInternalField(int index);
-  
+
   /** Sets a native pointer in an internal field. */
   void SetPointerInInternalField(int index, void* value);
 
@@ -1246,7 +1253,7 @@
   bool SetHiddenValue(Handle<String> key, Handle<Value> value);
   Local<Value> GetHiddenValue(Handle<String> key);
   bool DeleteHiddenValue(Handle<String> key);
-  
+
   /**
    * Returns true if this is an instance of an api function (one
    * created from a function created from a function template) and has
@@ -1277,10 +1284,11 @@
   Object();
   static void CheckCast(Value* obj);
   Local<Value> CheckedGetInternalField(int index);
+  void* SlowGetPointerFromInternalField(int index);
 
   /**
    * If quick access to the internal field is possible this method
-   * returns the value.  Otherwise an empty handle is returned. 
+   * returns the value.  Otherwise an empty handle is returned.
    */
   inline Local<Value> UncheckedGetInternalField(int index);
 };
@@ -2719,12 +2727,37 @@
 const int kHeapObjectTagSize = 2;
 const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1;
 
-
 // Tag information for Smi.
 const int kSmiTag = 0;
 const int kSmiTagSize = 1;
 const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;
 
+template <size_t ptr_size> struct SmiConstants;
+
+// Smi constants for 32-bit systems.
+template <> struct SmiConstants<4> {
+  static const int kSmiShiftSize = 0;
+  static const int kSmiValueSize = 31;
+  static inline int SmiToInt(internal::Object* value) {
+    int shift_bits = kSmiTagSize + kSmiShiftSize;
+    // Throw away top 32 bits and shift down (requires >> to be sign-extending).
+    return static_cast<int>(reinterpret_cast<intptr_t>(value)) >> shift_bits;
+  }
+};
+
+// Smi constants for 64-bit systems.
+template <> struct SmiConstants<8> {
+  static const int kSmiShiftSize = 31;
+  static const int kSmiValueSize = 32;
+  static inline int SmiToInt(internal::Object* value) {
+    int shift_bits = kSmiTagSize + kSmiShiftSize;
+    // Shift down and throw away top 32 bits.
+    return static_cast<int>(reinterpret_cast<intptr_t>(value) >> shift_bits);
+  }
+};
+
+const int kSmiShiftSize = SmiConstants<sizeof(void*)>::kSmiShiftSize;
+const int kSmiValueSize = SmiConstants<sizeof(void*)>::kSmiValueSize;
 
 /**
  * This class exports constants and functionality from within v8 that
@@ -2743,7 +2776,6 @@
   static const int kJSObjectHeaderSize = 3 * sizeof(void*);
   static const int kFullStringRepresentationMask = 0x07;
   static const int kExternalTwoByteRepresentationTag = 0x03;
-  static const int kAlignedPointerShift = 2;
 
   // These constants are compiler dependent so their values must be
   // defined within the implementation.
@@ -2761,7 +2793,23 @@
   }
 
   static inline int SmiValue(internal::Object* value) {
-    return static_cast<int>(reinterpret_cast<intptr_t>(value)) >> kSmiTagSize;
+    return SmiConstants<sizeof(void*)>::SmiToInt(value);
+  }
+
+  static inline int GetInstanceType(internal::Object* obj) {
+    typedef internal::Object O;
+    O* map = ReadField<O*>(obj, kHeapObjectMapOffset);
+    return ReadField<uint8_t>(map, kMapInstanceTypeOffset);
+  }
+
+  static inline void* GetExternalPointer(internal::Object* obj) {
+    if (HasSmiTag(obj)) {
+      return obj;
+    } else if (GetInstanceType(obj) == kProxyType) {
+      return ReadField<void*>(obj, kProxyProxyOffset);
+    } else {
+      return NULL;
+    }
   }
 
   static inline bool IsExternalTwoByteString(int instance_type) {
@@ -2921,9 +2969,7 @@
   typedef internal::Object O;
   typedef internal::Internals I;
   O* obj = *reinterpret_cast<O**>(this);
-  O* map = I::ReadField<O*>(obj, I::kHeapObjectMapOffset);
-  int instance_type = I::ReadField<uint8_t>(map, I::kMapInstanceTypeOffset);
-  if (instance_type == I::kJSObjectType) {
+  if (I::GetInstanceType(obj) == I::kJSObjectType) {
     // If the object is a plain JSObject, which is the common case,
     // we know where to find the internal fields and can return the
     // value directly.
@@ -2948,25 +2994,27 @@
 
 void* External::QuickUnwrap(Handle<v8::Value> wrapper) {
   typedef internal::Object O;
-  typedef internal::Internals I;
   O* obj = *reinterpret_cast<O**>(const_cast<v8::Value*>(*wrapper));
-  if (I::HasSmiTag(obj)) {
-    int value = I::SmiValue(obj) << I::kAlignedPointerShift;
-    return reinterpret_cast<void*>(value);
-  } else {
-    O* map = I::ReadField<O*>(obj, I::kHeapObjectMapOffset);
-    int instance_type = I::ReadField<uint8_t>(map, I::kMapInstanceTypeOffset);
-    if (instance_type == I::kProxyType) {
-      return I::ReadField<void*>(obj, I::kProxyProxyOffset);
-    } else {
-      return NULL;
-    }
-  }
+  return internal::Internals::GetExternalPointer(obj);
 }
 
 
 void* Object::GetPointerFromInternalField(int index) {
-  return External::Unwrap(GetInternalField(index));
+  typedef internal::Object O;
+  typedef internal::Internals I;
+
+  O* obj = *reinterpret_cast<O**>(this);
+
+  if (I::GetInstanceType(obj) == I::kJSObjectType) {
+    // If the object is a plain JSObject, which is the common case,
+    // we know where to find the internal fields and can return the
+    // value directly.
+    int offset = I::kJSObjectHeaderSize + (sizeof(void*) * index);
+    O* value = I::ReadField<O*>(obj, offset);
+    return I::GetExternalPointer(value);
+  }
+
+  return SlowGetPointerFromInternalField(index);
 }
 
 
@@ -2982,10 +3030,8 @@
   typedef internal::Object O;
   typedef internal::Internals I;
   O* obj = *reinterpret_cast<O**>(const_cast<String*>(this));
-  O* map = I::ReadField<O*>(obj, I::kHeapObjectMapOffset);
-  int instance_type = I::ReadField<uint8_t>(map, I::kMapInstanceTypeOffset);
   String::ExternalStringResource* result;
-  if (I::IsExternalTwoByteString(instance_type)) {
+  if (I::IsExternalTwoByteString(I::GetInstanceType(obj))) {
     void* value = I::ReadField<void*>(obj, I::kStringResourceOffset);
     result = reinterpret_cast<String::ExternalStringResource*>(value);
   } else {
@@ -3011,9 +3057,7 @@
   typedef internal::Internals I;
   O* obj = *reinterpret_cast<O**>(const_cast<Value*>(this));
   if (!I::HasHeapObjectTag(obj)) return false;
-  O* map = I::ReadField<O*>(obj, I::kHeapObjectMapOffset);
-  int instance_type = I::ReadField<uint8_t>(map, I::kMapInstanceTypeOffset);
-  return (instance_type < I::kFirstNonstringType);
+  return (I::GetInstanceType(obj) < I::kFirstNonstringType);
 }
 
 
@@ -3025,6 +3069,15 @@
 }
 
 
+Local<Integer> Integer::NewFromUnsigned(uint32_t value) {
+  bool fits_into_int32_t = (value & (1u << 31)) == 0;
+  if (fits_into_int32_t) {
+    return Integer::New(static_cast<int32_t>(value));
+  }
+  return Local<Integer>::Cast(Number::New(value));
+}
+
+
 Integer* Integer::Cast(v8::Value* value) {
 #ifdef V8_ENABLE_CHECKS
   CheckCast(value);
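
The widened x64 payload is easiest to see with concrete bit patterns.  A
standalone sketch (plain integers stand in for internal::Object*; this is
an illustration, not V8 code) mirroring the two SmiToInt specializations
above:

    #include <cassert>
    #include <cstdint>

    // 32-bit layout: [31-bit payload][1 tag bit]; arithmetic >> recovers it.
    int SmiToInt32(int32_t raw) { return raw >> 1; }

    // 64-bit layout: [32-bit payload][31 zero bits][1 tag bit]; shift by 32.
    int SmiToInt64(int64_t raw) { return static_cast<int>(raw >> 32); }

    // Encode via unsigned arithmetic to sidestep signed-shift pitfalls.
    int64_t EncodeSmi64(int32_t value) {
      uint64_t payload = static_cast<uint32_t>(value);
      return static_cast<int64_t>(payload << 32);
    }

    int main() {
      assert(SmiToInt32(7 << 1) == 7);
      assert(SmiToInt64(EncodeSmi64(7)) == 7);
      // The payload now covers the full int32 range on x64:
      assert(SmiToInt64(EncodeSmi64(INT32_MIN)) == INT32_MIN);
      return 0;
    }
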
diff --git a/src/SConscript b/src/SConscript
index b6c2b4d..85fd724 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -34,51 +34,129 @@
 
 
 SOURCES = {
-  'all': [
-    'accessors.cc', 'allocation.cc', 'api.cc', 'assembler.cc', 'ast.cc',
-    'bootstrapper.cc', 'builtins.cc', 'checks.cc', 'code-stubs.cc',
-    'codegen.cc', 'compilation-cache.cc', 'compiler.cc', 'contexts.cc',
-    'conversions.cc', 'counters.cc', 'dateparser.cc', 'debug.cc',
-    'debug-agent.cc', 'disassembler.cc', 'execution.cc', 'factory.cc',
-    'flags.cc', 'frame-element.cc', 'frames.cc', 'func-name-inferrer.cc',
-    'global-handles.cc', 'handles.cc', 'hashmap.cc', 'heap.cc',
-    'heap-profiler.cc', 'ic.cc', 'interpreter-irregexp.cc', 'jsregexp.cc',
-    'jump-target.cc', 'log.cc', 'log-utils.cc', 'mark-compact.cc',
-    'messages.cc', 'objects.cc', 'oprofile-agent.cc', 'parser.cc',
-    'property.cc', 'regexp-macro-assembler.cc',
-    'regexp-macro-assembler-irregexp.cc', 'regexp-stack.cc',
-    'register-allocator.cc', 'rewriter.cc', 'runtime.cc', 'scanner.cc',
-    'scopeinfo.cc', 'scopes.cc', 'serialize.cc', 'snapshot-common.cc',
-    'spaces.cc', 'string-stream.cc', 'stub-cache.cc', 'token.cc', 'top.cc',
-    'unicode.cc', 'usage-analyzer.cc', 'utils.cc', 'v8-counters.cc',
-    'v8.cc', 'v8threads.cc', 'variables.cc', 'version.cc',
-    'virtual-frame.cc', 'zone.cc'
-  ],
-  'arch:arm': [
-    'arm/assembler-arm.cc', 'arm/builtins-arm.cc', 'arm/codegen-arm.cc',
-    'arm/constants-arm.cc', 'arm/cpu-arm.cc', 'arm/disasm-arm.cc',
-    'arm/debug-arm.cc', 'arm/frames-arm.cc', 'arm/ic-arm.cc',
-    'arm/jump-target-arm.cc', 'arm/macro-assembler-arm.cc',
-    'arm/regexp-macro-assembler-arm.cc', 'arm/register-allocator-arm.cc',
-    'arm/stub-cache-arm.cc', 'arm/virtual-frame-arm.cc'
-  ],
-  'arch:ia32': [
-    'ia32/assembler-ia32.cc', 'ia32/builtins-ia32.cc',
-    'ia32/codegen-ia32.cc', 'ia32/cpu-ia32.cc', 'ia32/disasm-ia32.cc',
-    'ia32/debug-ia32.cc', 'ia32/frames-ia32.cc', 'ia32/ic-ia32.cc',
-    'ia32/jump-target-ia32.cc', 'ia32/macro-assembler-ia32.cc',
-    'ia32/regexp-macro-assembler-ia32.cc',
-    'ia32/register-allocator-ia32.cc', 'ia32/stub-cache-ia32.cc',
-    'ia32/virtual-frame-ia32.cc'
-  ],
-  'arch:x64': [
-    'x64/assembler-x64.cc', 'x64/builtins-x64.cc', 'x64/codegen-x64.cc',
-    'x64/cpu-x64.cc', 'x64/disasm-x64.cc', 'x64/debug-x64.cc',
-    'x64/frames-x64.cc', 'x64/ic-x64.cc', 'x64/jump-target-x64.cc',
-    'x64/macro-assembler-x64.cc', 'x64/regexp-macro-assembler-x64.cc',
-    'x64/register-allocator-x64.cc', 'x64/stub-cache-x64.cc',
-    'x64/virtual-frame-x64.cc'
-  ],
+  'all': Split("""
+    accessors.cc
+    allocation.cc
+    api.cc
+    assembler.cc
+    ast.cc
+    bootstrapper.cc
+    builtins.cc
+    checks.cc
+    code-stubs.cc
+    codegen.cc
+    compilation-cache.cc
+    compiler.cc
+    contexts.cc
+    conversions.cc
+    counters.cc
+    dateparser.cc
+    debug-agent.cc
+    debug.cc
+    disassembler.cc
+    execution.cc
+    factory.cc
+    fast-codegen.cc
+    flags.cc
+    frame-element.cc
+    frames.cc
+    func-name-inferrer.cc
+    global-handles.cc
+    handles.cc
+    hashmap.cc
+    heap-profiler.cc
+    heap.cc
+    ic.cc
+    interpreter-irregexp.cc
+    jsregexp.cc
+    jump-target.cc
+    log-utils.cc
+    log.cc
+    mark-compact.cc
+    messages.cc
+    objects.cc
+    oprofile-agent.cc
+    parser.cc
+    property.cc
+    regexp-macro-assembler-irregexp.cc
+    regexp-macro-assembler.cc
+    regexp-stack.cc
+    register-allocator.cc
+    rewriter.cc
+    runtime.cc
+    scanner.cc
+    scopeinfo.cc
+    scopes.cc
+    serialize.cc
+    snapshot-common.cc
+    spaces.cc
+    string-stream.cc
+    stub-cache.cc
+    token.cc
+    top.cc
+    unicode.cc
+    usage-analyzer.cc
+    utils.cc
+    v8-counters.cc
+    v8.cc
+    v8threads.cc
+    variables.cc
+    version.cc
+    virtual-frame.cc
+    zone.cc
+    """),
+  'arch:arm': Split("""
+    arm/assembler-arm.cc
+    arm/builtins-arm.cc
+    arm/codegen-arm.cc
+    arm/constants-arm.cc
+    arm/cpu-arm.cc
+    arm/debug-arm.cc
+    arm/disasm-arm.cc
+    arm/fast-codegen-arm.cc
+    arm/frames-arm.cc
+    arm/ic-arm.cc
+    arm/jump-target-arm.cc
+    arm/macro-assembler-arm.cc
+    arm/regexp-macro-assembler-arm.cc
+    arm/register-allocator-arm.cc
+    arm/stub-cache-arm.cc
+    arm/virtual-frame-arm.cc
+    """),
+  'arch:ia32': Split("""
+    ia32/assembler-ia32.cc
+    ia32/builtins-ia32.cc
+    ia32/codegen-ia32.cc
+    ia32/cpu-ia32.cc
+    ia32/debug-ia32.cc
+    ia32/disasm-ia32.cc
+    ia32/fast-codegen-ia32.cc
+    ia32/frames-ia32.cc
+    ia32/ic-ia32.cc
+    ia32/jump-target-ia32.cc
+    ia32/macro-assembler-ia32.cc
+    ia32/regexp-macro-assembler-ia32.cc
+    ia32/register-allocator-ia32.cc
+    ia32/stub-cache-ia32.cc
+    ia32/virtual-frame-ia32.cc
+    """),
+  'arch:x64': Split("""
+    x64/assembler-x64.cc
+    x64/builtins-x64.cc
+    x64/codegen-x64.cc
+    x64/cpu-x64.cc
+    x64/debug-x64.cc
+    x64/disasm-x64.cc
+    x64/fast-codegen-x64.cc
+    x64/frames-x64.cc
+    x64/ic-x64.cc
+    x64/jump-target-x64.cc
+    x64/macro-assembler-x64.cc
+    x64/regexp-macro-assembler-x64.cc
+    x64/register-allocator-x64.cc
+    x64/stub-cache-x64.cc
+    x64/virtual-frame-x64.cc
+    """),
   'simulator:arm': ['arm/simulator-arm.cc'],
   'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'],
   'os:linux':   ['platform-linux.cc', 'platform-posix.cc'],
diff --git a/src/api.cc b/src/api.cc
index 00f1e0b..630fa8f 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -2290,7 +2290,7 @@
   ON_BAILOUT("v8::SetElementsToPixelData()", return);
   ENTER_V8;
   HandleScope scope;
-  if (!ApiCheck(i::Smi::IsValid(length),
+  if (!ApiCheck(length <= i::PixelArray::kMaxLength,
                 "v8::Object::SetIndexedPropertiesToPixelData()",
                 "length exceeds max acceptable value")) {
     return;
@@ -2578,7 +2578,16 @@
 
 
 void v8::Object::SetPointerInInternalField(int index, void* value) {
-  SetInternalField(index, External::Wrap(value));
+  i::Object* as_object = reinterpret_cast<i::Object*>(value);
+  if (as_object->IsSmi()) {
+    Utils::OpenHandle(this)->SetInternalField(index, as_object);
+    return;
+  }
+  HandleScope scope;
+  i::Handle<i::Proxy> proxy =
+      i::Factory::NewProxy(reinterpret_cast<i::Address>(value), i::TENURED);
+  if (!proxy.is_null())
+      Utils::OpenHandle(this)->SetInternalField(index, *proxy);
 }
 
 
@@ -2760,7 +2769,9 @@
 
 v8::Local<v8::Context> Context::GetCurrent() {
   if (IsDeadCheck("v8::Context::GetCurrent()")) return Local<Context>();
-  i::Handle<i::Context> context(i::Top::global_context());
+  i::Handle<i::Object> current = i::Top::global_context();
+  if (current.is_null()) return Local<Context>();
+  i::Handle<i::Context> context = i::Handle<i::Context>::cast(current);
   return Utils::ToLocal(context);
 }
 
@@ -2837,36 +2848,39 @@
 }
 
 
-static const intptr_t kAlignedPointerMask = 3;
-
 Local<Value> v8::External::Wrap(void* data) {
   STATIC_ASSERT(sizeof(data) == sizeof(i::Address));
   LOG_API("External::Wrap");
   EnsureInitialized("v8::External::Wrap()");
   ENTER_V8;
-  if ((reinterpret_cast<intptr_t>(data) & kAlignedPointerMask) == 0) {
-    uintptr_t data_ptr = reinterpret_cast<uintptr_t>(data);
-    intptr_t data_value =
-        static_cast<intptr_t>(data_ptr >> i::Internals::kAlignedPointerShift);
-    STATIC_ASSERT(sizeof(data_ptr) == sizeof(data_value));
-    if (i::Smi::IsIntptrValid(data_value)) {
-      i::Handle<i::Object> obj(i::Smi::FromIntptr(data_value));
-      return Utils::ToLocal(obj);
-    }
+  i::Object* as_object = reinterpret_cast<i::Object*>(data);
+  if (as_object->IsSmi()) {
+    return Utils::ToLocal(i::Handle<i::Object>(as_object));
   }
   return ExternalNewImpl(data);
 }
 
 
+void* v8::Object::SlowGetPointerFromInternalField(int index) {
+  i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+  i::Object* value = obj->GetInternalField(index);
+  if (value->IsSmi()) {
+    return value;
+  } else if (value->IsProxy()) {
+    return reinterpret_cast<void*>(i::Proxy::cast(value)->proxy());
+  } else {
+    return NULL;
+  }
+}
+
+
 void* v8::External::FullUnwrap(v8::Handle<v8::Value> wrapper) {
   if (IsDeadCheck("v8::External::Unwrap()")) return 0;
   i::Handle<i::Object> obj = Utils::OpenHandle(*wrapper);
   void* result;
   if (obj->IsSmi()) {
     // The external value was an aligned pointer.
-    uintptr_t value = static_cast<uintptr_t>(
-        i::Smi::cast(*obj)->value()) << i::Internals::kAlignedPointerShift;
-    result = reinterpret_cast<void*>(value);
+    result = *obj;
   } else if (obj->IsProxy()) {
     result = ExternalValueImpl(obj);
   } else {
@@ -2912,6 +2926,18 @@
 }
 
 
+Local<String> v8::String::Concat(Handle<String> left, Handle<String> right) {
+  EnsureInitialized("v8::String::New()");
+  LOG_API("String::New(char)");
+  ENTER_V8;
+  i::Handle<i::String> left_string = Utils::OpenHandle(*left);
+  i::Handle<i::String> right_string = Utils::OpenHandle(*right);
+  i::Handle<i::String> result = i::Factory::NewConsString(left_string,
+                                                          right_string);
+  return Utils::ToLocal(result);
+}
+
+
 Local<String> v8::String::NewUndetectable(const char* data, int length) {
   EnsureInitialized("v8::String::NewUndetectable()");
   LOG_API("String::NewUndetectable(char)");
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index 5417ed7..48cc090 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -110,7 +110,7 @@
 
 
 Address RelocInfo::call_address() {
-  ASSERT(IsCallInstruction());
+  ASSERT(IsPatchedReturnSequence());
   // The 2 instructions offset assumes patched return sequence.
   ASSERT(IsJSReturn(rmode()));
   return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
@@ -118,7 +118,7 @@
 
 
 void RelocInfo::set_call_address(Address target) {
-  ASSERT(IsCallInstruction());
+  ASSERT(IsPatchedReturnSequence());
   // The 2 instructions offset assumes patched return sequence.
   ASSERT(IsJSReturn(rmode()));
   Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
@@ -131,7 +131,7 @@
 
 
 Object** RelocInfo::call_object_address() {
-  ASSERT(IsCallInstruction());
+  ASSERT(IsPatchedReturnSequence());
   // The 2 instructions offset assumes patched return sequence.
   ASSERT(IsJSReturn(rmode()));
   return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
@@ -143,7 +143,7 @@
 }
 
 
-bool RelocInfo::IsCallInstruction() {
+bool RelocInfo::IsPatchedReturnSequence() {
   // On ARM a "call instruction" is actually two instructions.
   //   mov lr, pc
   //   ldr pc, [pc, #XXX]
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index cdd32f3..147c5e3 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -1539,191 +1539,200 @@
 }
 
 
-void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
+void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
   VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ LoopStatement");
+  Comment cmnt(masm_, "[ DoWhileStatement");
   CodeForStatementPosition(node);
   node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  JumpTarget body(JumpTarget::BIDIRECTIONAL);
 
-  // Simple condition analysis.  ALWAYS_TRUE and ALWAYS_FALSE represent a
-  // known result for the test expression, with no side effects.
-  enum { ALWAYS_TRUE, ALWAYS_FALSE, DONT_KNOW } info = DONT_KNOW;
-  if (node->cond() == NULL) {
-    ASSERT(node->type() == LoopStatement::FOR_LOOP);
-    info = ALWAYS_TRUE;
-  } else {
-    Literal* lit = node->cond()->AsLiteral();
-    if (lit != NULL) {
-      if (lit->IsTrue()) {
-        info = ALWAYS_TRUE;
-      } else if (lit->IsFalse()) {
-        info = ALWAYS_FALSE;
-      }
-    }
-  }
-
-  switch (node->type()) {
-    case LoopStatement::DO_LOOP: {
-      JumpTarget body(JumpTarget::BIDIRECTIONAL);
-
-      // Label the top of the loop for the backward CFG edge.  If the test
-      // is always true we can use the continue target, and if the test is
-      // always false there is no need.
-      if (info == ALWAYS_TRUE) {
-        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-        node->continue_target()->Bind();
-      } else if (info == ALWAYS_FALSE) {
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-      } else {
-        ASSERT(info == DONT_KNOW);
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-        body.Bind();
-      }
-
-      CheckStack();  // TODO(1222600): ignore if body contains calls.
-      VisitAndSpill(node->body());
-
-      // Compile the test.
-      if (info == ALWAYS_TRUE) {
-        if (has_valid_frame()) {
-          // If control can fall off the end of the body, jump back to the
-          // top.
-          node->continue_target()->Jump();
-        }
-      } else if (info == ALWAYS_FALSE) {
-        // If we have a continue in the body, we only have to bind its jump
-        // target.
-        if (node->continue_target()->is_linked()) {
-          node->continue_target()->Bind();
-        }
-      } else {
-        ASSERT(info == DONT_KNOW);
-        // We have to compile the test expression if it can be reached by
-        // control flow falling out of the body or via continue.
-        if (node->continue_target()->is_linked()) {
-          node->continue_target()->Bind();
-        }
-        if (has_valid_frame()) {
-          LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
-                                &body, node->break_target(), true);
-          if (has_valid_frame()) {
-            // A invalid frame here indicates that control did not
-            // fall out of the test expression.
-            Branch(true, &body);
-          }
-        }
-      }
-      break;
-    }
-
-    case LoopStatement::WHILE_LOOP: {
-      // If the test is never true and has no side effects there is no need
-      // to compile the test or body.
-      if (info == ALWAYS_FALSE) break;
-
-      // Label the top of the loop with the continue target for the backward
-      // CFG edge.
+  // Label the top of the loop for the backward CFG edge.  If the test
+  // is always true we can use the continue target, and if the test is
+  // always false there is no need.
+  ConditionAnalysis info = AnalyzeCondition(node->cond());
+  switch (info) {
+    case ALWAYS_TRUE:
       node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
       node->continue_target()->Bind();
+      break;
+    case ALWAYS_FALSE:
+      node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+      break;
+    case DONT_KNOW:
+      node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+      body.Bind();
+      break;
+  }
 
-      if (info == DONT_KNOW) {
-        JumpTarget body;
-        LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
-                              &body, node->break_target(), true);
-        if (has_valid_frame()) {
-          // A NULL frame indicates that control did not fall out of the
-          // test expression.
-          Branch(false, node->break_target());
-        }
-        if (has_valid_frame() || body.is_linked()) {
-          body.Bind();
-        }
-      }
+  CheckStack();  // TODO(1222600): ignore if body contains calls.
+  VisitAndSpill(node->body());
 
+  // Compile the test.
+  switch (info) {
+    case ALWAYS_TRUE:
+      // If control can fall off the end of the body, jump back to the
+      // top.
       if (has_valid_frame()) {
-        CheckStack();  // TODO(1222600): ignore if body contains calls.
-        VisitAndSpill(node->body());
-
-        // If control flow can fall out of the body, jump back to the top.
-        if (has_valid_frame()) {
-          node->continue_target()->Jump();
-        }
+        node->continue_target()->Jump();
       }
       break;
-    }
-
-    case LoopStatement::FOR_LOOP: {
-      JumpTarget loop(JumpTarget::BIDIRECTIONAL);
-
-      if (node->init() != NULL) {
-        VisitAndSpill(node->init());
-      }
-
-      // There is no need to compile the test or body.
-      if (info == ALWAYS_FALSE) break;
-
-      // If there is no update statement, label the top of the loop with the
-      // continue target, otherwise with the loop target.
-      if (node->next() == NULL) {
-        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+    case ALWAYS_FALSE:
+      // If we have a continue in the body, we only have to bind its
+      // jump target.
+      if (node->continue_target()->is_linked()) {
         node->continue_target()->Bind();
-      } else {
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-        loop.Bind();
       }
-
-      // If the test is always true, there is no need to compile it.
-      if (info == DONT_KNOW) {
-        JumpTarget body;
+      break;
+    case DONT_KNOW:
+      // We have to compile the test expression if it can be reached by
+      // control flow falling out of the body or via continue.
+      if (node->continue_target()->is_linked()) {
+        node->continue_target()->Bind();
+      }
+      if (has_valid_frame()) {
         LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
                               &body, node->break_target(), true);
         if (has_valid_frame()) {
-          Branch(false, node->break_target());
-        }
-        if (has_valid_frame() || body.is_linked()) {
-          body.Bind();
-        }
-      }
-
-      if (has_valid_frame()) {
-        CheckStack();  // TODO(1222600): ignore if body contains calls.
-        VisitAndSpill(node->body());
-
-        if (node->next() == NULL) {
-          // If there is no update statement and control flow can fall out
-          // of the loop, jump directly to the continue label.
-          if (has_valid_frame()) {
-            node->continue_target()->Jump();
-          }
-        } else {
-          // If there is an update statement and control flow can reach it
-          // via falling out of the body of the loop or continuing, we
-          // compile the update statement.
-          if (node->continue_target()->is_linked()) {
-            node->continue_target()->Bind();
-          }
-          if (has_valid_frame()) {
-            // Record source position of the statement as this code which is
-            // after the code for the body actually belongs to the loop
-            // statement and not the body.
-            CodeForStatementPosition(node);
-            VisitAndSpill(node->next());
-            loop.Jump();
-          }
+          // An invalid frame here indicates that control did not
+          // fall out of the test expression.
+          Branch(true, &body);
         }
       }
       break;
-    }
   }
 
   if (node->break_target()->is_linked()) {
     node->break_target()->Bind();
   }
-  node->continue_target()->Unuse();
-  node->break_target()->Unuse();
+  ASSERT(!has_valid_frame() || frame_->height() == original_height);
+}
+
+
+void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ WhileStatement");
+  CodeForStatementPosition(node);
+
+  // If the test is never true and has no side effects there is no need
+  // to compile the test or body.
+  ConditionAnalysis info = AnalyzeCondition(node->cond());
+  if (info == ALWAYS_FALSE) return;
+
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+
+  // Label the top of the loop with the continue target for the backward
+  // CFG edge.
+  node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+  node->continue_target()->Bind();
+
+  if (info == DONT_KNOW) {
+    JumpTarget body;
+    LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
+                          &body, node->break_target(), true);
+    if (has_valid_frame()) {
+      // A NULL frame indicates that control did not fall out of the
+      // test expression.
+      Branch(false, node->break_target());
+    }
+    if (has_valid_frame() || body.is_linked()) {
+      body.Bind();
+    }
+  }
+
+  if (has_valid_frame()) {
+    CheckStack();  // TODO(1222600): ignore if body contains calls.
+    VisitAndSpill(node->body());
+
+    // If control flow can fall out of the body, jump back to the top.
+    if (has_valid_frame()) {
+      node->continue_target()->Jump();
+    }
+  }
+  if (node->break_target()->is_linked()) {
+    node->break_target()->Bind();
+  }
+  ASSERT(!has_valid_frame() || frame_->height() == original_height);
+}
+
+
+void CodeGenerator::VisitForStatement(ForStatement* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
+  VirtualFrame::SpilledScope spilled_scope;
+  Comment cmnt(masm_, "[ ForStatement");
+  CodeForStatementPosition(node);
+  if (node->init() != NULL) {
+    VisitAndSpill(node->init());
+  }
+
+  // If the test is never true there is no need to compile the test or
+  // body.
+  ConditionAnalysis info = AnalyzeCondition(node->cond());
+  if (info == ALWAYS_FALSE) return;
+
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+
+  // If there is no update statement, label the top of the loop with the
+  // continue target, otherwise with the loop target.
+  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
+  if (node->next() == NULL) {
+    node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+    node->continue_target()->Bind();
+  } else {
+    node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+    loop.Bind();
+  }
+
+  // If the test is always true, there is no need to compile it.
+  if (info == DONT_KNOW) {
+    JumpTarget body;
+    LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
+                          &body, node->break_target(), true);
+    if (has_valid_frame()) {
+      Branch(false, node->break_target());
+    }
+    if (has_valid_frame() || body.is_linked()) {
+      body.Bind();
+    }
+  }
+
+  if (has_valid_frame()) {
+    CheckStack();  // TODO(1222600): ignore if body contains calls.
+    VisitAndSpill(node->body());
+
+    if (node->next() == NULL) {
+      // If there is no update statement and control flow can fall out
+      // of the loop, jump directly to the continue label.
+      if (has_valid_frame()) {
+        node->continue_target()->Jump();
+      }
+    } else {
+      // If there is an update statement and control flow can reach it
+      // via falling out of the body of the loop or continuing, we
+      // compile the update statement.
+      if (node->continue_target()->is_linked()) {
+        node->continue_target()->Bind();
+      }
+      if (has_valid_frame()) {
+        // Record source position of the statement as this code which is
+        // after the code for the body actually belongs to the loop
+        // statement and not the body.
+        CodeForStatementPosition(node);
+        VisitAndSpill(node->next());
+        loop.Jump();
+      }
+    }
+  }
+  if (node->break_target()->is_linked()) {
+    node->break_target()->Bind();
+  }
   ASSERT(!has_valid_frame() || frame_->height() == original_height);
 }
 
@@ -1918,12 +1927,12 @@
 }
 
 
-void CodeGenerator::VisitTryCatch(TryCatch* node) {
+void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
   VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ TryCatch");
+  Comment cmnt(masm_, "[ TryCatchStatement");
   CodeForStatementPosition(node);
 
   JumpTarget try_block;
@@ -2043,12 +2052,12 @@
 }
 
 
-void CodeGenerator::VisitTryFinally(TryFinally* node) {
+void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
   VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ TryFinally");
+  Comment cmnt(masm_, "[ TryFinallyStatement");
   CodeForStatementPosition(node);
 
   // State: Used to keep track of reason for entering the finally
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 1eb0932..7b50b01 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -147,6 +147,15 @@
                                Handle<Script> script,
                                bool is_eval);
 
+  // Printing of AST, etc. as requested by flags.
+  static void MakeCodePrologue(FunctionLiteral* fun);
+
+  // Allocate and install the code.
+  static Handle<Code> MakeCodeEpilogue(FunctionLiteral* fun,
+                                       MacroAssembler* masm,
+                                       Code::Flags flags,
+                                       Handle<Script> script);
+
 #ifdef ENABLE_LOGGING_AND_PROFILING
   static bool ShouldGenerateLog(Expression* type);
 #endif
@@ -156,6 +165,8 @@
                               bool is_toplevel,
                               Handle<Script> script);
 
+  static void RecordPositions(MacroAssembler* masm, int pos);
+
   // Accessors
   MacroAssembler* masm() { return masm_; }
 
@@ -365,6 +376,14 @@
   inline void GenerateMathSin(ZoneList<Expression*>* args);
   inline void GenerateMathCos(ZoneList<Expression*>* args);
 
+  // Simple condition analysis.
+  enum ConditionAnalysis {
+    ALWAYS_TRUE,
+    ALWAYS_FALSE,
+    DONT_KNOW
+  };
+  ConditionAnalysis AnalyzeCondition(Expression* cond);
+
   // Methods used to indicate which source code is generated for. Source
   // positions are collected by the assembler and emitted with the relocation
   // information.
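
AnalyzeCondition itself is declared here but not shown in this patch
excerpt; reconstructed from the inline analysis it replaces in
VisitLoopStatement (see the codegen-arm.cc hunk above), it plausibly
reduces to:

    // Plausible shape only, not the verbatim implementation.
    CodeGenerator::ConditionAnalysis CodeGenerator::AnalyzeCondition(
        Expression* cond) {
      if (cond == NULL) return ALWAYS_TRUE;  // e.g. for (;;) has no condition
      Literal* lit = cond->AsLiteral();
      if (lit == NULL) return DONT_KNOW;     // must be evaluated at runtime
      if (lit->IsTrue()) return ALWAYS_TRUE;
      if (lit->IsFalse()) return ALWAYS_FALSE;
      return DONT_KNOW;                      // any other literal
    }
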
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
index 4f45175..ef33653 100644
--- a/src/arm/debug-arm.cc
+++ b/src/arm/debug-arm.cc
@@ -68,7 +68,7 @@
 // A debug break in the exit code is identified by a call.
 bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
   ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
-  return rinfo->IsCallInstruction();
+  return rinfo->IsPatchedReturnSequence();
 }
 
 
diff --git a/src/arm/fast-codegen-arm.cc b/src/arm/fast-codegen-arm.cc
new file mode 100644
index 0000000..d2e620c
--- /dev/null
+++ b/src/arm/fast-codegen-arm.cc
@@ -0,0 +1,176 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "fast-codegen.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+// Generate code for a JS function.  On entry to the function the receiver
+// and arguments have been pushed on the stack left to right.  The actual
+// argument count matches the formal parameter count expected by the
+// function.
+//
+// The live registers are:
+//   o r1: the JS function object being called (i.e., ourselves)
+//   o cp: our context
+//   o fp: our caller's frame pointer
+//   o sp: stack pointer
+//   o lr: return address
+//
+// The function builds a JS frame.  Please see JavaScriptFrameConstants in
+// frames-arm.h for its layout.
+void FastCodeGenerator::Generate(FunctionLiteral* fun) {
+  function_ = fun;
+  // ARM does NOT call SetFunctionPosition.
+
+  __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+  // Adjust fp to point to caller's fp.
+  __ add(fp, sp, Operand(2 * kPointerSize));
+
+  { Comment cmnt(masm_, "[ Allocate locals");
+    int locals_count = fun->scope()->num_stack_slots();
+    if (locals_count > 0) {
+      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+    }
+    if (FLAG_check_stack) {
+      __ LoadRoot(r2, Heap::kStackLimitRootIndex);
+    }
+    for (int i = 0; i < locals_count; i++) {
+      __ push(ip);
+    }
+  }
+
+  if (FLAG_check_stack) {
+    // Put the lr setup instruction in the delay slot.  The kInstrSize is
+    // added to the implicit 8 byte offset that always applies to operations
+    // with pc and gives a return address 12 bytes down.
+    Comment cmnt(masm_, "[ Stack check");
+    __ add(lr, pc, Operand(Assembler::kInstrSize));
+    __ cmp(sp, Operand(r2));
+    StackCheckStub stub;
+    __ mov(pc,
+           Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
+                   RelocInfo::CODE_TARGET),
+           LeaveCC,
+           lo);
+  }
+
+  { Comment cmnt(masm_, "[ Body");
+    VisitStatements(fun->body());
+  }
+
+  { Comment cmnt(masm_, "[ return <undefined>;");
+    // Emit a 'return undefined' in case control fell off the end of the
+    // body.
+    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+    SetReturnPosition(fun);
+    __ RecordJSReturn();
+    __ mov(sp, fp);
+    __ ldm(ia_w, sp, fp.bit() | lr.bit());
+    int num_parameters = function_->scope()->num_parameters();
+    __ add(sp, sp, Operand((num_parameters + 1) * kPointerSize));
+    __ Jump(lr);
+  }
+}
+
+
+void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
+  Comment cmnt(masm_, "[ ExpressionStatement");
+  SetStatementPosition(stmt);
+  Visit(stmt->expression());
+}
+
+
+void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
+  Comment cmnt(masm_, "[ ReturnStatement");
+  SetStatementPosition(stmt);
+  Visit(stmt->expression());
+  __ pop(r0);
+  __ RecordJSReturn();
+  __ mov(sp, fp);
+  __ ldm(ia_w, sp, fp.bit() | lr.bit());
+  int num_parameters = function_->scope()->num_parameters();
+  __ add(sp, sp, Operand((num_parameters + 1) * kPointerSize));
+  __ Jump(lr);
+}
+
+
+void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+  Comment cmnt(masm_, "[ VariableProxy");
+  Expression* rewrite = expr->var()->rewrite();
+  ASSERT(rewrite != NULL);
+
+  Slot* slot = rewrite->AsSlot();
+  ASSERT(slot != NULL);
+  { Comment cmnt(masm_, "[ Slot");
+    if (expr->location().is_temporary()) {
+      __ ldr(ip, MemOperand(fp, SlotOffset(slot)));
+      __ push(ip);
+    } else {
+      ASSERT(expr->location().is_nowhere());
+    }
+  }
+}
+
+
+void FastCodeGenerator::VisitLiteral(Literal* expr) {
+  Comment cmnt(masm_, "[ Literal");
+  if (expr->location().is_temporary()) {
+    __ mov(ip, Operand(expr->handle()));
+    __ push(ip);
+  } else {
+    ASSERT(expr->location().is_nowhere());
+  }
+}
+
+
+void FastCodeGenerator::VisitAssignment(Assignment* expr) {
+  Comment cmnt(masm_, "[ Assignment");
+  ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR);
+
+  Visit(expr->value());
+
+  Variable* var = expr->target()->AsVariableProxy()->AsVariable();
+  ASSERT(var != NULL && var->slot() != NULL);
+
+  if (expr->location().is_temporary()) {
+    __ ldr(ip, MemOperand(sp));
+  } else {
+    ASSERT(expr->location().is_nowhere());
+    __ pop(ip);
+  }
+  __ str(ip, MemOperand(fp, SlotOffset(var->slot())));
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index ee9d70d..e37bb5e 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -246,7 +246,6 @@
 
   // Call a code stub.
   void CallStub(CodeStub* stub, Condition cond = al);
-  void CallJSExitStub(CodeStub* stub);
 
   // Return from a code stub after popping its arguments.
   void StubReturn(int argc);
diff --git a/src/array.js b/src/array.js
index f8e63d0..94d74a5 100644
--- a/src/array.js
+++ b/src/array.js
@@ -1058,6 +1058,10 @@
   return current;
 }
 
+// ES5, 15.4.3.2
+function ArrayIsArray(obj) {
+  return IS_ARRAY(obj);
+}
 
 // -------------------------------------------------------------------
 
@@ -1075,6 +1079,11 @@
   // object.
   %SetProperty($Array.prototype, "constructor", $Array, DONT_ENUM);
 
+  // Setup non-enumerable functions on the Array object.
+  InstallFunctions($Array, DONT_ENUM, $Array(
+    "isArray", ArrayIsArray
+  ));
+
   // Setup non-enumerable functions of the Array.prototype object and
   // set their names.
   InstallFunctionsOnHiddenPrototype($Array.prototype, DONT_ENUM, $Array(
@@ -1098,8 +1107,9 @@
     "indexOf", ArrayIndexOf,
     "lastIndexOf", ArrayLastIndexOf,
     "reduce", ArrayReduce,
-    "reduceRight", ArrayReduceRight));
-
+    "reduceRight", ArrayReduceRight
+  ));
+
   // Manipulate the length of some of the functions to meet
   // expectations set by ECMA-262 or Mozilla.
   UpdateFunctionLengths({
diff --git a/src/assembler.cc b/src/assembler.cc
index d81b4b0..34595f8 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -343,9 +343,6 @@
       if (SetMode(RelocInfo::EMBEDDED_OBJECT)) return;
     } else if (tag == kCodeTargetTag) {
       ReadTaggedPC();
-      if (*(reinterpret_cast<int*>(rinfo_.pc())) == 0x61) {
-        tag = 0;
-      }
       if (SetMode(RelocInfo::CODE_TARGET)) return;
     } else if (tag == kPositionTag) {
       ReadTaggedPC();
diff --git a/src/assembler.h b/src/assembler.h
index 323e06a..21a66dd 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -217,10 +217,10 @@
 
   // Patch the code with a call.
   void PatchCodeWithCall(Address target, int guard_bytes);
-  // Check whether the current instruction is currently a call
-  // sequence (whether naturally or a return sequence overwritten
-  // to enter the debugger).
-  INLINE(bool IsCallInstruction());
+
+  // Check whether this return sequence has been patched
+  // with a call to the debugger.
+  INLINE(bool IsPatchedReturnSequence());
 
 #ifdef ENABLE_DISASSEMBLER
   // Printing
diff --git a/src/ast.cc b/src/ast.cc
index 692bec0..f6864b8 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -91,20 +91,6 @@
 }
 
 
-#ifdef DEBUG
-
-const char* LoopStatement::OperatorString() const {
-  switch (type()) {
-    case DO_LOOP: return "DO";
-    case FOR_LOOP: return "FOR";
-    case WHILE_LOOP: return "WHILE";
-  }
-  return NULL;
-}
-
-#endif  // DEBUG
-
-
 Token::Value Assignment::binary_op() const {
   switch (op_) {
     case Token::ASSIGN_BIT_OR: return Token::BIT_OR;
@@ -187,6 +173,13 @@
 // Implementation of AstVisitor
 
 
+void AstVisitor::VisitDeclarations(ZoneList<Declaration*>* declarations) {
+  for (int i = 0; i < declarations->length(); i++) {
+    Visit(declarations->at(i));
+  }
+}
+
+
 void AstVisitor::VisitStatements(ZoneList<Statement*>* statements) {
   for (int i = 0; i < statements->length(); i++) {
     Visit(statements->at(i));
diff --git a/src/ast.h b/src/ast.h
index 6a1cdf5..42154f6 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -28,14 +28,14 @@
 #ifndef V8_AST_H_
 #define V8_AST_H_
 
+#include "location.h"
 #include "execution.h"
 #include "factory.h"
+#include "jsregexp.h"
+#include "jump-target.h"
 #include "runtime.h"
 #include "token.h"
 #include "variables.h"
-#include "macro-assembler.h"
-#include "jsregexp.h"
-#include "jump-target.h"
 
 namespace v8 {
 namespace internal {
@@ -64,10 +64,12 @@
   V(WithEnterStatement)                         \
   V(WithExitStatement)                          \
   V(SwitchStatement)                            \
-  V(LoopStatement)                              \
+  V(DoWhileStatement)                           \
+  V(WhileStatement)                             \
+  V(ForStatement)                               \
   V(ForInStatement)                             \
-  V(TryCatch)                                   \
-  V(TryFinally)                                 \
+  V(TryCatchStatement)                          \
+  V(TryFinallyStatement)                        \
   V(DebuggerStatement)
 
 #define EXPRESSION_NODE_LIST(V)                 \
@@ -160,6 +162,8 @@
 
 class Expression: public AstNode {
  public:
+  Expression() : location_(Location::Temporary()) {}
+
   virtual Expression* AsExpression()  { return this; }
 
   virtual bool IsValidJSON() { return false; }
@@ -173,8 +177,12 @@
   // Static type information for this expression.
   SmiAnalysis* type() { return &type_; }
 
+  Location location() { return location_; }
+  void set_location(Location loc) { location_ = loc; }
+
  private:
   SmiAnalysis type_;
+  Location location_;
 };
 
 
@@ -294,13 +302,59 @@
 };
 
 
-class LoopStatement: public IterationStatement {
+class DoWhileStatement: public IterationStatement {
  public:
-  enum Type { DO_LOOP, FOR_LOOP, WHILE_LOOP };
+  explicit DoWhileStatement(ZoneStringList* labels)
+      : IterationStatement(labels), cond_(NULL) {
+  }
 
-  LoopStatement(ZoneStringList* labels, Type type)
+  void Initialize(Expression* cond, Statement* body) {
+    IterationStatement::Initialize(body);
+    cond_ = cond;
+  }
+
+  virtual void Accept(AstVisitor* v);
+
+  Expression* cond() const { return cond_; }
+
+ private:
+  Expression* cond_;
+};
+
+
+class WhileStatement: public IterationStatement {
+ public:
+  explicit WhileStatement(ZoneStringList* labels)
       : IterationStatement(labels),
-        type_(type),
+        cond_(NULL),
+        may_have_function_literal_(true) {
+  }
+
+  void Initialize(Expression* cond, Statement* body) {
+    IterationStatement::Initialize(body);
+    cond_ = cond;
+  }
+
+  virtual void Accept(AstVisitor* v);
+
+  Expression* cond() const { return cond_; }
+  bool may_have_function_literal() const {
+    return may_have_function_literal_;
+  }
+
+ private:
+  Expression* cond_;
+  // True if there is a function literal subexpression in the condition.
+  bool may_have_function_literal_;
+
+  friend class AstOptimizer;
+};
+
+
+class ForStatement: public IterationStatement {
+ public:
+  explicit ForStatement(ZoneStringList* labels)
+      : IterationStatement(labels),
         init_(NULL),
         cond_(NULL),
         next_(NULL),
@@ -311,8 +365,6 @@
                   Expression* cond,
                   Statement* next,
                   Statement* body) {
-    ASSERT(init == NULL || type_ == FOR_LOOP);
-    ASSERT(next == NULL || type_ == FOR_LOOP);
     IterationStatement::Initialize(body);
     init_ = init;
     cond_ = cond;
@@ -321,7 +373,6 @@
 
   virtual void Accept(AstVisitor* v);
 
-  Type type() const  { return type_; }
   Statement* init() const  { return init_; }
   Expression* cond() const  { return cond_; }
   Statement* next() const  { return next_; }
@@ -329,12 +380,7 @@
     return may_have_function_literal_;
   }
 
-#ifdef DEBUG
-  const char* OperatorString() const;
-#endif
-
  private:
-  Type type_;
   Statement* init_;
   Expression* cond_;
   Statement* next_;
@@ -569,9 +615,11 @@
 };
 
 
-class TryCatch: public TryStatement {
+class TryCatchStatement: public TryStatement {
  public:
-  TryCatch(Block* try_block, Expression* catch_var, Block* catch_block)
+  TryCatchStatement(Block* try_block,
+                    Expression* catch_var,
+                    Block* catch_block)
       : TryStatement(try_block),
         catch_var_(catch_var),
         catch_block_(catch_block) {
@@ -589,9 +637,9 @@
 };
 
 
-class TryFinally: public TryStatement {
+class TryFinallyStatement: public TryStatement {
  public:
-  TryFinally(Block* try_block, Block* finally_block)
+  TryFinallyStatement(Block* try_block, Block* finally_block)
       : TryStatement(try_block),
         finally_block_(finally_block) { }
 
@@ -1212,7 +1260,6 @@
                   Scope* scope,
                   ZoneList<Statement*>* body,
                   int materialized_literal_count,
-                  bool contains_array_literal,
                   int expected_property_count,
                   bool has_only_this_property_assignments,
                   bool has_only_simple_this_property_assignments,
@@ -1225,7 +1272,6 @@
         scope_(scope),
         body_(body),
         materialized_literal_count_(materialized_literal_count),
-        contains_array_literal_(contains_array_literal),
         expected_property_count_(expected_property_count),
         has_only_this_property_assignments_(has_only_this_property_assignments),
         has_only_simple_this_property_assignments_(
@@ -1258,7 +1304,6 @@
   bool is_expression() const { return is_expression_; }
 
   int materialized_literal_count() { return materialized_literal_count_; }
-  bool contains_array_literal() { return contains_array_literal_; }
   int expected_property_count() { return expected_property_count_; }
   bool has_only_this_property_assignments() {
       return has_only_this_property_assignments_;
@@ -1293,7 +1338,6 @@
   Scope* scope_;
   ZoneList<Statement*>* body_;
   int materialized_literal_count_;
-  bool contains_array_literal_;
   int expected_property_count_;
   bool has_only_this_property_assignments_;
   bool has_only_simple_this_property_assignments_;
@@ -1690,6 +1734,7 @@
   void Visit(AstNode* node) { node->Accept(this); }
 
   // Iteration
+  virtual void VisitDeclarations(ZoneList<Declaration*>* declarations);
   virtual void VisitStatements(ZoneList<Statement*>* statements);
   virtual void VisitExpressions(ZoneList<Expression*>* expressions);
 
diff --git a/src/checks.h b/src/checks.h
index b302e5b..3b0c851 100644
--- a/src/checks.h
+++ b/src/checks.h
@@ -80,6 +80,27 @@
   }
 }
 
+// Helper function used by the CHECK_EQ function when given int64_t
+// arguments.  Should not be called directly.
+static inline void CheckEqualsHelper(const char* file, int line,
+                                     const char* expected_source,
+                                     int64_t expected,
+                                     const char* value_source,
+                                     int64_t value) {
+  if (expected != value) {
+    // Print int64_t values in hex, as two int32s,
+    // to avoid platform dependencies.
+    V8_Fatal(file, line,
+             "CHECK_EQ(%s, %s) failed\n#"
+             "   Expected: 0x%08x%08x\n#   Found: 0x%08x%08x",
+             expected_source, value_source,
+             static_cast<uint32_t>(expected >> 32),
+             static_cast<uint32_t>(expected),
+             static_cast<uint32_t>(value >> 32),
+             static_cast<uint32_t>(value));
+  }
+}
+
 
 // Helper function used by the CHECK_NE function when given int
 // arguments.  Should not be called directly.
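
The two-halves format prints the same bytes on platforms with and without
a 64-bit printf length modifier.  A quick standalone check of the
formatting (the value is chosen purely for illustration):

    #include <cstdint>
    #include <cstdio>

    int main() {
      int64_t expected = 0x1122334455667788LL;
      // Same split as CheckEqualsHelper: high 32 bits, then low 32 bits.
      std::printf("Expected: 0x%08x%08x\n",
                  static_cast<uint32_t>(expected >> 32),
                  static_cast<uint32_t>(expected));
      // Prints: Expected: 0x1122334455667788
      return 0;
    }
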
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 9c24c60..586c948 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -132,8 +132,6 @@
       return "SetProperty";
     case InvokeBuiltin:
       return "InvokeBuiltin";
-    case JSExit:
-      return "JSExit";
     case ConvertToDouble:
       return "ConvertToDouble";
     case WriteInt32ToHeapNumber:
diff --git a/src/code-stubs.h b/src/code-stubs.h
index ae86c20..91d951f 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -56,7 +56,6 @@
     GetProperty,   // ARM only
     SetProperty,   // ARM only
     InvokeBuiltin,  // ARM only
-    JSExit,        // ARM only
     RegExpCEntry,  // ARM only
     NUMBER_OF_IDS
   };
diff --git a/src/codegen.cc b/src/codegen.cc
index a18fa0f..096a1a1 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -125,102 +125,114 @@
 }
 
 
-// Generate the code. Takes a function literal, generates code for it, assemble
-// all the pieces into a Code object. This function is only to be called by
-// the compiler.cc code.
-Handle<Code> CodeGenerator::MakeCode(FunctionLiteral* flit,
-                                     Handle<Script> script,
-                                     bool is_eval) {
-#ifdef ENABLE_DISASSEMBLER
-  bool print_code = Bootstrapper::IsActive()
-      ? FLAG_print_builtin_code
-      : FLAG_print_code;
-#endif
-
+void CodeGenerator::MakeCodePrologue(FunctionLiteral* fun) {
 #ifdef DEBUG
   bool print_source = false;
   bool print_ast = false;
+  bool print_json_ast = false;
   const char* ftype;
 
   if (Bootstrapper::IsActive()) {
     print_source = FLAG_print_builtin_source;
     print_ast = FLAG_print_builtin_ast;
+    print_json_ast = FLAG_print_builtin_json_ast;
     ftype = "builtin";
   } else {
     print_source = FLAG_print_source;
     print_ast = FLAG_print_ast;
+    print_json_ast = FLAG_print_json_ast;
     ftype = "user-defined";
   }
 
   if (FLAG_trace_codegen || print_source || print_ast) {
     PrintF("*** Generate code for %s function: ", ftype);
-    flit->name()->ShortPrint();
+    fun->name()->ShortPrint();
     PrintF(" ***\n");
   }
 
   if (print_source) {
-    PrintF("--- Source from AST ---\n%s\n", PrettyPrinter().PrintProgram(flit));
+    PrintF("--- Source from AST ---\n%s\n", PrettyPrinter().PrintProgram(fun));
   }
 
   if (print_ast) {
-    PrintF("--- AST ---\n%s\n", AstPrinter().PrintProgram(flit));
+    PrintF("--- AST ---\n%s\n", AstPrinter().PrintProgram(fun));
+  }
+
+  if (print_json_ast) {
+    JsonAstBuilder builder;
+    PrintF("%s", builder.BuildProgram(fun));
   }
 #endif  // DEBUG
+}
 
-  // Generate code.
-  const int initial_buffer_size = 4 * KB;
-  CodeGenerator cgen(initial_buffer_size, script, is_eval);
-  CodeGeneratorScope scope(&cgen);
-  cgen.GenCode(flit);
-  if (cgen.HasStackOverflow()) {
-    ASSERT(!Top::has_pending_exception());
-    return Handle<Code>::null();
-  }
 
-  // Allocate and install the code.  Time the rest of this function as
-  // code creation.
-  HistogramTimerScope timer(&Counters::code_creation);
+Handle<Code> CodeGenerator::MakeCodeEpilogue(FunctionLiteral* fun,
+                                             MacroAssembler* masm,
+                                             Code::Flags flags,
+                                             Handle<Script> script) {
+  // Allocate and install the code.
   CodeDesc desc;
-  cgen.masm()->GetCode(&desc);
-  ZoneScopeInfo sinfo(flit->scope());
-  InLoopFlag in_loop = (cgen.loop_nesting() != 0) ? IN_LOOP : NOT_IN_LOOP;
-  Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, in_loop);
-  Handle<Code> code = Factory::NewCode(desc,
-                                       &sinfo,
-                                       flags,
-                                       cgen.masm()->CodeObject());
+  masm->GetCode(&desc);
+  ZoneScopeInfo sinfo(fun->scope());
+  Handle<Code> code =
+      Factory::NewCode(desc, &sinfo, flags, masm->CodeObject());
 
   // Add unresolved entries in the code to the fixup list.
-  Bootstrapper::AddFixup(*code, cgen.masm());
+  Bootstrapper::AddFixup(*code, masm);
 
 #ifdef ENABLE_DISASSEMBLER
+  bool print_code = Bootstrapper::IsActive()
+      ? FLAG_print_builtin_code
+      : FLAG_print_code;
   if (print_code) {
     // Print the source code if available.
     if (!script->IsUndefined() && !script->source()->IsUndefined()) {
       PrintF("--- Raw source ---\n");
       StringInputBuffer stream(String::cast(script->source()));
-      stream.Seek(flit->start_position());
-      // flit->end_position() points to the last character in the stream. We
+      stream.Seek(fun->start_position());
+      // fun->end_position() points to the last character in the stream. We
       // need to compensate by adding one to calculate the length.
-      int source_len = flit->end_position() - flit->start_position() + 1;
+      int source_len = fun->end_position() - fun->start_position() + 1;
       for (int i = 0; i < source_len; i++) {
         if (stream.has_more()) PrintF("%c", stream.GetNext());
       }
       PrintF("\n\n");
     }
     PrintF("--- Code ---\n");
-    code->Disassemble(*flit->name()->ToCString());
+    code->Disassemble(*fun->name()->ToCString());
   }
 #endif  // ENABLE_DISASSEMBLER
 
   if (!code.is_null()) {
     Counters::total_compiled_code_size.Increment(code->instruction_size());
   }
-
   return code;
 }
 
 
+// Generate the code. Takes a function literal, generates code for it, and
+// assembles all the pieces into a Code object. This function is only to be
+// called by the compiler.cc code.
+Handle<Code> CodeGenerator::MakeCode(FunctionLiteral* fun,
+                                     Handle<Script> script,
+                                     bool is_eval) {
+  MakeCodePrologue(fun);
+  // Generate code.
+  const int kInitialBufferSize = 4 * KB;
+  CodeGenerator cgen(kInitialBufferSize, script, is_eval);
+  CodeGeneratorScope scope(&cgen);
+  cgen.GenCode(fun);
+  if (cgen.HasStackOverflow()) {
+    ASSERT(!Top::has_pending_exception());
+    return Handle<Code>::null();
+  }
+
+  InLoopFlag in_loop = (cgen.loop_nesting() != 0) ? IN_LOOP : NOT_IN_LOOP;
+  Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, in_loop);
+  return MakeCodeEpilogue(fun, cgen.masm(), flags, script);
+}
+
+
 #ifdef ENABLE_LOGGING_AND_PROFILING
 
 bool CodeGenerator::ShouldGenerateLog(Expression* type) {
@@ -314,7 +326,6 @@
   Handle<JSFunction> function =
       Factory::NewFunctionBoilerplate(node->name(),
                                       node->materialized_literal_count(),
-                                      node->contains_array_literal(),
                                       code);
   CodeGenerator::SetFunctionInfo(function, node, false, script_);
 
@@ -469,26 +480,45 @@
 }
 
 
-static inline void RecordPositions(CodeGenerator* cgen, int pos) {
+// Simple condition analysis.  ALWAYS_TRUE and ALWAYS_FALSE represent a
+// known result for the test expression, with no side effects.
+CodeGenerator::ConditionAnalysis CodeGenerator::AnalyzeCondition(
+    Expression* cond) {
+  if (cond == NULL) return ALWAYS_TRUE;
+
+  Literal* lit = cond->AsLiteral();
+  if (lit == NULL) return DONT_KNOW;
+
+  if (lit->IsTrue()) {
+    return ALWAYS_TRUE;
+  } else if (lit->IsFalse()) {
+    return ALWAYS_FALSE;
+  }
+
+  return DONT_KNOW;
+}
+
+
+void CodeGenerator::RecordPositions(MacroAssembler* masm, int pos) {
   if (pos != RelocInfo::kNoPosition) {
-    cgen->masm()->RecordStatementPosition(pos);
-    cgen->masm()->RecordPosition(pos);
+    masm->RecordStatementPosition(pos);
+    masm->RecordPosition(pos);
   }
 }
 
 
 void CodeGenerator::CodeForFunctionPosition(FunctionLiteral* fun) {
-  if (FLAG_debug_info) RecordPositions(this, fun->start_position());
+  if (FLAG_debug_info) RecordPositions(masm(), fun->start_position());
 }
 
 
 void CodeGenerator::CodeForReturnPosition(FunctionLiteral* fun) {
-  if (FLAG_debug_info) RecordPositions(this, fun->end_position());
+  if (FLAG_debug_info) RecordPositions(masm(), fun->end_position());
 }
 
 
 void CodeGenerator::CodeForStatementPosition(Statement* stmt) {
-  if (FLAG_debug_info) RecordPositions(this, stmt->statement_pos());
+  if (FLAG_debug_info) RecordPositions(masm(), stmt->statement_pos());
 }
 
 
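The refactoring above splits the old monolithic MakeCode into MakeCodePrologue
(tracing and AST printing) and MakeCodeEpilogue (Code-object allocation,
bootstrapper fixups, disassembly), so other backends can reuse both halves.
The resulting shape, schematically (hypothetical backend type; the real
signatures are the ones in the patch):

    // Any backend can now be framed as: prologue, generate, epilogue.
    Handle<Code> CompileWithBackend(SomeBackend* backend,
                                    FunctionLiteral* fun,
                                    Handle<Script> script,
                                    Code::Flags flags) {
      CodeGenerator::MakeCodePrologue(fun);            // shared tracing
      backend->Generate(fun);                          // backend-specific
      if (backend->HasStackOverflow()) return Handle<Code>::null();
      return CodeGenerator::MakeCodeEpilogue(fun, backend->masm(), flags,
                                             script);  // shared packaging
    }
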
diff --git a/src/codegen.h b/src/codegen.h
index d03f4b6..1209f36 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -36,6 +36,8 @@
 // The contract to the shared code is that the CodeGenerator is a subclass
 // of Visitor and that the following methods are available publicly:
 //   MakeCode
+//   MakeCodePrologue
+//   MakeCodeEpilogue
 //   SetFunctionInfo
 //   masm
 //   frame
@@ -46,6 +48,7 @@
 //   AddDeferred
 //   in_spilled_code
 //   set_in_spilled_code
+//   RecordPositions
 //
 // These methods are either used privately by the shared code or implemented as
 // shared code:
@@ -61,6 +64,7 @@
 //   FindInlineRuntimeLUT
 //   CheckForInlineRuntimeCall
 //   PatchInlineRuntimeEntry
+//   AnalyzeCondition
 //   CodeForFunctionPosition
 //   CodeForReturnPosition
 //   CodeForStatementPosition
diff --git a/src/compilation-cache.cc b/src/compilation-cache.cc
index 8dd9ec1..5427367 100644
--- a/src/compilation-cache.cc
+++ b/src/compilation-cache.cc
@@ -43,20 +43,22 @@
 static const int kEvalContextualGenerations = 1;
 static const int kRegExpGenerations = 1;
 #else
+// The number of ScriptGenerations is carefully chosen based on histograms.
+// See issue 458: http://code.google.com/p/v8/issues/detail?id=458
 static const int kScriptGenerations = 5;
 static const int kEvalGlobalGenerations = 2;
 static const int kEvalContextualGenerations = 2;
 static const int kRegExpGenerations = 2;
 #endif
 
-// Initial of each compilation cache table allocated.
+// Initial size of each compilation cache table allocated.
 static const int kInitialCacheSize = 64;
 
 // The compilation cache consists of several generational sub-caches which use
 // this class as a base class. A sub-cache contains a compilation cache table
-// for each generation of the sub-cache. As the same source code string has
-// different compiled code for scripts and evals. Internally, we use separate
-// sub-caches to avoid getting the wrong kind of result when looking up.
+// for each generation of the sub-cache. Since the same source code string has
+// different compiled code for scripts and evals, we use separate sub-caches
+// for different compilation modes, to avoid retrieving the wrong result.
 class CompilationSubCache {
  public:
   explicit CompilationSubCache(int generations): generations_(generations) {
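
As context for the comment above, a generational cache keeps one table per
generation and rotates them: lookups probe from youngest to oldest and promote
hits, while aging drops the oldest table. A self-contained sketch of that
policy (hypothetical types, not the V8 implementation):

    #include <map>

    template <typename Key, typename Value, int kGenerations>
    class GenerationalCache {
     public:
      // Probe youngest to oldest; promote hits so hot entries survive aging.
      bool Lookup(const Key& key, Value* result) {
        for (int g = 0; g < kGenerations; g++) {
          typename std::map<Key, Value>::iterator it = tables_[g].find(key);
          if (it == tables_[g].end()) continue;
          *result = it->second;
          if (g > 0) tables_[0][key] = it->second;  // promote
          return true;
        }
        return false;
      }

      // Called periodically (e.g. at GC): shift generations, drop the oldest.
      void Age() {
        for (int g = kGenerations - 1; g > 0; g--) {
          tables_[g].swap(tables_[g - 1]);
        }
        tables_[0].clear();
      }

     private:
      std::map<Key, Value> tables_[kGenerations];
    };
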
diff --git a/src/compiler.cc b/src/compiler.cc
index 6ba7a9a..2e55683 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -32,6 +32,7 @@
 #include "compilation-cache.h"
 #include "compiler.h"
 #include "debug.h"
+#include "fast-codegen.h"
 #include "oprofile-agent.h"
 #include "rewriter.h"
 #include "scopes.h"
@@ -40,6 +41,29 @@
 namespace v8 {
 namespace internal {
 
+
+class CodeGenSelector: public AstVisitor {
+ public:
+  enum CodeGenTag { NORMAL, FAST };
+
+  CodeGenSelector() : has_supported_syntax_(true) {}
+
+  CodeGenTag Select(FunctionLiteral* fun);
+
+ private:
+  void VisitStatements(ZoneList<Statement*>* stmts);
+
+  // AST node visit functions.
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+  bool has_supported_syntax_;
+
+  DISALLOW_COPY_AND_ASSIGN(CodeGenSelector);
+};
+
+
 static Handle<Code> MakeCode(FunctionLiteral* literal,
                              Handle<Script> script,
                              Handle<Context> context,
@@ -79,8 +103,15 @@
   }
 
   // Generate code and return it.
-  Handle<Code> result = CodeGenerator::MakeCode(literal, script, is_eval);
-  return result;
+  if (FLAG_fast_compiler) {
+    CodeGenSelector selector;
+    CodeGenSelector::CodeGenTag code_gen = selector.Select(literal);
+    if (code_gen == CodeGenSelector::FAST) {
+      return FastCodeGenerator::MakeCode(literal, script);
+    }
+    ASSERT(code_gen == CodeGenSelector::NORMAL);
+  }
+  return CodeGenerator::MakeCode(literal, script, is_eval);
 }
 
 
@@ -197,7 +228,6 @@
   Handle<JSFunction> fun =
       Factory::NewFunctionBoilerplate(lit->name(),
                                       lit->materialized_literal_count(),
-                                      lit->contains_array_literal(),
                                       code);
 
   ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
@@ -417,4 +447,271 @@
 }
 
 
+CodeGenSelector::CodeGenTag CodeGenSelector::Select(FunctionLiteral* fun) {
+  Scope* scope = fun->scope();
+
+  if (!scope->is_global_scope()) return NORMAL;
+  ASSERT(scope->num_heap_slots() == 0);
+  ASSERT(scope->arguments() == NULL);
+
+  if (!scope->declarations()->is_empty()) return NORMAL;
+  if (fun->materialized_literal_count() > 0) return NORMAL;
+  if (fun->body()->is_empty()) return NORMAL;
+
+  has_supported_syntax_ = true;
+  VisitStatements(fun->body());
+  return has_supported_syntax_ ? FAST : NORMAL;
+}
+
+
+#define BAILOUT(reason)                         \
+  do {                                          \
+    if (FLAG_trace_bailout) {                   \
+      PrintF("%s\n", reason);                   \
+    }                                           \
+    has_supported_syntax_ = false;              \
+    return;                                     \
+  } while (false)
+
+
+#define CHECK_BAILOUT                           \
+  do {                                          \
+    if (!has_supported_syntax_) return;         \
+  } while (false)
+
+
+void CodeGenSelector::VisitStatements(ZoneList<Statement*>* stmts) {
+  for (int i = 0, len = stmts->length(); i < len; i++) {
+    CHECK_BAILOUT;
+    Visit(stmts->at(i));
+  }
+}
+
+
+void CodeGenSelector::VisitDeclaration(Declaration* decl) {
+  BAILOUT("Declaration");
+}
+
+
+void CodeGenSelector::VisitBlock(Block* stmt) {
+  BAILOUT("Block");
+}
+
+
+void CodeGenSelector::VisitExpressionStatement(ExpressionStatement* stmt) {
+  Expression* expr = stmt->expression();
+  Visit(expr);
+  CHECK_BAILOUT;
+  expr->set_location(Location::Nowhere());
+}
+
+
+void CodeGenSelector::VisitEmptyStatement(EmptyStatement* stmt) {
+  BAILOUT("EmptyStatement");
+}
+
+
+void CodeGenSelector::VisitIfStatement(IfStatement* stmt) {
+  BAILOUT("IfStatement");
+}
+
+
+void CodeGenSelector::VisitContinueStatement(ContinueStatement* stmt) {
+  BAILOUT("ContinueStatement");
+}
+
+
+void CodeGenSelector::VisitBreakStatement(BreakStatement* stmt) {
+  BAILOUT("BreakStatement");
+}
+
+
+void CodeGenSelector::VisitReturnStatement(ReturnStatement* stmt) {
+  Visit(stmt->expression());
+}
+
+
+void CodeGenSelector::VisitWithEnterStatement(WithEnterStatement* stmt) {
+  BAILOUT("WithEnterStatement");
+}
+
+
+void CodeGenSelector::VisitWithExitStatement(WithExitStatement* stmt) {
+  BAILOUT("WithExitStatement");
+}
+
+
+void CodeGenSelector::VisitSwitchStatement(SwitchStatement* stmt) {
+  BAILOUT("SwitchStatement");
+}
+
+
+void CodeGenSelector::VisitDoWhileStatement(DoWhileStatement* stmt) {
+  BAILOUT("DoWhileStatement");
+}
+
+
+void CodeGenSelector::VisitWhileStatement(WhileStatement* stmt) {
+  BAILOUT("WhileStatement");
+}
+
+
+void CodeGenSelector::VisitForStatement(ForStatement* stmt) {
+  BAILOUT("ForStatement");
+}
+
+
+void CodeGenSelector::VisitForInStatement(ForInStatement* stmt) {
+  BAILOUT("ForInStatement");
+}
+
+
+void CodeGenSelector::VisitTryCatchStatement(TryCatchStatement* stmt) {
+  BAILOUT("TryCatchStatement");
+}
+
+
+void CodeGenSelector::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+  BAILOUT("TryFinallyStatement");
+}
+
+
+void CodeGenSelector::VisitDebuggerStatement(DebuggerStatement* stmt) {
+  BAILOUT("DebuggerStatement");
+}
+
+
+void CodeGenSelector::VisitFunctionLiteral(FunctionLiteral* expr) {
+  BAILOUT("FunctionLiteral");
+}
+
+
+void CodeGenSelector::VisitFunctionBoilerplateLiteral(
+    FunctionBoilerplateLiteral* expr) {
+  BAILOUT("FunctionBoilerplateLiteral");
+}
+
+
+void CodeGenSelector::VisitConditional(Conditional* expr) {
+  BAILOUT("Conditional");
+}
+
+
+void CodeGenSelector::VisitSlot(Slot* expr) {
+  Slot::Type type = expr->type();
+  if (type != Slot::PARAMETER && type != Slot::LOCAL) {
+    BAILOUT("non-parameter/non-local slot reference");
+  }
+}
+
+
+void CodeGenSelector::VisitVariableProxy(VariableProxy* expr) {
+  Expression* rewrite = expr->var()->rewrite();
+  if (rewrite == NULL) BAILOUT("global variable reference");
+  Visit(rewrite);
+}
+
+
+void CodeGenSelector::VisitLiteral(Literal* expr) {
+  // All literals are supported.
+}
+
+
+void CodeGenSelector::VisitRegExpLiteral(RegExpLiteral* expr) {
+  BAILOUT("RegExpLiteral");
+}
+
+
+void CodeGenSelector::VisitObjectLiteral(ObjectLiteral* expr) {
+  BAILOUT("ObjectLiteral");
+}
+
+
+void CodeGenSelector::VisitArrayLiteral(ArrayLiteral* expr) {
+  BAILOUT("ArrayLiteral");
+}
+
+
+void CodeGenSelector::VisitCatchExtensionObject(CatchExtensionObject* expr) {
+  BAILOUT("CatchExtensionObject");
+}
+
+
+void CodeGenSelector::VisitAssignment(Assignment* expr) {
+  // We support plain non-compound assignments to parameters and
+  // non-context (stack-allocated) locals.
+  if (expr->starts_initialization_block()) BAILOUT("initialization block");
+
+  Token::Value op = expr->op();
+  if (op == Token::INIT_CONST) BAILOUT("initialize constant");
+  if (op != Token::ASSIGN && op != Token::INIT_VAR) {
+    BAILOUT("compound assignment");
+  }
+
+  Variable* var = expr->target()->AsVariableProxy()->AsVariable();
+  if (var == NULL || var->is_global()) BAILOUT("non-variable assignment");
+
+  ASSERT(var->slot() != NULL);
+  Slot::Type type = var->slot()->type();
+  if (type != Slot::PARAMETER && type != Slot::LOCAL) {
+    BAILOUT("non-parameter/non-local slot assignment");
+  }
+
+  Visit(expr->value());
+}
+
+
+void CodeGenSelector::VisitThrow(Throw* expr) {
+  BAILOUT("Throw");
+}
+
+
+void CodeGenSelector::VisitProperty(Property* expr) {
+  BAILOUT("Property");
+}
+
+
+void CodeGenSelector::VisitCall(Call* expr) {
+  BAILOUT("Call");
+}
+
+
+void CodeGenSelector::VisitCallNew(CallNew* expr) {
+  BAILOUT("CallNew");
+}
+
+
+void CodeGenSelector::VisitCallRuntime(CallRuntime* expr) {
+  BAILOUT("CallRuntime");
+}
+
+
+void CodeGenSelector::VisitUnaryOperation(UnaryOperation* expr) {
+  BAILOUT("UnaryOperation");
+}
+
+
+void CodeGenSelector::VisitCountOperation(CountOperation* expr) {
+  BAILOUT("CountOperation");
+}
+
+
+void CodeGenSelector::VisitBinaryOperation(BinaryOperation* expr) {
+  BAILOUT("BinaryOperation");
+}
+
+
+void CodeGenSelector::VisitCompareOperation(CompareOperation* expr) {
+  BAILOUT("CompareOperation");
+}
+
+
+void CodeGenSelector::VisitThisFunction(ThisFunction* expr) {
+  BAILOUT("ThisFunction");
+}
+
+#undef BAILOUT
+#undef CHECK_BAILOUT
+
+
 } }  // namespace v8::internal
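
The BAILOUT/CHECK_BAILOUT pair above implements give-up-on-first-unsupported-
node: BAILOUT records the reason and returns from the current visit, and
CHECK_BAILOUT stops iterating once the sticky flag is cleared. The same
pattern in a self-contained sketch (hypothetical node kinds, not V8 code):

    #include <stdio.h>

    class SupportChecker {
     public:
      SupportChecker() : supported_(true) {}

      // Returns true only if every node is of the (pretend) supported kind 0.
      bool CheckAll(const int* kinds, int count) {
        supported_ = true;
        for (int i = 0; i < count; i++) {
          if (!supported_) return false;   // CHECK_BAILOUT equivalent
          VisitNode(kinds[i]);
        }
        return supported_;
      }

     private:
      void VisitNode(int kind) {
        if (kind != 0) {                   // BAILOUT equivalent
          printf("bailout: node kind %d\n", kind);
          supported_ = false;              // sticky, like has_supported_syntax_
        }
      }

      bool supported_;
    };
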
diff --git a/src/d8-posix.cc b/src/d8-posix.cc
index fe130ce..2535ce0 100644
--- a/src/d8-posix.cc
+++ b/src/d8-posix.cc
@@ -311,7 +311,7 @@
                                int read_timeout,
                                int total_timeout) {
   Handle<String> accumulator = String::Empty();
-  const char* source = "function(a, b) { return a + b; }";
+  const char* source = "(function(a, b) { return a + b; })";
   Handle<Value> cons_as_obj(Script::Compile(String::New(source))->Run());
   Handle<Function> cons_function(Function::Cast(*cons_as_obj));
   Handle<Value> cons_args[2];
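
The added parentheses matter because a Script compiles as a program: a bare
"function(a, b) { ... }" at statement position is parsed as a function
declaration (and an anonymous one is a syntax error), whereas
"(function(a, b) { ... })" is an expression statement whose value is the
function object, which Run() then returns. Schematically, using the same API
as the code above:

    // The outer parentheses force expression context, so Run() yields the
    // function itself rather than failing to parse.
    v8::Handle<v8::Value> result =
        v8::Script::Compile(
            v8::String::New("(function(a, b) { return a + b; })"))->Run();
    v8::Handle<v8::Function> cons(v8::Function::Cast(*result));
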
diff --git a/src/dateparser-inl.h b/src/dateparser-inl.h
index 3d4161d..d5921d5 100644
--- a/src/dateparser-inl.h
+++ b/src/dateparser-inl.h
@@ -28,6 +28,8 @@
 #ifndef V8_DATEPARSER_INL_H_
 #define V8_DATEPARSER_INL_H_
 
+#include "dateparser.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/debug-delay.js b/src/debug-delay.js
index cb789be..d9447bd 100644
--- a/src/debug-delay.js
+++ b/src/debug-delay.js
@@ -795,8 +795,8 @@
   return this.selected_frame;
 };
 
-ExecutionState.prototype.debugCommandProcessor = function(protocol) {
-  return new DebugCommandProcessor(this, protocol);
+ExecutionState.prototype.debugCommandProcessor = function(opt_is_running) {
+  return new DebugCommandProcessor(this, opt_is_running);
 };
 
 
@@ -1081,9 +1081,9 @@
 };
 
 
-function DebugCommandProcessor(exec_state) {
+function DebugCommandProcessor(exec_state, opt_is_running) {
   this.exec_state_ = exec_state;
-  this.running_ = false;
+  this.running_ = opt_is_running || false;
 };
 
 
@@ -1107,7 +1107,8 @@
     this.type = 'event';
   }
   this.success = true;
-  this.running = false;
+  // Handler may set this field to control debugger state.
+  this.running = undefined;
 }
 
 
@@ -1168,11 +1169,7 @@
   if (this.message) {
     json.message = this.message;
   }
-  if (this.running) {
-    json.running = true;
-  } else {
-    json.running = false;
-  }
+  json.running = this.running;
   return JSON.stringify(json);
 }
 
@@ -1244,6 +1241,8 @@
         this.scriptsRequest_(request, response);
       } else if (request.command == 'threads') {
         this.threadsRequest_(request, response);
+      } else if (request.command == 'suspend') {
+        this.suspendRequest_(request, response);
       } else {
         throw new Error('Unknown command "' + request.command + '" in request');
       }
@@ -1258,7 +1257,11 @@
 
     // Return the response as a JSON encoded string.
     try {
-      this.running_ = response.running;  // Store the running state.
+      if (!IS_UNDEFINED(response.running)) {
+        // Response controls running state.
+        this.running_ = response.running;
+      }
+      response.running = this.running_;
       return response.toJSONProtocol();
     } catch (e) {
       // Failed to generate response - return generic error.
@@ -1907,6 +1910,12 @@
 };
 
 
+DebugCommandProcessor.prototype.suspendRequest_ = function(request, response) {
+  // TODO(peter.rybin): We probably need some body field here.
+  response.running = false;
+};
+
+
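
For reference, a suspend round trip over the debugger's JSON protocol would
look roughly like this (a hedged sketch of the envelope; field spellings
follow the seq/type/command convention already used by the processor above):

    Request:   { "seq": 117, "type": "request", "command": "suspend" }
    Response:  { "seq": 118, "request_seq": 117, "type": "response",
                 "command": "suspend", "success": true, "running": false }
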
 // Check whether the previously processed command caused the VM to become
 // running.
 DebugCommandProcessor.prototype.isRunning = function() {
diff --git a/src/debug.cc b/src/debug.cc
index 4f89f92..d3a6b5b 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -1614,7 +1614,7 @@
     if (RelocInfo::IsJSReturn(it.rinfo()->rmode())) {
       at_js_return = (it.rinfo()->pc() ==
           addr - Assembler::kPatchReturnSequenceAddressOffset);
-      break_at_js_return_active = it.rinfo()->IsCallInstruction();
+      break_at_js_return_active = it.rinfo()->IsPatchedReturnSequence();
     }
     it.next();
   }
@@ -2212,21 +2212,31 @@
     return;
   }
 
-  // Get the DebugCommandProcessor.
-  v8::Local<v8::Object> api_exec_state =
-      v8::Utils::ToLocal(Handle<JSObject>::cast(exec_state));
-  v8::Local<v8::String> fun_name =
-      v8::String::New("debugCommandProcessor");
-  v8::Local<v8::Function> fun =
-      v8::Function::Cast(*api_exec_state->Get(fun_name));
   v8::TryCatch try_catch;
-  v8::Local<v8::Object> cmd_processor =
-      v8::Object::Cast(*fun->Call(api_exec_state, 0, NULL));
-  if (try_catch.HasCaught()) {
-    PrintLn(try_catch.Exception());
-    return;
+
+  // Create the command processor and pass it the initial running state.
+  v8::Local<v8::Object> cmd_processor;
+  {
+    v8::Local<v8::Object> api_exec_state =
+        v8::Utils::ToLocal(Handle<JSObject>::cast(exec_state));
+    v8::Local<v8::String> fun_name =
+        v8::String::New("debugCommandProcessor");
+    v8::Local<v8::Function> fun =
+        v8::Function::Cast(*api_exec_state->Get(fun_name));
+
+    v8::Handle<v8::Boolean> running =
+        auto_continue ? v8::True() : v8::False();
+    static const int kArgc = 1;
+    v8::Handle<Value> argv[kArgc] = { running };
+    cmd_processor = v8::Object::Cast(*fun->Call(api_exec_state, kArgc, argv));
+    if (try_catch.HasCaught()) {
+      PrintLn(try_catch.Exception());
+      return;
+    }
   }
 
+  bool running = auto_continue;
+
   // Process requests from the debugger.
   while (true) {
     // Wait for new command in the queue.
@@ -2267,7 +2277,6 @@
 
     // Get the response.
     v8::Local<v8::String> response;
-    bool running = false;
     if (!try_catch.HasCaught()) {
       // Get response string.
       if (!response_val->IsUndefined()) {
@@ -2310,7 +2319,7 @@
     // Return from debug event processing if the VM should be put into the
     // running state (via a continue command or active auto continue) and
     // there are no more commands queued.
-    if (running || (auto_continue && !HasCommands())) {
+    if (running && !HasCommands()) {
       return;
     }
   }
diff --git a/src/execution.cc b/src/execution.cc
index 8bc6b74..229b8df 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -96,8 +96,11 @@
     JSEntryFunction entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
 
     // Call the function through the right JS entry stub.
-    value = CALL_GENERATED_CODE(entry, func->code()->entry(), *func,
-                                *receiver, argc, args);
+    byte* entry_address = func->code()->entry();
+    JSFunction* function = *func;
+    Object* receiver_pointer = *receiver;
+    value = CALL_GENERATED_CODE(entry, entry_address, function,
+                                receiver_pointer, argc, args);
   }
 
 #ifdef DEBUG
@@ -383,7 +386,8 @@
   if (initial_climit_ == kIllegalLimit) {
     // Takes the address of the limit variable in order to find out where
     // the top of stack is right now.
-    intptr_t limit = reinterpret_cast<intptr_t>(&limit) - kLimitSize;
+    uintptr_t limit = reinterpret_cast<uintptr_t>(&limit) - kLimitSize;
+    ASSERT(reinterpret_cast<uintptr_t>(&limit) > kLimitSize);
     initial_jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
     jslimit_ = SimulatorStack::JsLimitFromCLimit(limit);
     initial_climit_ = limit;
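
The change above also switches the arithmetic to uintptr_t and asserts
against wrap-around: taking the address of a local is a cheap, portable
approximation of the current stack top, from which the limit is derived by
subtracting the headroom. The idea in isolation (a minimal sketch, not the
V8 code):

    #include <assert.h>
    #include <stdint.h>

    // Approximate the current stack top via a local's address and leave
    // `slack` bytes of headroom below it (stacks grow downward here).
    uintptr_t ComputeStackLimit(uintptr_t slack) {
      char marker;
      uintptr_t top = reinterpret_cast<uintptr_t>(&marker);
      assert(top > slack);  // unsigned subtraction must not wrap
      return top - slack;
    }
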
diff --git a/src/execution.h b/src/execution.h
index 55307f7..ac00aa4 100644
--- a/src/execution.h
+++ b/src/execution.h
@@ -216,6 +216,7 @@
   static void DisableInterrupts();
 
   static const uintptr_t kLimitSize = kPointerSize * 128 * KB;
+
 #ifdef V8_TARGET_ARCH_X64
   static const uintptr_t kInterruptLimit = V8_UINT64_C(0xfffffffffffffffe);
   static const uintptr_t kIllegalLimit = V8_UINT64_C(0xfffffffffffffff8);
diff --git a/src/factory.cc b/src/factory.cc
index 622055c..5251e34 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -477,7 +477,6 @@
 
 Handle<JSFunction> Factory::NewFunctionBoilerplate(Handle<String> name,
                                                    int number_of_literals,
-                                                   bool contains_array_literal,
                                                    Handle<Code> code) {
   Handle<JSFunction> function = NewFunctionBoilerplate(name);
   function->set_code(*code);
@@ -485,7 +484,7 @@
   // If the function contains object, regexp or array literals,
   // allocate extra space for a literals array prefix containing the
   // object, regexp and array constructor functions.
-  if (number_of_literals > 0 || contains_array_literal) {
+  if (number_of_literals > 0) {
     literals_array_size += JSFunction::kLiteralsPrefixSize;
   }
   Handle<FixedArray> literals =
diff --git a/src/factory.h b/src/factory.h
index 0596fbf..7223f08 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -264,7 +264,6 @@
 
   static Handle<JSFunction> NewFunctionBoilerplate(Handle<String> name,
                                                    int number_of_literals,
-                                                   bool contains_array_literal,
                                                    Handle<Code> code);
 
   static Handle<JSFunction> NewFunctionBoilerplate(Handle<String> name);
diff --git a/src/fast-codegen.cc b/src/fast-codegen.cc
new file mode 100644
index 0000000..4ec6a52
--- /dev/null
+++ b/src/fast-codegen.cc
@@ -0,0 +1,269 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "fast-codegen.h"
+
+namespace v8 {
+namespace internal {
+
+Handle<Code> FastCodeGenerator::MakeCode(FunctionLiteral* fun,
+                                         Handle<Script> script) {
+  CodeGenerator::MakeCodePrologue(fun);
+  const int kInitialBufferSize = 4 * KB;
+  MacroAssembler masm(NULL, kInitialBufferSize);
+  FastCodeGenerator cgen(&masm);
+  cgen.Generate(fun);
+  if (cgen.HasStackOverflow()) {
+    ASSERT(!Top::has_pending_exception());
+    return Handle<Code>::null();
+  }
+  Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, NOT_IN_LOOP);
+  return CodeGenerator::MakeCodeEpilogue(fun, &masm, flags, script);
+}
+
+
+int FastCodeGenerator::SlotOffset(Slot* slot) {
+  // Offset is negative because higher indexes are at lower addresses.
+  int offset = -slot->index() * kPointerSize;
+  // Adjust by a (parameter or local) base offset.
+  switch (slot->type()) {
+    case Slot::PARAMETER:
+      offset += (function_->scope()->num_parameters() + 1) * kPointerSize;
+      break;
+    case Slot::LOCAL:
+      offset += JavaScriptFrameConstants::kLocal0Offset;
+      break;
+    default:
+      UNREACHABLE();
+  }
+  return offset;
+}
+
+void FastCodeGenerator::SetFunctionPosition(FunctionLiteral* fun) {
+  if (FLAG_debug_info) {
+    CodeGenerator::RecordPositions(masm_, fun->start_position());
+  }
+}
+
+
+void FastCodeGenerator::SetReturnPosition(FunctionLiteral* fun) {
+  if (FLAG_debug_info) {
+    CodeGenerator::RecordPositions(masm_, fun->end_position());
+  }
+}
+
+
+void FastCodeGenerator::SetStatementPosition(Statement* stmt) {
+  if (FLAG_debug_info) {
+    CodeGenerator::RecordPositions(masm_, stmt->statement_pos());
+  }
+}
+
+
+void FastCodeGenerator::SetSourcePosition(int pos) {
+  if (FLAG_debug_info && pos != RelocInfo::kNoPosition) {
+    masm_->RecordPosition(pos);
+  }
+}
+
+
+void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitBlock(Block* stmt) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitIfStatement(IfStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitWithEnterStatement(WithEnterStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitWithExitStatement(WithExitStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitForStatement(ForStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitFunctionBoilerplateLiteral(
+    FunctionBoilerplateLiteral* expr) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitConditional(Conditional* expr) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitSlot(Slot* expr) {
+  // Slots do not appear directly in the AST.
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitThrow(Throw* expr) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitProperty(Property* expr) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitCall(Call* expr) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitCallNew(CallNew* expr) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+  UNREACHABLE();
+}
+
+
+void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+  UNREACHABLE();
+}
+
+
+} }  // namespace v8::internal
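
To make SlotOffset concrete: slot indices count away from the frame pointer,
parameters sit above the frame (the +1 accounts for the receiver), and locals
start at the platform's kLocal0Offset. A worked, arithmetic-only sketch (the
-8 for kLocal0Offset is an illustrative placeholder, not the real constant):

    const int kPointerSize = 4;    // ia32
    const int kLocal0Offset = -8;  // illustrative placeholder

    int SlotOffsetSketch(bool is_parameter, int index, int num_parameters) {
      int offset = -index * kPointerSize;
      if (is_parameter) {
        offset += (num_parameters + 1) * kPointerSize;  // +1 for the receiver
      } else {
        offset += kLocal0Offset;
      }
      return offset;
    }
    // With two parameters: parameter index 0 -> +12, index 1 -> +8
    // (fp-relative); local index 0 -> -8, local index 1 -> -12.
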
diff --git a/src/fast-codegen.h b/src/fast-codegen.h
new file mode 100644
index 0000000..e6bb643
--- /dev/null
+++ b/src/fast-codegen.h
@@ -0,0 +1,71 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_FAST_CODEGEN_H_
+#define V8_FAST_CODEGEN_H_
+
+#include "v8.h"
+
+#include "ast.h"
+
+namespace v8 {
+namespace internal {
+
+
+class FastCodeGenerator: public AstVisitor {
+ public:
+  explicit FastCodeGenerator(MacroAssembler* masm)
+      : masm_(masm), function_(NULL) {
+  }
+
+  static Handle<Code> MakeCode(FunctionLiteral* fun, Handle<Script> script);
+
+  void Generate(FunctionLiteral* fun);
+
+ private:
+  int SlotOffset(Slot* slot);
+
+  void SetFunctionPosition(FunctionLiteral* fun);
+  void SetReturnPosition(FunctionLiteral* fun);
+  void SetStatementPosition(Statement* stmt);
+  void SetSourcePosition(int pos);
+
+  // AST node visit functions.
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+  MacroAssembler* masm_;
+  FunctionLiteral* function_;
+
+  DISALLOW_COPY_AND_ASSIGN(FastCodeGenerator);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_FAST_CODEGEN_H_
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 91c5bca..2a964ab 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -96,7 +96,7 @@
 //
 #define FLAG FLAG_FULL
 
-// assembler-ia32.cc / assembler-arm.cc
+// assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc
 DEFINE_bool(debug_code, false,
             "generate extra code (comments, assertions) for debugging")
 DEFINE_bool(emit_branch_hints, false, "emit branch hints")
@@ -104,6 +104,16 @@
             "eliminate redundant push/pops in assembly code")
 DEFINE_bool(print_push_pop_elimination, false,
             "print elimination of redundant push/pops in assembly code")
+DEFINE_bool(enable_sse2, true,
+            "enable use of SSE2 instructions if available")
+DEFINE_bool(enable_sse3, true,
+            "enable use of SSE3 instructions if available")
+DEFINE_bool(enable_cmov, true,
+            "enable use of CMOV instruction if available")
+DEFINE_bool(enable_rdtsc, true,
+            "enable use of RDTSC instruction if available")
+DEFINE_bool(enable_sahf, true,
+            "enable use of SAHF instruction if available (X64 only)")
 
 // bootstrapper.cc
 DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
@@ -132,7 +142,11 @@
 // compiler.cc
 DEFINE_bool(strict, false, "strict error checking")
 DEFINE_int(min_preparse_length, 1024,
-           "Minimum length for automatic enable preparsing")
+           "minimum length for automatic enable preparsing")
+DEFINE_bool(fast_compiler, true,
+            "use the fast-mode compiler for some top-level code")
+DEFINE_bool(trace_bailout, false,
+            "print reasons for failing to use fast compilation")
 
 // compilation-cache.cc
 DEFINE_bool(compilation_cache, true, "enable compilation cache")
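
The new flags follow the usual DEFINE_bool conventions, so they can be
toggled from the shell command line, and boolean flags take a "no" prefix to
disable them. A hedged usage example:

    d8 --trace_bailout script.js      # log why functions fall back
    d8 --nofast_compiler script.js    # disable the fast-mode backend entirely
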
@@ -263,6 +277,9 @@
             "pretty print source code for builtins")
 DEFINE_bool(print_ast, false, "print source AST")
 DEFINE_bool(print_builtin_ast, false, "print source AST for builtins")
+DEFINE_bool(print_json_ast, false, "print source AST as JSON")
+DEFINE_bool(print_builtin_json_ast, false,
+            "print source AST for builtins as JSON")
 DEFINE_bool(trace_calls, false, "trace calls")
 DEFINE_bool(trace_builtin_calls, false, "trace builtins calls")
 DEFINE_string(stop_at, "", "function name where to insert a breakpoint")
@@ -333,6 +350,7 @@
 DEFINE_bool(log_handles, false, "Log global handle events.")
 DEFINE_bool(log_state_changes, false, "Log state changes.")
 DEFINE_bool(log_suspect, false, "Log suspect operations.")
+DEFINE_bool(log_producers, false, "Log stack traces of JS object allocations.")
 DEFINE_bool(compress_log, false,
             "Compress log to save space (makes log less human-readable).")
 DEFINE_bool(prof, false,
diff --git a/src/global-handles.cc b/src/global-handles.cc
index e51c4aa..f4b69fc 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -264,6 +264,16 @@
 }
 
 
+void GlobalHandles::IterateWeakRoots(WeakReferenceGuest f,
+                                     WeakReferenceCallback callback) {
+  for (Node* current = head_; current != NULL; current = current->next()) {
+    if (current->IsWeak() && current->callback() == callback) {
+      f(current->object_, current->parameter());
+    }
+  }
+}
+
+
 void GlobalHandles::IdentifyWeakHandles(WeakSlotCallback f) {
   for (Node* current = head_; current != NULL; current = current->next()) {
     if (current->state_ == Node::WEAK) {
diff --git a/src/global-handles.h b/src/global-handles.h
index 9e63ba7..feb95bf 100644
--- a/src/global-handles.h
+++ b/src/global-handles.h
@@ -54,6 +54,8 @@
 };
 
 
+typedef void (*WeakReferenceGuest)(Object* object, void* parameter);
+
 class GlobalHandles : public AllStatic {
  public:
   // Creates a new global handle that is alive until Destroy is called.
@@ -99,6 +101,10 @@
   // Iterates over all weak roots in heap.
   static void IterateWeakRoots(ObjectVisitor* v);
 
+  // Iterates over weak roots that are bound to a given callback.
+  static void IterateWeakRoots(WeakReferenceGuest f,
+                               WeakReferenceCallback callback);
+
   // Find all weak handles satisfying the callback predicate, mark
   // them as pending.
   static void IdentifyWeakHandles(WeakSlotCallback f);
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index ecb6919..8f55ce1 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -28,6 +28,8 @@
 #include "v8.h"
 
 #include "heap-profiler.h"
+#include "frames-inl.h"
+#include "global-handles.h"
 #include "string-stream.h"
 
 namespace v8 {
@@ -327,6 +329,11 @@
 }
 
 
+static const char* GetConstructorName(const char* name) {
+  return name[0] != '\0' ? name : "(anonymous)";
+}
+
+
 void JSObjectsCluster::Print(StringStream* accumulator) const {
   ASSERT(!is_null());
   if (constructor_ == FromSpecialCase(ROOTS)) {
@@ -338,7 +345,7 @@
   } else {
     SmartPointer<char> s_name(
         constructor_->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL));
-    accumulator->Add("%s", (*s_name)[0] != '\0' ? *s_name : "(anonymous)");
+    accumulator->Add("%s", GetConstructorName(*s_name));
     if (instance_ != NULL) {
       accumulator->Add(":%p", static_cast<void*>(instance_));
     }
@@ -574,6 +581,23 @@
 }
 
 
+static void StackWeakReferenceCallback(Persistent<Value> object,
+                                       void* trace) {
+  DeleteArray(static_cast<Address*>(trace));
+  object.Dispose();
+}
+
+
+static void PrintProducerStackTrace(Object* obj, void* trace) {
+  if (!obj->IsJSObject()) return;
+  String* constructor = JSObject::cast(obj)->constructor_name();
+  SmartPointer<char> s_name(
+      constructor->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL));
+  LOG(HeapSampleJSProducerEvent(GetConstructorName(*s_name),
+                                reinterpret_cast<Address*>(trace)));
+}
+
+
 void HeapProfiler::WriteSample() {
   LOG(HeapSampleBeginEvent("Heap", "allocated"));
   LOG(HeapSampleStats(
@@ -616,10 +640,40 @@
   js_cons_profile.PrintStats();
   js_retainer_profile.PrintStats();
 
+  GlobalHandles::IterateWeakRoots(PrintProducerStackTrace,
+                                  StackWeakReferenceCallback);
+
   LOG(HeapSampleEndEvent("Heap", "allocated"));
 }
 
 
+bool ProducerHeapProfile::can_log_ = false;
+
+void ProducerHeapProfile::Setup() {
+  can_log_ = true;
+}
+
+void ProducerHeapProfile::RecordJSObjectAllocation(Object* obj) {
+  if (!can_log_ || !FLAG_log_producers) return;
+  int framesCount = 0;
+  for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
+    ++framesCount;
+  }
+  if (framesCount == 0) return;
+  ++framesCount;  // Reserve space for the terminator item.
+  Vector<Address> stack(NewArray<Address>(framesCount), framesCount);
+  int i = 0;
+  for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
+    stack[i++] = it.frame()->pc();
+  }
+  stack[i] = NULL;
+  Handle<Object> handle = GlobalHandles::Create(obj);
+  GlobalHandles::MakeWeak(handle.location(),
+                          static_cast<void*>(stack.start()),
+                          StackWeakReferenceCallback);
+}
+
+
 #endif  // ENABLE_LOGGING_AND_PROFILING
 
 
diff --git a/src/heap-profiler.h b/src/heap-profiler.h
index 7fda883..bd875df 100644
--- a/src/heap-profiler.h
+++ b/src/heap-profiler.h
@@ -256,6 +256,14 @@
 };
 
 
+class ProducerHeapProfile : public AllStatic {
+ public:
+  static void Setup();
+  static void RecordJSObjectAllocation(Object* obj);
+ private:
+  static bool can_log_;
+};
+
 #endif  // ENABLE_LOGGING_AND_PROFILING
 
 } }  // namespace v8::internal
diff --git a/src/heap.cc b/src/heap.cc
index 0295a99..817a50f 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -1679,8 +1679,8 @@
       && second->IsAsciiRepresentation();
 
   // Make sure that an out of memory exception is thrown if the length
-  // of the new cons string is too large to fit in a Smi.
-  if (length > Smi::kMaxValue || length < -0) {
+  // of the new cons string is too large.
+  if (length > String::kMaxLength || length < 0) {
     Top::context()->mark_out_of_memory();
     return Failure::OutOfMemoryException();
   }
@@ -2021,6 +2021,7 @@
                                TargetSpaceId(map->instance_type()));
   if (result->IsFailure()) return result;
   HeapObject::cast(result)->set_map(map);
+  ProducerHeapProfile::RecordJSObjectAllocation(result);
   return result;
 }
 
@@ -2342,6 +2343,7 @@
     JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
   }
   // Return the new clone.
+  ProducerHeapProfile::RecordJSObjectAllocation(clone);
   return clone;
 }
 
@@ -3308,6 +3310,9 @@
   LOG(IntEvent("heap-capacity", Capacity()));
   LOG(IntEvent("heap-available", Available()));
 
+  // This should be called only after initial objects have been created.
+  ProducerHeapProfile::Setup();
+
   return true;
 }
 
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index 1de20f4..5fa75ec 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -52,7 +52,7 @@
   if (rmode_ == RUNTIME_ENTRY || IsCodeTarget(rmode_)) {
     int32_t* p = reinterpret_cast<int32_t*>(pc_);
     *p -= delta;  // relocate entry
-  } else if (rmode_ == JS_RETURN && IsCallInstruction()) {
+  } else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
     // Special handling of js_return when a break point is set (call
     // instruction has been inserted).
     int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
@@ -114,36 +114,36 @@
 
 
 Address RelocInfo::call_address() {
-  ASSERT(IsCallInstruction());
+  ASSERT(IsPatchedReturnSequence());
   return Assembler::target_address_at(pc_ + 1);
 }
 
 
 void RelocInfo::set_call_address(Address target) {
-  ASSERT(IsCallInstruction());
+  ASSERT(IsPatchedReturnSequence());
   Assembler::set_target_address_at(pc_ + 1, target);
 }
 
 
 Object* RelocInfo::call_object() {
-  ASSERT(IsCallInstruction());
+  ASSERT(IsPatchedReturnSequence());
   return *call_object_address();
 }
 
 
 Object** RelocInfo::call_object_address() {
-  ASSERT(IsCallInstruction());
+  ASSERT(IsPatchedReturnSequence());
   return reinterpret_cast<Object**>(pc_ + 1);
 }
 
 
 void RelocInfo::set_call_object(Object* target) {
-  ASSERT(IsCallInstruction());
+  ASSERT(IsPatchedReturnSequence());
   *call_object_address() = target;
 }
 
 
-bool RelocInfo::IsCallInstruction() {
+bool RelocInfo::IsPatchedReturnSequence() {
   return *pc_ == 0xE8;
 }
 
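
The rename from IsCallInstruction to IsPatchedReturnSequence describes what
is really being asked: when the debugger sets a break at a JS return, the
return sequence is overwritten with a call, and 0xE8 is the x86 opcode for a
near call with a rel32 operand. Roughly (illustrative; the exact return
sequence is platform-defined):

    // Unpatched JS return sequence (ia32): mov esp, ebp; pop ebp; ret n
    // Patched by the debugger:             E8 <rel32>  (call debug-break stub)
    // Hence: patched <=> the first byte at the return address is 0xE8.
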
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index b8dda17..bc28710 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -1166,6 +1166,19 @@
 }
 
 
+void Assembler::subb(const Operand& op, int8_t imm8) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (op.is_reg(eax)) {
+    EMIT(0x2c);
+  } else {
+    EMIT(0x80);
+    emit_operand(ebp, op);  // ebp == 5
+  }
+  EMIT(imm8);
+}
+
+
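
The subb encoding mirrors the x86 manual: 0x2C is the short form
"SUB AL, imm8", and 0x80 is the group-1 immediate opcode whose ModRM reg
field selects the operation, with /5 = SUB; that is why ebp (register code 5)
is passed as the "register" argument to emit_operand. Hand-assembled
examples:

    // sub al, 0x7f          -> 2C 7F     (short form for the eax/al operand)
    // sub byte [ecx], 0x01  -> 80 29 01  (0x80, ModRM 0x29 = /5 with rm=ecx)
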
 void Assembler::sub(const Operand& dst, const Immediate& x) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 610017b..4719f2d 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -367,6 +367,10 @@
   static void Probe();
   // Check whether a feature is supported by the target CPU.
   static bool IsSupported(Feature f) {
+    if (f == SSE2 && !FLAG_enable_sse2) return false;
+    if (f == SSE3 && !FLAG_enable_sse3) return false;
+    if (f == CMOV && !FLAG_enable_cmov) return false;
+    if (f == RDTSC && !FLAG_enable_rdtsc) return false;
     return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
   }
   // Check whether a feature is currently enabled.
@@ -590,6 +594,7 @@
   void shr(Register dst);
   void shr_cl(Register dst);
 
+  void subb(const Operand& dst, int8_t imm8);
   void sub(const Operand& dst, const Immediate& x);
   void sub(Register dst, const Operand& src);
   void sub(const Operand& dst, Register src);
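
Gating IsSupported on the new --enable_* flags lets a developer veto a CPU
capability even on hardware that has it, which is useful for exercising
fallback code paths. Typical guarded usage (a hedged sketch following the
IsSupported API above; the enum spelling is assumed):

    // Prefer the SSE2 path when both the hardware and the flags allow it.
    if (CpuFeatures::IsSupported(CpuFeatures::SSE2)) {
      // ... emit SSE2-based double-precision code ...
    } else {
      // ... emit the x87 FPU fallback ...
    }
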
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 0e314b9..938c8e6 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -824,10 +824,8 @@
 
 
 void DeferredInlineBinaryOperation::Generate() {
-  __ push(left_);
-  __ push(right_);
-  GenericBinaryOpStub stub(op_, mode_, SMI_CODE_INLINED);
-  __ CallStub(&stub);
+  GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
+  stub.GenerateCall(masm_, left_, right_);
   if (!dst_.is(eax)) __ mov(dst_, eax);
 }
 
@@ -856,16 +854,16 @@
       // Bit operations always assume they likely operate on Smis. Still only
       // generate the inline Smi check code if this operation is part of a loop.
       flags = (loop_nesting() > 0)
-              ? SMI_CODE_INLINED
-              : SMI_CODE_IN_STUB;
+              ? NO_SMI_CODE_IN_STUB
+              : NO_GENERIC_BINARY_FLAGS;
       break;
 
     default:
       // By default only inline the Smi check code for likely smis if this
       // operation is part of a loop.
       flags = ((loop_nesting() > 0) && type->IsLikelySmi())
-              ? SMI_CODE_INLINED
-              : SMI_CODE_IN_STUB;
+              ? NO_SMI_CODE_IN_STUB
+              : NO_GENERIC_BINARY_FLAGS;
       break;
   }
 
@@ -924,16 +922,15 @@
     return;
   }
 
-  if (flags == SMI_CODE_INLINED && !generate_no_smi_code) {
+  if (((flags & NO_SMI_CODE_IN_STUB) != 0) && !generate_no_smi_code) {
     LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
   } else {
     frame_->Push(&left);
     frame_->Push(&right);
     // If we know the arguments aren't smis, use the binary operation stub
     // that does not check for the fast smi case.
-    // The same stub is used for NO_SMI_CODE and SMI_CODE_INLINED.
     if (generate_no_smi_code) {
-      flags = SMI_CODE_INLINED;
+      flags = NO_SMI_CODE_IN_STUB;
     }
     GenericBinaryOpStub stub(op, overwrite_mode, flags);
     Result answer = frame_->CallStub(&stub, 2);
@@ -1376,14 +1373,12 @@
 
 
 void DeferredInlineSmiOperation::Generate() {
-  __ push(src_);
-  __ push(Immediate(value_));
   // For mod we don't generate all the Smi code inline.
   GenericBinaryOpStub stub(
       op_,
       overwrite_mode_,
-      (op_ == Token::MOD) ? SMI_CODE_IN_STUB : SMI_CODE_INLINED);
-  __ CallStub(&stub);
+      (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
+  stub.GenerateCall(masm_, src_, value_);
   if (!dst_.is(eax)) __ mov(dst_, eax);
 }
 
@@ -1417,10 +1412,8 @@
 
 
 void DeferredInlineSmiOperationReversed::Generate() {
-  __ push(Immediate(value_));
-  __ push(src_);
-  GenericBinaryOpStub igostub(op_, overwrite_mode_, SMI_CODE_INLINED);
-  __ CallStub(&igostub);
+  GenericBinaryOpStub igostub(op_, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+  igostub.GenerateCall(masm_, value_, src_);
   if (!dst_.is(eax)) __ mov(dst_, eax);
 }
 
@@ -1449,10 +1442,8 @@
 void DeferredInlineSmiAdd::Generate() {
   // Undo the optimistic add operation and call the shared stub.
   __ sub(Operand(dst_), Immediate(value_));
-  __ push(dst_);
-  __ push(Immediate(value_));
-  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
-  __ CallStub(&igostub);
+  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+  igostub.GenerateCall(masm_, dst_, value_);
   if (!dst_.is(eax)) __ mov(dst_, eax);
 }
 
@@ -1481,10 +1472,8 @@
 void DeferredInlineSmiAddReversed::Generate() {
   // Undo the optimistic add operation and call the shared stub.
   __ sub(Operand(dst_), Immediate(value_));
-  __ push(Immediate(value_));
-  __ push(dst_);
-  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
-  __ CallStub(&igostub);
+  GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+  igostub.GenerateCall(masm_, value_, dst_);
   if (!dst_.is(eax)) __ mov(dst_, eax);
 }
 
@@ -1514,10 +1503,8 @@
 void DeferredInlineSmiSub::Generate() {
   // Undo the optimistic sub operation and call the shared stub.
   __ add(Operand(dst_), Immediate(value_));
-  __ push(dst_);
-  __ push(Immediate(value_));
-  GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
-  __ CallStub(&igostub);
+  GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
+  igostub.GenerateCall(masm_, dst_, value_);
   if (!dst_.is(eax)) __ mov(dst_, eax);
 }
 
@@ -2711,288 +2698,332 @@
 }
 
 
-void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
+void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
   ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ LoopStatement");
+  Comment cmnt(masm_, "[ DoWhileStatement");
   CodeForStatementPosition(node);
   node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  JumpTarget body(JumpTarget::BIDIRECTIONAL);
+  IncrementLoopNesting();
 
-  // Simple condition analysis.  ALWAYS_TRUE and ALWAYS_FALSE represent a
-  // known result for the test expression, with no side effects.
-  enum { ALWAYS_TRUE, ALWAYS_FALSE, DONT_KNOW } info = DONT_KNOW;
-  if (node->cond() == NULL) {
-    ASSERT(node->type() == LoopStatement::FOR_LOOP);
-    info = ALWAYS_TRUE;
-  } else {
-    Literal* lit = node->cond()->AsLiteral();
-    if (lit != NULL) {
-      if (lit->IsTrue()) {
-        info = ALWAYS_TRUE;
-      } else if (lit->IsFalse()) {
-        info = ALWAYS_FALSE;
-      }
-    }
+  ConditionAnalysis info = AnalyzeCondition(node->cond());
+  // Label the top of the loop for the backward jump if necessary.
+  switch (info) {
+    case ALWAYS_TRUE:
+      // Use the continue target.
+      node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+      node->continue_target()->Bind();
+      break;
+    case ALWAYS_FALSE:
+      // No need to label it.
+      node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+      break;
+    case DONT_KNOW:
+      // Continue is the test, so use the backward body target.
+      node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+      body.Bind();
+      break;
   }
 
-  switch (node->type()) {
-    case LoopStatement::DO_LOOP: {
-      JumpTarget body(JumpTarget::BIDIRECTIONAL);
-      IncrementLoopNesting();
+  CheckStack();  // TODO(1222600): ignore if body contains calls.
+  Visit(node->body());
 
-      // Label the top of the loop for the backward jump if necessary.
-      if (info == ALWAYS_TRUE) {
-        // Use the continue target.
-        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-        node->continue_target()->Bind();
-      } else if (info == ALWAYS_FALSE) {
-        // No need to label it.
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-      } else {
-        // Continue is the test, so use the backward body target.
-        ASSERT(info == DONT_KNOW);
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-        body.Bind();
+  // Compile the test.
+  switch (info) {
+    case ALWAYS_TRUE:
+      // If control flow can fall off the end of the body, jump back to
+      // the top and bind the break target at the exit.
+      if (has_valid_frame()) {
+        node->continue_target()->Jump();
       }
-
-      CheckStack();  // TODO(1222600): ignore if body contains calls.
-      Visit(node->body());
-
-      // Compile the test.
-      if (info == ALWAYS_TRUE) {
-        // If control flow can fall off the end of the body, jump back
-        // to the top and bind the break target at the exit.
-        if (has_valid_frame()) {
-          node->continue_target()->Jump();
-        }
-        if (node->break_target()->is_linked()) {
-          node->break_target()->Bind();
-        }
-
-      } else if (info == ALWAYS_FALSE) {
-        // We may have had continues or breaks in the body.
-        if (node->continue_target()->is_linked()) {
-          node->continue_target()->Bind();
-        }
-        if (node->break_target()->is_linked()) {
-          node->break_target()->Bind();
-        }
-
-      } else {
-        ASSERT(info == DONT_KNOW);
-        // We have to compile the test expression if it can be reached by
-        // control flow falling out of the body or via continue.
-        if (node->continue_target()->is_linked()) {
-          node->continue_target()->Bind();
-        }
-        if (has_valid_frame()) {
-          ControlDestination dest(&body, node->break_target(), false);
-          LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
-        }
-        if (node->break_target()->is_linked()) {
-          node->break_target()->Bind();
-        }
-      }
-      break;
-    }
-
-    case LoopStatement::WHILE_LOOP: {
-      // Do not duplicate conditions that may have function literal
-      // subexpressions.  This can cause us to compile the function
-      // literal twice.
-      bool test_at_bottom = !node->may_have_function_literal();
-
-      IncrementLoopNesting();
-
-      // If the condition is always false and has no side effects, we
-      // do not need to compile anything.
-      if (info == ALWAYS_FALSE) break;
-
-      JumpTarget body;
-      if (test_at_bottom) {
-        body.set_direction(JumpTarget::BIDIRECTIONAL);
-      }
-
-      // Based on the condition analysis, compile the test as necessary.
-      if (info == ALWAYS_TRUE) {
-        // We will not compile the test expression.  Label the top of
-        // the loop with the continue target.
-        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-        node->continue_target()->Bind();
-      } else {
-        ASSERT(info == DONT_KNOW);  // ALWAYS_FALSE cannot reach here.
-        if (test_at_bottom) {
-          // Continue is the test at the bottom, no need to label the
-          // test at the top.  The body is a backward target.
-          node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-        } else {
-          // Label the test at the top as the continue target.  The
-          // body is a forward-only target.
-          node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-          node->continue_target()->Bind();
-        }
-        // Compile the test with the body as the true target and
-        // preferred fall-through and with the break target as the
-        // false target.
-        ControlDestination dest(&body, node->break_target(), true);
-        LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
-
-        if (dest.false_was_fall_through()) {
-          // If we got the break target as fall-through, the test may
-          // have been unconditionally false (if there are no jumps to
-          // the body).
-          if (!body.is_linked()) break;
-
-          // Otherwise, jump around the body on the fall through and
-          // then bind the body target.
-          node->break_target()->Unuse();
-          node->break_target()->Jump();
-          body.Bind();
-        }
-      }
-
-      CheckStack();  // TODO(1222600): ignore if body contains calls.
-      Visit(node->body());
-
-      // Based on the condition analysis, compile the backward jump as
-      // necessary.
-      if (info == ALWAYS_TRUE) {
-        // The loop body has been labeled with the continue target.
-        if (has_valid_frame()) {
-          node->continue_target()->Jump();
-        }
-      } else {
-        ASSERT(info == DONT_KNOW);  // ALWAYS_FALSE cannot reach here.
-        if (test_at_bottom) {
-          // If we have chosen to recompile the test at the bottom,
-          // then it is the continue target.
-          if (node->continue_target()->is_linked()) {
-            node->continue_target()->Bind();
-          }
-          if (has_valid_frame()) {
-            // The break target is the fall-through (body is a backward
-            // jump from here and thus an invalid fall-through).
-            ControlDestination dest(&body, node->break_target(), false);
-            LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
-          }
-        } else {
-          // If we have chosen not to recompile the test at the
-          // bottom, jump back to the one at the top.
-          if (has_valid_frame()) {
-            node->continue_target()->Jump();
-          }
-        }
-      }
-
-      // The break target may be already bound (by the condition), or
-      // there may not be a valid frame.  Bind it only if needed.
       if (node->break_target()->is_linked()) {
         node->break_target()->Bind();
       }
       break;
-    }
-
-    case LoopStatement::FOR_LOOP: {
-      // Do not duplicate conditions that may have function literal
-      // subexpressions.  This can cause us to compile the function
-      // literal twice.
-      bool test_at_bottom = !node->may_have_function_literal();
-
-      // Compile the init expression if present.
-      if (node->init() != NULL) {
-        Visit(node->init());
+    case ALWAYS_FALSE:
+      // We may have had continues or breaks in the body.
+      if (node->continue_target()->is_linked()) {
+        node->continue_target()->Bind();
       }
-
-      IncrementLoopNesting();
-
-      // If the condition is always false and has no side effects, we
-      // do not need to compile anything else.
-      if (info == ALWAYS_FALSE) break;
-
-      // Target for backward edge if no test at the bottom, otherwise
-      // unused.
-      JumpTarget loop(JumpTarget::BIDIRECTIONAL);
-
-      // Target for backward edge if there is a test at the bottom,
-      // otherwise used as target for test at the top.
-      JumpTarget body;
-      if (test_at_bottom) {
-        body.set_direction(JumpTarget::BIDIRECTIONAL);
+      if (node->break_target()->is_linked()) {
+        node->break_target()->Bind();
       }
-
-      // Based on the condition analysis, compile the test as necessary.
-      if (info == ALWAYS_TRUE) {
-        // We will not compile the test expression.  Label the top of
-        // the loop.
-        if (node->next() == NULL) {
-          // Use the continue target if there is no update expression.
-          node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-          node->continue_target()->Bind();
-        } else {
-          // Otherwise use the backward loop target.
-          node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-          loop.Bind();
-        }
-      } else {
-        ASSERT(info == DONT_KNOW);
-        if (test_at_bottom) {
-          // Continue is either the update expression or the test at
-          // the bottom, no need to label the test at the top.
-          node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-        } else if (node->next() == NULL) {
-          // We are not recompiling the test at the bottom and there
-          // is no update expression.
-          node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-          node->continue_target()->Bind();
-        } else {
-          // We are not recompiling the test at the bottom and there
-          // is an update expression.
-          node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-          loop.Bind();
-        }
-
-        // Compile the test with the body as the true target and
-        // preferred fall-through and with the break target as the
-        // false target.
-        ControlDestination dest(&body, node->break_target(), true);
+      break;
+    case DONT_KNOW:
+      // We have to compile the test expression if it can be reached by
+      // control flow falling out of the body or via continue.
+      if (node->continue_target()->is_linked()) {
+        node->continue_target()->Bind();
+      }
+      if (has_valid_frame()) {
+        ControlDestination dest(&body, node->break_target(), false);
         LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
-
-        if (dest.false_was_fall_through()) {
-          // If we got the break target as fall-through, the test may
-          // have been unconditionally false (if there are no jumps to
-          // the body).
-          if (!body.is_linked()) break;
-
-          // Otherwise, jump around the body on the fall through and
-          // then bind the body target.
-          node->break_target()->Unuse();
-          node->break_target()->Jump();
-          body.Bind();
-        }
       }
+      if (node->break_target()->is_linked()) {
+        node->break_target()->Bind();
+      }
+      break;
+  }
 
-      CheckStack();  // TODO(1222600): ignore if body contains calls.
-      Visit(node->body());
+  DecrementLoopNesting();
+}
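
The switch above dispatches on AnalyzeCondition, declared later in
codegen-ia32.h. A minimal, self-contained sketch of what such an analysis can
look like; the stub AST types and field names here are illustrative stand-ins,
not V8's:

#include <cassert>
#include <cstddef>

enum ConditionAnalysis { ALWAYS_TRUE, ALWAYS_FALSE, DONT_KNOW };

// Stand-ins for v8::internal::Expression/Literal.
struct Literal { bool is_true; bool is_false; };
struct Expression {
  Literal* AsLiteral() { return literal; }  // NULL when not a literal.
  Literal* literal;
};

ConditionAnalysis AnalyzeCondition(Expression* cond) {
  if (cond == NULL) return ALWAYS_TRUE;   // e.g. 'for (;;)' has no test.
  Literal* lit = cond->AsLiteral();
  if (lit == NULL) return DONT_KNOW;      // Not a compile-time constant.
  if (lit->is_true) return ALWAYS_TRUE;
  if (lit->is_false) return ALWAYS_FALSE;
  return DONT_KNOW;                       // Other literals left unanalyzed.
}

int main() {
  assert(AnalyzeCondition(NULL) == ALWAYS_TRUE);
  Literal t = { true, false };
  Expression e = { &t };
  assert(AnalyzeCondition(&e) == ALWAYS_TRUE);
  return 0;
}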
 
-      // If there is an update expression, compile it if necessary.
-      if (node->next() != NULL) {
+
+void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ WhileStatement");
+  CodeForStatementPosition(node);
+
+  // If the condition is always false and has no side effects, we do not
+  // need to compile anything.
+  ConditionAnalysis info = AnalyzeCondition(node->cond());
+  if (info == ALWAYS_FALSE) return;
+
+  // Do not duplicate conditions that may have function literal
+  // subexpressions.  This can cause us to compile the function literal
+  // twice.
+  bool test_at_bottom = !node->may_have_function_literal();
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  IncrementLoopNesting();
+  JumpTarget body;
+  if (test_at_bottom) {
+    body.set_direction(JumpTarget::BIDIRECTIONAL);
+  }
+
+  // Based on the condition analysis, compile the test as necessary.
+  switch (info) {
+    case ALWAYS_TRUE:
+      // We will not compile the test expression.  Label the top of the
+      // loop with the continue target.
+      node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+      node->continue_target()->Bind();
+      break;
+    case DONT_KNOW: {
+      if (test_at_bottom) {
+        // Continue is the test at the bottom, no need to label the test
+        // at the top.  The body is a backward target.
+        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+      } else {
+        // Label the test at the top as the continue target.  The body
+        // is a forward-only target.
+        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+        node->continue_target()->Bind();
+      }
+      // Compile the test with the body as the true target and preferred
+      // fall-through and with the break target as the false target.
+      ControlDestination dest(&body, node->break_target(), true);
+      LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+
+      if (dest.false_was_fall_through()) {
+        // If we got the break target as fall-through, the test may have
+        // been unconditionally false (if there are no jumps to the
+        // body).
+        if (!body.is_linked()) {
+          DecrementLoopNesting();
+          return;
+        }
+
+        // Otherwise, jump around the body on the fall through and then
+        // bind the body target.
+        node->break_target()->Unuse();
+        node->break_target()->Jump();
+        body.Bind();
+      }
+      break;
+    }
+    case ALWAYS_FALSE:
+      UNREACHABLE();
+      break;
+  }
+
+  CheckStack();  // TODO(1222600): ignore if body contains calls.
+  Visit(node->body());
+
+  // Based on the condition analysis, compile the backward jump as
+  // necessary.
+  switch (info) {
+    case ALWAYS_TRUE:
+      // The loop body has been labeled with the continue target.
+      if (has_valid_frame()) {
+        node->continue_target()->Jump();
+      }
+      break;
+    case DONT_KNOW:
+      if (test_at_bottom) {
+        // If we have chosen to recompile the test at the bottom, then
+        // it is the continue target.
         if (node->continue_target()->is_linked()) {
           node->continue_target()->Bind();
         }
-
-        // Control can reach the update by falling out of the body or
-        // by a continue.
         if (has_valid_frame()) {
-          // Record the source position of the statement as this code
-          // which is after the code for the body actually belongs to
-          // the loop statement and not the body.
-          CodeForStatementPosition(node);
-          Visit(node->next());
+          // The break target is the fall-through (body is a backward
+          // jump from here and thus an invalid fall-through).
+          ControlDestination dest(&body, node->break_target(), false);
+          LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+        }
+      } else {
+        // If we have chosen not to recompile the test at the bottom,
+        // jump back to the one at the top.
+        if (has_valid_frame()) {
+          node->continue_target()->Jump();
         }
       }
+      break;
+    case ALWAYS_FALSE:
+      UNREACHABLE();
+      break;
+  }
 
-      // Based on the condition analysis, compile the backward jump as
-      // necessary.
-      if (info == ALWAYS_TRUE) {
+  // The break target may be already bound (by the condition), or there
+  // may not be a valid frame.  Bind it only if needed.
+  if (node->break_target()->is_linked()) {
+    node->break_target()->Bind();
+  }
+  DecrementLoopNesting();
+}
+
+
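Both the while and for visitors pick one of two loop shapes through
test_at_bottom. An illustrative sketch of the emitted control flow under
either choice (pseudo-assembly in comments, not actual emitted code):

// test_at_bottom == true: the condition is compiled twice, once as an
// entry check and once on the back edge, saving a jump per iteration:
//
//          <cond> ; jump-if-false -> break
//   body:  <body>
//          <cond> ; jump-if-true -> body
//   break:
//
// test_at_bottom == false: the condition is compiled only once:
//
//   top:   <cond> ; jump-if-false -> break
//          <body>
//          jmp top
//   break:
//
// Recompiling the condition at the bottom would duplicate any function
// literal it contains, compiling that literal twice; hence
// test_at_bottom = !node->may_have_function_literal().
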
+void CodeGenerator::VisitForStatement(ForStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ ForStatement");
+  CodeForStatementPosition(node);
+
+  // Compile the init expression if present.
+  if (node->init() != NULL) {
+    Visit(node->init());
+  }
+
+  // If the condition is always false and has no side effects, we do not
+  // need to compile anything else.
+  ConditionAnalysis info = AnalyzeCondition(node->cond());
+  if (info == ALWAYS_FALSE) return;
+
+  // Do not duplicate conditions that may have function literal
+  // subexpressions.  This can cause us to compile the function literal
+  // twice.
+  bool test_at_bottom = !node->may_have_function_literal();
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  IncrementLoopNesting();
+
+  // Target for backward edge if no test at the bottom, otherwise
+  // unused.
+  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
+
+  // Target for backward edge if there is a test at the bottom,
+  // otherwise used as target for test at the top.
+  JumpTarget body;
+  if (test_at_bottom) {
+    body.set_direction(JumpTarget::BIDIRECTIONAL);
+  }
+
+  // Based on the condition analysis, compile the test as necessary.
+  switch (info) {
+    case ALWAYS_TRUE:
+      // We will not compile the test expression.  Label the top of the
+      // loop.
+      if (node->next() == NULL) {
+        // Use the continue target if there is no update expression.
+        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+        node->continue_target()->Bind();
+      } else {
+        // Otherwise use the backward loop target.
+        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+        loop.Bind();
+      }
+      break;
+    case DONT_KNOW: {
+      if (test_at_bottom) {
+        // Continue is either the update expression or the test at the
+        // bottom, no need to label the test at the top.
+        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+      } else if (node->next() == NULL) {
+        // We are not recompiling the test at the bottom and there is no
+        // update expression.
+        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+        node->continue_target()->Bind();
+      } else {
+        // We are not recompiling the test at the bottom and there is an
+        // update expression.
+        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+        loop.Bind();
+      }
+      // Compile the test with the body as the true target and preferred
+      // fall-through and with the break target as the false target.
+      ControlDestination dest(&body, node->break_target(), true);
+      LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+
+      if (dest.false_was_fall_through()) {
+        // If we got the break target as fall-through, the test may have
+        // been unconditionally false (if there are no jumps to the
+        // body).
+        if (!body.is_linked()) {
+          DecrementLoopNesting();
+          return;
+        }
+
+        // Otherwise, jump around the body on the fall through and then
+        // bind the body target.
+        node->break_target()->Unuse();
+        node->break_target()->Jump();
+        body.Bind();
+      }
+      break;
+    }
+    case ALWAYS_FALSE:
+      UNREACHABLE();
+      break;
+  }
+
+  CheckStack();  // TODO(1222600): ignore if body contains calls.
+  Visit(node->body());
+
+  // If there is an update expression, compile it if necessary.
+  if (node->next() != NULL) {
+    if (node->continue_target()->is_linked()) {
+      node->continue_target()->Bind();
+    }
+
+    // Control can reach the update by falling out of the body or by a
+    // continue.
+    if (has_valid_frame()) {
+      // Record the source position of the statement as this code which
+      // is after the code for the body actually belongs to the loop
+      // statement and not the body.
+      CodeForStatementPosition(node);
+      Visit(node->next());
+    }
+  }
+
+  // Based on the condition analysis, compile the backward jump as
+  // necessary.
+  switch (info) {
+    case ALWAYS_TRUE:
+      if (has_valid_frame()) {
+        if (node->next() == NULL) {
+          node->continue_target()->Jump();
+        } else {
+          loop.Jump();
+        }
+      }
+      break;
+    case DONT_KNOW:
+      if (test_at_bottom) {
+        if (node->continue_target()->is_linked()) {
+          // We can have dangling jumps to the continue target if there
+          // was no update expression.
+          node->continue_target()->Bind();
+        }
+        // Control can reach the test at the bottom by falling out of
+        // the body, by a continue in the body, or from the update
+        // expression.
+        if (has_valid_frame()) {
+          // The break target is the fall-through (body is a backward
+          // jump from here).
+          ControlDestination dest(&body, node->break_target(), false);
+          LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+        }
+      } else {
+        // Otherwise, jump back to the test at the top.
         if (has_valid_frame()) {
           if (node->next() == NULL) {
             node->continue_target()->Jump();
@@ -3000,47 +3031,19 @@
             loop.Jump();
           }
         }
-      } else {
-        ASSERT(info == DONT_KNOW);  // ALWAYS_FALSE cannot reach here.
-        if (test_at_bottom) {
-          if (node->continue_target()->is_linked()) {
-            // We can have dangling jumps to the continue target if
-            // there was no update expression.
-            node->continue_target()->Bind();
-          }
-          // Control can reach the test at the bottom by falling out
-          // of the body, by a continue in the body, or from the
-          // update expression.
-          if (has_valid_frame()) {
-            // The break target is the fall-through (body is a
-            // backward jump from here).
-            ControlDestination dest(&body, node->break_target(), false);
-            LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
-          }
-        } else {
-          // Otherwise, jump back to the test at the top.
-          if (has_valid_frame()) {
-            if (node->next() == NULL) {
-              node->continue_target()->Jump();
-            } else {
-              loop.Jump();
-            }
-          }
-        }
-      }
-
-      // The break target may be already bound (by the condition), or
-      // there may not be a valid frame.  Bind it only if needed.
-      if (node->break_target()->is_linked()) {
-        node->break_target()->Bind();
       }
       break;
-    }
+    case ALWAYS_FALSE:
+      UNREACHABLE();
+      break;
   }
 
+  // The break target may be already bound (by the condition), or
+  // there may not be a valid frame.  Bind it only if needed.
+  if (node->break_target()->is_linked()) {
+    node->break_target()->Bind();
+  }
   DecrementLoopNesting();
-  node->continue_target()->Unuse();
-  node->break_target()->Unuse();
 }
 
 
@@ -3234,10 +3237,10 @@
 }
 
 
-void CodeGenerator::VisitTryCatch(TryCatch* node) {
+void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
   ASSERT(!in_spilled_code());
   VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ TryCatch");
+  Comment cmnt(masm_, "[ TryCatchStatement");
   CodeForStatementPosition(node);
 
   JumpTarget try_block;
@@ -3370,10 +3373,10 @@
 }
 
 
-void CodeGenerator::VisitTryFinally(TryFinally* node) {
+void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
   ASSERT(!in_spilled_code());
   VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ TryFinally");
+  Comment cmnt(masm_, "[ TryFinallyStatement");
   CodeForStatementPosition(node);
 
   // State: Used to keep track of reason for entering the finally
@@ -5338,8 +5341,8 @@
     switch (op) {
       case Token::SUB: {
         bool overwrite =
-            (node->AsBinaryOperation() != NULL &&
-             node->AsBinaryOperation()->ResultOverwriteAllowed());
+          (node->expression()->AsBinaryOperation() != NULL &&
+           node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
         UnarySubStub stub(overwrite);
         // TODO(1222589): remove dependency of TOS being cached inside stub
         Result operand = frame_->Pop();
@@ -6523,6 +6526,116 @@
 }
 
 
+void GenericBinaryOpStub::GenerateCall(
+    MacroAssembler* masm,
+    Register left,
+    Register right) {
+  if (!ArgsInRegistersSupported()) {
+    // Only pass arguments in registers if there is no smi code in the stub.
+    __ push(left);
+    __ push(right);
+  } else {
+    // The calling convention with registers is left in edx and right in eax.
+    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
+    if (!(left.is(edx) && right.is(eax))) {
+      if (left.is(eax) && right.is(edx)) {
+        if (IsOperationCommutative()) {
+          SetArgsReversed();
+        } else {
+          __ xchg(left, right);
+        }
+      } else if (left.is(edx)) {
+        __ mov(eax, right);
+      } else if (left.is(eax)) {
+        if (IsOperationCommutative()) {
+          __ mov(edx, right);
+          SetArgsReversed();
+        } else {
+          __ mov(edx, left);
+          __ mov(eax, right);
+        }
+      } else if (right.is(edx)) {
+        if (IsOperationCommutative()) {
+          __ mov(eax, left);
+          SetArgsReversed();
+        } else {
+          __ mov(eax, right);
+          __ mov(edx, left);
+        }
+      } else if (right.is(eax)) {
+        __ mov(edx, left);
+      } else {
+        __ mov(edx, left);
+        __ mov(eax, right);
+      }
+    }
+
+    // Update flags to indicate that arguments are in registers.
+    SetArgsInRegisters();
+  }
+
+  // Call the stub.
+  __ CallStub(this);
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+    MacroAssembler* masm,
+    Register left,
+    Smi* right) {
+  if (!ArgsInRegistersSupported()) {
+    // Only pass arguments in registers if there is no smi code in the stub.
+    __ push(left);
+    __ push(Immediate(right));
+  } else {
+    // Adapt arguments to the calling convention: left in edx, right in eax.
+    if (left.is(edx)) {
+      __ mov(eax, Immediate(right));
+    } else if (left.is(eax) && IsOperationCommutative()) {
+      __ mov(edx, Immediate(right));
+      SetArgsReversed();
+    } else {
+      __ mov(edx, left);
+      __ mov(eax, Immediate(right));
+    }
+
+    // Update flags to indicate that arguments are in registers.
+    SetArgsInRegisters();
+  }
+
+  // Call the stub.
+  __ CallStub(this);
+}
+
+
+void GenericBinaryOpStub::GenerateCall(
+    MacroAssembler* masm,
+    Smi* left,
+    Register right) {
+  if (flags_ != NO_SMI_CODE_IN_STUB) {
+    // Only pass arguments in registers if there is no smi code in the stub.
+    __ push(Immediate(left));
+    __ push(right);
+  } else {
+    // Adapt arguments to the calling convention: left in edx, right in eax.
+    bool is_commutative = (op_ == Token::ADD) || (op_ == Token::MUL);
+    if (right.is(eax)) {
+      __ mov(edx, Immediate(left));
+    } else if (right.is(edx) && is_commutative) {
+      __ mov(eax, Immediate(left));
+    } else {
+      __ mov(edx, Immediate(left));
+      __ mov(eax, right);
+    }
+    // Update flags to indicate that arguments are in registers.
+    SetArgsInRegisters();
+  }
+
+  // Call the stub.
+  __ CallStub(this);
+}
+
+
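The case analysis in the Register/Register overload above exists to reach the
fixed convention (left in edx, right in eax) without clobbering either input,
using xchg or commutativity where that saves a move. A self-contained model of
the same decisions; the names and string encoding are purely illustrative:

#include <cassert>
#include <string>

enum Reg { eax, ebx, ecx, edx };

// Plan the moves for reaching "left in edx, right in eax".  Mirrors the
// branches above; "mark reversed" stands for SetArgsReversed().
std::string PlanShuffle(Reg left, Reg right, bool commutative) {
  if (left == edx && right == eax) return "nothing to do";
  if (left == eax && right == edx)
    return commutative ? "mark reversed" : "xchg edx, eax";
  if (left == edx) return "mov eax, right";
  if (left == eax)
    return commutative ? "mov edx, right; mark reversed"
                       : "mov edx, left; mov eax, right";  // order matters
  if (right == edx)
    return commutative ? "mov eax, left; mark reversed"
                       : "mov eax, right; mov edx, left";  // order matters
  if (right == eax) return "mov edx, left";
  return "mov edx, left; mov eax, right";
}

int main() {
  assert(PlanShuffle(edx, eax, false) == "nothing to do");
  assert(PlanShuffle(eax, edx, true) == "mark reversed");
  assert(PlanShuffle(eax, edx, false) == "xchg edx, eax");
  assert(PlanShuffle(ebx, ecx, false) == "mov edx, left; mov eax, right");
  return 0;
}
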
 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
   // Perform fast-case smi code for the operation (eax <op> ebx) and
   // leave result in register eax.
@@ -6670,22 +6783,23 @@
 void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
   Label call_runtime;
 
-  if (flags_ == SMI_CODE_IN_STUB) {
-    // The fast case smi code wasn't inlined in the stub caller
-    // code. Generate it here to speed up common operations.
-    Label slow;
-    __ mov(ebx, Operand(esp, 1 * kPointerSize));  // get y
-    __ mov(eax, Operand(esp, 2 * kPointerSize));  // get x
-    GenerateSmiCode(masm, &slow);
-    __ ret(2 * kPointerSize);  // remove both operands
+  __ IncrementCounter(&Counters::generic_binary_stub_calls, 1);
 
+  // Generate fast case smi code if requested. This flag is set when the fast
+  // case smi code is not generated by the caller. Generating it here will speed
+  // up common operations.
+  if (HasSmiCodeInStub()) {
+    Label slow;
+    __ mov(ebx, Operand(esp, 1 * kPointerSize));
+    __ mov(eax, Operand(esp, 2 * kPointerSize));
+    GenerateSmiCode(masm, &slow);
+    GenerateReturn(masm);
     // Too bad. The fast case smi code didn't succeed.
     __ bind(&slow);
   }
 
-  // Setup registers.
-  __ mov(eax, Operand(esp, 1 * kPointerSize));  // get y
-  __ mov(edx, Operand(esp, 2 * kPointerSize));  // get x
+  // Make sure the arguments are in edx and eax.
+  GenerateLoadArguments(masm);
 
   // Floating point case.
   switch (op_) {
@@ -6719,19 +6833,24 @@
             __ test(eax, Immediate(kSmiTagMask));
             __ j(not_zero, &skip_allocation, not_taken);
             // Fall through!
-          case NO_OVERWRITE:
+          case NO_OVERWRITE: {
+            // Allocate a heap number for the result. Keep eax and edx intact
+            // for the possible runtime call.
             FloatingPointHelper::AllocateHeapNumber(masm,
                                                     &call_runtime,
                                                     ecx,
-                                                    edx,
-                                                    eax);
+                                                    no_reg,
+                                                    ebx);
+            // Now eax can be overwritten, losing one of the arguments, since
+            // we are done with it and will not need it any more.
+            __ mov(eax, ebx);
             __ bind(&skip_allocation);
             break;
+          }
           default: UNREACHABLE();
         }
         __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
-        __ ret(2 * kPointerSize);
-
+        GenerateReturn(masm);
       } else {  // SSE2 not available, use FPU.
         FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
         // Allocate a heap number, if needed.
@@ -6747,11 +6866,16 @@
             __ j(not_zero, &skip_allocation, not_taken);
             // Fall through!
           case NO_OVERWRITE:
+            // Allocate a heap number for the result. Keep eax and edx intact
+            // for the possible runtime call.
             FloatingPointHelper::AllocateHeapNumber(masm,
                                                     &call_runtime,
                                                     ecx,
-                                                    edx,
-                                                    eax);
+                                                    no_reg,
+                                                    ebx);
+            // Now eax can be overwritten, losing one of the arguments, since
+            // we are done with it and will not need it any more.
+            __ mov(eax, ebx);
             __ bind(&skip_allocation);
             break;
           default: UNREACHABLE();
@@ -6766,7 +6890,7 @@
           default: UNREACHABLE();
         }
         __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
-        __ ret(2 * kPointerSize);
+        GenerateReturn(masm);
       }
     }
     case Token::MOD: {
@@ -6899,8 +7023,20 @@
   }
 
   // If all else fails, use the runtime system to get the correct
-  // result.
+  // result. If the arguments were passed in registers, place them on the
+  // stack now, in the correct order.
   __ bind(&call_runtime);
+  if (HasArgumentsInRegisters()) {
+    __ pop(ecx);
+    if (HasArgumentsReversed()) {
+      __ push(eax);
+      __ push(edx);
+    } else {
+      __ push(edx);
+      __ push(eax);
+    }
+    __ push(ecx);
+  }
   switch (op_) {
     case Token::ADD: {
       // Test for string arguments before calling runtime.
@@ -6977,6 +7113,26 @@
 }
 
 
+void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
+  // If the arguments are not passed in registers, read them from the stack.
+  if (!HasArgumentsInRegisters()) {
+    __ mov(eax, Operand(esp, 1 * kPointerSize));
+    __ mov(edx, Operand(esp, 2 * kPointerSize));
+  }
+}
+
+
+void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
+  // If the arguments are not passed in registers, remove them from the
+  // stack before returning.
+  if (!HasArgumentsInRegisters()) {
+    __ ret(2 * kPointerSize);  // Remove both operands.
+  } else {
+    __ ret(0);
+  }
+}
+
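GenerateLoadArguments and GenerateReturn make the stub body independent of how
it was called: arguments are fetched from the stack only when they were pushed
there, and ret must pop exactly what the call site pushed. A toy model of the
two return forms, with a vector standing in for the machine stack:

#include <cassert>
#include <vector>

const int kPointerSize = 4;

// 'ret n' pops the return address, then drops n more bytes of arguments.
void Ret(std::vector<int>* stack, int extra_bytes) {
  stack->pop_back();                                     // return address
  for (int i = 0; i < extra_bytes / kPointerSize; i++)   // callee-popped args
    stack->pop_back();
}

int main() {
  std::vector<int> stack_args;           // arguments were passed on the stack
  stack_args.push_back(1);               // x
  stack_args.push_back(2);               // y
  stack_args.push_back(0xBEEF);          // return address
  Ret(&stack_args, 2 * kPointerSize);    // __ ret(2 * kPointerSize)
  assert(stack_args.empty());

  std::vector<int> reg_args;             // arguments were passed in registers
  reg_args.push_back(0xBEEF);            // only the return address
  Ret(&reg_args, 0);                     // __ ret(0)
  assert(reg_args.empty());
  return 0;
}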
+
 void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
                                              Label* need_gc,
                                              Register scratch1,
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index 142a5a1..ec4a8be 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -294,6 +294,15 @@
                                Handle<Script> script,
                                bool is_eval);
 
+  // Printing of AST, etc. as requested by flags.
+  static void MakeCodePrologue(FunctionLiteral* fun);
+
+  // Allocate and install the code.
+  static Handle<Code> MakeCodeEpilogue(FunctionLiteral* fun,
+                                       MacroAssembler* masm,
+                                       Code::Flags flags,
+                                       Handle<Script> script);
+
 #ifdef ENABLE_LOGGING_AND_PROFILING
   static bool ShouldGenerateLog(Expression* type);
 #endif
@@ -303,6 +312,8 @@
                               bool is_toplevel,
                               Handle<Script> script);
 
+  static void RecordPositions(MacroAssembler* masm, int pos);
+
   // Accessors
   MacroAssembler* masm() { return masm_; }
 
@@ -548,6 +559,14 @@
   inline void GenerateMathSin(ZoneList<Expression*>* args);
   inline void GenerateMathCos(ZoneList<Expression*>* args);
 
+  // Simple condition analysis.
+  enum ConditionAnalysis {
+    ALWAYS_TRUE,
+    ALWAYS_FALSE,
+    DONT_KNOW
+  };
+  ConditionAnalysis AnalyzeCondition(Expression* cond);
+
   // Methods used to indicate which source code is generated for. Source
   // positions are collected by the assembler and emitted with the relocation
   // information.
@@ -604,47 +623,62 @@
 };
 
 
-// Flag that indicates whether or not the code that handles smi arguments
-// should be placed in the stub, inlined, or omitted entirely.
+// Flags that indicate how to generate code for the stub.
 enum GenericBinaryFlags {
-  SMI_CODE_IN_STUB,
-  SMI_CODE_INLINED
+  NO_GENERIC_BINARY_FLAGS = 0,
+  NO_SMI_CODE_IN_STUB = 1 << 0  // Omit smi code in stub.
 };
 
 
 class GenericBinaryOpStub: public CodeStub {
  public:
-  GenericBinaryOpStub(Token::Value op,
+  GenericBinaryOpStub(Token::Value operation,
                       OverwriteMode mode,
                       GenericBinaryFlags flags)
-      : op_(op), mode_(mode), flags_(flags) {
+      : op_(operation),
+        mode_(mode),
+        flags_(flags),
+        args_in_registers_(false),
+        args_reversed_(false) {
     use_sse3_ = CpuFeatures::IsSupported(CpuFeatures::SSE3);
     ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
   }
 
-  void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+  // Generate code to call the stub with the supplied arguments. This will add
+  // code at the call site to prepare the arguments either in registers or on
+  // the stack, together with the actual call.
+  void GenerateCall(MacroAssembler* masm, Register left, Register right);
+  void GenerateCall(MacroAssembler* masm, Register left, Smi* right);
+  void GenerateCall(MacroAssembler* masm, Smi* left, Register right);
 
  private:
   Token::Value op_;
   OverwriteMode mode_;
   GenericBinaryFlags flags_;
+  bool args_in_registers_;  // Arguments passed in registers, not on the stack.
+  bool args_reversed_;  // Left and right arguments are swapped.
   bool use_sse3_;
 
   const char* GetName();
 
 #ifdef DEBUG
   void Print() {
-    PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
+    PrintF("GenericBinaryOpStub (op %s), "
+           "(mode %d, flags %d, registers %d, reversed %d)\n",
            Token::String(op_),
            static_cast<int>(mode_),
-           static_cast<int>(flags_));
+           static_cast<int>(flags_),
+           static_cast<int>(args_in_registers_),
+           static_cast<int>(args_reversed_));
   }
 #endif
 
-  // Minor key encoding in 16 bits FSOOOOOOOOOOOOMM.
+  // Minor key encoding in 16 bits FRASOOOOOOOOOOMM.
   class ModeBits: public BitField<OverwriteMode, 0, 2> {};
-  class OpBits: public BitField<Token::Value, 2, 12> {};
-  class SSE3Bits: public BitField<bool, 14, 1> {};
+  class OpBits: public BitField<Token::Value, 2, 10> {};
+  class SSE3Bits: public BitField<bool, 12, 1> {};
+  class ArgsInRegistersBits: public BitField<bool, 13, 1> {};
+  class ArgsReversedBits: public BitField<bool, 14, 1> {};
   class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
 
   Major MajorKey() { return GenericBinaryOp; }
@@ -653,9 +687,30 @@
     return OpBits::encode(op_)
            | ModeBits::encode(mode_)
            | FlagBits::encode(flags_)
-           | SSE3Bits::encode(use_sse3_);
+           | SSE3Bits::encode(use_sse3_)
+           | ArgsInRegistersBits::encode(args_in_registers_)
+           | ArgsReversedBits::encode(args_reversed_);
   }
+
   void Generate(MacroAssembler* masm);
+  void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+  void GenerateLoadArguments(MacroAssembler* masm);
+  void GenerateReturn(MacroAssembler* masm);
+
+  bool ArgsInRegistersSupported() {
+    return ((op_ == Token::ADD) || (op_ == Token::SUB) ||
+            (op_ == Token::MUL) || (op_ == Token::DIV)) &&
+           flags_ != NO_SMI_CODE_IN_STUB;
+  }
+  bool IsOperationCommutative() {
+    return (op_ == Token::ADD) || (op_ == Token::MUL);
+  }
+
+  void SetArgsInRegisters() { args_in_registers_ = true; }
+  void SetArgsReversed() { args_reversed_ = true; }
+  bool HasSmiCodeInStub() { return (flags_ & NO_SMI_CODE_IN_STUB) == 0; }
+  bool HasArgumentsInRegisters() { return args_in_registers_; }
+  bool HasArgumentsReversed() { return args_reversed_; }
 };
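
The minor key layout FRASOOOOOOOOOOMM above is built with V8's BitField
helper. A sketch re-implementing the idea so the packing can be checked
standalone (this is not V8's actual template):

#include <cassert>
#include <cstdint>

template <class T, int shift, int size>
struct BitField {
  static uint32_t encode(T value) {
    assert(static_cast<uint32_t>(value) < (1u << size));
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t key) {
    return static_cast<T>((key >> shift) & ((1u << size) - 1));
  }
};

// The layout from the comment, low bits first: MM OOOOOOOOOO S A R F.
typedef BitField<int,   0,  2> ModeBits;
typedef BitField<int,   2, 10> OpBits;
typedef BitField<bool, 12,  1> SSE3Bits;
typedef BitField<bool, 13,  1> ArgsInRegistersBits;
typedef BitField<bool, 14,  1> ArgsReversedBits;
typedef BitField<int,  15,  1> FlagBits;

int main() {
  uint32_t key = ModeBits::encode(2) | OpBits::encode(17) |
                 ArgsInRegistersBits::encode(true);
  assert(ModeBits::decode(key) == 2);
  assert(OpBits::decode(key) == 17);
  assert(ArgsInRegistersBits::decode(key));
  assert(!ArgsReversedBits::decode(key));
  return 0;
}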
 
 
diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc
index 7e0dfd1..2d20117 100644
--- a/src/ia32/debug-ia32.cc
+++ b/src/ia32/debug-ia32.cc
@@ -63,7 +63,7 @@
 // having been patched with a call instruction.
 bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
   ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
-  return rinfo->IsCallInstruction();
+  return rinfo->IsPatchedReturnSequence();
 }
 
 
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index 458844e..adedf34 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -124,6 +124,14 @@
 };
 
 
+static const char* conditional_move_mnem[] = {
+  /*0*/ "cmovo", "cmovno", "cmovc", "cmovnc",
+  /*4*/ "cmovz", "cmovnz", "cmovna", "cmova",
+  /*8*/ "cmovs", "cmovns", "cmovpe", "cmovpo",
+  /*12*/ "cmovl", "cmovnl", "cmovng", "cmovg"
+};
+
+
 enum InstructionType {
   NO_INSTR,
   ZERO_OPERANDS_INSTR,
@@ -311,6 +319,7 @@
   int JumpConditional(byte* data, const char* comment);
   int JumpConditionalShort(byte* data, const char* comment);
   int SetCC(byte* data);
+  int CMov(byte* data);
   int FPUInstruction(byte* data);
   void AppendToBuffer(const char* format, ...);
 
@@ -615,6 +624,16 @@
 
 
 // Returns number of bytes used, including *data.
+int DisassemblerIA32::CMov(byte* data) {
+  assert(*data == 0x0F);
+  byte cond = *(data + 1) & 0x0F;
+  const char* mnem = conditional_move_mnem[cond];
+  int op_size = PrintOperands(mnem, REG_OPER_OP_ORDER, data + 2);
+  return 2 + op_size;  // includes 0x0F
+}
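
CMOV opcodes are 0x0F 0x40 through 0x4F, with the condition in the low nibble
of the second opcode byte, just like Jcc and SETcc; CMov above indexes the
mnemonic table with that nibble. A self-contained demo using the same table:

#include <cstdio>

static const char* conditional_move_mnem[16] = {
  "cmovo", "cmovno", "cmovc",  "cmovnc", "cmovz", "cmovnz", "cmovna", "cmova",
  "cmovs", "cmovns", "cmovpe", "cmovpo", "cmovl", "cmovnl", "cmovng", "cmovg"
};

int main() {
  unsigned char insn[] = { 0x0F, 0x44, 0xC1 };  // cmovz eax, ecx
  if (insn[0] == 0x0F && (insn[1] & 0xF0) == 0x40) {
    printf("%s\n", conditional_move_mnem[insn[1] & 0x0F]);  // prints "cmovz"
  }
  return 0;
}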
+
+
+// Returns number of bytes used, including *data.
 int DisassemblerIA32::FPUInstruction(byte* data) {
   byte b1 = *data;
   byte b2 = *(data + 1);
@@ -861,6 +880,8 @@
             data += PrintOperands(f0mnem, REG_OPER_OP_ORDER, data);
           } else if ((f0byte & 0xF0) == 0x90) {
             data += SetCC(data);
+          } else if ((f0byte & 0xF0) == 0x40) {
+            data += CMov(data);
           } else {
             data += 2;
             if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) {
@@ -956,6 +977,19 @@
           AppendToBuffer("mov_w ");
           data += PrintRightOperand(data);
           AppendToBuffer(",%s", NameOfCPURegister(regop));
+        } else if (*data == 0x0F) {
+          data++;
+          if (*data == 0x2F) {
+            data++;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("comisd %s,%s",
+                           NameOfXMMRegister(regop),
+                           NameOfXMMRegister(rm));
+            data++;
+          } else {
+            UnimplementedInstruction();
+          }
         } else {
           UnimplementedInstruction();
         }
diff --git a/src/ia32/fast-codegen-ia32.cc b/src/ia32/fast-codegen-ia32.cc
new file mode 100644
index 0000000..ee1b92d
--- /dev/null
+++ b/src/ia32/fast-codegen-ia32.cc
@@ -0,0 +1,163 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "fast-codegen.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+// Generate code for a JS function.  On entry to the function the receiver
+// and arguments have been pushed on the stack left to right, with the
+// return address on top of them.  The actual argument count matches the
+// formal parameter count expected by the function.
+//
+// The live registers are:
+//   o edi: the JS function object being called (ie, ourselves)
+//   o esi: our context
+//   o ebp: our caller's frame pointer
+//   o esp: stack pointer (pointing to return address)
+//
+// The function builds a JS frame.  Please see JavaScriptFrameConstants in
+// frames-ia32.h for its layout.
+void FastCodeGenerator::Generate(FunctionLiteral* fun) {
+  function_ = fun;
+  SetFunctionPosition(fun);
+
+  __ push(ebp);  // Caller's frame pointer.
+  __ mov(ebp, esp);
+  __ push(esi);  // Callee's context.
+  __ push(edi);  // Callee's JS Function.
+
+  { Comment cmnt(masm_, "[ Allocate locals");
+    int locals_count = fun->scope()->num_stack_slots();
+    for (int i = 0; i < locals_count; i++) {
+      __ push(Immediate(Factory::undefined_value()));
+    }
+  }
+
+  { Comment cmnt(masm_, "[ Stack check");
+    Label ok;
+    ExternalReference stack_guard_limit =
+        ExternalReference::address_of_stack_guard_limit();
+    __ cmp(esp, Operand::StaticVariable(stack_guard_limit));
+    __ j(above_equal, &ok, taken);
+    StackCheckStub stub;
+    __ CallStub(&stub);
+    __ bind(&ok);
+  }
+
+  { Comment cmnt(masm_, "[ Body");
+    VisitStatements(fun->body());
+  }
+
+  { Comment cmnt(masm_, "[ return <undefined>;");
+    // Emit a 'return undefined' in case control fell off the end of the
+    // body.
+    __ mov(eax, Factory::undefined_value());
+    SetReturnPosition(fun);
+    __ RecordJSReturn();
+    // Do not use the leave instruction here because it is too short to
+    // patch with the code required by the debugger.
+    __ mov(esp, ebp);
+    __ pop(ebp);
+    __ ret((fun->scope()->num_parameters() + 1) * kPointerSize);
+  }
+}
+
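For orientation, the frame built by the prologue above, reconstructed from its
pushes (frames-ia32.h's JavaScriptFrameConstants is the authoritative layout):

// Sketch of the stack after the prologue (higher addresses first):
//
//   receiver                      (pushed first by the caller)
//   argument 0 ... argument n-1   (pushed left to right)
//   return address
//   saved ebp                     <-- ebp
//   context (esi)
//   function (edi)
//   local 0 ... local N-1         <-- esp, slots preset to undefined
//
// The epilogue's ret((n + 1) * kPointerSize) pops the return address plus
// the n arguments and the receiver in one instruction.
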
+
+void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
+  Comment cmnt(masm_, "[ ExpressionStatement");
+  SetStatementPosition(stmt);
+  Visit(stmt->expression());
+}
+
+
+void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
+  Comment cmnt(masm_, "[ ReturnStatement");
+  SetStatementPosition(stmt);
+  Visit(stmt->expression());
+  __ pop(eax);
+  __ RecordJSReturn();
+  // Do not use the leave instruction here because it is too short to
+  // patch with the code required by the debugger.
+  __ mov(esp, ebp);
+  __ pop(ebp);
+  __ ret((function_->scope()->num_parameters() + 1) * kPointerSize);
+}
+
+
+void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+  Comment cmnt(masm_, "[ VariableProxy");
+  Expression* rewrite = expr->var()->rewrite();
+  ASSERT(rewrite != NULL);
+
+  Slot* slot = rewrite->AsSlot();
+  ASSERT(slot != NULL);
+  { Comment cmnt(masm_, "[ Slot");
+    if (expr->location().is_temporary()) {
+      __ push(Operand(ebp, SlotOffset(slot)));
+    } else {
+      ASSERT(expr->location().is_nowhere());
+    }
+  }
+}
+
+
+void FastCodeGenerator::VisitLiteral(Literal* expr) {
+  Comment cmnt(masm_, "[ Literal");
+  if (expr->location().is_temporary()) {
+    __ push(Immediate(expr->handle()));
+  } else {
+    ASSERT(expr->location().is_nowhere());
+  }
+}
+
+
+void FastCodeGenerator::VisitAssignment(Assignment* expr) {
+  Comment cmnt(masm_, "[ Assignment");
+  ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR);
+  Visit(expr->value());
+
+  Variable* var = expr->target()->AsVariableProxy()->AsVariable();
+  ASSERT(var != NULL && var->slot() != NULL);
+
+  if (expr->location().is_temporary()) {
+    __ mov(eax, Operand(esp, 0));
+    __ mov(Operand(ebp, SlotOffset(var->slot())), eax);
+  } else {
+    ASSERT(expr->location().is_nowhere());
+    __ pop(Operand(ebp, SlotOffset(var->slot())));
+  }
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index f7369a8..af05680 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -298,7 +298,6 @@
   __ shl(eax, kSmiTagSize);
   __ ret(0);
 
-
   // Slow case: Load name and receiver from stack and jump to runtime.
   __ bind(&slow);
   __ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
@@ -424,14 +423,11 @@
   __ mov(edx, eax);  // Save the value.
   __ sar(eax, kSmiTagSize);  // Untag the value.
   {  // Clamp the value to [0..255].
-    Label done, is_negative;
+    Label done;
     __ test(eax, Immediate(0xFFFFFF00));
     __ j(zero, &done);
-    __ j(negative, &is_negative);
-    __ mov(eax, Immediate(255));
-    __ jmp(&done);
-    __ bind(&is_negative);
-    __ xor_(eax, Operand(eax));  // Clear eax.
+    __ setcc(negative, eax);  // 1 if negative, 0 if positive.
+    __ dec_b(eax);  // 0 if negative, 255 if positive.
     __ bind(&done);
   }
   __ mov(ecx, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
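
The setcc/dec_b pair above replaces the two-branch clamp with a branchless
one: setcc leaves 1 in the byte register for negative values and 0 otherwise,
and the byte decrement wraps 0 around to 255. The same arithmetic, modeled
standalone:

#include <cassert>
#include <cstdint>

uint8_t ClampToByte(int32_t value) {
  if ((value & 0xFFFFFF00) == 0) return static_cast<uint8_t>(value);  // 0..255
  uint8_t negative = (value < 0) ? 1 : 0;     // __ setcc(negative, eax)
  return static_cast<uint8_t>(negative - 1);  // __ dec_b(eax): 1->0, 0->255
}

int main() {
  assert(ClampToByte(42) == 42);
  assert(ClampToByte(300) == 255);
  assert(ClampToByte(-5) == 0);
  return 0;
}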
@@ -458,7 +454,6 @@
   __ sub(Operand(ebx), Immediate(1 << kSmiTagSize));  // decrement ebx again
   __ jmp(&fast);
 
-
   // Array case: Get the length and the elements array from the JS
   // array. Check that the array is in fast mode; if it is the
   // length is always a smi.
diff --git a/src/ia32/virtual-frame-ia32.cc b/src/ia32/virtual-frame-ia32.cc
index 1b8232f..980cec8 100644
--- a/src/ia32/virtual-frame-ia32.cc
+++ b/src/ia32/virtual-frame-ia32.cc
@@ -161,15 +161,16 @@
   // on the stack.
   int start = Min(begin, stack_pointer_ + 1);
 
-  // If positive we have to adjust the stack pointer.
-  int delta = end - stack_pointer_;
-  if (delta > 0) {
-    stack_pointer_ = end;
-    __ sub(Operand(esp), Immediate(delta * kPointerSize));
-  }
-
+  // Emit normal 'push' instructions for elements above the stack pointer
+  // and use 'mov' instructions for elements at or below it.
   for (int i = start; i <= end; i++) {
-    if (!elements_[i].is_synced()) SyncElementBelowStackPointer(i);
+    if (!elements_[i].is_synced()) {
+      if (i <= stack_pointer_) {
+        SyncElementBelowStackPointer(i);
+      } else {
+        SyncElementByPushing(i);
+      }
+    }
   }
 }
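
The rewritten loop above picks a sync strategy per element rather than
pre-adjusting esp once for the whole range. A sketch of the resulting control
flow, with prints standing in for the SyncElement* helpers; the function shape
is illustrative:

#include <cstdio>

void SyncRange(int start, int end, int stack_pointer, bool* synced) {
  for (int i = start; i <= end; i++) {
    if (!synced[i]) {
      if (i <= stack_pointer) {
        printf("mov [slot %d], element %d\n", i, i);  // slot already exists
      } else {
        printf("push element %d\n", i);  // push creates the slot, moves esp
        stack_pointer = i;
      }
      synced[i] = true;
    }
  }
}

int main() {
  bool synced[6] = { true, false, true, false, false, false };
  SyncRange(0, 5, 2, synced);  // elements 3..5 are above the stack pointer
  return 0;
}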
 
@@ -454,14 +455,16 @@
   Comment cmnt(masm(), "[ Enter JS frame");
 
 #ifdef DEBUG
-  // Verify that edi contains a JS function.  The following code
-  // relies on eax being available for use.
-  __ test(edi, Immediate(kSmiTagMask));
-  __ Check(not_zero,
-           "VirtualFrame::Enter - edi is not a function (smi check).");
-  __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
-  __ Check(equal,
-           "VirtualFrame::Enter - edi is not a function (map check).");
+  if (FLAG_debug_code) {
+    // Verify that edi contains a JS function.  The following code
+    // relies on eax being available for use.
+    __ test(edi, Immediate(kSmiTagMask));
+    __ Check(not_zero,
+             "VirtualFrame::Enter - edi is not a function (smi check).");
+    __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
+    __ Check(equal,
+             "VirtualFrame::Enter - edi is not a function (map check).");
+  }
 #endif
 
   EmitPush(ebp);
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index e518662..c77f32d 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -45,13 +45,10 @@
 
 #ifdef V8_NATIVE_REGEXP
 #if V8_TARGET_ARCH_IA32
-#include "ia32/macro-assembler-ia32.h"
 #include "ia32/regexp-macro-assembler-ia32.h"
 #elif V8_TARGET_ARCH_X64
-#include "x64/macro-assembler-x64.h"
 #include "x64/regexp-macro-assembler-x64.h"
 #elif V8_TARGET_ARCH_ARM
-#include "arm/macro-assembler-arm.h"
 #include "arm/regexp-macro-assembler-arm.h"
 #else
 #error Unsupported target architecture.
diff --git a/src/jsregexp.h b/src/jsregexp.h
index 3bc30b6..84f8d98 100644
--- a/src/jsregexp.h
+++ b/src/jsregexp.h
@@ -28,6 +28,8 @@
 #ifndef V8_JSREGEXP_H_
 #define V8_JSREGEXP_H_
 
+#include "macro-assembler.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/jump-target.h b/src/jump-target.h
index 0c42f1b..0933ee7 100644
--- a/src/jump-target.h
+++ b/src/jump-target.h
@@ -28,6 +28,8 @@
 #ifndef V8_JUMP_TARGET_H_
 #define V8_JUMP_TARGET_H_
 
+#include "macro-assembler.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/location.h b/src/location.h
new file mode 100644
index 0000000..59cd88a
--- /dev/null
+++ b/src/location.h
@@ -0,0 +1,56 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_LOCATION_H_
+#define V8_LOCATION_H_
+
+#include "utils.h"
+
+namespace v8 {
+namespace internal {
+
+class Location BASE_EMBEDDED {
+ public:
+  static Location Temporary() { return Location(TEMP); }
+  static Location Nowhere() { return Location(NOWHERE); }
+  static Location Constant() { return Location(CONSTANT); }
+
+  bool is_temporary() { return type_ == TEMP; }
+  bool is_nowhere() { return type_ == NOWHERE; }
+
+ private:
+  enum Type { TEMP, NOWHERE, CONSTANT };
+
+  explicit Location(Type type) : type_(type) {}
+
+  Type type_;
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_LOCATION_H_
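
A hypothetical use of the Location class, matching how fast-codegen-ia32.cc
above consumes it: an expression result is either materialized on the stack
(Temporary) or dropped (Nowhere). The class is re-declared locally, minus the
CONSTANT case, so the sketch compiles on its own:

#include <cassert>

class Location {
 public:
  static Location Temporary() { return Location(TEMP); }
  static Location Nowhere() { return Location(NOWHERE); }
  bool is_temporary() { return type_ == TEMP; }
  bool is_nowhere() { return type_ == NOWHERE; }
 private:
  enum Type { TEMP, NOWHERE };
  explicit Location(Type type) : type_(type) {}
  Type type_;
};

int main() {
  Location effect_only = Location::Nowhere();     // e.g. expression statement
  Location value_needed = Location::Temporary();  // e.g. an argument
  assert(effect_only.is_nowhere());
  assert(value_needed.is_temporary());
  return 0;
}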
diff --git a/src/log.cc b/src/log.cc
index 1c82f85..d1d9a31 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -934,6 +934,21 @@
 }
 
 
+void Logger::HeapSampleJSProducerEvent(const char* constructor,
+                                       Address* stack) {
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  if (!Log::IsEnabled() || !FLAG_log_gc) return;
+  LogMessageBuilder msg;
+  msg.Append("heap-js-prod-item,%s", constructor);
+  while (*stack != NULL) {
+    msg.Append(",0x%" V8PRIxPTR, *stack++);
+  }
+  msg.Append("\n");
+  msg.WriteToLogFile();
+#endif
+}
+
+
 void Logger::DebugTag(const char* call_site_tag) {
 #ifdef ENABLE_LOGGING_AND_PROFILING
   if (!Log::IsEnabled() || !FLAG_log) return;
diff --git a/src/log.h b/src/log.h
index 07a0429..13d45d2 100644
--- a/src/log.h
+++ b/src/log.h
@@ -223,6 +223,8 @@
                                            int number, int bytes);
   static void HeapSampleJSRetainersEvent(const char* constructor,
                                          const char* event);
+  static void HeapSampleJSProducerEvent(const char* constructor,
+                                        Address* stack);
   static void HeapSampleStats(const char* space, const char* kind,
                               int capacity, int used);
 
diff --git a/src/macros.py b/src/macros.py
index c75f0ea..ddd2f13 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -118,9 +118,7 @@
 # a type error is thrown.
 macro DATE_VALUE(arg) = (%_ClassOf(arg) === 'Date' ? %_ValueOf(arg) : ThrowDateTypeError());
 
-# Last input and last subject are after the captures so we can omit them on
-# results returned from global searches.  Beware - these evaluate their
-# arguments twice.
+# Last input and last subject of regexp matches.
 macro LAST_SUBJECT(array) = ((array)[1]);
 macro LAST_INPUT(array) = ((array)[2]);
 
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index a20245c..5a3ab89 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -279,7 +279,7 @@
 
   void VisitDebugTarget(RelocInfo* rinfo) {
     ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) &&
-           rinfo->IsCallInstruction());
+           rinfo->IsPatchedReturnSequence());
     HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
     MarkCompactCollector::MarkObject(code);
   }
@@ -1382,7 +1382,8 @@
   }
 
   void VisitDebugTarget(RelocInfo* rinfo) {
-    ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) && rinfo->IsCallInstruction());
+    ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) &&
+           rinfo->IsPatchedReturnSequence());
     Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
     VisitPointer(&target);
     rinfo->set_call_address(
diff --git a/src/mirror-delay.js b/src/mirror-delay.js
index c4ab7b8..cde5534 100644
--- a/src/mirror-delay.js
+++ b/src/mirror-delay.js
@@ -764,7 +764,7 @@
 ObjectMirror.prototype.toText = function() {
   var name;
   var ctor = this.constructorFunction();
-  if (ctor.isUndefined()) {
+  if (!ctor.isFunction()) {
     name = this.className();
   } else {
     name = ctor.name();
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 288cc21..afa51f6 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -29,7 +29,6 @@
 
 #include "disassembler.h"
 #include "disasm.h"
-#include "macro-assembler.h"
 #include "jsregexp.h"
 
 namespace v8 {
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 22e12ac..cb7b7c8 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -744,15 +744,17 @@
 
 Smi* Smi::FromInt(int value) {
   ASSERT(Smi::IsValid(value));
+  int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
   intptr_t tagged_value =
-      (static_cast<intptr_t>(value) << kSmiTagSize) | kSmiTag;
+      (static_cast<intptr_t>(value) << smi_shift_bits) | kSmiTag;
   return reinterpret_cast<Smi*>(tagged_value);
 }
 
 
 Smi* Smi::FromIntptr(intptr_t value) {
   ASSERT(Smi::IsValid(value));
-  return reinterpret_cast<Smi*>((value << kSmiTagSize) | kSmiTag);
+  int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
+  return reinterpret_cast<Smi*>((value << smi_shift_bits) | kSmiTag);
 }
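
Folding kSmiShiftSize into the shift lets the same code produce both layouts:
with kSmiShiftSize presumably 0 on 32-bit targets the payload sits directly
above the tag, while on x64 a 32-bit payload sits in the upper half of the
word. A standalone model of the x64 case; the constants are assumptions read
off the layout comment in objects.h:

#include <cassert>
#include <cstdint>

const intptr_t kSmiTag = 0;
const int kSmiTagSize = 1;
const int kSmiShiftSize = 31;  // x64 "long smi"; 0 on 32-bit targets.

intptr_t SmiFromInt(int32_t value) {
  return (static_cast<intptr_t>(value) << (kSmiTagSize + kSmiShiftSize)) |
         kSmiTag;
}

int32_t SmiValue(intptr_t smi) {
  return static_cast<int32_t>(smi >> (kSmiTagSize + kSmiShiftSize));
}

int main() {
  assert(sizeof(intptr_t) == 8);  // this model only makes sense on 64-bit
  assert(SmiValue(SmiFromInt(-12345)) == -12345);
  assert((SmiFromInt(7) & 1) == 0);  // low tag bit 0 identifies a smi
  return 0;
}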
 
 
@@ -776,7 +778,7 @@
       kFailureTypeTagSize + kSpaceTagSize - kObjectAlignmentBits;
   STATIC_ASSERT(kShiftBits >= 0);
   ASSERT(type() == RETRY_AFTER_GC);
-  return value() >> kShiftBits;
+  return static_cast<int>(value() >> kShiftBits);
 }
 
 
@@ -802,29 +804,31 @@
 }
 
 
-int Failure::value() const {
-  return static_cast<int>(reinterpret_cast<intptr_t>(this) >> kFailureTagSize);
+intptr_t Failure::value() const {
+  return reinterpret_cast<intptr_t>(this) >> kFailureTagSize;
 }
 
 
 Failure* Failure::RetryAfterGC(int requested_bytes) {
   // Assert that the space encoding fits in the three bytes allotted for it.
   ASSERT((LAST_SPACE & ~kSpaceTagMask) == 0);
-  int requested = requested_bytes >> kObjectAlignmentBits;
+  intptr_t requested = requested_bytes >> kObjectAlignmentBits;
+  int tag_bits = kSpaceTagSize + kFailureTypeTagSize;
+  if (((requested << tag_bits) >> tag_bits) != requested) {
+    // No room for entire requested size in the bits. Round down to
+    // maximally representable size.
+    requested = static_cast<intptr_t>(
+                    (~static_cast<uintptr_t>(0)) >> (tag_bits + 1));
+  }
   int value = (requested << kSpaceTagSize) | NEW_SPACE;
-  ASSERT(value >> kSpaceTagSize == requested);
-  ASSERT(Smi::IsValid(value));
-  ASSERT(value == ((value << kFailureTypeTagSize) >> kFailureTypeTagSize));
-  ASSERT(Smi::IsValid(value << kFailureTypeTagSize));
   return Construct(RETRY_AFTER_GC, value);
 }
 
 
-Failure* Failure::Construct(Type type, int value) {
-  int info = (value << kFailureTypeTagSize) | type;
+Failure* Failure::Construct(Type type, intptr_t value) {
+  intptr_t info = (static_cast<intptr_t>(value) << kFailureTypeTagSize) | type;
   ASSERT(((info << kFailureTagSize) >> kFailureTagSize) == info);
-  return reinterpret_cast<Failure*>(
-      (static_cast<intptr_t>(info) << kFailureTagSize) | kFailureTag);
+  return reinterpret_cast<Failure*>((info << kFailureTagSize) | kFailureTag);
 }
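
RetryAfterGC above now clamps oversized requests instead of asserting, since
on 64-bit the requested size may not fit in the payload bits. A standalone
check of the round-trip test; the shift is done in unsigned space here to
sidestep the signed-overflow UB that the original tolerates:

#include <cassert>
#include <cstdint>

intptr_t CapRequested(intptr_t requested, int tag_bits) {
  uintptr_t shifted = static_cast<uintptr_t>(requested) << tag_bits;
  if ((static_cast<intptr_t>(shifted) >> tag_bits) != requested) {
    // No room for the entire requested size in the bits: round down to the
    // maximally representable value, exactly as the code above does.
    requested =
        static_cast<intptr_t>(~static_cast<uintptr_t>(0) >> (tag_bits + 1));
  }
  return requested;
}

int main() {
  const int kTagBits = 5;  // kSpaceTagSize (3) + kFailureTypeTagSize (2)
  assert(CapRequested(42, kTagBits) == 42);  // small requests pass unchanged
  intptr_t max =
      static_cast<intptr_t>(~static_cast<uintptr_t>(0) >> (kTagBits + 1));
  assert(CapRequested(max + 1, kTagBits) == max);  // oversized: clamped
  return 0;
}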
 
 
@@ -832,6 +836,11 @@
 #ifdef DEBUG
   bool in_range = (value >= kMinValue) && (value <= kMaxValue);
 #endif
+
+#ifdef V8_TARGET_ARCH_X64
+  // To be representable as a long smi, the value must be a 32-bit integer.
+  bool result = (value == static_cast<int32_t>(value));
+#else
   // To be representable as a tagged small integer, the two
   // most-significant bits of 'value' must be either 00 or 11 due to
   // sign-extension. To check this we add 01 to the two
@@ -843,20 +852,8 @@
   // in fact doesn't work correctly with gcc4.1.1 in some cases: The
   // compiler may produce undefined results in case of signed integer
   // overflow. The computation must be done w/ unsigned ints.
-  bool result =
-      ((static_cast<unsigned int>(value) + 0x40000000U) & 0x80000000U) == 0;
-  ASSERT(result == in_range);
-  return result;
-}
-
-
-bool Smi::IsIntptrValid(intptr_t value) {
-#ifdef DEBUG
-  bool in_range = (value >= kMinValue) && (value <= kMaxValue);
+  bool result = (static_cast<uintptr_t>(value + 0x40000000U) < 0x80000000U);
 #endif
-  // See Smi::IsValid(int) for description.
-  bool result =
-      ((static_cast<uintptr_t>(value) + 0x40000000U) < 0x80000000U);
   ASSERT(result == in_range);
   return result;
 }
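
The retained 32-bit branch is the classic add-and-compare range check: adding
2^30 maps the valid smi range [-2^30, 2^30 - 1] onto [0, 2^31 - 1], so a
single unsigned comparison tests both bounds. Verified standalone:

#include <cassert>
#include <cstdint>

bool SmiIsValid31(int32_t value) {
  // The addition wraps negative values into the upper half of the unsigned
  // range, so one compare checks both the lower and the upper bound.
  return static_cast<uint32_t>(value) + 0x40000000u < 0x80000000u;
}

int main() {
  assert(SmiIsValid31(0));
  assert(SmiIsValid31(0x3FFFFFFF));    // kMaxValue = 2^30 - 1
  assert(SmiIsValid31(-0x40000000));   // kMinValue = -2^30
  assert(!SmiIsValid31(0x40000000));
  assert(!SmiIsValid31(-0x40000001));
  return 0;
}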
diff --git a/src/objects.cc b/src/objects.cc
index 834589a..ab1d35f 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -618,12 +618,12 @@
 
 
 void Failure::FailurePrint(StringStream* accumulator) {
-  accumulator->Add("Failure(%d)", value());
+  accumulator->Add("Failure(%p)", reinterpret_cast<void*>(value()));
 }
 
 
 void Failure::FailurePrint() {
-  PrintF("Failure(%d)", value());
+  PrintF("Failure(%p)", reinterpret_cast<void*>(value()));
 }
 
 
@@ -4983,7 +4983,8 @@
 
 
 void ObjectVisitor::VisitDebugTarget(RelocInfo* rinfo) {
-  ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) && rinfo->IsCallInstruction());
+  ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) &&
+         rinfo->IsPatchedReturnSequence());
   Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
   Object* old_target = target;
   VisitPointer(&target);
@@ -5009,7 +5010,7 @@
 #ifdef ENABLE_DEBUGGER_SUPPORT
     } else if (Debug::has_break_points() &&
                RelocInfo::IsJSReturn(rmode) &&
-               it.rinfo()->IsCallInstruction()) {
+               it.rinfo()->IsPatchedReturnSequence()) {
       v->VisitDebugTarget(it.rinfo());
 #endif
     } else if (rmode == RelocInfo::RUNTIME_ENTRY) {
@@ -5047,7 +5048,7 @@
           desc.reloc_size);
 
   // unbox handles and relocate
-  int delta = instruction_start() - desc.buffer;
+  intptr_t delta = instruction_start() - desc.buffer;
   int mode_mask = RelocInfo::kCodeTargetMask |
                   RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
                   RelocInfo::kApplyMask;
@@ -6562,6 +6563,10 @@
       : string_(string),
         flags_(Smi::FromInt(flags.value())) { }
 
+  // Rather than storing the key in the hash table, a pointer to the
+  // stored value is placed where the key would normally go.  IsMatch then
+  // compares the search key to the found object, rather than comparing
+  // a key to a key.
   bool IsMatch(Object* obj) {
     FixedArray* val = FixedArray::cast(obj);
     return string_->Equals(String::cast(val->get(JSRegExp::kSourceIndex)))
@@ -7221,6 +7226,8 @@
   CompilationCacheTable* cache =
       reinterpret_cast<CompilationCacheTable*>(obj);
   int entry = cache->FindInsertionEntry(key.Hash());
+  // We store the value in the key slot, and compare the search key
+  // to the stored value with a custom IsMatch function during lookups.
   cache->set(EntryToIndex(entry), value);
   cache->set(EntryToIndex(entry) + 1, value);
   cache->ElementAdded();
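
Read together with the IsMatch comment in the hunk above, the convention is symmetric. A reduced sketch (illustrative, not the real classes):

    // Insertion: both slots of the entry receive the value; the key
    // slot never holds a real key object.
    cache->set(EntryToIndex(entry), value);      // key slot <- value
    cache->set(EntryToIndex(entry) + 1, value);  // value slot <- value

    // Lookup: the table hands the key-slot contents to the key's
    // IsMatch, so the search key is compared against a stored value.
    if (key->IsMatch(cache->get(EntryToIndex(entry)))) { /* hit */ }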
diff --git a/src/objects.h b/src/objects.h
index c78c73b..deb0971 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -32,6 +32,9 @@
 #include "code-stubs.h"
 #include "smart-pointer.h"
 #include "unicode-inl.h"
+#if V8_TARGET_ARCH_ARM
+#include "arm/constants-arm.h"
+#endif
 
 //
 // All object types in the V8 JavaScript are described in this file.
@@ -904,10 +907,10 @@
 
 // Smi represents integer Numbers that can be stored in 31 bits.
 // Smis are immediate which means they are NOT allocated in the heap.
-// Smi stands for small integer.
 // The this pointer has the following format: [31 bit signed int] 0
-// On 64-bit, the top 32 bits of the pointer is allowed to have any
-// value.
+// For long smis it has the following format:
+//     [32 bit signed int] [31 bits zero padding] 0
+// Smi stands for small integer.
 class Smi: public Object {
  public:
   // Returns the integer value.
@@ -921,8 +924,6 @@
   // Returns whether value can be represented in a Smi.
   static inline bool IsValid(intptr_t value);
 
-  static inline bool IsIntptrValid(intptr_t);
-
   // Casting.
   static inline Smi* cast(Object* object);
 
@@ -933,10 +934,8 @@
   void SmiVerify();
 #endif
 
-  static const int kSmiNumBits = 31;
-  // Min and max limits for Smi values.
-  static const int kMinValue = -(1 << (kSmiNumBits - 1));
-  static const int kMaxValue = (1 << (kSmiNumBits - 1)) - 1;
+  static const int kMinValue = (-1 << (kSmiValueSize - 1));
+  static const int kMaxValue = -(kMinValue + 1);
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(Smi);
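
Deriving both limits from kSmiValueSize keeps them correct for either smi layout. For illustration, assuming kSmiValueSize is 31 on 32-bit targets and 32 for x64 long smis:

    //   kSmiValueSize == 31:  kMinValue == -1 << 30 == -1073741824
    //                         kMaxValue == -(kMinValue + 1) == 1073741823
    //   kSmiValueSize == 32:  the range widens to exactly that of
    //                         int32_t, [-2147483648, 2147483647]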
@@ -949,10 +948,10 @@
 //
 // Failures are a single word, encoded as follows:
 // +-------------------------+---+--+--+
-// |rrrrrrrrrrrrrrrrrrrrrrrrr|sss|tt|11|
+// |...rrrrrrrrrrrrrrrrrrrrrr|sss|tt|11|
 // +-------------------------+---+--+--+
-//  3                       7 6 4 32 10
-//  1
+//                          7 6 4 32 10
+//
 //
 // The low two bits, 0-1, are the failure tag, 11.  The next two bits,
 // 2-3, are a failure type tag 'tt' with possible values:
@@ -1014,8 +1013,8 @@
 #endif
 
  private:
-  inline int value() const;
-  static inline Failure* Construct(Type type, int value = 0);
+  inline intptr_t value() const;
+  static inline Failure* Construct(Type type, intptr_t value = 0);
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(Failure);
 };
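
A sketch of how a payload round-trips through this layout, using the two-bit field widths shown in the diagram (names follow the constants above, but this is illustrative code, not the real accessors):

    // Pack:   word == (value << 4) | (type << 2) | 3   (low two bits 11)
    // Unpack: shift the failure tag and the type tag back off.
    intptr_t FailurePayload(Failure* f) {
      intptr_t word = reinterpret_cast<intptr_t>(f);
      ASSERT((word & kFailureTagMask) == kFailureTag);
      return word >> (kFailureTagSize + kFailureTypeTagSize);
    }

With value() widened to intptr_t, the payload keeps its full width on 64-bit targets instead of being truncated to int.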
@@ -1291,7 +1290,7 @@
   // is a mixture of sign, exponent and mantissa.  Our current platforms are all
   // little endian apart from non-EABI arm which is little endian with big
   // endian floating point word ordering!
-#if !defined(V8_HOST_ARCH_ARM) || __ARM_EABI__
+#if !defined(V8_HOST_ARCH_ARM) || defined(USE_ARM_EABI)
   static const int kMantissaOffset = kValueOffset;
   static const int kExponentOffset = kValueOffset + 4;
 #else
@@ -2036,33 +2035,33 @@
 //     // The Element size indicates number of elements per entry.
 //     static const int kEntrySize = ..;
 //   };
-// table.  The prefix size indicates an amount of memory in the
+// The prefix size indicates an amount of memory at the
 // beginning of the backing storage that can be used for non-element
 // information by subclasses.
 
 template<typename Shape, typename Key>
 class HashTable: public FixedArray {
  public:
-  // Returns the number of elements in the dictionary.
+  // Returns the number of elements in the hash table.
   int NumberOfElements() {
     return Smi::cast(get(kNumberOfElementsIndex))->value();
   }
 
-  // Returns the capacity of the dictionary.
+  // Returns the capacity of the hash table.
   int Capacity() {
     return Smi::cast(get(kCapacityIndex))->value();
   }
 
   // ElementAdded should be called whenever an element is added to a
-  // dictionary.
+  // hash table.
   void ElementAdded() { SetNumberOfElements(NumberOfElements() + 1); }
 
   // ElementRemoved should be called whenever an element is removed from
-  // a dictionary.
+  // a hash table.
   void ElementRemoved() { SetNumberOfElements(NumberOfElements() - 1); }
   void ElementsRemoved(int n) { SetNumberOfElements(NumberOfElements() - n); }
 
-  // Returns a new array for dictionary usage. Might return Failure.
+  // Returns a new HashTable object. Might return Failure.
   static Object* Allocate(int at_least_space_for);
 
   // Returns the key at entry.
@@ -2112,7 +2111,7 @@
     return (entry * kEntrySize) + kElementsStartIndex;
   }
 
-  // Update the number of elements in the dictionary.
+  // Update the number of elements in the hash table.
   void SetNumberOfElements(int nof) {
     fast_set(this, kNumberOfElementsIndex, Smi::FromInt(nof));
   }
@@ -2148,7 +2147,7 @@
   virtual uint32_t Hash() = 0;
   // Returns the hash value for object.
   virtual uint32_t HashForObject(Object* key) = 0;
-  // Returns the key object for storing into the dictionary.
+  // Returns the key object for storing into the hash table.
   // If allocation fails, a failure object is returned.
   virtual Object* AsObject() = 0;
   // Required.
@@ -2495,6 +2494,9 @@
   void PixelArrayVerify();
 #endif  // DEBUG
 
+  // Maximal acceptable length for a pixel array.
+  static const int kMaxLength = 0x3fffffff;
+
   // PixelArray headers are not quadword aligned.
   static const int kExternalPointerOffset = Array::kAlignedSize;
   static const int kHeaderSize = kExternalPointerOffset + kPointerSize;
@@ -3576,6 +3578,7 @@
   static const int kEntrySize = 2;
 };
 
+
 class CompilationCacheTable: public HashTable<CompilationCacheShape,
                                               HashTableKey*> {
  public:
@@ -3849,6 +3852,8 @@
   static const int kShortLengthShift = kHashShift + kShortStringTag;
   static const int kMediumLengthShift = kHashShift + kMediumStringTag;
   static const int kLongLengthShift = kHashShift + kLongStringTag;
+  // Maximal string length that can be stored in the hash/length field.
+  static const int kMaxLength = (1 << (32 - kLongLengthShift)) - 1;
 
   // Limit for truncation in short printing.
   static const int kMaxShortPrintLength = 1024;
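
The new constant just exposes the packing limit: with a 32-bit hash/length field, a long string's length has to fit in the bits above kLongLengthShift. Purely for illustration, if kLongLengthShift were 6:

    //   kMaxLength == (1 << (32 - 6)) - 1 == (1 << 26) - 1 == 67108863

so any operation that would exceed the limit can now report out-of-memory explicitly (see the runtime.cc changes below) instead of overflowing the field.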
diff --git a/src/parser.cc b/src/parser.cc
index 3b24687..02fcfdc 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -177,8 +177,8 @@
   Statement* ParseWithStatement(ZoneStringList* labels, bool* ok);
   CaseClause* ParseCaseClause(bool* default_seen_ptr, bool* ok);
   SwitchStatement* ParseSwitchStatement(ZoneStringList* labels, bool* ok);
-  LoopStatement* ParseDoStatement(ZoneStringList* labels, bool* ok);
-  LoopStatement* ParseWhileStatement(ZoneStringList* labels, bool* ok);
+  DoWhileStatement* ParseDoWhileStatement(ZoneStringList* labels, bool* ok);
+  WhileStatement* ParseWhileStatement(ZoneStringList* labels, bool* ok);
   Statement* ParseForStatement(ZoneStringList* labels, bool* ok);
   Statement* ParseThrowStatement(bool* ok);
   Expression* MakeCatchContext(Handle<String> id, VariableProxy* value);
@@ -675,9 +675,6 @@
   }
   int materialized_literal_count() { return materialized_literal_count_; }
 
-  void set_contains_array_literal() { contains_array_literal_ = true; }
-  bool contains_array_literal() { return contains_array_literal_; }
-
   void SetThisPropertyAssignmentInfo(
       bool only_this_property_assignments,
       bool only_simple_this_property_assignments,
@@ -700,17 +697,11 @@
   void AddProperty() { expected_property_count_++; }
   int expected_property_count() { return expected_property_count_; }
  private:
-  // Captures the number of nodes that need materialization in the
-  // function.  regexp literals, and boilerplate for object literals.
+  // Captures the number of literals that need materialization in the
+  // function.  Includes regexp literals and boilerplate for object
+  // and array literals.
   int materialized_literal_count_;
 
-  // Captures whether or not the function contains array literals.  If
-  // the function contains array literals, we have to allocate space
-  // for the array constructor in the literals array of the function.
-  // This array constructor is used when creating the actual array
-  // literals.
-  bool contains_array_literal_;
-
   // Properties count estimation.
   int expected_property_count_;
 
@@ -728,7 +719,6 @@
 
 TemporaryScope::TemporaryScope(Parser* parser)
   : materialized_literal_count_(0),
-    contains_array_literal_(false),
     expected_property_count_(0),
     only_this_property_assignments_(false),
     only_simple_this_property_assignments_(false),
@@ -1236,7 +1226,6 @@
           top_scope_,
           body.elements(),
           temp_scope.materialized_literal_count(),
-          temp_scope.contains_array_literal(),
           temp_scope.expected_property_count(),
           temp_scope.only_this_property_assignments(),
           temp_scope.only_simple_this_property_assignments(),
@@ -1692,7 +1681,7 @@
       break;
 
     case Token::DO:
-      stmt = ParseDoStatement(labels, ok);
+      stmt = ParseDoWhileStatement(labels, ok);
       break;
 
     case Token::WHILE:
@@ -1903,7 +1892,7 @@
   const int literals = fun->NumberOfLiterals();
   Handle<Code> code = Handle<Code>(fun->shared()->code());
   Handle<JSFunction> boilerplate =
-      Factory::NewFunctionBoilerplate(name, literals, false, code);
+      Factory::NewFunctionBoilerplate(name, literals, code);
 
   // Copy the function data to the boilerplate. Used by
   // builtins.cc:HandleApiCall to perform argument type checks and to
@@ -2361,7 +2350,7 @@
     exit->AddStatement(NEW(WithExitStatement()));
 
     // Return a try-finally statement.
-    TryFinally* wrapper = NEW(TryFinally(body, exit));
+    TryFinallyStatement* wrapper = NEW(TryFinallyStatement(body, exit));
     wrapper->set_escaping_targets(collector.targets());
     result->AddStatement(wrapper);
   }
@@ -2537,7 +2526,8 @@
   //   'try { try { } catch { } } finally { }'
 
   if (!is_pre_parsing_ && catch_block != NULL && finally_block != NULL) {
-    TryCatch* statement = NEW(TryCatch(try_block, catch_var, catch_block));
+    TryCatchStatement* statement =
+        NEW(TryCatchStatement(try_block, catch_var, catch_block));
     statement->set_escaping_targets(collector.targets());
     try_block = NEW(Block(NULL, 1, false));
     try_block->AddStatement(statement);
@@ -2548,11 +2538,11 @@
   if (!is_pre_parsing_) {
     if (catch_block != NULL) {
       ASSERT(finally_block == NULL);
-      result = NEW(TryCatch(try_block, catch_var, catch_block));
+      result = NEW(TryCatchStatement(try_block, catch_var, catch_block));
       result->set_escaping_targets(collector.targets());
     } else {
       ASSERT(finally_block != NULL);
-      result = NEW(TryFinally(try_block, finally_block));
+      result = NEW(TryFinallyStatement(try_block, finally_block));
       // Add the jump targets of the try block and the catch block.
       for (int i = 0; i < collector.targets()->length(); i++) {
         catch_collector.AddTarget(collector.targets()->at(i));
@@ -2565,11 +2555,12 @@
 }
 
 
-LoopStatement* Parser::ParseDoStatement(ZoneStringList* labels, bool* ok) {
+DoWhileStatement* Parser::ParseDoWhileStatement(ZoneStringList* labels,
+                                                bool* ok) {
   // DoStatement ::
   //   'do' Statement 'while' '(' Expression ')' ';'
 
-  LoopStatement* loop = NEW(LoopStatement(labels, LoopStatement::DO_LOOP));
+  DoWhileStatement* loop = NEW(DoWhileStatement(labels));
   Target target(this, loop);
 
   Expect(Token::DO, CHECK_OK);
@@ -2585,16 +2576,16 @@
   // ExpectSemicolon() functionality here.
   if (peek() == Token::SEMICOLON) Consume(Token::SEMICOLON);
 
-  if (loop) loop->Initialize(NULL, cond, NULL, body);
+  if (loop != NULL) loop->Initialize(cond, body);
   return loop;
 }
 
 
-LoopStatement* Parser::ParseWhileStatement(ZoneStringList* labels, bool* ok) {
+WhileStatement* Parser::ParseWhileStatement(ZoneStringList* labels, bool* ok) {
   // WhileStatement ::
   //   'while' '(' Expression ')' Statement
 
-  LoopStatement* loop = NEW(LoopStatement(labels, LoopStatement::WHILE_LOOP));
+  WhileStatement* loop = NEW(WhileStatement(labels));
   Target target(this, loop);
 
   Expect(Token::WHILE, CHECK_OK);
@@ -2603,7 +2594,7 @@
   Expect(Token::RPAREN, CHECK_OK);
   Statement* body = ParseStatement(NULL, CHECK_OK);
 
-  if (loop) loop->Initialize(NULL, cond, NULL, body);
+  if (loop != NULL) loop->Initialize(cond, body);
   return loop;
 }
 
@@ -2676,7 +2667,7 @@
   }
 
   // Standard 'for' loop
-  LoopStatement* loop = NEW(LoopStatement(labels, LoopStatement::FOR_LOOP));
+  ForStatement* loop = NEW(ForStatement(labels));
   Target target(this, loop);
 
   // Parsed initializer at this point.
@@ -3304,7 +3295,6 @@
   Expect(Token::RBRACK, CHECK_OK);
 
   // Update the scope information before the pre-parsing bailout.
-  temp_scope_->set_contains_array_literal();
   int literal_index = temp_scope_->NextMaterializedLiteralIndex();
 
   if (is_pre_parsing_) return NULL;
@@ -3634,7 +3624,6 @@
 
     int materialized_literal_count;
     int expected_property_count;
-    bool contains_array_literal;
     bool only_this_property_assignments;
     bool only_simple_this_property_assignments;
     Handle<FixedArray> this_property_assignments;
@@ -3648,12 +3637,10 @@
       only_this_property_assignments = false;
       only_simple_this_property_assignments = false;
       this_property_assignments = Factory::empty_fixed_array();
-      contains_array_literal = entry.contains_array_literal();
     } else {
       ParseSourceElements(&body, Token::RBRACE, CHECK_OK);
       materialized_literal_count = temp_scope.materialized_literal_count();
       expected_property_count = temp_scope.expected_property_count();
-      contains_array_literal = temp_scope.contains_array_literal();
       only_this_property_assignments =
           temp_scope.only_this_property_assignments();
       only_simple_this_property_assignments =
@@ -3669,7 +3656,6 @@
       entry.set_end_pos(end_pos);
       entry.set_literal_count(materialized_literal_count);
       entry.set_property_count(expected_property_count);
-      entry.set_contains_array_literal(contains_array_literal);
     }
 
     FunctionLiteral* function_literal =
@@ -3677,7 +3663,6 @@
                             top_scope_,
                             body.elements(),
                             materialized_literal_count,
-                            contains_array_literal,
                             expected_property_count,
                             only_this_property_assignments,
                             only_simple_this_property_assignments,
diff --git a/src/parser.h b/src/parser.h
index 86e1f74..7328e81 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -70,16 +70,9 @@
   int property_count() { return backing_[kPropertyCountOffset]; }
   void set_property_count(int value) { backing_[kPropertyCountOffset] = value; }
 
-  bool contains_array_literal() {
-    return backing_[kContainsArrayLiteralOffset] != 0;
-  }
-  void set_contains_array_literal(bool value) {
-    backing_[kContainsArrayLiteralOffset] = value ? 1 : 0;
-  }
-
   bool is_valid() { return backing_.length() > 0; }
 
-  static const int kSize = 5;
+  static const int kSize = 4;
 
  private:
   Vector<unsigned> backing_;
@@ -87,7 +80,6 @@
   static const int kEndPosOffset = 1;
   static const int kLiteralCountOffset = 2;
   static const int kPropertyCountOffset = 3;
-  static const int kContainsArrayLiteralOffset = 4;
 };
 
 
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index d4a183d..26e5ce5 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -1794,7 +1794,6 @@
         context.ContextFlags = CONTEXT_FULL;
         if (GetThreadContext(profiled_thread_, &context) != 0) {
 #if V8_HOST_ARCH_X64
-          UNIMPLEMENTED();
           sample.pc = context.Rip;
           sample.sp = context.Rsp;
           sample.fp = context.Rbp;
diff --git a/src/prettyprinter.cc b/src/prettyprinter.cc
index bf66c4b..10c1ea8 100644
--- a/src/prettyprinter.cc
+++ b/src/prettyprinter.cc
@@ -147,46 +147,42 @@
 }
 
 
-void PrettyPrinter::VisitLoopStatement(LoopStatement* node) {
+void PrettyPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
   PrintLabels(node->labels());
-  switch (node->type()) {
-    case LoopStatement::DO_LOOP:
-      ASSERT(node->init() == NULL);
-      ASSERT(node->next() == NULL);
-      Print("do ");
-      Visit(node->body());
-      Print(" while (");
-      Visit(node->cond());
-      Print(");");
-      break;
+  Print("do ");
+  Visit(node->body());
+  Print(" while (");
+  Visit(node->cond());
+  Print(");");
+}
 
-    case LoopStatement::FOR_LOOP:
-      Print("for (");
-      if (node->init() != NULL) {
-        Visit(node->init());
-        Print(" ");
-      } else {
-        Print("; ");
-      }
-      if (node->cond() != NULL)
-        Visit(node->cond());
-      Print("; ");
-      if (node->next() != NULL)
-        Visit(node->next());  // prints extra ';', unfortunately
-      // to fix: should use Expression for next
-      Print(") ");
-      Visit(node->body());
-      break;
 
-    case LoopStatement::WHILE_LOOP:
-      ASSERT(node->init() == NULL);
-      ASSERT(node->next() == NULL);
-      Print("while (");
-      Visit(node->cond());
-      Print(") ");
-      Visit(node->body());
-      break;
+void PrettyPrinter::VisitWhileStatement(WhileStatement* node) {
+  PrintLabels(node->labels());
+  Print("while (");
+  Visit(node->cond());
+  Print(") ");
+  Visit(node->body());
+}
+
+
+void PrettyPrinter::VisitForStatement(ForStatement* node) {
+  PrintLabels(node->labels());
+  Print("for (");
+  if (node->init() != NULL) {
+    Visit(node->init());
+    Print(" ");
+  } else {
+    Print("; ");
   }
+  if (node->cond() != NULL) Visit(node->cond());
+  Print("; ");
+  if (node->next() != NULL) {
+    Visit(node->next());  // Prints an extra ';', unfortunately.
+    // TODO: use an Expression for 'next' to avoid it.
+  }
+  Print(") ");
+  Visit(node->body());
 }
 
 
@@ -201,7 +197,7 @@
 }
 
 
-void PrettyPrinter::VisitTryCatch(TryCatch* node) {
+void PrettyPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
   Print("try ");
   Visit(node->try_block());
   Print(" catch (");
@@ -211,7 +207,7 @@
 }
 
 
-void PrettyPrinter::VisitTryFinally(TryFinally* node) {
+void PrettyPrinter::VisitTryFinallyStatement(TryFinallyStatement* node) {
   Print("try ");
   Visit(node->try_block());
   Print(" finally ");
@@ -841,12 +837,28 @@
 }
 
 
-void AstPrinter::VisitLoopStatement(LoopStatement* node) {
-  IndentedScope indent(node->OperatorString());
+void AstPrinter::VisitDoWhileStatement(DoWhileStatement* node) {
+  IndentedScope indent("DO");
+  PrintLabelsIndented(NULL, node->labels());
+  PrintIndentedVisit("BODY", node->body());
+  PrintIndentedVisit("COND", node->cond());
+}
+
+
+void AstPrinter::VisitWhileStatement(WhileStatement* node) {
+  IndentedScope indent("WHILE");
+  PrintLabelsIndented(NULL, node->labels());
+  PrintIndentedVisit("COND", node->cond());
+  PrintIndentedVisit("BODY", node->body());
+}
+
+
+void AstPrinter::VisitForStatement(ForStatement* node) {
+  IndentedScope indent("FOR");
   PrintLabelsIndented(NULL, node->labels());
   if (node->init()) PrintIndentedVisit("INIT", node->init());
   if (node->cond()) PrintIndentedVisit("COND", node->cond());
-  if (node->body()) PrintIndentedVisit("BODY", node->body());
+  PrintIndentedVisit("BODY", node->body());
   if (node->next()) PrintIndentedVisit("NEXT", node->next());
 }
 
@@ -859,7 +871,7 @@
 }
 
 
-void AstPrinter::VisitTryCatch(TryCatch* node) {
+void AstPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
   IndentedScope indent("TRY CATCH");
   PrintIndentedVisit("TRY", node->try_block());
   PrintIndentedVisit("CATCHVAR", node->catch_var());
@@ -867,7 +879,7 @@
 }
 
 
-void AstPrinter::VisitTryFinally(TryFinally* node) {
+void AstPrinter::VisitTryFinallyStatement(TryFinallyStatement* node) {
   IndentedScope indent("TRY FINALLY");
   PrintIndentedVisit("TRY", node->try_block());
   PrintIndentedVisit("FINALLY", node->finally_block());
@@ -1088,6 +1100,414 @@
 }
 
 
+TagScope::TagScope(JsonAstBuilder* builder, const char* name)
+    : builder_(builder), next_(builder->tag()), has_body_(false) {
+  if (next_ != NULL) {
+    next_->use();
+    builder->Print(",\n");
+  }
+  builder->set_tag(this);
+  builder->PrintIndented("[");
+  builder->Print("\"%s\"", name);
+  builder->increase_indent(JsonAstBuilder::kTagIndentSize);
+}
+
+
+TagScope::~TagScope() {
+  builder_->decrease_indent(JsonAstBuilder::kTagIndentSize);
+  if (has_body_) {
+    builder_->Print("\n");
+    builder_->PrintIndented("]");
+  } else {
+    builder_->Print("]");
+  }
+  builder_->set_tag(next_);
+}
+
+
+AttributesScope::AttributesScope(JsonAstBuilder* builder)
+    : builder_(builder), attribute_count_(0) {
+  builder->set_attributes(this);
+  builder->tag()->use();
+  builder->Print(",\n");
+  builder->PrintIndented("{");
+  builder->increase_indent(JsonAstBuilder::kAttributesIndentSize);
+}
+
+
+AttributesScope::~AttributesScope() {
+  builder_->decrease_indent(JsonAstBuilder::kAttributesIndentSize);
+  if (attribute_count_ > 1) {
+    builder_->Print("\n");
+    builder_->PrintIndented("}");
+  } else {
+    builder_->Print("}");
+  }
+  builder_->set_attributes(NULL);
+}
+
+
+const char* JsonAstBuilder::BuildProgram(FunctionLiteral* program) {
+  Init();
+  Visit(program);
+  Print("\n");
+  return Output();
+}
+
+
+void JsonAstBuilder::AddAttributePrefix(const char* name) {
+  if (attributes()->is_used()) {
+    Print(",\n");
+    PrintIndented("\"");
+  } else {
+    Print("\"");
+  }
+  Print("%s\":", name);
+  attributes()->use();
+}
+
+
+void JsonAstBuilder::AddAttribute(const char* name, Handle<String> value) {
+  SmartPointer<char> value_string = value->ToCString();
+  AddAttributePrefix(name);
+  Print("\"%s\"", *value_string);
+}
+
+
+void JsonAstBuilder::AddAttribute(const char* name, const char* value) {
+  AddAttributePrefix(name);
+  Print("\"%s\"", value);
+}
+
+
+void JsonAstBuilder::AddAttribute(const char* name, int value) {
+  AddAttributePrefix(name);
+  Print("%d", value);
+}
+
+
+void JsonAstBuilder::AddAttribute(const char* name, bool value) {
+  AddAttributePrefix(name);
+  Print(value ? "true" : "false");
+}
+
+
+void JsonAstBuilder::VisitBlock(Block* stmt) {
+  TagScope tag(this, "Block");
+  VisitStatements(stmt->statements());
+}
+
+
+void JsonAstBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
+  TagScope tag(this, "ExpressionStatement");
+  Visit(stmt->expression());
+}
+
+
+void JsonAstBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
+  TagScope tag(this, "EmptyStatement");
+}
+
+
+void JsonAstBuilder::VisitIfStatement(IfStatement* stmt) {
+  TagScope tag(this, "IfStatement");
+  Visit(stmt->condition());
+  Visit(stmt->then_statement());
+  Visit(stmt->else_statement());
+}
+
+
+void JsonAstBuilder::VisitContinueStatement(ContinueStatement* stmt) {
+  TagScope tag(this, "ContinueStatement");
+}
+
+
+void JsonAstBuilder::VisitBreakStatement(BreakStatement* stmt) {
+  TagScope tag(this, "BreakStatement");
+}
+
+
+void JsonAstBuilder::VisitReturnStatement(ReturnStatement* stmt) {
+  TagScope tag(this, "ReturnStatement");
+  Visit(stmt->expression());
+}
+
+
+void JsonAstBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) {
+  TagScope tag(this, "WithEnterStatement");
+  Visit(stmt->expression());
+}
+
+
+void JsonAstBuilder::VisitWithExitStatement(WithExitStatement* stmt) {
+  TagScope tag(this, "WithExitStatement");
+}
+
+
+void JsonAstBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
+  TagScope tag(this, "SwitchStatement");
+}
+
+
+void JsonAstBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
+  TagScope tag(this, "DoWhileStatement");
+  Visit(stmt->body());
+  Visit(stmt->cond());
+}
+
+
+void JsonAstBuilder::VisitWhileStatement(WhileStatement* stmt) {
+  TagScope tag(this, "WhileStatement");
+  Visit(stmt->cond());
+  Visit(stmt->body());
+}
+
+
+void JsonAstBuilder::VisitForStatement(ForStatement* stmt) {
+  TagScope tag(this, "ForStatement");
+  if (stmt->init() != NULL) Visit(stmt->init());
+  if (stmt->cond() != NULL) Visit(stmt->cond());
+  Visit(stmt->body());
+  if (stmt->next() != NULL) Visit(stmt->next());
+}
+
+
+void JsonAstBuilder::VisitForInStatement(ForInStatement* stmt) {
+  TagScope tag(this, "ForInStatement");
+  Visit(stmt->each());
+  Visit(stmt->enumerable());
+  Visit(stmt->body());
+}
+
+
+void JsonAstBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
+  TagScope tag(this, "TryCatchStatement");
+  Visit(stmt->try_block());
+  Visit(stmt->catch_var());
+  Visit(stmt->catch_block());
+}
+
+
+void JsonAstBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+  TagScope tag(this, "TryFinallyStatement");
+  Visit(stmt->try_block());
+  Visit(stmt->finally_block());
+}
+
+
+void JsonAstBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
+  TagScope tag(this, "DebuggerStatement");
+}
+
+
+void JsonAstBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
+  TagScope tag(this, "FunctionLiteral");
+  {
+    AttributesScope attributes(this);
+    AddAttribute("name", expr->name());
+  }
+  VisitDeclarations(expr->scope()->declarations());
+  VisitStatements(expr->body());
+}
+
+
+void JsonAstBuilder::VisitFunctionBoilerplateLiteral(
+    FunctionBoilerplateLiteral* expr) {
+  TagScope tag(this, "FunctionBoilerplateLiteral");
+}
+
+
+void JsonAstBuilder::VisitConditional(Conditional* expr) {
+  TagScope tag(this, "Conditional");
+}
+
+
+void JsonAstBuilder::VisitSlot(Slot* expr) {
+  TagScope tag(this, "Slot");
+  {
+    AttributesScope attributes(this);
+    switch (expr->type()) {
+      case Slot::PARAMETER:
+        AddAttribute("type", "PARAMETER");
+        break;
+      case Slot::LOCAL:
+        AddAttribute("type", "LOCAL");
+        break;
+      case Slot::CONTEXT:
+        AddAttribute("type", "CONTEXT");
+        break;
+      case Slot::LOOKUP:
+        AddAttribute("type", "LOOKUP");
+        break;
+      case Slot::GLOBAL:
+        AddAttribute("type", "GLOBAL");
+        break;
+    }
+    AddAttribute("index", expr->index());
+  }
+}
+
+
+void JsonAstBuilder::VisitVariableProxy(VariableProxy* expr) {
+  if (expr->var()->rewrite() == NULL) {
+    TagScope tag(this, "VariableProxy");
+    {
+      AttributesScope attributes(this);
+      AddAttribute("name", expr->name());
+      AddAttribute("mode", Variable::Mode2String(expr->var()->mode()));
+    }
+  } else {
+    Visit(expr->var()->rewrite());
+  }
+}
+
+
+void JsonAstBuilder::VisitLiteral(Literal* expr) {
+  TagScope tag(this, "Literal");
+  {
+    AttributesScope attributes(this);
+    Handle<Object> handle = expr->handle();
+    if (handle->IsString()) {
+      AddAttribute("handle", Handle<String>(String::cast(*handle)));
+    } else if (handle->IsSmi()) {
+      AddAttribute("handle", Smi::cast(*handle)->value());
+    }
+  }
+}
+
+
+void JsonAstBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
+  TagScope tag(this, "RegExpLiteral");
+}
+
+
+void JsonAstBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
+  TagScope tag(this, "ObjectLiteral");
+}
+
+
+void JsonAstBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
+  TagScope tag(this, "ArrayLiteral");
+}
+
+
+void JsonAstBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) {
+  TagScope tag(this, "CatchExtensionObject");
+  Visit(expr->key());
+  Visit(expr->value());
+}
+
+
+void JsonAstBuilder::VisitAssignment(Assignment* expr) {
+  TagScope tag(this, "Assignment");
+  {
+    AttributesScope attributes(this);
+    AddAttribute("op", Token::Name(expr->op()));
+  }
+  Visit(expr->target());
+  Visit(expr->value());
+}
+
+
+void JsonAstBuilder::VisitThrow(Throw* expr) {
+  TagScope tag(this, "Throw");
+  Visit(expr->exception());
+}
+
+
+void JsonAstBuilder::VisitProperty(Property* expr) {
+  TagScope tag(this, "Property");
+  {
+    AttributesScope attributes(this);
+    AddAttribute("type", expr->is_synthetic() ? "SYNTHETIC" : "NORMAL");
+  }
+  Visit(expr->obj());
+  Visit(expr->key());
+}
+
+
+void JsonAstBuilder::VisitCall(Call* expr) {
+  TagScope tag(this, "Call");
+  Visit(expr->expression());
+  VisitExpressions(expr->arguments());
+}
+
+
+void JsonAstBuilder::VisitCallNew(CallNew* expr) {
+  TagScope tag(this, "CallNew");
+  Visit(expr->expression());
+  VisitExpressions(expr->arguments());
+}
+
+
+void JsonAstBuilder::VisitCallRuntime(CallRuntime* expr) {
+  TagScope tag(this, "CallRuntime");
+  {
+    AttributesScope attributes(this);
+    AddAttribute("name", expr->name());
+  }
+  VisitExpressions(expr->arguments());
+}
+
+
+void JsonAstBuilder::VisitUnaryOperation(UnaryOperation* expr) {
+  TagScope tag(this, "UnaryOperation");
+  {
+    AttributesScope attributes(this);
+    AddAttribute("op", Token::Name(expr->op()));
+  }
+  Visit(expr->expression());
+}
+
+
+void JsonAstBuilder::VisitCountOperation(CountOperation* expr) {
+  TagScope tag(this, "CountOperation");
+  {
+    AttributesScope attributes(this);
+    AddAttribute("is_prefix", expr->is_prefix());
+    AddAttribute("op", Token::Name(expr->op()));
+  }
+  Visit(expr->expression());
+}
+
+
+void JsonAstBuilder::VisitBinaryOperation(BinaryOperation* expr) {
+  TagScope tag(this, "BinaryOperation");
+  {
+    AttributesScope attributes(this);
+    AddAttribute("op", Token::Name(expr->op()));
+  }
+  Visit(expr->left());
+  Visit(expr->right());
+}
+
+
+void JsonAstBuilder::VisitCompareOperation(CompareOperation* expr) {
+  TagScope tag(this, "CompareOperation");
+  {
+    AttributesScope attributes(this);
+    AddAttribute("op", Token::Name(expr->op()));
+  }
+  Visit(expr->left());
+  Visit(expr->right());
+}
+
+
+void JsonAstBuilder::VisitThisFunction(ThisFunction* expr) {
+  TagScope tag(this, "ThisFunction");
+}
+
+
+void JsonAstBuilder::VisitDeclaration(Declaration* decl) {
+  TagScope tag(this, "Declaration");
+  {
+    AttributesScope attributes(this);
+    AddAttribute("mode", Variable::Mode2String(decl->mode()));
+  }
+  Visit(decl->proxy());
+  if (decl->fun() != NULL) Visit(decl->fun());
+}
+
 
 #endif  // DEBUG
 
diff --git a/src/prettyprinter.h b/src/prettyprinter.h
index 8a6d1fb..f885cb3 100644
--- a/src/prettyprinter.h
+++ b/src/prettyprinter.h
@@ -46,14 +46,15 @@
   const char* PrintExpression(FunctionLiteral* program);
   const char* PrintProgram(FunctionLiteral* program);
 
+  void Print(const char* format, ...);
+
   // Print a node to stdout.
   static void PrintOut(AstNode* node);
 
   // Individual nodes
-#define DEF_VISIT(type)                         \
-  virtual void Visit##type(type* node);
-  AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
 
  private:
   char* output_;  // output string buffer
@@ -62,7 +63,6 @@
 
  protected:
   void Init();
-  void Print(const char* format, ...);
   const char* Output() const { return output_; }
 
   virtual void PrintStatements(ZoneList<Statement*>* statements);
@@ -85,10 +85,9 @@
   const char* PrintProgram(FunctionLiteral* program);
 
   // Individual nodes
-#define DEF_VISIT(type)                         \
-  virtual void Visit##type(type* node);
-  AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
  private:
   friend class IndentedScope;
   void PrintIndented(const char* txt);
@@ -112,6 +111,107 @@
   static int indent_;
 };
 
+
+// Forward declaration of helper classes.
+class TagScope;
+class AttributesScope;
+
+// Build a C string containing a JSON representation of a function's
+// AST. The representation is based on JsonML (www.jsonml.org).
+class JsonAstBuilder: public PrettyPrinter {
+ public:
+  JsonAstBuilder()
+      : indent_(0), top_tag_scope_(NULL), attributes_scope_(NULL) {
+  }
+  virtual ~JsonAstBuilder() {}
+
+  // Controls the indentation of subsequent lines of a tag body after
+  // the first line.
+  static const int kTagIndentSize = 2;
+
+  // Controls the indentation of subsequent lines of an attributes
+  // block's body after the first line.
+  static const int kAttributesIndentSize = 1;
+
+  // Construct a JSON representation of a function literal.
+  const char* BuildProgram(FunctionLiteral* program);
+
+  // Print text indented by the current indentation level.
+  void PrintIndented(const char* text) { Print("%*s%s", indent_, "", text); }
+
+  // Change the indentation level.
+  void increase_indent(int amount) { indent_ += amount; }
+  void decrease_indent(int amount) { indent_ -= amount; }
+
+  // The builder maintains a stack of opened AST node constructors.
+  // Each node constructor corresponds to a JsonML tag.
+  TagScope* tag() { return top_tag_scope_; }
+  void set_tag(TagScope* scope) { top_tag_scope_ = scope; }
+
+  // The builder maintains a pointer to the currently opened attributes
+  // of the current AST node, or NULL if no attributes are open.
+  AttributesScope* attributes() { return attributes_scope_; }
+  void set_attributes(AttributesScope* scope) { attributes_scope_ = scope; }
+
+  // Add an attribute to the currently opened attributes.
+  void AddAttribute(const char* name, Handle<String> value);
+  void AddAttribute(const char* name, const char* value);
+  void AddAttribute(const char* name, int value);
+  void AddAttribute(const char* name, bool value);
+
+  // AST node visit functions.
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ private:
+  int indent_;
+  TagScope* top_tag_scope_;
+  AttributesScope* attributes_scope_;
+
+  // Utility function used by AddAttribute implementations.
+  void AddAttributePrefix(const char* name);
+};
+
+
+// The JSON AST builder keeps a stack of open element tags (AST node
+// constructors from the current iteration point to the root of the
+// AST).  TagScope is a helper class to manage the opening and closing
+// of tags, the indentation of their bodies, and the comma separation
+// of their contents.
+class TagScope BASE_EMBEDDED {
+ public:
+  TagScope(JsonAstBuilder* builder, const char* name);
+  ~TagScope();
+
+  void use() { has_body_ = true; }
+
+ private:
+  JsonAstBuilder* builder_;
+  TagScope* next_;
+  bool has_body_;
+};
+
+
+// AttributesScope is a helper class to manage the opening and closing
+// of attribute blocks, the indentation of their bodies, and the comma
+// separation of their contents. JsonAstBuilder::AddAttribute adds an
+// attribute to the currently open AttributesScope. Attribute blocks
+// cannot be nested, so the builder keeps a single optional scope
+// rather than a stack.
+class AttributesScope BASE_EMBEDDED {
+ public:
+  explicit AttributesScope(JsonAstBuilder* builder);
+  ~AttributesScope();
+
+  bool is_used() { return attribute_count_ > 0; }
+  void use() { ++attribute_count_; }
+
+ private:
+  JsonAstBuilder* builder_;
+  int attribute_count_;
+};
+
 #endif  // DEBUG
 
 } }  // namespace v8::internal
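
To make the JsonML shape concrete: for a trivial program such as `1`, BuildProgram would emit nested tag arrays with optional attribute objects, roughly like the following (a sketch of the shape only; exact whitespace depends on the indent constants above):

    ["FunctionLiteral",
      {"name":""},
      ["ExpressionStatement",
        ["Literal",
          {"handle":1}]]]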
diff --git a/src/rewriter.cc b/src/rewriter.cc
index 11fc071..de1b95b 100644
--- a/src/rewriter.cc
+++ b/src/rewriter.cc
@@ -100,7 +100,21 @@
 }
 
 
-void AstOptimizer::VisitLoopStatement(LoopStatement* node) {
+void AstOptimizer::VisitDoWhileStatement(DoWhileStatement* node) {
+  Visit(node->cond());
+  Visit(node->body());
+}
+
+
+void AstOptimizer::VisitWhileStatement(WhileStatement* node) {
+  has_function_literal_ = false;
+  Visit(node->cond());
+  node->may_have_function_literal_ = has_function_literal_;
+  Visit(node->body());
+}
+
+
+void AstOptimizer::VisitForStatement(ForStatement* node) {
   if (node->init() != NULL) {
     Visit(node->init());
   }
@@ -109,9 +123,7 @@
     Visit(node->cond());
     node->may_have_function_literal_ = has_function_literal_;
   }
-  if (node->body() != NULL) {
-    Visit(node->body());
-  }
+  Visit(node->body());
   if (node->next() != NULL) {
     Visit(node->next());
   }
@@ -125,14 +137,14 @@
 }
 
 
-void AstOptimizer::VisitTryCatch(TryCatch* node) {
+void AstOptimizer::VisitTryCatchStatement(TryCatchStatement* node) {
   Visit(node->try_block());
   Visit(node->catch_var());
   Visit(node->catch_block());
 }
 
 
-void AstOptimizer::VisitTryFinally(TryFinally* node) {
+void AstOptimizer::VisitTryFinallyStatement(TryFinallyStatement* node) {
   Visit(node->try_block());
   Visit(node->finally_block());
 }
@@ -553,6 +565,8 @@
   virtual void Visit##type(type* node);
   AST_NODE_LIST(DEF_VISIT)
 #undef DEF_VISIT
+
+  void VisitIterationStatement(IterationStatement* stmt);
 };
 
 
@@ -596,25 +610,35 @@
 }
 
 
-
-
-void Processor::VisitLoopStatement(LoopStatement* node) {
-  // Rewrite loop body statement.
+void Processor::VisitIterationStatement(IterationStatement* node) {
+  // Rewrite the body.
   bool set_after_loop = is_set_;
   Visit(node->body());
   is_set_ = is_set_ && set_after_loop;
 }
 
 
-void Processor::VisitForInStatement(ForInStatement* node) {
-  // Rewrite for-in body statement.
-  bool set_after_for = is_set_;
-  Visit(node->body());
-  is_set_ = is_set_ && set_after_for;
+void Processor::VisitDoWhileStatement(DoWhileStatement* node) {
+  VisitIterationStatement(node);
 }
 
 
-void Processor::VisitTryCatch(TryCatch* node) {
+void Processor::VisitWhileStatement(WhileStatement* node) {
+  VisitIterationStatement(node);
+}
+
+
+void Processor::VisitForStatement(ForStatement* node) {
+  VisitIterationStatement(node);
+}
+
+
+void Processor::VisitForInStatement(ForInStatement* node) {
+  VisitIterationStatement(node);
+}
+
+
+void Processor::VisitTryCatchStatement(TryCatchStatement* node) {
   // Rewrite both try and catch blocks (reversed order).
   bool set_after_catch = is_set_;
   Visit(node->catch_block());
@@ -626,7 +650,7 @@
 }
 
 
-void Processor::VisitTryFinally(TryFinally* node) {
+void Processor::VisitTryFinallyStatement(TryFinallyStatement* node) {
   // Rewrite both try and finally block (reversed order).
   Visit(node->finally_block());
   bool save = in_try_;
diff --git a/src/runtime.cc b/src/runtime.cc
index 4e1940d..9eeffd1 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -34,18 +34,17 @@
 #include "arguments.h"
 #include "compiler.h"
 #include "cpu.h"
-#include "dateparser.h"
 #include "dateparser-inl.h"
 #include "debug.h"
 #include "execution.h"
 #include "jsregexp.h"
+#include "parser.h"
 #include "platform.h"
 #include "runtime.h"
 #include "scopeinfo.h"
-#include "v8threads.h"
 #include "smart-pointer.h"
-#include "parser.h"
 #include "stub-cache.h"
+#include "v8threads.h"
 
 namespace v8 {
 namespace internal {
@@ -522,7 +521,7 @@
   RUNTIME_ASSERT(type ==  FUNCTION_TEMPLATE_INFO_TYPE ||
                  type ==  OBJECT_TEMPLATE_INFO_TYPE);
   RUNTIME_ASSERT(offset > 0);
-  if (type ==  FUNCTION_TEMPLATE_INFO_TYPE) {
+  if (type == FUNCTION_TEMPLATE_INFO_TYPE) {
     RUNTIME_ASSERT(offset < FunctionTemplateInfo::kSize);
   } else {
     RUNTIME_ASSERT(offset < ObjectTemplateInfo::kSize);
@@ -3252,8 +3251,8 @@
       } else {
         escaped_length += 3;
       }
-      // We don't allow strings that are longer than Smi range.
-      if (!Smi::IsValid(escaped_length)) {
+      // We don't allow strings longer than the maximal string length.
+      if (escaped_length > String::kMaxLength) {
         Top::context()->mark_out_of_memory();
         return Failure::OutOfMemoryException();
       }
@@ -3584,6 +3583,36 @@
   return ConvertCase<unibrow::ToUppercase>(args, &to_upper_mapping);
 }
 
+static inline bool IsTrimWhiteSpace(unibrow::uchar c) {
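+  // 0x200b is ZERO WIDTH SPACE (U+200B); it is trimmed here on top of
+  // the regular unibrow white space predicate.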
+  return unibrow::WhiteSpace::Is(c) || c == 0x200b;
+}
+
+static Object* Runtime_StringTrim(Arguments args) {
+  NoHandleAllocation ha;
+  ASSERT(args.length() == 3);
+
+  CONVERT_CHECKED(String, s, args[0]);
+  CONVERT_BOOLEAN_CHECKED(trimLeft, args[1]);
+  CONVERT_BOOLEAN_CHECKED(trimRight, args[2]);
+
+  s->TryFlattenIfNotFlat();
+  int length = s->length();
+
+  int left = 0;
+  if (trimLeft) {
+    while (left < length && IsTrimWhiteSpace(s->Get(left))) {
+      left++;
+    }
+  }
+
+  int right = length;
+  if (trimRight) {
+    while (right > left && IsTrimWhiteSpace(s->Get(right - 1))) {
+      right--;
+    }
+  }
+  return s->Slice(left, right);
+}
 
 bool Runtime::IsUpperCaseChar(uint16_t ch) {
   unibrow::uchar chars[unibrow::ToUppercase::kMaxWidth];
@@ -3804,10 +3833,6 @@
     } else if (elt->IsString()) {
       String* element = String::cast(elt);
       int element_length = element->length();
-      if (!Smi::IsValid(element_length + position)) {
-        Top::context()->mark_out_of_memory();
-        return Failure::OutOfMemoryException();
-      }
       position += element_length;
       if (ascii && !element->IsAsciiRepresentation()) {
         ascii = false;
@@ -3815,6 +3840,10 @@
     } else {
       return Top::Throw(Heap::illegal_argument_symbol());
     }
+    if (position > String::kMaxLength) {
+      Top::context()->mark_out_of_memory();
+      return Failure::OutOfMemoryException();
+    }
   }
 
   int length = position;
diff --git a/src/runtime.h b/src/runtime.h
index afa278b..279181d 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -152,6 +152,7 @@
   F(StringSlice, 3, 1) \
   F(StringReplaceRegExpWithString, 4, 1) \
   F(StringMatch, 3, 1) \
+  F(StringTrim, 3, 1) \
   \
   /* Numbers */ \
   F(NumberToRadixString, 2, 1) \
diff --git a/src/serialize.cc b/src/serialize.cc
index 94cd02a..6ff1d7f 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -38,6 +38,7 @@
 #include "serialize.h"
 #include "stub-cache.h"
 #include "v8threads.h"
+#include "top.h"
 
 namespace v8 {
 namespace internal {
@@ -612,12 +613,23 @@
   }
 
   // Top addresses
-  const char* top_address_format = "Top::get_address_from_id(%i)";
-  size_t top_format_length = strlen(top_address_format);
+  const char* top_address_format = "Top::%s";
+
+  const char* AddressNames[] = {
+#define C(name) #name,
+    TOP_ADDRESS_LIST(C)
+    TOP_ADDRESS_LIST_PROF(C)
+    NULL
+#undef C
+  };
+
+  size_t top_format_length = strlen(top_address_format) - 2;
   for (uint16_t i = 0; i < Top::k_top_address_count; ++i) {
-    Vector<char> name = Vector<char>::New(top_format_length + 1);
+    const char* address_name = AddressNames[i];
+    Vector<char> name =
+        Vector<char>::New(top_format_length + strlen(address_name) + 1);
     const char* chars = name.start();
-    OS::SNPrintF(name, top_address_format, i);
+    OS::SNPrintF(name, top_address_format, address_name);
     Add(Top::get_address_from_id((Top::AddressId)i), TOP_ADDRESS, i, chars);
   }
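
The name table leans on the X-macro pattern: TOP_ADDRESS_LIST (and its _PROF variant) applies the macro it is given to every address id, so the same lists that presumably define Top::AddressId expand here into parallel strings. A reduced sketch with hypothetical entries:

    #define TOP_ADDRESS_LIST(C) \
      C(handler_address)        \
      C(c_entry_fp_address)

    #define C(name) #name,
    const char* names[] = { TOP_ADDRESS_LIST(C) NULL };
    #undef C
    // names == { "handler_address", "c_entry_fp_address", NULL }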
 
diff --git a/src/string.js b/src/string.js
index fbdc307..d2d6e96 100644
--- a/src/string.js
+++ b/src/string.js
@@ -680,6 +680,18 @@
   return %StringToUpperCase(ToString(this));
 }
 
+// ES5, 15.5.4.20
+function StringTrim() {
+  return %StringTrim(ToString(this), true, true);
+}
+
+function StringTrimLeft() {
+  return %StringTrim(ToString(this), true, false);
+}
+
+function StringTrimRight() {
+  return %StringTrim(ToString(this), false, true);
+}
 
 // ECMA-262, section 15.5.3.2
 function StringFromCharCode(code) {
@@ -855,6 +867,9 @@
     "toLocaleLowerCase", StringToLocaleLowerCase,
     "toUpperCase", StringToUpperCase,
     "toLocaleUpperCase", StringToLocaleUpperCase,
+    "trim", StringTrim,
+    "trimLeft", StringTrimLeft,
+    "trimRight", StringTrimRight,
     "link", StringLink,
     "anchor", StringAnchor,
     "fontcolor", StringFontcolor,
diff --git a/src/top.cc b/src/top.cc
index aa7788e..bb2dea4 100644
--- a/src/top.cc
+++ b/src/top.cc
@@ -54,6 +54,7 @@
   return top_addresses[id];
 }
 
+
 char* Top::Iterate(ObjectVisitor* v, char* thread_storage) {
   ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(thread_storage);
   Iterate(v, thread);
@@ -493,11 +494,17 @@
 
 bool Top::MayNamedAccess(JSObject* receiver, Object* key, v8::AccessType type) {
   ASSERT(receiver->IsAccessCheckNeeded());
+
+  // The callers of this method are not expecting a GC.
+  AssertNoAllocation no_gc;
+
+  // Skip checks for access to hidden properties.  Note that we do
+  // not require a context to exist in this case.
+  if (key == Heap::hidden_symbol()) return true;
+
   // Check for compatibility between the security tokens in the
   // current lexical context and the accessed object.
   ASSERT(Top::context());
-  // The callers of this method are not expecting a GC.
-  AssertNoAllocation no_gc;
 
   MayAccessDecision decision = MayAccessPreCheck(receiver, type);
   if (decision != UNKNOWN) return decision == YES;
diff --git a/src/usage-analyzer.cc b/src/usage-analyzer.cc
index 23a4d9f..74cf982 100644
--- a/src/usage-analyzer.cc
+++ b/src/usage-analyzer.cc
@@ -159,14 +159,25 @@
 }
 
 
-void UsageComputer::VisitLoopStatement(LoopStatement* node) {
-  if (node->init() != NULL)
-    Visit(node->init());
+void UsageComputer::VisitDoWhileStatement(DoWhileStatement* node) {
+  WeightScaler ws(this, 10.0);
+  Read(node->cond());
+  Visit(node->body());
+}
+
+
+void UsageComputer::VisitWhileStatement(WhileStatement* node) {
+  WeightScaler ws(this, 10.0);
+  Read(node->cond());
+  Visit(node->body());
+}
+
+
+void UsageComputer::VisitForStatement(ForStatement* node) {
+  if (node->init() != NULL) Visit(node->init());
   { WeightScaler ws(this, 10.0);  // executed in each iteration
-    if (node->cond() != NULL)
-      Read(node->cond());
-    if (node->next() != NULL)
-      Visit(node->next());
+    if (node->cond() != NULL) Read(node->cond());
+    if (node->next() != NULL) Visit(node->next());
     Visit(node->body());
   }
 }
@@ -180,7 +191,7 @@
 }
 
 
-void UsageComputer::VisitTryCatch(TryCatch* node) {
+void UsageComputer::VisitTryCatchStatement(TryCatchStatement* node) {
   Visit(node->try_block());
   { WeightScaler ws(this, 0.25);
     Write(node->catch_var());
@@ -189,7 +200,7 @@
 }
 
 
-void UsageComputer::VisitTryFinally(TryFinally* node) {
+void UsageComputer::VisitTryFinallyStatement(TryFinallyStatement* node) {
   Visit(node->try_block());
   Visit(node->finally_block());
 }
diff --git a/src/utils.h b/src/utils.h
index 275dbb5..f4a0598 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -36,7 +36,8 @@
 // ----------------------------------------------------------------------------
 // General helper functions
 
-// Returns true iff x is a power of 2.  Does not work for zero.
+// Returns true iff x is a power of 2 (or zero). Cannot be used with
+// the most negative value of type T (computing x - 1 overflows).
 template <typename T>
 static inline bool IsPowerOf2(T x) {
   return (x & (x - 1)) == 0;
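
The sharpened comment is worth spelling out, since both caveats are invisible at the call site (illustrative values; INT_MIN from <limits.h>):

    IsPowerOf2(16);       // true
    IsPowerOf2(12);       // false
    IsPowerOf2(0);        // also true: 0 & -1 == 0, so zero slips through
    IsPowerOf2(INT_MIN);  // undefined behavior: INT_MIN - 1 overflows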
diff --git a/src/v8-counters.h b/src/v8-counters.h
index e360b55..84f9ee4 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -52,8 +52,8 @@
   HT(variable_allocation, V8.VariableAllocation)                      \
   HT(ast_optimization, V8.ASTOptimization)                            \
   HT(code_generation, V8.CodeGeneration)                              \
-  HT(deferred_code_generation, V8.DeferredCodeGeneration)             \
-  HT(code_creation, V8.CodeCreation)
+  HT(deferred_code_generation, V8.DeferredCodeGeneration)
+
 
 // WARNING: STATS_COUNTER_LIST_* is a very large macro that is causing MSVC
 // Intellisense to crash.  It was broken into two macros (each of length 40
@@ -150,7 +150,9 @@
   SC(reloc_info_count, V8.RelocInfoCount)                           \
   SC(reloc_info_size, V8.RelocInfoSize)                             \
   SC(zone_segment_bytes, V8.ZoneSegmentBytes)                       \
-  SC(compute_entry_frame, V8.ComputeEntryFrame)
+  SC(compute_entry_frame, V8.ComputeEntryFrame)                     \
+  SC(generic_binary_stub_calls, V8.GenericBinaryStubCalls)          \
+  SC(generic_binary_stub_calls_regs, V8.GenericBinaryStubCallsRegs)
 
 
 // This file contains all the v8 counters that are in use.
diff --git a/src/v8.cc b/src/v8.cc
index 3f8e6cd..3c70ee9 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -178,11 +178,14 @@
   return Heap::IdleNotification();
 }
 
+static const uint32_t kRandomPositiveSmiMax = 0x3fffffff;
 
 Smi* V8::RandomPositiveSmi() {
   uint32_t random = Random();
-  ASSERT(IsPowerOf2(Smi::kMaxValue + 1));
-  return Smi::FromInt(random & Smi::kMaxValue);
+  ASSERT(static_cast<uint32_t>(Smi::kMaxValue) >= kRandomPositiveSmiMax);
+  // kRandomPositiveSmiMax must match the divisor used in math.js.
+  return Smi::FromInt(random & kRandomPositiveSmiMax);
 }
 
 } }  // namespace v8::internal
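
The mask is (1 << 30) - 1, so the result always lies in [0, 1073741823], which fits kMaxValue under both smi layouts. A one-line restatement of the invariant the ASSERT guards:

    // random & 0x3fffffff  <=  0x3fffffff  <=  Smi::kMaxValue
    // (kMaxValue is 2^30 - 1 with 31-bit smis, 2^31 - 1 with long smis)

math.js presumably divides the smi by 0x40000000 (kRandomPositiveSmiMax + 1) to map it into [0, 1), which is why the two constants must stay in sync.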
diff --git a/src/version.cc b/src/version.cc
index fdbcb58..8e2cef6 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     1
 #define MINOR_VERSION     3
-#define BUILD_NUMBER      15
+#define BUILD_NUMBER      16
 #define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false
 
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index 899a17c..8f078ff 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -38,11 +38,6 @@
   return static_cast<Condition>(cc ^ 1);
 }
 
-// -----------------------------------------------------------------------------
-
-Immediate::Immediate(Smi* value) {
-  value_ = static_cast<int32_t>(reinterpret_cast<intptr_t>(value));
-}
 
 // -----------------------------------------------------------------------------
 // Implementation of Assembler
@@ -199,7 +194,7 @@
     Memory::Address_at(pc_) += delta;
   } else if (IsCodeTarget(rmode_)) {
     Memory::int32_at(pc_) -= delta;
-  } else if (rmode_ == JS_RETURN && IsCallInstruction()) {
+  } else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
     // Special handling of js_return when a break point is set (call
     // instruction has been inserted).
     Memory::int32_at(pc_ + 1) -= delta;  // relocate entry
@@ -267,45 +262,49 @@
 }
 
 
-bool RelocInfo::IsCallInstruction() {
+bool RelocInfo::IsPatchedReturnSequence() {
   // The recognized call sequence is:
   //  movq(kScratchRegister, immediate64); call(kScratchRegister);
   // It only needs to be distinguished from a return sequence
   //  movq(rsp, rbp); pop(rbp); ret(n); int3 *6
   // The 11th byte is int3 (0xCC) in the return sequence and
   // REX.WB (0x48+register bit) for the call sequence.
+#ifdef ENABLE_DEBUGGER_SUPPORT
   return pc_[10] != 0xCC;
+#else
+  return false;
+#endif
 }
 
 
 Address RelocInfo::call_address() {
-  ASSERT(IsCallInstruction());
+  ASSERT(IsPatchedReturnSequence());
   return Memory::Address_at(
       pc_ + Assembler::kRealPatchReturnSequenceAddressOffset);
 }
 
 
 void RelocInfo::set_call_address(Address target) {
-  ASSERT(IsCallInstruction());
+  ASSERT(IsPatchedReturnSequence());
   Memory::Address_at(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset) =
       target;
 }
 
 
 Object* RelocInfo::call_object() {
-  ASSERT(IsCallInstruction());
+  ASSERT(IsPatchedReturnSequence());
   return *call_object_address();
 }
 
 
 void RelocInfo::set_call_object(Object* target) {
-  ASSERT(IsCallInstruction());
+  ASSERT(IsPatchedReturnSequence());
   *call_object_address() = target;
 }
 
 
 Object** RelocInfo::call_object_address() {
-  ASSERT(IsCallInstruction());
+  ASSERT(IsPatchedReturnSequence());
   return reinterpret_cast<Object**>(
       pc_ + Assembler::kPatchReturnSequenceAddressOffset);
 }
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index cf79a43..3f3d34e 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -708,7 +708,7 @@
 void Assembler::shift_32(Register dst, Immediate shift_amount, int subcode) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
-  ASSERT(is_uint6(shift_amount.value_));  // illegal shift count
+  ASSERT(is_uint5(shift_amount.value_));  // illegal shift count
   if (shift_amount.value_ == 1) {
     emit_optional_rex_32(dst);
     emit(0xD1);
@@ -794,6 +794,12 @@
 }
 
 
+void Assembler::clc() {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0xF8);
+}
+
 void Assembler::cdq() {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -802,6 +808,11 @@
 
 
 void Assembler::cmovq(Condition cc, Register dst, Register src) {
+  if (cc == always) {
+    movq(dst, src);
+    return;
+  } else if (cc == never) {
+    return;
+  }
   // No need to check CpuInfo for CMOV support, it's a required part of the
   // 64-bit architecture.
   ASSERT(cc >= 0);  // Use mov for unconditional moves.
@@ -816,6 +827,11 @@
 
 
 void Assembler::cmovq(Condition cc, Register dst, const Operand& src) {
+  if (cc == always) {
+    movq(dst, src);
+    return;
+  } else if (cc == never) {
+    return;
+  }
   ASSERT(cc >= 0);
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -828,6 +844,11 @@
 
 
 void Assembler::cmovl(Condition cc, Register dst, Register src) {
+  if (cc == always) {
+    movl(dst, src);
+    return;
+  } else if (cc == never) {
+    return;
+  }
   ASSERT(cc >= 0);
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -840,6 +861,11 @@
 
 
 void Assembler::cmovl(Condition cc, Register dst, const Operand& src) {
+  if (cc == always) {
+    movl(dst, src);
+    return;
+  } else if (cc == never) {
+    return;
+  }
   ASSERT(cc >= 0);
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -913,6 +939,27 @@
 }
 
 
+void Assembler::decb(Register dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (dst.code() > 3) {
+    // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
+    emit_rex_32(dst);
+  }
+  emit(0xFE);
+  emit_modrm(0x1, dst);
+}
+
+
+void Assembler::decb(const Operand& dst) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit_optional_rex_32(dst);
+  emit(0xFE);
+  emit_operand(1, dst);
+}
+
+
 void Assembler::enter(Immediate size) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1037,6 +1084,12 @@
 
 
 void Assembler::j(Condition cc, Label* L) {
+  if (cc == always) {
+    jmp(L);
+    return;
+  } else if (cc == never) {
+    return;
+  }
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   ASSERT(is_uint4(cc));
@@ -1373,10 +1426,7 @@
     // There is no possible reason to store a heap pointer without relocation
     // info, so it must be a smi.
     ASSERT(value->IsSmi());
-    // Smis never have more than 32 significant bits, but they might
-    // have garbage in the high bits.
-    movq(dst,
-         Immediate(static_cast<int32_t>(reinterpret_cast<intptr_t>(*value))));
+    movq(dst, reinterpret_cast<int64_t>(*value), RelocInfo::NONE);
   } else {
     EnsureSpace ensure_space(this);
     last_pc_ = pc_;
@@ -1650,22 +1700,6 @@
 }
 
 
-void Assembler::rcl(Register dst, uint8_t imm8) {
-  EnsureSpace ensure_space(this);
-  last_pc_ = pc_;
-  ASSERT(is_uint6(imm8));  // illegal shift count
-  if (imm8 == 1) {
-    emit_rex_64(dst);
-    emit(0xD1);
-    emit_modrm(0x2, dst);
-  } else {
-    emit_rex_64(dst);
-    emit(0xC1);
-    emit_modrm(0x2, dst);
-    emit(imm8);
-  }
-}
-
 void Assembler::rdtsc() {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
@@ -1689,6 +1723,10 @@
 
 
 void Assembler::setcc(Condition cc, Register reg) {
+  if (cc > last_condition) {
+    movb(reg, Immediate(cc == always ? 1 : 0));
+    return;
+  }
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
   ASSERT(is_uint4(cc));
@@ -1750,6 +1788,18 @@
 }
 
 
+void Assembler::testb(Register dst, Register src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  if (dst.code() > 3 || src.code() > 3) {
+    // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
+    emit_rex_32(dst, src);
+  }
+  emit(0x84);
+  emit_modrm(dst, src);
+}
+
+
 void Assembler::testb(Register reg, Immediate mask) {
   ASSERT(is_int8(mask.value_) || is_uint8(mask.value_));
   EnsureSpace ensure_space(this);
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index e17a55d..7e09a41 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -222,13 +222,18 @@
   less_equal    = 14,
   greater       = 15,
 
+  // Fake conditions that are handled by the
+  // opcodes using them.
+  always        = 16,
+  never         = 17,
   // aliases
   carry         = below,
   not_carry     = above_equal,
   zero          = equal,
   not_zero      = not_equal,
   sign          = negative,
-  not_sign      = positive
+  not_sign      = positive,
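+  // Conditions above last_condition have no x86 encoding and are handled
+  // specially by the instructions that accept them.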
+  last_condition = greater
 };
 
 
@@ -284,7 +289,6 @@
 class Immediate BASE_EMBEDDED {
  public:
   explicit Immediate(int32_t value) : value_(value) {}
-  inline explicit Immediate(Smi* value);
 
  private:
   int32_t value_;
@@ -372,6 +376,11 @@
   static void Probe();
   // Check whether a feature is supported by the target CPU.
   static bool IsSupported(Feature f) {
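+    // A feature is reported as unsupported when its command-line flag turns
+    // it off, even if the CPU provides it.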
+    if (f == SSE2 && !FLAG_enable_sse2) return false;
+    if (f == SSE3 && !FLAG_enable_sse3) return false;
+    if (f == CMOV && !FLAG_enable_cmov) return false;
+    if (f == RDTSC && !FLAG_enable_rdtsc) return false;
+    if (f == SAHF && !FLAG_enable_sahf) return false;
     return (supported_ & (V8_UINT64_C(1) << f)) != 0;
   }
   // Check whether a feature is currently enabled.
@@ -699,10 +708,17 @@
     immediate_arithmetic_op_32(0x4, dst, src);
   }
 
+  void andl(Register dst, Register src) {
+    arithmetic_op_32(0x23, dst, src);
+  }
+
+
   void decq(Register dst);
   void decq(const Operand& dst);
   void decl(Register dst);
   void decl(const Operand& dst);
+  void decb(Register dst);
+  void decb(const Operand& dst);
 
   // Sign-extends rax into rdx:rax.
   void cqo();
@@ -758,12 +774,34 @@
     immediate_arithmetic_op(0x1, dst, src);
   }
 
+  void orl(Register dst, Immediate src) {
+    immediate_arithmetic_op_32(0x1, dst, src);
+  }
+
   void or_(const Operand& dst, Immediate src) {
     immediate_arithmetic_op(0x1, dst, src);
   }
 
+  void orl(const Operand& dst, Immediate src) {
+    immediate_arithmetic_op_32(0x1, dst, src);
+  }
 
-  void rcl(Register dst, uint8_t imm8);
+
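+  // The rotate instructions share the group-2 shift encoding; the constant
+  // passed to shift() is the ModR/M opcode extension (/0 rol, /1 ror,
+  // /2 rcl, /3 rcr).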
+  void rcl(Register dst, Immediate imm8) {
+    shift(dst, imm8, 0x2);
+  }
+
+  void rol(Register dst, Immediate imm8) {
+    shift(dst, imm8, 0x0);
+  }
+
+  void rcr(Register dst, Immediate imm8) {
+    shift(dst, imm8, 0x3);
+  }
+
+  void ror(Register dst, Immediate imm8) {
+    shift(dst, imm8, 0x1);
+  }
 
   // Shifts dst:src left by cl bits, affecting only dst.
   void shld(Register dst, Register src);
@@ -864,6 +902,7 @@
     immediate_arithmetic_op_8(0x5, dst, src);
   }
 
+  void testb(Register dst, Register src);
   void testb(Register reg, Immediate mask);
   void testb(const Operand& op, Immediate mask);
   void testl(Register dst, Register src);
@@ -902,6 +941,7 @@
   void bts(const Operand& dst, Register src);
 
   // Miscellaneous
+  void clc();
   void cpuid();
   void hlt();
   void int3();
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index 35eddc4..01992ce 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -53,7 +53,7 @@
   __ movq(rbp, rsp);
 
   // Store the arguments adaptor context sentinel.
-  __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
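+  // A smi now carries 32 bits of payload in the upper half of the word, so
+  // it no longer fits in a sign-extended 32-bit immediate; use Push(Smi*).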
+  __ Push(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
 
   // Push the function on the stack.
   __ push(rdi);
@@ -75,14 +75,9 @@
   __ pop(rbp);
 
   // Remove caller arguments from the stack.
-  // rbx holds a Smi, so we convery to dword offset by multiplying by 4.
-  // TODO(smi): Find a way to abstract indexing by a smi.
-  ASSERT_EQ(kSmiTagSize, 1 && kSmiTag == 0);
-  ASSERT_EQ(kPointerSize, (1 << kSmiTagSize) * 4);
-  // TODO(smi): Find way to abstract indexing by a smi.
   __ pop(rcx);
-  // 1 * kPointerSize is offset of receiver.
-  __ lea(rsp, Operand(rsp, rbx, times_half_pointer_size, 1 * kPointerSize));
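+  // SmiToIndex turns the smi argument count in rbx into an (index register,
+  // scale) pair suitable for addressing pointer-sized stack slots.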
+  SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
+  __ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
   __ push(rcx);
 }
 
@@ -342,7 +337,7 @@
     // Because runtime functions always remove the receiver from the stack, we
     // have to fake one to avoid underflowing the stack.
     __ push(rax);
-    __ push(Immediate(Smi::FromInt(0)));
+    __ Push(Smi::FromInt(0));
 
     // Do call to runtime routine.
     __ CallRuntime(Runtime::kStackGuard, 1);
@@ -434,7 +429,7 @@
 
   // Update the index on the stack and in register rax.
   __ movq(rax, Operand(rbp, kIndexOffset));
-  __ addq(rax, Immediate(Smi::FromInt(1)));
+  __ SmiAddConstant(rax, rax, Smi::FromInt(1));
   __ movq(Operand(rbp, kIndexOffset), rax);
 
   __ bind(&entry);
@@ -507,7 +502,7 @@
   __ Move(FieldOperand(result, JSArray::kPropertiesOffset),
           Factory::empty_fixed_array());
   // Field JSArray::kElementsOffset is initialized later.
-  __ movq(FieldOperand(result, JSArray::kLengthOffset), Immediate(0));
+  __ Move(FieldOperand(result, JSArray::kLengthOffset), Smi::FromInt(0));
 
   // If no storage is requested for the elements array just set the empty
   // fixed array.
@@ -718,14 +713,12 @@
   __ cmpq(rax, Immediate(1));
   __ j(not_equal, &argc_two_or_more);
   __ movq(rdx, Operand(rsp, kPointerSize));  // Get the argument from the stack.
-  Condition not_positive_smi = __ CheckNotPositiveSmi(rdx);
-  __ j(not_positive_smi, call_generic_code);
+  __ JumpIfNotPositiveSmi(rdx, call_generic_code);
 
   // Handle construction of an empty array of a certain size. Bail out if size
   // is too large to actually allocate an elements array.
-  __ JumpIfSmiGreaterEqualsConstant(rdx,
-                                    JSObject::kInitialMaxFastElementArray,
-                                    call_generic_code);
+  __ SmiCompare(rdx, Smi::FromInt(JSObject::kInitialMaxFastElementArray));
+  __ j(greater_equal, call_generic_code);
 
   // rax: argc
   // rdx: array_size (smi)
@@ -825,10 +818,10 @@
     __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
     // Will both indicate a NULL and a Smi.
     ASSERT(kSmiTag == 0);
-    Condition not_smi = __ CheckNotSmi(rbx);
-    __ Assert(not_smi, "Unexpected initial map for Array function");
+    Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
+    __ Check(not_smi, "Unexpected initial map for Array function");
     __ CmpObjectType(rbx, MAP_TYPE, rcx);
-    __ Assert(equal, "Unexpected initial map for Array function");
+    __ Check(equal, "Unexpected initial map for Array function");
   }
 
   // Run the native code for the Array function called as a normal function.
@@ -857,15 +850,15 @@
     // does always have a map.
     GenerateLoadArrayFunction(masm, rbx);
     __ cmpq(rdi, rbx);
-    __ Assert(equal, "Unexpected Array function");
+    __ Check(equal, "Unexpected Array function");
     // Initial map for the builtin Array function should be a map.
     __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
     // Will both indicate a NULL and a Smi.
     ASSERT(kSmiTag == 0);
-    Condition not_smi = __ CheckNotSmi(rbx);
-    __ Assert(not_smi, "Unexpected initial map for Array function");
+    Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
+    __ Check(not_smi, "Unexpected initial map for Array function");
     __ CmpObjectType(rbx, MAP_TYPE, rcx);
-    __ Assert(equal, "Unexpected initial map for Array function");
+    __ Check(equal, "Unexpected initial map for Array function");
   }
 
   // Run the native code for the Array function called as constructor.
@@ -902,7 +895,6 @@
   // edi: called object
   // eax: number of arguments
   __ bind(&non_function_call);
-
   // Set expected number of arguments to zero (not changing eax).
   __ movq(rbx, Immediate(0));
   __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
@@ -1143,11 +1135,9 @@
   __ LeaveConstructFrame();
 
   // Remove caller arguments from the stack and return.
-  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-  // TODO(smi): Find a way to abstract indexing by a smi.
   __ pop(rcx);
-  // 1 * kPointerSize is offset of receiver.
-  __ lea(rsp, Operand(rsp, rbx, times_half_pointer_size, 1 * kPointerSize));
+  SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
+  __ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
   __ push(rcx);
   __ IncrementCounter(&Counters::constructed_objects, 1);
   __ ret(0);
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 8e6dbef..d72257e 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -54,6 +54,7 @@
   }
 }
 
+
 void DeferredCode::RestoreRegisters() {
   // Restore registers in reverse order due to the stack.
   for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
@@ -237,8 +238,8 @@
   // Test if operands are smi or number objects (fp). Requirements:
   // operand_1 in rax, operand_2 in rdx; falls through on float or smi
   // operands, jumps to the non_float label otherwise.
-  static void CheckFloatOperands(MacroAssembler* masm,
-                                 Label* non_float);
+  static void CheckNumberOperands(MacroAssembler* masm,
+                                  Label* non_float);
 
   // Allocate a heap number in new space with undefined value.
   // Returns tagged pointer in result, or jumps to need_gc if new space is full.
@@ -278,7 +279,7 @@
   __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT);
   frame_->EmitPush(kScratchRegister);
   frame_->EmitPush(rsi);  // The context is the second argument.
-  frame_->EmitPush(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
+  frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0));
   Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
   // Return value is ignored.
 }
@@ -767,8 +768,8 @@
     // adaptor frame below it.
     Label invoke, adapted;
     __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-    __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
-    __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
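+    // SmiCompare handles a smi constant operand directly; the constant no
+    // longer fits in cmpq's sign-extended 32-bit immediate.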
+    __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
+                  Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
     __ j(equal, &adapted);
 
     // No arguments adaptor frame. Copy fixed number of arguments.
@@ -793,12 +794,12 @@
     // have to worry about getting rid of the elements from the virtual
     // frame.
     Label loop;
-    __ bind(&loop);
     __ testl(rcx, rcx);
     __ j(zero, &invoke);
+    __ bind(&loop);
     __ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize));
     __ decl(rcx);
-    __ jmp(&loop);
+    __ j(not_zero, &loop);
 
     // Invoke the function. The virtual frame knows about the receiver
     // so make sure to forget that explicitly.
@@ -933,7 +934,7 @@
     // Declaration nodes are always introduced in one of two modes.
     ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
     PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
-    frame_->EmitPush(Immediate(Smi::FromInt(attr)));
+    frame_->EmitPush(Smi::FromInt(attr));
     // Push initial value, if any.
     // Note: For variables we must not push an initial value (such as
     // 'undefined') because we may have a (legal) redeclaration and we
@@ -943,7 +944,7 @@
     } else if (node->fun() != NULL) {
       Load(node->fun());
     } else {
-      frame_->EmitPush(Immediate(Smi::FromInt(0)));  // no initial value!
+      frame_->EmitPush(Smi::FromInt(0));  // no initial value!
     }
     Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
     // Ignore the return value (declarations are statements).
@@ -1291,288 +1292,335 @@
 }
 
 
-void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
+void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
   ASSERT(!in_spilled_code());
-  Comment cmnt(masm_, "[ LoopStatement");
+  Comment cmnt(masm_, "[ DoWhileStatement");
   CodeForStatementPosition(node);
   node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  JumpTarget body(JumpTarget::BIDIRECTIONAL);
+  IncrementLoopNesting();
 
-  // Simple condition analysis.  ALWAYS_TRUE and ALWAYS_FALSE represent a
-  // known result for the test expression, with no side effects.
-  enum { ALWAYS_TRUE, ALWAYS_FALSE, DONT_KNOW } info = DONT_KNOW;
-  if (node->cond() == NULL) {
-    ASSERT(node->type() == LoopStatement::FOR_LOOP);
-    info = ALWAYS_TRUE;
-  } else {
-    Literal* lit = node->cond()->AsLiteral();
-    if (lit != NULL) {
-      if (lit->IsTrue()) {
-        info = ALWAYS_TRUE;
-      } else if (lit->IsFalse()) {
-        info = ALWAYS_FALSE;
-      }
-    }
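+  // AnalyzeCondition returns ALWAYS_TRUE for a missing or literal-true
+  // condition, ALWAYS_FALSE for a literal false, and DONT_KNOW otherwise.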
+  ConditionAnalysis info = AnalyzeCondition(node->cond());
+  // Label the top of the loop for the backward jump if necessary.
+  switch (info) {
+    case ALWAYS_TRUE:
+      // Use the continue target.
+      node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+      node->continue_target()->Bind();
+      break;
+    case ALWAYS_FALSE:
+      // No need to label it.
+      node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+      break;
+    case DONT_KNOW:
+      // Continue is the test, so use the backward body target.
+      node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+      body.Bind();
+      break;
   }
 
-  switch (node->type()) {
-    case LoopStatement::DO_LOOP: {
-      JumpTarget body(JumpTarget::BIDIRECTIONAL);
-      IncrementLoopNesting();
+  CheckStack();  // TODO(1222600): ignore if body contains calls.
+  Visit(node->body());
 
-      // Label the top of the loop for the backward jump if necessary.
-      if (info == ALWAYS_TRUE) {
-        // Use the continue target.
-        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-        node->continue_target()->Bind();
-      } else if (info == ALWAYS_FALSE) {
-        // No need to label it.
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-      } else {
-        // Continue is the test, so use the backward body target.
-        ASSERT(info == DONT_KNOW);
-        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-        body.Bind();
+  // Compile the test.
+  switch (info) {
+    case ALWAYS_TRUE:
+      // If control flow can fall off the end of the body, jump back
+      // to the top and bind the break target at the exit.
+      if (has_valid_frame()) {
+        node->continue_target()->Jump();
       }
-
-      CheckStack();  // TODO(1222600): ignore if body contains calls.
-      Visit(node->body());
-
-      // Compile the test.
-      if (info == ALWAYS_TRUE) {
-        // If control flow can fall off the end of the body, jump back
-        // to the top and bind the break target at the exit.
-        if (has_valid_frame()) {
-          node->continue_target()->Jump();
-        }
-        if (node->break_target()->is_linked()) {
-          node->break_target()->Bind();
-        }
-
-      } else if (info == ALWAYS_FALSE) {
-        // We may have had continues or breaks in the body.
-        if (node->continue_target()->is_linked()) {
-          node->continue_target()->Bind();
-        }
-        if (node->break_target()->is_linked()) {
-          node->break_target()->Bind();
-        }
-
-      } else {
-        ASSERT(info == DONT_KNOW);
-        // We have to compile the test expression if it can be reached by
-        // control flow falling out of the body or via continue.
-        if (node->continue_target()->is_linked()) {
-          node->continue_target()->Bind();
-        }
-        if (has_valid_frame()) {
-          ControlDestination dest(&body, node->break_target(), false);
-          LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
-        }
-        if (node->break_target()->is_linked()) {
-          node->break_target()->Bind();
-        }
-      }
-      break;
-    }
-
-    case LoopStatement::WHILE_LOOP: {
-      // Do not duplicate conditions that may have function literal
-      // subexpressions.  This can cause us to compile the function
-      // literal twice.
-      bool test_at_bottom = !node->may_have_function_literal();
-
-      IncrementLoopNesting();
-
-      // If the condition is always false and has no side effects, we
-      // do not need to compile anything.
-      if (info == ALWAYS_FALSE) break;
-
-      JumpTarget body;
-      if (test_at_bottom) {
-        body.set_direction(JumpTarget::BIDIRECTIONAL);
-      }
-
-      // Based on the condition analysis, compile the test as necessary.
-      if (info == ALWAYS_TRUE) {
-        // We will not compile the test expression.  Label the top of
-        // the loop with the continue target.
-        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-        node->continue_target()->Bind();
-      } else {
-        ASSERT(info == DONT_KNOW);  // ALWAYS_FALSE cannot reach here.
-        if (test_at_bottom) {
-          // Continue is the test at the bottom, no need to label the
-          // test at the top.  The body is a backward target.
-          node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-        } else {
-          // Label the test at the top as the continue target.  The
-          // body is a forward-only target.
-          node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-          node->continue_target()->Bind();
-        }
-        // Compile the test with the body as the true target and
-        // preferred fall-through and with the break target as the
-        // false target.
-        ControlDestination dest(&body, node->break_target(), true);
-        LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
-
-        if (dest.false_was_fall_through()) {
-          // If we got the break target as fall-through, the test may
-          // have been unconditionally false (if there are no jumps to
-          // the body).
-          if (!body.is_linked()) break;
-
-          // Otherwise, jump around the body on the fall through and
-          // then bind the body target.
-          node->break_target()->Unuse();
-          node->break_target()->Jump();
-          body.Bind();
-        }
-      }
-
-      CheckStack();  // TODO(1222600): ignore if body contains calls.
-      Visit(node->body());
-
-      // Based on the condition analysis, compile the backward jump as
-      // necessary.
-      if (info == ALWAYS_TRUE) {
-        // The loop body has been labeled with the continue target.
-        if (has_valid_frame()) {
-          node->continue_target()->Jump();
-        }
-      } else {
-        ASSERT(info == DONT_KNOW);  // ALWAYS_FALSE cannot reach here.
-        if (test_at_bottom) {
-          // If we have chosen to recompile the test at the bottom,
-          // then it is the continue target.
-          if (node->continue_target()->is_linked()) {
-            node->continue_target()->Bind();
-          }
-          if (has_valid_frame()) {
-            // The break target is the fall-through (body is a backward
-            // jump from here and thus an invalid fall-through).
-            ControlDestination dest(&body, node->break_target(), false);
-            LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
-          }
-        } else {
-          // If we have chosen not to recompile the test at the
-          // bottom, jump back to the one at the top.
-          if (has_valid_frame()) {
-            node->continue_target()->Jump();
-          }
-        }
-      }
-
-      // The break target may be already bound (by the condition), or
-      // there may not be a valid frame.  Bind it only if needed.
       if (node->break_target()->is_linked()) {
         node->break_target()->Bind();
       }
       break;
-    }
-
-    case LoopStatement::FOR_LOOP: {
-      // Do not duplicate conditions that may have function literal
-      // subexpressions.  This can cause us to compile the function
-      // literal twice.
-      bool test_at_bottom = !node->may_have_function_literal();
-
-      // Compile the init expression if present.
-      if (node->init() != NULL) {
-        Visit(node->init());
+    case ALWAYS_FALSE:
+      // We may have had continues or breaks in the body.
+      if (node->continue_target()->is_linked()) {
+        node->continue_target()->Bind();
       }
-
-      IncrementLoopNesting();
-
-      // If the condition is always false and has no side effects, we
-      // do not need to compile anything else.
-      if (info == ALWAYS_FALSE) break;
-
-      // Target for backward edge if no test at the bottom, otherwise
-      // unused.
-      JumpTarget loop(JumpTarget::BIDIRECTIONAL);
-
-      // Target for backward edge if there is a test at the bottom,
-      // otherwise used as target for test at the top.
-      JumpTarget body;
-      if (test_at_bottom) {
-        body.set_direction(JumpTarget::BIDIRECTIONAL);
+      if (node->break_target()->is_linked()) {
+        node->break_target()->Bind();
       }
-
-      // Based on the condition analysis, compile the test as necessary.
-      if (info == ALWAYS_TRUE) {
-        // We will not compile the test expression.  Label the top of
-        // the loop.
-        if (node->next() == NULL) {
-          // Use the continue target if there is no update expression.
-          node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-          node->continue_target()->Bind();
-        } else {
-          // Otherwise use the backward loop target.
-          node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-          loop.Bind();
-        }
-      } else {
-        ASSERT(info == DONT_KNOW);
-        if (test_at_bottom) {
-          // Continue is either the update expression or the test at
-          // the bottom, no need to label the test at the top.
-          node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-        } else if (node->next() == NULL) {
-          // We are not recompiling the test at the bottom and there
-          // is no update expression.
-          node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
-          node->continue_target()->Bind();
-        } else {
-          // We are not recompiling the test at the bottom and there
-          // is an update expression.
-          node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
-          loop.Bind();
-        }
-
-        // Compile the test with the body as the true target and
-        // preferred fall-through and with the break target as the
-        // false target.
-        ControlDestination dest(&body, node->break_target(), true);
+      break;
+    case DONT_KNOW:
+      // We have to compile the test expression if it can be reached by
+      // control flow falling out of the body or via continue.
+      if (node->continue_target()->is_linked()) {
+        node->continue_target()->Bind();
+      }
+      if (has_valid_frame()) {
+        ControlDestination dest(&body, node->break_target(), false);
         LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
-
-        if (dest.false_was_fall_through()) {
-          // If we got the break target as fall-through, the test may
-          // have been unconditionally false (if there are no jumps to
-          // the body).
-          if (!body.is_linked()) break;
-
-          // Otherwise, jump around the body on the fall through and
-          // then bind the body target.
-          node->break_target()->Unuse();
-          node->break_target()->Jump();
-          body.Bind();
-        }
       }
+      if (node->break_target()->is_linked()) {
+        node->break_target()->Bind();
+      }
+      break;
+  }
 
-      CheckStack();  // TODO(1222600): ignore if body contains calls.
-      Visit(node->body());
+  DecrementLoopNesting();
+  node->continue_target()->Unuse();
+  node->break_target()->Unuse();
+}
 
-      // If there is an update expression, compile it if necessary.
-      if (node->next() != NULL) {
+
+void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ WhileStatement");
+  CodeForStatementPosition(node);
+
+  // If the condition is always false and has no side effects, we do not
+  // need to compile anything.
+  ConditionAnalysis info = AnalyzeCondition(node->cond());
+  if (info == ALWAYS_FALSE) return;
+
+  // Do not duplicate conditions that may have function literal
+  // subexpressions.  This can cause us to compile the function literal
+  // twice.
+  bool test_at_bottom = !node->may_have_function_literal();
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  IncrementLoopNesting();
+  JumpTarget body;
+  if (test_at_bottom) {
+    body.set_direction(JumpTarget::BIDIRECTIONAL);
+  }
+
+  // Based on the condition analysis, compile the test as necessary.
+  switch (info) {
+    case ALWAYS_TRUE:
+      // We will not compile the test expression.  Label the top of the
+      // loop with the continue target.
+      node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+      node->continue_target()->Bind();
+      break;
+    case DONT_KNOW: {
+      if (test_at_bottom) {
+        // Continue is the test at the bottom, no need to label the test
+        // at the top.  The body is a backward target.
+        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+      } else {
+        // Label the test at the top as the continue target.  The body
+        // is a forward-only target.
+        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+        node->continue_target()->Bind();
+      }
+      // Compile the test with the body as the true target and preferred
+      // fall-through and with the break target as the false target.
+      ControlDestination dest(&body, node->break_target(), true);
+      LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+
+      if (dest.false_was_fall_through()) {
+        // If we got the break target as fall-through, the test may have
+        // been unconditionally false (if there are no jumps to the
+        // body).
+        if (!body.is_linked()) {
+          DecrementLoopNesting();
+          return;
+        }
+
+        // Otherwise, jump around the body on the fall through and then
+        // bind the body target.
+        node->break_target()->Unuse();
+        node->break_target()->Jump();
+        body.Bind();
+      }
+      break;
+    }
+    case ALWAYS_FALSE:
+      UNREACHABLE();
+      break;
+  }
+
+  CheckStack();  // TODO(1222600): ignore if body contains calls.
+  Visit(node->body());
+
+  // Based on the condition analysis, compile the backward jump as
+  // necessary.
+  switch (info) {
+    case ALWAYS_TRUE:
+      // The loop body has been labeled with the continue target.
+      if (has_valid_frame()) {
+        node->continue_target()->Jump();
+      }
+      break;
+    case DONT_KNOW:
+      if (test_at_bottom) {
+        // If we have chosen to recompile the test at the bottom,
+        // then it is the continue target.
         if (node->continue_target()->is_linked()) {
           node->continue_target()->Bind();
         }
-
-        // Control can reach the update by falling out of the body or
-        // by a continue.
         if (has_valid_frame()) {
-          // Record the source position of the statement as this code
-          // which is after the code for the body actually belongs to
-          // the loop statement and not the body.
-          CodeForStatementPosition(node);
-          Visit(node->next());
+          // The break target is the fall-through (body is a backward
+          // jump from here and thus an invalid fall-through).
+          ControlDestination dest(&body, node->break_target(), false);
+          LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+        }
+      } else {
+        // If we have chosen not to recompile the test at the
+        // bottom, jump back to the one at the top.
+        if (has_valid_frame()) {
+          node->continue_target()->Jump();
         }
       }
+      break;
+    case ALWAYS_FALSE:
+      UNREACHABLE();
+      break;
+  }
 
-      // Based on the condition analysis, compile the backward jump as
-      // necessary.
-      if (info == ALWAYS_TRUE) {
+  // The break target may be already bound (by the condition), or there
+  // may not be a valid frame.  Bind it only if needed.
+  if (node->break_target()->is_linked()) {
+    node->break_target()->Bind();
+  }
+  DecrementLoopNesting();
+}
+
+
+void CodeGenerator::VisitForStatement(ForStatement* node) {
+  ASSERT(!in_spilled_code());
+  Comment cmnt(masm_, "[ ForStatement");
+  CodeForStatementPosition(node);
+
+  // Compile the init expression if present.
+  if (node->init() != NULL) {
+    Visit(node->init());
+  }
+
+  // If the condition is always false and has no side effects, we do not
+  // need to compile anything else.
+  ConditionAnalysis info = AnalyzeCondition(node->cond());
+  if (info == ALWAYS_FALSE) return;
+
+  // Do not duplicate conditions that may have function literal
+  // subexpressions.  This can cause us to compile the function literal
+  // twice.
+  bool test_at_bottom = !node->may_have_function_literal();
+  node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+  IncrementLoopNesting();
+
+  // Target for backward edge if no test at the bottom, otherwise
+  // unused.
+  JumpTarget loop(JumpTarget::BIDIRECTIONAL);
+
+  // Target for backward edge if there is a test at the bottom,
+  // otherwise used as target for test at the top.
+  JumpTarget body;
+  if (test_at_bottom) {
+    body.set_direction(JumpTarget::BIDIRECTIONAL);
+  }
+
+  // Based on the condition analysis, compile the test as necessary.
+  switch (info) {
+    case ALWAYS_TRUE:
+      // We will not compile the test expression.  Label the top of the
+      // loop.
+      if (node->next() == NULL) {
+        // Use the continue target if there is no update expression.
+        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+        node->continue_target()->Bind();
+      } else {
+        // Otherwise use the backward loop target.
+        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+        loop.Bind();
+      }
+      break;
+    case DONT_KNOW: {
+      if (test_at_bottom) {
+        // Continue is either the update expression or the test at the
+        // bottom, no need to label the test at the top.
+        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+      } else if (node->next() == NULL) {
+        // We are not recompiling the test at the bottom and there is no
+        // update expression.
+        node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+        node->continue_target()->Bind();
+      } else {
+        // We are not recompiling the test at the bottom and there is an
+        // update expression.
+        node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+        loop.Bind();
+      }
+
+      // Compile the test with the body as the true target and preferred
+      // fall-through and with the break target as the false target.
+      ControlDestination dest(&body, node->break_target(), true);
+      LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+
+      if (dest.false_was_fall_through()) {
+        // If we got the break target as fall-through, the test may have
+        // been unconditionally false (if there are no jumps to the
+        // body).
+        if (!body.is_linked()) {
+          DecrementLoopNesting();
+          return;
+        }
+
+        // Otherwise, jump around the body on the fall through and then
+        // bind the body target.
+        node->break_target()->Unuse();
+        node->break_target()->Jump();
+        body.Bind();
+      }
+      break;
+    }
+    case ALWAYS_FALSE:
+      UNREACHABLE();
+      break;
+  }
+
+  CheckStack();  // TODO(1222600): ignore if body contains calls.
+  Visit(node->body());
+
+  // If there is an update expression, compile it if necessary.
+  if (node->next() != NULL) {
+    if (node->continue_target()->is_linked()) {
+      node->continue_target()->Bind();
+    }
+
+    // Control can reach the update by falling out of the body or by a
+    // continue.
+    if (has_valid_frame()) {
+      // Record the source position of the statement as this code which
+      // is after the code for the body actually belongs to the loop
+      // statement and not the body.
+      CodeForStatementPosition(node);
+      Visit(node->next());
+    }
+  }
+
+  // Based on the condition analysis, compile the backward jump as
+  // necessary.
+  switch (info) {
+    case ALWAYS_TRUE:
+      if (has_valid_frame()) {
+        if (node->next() == NULL) {
+          node->continue_target()->Jump();
+        } else {
+          loop.Jump();
+        }
+      }
+      break;
+    case DONT_KNOW:
+      if (test_at_bottom) {
+        if (node->continue_target()->is_linked()) {
+          // We can have dangling jumps to the continue target if there
+          // was no update expression.
+          node->continue_target()->Bind();
+        }
+        // Control can reach the test at the bottom by falling out of
+        // the body, by a continue in the body, or from the update
+        // expression.
+        if (has_valid_frame()) {
+          // The break target is the fall-through (body is a backward
+          // jump from here).
+          ControlDestination dest(&body, node->break_target(), false);
+          LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
+        }
+      } else {
+        // Otherwise, jump back to the test at the top.
         if (has_valid_frame()) {
           if (node->next() == NULL) {
             node->continue_target()->Jump();
@@ -1580,47 +1628,19 @@
             loop.Jump();
           }
         }
-      } else {
-        ASSERT(info == DONT_KNOW);  // ALWAYS_FALSE cannot reach here.
-        if (test_at_bottom) {
-          if (node->continue_target()->is_linked()) {
-            // We can have dangling jumps to the continue target if
-            // there was no update expression.
-            node->continue_target()->Bind();
-          }
-          // Control can reach the test at the bottom by falling out
-          // of the body, by a continue in the body, or from the
-          // update expression.
-          if (has_valid_frame()) {
-            // The break target is the fall-through (body is a
-            // backward jump from here).
-            ControlDestination dest(&body, node->break_target(), false);
-            LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true);
-          }
-        } else {
-          // Otherwise, jump back to the test at the top.
-          if (has_valid_frame()) {
-            if (node->next() == NULL) {
-              node->continue_target()->Jump();
-            } else {
-              loop.Jump();
-            }
-          }
-        }
-      }
-
-      // The break target may be already bound (by the condition), or
-      // there may not be a valid frame.  Bind it only if needed.
-      if (node->break_target()->is_linked()) {
-        node->break_target()->Bind();
       }
       break;
-    }
+    case ALWAYS_FALSE:
+      UNREACHABLE();
+      break;
   }
 
+  // The break target may be already bound (by the condition), or there
+  // may not be a valid frame.  Bind it only if needed.
+  if (node->break_target()->is_linked()) {
+    node->break_target()->Bind();
+  }
   DecrementLoopNesting();
-  node->continue_target()->Unuse();
-  node->break_target()->Unuse();
 }
 
 
@@ -1700,19 +1720,19 @@
   __ movl(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
   __ Integer32ToSmi(rax, rax);
   frame_->EmitPush(rax);  // <- slot 1
-  frame_->EmitPush(Immediate(Smi::FromInt(0)));  // <- slot 0
+  frame_->EmitPush(Smi::FromInt(0));  // <- slot 0
   entry.Jump();
 
   fixed_array.Bind();
   // rax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
-  frame_->EmitPush(Immediate(Smi::FromInt(0)));  // <- slot 3
+  frame_->EmitPush(Smi::FromInt(0));  // <- slot 3
   frame_->EmitPush(rax);  // <- slot 2
 
   // Push the length of the array and the initial index onto the stack.
   __ movl(rax, FieldOperand(rax, FixedArray::kLengthOffset));
   __ Integer32ToSmi(rax, rax);
   frame_->EmitPush(rax);  // <- slot 1
-  frame_->EmitPush(Immediate(Smi::FromInt(0)));  // <- slot 0
+  frame_->EmitPush(Smi::FromInt(0));  // <- slot 0
 
   // Condition.
   entry.Bind();
@@ -1722,8 +1742,8 @@
   node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
 
   __ movq(rax, frame_->ElementAt(0));  // load the current count
-  __ cmpl(rax, frame_->ElementAt(1));  // compare to the array length
-  node->break_target()->Branch(above_equal);
+  __ SmiCompare(frame_->ElementAt(1), rax);  // compare to the array length
+  node->break_target()->Branch(below_equal);
 
   // Get the i'th entry of the array.
   __ movq(rdx, frame_->ElementAt(2));
@@ -1796,7 +1816,7 @@
   node->continue_target()->Bind();
   frame_->SpillAll();
   frame_->EmitPop(rax);
-  __ addq(rax, Immediate(Smi::FromInt(1)));
+  __ SmiAddConstant(rax, rax, Smi::FromInt(1));
   frame_->EmitPush(rax);
   entry.Jump();
 
@@ -1812,10 +1832,10 @@
   node->break_target()->Unuse();
 }
 
-void CodeGenerator::VisitTryCatch(TryCatch* node) {
+void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
   ASSERT(!in_spilled_code());
   VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ TryCatch");
+  Comment cmnt(masm_, "[ TryCatchStatement");
   CodeForStatementPosition(node);
 
   JumpTarget try_block;
@@ -1951,10 +1971,10 @@
 }
 
 
-void CodeGenerator::VisitTryFinally(TryFinally* node) {
+void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
   ASSERT(!in_spilled_code());
   VirtualFrame::SpilledScope spilled_scope;
-  Comment cmnt(masm_, "[ TryFinally");
+  Comment cmnt(masm_, "[ TryFinallyStatement");
   CodeForStatementPosition(node);
 
   // State: Used to keep track of reason for entering the finally
@@ -1969,7 +1989,7 @@
 
   frame_->EmitPush(rax);
   // In case of thrown exceptions, this is where we continue.
-  __ movq(rcx, Immediate(Smi::FromInt(THROWING)));
+  __ Move(rcx, Smi::FromInt(THROWING));
   finally_block.Jump();
 
   // --- Try block ---
@@ -2028,7 +2048,7 @@
     // Fake a top of stack value (unneeded when FALLING) and set the
     // state in ecx, then jump around the unlink blocks if any.
     frame_->EmitPush(Heap::kUndefinedValueRootIndex);
-    __ movq(rcx, Immediate(Smi::FromInt(FALLING)));
+    __ Move(rcx, Smi::FromInt(FALLING));
     if (nof_unlinks > 0) {
       finally_block.Jump();
     }
@@ -2074,7 +2094,7 @@
         // Fake TOS for targets that shadowed breaks and continues.
         frame_->EmitPush(Heap::kUndefinedValueRootIndex);
       }
-      __ movq(rcx, Immediate(Smi::FromInt(JUMPING + i)));
+      __ Move(rcx, Smi::FromInt(JUMPING + i));
       if (--nof_unlinks > 0) {
         // If this is not the last unlink block, jump around the next.
         finally_block.Jump();
@@ -2105,7 +2125,7 @@
   for (int i = 0; i < shadows.length(); i++) {
     if (has_valid_frame() && shadows[i]->is_bound()) {
       BreakTarget* original = shadows[i]->other_target();
-      __ cmpq(rcx, Immediate(Smi::FromInt(JUMPING + i)));
+      __ SmiCompare(rcx, Smi::FromInt(JUMPING + i));
       if (i == kReturnShadowIndex) {
         // The return value is (already) in rax.
         Result return_value = allocator_->Allocate(rax);
@@ -2130,7 +2150,7 @@
   if (has_valid_frame()) {
     // Check if we need to rethrow the exception.
     JumpTarget exit;
-    __ cmpq(rcx, Immediate(Smi::FromInt(THROWING)));
+    __ SmiCompare(rcx, Smi::FromInt(THROWING));
     exit.Branch(not_equal);
 
     // Rethrow exception.
@@ -2278,7 +2298,7 @@
   // Literal array (0).
   __ push(literals_);
   // Literal index (1).
-  __ push(Immediate(Smi::FromInt(node_->literal_index())));
+  __ Push(Smi::FromInt(node_->literal_index()));
   // RegExp pattern (2).
   __ Push(node_->pattern());
   // RegExp flags (3).
@@ -2351,7 +2371,7 @@
   // Literal array (0).
   __ push(literals_);
   // Literal index (1).
-  __ push(Immediate(Smi::FromInt(node_->literal_index())));
+  __ Push(Smi::FromInt(node_->literal_index()));
   // Constant properties (2).
   __ Push(node_->constant_properties());
   __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
@@ -2484,7 +2504,7 @@
   // Literal array (0).
   __ push(literals_);
   // Literal index (1).
-  __ push(Immediate(Smi::FromInt(node_->literal_index())));
+  __ Push(Smi::FromInt(node_->literal_index()));
   // Constant properties (2).
   __ Push(node_->literals());
   __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
@@ -3072,8 +3092,8 @@
 
       case Token::SUB: {
         bool overwrite =
-            (node->AsBinaryOperation() != NULL &&
-             node->AsBinaryOperation()->ResultOverwriteAllowed());
+          (node->expression()->AsBinaryOperation() != NULL &&
+           node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
         UnarySubStub stub(overwrite);
         // TODO(1222589): remove dependency of TOS being cached inside stub
         Result operand = frame_->Pop();
@@ -3151,7 +3171,7 @@
   __ push(dst_);
   __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
   __ push(rax);
-  __ push(Immediate(Smi::FromInt(1)));
+  __ Push(Smi::FromInt(1));
   if (is_increment_) {
     __ CallRuntime(Runtime::kNumberAdd, 2);
   } else {
@@ -3191,7 +3211,7 @@
 
   // Call the runtime for the addition or subtraction.
   __ push(rax);
-  __ push(Immediate(Smi::FromInt(1)));
+  __ Push(Smi::FromInt(1));
   if (is_increment_) {
     __ CallRuntime(Runtime::kNumberAdd, 2);
   } else {
@@ -3249,15 +3269,18 @@
                                                   is_increment);
     }
 
-    __ movq(kScratchRegister, new_value.reg());
+    __ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
     if (is_increment) {
-      __ addl(kScratchRegister, Immediate(Smi::FromInt(1)));
+      __ SmiAddConstant(kScratchRegister,
+                        new_value.reg(),
+                        Smi::FromInt(1),
+                        deferred->entry_label());
     } else {
-      __ subl(kScratchRegister, Immediate(Smi::FromInt(1)));
+      __ SmiSubConstant(kScratchRegister,
+                        new_value.reg(),
+                        Smi::FromInt(1),
+                        deferred->entry_label());
     }
-    // Smi test.
-    deferred->Branch(overflow);
-    __ JumpIfNotSmi(kScratchRegister, deferred->entry_label());
     __ movq(new_value.reg(), kScratchRegister);
     deferred->BindExit();
 
@@ -3634,15 +3657,15 @@
 
   // Skip the arguments adaptor frame if it exists.
   Label check_frame_marker;
-  __ cmpq(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
-          Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
+                Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ j(not_equal, &check_frame_marker);
   __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
 
   // Check the marker in the calling frame.
   __ bind(&check_frame_marker);
-  __ cmpq(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
-          Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
+  __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
+                Smi::FromInt(StackFrame::CONSTRUCT));
   fp.Unuse();
   destination()->Split(equal);
 }
@@ -3878,7 +3901,7 @@
 void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 0);
   // RBP value is aligned, so it should be tagged as a smi (without necessarily
-  // being padded as a smi).
+  // being padded as a smi), but it should not be treated as a real smi.
   ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
   Result rbp_as_smi = allocator_->Allocate();
   ASSERT(rbp_as_smi.is_valid());
@@ -4262,8 +4285,8 @@
   dest->false_target()->Branch(equal);
 
   // Smi => false iff zero.
-  Condition equals = masm_->CheckSmiEqualsConstant(value.reg(), 0);
-  dest->false_target()->Branch(equals);
+  __ SmiCompare(value.reg(), Smi::FromInt(0));
+  dest->false_target()->Branch(equal);
   Condition is_smi = masm_->CheckSmi(value.reg());
   dest->true_target()->Branch(is_smi);
 
@@ -4945,7 +4968,7 @@
       right_side = Result(right_val);
       // Test smi equality and comparison by signed int comparison.
       // Both sides are smis, so we can use an Immediate.
-      __ cmpl(left_side.reg(), Immediate(Smi::cast(*right_side.handle())));
+      __ SmiCompare(left_side.reg(), Smi::cast(*right_side.handle()));
       left_side.Unuse();
       right_side.Unuse();
       dest->Split(cc);
@@ -4978,7 +5001,7 @@
       Result temp = allocator()->Allocate();
       ASSERT(temp.is_valid());
       __ movq(temp.reg(),
-             FieldOperand(operand.reg(), HeapObject::kMapOffset));
+              FieldOperand(operand.reg(), HeapObject::kMapOffset));
       __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
                Immediate(1 << Map::kIsUndetectable));
       temp.Unuse();
@@ -4998,7 +5021,7 @@
       CompareStub stub(cc, strict);
       Result answer = frame_->CallStub(&stub, &left_side, &right_side);
       // The result is a Smi, which is negative, zero, or positive.
-      __ testl(answer.reg(), answer.reg());  // Both zero and sign flag right.
+      __ SmiTest(answer.reg());  // Sets both zero and sign flag.
       answer.Unuse();
       dest->Split(cc);
     } else {
@@ -5016,7 +5039,7 @@
       // When non-smi, call out to the compare stub.
       CompareStub stub(cc, strict);
       Result answer = frame_->CallStub(&stub, &left_side, &right_side);
-      __ testl(answer.reg(), answer.reg());  // Sets both zero and sign flags.
+      __ SmiTest(answer.reg());  // Sets both zero and sign flags.
       answer.Unuse();
       dest->true_target()->Branch(cc);
       dest->false_target()->Jump();
@@ -5024,7 +5047,7 @@
       is_smi.Bind();
       left_side = Result(left_reg);
       right_side = Result(right_reg);
-      __ cmpl(left_side.reg(), right_side.reg());
+      __ SmiCompare(left_side.reg(), right_side.reg());
       right_side.Unuse();
       left_side.Unuse();
       dest->Split(cc);
@@ -5221,7 +5244,7 @@
 
 void DeferredInlineSmiAdd::Generate() {
   __ push(dst_);
-  __ push(Immediate(value_));
+  __ Push(value_);
   GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
   __ CallStub(&igostub);
   if (!dst_.is(rax)) __ movq(dst_, rax);
@@ -5229,7 +5252,7 @@
 
 
 void DeferredInlineSmiAddReversed::Generate() {
-  __ push(Immediate(value_));  // Note: sign extended.
+  __ Push(value_);
   __ push(dst_);
   GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
   __ CallStub(&igostub);
@@ -5239,7 +5262,7 @@
 
 void DeferredInlineSmiSub::Generate() {
   __ push(dst_);
-  __ push(Immediate(value_));  // Note: sign extended.
+  __ Push(value_);
   GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
   __ CallStub(&igostub);
   if (!dst_.is(rax)) __ movq(dst_, rax);
@@ -5248,7 +5271,7 @@
 
 void DeferredInlineSmiOperation::Generate() {
   __ push(src_);
-  __ push(Immediate(value_));  // Note: sign extended.
+  __ Push(value_);
   // For mod we don't generate all the Smi code inline.
   GenericBinaryOpStub stub(
       op_,
@@ -5306,7 +5329,7 @@
       __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
       __ SmiAddConstant(operand->reg(),
                         operand->reg(),
-                        int_value,
+                        smi_value,
                         deferred->entry_label());
       deferred->BindExit();
       frame_->Push(operand);
@@ -5328,7 +5351,7 @@
         // A smi currently fits in a 32-bit Immediate.
         __ SmiSubConstant(operand->reg(),
                           operand->reg(),
-                          int_value,
+                          smi_value,
                           deferred->entry_label());
         deferred->BindExit();
         frame_->Push(operand);
@@ -5382,9 +5405,9 @@
                                            overwrite_mode);
         __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
         __ SmiShiftLogicalRightConstant(answer.reg(),
-                                      operand->reg(),
-                                      shift_value,
-                                      deferred->entry_label());
+                                        operand->reg(),
+                                        shift_value,
+                                        deferred->entry_label());
         deferred->BindExit();
         operand->Unuse();
         frame_->Push(&answer);
@@ -5453,15 +5476,15 @@
                                                                overwrite_mode);
       __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
       if (op == Token::BIT_AND) {
-        __ SmiAndConstant(operand->reg(), operand->reg(), int_value);
+        __ SmiAndConstant(operand->reg(), operand->reg(), smi_value);
       } else if (op == Token::BIT_XOR) {
         if (int_value != 0) {
-          __ SmiXorConstant(operand->reg(), operand->reg(), int_value);
+          __ SmiXorConstant(operand->reg(), operand->reg(), smi_value);
         }
       } else {
         ASSERT(op == Token::BIT_OR);
         if (int_value != 0) {
-          __ SmiOrConstant(operand->reg(), operand->reg(), int_value);
+          __ SmiOrConstant(operand->reg(), operand->reg(), smi_value);
         }
       }
       deferred->BindExit();
@@ -5476,18 +5499,21 @@
           (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
         operand->ToRegister();
         frame_->Spill(operand->reg());
-        DeferredCode* deferred = new DeferredInlineSmiOperation(op,
-                                                                operand->reg(),
-                                                                operand->reg(),
-                                                                smi_value,
-                                                                overwrite_mode);
+        DeferredCode* deferred =
+            new DeferredInlineSmiOperation(op,
+                                           operand->reg(),
+                                           operand->reg(),
+                                           smi_value,
+                                           overwrite_mode);
         // Check for negative or non-Smi left hand side.
         __ JumpIfNotPositiveSmi(operand->reg(), deferred->entry_label());
         if (int_value < 0) int_value = -int_value;
         if (int_value == 1) {
-          __ movl(operand->reg(), Immediate(Smi::FromInt(0)));
+          __ Move(operand->reg(), Smi::FromInt(0));
         } else {
-          __ SmiAndConstant(operand->reg(), operand->reg(), int_value - 1);
+          __ SmiAndConstant(operand->reg(),
+                            operand->reg(),
+                            Smi::FromInt(int_value - 1));
         }
         deferred->BindExit();
         frame_->Push(operand);
@@ -6085,8 +6111,6 @@
 
         // Check that the key is a non-negative smi.
         __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
-        // Ensure that the smi is zero-extended.  This is not guaranteed.
-        __ movl(key.reg(), key.reg());
 
         // Check that the receiver is not a smi.
         __ JumpIfSmi(receiver.reg(), deferred->entry_label());
@@ -6096,10 +6120,10 @@
         deferred->Branch(not_equal);
 
         // Check that the key is within bounds.  Both the key and the
-        // length of the JSArray are smis, so compare only low 32 bits.
-        __ cmpl(key.reg(),
-                FieldOperand(receiver.reg(), JSArray::kLengthOffset));
-        deferred->Branch(greater_equal);
+        // length of the JSArray are smis.
+        __ SmiCompare(FieldOperand(receiver.reg(), JSArray::kLengthOffset),
+                      key.reg());
+        deferred->Branch(less_equal);
 
         // Get the elements array from the receiver and check that it
         // is a flat array (not a dictionary).
@@ -6303,22 +6327,17 @@
   Label slow;
   Label done;
   Label try_float;
-  Label special;
   // Check whether the value is a smi.
   __ JumpIfNotSmi(rax, &try_float);
 
   // Enter runtime system if the value of the smi is zero
   // to make sure that we switch between 0 and -0.
-  // Also enter it if the value of the smi is Smi::kMinValue
-  __ testl(rax, Immediate(0x7FFFFFFE));
-  __ j(zero, &special);
-  __ negl(rax);
-  __ jmp(&done);
+  // Also enter it if the value of the smi is Smi::kMinValue.
+  __ SmiNeg(rax, rax, &done);
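+  // SmiNeg jumps to the label when the negated value is a valid smi; it
+  // falls through for 0 and Smi::kMinValue, which are handled below.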
 
-  __ bind(&special);
-  // Either zero or -0x4000000, neither of which become a smi when negated.
-  __ testl(rax, rax);
-  __ j(not_zero, &slow);
+  // Either zero or Smi::kMinValue, neither of which becomes a smi when negated.
+  __ SmiCompare(rax, Smi::FromInt(0));
+  __ j(not_equal, &slow);
   __ Move(rax, Factory::minus_zero_value());
   __ jmp(&done);
 
@@ -6470,7 +6489,7 @@
   // Call builtin if operands are not floating point or smi.
   Label check_for_symbols;
   // Push arguments on stack, for helper functions.
-  FloatingPointHelper::CheckFloatOperands(masm, &check_for_symbols);
+  FloatingPointHelper::CheckNumberOperands(masm, &check_for_symbols);
   FloatingPointHelper::LoadFloatOperands(masm, rax, rdx);
   __ FCmp();
 
@@ -6527,7 +6546,7 @@
       ASSERT(cc_ == greater || cc_ == greater_equal);  // remaining cases
       ncr = LESS;
     }
-    __ push(Immediate(Smi::FromInt(ncr)));
+    __ Push(Smi::FromInt(ncr));
   }
 
   // Restore return address on the stack.
@@ -6626,7 +6645,7 @@
   __ ret(2 * kPointerSize);
 
   __ bind(&is_not_instance);
-  __ movq(rax, Immediate(Smi::FromInt(1)));
+  __ Move(rax, Smi::FromInt(1));
   __ ret(2 * kPointerSize);
 
   // Slow-case: Go through the JavaScript implementation.
@@ -6644,8 +6663,8 @@
   // Check if the calling frame is an arguments adaptor frame.
   Label runtime;
   __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
-  __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
+                Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ j(not_equal, &runtime);
   // Value in rcx is Smi encoded.
 
@@ -6678,8 +6697,8 @@
   // Check if the calling frame is an arguments adaptor frame.
   Label adaptor;
   __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ movq(rcx, Operand(rbx, StandardFrameConstants::kContextOffset));
-  __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset),
+                Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ j(equal, &adaptor);
 
   // Check index against formal parameters count limit passed in
@@ -6726,8 +6745,8 @@
   // Check if the calling frame is an arguments adaptor frame.
   Label adaptor;
   __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
-  __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
+                Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   __ j(equal, &adaptor);
 
   // Nothing to do: The formal number of parameters has already been
@@ -7069,8 +7088,8 @@
 
   // Push the stack frame type marker twice.
   int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
-  __ push(Immediate(Smi::FromInt(marker)));  // context slot
-  __ push(Immediate(Smi::FromInt(marker)));  // function slot
+  __ Push(Smi::FromInt(marker));  // context slot
+  __ Push(Smi::FromInt(marker));  // function slot
   // Save callee-saved registers (X64 calling conventions).
   __ push(r12);
   __ push(r13);
@@ -7182,7 +7201,7 @@
   // must be inserted below the return address on the stack so we
   // temporarily store that in a register.
   __ pop(rax);
-  __ push(Immediate(Smi::FromInt(0)));
+  __ Push(Smi::FromInt(0));
   __ push(rax);
 
   // Do tail-call to runtime routine.
@@ -7321,8 +7340,8 @@
 }
 
 
-void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
-                                             Label* non_float) {
+void FloatingPointHelper::CheckNumberOperands(MacroAssembler* masm,
+                                              Label* non_float) {
   Label test_other, done;
   // Test if both operands are numbers (heap numbers or smis).
   // If not, jump to label non_float.
@@ -7403,17 +7422,17 @@
     case Token::SHR:
     case Token::SAR:
       // Move the second operand into register ecx.
-      __ movl(rcx, rbx);
+      __ movq(rcx, rbx);
       // Perform the operation.
       switch (op_) {
         case Token::SAR:
-          __ SmiShiftArithmeticRight(rax, rax, rbx);
+          __ SmiShiftArithmeticRight(rax, rax, rcx);
           break;
         case Token::SHR:
-          __ SmiShiftLogicalRight(rax, rax, rbx, slow);
+          __ SmiShiftLogicalRight(rax, rax, rcx, slow);
           break;
         case Token::SHL:
-          __ SmiShiftLeft(rax, rax, rbx, slow);
+          __ SmiShiftLeft(rax, rax, rcx, slow);
           break;
         default:
           UNREACHABLE();
@@ -7454,7 +7473,7 @@
     case Token::DIV: {
       // rax: y
       // rdx: x
-      FloatingPointHelper::CheckFloatOperands(masm, &call_runtime);
+      FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
       // Fast-case: Both operands are numbers.
       // Allocate a heap number, if needed.
       Label skip_allocation;
@@ -7499,7 +7518,7 @@
     case Token::SAR:
     case Token::SHL:
     case Token::SHR: {
-      FloatingPointHelper::CheckFloatOperands(masm, &call_runtime);
+      FloatingPointHelper::CheckNumberOperands(masm, &call_runtime);
       // TODO(X64): Don't convert a Smi to float and then back to int32
       // afterwards.
       FloatingPointHelper::LoadFloatOperands(masm);
@@ -7553,29 +7572,27 @@
       __ pop(rcx);
       __ pop(rax);
       switch (op_) {
-        case Token::BIT_OR:  __ or_(rax, rcx); break;
-        case Token::BIT_AND: __ and_(rax, rcx); break;
-        case Token::BIT_XOR: __ xor_(rax, rcx); break;
+        case Token::BIT_OR:  __ orl(rax, rcx); break;
+        case Token::BIT_AND: __ andl(rax, rcx); break;
+        case Token::BIT_XOR: __ xorl(rax, rcx); break;
         case Token::SAR: __ sarl(rax); break;
         case Token::SHL: __ shll(rax); break;
         case Token::SHR: __ shrl(rax); break;
         default: UNREACHABLE();
       }
       if (op_ == Token::SHR) {
-        // Check if result is non-negative and fits in a smi.
-        __ testl(rax, Immediate(0xc0000000));
-        __ j(not_zero, &non_smi_result);
-      } else {
-        // Check if result fits in a smi.
-        __ cmpl(rax, Immediate(0xc0000000));
+        // Check that the result is non-negative. A negative result can only
+        // come from a shift by zero, which doesn't itself set the sign flag.
+        __ testl(rax, rax);
         __ j(negative, &non_smi_result);
       }
-      // Tag smi result and return.
+      __ JumpIfNotValidSmiValue(rax, &non_smi_result);
+      // Tag smi result, if possible, and return.
       __ Integer32ToSmi(rax, rax);
       __ ret(2 * kPointerSize);
 
       // All ops except SHR return a signed int32 that we load in a HeapNumber.
-      if (op_ != Token::SHR) {
+      if (op_ != Token::SHR && non_smi_result.is_linked()) {
         __ bind(&non_smi_result);
         // Allocate a heap number if needed.
         __ movsxlq(rbx, rax);  // rbx: sign extended 32-bit result
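
A note on the reworked result check above: with smis now carrying 32 bits of payload, every signed int32 result of a bit operation can be tagged, so only SHR, whose result is an unsigned 32-bit value, can produce something that does not fit. A minimal standalone sketch of the condition (illustrative only, not part of the patch):

    #include <cstdint>

    // A uint32 SHR result fits in a smi payload unless its sign bit is
    // set, i.e. unless it is >= 2^31 and thus not a valid int32.
    bool FitsInSmi(uint32_t shr_result) {
      return static_cast<int32_t>(shr_result) >= 0;
    }
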
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 87db3a9..5fa6583 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -294,6 +294,15 @@
                                Handle<Script> script,
                                bool is_eval);
 
+  // Printing of AST, etc. as requested by flags.
+  static void MakeCodePrologue(FunctionLiteral* fun);
+
+  // Allocate and install the code.
+  static Handle<Code> MakeCodeEpilogue(FunctionLiteral* fun,
+                                       MacroAssembler* masm,
+                                       Code::Flags flags,
+                                       Handle<Script> script);
+
 #ifdef ENABLE_LOGGING_AND_PROFILING
   static bool ShouldGenerateLog(Expression* type);
 #endif
@@ -303,6 +312,8 @@
                               bool is_toplevel,
                               Handle<Script> script);
 
+  static void RecordPositions(MacroAssembler* masm, int pos);
+
   // Accessors
   MacroAssembler* masm() { return masm_; }
 
@@ -548,6 +559,14 @@
   inline void GenerateMathSin(ZoneList<Expression*>* args);
   inline void GenerateMathCos(ZoneList<Expression*>* args);
 
+  // Simple condition analysis.
+  enum ConditionAnalysis {
+    ALWAYS_TRUE,
+    ALWAYS_FALSE,
+    DONT_KNOW
+  };
+  ConditionAnalysis AnalyzeCondition(Expression* cond);
+
   // Methods used to indicate which source code the generated code is for. Source
   // positions are collected by the assembler and emitted with the relocation
   // information.
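
The ConditionAnalysis enum above feeds a simple constant-folding pass over loop and branch conditions. A self-contained sketch of the idea, with illustrative stand-in types (the real AnalyzeCondition works on V8's Expression/Literal AST nodes):

    #include <cassert>

    enum ConditionAnalysis { ALWAYS_TRUE, ALWAYS_FALSE, DONT_KNOW };

    struct Expr { bool is_literal; bool literal_value; };

    ConditionAnalysis AnalyzeCondition(const Expr* cond) {
      if (cond == nullptr) return ALWAYS_TRUE;  // e.g. for (;;) has no condition
      if (!cond->is_literal) return DONT_KNOW;  // cannot decide statically
      return cond->literal_value ? ALWAYS_TRUE : ALWAYS_FALSE;
    }

    int main() {
      Expr always_true = { true, true };
      assert(AnalyzeCondition(&always_true) == ALWAYS_TRUE);
      assert(AnalyzeCondition(nullptr) == ALWAYS_TRUE);
    }
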
diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc
index 10092c5..49240b4 100644
--- a/src/x64/debug-x64.cc
+++ b/src/x64/debug-x64.cc
@@ -39,10 +39,7 @@
 
 bool Debug::IsDebugBreakAtReturn(v8::internal::RelocInfo* rinfo) {
   ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
-  // 11th byte of patch is 0x49 (REX.WB byte of computed jump/call to r10),
-  // 11th byte of JS return is 0xCC (int3).
-  ASSERT(*(rinfo->pc() + 10) == 0x49 || *(rinfo->pc() + 10) == 0xCC);
-  return (*(rinfo->pc() + 10) != 0xCC);
+  return rinfo->IsPatchedReturnSequence();
 }
 
 #define __ ACCESS_MASM(masm)
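
The byte-level test that IsPatchedReturnSequence replaces is worth recording: the deleted assert relied on the 11th byte distinguishing the two sequences. A sketch of the old check, with byte values taken from the removed comment:

    // 11th byte is 0xCC (int3 padding) for an unpatched JS return and
    // 0x49 (REX.WB of the computed call through r10) once patched.
    bool LooksPatched(const unsigned char* pc) {
      return pc[10] != 0xCC;
    }
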
diff --git a/src/x64/fast-codegen-x64.cc b/src/x64/fast-codegen-x64.cc
new file mode 100644
index 0000000..c433836
--- /dev/null
+++ b/src/x64/fast-codegen-x64.cc
@@ -0,0 +1,181 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "debug.h"
+#include "fast-codegen.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+// Generate code for a JS function.  On entry to the function the receiver
+// and arguments have been pushed on the stack left to right, with the
+// return address on top of them.  The actual argument count matches the
+// formal parameter count expected by the function.
+//
+// The live registers are:
+//   o rdi: the JS function object being called (i.e., ourselves)
+//   o rsi: our context
+//   o rbp: our caller's frame pointer
+//   o rsp: stack pointer (pointing to return address)
+//
+// The function builds a JS frame.  Please see JavaScriptFrameConstants in
+// frames-x64.h for its layout.
+void FastCodeGenerator::Generate(FunctionLiteral* fun) {
+  function_ = fun;
+  SetFunctionPosition(fun);
+
+  __ push(rbp);  // Caller's frame pointer.
+  __ movq(rbp, rsp);
+  __ push(rsi);  // Callee's context.
+  __ push(rdi);  // Callee's JS Function.
+
+  { Comment cmnt(masm_, "[ Allocate locals");
+    int locals_count = fun->scope()->num_stack_slots();
+    for (int i = 0; i < locals_count; i++) {
+      __ PushRoot(Heap::kUndefinedValueRootIndex);
+    }
+  }
+
+  { Comment cmnt(masm_, "[ Stack check");
+    Label ok;
+    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+    __ j(above_equal, &ok);
+    StackCheckStub stub;
+    __ CallStub(&stub);
+    __ bind(&ok);
+  }
+
+  { Comment cmnt(masm_, "[ Body");
+    VisitStatements(fun->body());
+  }
+
+  { Comment cmnt(masm_, "[ return <undefined>;");
+    // Emit a 'return undefined' in case control fell off the end of the
+    // body.
+    __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
+    SetReturnPosition(fun);
+    __ RecordJSReturn();
+    // Do not use the leave instruction here because it is too short to
+    // patch with the code required by the debugger.
+    __ movq(rsp, rbp);
+    __ pop(rbp);
+    __ ret((fun->scope()->num_parameters() + 1) * kPointerSize);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+    // Add padding that will be overwritten by a debugger breakpoint.  We
+    // have just generated "movq rsp, rbp; pop rbp; ret k" with length 7
+    // (3 + 1 + 3).
+    const int kPadding = Debug::kX64JSReturnSequenceLength - 7;
+    for (int i = 0; i < kPadding; ++i) {
+      masm_->int3();
+    }
+#endif
+  }
+}
+
+
+void FastCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
+  Comment cmnt(masm_, "[ ExpressionStatement");
+  SetStatementPosition(stmt);
+  Visit(stmt->expression());
+}
+
+
+void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
+  Comment cmnt(masm_, "[ ReturnStatement");
+  SetStatementPosition(stmt);
+  Visit(stmt->expression());
+  __ pop(rax);
+  __ RecordJSReturn();
+  // Do not use the leave instruction here because it is too short to
+  // patch with the code required by the debugger.
+  __ movq(rsp, rbp);
+  __ pop(rbp);
+  __ ret((function_->scope()->num_parameters() + 1) * kPointerSize);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+  // Add padding that will be overwritten by a debugger breakpoint.  We
+  // have just generated "movq rsp, rbp; pop rbp; ret k" with length 7
+  // (3 + 1 + 3).
+  const int kPadding = Debug::kX64JSReturnSequenceLength - 7;
+  for (int i = 0; i < kPadding; ++i) {
+    masm_->int3();
+  }
+#endif
+}
+
+
+void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+  Comment cmnt(masm_, "[ VariableProxy");
+  Expression* rewrite = expr->var()->rewrite();
+  ASSERT(rewrite != NULL);
+
+  Slot* slot = rewrite->AsSlot();
+  ASSERT(slot != NULL);
+  { Comment cmnt(masm_, "[ Slot");
+    if (expr->location().is_temporary()) {
+      __ push(Operand(rbp, SlotOffset(slot)));
+    } else {
+      ASSERT(expr->location().is_nowhere());
+    }
+  }
+}
+
+
+void FastCodeGenerator::VisitLiteral(Literal* expr) {
+  Comment cmnt(masm_, "[ Literal");
+  if (expr->location().is_temporary()) {
+    __ Push(expr->handle());
+  } else {
+    ASSERT(expr->location().is_nowhere());
+  }
+}
+
+
+void FastCodeGenerator::VisitAssignment(Assignment* expr) {
+  Comment cmnt(masm_, "[ Assignment");
+  ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR);
+
+  Visit(expr->value());
+
+  Variable* var = expr->target()->AsVariableProxy()->AsVariable();
+  ASSERT(var != NULL && var->slot() != NULL);
+
+  if (expr->location().is_temporary()) {
+    __ movq(rax, Operand(rsp, 0));
+    __ movq(Operand(rbp, SlotOffset(var->slot())), rax);
+  } else {
+    ASSERT(expr->location().is_nowhere());
+    __ pop(Operand(rbp, SlotOffset(var->slot())));
+  }
+}
+
+
+} }  // namespace v8::internal
diff --git a/src/x64/frames-x64.h b/src/x64/frames-x64.h
index 5442be9..eefaa0a 100644
--- a/src/x64/frames-x64.h
+++ b/src/x64/frames-x64.h
@@ -31,9 +31,6 @@
 namespace v8 {
 namespace internal {
 
-// TODO(x64): This is a stub, mostly just a copy of the ia32 bit version.
-// This might all need to change to be correct for x64.
-
 static const int kNumRegs = 8;
 static const RegList kJSCallerSaved =
     1 << 0 |  // rax
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 8209091..7108025 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -131,8 +131,8 @@
   // Check that the value is a normal property.
   __ bind(&done);
   const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  __ testl(Operand(r0, r1, times_pointer_size, kDetailsOffset - kHeapObjectTag),
-           Immediate(Smi::FromInt(PropertyDetails::TypeField::mask())));
+  __ Test(Operand(r0, r1, times_pointer_size, kDetailsOffset - kHeapObjectTag),
+          Smi::FromInt(PropertyDetails::TypeField::mask()));
   __ j(not_zero, miss_label);
 
   // Get the value at the masked, scaled index.
@@ -336,13 +336,13 @@
   __ testb(FieldOperand(rdx, Map::kInstanceTypeOffset),
            Immediate(kIsSymbolMask));
   __ j(zero, &slow);
-  // Probe the dictionary leaving result in ecx.
+  // Probe the dictionary leaving result in rcx.
   GenerateDictionaryLoad(masm, &slow, rbx, rcx, rdx, rax);
   GenerateCheckNonObjectOrLoaded(masm, &slow, rcx);
   __ movq(rax, rcx);
   __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
   __ ret(0);
-  // Array index string: If short enough use cache in length/hash field (ebx).
+  // Array index string: If short enough use cache in length/hash field (rbx).
   // We assert that there are enough bits in an int32_t after the hash shift
   // bits have been subtracted to allow space for the length and the cached
   // array index.
@@ -434,9 +434,6 @@
   __ movq(rbx, Operand(rsp, 1 * kPointerSize));  // 1 ~ return address
   // Check that the key is a smi.
   __ JumpIfNotSmi(rbx, &slow);
-  // If it is a smi, make sure it is zero-extended, so it can be
-  // used as an index in a memory operand.
-  __ movl(rbx, rbx);  // Clear the high bits of rbx.
 
   __ CmpInstanceType(rcx, JS_ARRAY_TYPE);
   __ j(equal, &array);
@@ -447,7 +444,7 @@
   // Object case: Check key against length in the elements array.
   // rax: value
   // rdx: JSObject
-  // rbx: index (as a smi), zero-extended.
+  // rbx: index (as a smi)
   __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
   // Check that the object is in fast mode (not dictionary).
   __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
@@ -488,14 +485,11 @@
   __ movq(rdx, rax);  // Save the value.
   __ SmiToInteger32(rax, rax);
   {  // Clamp the value to [0..255].
-    Label done, is_negative;
+    Label done;
     __ testl(rax, Immediate(0xFFFFFF00));
     __ j(zero, &done);
-    __ j(negative, &is_negative);
-    __ movl(rax, Immediate(255));
-    __ jmp(&done);
-    __ bind(&is_negative);
-    __ xorl(rax, rax);  // Clear rax.
+    __ setcc(negative, rax);  // 1 if negative, 0 if positive.
+    __ decb(rax);  // 0 if negative, 255 if positive.
     __ bind(&done);
   }
   __ movq(rcx, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
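
The setcc/decb pair above is a branch-free clamp to [0, 255]. A standalone sketch of the same computation, mirroring the emitted instructions:

    #include <cstdint>

    uint8_t ClampToByte(int32_t v) {
      if ((v & 0xFFFFFF00) == 0) return static_cast<uint8_t>(v);  // already 0..255
      uint8_t is_negative = (v < 0) ? 1 : 0;         // setcc(negative, rax)
      return static_cast<uint8_t>(is_negative - 1);  // decb: 0 or 255 (wraps)
    }
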
@@ -511,15 +505,15 @@
   // rdx: JSArray
   // rcx: FixedArray
   // rbx: index (as a smi)
-  // flags: compare (rbx, rdx.length())
+  // flags: smicompare (rdx.length(), rbx)
   __ j(not_equal, &slow);  // do not leave holes in the array
   __ SmiToInteger64(rbx, rbx);
   __ cmpl(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
   __ j(above_equal, &slow);
   // Increment and restore smi-tag.
-  __ Integer64AddToSmi(rbx, rbx, 1);
+  __ Integer64PlusConstantToSmi(rbx, rbx, 1);
   __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rbx);
-  __ SmiSubConstant(rbx, rbx, 1, NULL);
+  __ SmiSubConstant(rbx, rbx, Smi::FromInt(1));
   __ jmp(&fast);
 
   // Array case: Get the length and the elements array from the JS
@@ -530,25 +524,36 @@
   // rdx: JSArray
   // rbx: index (as a smi)
   __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
-  __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), Factory::fixed_array_map());
+  __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+                 Heap::kFixedArrayMapRootIndex);
   __ j(not_equal, &slow);
 
   // Check the key against the length in the array, compute the
   // address to store into and fall through to fast case.
-  __ cmpl(rbx, FieldOperand(rdx, JSArray::kLengthOffset));
-  __ j(above_equal, &extra);
+  __ SmiCompare(FieldOperand(rdx, JSArray::kLengthOffset), rbx);
+  __ j(below_equal, &extra);
 
   // Fast case: Do the store.
   __ bind(&fast);
   // rax: value
   // rcx: FixedArray
   // rbx: index (as a smi)
-  __ movq(Operand(rcx, rbx, times_half_pointer_size,
+  Label non_smi_value;
+  __ JumpIfNotSmi(rax, &non_smi_value);
+  SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
+  __ movq(Operand(rcx, index.reg, index.scale,
                   FixedArray::kHeaderSize - kHeapObjectTag),
-         rax);
+          rax);
+  __ ret(0);
+  __ bind(&non_smi_value);
+  // Slow case that needs to retain rbx for use by RecordWrite.
   // Update write barrier for the elements array address.
+  SmiIndex index2 = masm->SmiToIndex(kScratchRegister, rbx, kPointerSizeLog2);
+  __ movq(Operand(rcx, index2.reg, index2.scale,
+                  FixedArray::kHeaderSize - kHeapObjectTag),
+          rax);
   __ movq(rdx, rax);
-  __ RecordWrite(rcx, 0, rdx, rbx);
+  __ RecordWriteNonSmi(rcx, 0, rdx, rbx);
   __ ret(0);
 }
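
The fast path added above stores smi values without a write barrier: only heap pointers need remembered-set tracking, and a smi is a tagged immediate, not a pointer. A sketch of the store contract, with hypothetical helper names and tag values per kSmiTag == 0:

    #include <cstdint>

    void MarkRememberedSet(int64_t* elements, int index) { /* record the write */ }

    void StoreElement(int64_t* elements, int index, int64_t tagged_value) {
      elements[index] = tagged_value;
      if ((tagged_value & 1) == 0) return;  // smi tag is 0: no barrier needed
      MarkRememberedSet(elements, index);   // heap pointer: needs the barrier
    }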
 
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 2689e38..614f67b 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -38,16 +38,15 @@
 namespace internal {
 
 MacroAssembler::MacroAssembler(void* buffer, int size)
-  : Assembler(buffer, size),
-    unresolved_(0),
-    generating_stub_(false),
-    allow_stub_calls_(true),
-    code_object_(Heap::undefined_value()) {
+    : Assembler(buffer, size),
+      unresolved_(0),
+      generating_stub_(false),
+      allow_stub_calls_(true),
+      code_object_(Heap::undefined_value()) {
 }
 
 
-void MacroAssembler::LoadRoot(Register destination,
-                              Heap::RootListIndex index) {
+void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
   movq(destination, Operand(r13, index << kPointerSizeLog2));
 }
 
@@ -57,14 +56,12 @@
 }
 
 
-void MacroAssembler::CompareRoot(Register with,
-                                 Heap::RootListIndex index) {
+void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
   cmpq(with, Operand(r13, index << kPointerSizeLog2));
 }
 
 
-void MacroAssembler::CompareRoot(Operand with,
-                                 Heap::RootListIndex index) {
+void MacroAssembler::CompareRoot(Operand with, Heap::RootListIndex index) {
   LoadRoot(kScratchRegister, index);
   cmpq(with, kScratchRegister);
 }
@@ -144,9 +141,9 @@
 
   // Minor key encoding in 12 bits of three registers (object, address and
   // scratch) OOOOAAAASSSS.
-  class ScratchBits: public BitField<uint32_t, 0, 4> {};
-  class AddressBits: public BitField<uint32_t, 4, 4> {};
-  class ObjectBits: public BitField<uint32_t, 8, 4> {};
+  class ScratchBits : public BitField<uint32_t, 0, 4> {};
+  class AddressBits : public BitField<uint32_t, 4, 4> {};
+  class ObjectBits : public BitField<uint32_t, 8, 4> {};
 
   Major MajorKey() { return RecordWrite; }
 
@@ -167,33 +164,45 @@
 
 // Set the remembered set bit for [object+offset].
 // object is the object being stored into, value is the object being stored.
-// If offset is zero, then the scratch register contains the array index into
-// the elements array represented as a Smi.
+// If offset is zero, then the smi_index register contains the array index into
+// the elements array represented as a smi. Otherwise it can be used as a
+// scratch register.
 // All registers are clobbered by the operation.
 void MacroAssembler::RecordWrite(Register object,
                                  int offset,
                                  Register value,
-                                 Register scratch) {
+                                 Register smi_index) {
   // First, check if a remembered set write is even needed. The tests below
   // catch stores of Smis and stores into young gen (which does not have space
   // for the remembered set bits).
   Label done;
+  JumpIfSmi(value, &done);
 
+  RecordWriteNonSmi(object, offset, value, smi_index);
+  bind(&done);
+}
+
+
+void MacroAssembler::RecordWriteNonSmi(Register object,
+                                       int offset,
+                                       Register scratch,
+                                       Register smi_index) {
+  Label done;
   // Test that the object address is not in the new space.  We cannot
   // set remembered set bits in the new space.
-  movq(value, object);
+  movq(scratch, object);
   ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
-  and_(value, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
+  and_(scratch, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
   movq(kScratchRegister, ExternalReference::new_space_start());
-  cmpq(value, kScratchRegister);
+  cmpq(scratch, kScratchRegister);
   j(equal, &done);
 
   if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) {
     // Compute the bit offset in the remembered set, leave it in 'scratch'.
-    lea(value, Operand(object, offset));
+    lea(scratch, Operand(object, offset));
     ASSERT(is_int32(Page::kPageAlignmentMask));
-    and_(value, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask)));
-    shr(value, Immediate(kObjectAlignmentBits));
+    and_(scratch, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask)));
+    shr(scratch, Immediate(kObjectAlignmentBits));
 
     // Compute the page address from the heap object pointer, leave it in
     // 'object' (immediate value is sign extended).
@@ -203,24 +212,26 @@
     // to limit code size. We should probably evaluate this decision by
     // measuring the performance of an equivalent implementation using
     // "simpler" instructions
-    bts(Operand(object, Page::kRSetOffset), value);
+    bts(Operand(object, Page::kRSetOffset), scratch);
   } else {
-    Register dst = scratch;
+    Register dst = smi_index;
     if (offset != 0) {
       lea(dst, Operand(object, offset));
     } else {
       // array access: calculate the destination address in the same manner as
-      // KeyedStoreIC::GenerateGeneric.  Multiply a smi by 4 to get an offset
-      // into an array of pointers.
-      lea(dst, Operand(object, dst, times_half_pointer_size,
+      // KeyedStoreIC::GenerateGeneric.
+      SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
+      lea(dst, Operand(object,
+                       index.reg,
+                       index.scale,
                        FixedArray::kHeaderSize - kHeapObjectTag));
     }
     // If we are already generating a shared stub, not inlining the
     // record write code isn't going to save us any memory.
     if (generating_stub()) {
-      RecordWriteHelper(this, object, dst, value);
+      RecordWriteHelper(this, object, dst, scratch);
     } else {
-      RecordWriteStub stub(object, dst, value);
+      RecordWriteStub stub(object, dst, scratch);
       CallStub(&stub);
     }
   }
@@ -370,7 +381,6 @@
   addq(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
 }
 
-
 Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
                                             bool* resolved) {
   // Move the builtin function into the temporary function slot by
@@ -384,7 +394,6 @@
       JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
   movq(rdi, FieldOperand(rdx, builtins_offset));
 
-
   return Builtins::GetCode(id, resolved);
 }
 
@@ -416,68 +425,96 @@
   }
 }
 
-
 // ----------------------------------------------------------------------------
 // Smi tagging, untagging and tag detection.
 
+static int kSmiShift = kSmiTagSize + kSmiShiftSize;
 
 void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
-  ASSERT_EQ(1, kSmiTagSize);
   ASSERT_EQ(0, kSmiTag);
-#ifdef DEBUG
-    cmpq(src, Immediate(0xC0000000u));
-    Check(positive, "Smi conversion overflow");
-#endif
-  if (dst.is(src)) {
-    addl(dst, src);
-  } else {
-    lea(dst, Operand(src, src, times_1, 0));
+  if (!dst.is(src)) {
+    movl(dst, src);
   }
+  shl(dst, Immediate(kSmiShift));
 }
 
 
 void MacroAssembler::Integer32ToSmi(Register dst,
                                     Register src,
                                     Label* on_overflow) {
-  ASSERT_EQ(1, kSmiTagSize);
   ASSERT_EQ(0, kSmiTag);
+  // A 32-bit integer always fits in a long smi, so this cannot overflow.
   if (!dst.is(src)) {
     movl(dst, src);
   }
-  addl(dst, src);
-  j(overflow, on_overflow);
+  shl(dst, Immediate(kSmiShift));
 }
 
 
-void MacroAssembler::Integer64AddToSmi(Register dst,
-                                       Register src,
-                                       int constant) {
-#ifdef DEBUG
-  movl(kScratchRegister, src);
-  addl(kScratchRegister, Immediate(constant));
-  Check(no_overflow, "Add-and-smi-convert overflow");
-  Condition valid = CheckInteger32ValidSmiValue(kScratchRegister);
-  Check(valid, "Add-and-smi-convert overflow");
-#endif
-  lea(dst, Operand(src, src, times_1, constant << kSmiTagSize));
+void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
+                                                Register src,
+                                                int constant) {
+  if (dst.is(src)) {
+    addq(dst, Immediate(constant));
+  } else {
+    lea(dst, Operand(src, constant));
+  }
+  shl(dst, Immediate(kSmiShift));
 }
 
 
 void MacroAssembler::SmiToInteger32(Register dst, Register src) {
-  ASSERT_EQ(1, kSmiTagSize);
   ASSERT_EQ(0, kSmiTag);
   if (!dst.is(src)) {
-    movl(dst, src);
+    movq(dst, src);
   }
-  sarl(dst, Immediate(kSmiTagSize));
+  shr(dst, Immediate(kSmiShift));
 }
 
 
 void MacroAssembler::SmiToInteger64(Register dst, Register src) {
-  ASSERT_EQ(1, kSmiTagSize);
   ASSERT_EQ(0, kSmiTag);
-  movsxlq(dst, src);
-  sar(dst, Immediate(kSmiTagSize));
+  if (!dst.is(src)) {
+    movq(dst, src);
+  }
+  sar(dst, Immediate(kSmiShift));
+}
+
+
+void MacroAssembler::SmiTest(Register src) {
+  testq(src, src);
+}
+
+
+void MacroAssembler::SmiCompare(Register dst, Register src) {
+  cmpq(dst, src);
+}
+
+
+void MacroAssembler::SmiCompare(Register dst, Smi* src) {
+  ASSERT(!dst.is(kScratchRegister));
+  if (src->value() == 0) {
+    testq(dst, dst);
+  } else {
+    Move(kScratchRegister, src);
+    cmpq(dst, kScratchRegister);
+  }
+}
+
+
+void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
+  cmpq(dst, src);
+}
+
+
+void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
+  if (src->value() == 0) {
+    // Zero is the only smi whose tagged representation fits in 32 bits.
+    cmpq(dst, Immediate(0));
+  } else {
+    Move(kScratchRegister, src);
+    cmpq(dst, kScratchRegister);
+  }
 }
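
The helpers above lean on the new smi layout: the 32-bit payload lives in the upper half of the word and the low 32 bits (tag plus padding) are zero, so tagging is a single shift and tagged values compare exactly like the integers they encode, which is why SmiCompare is a plain cmpq. A standalone sketch (two's-complement conversions assumed, as on x64):

    #include <cassert>
    #include <cstdint>

    int64_t Tag(int32_t v) {
      return static_cast<int64_t>(v) * (int64_t{1} << 32);  // value << 32
    }

    int32_t Untag(int64_t smi) {
      return static_cast<int32_t>(static_cast<uint64_t>(smi) >> 32);
    }

    int main() {
      assert(Untag(Tag(-7)) == -7);
      assert((Tag(-5) < Tag(3)) == (-5 < 3));  // cmpq works on tagged values
      assert(Tag(0) == 0);  // only zero's tagged form fits a 32-bit immediate
    }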
 
 
@@ -490,170 +527,75 @@
     SmiToInteger64(dst, src);
     return;
   }
-  movsxlq(dst, src);
-  shl(dst, Immediate(power - 1));
-}
-
-void MacroAssembler::JumpIfSmi(Register src, Label* on_smi) {
-  ASSERT_EQ(0, kSmiTag);
-  testl(src, Immediate(kSmiTagMask));
-  j(zero, on_smi);
-}
-
-
-void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi) {
-  Condition not_smi = CheckNotSmi(src);
-  j(not_smi, on_not_smi);
-}
-
-
-void MacroAssembler::JumpIfNotPositiveSmi(Register src,
-                                          Label* on_not_positive_smi) {
-  Condition not_positive_smi = CheckNotPositiveSmi(src);
-  j(not_positive_smi, on_not_positive_smi);
-}
-
-
-void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
-                                             int constant,
-                                             Label* on_equals) {
-  if (Smi::IsValid(constant)) {
-    Condition are_equal = CheckSmiEqualsConstant(src, constant);
-    j(are_equal, on_equals);
+  if (!dst.is(src)) {
+    movq(dst, src);
+  }
+  if (power < kSmiShift) {
+    sar(dst, Immediate(kSmiShift - power));
+  } else if (power > kSmiShift) {
+    shl(dst, Immediate(power - kSmiShift));
   }
 }
 
 
-void MacroAssembler::JumpIfSmiGreaterEqualsConstant(Register src,
-                                                    int constant,
-                                                    Label* on_greater_equals) {
-  if (Smi::IsValid(constant)) {
-    Condition are_greater_equal = CheckSmiGreaterEqualsConstant(src, constant);
-    j(are_greater_equal, on_greater_equals);
-  } else if (constant < Smi::kMinValue) {
-    jmp(on_greater_equals);
-  }
-}
-
-
-void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid) {
-  Condition is_valid = CheckInteger32ValidSmiValue(src);
-  j(ReverseCondition(is_valid), on_invalid);
-}
-
-
-
-void MacroAssembler::JumpIfNotBothSmi(Register src1,
-                                      Register src2,
-                                      Label* on_not_both_smi) {
-  Condition not_both_smi = CheckNotBothSmi(src1, src2);
-  j(not_both_smi, on_not_both_smi);
-}
-
 Condition MacroAssembler::CheckSmi(Register src) {
-  testb(src, Immediate(kSmiTagMask));
-  return zero;
-}
-
-
-Condition MacroAssembler::CheckNotSmi(Register src) {
   ASSERT_EQ(0, kSmiTag);
   testb(src, Immediate(kSmiTagMask));
-  return not_zero;
+  return zero;
 }
 
 
 Condition MacroAssembler::CheckPositiveSmi(Register src) {
   ASSERT_EQ(0, kSmiTag);
-  testl(src, Immediate(static_cast<uint32_t>(0x80000000u | kSmiTagMask)));
+  movq(kScratchRegister, src);
+  rol(kScratchRegister, Immediate(1));
+  testl(kScratchRegister, Immediate(0x03));
   return zero;
 }
 
 
-Condition MacroAssembler::CheckNotPositiveSmi(Register src) {
-  ASSERT_EQ(0, kSmiTag);
-  testl(src, Immediate(static_cast<uint32_t>(0x80000000u | kSmiTagMask)));
-  return not_zero;
-}
-
-
 Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
   if (first.is(second)) {
     return CheckSmi(first);
   }
   movl(kScratchRegister, first);
   orl(kScratchRegister, second);
-  return CheckSmi(kScratchRegister);
-}
-
-
-Condition MacroAssembler::CheckNotBothSmi(Register first, Register second) {
-  ASSERT_EQ(0, kSmiTag);
-  if (first.is(second)) {
-    return CheckNotSmi(first);
-  }
-  movl(kScratchRegister, first);
-  or_(kScratchRegister, second);
-  return CheckNotSmi(kScratchRegister);
+  testb(kScratchRegister, Immediate(kSmiTagMask));
+  return zero;
 }
 
 
 Condition MacroAssembler::CheckIsMinSmi(Register src) {
   ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  cmpl(src, Immediate(0x40000000));
+  movq(kScratchRegister, src);
+  rol(kScratchRegister, Immediate(1));
+  cmpq(kScratchRegister, Immediate(1));
   return equal;
 }
 
-Condition MacroAssembler::CheckSmiEqualsConstant(Register src, int constant) {
-  if (constant == 0) {
-    testl(src, src);
-    return zero;
-  }
-  if (Smi::IsValid(constant)) {
-    cmpl(src, Immediate(Smi::FromInt(constant)));
-    return zero;
-  }
-  // Can't be equal.
-  UNREACHABLE();
-  return no_condition;
-}
-
-
-Condition MacroAssembler::CheckSmiGreaterEqualsConstant(Register src,
-                                                        int constant) {
-  if (constant == 0) {
-    testl(src, Immediate(static_cast<uint32_t>(0x80000000u)));
-    return positive;
-  }
-  if (Smi::IsValid(constant)) {
-    cmpl(src, Immediate(Smi::FromInt(constant)));
-    return greater_equal;
-  }
-  // Can't be equal.
-  UNREACHABLE();
-  return no_condition;
-}
-
 
 Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
-  // A 32-bit integer value can be converted to a smi if it is in the
-  // range [-2^30 .. 2^30-1]. That is equivalent to having its 32-bit
-  // representation have bits 30 and 31 be equal.
-  cmpl(src, Immediate(0xC0000000u));
-  return positive;
+  // A 32-bit integer value can always be converted to a smi.
+  return always;
 }
 
 
-void MacroAssembler::SmiNeg(Register dst,
-                            Register src,
-                            Label* on_not_smi_result) {
-  if (!dst.is(src)) {
-    movl(dst, src);
+void MacroAssembler::SmiNeg(Register dst, Register src, Label* on_smi_result) {
+  if (dst.is(src)) {
+    ASSERT(!dst.is(kScratchRegister));
+    movq(kScratchRegister, src);
+    neg(dst);  // Low 32 bits are retained as zero by negation.
+    // Test if result is zero or Smi::kMinValue.
+    cmpq(dst, kScratchRegister);
+    j(not_equal, on_smi_result);
+    movq(src, kScratchRegister);
+  } else {
+    movq(dst, src);
+    neg(dst);
+    cmpq(dst, src);
+    // If the result is zero or Smi::kMinValue, negation failed to create a smi.
+    j(not_equal, on_smi_result);
   }
-  negl(dst);
-  testl(dst, Immediate(0x7fffffff));
-  // If the result is zero or 0x80000000, negation failed to create a smi.
-  j(equal, on_not_smi_result);
 }
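
SmiNeg above detects failure by comparing the negated value with the original: two's-complement negation maps a word to itself only for zero (whose negation should be -0.0, not a smi) and for Smi::kMinValue (whose negation is out of smi range). A sketch over the raw tagged words:

    #include <cassert>
    #include <cstdint>

    bool NegatesToSmi(uint64_t tagged) {
      uint64_t negated = ~tagged + 1;  // two's-complement neg, as the neg insn
      return negated != tagged;        // equal only for 0 and the minimum smi
    }

    int main() {
      assert(!NegatesToSmi(0));
      assert(!NegatesToSmi(uint64_t{1} << 63));  // tagged Smi::kMinValue
      assert(NegatesToSmi(uint64_t{7} << 32));   // tagged 7 negates fine
    }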
 
 
@@ -662,42 +604,39 @@
                             Register src2,
                             Label* on_not_smi_result) {
   ASSERT(!dst.is(src2));
-  if (!dst.is(src1)) {
-    movl(dst, src1);
-  }
-  addl(dst, src2);
-  if (!dst.is(src1)) {
-    j(overflow, on_not_smi_result);
-  } else {
+  if (dst.is(src1)) {
+    addq(dst, src2);
     Label smi_result;
     j(no_overflow, &smi_result);
     // Restore src1.
-    subl(src1, src2);
+    subq(src1, src2);
     jmp(on_not_smi_result);
     bind(&smi_result);
+  } else {
+    movq(dst, src1);
+    addq(dst, src2);
+    j(overflow, on_not_smi_result);
   }
 }
 
 
-
 void MacroAssembler::SmiSub(Register dst,
                             Register src1,
                             Register src2,
                             Label* on_not_smi_result) {
   ASSERT(!dst.is(src2));
-  if (!dst.is(src1)) {
-    movl(dst, src1);
-  }
-  subl(dst, src2);
-  if (!dst.is(src1)) {
-    j(overflow, on_not_smi_result);
-  } else {
+  if (dst.is(src1)) {
+    subq(dst, src2);
     Label smi_result;
     j(no_overflow, &smi_result);
     // Restore src1.
-    addl(src1, src2);
+    addq(src1, src2);
     jmp(on_not_smi_result);
     bind(&smi_result);
+  } else {
+    movq(dst, src1);
+    subq(dst, src2);
+    j(overflow, on_not_smi_result);
   }
 }
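
The dst.is(src1) paths in SmiAdd and SmiSub are optimistic: they perform the 64-bit operation in place and, on overflow, undo it to restore src1 before jumping to the slow case. A sketch of the same protocol, using a GCC/Clang overflow builtin for brevity:

    #include <cstdint>

    // Returns false (with *dst_src1 restored) when the result is not a
    // valid smi, mirroring the addq/subq plus j(no_overflow) pattern.
    bool SmiAddOptimistic(int64_t* dst_src1, int64_t src2) {
      int64_t original = *dst_src1;
      if (__builtin_add_overflow(original, src2, dst_src1)) {
        *dst_src1 = original;  // the subq that restores src1
        return false;
      }
      return true;
    }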
 
@@ -707,80 +646,137 @@
                             Register src2,
                             Label* on_not_smi_result) {
   ASSERT(!dst.is(src2));
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
 
   if (dst.is(src1)) {
+    Label failure, zero_correct_result;
+    movq(kScratchRegister, src1);  // Create backup for later testing.
+    SmiToInteger64(dst, src1);
+    imul(dst, src2);
+    j(overflow, &failure);
+
+    // Check for negative zero result.  If product is zero, and one
+    // argument is negative, go to slow case.
+    Label correct_result;
+    testq(dst, dst);
+    j(not_zero, &correct_result);
+
+    movq(dst, kScratchRegister);
+    xor_(dst, src2);
+    j(positive, &zero_correct_result);  // Result was positive zero.
+
+    bind(&failure);  // Reused failure exit, restores src1.
+    movq(src1, kScratchRegister);
+    jmp(on_not_smi_result);
+
+    bind(&zero_correct_result);
+    xor_(dst, dst);
+
+    bind(&correct_result);
+  } else {
+    SmiToInteger64(dst, src1);
+    imul(dst, src2);
+    j(overflow, on_not_smi_result);
+    // Check for negative zero result.  If product is zero, and one
+    // argument is negative, go to slow case.
+    Label correct_result;
+    testq(dst, dst);
+    j(not_zero, &correct_result);
+    // One of src1 and src2 is zero, so check whether the other is
+    // negative.
     movq(kScratchRegister, src1);
+    xor_(kScratchRegister, src2);
+    j(negative, on_not_smi_result);
+    bind(&correct_result);
   }
-  SmiToInteger32(dst, src1);
-
-  imull(dst, src2);
-  j(overflow, on_not_smi_result);
-
-  // Check for negative zero result.  If product is zero, and one
-  // argument is negative, go to slow case.  The frame is unchanged
-  // in this block, so local control flow can use a Label rather
-  // than a JumpTarget.
-  Label non_zero_result;
-  testl(dst, dst);
-  j(not_zero, &non_zero_result);
-
-  // Test whether either operand is negative (the other must be zero).
-  orl(kScratchRegister, src2);
-  j(negative, on_not_smi_result);
-  bind(&non_zero_result);
 }
 
 
 void MacroAssembler::SmiTryAddConstant(Register dst,
                                        Register src,
-                                       int32_t constant,
+                                       Smi* constant,
                                        Label* on_not_smi_result) {
   // Does not assume that src is a smi.
-  ASSERT_EQ(1, kSmiTagMask);
+  ASSERT_EQ(static_cast<intptr_t>(1), kSmiTagMask);
   ASSERT_EQ(0, kSmiTag);
-  ASSERT(Smi::IsValid(constant));
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src.is(kScratchRegister));
 
-  Register tmp = (src.is(dst) ? kScratchRegister : dst);
-  movl(tmp, src);
-  addl(tmp, Immediate(Smi::FromInt(constant)));
-  if (tmp.is(kScratchRegister)) {
-    j(overflow, on_not_smi_result);
-    testl(tmp, Immediate(kSmiTagMask));
-    j(not_zero, on_not_smi_result);
-    movl(dst, tmp);
+  JumpIfNotSmi(src, on_not_smi_result);
+  Register tmp = (dst.is(src) ? kScratchRegister : dst);
+  Move(tmp, constant);
+  addq(tmp, src);
+  j(overflow, on_not_smi_result);
+  if (dst.is(src)) {
+    movq(dst, tmp);
+  }
+}
+
+
+void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
+  if (constant->value() == 0) {
+    if (!dst.is(src)) {
+      movq(dst, src);
+    }
+  } else if (dst.is(src)) {
+    ASSERT(!dst.is(kScratchRegister));
+
+    Move(kScratchRegister, constant);
+    addq(dst, kScratchRegister);
   } else {
-    movl(kScratchRegister, Immediate(kSmiTagMask));
-    cmovl(overflow, dst, kScratchRegister);
-    testl(dst, kScratchRegister);
-    j(not_zero, on_not_smi_result);
+    Move(dst, constant);
+    addq(dst, src);
   }
 }
 
 
 void MacroAssembler::SmiAddConstant(Register dst,
                                     Register src,
-                                    int32_t constant,
+                                    Smi* constant,
                                     Label* on_not_smi_result) {
-  ASSERT(Smi::IsValid(constant));
-  if (on_not_smi_result == NULL) {
-    if (dst.is(src)) {
-      movl(dst, src);
-    } else {
-      lea(dst, Operand(src, constant << kSmiTagSize));
+  if (constant->value() == 0) {
+    if (!dst.is(src)) {
+      movq(dst, src);
     }
+  } else if (dst.is(src)) {
+    ASSERT(!dst.is(kScratchRegister));
+
+    Move(kScratchRegister, constant);
+    addq(dst, kScratchRegister);
+    Label result_ok;
+    j(no_overflow, &result_ok);
+    subq(dst, kScratchRegister);
+    jmp(on_not_smi_result);
+    bind(&result_ok);
   } else {
+    Move(dst, constant);
+    addq(dst, src);
+    j(overflow, on_not_smi_result);
+  }
+}
+
+
+void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
+  if (constant->value() == 0) {
     if (!dst.is(src)) {
-      movl(dst, src);
+      movq(dst, src);
     }
-    addl(dst, Immediate(Smi::FromInt(constant)));
-    if (!dst.is(src)) {
-      j(overflow, on_not_smi_result);
+  } else if (dst.is(src)) {
+    ASSERT(!dst.is(kScratchRegister));
+
+    Move(kScratchRegister, constant);
+    subq(dst, kScratchRegister);
+  } else {
+    // Subtract by adding the negation, which takes only two operations.
+    if (constant->value() == Smi::kMinValue) {
+      Move(kScratchRegister, constant);
+      movq(dst, src);
+      subq(dst, kScratchRegister);
     } else {
-      Label result_ok;
-      j(no_overflow, &result_ok);
-      subl(dst, Immediate(Smi::FromInt(constant)));
-      jmp(on_not_smi_result);
-      bind(&result_ok);
+      Move(dst, Smi::FromInt(-constant->value()));
+      addq(dst, src);
     }
   }
 }
@@ -788,24 +784,33 @@
 
 void MacroAssembler::SmiSubConstant(Register dst,
                                     Register src,
-                                    int32_t constant,
+                                    Smi* constant,
                                     Label* on_not_smi_result) {
-  ASSERT(Smi::IsValid(constant));
-  Smi* smi_value = Smi::FromInt(constant);
-  if (dst.is(src)) {
-    // Optimistic subtract - may change value of dst register,
-    // if it has garbage bits in the higher half, but will not change
-    // the value as a tagged smi.
-    subl(dst, Immediate(smi_value));
-    if (on_not_smi_result != NULL) {
-      Label add_success;
-      j(no_overflow, &add_success);
-      addl(dst, Immediate(smi_value));
-      jmp(on_not_smi_result);
-      bind(&add_success);
+  if (constant->value() == 0) {
+    if (!dst.is(src)) {
+      movq(dst, src);
     }
+  } else if (dst.is(src)) {
+    ASSERT(!dst.is(kScratchRegister));
+
+    Move(kScratchRegister, constant);
+    subq(dst, kScratchRegister);
+    Label sub_success;
+    j(no_overflow, &sub_success);
+    addq(src, kScratchRegister);
+    jmp(on_not_smi_result);
+    bind(&sub_success);
   } else {
-    UNIMPLEMENTED();  // Not used yet.
+    if (constant->value() == Smi::kMinValue) {
+      Move(kScratchRegister, constant);
+      movq(dst, src);
+      subq(dst, kScratchRegister);
+      j(overflow, on_not_smi_result);
+    } else {
+      Move(dst, Smi::FromInt(-(constant->value())));
+      addq(dst, src);
+      j(overflow, on_not_smi_result);
+    }
   }
 }
 
@@ -814,38 +819,61 @@
                             Register src1,
                             Register src2,
                             Label* on_not_smi_result) {
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+  ASSERT(!dst.is(kScratchRegister));
   ASSERT(!src2.is(rax));
   ASSERT(!src2.is(rdx));
   ASSERT(!src1.is(rdx));
 
   // Check for 0 divisor (result is +/-Infinity).
   Label positive_divisor;
-  testl(src2, src2);
+  testq(src2, src2);
   j(zero, on_not_smi_result);
-  j(positive, &positive_divisor);
-  // Check for negative zero result.  If the dividend is zero, and the
-  // divisor is negative, return a floating point negative zero.
-  testl(src1, src1);
-  j(zero, on_not_smi_result);
-  bind(&positive_divisor);
 
-  // Sign extend src1 into edx:eax.
-  if (!src1.is(rax)) {
-    movl(rax, src1);
+  if (src1.is(rax)) {
+    movq(kScratchRegister, src1);
   }
-  cdq();
+  SmiToInteger32(rax, src1);
+  // We need to rule out dividing Smi::kMinValue by -1, since that would
+  // overflow in idiv and raise an exception.
+  // We combine this with the negative zero test (negative zero only happens
+  // when dividing zero by a negative number).
 
+  // We overshoot a little and go to slow case if we divide min-value
+  // by any negative value, not just -1.
+  Label safe_div;
+  testl(rax, Immediate(0x7fffffff));
+  j(not_zero, &safe_div);
+  testq(src2, src2);
+  if (src1.is(rax)) {
+    j(positive, &safe_div);
+    movq(src1, kScratchRegister);
+    jmp(on_not_smi_result);
+  } else {
+    j(negative, on_not_smi_result);
+  }
+  bind(&safe_div);
+
+  SmiToInteger32(src2, src2);
+  // Sign extend src1 into edx:eax.
+  cdq();
   idivl(src2);
-  // Check for the corner case of dividing the most negative smi by
-  // -1. We cannot use the overflow flag, since it is not set by
-  // idiv instruction.
-  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  cmpl(rax, Immediate(0x40000000));
-  j(equal, on_not_smi_result);
+  Integer32ToSmi(src2, src2);
   // Check that the remainder is zero.
   testl(rdx, rdx);
-  j(not_zero, on_not_smi_result);
-  // Tag the result and store it in the destination register.
+  if (src1.is(rax)) {
+    Label smi_result;
+    j(zero, &smi_result);
+    movq(src1, kScratchRegister);
+    jmp(on_not_smi_result);
+    bind(&smi_result);
+  } else {
+    j(not_zero, on_not_smi_result);
+  }
+  if (!dst.is(src1) && src1.is(rax)) {
+    movq(src1, kScratchRegister);
+  }
   Integer32ToSmi(dst, rax);
 }
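
SmiDiv has to dodge two idivl hazards in addition to the remainder check: dividing Smi::kMinValue by -1 overflows and faults, and 0 divided by a negative divisor must yield -0.0, which no smi can represent. A sketch of the exact conditions (the generated code deliberately overshoots, going slow whenever min-value or zero is divided by any negative number):

    #include <cstdint>

    bool SmiDivResult(int32_t x, int32_t y, int32_t* out) {
      if (y == 0) return false;                     // +/-Infinity: slow case
      if (x == 0 && y < 0) return false;            // result is -0.0: slow case
      if (x == INT32_MIN && y == -1) return false;  // idivl would fault
      if (x % y != 0) return false;                 // non-integer quotient
      *out = x / y;
      return true;
    }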
 
@@ -860,109 +888,136 @@
   ASSERT(!src2.is(rax));
   ASSERT(!src2.is(rdx));
   ASSERT(!src1.is(rdx));
+  ASSERT(!src1.is(src2));
 
-  testl(src2, src2);
+  testq(src2, src2);
   j(zero, on_not_smi_result);
 
   if (src1.is(rax)) {
-    // Mist remember the value to see if a zero result should
-    // be a negative zero.
-    movl(kScratchRegister, rax);
-  } else {
-    movl(rax, src1);
+    movq(kScratchRegister, src1);
   }
+  SmiToInteger32(rax, src1);
+  SmiToInteger32(src2, src2);
+
+  // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
+  Label safe_div;
+  cmpl(rax, Immediate(Smi::kMinValue));
+  j(not_equal, &safe_div);
+  cmpl(src2, Immediate(-1));
+  j(not_equal, &safe_div);
+  // Retag inputs and go slow case.
+  Integer32ToSmi(src2, src2);
+  if (src1.is(rax)) {
+    movq(src1, kScratchRegister);
+  }
+  jmp(on_not_smi_result);
+  bind(&safe_div);
+
   // Sign extend eax into edx:eax.
   cdq();
   idivl(src2);
-  // Check for a negative zero result.  If the result is zero, and the
-  // dividend is negative, return a floating point negative zero.
-  Label non_zero_result;
-  testl(rdx, rdx);
-  j(not_zero, &non_zero_result);
+  // Restore smi tags on inputs.
+  Integer32ToSmi(src2, src2);
   if (src1.is(rax)) {
-    testl(kScratchRegister, kScratchRegister);
-  } else {
-    testl(src1, src1);
+    movq(src1, kScratchRegister);
   }
+  // Check for a negative zero result.  If the result is zero, and the
+  // dividend is negative, go slow to return a floating point negative zero.
+  Label smi_result;
+  testl(rdx, rdx);
+  j(not_zero, &smi_result);
+  testq(src1, src1);
   j(negative, on_not_smi_result);
-  bind(&non_zero_result);
-  if (!dst.is(rdx)) {
-    movl(dst, rdx);
-  }
+  bind(&smi_result);
+  Integer32ToSmi(dst, rdx);
 }
 
 
 void MacroAssembler::SmiNot(Register dst, Register src) {
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src.is(kScratchRegister));
+  // Set tag and padding bits before inverting, so they are zero afterwards.
+  movl(kScratchRegister, Immediate(~0));
   if (dst.is(src)) {
-    not_(dst);
-    // Remove inverted smi-tag.  The mask is sign-extended to 64 bits.
-    xor_(src, Immediate(kSmiTagMask));
+    xor_(dst, kScratchRegister);
   } else {
-    ASSERT_EQ(0, kSmiTag);
-    lea(dst, Operand(src, kSmiTagMask));
-    not_(dst);
+    lea(dst, Operand(src, kScratchRegister, times_1, 0));
   }
+  not_(dst);
 }
 
 
 void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
+  ASSERT(!dst.is(src2));
   if (!dst.is(src1)) {
-    movl(dst, src1);
+    movq(dst, src1);
   }
   and_(dst, src2);
 }
 
 
-void MacroAssembler::SmiAndConstant(Register dst, Register src, int constant) {
-  ASSERT(Smi::IsValid(constant));
-  if (!dst.is(src)) {
-    movl(dst, src);
+void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
+  if (constant->value() == 0) {
+    xor_(dst, dst);
+  } else if (dst.is(src)) {
+    ASSERT(!dst.is(kScratchRegister));
+    Move(kScratchRegister, constant);
+    and_(dst, kScratchRegister);
+  } else {
+    Move(dst, constant);
+    and_(dst, src);
   }
-  and_(dst, Immediate(Smi::FromInt(constant)));
 }
 
 
 void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
   if (!dst.is(src1)) {
-    movl(dst, src1);
+    movq(dst, src1);
   }
   or_(dst, src2);
 }
 
 
-void MacroAssembler::SmiOrConstant(Register dst, Register src, int constant) {
-  ASSERT(Smi::IsValid(constant));
-  if (!dst.is(src)) {
-    movl(dst, src);
+void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
+  if (dst.is(src)) {
+    ASSERT(!dst.is(kScratchRegister));
+    Move(kScratchRegister, constant);
+    or_(dst, kScratchRegister);
+  } else {
+    Move(dst, constant);
+    or_(dst, src);
   }
-  or_(dst, Immediate(Smi::FromInt(constant)));
 }
 
+
 void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
   if (!dst.is(src1)) {
-    movl(dst, src1);
+    movq(dst, src1);
   }
   xor_(dst, src2);
 }
 
 
-void MacroAssembler::SmiXorConstant(Register dst, Register src, int constant) {
-  ASSERT(Smi::IsValid(constant));
-  if (!dst.is(src)) {
-    movl(dst, src);
+void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
+  if (dst.is(src)) {
+    ASSERT(!dst.is(kScratchRegister));
+    Move(kScratchRegister, constant);
+    xor_(dst, kScratchRegister);
+  } else {
+    Move(dst, constant);
+    xor_(dst, src);
   }
-  xor_(dst, Immediate(Smi::FromInt(constant)));
 }
 
 
-
 void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
                                                      Register src,
                                                      int shift_value) {
+  ASSERT(is_uint5(shift_value));
   if (shift_value > 0) {
     if (dst.is(src)) {
-      sarl(dst, Immediate(shift_value));
-      and_(dst, Immediate(~kSmiTagMask));
+      sar(dst, Immediate(shift_value + kSmiShift));
+      shl(dst, Immediate(kSmiShift));
     } else {
       UNIMPLEMENTED();  // Not used.
     }
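
The sar/shl pair above replaces the old untag-shift-retag dance: shifting the tagged word right by shift_value + 32 untags and shifts in one instruction, and the shl by 32 retags while clearing the low bits. A sketch (assumes arithmetic right shift of negative values, as on x64):

    #include <cassert>
    #include <cstdint>

    int64_t SmiSar(int64_t tagged, int shift) {
      const int64_t kRetag = int64_t{1} << 32;
      return (tagged >> (shift + 32)) * kRetag;  // sar, then retag (shl 32)
    }

    int main() {
      const int64_t kOne = int64_t{1} << 32;      // tagged smi 1
      assert(SmiSar(-8 * kOne, 2) == -2 * kOne);  // -8 >> 2 == -2
    }
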
@@ -978,20 +1033,13 @@
   if (dst.is(src)) {
     UNIMPLEMENTED();  // Not used.
   } else {
-    movl(dst, src);
-    // Untag the smi.
-    sarl(dst, Immediate(kSmiTagSize));
-    if (shift_value < 2) {
-      // A negative Smi shifted right two is in the positive Smi range,
-      // but if shifted only by zero or one, it never is.
+    movq(dst, src);
+    if (shift_value == 0) {
+      testq(dst, dst);
       j(negative, on_not_smi_result);
     }
-    if (shift_value > 0) {
-      // Do the right shift on the integer value.
-      shrl(dst, Immediate(shift_value));
-    }
-    // Re-tag the result.
-    addl(dst, dst);
+    shr(dst, Immediate(shift_value + kSmiShift));
+    shl(dst, Immediate(kSmiShift));
   }
 }
 
@@ -1000,20 +1048,11 @@
                                           Register src,
                                           int shift_value,
                                           Label* on_not_smi_result) {
-  if (dst.is(src)) {
-    UNIMPLEMENTED();  // Not used.
-  } else {
-    movl(dst, src);
-    if (shift_value > 0) {
-      // Treat dst as an untagged integer value equal to two times the
-      // smi value of src, i.e., already shifted left by one.
-      if (shift_value > 1) {
-        shll(dst, Immediate(shift_value - 1));
-      }
-      // Convert int result to Smi, checking that it is in smi range.
-      ASSERT(kSmiTagSize == 1);  // adjust code if not the case
-      Integer32ToSmi(dst, dst, on_not_smi_result);
-    }
+  if (!dst.is(src)) {
+    movq(dst, src);
+  }
+  if (shift_value > 0) {
+    shl(dst, Immediate(shift_value));
   }
 }
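
SmiShiftLeftConstant above no longer needs an overflow check: with the payload in the high 32 bits, a plain 64-bit shl discards bits past bit 63 exactly as int32 wraparound requires, so every result is again a valid smi. A standalone sketch:

    #include <cassert>
    #include <cstdint>

    uint64_t SmiShl(uint64_t tagged, int shift) { return tagged << shift; }

    int main() {
      uint32_t v = 0x40000001u;
      uint64_t tagged = static_cast<uint64_t>(v) << 32;
      assert((SmiShl(tagged, 3) >> 32) == (v << 3));  // int32 wraparound
    }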
 
@@ -1024,23 +1063,14 @@
                                   Label* on_not_smi_result) {
   ASSERT(!dst.is(rcx));
   Label result_ok;
-  // Untag both operands.
-  SmiToInteger32(dst, src1);
-  SmiToInteger32(rcx, src2);
-  shll(dst);
-  // Check that the *signed* result fits in a smi.
-  Condition is_valid = CheckInteger32ValidSmiValue(dst);
-  j(is_valid, &result_ok);
-  // Restore the relevant bits of the source registers
-  // and call the slow version.
-  if (dst.is(src1)) {
-    shrl(dst);
-    Integer32ToSmi(dst, dst);
+  // Untag shift amount.
+  if (!dst.is(src1)) {
+    movq(dst, src1);
   }
-  Integer32ToSmi(rcx, rcx);
-  jmp(on_not_smi_result);
-  bind(&result_ok);
-  Integer32ToSmi(dst, dst);
+  SmiToInteger32(rcx, src2);
+  // Shift amounts use only the lower 5 bits, not six as the shl opcode does.
+  and_(rcx, Immediate(0x1f));
+  shl(dst);
 }
 
 
@@ -1048,48 +1078,62 @@
                                           Register src1,
                                           Register src2,
                                           Label* on_not_smi_result) {
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
   ASSERT(!dst.is(rcx));
   Label result_ok;
-  // Untag both operands.
-  SmiToInteger32(dst, src1);
-  SmiToInteger32(rcx, src2);
-
-  shrl(dst);
-  // Check that the *unsigned* result fits in a smi.
-  // I.e., that it is a valid positive smi value. The positive smi
-  // values are  0..0x3fffffff, i.e., neither of the top-most two
-  // bits can be set.
-  //
-  // These two cases can only happen with shifts by 0 or 1 when
-  // handed a valid smi.  If the answer cannot be represented by a
-  // smi, restore the left and right arguments, and jump to slow
-  // case.  The low bit of the left argument may be lost, but only
-  // in a case where it is dropped anyway.
-  testl(dst, Immediate(0xc0000000));
-  j(zero, &result_ok);
-  if (dst.is(src1)) {
-    shll(dst);
-    Integer32ToSmi(dst, dst);
+  if (src1.is(rcx) || src2.is(rcx)) {
+    movq(kScratchRegister, rcx);
   }
-  Integer32ToSmi(rcx, rcx);
-  jmp(on_not_smi_result);
-  bind(&result_ok);
-  // Smi-tag the result in answer.
-  Integer32ToSmi(dst, dst);
+  if (!dst.is(src1)) {
+    movq(dst, src1);
+  }
+  SmiToInteger32(rcx, src2);
+  orl(rcx, Immediate(kSmiShift));
+  shr(dst);  // Shift amount is (rcx & 0x1f) + 32.
+  shl(dst, Immediate(kSmiShift));
+  testq(dst, dst);
+  if (src1.is(rcx) || src2.is(rcx)) {
+    Label positive_result;
+    j(positive, &positive_result);
+    if (src1.is(rcx)) {
+      movq(src1, kScratchRegister);
+    } else {
+      movq(src2, kScratchRegister);
+    }
+    jmp(on_not_smi_result);
+    bind(&positive_result);
+  } else {
+    j(negative, on_not_smi_result);  // src2 was zero and src1 negative.
+  }
 }
 
 
 void MacroAssembler::SmiShiftArithmeticRight(Register dst,
                                              Register src1,
                                              Register src2) {
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
   ASSERT(!dst.is(rcx));
-  // Untag both operands.
-  SmiToInteger32(dst, src1);
+  if (src1.is(rcx)) {
+    movq(kScratchRegister, src1);
+  } else if (src2.is(rcx)) {
+    movq(kScratchRegister, src2);
+  }
+  if (!dst.is(src1)) {
+    movq(dst, src1);
+  }
   SmiToInteger32(rcx, src2);
-  // Shift as integer.
-  sarl(dst);
-  // Retag result.
-  Integer32ToSmi(dst, dst);
+  orl(rcx, Immediate(kSmiShift));
+  sar(dst);  // Shift amount is (original rcx & 0x1f) + 32.
+  shl(dst, Immediate(kSmiShift));
+  if (src1.is(rcx)) {
+    movq(src1, kScratchRegister);
+  } else if (src2.is(rcx)) {
+    movq(src2, kScratchRegister);
+  }
 }
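
Both variable-shift helpers above use the same trick: or'ing kSmiShift (32) into the count makes a single shr/sar shift by 32 + (count & 0x1f), untagging and shifting at once, and the shl by 32 retags. For the logical shift, a negative-looking result, which can only arise from shifting a negative smi by zero, is sent to the slow case. A sketch of the unsigned variant:

    #include <cassert>
    #include <cstdint>

    uint64_t SmiShrTagged(uint64_t tagged, int count) {
      uint64_t shifted = tagged >> (32 + (count & 0x1f));  // untag + shift
      return shifted << 32;                                // retag
    }

    int main() {
      uint64_t tagged = static_cast<uint64_t>(0x80000000u) << 32;  // smi -2^31
      assert((SmiShrTagged(tagged, 1) >> 32) == 0x40000000u);      // >>> 1
    }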
 
 
@@ -1097,21 +1141,27 @@
                                   Register src1,
                                   Register src2,
                                   Label* on_not_smis) {
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
   ASSERT(!dst.is(src1));
   ASSERT(!dst.is(src2));
   // The operands must not both be smis.
 #ifdef DEBUG
-  Condition not_both_smis = CheckNotBothSmi(src1, src2);
-  Check(not_both_smis, "Both registers were smis.");
+  if (allow_stub_calls()) {  // Check contains a stub call.
+    Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
+    Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
+  }
 #endif
   ASSERT_EQ(0, kSmiTag);
   ASSERT_EQ(0, Smi::FromInt(0));
-  movq(kScratchRegister, Immediate(kSmiTagMask));
+  movl(kScratchRegister, Immediate(kSmiTagMask));
   and_(kScratchRegister, src1);
   testl(kScratchRegister, src2);
+  // If non-zero, then neither operand is a smi.
   j(not_zero, on_not_smis);
-  // One operand is a smi.
 
+  // Exactly one operand is a smi.
   ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
   // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
   subq(kScratchRegister, Immediate(1));
@@ -1121,71 +1171,117 @@
   and_(dst, kScratchRegister);
   // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
   xor_(dst, src1);
-  // If src1 is a smi, dst is src2, else it is src1, i.e., a non-smi.
+  // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
 }
 
-
-SmiIndex MacroAssembler::SmiToIndex(Register dst, Register src, int shift) {
+SmiIndex MacroAssembler::SmiToIndex(Register dst,
+                                    Register src,
+                                    int shift) {
   ASSERT(is_uint6(shift));
-  if (shift == 0) {  // times_1.
-    SmiToInteger32(dst, src);
-    return SmiIndex(dst, times_1);
+  // There is a possible optimization if shift is in the range 60-63, but that
+  // will (and must) never happen.
+  if (!dst.is(src)) {
+    movq(dst, src);
   }
-  if (shift <= 4) {  // 2 - 16 times multiplier is handled using ScaleFactor.
-    // We expect that all smis are actually zero-padded. If this holds after
-    // checking, this line can be omitted.
-    movl(dst, src);  // Ensure that the smi is zero-padded.
-    return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiTagSize));
+  if (shift < kSmiShift) {
+    sar(dst, Immediate(kSmiShift - shift));
+  } else {
+    shl(dst, Immediate(shift - kSmiShift));
   }
-  // Shift by shift-kSmiTagSize.
-  movl(dst, src);  // Ensure that the smi is zero-padded.
-  shl(dst, Immediate(shift - kSmiTagSize));
   return SmiIndex(dst, times_1);
 }
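
Because the tagged word already holds value << 32, SmiToIndex can produce
value << shift with a single shift in either direction. A small worked model
under the same assumed layout:

    #include <cassert>
    #include <cstdint>

    const int kSmiShift = 32;

    int64_t SmiToIndex(int64_t smi, int shift) {
      // sar by (32 - shift) when scaling down, shl by (shift - 32) otherwise.
      return shift < kSmiShift ? smi >> (kSmiShift - shift)
                               : smi << (shift - kSmiShift);
    }

    int main() {
      int64_t smi = static_cast<int64_t>(7) << 32;  // Tagged 7.
      assert(SmiToIndex(smi, 0) == 7);      // times_1.
      assert(SmiToIndex(smi, 3) == 7 * 8);  // times_8: pointer-size scaling.
      return 0;
    }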
 
-
 SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
                                             Register src,
                                             int shift) {
   // Register src holds a positive smi.
   ASSERT(is_uint6(shift));
-  if (shift == 0) {  // times_1.
-    SmiToInteger32(dst, src);
-    neg(dst);
-    return SmiIndex(dst, times_1);
+  if (!dst.is(src)) {
+    movq(dst, src);
   }
-  if (shift <= 4) {  // 2 - 16 times multiplier is handled using ScaleFactor.
-    movl(dst, src);
-    neg(dst);
-    return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiTagSize));
-  }
-  // Shift by shift-kSmiTagSize.
-  movl(dst, src);
   neg(dst);
-  shl(dst, Immediate(shift - kSmiTagSize));
+  if (shift < kSmiShift) {
+    sar(dst, Immediate(kSmiShift - shift));
+  } else {
+    shl(dst, Immediate(shift - kSmiShift));
+  }
   return SmiIndex(dst, times_1);
 }
 
 
+void MacroAssembler::JumpIfSmi(Register src, Label* on_smi) {
+  ASSERT_EQ(0, kSmiTag);
+  Condition smi = CheckSmi(src);
+  j(smi, on_smi);
+}
+
+
+void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi) {
+  Condition smi = CheckSmi(src);
+  j(NegateCondition(smi), on_not_smi);
+}
+
+
+void MacroAssembler::JumpIfNotPositiveSmi(Register src,
+                                          Label* on_not_positive_smi) {
+  Condition positive_smi = CheckPositiveSmi(src);
+  j(NegateCondition(positive_smi), on_not_positive_smi);
+}
+
+
+void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
+                                             Smi* constant,
+                                             Label* on_equals) {
+  SmiCompare(src, constant);
+  j(equal, on_equals);
+}
+
+
+void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid) {
+  Condition is_valid = CheckInteger32ValidSmiValue(src);
+  j(NegateCondition(is_valid), on_invalid);
+}
+
+
+void MacroAssembler::JumpIfNotBothSmi(Register src1, Register src2,
+                                      Label* on_not_both_smi) {
+  Condition both_smi = CheckBothSmi(src1, src2);
+  j(NegateCondition(both_smi), on_not_both_smi);
+}
 
 bool MacroAssembler::IsUnsafeSmi(Smi* value) {
   return false;
 }
 
+
 void MacroAssembler::LoadUnsafeSmi(Register dst, Smi* source) {
   UNIMPLEMENTED();
 }
 
 
+void MacroAssembler::Move(Register dst, Smi* source) {
+  if (IsUnsafeSmi(source)) {
+    LoadUnsafeSmi(dst, source);
+  } else {
+    Set(dst, reinterpret_cast<int64_t>(source));
+  }
+}
+
+
+void MacroAssembler::Move(const Operand& dst, Smi* source) {
+  if (IsUnsafeSmi(source)) {
+    LoadUnsafeSmi(kScratchRegister, source);
+    movq(dst, kScratchRegister);
+  } else {
+    Set(dst, reinterpret_cast<int64_t>(source));
+  }
+}
+
+
 void MacroAssembler::Move(Register dst, Handle<Object> source) {
   ASSERT(!source->IsFailure());
   if (source->IsSmi()) {
-    if (IsUnsafeSmi(source)) {
-      LoadUnsafeSmi(dst, source);
-    } else {
-      int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source));
-      movq(dst, Immediate(smi));
-    }
+    Move(dst, Smi::cast(*source));
   } else {
     movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
   }
@@ -1193,9 +1289,9 @@
 
 
 void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
+  ASSERT(!source->IsFailure());
   if (source->IsSmi()) {
-    int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source));
-    movq(dst, Immediate(smi));
+    Move(dst, Smi::cast(*source));
   } else {
     movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
     movq(dst, kScratchRegister);
@@ -1204,21 +1300,18 @@
 
 
 void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
-  Move(kScratchRegister, source);
-  cmpq(dst, kScratchRegister);
+  if (source->IsSmi()) {
+    SmiCompare(dst, Smi::cast(*source));
+  } else {
+    Move(kScratchRegister, source);
+    cmpq(dst, kScratchRegister);
+  }
 }
 
 
 void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
   if (source->IsSmi()) {
-    if (IsUnsafeSmi(source)) {
-      LoadUnsafeSmi(kScratchRegister, source);
-      cmpl(dst, kScratchRegister);
-    } else {
-      // For smi-comparison, it suffices to compare the low 32 bits.
-      int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source));
-      cmpl(dst, Immediate(smi));
-    }
+    SmiCompare(dst, Smi::cast(*source));
   } else {
     ASSERT(source->IsHeapObject());
     movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
@@ -1229,13 +1322,7 @@
 
 void MacroAssembler::Push(Handle<Object> source) {
   if (source->IsSmi()) {
-    if (IsUnsafeSmi(source)) {
-      LoadUnsafeSmi(kScratchRegister, source);
-      push(kScratchRegister);
-    } else {
-      int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source));
-      push(Immediate(smi));
-    }
+    Push(Smi::cast(*source));
   } else {
     ASSERT(source->IsHeapObject());
     movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
@@ -1249,8 +1336,29 @@
     LoadUnsafeSmi(kScratchRegister, source);
     push(kScratchRegister);
   } else {
-    int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(source));
-    push(Immediate(smi));
+    intptr_t smi = reinterpret_cast<intptr_t>(source);
+    if (is_int32(smi)) {
+      push(Immediate(static_cast<int32_t>(smi)));
+    } else {
+      Set(kScratchRegister, smi);
+      push(kScratchRegister);
+    }
+  }
+}
+
+
+void MacroAssembler::Test(const Operand& src, Smi* source) {
+  if (IsUnsafeSmi(source)) {
+    LoadUnsafeSmi(kScratchRegister, source);
+    testq(src, kScratchRegister);
+  } else {
+    intptr_t smi = reinterpret_cast<intptr_t>(source);
+    if (is_int32(smi)) {
+      testl(src, Immediate(static_cast<int32_t>(smi)));
+    } else {
+      Move(kScratchRegister, source);
+      testq(src, kScratchRegister);
+    }
   }
 }
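
Push and Test above both special-case smis whose 64-bit bit pattern fits a
sign-extended 32-bit immediate. With the payload in the upper half, only
Smi::FromInt(0) qualifies. A short sketch of the check, with the layout
assumed as before:

    #include <cassert>
    #include <cstdint>

    bool is_int32(int64_t x) {
      return x == static_cast<int64_t>(static_cast<int32_t>(x));
    }

    int main() {
      int64_t zero_smi = 0;                             // Smi::FromInt(0).
      int64_t one_smi = static_cast<int64_t>(1) << 32;  // Smi::FromInt(1).
      assert(is_int32(zero_smi));   // Can be pushed/tested as an immediate.
      assert(!is_int32(one_smi));   // Must go through kScratchRegister.
      return 0;
    }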
 
@@ -1446,7 +1554,6 @@
   }
 }
 
-
 #ifdef ENABLE_DEBUGGER_SUPPORT
 
 void MacroAssembler::PushRegistersFromMemory(RegList regs) {
@@ -1463,6 +1570,7 @@
   }
 }
 
+
 void MacroAssembler::SaveRegistersToMemory(RegList regs) {
   ASSERT((regs & ~kJSCallerSaved) == 0);
   // Copy the content of registers to memory location.
@@ -1545,8 +1653,11 @@
   // arguments match the expected number of arguments. Fake a
   // parameter count to avoid emitting code to do the check.
   ParameterCount expected(0);
-  InvokeCode(Handle<Code>(code), expected, expected,
-             RelocInfo::CODE_TARGET, flag);
+  InvokeCode(Handle<Code>(code),
+             expected,
+             expected,
+             RelocInfo::CODE_TARGET,
+             flag);
 
   const char* name = Builtins::GetName(id);
   int argc = Builtins::GetArgumentsCount(id);
@@ -1578,7 +1689,7 @@
     } else {
       movq(rax, Immediate(actual.immediate()));
       if (expected.immediate() ==
-          SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
+              SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
         // Don't worry about adapting arguments for built-ins that
         // don't want that done. Skip adaptation code by making it look
         // like we have a match between expected and actual number of
@@ -1684,7 +1795,7 @@
   push(rbp);
   movq(rbp, rsp);
   push(rsi);  // Context.
-  push(Immediate(Smi::FromInt(type)));
+  Push(Smi::FromInt(type));
   movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
   push(kScratchRegister);
   if (FLAG_debug_code) {
@@ -1699,7 +1810,7 @@
 
 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
   if (FLAG_debug_code) {
-    movq(kScratchRegister, Immediate(Smi::FromInt(type)));
+    Move(kScratchRegister, Smi::FromInt(type));
     cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
     Check(equal, "stack frame types must match");
   }
@@ -1708,7 +1819,6 @@
 }
 
 
-
 void MacroAssembler::EnterExitFrame(StackFrame::Type type, int result_size) {
   ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
 
@@ -1721,7 +1831,7 @@
   movq(rbp, rsp);
 
   // Reserve room for entry stack pointer and push the debug marker.
-  ASSERT(ExitFrameConstants::kSPOffset  == -1 * kPointerSize);
+  ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
   push(Immediate(0));  // saved entry sp, patched before call
   push(Immediate(type == StackFrame::EXIT_DEBUG ? 1 : 0));
 
@@ -1834,8 +1944,10 @@
 }
 
 
-Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
-                                   JSObject* holder, Register holder_reg,
+Register MacroAssembler::CheckMaps(JSObject* object,
+                                   Register object_reg,
+                                   JSObject* holder,
+                                   Register holder_reg,
                                    Register scratch,
                                    Label* miss) {
   // Make sure there's no overlap between scratch and the other
@@ -1901,8 +2013,7 @@
   }
 
   // Check the holder map.
-  Cmp(FieldOperand(reg, HeapObject::kMapOffset),
-      Handle<Map>(holder->map()));
+  Cmp(FieldOperand(reg, HeapObject::kMapOffset), Handle<Map>(holder->map()));
   j(not_equal, miss);
 
   // Log the check depth.
@@ -1919,8 +2030,6 @@
 }
 
 
-
-
 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                             Register scratch,
                                             Label* miss) {
@@ -1974,8 +2083,8 @@
 
   movq(kScratchRegister,
        FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
-  int token_offset = Context::kHeaderSize +
-                     Context::SECURITY_TOKEN_INDEX * kPointerSize;
+  int token_offset =
+      Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
   movq(scratch, FieldOperand(scratch, token_offset));
   cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
   j(not_equal, miss);
@@ -2160,5 +2269,4 @@
   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }
 
-
 } }  // namespace v8::internal
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index adc136a..2aa4ce0 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -72,6 +72,18 @@
                    Register value,
                    Register scratch);
 
+  // Set the remembered set bit for [object+offset].
+  // The value is known to not be a smi.
+  // object is the object being stored into, value is the object being stored.
+  // If offset is zero, then the scratch register contains the array index into
+  // the elements array represented as a Smi.
+  // All registers are clobbered by the operation.
+  void RecordWriteNonSmi(Register object,
+                         int offset,
+                         Register value,
+                         Register scratch);
+
+
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // ---------------------------------------------------------------------------
   // Debugger Support
@@ -146,11 +158,12 @@
 
   // Tag an integer value if possible, or jump if the integer value cannot
   // be represented as a smi. Only uses the low 32 bits of the src register.
+  // NOTICE: Destroys the dst register even if unsuccessful!
   void Integer32ToSmi(Register dst, Register src, Label* on_overflow);
 
   // Adds constant to src and tags the result as a smi.
   // Result must be a valid smi.
-  void Integer64AddToSmi(Register dst, Register src, int constant);
+  void Integer64PlusConstantToSmi(Register dst, Register src, int constant);
 
   // Convert smi to 32-bit integer. I.e., not sign extended into
   // high 32 bits of destination.
@@ -165,38 +178,31 @@
                                              Register src,
                                              int power);
 
+  // Simple comparison of smis.
+  void SmiCompare(Register dst, Register src);
+  void SmiCompare(Register dst, Smi* src);
+  void SmiCompare(const Operand& dst, Register src);
+  void SmiCompare(const Operand& dst, Smi* src);
+  // Sets sign and zero flags depending on value of smi in register.
+  void SmiTest(Register src);
+
   // Functions performing a check on a known or potential smi. Returns
   // a condition that is satisfied if the check is successful.
 
   // Is the value a tagged smi.
   Condition CheckSmi(Register src);
 
-  // Is the value not a tagged smi.
-  Condition CheckNotSmi(Register src);
-
   // Is the value a positive tagged smi.
   Condition CheckPositiveSmi(Register src);
 
-  // Is the value not a positive tagged smi.
-  Condition CheckNotPositiveSmi(Register src);
-
   // Are both values tagged smis.
   Condition CheckBothSmi(Register first, Register second);
 
-  // Is one of the values not a tagged smi.
-  Condition CheckNotBothSmi(Register first, Register second);
-
   // Is the value the minimum smi value (since we are using
   // two's complement numbers, negating the value is known to yield
   // a non-smi value).
   Condition CheckIsMinSmi(Register src);
 
-  // Check whether a tagged smi is equal to a constant.
-  Condition CheckSmiEqualsConstant(Register src, int constant);
-
-  // Check whether a tagged smi is greater than or equal to a constant.
-  Condition CheckSmiGreaterEqualsConstant(Register src, int constant);
-
   // Checks whether a 32-bit integer value is valid for conversion
   // to a smi.
   Condition CheckInteger32ValidSmiValue(Register src);
@@ -216,15 +222,9 @@
   // Jump to label if the value is not a positive tagged smi.
   void JumpIfNotPositiveSmi(Register src, Label* on_not_positive_smi);
 
-  // Jump to label if the value is a tagged smi with value equal
+  // Jump to label if the value, which must be a tagged smi, has value equal
   // to the constant.
-  void JumpIfSmiEqualsConstant(Register src, int constant, Label* on_equals);
-
-  // Jump to label if the value is a tagged smi with value greater than or equal
-  // to the constant.
-  void JumpIfSmiGreaterEqualsConstant(Register src,
-                                      int constant,
-                                      Label* on_equals);
+  void JumpIfSmiEqualsConstant(Register src, Smi* constant, Label* on_equals);
 
   // Jump if either or both register are not smi values.
   void JumpIfNotBothSmi(Register src1, Register src2, Label* on_not_both_smi);
@@ -239,29 +239,36 @@
   // the label.
   void SmiTryAddConstant(Register dst,
                          Register src,
-                         int32_t constant,
+                         Smi* constant,
                          Label* on_not_smi_result);
 
+  // Add an integer constant to a tagged smi, giving a tagged smi as result.
+  // No overflow testing on the result is done.
+  void SmiAddConstant(Register dst, Register src, Smi* constant);
+
   // Add an integer constant to a tagged smi, giving a tagged smi as result,
   // or jumping to a label if the result cannot be represented by a smi.
-  // If the label is NULL, no testing on the result is done.
   void SmiAddConstant(Register dst,
                       Register src,
-                      int32_t constant,
+                      Smi* constant,
                       Label* on_not_smi_result);
 
   // Subtract an integer constant from a tagged smi, giving a tagged smi as
+  // result. No testing on the result is done.
+  void SmiSubConstant(Register dst, Register src, Smi* constant);
+
+  // Subtract an integer constant from a tagged smi, giving a tagged smi as
   // result, or jumping to a label if the result cannot be represented by a smi.
-  // If the label is NULL, no testing on the result is done.
   void SmiSubConstant(Register dst,
                       Register src,
-                      int32_t constant,
+                      Smi* constant,
                       Label* on_not_smi_result);
 
   // Negating a smi can give a negative zero or a too-large positive value.
+  // NOTICE: This operation jumps on success, not failure!
   void SmiNeg(Register dst,
               Register src,
-              Label* on_not_smi_result);
+              Label* on_smi_result);
 
   // Adds smi values and return the result as a smi.
   // If dst is src1, then src1 will be destroyed, even if
@@ -307,9 +314,9 @@
   void SmiAnd(Register dst, Register src1, Register src2);
   void SmiOr(Register dst, Register src1, Register src2);
   void SmiXor(Register dst, Register src1, Register src2);
-  void SmiAndConstant(Register dst, Register src1, int constant);
-  void SmiOrConstant(Register dst, Register src1, int constant);
-  void SmiXorConstant(Register dst, Register src1, int constant);
+  void SmiAndConstant(Register dst, Register src1, Smi* constant);
+  void SmiOrConstant(Register dst, Register src1, Smi* constant);
+  void SmiXorConstant(Register dst, Register src1, Smi* constant);
 
   void SmiShiftLeftConstant(Register dst,
                             Register src,
@@ -367,20 +374,27 @@
   // Converts a positive smi to a negative index.
   SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);
 
+  bool IsUnsafeSmi(Smi* value);
+  void LoadUnsafeSmi(Register dst, Smi* source);
+
+  // Basic Smi operations.
+  void Move(Register dst, Smi* source);
+  void Move(const Operand& dst, Smi* source);
+  void Push(Smi* smi);
+  void Test(const Operand& dst, Smi* source);
+
   // ---------------------------------------------------------------------------
   // Macro instructions
 
-  // Expression support
+  // Load a register with a long value as efficiently as possible.
   void Set(Register dst, int64_t x);
   void Set(const Operand& dst, int64_t x);
 
   // Handle support
-  bool IsUnsafeSmi(Smi* value);
   bool IsUnsafeSmi(Handle<Object> value) {
     return IsUnsafeSmi(Smi::cast(*value));
   }
 
-  void LoadUnsafeSmi(Register dst, Smi* source);
   void LoadUnsafeSmi(Register dst, Handle<Object> source) {
     LoadUnsafeSmi(dst, Smi::cast(*source));
   }
@@ -390,7 +404,6 @@
   void Cmp(Register dst, Handle<Object> source);
   void Cmp(const Operand& dst, Handle<Object> source);
   void Push(Handle<Object> source);
-  void Push(Smi* smi);
 
   // Control Flow
   void Jump(Address destination, RelocInfo::Mode rmode);
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 0994230..58a3e0f 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -47,19 +47,24 @@
                        StubCache::Table table,
                        Register name,
                        Register offset) {
-  // The offset register must hold a *positive* smi.
+  ASSERT_EQ(8, kPointerSize);
+  ASSERT_EQ(16, sizeof(StubCache::Entry));
+  // The offset register holds the entry offset times four (due to masking
+  // and shifting optimizations).
   ExternalReference key_offset(SCTableReference::keyReference(table));
   Label miss;
 
   __ movq(kScratchRegister, key_offset);
-  SmiIndex index = masm->SmiToIndex(offset, offset, kPointerSizeLog2);
   // Check that the key in the entry matches the name.
-  __ cmpl(name, Operand(kScratchRegister, index.reg, index.scale, 0));
+  // Multiply entry offset by 16 to get the entry address. Since the
+  // offset register already holds the entry offset times four, multiply
+  // by a further four.
+  __ cmpl(name, Operand(kScratchRegister, offset, times_4, 0));
   __ j(not_equal, &miss);
   // Get the code entry from the cache.
   // Use key_offset + kPointerSize, rather than loading value_offset.
   __ movq(kScratchRegister,
-          Operand(kScratchRegister, index.reg, index.scale, kPointerSize));
+          Operand(kScratchRegister, offset, times_4, kPointerSize));
   // Check that the flags match what we're looking for.
   __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
   __ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
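
The addressing in the stub-cache probe follows from the two asserted sizes:
with 8-byte pointers an entry is 16 bytes, and the offset register holds the
entry index times four, so a single times_4 scaled operand lands on the
entry. A hedged sketch of the arithmetic (the Entry layout here is a
stand-in, not the real StubCache::Entry):

    #include <cassert>
    #include <cstdint>

    struct Entry { void* key; void* value; };  // 16 bytes on x64.

    uint8_t* EntryAddress(uint8_t* table_base, uint64_t offset_times_four) {
      // Equivalent of Operand(kScratchRegister, offset, times_4, 0).
      return table_base + offset_times_four * 4;
    }

    int main() {
      static Entry table[8];
      uint8_t* base = reinterpret_cast<uint8_t*>(table);
      // Entry 3 is at base + 3 * sizeof(Entry); its register value is 3 * 4.
      assert(EntryAddress(base, 12) == reinterpret_cast<uint8_t*>(&table[3]));
      return 0;
    }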
diff --git a/src/x64/virtual-frame-x64.cc b/src/x64/virtual-frame-x64.cc
index 655f4c6..781efd1 100644
--- a/src/x64/virtual-frame-x64.cc
+++ b/src/x64/virtual-frame-x64.cc
@@ -63,14 +63,16 @@
   Comment cmnt(masm(), "[ Enter JS frame");
 
 #ifdef DEBUG
-  // Verify that rdi contains a JS function.  The following code
-  // relies on rax being available for use.
-  Condition not_smi = masm()->CheckNotSmi(rdi);
-  __ Check(not_smi,
-           "VirtualFrame::Enter - rdi is not a function (smi check).");
-  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
-  __ Check(equal,
-           "VirtualFrame::Enter - rdi is not a function (map check).");
+  if (FLAG_debug_code) {
+    // Verify that rdi contains a JS function.  The following code
+    // relies on rax being available for use.
+    Condition not_smi = NegateCondition(masm()->CheckSmi(rdi));
+    __ Check(not_smi,
+             "VirtualFrame::Enter - rdi is not a function (smi check).");
+    __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
+    __ Check(equal,
+             "VirtualFrame::Enter - rdi is not a function (map check).");
+  }
 #endif
 
   EmitPush(rbp);
@@ -197,6 +199,14 @@
 }
 
 
+void VirtualFrame::EmitPush(Smi* smi_value) {
+  ASSERT(stack_pointer_ == element_count() - 1);
+  elements_.Add(FrameElement::MemoryElement());
+  stack_pointer_++;
+  __ Push(smi_value);
+}
+
+
 void VirtualFrame::EmitPush(Handle<Object> value) {
   ASSERT(stack_pointer_ == element_count() - 1);
   elements_.Add(FrameElement::MemoryElement());
@@ -841,7 +851,7 @@
 
   switch (element.type()) {
     case FrameElement::INVALID:
-      __ push(Immediate(Smi::FromInt(0)));
+      __ Push(Smi::FromInt(0));
       break;
 
     case FrameElement::MEMORY:
@@ -883,15 +893,16 @@
   // on the stack.
   int start = Min(begin, stack_pointer_ + 1);
 
-  // If positive we have to adjust the stack pointer.
-  int delta = end - stack_pointer_;
-  if (delta > 0) {
-    stack_pointer_ = end;
-    __ subq(rsp, Immediate(delta * kPointerSize));
-  }
-
+  // Emit normal 'push' instructions for elements above the stack pointer
+  // and use 'mov' instructions for elements at or below it.
   for (int i = start; i <= end; i++) {
-    if (!elements_[i].is_synced()) SyncElementBelowStackPointer(i);
+    if (!elements_[i].is_synced()) {
+      if (i <= stack_pointer_) {
+        SyncElementBelowStackPointer(i);
+      } else {
+        SyncElementByPushing(i);
+      }
+    }
   }
 }
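
A toy model of the SyncRange policy above, with a vector standing in for the
real frame: slots at or below the stack pointer are overwritten in place
(mov), while slots above it are materialized with push, which also advances
the pointer. All names here are illustrative.

    #include <cassert>
    #include <vector>

    void SyncRange(std::vector<int>* memory, int* stack_pointer,
                   const std::vector<int>& elements, int start, int end) {
      for (int i = start; i <= end; i++) {
        if (i <= *stack_pointer) {
          (*memory)[i] = elements[i];      // mov into an existing slot.
        } else {
          memory->push_back(elements[i]);  // push creates the slot...
          (*stack_pointer)++;              // ...and moves the stack pointer.
        }
      }
    }

    int main() {
      std::vector<int> elements = {1, 2, 3, 4};
      std::vector<int> memory = {1, 0};  // Slot 1 is stale.
      int stack_pointer = 1;
      SyncRange(&memory, &stack_pointer, elements, 1, 3);
      assert(memory[1] == 2 && memory[2] == 3 && memory[3] == 4);
      assert(stack_pointer == 3);
      return 0;
    }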
 
@@ -1004,7 +1015,7 @@
   function.ToRegister(rdi);
 
   // Constructors are called with the number of arguments in register
-  // eax for now. Another option would be to have separate construct
+  // rax for now. Another option would be to have separate construct
   // call trampolines per different arguments counts encountered.
   Result num_args = cgen()->allocator()->Allocate(rax);
   ASSERT(num_args.is_valid());
diff --git a/src/x64/virtual-frame-x64.h b/src/x64/virtual-frame-x64.h
index 006148d..e492305 100644
--- a/src/x64/virtual-frame-x64.h
+++ b/src/x64/virtual-frame-x64.h
@@ -377,6 +377,7 @@
   void EmitPush(const Operand& operand);
   void EmitPush(Heap::RootListIndex index);
   void EmitPush(Immediate immediate);
+  void EmitPush(Smi* value);
   // Uses kScratchRegister, emits appropriate relocation info.
   void EmitPush(Handle<Object> value);
 
diff --git a/test/cctest/SConscript b/test/cctest/SConscript
index 9103403..f041041 100644
--- a/test/cctest/SConscript
+++ b/test/cctest/SConscript
@@ -67,7 +67,9 @@
     'test-disasm-ia32.cc',
     'test-log-stack-tracer.cc'
   ],
-  'arch:x64': ['test-assembler-x64.cc', 'test-log-stack-tracer.cc'],
+  'arch:x64': ['test-assembler-x64.cc',
+               'test-macro-assembler-x64.cc',
+               'test-log-stack-tracer.cc'],
   'os:linux':  ['test-platform-linux.cc'],
   'os:macos':  ['test-platform-macos.cc'],
   'os:nullos': ['test-platform-nullos.cc'],
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index 2282c2d..c63ba31 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -25,8 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include <stdlib.h>
-
 #include "v8.h"
 
 #include "api.h"
@@ -574,6 +572,44 @@
 }
 
 
+THREADED_TEST(StringConcat) {
+  {
+    v8::HandleScope scope;
+    LocalContext env;
+    const char* one_byte_string_1 = "function a_times_t";
+    const char* two_byte_string_1 = "wo_plus_b(a, b) {return ";
+    const char* one_byte_extern_1 = "a * 2 + b;} a_times_two_plus_b(4, 8) + ";
+    const char* two_byte_extern_1 = "a_times_two_plus_b(4, 8) + ";
+    const char* one_byte_string_2 = "a_times_two_plus_b(4, 8) + ";
+    const char* two_byte_string_2 = "a_times_two_plus_b(4, 8) + ";
+    const char* two_byte_extern_2 = "a_times_two_plus_b(1, 2);";
+    Local<String> left = v8_str(one_byte_string_1);
+    Local<String> right = String::New(AsciiToTwoByteString(two_byte_string_1));
+    Local<String> source = String::Concat(left, right);
+    right = String::NewExternal(
+        new TestAsciiResource(i::StrDup(one_byte_extern_1)));
+    source = String::Concat(source, right);
+    right = String::NewExternal(
+        new TestResource(AsciiToTwoByteString(two_byte_extern_1)));
+    source = String::Concat(source, right);
+    right = v8_str(one_byte_string_2);
+    source = String::Concat(source, right);
+    right = String::New(AsciiToTwoByteString(two_byte_string_2));
+    source = String::Concat(source, right);
+    right = String::NewExternal(
+        new TestResource(AsciiToTwoByteString(two_byte_extern_2)));
+    source = String::Concat(source, right);
+    Local<Script> script = Script::Compile(source);
+    Local<Value> value = script->Run();
+    CHECK(value->IsNumber());
+    CHECK_EQ(68, value->Int32Value());
+  }
+  v8::internal::CompilationCache::Clear();
+  i::Heap::CollectAllGarbage(false);
+  i::Heap::CollectAllGarbage(false);
+}
+
+
 THREADED_TEST(GlobalProperties) {
   v8::HandleScope scope;
   LocalContext env;
@@ -702,6 +738,88 @@
 }
 
 
+THREADED_TEST(TinyInteger) {
+  v8::HandleScope scope;
+  LocalContext env;
+  int32_t value = 239;
+  Local<v8::Integer> value_obj = v8::Integer::New(value);
+  CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
+}
+
+
+THREADED_TEST(BigSmiInteger) {
+  v8::HandleScope scope;
+  LocalContext env;
+  int32_t value = i::Smi::kMaxValue;
+  // We cannot add one to Smi::kMaxValue without wrapping.
+  if (i::kSmiValueSize < 32) {
+    CHECK(i::Smi::IsValid(value));
+    CHECK(!i::Smi::IsValid(value + 1));
+    Local<v8::Integer> value_obj = v8::Integer::New(value);
+    CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
+  }
+}
+
+
+THREADED_TEST(BigInteger) {
+  v8::HandleScope scope;
+  LocalContext env;
+  // We cannot add one to Smi::kMaxValue without wrapping.
+  if (i::kSmiValueSize < 32) {
+    // The casts allow this to compile, even if Smi::kMaxValue is 2^31-1.
+    // The code will not be run in that case, due to the "if" guard.
+    int32_t value =
+        static_cast<int32_t>(static_cast<uint32_t>(i::Smi::kMaxValue) + 1);
+    CHECK(value > i::Smi::kMaxValue);
+    CHECK(!i::Smi::IsValid(value));
+    Local<v8::Integer> value_obj = v8::Integer::New(value);
+    CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
+  }
+}
+
+
+THREADED_TEST(TinyUnsignedInteger) {
+  v8::HandleScope scope;
+  LocalContext env;
+  uint32_t value = 239;
+  Local<v8::Integer> value_obj = v8::Integer::NewFromUnsigned(value);
+  CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
+}
+
+
+THREADED_TEST(BigUnsignedSmiInteger) {
+  v8::HandleScope scope;
+  LocalContext env;
+  uint32_t value = static_cast<uint32_t>(i::Smi::kMaxValue);
+  CHECK(i::Smi::IsValid(value));
+  CHECK(!i::Smi::IsValid(value + 1));
+  Local<v8::Integer> value_obj = v8::Integer::NewFromUnsigned(value);
+  CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
+}
+
+
+THREADED_TEST(BigUnsignedInteger) {
+  v8::HandleScope scope;
+  LocalContext env;
+  uint32_t value = static_cast<uint32_t>(i::Smi::kMaxValue) + 1;
+  CHECK(value > static_cast<uint32_t>(i::Smi::kMaxValue));
+  CHECK(!i::Smi::IsValid(value));
+  Local<v8::Integer> value_obj = v8::Integer::NewFromUnsigned(value);
+  CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
+}
+
+
+THREADED_TEST(OutOfSignedRangeUnsignedInteger) {
+  v8::HandleScope scope;
+  LocalContext env;
+  uint32_t INT32_MAX_AS_UINT = (1U << 31) - 1;
+  uint32_t value = INT32_MAX_AS_UINT + 1;
+  CHECK(value > INT32_MAX_AS_UINT);  // No overflow.
+  Local<v8::Integer> value_obj = v8::Integer::NewFromUnsigned(value);
+  CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
+}
+
+
 THREADED_TEST(Number) {
   v8::HandleScope scope;
   LocalContext env;
@@ -1346,6 +1464,44 @@
 }
 
 
+THREADED_TEST(InternalFieldsNativePointersAndExternal) {
+  v8::HandleScope scope;
+  LocalContext env;
+
+  Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
+  Local<v8::ObjectTemplate> instance_templ = templ->InstanceTemplate();
+  instance_templ->SetInternalFieldCount(1);
+  Local<v8::Object> obj = templ->GetFunction()->NewInstance();
+  CHECK_EQ(1, obj->InternalFieldCount());
+  CHECK(obj->GetPointerFromInternalField(0) == NULL);
+
+  char* data = new char[100];
+
+  void* aligned = data;
+  CHECK_EQ(0, reinterpret_cast<uintptr_t>(aligned) & 0x1);
+  void* unaligned = data + 1;
+  CHECK_EQ(1, reinterpret_cast<uintptr_t>(unaligned) & 0x1);
+
+  obj->SetPointerInInternalField(0, aligned);
+  i::Heap::CollectAllGarbage(false);
+  CHECK_EQ(aligned, v8::External::Unwrap(obj->GetInternalField(0)));
+
+  obj->SetPointerInInternalField(0, unaligned);
+  i::Heap::CollectAllGarbage(false);
+  CHECK_EQ(unaligned, v8::External::Unwrap(obj->GetInternalField(0)));
+
+  obj->SetInternalField(0, v8::External::Wrap(aligned));
+  i::Heap::CollectAllGarbage(false);
+  CHECK_EQ(aligned, obj->GetPointerFromInternalField(0));
+
+  obj->SetInternalField(0, v8::External::Wrap(unaligned));
+  i::Heap::CollectAllGarbage(false);
+  CHECK_EQ(unaligned, obj->GetPointerFromInternalField(0));
+
+  delete[] data;
+}
+
+
 THREADED_TEST(IdentityHash) {
   v8::HandleScope scope;
   LocalContext env;
@@ -1810,7 +1966,7 @@
   // Build huge string. This should fail with out of memory exception.
   Local<Value> result = CompileRun(
     "var str = Array.prototype.join.call({length: 513}, \"A\").toUpperCase();"
-    "for (var i = 0; i < 21; i++) { str = str + str; }");
+    "for (var i = 0; i < 22; i++) { str = str + str; }");
 
   // Check for out of memory state.
   CHECK(result.IsEmpty());
diff --git a/test/cctest/test-assembler-x64.cc b/test/cctest/test-assembler-x64.cc
index cd750c5..81aa973 100644
--- a/test/cctest/test-assembler-x64.cc
+++ b/test/cctest/test-assembler-x64.cc
@@ -44,6 +44,7 @@
 using v8::internal::rax;
 using v8::internal::rsi;
 using v8::internal::rdi;
+using v8::internal::rcx;
 using v8::internal::rdx;
 using v8::internal::rbp;
 using v8::internal::rsp;
@@ -53,20 +54,28 @@
 using v8::internal::not_equal;
 using v8::internal::greater;
 
-
 // Test the x64 assembler by compiling some simple functions into
 // a buffer and executing them.  These tests do not initialize the
 // V8 library, create a context, or use any V8 objects.
-// The AMD64 calling convention is used, with the first five arguments
-// in RSI, RDI, RDX, RCX, R8, and R9, and floating point arguments in
+// The AMD64 calling convention is used, with the first six arguments
+// in RDI, RSI, RDX, RCX, R8, and R9, and floating point arguments in
 // the XMM registers.  The return value is in RAX.
 // This calling convention is used on Linux, with GCC, and on Mac OS,
-// with GCC.  A different convention is used on 64-bit windows.
+// with GCC.  A different convention is used on 64-bit windows,
+// where the first four integer arguments are passed in RCX, RDX, R8 and R9.
 
 typedef int (*F0)();
 typedef int (*F1)(int64_t x);
 typedef int (*F2)(int64_t x, int64_t y);
 
+#ifdef _WIN64
+static const v8::internal::Register arg1 = rcx;
+static const v8::internal::Register arg2 = rdx;
+#else
+static const v8::internal::Register arg1 = rdi;
+static const v8::internal::Register arg2 = rsi;
+#endif
+
 #define __ assm.
 
 
@@ -80,7 +89,7 @@
   Assembler assm(buffer, actual_size);
 
   // Assemble a simple function that copies argument 2 and returns it.
-  __ movq(rax, rsi);
+  __ movq(rax, arg2);
   __ nop();
   __ ret(0);
 
@@ -105,9 +114,9 @@
   // incorrect stack frames when debugging this function (which has them).
   __ push(rbp);
   __ movq(rbp, rsp);
-  __ push(rsi);  // Value at (rbp - 8)
-  __ push(rsi);  // Value at (rbp - 16)
-  __ push(rdi);  // Value at (rbp - 24)
+  __ push(arg2);  // Value at (rbp - 8)
+  __ push(arg2);  // Value at (rbp - 16)
+  __ push(arg1);  // Value at (rbp - 24)
   __ pop(rax);
   __ pop(rax);
   __ pop(rax);
@@ -132,8 +141,8 @@
   Assembler assm(buffer, actual_size);
 
   // Assemble a simple function that adds arguments returning the sum.
-  __ movq(rax, rsi);
-  __ addq(rax, rdi);
+  __ movq(rax, arg2);
+  __ addq(rax, arg1);
   __ ret(0);
 
   CodeDesc desc;
@@ -154,8 +163,8 @@
 
   // Assemble a simple function that multiplies arguments returning the high
   // word.
-  __ movq(rax, rsi);
-  __ imul(rdi);
+  __ movq(rax, arg2);
+  __ imul(arg1);
   __ movq(rax, rdx);
   __ ret(0);
 
@@ -182,14 +191,16 @@
   // Assemble a simple function that copies argument 2 and returns it.
   __ push(rbp);
   __ movq(rbp, rsp);
-  __ push(rsi);  // Value at (rbp - 8)
-  __ push(rsi);  // Value at (rbp - 16)
-  __ push(rdi);  // Value at (rbp - 24)
+
+  __ push(arg2);  // Value at (rbp - 8)
+  __ push(arg2);  // Value at (rbp - 16)
+  __ push(arg1);  // Value at (rbp - 24)
+
   const int kStackElementSize = 8;
   __ movq(rax, Operand(rbp, -3 * kStackElementSize));
-  __ pop(rsi);
-  __ pop(rsi);
-  __ pop(rsi);
+  __ pop(arg2);
+  __ pop(arg2);
+  __ pop(arg2);
   __ pop(rbp);
   __ nop();
   __ ret(0);
@@ -210,13 +221,14 @@
   CHECK(buffer);
   Assembler assm(buffer, actual_size);
 
-  // Assemble a simple function that copies argument 2 and returns it.
+  // Assemble a simple function that copies argument 1 and returns it.
   __ push(rbp);
+
   __ movq(rbp, rsp);
-  __ movq(rax, rdi);
+  __ movq(rax, arg1);
   Label target;
   __ jmp(&target);
-  __ movq(rax, rsi);
+  __ movq(rax, arg2);
   __ bind(&target);
   __ pop(rbp);
   __ ret(0);
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index 1da363c..4ffcee3 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -3539,6 +3539,52 @@
 }
 
 
+// We match parts of the message to decide if it is an exception message.
+bool IsExceptionEventMessage(char* message) {
+  const char* type_event = "\"type\":\"event\"";
+  const char* event_exception = "\"event\":\"exception\"";
+  // Does the message contain both type:event and event:exception?
+  return strstr(message, type_event) != NULL &&
+      strstr(message, event_exception) != NULL;
+}
+
+
+// We match parts of the message to decide if it is an evaluate response.
+bool IsEvaluateResponseMessage(char* message) {
+  const char* type_response = "\"type\":\"response\"";
+  const char* command_evaluate = "\"command\":\"evaluate\"";
+  // Does the message contain both type:response and command:evaluate?
+  return strstr(message, type_response) != NULL &&
+         strstr(message, command_evaluate) != NULL;
+}
+
+
+// We match parts of the message to get evaluate result int value.
+int GetEvaluateIntResult(char* message) {
+  const char* value = "\"value\":";
+  char* pos = strstr(message, value);
+  if (pos == NULL) {
+    return -1;
+  }
+  int res = -1;
+  res = atoi(pos + strlen(value));
+  return res;
+}
+
+
+// We match parts of the message to get hit breakpoint id.
+int GetBreakpointIdFromBreakEventMessage(char* message) {
+  const char* breakpoints = "\"breakpoints\":[";
+  char* pos = strstr(message, breakpoints);
+  if (pos == NULL) {
+    return -1;
+  }
+  int res = -1;
+  res = atoi(pos + strlen(breakpoints));
+  return res;
+}
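
The matchers above all share one strategy: find a fixed JSON key in the
flattened message text and read whatever follows it. A self-contained
illustration of that strategy on a made-up message (the JSON here is
illustrative, not a captured V8 payload):

    #include <cassert>
    #include <cstdlib>
    #include <cstring>

    // Same approach as GetBreakpointIdFromBreakEventMessage: locate the key,
    // then atoi the text right after it; -1 means the key was absent.
    static int FirstIntAfter(const char* message, const char* key) {
      const char* pos = strstr(message, key);
      return pos == NULL ? -1 : atoi(pos + strlen(key));
    }

    int main() {
      const char* message = "{\"type\":\"event\",\"event\":\"break\","
                            "\"body\":{\"breakpoints\":[2]}}";
      assert(FirstIntAfter(message, "\"breakpoints\":[") == 2);
      assert(FirstIntAfter(message, "\"value\":") == -1);  // Key absent.
      return 0;
    }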
+
+
 /* Test MessageQueues */
 /* Tests the message queues that hold debugger commands and
  * response messages to the debugger.  Fills queues and makes
@@ -3566,8 +3612,6 @@
   // Allow message handler to block on a semaphore, to test queueing of
   // messages while blocked.
   message_queue_barriers.semaphore_1->Wait();
-  printf("%s\n", print_buffer);
-  fflush(stdout);
 }
 
 void MessageQueueDebuggerThread::Run() {
@@ -3822,8 +3866,6 @@
   if (IsBreakEventMessage(print_buffer)) {
     threaded_debugging_barriers.barrier_2.Wait();
   }
-  printf("%s\n", print_buffer);
-  fflush(stdout);
 }
 
 
@@ -3911,16 +3953,20 @@
 
 
 Barriers* breakpoints_barriers;
+int break_event_breakpoint_id;
+int evaluate_int_result;
 
 static void BreakpointsMessageHandler(const v8::Debug::Message& message) {
   static char print_buffer[1000];
   v8::String::Value json(message.GetJSON());
   Utf16ToAscii(*json, json.length(), print_buffer);
-  printf("%s\n", print_buffer);
-  fflush(stdout);
 
-  // Is break_template a prefix of the message?
   if (IsBreakEventMessage(print_buffer)) {
+    break_event_breakpoint_id =
+        GetBreakpointIdFromBreakEventMessage(print_buffer);
+    breakpoints_barriers->semaphore_1->Signal();
+  } else if (IsEvaluateResponseMessage(print_buffer)) {
+    evaluate_int_result = GetEvaluateIntResult(print_buffer);
     breakpoints_barriers->semaphore_1->Signal();
   }
 }
@@ -3930,9 +3976,9 @@
   const char* source_1 = "var y_global = 3;\n"
     "function cat( new_value ) {\n"
     "  var x = new_value;\n"
-    "  y_global = 4;\n"
+    "  y_global = y_global + 4;\n"
     "  x = 3 * x + 1;\n"
-    "  y_global = 5;\n"
+    "  y_global = y_global + 5;\n"
     "  return x;\n"
     "}\n"
     "\n"
@@ -3970,59 +4016,76 @@
       "\"type\":\"request\","
       "\"command\":\"setbreakpoint\","
       "\"arguments\":{\"type\":\"function\",\"target\":\"dog\",\"line\":3}}";
-  const char* command_3 = "{\"seq\":104,"
+  const char* command_3 = "{\"seq\":103,"
       "\"type\":\"request\","
       "\"command\":\"evaluate\","
       "\"arguments\":{\"expression\":\"dog()\",\"disable_break\":false}}";
-  const char* command_4 = "{\"seq\":105,"
+  const char* command_4 = "{\"seq\":104,"
       "\"type\":\"request\","
       "\"command\":\"evaluate\","
-      "\"arguments\":{\"expression\":\"x\",\"disable_break\":true}}";
-  const char* command_5 = "{\"seq\":106,"
+      "\"arguments\":{\"expression\":\"x + 1\",\"disable_break\":true}}";
+  const char* command_5 = "{\"seq\":105,"
       "\"type\":\"request\","
       "\"command\":\"continue\"}";
-  const char* command_6 = "{\"seq\":107,"
+  const char* command_6 = "{\"seq\":106,"
       "\"type\":\"request\","
       "\"command\":\"continue\"}";
-  const char* command_7 = "{\"seq\":108,"
+  const char* command_7 = "{\"seq\":107,"
      "\"type\":\"request\","
      "\"command\":\"evaluate\","
      "\"arguments\":{\"expression\":\"dog()\",\"disable_break\":true}}";
-  const char* command_8 = "{\"seq\":109,"
+  const char* command_8 = "{\"seq\":108,"
       "\"type\":\"request\","
       "\"command\":\"continue\"}";
 
 
   // v8 thread initializes, runs source_1
   breakpoints_barriers->barrier_1.Wait();
-  // 1:Set breakpoint in cat().
+  // 1:Set breakpoint in cat() (will get id 1).
   v8::Debug::SendCommand(buffer, AsciiToUtf16(command_1, buffer));
-  // 2:Set breakpoint in dog()
+  // 2:Set breakpoint in dog() (will get id 2).
   v8::Debug::SendCommand(buffer, AsciiToUtf16(command_2, buffer));
   breakpoints_barriers->barrier_2.Wait();
-  // v8 thread starts compiling source_2.
+  // V8 thread starts compiling source_2.
   // Automatic break happens, to run queued commands
   // breakpoints_barriers->semaphore_1->Wait();
   // Commands 1 through 3 run, thread continues.
   // v8 thread runs source_2 to breakpoint in cat().
   // message callback receives break event.
   breakpoints_barriers->semaphore_1->Wait();
+  // Must have hit breakpoint #1.
+  CHECK_EQ(1, break_event_breakpoint_id);
   // 4:Evaluate dog() (which has a breakpoint).
   v8::Debug::SendCommand(buffer, AsciiToUtf16(command_3, buffer));
-  // v8 thread hits breakpoint in dog()
+  // V8 thread hits breakpoint in dog().
   breakpoints_barriers->semaphore_1->Wait();  // wait for break event
-  // 5:Evaluate x
+  // Must have hit breakpoint #2.
+  CHECK_EQ(2, break_event_breakpoint_id);
+  // 5:Evaluate (x + 1).
   v8::Debug::SendCommand(buffer, AsciiToUtf16(command_4, buffer));
-  // 6:Continue evaluation of dog()
+  // Evaluate (x + 1) finishes.
+  breakpoints_barriers->semaphore_1->Wait();
+  // Must have result 108.
+  CHECK_EQ(108, evaluate_int_result);
+  // 6:Continue evaluation of dog().
   v8::Debug::SendCommand(buffer, AsciiToUtf16(command_5, buffer));
-  // dog() finishes.
+  // Evaluate dog() finishes.
+  breakpoints_barriers->semaphore_1->Wait();
+  // Must have result 107.
+  CHECK_EQ(107, evaluate_int_result);
   // 7:Continue evaluation of source_2, finish cat(17), hit breakpoint
   // in cat(19).
   v8::Debug::SendCommand(buffer, AsciiToUtf16(command_6, buffer));
-  // message callback gets break event
+  // Message callback gets break event.
   breakpoints_barriers->semaphore_1->Wait();  // wait for break event
-  // 8: Evaluate dog() with breaks disabled
+  // Must have hit breakpoint #1.
+  CHECK_EQ(1, break_event_breakpoint_id);
+  // 8: Evaluate dog() with breaks disabled.
   v8::Debug::SendCommand(buffer, AsciiToUtf16(command_7, buffer));
+  // Evaluate dog() finishes.
+  breakpoints_barriers->semaphore_1->Wait();
+  // Must have result 116.
+  CHECK_EQ(116, evaluate_int_result);
   // 9: Continue evaluation of source2, reach end.
   v8::Debug::SendCommand(buffer, AsciiToUtf16(command_8, buffer));
 }
@@ -4325,7 +4388,13 @@
 static void MessageHandlerHitCount(const v8::Debug::Message& message) {
   message_handler_hit_count++;
 
-  SendContinueCommand();
+  static char print_buffer[1000];
+  v8::String::Value json(message.GetJSON());
+  Utf16ToAscii(*json, json.length(), print_buffer);
+  if (IsExceptionEventMessage(print_buffer)) {
+    // Send a continue command for exception events.
+    SendContinueCommand();
+  }
 }
 
 
@@ -4415,8 +4484,6 @@
   static char print_buffer[1000];
   v8::String::Value json(message.GetJSON());
   Utf16ToAscii(*json, json.length(), print_buffer);
-  printf("%s\n", print_buffer);
-  fflush(stdout);
 }
 
 
@@ -4776,8 +4843,12 @@
       expected_context_data));
   message_handler_hit_count++;
 
+  static char print_buffer[1000];
+  v8::String::Value json(message.GetJSON());
+  Utf16ToAscii(*json, json.length(), print_buffer);
+
   // Send a continue command for break events.
-  if (message.GetEvent() == v8::Break) {
+  if (IsBreakEventMessage(print_buffer)) {
     SendContinueCommand();
   }
 }
@@ -5016,7 +5087,11 @@
       expected_context_data));
   message_handler_hit_count++;
 
-  if (message.IsEvent() && message.GetEvent() == v8::Break) {
+  static char print_buffer[1000];
+  v8::String::Value json(message.GetJSON());
+  Utf16ToAscii(*json, json.length(), print_buffer);
+
+  if (IsBreakEventMessage(print_buffer)) {
     break_count++;
     if (!sent_eval) {
       sent_eval = true;
@@ -5038,7 +5113,8 @@
       SendContinueCommand();
       continue_command_send_count++;
     }
-  } else if (message.IsResponse() && continue_command_send_count < 2) {
+  } else if (IsEvaluateResponseMessage(print_buffer) &&
+      continue_command_send_count < 2) {
     // Response to the evaluation request. We're still on the breakpoint so
     // send continue.
     SendContinueCommand();
diff --git a/test/cctest/test-disasm-ia32.cc b/test/cctest/test-disasm-ia32.cc
index af9fb97..74db234 100644
--- a/test/cctest/test-disasm-ia32.cc
+++ b/test/cctest/test-disasm-ia32.cc
@@ -363,7 +363,31 @@
     __ divsd(xmm1, xmm0);
     __ movdbl(xmm1, Operand(ebx, ecx, times_4, 10000));
     __ movdbl(Operand(ebx, ecx, times_4, 10000), xmm1);
+    __ comisd(xmm0, xmm1);
   }
+
+  // cmov.
+  {
+    CHECK(CpuFeatures::IsSupported(CpuFeatures::CMOV));
+    CpuFeatures::Scope use_cmov(CpuFeatures::CMOV);
+    __ cmov(overflow, eax, Operand(eax, 0));
+    __ cmov(no_overflow, eax, Operand(eax, 1));
+    __ cmov(below, eax, Operand(eax, 2));
+    __ cmov(above_equal, eax, Operand(eax, 3));
+    __ cmov(equal, eax, Operand(ebx, 0));
+    __ cmov(not_equal, eax, Operand(ebx, 1));
+    __ cmov(below_equal, eax, Operand(ebx, 2));
+    __ cmov(above, eax, Operand(ebx, 3));
+    __ cmov(sign, eax, Operand(ecx, 0));
+    __ cmov(not_sign, eax, Operand(ecx, 1));
+    __ cmov(parity_even, eax, Operand(ecx, 2));
+    __ cmov(parity_odd, eax, Operand(ecx, 3));
+    __ cmov(less, eax, Operand(edx, 0));
+    __ cmov(greater_equal, eax, Operand(edx, 1));
+    __ cmov(less_equal, eax, Operand(edx, 2));
+    __ cmov(greater, eax, Operand(edx, 3));
+  }
+
   __ ret(0);
 
   CodeDesc desc;
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index eb32b65..9911ce4 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -132,15 +132,19 @@
   CHECK(value->IsNumber());
   CHECK_EQ(Smi::kMaxValue, Smi::cast(value)->value());
 
+#ifndef V8_TARGET_ARCH_X64
+  // TODO(lrn): We need a NumberFromIntptr function in order to test this.
   value = Heap::NumberFromInt32(Smi::kMinValue - 1);
   CHECK(value->IsHeapNumber());
   CHECK(value->IsNumber());
   CHECK_EQ(static_cast<double>(Smi::kMinValue - 1), value->Number());
+#endif
 
-  value = Heap::NumberFromInt32(Smi::kMaxValue + 1);
+  value = Heap::NumberFromUint32(static_cast<uint32_t>(Smi::kMaxValue) + 1);
   CHECK(value->IsHeapNumber());
   CHECK(value->IsNumber());
-  CHECK_EQ(static_cast<double>(Smi::kMaxValue + 1), value->Number());
+  CHECK_EQ(static_cast<double>(static_cast<uint32_t>(Smi::kMaxValue) + 1),
+           value->Number());
 
   // nan oddball checks
   CHECK(Heap::nan_value()->IsNumber());
@@ -640,8 +644,9 @@
   CHECK_EQ(Smi::FromInt(1), array->length());
   CHECK_EQ(array->GetElement(0), name);
 
-  // Set array length with larger than smi value.
-  Object* length = Heap::NumberFromInt32(Smi::kMaxValue + 1);
+  // Set the array length to a value larger than the largest smi.
+  Object* length =
+      Heap::NumberFromUint32(static_cast<uint32_t>(Smi::kMaxValue) + 1);
   array->SetElementsLength(length);
 
   uint32_t int_length = 0;
diff --git a/test/cctest/test-macro-assembler-x64.cc b/test/cctest/test-macro-assembler-x64.cc
new file mode 100755
index 0000000..9c1197f
--- /dev/null
+++ b/test/cctest/test-macro-assembler-x64.cc
@@ -0,0 +1,2096 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "macro-assembler.h"
+#include "factory.h"
+#include "platform.h"
+#include "serialize.h"
+#include "cctest.h"
+
+using v8::internal::byte;
+using v8::internal::OS;
+using v8::internal::Assembler;
+using v8::internal::Condition;
+using v8::internal::MacroAssembler;
+using v8::internal::HandleScope;
+using v8::internal::Operand;
+using v8::internal::Immediate;
+using v8::internal::SmiIndex;
+using v8::internal::Label;
+using v8::internal::RelocInfo;
+using v8::internal::rax;
+using v8::internal::rbx;
+using v8::internal::rsi;
+using v8::internal::rdi;
+using v8::internal::rcx;
+using v8::internal::rdx;
+using v8::internal::rbp;
+using v8::internal::rsp;
+using v8::internal::r8;
+using v8::internal::r9;
+using v8::internal::r11;
+using v8::internal::r12;
+using v8::internal::r13;
+using v8::internal::r14;
+using v8::internal::r15;
+using v8::internal::FUNCTION_CAST;
+using v8::internal::CodeDesc;
+using v8::internal::less_equal;
+using v8::internal::not_equal;
+using v8::internal::not_zero;
+using v8::internal::greater;
+using v8::internal::greater_equal;
+using v8::internal::carry;
+using v8::internal::not_carry;
+using v8::internal::negative;
+using v8::internal::positive;
+using v8::internal::Smi;
+using v8::internal::kSmiTagMask;
+using v8::internal::kSmiValueSize;
+
+// Test the x64 assembler by compiling some simple functions into
+// a buffer and executing them.  These tests do not initialize the
+// V8 library, create a context, or use any V8 objects.
+// The AMD64 calling convention is used, with the first six arguments
+// in RDI, RSI, RDX, RCX, R8, and R9, and floating point arguments in
+// the XMM registers.  The return value is in RAX.
+// This calling convention is used on Linux, with GCC, and on Mac OS,
+// with GCC.  A different convention is used on 64-bit windows.
+
+typedef int (*F0)();
+
+#define __ masm->
+
+TEST(Smi) {
+  // Check that C++ Smi operations work as expected.
+  intptr_t test_numbers[] = {
+      0, 1, -1, 127, 128, -128, -129, 255, 256, -256, -257,
+      Smi::kMaxValue, static_cast<intptr_t>(Smi::kMaxValue) + 1,
+      Smi::kMinValue, static_cast<intptr_t>(Smi::kMinValue) - 1
+  };
+  int test_number_count = 15;
+  for (int i = 0; i < test_number_count; i++) {
+    intptr_t number = test_numbers[i];
+    bool is_valid = Smi::IsValid(number);
+    bool is_in_range = number >= Smi::kMinValue && number <= Smi::kMaxValue;
+    CHECK_EQ(is_in_range, is_valid);
+    if (is_valid) {
+      Smi* smi_from_intptr = Smi::FromIntptr(number);
+      if (static_cast<int>(number) == number) {  // Is a 32-bit int.
+        Smi* smi_from_int = Smi::FromInt(static_cast<int32_t>(number));
+        CHECK_EQ(smi_from_int, smi_from_intptr);
+      }
+      int smi_value = smi_from_intptr->value();
+      CHECK_EQ(number, static_cast<intptr_t>(smi_value));
+    }
+  }
+}
+
+
+static void TestMoveSmi(MacroAssembler* masm, Label* exit, int id, Smi* value) {
+  __ movl(rax, Immediate(id));
+  __ Move(rcx, value);
+  __ Set(rdx, reinterpret_cast<intptr_t>(value));
+  __ cmpq(rcx, rdx);
+  __ j(not_equal, exit);
+}
+
+
+// Test that we can move a Smi value literally into a register.
+TEST(SmiMove) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                                   &actual_size,
+                                                   true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+  MacroAssembler* masm = &assembler;  // Create a pointer for the __ macro.
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  TestMoveSmi(masm, &exit, 1, Smi::FromInt(0));
+  TestMoveSmi(masm, &exit, 2, Smi::FromInt(127));
+  TestMoveSmi(masm, &exit, 3, Smi::FromInt(128));
+  TestMoveSmi(masm, &exit, 4, Smi::FromInt(255));
+  TestMoveSmi(masm, &exit, 5, Smi::FromInt(256));
+  TestMoveSmi(masm, &exit, 6, Smi::FromInt(Smi::kMaxValue));
+  TestMoveSmi(masm, &exit, 7, Smi::FromInt(-1));
+  TestMoveSmi(masm, &exit, 8, Smi::FromInt(-128));
+  TestMoveSmi(masm, &exit, 9, Smi::FromInt(-129));
+  TestMoveSmi(masm, &exit, 10, Smi::FromInt(-256));
+  TestMoveSmi(masm, &exit, 11, Smi::FromInt(-257));
+  TestMoveSmi(masm, &exit, 12, Smi::FromInt(Smi::kMinValue));
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
+void TestSmiCompare(MacroAssembler* masm, Label* exit, int id, int x, int y) {
+  __ Move(rcx, Smi::FromInt(x));
+  __ movq(r8, rcx);
+  __ Move(rdx, Smi::FromInt(y));
+  __ movq(r9, rdx);
+  __ SmiCompare(rcx, rdx);
+  if (x < y) {
+    __ movl(rax, Immediate(id + 1));
+    __ j(greater_equal, exit);
+  } else if (x > y) {
+    __ movl(rax, Immediate(id + 2));
+    __ j(less_equal, exit);
+  } else {
+    ASSERT_EQ(x, y);
+    __ movl(rax, Immediate(id + 3));
+    __ j(not_equal, exit);
+  }
+  __ movl(rax, Immediate(id + 4));
+  __ cmpq(rcx, r8);
+  __ j(not_equal, exit);
+  __ incq(rax);
+  __ cmpq(rdx, r9);
+  __ j(not_equal, exit);
+
+  if (x != y) {
+    __ SmiCompare(rdx, rcx);
+    if (y < x) {
+      __ movl(rax, Immediate(id + 9));
+      __ j(greater_equal, exit);
+    } else {
+      ASSERT(y > x);
+      __ movl(rax, Immediate(id + 10));
+      __ j(less_equal, exit);
+    }
+  } else {
+    __ SmiCompare(rcx, rcx);
+    __ movl(rax, Immediate(id + 11));
+    __ j(not_equal, exit);
+    __ incq(rax);
+    __ cmpq(rcx, r8);
+    __ j(not_equal, exit);
+  }
+}
+
+
+// Test that we can compare smis for equality (and more).
+TEST(SmiCompare) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                      &actual_size,
+                                      true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  TestSmiCompare(masm, &exit, 0x10, 0, 0);
+  TestSmiCompare(masm, &exit, 0x20, 0, 1);
+  TestSmiCompare(masm, &exit, 0x30, 1, 0);
+  TestSmiCompare(masm, &exit, 0x40, 1, 1);
+  TestSmiCompare(masm, &exit, 0x50, 0, -1);
+  TestSmiCompare(masm, &exit, 0x60, -1, 0);
+  TestSmiCompare(masm, &exit, 0x70, -1, -1);
+  TestSmiCompare(masm, &exit, 0x80, 0, Smi::kMinValue);
+  TestSmiCompare(masm, &exit, 0x90, Smi::kMinValue, 0);
+  TestSmiCompare(masm, &exit, 0xA0, 0, Smi::kMaxValue);
+  TestSmiCompare(masm, &exit, 0xB0, Smi::kMaxValue, 0);
+  TestSmiCompare(masm, &exit, 0xC0, -1, Smi::kMinValue);
+  TestSmiCompare(masm, &exit, 0xD0, Smi::kMinValue, -1);
+  TestSmiCompare(masm, &exit, 0xE0, -1, Smi::kMaxValue);
+  TestSmiCompare(masm, &exit, 0xF0, Smi::kMaxValue, -1);
+  TestSmiCompare(masm, &exit, 0x100, Smi::kMinValue, Smi::kMinValue);
+  TestSmiCompare(masm, &exit, 0x110, Smi::kMinValue, Smi::kMaxValue);
+  TestSmiCompare(masm, &exit, 0x120, Smi::kMaxValue, Smi::kMinValue);
+  TestSmiCompare(masm, &exit, 0x130, Smi::kMaxValue, Smi::kMaxValue);
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
+TEST(Integer32ToSmi) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                                 &actual_size,
+                                                 true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  __ movq(rax, Immediate(1));  // Test number.
+  __ movl(rcx, Immediate(0));
+  __ Integer32ToSmi(rcx, rcx);
+  __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(0)));
+  __ SmiCompare(rcx, rdx);
+  __ j(not_equal, &exit);
+
+  __ movq(rax, Immediate(2));  // Test number.
+  __ movl(rcx, Immediate(1024));
+  __ Integer32ToSmi(rcx, rcx);
+  __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(1024)));
+  __ SmiCompare(rcx, rdx);
+  __ j(not_equal, &exit);
+
+  __ movq(rax, Immediate(3));  // Test number.
+  __ movl(rcx, Immediate(-1));
+  __ Integer32ToSmi(rcx, rcx);
+  __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(-1)));
+  __ SmiCompare(rcx, rdx);
+  __ j(not_equal, &exit);
+
+  __ movq(rax, Immediate(4));  // Test number.
+  __ movl(rcx, Immediate(Smi::kMaxValue));
+  __ Integer32ToSmi(rcx, rcx);
+  __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(Smi::kMaxValue)));
+  __ SmiCompare(rcx, rdx);
+  __ j(not_equal, &exit);
+
+  __ movq(rax, Immediate(5));  // Test number.
+  __ movl(rcx, Immediate(Smi::kMinValue));
+  __ Integer32ToSmi(rcx, rcx);
+  __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(Smi::kMinValue)));
+  __ SmiCompare(rcx, rdx);
+  __ j(not_equal, &exit);
+
+  // Different target register.
+
+  __ movq(rax, Immediate(6));  // Test number.
+  __ movl(rcx, Immediate(0));
+  __ Integer32ToSmi(r8, rcx);
+  __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(0)));
+  __ SmiCompare(r8, rdx);
+  __ j(not_equal, &exit);
+
+  __ movq(rax, Immediate(7));  // Test number.
+  __ movl(rcx, Immediate(1024));
+  __ Integer32ToSmi(r8, rcx);
+  __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(1024)));
+  __ SmiCompare(r8, rdx);
+  __ j(not_equal, &exit);
+
+  __ movq(rax, Immediate(8));  // Test number.
+  __ movl(rcx, Immediate(-1));
+  __ Integer32ToSmi(r8, rcx);
+  __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(-1)));
+  __ SmiCompare(r8, rdx);
+  __ j(not_equal, &exit);
+
+  __ movq(rax, Immediate(9));  // Test number.
+  __ movl(rcx, Immediate(Smi::kMaxValue));
+  __ Integer32ToSmi(r8, rcx);
+  __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(Smi::kMaxValue)));
+  __ SmiCompare(r8, rdx);
+  __ j(not_equal, &exit);
+
+  __ movq(rax, Immediate(10));  // Test number.
+  __ movl(rcx, Immediate(Smi::kMinValue));
+  __ Integer32ToSmi(r8, rcx);
+  __ Set(rdx, reinterpret_cast<intptr_t>(Smi::FromInt(Smi::kMinValue)));
+  __ SmiCompare(r8, rdx);
+  __ j(not_equal, &exit);
+
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
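+// Checks Integer64PlusConstantToSmi, which tags (x + y) as a smi. The
+// caller must guarantee that the sum is a valid smi; the helper also
+// verifies that the source register survives when it is distinct from
+// the destination.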
+void TestI64PlusConstantToSmi(MacroAssembler* masm,
+                              Label* exit,
+                              int id,
+                              int64_t x,
+                              int y) {
+  int64_t result = x + y;
+  ASSERT(Smi::IsValid(result));
+  __ movl(rax, Immediate(id));
+  __ Move(r8, Smi::FromIntptr(result));  // result is an int64_t.
+  __ movq(rcx, x, RelocInfo::NONE);
+  __ movq(r11, rcx);
+  __ Integer64PlusConstantToSmi(rdx, rcx, y);
+  __ SmiCompare(rdx, r8);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiCompare(r11, rcx);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ Integer64PlusConstantToSmi(rcx, rcx, y);
+  __ SmiCompare(rcx, r8);
+  __ j(not_equal, exit);
+}
+
+
+TEST(Integer64PlusConstantToSmi) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                                 &actual_size,
+                                                 true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  int64_t twice_max = static_cast<int64_t>(Smi::kMaxValue) * 2;
+
+  TestI64PlusConstantToSmi(masm, &exit, 0x10, 0, 0);
+  TestI64PlusConstantToSmi(masm, &exit, 0x20, 0, 1);
+  TestI64PlusConstantToSmi(masm, &exit, 0x30, 1, 0);
+  TestI64PlusConstantToSmi(masm, &exit, 0x40, Smi::kMaxValue - 5, 5);
+  TestI64PlusConstantToSmi(masm, &exit, 0x50, Smi::kMinValue + 5, 5);
+  TestI64PlusConstantToSmi(masm, &exit, 0x60, twice_max, -Smi::kMaxValue);
+  TestI64PlusConstantToSmi(masm, &exit, 0x70, -twice_max, Smi::kMaxValue);
+  TestI64PlusConstantToSmi(masm, &exit, 0x80, 0, Smi::kMinValue);
+  TestI64PlusConstantToSmi(masm, &exit, 0x90, 0, Smi::kMaxValue);
+  TestI64PlusConstantToSmi(masm, &exit, 0xA0, Smi::kMinValue, 0);
+  TestI64PlusConstantToSmi(masm, &exit, 0xB0, Smi::kMaxValue, 0);
+  TestI64PlusConstantToSmi(masm, &exit, 0xC0, twice_max, Smi::kMinValue);
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
+TEST(SmiCheck) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                                 &actual_size,
+                                                 true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+  Condition cond;
+
+  __ movl(rax, Immediate(1));  // Test number.
+
+  // CheckSmi
+
+  __ movl(rcx, Immediate(0));
+  __ Integer32ToSmi(rcx, rcx);
+  cond = masm->CheckSmi(rcx);
+  __ j(NegateCondition(cond), &exit);
+
+  __ incq(rax);
+  __ xor_(rcx, Immediate(kSmiTagMask));
+  cond = masm->CheckSmi(rcx);
+  __ j(cond, &exit);
+
+  __ incq(rax);
+  __ movl(rcx, Immediate(-1));
+  __ Integer32ToSmi(rcx, rcx);
+  cond = masm->CheckSmi(rcx);
+  __ j(NegateCondition(cond), &exit);
+
+  __ incq(rax);
+  __ xor_(rcx, Immediate(kSmiTagMask));
+  cond = masm->CheckSmi(rcx);
+  __ j(cond, &exit);
+
+  __ incq(rax);
+  __ movl(rcx, Immediate(Smi::kMaxValue));
+  __ Integer32ToSmi(rcx, rcx);
+  cond = masm->CheckSmi(rcx);
+  __ j(NegateCondition(cond), &exit);
+
+  __ incq(rax);
+  __ xor_(rcx, Immediate(kSmiTagMask));
+  cond = masm->CheckSmi(rcx);
+  __ j(cond, &exit);
+
+  __ incq(rax);
+  __ movl(rcx, Immediate(Smi::kMinValue));
+  __ Integer32ToSmi(rcx, rcx);
+  cond = masm->CheckSmi(rcx);
+  __ j(NegateCondition(cond), &exit);
+
+  __ incq(rax);
+  __ xor_(rcx, Immediate(kSmiTagMask));
+  cond = masm->CheckSmi(rcx);
+  __ j(cond, &exit);
+
+  // CheckPositiveSmi
+
+  __ incq(rax);
+  __ movl(rcx, Immediate(0));
+  __ Integer32ToSmi(rcx, rcx);
+  cond = masm->CheckPositiveSmi(rcx);  // Zero counts as positive.
+  __ j(NegateCondition(cond), &exit);
+
+  __ incq(rax);
+  __ xor_(rcx, Immediate(kSmiTagMask));
+  cond = masm->CheckPositiveSmi(rcx);  // "zero" non-smi.
+  __ j(cond, &exit);
+
+  __ incq(rax);
+  __ movq(rcx, Immediate(-1));
+  __ Integer32ToSmi(rcx, rcx);
+  cond = masm->CheckPositiveSmi(rcx);  // Negative smis are not positive.
+  __ j(cond, &exit);
+
+  __ incq(rax);
+  __ movq(rcx, Immediate(Smi::kMinValue));
+  __ Integer32ToSmi(rcx, rcx);
+  cond = masm->CheckPositiveSmi(rcx);  // Most negative smi is not positive.
+  __ j(cond, &exit);
+
+  __ incq(rax);
+  __ xor_(rcx, Immediate(kSmiTagMask));
+  cond = masm->CheckPositiveSmi(rcx);  // "Negative" non-smi.
+  __ j(cond, &exit);
+
+  __ incq(rax);
+  __ movq(rcx, Immediate(Smi::kMaxValue));
+  __ Integer32ToSmi(rcx, rcx);
+  cond = masm->CheckPositiveSmi(rcx);  // Most positive smi is positive.
+  __ j(NegateCondition(cond), &exit);
+
+  __ incq(rax);
+  __ xor_(rcx, Immediate(kSmiTagMask));
+  cond = masm->CheckPositiveSmi(rcx);  // "Positive" non-smi.
+  __ j(cond, &exit);
+
+  // CheckIsMinSmi
+
+  __ incq(rax);
+  __ movq(rcx, Immediate(Smi::kMaxValue));
+  __ Integer32ToSmi(rcx, rcx);
+  cond = masm->CheckIsMinSmi(rcx);
+  __ j(cond, &exit);
+
+  __ incq(rax);
+  __ movq(rcx, Immediate(0));
+  __ Integer32ToSmi(rcx, rcx);
+  cond = masm->CheckIsMinSmi(rcx);
+  __ j(cond, &exit);
+
+  __ incq(rax);
+  __ movq(rcx, Immediate(Smi::kMinValue));
+  __ Integer32ToSmi(rcx, rcx);
+  cond = masm->CheckIsMinSmi(rcx);
+  __ j(NegateCondition(cond), &exit);
+
+  __ incq(rax);
+  __ movq(rcx, Immediate(Smi::kMinValue + 1));
+  __ Integer32ToSmi(rcx, rcx);
+  cond = masm->CheckIsMinSmi(rcx);
+  __ j(cond, &exit);
+
+  // CheckBothSmi
+
+  __ incq(rax);
+  __ movq(rcx, Immediate(Smi::kMaxValue));
+  __ Integer32ToSmi(rcx, rcx);
+  __ movq(rdx, Immediate(Smi::kMinValue));
+  __ Integer32ToSmi(rdx, rdx);
+  cond = masm->CheckBothSmi(rcx, rdx);
+  __ j(NegateCondition(cond), &exit);
+
+  __ incq(rax);
+  __ xor_(rcx, Immediate(kSmiTagMask));
+  cond = masm->CheckBothSmi(rcx, rdx);
+  __ j(cond, &exit);
+
+  __ incq(rax);
+  __ xor_(rdx, Immediate(kSmiTagMask));
+  cond = masm->CheckBothSmi(rcx, rdx);
+  __ j(cond, &exit);
+
+  __ incq(rax);
+  __ xor_(rcx, Immediate(kSmiTagMask));
+  cond = masm->CheckBothSmi(rcx, rdx);
+  __ j(cond, &exit);
+
+  __ incq(rax);
+  cond = masm->CheckBothSmi(rcx, rcx);
+  __ j(NegateCondition(cond), &exit);
+
+  __ incq(rax);
+  cond = masm->CheckBothSmi(rdx, rdx);
+  __ j(cond, &exit);
+
+  // CheckInteger32ValidSmiValue
+
+  __ incq(rax);
+  __ movq(rcx, Immediate(0));
+  cond = masm->CheckInteger32ValidSmiValue(rcx);
+  __ j(NegateCondition(cond), &exit);
+
+  __ incq(rax);
+  __ movq(rcx, Immediate(-1));
+  cond = masm->CheckInteger32ValidSmiValue(rcx);
+  __ j(NegateCondition(cond), &exit);
+
+  __ incq(rax);
+  __ movq(rcx, Immediate(Smi::kMaxValue));
+  cond = masm->CheckInteger32ValidSmiValue(rcx);
+  __ j(NegateCondition(cond), &exit);
+
+  __ incq(rax);
+  __ movq(rcx, Immediate(Smi::kMinValue));
+  cond = masm->CheckInteger32ValidSmiValue(rcx);
+  __ j(NegateCondition(cond), &exit);
+
+  // Success
+  __ xor_(rax, rax);
+
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
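+// SmiNeg is expected to bail out to the given label for two inputs:
+// zero, since the result would be -0 which a smi cannot represent,
+// and Smi::kMinValue, whose negation overflows the smi range.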
+void TestSmiNeg(MacroAssembler* masm, Label* exit, int id, int x) {
+  __ Move(rcx, Smi::FromInt(x));
+  __ movq(r11, rcx);
+  if (x == Smi::kMinValue || x == 0) {
+    // Negation fails.
+    __ movl(rax, Immediate(id + 8));
+    __ SmiNeg(r9, rcx, exit);
+
+    __ incq(rax);
+    __ SmiCompare(r11, rcx);
+    __ j(not_equal, exit);
+
+    __ incq(rax);
+    __ SmiNeg(rcx, rcx, exit);
+
+    __ incq(rax);
+    __ SmiCompare(r11, rcx);
+    __ j(not_equal, exit);
+  } else {
+    Label smi_ok, smi_ok2;
+    int result = -x;
+    __ movl(rax, Immediate(id));
+    __ Move(r8, Smi::FromInt(result));
+
+    __ SmiNeg(r9, rcx, &smi_ok);
+    __ jmp(exit);
+    __ bind(&smi_ok);
+    __ incq(rax);
+    __ SmiCompare(r9, r8);
+    __ j(not_equal, exit);
+
+    __ incq(rax);
+    __ SmiCompare(r11, rcx);
+    __ j(not_equal, exit);
+
+    __ incq(rax);
+    __ SmiNeg(rcx, rcx, &smi_ok2);
+    __ jmp(exit);
+    __ bind(&smi_ok2);
+    __ incq(rax);
+    __ SmiCompare(rcx, r8);
+    __ j(not_equal, exit);
+  }
+}
+
+
+TEST(SmiNeg) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                      &actual_size,
+                                      true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  TestSmiNeg(masm, &exit, 0x10, 0);
+  TestSmiNeg(masm, &exit, 0x20, 1);
+  TestSmiNeg(masm, &exit, 0x30, -1);
+  TestSmiNeg(masm, &exit, 0x40, 127);
+  TestSmiNeg(masm, &exit, 0x50, 65535);
+  TestSmiNeg(masm, &exit, 0x60, Smi::kMinValue);
+  TestSmiNeg(masm, &exit, 0x70, Smi::kMaxValue);
+  TestSmiNeg(masm, &exit, 0x80, -Smi::kMaxValue);
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
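+// Exercises SmiAdd and both SmiAddConstant variants (with and without
+// an overflow bailout label), using distinct as well as aliased
+// destination registers.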
+static void SmiAddTest(MacroAssembler* masm,
+                       Label* exit,
+                       int id,
+                       int first,
+                       int second) {
+  __ movl(rcx, Immediate(first));
+  __ Integer32ToSmi(rcx, rcx);
+  __ movl(rdx, Immediate(second));
+  __ Integer32ToSmi(rdx, rdx);
+  __ movl(r8, Immediate(first + second));
+  __ Integer32ToSmi(r8, r8);
+
+  __ movl(rax, Immediate(id));  // Test number.
+  __ SmiAdd(r9, rcx, rdx, exit);
+  __ SmiCompare(r9, r8);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiAdd(rcx, rcx, rdx, exit);
+  __ SmiCompare(rcx, r8);
+  __ j(not_equal, exit);
+
+  __ movl(rcx, Immediate(first));
+  __ Integer32ToSmi(rcx, rcx);
+
+  __ incq(rax);
+  __ SmiAddConstant(r9, rcx, Smi::FromInt(second));
+  __ SmiCompare(r9, r8);
+  __ j(not_equal, exit);
+
+  __ SmiAddConstant(rcx, rcx, Smi::FromInt(second));
+  __ SmiCompare(rcx, r8);
+  __ j(not_equal, exit);
+
+  __ movl(rcx, Immediate(first));
+  __ Integer32ToSmi(rcx, rcx);
+
+  __ incq(rax);
+  __ SmiAddConstant(r9, rcx, Smi::FromInt(second), exit);
+  __ SmiCompare(r9, r8);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiAddConstant(rcx, rcx, Smi::FromInt(second), exit);
+  __ SmiCompare(rcx, r8);
+  __ j(not_equal, exit);
+}
+
+TEST(SmiAdd) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                                 &actual_size,
+                                                 true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  // No-overflow tests.
+  SmiAddTest(masm, &exit, 0x10, 1, 2);
+  SmiAddTest(masm, &exit, 0x20, 1, -2);
+  SmiAddTest(masm, &exit, 0x30, -1, 2);
+  SmiAddTest(masm, &exit, 0x40, -1, -2);
+  SmiAddTest(masm, &exit, 0x50, 0x1000, 0x2000);
+  SmiAddTest(masm, &exit, 0x60, Smi::kMinValue, 5);
+  SmiAddTest(masm, &exit, 0x70, Smi::kMaxValue, -5);
+  SmiAddTest(masm, &exit, 0x80, Smi::kMaxValue, Smi::kMinValue);
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
+static void SmiSubTest(MacroAssembler* masm,
+                      Label* exit,
+                      int id,
+                      int first,
+                      int second) {
+  __ Move(rcx, Smi::FromInt(first));
+  __ Move(rdx, Smi::FromInt(second));
+  __ Move(r8, Smi::FromInt(first - second));
+
+  __ movl(rax, Immediate(id));  // Test 0.
+  __ SmiSub(r9, rcx, rdx, exit);
+  __ SmiCompare(r9, r8);
+  __ j(not_equal, exit);
+
+  __ incq(rax);  // Test 1.
+  __ SmiSub(rcx, rcx, rdx, exit);
+  __ SmiCompare(rcx, r8);
+  __ j(not_equal, exit);
+
+  __ Move(rcx, Smi::FromInt(first));
+
+  __ incq(rax);  // Test 2.
+  __ SmiSubConstant(r9, rcx, Smi::FromInt(second));
+  __ SmiCompare(r9, r8);
+  __ j(not_equal, exit);
+
+  __ incq(rax);  // Test 3.
+  __ SmiSubConstant(rcx, rcx, Smi::FromInt(second));
+  __ SmiCompare(rcx, r8);
+  __ j(not_equal, exit);
+
+  __ Move(rcx, Smi::FromInt(first));
+
+  __ incq(rax);  // Test 4.
+  __ SmiSubConstant(r9, rcx, Smi::FromInt(second), exit);
+  __ SmiCompare(r9, r8);
+  __ j(not_equal, exit);
+
+  __ incq(rax);  // Test 5.
+  __ SmiSubConstant(rcx, rcx, Smi::FromInt(second), exit);
+  __ SmiCompare(rcx, r8);
+  __ j(not_equal, exit);
+}
+
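+// For a given x, derives two subtrahends that both make x - y leave
+// the smi range: y_min overflows it by exactly one, y_max by the
+// largest possible amount. Subtracting any smi from -1 can never
+// overflow, hence the ASSERT in the helper.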
+static void SmiSubOverflowTest(MacroAssembler* masm,
+                               Label* exit,
+                               int id,
+                               int x) {
+  // Subtracts a Smi from x so that the subtraction overflows.
+  ASSERT(x != -1);  // Can't overflow by subtracting a Smi.
+  int y_max = (x < 0) ? (Smi::kMaxValue + 0) : (Smi::kMinValue + 0);
+  int y_min = (x < 0) ? (Smi::kMaxValue + x + 2) : (Smi::kMinValue + x);
+
+  __ movl(rax, Immediate(id));
+  __ Move(rcx, Smi::FromInt(x));
+  __ movq(r11, rcx);  // Store original Smi value of x in r11.
+  __ Move(rdx, Smi::FromInt(y_min));
+  {
+    Label overflow_ok;
+    __ SmiSub(r9, rcx, rdx, &overflow_ok);
+    __ jmp(exit);
+    __ bind(&overflow_ok);
+    __ incq(rax);
+    __ SmiCompare(rcx, r11);
+    __ j(not_equal, exit);
+  }
+
+  {
+    Label overflow_ok;
+    __ incq(rax);
+    __ SmiSub(rcx, rcx, rdx, &overflow_ok);
+    __ jmp(exit);
+    __ bind(&overflow_ok);
+    __ incq(rax);
+    __ SmiCompare(rcx, r11);
+    __ j(not_equal, exit);
+  }
+
+  __ movq(rcx, r11);
+  {
+    Label overflow_ok;
+    __ incq(rax);
+    __ SmiSubConstant(r9, rcx, Smi::FromInt(y_min), &overflow_ok);
+    __ jmp(exit);
+    __ bind(&overflow_ok);
+    __ incq(rax);
+    __ SmiCompare(rcx, r11);
+    __ j(not_equal, exit);
+  }
+
+  {
+    Label overflow_ok;
+    __ incq(rax);
+    __ SmiSubConstant(rcx, rcx, Smi::FromInt(y_min), &overflow_ok);
+    __ jmp(exit);
+    __ bind(&overflow_ok);
+    __ incq(rax);
+    __ SmiCompare(rcx, r11);
+    __ j(not_equal, exit);
+  }
+
+  __ Move(rdx, Smi::FromInt(y_max));
+
+  {
+    Label overflow_ok;
+    __ incq(rax);
+    __ SmiSub(r9, rcx, rdx, &overflow_ok);
+    __ jmp(exit);
+    __ bind(&overflow_ok);
+    __ incq(rax);
+    __ SmiCompare(rcx, r11);
+    __ j(not_equal, exit);
+  }
+
+  {
+    Label overflow_ok;
+    __ incq(rax);
+    __ SmiSub(rcx, rcx, rdx, &overflow_ok);
+    __ jmp(exit);
+    __ bind(&overflow_ok);
+    __ incq(rax);
+    __ SmiCompare(rcx, r11);
+    __ j(not_equal, exit);
+  }
+
+  __ movq(rcx, r11);
+  {
+    Label overflow_ok;
+    __ incq(rax);
+    __ SmiSubConstant(r9, rcx, Smi::FromInt(y_max), &overflow_ok);
+    __ jmp(exit);
+    __ bind(&overflow_ok);
+    __ incq(rax);
+    __ SmiCompare(rcx, r11);
+    __ j(not_equal, exit);
+  }
+
+  {
+    Label overflow_ok;
+    __ incq(rax);
+    __ SmiSubConstant(rcx, rcx, Smi::FromInt(y_max), &overflow_ok);
+    __ jmp(exit);
+    __ bind(&overflow_ok);
+    __ incq(rax);
+    __ SmiCompare(rcx, r11);
+    __ j(not_equal, exit);
+  }
+}
+
+
+TEST(SmiSub) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+                                      &actual_size,
+                                      true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  SmiSubTest(masm, &exit, 0x10, 1, 2);
+  SmiSubTest(masm, &exit, 0x20, 1, -2);
+  SmiSubTest(masm, &exit, 0x30, -1, 2);
+  SmiSubTest(masm, &exit, 0x40, -1, -2);
+  SmiSubTest(masm, &exit, 0x50, 0x1000, 0x2000);
+  SmiSubTest(masm, &exit, 0x60, Smi::kMinValue, -5);
+  SmiSubTest(masm, &exit, 0x70, Smi::kMaxValue, 5);
+  SmiSubTest(masm, &exit, 0x80, -Smi::kMaxValue, Smi::kMinValue);
+  SmiSubTest(masm, &exit, 0x90, 0, Smi::kMaxValue);
+
+  SmiSubOverflowTest(masm, &exit, 0xA0, 1);
+  SmiSubOverflowTest(masm, &exit, 0xB0, 1024);
+  SmiSubOverflowTest(masm, &exit, 0xC0, Smi::kMaxValue);
+  SmiSubOverflowTest(masm, &exit, 0xD0, -2);
+  SmiSubOverflowTest(masm, &exit, 0xE0, -42000);
+  SmiSubOverflowTest(masm, &exit, 0xF0, Smi::kMinValue);
+  SmiSubOverflowTest(masm, &exit, 0x100, 0);
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
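+// SmiMul must bail out not only when the product leaves the smi range
+// but also when the mathematical result is -0 (a zero product with a
+// negative factor), since smis cannot represent negative zero.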
+void TestSmiMul(MacroAssembler* masm, Label* exit, int id, int x, int y) {
+  int64_t result = static_cast<int64_t>(x) * static_cast<int64_t>(y);
+  bool negative_zero = (result == 0) && (x < 0 || y < 0);
+  __ Move(rcx, Smi::FromInt(x));
+  __ movq(r11, rcx);
+  __ Move(rdx, Smi::FromInt(y));
+  if (Smi::IsValid(result) && !negative_zero) {
+    __ movl(rax, Immediate(id));
+    __ Move(r8, Smi::FromIntptr(result));
+    __ SmiMul(r9, rcx, rdx, exit);
+    __ incq(rax);
+    __ SmiCompare(r11, rcx);
+    __ j(not_equal, exit);
+    __ incq(rax);
+    __ SmiCompare(r9, r8);
+    __ j(not_equal, exit);
+
+    __ incq(rax);
+    __ SmiMul(rcx, rcx, rdx, exit);
+    __ SmiCompare(rcx, r8);
+    __ j(not_equal, exit);
+  } else {
+    __ movl(rax, Immediate(id + 8));
+    Label overflow_ok, overflow_ok2;
+    __ SmiMul(r9, rcx, rdx, &overflow_ok);
+    __ jmp(exit);
+    __ bind(&overflow_ok);
+    __ incq(rax);
+    __ SmiCompare(r11, rcx);
+    __ j(not_equal, exit);
+    __ incq(rax);
+    __ SmiMul(rcx, rcx, rdx, &overflow_ok2);
+    __ jmp(exit);
+    __ bind(&overflow_ok2);
+    // 31-bit version doesn't preserve rcx on failure.
+    // __ incq(rax);
+    // __ SmiCompare(r11, rcx);
+    // __ j(not_equal, exit);
+  }
+}
+
+
+TEST(SmiMul) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                                 &actual_size,
+                                                 true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  TestSmiMul(masm, &exit, 0x10, 0, 0);
+  TestSmiMul(masm, &exit, 0x20, -1, 0);
+  TestSmiMul(masm, &exit, 0x30, 0, -1);
+  TestSmiMul(masm, &exit, 0x40, -1, -1);
+  TestSmiMul(masm, &exit, 0x50, 0x10000, 0x10000);
+  TestSmiMul(masm, &exit, 0x60, 0x10000, 0xffff);
+  TestSmiMul(masm, &exit, 0x70, 0xffff, 0x10000);
+  TestSmiMul(masm, &exit, 0x80, Smi::kMaxValue, -1);
+  TestSmiMul(masm, &exit, 0x90, Smi::kMaxValue, -2);
+  TestSmiMul(masm, &exit, 0xa0, Smi::kMaxValue, 2);
+  TestSmiMul(masm, &exit, 0xb0, (Smi::kMaxValue / 2), 2);
+  TestSmiMul(masm, &exit, 0xc0, (Smi::kMaxValue / 2) + 1, 2);
+  TestSmiMul(masm, &exit, 0xd0, (Smi::kMinValue / 2), 2);
+  TestSmiMul(masm, &exit, 0xe0, (Smi::kMinValue / 2) - 1, 2);
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
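+// SmiDiv bails out on division by zero, on a -0 result, on overflow
+// (Smi::kMinValue / -1; the x64 expectation conservatively treats any
+// negative divisor of kMinValue as a failure), and whenever the
+// quotient is inexact, since the result must again be a smi.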
+void TestSmiDiv(MacroAssembler* masm, Label* exit, int id, int x, int y) {
+  bool division_by_zero = (y == 0);
+  bool negative_zero = (x == 0 && y < 0);
+#ifdef V8_TARGET_ARCH_X64
+  bool overflow = (x == Smi::kMinValue && y < 0);  // Safe approx. used.
+#else
+  bool overflow = (x == Smi::kMinValue && y == -1);
+#endif
+  bool fraction = !division_by_zero && !overflow && (x % y != 0);
+  __ Move(r11, Smi::FromInt(x));
+  __ Move(r12, Smi::FromInt(y));
+  if (!fraction && !overflow && !negative_zero && !division_by_zero) {
+    // Division succeeds
+    __ movq(rcx, r11);
+    __ movq(r15, Immediate(id));
+    int result = x / y;
+    __ Move(r8, Smi::FromInt(result));
+    __ SmiDiv(r9, rcx, r12, exit);
+    // Might have destroyed rcx and r12.
+    __ incq(r15);
+    __ SmiCompare(r9, r8);
+    __ j(not_equal, exit);
+
+    __ incq(r15);
+    __ movq(rcx, r11);
+    __ Move(r12, Smi::FromInt(y));
+    __ SmiCompare(rcx, r11);
+    __ j(not_equal, exit);
+
+    __ incq(r15);
+    __ SmiDiv(rcx, rcx, r12, exit);
+
+    __ incq(r15);
+    __ SmiCompare(rcx, r8);
+    __ j(not_equal, exit);
+  } else {
+    // Division fails.
+    __ movq(r15, Immediate(id + 8));
+
+    Label fail_ok, fail_ok2;
+    __ movq(rcx, r11);
+    __ SmiDiv(r9, rcx, r12, &fail_ok);
+    __ jmp(exit);
+    __ bind(&fail_ok);
+
+    __ incq(r15);
+    __ SmiCompare(rcx, r11);
+    __ j(not_equal, exit);
+
+    __ incq(r15);
+    __ SmiDiv(rcx, rcx, r12, &fail_ok2);
+    __ jmp(exit);
+    __ bind(&fail_ok2);
+
+    __ incq(r15);
+    __ SmiCompare(rcx, r11);
+    __ j(not_equal, exit);
+  }
+}
+
+
+TEST(SmiDiv) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+                                      &actual_size,
+                                      true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  // The test helpers clobber r12 and r15, which are callee-saved in
+  // the C++ calling convention, so preserve them around the tests.
+  __ push(r12);
+  __ push(r15);
+
+  TestSmiDiv(masm, &exit, 0x10, 1, 1);
+  TestSmiDiv(masm, &exit, 0x20, 1, 0);
+  TestSmiDiv(masm, &exit, 0x30, -1, 0);
+  TestSmiDiv(masm, &exit, 0x40, 0, 1);
+  TestSmiDiv(masm, &exit, 0x50, 0, -1);
+  TestSmiDiv(masm, &exit, 0x60, 4, 2);
+  TestSmiDiv(masm, &exit, 0x70, -4, 2);
+  TestSmiDiv(masm, &exit, 0x80, 4, -2);
+  TestSmiDiv(masm, &exit, 0x90, -4, -2);
+  TestSmiDiv(masm, &exit, 0xa0, 3, 2);
+  TestSmiDiv(masm, &exit, 0xb0, 3, 4);
+  TestSmiDiv(masm, &exit, 0xc0, 1, Smi::kMaxValue);
+  TestSmiDiv(masm, &exit, 0xd0, -1, Smi::kMaxValue);
+  TestSmiDiv(masm, &exit, 0xe0, Smi::kMaxValue, 1);
+  TestSmiDiv(masm, &exit, 0xf0, Smi::kMaxValue, Smi::kMaxValue);
+  TestSmiDiv(masm, &exit, 0x100, Smi::kMaxValue, -Smi::kMaxValue);
+  TestSmiDiv(masm, &exit, 0x110, Smi::kMaxValue, -1);
+  TestSmiDiv(masm, &exit, 0x120, Smi::kMinValue, 1);
+  TestSmiDiv(masm, &exit, 0x130, Smi::kMinValue, Smi::kMinValue);
+  TestSmiDiv(masm, &exit, 0x140, Smi::kMinValue, -1);
+
+  __ xor_(r15, r15);  // Success.
+  __ bind(&exit);
+  __ movq(rax, r15);
+  __ pop(r15);  // Restore the callee-saved registers.
+  __ pop(r12);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
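+// SmiMod shares SmiDiv's failure cases except that an inexact
+// quotient is fine (the remainder is the result); instead it must
+// bail out when the remainder would be -0, i.e. a negative dividend
+// that divides evenly.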
+void TestSmiMod(MacroAssembler* masm, Label* exit, int id, int x, int y) {
+  bool division_by_zero = (y == 0);
+  bool division_overflow = (x == Smi::kMinValue) && (y == -1);
+  bool fraction = !division_by_zero && !division_overflow && ((x % y) != 0);
+  bool negative_zero = (!fraction && x < 0);
+  __ Move(rcx, Smi::FromInt(x));
+  __ movq(r11, rcx);
+  __ Move(r12, Smi::FromInt(y));
+  if (!division_overflow && !negative_zero && !division_by_zero) {
+    // Modulo succeeds
+    __ movq(r15, Immediate(id));
+    int result = x % y;
+    __ Move(r8, Smi::FromInt(result));
+    __ SmiMod(r9, rcx, r12, exit);
+
+    __ incq(r15);
+    __ SmiCompare(r9, r8);
+    __ j(not_equal, exit);
+
+    __ incq(r15);
+    __ SmiCompare(rcx, r11);
+    __ j(not_equal, exit);
+
+    __ incq(r15);
+    __ SmiMod(rcx, rcx, r12, exit);
+
+    __ incq(r15);
+    __ SmiCompare(rcx, r8);
+    __ j(not_equal, exit);
+  } else {
+    // Modulo fails.
+    __ movq(r15, Immediate(id + 8));
+
+    Label fail_ok, fail_ok2;
+    __ SmiMod(r9, rcx, r12, &fail_ok);
+    __ jmp(exit);
+    __ bind(&fail_ok);
+
+    __ incq(r15);
+    __ SmiCompare(rcx, r11);
+    __ j(not_equal, exit);
+
+    __ incq(r15);
+    __ SmiMod(rcx, rcx, r12, &fail_ok2);
+    __ jmp(exit);
+    __ bind(&fail_ok2);
+
+    __ incq(r15);
+    __ SmiCompare(rcx, r11);
+    __ j(not_equal, exit);
+  }
+}
+
+
+TEST(SmiMod) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+                                      &actual_size,
+                                      true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  // Preserve the callee-saved registers r12 and r15 (see SmiDiv).
+  __ push(r12);
+  __ push(r15);
+
+  TestSmiMod(masm, &exit, 0x10, 1, 1);
+  TestSmiMod(masm, &exit, 0x20, 1, 0);
+  TestSmiMod(masm, &exit, 0x30, -1, 0);
+  TestSmiMod(masm, &exit, 0x40, 0, 1);
+  TestSmiMod(masm, &exit, 0x50, 0, -1);
+  TestSmiMod(masm, &exit, 0x60, 4, 2);
+  TestSmiMod(masm, &exit, 0x70, -4, 2);
+  TestSmiMod(masm, &exit, 0x80, 4, -2);
+  TestSmiMod(masm, &exit, 0x90, -4, -2);
+  TestSmiMod(masm, &exit, 0xa0, 3, 2);
+  TestSmiMod(masm, &exit, 0xb0, 3, 4);
+  TestSmiMod(masm, &exit, 0xc0, 1, Smi::kMaxValue);
+  TestSmiMod(masm, &exit, 0xd0, -1, Smi::kMaxValue);
+  TestSmiMod(masm, &exit, 0xe0, Smi::kMaxValue, 1);
+  TestSmiMod(masm, &exit, 0xf0, Smi::kMaxValue, Smi::kMaxValue);
+  TestSmiMod(masm, &exit, 0x100, Smi::kMaxValue, -Smi::kMaxValue);
+  TestSmiMod(masm, &exit, 0x110, Smi::kMaxValue, -1);
+  TestSmiMod(masm, &exit, 0x120, Smi::kMinValue, 1);
+  TestSmiMod(masm, &exit, 0x130, Smi::kMinValue, Smi::kMinValue);
+  TestSmiMod(masm, &exit, 0x140, Smi::kMinValue, -1);
+
+  __ xor_(r15, r15);  // Success.
+  __ bind(&exit);
+  __ movq(rax, r15);
+  __ pop(r15);  // Restore the callee-saved registers.
+  __ pop(r12);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
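+// SmiToIndex returns a SmiIndex, a (register, scale) pair chosen so
+// that reg << scale equals the untagged smi value shifted left by the
+// requested amount, ready for use in a scaled memory operand.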
+void TestSmiIndex(MacroAssembler* masm, Label* exit, int id, int x) {
+  __ movl(rax, Immediate(id));
+
+  for (int i = 0; i < 8; i++) {
+    __ Move(rcx, Smi::FromInt(x));
+    SmiIndex index = masm->SmiToIndex(rdx, rcx, i);
+    ASSERT(index.reg.is(rcx) || index.reg.is(rdx));
+    __ shl(index.reg, Immediate(index.scale));
+    __ Set(r8, static_cast<intptr_t>(x) << i);
+    __ SmiCompare(index.reg, r8);
+    __ j(not_equal, exit);
+    __ incq(rax);
+    __ Move(rcx, Smi::FromInt(x));
+    index = masm->SmiToIndex(rcx, rcx, i);
+    ASSERT(index.reg.is(rcx));
+    __ shl(rcx, Immediate(index.scale));
+    __ Set(r8, static_cast<intptr_t>(x) << i);
+    __ SmiCompare(rcx, r8);
+    __ j(not_equal, exit);
+    __ incq(rax);
+
+    __ Move(rcx, Smi::FromInt(x));
+    index = masm->SmiToNegativeIndex(rdx, rcx, i);
+    ASSERT(index.reg.is(rcx) || index.reg.is(rdx));
+    __ shl(index.reg, Immediate(index.scale));
+    __ Set(r8, static_cast<intptr_t>(-x) << i);
+    __ SmiCompare(index.reg, r8);
+    __ j(not_equal, exit);
+    __ incq(rax);
+    __ Move(rcx, Smi::FromInt(x));
+    index = masm->SmiToNegativeIndex(rcx, rcx, i);
+    ASSERT(index.reg.is(rcx));
+    __ shl(rcx, Immediate(index.scale));
+    __ Set(r8, static_cast<intptr_t>(-x) << i);
+    __ SmiCompare(rcx, r8);
+    __ j(not_equal, exit);
+    __ incq(rax);
+  }
+}
+
+TEST(SmiIndex) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+                                      &actual_size,
+                                      true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  TestSmiIndex(masm, &exit, 0x10, 0);
+  TestSmiIndex(masm, &exit, 0x20, 1);
+  TestSmiIndex(masm, &exit, 0x30, 100);
+  TestSmiIndex(masm, &exit, 0x40, 1000);
+  TestSmiIndex(masm, &exit, 0x50, Smi::kMaxValue);
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
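+// SelectNonSmi moves whichever of its two operands is not a smi into
+// the target register, and jumps to the bailout label when both
+// operands fail the smi check.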
+void TestSelectNonSmi(MacroAssembler* masm, Label* exit, int id, int x, int y) {
+  __ movl(rax, Immediate(id));
+  __ Move(rcx, Smi::FromInt(x));
+  __ Move(rdx, Smi::FromInt(y));
+  __ xor_(rdx, Immediate(kSmiTagMask));
+  __ SelectNonSmi(r9, rcx, rdx, exit);
+
+  __ incq(rax);
+  __ SmiCompare(r9, rdx);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ Move(rcx, Smi::FromInt(x));
+  __ Move(rdx, Smi::FromInt(y));
+  __ xor_(rcx, Immediate(kSmiTagMask));
+  __ SelectNonSmi(r9, rcx, rdx, exit);
+
+  __ incq(rax);
+  __ SmiCompare(r9, rcx);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  Label fail_ok;
+  __ Move(rcx, Smi::FromInt(x));
+  __ Move(rdx, Smi::FromInt(y));
+  __ xor_(rcx, Immediate(kSmiTagMask));
+  __ xor_(rdx, Immediate(kSmiTagMask));
+  __ SelectNonSmi(r9, rcx, rdx, &fail_ok);
+  __ jmp(exit);
+  __ bind(&fail_ok);
+}
+
+
+TEST(SmiSelectNonSmi) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                      &actual_size,
+                                      true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);  // Avoid inline checks.
+  Label exit;
+
+  TestSelectNonSmi(masm, &exit, 0x10, 0, 0);
+  TestSelectNonSmi(masm, &exit, 0x20, 0, 1);
+  TestSelectNonSmi(masm, &exit, 0x30, 1, 0);
+  TestSelectNonSmi(masm, &exit, 0x40, 0, -1);
+  TestSelectNonSmi(masm, &exit, 0x50, -1, 0);
+  TestSelectNonSmi(masm, &exit, 0x60, -1, -1);
+  TestSelectNonSmi(masm, &exit, 0x70, 1, 1);
+  TestSelectNonSmi(masm, &exit, 0x80, Smi::kMinValue, Smi::kMaxValue);
+  TestSelectNonSmi(masm, &exit, 0x90, Smi::kMinValue, Smi::kMinValue);
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
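+// The bitwise operations (SmiAnd/SmiOr/SmiXor/SmiNot and the
+// *Constant forms) take no bailout label: valid smis are closed under
+// bitwise combination, so the result is always a valid smi.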
+void TestSmiAnd(MacroAssembler* masm, Label* exit, int id, int x, int y) {
+  int result = x & y;
+
+  __ movl(rax, Immediate(id));
+
+  __ Move(rcx, Smi::FromInt(x));
+  __ movq(r11, rcx);
+  __ Move(rdx, Smi::FromInt(y));
+  __ Move(r8, Smi::FromInt(result));
+  __ SmiAnd(r9, rcx, rdx);
+  __ SmiCompare(r8, r9);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiCompare(r11, rcx);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiAnd(rcx, rcx, rdx);
+  __ SmiCompare(r8, rcx);
+  __ j(not_equal, exit);
+
+  __ movq(rcx, r11);
+  __ incq(rax);
+  __ SmiAndConstant(r9, rcx, Smi::FromInt(y));
+  __ SmiCompare(r8, r9);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiCompare(r11, rcx);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiAndConstant(rcx, rcx, Smi::FromInt(y));
+  __ SmiCompare(r8, rcx);
+  __ j(not_equal, exit);
+}
+
+
+TEST(SmiAnd) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                      &actual_size,
+                                      true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  TestSmiAnd(masm, &exit, 0x10, 0, 0);
+  TestSmiAnd(masm, &exit, 0x20, 0, 1);
+  TestSmiAnd(masm, &exit, 0x30, 1, 0);
+  TestSmiAnd(masm, &exit, 0x40, 0, -1);
+  TestSmiAnd(masm, &exit, 0x50, -1, 0);
+  TestSmiAnd(masm, &exit, 0x60, -1, -1);
+  TestSmiAnd(masm, &exit, 0x70, 1, 1);
+  TestSmiAnd(masm, &exit, 0x80, Smi::kMinValue, Smi::kMaxValue);
+  TestSmiAnd(masm, &exit, 0x90, Smi::kMinValue, Smi::kMinValue);
+  TestSmiAnd(masm, &exit, 0xA0, Smi::kMinValue, -1);
+  TestSmiAnd(masm, &exit, 0xB0, -1, Smi::kMinValue);
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
+void TestSmiOr(MacroAssembler* masm, Label* exit, int id, int x, int y) {
+  int result = x | y;
+
+  __ movl(rax, Immediate(id));
+
+  __ Move(rcx, Smi::FromInt(x));
+  __ movq(r11, rcx);
+  __ Move(rdx, Smi::FromInt(y));
+  __ Move(r8, Smi::FromInt(result));
+  __ SmiOr(r9, rcx, rdx);
+  __ SmiCompare(r8, r9);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiCompare(r11, rcx);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiOr(rcx, rcx, rdx);
+  __ SmiCompare(r8, rcx);
+  __ j(not_equal, exit);
+
+  __ movq(rcx, r11);
+  __ incq(rax);
+  __ SmiOrConstant(r9, rcx, Smi::FromInt(y));
+  __ SmiCompare(r8, r9);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiCompare(r11, rcx);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiOrConstant(rcx, rcx, Smi::FromInt(y));
+  __ SmiCompare(r8, rcx);
+  __ j(not_equal, exit);
+}
+
+
+TEST(SmiOr) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                      &actual_size,
+                                      true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  TestSmiOr(masm, &exit, 0x10, 0, 0);
+  TestSmiOr(masm, &exit, 0x20, 0, 1);
+  TestSmiOr(masm, &exit, 0x30, 1, 0);
+  TestSmiOr(masm, &exit, 0x40, 0, -1);
+  TestSmiOr(masm, &exit, 0x50, -1, 0);
+  TestSmiOr(masm, &exit, 0x60, -1, -1);
+  TestSmiOr(masm, &exit, 0x70, 1, 1);
+  TestSmiOr(masm, &exit, 0x80, Smi::kMinValue, Smi::kMaxValue);
+  TestSmiOr(masm, &exit, 0x90, Smi::kMinValue, Smi::kMinValue);
+  TestSmiOr(masm, &exit, 0xA0, Smi::kMinValue, -1);
+  TestSmiOr(masm, &exit, 0xB0, 0x05555555, 0x01234567);
+  TestSmiOr(masm, &exit, 0xC0, 0x05555555, 0x0fedcba9);
+  TestSmiOr(masm, &exit, 0xD0, -1, Smi::kMinValue);
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
+void TestSmiXor(MacroAssembler* masm, Label* exit, int id, int x, int y) {
+  int result = x ^ y;
+
+  __ movl(rax, Immediate(id));
+
+  __ Move(rcx, Smi::FromInt(x));
+  __ movq(r11, rcx);
+  __ Move(rdx, Smi::FromInt(y));
+  __ Move(r8, Smi::FromInt(result));
+  __ SmiXor(r9, rcx, rdx);
+  __ SmiCompare(r8, r9);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiCompare(r11, rcx);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiXor(rcx, rcx, rdx);
+  __ SmiCompare(r8, rcx);
+  __ j(not_equal, exit);
+
+  __ movq(rcx, r11);
+  __ incq(rax);
+  __ SmiXorConstant(r9, rcx, Smi::FromInt(y));
+  __ SmiCompare(r8, r9);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiCompare(r11, rcx);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiXorConstant(rcx, rcx, Smi::FromInt(y));
+  __ SmiCompare(r8, rcx);
+  __ j(not_equal, exit);
+}
+
+
+TEST(SmiXor) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                      &actual_size,
+                                      true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  TestSmiXor(masm, &exit, 0x10, 0, 0);
+  TestSmiXor(masm, &exit, 0x20, 0, 1);
+  TestSmiXor(masm, &exit, 0x30, 1, 0);
+  TestSmiXor(masm, &exit, 0x40, 0, -1);
+  TestSmiXor(masm, &exit, 0x50, -1, 0);
+  TestSmiXor(masm, &exit, 0x60, -1, -1);
+  TestSmiXor(masm, &exit, 0x70, 1, 1);
+  TestSmiXor(masm, &exit, 0x80, Smi::kMinValue, Smi::kMaxValue);
+  TestSmiXor(masm, &exit, 0x90, Smi::kMinValue, Smi::kMinValue);
+  TestSmiXor(masm, &exit, 0xA0, Smi::kMinValue, -1);
+  TestSmiXor(masm, &exit, 0xB0, 0x5555555, 0x01234567);
+  TestSmiXor(masm, &exit, 0xC0, 0x5555555, 0x0fedcba9);
+  TestSmiXor(masm, &exit, 0xD0, -1, Smi::kMinValue);
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
+void TestSmiNot(MacroAssembler* masm, Label* exit, int id, int x) {
+  int result = ~x;
+  __ movl(rax, Immediate(id));
+
+  __ Move(r8, Smi::FromInt(result));
+  __ Move(rcx, Smi::FromInt(x));
+  __ movq(r11, rcx);
+
+  __ SmiNot(r9, rcx);
+  __ SmiCompare(r9, r8);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiCompare(r11, rcx);
+  __ j(not_equal, exit);
+
+  __ incq(rax);
+  __ SmiNot(rcx, rcx);
+  __ SmiCompare(rcx, r8);
+  __ j(not_equal, exit);
+}
+
+
+TEST(SmiNot) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
+                                      &actual_size,
+                                      true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  TestSmiNot(masm, &exit, 0x10, 0);
+  TestSmiNot(masm, &exit, 0x20, 1);
+  TestSmiNot(masm, &exit, 0x30, -1);
+  TestSmiNot(masm, &exit, 0x40, 127);
+  TestSmiNot(masm, &exit, 0x50, 65535);
+  TestSmiNot(masm, &exit, 0x60, Smi::kMinValue);
+  TestSmiNot(masm, &exit, 0x70, Smi::kMaxValue);
+  TestSmiNot(masm, &exit, 0x80, 0x05555555);
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
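+// With 32 bits of smi payload every int32 result of x << shift is a
+// valid smi, so the bailout branch below is unreachable here ("cannot
+// happen with long smis"); it is kept for the 31-bit configuration.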
+void TestSmiShiftLeft(MacroAssembler* masm, Label* exit, int id, int x) {
+  const int shifts[] = { 0, 1, 7, 24, kSmiValueSize - 1 };
+  const int kNumShifts = 5;
+  __ movl(rax, Immediate(id));
+  for (int i = 0; i < kNumShifts; i++) {
+    // rax == id + i * 10.
+    int shift = shifts[i];
+    int result = x << shift;
+    if (Smi::IsValid(result)) {
+      __ Move(r8, Smi::FromInt(result));
+      __ Move(rcx, Smi::FromInt(x));
+      __ SmiShiftLeftConstant(r9, rcx, shift, exit);
+
+      __ incq(rax);
+      __ SmiCompare(r9, r8);
+      __ j(not_equal, exit);
+
+      __ incq(rax);
+      __ Move(rcx, Smi::FromInt(x));
+      __ SmiShiftLeftConstant(rcx, rcx, shift, exit);
+
+      __ incq(rax);
+      __ SmiCompare(rcx, r8);
+      __ j(not_equal, exit);
+
+      __ incq(rax);
+      __ Move(rdx, Smi::FromInt(x));
+      __ Move(rcx, Smi::FromInt(shift));
+      __ SmiShiftLeft(r9, rdx, rcx, exit);
+
+      __ incq(rax);
+      __ SmiCompare(r9, r8);
+      __ j(not_equal, exit);
+
+      __ incq(rax);
+      __ Move(rdx, Smi::FromInt(x));
+      __ Move(r11, Smi::FromInt(shift));
+      __ SmiShiftLeft(r9, rdx, r11, exit);
+
+      __ incq(rax);
+      __ SmiCompare(r9, r8);
+      __ j(not_equal, exit);
+
+      __ incq(rax);
+      __ Move(rdx, Smi::FromInt(x));
+      __ Move(r11, Smi::FromInt(shift));
+      __ SmiShiftLeft(rdx, rdx, r11, exit);
+
+      __ incq(rax);
+      __ SmiCompare(rdx, r8);
+      __ j(not_equal, exit);
+
+      __ incq(rax);
+    } else {
+      // Cannot happen with long smis.
+      Label fail_ok;
+      __ Move(rcx, Smi::FromInt(x));
+      __ movq(r11, rcx);
+      __ SmiShiftLeftConstant(r9, rcx, shift, &fail_ok);
+      __ jmp(exit);
+      __ bind(&fail_ok);
+
+      __ incq(rax);
+      __ SmiCompare(rcx, r11);
+      __ j(not_equal, exit);
+
+      __ incq(rax);
+      Label fail_ok2;
+      __ SmiShiftLeftConstant(rcx, rcx, shift, &fail_ok2);
+      __ jmp(exit);
+      __ bind(&fail_ok2);
+
+      __ incq(rax);
+      __ SmiCompare(rcx, r11);
+      __ j(not_equal, exit);
+
+      __ incq(rax);
+      __ Move(r8, Smi::FromInt(shift));
+      Label fail_ok3;
+      __ SmiShiftLeft(r9, rcx, r8, &fail_ok3);
+      __ jmp(exit);
+      __ bind(&fail_ok3);
+
+      __ incq(rax);
+      __ SmiCompare(rcx, r11);
+      __ j(not_equal, exit);
+
+      __ incq(rax);
+      __ Move(r8, Smi::FromInt(shift));
+      __ movq(rdx, r11);
+      Label fail_ok4;
+      __ SmiShiftLeft(rdx, rdx, r8, &fail_ok4);
+      __ jmp(exit);
+      __ bind(&fail_ok4);
+
+      __ incq(rax);
+      __ SmiCompare(rdx, r11);
+      __ j(not_equal, exit);
+
+      __ addq(rax, Immediate(3));
+    }
+  }
+}
+
+
+TEST(SmiShiftLeft) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 3,
+                                      &actual_size,
+                                      true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  TestSmiShiftLeft(masm, &exit, 0x10, 0);
+  TestSmiShiftLeft(masm, &exit, 0x50, 1);
+  TestSmiShiftLeft(masm, &exit, 0x90, 127);
+  TestSmiShiftLeft(masm, &exit, 0xD0, 65535);
+  TestSmiShiftLeft(masm, &exit, 0x110, Smi::kMaxValue);
+  TestSmiShiftLeft(masm, &exit, 0x150, Smi::kMinValue);
+  TestSmiShiftLeft(masm, &exit, 0x190, -1);
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
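+// A logical (zero-extending) right shift by zero bits turns a
+// negative smi into a value above Smi::kMaxValue, so unlike the
+// arithmetic variant these helpers take a bailout label.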
+void TestSmiShiftLogicalRight(MacroAssembler* masm,
+                              Label* exit,
+                              int id,
+                              int x) {
+  const int shifts[] = { 0, 1, 7, 24, kSmiValueSize - 1 };
+  const int kNumShifts = 5;
+  __ movl(rax, Immediate(id));
+  for (int i = 0; i < kNumShifts; i++) {
+    int shift = shifts[i];
+    intptr_t result = static_cast<unsigned int>(x) >> shift;
+    if (Smi::IsValid(result)) {
+      __ Move(r8, Smi::FromInt(result));
+      __ Move(rcx, Smi::FromInt(x));
+      __ SmiShiftLogicalRightConstant(r9, rcx, shift, exit);
+
+      __ incq(rax);
+      __ SmiCompare(r9, r8);
+      __ j(not_equal, exit);
+
+      __ incq(rax);
+      __ Move(rdx, Smi::FromInt(x));
+      __ Move(rcx, Smi::FromInt(shift));
+      __ SmiShiftLogicalRight(r9, rdx, rcx, exit);
+
+      __ incq(rax);
+      __ SmiCompare(r9, r8);
+      __ j(not_equal, exit);
+
+      __ incq(rax);
+      __ Move(rdx, Smi::FromInt(x));
+      __ Move(r11, Smi::FromInt(shift));
+      __ SmiShiftLogicalRight(r9, rdx, r11, exit);
+
+      __ incq(rax);
+      __ SmiCompare(r9, r8);
+      __ j(not_equal, exit);
+
+      __ incq(rax);
+    } else {
+      // Reached when the logical shift result leaves the smi range,
+      // e.g. a negative value shifted by zero bits.
+      Label fail_ok;
+      __ Move(rcx, Smi::FromInt(x));
+      __ movq(r11, rcx);
+      __ SmiShiftLogicalRightConstant(r9, rcx, shift, &fail_ok);
+      __ jmp(exit);
+      __ bind(&fail_ok);
+
+      __ incq(rax);
+      __ SmiCompare(rcx, r11);
+      __ j(not_equal, exit);
+
+      __ incq(rax);
+      __ Move(r8, Smi::FromInt(shift));
+      Label fail_ok3;
+      __ SmiShiftLogicalRight(r9, rcx, r8, &fail_ok3);
+      __ jmp(exit);
+      __ bind(&fail_ok3);
+
+      __ incq(rax);
+      __ SmiCompare(rcx, r11);
+      __ j(not_equal, exit);
+
+      __ addq(rax, Immediate(3));
+    }
+  }
+}
+
+
+TEST(SmiShiftLogicalRight) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+                                      &actual_size,
+                                      true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  TestSmiShiftLogicalRight(masm, &exit, 0x10, 0);
+  TestSmiShiftLogicalRight(masm, &exit, 0x30, 1);
+  TestSmiShiftLogicalRight(masm, &exit, 0x50, 127);
+  TestSmiShiftLogicalRight(masm, &exit, 0x70, 65535);
+  TestSmiShiftLogicalRight(masm, &exit, 0x90, Smi::kMaxValue);
+  TestSmiShiftLogicalRight(masm, &exit, 0xB0, Smi::kMinValue);
+  TestSmiShiftLogicalRight(masm, &exit, 0xD0, -1);
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
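+// An arithmetic right shift of a valid smi always yields a valid smi,
+// so these helpers take no bailout label. The expected value uses a
+// portable idiom because >> on a negative int is implementation-
+// defined in C++.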
+void TestSmiShiftArithmeticRight(MacroAssembler* masm,
+                                 Label* exit,
+                                 int id,
+                                 int x) {
+  const int shifts[] = { 0, 1, 7, 24, kSmiValueSize - 1 };
+  const int kNumShifts = 5;
+  __ movl(rax, Immediate(id));
+  for (int i = 0; i < kNumShifts; i++) {
+    int shift = shifts[i];
+    // Guaranteed arithmetic shift.
+    int result = (x < 0) ? ~((~x) >> shift) : (x >> shift);
+    __ Move(r8, Smi::FromInt(result));
+    __ Move(rcx, Smi::FromInt(x));
+    __ SmiShiftArithmeticRightConstant(rcx, rcx, shift);
+
+    __ SmiCompare(rcx, r8);
+    __ j(not_equal, exit);
+
+    __ incq(rax);
+    __ Move(rdx, Smi::FromInt(x));
+    __ Move(r11, Smi::FromInt(shift));
+    __ SmiShiftArithmeticRight(rdx, rdx, r11);
+
+    __ SmiCompare(rdx, r8);
+    __ j(not_equal, exit);
+
+    __ incq(rax);
+  }
+}
+
+
+TEST(SmiShiftArithmeticRight) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+                                      &actual_size,
+                                      true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  TestSmiShiftArithmeticRight(masm, &exit, 0x10, 0);
+  TestSmiShiftArithmeticRight(masm, &exit, 0x20, 1);
+  TestSmiShiftArithmeticRight(masm, &exit, 0x30, 127);
+  TestSmiShiftArithmeticRight(masm, &exit, 0x40, 65535);
+  TestSmiShiftArithmeticRight(masm, &exit, 0x50, Smi::kMaxValue);
+  TestSmiShiftArithmeticRight(masm, &exit, 0x60, Smi::kMinValue);
+  TestSmiShiftArithmeticRight(masm, &exit, 0x70, -1);
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
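+// PositiveSmiTimesPowerOfTwoToInteger64 produces an untagged 64-bit
+// integer, which may exceed the smi range; the expected value is
+// therefore materialized with Set (a raw constant) rather than a
+// tagged smi Move.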
+void TestPositiveSmiPowerUp(MacroAssembler* masm, Label* exit, int id, int x) {
+  ASSERT(x >= 0);
+  int powers[] = { 0, 1, 2, 3, 8, 16, 24, 31 };
+  int power_count = 8;
+  __ movl(rax, Immediate(id));
+  for (int i = 0; i < power_count; i++) {
+    int power = powers[i];
+    intptr_t result = static_cast<intptr_t>(x) << power;
+    __ Set(r8, result);
+    __ Move(rcx, Smi::FromInt(x));
+    __ movq(r11, rcx);
+    __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rcx, power);
+    __ SmiCompare(rdx, r8);
+    __ j(not_equal, exit);
+    __ incq(rax);
+    __ SmiCompare(r11, rcx);  // rcx unchanged.
+    __ j(not_equal, exit);
+    __ incq(rax);
+    __ PositiveSmiTimesPowerOfTwoToInteger64(rcx, rcx, power);
+    __ SmiCompare(rcx, r8);
+    __ j(not_equal, exit);
+    __ incq(rax);
+  }
+}
+
+
+TEST(PositiveSmiTimesPowerOfTwoToInteger64) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
+                                      &actual_size,
+                                      true));
+  CHECK(buffer);
+  HandleScope handles;
+  MacroAssembler assembler(buffer, actual_size);
+
+  MacroAssembler* masm = &assembler;
+  masm->set_allow_stub_calls(false);
+  Label exit;
+
+  TestPositiveSmiPowerUp(masm, &exit, 0x20, 0);
+  TestPositiveSmiPowerUp(masm, &exit, 0x40, 1);
+  TestPositiveSmiPowerUp(masm, &exit, 0x60, 127);
+  TestPositiveSmiPowerUp(masm, &exit, 0x80, 128);
+  TestPositiveSmiPowerUp(masm, &exit, 0xA0, 255);
+  TestPositiveSmiPowerUp(masm, &exit, 0xC0, 256);
+  TestPositiveSmiPowerUp(masm, &exit, 0x100, 65535);
+  TestPositiveSmiPowerUp(masm, &exit, 0x120, 65536);
+  TestPositiveSmiPowerUp(masm, &exit, 0x140, Smi::kMaxValue);
+
+  __ xor_(rax, rax);  // Success.
+  __ bind(&exit);
+  __ ret(0);
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  // Call the function from C++.
+  int result = FUNCTION_CAST<F0>(buffer)();
+  CHECK_EQ(0, result);
+}
+
+
+#undef __
diff --git a/test/mjsunit/compiler/literals-assignment.js b/test/mjsunit/compiler/literals-assignment.js
new file mode 100644
index 0000000..932bfa7
--- /dev/null
+++ b/test/mjsunit/compiler/literals-assignment.js
@@ -0,0 +1,71 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Tests for simple assignments and literals inside an anonymous function
+
+// Test simple return statement.
+assertEquals(8, eval("(function() { return 8; })()"));
+
+// Test simple assignment
+var code = "(function() {\
+               var a;\
+               a = 8;\
+               return a;\
+             })()";
+assertEquals(8, eval(code));
+
+code = "(function() {\
+           var x;\
+           x = 'abc';\
+           return x;\
+         })()";
+assertEquals("abc", eval(code));
+
+// Test assignment as an RHS expression
+
+code = "(function() {\
+           var x, y;\
+           x = y = 8;\
+           return x;\
+         })()";
+assertEquals(8, eval(code));
+
+
+code = "(function() {\
+           var x, y;\
+           x = y = 8;\
+           return y;\
+         })()";
+assertEquals(8, eval(code));
+
+
+code = "(function() {\
+           var x,y,z;\
+           return x = y = z = 8;\
+         })()";
+assertEquals(8, eval(code));
+
diff --git a/test/mjsunit/compiler/literals.js b/test/mjsunit/compiler/literals.js
new file mode 100644
index 0000000..e0e532f
--- /dev/null
+++ b/test/mjsunit/compiler/literals.js
@@ -0,0 +1,35 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test simple literals.
+assertEquals(8, eval("8"));
+
+assertEquals(null, eval("null"));
+
+assertEquals("abc", eval("'abc'"));
+
+assertEquals(8, eval("6;'abc';8"));
diff --git a/test/mjsunit/debug-backtrace.js b/test/mjsunit/debug-backtrace.js
index 0c200ae..d15b2d2 100644
--- a/test/mjsunit/debug-backtrace.js
+++ b/test/mjsunit/debug-backtrace.js
@@ -69,6 +69,11 @@
 }
 
 
+ParsedResponse.prototype.running = function() {
+  return this.response_.running;
+}
+
+
 ParsedResponse.prototype.lookup = function(handle) {
   return this.refs_[handle];
 }
@@ -88,8 +93,9 @@
       var frame;
       var source;
 
-      // Get the debug command processor.
-      var dcp = exec_state.debugCommandProcessor();
+      var dcp;
+      // Get a new debug command processor, created in the paused state.
+      dcp = exec_state.debugCommandProcessor(false);
 
       // Get the backtrace.
       var json;
@@ -114,6 +120,7 @@
       assertEquals("g", response.lookup(frames[2].func.ref).name);
       assertEquals(3, frames[3].index);
       assertEquals("", response.lookup(frames[3].func.ref).name);
+      assertFalse(response.running(), "expected not running");
 
       // Get backtrace with two frames.
       json = '{"seq":0,"type":"request","command":"backtrace","arguments":{"fromFrame":1,"toFrame":3}}'
@@ -234,6 +241,17 @@
       source = response.body();
       assertEquals(Debug.findScript(f).source, source.source);
 
+      // New copy of debug command processor in running state.
+      dcp = exec_state.debugCommandProcessor(true);
+      // Get the backtrace.
+      json = '{"seq":0,"type":"request","command":"backtrace"}'
+      resp = dcp.processDebugJSONRequest(json);
+      response = new ParsedResponse(resp);
+      // It might be arguable, but we expect the response to have a body
+      // even when not suspended.
+      assertTrue(!!response.body(), "response should have a body");
+      assertTrue(response.running(), "expected running");
+
       listenerCalled = true;
     }
   } catch (e) {
diff --git a/test/mjsunit/debug-changebreakpoint.js b/test/mjsunit/debug-changebreakpoint.js
index 477c908..936523a 100644
--- a/test/mjsunit/debug-changebreakpoint.js
+++ b/test/mjsunit/debug-changebreakpoint.js
@@ -59,7 +59,7 @@
   try {
   if (event == Debug.DebugEvent.Break) {
     // Get the debug command processor.
-    var dcp = exec_state.debugCommandProcessor();
+    var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
 
     // Test some illegal clearbreakpoint requests.
     var request = '{' + base_request + '}'
diff --git a/test/mjsunit/debug-clearbreakpoint.js b/test/mjsunit/debug-clearbreakpoint.js
index 28920c5..59479f2 100644
--- a/test/mjsunit/debug-clearbreakpoint.js
+++ b/test/mjsunit/debug-clearbreakpoint.js
@@ -59,7 +59,7 @@
   try {
   if (event == Debug.DebugEvent.Break) {
     // Get the debug command processor.
-    var dcp = exec_state.debugCommandProcessor();
+    var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
 
     // Test some illegal clearbreakpoint requests.
     var request = '{' + base_request + '}'
diff --git a/test/mjsunit/debug-clearbreakpointgroup.js b/test/mjsunit/debug-clearbreakpointgroup.js
index eca9378..aad6c3a 100644
--- a/test/mjsunit/debug-clearbreakpointgroup.js
+++ b/test/mjsunit/debug-clearbreakpointgroup.js
@@ -60,7 +60,7 @@
   try {
     if (event == Debug.DebugEvent.Break) {
       // Get the debug command processor.
-      var dcp = exec_state.debugCommandProcessor();
+      var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
 
       // Clear breakpoint group 1.
       testArguments(dcp, '{"groupId":1}', true);
diff --git a/test/mjsunit/debug-continue.js b/test/mjsunit/debug-continue.js
index 0c11abc..a501aa9 100644
--- a/test/mjsunit/debug-continue.js
+++ b/test/mjsunit/debug-continue.js
@@ -44,7 +44,10 @@
   }
 }
 
-function testArguments(dcp, arguments, success) {
+function testArguments(exec_state, arguments, success) {
+  // Get the debug command processor in paused state.
+  var dcp = exec_state.debugCommandProcessor(false);
+
   // Generate request with the supplied arguments
   var request;
   if (arguments) {
@@ -65,25 +68,23 @@
 function listener(event, exec_state, event_data, data) {
   try {
   if (event == Debug.DebugEvent.Break) {
-    // Get the debug command processor.
-    var dcp = exec_state.debugCommandProcessor();
 
     // Test simple continue request.
-    testArguments(dcp, void 0, true);
+    testArguments(exec_state, void 0, true);
 
     // Test some illegal continue requests.
-    testArguments(dcp, '{"stepaction":"maybe"}', false);
-    testArguments(dcp, '{"stepcount":-1}', false);
+    testArguments(exec_state, '{"stepaction":"maybe"}', false);
+    testArguments(exec_state, '{"stepcount":-1}', false);
 
     // Test some legal continue requests.
-    testArguments(dcp, '{"stepaction":"in"}', true);
-    testArguments(dcp, '{"stepaction":"min"}', true);
-    testArguments(dcp, '{"stepaction":"next"}', true);
-    testArguments(dcp, '{"stepaction":"out"}', true);
-    testArguments(dcp, '{"stepcount":1}', true);
-    testArguments(dcp, '{"stepcount":10}', true);
-    testArguments(dcp, '{"stepcount":"10"}', true);
-    testArguments(dcp, '{"stepaction":"next","stepcount":10}', true);
+    testArguments(exec_state, '{"stepaction":"in"}', true);
+    testArguments(exec_state, '{"stepaction":"min"}', true);
+    testArguments(exec_state, '{"stepaction":"next"}', true);
+    testArguments(exec_state, '{"stepaction":"out"}', true);
+    testArguments(exec_state, '{"stepcount":1}', true);
+    testArguments(exec_state, '{"stepcount":10}', true);
+    testArguments(exec_state, '{"stepcount":"10"}', true);
+    testArguments(exec_state, '{"stepaction":"next","stepcount":10}', true);
 
     // Indicate that all was processed.
     listenerComplete = true;
@@ -108,6 +109,6 @@
 Debug.setBreakPoint(g, 0, 0);
 g();
 
+assertFalse(exception, "exception in listener")
 // Make sure that the debug event listener vas invoked.
 assertTrue(listenerComplete, "listener did not run to completion");
-assertFalse(exception, "exception in listener")
diff --git a/test/mjsunit/debug-evaluate-bool-constructor.js b/test/mjsunit/debug-evaluate-bool-constructor.js
new file mode 100644
index 0000000..809a5cc
--- /dev/null
+++ b/test/mjsunit/debug-evaluate-bool-constructor.js
@@ -0,0 +1,80 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+var listenerComplete = false;
+var exception = false;
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Break) {
+      // Get the debug command processor.
+      var dcp = exec_state.debugCommandProcessor();
+
+      var request = {
+         seq: 0,
+         type: 'request',
+         command: 'evaluate',
+         arguments: {
+           expression: 'a',
+           frame: 0
+         }
+      };
+      request = JSON.stringify(request);
+
+      var resp = dcp.processDebugJSONRequest(request);
+      var response = JSON.parse(resp);
+      assertTrue(response.success, 'Command failed: ' + resp);
+      assertEquals('object', response.body.type);
+      assertEquals('Object', response.body.className);
+
+      // Indicate that all was processed.
+      listenerComplete = true;
+    }
+  } catch (e) {
+   exception = e
+  };
+};
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+function callDebugger() {
+  // Set the constructor field to a non-function value.
+  var a = {constructor:true};
+  debugger;
+}
+
+callDebugger();
+
+
+// Make sure that the debug event listener was invoked.
+assertFalse(exception, "exception in listener")
+assertTrue(listenerComplete, "listener did not run to completion");
diff --git a/test/mjsunit/debug-evaluate-recursive.js b/test/mjsunit/debug-evaluate-recursive.js
index 9f037e5..6ee391b 100644
--- a/test/mjsunit/debug-evaluate-recursive.js
+++ b/test/mjsunit/debug-evaluate-recursive.js
@@ -44,7 +44,10 @@
   }
 }
 
-function testRequest(dcp, arguments, success, result) {
+function testRequest(exec_state, arguments, success, result) {
+  // Get the debug command processor in paused state.
+  var dcp = exec_state.debugCommandProcessor(false);
+
   // Generate request with the supplied arguments.
   var request;
   if (arguments) {
@@ -74,23 +77,20 @@
       assertEquals(1, exec_state.frame(0).evaluate('f()', true).value());
       assertEquals(2, exec_state.frame(0).evaluate('g()', true).value());
 
-      // Get the debug command processor.
-      var dcp = exec_state.debugCommandProcessor();
-
       // Call functions with break using the JSON protocol. Tests that argument
       // disable_break is default true.
-      testRequest(dcp, '{"expression":"f()"}', true, 1);
-      testRequest(dcp, '{"expression":"f()","frame":0}',  true, 1);
-      testRequest(dcp, '{"expression":"g()"}', true, 2);
-      testRequest(dcp, '{"expression":"g()","frame":0}',  true, 2);
+      testRequest(exec_state, '{"expression":"f()"}', true, 1);
+      testRequest(exec_state, '{"expression":"f()","frame":0}',  true, 1);
+      testRequest(exec_state, '{"expression":"g()"}', true, 2);
+      testRequest(exec_state, '{"expression":"g()","frame":0}',  true, 2);
 
       // Call functions with break using the JSON protocol. Tests passing
       // argument disable_break is default true.
-      testRequest(dcp, '{"expression":"f()","disable_break":true}', true, 1);
-      testRequest(dcp, '{"expression":"f()","frame":0,"disable_break":true}',
+      testRequest(exec_state, '{"expression":"f()","disable_break":true}', true, 1);
+      testRequest(exec_state, '{"expression":"f()","frame":0,"disable_break":true}',
                   true, 1);
-      testRequest(dcp, '{"expression":"g()","disable_break":true}', true, 2);
-      testRequest(dcp, '{"expression":"g()","frame":0,"disable_break":true}',
+      testRequest(exec_state, '{"expression":"g()","disable_break":true}', true, 2);
+      testRequest(exec_state, '{"expression":"g()","frame":0,"disable_break":true}',
                   true, 2);
 
       // Indicate that all was processed.
@@ -146,9 +146,9 @@
 // Cause a debug break event.
 debugger;
 
+assertFalse(exception, "exception in listener")
 // Make sure that the debug event listener vas invoked.
 assertTrue(listenerComplete);
-assertFalse(exception, "exception in listener")
 
 // Remove the debug event listener.
 Debug.setListener(null);
@@ -161,7 +161,7 @@
 
 debugger;
 
+assertFalse(exception, "exception in listener")
 // Make sure that the debug event listener vas invoked.
 assertTrue(listenerComplete);
-assertFalse(exception, "exception in listener")
 assertEquals(2, break_count);
diff --git a/test/mjsunit/debug-evaluate.js b/test/mjsunit/debug-evaluate.js
index 5c5734f..c477907 100644
--- a/test/mjsunit/debug-evaluate.js
+++ b/test/mjsunit/debug-evaluate.js
@@ -59,14 +59,15 @@
   } else {
     assertFalse(response.success, request + ' -> ' + response.message);
   }
-  assertFalse(response.running, request + ' -> expected not running');
+  assertEquals("unspecified_running_state", response.running,
+               request + ' -> expected not running');
 }
 
 function listener(event, exec_state, event_data, data) {
   try {
     if (event == Debug.DebugEvent.Break) {
       // Get the debug command processor.
-      var dcp = exec_state.debugCommandProcessor();
+      var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
 
       // Test some illegal evaluate requests.
       testRequest(dcp, void 0, false);
@@ -112,6 +113,6 @@
 Debug.setBreakPoint(f, 2, 0);
 g();
 
+assertFalse(exception, "exception in listener")
 // Make sure that the debug event listener vas invoked.
 assertTrue(listenerComplete, "listener did not run to completion");
-assertFalse(exception, "exception in listener")
diff --git a/test/mjsunit/debug-handle.js b/test/mjsunit/debug-handle.js
index c7ab76a..98875ce 100644
--- a/test/mjsunit/debug-handle.js
+++ b/test/mjsunit/debug-handle.js
@@ -43,7 +43,10 @@
 
 
 // Send an evaluation request and return the handle of the result.
-function evaluateRequest(dcp, arguments) {
+function evaluateRequest(exec_state, arguments) {
+  // Get the debug command processor.
+  var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
+
   // The base part of all evaluate requests.
   var base_request = '"seq":0,"type":"request","command":"evaluate"'
 
@@ -63,7 +66,10 @@
 
 
 // Send a lookup request and return the evaluated JSON response.
-function lookupRequest(dcp, arguments, success) {
+function lookupRequest(exec_state, arguments, success) {
+  // Get the debug command processor.
+  var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
+
   // The base part of all lookup requests.
   var base_request = '"seq":0,"type":"request","command":"lookup"'
   
@@ -81,7 +87,7 @@
   } else {
     assertFalse(response.success, request + ' -> ' + response.message);
   }
-  assertFalse(response.running, request + ' -> expected not running');
+  assertEquals(dcp.isRunning(), response.running, request + ' -> expected not running');
 
   return response;
 }
@@ -90,26 +96,23 @@
 function listener(event, exec_state, event_data, data) {
   try {
   if (event == Debug.DebugEvent.Break) {
-    // Get the debug command processor.
-    var dcp = exec_state.debugCommandProcessor();
-
     // Test some illegal lookup requests.
-    lookupRequest(dcp, void 0, false);
-    lookupRequest(dcp, '{"handles":["a"]}', false);
-    lookupRequest(dcp, '{"handles":[-1]}', false);
+    lookupRequest(exec_state, void 0, false);
+    lookupRequest(exec_state, '{"handles":["a"]}', false);
+    lookupRequest(exec_state, '{"handles":[-1]}', false);
 
     // Evaluate and get some handles.
-    var handle_o = evaluateRequest(dcp, '{"expression":"o"}');
-    var handle_p = evaluateRequest(dcp, '{"expression":"p"}');
-    var handle_b = evaluateRequest(dcp, '{"expression":"a"}');
-    var handle_a = evaluateRequest(dcp, '{"expression":"b","frame":1}');
+    var handle_o = evaluateRequest(exec_state, '{"expression":"o"}');
+    var handle_p = evaluateRequest(exec_state, '{"expression":"p"}');
+    var handle_b = evaluateRequest(exec_state, '{"expression":"a"}');
+    var handle_a = evaluateRequest(exec_state, '{"expression":"b","frame":1}');
     assertEquals(handle_o, handle_a);
     assertEquals(handle_a, handle_b);
     assertFalse(handle_o == handle_p, "o and p have he same handle");
 
     var response;
     var count;
-    response = lookupRequest(dcp, '{"handles":[' + handle_o + ']}', true);
+    response = lookupRequest(exec_state, '{"handles":[' + handle_o + ']}', true);
     var obj = response.body[handle_o];
     assertTrue(!!obj, 'Object not found: ' + handle_o);
     assertEquals(handle_o, obj.handle);
@@ -127,20 +130,20 @@
       }
     }
     assertEquals(2, count, 'Either "o" or "p" not found');
-    response = lookupRequest(dcp, '{"handles":[' + handle_p + ']}', true);
+    response = lookupRequest(exec_state, '{"handles":[' + handle_p + ']}', true);
     obj = response.body[handle_p];
     assertTrue(!!obj, 'Object not found: ' + handle_p);
     assertEquals(handle_p, obj.handle);
 
     // Check handles for functions on the stack.
-    var handle_f = evaluateRequest(dcp, '{"expression":"f"}');
-    var handle_g = evaluateRequest(dcp, '{"expression":"g"}');
-    var handle_caller = evaluateRequest(dcp, '{"expression":"f.caller"}');
+    var handle_f = evaluateRequest(exec_state, '{"expression":"f"}');
+    var handle_g = evaluateRequest(exec_state, '{"expression":"g"}');
+    var handle_caller = evaluateRequest(exec_state, '{"expression":"f.caller"}');
 
     assertFalse(handle_f == handle_g, "f and g have he same handle");
     assertEquals(handle_g, handle_caller, "caller for f should be g");
 
-    response = lookupRequest(dcp, '{"handles":[' + handle_f + ']}', true);
+    response = lookupRequest(exec_state, '{"handles":[' + handle_f + ']}', true);
     obj = response.body[handle_f];
     assertEquals(handle_f, obj.handle);
 
@@ -151,14 +154,14 @@
       switch (obj.properties[i].name) {
         case 'name':
           var response_name;
-          response_name = lookupRequest(dcp, arguments, true);
+          response_name = lookupRequest(exec_state, arguments, true);
           assertEquals('string', response_name.body[ref].type);
           assertEquals("f", response_name.body[ref].value);
           count++;
           break;
         case 'length':
           var response_length;
-          response_length = lookupRequest(dcp, arguments, true);
+          response_length = lookupRequest(exec_state, arguments, true);
           assertEquals('number', response_length.body[ref].type);
           assertEquals(1, response_length.body[ref].value);
           count++;
@@ -179,7 +182,7 @@
     }
 
     var arguments = '{"handles":[' + refs.join(',') + ']}';
-    response = lookupRequest(dcp, arguments, true);
+    response = lookupRequest(exec_state, arguments, true);
     count = 0;
     for (i in obj.properties) {
       var ref = obj.properties[i].ref;
@@ -244,6 +247,6 @@
 p.p = p;
 g(o);
 
+assertFalse(exception, "exception in listener")
 // Make sure that the debug event listener vas invoked.
 assertTrue(listenerComplete, "listener did not run to completion: " + exception);
-assertFalse(exception, "exception in listener")
diff --git a/test/mjsunit/debug-mirror-cache.js b/test/mjsunit/debug-mirror-cache.js
index d15146f..5b85306 100644
--- a/test/mjsunit/debug-mirror-cache.js
+++ b/test/mjsunit/debug-mirror-cache.js
@@ -41,7 +41,7 @@
 Debug = debug.Debug
 
 listenerCallCount = 0;
-listenerExceptionCount = 0;
+listenerExceptions = [];
 
 
 function listener(event, exec_state, event_data, data) {
@@ -54,8 +54,8 @@
     assertEquals(0, debug.next_handle_, "Mirror cache not cleared");
     assertEquals(0, debug.mirror_cache_.length, "Mirror cache not cleared");
 
-    // Get the debug command processor.
-    var dcp = exec_state.debugCommandProcessor();
+    // Get the debug command processor in paused state.
+    var dcp = exec_state.debugCommandProcessor(false);
 
     // Make a backtrace request to create some mirrors.
     var json;
@@ -68,7 +68,7 @@
   }
   } catch (e) {
     print(e);
-    listenerExceptionCount++;
+    listenerExceptions.push(e);
   };
 };
 
@@ -79,7 +79,7 @@
 debugger;
 debugger;
 
+assertEquals([], listenerExceptions, "Exception in listener");
 // Make sure that the debug event listener vas invoked.
 assertEquals(2, listenerCallCount, "Listener not called");
-assertEquals(0, listenerExceptionCount, "Exception in listener");
 
diff --git a/test/mjsunit/debug-references.js b/test/mjsunit/debug-references.js
index 1fde1ac..452761c 100644
--- a/test/mjsunit/debug-references.js
+++ b/test/mjsunit/debug-references.js
@@ -66,14 +66,14 @@
  } else {
    assertFalse(response.success, request + ' -> ' + response.message);
  }
-  assertFalse(response.running, request + ' -> expected not running');
+  assertEquals(dcp.isRunning(), response.running, request + ' -> expected not running');
 }
 
 function listener(event, exec_state, event_data, data) {
   try {
   if (event == Debug.DebugEvent.Break) {
     // Get the debug command processor.
-    var dcp = exec_state.debugCommandProcessor();
+    var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
 
     // Test some illegal references requests.
     testRequest(dcp, void 0, false);
diff --git a/test/mjsunit/debug-scopes.js b/test/mjsunit/debug-scopes.js
index e87cbb7..af29df9 100644
--- a/test/mjsunit/debug-scopes.js
+++ b/test/mjsunit/debug-scopes.js
@@ -92,7 +92,7 @@
   }
   
   // Get the debug command processor.
-  var dcp = exec_state.debugCommandProcessor();
+  var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
   
   // Send a scopes request and check the result.
   var json;
@@ -155,7 +155,7 @@
   assertEquals(count, scope_size);
 
   // Get the debug command processor.
-  var dcp = exec_state.debugCommandProcessor();
+  var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
   
   // Send a scope request for information on a single scope and check the
   // result.
diff --git a/test/mjsunit/debug-scripts-request.js b/test/mjsunit/debug-scripts-request.js
index 80b3bce..41bff0e 100644
--- a/test/mjsunit/debug-scripts-request.js
+++ b/test/mjsunit/debug-scripts-request.js
@@ -60,7 +60,7 @@
   try {
   if (event == Debug.DebugEvent.Break) {
     // Get the debug command processor.
-    var dcp = exec_state.debugCommandProcessor();
+    var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
 
     // Test illegal scripts requests.
     testArguments(dcp, '{"types":"xx"}', false);
diff --git a/test/mjsunit/debug-setbreakpoint.js b/test/mjsunit/debug-setbreakpoint.js
index f8d9b15..08492b4 100644
--- a/test/mjsunit/debug-setbreakpoint.js
+++ b/test/mjsunit/debug-setbreakpoint.js
@@ -69,7 +69,7 @@
   try {
   if (event == Debug.DebugEvent.Break) {
     // Get the debug command processor.
-    var dcp = exec_state.debugCommandProcessor();
+    var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
 
     // Test some illegal setbreakpoint requests.
     var request = '{' + base_request + '}'
diff --git a/test/mjsunit/debug-suspend.js b/test/mjsunit/debug-suspend.js
new file mode 100644
index 0000000..73a2e8c
--- /dev/null
+++ b/test/mjsunit/debug-suspend.js
@@ -0,0 +1,96 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+// Simple function which stores the last debug event.
+listenerComplete = false;
+exception = false;
+
+var base_backtrace_request = '"seq":0,"type":"request","command":"backtrace"'
+var base_suspend_request = '"seq":0,"type":"request","command":"suspend"'
+
+function safeEval(code) {
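+  // Evaluate the JSON response text; a parse failure is reported through
+  // the assert in the catch block and fails the test.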
+  try {
+    return eval('(' + code + ')');
+  } catch (e) {
+    assertEquals(void 0, e);
+    return undefined;
+  }
+}
+
+function testArguments(exec_state) {
+  // Get the debug command processor in running state.
+  var dcp = exec_state.debugCommandProcessor(true);
+
+  assertTrue(dcp.isRunning());
+
+  var backtrace_request = '{' + base_backtrace_request + '}'
+  var backtrace_response = safeEval(dcp.processDebugJSONRequest(backtrace_request));
+
+  assertTrue(backtrace_response.success);
+
+  assertTrue(backtrace_response.running, backtrace_request + ' -> expected running');
+
+  assertTrue(dcp.isRunning());
+
+  var suspend_request = '{' + base_suspend_request + '}'
+  var suspend_response = safeEval(dcp.processDebugJSONRequest(suspend_request));
+
+  assertTrue(suspend_response.success);
+
+  assertFalse(suspend_response.running, suspend_request + ' -> expected not running');
+
+  assertFalse(dcp.isRunning());
+}
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Break) {
+
+      // Test simple suspend request.
+      testArguments(exec_state);
+
+      // Indicate that all was processed.
+      listenerComplete = true;
+    }
+  } catch (e) {
+    exception = e
+  };
+};
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+// Stop debugger and check that suspend command changes running flag.
+debugger;
+
+assertFalse(exception, "exception in listener")
+// Make sure that the debug event listener was invoked.
+assertTrue(listenerComplete, "listener did not run to completion");
diff --git a/test/mjsunit/for-in.js b/test/mjsunit/for-in.js
index dfe721d..e3436ff 100644
--- a/test/mjsunit/for-in.js
+++ b/test/mjsunit/for-in.js
@@ -31,21 +31,21 @@
   return array.sort();
 }
 
-assertEquals(0, props({}).length);
-assertEquals(1, props({x:1}).length);
-assertEquals(2, props({x:1, y:2}).length);
+assertEquals(0, props({}).length, "olen0");
+assertEquals(1, props({x:1}).length, "olen1");
+assertEquals(2, props({x:1, y:2}).length, "olen2");
 
-assertArrayEquals(["x"], props({x:1}));
-assertArrayEquals(["x", "y"], props({x:1, y:2}));
-assertArrayEquals(["x", "y", "zoom"], props({x:1, y:2, zoom:3}));
+assertArrayEquals(["x"], props({x:1}), "x");
+assertArrayEquals(["x", "y"], props({x:1, y:2}), "xy");
+assertArrayEquals(["x", "y", "zoom"], props({x:1, y:2, zoom:3}), "xyzoom");
 
-assertEquals(0, props([]).length);
-assertEquals(1, props([1]).length);
-assertEquals(2, props([1,2]).length);
+assertEquals(0, props([]).length, "alen0");
+assertEquals(1, props([1]).length, "alen1");
+assertEquals(2, props([1,2]).length, "alen2");
 
-assertArrayEquals(["0"], props([1]));
-assertArrayEquals(["0", "1"], props([1,2]));
-assertArrayEquals(["0", "1", "2"], props([1,2,3]));
+assertArrayEquals(["0"], props([1]), "0");
+assertArrayEquals(["0", "1"], props([1,2]), "01");
+assertArrayEquals(["0", "1", "2"], props([1,2,3]), "012");
 
 var o = {};
 var a = [];
@@ -54,33 +54,33 @@
   a.push(s);
   o[s] = i;
 }
-assertArrayEquals(a, props(o));
+assertArrayEquals(a, props(o), "charcodes");
 
 var a = [];
-assertEquals(0, props(a).length);
+assertEquals(0, props(a).length, "proplen0");
 a[Math.pow(2,30)-1] = 0;
-assertEquals(1, props(a).length);
+assertEquals(1, props(a).length, "proplen1");
 a[Math.pow(2,31)-1] = 0;
-assertEquals(2, props(a).length);
+assertEquals(2, props(a).length, "proplen2");
 a[1] = 0;
-assertEquals(3, props(a).length);
+assertEquals(3, props(a).length, "proplen3");
 
 for (var hest = 'hest' in {}) { }
-assertEquals('hest', hest);
+assertEquals('hest', hest, "empty-no-override");
 
 var result = '';
 for (var p in {a : [0], b : 1}) { result += p; }
-assertEquals('ab', result);
+assertEquals('ab', result, "ab");
 
 var result = '';
 for (var p in {a : {v:1}, b : 1}) { result += p; }
-assertEquals('ab', result);
+assertEquals('ab', result, "ab-nodeep");
 
 var result = '';
 for (var p in { get a() {}, b : 1}) { result += p; }
-assertEquals('ab', result);
+assertEquals('ab', result, "abget");
 
 var result = '';
 for (var p in { get a() {}, set a(x) {}, b : 1}) { result += p; }
-assertEquals('ab', result);
+assertEquals('ab', result, "abgetset");
 
diff --git a/test/mjsunit/regress/regress-1081309.js b/test/mjsunit/regress/regress-1081309.js
index a771ac0..009ede1 100644
--- a/test/mjsunit/regress/regress-1081309.js
+++ b/test/mjsunit/regress/regress-1081309.js
@@ -69,7 +69,7 @@
     // 0: [anonymous]
     
     // Get the debug command processor.
-    var dcp = exec_state.debugCommandProcessor();
+    var dcp = exec_state.debugCommandProcessor(false);
 
     // Get the backtrace.
     var json;
@@ -105,6 +105,6 @@
   // Ignore the exception "Cannot call method 'x' of undefined"
 }
 
+assertFalse(exception, "exception in listener", exception)
 // Make sure that the debug event listener vas invoked.
 assertTrue(listenerCalled, "listener not called");
-assertFalse(exception, "exception in listener", exception)
diff --git a/test/mjsunit/regress/regress-1199401.js b/test/mjsunit/regress/regress-1199401.js
index 792faea..cc7985d 100644
--- a/test/mjsunit/regress/regress-1199401.js
+++ b/test/mjsunit/regress/regress-1199401.js
@@ -27,35 +27,49 @@
 
 // Ensure that we can correctly change the sign of the most negative smi.
 
-assertEquals(1073741824, -1073741824 * -1);
-assertEquals(1073741824, -1073741824 / -1);
-assertEquals(1073741824, -(-1073741824));
-assertEquals(1073741824, 0 - (-1073741824));
+// Possible Smi ranges.
+var ranges = [{min: -1073741824, max: 1073741823, bits: 31},
+              {min: -2147483648, max: 2147483647, bits: 32}];
 
-var min_smi = -1073741824;
+for (var i = 0; i < ranges.length; i++) {
+  var range = ranges[i];
+  var min_smi = range.min;
+  var max_smi = range.max;
+  var bits = range.bits;
+  var name = bits + "-bit";
 
-assertEquals(1073741824, min_smi * -1);
-assertEquals(1073741824, min_smi / -1);
-assertEquals(1073741824, -min_smi);
-assertEquals(1073741824, 0 - min_smi);
+  var result = max_smi + 1;
 
-var zero = 0;
-var minus_one = -1;
+  // Min smi as literal
+  assertEquals(result, eval(min_smi + " * -1"), name + "-litconmult");
+  assertEquals(result, eval(min_smi + " / -1"), name + "-litcondiv");
+  assertEquals(result, eval("-(" + min_smi + ")"), name + "-litneg");
+  assertEquals(result, eval("0 - (" + min_smi + ")")), name + "-conlitsub";
 
-assertEquals(1073741824, min_smi * minus_one);
-assertEquals(1073741824, min_smi / minus_one);
-assertEquals(1073741824, -min_smi);
-assertEquals(1073741824, zero - min_smi);
+  // As variable:
+  assertEquals(result, min_smi * -1, name + "-varconmult");
+  assertEquals(result, min_smi / -1, name + "-varcondiv");
+  assertEquals(result, -min_smi, name + "-varneg");
+  assertEquals(result, 0 - min_smi, name + "-convarsub");
 
-assertEquals(1073741824, -1073741824 * minus_one);
-assertEquals(1073741824, -1073741824 / minus_one);
-assertEquals(1073741824, -(-1073741824));
-assertEquals(1073741824, zero - (-1073741824));
+  // Only variables:
+  var zero = 0;
+  var minus_one = -1;
 
-var half_min_smi = -(1<<15);
-var half_max_smi = (1<<15);
+  assertEquals(result, min_smi * minus_one, name + "-varvarmult");
+  assertEquals(result, min_smi / minus_one, name + "-varvardiv");
+  assertEquals(result, zero - min_smi, name + "-varvarsub");
 
-assertEquals(1073741824, -half_min_smi * half_max_smi);
-assertEquals(1073741824, half_min_smi * -half_max_smi);
-assertEquals(1073741824, half_max_smi * -half_min_smi);
-assertEquals(1073741824, -half_max_smi * half_min_smi);
+  // Constants as variables
+  assertEquals(result, eval(min_smi + " * minus_one"), name + "-litvarmult");
+  assertEquals(result, eval(min_smi + " / minus_one"), name + "-litvarmdiv");
+  assertEquals(result, eval("0 - (" + min_smi + ")"), name + "-varlitsub");
+
+  var half_min_smi = -(1 << (bits >> 1));
+  var half_max_smi = 1 << ((bits - 1) >> 1);
+
+  assertEquals(max_smi + 1, -half_min_smi * half_max_smi, name + "-half1");
+  assertEquals(max_smi + 1, half_min_smi * -half_max_smi, name + "-half2");
+  assertEquals(max_smi + 1, half_max_smi * -half_min_smi, name + "-half3");
+  assertEquals(max_smi + 1, -half_max_smi * half_min_smi, name + "-half4");
+}
diff --git a/test/mjsunit/smi-negative-zero.js b/test/mjsunit/smi-negative-zero.js
index 719ee49..6906443 100644
--- a/test/mjsunit/smi-negative-zero.js
+++ b/test/mjsunit/smi-negative-zero.js
@@ -37,64 +37,64 @@
 
 // variable op variable
 
-assertEquals(one / (-zero), -Infinity, "one / -0 I");
+assertEquals(-Infinity, one / (-zero), "one / -0 I");
 
-assertEquals(one / (zero * minus_one), -Infinity, "one / -1");
-assertEquals(one / (minus_one * zero), -Infinity, "one / -0 II");
-assertEquals(one / (zero * zero), Infinity, "one / 0 I");
-assertEquals(one / (minus_one * minus_one), 1, "one / 1");
+assertEquals(-Infinity, one / (zero * minus_one), "one / -1");
+assertEquals(-Infinity, one / (minus_one * zero), "one / -0 II");
+assertEquals(Infinity, one / (zero * zero), "one / 0 I");
+assertEquals(1, one / (minus_one * minus_one), "one / 1");
 
-assertEquals(one / (zero / minus_one), -Infinity, "one / -0 III");
-assertEquals(one / (zero / one), Infinity, "one / 0 II");
+assertEquals(-Infinity, one / (zero / minus_one), "one / -0 III");
+assertEquals(Infinity, one / (zero / one), "one / 0 II");
 
-assertEquals(one / (minus_four % two), -Infinity, "foo1");
-assertEquals(one / (minus_four % minus_two), -Infinity, "foo2");
-assertEquals(one / (four % two), Infinity, "foo3");
-assertEquals(one / (four % minus_two), Infinity, "foo4");
+assertEquals(-Infinity, one / (minus_four % two), "foo1");
+assertEquals(-Infinity, one / (minus_four % minus_two), "foo2");
+assertEquals(Infinity, one / (four % two), "foo3");
+assertEquals(Infinity, one / (four % minus_two), "foo4");
 
 // literal op variable
 
-assertEquals(one / (0 * minus_one), -Infinity, "bar1");
-assertEquals(one / (-1 * zero), -Infinity, "bar2");
-assertEquals(one / (0 * zero), Infinity, "bar3");
-assertEquals(one / (-1 * minus_one), 1, "bar4");
+assertEquals(-Infinity, one / (0 * minus_one), "bar1");
+assertEquals(-Infinity, one / (-1 * zero), "bar2");
+assertEquals(Infinity, one / (0 * zero), "bar3");
+assertEquals(1, one / (-1 * minus_one), "bar4");
 
-assertEquals(one / (0 / minus_one), -Infinity, "baz1");
-assertEquals(one / (0 / one), Infinity, "baz2");
+assertEquals(-Infinity, one / (0 / minus_one), "baz1");
+assertEquals(Infinity, one / (0 / one), "baz2");
 
-assertEquals(one / (-4 % two), -Infinity, "baz3");
-assertEquals(one / (-4 % minus_two), -Infinity, "baz4");
-assertEquals(one / (4 % two), Infinity, "baz5");
-assertEquals(one / (4 % minus_two), Infinity, "baz6");
+assertEquals(-Infinity, one / (-4 % two), "baz3");
+assertEquals(-Infinity, one / (-4 % minus_two), "baz4");
+assertEquals(Infinity, one / (4 % two), "baz5");
+assertEquals(Infinity, one / (4 % minus_two), "baz6");
 
 // variable op literal
 
-assertEquals(one / (zero * -1), -Infinity, "fizz1");
-assertEquals(one / (minus_one * 0), -Infinity, "fizz2");
-assertEquals(one / (zero * 0), Infinity, "fizz3");
-assertEquals(one / (minus_one * -1), 1, "fizz4");
+assertEquals(-Infinity, one / (zero * -1), "fizz1");
+assertEquals(-Infinity, one / (minus_one * 0), "fizz2");
+assertEquals(Infinity, one / (zero * 0), "fizz3");
+assertEquals(1, one / (minus_one * -1), "fizz4");
 
-assertEquals(one / (zero / -1), -Infinity, "buzz1");
-assertEquals(one / (zero / 1), Infinity, "buzz2");
+assertEquals(-Infinity, one / (zero / -1), "buzz1");
+assertEquals(Infinity, one / (zero / 1), "buzz2");
 
-assertEquals(one / (minus_four % 2), -Infinity, "buzz3");
-assertEquals(one / (minus_four % -2), -Infinity, "buzz4");
-assertEquals(one / (four % 2), Infinity, "buzz5");
-assertEquals(one / (four % -2), Infinity, "buzz6");
+assertEquals(-Infinity, one / (minus_four % 2), "buzz3");
+assertEquals(-Infinity, one / (minus_four % -2), "buzz4");
+assertEquals(Infinity, one / (four % 2), "buzz5");
+assertEquals(Infinity, one / (four % -2), "buzz6");
 
 // literal op literal
 
-assertEquals(one / (-0), -Infinity, "fisk1");
+assertEquals(-Infinity, one / (-0), "fisk1");
 
-assertEquals(one / (0 * -1), -Infinity, "fisk2");
-assertEquals(one / (-1 * 0), -Infinity, "fisk3");
-assertEquals(one / (0 * 0), Infinity, "fisk4");
-assertEquals(one / (-1 * -1), 1, "fisk5");
+assertEquals(-Infinity, one / (0 * -1), "fisk2");
+assertEquals(-Infinity, one / (-1 * 0), "fisk3");
+assertEquals(Infinity, one / (0 * 0), "fisk4");
+assertEquals(1, one / (-1 * -1), "fisk5");
 
-assertEquals(one / (0 / -1), -Infinity, "hest1");
-assertEquals(one / (0 / 1), Infinity, "hest2");
+assertEquals(-Infinity, one / (0 / -1), "hest1");
+assertEquals(Infinity, one / (0 / 1), "hest2");
 
-assertEquals(one / (-4 % 2), -Infinity, "fiskhest1");
-assertEquals(one / (-4 % -2), -Infinity, "fiskhest2");
-assertEquals(one / (4 % 2), Infinity, "fiskhest3");
-assertEquals(one / (4 % -2), Infinity, "fiskhest4");
+assertEquals(-Infinity, one / (-4 % 2), "fiskhest1");
+assertEquals(-Infinity, one / (-4 % -2), "fiskhest2");
+assertEquals(Infinity, one / (4 % 2), "fiskhest3");
+assertEquals(Infinity, one / (4 % -2), "fiskhest4");
diff --git a/test/mjsunit/testcfg.py b/test/mjsunit/testcfg.py
index e3f3fcd..49064b1 100644
--- a/test/mjsunit/testcfg.py
+++ b/test/mjsunit/testcfg.py
@@ -114,7 +114,8 @@
     bugs = [current_path + ['bugs', t] for t in self.Ls(join(self.root, 'bugs'))]
     third_party = [current_path + ['third_party', t] for t in self.Ls(join(self.root, 'third_party'))]
     tools = [current_path + ['tools', t] for t in self.Ls(join(self.root, 'tools'))]
-    all_tests = mjsunit + regress + bugs + third_party + tools
+    compiler = [current_path + ['compiler', t] for t in self.Ls(join(self.root, 'compiler'))]
+    all_tests = mjsunit + regress + bugs + third_party + tools + compiler
     result = []
     for test in all_tests:
       if self.Contains(path, test):
diff --git a/test/mjsunit/third_party/array-isarray.js b/test/mjsunit/third_party/array-isarray.js
new file mode 100644
index 0000000..0fc42a3
--- /dev/null
+++ b/test/mjsunit/third_party/array-isarray.js
@@ -0,0 +1,48 @@
+// Copyright (c) 2009 Apple Computer, Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// 3. Neither the name of the copyright holder(s) nor the names of any
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Based on LayoutTests/fast/js/resources/Array-isArray.js
+
+assertTrue(Array.isArray([]));
+assertTrue(Array.isArray(new Array));
+assertTrue(Array.isArray(Array()));
+assertTrue(Array.isArray('abc'.match(/(a)*/g)));
+assertFalse((function(){ return Array.isArray(arguments); })());
+assertFalse(Array.isArray());
+assertFalse(Array.isArray(null));
+assertFalse(Array.isArray(undefined));
+assertFalse(Array.isArray(true));
+assertFalse(Array.isArray(false));
+assertFalse(Array.isArray('a string'));
+assertFalse(Array.isArray({}));
+assertFalse(Array.isArray({length: 5}));
+assertFalse(Array.isArray({__proto__: Array.prototype, length:1, 0:1, 1:2}));
+
diff --git a/test/mjsunit/third_party/string-trim.js b/test/mjsunit/third_party/string-trim.js
new file mode 100644
index 0000000..234dff6
--- /dev/null
+++ b/test/mjsunit/third_party/string-trim.js
@@ -0,0 +1,107 @@
+// Copyright (c) 2009 Apple Computer, Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+//
+// 3. Neither the name of the copyright holder(s) nor the names of any
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Based on LayoutTests/fast/js/script-tests/string-trim.js
+
+// References to trim(), trimLeft() and trimRight() functions for 
+// testing Function's *.call() and *.apply() methods.
+
+var trim            = String.prototype.trim;
+var trimLeft        = String.prototype.trimLeft;
+var trimRight       = String.prototype.trimRight;
+
+var testString      = 'foo bar';
+var trimString      = '';
+var leftTrimString  = '';
+var rightTrimString = '';
+var wsString        = '';
+
+var whitespace      = [
+  {s : '\u0009', t : 'HORIZONTAL TAB'},
+  {s : '\u000A', t : 'LINE FEED OR NEW LINE'},
+  {s : '\u000B', t : 'VERTICAL TAB'},
+  {s : '\u000C', t : 'FORMFEED'},
+  {s : '\u000D', t : 'CARRIAGE RETURN'},
+  {s : '\u0020', t : 'SPACE'},
+  {s : '\u00A0', t : 'NO-BREAK SPACE'},
+  {s : '\u2000', t : 'EN QUAD'},
+  {s : '\u2001', t : 'EM QUAD'},
+  {s : '\u2002', t : 'EN SPACE'},
+  {s : '\u2003', t : 'EM SPACE'},
+  {s : '\u2004', t : 'THREE-PER-EM SPACE'},
+  {s : '\u2005', t : 'FOUR-PER-EM SPACE'},
+  {s : '\u2006', t : 'SIX-PER-EM SPACE'},
+  {s : '\u2007', t : 'FIGURE SPACE'},
+  {s : '\u2008', t : 'PUNCTUATION SPACE'},
+  {s : '\u2009', t : 'THIN SPACE'},
+  {s : '\u200A', t : 'HAIR SPACE'},
+  {s : '\u3000', t : 'IDEOGRAPHIC SPACE'},
+  {s : '\u2028', t : 'LINE SEPARATOR'},
+  {s : '\u2029', t : 'PARAGRAPH SEPARATOR'},
+  {s : '\u200B', t : 'ZERO WIDTH SPACE (category Cf)'}
+];
+
+for (var i = 0; i < whitespace.length; i++) {
+  assertEquals(whitespace[i].s.trim(), '');
+  assertEquals(whitespace[i].s.trimLeft(), '');
+  assertEquals(whitespace[i].s.trimRight(), '');
+  wsString += whitespace[i].s;
+}
+
+trimString      = wsString   + testString + wsString;
+leftTrimString  = testString + wsString;  // Trimmed from the left.
+rightTrimString = wsString   + testString;  // Trimmed from the right.
+
+assertEquals(wsString.trim(),      '');
+assertEquals(wsString.trimLeft(),  '');
+assertEquals(wsString.trimRight(), '');
+
+assertEquals(trimString.trim(),      testString);
+assertEquals(trimString.trimLeft(),  leftTrimString);
+assertEquals(trimString.trimRight(), rightTrimString);
+
+assertEquals(leftTrimString.trim(),      testString);
+assertEquals(leftTrimString.trimLeft(),  leftTrimString);
+assertEquals(leftTrimString.trimRight(), testString);
+
+assertEquals(rightTrimString.trim(),      testString);
+assertEquals(rightTrimString.trimLeft(),  testString);
+assertEquals(rightTrimString.trimRight(), rightTrimString);
+
+var testValues = [0, Infinity, NaN, true, false, ({}), ['an','array'],
+  ({toString:function(){return 'wibble'}})
+];
+
+for (var i = 0; i < testValues.length; i++) {
+  assertEquals(trim.call(testValues[i]), String(testValues[i]));
+  assertEquals(trimLeft.call(testValues[i]), String(testValues[i]));
+  assertEquals(trimRight.call(testValues[i]), String(testValues[i]));
+}
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index 46a00f4..5e2bb88 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -256,14 +256,16 @@
         '../../src/execution.h',
         '../../src/factory.cc',
         '../../src/factory.h',
+        '../../src/fast-codegen.cc',
+        '../../src/fast-codegen.h',
         '../../src/flag-definitions.h',
         '../../src/flags.cc',
         '../../src/flags.h',
+        '../../src/frame-element.cc',
+        '../../src/frame-element.h',
         '../../src/frames-inl.h',
         '../../src/frames.cc',
         '../../src/frames.h',
-        '../../src/frame-element.cc',
-        '../../src/frame-element.h',
         '../../src/func-name-inferrer.cc',
         '../../src/func-name-inferrer.h',
         '../../src/global-handles.cc',
@@ -291,11 +293,12 @@
         '../../src/jsregexp.h',
         '../../src/list-inl.h',
         '../../src/list.h',
-        '../../src/log.cc',
+        '../../src/location.h',
         '../../src/log-inl.h',
-        '../../src/log.h',
         '../../src/log-utils.cc',
         '../../src/log-utils.h',
+        '../../src/log.cc',
+        '../../src/log.h',
         '../../src/macro-assembler.h',
         '../../src/mark-compact.cc',
         '../../src/mark-compact.h',
@@ -394,6 +397,7 @@
             '../../src/arm/cpu-arm.cc',
             '../../src/arm/debug-arm.cc',
             '../../src/arm/disasm-arm.cc',
+            '../../src/arm/fast-codegen-arm.cc',
             '../../src/arm/frames-arm.cc',
             '../../src/arm/frames-arm.h',
             '../../src/arm/ic-arm.cc',
@@ -423,6 +427,7 @@
             '../../src/ia32/cpu-ia32.cc',
             '../../src/ia32/debug-ia32.cc',
             '../../src/ia32/disasm-ia32.cc',
+            '../../src/ia32/fast-codegen-ia32.cc',
             '../../src/ia32/frames-ia32.cc',
             '../../src/ia32/frames-ia32.h',
             '../../src/ia32/ic-ia32.cc',
@@ -451,6 +456,7 @@
             '../../src/x64/cpu-x64.cc',
             '../../src/x64/debug-x64.cc',
             '../../src/x64/disasm-x64.cc',
+            '../../src/x64/fast-codegen-x64.cc',
             '../../src/x64/frames-x64.cc',
             '../../src/x64/frames-x64.h',
             '../../src/x64/ic-x64.cc',
diff --git a/tools/tickprocessor.js b/tools/tickprocessor.js
index 84f0eea..fd23987 100644
--- a/tools/tickprocessor.js
+++ b/tools/tickprocessor.js
@@ -75,7 +75,18 @@
       'tick': { parsers: [this.createAddressParser('code'),
           this.createAddressParser('stack'), parseInt, 'var-args'],
           processor: this.processTick, backrefs: true },
+      'heap-sample-begin': { parsers: [null, null, parseInt],
+          processor: this.processHeapSampleBegin },
+      'heap-sample-end': { parsers: [null, null],
+          processor: this.processHeapSampleEnd },
+      'heap-js-prod-item': { parsers: [null, 'var-args'],
+          processor: this.processJSProducer, backrefs: true },
+      // Ignored events.
       'profiler': null,
+      'heap-sample-stats': null,
+      'heap-sample-item': null,
+      'heap-js-cons-item': null,
+      'heap-js-ret-item': null,
       // Obsolete row types.
       'code-allocate': null,
       'begin-code-region': null,
@@ -113,6 +124,9 @@
   // Count each tick as a time unit.
   this.viewBuilder_ = new devtools.profiler.ViewBuilder(1);
   this.lastLogFileName_ = null;
+
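+  // State for heap producer profiling (see processHeapSample* below).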
+  this.generation_ = 1;
+  this.currentProducerProfile_ = null;
 };
 inherits(TickProcessor, devtools.profiler.LogReader);
 
@@ -220,6 +234,41 @@
 };
 
 
+TickProcessor.prototype.processHeapSampleBegin = function(space, state, ticks) {
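+  // Only profile the JS heap; each sample starts a fresh producer call tree.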
+  if (space != 'Heap') return;
+  this.currentProducerProfile_ = new devtools.profiler.CallTree();
+};
+
+
+TickProcessor.prototype.processHeapSampleEnd = function(space, state) {
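+  // Print the producer profile gathered since the matching heap-sample-begin.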
+  if (space != 'Heap' || !this.currentProducerProfile_) return;
+
+  print('Generation ' + this.generation_ + ':');
+  var tree = this.currentProducerProfile_;
+  tree.computeTotalWeights();
+  var producersView = this.viewBuilder_.buildView(tree);
+  // Sort by total time, desc, then by name, desc.
+  producersView.sort(function(rec1, rec2) {
+      return rec2.totalTime - rec1.totalTime ||
+          (rec2.internalFuncName < rec1.internalFuncName ? -1 : 1); });
+  this.printHeavyProfile(producersView.head.children);
+
+  this.currentProducerProfile_ = null;
+  this.generation_++;
+};
+
+
+TickProcessor.prototype.processJSProducer = function(constructor, stack) {
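+  // Record the allocation's constructor and resolved creation stack
+  // as a path in the current producer call tree.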
+  if (!this.currentProducerProfile_) return;
+  if (stack.length == 0) return;
+  var first = stack.shift();
+  var processedStack =
+      this.profile_.resolveAndFilterFuncs_(this.processStack(first, stack));
+  processedStack.unshift(constructor);
+  this.currentProducerProfile_.addPath(processedStack);
+};
+
+
 TickProcessor.prototype.printStatistics = function() {
   print('Statistical profiling result from ' + this.lastLogFileName_ +
         ', (' + this.ticks_.total +
diff --git a/tools/v8.xcodeproj/project.pbxproj b/tools/v8.xcodeproj/project.pbxproj
index 2d38681..d2af626 100644
--- a/tools/v8.xcodeproj/project.pbxproj
+++ b/tools/v8.xcodeproj/project.pbxproj
@@ -56,6 +56,7 @@
 		89495E480E79FC23001F68C3 /* compilation-cache.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89495E460E79FC23001F68C3 /* compilation-cache.cc */; };
 		89495E490E79FC23001F68C3 /* compilation-cache.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89495E460E79FC23001F68C3 /* compilation-cache.cc */; };
 		8956B6CF0F5D86730033B5A2 /* debug-agent.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8956B6CD0F5D86570033B5A2 /* debug-agent.cc */; };
+		895FA753107FFED3006F39D4 /* constants-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 895FA748107FFE73006F39D4 /* constants-arm.cc */; };
 		896FD03A0E78D717003DFB6A /* libv8-arm.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 89F23C870E78D5B2006B2466 /* libv8-arm.a */; };
 		897F767F0E71B690007ACF34 /* shell.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1B50E719C0900D62E90 /* shell.cc */; };
 		897F76850E71B6B1007ACF34 /* libv8.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 8970F2F00E719FB2006AE7B5 /* libv8.a */; };
@@ -309,6 +310,14 @@
 		89495E470E79FC23001F68C3 /* compilation-cache.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "compilation-cache.h"; sourceTree = "<group>"; };
 		8956B6CD0F5D86570033B5A2 /* debug-agent.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "debug-agent.cc"; sourceTree = "<group>"; };
 		8956B6CE0F5D86570033B5A2 /* debug-agent.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "debug-agent.h"; sourceTree = "<group>"; };
+		895FA720107FFB15006F39D4 /* jump-target-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "jump-target-inl.h"; sourceTree = "<group>"; };
+		895FA725107FFB57006F39D4 /* codegen-ia32-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "codegen-ia32-inl.h"; path = "ia32/codegen-ia32-inl.h"; sourceTree = "<group>"; };
+		895FA72A107FFB85006F39D4 /* register-allocator-ia32-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "register-allocator-ia32-inl.h"; path = "ia32/register-allocator-ia32-inl.h"; sourceTree = "<group>"; };
+		895FA72B107FFB85006F39D4 /* register-allocator-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "register-allocator-ia32.h"; path = "ia32/register-allocator-ia32.h"; sourceTree = "<group>"; };
+		895FA748107FFE73006F39D4 /* constants-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "constants-arm.cc"; path = "arm/constants-arm.cc"; sourceTree = "<group>"; };
+		895FA74B107FFE82006F39D4 /* codegen-arm-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "codegen-arm-inl.h"; path = "arm/codegen-arm-inl.h"; sourceTree = "<group>"; };
+		895FA750107FFEAE006F39D4 /* register-allocator-arm-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "register-allocator-arm-inl.h"; path = "arm/register-allocator-arm-inl.h"; sourceTree = "<group>"; };
+		895FA751107FFEAE006F39D4 /* register-allocator-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "register-allocator-arm.h"; path = "arm/register-allocator-arm.h"; sourceTree = "<group>"; };
 		8964482B0E9C00F700E7C516 /* codegen-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "codegen-ia32.h"; path = "ia32/codegen-ia32.h"; sourceTree = "<group>"; };
 		896448BC0E9D530500E7C516 /* codegen-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "codegen-arm.h"; path = "arm/codegen-arm.h"; sourceTree = "<group>"; };
 		8970F2F00E719FB2006AE7B5 /* libv8.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libv8.a; sourceTree = BUILT_PRODUCTS_DIR; };
@@ -662,8 +671,10 @@
 				897FF1110E719B8F00D62E90 /* code-stubs.cc */,
 				897FF1120E719B8F00D62E90 /* code-stubs.h */,
 				897FF1130E719B8F00D62E90 /* code.h */,
+				895FA74B107FFE82006F39D4 /* codegen-arm-inl.h */,
 				897FF1140E719B8F00D62E90 /* codegen-arm.cc */,
 				896448BC0E9D530500E7C516 /* codegen-arm.h */,
+				895FA725107FFB57006F39D4 /* codegen-ia32-inl.h */,
 				897FF1150E719B8F00D62E90 /* codegen-ia32.cc */,
 				8964482B0E9C00F700E7C516 /* codegen-ia32.h */,
 				897FF1160E719B8F00D62E90 /* codegen-inl.h */,
@@ -673,6 +684,7 @@
 				89495E470E79FC23001F68C3 /* compilation-cache.h */,
 				897FF1190E719B8F00D62E90 /* compiler.cc */,
 				897FF11A0E719B8F00D62E90 /* compiler.h */,
+				895FA748107FFE73006F39D4 /* constants-arm.cc */,
 				897FF11B0E719B8F00D62E90 /* constants-arm.h */,
 				897FF11C0E719B8F00D62E90 /* contexts.cc */,
 				897FF11D0E719B8F00D62E90 /* contexts.h */,
@@ -739,6 +751,7 @@
 				89A15C670EE4665300B48DEB /* interpreter-irregexp.h */,
 				897FF14E0E719B8F00D62E90 /* jsregexp.cc */,
 				897FF14F0E719B8F00D62E90 /* jsregexp.h */,
+				895FA720107FFB15006F39D4 /* jump-target-inl.h */,
 				58950D4E0F55514900F3E8BA /* jump-target-arm.cc */,
 				58950D4F0F55514900F3E8BA /* jump-target-ia32.cc */,
 				58950D500F55514900F3E8BA /* jump-target.cc */,
@@ -794,8 +807,12 @@
 				89A15C7A0EE466D000B48DEB /* regexp-macro-assembler.h */,
 				8944AD0E0F1D4D3A0028D560 /* regexp-stack.cc */,
 				8944AD0F0F1D4D3A0028D560 /* regexp-stack.h */,
+				895FA750107FFEAE006F39D4 /* register-allocator-arm-inl.h */,
 				58950D520F55514900F3E8BA /* register-allocator-arm.cc */,
+				895FA751107FFEAE006F39D4 /* register-allocator-arm.h */,
+				895FA72A107FFB85006F39D4 /* register-allocator-ia32-inl.h */,
 				58950D530F55514900F3E8BA /* register-allocator-ia32.cc */,
+				895FA72B107FFB85006F39D4 /* register-allocator-ia32.h */,
 				893A722D0F7B4A7100303DD2 /* register-allocator-inl.h */,
 				58950D540F55514900F3E8BA /* register-allocator.cc */,
 				58950D550F55514900F3E8BA /* register-allocator.h */,
@@ -1238,6 +1255,7 @@
 				89F23C4B0E78D5B2006B2466 /* codegen.cc in Sources */,
 				89495E490E79FC23001F68C3 /* compilation-cache.cc in Sources */,
 				89F23C4C0E78D5B2006B2466 /* compiler.cc in Sources */,
+				895FA753107FFED3006F39D4 /* constants-arm.cc in Sources */,
 				89F23C4D0E78D5B2006B2466 /* contexts.cc in Sources */,
 				89F23C4E0E78D5B2006B2466 /* conversions.cc in Sources */,
 				89F23C4F0E78D5B2006B2466 /* counters.cc in Sources */,
@@ -1467,6 +1485,7 @@
 					V8_NATIVE_REGEXP,
 					DEBUG,
 					V8_ENABLE_CHECKS,
+					ENABLE_DEBUGGER_SUPPORT,
 				);
 				HEADER_SEARCH_PATHS = ../src;
 				PRODUCT_NAME = v8_shell;
@@ -1481,6 +1500,7 @@
 					V8_TARGET_ARCH_IA32,
 					V8_NATIVE_REGEXP,
 					NDEBUG,
+					ENABLE_DEBUGGER_SUPPORT,
 				);
 				HEADER_SEARCH_PATHS = ../src;
 				PRODUCT_NAME = v8_shell;
@@ -1514,6 +1534,7 @@
 					V8_TARGET_ARCH_IA32,
 					V8_NATIVE_REGEXP,
 					NDEBUG,
+					ENABLE_DEBUGGER_SUPPORT,
 				);
 				HEADER_SEARCH_PATHS = ../src;
 				PRODUCT_NAME = v8;
@@ -1524,6 +1545,10 @@
 		897F767C0E71B4CC007ACF34 /* Debug */ = {
 			isa = XCBuildConfiguration;
 			buildSettings = {
+				GCC_PREPROCESSOR_DEFINITIONS = (
+					"$(GCC_PREPROCESSOR_DEFINITIONS)",
+					V8_TARGET_ARCH_IA32,
+				);
 				HEADER_SEARCH_PATHS = ../src;
 				PRODUCT_NAME = v8_shell;
 			};
@@ -1532,6 +1557,10 @@
 		897F767D0E71B4CC007ACF34 /* Release */ = {
 			isa = XCBuildConfiguration;
 			buildSettings = {
+				GCC_PREPROCESSOR_DEFINITIONS = (
+					"$(GCC_PREPROCESSOR_DEFINITIONS)",
+					V8_TARGET_ARCH_IA32,
+				);
 				HEADER_SEARCH_PATHS = ../src;
 				PRODUCT_NAME = v8_shell;
 			};
@@ -1571,6 +1600,10 @@
 		89F23C930E78D5B6006B2466 /* Debug */ = {
 			isa = XCBuildConfiguration;
 			buildSettings = {
+				GCC_PREPROCESSOR_DEFINITIONS = (
+					"$(GCC_PREPROCESSOR_DEFINITIONS)",
+					V8_TARGET_ARCH_ARM,
+				);
 				HEADER_SEARCH_PATHS = ../src;
 				PRODUCT_NAME = "v8_shell-arm";
 			};
@@ -1579,6 +1612,10 @@
 		89F23C940E78D5B6006B2466 /* Release */ = {
 			isa = XCBuildConfiguration;
 			buildSettings = {
+				GCC_PREPROCESSOR_DEFINITIONS = (
+					"$(GCC_PREPROCESSOR_DEFINITIONS)",
+					V8_TARGET_ARCH_ARM,
+				);
 				HEADER_SEARCH_PATHS = ../src;
 				PRODUCT_NAME = "v8_shell-arm";
 			};
diff --git a/tools/visual_studio/v8_base.vcproj b/tools/visual_studio/v8_base.vcproj
index 7a013c0..fc7402a 100644
--- a/tools/visual_studio/v8_base.vcproj
+++ b/tools/visual_studio/v8_base.vcproj
@@ -388,6 +388,18 @@
 				RelativePath="..\..\src\factory.h"
 				>
 			</File>
+			<File
+				RelativePath="..\..\src\ia32\fast-codegen-ia32.cc"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\fast-codegen.cc"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\fast-codegen.h"
+				>
+			</File>
 			<File
 				RelativePath="..\..\src\flags.cc"
 				>
@@ -545,6 +557,10 @@
 				>
 			</File>
 			<File
+				RelativePath="..\..\src\location.h"
+				>
+			</File>
+			<File
 				RelativePath="..\..\src\log.cc"
 				>
 			</File>
@@ -945,7 +961,7 @@
 			Name="include"
 			>
 			<File
-				RelativePath="..\..\include\debug.h"
+				RelativePath="..\..\include\v8-debug.h"
 				>
 			</File>
 			<File
diff --git a/tools/visual_studio/v8_base_arm.vcproj b/tools/visual_studio/v8_base_arm.vcproj
index abdb418..fca4a96 100644
--- a/tools/visual_studio/v8_base_arm.vcproj
+++ b/tools/visual_studio/v8_base_arm.vcproj
@@ -396,6 +396,18 @@
 				RelativePath="..\..\src\factory.h"
 				>
 			</File>
+			<File
+				RelativePath="..\..\src\arm\fast-codegen-arm.cc"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\fast-codegen.cc"
+				>
+			</File>
+			<File
+				RelativePath="..\..\src\fast-codegen.h"
+				>
+			</File>
 			<File
 				RelativePath="..\..\src\flags.cc"
 				>
@@ -549,6 +561,10 @@
 				>
 			</File>
 			<File
+				RelativePath="..\..\src\location.h"
+				>
+			</File>
+			<File
 				RelativePath="..\..\src\log.cc"
 				>
 			</File>
@@ -957,7 +973,7 @@
 			Name="include"
 			>
 			<File
-				RelativePath="..\..\include\debug.h"
+				RelativePath="..\..\include\v8-debug.h"
 				>
 			</File>
 			<File
diff --git a/tools/visual_studio/v8_base_x64.vcproj b/tools/visual_studio/v8_base_x64.vcproj
index 7b8b4d3..a8c8b55 100644
--- a/tools/visual_studio/v8_base_x64.vcproj
+++ b/tools/visual_studio/v8_base_x64.vcproj
@@ -945,7 +945,7 @@
 			Name="include"
 			>
 			<File
-				RelativePath="..\..\include\debug.h"
+				RelativePath="..\..\include\v8-debug.h"
 				>
 			</File>
 			<File